--- /dev/null
+branch: dev.regabi
+parent-branch: master
typ := p.types[index]
format := typ + " " + in // e.g., "*Node %n"
+ // Do not bother reporting basic types, nor %v, %T, %p.
+ // Vet handles basic types, and those three formats apply to all types.
+ if !strings.Contains(typ, ".") || (in == "%v" || in == "%T" || in == "%p") {
+ return in
+ }
+
// check if format is known
out, known := knownFormats[format]
// typeString returns a string representation of n.
func typeString(typ types.Type) string {
- return filepath.ToSlash(typ.String())
+ s := filepath.ToSlash(typ.String())
+
+ // Do not bother distinguishing the concrete IR node types:
+ // Report all the concrete IR types as Node, to shorten fmtmap.
+ // (Without this, every concrete IR node type would need its own
+ // set of entries in the knownFormats table.)
+ const ir = "cmd/compile/internal/ir."
+ // Note: && binds tighter than ||, so the HasPrefix clause below
+ // only collapses pointer types whose names end in Expr or Stmt.
+ if s == "*"+ir+"Name" || s == "*"+ir+"Func" || s == "*"+ir+"Decl" ||
+ s == ir+"Ntype" || s == ir+"Expr" || s == ir+"Stmt" ||
+ strings.HasPrefix(s, "*"+ir) && (strings.HasSuffix(s, "Expr") || strings.HasSuffix(s, "Stmt")) {
+ return "cmd/compile/internal/ir.Node"
+ }
+
+ return s
}
// stringLit returns the unquoted string value and true if
// An absent entry means that the format is not recognized as valid.
// An empty new format means that the format should remain unchanged.
var knownFormats = map[string]string{
- "*bytes.Buffer %s": "",
- "*cmd/compile/internal/gc.EscLocation %v": "",
- "*cmd/compile/internal/gc.Mpflt %v": "",
- "*cmd/compile/internal/gc.Mpint %v": "",
- "*cmd/compile/internal/gc.Node %#v": "",
- "*cmd/compile/internal/gc.Node %+S": "",
- "*cmd/compile/internal/gc.Node %+v": "",
- "*cmd/compile/internal/gc.Node %L": "",
- "*cmd/compile/internal/gc.Node %S": "",
- "*cmd/compile/internal/gc.Node %j": "",
- "*cmd/compile/internal/gc.Node %p": "",
- "*cmd/compile/internal/gc.Node %v": "",
- "*cmd/compile/internal/ssa.Block %s": "",
- "*cmd/compile/internal/ssa.Block %v": "",
- "*cmd/compile/internal/ssa.Func %s": "",
- "*cmd/compile/internal/ssa.Func %v": "",
- "*cmd/compile/internal/ssa.Register %s": "",
- "*cmd/compile/internal/ssa.Register %v": "",
- "*cmd/compile/internal/ssa.SparseTreeNode %v": "",
- "*cmd/compile/internal/ssa.Value %s": "",
- "*cmd/compile/internal/ssa.Value %v": "",
- "*cmd/compile/internal/ssa.sparseTreeMapEntry %v": "",
- "*cmd/compile/internal/types.Field %p": "",
- "*cmd/compile/internal/types.Field %v": "",
- "*cmd/compile/internal/types.Sym %0S": "",
- "*cmd/compile/internal/types.Sym %S": "",
- "*cmd/compile/internal/types.Sym %p": "",
- "*cmd/compile/internal/types.Sym %v": "",
- "*cmd/compile/internal/types.Type %#L": "",
- "*cmd/compile/internal/types.Type %#v": "",
- "*cmd/compile/internal/types.Type %+v": "",
- "*cmd/compile/internal/types.Type %-S": "",
- "*cmd/compile/internal/types.Type %0S": "",
- "*cmd/compile/internal/types.Type %L": "",
- "*cmd/compile/internal/types.Type %S": "",
- "*cmd/compile/internal/types.Type %p": "",
- "*cmd/compile/internal/types.Type %s": "",
- "*cmd/compile/internal/types.Type %v": "",
- "*cmd/internal/obj.Addr %v": "",
- "*cmd/internal/obj.LSym %v": "",
- "*math/big.Float %f": "",
- "*math/big.Int %#x": "",
- "*math/big.Int %s": "",
- "*math/big.Int %v": "",
- "[16]byte %x": "",
- "[]*cmd/compile/internal/ssa.Block %v": "",
- "[]*cmd/compile/internal/ssa.Value %v": "",
- "[][]string %q": "",
- "[]byte %s": "",
- "[]byte %x": "",
- "[]cmd/compile/internal/ssa.Edge %v": "",
- "[]cmd/compile/internal/ssa.ID %v": "",
- "[]cmd/compile/internal/ssa.posetNode %v": "",
- "[]cmd/compile/internal/ssa.posetUndo %v": "",
- "[]cmd/compile/internal/syntax.token %s": "",
- "[]string %v": "",
- "[]uint32 %v": "",
- "bool %v": "",
- "byte %08b": "",
- "byte %c": "",
- "byte %q": "",
- "byte %v": "",
- "cmd/compile/internal/arm.shift %d": "",
- "cmd/compile/internal/gc.Class %d": "",
- "cmd/compile/internal/gc.Class %s": "",
- "cmd/compile/internal/gc.Class %v": "",
- "cmd/compile/internal/gc.Ctype %d": "",
- "cmd/compile/internal/gc.Ctype %v": "",
- "cmd/compile/internal/gc.Nodes %#v": "",
- "cmd/compile/internal/gc.Nodes %+v": "",
- "cmd/compile/internal/gc.Nodes %.v": "",
- "cmd/compile/internal/gc.Nodes %v": "",
- "cmd/compile/internal/gc.Op %#v": "",
- "cmd/compile/internal/gc.Op %v": "",
- "cmd/compile/internal/gc.Val %#v": "",
- "cmd/compile/internal/gc.Val %T": "",
- "cmd/compile/internal/gc.Val %v": "",
- "cmd/compile/internal/gc.fmtMode %d": "",
- "cmd/compile/internal/gc.initKind %d": "",
- "cmd/compile/internal/gc.itag %v": "",
- "cmd/compile/internal/ssa.BranchPrediction %d": "",
- "cmd/compile/internal/ssa.Edge %v": "",
- "cmd/compile/internal/ssa.GCNode %v": "",
- "cmd/compile/internal/ssa.ID %d": "",
- "cmd/compile/internal/ssa.ID %v": "",
- "cmd/compile/internal/ssa.LocalSlot %s": "",
- "cmd/compile/internal/ssa.LocalSlot %v": "",
- "cmd/compile/internal/ssa.Location %s": "",
- "cmd/compile/internal/ssa.Op %s": "",
- "cmd/compile/internal/ssa.Op %v": "",
- "cmd/compile/internal/ssa.Sym %v": "",
- "cmd/compile/internal/ssa.ValAndOff %s": "",
- "cmd/compile/internal/ssa.domain %v": "",
- "cmd/compile/internal/ssa.flagConstant %s": "",
- "cmd/compile/internal/ssa.posetNode %v": "",
- "cmd/compile/internal/ssa.posetTestOp %v": "",
- "cmd/compile/internal/ssa.rbrank %d": "",
- "cmd/compile/internal/ssa.regMask %d": "",
- "cmd/compile/internal/ssa.register %d": "",
- "cmd/compile/internal/ssa.relation %s": "",
- "cmd/compile/internal/syntax.Error %q": "",
- "cmd/compile/internal/syntax.Expr %#v": "",
- "cmd/compile/internal/syntax.LitKind %d": "",
- "cmd/compile/internal/syntax.Node %T": "",
- "cmd/compile/internal/syntax.Operator %s": "",
- "cmd/compile/internal/syntax.Pos %s": "",
- "cmd/compile/internal/syntax.Pos %v": "",
- "cmd/compile/internal/syntax.position %s": "",
- "cmd/compile/internal/syntax.token %q": "",
- "cmd/compile/internal/syntax.token %s": "",
- "cmd/compile/internal/types.EType %d": "",
- "cmd/compile/internal/types.EType %s": "",
- "cmd/compile/internal/types.EType %v": "",
- "cmd/internal/obj.ABI %v": "",
- "error %v": "",
- "float64 %.2f": "",
- "float64 %.3f": "",
- "float64 %.6g": "",
- "float64 %g": "",
- "int %#x": "",
- "int %-12d": "",
- "int %-6d": "",
- "int %-8o": "",
- "int %02d": "",
- "int %6d": "",
- "int %c": "",
- "int %d": "",
- "int %v": "",
- "int %x": "",
- "int16 %d": "",
- "int16 %x": "",
- "int32 %#x": "",
- "int32 %d": "",
- "int32 %v": "",
- "int32 %x": "",
- "int64 %#x": "",
- "int64 %+d": "",
- "int64 %-10d": "",
- "int64 %.5d": "",
- "int64 %d": "",
- "int64 %v": "",
- "int64 %x": "",
- "int8 %d": "",
- "int8 %v": "",
- "int8 %x": "",
- "interface{} %#v": "",
- "interface{} %T": "",
- "interface{} %p": "",
- "interface{} %q": "",
- "interface{} %s": "",
- "interface{} %v": "",
- "map[*cmd/compile/internal/gc.Node]*cmd/compile/internal/ssa.Value %v": "",
- "map[*cmd/compile/internal/gc.Node][]*cmd/compile/internal/gc.Node %v": "",
- "map[cmd/compile/internal/ssa.ID]uint32 %v": "",
- "map[int64]uint32 %v": "",
- "math/big.Accuracy %s": "",
- "reflect.Type %s": "",
- "rune %#U": "",
- "rune %c": "",
- "rune %q": "",
- "string %-*s": "",
- "string %-16s": "",
- "string %-6s": "",
- "string %q": "",
- "string %s": "",
- "string %v": "",
- "time.Duration %d": "",
- "time.Duration %v": "",
- "uint %04x": "",
- "uint %5d": "",
- "uint %d": "",
- "uint %x": "",
- "uint16 %d": "",
- "uint16 %x": "",
- "uint32 %#U": "",
- "uint32 %#x": "",
- "uint32 %d": "",
- "uint32 %v": "",
- "uint32 %x": "",
- "uint64 %08x": "",
- "uint64 %b": "",
- "uint64 %d": "",
- "uint64 %x": "",
- "uint8 %#x": "",
- "uint8 %d": "",
- "uint8 %v": "",
- "uint8 %x": "",
- "uintptr %d": "",
+ // NOTE(review): entries are kept sorted by key ("type format");
+ // preserve that order when adding new formats by hand.
+ "*bytes.Buffer %s": "",
+ "*cmd/compile/internal/ssa.Block %s": "",
+ "*cmd/compile/internal/ssa.Func %s": "",
+ "*cmd/compile/internal/ssa.Register %s": "",
+ "*cmd/compile/internal/ssa.Value %s": "",
+ "*cmd/compile/internal/types.Sym %+v": "",
+ "*cmd/compile/internal/types.Sym %S": "",
+ "*cmd/compile/internal/types.Type %+v": "",
+ "*cmd/compile/internal/types.Type %-S": "",
+ "*cmd/compile/internal/types.Type %L": "",
+ "*cmd/compile/internal/types.Type %S": "",
+ "*cmd/compile/internal/types.Type %s": "",
+ "*math/big.Float %f": "",
+ "*math/big.Int %s": "",
+ "[]cmd/compile/internal/syntax.token %s": "",
+ "cmd/compile/internal/arm.shift %d": "",
+ "cmd/compile/internal/gc.RegIndex %d": "",
+ "cmd/compile/internal/gc.initKind %d": "",
+ "cmd/compile/internal/ir.Class %d": "",
+ "cmd/compile/internal/ir.Node %+v": "",
+ "cmd/compile/internal/ir.Node %L": "",
+ "cmd/compile/internal/ir.Nodes %+v": "",
+ "cmd/compile/internal/ir.Nodes %.v": "",
+ "cmd/compile/internal/ir.Op %+v": "",
+ "cmd/compile/internal/ssa.Aux %#v": "",
+ "cmd/compile/internal/ssa.Aux %q": "",
+ "cmd/compile/internal/ssa.Aux %s": "",
+ "cmd/compile/internal/ssa.BranchPrediction %d": "",
+ "cmd/compile/internal/ssa.ID %d": "",
+ "cmd/compile/internal/ssa.LocalSlot %s": "",
+ "cmd/compile/internal/ssa.Location %s": "",
+ "cmd/compile/internal/ssa.Op %s": "",
+ "cmd/compile/internal/ssa.ValAndOff %s": "",
+ "cmd/compile/internal/ssa.flagConstant %s": "",
+ "cmd/compile/internal/ssa.rbrank %d": "",
+ "cmd/compile/internal/ssa.regMask %d": "",
+ "cmd/compile/internal/ssa.register %d": "",
+ "cmd/compile/internal/ssa.relation %s": "",
+ "cmd/compile/internal/syntax.Error %q": "",
+ "cmd/compile/internal/syntax.Expr %#v": "",
+ "cmd/compile/internal/syntax.LitKind %d": "",
+ "cmd/compile/internal/syntax.Operator %s": "",
+ "cmd/compile/internal/syntax.Pos %s": "",
+ "cmd/compile/internal/syntax.position %s": "",
+ "cmd/compile/internal/syntax.token %q": "",
+ "cmd/compile/internal/syntax.token %s": "",
+ "cmd/compile/internal/types.Kind %d": "",
+ "cmd/compile/internal/types.Kind %s": "",
+ "go/constant.Value %#v": "",
+ "math/big.Accuracy %s": "",
+ "reflect.Type %s": "",
+ "time.Duration %d": "",
}
package amd64
import (
+ "cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/internal/obj"
"cmd/internal/obj/x86"
if cnt%int64(gc.Widthreg) != 0 {
// should only happen with nacl
if cnt%int64(gc.Widthptr) != 0 {
- gc.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
+ base.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
}
if *state&ax == 0 {
p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
"fmt"
"math"
+ "cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
r := v.Reg()
// See the comments in cmd/internal/obj/x86/obj6.go
// near CanUse1InsnTLS for a detailed explanation of these instructions.
- if x86.CanUse1InsnTLS(gc.Ctxt) {
+ if x86.CanUse1InsnTLS(base.Ctxt) {
// MOVQ (TLS), r
p := s.Prog(x86.AMOVQ)
p.From.Type = obj.TYPE_MEM
}
p := s.Prog(mov)
p.From.Type = obj.TYPE_ADDR
- p.From.Offset = -gc.Ctxt.FixedFrameSize() // 0 on amd64, just to be consistent with other architectures
+ p.From.Offset = -base.Ctxt.FixedFrameSize() // 0 on amd64, just to be consistent with other architectures
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
- if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
- gc.Warnl(v.Pos, "generated nil check")
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
}
case ssa.OpAMD64MOVBatomicload, ssa.OpAMD64MOVLatomicload, ssa.OpAMD64MOVQatomicload:
p := s.Prog(v.Op.Asm())
"math"
"math/bits"
+ "cmd/compile/internal/base"
"cmd/compile/internal/gc"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
- case *gc.Node:
+ case *ir.Name:
wantreg = "SP"
gc.AddAux(&p.From, v)
case nil:
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
- if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
- gc.Warnl(v.Pos, "generated nil check")
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
}
case ssa.OpARMLoweredZero:
// MOVW.P Rarg2, 4(R1)
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(arm.AMOVW)
p.From.Type = obj.TYPE_ADDR
- p.From.Offset = -gc.Ctxt.FixedFrameSize()
+ p.From.Offset = -base.Ctxt.FixedFrameSize()
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
import (
"math"
+ "cmd/compile/internal/base"
"cmd/compile/internal/gc"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
- case *gc.Node:
+ case *ir.Name:
wantreg = "SP"
gc.AddAux(&p.From, v)
case nil:
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
- if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Line==1 in generated wrappers
- gc.Warnl(v.Pos, "generated nil check")
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Line==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
}
case ssa.OpARM64Equal,
ssa.OpARM64NotEqual,
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(arm64.AMOVD)
p.From.Type = obj.TYPE_ADDR
- p.From.Offset = -gc.Ctxt.FixedFrameSize()
+ p.From.Offset = -base.Ctxt.FixedFrameSize()
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package base
+
+import (
+ "os"
+
+ "cmd/internal/obj"
+)
+
+// Ctxt is the compile-wide *obj.Link context used for code generation.
+var Ctxt *obj.Link
+
+// atExitFuncs holds the functions registered with AtExit; Exit runs
+// them in reverse registration (LIFO) order.
+var atExitFuncs []func()
+
+// AtExit schedules f to be run when Exit is called.
+func AtExit(f func()) {
+ atExitFuncs = append(atExitFuncs, f)
+}
+
+// Exit runs the registered AtExit functions in LIFO order and then
+// terminates the process with the given status code.
+func Exit(code int) {
+ for i := len(atExitFuncs) - 1; i >= 0; i-- {
+ f := atExitFuncs[i]
+ // Drop f from the list before running it, so that a nested
+ // Exit call from inside f cannot run f a second time.
+ atExitFuncs = atExitFuncs[:i]
+ f()
+ }
+ os.Exit(code)
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Debug arguments, set by -d flag.
+
+package base
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "strconv"
+ "strings"
+
+ "cmd/internal/objabi"
+)
+
+// Debug holds the parsed debugging configuration values.
+var Debug = DebugFlags{
+ // Fieldtrack aliases objabi.Fieldtrack_enabled so that
+ // -d fieldtrack writes straight through to objabi.
+ Fieldtrack: &objabi.Fieldtrack_enabled,
+}
+
+// DebugFlags defines the debugging configuration values (see var Debug).
+// Each struct field is a different value, named for the lower-case of the field name.
+// Each field must be an int or string and must have a `help` struct tag.
+//
+// The -d option takes a comma-separated list of settings.
+// Each setting is name=value; for ints, name is short for name=1.
+type DebugFlags struct {
+ Append int `help:"print information about append compilation"`
+ Checkptr int `help:"instrument unsafe pointer conversions"`
+ Closure int `help:"print information about closure compilation"`
+ CompileLater int `help:"compile functions as late as possible"`
+ DclStack int `help:"run internal dclstack check"`
+ Defer int `help:"print information about defer compilation"`
+ DisableNil int `help:"disable nil checks"`
+ DumpPtrs int `help:"show Node pointers values in dump output"`
+ DwarfInl int `help:"print information about DWARF inlined function creation"`
+ Export int `help:"print export data"`
+ Fieldtrack *int `help:"enable field tracking"`
+ GCProg int `help:"print dump of GC programs"`
+ Libfuzzer int `help:"enable coverage instrumentation for libfuzzer"`
+ LocationLists int `help:"print information about DWARF location list creation"`
+ Nil int `help:"print information about nil checks"`
+ PCTab string `help:"print named pc-value table"`
+ Panic int `help:"show all compiler panics"`
+ Slice int `help:"print information about slice compilation"`
+ SoftFloat int `help:"force compiler to emit soft-float code"`
+ TypeAssert int `help:"print information about type assertion inlining"`
+ TypecheckInl int `help:"eager typechecking of inline function bodies"`
+ WB int `help:"print information about write barriers"`
+ ABIWrap int `help:"print information about ABI wrapper generation"`
+
+ any bool // set when any of the values have been set
+}
+
+// Any reports whether any of the debug flags have been set.
+func (d *DebugFlags) Any() bool { return d.any }
+
+// debugField describes one -d setting: its key, help text, and the
+// location that receives the parsed value.
+type debugField struct {
+ name string
+ help string
+ val interface{} // *int or *string
+}
+
+// debugTab is built by init from the fields of Debug; parseDebug uses
+// it to look up -d settings by name.
+var debugTab []debugField
+
+func init() {
+ // Populate debugTab with one entry per settable field of Debug,
+ // discovered via reflection so the table cannot drift from the struct.
+ v := reflect.ValueOf(&Debug).Elem()
+ t := v.Type()
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Name == "any" {
+ continue
+ }
+ // The -d key is the lower-cased field name.
+ name := strings.ToLower(f.Name)
+ help := f.Tag.Get("help")
+ if help == "" {
+ panic(fmt.Sprintf("base.Debug.%s is missing help text", f.Name))
+ }
+ ptr := v.Field(i).Addr().Interface()
+ switch ptr.(type) {
+ default:
+ panic(fmt.Sprintf("base.Debug.%s has invalid type %v (must be int or string)", f.Name, f.Type))
+ case *int, *string:
+ // ok
+ case **int:
+ ptr = *ptr.(**int) // record the *int itself
+ }
+ debugTab = append(debugTab, debugField{name, help, ptr})
+ }
+}
+
+// DebugSSA is called to set a -d ssa/... option.
+// If nil, those options are reported as invalid options.
+// If DebugSSA returns a non-empty string, that text is reported as a compiler error.
+var DebugSSA func(phase, flag string, val int, valString string) string
+
+// parseDebug parses the -d debug string argument.
+func parseDebug(debugstr string) {
+ // parse -d argument
+ if debugstr == "" {
+ return
+ }
+ Debug.any = true
+Split:
+ for _, name := range strings.Split(debugstr, ",") {
+ if name == "" {
+ continue
+ }
+ // display help about the -d option itself and quit
+ if name == "help" {
+ fmt.Print(debugHelpHeader)
+ maxLen := len("ssa/help")
+ for _, t := range debugTab {
+ if len(t.name) > maxLen {
+ maxLen = len(t.name)
+ }
+ }
+ for _, t := range debugTab {
+ fmt.Printf("\t%-*s\t%s\n", maxLen, t.name, t.help)
+ }
+ // ssa options have their own help
+ fmt.Printf("\t%-*s\t%s\n", maxLen, "ssa/help", "print help about SSA debugging")
+ fmt.Print(debugHelpFooter)
+ os.Exit(0)
+ }
+ // name, name=value, or name:value; a bare name means value 1.
+ val, valstring, haveInt := 1, "", true
+ if i := strings.IndexAny(name, "=:"); i >= 0 {
+ var err error
+ name, valstring = name[:i], name[i+1:]
+ val, err = strconv.Atoi(valstring)
+ if err != nil {
+ val, haveInt = 1, false
+ }
+ }
+ for _, t := range debugTab {
+ if t.name != name {
+ continue
+ }
+ switch vp := t.val.(type) {
+ case nil:
+ // Ignore
+ case *string:
+ *vp = valstring
+ case *int:
+ if !haveInt {
+ log.Fatalf("invalid debug value %v", name)
+ }
+ *vp = val
+ default:
+ panic("bad debugtab type")
+ }
+ continue Split
+ }
+ // special case for ssa for now
+ if DebugSSA != nil && strings.HasPrefix(name, "ssa/") {
+ // expect form ssa/phase/flag
+ // e.g. -d=ssa/generic_cse/time
+ // _ in phase name also matches space
+ phase := name[4:]
+ flag := "debug" // default flag is debug
+ if i := strings.Index(phase, "/"); i >= 0 {
+ flag = phase[i+1:]
+ phase = phase[:i]
+ }
+ err := DebugSSA(phase, flag, val, valstring)
+ if err != "" {
+ // err is a plain message, not a format string; print it
+ // via %v so a literal '%' in the text is not treated as
+ // a verb (and vet's printf check stays clean).
+ log.Fatalf("%v", err)
+ }
+ continue Split
+ }
+ log.Fatalf("unknown debug key -d %s\n", name)
+ }
+}
+
+// debugHelpHeader is printed by -d help before the generated key list.
+const debugHelpHeader = `usage: -d arg[,arg]* and arg is <key>[=<value>]
+
+<key> is one of:
+
+`
+
+// debugHelpFooter is printed by -d help after the generated key list.
+const debugHelpFooter = `
+<value> is key-specific.
+
+Key "checkptr" supports values:
+ "0": instrumentation disabled
+ "1": conversions involving unsafe.Pointer are instrumented
+ "2": conversions to unsafe.Pointer force heap allocation
+
+Key "pctab" supports values:
+ "pctospadj", "pctofile", "pctoline", "pctoinline", "pctopcdata"
+`
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package base
+
+import (
+ "encoding/json"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "reflect"
+ "runtime"
+ "strings"
+
+ "cmd/internal/objabi"
+ "cmd/internal/sys"
+)
+
+// usage prints the compiler's flag usage message to stderr and exits
+// with status 2.
+func usage() {
+ fmt.Fprintf(os.Stderr, "usage: compile [options] file.go...\n")
+ objabi.Flagprint(os.Stderr)
+ Exit(2)
+}
+
+// Flag holds the parsed command-line flags.
+// See ParseFlag for non-zero defaults.
+var Flag CmdFlags
+
+// A CountFlag is a counting integer flag.
+// It accepts -name=value to set the value directly,
+// but it also accepts -name with no =value to increment the count.
+type CountFlag int
+
+// CmdFlags defines the command-line flags (see var Flag).
+// Each struct field is a different flag, by default named for the lower-case of the field name.
+// If the flag name is a single letter, the default flag name is left upper-case.
+// If the flag name is "Lower" followed by a single letter, the default flag name is the lower-case of the last letter.
+//
+// If this default flag name can't be made right, the `flag` struct tag can be used to replace it,
+// but this should be done only in exceptional circumstances: it helps everyone if the flag name
+// is obvious from the field name when the flag is used elsewhere in the compiler sources.
+// The `flag:"-"` struct tag makes a field invisible to the flag logic and should also be used sparingly.
+//
+// Each field must have a `help` struct tag giving the flag help message.
+//
+// The allowed field types are bool, int, string, pointers to those (for values stored elsewhere),
+// CountFlag (for a counting flag), and func(string) (for a flag that uses special code for parsing).
+type CmdFlags struct {
+ // Tags below are written as quoted strings rather than backticks
+ // because many help texts themselves contain backticks (the flag
+ // package's syntax for naming the argument in usage output).
+ // Single letters
+ B CountFlag "help:\"disable bounds checking\""
+ C CountFlag "help:\"disable printing of columns in error messages\""
+ D string "help:\"set relative `path` for local imports\""
+ E CountFlag "help:\"debug symbol export\""
+ I func(string) "help:\"add `directory` to import search path\""
+ K CountFlag "help:\"debug missing line numbers\""
+ L CountFlag "help:\"show full file names in error messages\""
+ N CountFlag "help:\"disable optimizations\""
+ S CountFlag "help:\"print assembly listing\""
+ // V is added by objabi.AddVersionFlag
+ W CountFlag "help:\"debug parse tree after type checking\""
+
+ LowerC int "help:\"concurrency during compilation (1 means no concurrency)\""
+ LowerD func(string) "help:\"enable debugging settings; try -d help\""
+ LowerE CountFlag "help:\"no limit on number of errors reported\""
+ LowerH CountFlag "help:\"halt on error\""
+ LowerJ CountFlag "help:\"debug runtime-initialized variables\""
+ LowerL CountFlag "help:\"disable inlining\""
+ LowerM CountFlag "help:\"print optimization decisions\""
+ LowerO string "help:\"write output to `file`\""
+ LowerP *string "help:\"set expected package import `path`\"" // &Ctxt.Pkgpath, set below
+ LowerR CountFlag "help:\"debug generated wrappers\""
+ LowerT bool "help:\"enable tracing for debugging the compiler\""
+ LowerW CountFlag "help:\"debug type checking\""
+ LowerV *bool "help:\"increase debug verbosity\""
+
+ // Special characters
+ Percent int "flag:\"%\" help:\"debug non-static initializers\""
+ CompilingRuntime bool "flag:\"+\" help:\"compiling runtime\""
+
+ // Longer names
+ ABIWrap bool "help:\"enable generation of ABI wrappers\""
+ ABIWrapLimit int "help:\"emit at most N ABI wrappers (for debugging)\""
+ AsmHdr string "help:\"write assembly header to `file`\""
+ Bench string "help:\"append benchmark times to `file`\""
+ BlockProfile string "help:\"write block profile to `file`\""
+ BuildID string "help:\"record `id` as the build id in the export metadata\""
+ CPUProfile string "help:\"write cpu profile to `file`\""
+ Complete bool "help:\"compiling complete package (no C or assembly)\""
+ Dwarf bool "help:\"generate DWARF symbols\""
+ DwarfBASEntries *bool "help:\"use base address selection entries in DWARF\"" // &Ctxt.UseBASEntries, set below
+ DwarfLocationLists *bool "help:\"add location lists to DWARF in optimized mode\"" // &Ctxt.Flag_locationlists, set below
+ Dynlink *bool "help:\"support references to Go symbols defined in other shared libraries\"" // &Ctxt.Flag_dynlink, set below
+ EmbedCfg func(string) "help:\"read go:embed configuration from `file`\""
+ GenDwarfInl int "help:\"generate DWARF inline info records\"" // 0=disabled, 1=funcs, 2=funcs+formals/locals
+ GoVersion string "help:\"required version of the runtime\""
+ ImportCfg func(string) "help:\"read import configuration from `file`\""
+ ImportMap func(string) "help:\"add `definition` of the form source=actual to import map\""
+ InstallSuffix string "help:\"set pkg directory `suffix`\""
+ JSON string "help:\"version,file for JSON compiler/optimizer detail output\""
+ Lang string "help:\"Go language version source code expects\""
+ LinkObj string "help:\"write linker-specific object to `file`\""
+ LinkShared *bool "help:\"generate code that will be linked against Go shared libraries\"" // &Ctxt.Flag_linkshared, set below
+ Live CountFlag "help:\"debug liveness analysis\""
+ MSan bool "help:\"build code compatible with C/C++ memory sanitizer\""
+ MemProfile string "help:\"write memory profile to `file`\""
+ MemProfileRate int64 "help:\"set runtime.MemProfileRate to `rate`\""
+ MutexProfile string "help:\"write mutex profile to `file`\""
+ NoLocalImports bool "help:\"reject local (relative) imports\""
+ Pack bool "help:\"write to file.a instead of file.o\""
+ Race bool "help:\"enable race detector\""
+ Shared *bool "help:\"generate code that can be linked into a shared library\"" // &Ctxt.Flag_shared, set below
+ SmallFrames bool "help:\"reduce the size limit for stack allocated objects\"" // small stacks, to diagnose GC latency; see golang.org/issue/27732
+ Spectre string "help:\"enable spectre mitigations in `list` (all, index, ret)\""
+ Std bool "help:\"compiling standard library\""
+ SymABIs string "help:\"read symbol ABIs from `file`\""
+ TraceProfile string "help:\"write an execution trace to `file`\""
+ TrimPath string "help:\"remove `prefix` from recorded source file paths\""
+ WB bool "help:\"enable write barrier\"" // TODO: remove
+
+ // Configuration derived from flags; not a flag itself.
+ Cfg struct {
+ Embed struct { // set by -embedcfg
+ Patterns map[string][]string
+ Files map[string]string
+ }
+ ImportDirs []string // appended to by -I
+ ImportMap map[string]string // set by -importmap OR -importcfg
+ PackageFile map[string]string // set by -importcfg; nil means not in use
+ SpectreIndex bool // set by -spectre=index or -spectre=all
+ }
+}
+
+// ParseFlags parses the command-line flags into Flag.
+func ParseFlags() {
+ // Install defaults and the destinations that live outside Flag
+ // (mostly fields of Ctxt) before registering and parsing.
+ Flag.I = addImportDir
+
+ Flag.LowerC = 1
+ Flag.LowerD = parseDebug
+ Flag.LowerP = &Ctxt.Pkgpath
+ Flag.LowerV = &Ctxt.Debugvlog
+
+ Flag.ABIWrap = objabi.Regabi_enabled != 0
+ Flag.Dwarf = objabi.GOARCH != "wasm"
+ Flag.DwarfBASEntries = &Ctxt.UseBASEntries
+ Flag.DwarfLocationLists = &Ctxt.Flag_locationlists
+ *Flag.DwarfLocationLists = true
+ Flag.Dynlink = &Ctxt.Flag_dynlink
+ Flag.EmbedCfg = readEmbedCfg
+ Flag.GenDwarfInl = 2
+ Flag.ImportCfg = readImportCfg
+ Flag.ImportMap = addImportMap
+ Flag.LinkShared = &Ctxt.Flag_linkshared
+ Flag.Shared = &Ctxt.Flag_shared
+ Flag.WB = true
+
+ Flag.Cfg.ImportMap = make(map[string]string)
+
+ objabi.AddVersionFlag() // -V
+ registerFlags()
+ objabi.Flagparse(usage)
+
+ // Platform sanity checks: reject flag/GOOS/GOARCH combinations
+ // the toolchain does not support.
+ if Flag.MSan && !sys.MSanSupported(objabi.GOOS, objabi.GOARCH) {
+ log.Fatalf("%s/%s does not support -msan", objabi.GOOS, objabi.GOARCH)
+ }
+ if Flag.Race && !sys.RaceDetectorSupported(objabi.GOOS, objabi.GOARCH) {
+ log.Fatalf("%s/%s does not support -race", objabi.GOOS, objabi.GOARCH)
+ }
+ if (*Flag.Shared || *Flag.Dynlink || *Flag.LinkShared) && !Ctxt.Arch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X) {
+ log.Fatalf("%s/%s does not support -shared", objabi.GOOS, objabi.GOARCH)
+ }
+ parseSpectre(Flag.Spectre) // left as string for recordFlags
+
+ Ctxt.Flag_shared = Ctxt.Flag_dynlink || Ctxt.Flag_shared
+ Ctxt.Flag_optimize = Flag.N == 0
+ Ctxt.Debugasm = int(Flag.S)
+
+ if flag.NArg() < 1 {
+ usage()
+ }
+
+ if Flag.GoVersion != "" && Flag.GoVersion != runtime.Version() {
+ fmt.Printf("compile: version %q does not match go tool version %q\n", runtime.Version(), Flag.GoVersion)
+ Exit(2)
+ }
+
+ // Default the output name (-o) to the first source file's base
+ // name with a .o (or .a with -pack) suffix.
+ if Flag.LowerO == "" {
+ p := flag.Arg(0)
+ if i := strings.LastIndex(p, "/"); i >= 0 {
+ p = p[i+1:]
+ }
+ if runtime.GOOS == "windows" {
+ if i := strings.LastIndex(p, `\`); i >= 0 {
+ p = p[i+1:]
+ }
+ }
+ if i := strings.LastIndex(p, "."); i >= 0 {
+ p = p[:i]
+ }
+ suffix := ".o"
+ if Flag.Pack {
+ suffix = ".a"
+ }
+ Flag.LowerO = p + suffix
+ }
+
+ if Flag.Race && Flag.MSan {
+ log.Fatal("cannot use both -race and -msan")
+ }
+ if Flag.Race || Flag.MSan {
+ // -race and -msan imply -d=checkptr for now.
+ Debug.Checkptr = 1
+ }
+
+ if Flag.CompilingRuntime && Flag.N != 0 {
+ log.Fatal("cannot disable optimizations while compiling runtime")
+ }
+ if Flag.LowerC < 1 {
+ log.Fatalf("-c must be at least 1, got %d", Flag.LowerC)
+ }
+ if Flag.LowerC > 1 && !concurrentBackendAllowed() {
+ log.Fatalf("cannot use concurrent backend compilation with provided flags; invoked as %v", os.Args)
+ }
+
+ if Flag.CompilingRuntime {
+ // Runtime can't use -d=checkptr, at least not yet.
+ Debug.Checkptr = 0
+
+ // Fuzzing the runtime isn't interesting either.
+ Debug.Libfuzzer = 0
+ }
+
+ // set via a -d flag
+ Ctxt.Debugpcln = Debug.PCTab
+}
+
+// registerFlags adds flag registrations for all the fields in Flag.
+// See the comment on type CmdFlags for the rules.
+func registerFlags() {
+ var (
+ boolType = reflect.TypeOf(bool(false))
+ intType = reflect.TypeOf(int(0))
+ stringType = reflect.TypeOf(string(""))
+ ptrBoolType = reflect.TypeOf(new(bool))
+ ptrIntType = reflect.TypeOf(new(int))
+ ptrStringType = reflect.TypeOf(new(string))
+ countType = reflect.TypeOf(CountFlag(0))
+ funcType = reflect.TypeOf((func(string))(nil))
+ )
+
+ v := reflect.ValueOf(&Flag).Elem()
+ t := v.Type()
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Name == "Cfg" {
+ continue
+ }
+
+ // Derive the flag name from the field name per the CmdFlags rules,
+ // unless the `flag` tag overrides it.
+ var name string
+ if len(f.Name) == 1 {
+ name = f.Name
+ } else if len(f.Name) == 6 && f.Name[:5] == "Lower" && 'A' <= f.Name[5] && f.Name[5] <= 'Z' {
+ name = string(rune(f.Name[5] + 'a' - 'A'))
+ } else {
+ name = strings.ToLower(f.Name)
+ }
+ if tag := f.Tag.Get("flag"); tag != "" {
+ name = tag
+ }
+
+ help := f.Tag.Get("help")
+ if help == "" {
+ panic(fmt.Sprintf("base.Flag.%s is missing help text", f.Name))
+ }
+
+ if k := f.Type.Kind(); (k == reflect.Ptr || k == reflect.Func) && v.Field(i).IsNil() {
+ panic(fmt.Sprintf("base.Flag.%s is uninitialized %v", f.Name, f.Type))
+ }
+
+ // Register with the flag package according to the field's type.
+ switch f.Type {
+ case boolType:
+ p := v.Field(i).Addr().Interface().(*bool)
+ flag.BoolVar(p, name, *p, help)
+ case intType:
+ p := v.Field(i).Addr().Interface().(*int)
+ flag.IntVar(p, name, *p, help)
+ case stringType:
+ p := v.Field(i).Addr().Interface().(*string)
+ flag.StringVar(p, name, *p, help)
+ case ptrBoolType:
+ p := v.Field(i).Interface().(*bool)
+ flag.BoolVar(p, name, *p, help)
+ case ptrIntType:
+ p := v.Field(i).Interface().(*int)
+ flag.IntVar(p, name, *p, help)
+ case ptrStringType:
+ p := v.Field(i).Interface().(*string)
+ flag.StringVar(p, name, *p, help)
+ case countType:
+ p := (*int)(v.Field(i).Addr().Interface().(*CountFlag))
+ objabi.Flagcount(name, help, p)
+ case funcType:
+ f := v.Field(i).Interface().(func(string))
+ objabi.Flagfn1(name, help, f)
+ // NOTE(review): a field whose type matches none of the cases
+ // above (e.g. int64 MemProfileRate) is silently not registered.
+ }
+ }
+}
+
+// concurrentFlagOk reports whether the current compiler flags
+// are compatible with concurrent compilation.
+func concurrentFlagOk() bool {
+ // TODO(rsc): Many of these are fine. Remove them.
+ // NOTE(review): these are all debug/output flags; presumably their
+ // printing is not safe to interleave across goroutines — confirm
+ // before removing any from this list.
+ return Flag.Percent == 0 &&
+ Flag.E == 0 &&
+ Flag.K == 0 &&
+ Flag.L == 0 &&
+ Flag.LowerH == 0 &&
+ Flag.LowerJ == 0 &&
+ Flag.LowerM == 0 &&
+ Flag.LowerR == 0
+}
+
+// concurrentBackendAllowed reports whether the backend may compile
+// functions concurrently under the current flag and context settings.
+func concurrentBackendAllowed() bool {
+ if !concurrentFlagOk() {
+ return false
+ }
+
+ // Flag.S (assembly listing) by itself is ok, because all printing
+ // occurs while writing the object file, and that is non-concurrent.
+ // Adding Ctxt.Debugvlog, however, causes the listing to also print
+ // while flushing the plist, which happens concurrently.
+ if Ctxt.Debugvlog || Debug.Any() || Flag.Live > 0 {
+ return false
+ }
+ // TODO: Test and delete this condition.
+ if objabi.Fieldtrack_enabled != 0 {
+ return false
+ }
+ // TODO: fix races and enable the following flags
+ if Ctxt.Flag_shared || Ctxt.Flag_dynlink || Flag.Race {
+ return false
+ }
+ return true
+}
+
+func addImportDir(dir string) {
+ if dir != "" {
+ Flag.Cfg.ImportDirs = append(Flag.Cfg.ImportDirs, dir)
+ }
+}
+
+func addImportMap(s string) {
+ if Flag.Cfg.ImportMap == nil {
+ Flag.Cfg.ImportMap = make(map[string]string)
+ }
+ if strings.Count(s, "=") != 1 {
+ log.Fatal("-importmap argument must be of the form source=actual")
+ }
+ i := strings.Index(s, "=")
+ source, actual := s[:i], s[i+1:]
+ if source == "" || actual == "" {
+ log.Fatal("-importmap argument must be of the form source=actual; source and actual must be non-empty")
+ }
+ Flag.Cfg.ImportMap[source] = actual
+}
+
// readImportCfg parses the file given to -importcfg. Each non-blank,
// non-#-comment line must be one of:
//
//	importmap old=new         (recorded in Flag.Cfg.ImportMap)
//	packagefile path=filename (recorded in Flag.Cfg.PackageFile)
//
// Any other directive, or a missing half of the key=value pair,
// terminates compilation via log.Fatalf.
func readImportCfg(file string) {
	if Flag.Cfg.ImportMap == nil {
		Flag.Cfg.ImportMap = make(map[string]string)
	}
	Flag.Cfg.PackageFile = map[string]string{}
	data, err := ioutil.ReadFile(file)
	if err != nil {
		log.Fatalf("-importcfg: %v", err)
	}

	for lineNum, line := range strings.Split(string(data), "\n") {
		lineNum++ // 1-based
		line = strings.TrimSpace(line)
		if line == "" || strings.HasPrefix(line, "#") {
			continue
		}

		// Split the line into a verb and an (optional) argument.
		var verb, args string
		if i := strings.Index(line, " "); i < 0 {
			verb = line
		} else {
			verb, args = line[:i], strings.TrimSpace(line[i+1:])
		}
		// Split the argument at the first '=', if any.
		var before, after string
		if i := strings.Index(args, "="); i >= 0 {
			before, after = args[:i], args[i+1:]
		}
		switch verb {
		default:
			log.Fatalf("%s:%d: unknown directive %q", file, lineNum, verb)
		case "importmap":
			if before == "" || after == "" {
				log.Fatalf(`%s:%d: invalid importmap: syntax is "importmap old=new"`, file, lineNum)
			}
			Flag.Cfg.ImportMap[before] = after
		case "packagefile":
			if before == "" || after == "" {
				log.Fatalf(`%s:%d: invalid packagefile: syntax is "packagefile path=filename"`, file, lineNum)
			}
			Flag.Cfg.PackageFile[before] = after
		}
	}
}
+
+func readEmbedCfg(file string) {
+ data, err := ioutil.ReadFile(file)
+ if err != nil {
+ log.Fatalf("-embedcfg: %v", err)
+ }
+ if err := json.Unmarshal(data, &Flag.Cfg.Embed); err != nil {
+ log.Fatalf("%s: %v", file, err)
+ }
+ if Flag.Cfg.Embed.Patterns == nil {
+ log.Fatalf("%s: invalid embedcfg: missing Patterns", file)
+ }
+ if Flag.Cfg.Embed.Files == nil {
+ log.Fatalf("%s: invalid embedcfg: missing Files", file)
+ }
+}
+
+// parseSpectre parses the spectre configuration from the string s.
+func parseSpectre(s string) {
+ for _, f := range strings.Split(s, ",") {
+ f = strings.TrimSpace(f)
+ switch f {
+ default:
+ log.Fatalf("unknown setting -spectre=%s", f)
+ case "":
+ // nothing
+ case "all":
+ Flag.Cfg.SpectreIndex = true
+ Ctxt.Retpoline = true
+ case "index":
+ Flag.Cfg.SpectreIndex = true
+ case "ret":
+ Ctxt.Retpoline = true
+ }
+ }
+
+ if Flag.Cfg.SpectreIndex {
+ switch objabi.GOARCH {
+ case "amd64":
+ // ok
+ default:
+ log.Fatalf("GOARCH=%s does not support -spectre=index", objabi.GOARCH)
+ }
+ }
+}
--- /dev/null
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package base
+
+import (
+ "fmt"
+ "os"
+ "runtime/debug"
+ "sort"
+ "strings"
+
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+)
+
// An errorMsg is a queued error message, waiting to be printed.
type errorMsg struct {
	pos src.XPos // source position the message refers to (may be unknown)
	msg string   // fully formatted text, including trailing newline
}

// Pos is the current source position being processed,
// printed by Errorf, Warn, and Fatalf.
var Pos src.XPos

var (
	errorMsgs       []errorMsg // queued messages, printed and cleared by FlushErrors
	numErrors       int        // number of entries in errorMsgs that are errors (as opposed to warnings)
	numSyntaxErrors int        // count of syntax errors (incremented even when the message is deduplicated)
)
+
// Errors returns the number of errors (not warnings) reported so far.
func Errors() int {
	return numErrors
}
+
// SyntaxErrors returns the number of syntax errors reported.
func SyntaxErrors() int {
	return numSyntaxErrors
}
+
// addErrorMsg adds a new errorMsg (which may be a warning) to errorMsgs.
// The message is formatted eagerly; the position prefix is included only
// when the position is known.
func addErrorMsg(pos src.XPos, format string, args ...interface{}) {
	msg := fmt.Sprintf(format, args...)
	// Only add the position if we know the position.
	// See issue golang.org/issue/11361.
	if pos.IsKnown() {
		msg = fmt.Sprintf("%v: %s", FmtPos(pos), msg)
	}
	errorMsgs = append(errorMsgs, errorMsg{
		pos: pos,
		msg: msg + "\n",
	})
}
+
// FmtPos formats pos as a file:line string, honoring the -C and -l
// flags. It returns "???" if the link context is not yet initialized.
func FmtPos(pos src.XPos) string {
	if Ctxt == nil {
		return "???"
	}
	return Ctxt.OutermostPos(pos).Format(Flag.C == 0, Flag.L == 1)
}
+
// byPos sorts errors by source position (sort.Interface implementation).
type byPos []errorMsg

func (x byPos) Len() int           { return len(x) }
func (x byPos) Less(i, j int) bool { return x[i].pos.Before(x[j].pos) }
func (x byPos) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
+
+// FlushErrors sorts errors seen so far by line number, prints them to stdout,
+// and empties the errors array.
+func FlushErrors() {
+ Ctxt.Bso.Flush()
+ if len(errorMsgs) == 0 {
+ return
+ }
+ sort.Stable(byPos(errorMsgs))
+ for i, err := range errorMsgs {
+ if i == 0 || err.msg != errorMsgs[i-1].msg {
+ fmt.Printf("%s", err.msg)
+ }
+ }
+ errorMsgs = errorMsgs[:0]
+}
+
// lasterror keeps track of the most recently issued error,
// to avoid printing multiple error messages on the same line.
// It is consulted and updated by ErrorfAt.
var lasterror struct {
	syntax src.XPos // source position of last syntax error
	other  src.XPos // source position of last non-syntax error
	msg    string   // error message of last non-syntax error
}
+
+// sameline reports whether two positions a, b are on the same line.
+func sameline(a, b src.XPos) bool {
+ p := Ctxt.PosTable.Pos(a)
+ q := Ctxt.PosTable.Pos(b)
+ return p.Base() == q.Base() && p.Line() == q.Line()
+}
+
// Errorf reports a formatted error at the current line (base.Pos).
// It delegates to ErrorfAt, which handles deduplication and limits.
func Errorf(format string, args ...interface{}) {
	ErrorfAt(Pos, format, args...)
}
+
// ErrorfAt reports a formatted error message at pos.
//
// Messages are deduplicated: at most one syntax error is reported per
// source line, and identical non-syntax errors on the same line are
// reported only once. After 10 errors, unless -e was given
// (Flag.LowerE), compilation is aborted via ErrorExit.
func ErrorfAt(pos src.XPos, format string, args ...interface{}) {
	msg := fmt.Sprintf(format, args...)

	if strings.HasPrefix(msg, "syntax error") {
		numSyntaxErrors++
		// only one syntax error per line, no matter what error
		if sameline(lasterror.syntax, pos) {
			return
		}
		lasterror.syntax = pos
	} else {
		// only one of multiple equal non-syntax errors per line
		// (flusherrors shows only one of them, so we filter them
		// here as best as we can (they may not appear in order)
		// so that we don't count them here and exit early, and
		// then have nothing to show for.)
		if sameline(lasterror.other, pos) && lasterror.msg == msg {
			return
		}
		lasterror.other = pos
		lasterror.msg = msg
	}

	addErrorMsg(pos, "%s", msg)
	numErrors++

	hcrash()
	if numErrors >= 10 && Flag.LowerE == 0 {
		FlushErrors()
		fmt.Printf("%v: too many errors\n", FmtPos(pos))
		ErrorExit()
	}
}
+
// ErrorfVers reports that a language feature (format, args) requires
// a later version of Go than the currently selected -lang (Flag.Lang).
func ErrorfVers(lang string, format string, args ...interface{}) {
	Errorf("%s requires %s or later (-lang was set to %s; check go.mod)", fmt.Sprintf(format, args...), lang, Flag.Lang)
}
+
+// UpdateErrorDot is a clumsy hack that rewrites the last error,
+// if it was "LINE: undefined: NAME", to be "LINE: undefined: NAME in EXPR".
+// It is used to give better error messages for dot (selector) expressions.
+func UpdateErrorDot(line string, name, expr string) {
+ if len(errorMsgs) == 0 {
+ return
+ }
+ e := &errorMsgs[len(errorMsgs)-1]
+ if strings.HasPrefix(e.msg, line) && e.msg == fmt.Sprintf("%v: undefined: %v\n", line, name) {
+ e.msg = fmt.Sprintf("%v: undefined: %v in %v\n", line, name, expr)
+ }
+}
+
// Warn reports a formatted warning at the current line (base.Pos).
// In general the Go compiler does NOT generate warnings,
// so this should be used only when the user has opted in
// to additional output by setting a particular flag.
func Warn(format string, args ...interface{}) {
	WarnfAt(Pos, format, args...)
}
+
// WarnfAt reports a formatted warning at pos.
// In general the Go compiler does NOT generate warnings,
// so this should be used only when the user has opted in
// to additional output by setting a particular flag.
// With -m (Flag.LowerM), warnings are flushed immediately.
func WarnfAt(pos src.XPos, format string, args ...interface{}) {
	addErrorMsg(pos, format, args...)
	if Flag.LowerM != 0 {
		FlushErrors()
	}
}
+
// Fatalf reports a fatal error - an internal problem - at the current line and exits.
// If other errors have already been printed, then Fatalf just quietly exits.
// (The internal problem may have been caused by incomplete information
// after the already-reported errors, so best to let users fix those and
// try again without being bothered about a spurious internal error.)
//
// But if no errors have been printed, or if -d panic has been specified,
// Fatalf prints the error as an "internal compiler error". In a released build,
// it prints an error asking to file a bug report. In development builds, it
// prints a stack trace.
//
// If -h has been specified, Fatalf panics to force the usual runtime info dump.
func Fatalf(format string, args ...interface{}) {
	// Delegate to FatalfAt using the current position.
	FatalfAt(Pos, format, args...)
}
+
// FatalfAt reports a fatal error - an internal problem - at pos and exits.
// If other errors have already been printed, then FatalfAt just quietly exits.
// (The internal problem may have been caused by incomplete information
// after the already-reported errors, so best to let users fix those and
// try again without being bothered about a spurious internal error.)
//
// But if no errors have been printed, or if -d panic has been specified,
// FatalfAt prints the error as an "internal compiler error". In a released build,
// it prints an error asking to file a bug report. In development builds, it
// prints a stack trace.
//
// If -h has been specified, FatalfAt panics to force the usual runtime info dump.
func FatalfAt(pos src.XPos, format string, args ...interface{}) {
	FlushErrors()

	if Debug.Panic != 0 || numErrors == 0 {
		fmt.Printf("%v: internal compiler error: ", FmtPos(pos))
		fmt.Printf(format, args...)
		fmt.Printf("\n")

		// If this is a released compiler version, ask for a bug report.
		// (Release versions have an objabi.Version starting with "go".)
		if strings.HasPrefix(objabi.Version, "go") {
			fmt.Printf("\n")
			fmt.Printf("Please file a bug report including a short program that triggers the error.\n")
			fmt.Printf("https://golang.org/issue/new\n")
		} else {
			// Not a release; dump a stack trace, too.
			fmt.Println()
			os.Stdout.Write(debug.Stack())
			fmt.Println()
		}
	}

	hcrash()
	ErrorExit()
}
+
+// hcrash crashes the compiler when -h is set, to find out where a message is generated.
+func hcrash() {
+ if Flag.LowerH != 0 {
+ FlushErrors()
+ if Flag.LowerO != "" {
+ os.Remove(Flag.LowerO)
+ }
+ panic("-h")
+ }
+}
+
// ErrorExit handles an error-status exit.
// It flushes any pending errors, removes the output file
// (Flag.LowerO, the -o argument), and exits with status 2.
func ErrorExit() {
	FlushErrors()
	if Flag.LowerO != "" {
		os.Remove(Flag.LowerO)
	}
	os.Exit(2)
}
+
+// ExitIfErrors calls ErrorExit if any errors have been reported.
+func ExitIfErrors() {
+ if Errors() > 0 {
+ ErrorExit()
+ }
+}
--- /dev/null
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "fmt"
+ "sync"
+)
+
+//......................................................................
+//
+// Public/exported bits of the ABI utilities.
+//
+
// ABIParamResultInfo stores the results of processing a given
// function type to compute stack layout and register assignments. For
// each input and output parameter we capture whether the param was
// register-assigned (and to which register(s)) or the stack offset
// for the param if is not going to be passed in registers according
// to the rules in the Go internal ABI specification (1.17).
type ABIParamResultInfo struct {
	inparams          []ABIParamAssignment // Includes receiver for method calls. Does NOT include hidden closure pointer.
	outparams         []ABIParamAssignment
	intSpillSlots     int   // number of int-register spill slots needed (= int regs used by inputs)
	floatSpillSlots   int   // number of float-register spill slots needed
	offsetToSpillArea int64 // stack offset where the spill area begins
	config            ABIConfig // to enable String() method
}
+
// RegIndex stores the index into the set of machine registers used by
// the ABI on a specific architecture for parameter passing. RegIndex
// values 0 through N-1 (where N is the number of integer registers
// used for param passing according to the ABI rules) describe integer
// registers; values starting at N describe floating point registers.
// Thus if the ABI says there are 5 integer registers and 7 floating
// point registers, then RegIndex value of 4 indicates the 5th integer
// register, and a RegIndex value of 11 indicates the 7th floating
// point register.
type RegIndex uint8
+
// ABIParamAssignment holds information about how a specific param or
// result will be passed: in registers (in which case 'Registers' is
// populated) or on the stack (in which case 'Offset' is set to a
// non-negative stack offset). The values in 'Registers' are indices
// (as described above), not architected registers.
type ABIParamAssignment struct {
	Type      *types.Type
	Registers []RegIndex // empty when stack-assigned
	Offset    int32      // -1 when register-assigned
}
+
// RegAmounts holds a specified number of integer/float registers.
type RegAmounts struct {
	intRegs   int // count of integer registers
	floatRegs int // count of floating point registers
}
+
// ABIConfig captures the number of registers made available
// by the ABI rules for parameter passing and result returning.
type ABIConfig struct {
	// Do we need anything more than this?
	regAmounts RegAmounts
}
+
// ABIAnalyze takes a function type 't' and an ABI rules description
// 'config' and analyzes the function to determine how its parameters
// and results will be passed (in registers or on the stack), returning
// an ABIParamResultInfo object that holds the results of the analysis.
func ABIAnalyze(t *types.Type, config ABIConfig) ABIParamResultInfo {
	setup()
	s := assignState{
		rTotal: config.regAmounts,
	}
	result := ABIParamResultInfo{config: config}

	// Receiver, if any, is treated as the first input parameter.
	ft := t.FuncType()
	if t.NumRecvs() != 0 {
		rfsl := ft.Receiver.FieldSlice()
		result.inparams = append(result.inparams,
			s.assignParamOrReturn(rfsl[0].Type))
	}

	// Inputs
	ifsl := ft.Params.FieldSlice()
	for _, f := range ifsl {
		result.inparams = append(result.inparams,
			s.assignParamOrReturn(f.Type))
	}
	// Round the stack-assigned input area up to register size.
	s.stackOffset = Rnd(s.stackOffset, int64(Widthreg))

	// Record number of spill slots needed.
	result.intSpillSlots = s.rUsed.intRegs
	result.floatSpillSlots = s.rUsed.floatRegs

	// Outputs: register usage restarts from zero; results are
	// assigned independently of inputs.
	s.rUsed = RegAmounts{}
	ofsl := ft.Results.FieldSlice()
	for _, f := range ofsl {
		result.outparams = append(result.outparams, s.assignParamOrReturn(f.Type))
	}
	result.offsetToSpillArea = s.stackOffset

	return result
}
+
+//......................................................................
+//
+// Non-public portions.
+
+// regString produces a human-readable version of a RegIndex.
+func (c *RegAmounts) regString(r RegIndex) string {
+ if int(r) < c.intRegs {
+ return fmt.Sprintf("I%d", int(r))
+ } else if int(r) < c.intRegs+c.floatRegs {
+ return fmt.Sprintf("F%d", int(r)-c.intRegs)
+ }
+ return fmt.Sprintf("<?>%d", r)
+}
+
+// toString method renders an ABIParamAssignment in human-readable
+// form, suitable for debugging or unit testing.
+func (ri *ABIParamAssignment) toString(config ABIConfig) string {
+ regs := "R{"
+ for _, r := range ri.Registers {
+ regs += " " + config.regAmounts.regString(r)
+ }
+ return fmt.Sprintf("%s } offset: %d typ: %v", regs, ri.Offset, ri.Type)
+}
+
+// toString method renders an ABIParamResultInfo in human-readable
+// form, suitable for debugging or unit testing.
+func (ri *ABIParamResultInfo) String() string {
+ res := ""
+ for k, p := range ri.inparams {
+ res += fmt.Sprintf("IN %d: %s\n", k, p.toString(ri.config))
+ }
+ for k, r := range ri.outparams {
+ res += fmt.Sprintf("OUT %d: %s\n", k, r.toString(ri.config))
+ }
+ res += fmt.Sprintf("intspill: %d floatspill: %d offsetToSpillArea: %d",
+ ri.intSpillSlots, ri.floatSpillSlots, ri.offsetToSpillArea)
+ return res
+}
+
// assignState holds intermediate state during the register assigning process
// for a given function signature.
type assignState struct {
	rTotal      RegAmounts // total reg amounts from ABI rules
	rUsed       RegAmounts // regs used by params completely assigned so far
	pUsed       RegAmounts // regs used by the current param (or pieces therein)
	stackOffset int64      // current stack offset
}
+
+// stackSlot returns a stack offset for a param or result of the
+// specified type.
+func (state *assignState) stackSlot(t *types.Type) int64 {
+ if t.Align > 0 {
+ state.stackOffset = Rnd(state.stackOffset, int64(t.Align))
+ }
+ rv := state.stackOffset
+ state.stackOffset += t.Width
+ return rv
+}
+
+// allocateRegs returns a set of register indices for a parameter or result
+// that we've just determined to be register-assignable. The number of registers
+// needed is assumed to be stored in state.pUsed.
+func (state *assignState) allocateRegs() []RegIndex {
+ regs := []RegIndex{}
+
+ // integer
+ for r := state.rUsed.intRegs; r < state.rUsed.intRegs+state.pUsed.intRegs; r++ {
+ regs = append(regs, RegIndex(r))
+ }
+ state.rUsed.intRegs += state.pUsed.intRegs
+
+ // floating
+ for r := state.rUsed.floatRegs; r < state.rUsed.floatRegs+state.pUsed.floatRegs; r++ {
+ regs = append(regs, RegIndex(r+state.rTotal.intRegs))
+ }
+ state.rUsed.floatRegs += state.pUsed.floatRegs
+
+ return regs
+}
+
// regAllocate creates a register ABIParamAssignment object for a param
// or result with the specified type, as a final step (this assumes
// that all of the safety/suitability analysis is complete).
// Offset -1 marks the assignment as register-resident.
func (state *assignState) regAllocate(t *types.Type) ABIParamAssignment {
	return ABIParamAssignment{
		Type:      t,
		Registers: state.allocateRegs(),
		Offset:    -1,
	}
}
+
// stackAllocate creates a stack memory ABIParamAssignment object for
// a param or result with the specified type, as a final step (this
// assumes that all of the safety/suitability analysis is complete).
// Registers is left empty; Offset records the assigned stack slot.
func (state *assignState) stackAllocate(t *types.Type) ABIParamAssignment {
	return ABIParamAssignment{
		Type:   t,
		Offset: int32(state.stackSlot(t)),
	}
}
+
// intUsed returns the number of integer registers consumed
// at a given point within an assignment stage (fully assigned
// params plus the param currently being assigned).
func (state *assignState) intUsed() int {
	return state.rUsed.intRegs + state.pUsed.intRegs
}
+
// floatUsed returns the number of floating point registers consumed at
// a given point within an assignment stage (fully assigned params plus
// the param currently being assigned).
func (state *assignState) floatUsed() int {
	return state.rUsed.floatRegs + state.pUsed.floatRegs
}
+
+// regassignIntegral examines a param/result of integral type 't' to
+// determines whether it can be register-assigned. Returns TRUE if we
+// can register allocate, FALSE otherwise (and updates state
+// accordingly).
+func (state *assignState) regassignIntegral(t *types.Type) bool {
+ regsNeeded := int(Rnd(t.Width, int64(Widthptr)) / int64(Widthptr))
+
+ // Floating point and complex.
+ if t.IsFloat() || t.IsComplex() {
+ if regsNeeded+state.floatUsed() > state.rTotal.floatRegs {
+ // not enough regs
+ return false
+ }
+ state.pUsed.floatRegs += regsNeeded
+ return true
+ }
+
+ // Non-floating point
+ if regsNeeded+state.intUsed() > state.rTotal.intRegs {
+ // not enough regs
+ return false
+ }
+ state.pUsed.intRegs += regsNeeded
+ return true
+}
+
+// regassignArray processes an array type (or array component within some
+// other enclosing type) to determine if it can be register assigned.
+// Returns TRUE if we can register allocate, FALSE otherwise.
+func (state *assignState) regassignArray(t *types.Type) bool {
+
+ nel := t.NumElem()
+ if nel == 0 {
+ return true
+ }
+ if nel > 1 {
+ // Not an array of length 1: stack assign
+ return false
+ }
+ // Visit element
+ return state.regassign(t.Elem())
+}
+
+// regassignStruct processes a struct type (or struct component within
+// some other enclosing type) to determine if it can be register
+// assigned. Returns TRUE if we can register allocate, FALSE otherwise.
+func (state *assignState) regassignStruct(t *types.Type) bool {
+ for _, field := range t.FieldSlice() {
+ if !state.regassign(field.Type) {
+ return false
+ }
+ }
+ return true
+}
+
// synthOnce ensures that we only create the synth* fake types once.
var synthOnce sync.Once

// synthSlice, synthString, and synthIface are synthesized struct types
// meant to capture the underlying implementations of string/slice/interface.
// They are populated lazily by setup().
var synthSlice *types.Type
var synthString *types.Type
var synthIface *types.Type
+
// setup performs setup for the register assignment utilities, manufacturing
// a small set of synthesized types that we'll need along the way.
func setup() {
	synthOnce.Do(func() {
		fname := types.BuiltinPkg.Lookup
		nxp := src.NoXPos
		unsp := types.Types[types.TUNSAFEPTR]
		ui := types.Types[types.TUINTPTR]
		// Slice header: {ptr, len, cap}.
		synthSlice = types.NewStruct(types.NoPkg, []*types.Field{
			types.NewField(nxp, fname("ptr"), unsp),
			types.NewField(nxp, fname("len"), ui),
			types.NewField(nxp, fname("cap"), ui),
		})
		// String header: {data, len}.
		synthString = types.NewStruct(types.NoPkg, []*types.Field{
			types.NewField(nxp, fname("data"), unsp),
			types.NewField(nxp, fname("len"), ui),
		})
		// Interface value: two pointer-sized words.
		synthIface = types.NewStruct(types.NoPkg, []*types.Field{
			types.NewField(nxp, fname("f1"), unsp),
			types.NewField(nxp, fname("f2"), unsp),
		})
	})
}
+
+// regassign examines a given param type (or component within some
+// composite) to determine if it can be register assigned. Returns
+// TRUE if we can register allocate, FALSE otherwise.
+func (state *assignState) regassign(pt *types.Type) bool {
+ typ := pt.Kind()
+ if pt.IsScalar() || pt.IsPtrShaped() {
+ return state.regassignIntegral(pt)
+ }
+ switch typ {
+ case types.TARRAY:
+ return state.regassignArray(pt)
+ case types.TSTRUCT:
+ return state.regassignStruct(pt)
+ case types.TSLICE:
+ return state.regassignStruct(synthSlice)
+ case types.TSTRING:
+ return state.regassignStruct(synthString)
+ case types.TINTER:
+ return state.regassignStruct(synthIface)
+ default:
+ panic("not expected")
+ }
+}
+
+// assignParamOrReturn processes a given receiver, param, or result
+// of type 'pt' to determine whether it can be register assigned.
+// The result of the analysis is recorded in the result
+// ABIParamResultInfo held in 'state'.
+func (state *assignState) assignParamOrReturn(pt *types.Type) ABIParamAssignment {
+ state.pUsed = RegAmounts{}
+ if pt.Width == types.BADWIDTH {
+ panic("should never happen")
+ } else if pt.Width == 0 {
+ return state.stackAllocate(pt)
+ } else if state.regassign(pt) {
+ return state.regAllocate(pt)
+ } else {
+ return state.stackAllocate(pt)
+ }
+}
--- /dev/null
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "bufio"
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+ "cmd/internal/src"
+ "os"
+ "testing"
+)
+
// configAMD64 is the ABI configuration used by these tests.
// AMD64 registers available:
// - integer: RAX, RBX, RCX, RDI, RSI, R8, R9, R10, R11
// - floating point: X0 - X14
var configAMD64 = ABIConfig{
	regAmounts: RegAmounts{
		intRegs:   9,
		floatRegs: 15,
	},
}
+
+func TestMain(m *testing.M) {
+ thearch.LinkArch = &x86.Linkamd64
+ thearch.REGSP = x86.REGSP
+ thearch.MAXWIDTH = 1 << 50
+ MaxWidth = thearch.MAXWIDTH
+ base.Ctxt = obj.Linknew(thearch.LinkArch)
+ base.Ctxt.DiagFunc = base.Errorf
+ base.Ctxt.DiagFlush = base.FlushErrors
+ base.Ctxt.Bso = bufio.NewWriter(os.Stdout)
+ Widthptr = thearch.LinkArch.PtrSize
+ Widthreg = thearch.LinkArch.RegSize
+ types.TypeLinkSym = func(t *types.Type) *obj.LSym {
+ return typenamesym(t).Linksym()
+ }
+ types.TypeLinkSym = func(t *types.Type) *obj.LSym {
+ return typenamesym(t).Linksym()
+ }
+ TypecheckInit()
+ os.Exit(m.Run())
+}
+
// TestABIUtilsBasic1 checks the simplest case: one scalar in, one out,
// both register-assigned.
func TestABIUtilsBasic1(t *testing.T) {

	// func(x int32) int32
	i32 := types.Types[types.TINT32]
	ft := mkFuncType(nil, []*types.Type{i32}, []*types.Type{i32})

	// expected results
	exp := makeExpectedDump(`
        IN 0: R{ I0 } offset: -1 typ: int32
        OUT 0: R{ I0 } offset: -1 typ: int32
        intspill: 1 floatspill: 0 offsetToSpillArea: 0
`)

	abitest(t, ft, exp)
}
+
// TestABIUtilsBasic2 exercises all the basic scalar types (ints,
// floats, complex) with enough parameters to exhaust both register
// files, forcing the tail of the inputs onto the stack.
func TestABIUtilsBasic2(t *testing.T) {
	i8 := types.Types[types.TINT8]
	i16 := types.Types[types.TINT16]
	i32 := types.Types[types.TINT32]
	i64 := types.Types[types.TINT64]
	f32 := types.Types[types.TFLOAT32]
	f64 := types.Types[types.TFLOAT64]
	c64 := types.Types[types.TCOMPLEX64]
	c128 := types.Types[types.TCOMPLEX128]
	ft := mkFuncType(nil,
		[]*types.Type{
			i8, i16, i32, i64,
			f32, f32, f64, f64,
			i8, i16, i32, i64,
			f32, f32, f64, f64,
			c128, c128, c128, c128, c64,
			i8, i16, i32, i64,
			i8, i16, i32, i64},
		[]*types.Type{i32, f64, f64})
	exp := makeExpectedDump(`
        IN 0: R{ I0 } offset: -1 typ: int8
        IN 1: R{ I1 } offset: -1 typ: int16
        IN 2: R{ I2 } offset: -1 typ: int32
        IN 3: R{ I3 } offset: -1 typ: int64
        IN 4: R{ F0 } offset: -1 typ: float32
        IN 5: R{ F1 } offset: -1 typ: float32
        IN 6: R{ F2 } offset: -1 typ: float64
        IN 7: R{ F3 } offset: -1 typ: float64
        IN 8: R{ I4 } offset: -1 typ: int8
        IN 9: R{ I5 } offset: -1 typ: int16
        IN 10: R{ I6 } offset: -1 typ: int32
        IN 11: R{ I7 } offset: -1 typ: int64
        IN 12: R{ F4 } offset: -1 typ: float32
        IN 13: R{ F5 } offset: -1 typ: float32
        IN 14: R{ F6 } offset: -1 typ: float64
        IN 15: R{ F7 } offset: -1 typ: float64
        IN 16: R{ F8 F9 } offset: -1 typ: complex128
        IN 17: R{ F10 F11 } offset: -1 typ: complex128
        IN 18: R{ F12 F13 } offset: -1 typ: complex128
        IN 19: R{ } offset: 0 typ: complex128
        IN 20: R{ F14 } offset: -1 typ: complex64
        IN 21: R{ I8 } offset: -1 typ: int8
        IN 22: R{ } offset: 16 typ: int16
        IN 23: R{ } offset: 20 typ: int32
        IN 24: R{ } offset: 24 typ: int64
        IN 25: R{ } offset: 32 typ: int8
        IN 26: R{ } offset: 34 typ: int16
        IN 27: R{ } offset: 36 typ: int32
        IN 28: R{ } offset: 40 typ: int64
        OUT 0: R{ I0 } offset: -1 typ: int32
        OUT 1: R{ F0 } offset: -1 typ: float64
        OUT 2: R{ F1 } offset: -1 typ: float64
        intspill: 9 floatspill: 15 offsetToSpillArea: 48
`)

	abitest(t, ft, exp)
}
+
// TestABIUtilsArrays checks array handling: only zero- and
// one-element arrays may be register-assigned; longer arrays
// always go to the stack.
func TestABIUtilsArrays(t *testing.T) {
	i32 := types.Types[types.TINT32]
	ae := types.NewArray(i32, 0)
	a1 := types.NewArray(i32, 1)
	a2 := types.NewArray(i32, 2)
	aa1 := types.NewArray(a1, 1)
	ft := mkFuncType(nil, []*types.Type{a1, ae, aa1, a2},
		[]*types.Type{a2, a1, ae, aa1})

	exp := makeExpectedDump(`
        IN 0: R{ I0 } offset: -1 typ: [1]int32
        IN 1: R{ } offset: 0 typ: [0]int32
        IN 2: R{ I1 } offset: -1 typ: [1][1]int32
        IN 3: R{ } offset: 0 typ: [2]int32
        OUT 0: R{ } offset: 8 typ: [2]int32
        OUT 1: R{ I0 } offset: -1 typ: [1]int32
        OUT 2: R{ } offset: 16 typ: [0]int32
        OUT 3: R{ I1 } offset: -1 typ: [1][1]int32
        intspill: 2 floatspill: 0 offsetToSpillArea: 16
`)

	abitest(t, ft, exp)
}
+
// TestABIUtilsStruct1 checks struct flattening: each scalar field
// takes its own register; empty embedded structs consume nothing.
func TestABIUtilsStruct1(t *testing.T) {
	i8 := types.Types[types.TINT8]
	i16 := types.Types[types.TINT16]
	i32 := types.Types[types.TINT32]
	i64 := types.Types[types.TINT64]
	s := mkstruct([]*types.Type{i8, i8, mkstruct([]*types.Type{}), i8, i16})
	ft := mkFuncType(nil, []*types.Type{i8, s, i64},
		[]*types.Type{s, i8, i32})

	exp := makeExpectedDump(`
        IN 0: R{ I0 } offset: -1 typ: int8
        IN 1: R{ I1 I2 I3 I4 } offset: -1 typ: struct { int8; int8; struct {}; int8; int16 }
        IN 2: R{ I5 } offset: -1 typ: int64
        OUT 0: R{ I0 I1 I2 I3 } offset: -1 typ: struct { int8; int8; struct {}; int8; int16 }
        OUT 1: R{ I4 } offset: -1 typ: int8
        OUT 2: R{ I5 } offset: -1 typ: int32
        intspill: 6 floatspill: 0 offsetToSpillArea: 0
`)

	abitest(t, ft, exp)
}
+
// TestABIUtilsStruct2 checks nested structs mixing int and float
// fields: fields draw from both register files independently.
func TestABIUtilsStruct2(t *testing.T) {
	f64 := types.Types[types.TFLOAT64]
	i64 := types.Types[types.TINT64]
	s := mkstruct([]*types.Type{i64, mkstruct([]*types.Type{})})
	fs := mkstruct([]*types.Type{f64, s, mkstruct([]*types.Type{})})
	ft := mkFuncType(nil, []*types.Type{s, s, fs},
		[]*types.Type{fs, fs})

	exp := makeExpectedDump(`
        IN 0: R{ I0 } offset: -1 typ: struct { int64; struct {} }
        IN 1: R{ I1 } offset: -1 typ: struct { int64; struct {} }
        IN 2: R{ I2 F0 } offset: -1 typ: struct { float64; struct { int64; struct {} }; struct {} }
        OUT 0: R{ I0 F0 } offset: -1 typ: struct { float64; struct { int64; struct {} }; struct {} }
        OUT 1: R{ I1 F1 } offset: -1 typ: struct { float64; struct { int64; struct {} }; struct {} }
        intspill: 3 floatspill: 1 offsetToSpillArea: 0
`)

	abitest(t, ft, exp)
}
+
// TestABIUtilsSliceString checks slices (3 registers: ptr/len/cap)
// and strings (2 registers: data/len), including the fallback to
// stack assignment once registers run out.
func TestABIUtilsSliceString(t *testing.T) {
	i32 := types.Types[types.TINT32]
	sli32 := types.NewSlice(i32)
	str := types.New(types.TSTRING)
	i8 := types.Types[types.TINT8]
	i64 := types.Types[types.TINT64]
	ft := mkFuncType(nil, []*types.Type{sli32, i8, sli32, i8, str, i8, i64, sli32},
		[]*types.Type{str, i64, str, sli32})

	exp := makeExpectedDump(`
        IN 0: R{ I0 I1 I2 } offset: -1 typ: []int32
        IN 1: R{ I3 } offset: -1 typ: int8
        IN 2: R{ I4 I5 I6 } offset: -1 typ: []int32
        IN 3: R{ I7 } offset: -1 typ: int8
        IN 4: R{ } offset: 0 typ: string
        IN 5: R{ I8 } offset: -1 typ: int8
        IN 6: R{ } offset: 16 typ: int64
        IN 7: R{ } offset: 24 typ: []int32
        OUT 0: R{ I0 I1 } offset: -1 typ: string
        OUT 1: R{ I2 } offset: -1 typ: int64
        OUT 2: R{ I3 I4 } offset: -1 typ: string
        OUT 3: R{ I5 I6 I7 } offset: -1 typ: []int32
        intspill: 9 floatspill: 0 offsetToSpillArea: 48
`)

	abitest(t, ft, exp)
}
+
// TestABIUtilsMethod checks a method signature: the receiver struct
// is assigned first, like an ordinary leading parameter.
func TestABIUtilsMethod(t *testing.T) {
	i16 := types.Types[types.TINT16]
	i64 := types.Types[types.TINT64]
	f64 := types.Types[types.TFLOAT64]

	s1 := mkstruct([]*types.Type{i16, i16, i16})
	ps1 := types.NewPtr(s1)
	a7 := types.NewArray(ps1, 7)
	ft := mkFuncType(s1, []*types.Type{ps1, a7, f64, i16, i16, i16},
		[]*types.Type{a7, f64, i64})

	exp := makeExpectedDump(`
        IN 0: R{ I0 I1 I2 } offset: -1 typ: struct { int16; int16; int16 }
        IN 1: R{ I3 } offset: -1 typ: *struct { int16; int16; int16 }
        IN 2: R{ } offset: 0 typ: [7]*struct { int16; int16; int16 }
        IN 3: R{ F0 } offset: -1 typ: float64
        IN 4: R{ I4 } offset: -1 typ: int16
        IN 5: R{ I5 } offset: -1 typ: int16
        IN 6: R{ I6 } offset: -1 typ: int16
        OUT 0: R{ } offset: 56 typ: [7]*struct { int16; int16; int16 }
        OUT 1: R{ F0 } offset: -1 typ: float64
        OUT 2: R{ I0 } offset: -1 typ: int64
        intspill: 7 floatspill: 1 offsetToSpillArea: 112
`)

	abitest(t, ft, exp)
}
+
// TestABIUtilsInterfaces checks interface values (2 pointer words,
// hence 2 registers) for both empty and non-empty interfaces, plus
// pointers to interfaces.
func TestABIUtilsInterfaces(t *testing.T) {
	ei := types.Types[types.TINTER] // interface{}
	pei := types.NewPtr(ei)         // *interface{}
	fldt := mkFuncType(types.FakeRecvType(), []*types.Type{},
		[]*types.Type{types.UntypedString})
	field := types.NewField(src.NoXPos, nil, fldt)
	// interface{ ...() string }
	nei := types.NewInterface(types.LocalPkg, []*types.Field{field})

	i16 := types.Types[types.TINT16]
	tb := types.Types[types.TBOOL]
	s1 := mkstruct([]*types.Type{i16, i16, tb})

	ft := mkFuncType(nil, []*types.Type{s1, ei, ei, nei, pei, nei, i16},
		[]*types.Type{ei, nei, pei})

	exp := makeExpectedDump(`
        IN 0: R{ I0 I1 I2 } offset: -1 typ: struct { int16; int16; bool }
        IN 1: R{ I3 I4 } offset: -1 typ: interface {}
        IN 2: R{ I5 I6 } offset: -1 typ: interface {}
        IN 3: R{ I7 I8 } offset: -1 typ: interface { () untyped string }
        IN 4: R{ } offset: 0 typ: *interface {}
        IN 5: R{ } offset: 8 typ: interface { () untyped string }
        IN 6: R{ } offset: 24 typ: int16
        OUT 0: R{ I0 I1 } offset: -1 typ: interface {}
        OUT 1: R{ I2 I3 } offset: -1 typ: interface { () untyped string }
        OUT 2: R{ I4 } offset: -1 typ: *interface {}
        intspill: 9 floatspill: 0 offsetToSpillArea: 32
`)

	abitest(t, ft, exp)
}
--- /dev/null
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+// This file contains utility routines and harness infrastructure used
+// by the ABI tests in "abiutils_test.go".
+
+import (
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "fmt"
+ "strings"
+ "testing"
+ "text/scanner"
+)
+
+// mkParamResultField creates a function parameter or result field of
+// type t named by sym s, with storage class 'which' (ir.PPARAM for
+// inputs/receiver, ir.PPARAMOUT for results). The field is backed by a
+// fresh ir.Name so that frame offsets assigned during ABI analysis can
+// be inspected afterwards.
+func mkParamResultField(t *types.Type, s *types.Sym, which ir.Class) *types.Field {
+ field := types.NewField(src.NoXPos, s, t)
+ n := NewName(s)
+ n.SetClass(which)
+ field.Nname = n
+ n.SetType(t)
+ return field
+}
+
+// mkstruct is a helper routine to create a struct type with anonymous
+// fields of the types specified in 'fieldtypes'. It panics if any
+// entry in fieldtypes is nil.
+func mkstruct(fieldtypes []*types.Type) *types.Type {
+ fields := make([]*types.Field, len(fieldtypes))
+ for k, t := range fieldtypes {
+ if t == nil {
+ panic("bad -- field has no type")
+ }
+ f := types.NewField(src.NoXPos, nil, t)
+ fields[k] = f
+ }
+ s := types.NewStruct(types.LocalPkg, fields)
+ return s
+}
+
+// mkFuncType builds a function signature type with receiver type rcvr
+// (nil for no receiver), input parameter types ins, and result types
+// outs. All parameters and results share the placeholder name "?",
+// since the tests only care about types and offsets, not names.
+func mkFuncType(rcvr *types.Type, ins []*types.Type, outs []*types.Type) *types.Type {
+ q := lookup("?")
+ inf := []*types.Field{}
+ for _, it := range ins {
+ inf = append(inf, mkParamResultField(it, q, ir.PPARAM))
+ }
+ outf := []*types.Field{}
+ for _, ot := range outs {
+ outf = append(outf, mkParamResultField(ot, q, ir.PPARAMOUT))
+ }
+ var rf *types.Field
+ if rcvr != nil {
+ rf = mkParamResultField(rcvr, q, ir.PPARAM)
+ }
+ return types.NewSignature(types.LocalPkg, rf, inf, outf)
+}
+
+// expectedDump holds the expected textual output of an ABI analysis,
+// for comparison against the actual dump in abitest.
+type expectedDump struct {
+ dump string // expected analyzer output (compared token-by-token)
+ file string // NOTE(review): never set by makeExpectedDump in this file — presumably reserved for error reporting; confirm
+ line int // NOTE(review): likewise unset here — confirm intended use
+}
+
+// tokenize splits src into a slice of lexical tokens using
+// text/scanner, so that dumps can be compared while ignoring
+// whitespace and layout differences.
+// Note: the parameter name 'src' shadows the imported cmd/internal/src
+// package within this function (harmless, as the package is not used
+// here).
+func tokenize(src string) []string {
+ var s scanner.Scanner
+ s.Init(strings.NewReader(src))
+ res := []string{}
+ for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() {
+ res = append(res, s.TokenText())
+ }
+ return res
+}
+
+// verifyParamResultOffset checks that the frame offset recorded on the
+// ir.Name backing field f matches the offset chosen by the ABI
+// assignment r. It reports a test error and returns 1 on mismatch,
+// 0 on success, so callers can OR the results together into a
+// failure flag.
+func verifyParamResultOffset(t *testing.T, f *types.Field, r ABIParamAssignment, which string, idx int) int {
+ n := ir.AsNode(f.Nname).(*ir.Name)
+ if n.FrameOffset() != int64(r.Offset) {
+ // Report the value actually compared (FrameOffset), not a
+ // different accessor, so the message matches the check above.
+ t.Errorf("%s %d: got offset %d wanted %d t=%v",
+ which, idx, r.Offset, n.FrameOffset(), f.Type)
+ return 1
+ }
+ return 0
+}
+
+// makeExpectedDump wraps the expected analyzer output string e in an
+// expectedDump for use with abitest.
+func makeExpectedDump(e string) expectedDump {
+ return expectedDump{dump: e}
+}
+
+// difftokens compares the actual token stream atoks against the
+// expected stream etoks. It returns an empty string if they are
+// identical, otherwise a human-readable description of the first
+// difference (length mismatch, or the first diverging token).
+func difftokens(atoks []string, etoks []string) string {
+ if len(atoks) != len(etoks) {
+ return fmt.Sprintf("expected %d tokens got %d",
+ len(etoks), len(atoks))
+ }
+ for i := 0; i < len(etoks); i++ {
+ if etoks[i] == atoks[i] {
+ continue
+ }
+
+ return fmt.Sprintf("diff at token %d: expected %q got %q",
+ i, etoks[i], atoks[i])
+ }
+ return ""
+}
+
+// abitest runs ABI analysis on function type ft twice: first with the
+// full AMD64 register configuration, comparing the result dump against
+// exp; then with an empty register set, verifying that with no
+// registers available the assignment degenerates to the stack-only
+// ABI0 offsets previously computed by dowidth.
+func abitest(t *testing.T, ft *types.Type, exp expectedDump) {
+
+ dowidth(ft)
+
+ // Analyze with full set of registers.
+ regRes := ABIAnalyze(ft, configAMD64)
+ regResString := strings.TrimSpace(regRes.String())
+
+ // Check results.
+ reason := difftokens(tokenize(regResString), tokenize(exp.dump))
+ if reason != "" {
+ t.Errorf("\nexpected:\n%s\ngot:\n%s\nreason: %s",
+ strings.TrimSpace(exp.dump), regResString, reason)
+ }
+
+ // Analyze again with empty register set.
+ empty := ABIConfig{}
+ emptyRes := ABIAnalyze(ft, empty)
+ emptyResString := emptyRes.String()
+
+ // Walk the results and make sure the offsets assigned match
+ // up with those assigned by dowidth. This checks to make sure that
+ // when we have no available registers the ABI assignment degenerates
+ // back to the original ABI0.
+
+ // receiver
+ failed := 0
+ rfsl := ft.Recvs().Fields().Slice()
+ poff := 0
+ if len(rfsl) != 0 {
+ failed |= verifyParamResultOffset(t, rfsl[0], emptyRes.inparams[0], "receiver", 0)
+ poff = 1
+ }
+ // params
+ pfsl := ft.Params().Fields().Slice()
+ for k, f := range pfsl {
+ // Accumulate into 'failed' like the receiver/result checks;
+ // previously the return value was dropped here, so a param
+ // offset mismatch never triggered the dump below.
+ failed |= verifyParamResultOffset(t, f, emptyRes.inparams[k+poff], "param", k)
+ }
+ // results
+ ofsl := ft.Results().Fields().Slice()
+ for k, f := range ofsl {
+ failed |= verifyParamResultOffset(t, f, emptyRes.outparams[k], "result", k)
+ }
+
+ if failed != 0 {
+ t.Logf("emptyres:\n%s\n", emptyResString)
+ }
+}
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/obj"
"fmt"
// EqCanPanic reports whether == on type t could panic (has an interface somewhere).
// t must be comparable.
func EqCanPanic(t *types.Type) bool {
- switch t.Etype {
+ switch t.Kind() {
default:
return false
- case TINTER:
+ case types.TINTER:
return true
- case TARRAY:
+ case types.TARRAY:
return EqCanPanic(t.Elem())
- case TSTRUCT:
+ case types.TSTRUCT:
for _, f := range t.FieldSlice() {
if !f.Sym.IsBlank() && EqCanPanic(f.Type) {
return true
return ANOEQ, t
}
- switch t.Etype {
- case TANY, TFORW:
+ switch t.Kind() {
+ case types.TANY, types.TFORW:
// will be defined later.
return ANOEQ, t
- case TINT8, TUINT8, TINT16, TUINT16,
- TINT32, TUINT32, TINT64, TUINT64,
- TINT, TUINT, TUINTPTR,
- TBOOL, TPTR,
- TCHAN, TUNSAFEPTR:
+ case types.TINT8, types.TUINT8, types.TINT16, types.TUINT16,
+ types.TINT32, types.TUINT32, types.TINT64, types.TUINT64,
+ types.TINT, types.TUINT, types.TUINTPTR,
+ types.TBOOL, types.TPTR,
+ types.TCHAN, types.TUNSAFEPTR:
return AMEM, nil
- case TFUNC, TMAP:
+ case types.TFUNC, types.TMAP:
return ANOEQ, t
- case TFLOAT32:
+ case types.TFLOAT32:
return AFLOAT32, nil
- case TFLOAT64:
+ case types.TFLOAT64:
return AFLOAT64, nil
- case TCOMPLEX64:
+ case types.TCOMPLEX64:
return ACPLX64, nil
- case TCOMPLEX128:
+ case types.TCOMPLEX128:
return ACPLX128, nil
- case TSTRING:
+ case types.TSTRING:
return ASTRING, nil
- case TINTER:
+ case types.TINTER:
if t.IsEmptyInterface() {
return ANILINTER, nil
}
return AINTER, nil
- case TSLICE:
+ case types.TSLICE:
return ANOEQ, t
- case TARRAY:
+ case types.TARRAY:
a, bad := algtype1(t.Elem())
switch a {
case AMEM:
return ASPECIAL, nil
- case TSTRUCT:
+ case types.TSTRUCT:
fields := t.FieldSlice()
// One-field struct is same as that one field alone.
return ret, nil
}
- Fatalf("algtype1: unexpected type %v", t)
+ base.Fatalf("algtype1: unexpected type %v", t)
return 0, nil
}
switch algtype(t) {
default:
// genhash is only called for types that have equality
- Fatalf("genhash %v", t)
+ base.Fatalf("genhash %v", t)
case AMEM0:
return sysClosure("memhash0")
case AMEM8:
// (And the closure generated by genhash will also get
// dead-code eliminated, as we call the subtype hashers
// directly.)
- switch t.Etype {
+ switch t.Kind() {
case types.TARRAY:
genhash(t.Elem())
case types.TSTRUCT:
}
sym := typesymprefix(".hash", t)
- if Debug.r != 0 {
+ if base.Flag.LowerR != 0 {
fmt.Printf("genhash %v %v %v\n", closure, sym, t)
}
- lineno = autogeneratedPos // less confusing than end of input
- dclcontext = PEXTERN
+ base.Pos = autogeneratedPos // less confusing than end of input
+ dclcontext = ir.PEXTERN
// func sym(p *T, h uintptr) uintptr
- tfn := nod(OTFUNC, nil, nil)
- tfn.List.Set2(
+ args := []*ir.Field{
namedfield("p", types.NewPtr(t)),
- namedfield("h", types.Types[TUINTPTR]),
- )
- tfn.Rlist.Set1(anonfield(types.Types[TUINTPTR]))
+ namedfield("h", types.Types[types.TUINTPTR]),
+ }
+ results := []*ir.Field{anonfield(types.Types[types.TUINTPTR])}
+ tfn := ir.NewFuncType(base.Pos, nil, args, results)
fn := dclfunc(sym, tfn)
- np := asNode(tfn.Type.Params().Field(0).Nname)
- nh := asNode(tfn.Type.Params().Field(1).Nname)
+ np := ir.AsNode(tfn.Type().Params().Field(0).Nname)
+ nh := ir.AsNode(tfn.Type().Params().Field(1).Nname)
- switch t.Etype {
+ switch t.Kind() {
case types.TARRAY:
// An array of pure memory would be handled by the
// standard algorithm, so the element type must not be
// pure memory.
hashel := hashfor(t.Elem())
- n := nod(ORANGE, nil, nod(ODEREF, np, nil))
- ni := newname(lookup("i"))
- ni.Type = types.Types[TINT]
- n.List.Set1(ni)
- n.SetColas(true)
- colasdefn(n.List.Slice(), n)
- ni = n.List.First()
+ // for i := 0; i < nelem; i++
+ ni := temp(types.Types[types.TINT])
+ init := ir.Nod(ir.OAS, ni, nodintconst(0))
+ cond := ir.Nod(ir.OLT, ni, nodintconst(t.NumElem()))
+ post := ir.Nod(ir.OAS, ni, ir.Nod(ir.OADD, ni, nodintconst(1)))
+ loop := ir.Nod(ir.OFOR, cond, post)
+ loop.PtrInit().Append(init)
// h = hashel(&p[i], h)
- call := nod(OCALL, hashel, nil)
+ call := ir.Nod(ir.OCALL, hashel, nil)
- nx := nod(OINDEX, np, ni)
+ nx := ir.Nod(ir.OINDEX, np, ni)
nx.SetBounded(true)
- na := nod(OADDR, nx, nil)
- call.List.Append(na)
- call.List.Append(nh)
- n.Nbody.Append(nod(OAS, nh, call))
+ na := nodAddr(nx)
+ call.PtrList().Append(na)
+ call.PtrList().Append(nh)
+ loop.PtrBody().Append(ir.Nod(ir.OAS, nh, call))
- fn.Nbody.Append(n)
+ fn.PtrBody().Append(loop)
case types.TSTRUCT:
// Walk the struct using memhash for runs of AMEM
// Hash non-memory fields with appropriate hash function.
if !IsRegularMemory(f.Type) {
hashel := hashfor(f.Type)
- call := nod(OCALL, hashel, nil)
- nx := nodSym(OXDOT, np, f.Sym) // TODO: fields from other packages?
- na := nod(OADDR, nx, nil)
- call.List.Append(na)
- call.List.Append(nh)
- fn.Nbody.Append(nod(OAS, nh, call))
+ call := ir.Nod(ir.OCALL, hashel, nil)
+ nx := nodSym(ir.OXDOT, np, f.Sym) // TODO: fields from other packages?
+ na := nodAddr(nx)
+ call.PtrList().Append(na)
+ call.PtrList().Append(nh)
+ fn.PtrBody().Append(ir.Nod(ir.OAS, nh, call))
i++
continue
}
// h = hashel(&p.first, size, h)
hashel := hashmem(f.Type)
- call := nod(OCALL, hashel, nil)
- nx := nodSym(OXDOT, np, f.Sym) // TODO: fields from other packages?
- na := nod(OADDR, nx, nil)
- call.List.Append(na)
- call.List.Append(nh)
- call.List.Append(nodintconst(size))
- fn.Nbody.Append(nod(OAS, nh, call))
+ call := ir.Nod(ir.OCALL, hashel, nil)
+ nx := nodSym(ir.OXDOT, np, f.Sym) // TODO: fields from other packages?
+ na := nodAddr(nx)
+ call.PtrList().Append(na)
+ call.PtrList().Append(nh)
+ call.PtrList().Append(nodintconst(size))
+ fn.PtrBody().Append(ir.Nod(ir.OAS, nh, call))
i = next
}
}
- r := nod(ORETURN, nil, nil)
- r.List.Append(nh)
- fn.Nbody.Append(r)
+ r := ir.Nod(ir.ORETURN, nil, nil)
+ r.PtrList().Append(nh)
+ fn.PtrBody().Append(r)
- if Debug.r != 0 {
- dumplist("genhash body", fn.Nbody)
+ if base.Flag.LowerR != 0 {
+ ir.DumpList("genhash body", fn.Body())
}
funcbody()
- fn.Func.SetDupok(true)
- fn = typecheck(fn, ctxStmt)
+ fn.SetDupok(true)
+ typecheckFunc(fn)
Curfn = fn
- typecheckslice(fn.Nbody.Slice(), ctxStmt)
+ typecheckslice(fn.Body().Slice(), ctxStmt)
Curfn = nil
- if debug_dclstack != 0 {
+ if base.Debug.DclStack != 0 {
testdclstack()
}
- fn.Func.SetNilCheckDisabled(true)
- xtop = append(xtop, fn)
+ fn.SetNilCheckDisabled(true)
+ Target.Decls = append(Target.Decls, fn)
// Build closure. It doesn't close over any variables, so
// it contains just the function pointer.
return closure
}
-func hashfor(t *types.Type) *Node {
+func hashfor(t *types.Type) ir.Node {
var sym *types.Sym
switch a, _ := algtype1(t); a {
case AMEM:
- Fatalf("hashfor with AMEM type")
+ base.Fatalf("hashfor with AMEM type")
case AINTER:
sym = Runtimepkg.Lookup("interhash")
case ANILINTER:
sym = typesymprefix(".hash", t)
}
- n := newname(sym)
+ n := NewName(sym)
setNodeNameFunc(n)
- n.Type = functype(nil, []*Node{
+ n.SetType(functype(nil, []*ir.Field{
anonfield(types.NewPtr(t)),
- anonfield(types.Types[TUINTPTR]),
- }, []*Node{
- anonfield(types.Types[TUINTPTR]),
- })
+ anonfield(types.Types[types.TUINTPTR]),
+ }, []*ir.Field{
+ anonfield(types.Types[types.TUINTPTR]),
+ }))
return n
}
return closure
}
sym := typesymprefix(".eq", t)
- if Debug.r != 0 {
+ if base.Flag.LowerR != 0 {
fmt.Printf("geneq %v\n", t)
}
// Autogenerate code for equality of structs and arrays.
- lineno = autogeneratedPos // less confusing than end of input
- dclcontext = PEXTERN
+ base.Pos = autogeneratedPos // less confusing than end of input
+ dclcontext = ir.PEXTERN
// func sym(p, q *T) bool
- tfn := nod(OTFUNC, nil, nil)
- tfn.List.Set2(
- namedfield("p", types.NewPtr(t)),
- namedfield("q", types.NewPtr(t)),
- )
- tfn.Rlist.Set1(namedfield("r", types.Types[TBOOL]))
+ tfn := ir.NewFuncType(base.Pos, nil,
+ []*ir.Field{namedfield("p", types.NewPtr(t)), namedfield("q", types.NewPtr(t))},
+ []*ir.Field{namedfield("r", types.Types[types.TBOOL])})
fn := dclfunc(sym, tfn)
- np := asNode(tfn.Type.Params().Field(0).Nname)
- nq := asNode(tfn.Type.Params().Field(1).Nname)
- nr := asNode(tfn.Type.Results().Field(0).Nname)
+ np := ir.AsNode(tfn.Type().Params().Field(0).Nname)
+ nq := ir.AsNode(tfn.Type().Params().Field(1).Nname)
+ nr := ir.AsNode(tfn.Type().Results().Field(0).Nname)
// Label to jump to if an equality test fails.
neq := autolabel(".neq")
// We reach here only for types that have equality but
// cannot be handled by the standard algorithms,
// so t must be either an array or a struct.
- switch t.Etype {
+ switch t.Kind() {
default:
- Fatalf("geneq %v", t)
+ base.Fatalf("geneq %v", t)
- case TARRAY:
+ case types.TARRAY:
nelem := t.NumElem()
// checkAll generates code to check the equality of all array elements.
//
// TODO(josharian): consider doing some loop unrolling
// for larger nelem as well, processing a few elements at a time in a loop.
- checkAll := func(unroll int64, last bool, eq func(pi, qi *Node) *Node) {
+ checkAll := func(unroll int64, last bool, eq func(pi, qi ir.Node) ir.Node) {
// checkIdx generates a node to check for equality at index i.
- checkIdx := func(i *Node) *Node {
+ checkIdx := func(i ir.Node) ir.Node {
// pi := p[i]
- pi := nod(OINDEX, np, i)
+ pi := ir.Nod(ir.OINDEX, np, i)
pi.SetBounded(true)
- pi.Type = t.Elem()
+ pi.SetType(t.Elem())
// qi := q[i]
- qi := nod(OINDEX, nq, i)
+ qi := ir.Nod(ir.OINDEX, nq, i)
qi.SetBounded(true)
- qi.Type = t.Elem()
+ qi.SetType(t.Elem())
return eq(pi, qi)
}
// Generate a series of checks.
for i := int64(0); i < nelem; i++ {
// if check {} else { goto neq }
- nif := nod(OIF, checkIdx(nodintconst(i)), nil)
- nif.Rlist.Append(nodSym(OGOTO, nil, neq))
- fn.Nbody.Append(nif)
+ nif := ir.Nod(ir.OIF, checkIdx(nodintconst(i)), nil)
+ nif.PtrRlist().Append(nodSym(ir.OGOTO, nil, neq))
+ fn.PtrBody().Append(nif)
}
if last {
- fn.Nbody.Append(nod(OAS, nr, checkIdx(nodintconst(nelem))))
+ fn.PtrBody().Append(ir.Nod(ir.OAS, nr, checkIdx(nodintconst(nelem))))
}
} else {
// Generate a for loop.
// for i := 0; i < nelem; i++
- i := temp(types.Types[TINT])
- init := nod(OAS, i, nodintconst(0))
- cond := nod(OLT, i, nodintconst(nelem))
- post := nod(OAS, i, nod(OADD, i, nodintconst(1)))
- loop := nod(OFOR, cond, post)
- loop.Ninit.Append(init)
+ i := temp(types.Types[types.TINT])
+ init := ir.Nod(ir.OAS, i, nodintconst(0))
+ cond := ir.Nod(ir.OLT, i, nodintconst(nelem))
+ post := ir.Nod(ir.OAS, i, ir.Nod(ir.OADD, i, nodintconst(1)))
+ loop := ir.Nod(ir.OFOR, cond, post)
+ loop.PtrInit().Append(init)
// if eq(pi, qi) {} else { goto neq }
- nif := nod(OIF, checkIdx(i), nil)
- nif.Rlist.Append(nodSym(OGOTO, nil, neq))
- loop.Nbody.Append(nif)
- fn.Nbody.Append(loop)
+ nif := ir.Nod(ir.OIF, checkIdx(i), nil)
+ nif.PtrRlist().Append(nodSym(ir.OGOTO, nil, neq))
+ loop.PtrBody().Append(nif)
+ fn.PtrBody().Append(loop)
if last {
- fn.Nbody.Append(nod(OAS, nr, nodbool(true)))
+ fn.PtrBody().Append(ir.Nod(ir.OAS, nr, nodbool(true)))
}
}
}
- switch t.Elem().Etype {
- case TSTRING:
+ switch t.Elem().Kind() {
+ case types.TSTRING:
// Do two loops. First, check that all the lengths match (cheap).
// Second, check that all the contents match (expensive).
// TODO: when the array size is small, unroll the length match checks.
- checkAll(3, false, func(pi, qi *Node) *Node {
+ checkAll(3, false, func(pi, qi ir.Node) ir.Node {
// Compare lengths.
eqlen, _ := eqstring(pi, qi)
return eqlen
})
- checkAll(1, true, func(pi, qi *Node) *Node {
+ checkAll(1, true, func(pi, qi ir.Node) ir.Node {
// Compare contents.
_, eqmem := eqstring(pi, qi)
return eqmem
})
- case TFLOAT32, TFLOAT64:
- checkAll(2, true, func(pi, qi *Node) *Node {
+ case types.TFLOAT32, types.TFLOAT64:
+ checkAll(2, true, func(pi, qi ir.Node) ir.Node {
// p[i] == q[i]
- return nod(OEQ, pi, qi)
+ return ir.Nod(ir.OEQ, pi, qi)
})
// TODO: pick apart structs, do them piecemeal too
default:
- checkAll(1, true, func(pi, qi *Node) *Node {
+ checkAll(1, true, func(pi, qi ir.Node) ir.Node {
// p[i] == q[i]
- return nod(OEQ, pi, qi)
+ return ir.Nod(ir.OEQ, pi, qi)
})
}
- case TSTRUCT:
+ case types.TSTRUCT:
// Build a list of conditions to satisfy.
// The conditions are a list-of-lists. Conditions are reorderable
// within each inner list. The outer lists must be evaluated in order.
- var conds [][]*Node
- conds = append(conds, []*Node{})
- and := func(n *Node) {
+ var conds [][]ir.Node
+ conds = append(conds, []ir.Node{})
+ and := func(n ir.Node) {
i := len(conds) - 1
conds[i] = append(conds[i], n)
}
if !IsRegularMemory(f.Type) {
if EqCanPanic(f.Type) {
// Enforce ordering by starting a new set of reorderable conditions.
- conds = append(conds, []*Node{})
+ conds = append(conds, []ir.Node{})
}
- p := nodSym(OXDOT, np, f.Sym)
- q := nodSym(OXDOT, nq, f.Sym)
+ p := nodSym(ir.OXDOT, np, f.Sym)
+ q := nodSym(ir.OXDOT, nq, f.Sym)
switch {
case f.Type.IsString():
eqlen, eqmem := eqstring(p, q)
and(eqlen)
and(eqmem)
default:
- and(nod(OEQ, p, q))
+ and(ir.Nod(ir.OEQ, p, q))
}
if EqCanPanic(f.Type) {
// Also enforce ordering after something that can panic.
- conds = append(conds, []*Node{})
+ conds = append(conds, []ir.Node{})
}
i++
continue
// Sort conditions to put runtime calls last.
// Preserve the rest of the ordering.
- var flatConds []*Node
+ var flatConds []ir.Node
for _, c := range conds {
- isCall := func(n *Node) bool {
- return n.Op == OCALL || n.Op == OCALLFUNC
+ isCall := func(n ir.Node) bool {
+ return n.Op() == ir.OCALL || n.Op() == ir.OCALLFUNC
}
sort.SliceStable(c, func(i, j int) bool {
return !isCall(c[i]) && isCall(c[j])
}
if len(flatConds) == 0 {
- fn.Nbody.Append(nod(OAS, nr, nodbool(true)))
+ fn.PtrBody().Append(ir.Nod(ir.OAS, nr, nodbool(true)))
} else {
for _, c := range flatConds[:len(flatConds)-1] {
// if cond {} else { goto neq }
- n := nod(OIF, c, nil)
- n.Rlist.Append(nodSym(OGOTO, nil, neq))
- fn.Nbody.Append(n)
+ n := ir.Nod(ir.OIF, c, nil)
+ n.PtrRlist().Append(nodSym(ir.OGOTO, nil, neq))
+ fn.PtrBody().Append(n)
}
- fn.Nbody.Append(nod(OAS, nr, flatConds[len(flatConds)-1]))
+ fn.PtrBody().Append(ir.Nod(ir.OAS, nr, flatConds[len(flatConds)-1]))
}
}
// ret:
// return
ret := autolabel(".ret")
- fn.Nbody.Append(nodSym(OLABEL, nil, ret))
- fn.Nbody.Append(nod(ORETURN, nil, nil))
+ fn.PtrBody().Append(nodSym(ir.OLABEL, nil, ret))
+ fn.PtrBody().Append(ir.Nod(ir.ORETURN, nil, nil))
// neq:
// r = false
// return (or goto ret)
- fn.Nbody.Append(nodSym(OLABEL, nil, neq))
- fn.Nbody.Append(nod(OAS, nr, nodbool(false)))
- if EqCanPanic(t) || hasCall(fn) {
+ fn.PtrBody().Append(nodSym(ir.OLABEL, nil, neq))
+ fn.PtrBody().Append(ir.Nod(ir.OAS, nr, nodbool(false)))
+ if EqCanPanic(t) || anyCall(fn) {
// Epilogue is large, so share it with the equal case.
- fn.Nbody.Append(nodSym(OGOTO, nil, ret))
+ fn.PtrBody().Append(nodSym(ir.OGOTO, nil, ret))
} else {
// Epilogue is small, so don't bother sharing.
- fn.Nbody.Append(nod(ORETURN, nil, nil))
+ fn.PtrBody().Append(ir.Nod(ir.ORETURN, nil, nil))
}
// TODO(khr): the epilogue size detection condition above isn't perfect.
// We should really do a generic CL that shares epilogues across
// the board. See #24936.
- if Debug.r != 0 {
- dumplist("geneq body", fn.Nbody)
+ if base.Flag.LowerR != 0 {
+ ir.DumpList("geneq body", fn.Body())
}
funcbody()
- fn.Func.SetDupok(true)
- fn = typecheck(fn, ctxStmt)
+ fn.SetDupok(true)
+ typecheckFunc(fn)
Curfn = fn
- typecheckslice(fn.Nbody.Slice(), ctxStmt)
+ typecheckslice(fn.Body().Slice(), ctxStmt)
Curfn = nil
- if debug_dclstack != 0 {
+ if base.Debug.DclStack != 0 {
testdclstack()
}
// We are comparing a struct or an array,
// neither of which can be nil, and our comparisons
// are shallow.
- fn.Func.SetNilCheckDisabled(true)
- xtop = append(xtop, fn)
+ fn.SetNilCheckDisabled(true)
+ Target.Decls = append(Target.Decls, fn)
// Generate a closure which points at the function we just generated.
dsymptr(closure, 0, sym.Linksym(), 0)
return closure
}
-func hasCall(n *Node) bool {
- if n.Op == OCALL || n.Op == OCALLFUNC {
- return true
- }
- if n.Left != nil && hasCall(n.Left) {
- return true
- }
- if n.Right != nil && hasCall(n.Right) {
- return true
- }
- for _, x := range n.Ninit.Slice() {
- if hasCall(x) {
- return true
- }
- }
- for _, x := range n.Nbody.Slice() {
- if hasCall(x) {
- return true
- }
- }
- for _, x := range n.List.Slice() {
- if hasCall(x) {
- return true
- }
- }
- for _, x := range n.Rlist.Slice() {
- if hasCall(x) {
- return true
- }
- }
- return false
+// anyCall reports whether the body of fn contains any function call
+// (OCALL or OCALLFUNC), walking the whole function with ir.Any. Used
+// by geneq to decide whether the function epilogue is large enough to
+// be worth sharing between the equal and not-equal paths.
+func anyCall(fn *ir.Func) bool {
+ return ir.Any(fn, func(n ir.Node) bool {
+ // TODO(rsc): No methods?
+ op := n.Op()
+ return op == ir.OCALL || op == ir.OCALLFUNC
+ })
+}
// eqfield returns the node
// p.field == q.field
-func eqfield(p *Node, q *Node, field *types.Sym) *Node {
- nx := nodSym(OXDOT, p, field)
- ny := nodSym(OXDOT, q, field)
- ne := nod(OEQ, nx, ny)
+func eqfield(p ir.Node, q ir.Node, field *types.Sym) ir.Node {
+ nx := nodSym(ir.OXDOT, p, field)
+ ny := nodSym(ir.OXDOT, q, field)
+ ne := ir.Nod(ir.OEQ, nx, ny)
return ne
}
// memequal(s.ptr, t.ptr, len(s))
// which can be used to construct string equality comparison.
// eqlen must be evaluated before eqmem, and shortcircuiting is required.
-func eqstring(s, t *Node) (eqlen, eqmem *Node) {
- s = conv(s, types.Types[TSTRING])
- t = conv(t, types.Types[TSTRING])
- sptr := nod(OSPTR, s, nil)
- tptr := nod(OSPTR, t, nil)
- slen := conv(nod(OLEN, s, nil), types.Types[TUINTPTR])
- tlen := conv(nod(OLEN, t, nil), types.Types[TUINTPTR])
+func eqstring(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) {
+ s = conv(s, types.Types[types.TSTRING])
+ t = conv(t, types.Types[types.TSTRING])
+ sptr := ir.Nod(ir.OSPTR, s, nil)
+ tptr := ir.Nod(ir.OSPTR, t, nil)
+ slen := conv(ir.Nod(ir.OLEN, s, nil), types.Types[types.TUINTPTR])
+ tlen := conv(ir.Nod(ir.OLEN, t, nil), types.Types[types.TUINTPTR])
fn := syslook("memequal")
- fn = substArgTypes(fn, types.Types[TUINT8], types.Types[TUINT8])
- call := nod(OCALL, fn, nil)
- call.List.Append(sptr, tptr, slen.copy())
- call = typecheck(call, ctxExpr|ctxMultiOK)
-
- cmp := nod(OEQ, slen, tlen)
- cmp = typecheck(cmp, ctxExpr)
- cmp.Type = types.Types[TBOOL]
+ fn = substArgTypes(fn, types.Types[types.TUINT8], types.Types[types.TUINT8])
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, []ir.Node{sptr, tptr, ir.Copy(slen)})
+ TypecheckCall(call)
+
+ cmp := ir.NewBinaryExpr(base.Pos, ir.OEQ, slen, tlen)
+ cmp = typecheck(cmp, ctxExpr).(*ir.BinaryExpr)
+ cmp.SetType(types.Types[types.TBOOL])
return cmp, call
}
// ifaceeq(s.tab, s.data, t.data) (or efaceeq(s.typ, s.data, t.data), as appropriate)
// which can be used to construct interface equality comparison.
// eqtab must be evaluated before eqdata, and shortcircuiting is required.
-func eqinterface(s, t *Node) (eqtab, eqdata *Node) {
- if !types.Identical(s.Type, t.Type) {
- Fatalf("eqinterface %v %v", s.Type, t.Type)
+func eqinterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) {
+ if !types.Identical(s.Type(), t.Type()) {
+ base.Fatalf("eqinterface %v %v", s.Type(), t.Type())
}
// func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool)
// func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool)
- var fn *Node
- if s.Type.IsEmptyInterface() {
+ var fn ir.Node
+ if s.Type().IsEmptyInterface() {
fn = syslook("efaceeq")
} else {
fn = syslook("ifaceeq")
}
- stab := nod(OITAB, s, nil)
- ttab := nod(OITAB, t, nil)
- sdata := nod(OIDATA, s, nil)
- tdata := nod(OIDATA, t, nil)
- sdata.Type = types.Types[TUNSAFEPTR]
- tdata.Type = types.Types[TUNSAFEPTR]
+ stab := ir.Nod(ir.OITAB, s, nil)
+ ttab := ir.Nod(ir.OITAB, t, nil)
+ sdata := ir.Nod(ir.OIDATA, s, nil)
+ tdata := ir.Nod(ir.OIDATA, t, nil)
+ sdata.SetType(types.Types[types.TUNSAFEPTR])
+ tdata.SetType(types.Types[types.TUNSAFEPTR])
sdata.SetTypecheck(1)
tdata.SetTypecheck(1)
- call := nod(OCALL, fn, nil)
- call.List.Append(stab, sdata, tdata)
- call = typecheck(call, ctxExpr|ctxMultiOK)
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, []ir.Node{stab, sdata, tdata})
+ TypecheckCall(call)
- cmp := nod(OEQ, stab, ttab)
- cmp = typecheck(cmp, ctxExpr)
- cmp.Type = types.Types[TBOOL]
+ cmp := ir.NewBinaryExpr(base.Pos, ir.OEQ, stab, ttab)
+ cmp = typecheck(cmp, ctxExpr).(*ir.BinaryExpr)
+ cmp.SetType(types.Types[types.TBOOL])
return cmp, call
}
// eqmem returns the node
// memequal(&p.field, &q.field [, size])
-func eqmem(p *Node, q *Node, field *types.Sym, size int64) *Node {
- nx := nod(OADDR, nodSym(OXDOT, p, field), nil)
- ny := nod(OADDR, nodSym(OXDOT, q, field), nil)
- nx = typecheck(nx, ctxExpr)
- ny = typecheck(ny, ctxExpr)
-
- fn, needsize := eqmemfunc(size, nx.Type.Elem())
- call := nod(OCALL, fn, nil)
- call.List.Append(nx)
- call.List.Append(ny)
+func eqmem(p ir.Node, q ir.Node, field *types.Sym, size int64) ir.Node {
+ nx := typecheck(nodAddr(nodSym(ir.OXDOT, p, field)), ctxExpr)
+ ny := typecheck(nodAddr(nodSym(ir.OXDOT, q, field)), ctxExpr)
+
+ fn, needsize := eqmemfunc(size, nx.Type().Elem())
+ call := ir.Nod(ir.OCALL, fn, nil)
+ call.PtrList().Append(nx)
+ call.PtrList().Append(ny)
if needsize {
- call.List.Append(nodintconst(size))
+ call.PtrList().Append(nodintconst(size))
}
return call
}
-func eqmemfunc(size int64, t *types.Type) (fn *Node, needsize bool) {
+func eqmemfunc(size int64, t *types.Type) (fn *ir.Name, needsize bool) {
switch size {
default:
fn = syslook("memequal")
// by padding.
func ispaddedfield(t *types.Type, i int) bool {
if !t.IsStruct() {
- Fatalf("ispaddedfield called non-struct %v", t)
+ base.Fatalf("ispaddedfield called non-struct %v", t)
}
end := t.Width
if i+1 < t.NumFields() {
import (
"bytes"
+ "cmd/compile/internal/base"
"cmd/compile/internal/types"
"fmt"
"sort"
)
+// MaxWidth is the maximum size of a value on the target architecture.
+var MaxWidth int64
+
// sizeCalculationDisabled indicates whether it is safe
// to calculate Types' widths and alignments. See dowidth.
var sizeCalculationDisabled bool
func Rnd(o int64, r int64) int64 {
if r < 1 || r > 8 || r&(r-1) != 0 {
- Fatalf("rnd %d", r)
+ base.Fatalf("rnd %d", r)
}
return (o + r - 1) &^ (r - 1)
}
case langSupported(1, 14, t.Pkg()) && !explicit && types.Identical(m.Type, prev.Type):
return
default:
- yyerrorl(m.Pos, "duplicate method %s", m.Sym.Name)
+ base.ErrorfAt(m.Pos, "duplicate method %s", m.Sym.Name)
}
methods = append(methods, m)
}
}
if !m.Type.IsInterface() {
- yyerrorl(m.Pos, "interface contains embedded non-interface %v", m.Type)
+ base.ErrorfAt(m.Pos, "interface contains embedded non-interface %v", m.Type)
m.SetBroke(true)
t.SetBroke(true)
// Add to fields so that error messages
// (including broken ones, if any) and add to t's
// method set.
for _, t1 := range m.Type.Fields().Slice() {
- f := types.NewField()
- f.Pos = m.Pos // preserve embedding position
- f.Sym = t1.Sym
- f.Type = t1.Type
- f.SetBroke(t1.Broke())
+ // Use m.Pos rather than t1.Pos to preserve embedding position.
+ f := types.NewField(m.Pos, t1.Sym, t1.Type)
addMethod(f, false)
}
}
sort.Sort(methcmp(methods))
- if int64(len(methods)) >= thearch.MAXWIDTH/int64(Widthptr) {
- yyerrorl(typePos(t), "interface too large")
+ if int64(len(methods)) >= MaxWidth/int64(Widthptr) {
+ base.ErrorfAt(typePos(t), "interface too large")
}
for i, m := range methods {
m.Offset = int64(i) * int64(Widthptr)
o = Rnd(o, int64(f.Type.Align))
}
f.Offset = o
- if n := asNode(f.Nname); n != nil {
+ if f.Nname != nil {
// addrescapes has similar code to update these offsets.
// Usually addrescapes runs after widstruct,
// in which case we could drop this,
// NOTE(rsc): This comment may be stale.
// It's possible the ordering has changed and this is
// now the common case. I'm not sure.
- if n.Name.Param.Stackcopy != nil {
- n.Name.Param.Stackcopy.Xoffset = o
- n.Xoffset = 0
- } else {
- n.Xoffset = o
- }
+ f.Nname.(types.VarObject).RecordFrameOffset(o)
}
w := f.Type.Width
if w < 0 {
- Fatalf("invalid width %d", f.Type.Width)
+ base.Fatalf("invalid width %d", f.Type.Width)
}
if w == 0 {
lastzero = o
}
o += w
- maxwidth := thearch.MAXWIDTH
+ maxwidth := MaxWidth
// On 32-bit systems, reflect tables impose an additional constraint
// that each field start offset must fit in 31 bits.
if maxwidth < 1<<32 {
maxwidth = 1<<31 - 1
}
if o >= maxwidth {
- yyerrorl(typePos(errtype), "type %L too large", errtype)
+ base.ErrorfAt(typePos(errtype), "type %L too large", errtype)
o = 8 // small but nonzero
}
}
// We implement a simple DFS loop-finding algorithm. This
// could be faster, but type cycles are rare.
- if t.Sym != nil {
+ if t.Sym() != nil {
// Declared type. Check for loops and otherwise
// recurse on the type expression used in the type
// declaration.
+ // Type imported from package, so it can't be part of
+ // a type loop (otherwise that package should have
+ // failed to compile).
+ if t.Sym().Pkg != types.LocalPkg {
+ return false
+ }
+
for i, x := range *path {
if x == t {
*path = (*path)[i:]
}
*path = append(*path, t)
- if p := asNode(t.Nod).Name.Param; p != nil && findTypeLoop(p.Ntype.Type, path) {
+ if findTypeLoop(t.Obj().(types.TypeObject).TypeDefn(), path) {
return true
}
*path = (*path)[:len(*path)-1]
} else {
// Anonymous type. Recurse on contained types.
- switch t.Etype {
- case TARRAY:
+ switch t.Kind() {
+ case types.TARRAY:
if findTypeLoop(t.Elem(), path) {
return true
}
- case TSTRUCT:
+ case types.TSTRUCT:
for _, f := range t.Fields().Slice() {
if findTypeLoop(f.Type, path) {
return true
}
}
- case TINTER:
+ case types.TINTER:
for _, m := range t.Methods().Slice() {
if m.Type.IsInterface() { // embedded interface
if findTypeLoop(m.Type, path) {
var l []*types.Type
if !findTypeLoop(t, &l) {
- Fatalf("failed to find type loop for: %v", t)
+ base.Fatalf("failed to find type loop for: %v", t)
}
// Rotate loop so that the earliest type declaration is first.
var msg bytes.Buffer
fmt.Fprintf(&msg, "invalid recursive type %v\n", l[0])
for _, t := range l {
- fmt.Fprintf(&msg, "\t%v: %v refers to\n", linestr(typePos(t)), t)
+ fmt.Fprintf(&msg, "\t%v: %v refers to\n", base.FmtPos(typePos(t)), t)
t.SetBroke(true)
}
- fmt.Fprintf(&msg, "\t%v: %v", linestr(typePos(l[0])), l[0])
- yyerrorl(typePos(l[0]), msg.String())
+ fmt.Fprintf(&msg, "\t%v: %v", base.FmtPos(typePos(l[0])), l[0])
+ base.ErrorfAt(typePos(l[0]), msg.String())
}
// dowidth calculates and stores the size and alignment for t.
return
}
if Widthptr == 0 {
- Fatalf("dowidth without betypeinit")
+ base.Fatalf("dowidth without betypeinit")
}
if t == nil {
return
}
t.SetBroke(true)
- Fatalf("width not calculated: %v", t)
+ base.Fatalf("width not calculated: %v", t)
}
// break infinite recursion if the broken recursive type
// defer checkwidth calls until after we're done
defercheckwidth()
- lno := lineno
- if asNode(t.Nod) != nil {
- lineno = asNode(t.Nod).Pos
+ lno := base.Pos
+ if pos := t.Pos(); pos.IsKnown() {
+ base.Pos = pos
}
t.Width = -2
t.Align = 0 // 0 means use t.Width, below
- et := t.Etype
+ et := t.Kind()
switch et {
- case TFUNC, TCHAN, TMAP, TSTRING:
+ case types.TFUNC, types.TCHAN, types.TMAP, types.TSTRING:
break
// simtype == 0 during bootstrap
default:
- if simtype[t.Etype] != 0 {
- et = simtype[t.Etype]
+ if simtype[t.Kind()] != 0 {
+ et = simtype[t.Kind()]
}
}
var w int64
switch et {
default:
- Fatalf("dowidth: unknown type: %v", t)
+ base.Fatalf("dowidth: unknown type: %v", t)
// compiler-specific stuff
- case TINT8, TUINT8, TBOOL:
+ case types.TINT8, types.TUINT8, types.TBOOL:
// bool is int8
w = 1
- case TINT16, TUINT16:
+ case types.TINT16, types.TUINT16:
w = 2
- case TINT32, TUINT32, TFLOAT32:
+ case types.TINT32, types.TUINT32, types.TFLOAT32:
w = 4
- case TINT64, TUINT64, TFLOAT64:
+ case types.TINT64, types.TUINT64, types.TFLOAT64:
w = 8
t.Align = uint8(Widthreg)
- case TCOMPLEX64:
+ case types.TCOMPLEX64:
w = 8
t.Align = 4
- case TCOMPLEX128:
+ case types.TCOMPLEX128:
w = 16
t.Align = uint8(Widthreg)
- case TPTR:
+ case types.TPTR:
w = int64(Widthptr)
checkwidth(t.Elem())
- case TUNSAFEPTR:
+ case types.TUNSAFEPTR:
w = int64(Widthptr)
- case TINTER: // implemented as 2 pointers
+ case types.TINTER: // implemented as 2 pointers
w = 2 * int64(Widthptr)
t.Align = uint8(Widthptr)
expandiface(t)
- case TCHAN: // implemented as pointer
+ case types.TCHAN: // implemented as pointer
w = int64(Widthptr)
checkwidth(t.Elem())
t1 := types.NewChanArgs(t)
checkwidth(t1)
- case TCHANARGS:
+ case types.TCHANARGS:
t1 := t.ChanArgs()
dowidth(t1) // just in case
if t1.Elem().Width >= 1<<16 {
- yyerrorl(typePos(t1), "channel element type too large (>64kB)")
+ base.ErrorfAt(typePos(t1), "channel element type too large (>64kB)")
}
w = 1 // anything will do
- case TMAP: // implemented as pointer
+ case types.TMAP: // implemented as pointer
w = int64(Widthptr)
checkwidth(t.Elem())
checkwidth(t.Key())
- case TFORW: // should have been filled in
+ case types.TFORW: // should have been filled in
reportTypeLoop(t)
w = 1 // anything will do
- case TANY:
- // dummy type; should be replaced before use.
- Fatalf("dowidth any")
+ case types.TANY:
+ // not a real type; should be replaced before use.
+ base.Fatalf("dowidth any")
- case TSTRING:
+ case types.TSTRING:
if sizeofString == 0 {
- Fatalf("early dowidth string")
+ base.Fatalf("early dowidth string")
}
w = sizeofString
t.Align = uint8(Widthptr)
- case TARRAY:
+ case types.TARRAY:
if t.Elem() == nil {
break
}
dowidth(t.Elem())
if t.Elem().Width != 0 {
- cap := (uint64(thearch.MAXWIDTH) - 1) / uint64(t.Elem().Width)
+ cap := (uint64(MaxWidth) - 1) / uint64(t.Elem().Width)
if uint64(t.NumElem()) > cap {
- yyerrorl(typePos(t), "type %L larger than address space", t)
+ base.ErrorfAt(typePos(t), "type %L larger than address space", t)
}
}
w = t.NumElem() * t.Elem().Width
t.Align = t.Elem().Align
- case TSLICE:
+ case types.TSLICE:
if t.Elem() == nil {
break
}
checkwidth(t.Elem())
t.Align = uint8(Widthptr)
- case TSTRUCT:
+ case types.TSTRUCT:
if t.IsFuncArgStruct() {
- Fatalf("dowidth fn struct %v", t)
+ base.Fatalf("dowidth fn struct %v", t)
}
w = widstruct(t, t, 0, 1)
// make fake type to check later to
// trigger function argument computation.
- case TFUNC:
+ case types.TFUNC:
t1 := types.NewFuncArgs(t)
checkwidth(t1)
w = int64(Widthptr) // width of func type is pointer
// function is 3 cat'ed structures;
// compute their widths as side-effect.
- case TFUNCARGS:
+ case types.TFUNCARGS:
t1 := t.FuncArgs()
w = widstruct(t1, t1.Recvs(), 0, 0)
w = widstruct(t1, t1.Params(), w, Widthreg)
w = widstruct(t1, t1.Results(), w, Widthreg)
t1.Extra.(*types.Func).Argwid = w
if w%int64(Widthreg) != 0 {
- Warn("bad type %v %d\n", t1, w)
+ base.Warn("bad type %v %d\n", t1, w)
}
t.Align = 1
}
if Widthptr == 4 && w != int64(int32(w)) {
- yyerrorl(typePos(t), "type %v too large", t)
+ base.ErrorfAt(typePos(t), "type %v too large", t)
}
t.Width = w
if t.Align == 0 {
if w == 0 || w > 8 || w&(w-1) != 0 {
- Fatalf("invalid alignment for %v", t)
+ base.Fatalf("invalid alignment for %v", t)
}
t.Align = uint8(w)
}
- lineno = lno
+ base.Pos = lno
resumecheckwidth()
}
+// CalcStructSize calculates the size of s,
+// filling in s.Width and s.Align,
+// even if size calculation is otherwise disabled.
+func CalcStructSize(s *types.Type) {
+ s.Width = widstruct(s, s, 0, 1) // sets align
+}
+
// when a type's width should be known, we call checkwidth
// to compute it. during a declaration like
//
// function arg structs should not be checked
// outside of the enclosing function.
if t.IsFuncArgStruct() {
- Fatalf("checkwidth %v", t)
+ base.Fatalf("checkwidth %v", t)
}
if defercalc == 0 {
package gc
import (
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
)
marked map[*types.Type]bool // types already seen by markType
}
+// markObject visits a reachable object.
+func (p *exporter) markObject(n ir.Node) {
+ if n.Op() == ir.ONAME {
+ n := n.(*ir.Name)
+ if n.Class() == ir.PFUNC {
+ inlFlood(n, exportsym)
+ }
+ }
+
+ p.markType(n.Type())
+}
+
// markType recursively visits types reachable from t to identify
// functions whose inline bodies may be needed.
func (p *exporter) markType(t *types.Type) {
// only their unexpanded method set (i.e., exclusive of
// interface embeddings), and the switch statement below
// handles their full method set.
- if t.Sym != nil && t.Etype != TINTER {
+ if t.Sym() != nil && t.Kind() != types.TINTER {
for _, m := range t.Methods().Slice() {
if types.IsExported(m.Sym.Name) {
- p.markType(m.Type)
+ p.markObject(ir.AsNode(m.Nname))
}
}
}
// Notably, we don't mark function parameter types, because
// the user already needs some way to construct values of
// those types.
- switch t.Etype {
- case TPTR, TARRAY, TSLICE:
+ switch t.Kind() {
+ case types.TPTR, types.TARRAY, types.TSLICE:
p.markType(t.Elem())
- case TCHAN:
+ case types.TCHAN:
if t.ChanDir().CanRecv() {
p.markType(t.Elem())
}
- case TMAP:
+ case types.TMAP:
p.markType(t.Key())
p.markType(t.Elem())
- case TSTRUCT:
+ case types.TSTRUCT:
for _, f := range t.FieldSlice() {
if types.IsExported(f.Sym.Name) || f.Embedded != 0 {
p.markType(f.Type)
}
}
- case TFUNC:
- // If t is the type of a function or method, then
- // t.Nname() is its ONAME. Mark its inline body and
- // any recursively called functions for export.
- inlFlood(asNode(t.Nname()))
-
+ case types.TFUNC:
for _, f := range t.Results().FieldSlice() {
p.markType(f.Type)
}
- case TINTER:
+ case types.TINTER:
for _, f := range t.FieldSlice() {
if types.IsExported(f.Sym.Name) {
p.markType(f.Type)
// elements have been initialized before
predecl = []*types.Type{
// basic types
- types.Types[TBOOL],
- types.Types[TINT],
- types.Types[TINT8],
- types.Types[TINT16],
- types.Types[TINT32],
- types.Types[TINT64],
- types.Types[TUINT],
- types.Types[TUINT8],
- types.Types[TUINT16],
- types.Types[TUINT32],
- types.Types[TUINT64],
- types.Types[TUINTPTR],
- types.Types[TFLOAT32],
- types.Types[TFLOAT64],
- types.Types[TCOMPLEX64],
- types.Types[TCOMPLEX128],
- types.Types[TSTRING],
+ types.Types[types.TBOOL],
+ types.Types[types.TINT],
+ types.Types[types.TINT8],
+ types.Types[types.TINT16],
+ types.Types[types.TINT32],
+ types.Types[types.TINT64],
+ types.Types[types.TUINT],
+ types.Types[types.TUINT8],
+ types.Types[types.TUINT16],
+ types.Types[types.TUINT32],
+ types.Types[types.TUINT64],
+ types.Types[types.TUINTPTR],
+ types.Types[types.TFLOAT32],
+ types.Types[types.TFLOAT64],
+ types.Types[types.TCOMPLEX64],
+ types.Types[types.TCOMPLEX128],
+ types.Types[types.TSTRING],
// basic type aliases
- types.Bytetype,
- types.Runetype,
+ types.ByteType,
+ types.RuneType,
// error
- types.Errortype,
+ types.ErrorType,
// untyped types
types.UntypedBool,
types.UntypedFloat,
types.UntypedComplex,
types.UntypedString,
- types.Types[TNIL],
+ types.Types[types.TNIL],
// package unsafe
- types.Types[TUNSAFEPTR],
+ types.Types[types.TUNSAFEPTR],
// invalid type (package contains errors)
- types.Types[Txxx],
+ types.Types[types.Txxx],
// any type, for builtin export data
- types.Types[TANY],
+ types.Types[types.TANY],
}
}
return predecl
+++ /dev/null
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
- "cmd/internal/src"
-)
-
-// numImport tracks how often a package with a given name is imported.
-// It is used to provide a better error message (by using the package
-// path to disambiguate) if a package that appears multiple times with
-// the same name appears in an error message.
-var numImport = make(map[string]int)
-
-func npos(pos src.XPos, n *Node) *Node {
- n.Pos = pos
- return n
-}
-
-func builtinCall(op Op) *Node {
- return nod(OCALL, mkname(builtinpkg.Lookup(goopnames[op])), nil)
-}
package gc
-import "runtime"
+import (
+ "cmd/compile/internal/base"
+ "runtime"
+)
func startMutexProfiling() {
- Fatalf("mutex profiling unavailable in version %v", runtime.Version())
+ base.Fatalf("mutex profiling unavailable in version %v", runtime.Version())
}
package gc
-import "cmd/compile/internal/types"
+import (
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+)
var runtimeDecls = [...]struct {
name string
func runtimeTypes() []*types.Type {
var typs [132]*types.Type
- typs[0] = types.Bytetype
+ typs[0] = types.ByteType
typs[1] = types.NewPtr(typs[0])
- typs[2] = types.Types[TANY]
+ typs[2] = types.Types[types.TANY]
typs[3] = types.NewPtr(typs[2])
- typs[4] = functype(nil, []*Node{anonfield(typs[1])}, []*Node{anonfield(typs[3])})
- typs[5] = types.Types[TUINTPTR]
- typs[6] = types.Types[TBOOL]
- typs[7] = types.Types[TUNSAFEPTR]
- typs[8] = functype(nil, []*Node{anonfield(typs[5]), anonfield(typs[1]), anonfield(typs[6])}, []*Node{anonfield(typs[7])})
+ typs[4] = functype(nil, []*ir.Field{anonfield(typs[1])}, []*ir.Field{anonfield(typs[3])})
+ typs[5] = types.Types[types.TUINTPTR]
+ typs[6] = types.Types[types.TBOOL]
+ typs[7] = types.Types[types.TUNSAFEPTR]
+ typs[8] = functype(nil, []*ir.Field{anonfield(typs[5]), anonfield(typs[1]), anonfield(typs[6])}, []*ir.Field{anonfield(typs[7])})
typs[9] = functype(nil, nil, nil)
- typs[10] = types.Types[TINTER]
- typs[11] = functype(nil, []*Node{anonfield(typs[10])}, nil)
- typs[12] = types.Types[TINT32]
+ typs[10] = types.Types[types.TINTER]
+ typs[11] = functype(nil, []*ir.Field{anonfield(typs[10])}, nil)
+ typs[12] = types.Types[types.TINT32]
typs[13] = types.NewPtr(typs[12])
- typs[14] = functype(nil, []*Node{anonfield(typs[13])}, []*Node{anonfield(typs[10])})
- typs[15] = types.Types[TINT]
- typs[16] = functype(nil, []*Node{anonfield(typs[15]), anonfield(typs[15])}, nil)
- typs[17] = types.Types[TUINT]
- typs[18] = functype(nil, []*Node{anonfield(typs[17]), anonfield(typs[15])}, nil)
- typs[19] = functype(nil, []*Node{anonfield(typs[6])}, nil)
- typs[20] = types.Types[TFLOAT64]
- typs[21] = functype(nil, []*Node{anonfield(typs[20])}, nil)
- typs[22] = types.Types[TINT64]
- typs[23] = functype(nil, []*Node{anonfield(typs[22])}, nil)
- typs[24] = types.Types[TUINT64]
- typs[25] = functype(nil, []*Node{anonfield(typs[24])}, nil)
- typs[26] = types.Types[TCOMPLEX128]
- typs[27] = functype(nil, []*Node{anonfield(typs[26])}, nil)
- typs[28] = types.Types[TSTRING]
- typs[29] = functype(nil, []*Node{anonfield(typs[28])}, nil)
- typs[30] = functype(nil, []*Node{anonfield(typs[2])}, nil)
- typs[31] = functype(nil, []*Node{anonfield(typs[5])}, nil)
+ typs[14] = functype(nil, []*ir.Field{anonfield(typs[13])}, []*ir.Field{anonfield(typs[10])})
+ typs[15] = types.Types[types.TINT]
+ typs[16] = functype(nil, []*ir.Field{anonfield(typs[15]), anonfield(typs[15])}, nil)
+ typs[17] = types.Types[types.TUINT]
+ typs[18] = functype(nil, []*ir.Field{anonfield(typs[17]), anonfield(typs[15])}, nil)
+ typs[19] = functype(nil, []*ir.Field{anonfield(typs[6])}, nil)
+ typs[20] = types.Types[types.TFLOAT64]
+ typs[21] = functype(nil, []*ir.Field{anonfield(typs[20])}, nil)
+ typs[22] = types.Types[types.TINT64]
+ typs[23] = functype(nil, []*ir.Field{anonfield(typs[22])}, nil)
+ typs[24] = types.Types[types.TUINT64]
+ typs[25] = functype(nil, []*ir.Field{anonfield(typs[24])}, nil)
+ typs[26] = types.Types[types.TCOMPLEX128]
+ typs[27] = functype(nil, []*ir.Field{anonfield(typs[26])}, nil)
+ typs[28] = types.Types[types.TSTRING]
+ typs[29] = functype(nil, []*ir.Field{anonfield(typs[28])}, nil)
+ typs[30] = functype(nil, []*ir.Field{anonfield(typs[2])}, nil)
+ typs[31] = functype(nil, []*ir.Field{anonfield(typs[5])}, nil)
typs[32] = types.NewArray(typs[0], 32)
typs[33] = types.NewPtr(typs[32])
- typs[34] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
- typs[35] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
- typs[36] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
- typs[37] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
+ typs[34] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[28])})
+ typs[35] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[28])})
+ typs[36] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[28])})
+ typs[37] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[28])})
typs[38] = types.NewSlice(typs[28])
- typs[39] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[38])}, []*Node{anonfield(typs[28])})
- typs[40] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[15])})
+ typs[39] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[38])}, []*ir.Field{anonfield(typs[28])})
+ typs[40] = functype(nil, []*ir.Field{anonfield(typs[28]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[15])})
typs[41] = types.NewArray(typs[0], 4)
typs[42] = types.NewPtr(typs[41])
- typs[43] = functype(nil, []*Node{anonfield(typs[42]), anonfield(typs[22])}, []*Node{anonfield(typs[28])})
- typs[44] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])})
- typs[45] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])})
- typs[46] = types.Runetype
+ typs[43] = functype(nil, []*ir.Field{anonfield(typs[42]), anonfield(typs[22])}, []*ir.Field{anonfield(typs[28])})
+ typs[44] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[1]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[28])})
+ typs[45] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[28])})
+ typs[46] = types.RuneType
typs[47] = types.NewSlice(typs[46])
- typs[48] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[47])}, []*Node{anonfield(typs[28])})
+ typs[48] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[47])}, []*ir.Field{anonfield(typs[28])})
typs[49] = types.NewSlice(typs[0])
- typs[50] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28])}, []*Node{anonfield(typs[49])})
+ typs[50] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[49])})
typs[51] = types.NewArray(typs[46], 32)
typs[52] = types.NewPtr(typs[51])
- typs[53] = functype(nil, []*Node{anonfield(typs[52]), anonfield(typs[28])}, []*Node{anonfield(typs[47])})
- typs[54] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[5])}, []*Node{anonfield(typs[15])})
- typs[55] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[15])}, []*Node{anonfield(typs[46]), anonfield(typs[15])})
- typs[56] = functype(nil, []*Node{anonfield(typs[28])}, []*Node{anonfield(typs[15])})
- typs[57] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2])})
- typs[58] = functype(nil, []*Node{anonfield(typs[2])}, []*Node{anonfield(typs[7])})
- typs[59] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, []*Node{anonfield(typs[2])})
- typs[60] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2]), anonfield(typs[6])})
- typs[61] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil)
- typs[62] = functype(nil, []*Node{anonfield(typs[1])}, nil)
+ typs[53] = functype(nil, []*ir.Field{anonfield(typs[52]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[47])})
+ typs[54] = functype(nil, []*ir.Field{anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[5])}, []*ir.Field{anonfield(typs[15])})
+ typs[55] = functype(nil, []*ir.Field{anonfield(typs[28]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[46]), anonfield(typs[15])})
+ typs[56] = functype(nil, []*ir.Field{anonfield(typs[28])}, []*ir.Field{anonfield(typs[15])})
+ typs[57] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[2])}, []*ir.Field{anonfield(typs[2])})
+ typs[58] = functype(nil, []*ir.Field{anonfield(typs[2])}, []*ir.Field{anonfield(typs[7])})
+ typs[59] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[2])})
+ typs[60] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[2])}, []*ir.Field{anonfield(typs[2]), anonfield(typs[6])})
+ typs[61] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil)
+ typs[62] = functype(nil, []*ir.Field{anonfield(typs[1])}, nil)
typs[63] = types.NewPtr(typs[5])
- typs[64] = functype(nil, []*Node{anonfield(typs[63]), anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
- typs[65] = types.Types[TUINT32]
- typs[66] = functype(nil, nil, []*Node{anonfield(typs[65])})
+ typs[64] = functype(nil, []*ir.Field{anonfield(typs[63]), anonfield(typs[7]), anonfield(typs[7])}, []*ir.Field{anonfield(typs[6])})
+ typs[65] = types.Types[types.TUINT32]
+ typs[66] = functype(nil, nil, []*ir.Field{anonfield(typs[65])})
typs[67] = types.NewMap(typs[2], typs[2])
- typs[68] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []*Node{anonfield(typs[67])})
- typs[69] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []*Node{anonfield(typs[67])})
- typs[70] = functype(nil, nil, []*Node{anonfield(typs[67])})
- typs[71] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*Node{anonfield(typs[3])})
- typs[72] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*Node{anonfield(typs[3])})
- typs[73] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3])})
- typs[74] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
- typs[75] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
- typs[76] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
- typs[77] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, nil)
- typs[78] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, nil)
- typs[79] = functype(nil, []*Node{anonfield(typs[3])}, nil)
- typs[80] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67])}, nil)
+ typs[68] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[67])})
+ typs[69] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[67])})
+ typs[70] = functype(nil, nil, []*ir.Field{anonfield(typs[67])})
+ typs[71] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[3])})
+ typs[72] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*ir.Field{anonfield(typs[3])})
+ typs[73] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*ir.Field{anonfield(typs[3])})
+ typs[74] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[3]), anonfield(typs[6])})
+ typs[75] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*ir.Field{anonfield(typs[3]), anonfield(typs[6])})
+ typs[76] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*ir.Field{anonfield(typs[3]), anonfield(typs[6])})
+ typs[77] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, nil)
+ typs[78] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, nil)
+ typs[79] = functype(nil, []*ir.Field{anonfield(typs[3])}, nil)
+ typs[80] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67])}, nil)
typs[81] = types.NewChan(typs[2], types.Cboth)
- typs[82] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22])}, []*Node{anonfield(typs[81])})
- typs[83] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[81])})
+ typs[82] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[22])}, []*ir.Field{anonfield(typs[81])})
+ typs[83] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[81])})
typs[84] = types.NewChan(typs[2], types.Crecv)
- typs[85] = functype(nil, []*Node{anonfield(typs[84]), anonfield(typs[3])}, nil)
- typs[86] = functype(nil, []*Node{anonfield(typs[84]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
+ typs[85] = functype(nil, []*ir.Field{anonfield(typs[84]), anonfield(typs[3])}, nil)
+ typs[86] = functype(nil, []*ir.Field{anonfield(typs[84]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[6])})
typs[87] = types.NewChan(typs[2], types.Csend)
- typs[88] = functype(nil, []*Node{anonfield(typs[87]), anonfield(typs[3])}, nil)
+ typs[88] = functype(nil, []*ir.Field{anonfield(typs[87]), anonfield(typs[3])}, nil)
typs[89] = types.NewArray(typs[0], 3)
- typs[90] = tostruct([]*Node{namedfield("enabled", typs[6]), namedfield("pad", typs[89]), namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])})
- typs[91] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil)
- typs[92] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, nil)
- typs[93] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, []*Node{anonfield(typs[15])})
- typs[94] = functype(nil, []*Node{anonfield(typs[87]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
- typs[95] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[84])}, []*Node{anonfield(typs[6])})
+ typs[90] = tostruct([]*ir.Field{namedfield("enabled", typs[6]), namedfield("pad", typs[89]), namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])})
+ typs[91] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil)
+ typs[92] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[3])}, nil)
+ typs[93] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[15])})
+ typs[94] = functype(nil, []*ir.Field{anonfield(typs[87]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[6])})
+ typs[95] = functype(nil, []*ir.Field{anonfield(typs[3]), anonfield(typs[84])}, []*ir.Field{anonfield(typs[6])})
typs[96] = types.NewPtr(typs[6])
- typs[97] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[96]), anonfield(typs[84])}, []*Node{anonfield(typs[6])})
- typs[98] = functype(nil, []*Node{anonfield(typs[63])}, nil)
- typs[99] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[63]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[6])}, []*Node{anonfield(typs[15]), anonfield(typs[6])})
- typs[100] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*Node{anonfield(typs[7])})
- typs[101] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[7])})
- typs[102] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[7])}, []*Node{anonfield(typs[7])})
+ typs[97] = functype(nil, []*ir.Field{anonfield(typs[3]), anonfield(typs[96]), anonfield(typs[84])}, []*ir.Field{anonfield(typs[6])})
+ typs[98] = functype(nil, []*ir.Field{anonfield(typs[63])}, nil)
+ typs[99] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[63]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[6])}, []*ir.Field{anonfield(typs[15]), anonfield(typs[6])})
+ typs[100] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[7])})
+ typs[101] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[22])}, []*ir.Field{anonfield(typs[7])})
+ typs[102] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[7])}, []*ir.Field{anonfield(typs[7])})
typs[103] = types.NewSlice(typs[2])
- typs[104] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[103]), anonfield(typs[15])}, []*Node{anonfield(typs[103])})
- typs[105] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, nil)
- typs[106] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, nil)
- typs[107] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, []*Node{anonfield(typs[6])})
- typs[108] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
- typs[109] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
- typs[110] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5]), anonfield(typs[5])}, []*Node{anonfield(typs[5])})
- typs[111] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, []*Node{anonfield(typs[5])})
- typs[112] = functype(nil, []*Node{anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[22])})
- typs[113] = functype(nil, []*Node{anonfield(typs[24]), anonfield(typs[24])}, []*Node{anonfield(typs[24])})
- typs[114] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[22])})
- typs[115] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[24])})
- typs[116] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[65])})
- typs[117] = functype(nil, []*Node{anonfield(typs[22])}, []*Node{anonfield(typs[20])})
- typs[118] = functype(nil, []*Node{anonfield(typs[24])}, []*Node{anonfield(typs[20])})
- typs[119] = functype(nil, []*Node{anonfield(typs[65])}, []*Node{anonfield(typs[20])})
- typs[120] = functype(nil, []*Node{anonfield(typs[26]), anonfield(typs[26])}, []*Node{anonfield(typs[26])})
- typs[121] = functype(nil, []*Node{anonfield(typs[5]), anonfield(typs[5])}, nil)
- typs[122] = functype(nil, []*Node{anonfield(typs[5]), anonfield(typs[5]), anonfield(typs[5])}, nil)
- typs[123] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[1]), anonfield(typs[5])}, nil)
+ typs[104] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[103]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[103])})
+ typs[105] = functype(nil, []*ir.Field{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, nil)
+ typs[106] = functype(nil, []*ir.Field{anonfield(typs[7]), anonfield(typs[5])}, nil)
+ typs[107] = functype(nil, []*ir.Field{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, []*ir.Field{anonfield(typs[6])})
+ typs[108] = functype(nil, []*ir.Field{anonfield(typs[3]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[6])})
+ typs[109] = functype(nil, []*ir.Field{anonfield(typs[7]), anonfield(typs[7])}, []*ir.Field{anonfield(typs[6])})
+ typs[110] = functype(nil, []*ir.Field{anonfield(typs[7]), anonfield(typs[5]), anonfield(typs[5])}, []*ir.Field{anonfield(typs[5])})
+ typs[111] = functype(nil, []*ir.Field{anonfield(typs[7]), anonfield(typs[5])}, []*ir.Field{anonfield(typs[5])})
+ typs[112] = functype(nil, []*ir.Field{anonfield(typs[22]), anonfield(typs[22])}, []*ir.Field{anonfield(typs[22])})
+ typs[113] = functype(nil, []*ir.Field{anonfield(typs[24]), anonfield(typs[24])}, []*ir.Field{anonfield(typs[24])})
+ typs[114] = functype(nil, []*ir.Field{anonfield(typs[20])}, []*ir.Field{anonfield(typs[22])})
+ typs[115] = functype(nil, []*ir.Field{anonfield(typs[20])}, []*ir.Field{anonfield(typs[24])})
+ typs[116] = functype(nil, []*ir.Field{anonfield(typs[20])}, []*ir.Field{anonfield(typs[65])})
+ typs[117] = functype(nil, []*ir.Field{anonfield(typs[22])}, []*ir.Field{anonfield(typs[20])})
+ typs[118] = functype(nil, []*ir.Field{anonfield(typs[24])}, []*ir.Field{anonfield(typs[20])})
+ typs[119] = functype(nil, []*ir.Field{anonfield(typs[65])}, []*ir.Field{anonfield(typs[20])})
+ typs[120] = functype(nil, []*ir.Field{anonfield(typs[26]), anonfield(typs[26])}, []*ir.Field{anonfield(typs[26])})
+ typs[121] = functype(nil, []*ir.Field{anonfield(typs[5]), anonfield(typs[5])}, nil)
+ typs[122] = functype(nil, []*ir.Field{anonfield(typs[5]), anonfield(typs[5]), anonfield(typs[5])}, nil)
+ typs[123] = functype(nil, []*ir.Field{anonfield(typs[7]), anonfield(typs[1]), anonfield(typs[5])}, nil)
typs[124] = types.NewSlice(typs[7])
- typs[125] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[124])}, nil)
- typs[126] = types.Types[TUINT8]
- typs[127] = functype(nil, []*Node{anonfield(typs[126]), anonfield(typs[126])}, nil)
- typs[128] = types.Types[TUINT16]
- typs[129] = functype(nil, []*Node{anonfield(typs[128]), anonfield(typs[128])}, nil)
- typs[130] = functype(nil, []*Node{anonfield(typs[65]), anonfield(typs[65])}, nil)
- typs[131] = functype(nil, []*Node{anonfield(typs[24]), anonfield(typs[24])}, nil)
+ typs[125] = functype(nil, []*ir.Field{anonfield(typs[7]), anonfield(typs[124])}, nil)
+ typs[126] = types.Types[types.TUINT8]
+ typs[127] = functype(nil, []*ir.Field{anonfield(typs[126]), anonfield(typs[126])}, nil)
+ typs[128] = types.Types[types.TUINT16]
+ typs[129] = functype(nil, []*ir.Field{anonfield(typs[128]), anonfield(typs[128])}, nil)
+ typs[130] = functype(nil, []*ir.Field{anonfield(typs[65]), anonfield(typs[65])}, nil)
+ typs[131] = functype(nil, []*ir.Field{anonfield(typs[24]), anonfield(typs[24])}, nil)
return typs[:]
}
import (
"math/bits"
+
+ "cmd/compile/internal/base"
)
const (
nword := (nbit + wordBits - 1) / wordBits
size := int64(nword) * int64(count)
if int64(int32(size*4)) != size*4 {
- Fatalf("bvbulkalloc too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size)
+ base.Fatalf("bvbulkalloc too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size)
}
return bulkBvec{
words: make([]uint32, size),
func (bv1 bvec) Eq(bv2 bvec) bool {
if bv1.n != bv2.n {
- Fatalf("bvequal: lengths %d and %d are not equal", bv1.n, bv2.n)
+ base.Fatalf("bvequal: lengths %d and %d are not equal", bv1.n, bv2.n)
}
for i, x := range bv1.b {
if x != bv2.b[i] {
func (bv bvec) Get(i int32) bool {
if i < 0 || i >= bv.n {
- Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.n)
+ base.Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.n)
}
mask := uint32(1 << uint(i%wordBits))
return bv.b[i>>wordShift]&mask != 0
func (bv bvec) Set(i int32) {
if i < 0 || i >= bv.n {
- Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.n)
+ base.Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.n)
}
mask := uint32(1 << uint(i%wordBits))
bv.b[i/wordBits] |= mask
func (bv bvec) Unset(i int32) {
if i < 0 || i >= bv.n {
- Fatalf("bvunset: index %d is out of bounds with length %d\n", i, bv.n)
+ base.Fatalf("bvunset: index %d is out of bounds with length %d\n", i, bv.n)
}
mask := uint32(1 << uint(i%wordBits))
bv.b[i/wordBits] &^= mask
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/syntax"
"cmd/compile/internal/types"
+ "cmd/internal/src"
"fmt"
)
-func (p *noder) funcLit(expr *syntax.FuncLit) *Node {
+func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node {
xtype := p.typeExpr(expr.Type)
ntype := p.typeExpr(expr.Type)
- xfunc := p.nod(expr, ODCLFUNC, nil, nil)
- xfunc.Func.SetIsHiddenClosure(Curfn != nil)
- xfunc.Func.Nname = newfuncnamel(p.pos(expr), nblank.Sym) // filled in by typecheckclosure
- xfunc.Func.Nname.Name.Param.Ntype = xtype
- xfunc.Func.Nname.Name.Defn = xfunc
+ fn := ir.NewFunc(p.pos(expr))
+ fn.SetIsHiddenClosure(Curfn != nil)
+ fn.Nname = newFuncNameAt(p.pos(expr), ir.BlankNode.Sym(), fn) // filled in by typecheckclosure
+ fn.Nname.Ntype = xtype
+ fn.Nname.Defn = fn
- clo := p.nod(expr, OCLOSURE, nil, nil)
- clo.Func.Ntype = ntype
+ clo := ir.NewClosureExpr(p.pos(expr), fn)
+ fn.ClosureType = ntype
+ fn.OClosure = clo
- xfunc.Func.Closure = clo
- clo.Func.Closure = xfunc
-
- p.funcBody(xfunc, expr.Body)
+ p.funcBody(fn, expr.Body)
// closure-specific variables are hanging off the
// ordinary ones in the symbol table; see oldname.
// unhook them.
// make the list of pointers for the closure call.
- for _, v := range xfunc.Func.Cvars.Slice() {
+ for _, v := range fn.ClosureVars {
// Unlink from v1; see comment in syntax.go type Param for these fields.
- v1 := v.Name.Defn
- v1.Name.Param.Innermost = v.Name.Param.Outer
+ v1 := v.Defn
+ v1.Name().Innermost = v.Outer
// If the closure usage of v is not dense,
// we need to make it dense; now that we're out
// obtains f3's v, creating it if necessary (as it is in the example).
//
// capturevars will decide whether to use v directly or &v.
- v.Name.Param.Outer = oldname(v.Sym)
+ v.Outer = oldname(v.Sym()).(*ir.Name)
}
return clo
// function associated with the closure.
// TODO: This creation of the named function should probably really be done in a
// separate pass from type-checking.
-func typecheckclosure(clo *Node, top int) {
- xfunc := clo.Func.Closure
+func typecheckclosure(clo *ir.ClosureExpr, top int) {
+ fn := clo.Func()
// Set current associated iota value, so iota can be used inside
// function in ConstSpec, see issue #22344
if x := getIotaValue(); x >= 0 {
- xfunc.SetIota(x)
+ fn.SetIota(x)
}
- clo.Func.Ntype = typecheck(clo.Func.Ntype, ctxType)
- clo.Type = clo.Func.Ntype.Type
- clo.Func.Top = top
+ fn.ClosureType = typecheck(fn.ClosureType, ctxType)
+ clo.SetType(fn.ClosureType.Type())
+ fn.SetClosureCalled(top&ctxCallee != 0)
- // Do not typecheck xfunc twice, otherwise, we will end up pushing
- // xfunc to xtop multiple times, causing initLSym called twice.
+ // Do not typecheck fn twice, otherwise, we will end up pushing
+ // fn to Target.Decls multiple times, causing initLSym called twice.
// See #30709
- if xfunc.Typecheck() == 1 {
+ if fn.Typecheck() == 1 {
return
}
- for _, ln := range xfunc.Func.Cvars.Slice() {
- n := ln.Name.Defn
- if !n.Name.Captured() {
- n.Name.SetCaptured(true)
- if n.Name.Decldepth == 0 {
- Fatalf("typecheckclosure: var %S does not have decldepth assigned", n)
+ for _, ln := range fn.ClosureVars {
+ n := ln.Defn
+ if !n.Name().Captured() {
+ n.Name().SetCaptured(true)
+ if n.Name().Decldepth == 0 {
+ base.Fatalf("typecheckclosure: var %v does not have decldepth assigned", n)
}
// Ignore assignments to the variable in straightline code
// preceding the first capturing by a closure.
- if n.Name.Decldepth == decldepth {
- n.Name.SetAssigned(false)
+ if n.Name().Decldepth == decldepth {
+ n.Name().SetAssigned(false)
}
}
}
- xfunc.Func.Nname.Sym = closurename(Curfn)
- setNodeNameFunc(xfunc.Func.Nname)
- xfunc = typecheck(xfunc, ctxStmt)
+ fn.Nname.SetSym(closurename(Curfn))
+ setNodeNameFunc(fn.Nname)
+ typecheckFunc(fn)
// Type check the body now, but only if we're inside a function.
// At top level (in a variable initialization: curfn==nil) we're not
// ready to type check code yet; we'll check it later, because the
- // underlying closure function we create is added to xtop.
- if Curfn != nil && clo.Type != nil {
+ // underlying closure function we create is added to Target.Decls.
+ if Curfn != nil && clo.Type() != nil {
oldfn := Curfn
- Curfn = xfunc
+ Curfn = fn
olddd := decldepth
decldepth = 1
- typecheckslice(xfunc.Nbody.Slice(), ctxStmt)
+ typecheckslice(fn.Body().Slice(), ctxStmt)
decldepth = olddd
Curfn = oldfn
}
- xtop = append(xtop, xfunc)
+ Target.Decls = append(Target.Decls, fn)
}
// globClosgen is like Func.Closgen, but for the global scope.
-var globClosgen int
+var globClosgen int32
// closurename generates a new unique name for a closure within
// outerfunc.
-func closurename(outerfunc *Node) *types.Sym {
+func closurename(outerfunc *ir.Func) *types.Sym {
outer := "glob."
prefix := "func"
gen := &globClosgen
if outerfunc != nil {
- if outerfunc.Func.Closure != nil {
+ if outerfunc.OClosure != nil {
prefix = ""
}
- outer = outerfunc.funcname()
+ outer = ir.FuncName(outerfunc)
// There may be multiple functions named "_". In those
// cases, we can't use their individual Closgens as it
// would lead to name clashes.
- if !outerfunc.Func.Nname.isBlank() {
- gen = &outerfunc.Func.Closgen
+ if !ir.IsBlank(outerfunc.Nname) {
+ gen = &outerfunc.Closgen
}
}
// by value or by reference.
// We use value capturing for values <= 128 bytes that are never reassigned
// after capturing (effectively constant).
-func capturevars(xfunc *Node) {
- lno := lineno
- lineno = xfunc.Pos
-
- clo := xfunc.Func.Closure
- cvars := xfunc.Func.Cvars.Slice()
+func capturevars(fn *ir.Func) {
+ lno := base.Pos
+ base.Pos = fn.Pos()
+ cvars := fn.ClosureVars
out := cvars[:0]
for _, v := range cvars {
- if v.Type == nil {
+ if v.Type() == nil {
// If v.Type is nil, it means v looked like it
// was going to be used in the closure, but
// isn't. This happens in struct literals like
// type check the & of closed variables outside the closure,
// so that the outer frame also grabs them and knows they escape.
- dowidth(v.Type)
+ dowidth(v.Type())
- outer := v.Name.Param.Outer
- outermost := v.Name.Defn
+ var outer ir.Node
+ outer = v.Outer
+ outermost := v.Defn.(*ir.Name)
// out parameters will be assigned to implicitly upon return.
- if outermost.Class() != PPARAMOUT && !outermost.Name.Addrtaken() && !outermost.Name.Assigned() && v.Type.Width <= 128 {
- v.Name.SetByval(true)
+ if outermost.Class() != ir.PPARAMOUT && !outermost.Name().Addrtaken() && !outermost.Name().Assigned() && v.Type().Width <= 128 {
+ v.SetByval(true)
} else {
- outermost.Name.SetAddrtaken(true)
- outer = nod(OADDR, outer, nil)
+ outermost.Name().SetAddrtaken(true)
+ outer = nodAddr(outer)
}
- if Debug.m > 1 {
+ if base.Flag.LowerM > 1 {
var name *types.Sym
- if v.Name.Curfn != nil && v.Name.Curfn.Func.Nname != nil {
- name = v.Name.Curfn.Func.Nname.Sym
+ if v.Curfn != nil && v.Curfn.Nname != nil {
+ name = v.Curfn.Sym()
}
how := "ref"
- if v.Name.Byval() {
+ if v.Byval() {
how = "value"
}
- Warnl(v.Pos, "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym, outermost.Name.Addrtaken(), outermost.Name.Assigned(), int32(v.Type.Width))
+ base.WarnfAt(v.Pos(), "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym(), outermost.Name().Addrtaken(), outermost.Name().Assigned(), int32(v.Type().Width))
}
outer = typecheck(outer, ctxExpr)
- clo.Func.Enter.Append(outer)
+ fn.ClosureEnter.Append(outer)
}
- xfunc.Func.Cvars.Set(out)
- lineno = lno
+ fn.ClosureVars = out
+ base.Pos = lno
}
// transformclosure is called in a separate phase after escape analysis.
// It transform closure bodies to properly reference captured variables.
-func transformclosure(xfunc *Node) {
- lno := lineno
- lineno = xfunc.Pos
- clo := xfunc.Func.Closure
+func transformclosure(fn *ir.Func) {
+ lno := base.Pos
+ base.Pos = fn.Pos()
- if clo.Func.Top&ctxCallee != 0 {
+ if fn.ClosureCalled() {
// If the closure is directly called, we transform it to a plain function call
// with variables passed as args. This avoids allocation of a closure object.
// Here we do only a part of the transformation. Walk of OCALLFUNC(OCLOSURE)
// }(byval, &byref, 42)
// f is ONAME of the actual function.
- f := xfunc.Func.Nname
+ f := fn.Nname
// We are going to insert captured variables before input args.
var params []*types.Field
- var decls []*Node
- for _, v := range xfunc.Func.Cvars.Slice() {
- if !v.Name.Byval() {
+ var decls []*ir.Name
+ for _, v := range fn.ClosureVars {
+ if !v.Byval() {
// If v of type T is captured by reference,
// we introduce function param &v *T
// and v remains PAUTOHEAP with &v heapaddr
// (accesses will implicitly deref &v).
- addr := newname(lookup("&" + v.Sym.Name))
- addr.Type = types.NewPtr(v.Type)
- v.Name.Param.Heapaddr = addr
+ addr := NewName(lookup("&" + v.Sym().Name))
+ addr.SetType(types.NewPtr(v.Type()))
+ v.Heapaddr = addr
v = addr
}
- v.SetClass(PPARAM)
+ v.SetClass(ir.PPARAM)
decls = append(decls, v)
- fld := types.NewField()
- fld.Nname = asTypesNode(v)
- fld.Type = v.Type
- fld.Sym = v.Sym
+ fld := types.NewField(src.NoXPos, v.Sym(), v.Type())
+ fld.Nname = v
params = append(params, fld)
}
if len(params) > 0 {
// Prepend params and decls.
- f.Type.Params().SetFields(append(params, f.Type.Params().FieldSlice()...))
- xfunc.Func.Dcl = append(decls, xfunc.Func.Dcl...)
+ f.Type().Params().SetFields(append(params, f.Type().Params().FieldSlice()...))
+ fn.Dcl = append(decls, fn.Dcl...)
}
- dowidth(f.Type)
- xfunc.Type = f.Type // update type of ODCLFUNC
+ dowidth(f.Type())
+ fn.SetType(f.Type()) // update type of ODCLFUNC
} else {
// The closure is not called, so it is going to stay as closure.
- var body []*Node
+ var body []ir.Node
offset := int64(Widthptr)
- for _, v := range xfunc.Func.Cvars.Slice() {
+ for _, v := range fn.ClosureVars {
// cv refers to the field inside of closure OSTRUCTLIT.
- cv := nod(OCLOSUREVAR, nil, nil)
-
- cv.Type = v.Type
- if !v.Name.Byval() {
- cv.Type = types.NewPtr(v.Type)
+ typ := v.Type()
+ if !v.Byval() {
+ typ = types.NewPtr(typ)
}
- offset = Rnd(offset, int64(cv.Type.Align))
- cv.Xoffset = offset
- offset += cv.Type.Width
+ offset = Rnd(offset, int64(typ.Align))
+ cr := ir.NewClosureRead(typ, offset)
+ offset += typ.Width
- if v.Name.Byval() && v.Type.Width <= int64(2*Widthptr) {
+ if v.Byval() && v.Type().Width <= int64(2*Widthptr) {
// If it is a small variable captured by value, downgrade it to PAUTO.
- v.SetClass(PAUTO)
- xfunc.Func.Dcl = append(xfunc.Func.Dcl, v)
- body = append(body, nod(OAS, v, cv))
+ v.SetClass(ir.PAUTO)
+ fn.Dcl = append(fn.Dcl, v)
+ body = append(body, ir.Nod(ir.OAS, v, cr))
} else {
// Declare variable holding addresses taken from closure
// and initialize in entry prologue.
- addr := newname(lookup("&" + v.Sym.Name))
- addr.Type = types.NewPtr(v.Type)
- addr.SetClass(PAUTO)
- addr.Name.SetUsed(true)
- addr.Name.Curfn = xfunc
- xfunc.Func.Dcl = append(xfunc.Func.Dcl, addr)
- v.Name.Param.Heapaddr = addr
- if v.Name.Byval() {
- cv = nod(OADDR, cv, nil)
+ addr := NewName(lookup("&" + v.Sym().Name))
+ addr.SetType(types.NewPtr(v.Type()))
+ addr.SetClass(ir.PAUTO)
+ addr.SetUsed(true)
+ addr.Curfn = fn
+ fn.Dcl = append(fn.Dcl, addr)
+ v.Heapaddr = addr
+ var src ir.Node = cr
+ if v.Byval() {
+ src = nodAddr(cr)
}
- body = append(body, nod(OAS, addr, cv))
+ body = append(body, ir.Nod(ir.OAS, addr, src))
}
}
if len(body) > 0 {
typecheckslice(body, ctxStmt)
- xfunc.Func.Enter.Set(body)
- xfunc.Func.SetNeedctxt(true)
+ fn.Enter.Set(body)
+ fn.SetNeedctxt(true)
}
}
- lineno = lno
+ base.Pos = lno
}
// hasemptycvars reports whether closure clo has an
// empty list of captured vars.
-func hasemptycvars(clo *Node) bool {
- xfunc := clo.Func.Closure
- return xfunc.Func.Cvars.Len() == 0
+func hasemptycvars(clo *ir.ClosureExpr) bool {
+ return len(clo.Func().ClosureVars) == 0
}
// closuredebugruntimecheck applies boilerplate checks for debug flags
// and compiling runtime
-func closuredebugruntimecheck(clo *Node) {
- if Debug_closure > 0 {
- xfunc := clo.Func.Closure
- if clo.Esc == EscHeap {
- Warnl(clo.Pos, "heap closure, captured vars = %v", xfunc.Func.Cvars)
+func closuredebugruntimecheck(clo *ir.ClosureExpr) {
+ if base.Debug.Closure > 0 {
+ if clo.Esc() == EscHeap {
+ base.WarnfAt(clo.Pos(), "heap closure, captured vars = %v", clo.Func().ClosureVars)
} else {
- Warnl(clo.Pos, "stack closure, captured vars = %v", xfunc.Func.Cvars)
+ base.WarnfAt(clo.Pos(), "stack closure, captured vars = %v", clo.Func().ClosureVars)
}
}
- if compiling_runtime && clo.Esc == EscHeap {
- yyerrorl(clo.Pos, "heap-allocated closure, not allowed in runtime")
+ if base.Flag.CompilingRuntime && clo.Esc() == EscHeap {
+ base.ErrorfAt(clo.Pos(), "heap-allocated closure, not allowed in runtime")
}
}
// closureType returns the struct type used to hold all the information
// needed in the closure for clo (clo must be a OCLOSURE node).
// The address of a variable of the returned type can be cast to a func.
-func closureType(clo *Node) *types.Type {
+func closureType(clo *ir.ClosureExpr) *types.Type {
// Create closure in the form of a composite literal.
// supposing the closure captures an int i and a string s
// and has one float64 argument and no results,
// The information appears in the binary in the form of type descriptors;
// the struct is unnamed so that closures in multiple packages with the
// same struct type can share the descriptor.
- fields := []*Node{
- namedfield(".F", types.Types[TUINTPTR]),
+ fields := []*ir.Field{
+ namedfield(".F", types.Types[types.TUINTPTR]),
}
- for _, v := range clo.Func.Closure.Func.Cvars.Slice() {
- typ := v.Type
- if !v.Name.Byval() {
+ for _, v := range clo.Func().ClosureVars {
+ typ := v.Type()
+ if !v.Byval() {
typ = types.NewPtr(typ)
}
- fields = append(fields, symfield(v.Sym, typ))
+ fields = append(fields, symfield(v.Sym(), typ))
}
typ := tostruct(fields)
typ.SetNoalg(true)
return typ
}
-func walkclosure(clo *Node, init *Nodes) *Node {
- xfunc := clo.Func.Closure
+func walkclosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node {
+ fn := clo.Func()
// If no closure vars, don't bother wrapping.
if hasemptycvars(clo) {
- if Debug_closure > 0 {
- Warnl(clo.Pos, "closure converted to global")
+ if base.Debug.Closure > 0 {
+ base.WarnfAt(clo.Pos(), "closure converted to global")
}
- return xfunc.Func.Nname
+ return fn.Nname
}
closuredebugruntimecheck(clo)
typ := closureType(clo)
- clos := nod(OCOMPLIT, nil, typenod(typ))
- clos.Esc = clo.Esc
- clos.List.Set(append([]*Node{nod(OCFUNC, xfunc.Func.Nname, nil)}, clo.Func.Enter.Slice()...))
+ clos := ir.Nod(ir.OCOMPLIT, nil, ir.TypeNode(typ))
+ clos.SetEsc(clo.Esc())
+ clos.PtrList().Set(append([]ir.Node{ir.Nod(ir.OCFUNC, fn.Nname, nil)}, fn.ClosureEnter.Slice()...))
- clos = nod(OADDR, clos, nil)
- clos.Esc = clo.Esc
+ addr := nodAddr(clos)
+ addr.SetEsc(clo.Esc())
// Force type conversion from *struct to the func type.
- clos = convnop(clos, clo.Type)
+ cfn := convnop(addr, clo.Type())
// non-escaping temp to use, if any.
- if x := prealloc[clo]; x != nil {
- if !types.Identical(typ, x.Type) {
+ if x := clo.Prealloc; x != nil {
+ if !types.Identical(typ, x.Type()) {
panic("closure type does not match order's assigned type")
}
- clos.Left.Right = x
- delete(prealloc, clo)
+ addr.SetRight(x)
+ clo.Prealloc = nil
}
- return walkexpr(clos, init)
+ return walkexpr(cfn, init)
}
-func typecheckpartialcall(fn *Node, sym *types.Sym) {
- switch fn.Op {
- case ODOTINTER, ODOTMETH:
+func typecheckpartialcall(n ir.Node, sym *types.Sym) *ir.CallPartExpr {
+ switch n.Op() {
+ case ir.ODOTINTER, ir.ODOTMETH:
break
default:
- Fatalf("invalid typecheckpartialcall")
+ base.Fatalf("invalid typecheckpartialcall")
}
+ dot := n.(*ir.SelectorExpr)
// Create top-level function.
- xfunc := makepartialcall(fn, fn.Type, sym)
- fn.Func = xfunc.Func
- fn.Func.SetWrapper(true)
- fn.Right = newname(sym)
- fn.Op = OCALLPART
- fn.Type = xfunc.Type
+ fn := makepartialcall(dot, dot.Type(), sym)
+ fn.SetWrapper(true)
+
+ return ir.NewCallPartExpr(dot.Pos(), dot.Left(), dot.Selection, fn)
}
// makepartialcall returns a DCLFUNC node representing the wrapper function (*-fm) needed
// for partial calls.
-func makepartialcall(fn *Node, t0 *types.Type, meth *types.Sym) *Node {
- rcvrtype := fn.Left.Type
+func makepartialcall(dot *ir.SelectorExpr, t0 *types.Type, meth *types.Sym) *ir.Func {
+ rcvrtype := dot.Left().Type()
sym := methodSymSuffix(rcvrtype, meth, "-fm")
if sym.Uniq() {
- return asNode(sym.Def)
+ return sym.Def.(*ir.Func)
}
sym.SetUniq(true)
savecurfn := Curfn
- saveLineNo := lineno
+ saveLineNo := base.Pos
Curfn = nil
// Set line number equal to the line number where the method is declared.
var m *types.Field
if lookdot0(meth, rcvrtype, &m, false) == 1 && m.Pos.IsKnown() {
- lineno = m.Pos
+ base.Pos = m.Pos
}
// Note: !m.Pos.IsKnown() happens for method expressions where
// the method is implicitly declared. The Error method of the
// number at the use of the method expression in this
// case. See issue 29389.
- tfn := nod(OTFUNC, nil, nil)
- tfn.List.Set(structargs(t0.Params(), true))
- tfn.Rlist.Set(structargs(t0.Results(), false))
+ tfn := ir.NewFuncType(base.Pos, nil,
+ structargs(t0.Params(), true),
+ structargs(t0.Results(), false))
- xfunc := dclfunc(sym, tfn)
- xfunc.Func.SetDupok(true)
- xfunc.Func.SetNeedctxt(true)
-
- tfn.Type.SetPkg(t0.Pkg())
+ fn := dclfunc(sym, tfn)
+ fn.SetDupok(true)
+ fn.SetNeedctxt(true)
// Declare and initialize variable holding receiver.
-
- cv := nod(OCLOSUREVAR, nil, nil)
- cv.Type = rcvrtype
- cv.Xoffset = Rnd(int64(Widthptr), int64(cv.Type.Align))
-
- ptr := newname(lookup(".this"))
- declare(ptr, PAUTO)
- ptr.Name.SetUsed(true)
- var body []*Node
+ cr := ir.NewClosureRead(rcvrtype, Rnd(int64(Widthptr), int64(rcvrtype.Align)))
+ ptr := NewName(lookup(".this"))
+ declare(ptr, ir.PAUTO)
+ ptr.SetUsed(true)
+ var body []ir.Node
if rcvrtype.IsPtr() || rcvrtype.IsInterface() {
- ptr.Type = rcvrtype
- body = append(body, nod(OAS, ptr, cv))
+ ptr.SetType(rcvrtype)
+ body = append(body, ir.Nod(ir.OAS, ptr, cr))
} else {
- ptr.Type = types.NewPtr(rcvrtype)
- body = append(body, nod(OAS, ptr, nod(OADDR, cv, nil)))
+ ptr.SetType(types.NewPtr(rcvrtype))
+ body = append(body, ir.Nod(ir.OAS, ptr, nodAddr(cr)))
}
- call := nod(OCALL, nodSym(OXDOT, ptr, meth), nil)
- call.List.Set(paramNnames(tfn.Type))
- call.SetIsDDD(tfn.Type.IsVariadic())
+ call := ir.Nod(ir.OCALL, nodSym(ir.OXDOT, ptr, meth), nil)
+ call.PtrList().Set(paramNnames(tfn.Type()))
+ call.SetIsDDD(tfn.Type().IsVariadic())
if t0.NumResults() != 0 {
- n := nod(ORETURN, nil, nil)
- n.List.Set1(call)
- call = n
+ ret := ir.Nod(ir.ORETURN, nil, nil)
+ ret.PtrList().Set1(call)
+ body = append(body, ret)
+ } else {
+ body = append(body, call)
}
- body = append(body, call)
- xfunc.Nbody.Set(body)
+ fn.PtrBody().Set(body)
funcbody()
- xfunc = typecheck(xfunc, ctxStmt)
+ typecheckFunc(fn)
// Need to typecheck the body of the just-generated wrapper.
// typecheckslice() requires that Curfn is set when processing an ORETURN.
- Curfn = xfunc
- typecheckslice(xfunc.Nbody.Slice(), ctxStmt)
- sym.Def = asTypesNode(xfunc)
- xtop = append(xtop, xfunc)
+ Curfn = fn
+ typecheckslice(fn.Body().Slice(), ctxStmt)
+ sym.Def = fn
+ Target.Decls = append(Target.Decls, fn)
Curfn = savecurfn
- lineno = saveLineNo
+ base.Pos = saveLineNo
- return xfunc
+ return fn
}
// partialCallType returns the struct type used to hold all the information
// needed in the closure for n (n must be a OCALLPART node).
// The address of a variable of the returned type can be cast to a func.
-func partialCallType(n *Node) *types.Type {
- t := tostruct([]*Node{
- namedfield("F", types.Types[TUINTPTR]),
- namedfield("R", n.Left.Type),
+func partialCallType(n *ir.CallPartExpr) *types.Type {
+ t := tostruct([]*ir.Field{
+ namedfield("F", types.Types[types.TUINTPTR]),
+ namedfield("R", n.Left().Type()),
})
t.SetNoalg(true)
return t
}
-func walkpartialcall(n *Node, init *Nodes) *Node {
+func walkpartialcall(n *ir.CallPartExpr, init *ir.Nodes) ir.Node {
// Create closure in the form of a composite literal.
// For x.M with receiver (x) type T, the generated code looks like:
//
//
// Like walkclosure above.
- if n.Left.Type.IsInterface() {
+ if n.Left().Type().IsInterface() {
// Trigger panic for method on nil interface now.
// Otherwise it happens in the wrapper and is confusing.
- n.Left = cheapexpr(n.Left, init)
- n.Left = walkexpr(n.Left, nil)
+ n.SetLeft(cheapexpr(n.Left(), init))
+ n.SetLeft(walkexpr(n.Left(), nil))
- tab := nod(OITAB, n.Left, nil)
- tab = typecheck(tab, ctxExpr)
+ tab := typecheck(ir.Nod(ir.OITAB, n.Left(), nil), ctxExpr)
- c := nod(OCHECKNIL, tab, nil)
+ c := ir.Nod(ir.OCHECKNIL, tab, nil)
c.SetTypecheck(1)
init.Append(c)
}
typ := partialCallType(n)
- clos := nod(OCOMPLIT, nil, typenod(typ))
- clos.Esc = n.Esc
- clos.List.Set2(nod(OCFUNC, n.Func.Nname, nil), n.Left)
+ clos := ir.Nod(ir.OCOMPLIT, nil, ir.TypeNode(typ))
+ clos.SetEsc(n.Esc())
+ clos.PtrList().Set2(ir.Nod(ir.OCFUNC, n.Func().Nname, nil), n.Left())
- clos = nod(OADDR, clos, nil)
- clos.Esc = n.Esc
+ addr := nodAddr(clos)
+ addr.SetEsc(n.Esc())
// Force type conversion from *struct to the func type.
- clos = convnop(clos, n.Type)
+ cfn := convnop(addr, n.Type())
// non-escaping temp to use, if any.
- if x := prealloc[n]; x != nil {
- if !types.Identical(typ, x.Type) {
+ if x := n.Prealloc; x != nil {
+ if !types.Identical(typ, x.Type()) {
panic("partial call type does not match order's assigned type")
}
- clos.Left.Right = x
- delete(prealloc, n)
+ addr.SetRight(x)
+ n.Prealloc = nil
}
- return walkexpr(clos, init)
+ return walkexpr(cfn, init)
}
// callpartMethod returns the *types.Field representing the method
// referenced by method value n.
-func callpartMethod(n *Node) *types.Field {
- if n.Op != OCALLPART {
- Fatalf("expected OCALLPART, got %v", n)
- }
-
- // TODO(mdempsky): Optimize this. If necessary,
- // makepartialcall could save m for us somewhere.
- var m *types.Field
- if lookdot0(n.Right.Sym, n.Left.Type, &m, false) != 1 {
- Fatalf("failed to find field for OCALLPART")
- }
-
- return m
+func callpartMethod(n ir.Node) *types.Field {
+ return n.(*ir.CallPartExpr).Method
}
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
+ "go/constant"
+ "go/token"
+ "math"
"math/big"
"strings"
+ "unicode"
)
-// Ctype describes the constant kind of an "ideal" (untyped) constant.
-type Ctype uint8
-
const (
- CTxxx Ctype = iota
-
- CTINT
- CTRUNE
- CTFLT
- CTCPLX
- CTSTR
- CTBOOL
- CTNIL
+ // Maximum size in bits for big.Ints before signalling
+ // overflow and also mantissa precision for big.Floats.
+ Mpprec = 512
)
-type Val struct {
- // U contains one of:
- // bool bool when Ctype() == CTBOOL
- // *Mpint int when Ctype() == CTINT, rune when Ctype() == CTRUNE
- // *Mpflt float when Ctype() == CTFLT
- // *Mpcplx pair of floats when Ctype() == CTCPLX
- // string string when Ctype() == CTSTR
- // *Nilval when Ctype() == CTNIL
- U interface{}
-}
-
-func (v Val) Ctype() Ctype {
- switch x := v.U.(type) {
- default:
- Fatalf("unexpected Ctype for %T", v.U)
- panic("unreachable")
- case nil:
- return CTxxx
- case *NilVal:
- return CTNIL
- case bool:
- return CTBOOL
- case *Mpint:
- if x.Rune {
- return CTRUNE
- }
- return CTINT
- case *Mpflt:
- return CTFLT
- case *Mpcplx:
- return CTCPLX
- case string:
- return CTSTR
- }
-}
-
-func eqval(a, b Val) bool {
- if a.Ctype() != b.Ctype() {
- return false
- }
- switch x := a.U.(type) {
- default:
- Fatalf("unexpected Ctype for %T", a.U)
- panic("unreachable")
- case *NilVal:
- return true
- case bool:
- y := b.U.(bool)
- return x == y
- case *Mpint:
- y := b.U.(*Mpint)
- return x.Cmp(y) == 0
- case *Mpflt:
- y := b.U.(*Mpflt)
- return x.Cmp(y) == 0
- case *Mpcplx:
- y := b.U.(*Mpcplx)
- return x.Real.Cmp(&y.Real) == 0 && x.Imag.Cmp(&y.Imag) == 0
- case string:
- y := b.U.(string)
- return x == y
- }
-}
-
-// Interface returns the constant value stored in v as an interface{}.
-// It returns int64s for ints and runes, float64s for floats,
-// complex128s for complex values, and nil for constant nils.
-func (v Val) Interface() interface{} {
- switch x := v.U.(type) {
+func bigFloatVal(v constant.Value) *big.Float {
+ f := new(big.Float)
+ f.SetPrec(Mpprec)
+ switch u := constant.Val(v).(type) {
+ case int64:
+ f.SetInt64(u)
+ case *big.Int:
+ f.SetInt(u)
+ case *big.Float:
+ f.Set(u)
+ case *big.Rat:
+ f.SetRat(u)
default:
- Fatalf("unexpected Interface for %T", v.U)
- panic("unreachable")
- case *NilVal:
- return nil
- case bool, string:
- return x
- case *Mpint:
- return x.Int64()
- case *Mpflt:
- return x.Float64()
- case *Mpcplx:
- return complex(x.Real.Float64(), x.Imag.Float64())
- }
-}
-
-type NilVal struct{}
-
-// Int64Val returns n as an int64.
-// n must be an integer or rune constant.
-func (n *Node) Int64Val() int64 {
- if !Isconst(n, CTINT) {
- Fatalf("Int64Val(%v)", n)
- }
- return n.Val().U.(*Mpint).Int64()
-}
-
-// CanInt64 reports whether it is safe to call Int64Val() on n.
-func (n *Node) CanInt64() bool {
- if !Isconst(n, CTINT) {
- return false
+ base.Fatalf("unexpected: %v", u)
}
-
- // if the value inside n cannot be represented as an int64, the
- // return value of Int64 is undefined
- return n.Val().U.(*Mpint).CmpInt64(n.Int64Val()) == 0
+ return f
}
-// BoolVal returns n as a bool.
-// n must be a boolean constant.
-func (n *Node) BoolVal() bool {
- if !Isconst(n, CTBOOL) {
- Fatalf("BoolVal(%v)", n)
+func roundFloat(v constant.Value, sz int64) constant.Value {
+ switch sz {
+ case 4:
+ f, _ := constant.Float32Val(v)
+ return makeFloat64(float64(f))
+ case 8:
+ f, _ := constant.Float64Val(v)
+ return makeFloat64(f)
}
- return n.Val().U.(bool)
-}
-
-// StringVal returns the value of a literal string Node as a string.
-// n must be a string constant.
-func (n *Node) StringVal() string {
- if !Isconst(n, CTSTR) {
- Fatalf("StringVal(%v)", n)
- }
- return n.Val().U.(string)
+ base.Fatalf("unexpected size: %v", sz)
+ panic("unreachable")
}
// truncate float literal fv to 32-bit or 64-bit precision
// according to type; return truncated value.
-func truncfltlit(oldv *Mpflt, t *types.Type) *Mpflt {
- if t == nil {
- return oldv
- }
-
- if overflow(Val{oldv}, t) {
+func truncfltlit(v constant.Value, t *types.Type) constant.Value {
+ if t.IsUntyped() || overflow(v, t) {
// If there was overflow, simply continuing would set the
// value to Inf which in turn would lead to spurious follow-on
// errors. Avoid this by returning the existing value.
- return oldv
- }
-
- fv := newMpflt()
-
- // convert large precision literal floating
- // into limited precision (float64 or float32)
- switch t.Etype {
- case types.TFLOAT32:
- fv.SetFloat64(oldv.Float32())
- case types.TFLOAT64:
- fv.SetFloat64(oldv.Float64())
- default:
- Fatalf("truncfltlit: unexpected Etype %v", t.Etype)
+ return v
}
- return fv
+ return roundFloat(v, t.Size())
}
// truncate Real and Imag parts of Mpcplx to 32-bit or 64-bit
// precision, according to type; return truncated value. In case of
-// overflow, calls yyerror but does not truncate the input value.
-func trunccmplxlit(oldv *Mpcplx, t *types.Type) *Mpcplx {
- if t == nil {
- return oldv
- }
-
- if overflow(Val{oldv}, t) {
+// overflow, calls Errorf but does not truncate the input value.
+func trunccmplxlit(v constant.Value, t *types.Type) constant.Value {
+ if t.IsUntyped() || overflow(v, t) {
// If there was overflow, simply continuing would set the
// value to Inf which in turn would lead to spurious follow-on
// errors. Avoid this by returning the existing value.
- return oldv
+ return v
}
- cv := newMpcmplx()
-
- switch t.Etype {
- case types.TCOMPLEX64:
- cv.Real.SetFloat64(oldv.Real.Float32())
- cv.Imag.SetFloat64(oldv.Imag.Float32())
- case types.TCOMPLEX128:
- cv.Real.SetFloat64(oldv.Real.Float64())
- cv.Imag.SetFloat64(oldv.Imag.Float64())
- default:
- Fatalf("trunccplxlit: unexpected Etype %v", t.Etype)
- }
-
- return cv
+ fsz := t.Size() / 2
+ return makeComplex(roundFloat(constant.Real(v), fsz), roundFloat(constant.Imag(v), fsz))
}
// TODO(mdempsky): Replace these with better APIs.
-func convlit(n *Node, t *types.Type) *Node { return convlit1(n, t, false, nil) }
-func defaultlit(n *Node, t *types.Type) *Node { return convlit1(n, t, false, nil) }
+func convlit(n ir.Node, t *types.Type) ir.Node { return convlit1(n, t, false, nil) }
+func defaultlit(n ir.Node, t *types.Type) ir.Node { return convlit1(n, t, false, nil) }
// convlit1 converts an untyped expression n to type t. If n already
// has a type, convlit1 has no effect.
//
// If there's an error converting n to t, context is used in the error
// message.
-func convlit1(n *Node, t *types.Type, explicit bool, context func() string) *Node {
+func convlit1(n ir.Node, t *types.Type, explicit bool, context func() string) ir.Node {
if explicit && t == nil {
- Fatalf("explicit conversion missing type")
+ base.Fatalf("explicit conversion missing type")
}
if t != nil && t.IsUntyped() {
- Fatalf("bad conversion to untyped: %v", t)
+ base.Fatalf("bad conversion to untyped: %v", t)
}
- if n == nil || n.Type == nil {
+ if n == nil || n.Type() == nil {
// Allow sloppy callers.
return n
}
- if !n.Type.IsUntyped() {
+ if !n.Type().IsUntyped() {
// Already typed; nothing to do.
return n
}
- if n.Op == OLITERAL {
- // Can't always set n.Type directly on OLITERAL nodes.
- // See discussion on CL 20813.
- n = n.rawcopy()
- }
-
// Nil is technically not a constant, so handle it specially.
- if n.Type.Etype == TNIL {
+ if n.Type().Kind() == types.TNIL {
+ if n.Op() != ir.ONIL {
+ base.Fatalf("unexpected op: %v (%v)", n, n.Op())
+ }
+ n = ir.Copy(n)
if t == nil {
- yyerror("use of untyped nil")
+ base.Errorf("use of untyped nil")
n.SetDiag(true)
- n.Type = nil
+ n.SetType(nil)
return n
}
return n
}
- n.Type = t
+ n.SetType(t)
return n
}
- if t == nil || !okforconst[t.Etype] {
- t = defaultType(n.Type)
+ if t == nil || !ir.OKForConst[t.Kind()] {
+ t = defaultType(n.Type())
}
- switch n.Op {
+ switch n.Op() {
default:
- Fatalf("unexpected untyped expression: %v", n)
+ base.Fatalf("unexpected untyped expression: %v", n)
- case OLITERAL:
+ case ir.OLITERAL:
v := convertVal(n.Val(), t, explicit)
- if v.U == nil {
+ if v.Kind() == constant.Unknown {
+ n = ir.NewConstExpr(n.Val(), n)
break
}
- n.SetVal(v)
- n.Type = t
+ n = ir.NewConstExpr(v, n)
+ n.SetType(t)
return n
- case OPLUS, ONEG, OBITNOT, ONOT, OREAL, OIMAG:
- ot := operandType(n.Op, t)
+ case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT, ir.OREAL, ir.OIMAG:
+ ot := operandType(n.Op(), t)
if ot == nil {
n = defaultlit(n, nil)
break
}
- n.Left = convlit(n.Left, ot)
- if n.Left.Type == nil {
- n.Type = nil
+ n := n.(*ir.UnaryExpr)
+ n.SetLeft(convlit(n.Left(), ot))
+ if n.Left().Type() == nil {
+ n.SetType(nil)
return n
}
- n.Type = t
+ n.SetType(t)
return n
- case OADD, OSUB, OMUL, ODIV, OMOD, OOR, OXOR, OAND, OANDNOT, OOROR, OANDAND, OCOMPLEX:
- ot := operandType(n.Op, t)
+ case ir.OADD, ir.OSUB, ir.OMUL, ir.ODIV, ir.OMOD, ir.OOR, ir.OXOR, ir.OAND, ir.OANDNOT, ir.OOROR, ir.OANDAND, ir.OCOMPLEX:
+ ot := operandType(n.Op(), t)
if ot == nil {
n = defaultlit(n, nil)
break
}
- n.Left = convlit(n.Left, ot)
- n.Right = convlit(n.Right, ot)
- if n.Left.Type == nil || n.Right.Type == nil {
- n.Type = nil
+ var l, r ir.Node
+ switch n := n.(type) {
+ case *ir.BinaryExpr:
+ n.SetLeft(convlit(n.Left(), ot))
+ n.SetRight(convlit(n.Right(), ot))
+ l, r = n.Left(), n.Right()
+ case *ir.LogicalExpr:
+ n.SetLeft(convlit(n.Left(), ot))
+ n.SetRight(convlit(n.Right(), ot))
+ l, r = n.Left(), n.Right()
+ }
+
+ if l.Type() == nil || r.Type() == nil {
+ n.SetType(nil)
return n
}
- if !types.Identical(n.Left.Type, n.Right.Type) {
- yyerror("invalid operation: %v (mismatched types %v and %v)", n, n.Left.Type, n.Right.Type)
- n.Type = nil
+ if !types.Identical(l.Type(), r.Type()) {
+ base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type(), r.Type())
+ n.SetType(nil)
return n
}
- n.Type = t
+ n.SetType(t)
return n
- case OEQ, ONE, OLT, OLE, OGT, OGE:
+ case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
if !t.IsBoolean() {
break
}
- n.Type = t
+ n.SetType(t)
return n
- case OLSH, ORSH:
- n.Left = convlit1(n.Left, t, explicit, nil)
- n.Type = n.Left.Type
- if n.Type != nil && !n.Type.IsInteger() {
- yyerror("invalid operation: %v (shift of type %v)", n, n.Type)
- n.Type = nil
+ case ir.OLSH, ir.ORSH:
+ n.SetLeft(convlit1(n.Left(), t, explicit, nil))
+ n.SetType(n.Left().Type())
+ if n.Type() != nil && !n.Type().IsInteger() {
+ base.Errorf("invalid operation: %v (shift of type %v)", n, n.Type())
+ n.SetType(nil)
}
return n
}
if !n.Diag() {
if !t.Broke() {
if explicit {
- yyerror("cannot convert %L to type %v", n, t)
+ base.Errorf("cannot convert %L to type %v", n, t)
} else if context != nil {
- yyerror("cannot use %L as type %v in %s", n, t, context())
+ base.Errorf("cannot use %L as type %v in %s", n, t, context())
} else {
- yyerror("cannot use %L as type %v", n, t)
+ base.Errorf("cannot use %L as type %v", n, t)
}
}
n.SetDiag(true)
}
- n.Type = nil
+ n.SetType(nil)
return n
}
-func operandType(op Op, t *types.Type) *types.Type {
+func operandType(op ir.Op, t *types.Type) *types.Type {
switch op {
- case OCOMPLEX:
+ case ir.OCOMPLEX:
if t.IsComplex() {
return floatForComplex(t)
}
- case OREAL, OIMAG:
+ case ir.OREAL, ir.OIMAG:
if t.IsFloat() {
return complexForFloat(t)
}
default:
- if okfor[op][t.Etype] {
+ if okfor[op][t.Kind()] {
return t
}
}
//
// If explicit is true, then conversions from integer to string are
// also allowed.
-func convertVal(v Val, t *types.Type, explicit bool) Val {
- switch ct := v.Ctype(); ct {
- case CTBOOL:
+func convertVal(v constant.Value, t *types.Type, explicit bool) constant.Value {
+ switch ct := v.Kind(); ct {
+ case constant.Bool:
if t.IsBoolean() {
return v
}
- case CTSTR:
+ case constant.String:
if t.IsString() {
return v
}
- case CTINT, CTRUNE:
+ case constant.Int:
if explicit && t.IsString() {
return tostr(v)
}
fallthrough
- case CTFLT, CTCPLX:
+ case constant.Float, constant.Complex:
switch {
case t.IsInteger():
v = toint(v)
return v
case t.IsFloat():
v = toflt(v)
- v = Val{truncfltlit(v.U.(*Mpflt), t)}
+ v = truncfltlit(v, t)
return v
case t.IsComplex():
v = tocplx(v)
- v = Val{trunccmplxlit(v.U.(*Mpcplx), t)}
+ v = trunccmplxlit(v, t)
return v
}
}
- return Val{}
+ return constant.MakeUnknown()
}
-func tocplx(v Val) Val {
- switch u := v.U.(type) {
- case *Mpint:
- c := newMpcmplx()
- c.Real.SetInt(u)
- c.Imag.SetFloat64(0.0)
- v.U = c
-
- case *Mpflt:
- c := newMpcmplx()
- c.Real.Set(u)
- c.Imag.SetFloat64(0.0)
- v.U = c
- }
-
- return v
+// tocplx converts v to a complex constant value.
+func tocplx(v constant.Value) constant.Value {
+ return constant.ToComplex(v)
}
-func toflt(v Val) Val {
- switch u := v.U.(type) {
- case *Mpint:
- f := newMpflt()
- f.SetInt(u)
- v.U = f
-
- case *Mpcplx:
- f := newMpflt()
- f.Set(&u.Real)
- if u.Imag.CmpFloat64(0) != 0 {
- yyerror("constant %v truncated to real", u.GoString())
+// toflt converts v to a floating-point constant, reporting an error
+// if v is a complex value with a nonzero imaginary part.
+func toflt(v constant.Value) constant.Value {
+ if v.Kind() == constant.Complex {
+ if constant.Sign(constant.Imag(v)) != 0 {
+ base.Errorf("constant %v truncated to real", v)
}
- v.U = f
+ v = constant.Real(v)
}
- return v
+ return constant.ToFloat(v)
}
-func toint(v Val) Val {
- switch u := v.U.(type) {
- case *Mpint:
- if u.Rune {
- i := new(Mpint)
- i.Set(u)
- v.U = i
+// toint converts v to an integer constant if possible; if v has a
+// nonzero imaginary part or cannot be represented exactly as an
+// integer, it reports an error and returns 1 to prevent follow-on
+// errors.
+func toint(v constant.Value) constant.Value {
+ if v.Kind() == constant.Complex {
+ if constant.Sign(constant.Imag(v)) != 0 {
+ base.Errorf("constant %v truncated to integer", v)
}
+ v = constant.Real(v)
+ }
- case *Mpflt:
- i := new(Mpint)
- if !i.SetFloat(u) {
- if i.checkOverflow(0) {
- yyerror("integer too large")
- } else {
- // The value of u cannot be represented as an integer;
- // so we need to print an error message.
- // Unfortunately some float values cannot be
- // reasonably formatted for inclusion in an error
- // message (example: 1 + 1e-100), so first we try to
- // format the float; if the truncation resulted in
- // something that looks like an integer we omit the
- // value from the error message.
- // (See issue #11371).
- var t big.Float
- t.Parse(u.GoString(), 10)
- if t.IsInt() {
- yyerror("constant truncated to integer")
- } else {
- yyerror("constant %v truncated to integer", u.GoString())
- }
- }
- }
- v.U = i
+ if v := constant.ToInt(v); v.Kind() == constant.Int {
+ return v
+ }
- case *Mpcplx:
- i := new(Mpint)
- if !i.SetFloat(&u.Real) || u.Imag.CmpFloat64(0) != 0 {
- yyerror("constant %v truncated to integer", u.GoString())
+ // The value of v cannot be represented as an integer;
+ // so we need to print an error message.
+ // Unfortunately some float values cannot be
+ // reasonably formatted for inclusion in an error
+ // message (example: 1 + 1e-100), so first we try to
+ // format the float; if the truncation resulted in
+ // something that looks like an integer we omit the
+ // value from the error message.
+ // (See issue #11371).
+ f := bigFloatVal(v)
+ if f.MantExp(nil) > 2*Mpprec {
+ base.Errorf("integer too large")
+ } else {
+ var t big.Float
+ t.Parse(fmt.Sprint(v), 0)
+ if t.IsInt() {
+ base.Errorf("constant truncated to integer")
+ } else {
+ base.Errorf("constant %v truncated to integer", v)
}
-
- v.U = i
}
- return v
+ // Prevent follow-on errors.
+ // TODO(mdempsky): Use constant.MakeUnknown() instead.
+ return constant.MakeInt64(1)
}
-func doesoverflow(v Val, t *types.Type) bool {
- switch u := v.U.(type) {
- case *Mpint:
- if !t.IsInteger() {
- Fatalf("overflow: %v integer constant", t)
- }
- return u.Cmp(minintval[t.Etype]) < 0 || u.Cmp(maxintval[t.Etype]) > 0
-
- case *Mpflt:
- if !t.IsFloat() {
- Fatalf("overflow: %v floating-point constant", t)
- }
- return u.Cmp(minfltval[t.Etype]) <= 0 || u.Cmp(maxfltval[t.Etype]) >= 0
-
- case *Mpcplx:
- if !t.IsComplex() {
- Fatalf("overflow: %v complex constant", t)
- }
- return u.Real.Cmp(minfltval[t.Etype]) <= 0 || u.Real.Cmp(maxfltval[t.Etype]) >= 0 ||
- u.Imag.Cmp(minfltval[t.Etype]) <= 0 || u.Imag.Cmp(maxfltval[t.Etype]) >= 0
- }
-
- return false
+// doesoverflow reports whether constant value v is too large
+// to represent with type t.
+func doesoverflow(v constant.Value, t *types.Type) bool {
+ switch {
+ case t.IsInteger():
+ bits := uint(8 * t.Size())
+ if t.IsUnsigned() {
+ x, ok := constant.Uint64Val(v)
+ return !ok || x>>bits != 0
+ }
+ x, ok := constant.Int64Val(v)
+ if x < 0 {
+ x = ^x
+ }
+ return !ok || x>>(bits-1) != 0
+ case t.IsFloat():
+ switch t.Size() {
+ case 4:
+ f, _ := constant.Float32Val(v)
+ return math.IsInf(float64(f), 0)
+ case 8:
+ f, _ := constant.Float64Val(v)
+ return math.IsInf(f, 0)
+ }
+ case t.IsComplex():
+ ft := floatForComplex(t)
+ return doesoverflow(constant.Real(v), ft) || doesoverflow(constant.Imag(v), ft)
+ }
+ base.Fatalf("doesoverflow: %v, %v", v, t)
+ panic("unreachable")
}
-func overflow(v Val, t *types.Type) bool {
+// overflow reports whether constant value v is too large
+// to represent with type t, and emits an error message if so.
+func overflow(v constant.Value, t *types.Type) bool {
// v has already been converted
// to appropriate form for t.
- if t == nil || t.Etype == TIDEAL {
+ if t.IsUntyped() {
return false
}
-
- // Only uintptrs may be converted to pointers, which cannot overflow.
- if t.IsPtr() || t.IsUnsafePtr() {
- return false
+ if v.Kind() == constant.Int && constant.BitLen(v) > Mpprec {
+ base.Errorf("integer too large")
+ return true
}
-
if doesoverflow(v, t) {
- yyerror("constant %v overflows %v", v, t)
+ base.Errorf("constant %v overflows %v", types.FmtConst(v, false), t)
return true
}
-
return false
-
}
-func tostr(v Val) Val {
- switch u := v.U.(type) {
- case *Mpint:
- var r rune = 0xFFFD
- if u.Cmp(minintval[TINT32]) >= 0 && u.Cmp(maxintval[TINT32]) <= 0 {
- r = rune(u.Int64())
+// tostr converts an integer constant v to the string constant holding
+// that rune (as in a string(rune) conversion); values outside the
+// valid rune range become the Unicode replacement character.
+func tostr(v constant.Value) constant.Value {
+ if v.Kind() == constant.Int {
+ r := unicode.ReplacementChar
+ if x, ok := constant.Uint64Val(v); ok && x <= unicode.MaxRune {
+ r = rune(x)
}
- v.U = string(r)
+ v = constant.MakeString(string(r))
}
-
 return v
}
-func consttype(n *Node) Ctype {
- if n == nil || n.Op != OLITERAL {
- return CTxxx
- }
- return n.Val().Ctype()
-}
-
-func Isconst(n *Node, ct Ctype) bool {
- t := consttype(n)
+// tokenForOp maps ir.Op values to the corresponding go/token
+// operators used by the go/constant package.
+var tokenForOp = [...]token.Token{
+ ir.OPLUS: token.ADD,
+ ir.ONEG: token.SUB,
+ ir.ONOT: token.NOT,
+ ir.OBITNOT: token.XOR,
+
+ ir.OADD: token.ADD,
+ ir.OSUB: token.SUB,
+ ir.OMUL: token.MUL,
+ ir.ODIV: token.QUO,
+ ir.OMOD: token.REM,
+ ir.OOR: token.OR,
+ ir.OXOR: token.XOR,
+ ir.OAND: token.AND,
+ ir.OANDNOT: token.AND_NOT,
+ ir.OOROR: token.LOR,
+ ir.OANDAND: token.LAND,
+
+ ir.OEQ: token.EQL,
+ ir.ONE: token.NEQ,
+ ir.OLT: token.LSS,
+ ir.OLE: token.LEQ,
+ ir.OGT: token.GTR,
+ ir.OGE: token.GEQ,
+
+ ir.OLSH: token.SHL,
+ ir.ORSH: token.SHR,
+}
+
+// evalConst returns a constant-evaluated expression equivalent to n.
+// If n is not a constant, evalConst returns n.
+// Otherwise, evalConst returns a new OLITERAL with the same value as n,
+// and with .Orig pointing back to n.
+func evalConst(n ir.Node) ir.Node {
+ // Pick off just the opcodes that can be constant evaluated.
+ switch n.Op() {
+ case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT:
+ nl := n.Left()
+ if nl.Op() == ir.OLITERAL {
+ var prec uint
+ if n.Type().IsUnsigned() {
+ prec = uint(n.Type().Size() * 8)
+ }
+ return origConst(n, constant.UnaryOp(tokenForOp[n.Op()], nl.Val(), prec))
+ }
- // If the caller is asking for CTINT, allow CTRUNE too.
- // Makes life easier for back ends.
- return t == ct || (ct == CTINT && t == CTRUNE)
-}
+ case ir.OADD, ir.OSUB, ir.OMUL, ir.ODIV, ir.OMOD, ir.OOR, ir.OXOR, ir.OAND, ir.OANDNOT:
+ nl, nr := n.Left(), n.Right()
+ if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
+ rval := nr.Val()
-// evconst rewrites constant expressions into OLITERAL nodes.
-func evconst(n *Node) {
- nl, nr := n.Left, n.Right
+ // check for divisor underflow in complex division (see issue 20227)
+ if n.Op() == ir.ODIV && n.Type().IsComplex() && constant.Sign(square(constant.Real(rval))) == 0 && constant.Sign(square(constant.Imag(rval))) == 0 {
+ base.Errorf("complex division by zero")
+ n.SetType(nil)
+ return n
+ }
+ if (n.Op() == ir.ODIV || n.Op() == ir.OMOD) && constant.Sign(rval) == 0 {
+ base.Errorf("division by zero")
+ n.SetType(nil)
+ return n
+ }
- // Pick off just the opcodes that can be constant evaluated.
- switch op := n.Op; op {
- case OPLUS, ONEG, OBITNOT, ONOT:
- if nl.Op == OLITERAL {
- setconst(n, unaryOp(op, nl.Val(), n.Type))
+ tok := tokenForOp[n.Op()]
+ if n.Op() == ir.ODIV && n.Type().IsInteger() {
+ tok = token.QUO_ASSIGN // integer division
+ }
+ return origConst(n, constant.BinaryOp(nl.Val(), tok, rval))
}
- case OADD, OSUB, OMUL, ODIV, OMOD, OOR, OXOR, OAND, OANDNOT, OOROR, OANDAND:
- if nl.Op == OLITERAL && nr.Op == OLITERAL {
- setconst(n, binaryOp(nl.Val(), op, nr.Val()))
+ case ir.OOROR, ir.OANDAND:
+ nl, nr := n.Left(), n.Right()
+ if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
+ return origConst(n, constant.BinaryOp(nl.Val(), tokenForOp[n.Op()], nr.Val()))
}
- case OEQ, ONE, OLT, OLE, OGT, OGE:
- if nl.Op == OLITERAL && nr.Op == OLITERAL {
- setboolconst(n, compareOp(nl.Val(), op, nr.Val()))
+ case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
+ nl, nr := n.Left(), n.Right()
+ if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
+ return origBoolConst(n, constant.Compare(nl.Val(), tokenForOp[n.Op()], nr.Val()))
}
- case OLSH, ORSH:
- if nl.Op == OLITERAL && nr.Op == OLITERAL {
- setconst(n, shiftOp(nl.Val(), op, nr.Val()))
+ case ir.OLSH, ir.ORSH:
+ nl, nr := n.Left(), n.Right()
+ if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
+ // shiftBound from go/types; "so we can express smallestFloat64"
+ const shiftBound = 1023 - 1 + 52
+ s, ok := constant.Uint64Val(nr.Val())
+ if !ok || s > shiftBound {
+ base.Errorf("invalid shift count %v", nr)
+ n.SetType(nil)
+ break
+ }
+ return origConst(n, constant.Shift(toint(nl.Val()), tokenForOp[n.Op()], uint(s)))
}
- case OCONV, ORUNESTR:
- if okforconst[n.Type.Etype] && nl.Op == OLITERAL {
- setconst(n, convertVal(nl.Val(), n.Type, true))
+ case ir.OCONV, ir.ORUNESTR:
+ nl := n.Left()
+ if ir.OKForConst[n.Type().Kind()] && nl.Op() == ir.OLITERAL {
+ return origConst(n, convertVal(nl.Val(), n.Type(), true))
}
- case OCONVNOP:
- if okforconst[n.Type.Etype] && nl.Op == OLITERAL {
+ case ir.OCONVNOP:
+ nl := n.Left()
+ if ir.OKForConst[n.Type().Kind()] && nl.Op() == ir.OLITERAL {
// set so n.Orig gets OCONV instead of OCONVNOP
- n.Op = OCONV
- setconst(n, nl.Val())
+ n.SetOp(ir.OCONV)
+ return origConst(n, nl.Val())
}
- case OADDSTR:
+ case ir.OADDSTR:
// Merge adjacent constants in the argument list.
- s := n.List.Slice()
- for i1 := 0; i1 < len(s); i1++ {
- if Isconst(s[i1], CTSTR) && i1+1 < len(s) && Isconst(s[i1+1], CTSTR) {
- // merge from i1 up to but not including i2
+ s := n.List().Slice()
+ need := 0
+ for i := 0; i < len(s); i++ {
+ if i == 0 || !ir.IsConst(s[i-1], constant.String) || !ir.IsConst(s[i], constant.String) {
+ // Can't merge s[i] into s[i-1]; need a slot in the list.
+ need++
+ }
+ }
+ if need == len(s) {
+ return n
+ }
+ if need == 1 {
+ var strs []string
+ for _, c := range s {
+ strs = append(strs, ir.StringVal(c))
+ }
+ return origConst(n, constant.MakeString(strings.Join(strs, "")))
+ }
+ newList := make([]ir.Node, 0, need)
+ for i := 0; i < len(s); i++ {
+ if ir.IsConst(s[i], constant.String) && i+1 < len(s) && ir.IsConst(s[i+1], constant.String) {
+ // merge from i up to but not including i2
var strs []string
- i2 := i1
- for i2 < len(s) && Isconst(s[i2], CTSTR) {
- strs = append(strs, s[i2].StringVal())
+ i2 := i
+ for i2 < len(s) && ir.IsConst(s[i2], constant.String) {
+ strs = append(strs, ir.StringVal(s[i2]))
i2++
}
- nl := *s[i1]
- nl.Orig = &nl
- nl.SetVal(Val{strings.Join(strs, "")})
- s[i1] = &nl
- s = append(s[:i1+1], s[i2:]...)
+ nl := ir.Copy(n).(*ir.AddStringExpr)
+ nl.PtrList().Set(s[i:i2])
+ newList = append(newList, origConst(nl, constant.MakeString(strings.Join(strs, ""))))
+ i = i2 - 1
+ } else {
+ newList = append(newList, s[i])
}
}
- if len(s) == 1 && Isconst(s[0], CTSTR) {
- n.Op = OLITERAL
- n.SetVal(s[0].Val())
- } else {
- n.List.Set(s)
- }
-
- case OCAP, OLEN:
- switch nl.Type.Etype {
- case TSTRING:
- if Isconst(nl, CTSTR) {
- setintconst(n, int64(len(nl.StringVal())))
- }
- case TARRAY:
- if !hascallchan(nl) {
- setintconst(n, nl.Type.NumElem())
- }
- }
+ nn := ir.Copy(n).(*ir.AddStringExpr)
+ nn.PtrList().Set(newList)
+ return nn
- case OALIGNOF, OOFFSETOF, OSIZEOF:
- setintconst(n, evalunsafe(n))
-
- case OREAL, OIMAG:
- if nl.Op == OLITERAL {
- var re, im *Mpflt
- switch u := nl.Val().U.(type) {
- case *Mpint:
- re = newMpflt()
- re.SetInt(u)
- // im = 0
- case *Mpflt:
- re = u
- // im = 0
- case *Mpcplx:
- re = &u.Real
- im = &u.Imag
- default:
- Fatalf("impossible")
+ case ir.OCAP, ir.OLEN:
+ nl := n.Left()
+ switch nl.Type().Kind() {
+ case types.TSTRING:
+ if ir.IsConst(nl, constant.String) {
+ return origIntConst(n, int64(len(ir.StringVal(nl))))
}
- if n.Op == OIMAG {
- if im == nil {
- im = newMpflt()
- }
- re = im
+ case types.TARRAY:
+ if !anyCallOrChan(nl) {
+ return origIntConst(n, nl.Type().NumElem())
}
- setconst(n, Val{re})
}
- case OCOMPLEX:
- if nl.Op == OLITERAL && nr.Op == OLITERAL {
- // make it a complex literal
- c := newMpcmplx()
- c.Real.Set(toflt(nl.Val()).U.(*Mpflt))
- c.Imag.Set(toflt(nr.Val()).U.(*Mpflt))
- setconst(n, Val{c})
- }
- }
-}
+ case ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
+ return origIntConst(n, evalunsafe(n))
-func match(x, y Val) (Val, Val) {
- switch {
- case x.Ctype() == CTCPLX || y.Ctype() == CTCPLX:
- return tocplx(x), tocplx(y)
- case x.Ctype() == CTFLT || y.Ctype() == CTFLT:
- return toflt(x), toflt(y)
- }
-
- // Mixed int/rune are fine.
- return x, y
-}
-
-func compareOp(x Val, op Op, y Val) bool {
- x, y = match(x, y)
-
- switch x.Ctype() {
- case CTBOOL:
- x, y := x.U.(bool), y.U.(bool)
- switch op {
- case OEQ:
- return x == y
- case ONE:
- return x != y
+ case ir.OREAL:
+ nl := n.Left()
+ if nl.Op() == ir.OLITERAL {
+ return origConst(n, constant.Real(nl.Val()))
}
- case CTINT, CTRUNE:
- x, y := x.U.(*Mpint), y.U.(*Mpint)
- return cmpZero(x.Cmp(y), op)
-
- case CTFLT:
- x, y := x.U.(*Mpflt), y.U.(*Mpflt)
- return cmpZero(x.Cmp(y), op)
-
- case CTCPLX:
- x, y := x.U.(*Mpcplx), y.U.(*Mpcplx)
- eq := x.Real.Cmp(&y.Real) == 0 && x.Imag.Cmp(&y.Imag) == 0
- switch op {
- case OEQ:
- return eq
- case ONE:
- return !eq
+ case ir.OIMAG:
+ nl := n.Left()
+ if nl.Op() == ir.OLITERAL {
+ return origConst(n, constant.Imag(nl.Val()))
}
- case CTSTR:
- x, y := x.U.(string), y.U.(string)
- switch op {
- case OEQ:
- return x == y
- case ONE:
- return x != y
- case OLT:
- return x < y
- case OLE:
- return x <= y
- case OGT:
- return x > y
- case OGE:
- return x >= y
+ case ir.OCOMPLEX:
+ nl, nr := n.Left(), n.Right()
+ if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
+ return origConst(n, makeComplex(nl.Val(), nr.Val()))
}
}
- Fatalf("compareOp: bad comparison: %v %v %v", x, op, y)
- panic("unreachable")
+ return n
}
-func cmpZero(x int, op Op) bool {
- switch op {
- case OEQ:
- return x == 0
- case ONE:
- return x != 0
- case OLT:
- return x < 0
- case OLE:
- return x <= 0
- case OGT:
- return x > 0
- case OGE:
- return x >= 0
+// makeInt returns a constant.Value for the given *big.Int.
+func makeInt(i *big.Int) constant.Value {
+ if i.IsInt64() {
+ return constant.Make(i.Int64()) // workaround #42640 (Int64Val(Make(big.NewInt(10))) returns (10, false), not (10, true))
}
-
- Fatalf("cmpZero: want comparison operator, got %v", op)
- panic("unreachable")
+ return constant.Make(i)
}
-func binaryOp(x Val, op Op, y Val) Val {
- x, y = match(x, y)
-
-Outer:
- switch x.Ctype() {
- case CTBOOL:
- x, y := x.U.(bool), y.U.(bool)
- switch op {
- case OANDAND:
- return Val{U: x && y}
- case OOROR:
- return Val{U: x || y}
- }
-
- case CTINT, CTRUNE:
- x, y := x.U.(*Mpint), y.U.(*Mpint)
-
- u := new(Mpint)
- u.Rune = x.Rune || y.Rune
- u.Set(x)
- switch op {
- case OADD:
- u.Add(y)
- case OSUB:
- u.Sub(y)
- case OMUL:
- u.Mul(y)
- case ODIV:
- if y.CmpInt64(0) == 0 {
- yyerror("division by zero")
- return Val{}
- }
- u.Quo(y)
- case OMOD:
- if y.CmpInt64(0) == 0 {
- yyerror("division by zero")
- return Val{}
- }
- u.Rem(y)
- case OOR:
- u.Or(y)
- case OAND:
- u.And(y)
- case OANDNOT:
- u.AndNot(y)
- case OXOR:
- u.Xor(y)
- default:
- break Outer
- }
- return Val{U: u}
-
- case CTFLT:
- x, y := x.U.(*Mpflt), y.U.(*Mpflt)
-
- u := newMpflt()
- u.Set(x)
- switch op {
- case OADD:
- u.Add(y)
- case OSUB:
- u.Sub(y)
- case OMUL:
- u.Mul(y)
- case ODIV:
- if y.CmpFloat64(0) == 0 {
- yyerror("division by zero")
- return Val{}
- }
- u.Quo(y)
- default:
- break Outer
- }
- return Val{U: u}
-
- case CTCPLX:
- x, y := x.U.(*Mpcplx), y.U.(*Mpcplx)
-
- u := newMpcmplx()
- u.Real.Set(&x.Real)
- u.Imag.Set(&x.Imag)
- switch op {
- case OADD:
- u.Real.Add(&y.Real)
- u.Imag.Add(&y.Imag)
- case OSUB:
- u.Real.Sub(&y.Real)
- u.Imag.Sub(&y.Imag)
- case OMUL:
- u.Mul(y)
- case ODIV:
- if !u.Div(y) {
- yyerror("complex division by zero")
- return Val{}
- }
- default:
- break Outer
- }
- return Val{U: u}
+// makeFloat64 returns a float constant.Value for f, which must be
+// finite.
+func makeFloat64(f float64) constant.Value {
+ if math.IsInf(f, 0) {
+ base.Fatalf("infinity is not a valid constant")
}
-
- Fatalf("binaryOp: bad operation: %v %v %v", x, op, y)
- panic("unreachable")
+ v := constant.MakeFloat64(f)
+ v = constant.ToFloat(v) // workaround #42641 (MakeFloat64(0).Kind() returns Int, not Float)
+ return v
}
-func unaryOp(op Op, x Val, t *types.Type) Val {
- switch op {
- case OPLUS:
- switch x.Ctype() {
- case CTINT, CTRUNE, CTFLT, CTCPLX:
- return x
- }
-
- case ONEG:
- switch x.Ctype() {
- case CTINT, CTRUNE:
- x := x.U.(*Mpint)
- u := new(Mpint)
- u.Rune = x.Rune
- u.Set(x)
- u.Neg()
- return Val{U: u}
-
- case CTFLT:
- x := x.U.(*Mpflt)
- u := newMpflt()
- u.Set(x)
- u.Neg()
- return Val{U: u}
-
- case CTCPLX:
- x := x.U.(*Mpcplx)
- u := newMpcmplx()
- u.Real.Set(&x.Real)
- u.Imag.Set(&x.Imag)
- u.Real.Neg()
- u.Imag.Neg()
- return Val{U: u}
- }
-
- case OBITNOT:
- switch x.Ctype() {
- case CTINT, CTRUNE:
- x := x.U.(*Mpint)
-
- u := new(Mpint)
- u.Rune = x.Rune
- if t.IsSigned() || t.IsUntyped() {
- // Signed values change sign.
- u.SetInt64(-1)
- } else {
- // Unsigned values invert their bits.
- u.Set(maxintval[t.Etype])
- }
- u.Xor(x)
- return Val{U: u}
- }
-
- case ONOT:
- return Val{U: !x.U.(bool)}
- }
-
- Fatalf("unaryOp: bad operation: %v %v", op, x)
- panic("unreachable")
+// makeComplex returns the complex constant real + imag*i.
+func makeComplex(real, imag constant.Value) constant.Value {
+ return constant.BinaryOp(constant.ToFloat(real), token.ADD, constant.MakeImag(constant.ToFloat(imag)))
}
-func shiftOp(x Val, op Op, y Val) Val {
- if x.Ctype() != CTRUNE {
- x = toint(x)
- }
- y = toint(y)
-
- u := new(Mpint)
- u.Set(x.U.(*Mpint))
- u.Rune = x.U.(*Mpint).Rune
- switch op {
- case OLSH:
- u.Lsh(y.U.(*Mpint))
- case ORSH:
- u.Rsh(y.U.(*Mpint))
- default:
- Fatalf("shiftOp: bad operator: %v", op)
- panic("unreachable")
- }
- return Val{U: u}
+// square returns the constant x*x.
+func square(x constant.Value) constant.Value {
+ return constant.BinaryOp(x, token.MUL, x)
}
-// setconst rewrites n as an OLITERAL with value v.
-func setconst(n *Node, v Val) {
- // If constant folding failed, mark n as broken and give up.
- if v.U == nil {
- n.Type = nil
- return
- }
-
- // Ensure n.Orig still points to a semantically-equivalent
- // expression after we rewrite n into a constant.
- if n.Orig == n {
- n.Orig = n.sepcopy()
- }
-
- *n = Node{
- Op: OLITERAL,
- Pos: n.Pos,
- Orig: n.Orig,
- Type: n.Type,
- Xoffset: BADWIDTH,
- }
- n.SetVal(v)
- if vt := idealType(v.Ctype()); n.Type.IsUntyped() && n.Type != vt {
- Fatalf("untyped type mismatch, have: %v, want: %v", n.Type, vt)
- }
+// For matching historical "constant OP overflow" error messages.
+// TODO(mdempsky): Replace with error messages like go/types uses.
+var overflowNames = [...]string{
+ ir.OADD: "addition",
+ ir.OSUB: "subtraction",
+ ir.OMUL: "multiplication",
+ ir.OLSH: "shift",
+ ir.OXOR: "bitwise XOR",
+ ir.OBITNOT: "bitwise complement",
+}
- // Check range.
+// origConst returns an OLITERAL with orig n and value v.
+func origConst(n ir.Node, v constant.Value) ir.Node {
lno := setlineno(n)
- overflow(v, n.Type)
- lineno = lno
-
- if !n.Type.IsUntyped() {
- switch v.Ctype() {
- // Truncate precision for non-ideal float.
- case CTFLT:
- n.SetVal(Val{truncfltlit(v.U.(*Mpflt), n.Type)})
- // Truncate precision for non-ideal complex.
- case CTCPLX:
- n.SetVal(Val{trunccmplxlit(v.U.(*Mpcplx), n.Type)})
+ v = convertVal(v, n.Type(), false)
+ base.Pos = lno
+
+ switch v.Kind() {
+ case constant.Int:
+ if constant.BitLen(v) <= Mpprec {
+ break
}
+ fallthrough
+ case constant.Unknown:
+ what := overflowNames[n.Op()]
+ if what == "" {
+ base.Fatalf("unexpected overflow: %v", n.Op())
+ }
+ base.ErrorfAt(n.Pos(), "constant %v overflow", what)
+ n.SetType(nil)
+ return n
}
-}
-func setboolconst(n *Node, v bool) {
- setconst(n, Val{U: v})
+ return ir.NewConstExpr(v, n)
}
-func setintconst(n *Node, v int64) {
- u := new(Mpint)
- u.SetInt64(v)
- setconst(n, Val{u})
-}
-
-// nodlit returns a new untyped constant with value v.
-func nodlit(v Val) *Node {
- n := nod(OLITERAL, nil, nil)
- n.SetVal(v)
- n.Type = idealType(v.Ctype())
- return n
+// origBoolConst returns an OLITERAL with orig n and boolean value v.
+func origBoolConst(n ir.Node, v bool) ir.Node {
+ return origConst(n, constant.MakeBool(v))
}
-func idealType(ct Ctype) *types.Type {
- switch ct {
- case CTSTR:
- return types.UntypedString
- case CTBOOL:
- return types.UntypedBool
- case CTINT:
- return types.UntypedInt
- case CTRUNE:
- return types.UntypedRune
- case CTFLT:
- return types.UntypedFloat
- case CTCPLX:
- return types.UntypedComplex
- case CTNIL:
- return types.Types[TNIL]
- }
- Fatalf("unexpected Ctype: %v", ct)
- return nil
+// origIntConst returns an OLITERAL with orig n and integer value v.
+func origIntConst(n ir.Node, v int64) ir.Node {
+ return origConst(n, constant.MakeInt64(v))
}
// defaultlit on both nodes simultaneously;
// force means must assign concrete (non-ideal) type.
// The results of defaultlit2 MUST be assigned back to l and r, e.g.
// n.Left, n.Right = defaultlit2(n.Left, n.Right, force)
-func defaultlit2(l *Node, r *Node, force bool) (*Node, *Node) {
- if l.Type == nil || r.Type == nil {
+func defaultlit2(l ir.Node, r ir.Node, force bool) (ir.Node, ir.Node) {
+ if l.Type() == nil || r.Type() == nil {
return l, r
}
- if !l.Type.IsUntyped() {
- r = convlit(r, l.Type)
+ if !l.Type().IsUntyped() {
+ r = convlit(r, l.Type())
return l, r
}
- if !r.Type.IsUntyped() {
- l = convlit(l, r.Type)
+ if !r.Type().IsUntyped() {
+ l = convlit(l, r.Type())
return l, r
}
}
// Can't mix bool with non-bool, string with non-string, or nil with anything (untyped).
- if l.Type.IsBoolean() != r.Type.IsBoolean() {
+ if l.Type().IsBoolean() != r.Type().IsBoolean() {
return l, r
}
- if l.Type.IsString() != r.Type.IsString() {
+ if l.Type().IsString() != r.Type().IsString() {
return l, r
}
- if l.isNil() || r.isNil() {
+ if ir.IsNil(l) || ir.IsNil(r) {
return l, r
}
- t := defaultType(mixUntyped(l.Type, r.Type))
+ t := defaultType(mixUntyped(l.Type(), r.Type()))
l = convlit(l, t)
r = convlit(r, t)
return l, r
}
-func ctype(t *types.Type) Ctype {
- switch t {
- case types.UntypedBool:
- return CTBOOL
- case types.UntypedString:
- return CTSTR
- case types.UntypedInt:
- return CTINT
- case types.UntypedRune:
- return CTRUNE
- case types.UntypedFloat:
- return CTFLT
- case types.UntypedComplex:
- return CTCPLX
+// mixUntyped returns the "wider" of the two untyped constant types
+// t1 and t2, in the order int < rune < float < complex.
+func mixUntyped(t1, t2 *types.Type) *types.Type {
+ if t1 == t2 {
+ return t1
+ }
+
+ rank := func(t *types.Type) int {
+ switch t {
+ case types.UntypedInt:
+ return 0
+ case types.UntypedRune:
+ return 1
+ case types.UntypedFloat:
+ return 2
+ case types.UntypedComplex:
+ return 3
+ }
+ base.Fatalf("bad type %v", t)
+ panic("unreachable")
}
- Fatalf("bad type %v", t)
- panic("unreachable")
-}
-func mixUntyped(t1, t2 *types.Type) *types.Type {
- t := t1
- if ctype(t2) > ctype(t1) {
- t = t2
+ if rank(t2) > rank(t1) {
+ return t2
}
- return t
+ return t1
}
func defaultType(t *types.Type) *types.Type {
- if !t.IsUntyped() || t.Etype == TNIL {
+ if !t.IsUntyped() || t.Kind() == types.TNIL {
return t
}
switch t {
case types.UntypedBool:
- return types.Types[TBOOL]
+ return types.Types[types.TBOOL]
case types.UntypedString:
- return types.Types[TSTRING]
+ return types.Types[types.TSTRING]
case types.UntypedInt:
- return types.Types[TINT]
+ return types.Types[types.TINT]
case types.UntypedRune:
- return types.Runetype
+ return types.RuneType
case types.UntypedFloat:
- return types.Types[TFLOAT64]
+ return types.Types[types.TFLOAT64]
case types.UntypedComplex:
- return types.Types[TCOMPLEX128]
+ return types.Types[types.TCOMPLEX128]
}
- Fatalf("bad type %v", t)
+ base.Fatalf("bad type %v", t)
return nil
}
-func smallintconst(n *Node) bool {
- if n.Op == OLITERAL && Isconst(n, CTINT) && n.Type != nil {
- switch simtype[n.Type.Etype] {
- case TINT8,
- TUINT8,
- TINT16,
- TUINT16,
- TINT32,
- TUINT32,
- TBOOL:
- return true
-
- case TIDEAL, TINT64, TUINT64, TPTR:
- v, ok := n.Val().U.(*Mpint)
- if ok && v.Cmp(minintval[TINT32]) >= 0 && v.Cmp(maxintval[TINT32]) <= 0 {
- return true
- }
- }
+// smallintconst reports whether n is an integer constant whose value
+// fits in an int32.
+func smallintconst(n ir.Node) bool {
+ if n.Op() == ir.OLITERAL {
+ v, ok := constant.Int64Val(n.Val())
+ return ok && int64(int32(v)) == v
+ }
-
 return false
}
// If n is not a constant expression, not representable as an
// integer, or negative, it returns -1. If n is too large, it
// returns -2.
-func indexconst(n *Node) int64 {
- if n.Op != OLITERAL {
+func indexconst(n ir.Node) int64 {
+ if n.Op() != ir.OLITERAL {
+ return -1
+ }
+ if !n.Type().IsInteger() && n.Type().Kind() != types.TIDEAL {
return -1
}
- v := toint(n.Val()) // toint returns argument unchanged if not representable as an *Mpint
- vi, ok := v.U.(*Mpint)
- if !ok || vi.CmpInt64(0) < 0 {
+ v := toint(n.Val())
+ if v.Kind() != constant.Int || constant.Sign(v) < 0 {
return -1
}
- if vi.Cmp(maxintval[TINT]) > 0 {
+ if doesoverflow(v, types.Types[types.TINT]) {
return -2
}
-
- return vi.Int64()
+ return ir.IntVal(types.Types[types.TINT], v)
}
// isGoConst reports whether n is a Go language constant (as opposed to a
//
// Expressions derived from nil, like string([]byte(nil)), while they
// may be known at compile time, are not Go language constants.
-func (n *Node) isGoConst() bool {
- return n.Op == OLITERAL && n.Val().Ctype() != CTNIL
-}
-
-func hascallchan(n *Node) bool {
- if n == nil {
- return false
- }
- switch n.Op {
- case OAPPEND,
- OCALL,
- OCALLFUNC,
- OCALLINTER,
- OCALLMETH,
- OCAP,
- OCLOSE,
- OCOMPLEX,
- OCOPY,
- ODELETE,
- OIMAG,
- OLEN,
- OMAKE,
- ONEW,
- OPANIC,
- OPRINT,
- OPRINTN,
- OREAL,
- ORECOVER,
- ORECV:
- return true
- }
-
- if hascallchan(n.Left) || hascallchan(n.Right) {
- return true
- }
- for _, n1 := range n.List.Slice() {
- if hascallchan(n1) {
+func isGoConst(n ir.Node) bool {
+ return n.Op() == ir.OLITERAL
+}
+
+// anyCallOrChan reports whether n contains any calls or channel operations.
+func anyCallOrChan(n ir.Node) bool {
+ return ir.Any(n, func(n ir.Node) bool {
+ switch n.Op() {
+ case ir.OAPPEND,
+ ir.OCALL,
+ ir.OCALLFUNC,
+ ir.OCALLINTER,
+ ir.OCALLMETH,
+ ir.OCAP,
+ ir.OCLOSE,
+ ir.OCOMPLEX,
+ ir.OCOPY,
+ ir.ODELETE,
+ ir.OIMAG,
+ ir.OLEN,
+ ir.OMAKE,
+ ir.ONEW,
+ ir.OPANIC,
+ ir.OPRINT,
+ ir.OPRINTN,
+ ir.OREAL,
+ ir.ORECOVER,
+ ir.ORECV:
return true
}
- }
- for _, n2 := range n.Rlist.Slice() {
- if hascallchan(n2) {
- return true
- }
- }
-
- return false
+ return false
+ })
}
// A constSet represents a set of Go constant expressions.
// where are used in the error message.
//
// n must not be an untyped constant.
-func (s *constSet) add(pos src.XPos, n *Node, what, where string) {
- if n.Op == OCONVIFACE && n.Implicit() {
- n = n.Left
+func (s *constSet) add(pos src.XPos, n ir.Node, what, where string) {
+ if conv := n; conv.Op() == ir.OCONVIFACE {
+ if conv.Implicit() {
+ n = conv.Left()
+ }
}
- if !n.isGoConst() {
+ if !isGoConst(n) {
return
}
- if n.Type.IsUntyped() {
- Fatalf("%v is untyped", n)
+ if n.Type().IsUntyped() {
+ base.Fatalf("%v is untyped", n)
}
// Consts are only duplicates if they have the same value and
// #21866 by treating all type aliases like byte/uint8 and
// rune/int32.
- typ := n.Type
+ typ := n.Type()
switch typ {
- case types.Bytetype:
- typ = types.Types[TUINT8]
- case types.Runetype:
- typ = types.Types[TINT32]
+ case types.ByteType:
+ typ = types.Types[types.TUINT8]
+ case types.RuneType:
+ typ = types.Types[types.TINT32]
}
- k := constSetKey{typ, n.Val().Interface()}
+ k := constSetKey{typ, ir.ConstValue(n)}
if hasUniquePos(n) {
- pos = n.Pos
+ pos = n.Pos()
}
if s.m == nil {
}
if prevPos, isDup := s.m[k]; isDup {
- yyerrorl(pos, "duplicate %s %s in %s\n\tprevious %s at %v",
+ base.ErrorfAt(pos, "duplicate %s %s in %s\n\tprevious %s at %v",
what, nodeAndVal(n), where,
- what, linestr(prevPos))
+ what, base.FmtPos(prevPos))
} else {
s.m[k] = pos
}
// the latter is non-obvious.
//
// TODO(mdempsky): This could probably be a fmt.go flag.
-func nodeAndVal(n *Node) string {
- show := n.String()
- val := n.Val().Interface()
+func nodeAndVal(n ir.Node) string {
+ show := fmt.Sprint(n)
+ val := ir.ConstValue(n)
if s := fmt.Sprintf("%#v", val); show != s {
show += " (value " + s + ")"
}
import (
"bytes"
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
"strings"
)
-// Declaration stack & operations
+// EnableNoWriteBarrierRecCheck initializes the global
+// nowritebarrierrec checker.
+func EnableNoWriteBarrierRecCheck() {
+ nowritebarrierrecCheck = newNowritebarrierrecChecker()
+}
+
+// NoWriteBarrierRecCheck runs the nowritebarrierrec check over the
+// call graph (write barriers are known by this point) and then
+// releases the checker.
+func NoWriteBarrierRecCheck() {
+ // Write barriers are now known. Check the
+ // call graph.
+ nowritebarrierrecCheck.check()
+ nowritebarrierrecCheck = nil
+}
-var externdcl []*Node
+var nowritebarrierrecCheck *nowritebarrierrecChecker
+// testdclstack aborts the compile if a mark was left on the
+// declaration stack.
func testdclstack() {
if !types.IsDclstackValid() {
- if nerrors != 0 {
- errorexit()
- }
- Fatalf("mark left on the dclstack")
+ base.Fatalf("mark left on the dclstack")
}
}
// redeclare emits a diagnostic about symbol s being redeclared at pos.
func redeclare(pos src.XPos, s *types.Sym, where string) {
if !s.Lastlineno.IsKnown() {
- pkg := s.Origpkg
- if pkg == nil {
- pkg = s.Pkg
- }
- yyerrorl(pos, "%v redeclared %s\n"+
- "\tprevious declaration during import %q", s, where, pkg.Path)
+ pkgName := dotImportRefs[s.Def.(*ir.Ident)]
+ base.ErrorfAt(pos, "%v redeclared %s\n"+
+ "\t%v: previous declaration during import %q", s, where, base.FmtPos(pkgName.Pos()), pkgName.Pkg.Path)
} else {
prevPos := s.Lastlineno
pos, prevPos = prevPos, pos
}
- yyerrorl(pos, "%v redeclared %s\n"+
- "\tprevious declaration at %v", s, where, linestr(prevPos))
+ base.ErrorfAt(pos, "%v redeclared %s\n"+
+ "\t%v: previous declaration", s, where, base.FmtPos(prevPos))
}
}
// declare records that Node n declares symbol n.Sym in the specified
// declaration context.
-func declare(n *Node, ctxt Class) {
- if n.isBlank() {
+func declare(n *ir.Name, ctxt ir.Class) {
+ if ir.IsBlank(n) {
return
}
- if n.Name == nil {
- // named OLITERAL needs Name; most OLITERALs don't.
- n.Name = new(Name)
- }
-
- s := n.Sym
+ s := n.Sym()
// kludgy: typecheckok means we're past parsing. Eg genwrapper may declare out of package names later.
- if !inimport && !typecheckok && s.Pkg != localpkg {
- yyerrorl(n.Pos, "cannot declare name %v", s)
+ if !inimport && !typecheckok && s.Pkg != types.LocalPkg {
+ base.ErrorfAt(n.Pos(), "cannot declare name %v", s)
}
gen := 0
- if ctxt == PEXTERN {
+ if ctxt == ir.PEXTERN {
if s.Name == "init" {
- yyerrorl(n.Pos, "cannot declare init - must be func")
+ base.ErrorfAt(n.Pos(), "cannot declare init - must be func")
}
if s.Name == "main" && s.Pkg.Name == "main" {
- yyerrorl(n.Pos, "cannot declare main - must be func")
+ base.ErrorfAt(n.Pos(), "cannot declare main - must be func")
}
- externdcl = append(externdcl, n)
+ Target.Externs = append(Target.Externs, n)
} else {
- if Curfn == nil && ctxt == PAUTO {
- lineno = n.Pos
- Fatalf("automatic outside function")
+ if Curfn == nil && ctxt == ir.PAUTO {
+ base.Pos = n.Pos()
+ base.Fatalf("automatic outside function")
}
- if Curfn != nil && ctxt != PFUNC {
- Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
+ if Curfn != nil && ctxt != ir.PFUNC && n.Op() == ir.ONAME {
+ Curfn.Dcl = append(Curfn.Dcl, n)
}
- if n.Op == OTYPE {
+ if n.Op() == ir.OTYPE {
declare_typegen++
gen = declare_typegen
- } else if n.Op == ONAME && ctxt == PAUTO && !strings.Contains(s.Name, "·") {
+ } else if n.Op() == ir.ONAME && ctxt == ir.PAUTO && !strings.Contains(s.Name, "·") {
vargen++
gen = vargen
}
types.Pushdcl(s)
- n.Name.Curfn = Curfn
+ n.Curfn = Curfn
}
- if ctxt == PAUTO {
- n.Xoffset = 0
+ if ctxt == ir.PAUTO {
+ n.SetFrameOffset(0)
}
if s.Block == types.Block {
// functype will print errors about duplicate function arguments.
// Don't repeat the error here.
- if ctxt != PPARAM && ctxt != PPARAMOUT {
- redeclare(n.Pos, s, "in this block")
+ if ctxt != ir.PPARAM && ctxt != ir.PPARAMOUT {
+ redeclare(n.Pos(), s, "in this block")
}
}
s.Block = types.Block
- s.Lastlineno = lineno
- s.Def = asTypesNode(n)
- n.Name.Vargen = int32(gen)
+ s.Lastlineno = base.Pos
+ s.Def = n
+ n.Vargen = int32(gen)
n.SetClass(ctxt)
- if ctxt == PFUNC {
- n.Sym.SetFunc(true)
+ if ctxt == ir.PFUNC {
+ n.Sym().SetFunc(true)
}
autoexport(n, ctxt)
}
-func addvar(n *Node, t *types.Type, ctxt Class) {
- if n == nil || n.Sym == nil || (n.Op != ONAME && n.Op != ONONAME) || t == nil {
- Fatalf("addvar: n=%v t=%v nil", n, t)
- }
-
- n.Op = ONAME
- declare(n, ctxt)
- n.Type = t
-}
-
// declare variables from grammar
// new_name_list (type | [type] = expr_list)
-func variter(vl []*Node, t *Node, el []*Node) []*Node {
- var init []*Node
+func variter(vl []*ir.Name, t ir.Ntype, el []ir.Node) []ir.Node {
+ var init []ir.Node
doexpr := len(el) > 0
if len(el) == 1 && len(vl) > 1 {
e := el[0]
- as2 := nod(OAS2, nil, nil)
- as2.List.Set(vl)
- as2.Rlist.Set1(e)
+ as2 := ir.Nod(ir.OAS2, nil, nil)
+ as2.PtrRlist().Set1(e)
for _, v := range vl {
- v.Op = ONAME
+ as2.PtrList().Append(v)
declare(v, dclcontext)
- v.Name.Param.Ntype = t
- v.Name.Defn = as2
+ v.Ntype = t
+ v.Defn = as2
if Curfn != nil {
- init = append(init, nod(ODCL, v, nil))
+ init = append(init, ir.Nod(ir.ODCL, v, nil))
}
}
return append(init, as2)
}
- nel := len(el)
- for _, v := range vl {
- var e *Node
+ for i, v := range vl {
+ var e ir.Node
if doexpr {
- if len(el) == 0 {
- yyerror("assignment mismatch: %d variables but %d values", len(vl), nel)
+ if i >= len(el) {
+ base.Errorf("assignment mismatch: %d variables but %d values", len(vl), len(el))
break
}
- e = el[0]
- el = el[1:]
+ e = el[i]
}
- v.Op = ONAME
declare(v, dclcontext)
- v.Name.Param.Ntype = t
+ v.Ntype = t
- if e != nil || Curfn != nil || v.isBlank() {
+ if e != nil || Curfn != nil || ir.IsBlank(v) {
if Curfn != nil {
- init = append(init, nod(ODCL, v, nil))
+ init = append(init, ir.Nod(ir.ODCL, v, nil))
}
- e = nod(OAS, v, e)
- init = append(init, e)
- if e.Right != nil {
- v.Name.Defn = e
+ as := ir.Nod(ir.OAS, v, e)
+ init = append(init, as)
+ if e != nil {
+ v.Defn = as
}
}
}
- if len(el) != 0 {
- yyerror("assignment mismatch: %d variables but %d values", len(vl), nel)
+ if len(el) > len(vl) {
+ base.Errorf("assignment mismatch: %d variables but %d values", len(vl), len(el))
}
return init
}
-// newnoname returns a new ONONAME Node associated with symbol s.
-func newnoname(s *types.Sym) *Node {
- if s == nil {
- Fatalf("newnoname nil")
+// newFuncNameAt generates a new name node for a function or method.
+func newFuncNameAt(pos src.XPos, s *types.Sym, fn *ir.Func) *ir.Name {
+ if fn.Nname != nil {
+ base.Fatalf("newFuncName - already have name")
}
- n := nod(ONONAME, nil, nil)
- n.Sym = s
- n.Xoffset = 0
- return n
-}
-
-// newfuncnamel generates a new name node for a function or method.
-// TODO(rsc): Use an ODCLFUNC node instead. See comment in CL 7360.
-func newfuncnamel(pos src.XPos, s *types.Sym) *Node {
- n := newnamel(pos, s)
- n.Func = new(Func)
- n.Func.SetIsHiddenClosure(Curfn != nil)
+ n := ir.NewNameAt(pos, s)
+ n.SetFunc(fn)
+ fn.Nname = n
return n
}
-// this generates a new name node for a name
-// being declared.
-func dclname(s *types.Sym) *Node {
- n := newname(s)
- n.Op = ONONAME // caller will correct it
- return n
-}
-
-func typenod(t *types.Type) *Node {
- return typenodl(src.NoXPos, t)
-}
-
-func typenodl(pos src.XPos, t *types.Type) *Node {
- // if we copied another type with *t = *u
- // then t->nod might be out of date, so
- // check t->nod->type too
- if asNode(t.Nod) == nil || asNode(t.Nod).Type != t {
- t.Nod = asTypesNode(nodl(pos, OTYPE, nil, nil))
- asNode(t.Nod).Type = t
- asNode(t.Nod).Sym = t.Sym
- }
-
- return asNode(t.Nod)
-}
-
-func anonfield(typ *types.Type) *Node {
+func anonfield(typ *types.Type) *ir.Field {
return symfield(nil, typ)
}
-func namedfield(s string, typ *types.Type) *Node {
+func namedfield(s string, typ *types.Type) *ir.Field {
return symfield(lookup(s), typ)
}
-func symfield(s *types.Sym, typ *types.Type) *Node {
- n := nodSym(ODCLFIELD, nil, s)
- n.Type = typ
- return n
+func symfield(s *types.Sym, typ *types.Type) *ir.Field {
+ return ir.NewField(base.Pos, s, nil, typ)
}
// oldname returns the Node that declares symbol s in the current scope.
// If no such Node currently exists, an ONONAME Node is returned instead.
// Automatically creates a new closure variable if the referenced symbol was
// declared in a different (containing) function.
-func oldname(s *types.Sym) *Node {
- n := asNode(s.Def)
+func oldname(s *types.Sym) ir.Node {
+ if s.Pkg != types.LocalPkg {
+ return ir.NewIdent(base.Pos, s)
+ }
+
+ n := ir.AsNode(s.Def)
if n == nil {
// Maybe a top-level declaration will come along later to
// define s. resolve will check s.Def again once all input
// source has been processed.
- return newnoname(s)
+ return ir.NewIdent(base.Pos, s)
}
- if Curfn != nil && n.Op == ONAME && n.Name.Curfn != nil && n.Name.Curfn != Curfn {
+ if Curfn != nil && n.Op() == ir.ONAME && n.Name().Curfn != nil && n.Name().Curfn != Curfn {
// Inner func is referring to var in outer func.
//
// TODO(rsc): If there is an outer variable x and we
// are parsing x := 5 inside the closure, until we get to
// the := it looks like a reference to the outer x so we'll
// make x a closure variable unnecessarily.
- c := n.Name.Param.Innermost
- if c == nil || c.Name.Curfn != Curfn {
+ c := n.Name().Innermost
+ if c == nil || c.Curfn != Curfn {
// Do not have a closure var for the active closure yet; make one.
- c = newname(s)
- c.SetClass(PAUTOHEAP)
- c.Name.SetIsClosureVar(true)
+ c = NewName(s)
+ c.SetClass(ir.PAUTOHEAP)
+ c.SetIsClosureVar(true)
c.SetIsDDD(n.IsDDD())
- c.Name.Defn = n
+ c.Defn = n
// Link into list of active closure variables.
// Popped from list in func funcLit.
- c.Name.Param.Outer = n.Name.Param.Innermost
- n.Name.Param.Innermost = c
+ c.Outer = n.Name().Innermost
+ n.Name().Innermost = c
- Curfn.Func.Cvars.Append(c)
+ Curfn.ClosureVars = append(Curfn.ClosureVars, c)
}
// return ref to closure var, not original
return n
}
-// importName is like oldname, but it reports an error if sym is from another package and not exported.
-func importName(sym *types.Sym) *Node {
+// importName is like oldname,
+// but it reports an error if sym is from another package and not exported.
+func importName(sym *types.Sym) ir.Node {
n := oldname(sym)
- if !types.IsExported(sym.Name) && sym.Pkg != localpkg {
+ if !types.IsExported(sym.Name) && sym.Pkg != types.LocalPkg {
n.SetDiag(true)
- yyerror("cannot refer to unexported name %s.%s", sym.Pkg.Name, sym.Name)
+ base.Errorf("cannot refer to unexported name %s.%s", sym.Pkg.Name, sym.Name)
}
return n
}
// := declarations
-func colasname(n *Node) bool {
- switch n.Op {
- case ONAME,
- ONONAME,
- OPACK,
- OTYPE,
- OLITERAL:
- return n.Sym != nil
+func colasname(n ir.Node) bool {
+ switch n.Op() {
+ case ir.ONAME,
+ ir.ONONAME,
+ ir.OPACK,
+ ir.OTYPE,
+ ir.OLITERAL:
+ return n.Sym() != nil
}
return false
}
-func colasdefn(left []*Node, defn *Node) {
+func colasdefn(left []ir.Node, defn ir.Node) {
for _, n := range left {
- if n.Sym != nil {
- n.Sym.SetUniq(true)
+ if n.Sym() != nil {
+ n.Sym().SetUniq(true)
}
}
var nnew, nerr int
for i, n := range left {
- if n.isBlank() {
+ if ir.IsBlank(n) {
continue
}
if !colasname(n) {
- yyerrorl(defn.Pos, "non-name %v on left side of :=", n)
+ base.ErrorfAt(defn.Pos(), "non-name %v on left side of :=", n)
nerr++
continue
}
- if !n.Sym.Uniq() {
- yyerrorl(defn.Pos, "%v repeated on left side of :=", n.Sym)
+ if !n.Sym().Uniq() {
+ base.ErrorfAt(defn.Pos(), "%v repeated on left side of :=", n.Sym())
n.SetDiag(true)
nerr++
continue
}
- n.Sym.SetUniq(false)
- if n.Sym.Block == types.Block {
+ n.Sym().SetUniq(false)
+ if n.Sym().Block == types.Block {
continue
}
nnew++
- n = newname(n.Sym)
+ n := NewName(n.Sym())
declare(n, dclcontext)
- n.Name.Defn = defn
- defn.Ninit.Append(nod(ODCL, n, nil))
+ n.Defn = defn
+ defn.PtrInit().Append(ir.Nod(ir.ODCL, n, nil))
left[i] = n
}
if nnew == 0 && nerr == 0 {
- yyerrorl(defn.Pos, "no new variables on left side of :=")
- }
-}
-
-// declare the arguments in an
-// interface field declaration.
-func ifacedcl(n *Node) {
- if n.Op != ODCLFIELD || n.Left == nil {
- Fatalf("ifacedcl")
- }
-
- if n.Sym.IsBlank() {
- yyerror("methods must have a unique non-blank name")
+ base.ErrorfAt(defn.Pos(), "no new variables on left side of :=")
}
}
// and declare the arguments.
// called in extern-declaration context
// returns in auto-declaration context.
-func funchdr(n *Node) {
+func funchdr(fn *ir.Func) {
// change the declaration context from extern to auto
funcStack = append(funcStack, funcStackEnt{Curfn, dclcontext})
- Curfn = n
- dclcontext = PAUTO
+ Curfn = fn
+ dclcontext = ir.PAUTO
types.Markdcl()
- if n.Func.Nname != nil {
- funcargs(n.Func.Nname.Name.Param.Ntype)
- } else if n.Func.Ntype != nil {
- funcargs(n.Func.Ntype)
+ if fn.Nname.Ntype != nil {
+ funcargs(fn.Nname.Ntype.(*ir.FuncType))
} else {
- funcargs2(n.Type)
+ funcargs2(fn.Type())
}
}
-func funcargs(nt *Node) {
- if nt.Op != OTFUNC {
- Fatalf("funcargs %v", nt.Op)
+func funcargs(nt *ir.FuncType) {
+ if nt.Op() != ir.OTFUNC {
+ base.Fatalf("funcargs %v", nt.Op())
}
// re-start the variable generation number
// TODO(mdempsky): This is ugly, and only necessary because
// esc.go uses Vargen to figure out result parameters' index
// within the result tuple.
- vargen = nt.Rlist.Len()
+ vargen = len(nt.Results)
// declare the receiver and in arguments.
- if nt.Left != nil {
- funcarg(nt.Left, PPARAM)
+ if nt.Recv != nil {
+ funcarg(nt.Recv, ir.PPARAM)
}
- for _, n := range nt.List.Slice() {
- funcarg(n, PPARAM)
+ for _, n := range nt.Params {
+ funcarg(n, ir.PPARAM)
}
oldvargen := vargen
vargen = 0
// declare the out arguments.
- gen := nt.List.Len()
- for _, n := range nt.Rlist.Slice() {
+ gen := len(nt.Params)
+ for _, n := range nt.Results {
if n.Sym == nil {
// Name so that escape analysis can track it. ~r stands for 'result'.
n.Sym = lookupN("~r", gen)
gen++
}
- funcarg(n, PPARAMOUT)
+ funcarg(n, ir.PPARAMOUT)
}
vargen = oldvargen
}
-func funcarg(n *Node, ctxt Class) {
- if n.Op != ODCLFIELD {
- Fatalf("funcarg %v", n.Op)
- }
+func funcarg(n *ir.Field, ctxt ir.Class) {
if n.Sym == nil {
return
}
- n.Right = newnamel(n.Pos, n.Sym)
- n.Right.Name.Param.Ntype = n.Left
- n.Right.SetIsDDD(n.IsDDD())
- declare(n.Right, ctxt)
+ name := ir.NewNameAt(n.Pos, n.Sym)
+ n.Decl = name
+ name.Ntype = n.Ntype
+ name.SetIsDDD(n.IsDDD)
+ declare(name, ctxt)
vargen++
- n.Right.Name.Vargen = int32(vargen)
+ n.Decl.Vargen = int32(vargen)
}
// Same as funcargs, except run over an already constructed TFUNC.
// This happens during import, where the hidden_fndcl rule has
// used functype directly to parse the function's type.
func funcargs2(t *types.Type) {
- if t.Etype != TFUNC {
- Fatalf("funcargs2 %v", t)
+ if t.Kind() != types.TFUNC {
+ base.Fatalf("funcargs2 %v", t)
}
for _, f := range t.Recvs().Fields().Slice() {
- funcarg2(f, PPARAM)
+ funcarg2(f, ir.PPARAM)
}
for _, f := range t.Params().Fields().Slice() {
- funcarg2(f, PPARAM)
+ funcarg2(f, ir.PPARAM)
}
for _, f := range t.Results().Fields().Slice() {
- funcarg2(f, PPARAMOUT)
+ funcarg2(f, ir.PPARAMOUT)
}
}
-func funcarg2(f *types.Field, ctxt Class) {
+func funcarg2(f *types.Field, ctxt ir.Class) {
if f.Sym == nil {
return
}
- n := newnamel(f.Pos, f.Sym)
- f.Nname = asTypesNode(n)
- n.Type = f.Type
+ n := ir.NewNameAt(f.Pos, f.Sym)
+ f.Nname = n
+ n.SetType(f.Type)
n.SetIsDDD(f.IsDDD())
declare(n, ctxt)
}
var funcStack []funcStackEnt // stack of previous values of Curfn/dclcontext
type funcStackEnt struct {
- curfn *Node
- dclcontext Class
+ curfn *ir.Func
+ dclcontext ir.Class
+}
+
+func CheckFuncStack() {
+ if len(funcStack) != 0 {
+ base.Fatalf("funcStack is non-empty: %v", len(funcStack))
+ }
}
// finish the body.
return
}
- if t.Sym == nil && t.IsPtr() {
+ if t.Sym() == nil && t.IsPtr() {
t = t.Elem()
if t.IsInterface() {
- yyerror("embedded type cannot be a pointer to interface")
+ base.Errorf("embedded type cannot be a pointer to interface")
}
}
if t.IsPtr() || t.IsUnsafePtr() {
- yyerror("embedded type cannot be a pointer")
- } else if t.Etype == TFORW && !t.ForwardType().Embedlineno.IsKnown() {
- t.ForwardType().Embedlineno = lineno
- }
-}
-
-func structfield(n *Node) *types.Field {
- lno := lineno
- lineno = n.Pos
-
- if n.Op != ODCLFIELD {
- Fatalf("structfield: oops %v\n", n)
- }
-
- f := types.NewField()
- f.Pos = n.Pos
- f.Sym = n.Sym
-
- if n.Left != nil {
- n.Left = typecheck(n.Left, ctxType)
- n.Type = n.Left.Type
- n.Left = nil
+ base.Errorf("embedded type cannot be a pointer")
+ } else if t.Kind() == types.TFORW && !t.ForwardType().Embedlineno.IsKnown() {
+ t.ForwardType().Embedlineno = base.Pos
}
-
- f.Type = n.Type
- if f.Type == nil {
- f.SetBroke(true)
- }
-
- if n.Embedded() {
- checkembeddedtype(n.Type)
- f.Embedded = 1
- } else {
- f.Embedded = 0
- }
-
- switch u := n.Val().U.(type) {
- case string:
- f.Note = u
- default:
- yyerror("field tag must be a string")
- case nil:
- // no-op
- }
-
- lineno = lno
- return f
}
// checkdupfields emits errors for duplicately named fields or methods in
continue
}
if seen[f.Sym] {
- yyerrorl(f.Pos, "duplicate %s %s", what, f.Sym.Name)
+ base.ErrorfAt(f.Pos, "duplicate %s %s", what, f.Sym.Name)
continue
}
seen[f.Sym] = true
// convert a parsed id/type list into
// a type for struct/interface/arglist
-func tostruct(l []*Node) *types.Type {
- t := types.New(TSTRUCT)
+func tostruct(l []*ir.Field) *types.Type {
+ lno := base.Pos
fields := make([]*types.Field, len(l))
for i, n := range l {
- f := structfield(n)
- if f.Broke() {
- t.SetBroke(true)
- }
- fields[i] = f
- }
- t.SetFields(fields)
-
- checkdupfields("field", t.FieldSlice())
+ base.Pos = n.Pos
- if !t.Broke() {
- checkwidth(t)
- }
-
- return t
-}
-
-func tofunargs(l []*Node, funarg types.Funarg) *types.Type {
- t := types.New(TSTRUCT)
- t.StructType().Funarg = funarg
-
- fields := make([]*types.Field, len(l))
- for i, n := range l {
- f := structfield(n)
- f.SetIsDDD(n.IsDDD())
- if n.Right != nil {
- n.Right.Type = f.Type
- f.Nname = asTypesNode(n.Right)
+ if n.Ntype != nil {
+ n.Type = typecheckNtype(n.Ntype).Type()
+ n.Ntype = nil
}
- if f.Broke() {
- t.SetBroke(true)
+ f := types.NewField(n.Pos, n.Sym, n.Type)
+ if n.Embedded {
+ checkembeddedtype(n.Type)
+ f.Embedded = 1
}
+ f.Note = n.Note
fields[i] = f
}
- t.SetFields(fields)
- return t
-}
+ checkdupfields("field", fields)
-func tofunargsfield(fields []*types.Field, funarg types.Funarg) *types.Type {
- t := types.New(TSTRUCT)
- t.StructType().Funarg = funarg
- t.SetFields(fields)
- return t
+ base.Pos = lno
+ return types.NewStruct(types.LocalPkg, fields)
}
-func interfacefield(n *Node) *types.Field {
- lno := lineno
- lineno = n.Pos
-
- if n.Op != ODCLFIELD {
- Fatalf("interfacefield: oops %v\n", n)
+func tointerface(nmethods []*ir.Field) *types.Type {
+ if len(nmethods) == 0 {
+ return types.Types[types.TINTER]
}
- if n.Val().Ctype() != CTxxx {
- yyerror("interface method cannot have annotation")
- }
-
- // MethodSpec = MethodName Signature | InterfaceTypeName .
- //
- // If Sym != nil, then Sym is MethodName and Left is Signature.
- // Otherwise, Left is InterfaceTypeName.
+ lno := base.Pos
- if n.Left != nil {
- n.Left = typecheck(n.Left, ctxType)
- n.Type = n.Left.Type
- n.Left = nil
- }
-
- f := types.NewField()
- f.Pos = n.Pos
- f.Sym = n.Sym
- f.Type = n.Type
- if f.Type == nil {
- f.SetBroke(true)
- }
-
- lineno = lno
- return f
-}
-
-func tointerface(l []*Node) *types.Type {
- if len(l) == 0 {
- return types.Types[TINTER]
- }
- t := types.New(TINTER)
- var fields []*types.Field
- for _, n := range l {
- f := interfacefield(n)
- if f.Broke() {
- t.SetBroke(true)
+ methods := make([]*types.Field, len(nmethods))
+ for i, n := range nmethods {
+ base.Pos = n.Pos
+ if n.Ntype != nil {
+ n.Type = typecheckNtype(n.Ntype).Type()
+ n.Ntype = nil
}
- fields = append(fields, f)
+ methods[i] = types.NewField(n.Pos, n.Sym, n.Type)
}
- t.SetInterface(fields)
- return t
+
+ base.Pos = lno
+ return types.NewInterface(types.LocalPkg, methods)
}
-func fakeRecv() *Node {
+func fakeRecv() *ir.Field {
return anonfield(types.FakeRecvType())
}
func fakeRecvField() *types.Field {
- f := types.NewField()
- f.Type = types.FakeRecvType()
- return f
+ return types.NewField(src.NoXPos, nil, types.FakeRecvType())
}
// isifacemethod reports whether (field) m is
}
// turn a parsed function declaration into a type
-func functype(this *Node, in, out []*Node) *types.Type {
- t := types.New(TFUNC)
+func functype(nrecv *ir.Field, nparams, nresults []*ir.Field) *types.Type {
+ funarg := func(n *ir.Field) *types.Field {
+ lno := base.Pos
+ base.Pos = n.Pos
- var rcvr []*Node
- if this != nil {
- rcvr = []*Node{this}
- }
- t.FuncType().Receiver = tofunargs(rcvr, types.FunargRcvr)
- t.FuncType().Params = tofunargs(in, types.FunargParams)
- t.FuncType().Results = tofunargs(out, types.FunargResults)
+ if n.Ntype != nil {
+ n.Type = typecheckNtype(n.Ntype).Type()
+ n.Ntype = nil
+ }
- checkdupfields("argument", t.Recvs().FieldSlice(), t.Params().FieldSlice(), t.Results().FieldSlice())
+ f := types.NewField(n.Pos, n.Sym, n.Type)
+ f.SetIsDDD(n.IsDDD)
+ if n.Decl != nil {
+ n.Decl.SetType(f.Type)
+ f.Nname = n.Decl
+ }
- if t.Recvs().Broke() || t.Results().Broke() || t.Params().Broke() {
- t.SetBroke(true)
+ base.Pos = lno
+ return f
}
-
- t.FuncType().Outnamed = t.NumResults() > 0 && origSym(t.Results().Field(0).Sym) != nil
-
- return t
-}
-
-func functypefield(this *types.Field, in, out []*types.Field) *types.Type {
- t := types.New(TFUNC)
-
- var rcvr []*types.Field
- if this != nil {
- rcvr = []*types.Field{this}
+ funargs := func(nn []*ir.Field) []*types.Field {
+ res := make([]*types.Field, len(nn))
+ for i, n := range nn {
+ res[i] = funarg(n)
+ }
+ return res
}
- t.FuncType().Receiver = tofunargsfield(rcvr, types.FunargRcvr)
- t.FuncType().Params = tofunargsfield(in, types.FunargParams)
- t.FuncType().Results = tofunargsfield(out, types.FunargResults)
- t.FuncType().Outnamed = t.NumResults() > 0 && origSym(t.Results().Field(0).Sym) != nil
+ var recv *types.Field
+ if nrecv != nil {
+ recv = funarg(nrecv)
+ }
+ t := types.NewSignature(types.LocalPkg, recv, funargs(nparams), funargs(nresults))
+ checkdupfields("argument", t.Recvs().FieldSlice(), t.Params().FieldSlice(), t.Results().FieldSlice())
return t
}
-// origSym returns the original symbol written by the user.
-func origSym(s *types.Sym) *types.Sym {
- if s == nil {
- return nil
- }
-
- if len(s.Name) > 1 && s.Name[0] == '~' {
- switch s.Name[1] {
- case 'r': // originally an unnamed result
- return nil
- case 'b': // originally the blank identifier _
- // TODO(mdempsky): Does s.Pkg matter here?
- return nblank.Sym
- }
- return s
- }
-
- if strings.HasPrefix(s.Name, ".anon") {
- // originally an unnamed or _ name (see subr.go: structargs)
- return nil
- }
-
- return s
+func hasNamedResults(fn *ir.Func) bool {
+ typ := fn.Type()
+ return typ.NumResults() > 0 && types.OrigSym(typ.Results().Field(0).Sym) != nil
}
// methodSym returns the method symbol representing a method name
// start with a letter, number, or period.
func methodSymSuffix(recv *types.Type, msym *types.Sym, suffix string) *types.Sym {
if msym.IsBlank() {
- Fatalf("blank method name")
+ base.Fatalf("blank method name")
}
- rsym := recv.Sym
+ rsym := recv.Sym()
if recv.IsPtr() {
if rsym != nil {
- Fatalf("declared pointer receiver type: %v", recv)
+ base.Fatalf("declared pointer receiver type: %v", recv)
}
- rsym = recv.Elem().Sym
+ rsym = recv.Elem().Sym()
}
// Find the package the receiver type appeared in. For
// - msym is the method symbol
// - t is function type (with receiver)
// Returns a pointer to the existing or added Field; or nil if there's an error.
-func addmethod(msym *types.Sym, t *types.Type, local, nointerface bool) *types.Field {
+func addmethod(n *ir.Func, msym *types.Sym, t *types.Type, local, nointerface bool) *types.Field {
if msym == nil {
- Fatalf("no method symbol")
+ base.Fatalf("no method symbol")
}
// get parent type sym
rf := t.Recv() // ptr to this structure
if rf == nil {
- yyerror("missing receiver")
+ base.Errorf("missing receiver")
return nil
}
mt := methtype(rf.Type)
- if mt == nil || mt.Sym == nil {
+ if mt == nil || mt.Sym() == nil {
pa := rf.Type
t := pa
if t != nil && t.IsPtr() {
- if t.Sym != nil {
- yyerror("invalid receiver type %v (%v is a pointer type)", pa, t)
+ if t.Sym() != nil {
+ base.Errorf("invalid receiver type %v (%v is a pointer type)", pa, t)
return nil
}
t = t.Elem()
switch {
case t == nil || t.Broke():
// rely on typecheck having complained before
- case t.Sym == nil:
- yyerror("invalid receiver type %v (%v is not a defined type)", pa, t)
+ case t.Sym() == nil:
+ base.Errorf("invalid receiver type %v (%v is not a defined type)", pa, t)
case t.IsPtr():
- yyerror("invalid receiver type %v (%v is a pointer type)", pa, t)
+ base.Errorf("invalid receiver type %v (%v is a pointer type)", pa, t)
case t.IsInterface():
- yyerror("invalid receiver type %v (%v is an interface type)", pa, t)
+ base.Errorf("invalid receiver type %v (%v is an interface type)", pa, t)
default:
// Should have picked off all the reasons above,
// but just in case, fall back to generic error.
- yyerror("invalid receiver type %v (%L / %L)", pa, pa, t)
+ base.Errorf("invalid receiver type %v (%L / %L)", pa, pa, t)
}
return nil
}
- if local && mt.Sym.Pkg != localpkg {
- yyerror("cannot define new methods on non-local type %v", mt)
+ if local && mt.Sym().Pkg != types.LocalPkg {
+ base.Errorf("cannot define new methods on non-local type %v", mt)
return nil
}
if mt.IsStruct() {
for _, f := range mt.Fields().Slice() {
if f.Sym == msym {
- yyerror("type %v has both field and method named %v", mt, msym)
+ base.Errorf("type %v has both field and method named %v", mt, msym)
f.SetBroke(true)
return nil
}
// types.Identical only checks that incoming and result parameters match,
// so explicitly check that the receiver parameters match too.
if !types.Identical(t, f.Type) || !types.Identical(t.Recv().Type, f.Type.Recv().Type) {
- yyerror("method redeclared: %v.%v\n\t%v\n\t%v", mt, msym, f.Type, t)
+ base.Errorf("method redeclared: %v.%v\n\t%v\n\t%v", mt, msym, f.Type, t)
}
return f
}
- f := types.NewField()
- f.Pos = lineno
- f.Sym = msym
- f.Type = t
+ f := types.NewField(base.Pos, msym, t)
+ f.Nname = n.Nname
f.SetNointerface(nointerface)
mt.Methods().Append(f)
// When dynamically linking, the necessary function
// symbols will be created explicitly with makefuncsym.
// See the makefuncsym comment for details.
- if !Ctxt.Flag_dynlink && !existed {
+ if !base.Ctxt.Flag_dynlink && !existed {
funcsyms = append(funcsyms, s)
}
funcsymsmu.Unlock()
// So instead, when dynamic linking, we only create
// the s·f stubs in s's package.
func makefuncsym(s *types.Sym) {
- if !Ctxt.Flag_dynlink {
- Fatalf("makefuncsym dynlink")
+ if !base.Ctxt.Flag_dynlink {
+ base.Fatalf("makefuncsym dynlink")
}
if s.IsBlank() {
return
}
- if compiling_runtime && (s.Name == "getg" || s.Name == "getclosureptr" || s.Name == "getcallerpc" || s.Name == "getcallersp") {
+ if base.Flag.CompilingRuntime && (s.Name == "getg" || s.Name == "getclosureptr" || s.Name == "getcallerpc" || s.Name == "getcallersp") {
// runtime.getg(), getclosureptr(), getcallerpc(), and
// getcallersp() are not real functions and so do not
// get funcsyms.
}
// setNodeNameFunc marks a node as a function.
-func setNodeNameFunc(n *Node) {
- if n.Op != ONAME || n.Class() != Pxxx {
- Fatalf("expected ONAME/Pxxx node, got %v", n)
+func setNodeNameFunc(n *ir.Name) {
+ if n.Op() != ir.ONAME || n.Class() != ir.Pxxx {
+ base.Fatalf("expected ONAME/Pxxx node, got %v", n)
}
- n.SetClass(PFUNC)
- n.Sym.SetFunc(true)
+ n.SetClass(ir.PFUNC)
+ n.Sym().SetFunc(true)
}
-func dclfunc(sym *types.Sym, tfn *Node) *Node {
- if tfn.Op != OTFUNC {
- Fatalf("expected OTFUNC node, got %v", tfn)
+func dclfunc(sym *types.Sym, tfn ir.Ntype) *ir.Func {
+ if tfn.Op() != ir.OTFUNC {
+ base.Fatalf("expected OTFUNC node, got %v", tfn)
}
- fn := nod(ODCLFUNC, nil, nil)
- fn.Func.Nname = newfuncnamel(lineno, sym)
- fn.Func.Nname.Name.Defn = fn
- fn.Func.Nname.Name.Param.Ntype = tfn
- setNodeNameFunc(fn.Func.Nname)
+ fn := ir.NewFunc(base.Pos)
+ fn.Nname = newFuncNameAt(base.Pos, sym, fn)
+ fn.Nname.Defn = fn
+ fn.Nname.Ntype = tfn
+ setNodeNameFunc(fn.Nname)
funchdr(fn)
- fn.Func.Nname.Name.Param.Ntype = typecheck(fn.Func.Nname.Name.Param.Ntype, ctxType)
+ fn.Nname.Ntype = typecheckNtype(fn.Nname.Ntype)
return fn
}
// extraCalls contains extra function calls that may not be
// visible during later analysis. It maps from the ODCLFUNC of
// the caller to a list of callees.
- extraCalls map[*Node][]nowritebarrierrecCall
+ extraCalls map[*ir.Func][]nowritebarrierrecCall
// curfn is the current function during AST walks.
- curfn *Node
+ curfn *ir.Func
}
type nowritebarrierrecCall struct {
- target *Node // ODCLFUNC of caller or callee
+ target *ir.Func // caller or callee
lineno src.XPos // line of call
}
-type nowritebarrierrecCallSym struct {
- target *obj.LSym // LSym of callee
- lineno src.XPos // line of call
-}
-
// newNowritebarrierrecChecker creates a nowritebarrierrecChecker. It
// must be called before transformclosure and walk.
func newNowritebarrierrecChecker() *nowritebarrierrecChecker {
c := &nowritebarrierrecChecker{
- extraCalls: make(map[*Node][]nowritebarrierrecCall),
+ extraCalls: make(map[*ir.Func][]nowritebarrierrecCall),
}
// Find all systemstack calls and record their targets. In
// important to handle it for this check, so we model it
// directly. This has to happen before transformclosure since
// it's a lot harder to work out the argument after.
- for _, n := range xtop {
- if n.Op != ODCLFUNC {
+ for _, n := range Target.Decls {
+ if n.Op() != ir.ODCLFUNC {
continue
}
- c.curfn = n
- inspect(n, c.findExtraCalls)
+ c.curfn = n.(*ir.Func)
+ ir.Visit(n, c.findExtraCalls)
}
c.curfn = nil
return c
}
-func (c *nowritebarrierrecChecker) findExtraCalls(n *Node) bool {
- if n.Op != OCALLFUNC {
- return true
+func (c *nowritebarrierrecChecker) findExtraCalls(nn ir.Node) {
+ if nn.Op() != ir.OCALLFUNC {
+ return
}
- fn := n.Left
- if fn == nil || fn.Op != ONAME || fn.Class() != PFUNC || fn.Name.Defn == nil {
- return true
+ n := nn.(*ir.CallExpr)
+ if n.Left() == nil || n.Left().Op() != ir.ONAME {
+ return
}
- if !isRuntimePkg(fn.Sym.Pkg) || fn.Sym.Name != "systemstack" {
- return true
+ fn := n.Left().(*ir.Name)
+ if fn.Class() != ir.PFUNC || fn.Name().Defn == nil {
+ return
+ }
+ if !isRuntimePkg(fn.Sym().Pkg) || fn.Sym().Name != "systemstack" {
+ return
}
- var callee *Node
- arg := n.List.First()
- switch arg.Op {
- case ONAME:
- callee = arg.Name.Defn
- case OCLOSURE:
- callee = arg.Func.Closure
+ var callee *ir.Func
+ arg := n.List().First()
+ switch arg.Op() {
+ case ir.ONAME:
+ callee = arg.Name().Defn.(*ir.Func)
+ case ir.OCLOSURE:
+ arg := arg.(*ir.ClosureExpr)
+ callee = arg.Func()
default:
- Fatalf("expected ONAME or OCLOSURE node, got %+v", arg)
+ base.Fatalf("expected ONAME or OCLOSURE node, got %+v", arg)
}
- if callee.Op != ODCLFUNC {
- Fatalf("expected ODCLFUNC node, got %+v", callee)
+ if callee.Op() != ir.ODCLFUNC {
+ base.Fatalf("expected ODCLFUNC node, got %+v", callee)
}
- c.extraCalls[c.curfn] = append(c.extraCalls[c.curfn], nowritebarrierrecCall{callee, n.Pos})
- return true
+ c.extraCalls[c.curfn] = append(c.extraCalls[c.curfn], nowritebarrierrecCall{callee, n.Pos()})
}
// recordCall records a call from ODCLFUNC node "from", to function
// because that's all we know after we start SSA.
//
// This can be called concurrently for different from Nodes.
-func (c *nowritebarrierrecChecker) recordCall(from *Node, to *obj.LSym, pos src.XPos) {
- if from.Op != ODCLFUNC {
- Fatalf("expected ODCLFUNC, got %v", from)
- }
- // We record this information on the *Func so this is
- // concurrent-safe.
- fn := from.Func
- if fn.nwbrCalls == nil {
- fn.nwbrCalls = new([]nowritebarrierrecCallSym)
+func (c *nowritebarrierrecChecker) recordCall(fn *ir.Func, to *obj.LSym, pos src.XPos) {
+ // We record this information on the *Func so this is concurrent-safe.
+ if fn.NWBRCalls == nil {
+ fn.NWBRCalls = new([]ir.SymAndPos)
}
- *fn.nwbrCalls = append(*fn.nwbrCalls, nowritebarrierrecCallSym{to, pos})
+ *fn.NWBRCalls = append(*fn.NWBRCalls, ir.SymAndPos{Sym: to, Pos: pos})
}
func (c *nowritebarrierrecChecker) check() {
// capture all calls created by lowering, but this means we
// only get to see the obj.LSyms of calls. symToFunc lets us
// get back to the ODCLFUNCs.
- symToFunc := make(map[*obj.LSym]*Node)
+ symToFunc := make(map[*obj.LSym]*ir.Func)
// funcs records the back-edges of the BFS call graph walk. It
// maps from the ODCLFUNC of each function that must not have
// write barriers to the call that inhibits them. Functions
// that are directly marked go:nowritebarrierrec are in this
// map with a zero-valued nowritebarrierrecCall. This also
// acts as the set of marks for the BFS of the call graph.
- funcs := make(map[*Node]nowritebarrierrecCall)
+ funcs := make(map[*ir.Func]nowritebarrierrecCall)
// q is the queue of ODCLFUNC Nodes to visit in BFS order.
- var q nodeQueue
+ var q ir.NameQueue
- for _, n := range xtop {
- if n.Op != ODCLFUNC {
+ for _, n := range Target.Decls {
+ if n.Op() != ir.ODCLFUNC {
continue
}
+ fn := n.(*ir.Func)
- symToFunc[n.Func.lsym] = n
+ symToFunc[fn.LSym] = fn
// Make nowritebarrierrec functions BFS roots.
- if n.Func.Pragma&Nowritebarrierrec != 0 {
- funcs[n] = nowritebarrierrecCall{}
- q.pushRight(n)
+ if fn.Pragma&ir.Nowritebarrierrec != 0 {
+ funcs[fn] = nowritebarrierrecCall{}
+ q.PushRight(fn.Nname)
}
// Check go:nowritebarrier functions.
- if n.Func.Pragma&Nowritebarrier != 0 && n.Func.WBPos.IsKnown() {
- yyerrorl(n.Func.WBPos, "write barrier prohibited")
+ if fn.Pragma&ir.Nowritebarrier != 0 && fn.WBPos.IsKnown() {
+ base.ErrorfAt(fn.WBPos, "write barrier prohibited")
}
}
// Perform a BFS of the call graph from all
// go:nowritebarrierrec functions.
- enqueue := func(src, target *Node, pos src.XPos) {
- if target.Func.Pragma&Yeswritebarrierrec != 0 {
+ enqueue := func(src, target *ir.Func, pos src.XPos) {
+ if target.Pragma&ir.Yeswritebarrierrec != 0 {
// Don't flow into this function.
return
}
// Record the path.
funcs[target] = nowritebarrierrecCall{target: src, lineno: pos}
- q.pushRight(target)
+ q.PushRight(target.Nname)
}
- for !q.empty() {
- fn := q.popLeft()
+ for !q.Empty() {
+ fn := q.PopLeft().Func()
// Check fn.
- if fn.Func.WBPos.IsKnown() {
+ if fn.WBPos.IsKnown() {
var err bytes.Buffer
call := funcs[fn]
for call.target != nil {
- fmt.Fprintf(&err, "\n\t%v: called by %v", linestr(call.lineno), call.target.Func.Nname)
+ fmt.Fprintf(&err, "\n\t%v: called by %v", base.FmtPos(call.lineno), call.target.Nname)
call = funcs[call.target]
}
- yyerrorl(fn.Func.WBPos, "write barrier prohibited by caller; %v%s", fn.Func.Nname, err.String())
+ base.ErrorfAt(fn.WBPos, "write barrier prohibited by caller; %v%s", fn.Nname, err.String())
continue
}
for _, callee := range c.extraCalls[fn] {
enqueue(fn, callee.target, callee.lineno)
}
- if fn.Func.nwbrCalls == nil {
+ if fn.NWBRCalls == nil {
continue
}
- for _, callee := range *fn.Func.nwbrCalls {
- target := symToFunc[callee.target]
+ for _, callee := range *fn.NWBRCalls {
+ target := symToFunc[callee.Sym]
if target != nil {
- enqueue(fn, target, callee.lineno)
+ enqueue(fn, target, callee.Pos)
}
}
}
}
for _, dep := range strings.Fields(strings.Trim(string(out), "[]")) {
switch dep {
- case "go/build", "go/token":
+ case "go/build", "go/scanner":
t.Errorf("undesired dependency on %q", dep)
}
}
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/internal/dwarf"
"cmd/internal/obj"
"cmd/internal/src"
func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls {
var inlcalls dwarf.InlCalls
- if Debug_gendwarfinl != 0 {
- Ctxt.Logf("assembling DWARF inlined routine info for %v\n", fnsym.Name)
+ if base.Debug.DwarfInl != 0 {
+ base.Ctxt.Logf("assembling DWARF inlined routine info for %v\n", fnsym.Name)
}
// This maps inline index (from Ctxt.InlTree) to index in inlcalls.Calls
}
m = makePreinlineDclMap(fnsym)
} else {
- ifnlsym := Ctxt.InlTree.InlinedFunction(int(ii - 1))
+ ifnlsym := base.Ctxt.InlTree.InlinedFunction(int(ii - 1))
m = makePreinlineDclMap(ifnlsym)
}
}
// Debugging
- if Debug_gendwarfinl != 0 {
+ if base.Debug.DwarfInl != 0 {
dumpInlCalls(inlcalls)
dumpInlVars(dwVars)
}
// abstract function DIE for an inlined routine imported from a
// previously compiled package.
func genAbstractFunc(fn *obj.LSym) {
- ifn := Ctxt.DwFixups.GetPrecursorFunc(fn)
+ ifn := base.Ctxt.DwFixups.GetPrecursorFunc(fn)
if ifn == nil {
- Ctxt.Diag("failed to locate precursor fn for %v", fn)
+ base.Ctxt.Diag("failed to locate precursor fn for %v", fn)
return
}
- if Debug_gendwarfinl != 0 {
- Ctxt.Logf("DwarfAbstractFunc(%v)\n", fn.Name)
+ _ = ifn.(*ir.Func)
+ if base.Debug.DwarfInl != 0 {
+ base.Ctxt.Logf("DwarfAbstractFunc(%v)\n", fn.Name)
}
- Ctxt.DwarfAbstractFunc(ifn, fn, myimportpath)
+ base.Ctxt.DwarfAbstractFunc(ifn, fn, base.Ctxt.Pkgpath)
}
// Undo any versioning performed when a name was written
dcl := preInliningDcls(fnsym)
m := make(map[varPos]int)
for i, n := range dcl {
- pos := Ctxt.InnermostPos(n.Pos)
+ pos := base.Ctxt.InnermostPos(n.Pos())
vp := varPos{
- DeclName: unversion(n.Sym.Name),
+ DeclName: unversion(n.Sym().Name),
DeclFile: pos.RelFilename(),
DeclLine: pos.RelLine(),
DeclCol: pos.Col(),
}
if _, found := m[vp]; found {
- Fatalf("child dcl collision on symbol %s within %v\n", n.Sym.Name, fnsym.Name)
+ base.Fatalf("child dcl collision on symbol %s within %v\n", n.Sym().Name, fnsym.Name)
}
m[vp] = i
}
// is one. We do this first so that parents appear before their
// children in the resulting table.
parCallIdx := -1
- parInlIdx := Ctxt.InlTree.Parent(inlIdx)
+ parInlIdx := base.Ctxt.InlTree.Parent(inlIdx)
if parInlIdx >= 0 {
parCallIdx = insertInlCall(dwcalls, parInlIdx, imap)
}
// Create new entry for this inline
- inlinedFn := Ctxt.InlTree.InlinedFunction(inlIdx)
- callXPos := Ctxt.InlTree.CallPos(inlIdx)
- absFnSym := Ctxt.DwFixups.AbsFuncDwarfSym(inlinedFn)
- pb := Ctxt.PosTable.Pos(callXPos).Base()
- callFileSym := Ctxt.Lookup(pb.SymFilename())
+ inlinedFn := base.Ctxt.InlTree.InlinedFunction(inlIdx)
+ callXPos := base.Ctxt.InlTree.CallPos(inlIdx)
+ absFnSym := base.Ctxt.DwFixups.AbsFuncDwarfSym(inlinedFn)
+ pb := base.Ctxt.PosTable.Pos(callXPos).Base()
+ callFileSym := base.Ctxt.Lookup(pb.SymFilename())
ic := dwarf.InlCall{
InlIndex: inlIdx,
CallFile: callFileSym,
// the index for a node from the inlined body of D will refer to the
// call to D from C. Whew.
func posInlIndex(xpos src.XPos) int {
- pos := Ctxt.PosTable.Pos(xpos)
+ pos := base.Ctxt.PosTable.Pos(xpos)
if b := pos.Base(); b != nil {
ii := b.InliningIndex()
if ii >= 0 {
// Append range to correct inlined call
callIdx, found := imap[ii]
if !found {
- Fatalf("can't find inlIndex %d in imap for prog at %d\n", ii, start)
+ base.Fatalf("can't find inlIndex %d in imap for prog at %d\n", ii, start)
}
call := &calls[callIdx]
call.Ranges = append(call.Ranges, dwarf.Range{Start: start, End: end})
func dumpInlCall(inlcalls dwarf.InlCalls, idx, ilevel int) {
for i := 0; i < ilevel; i++ {
- Ctxt.Logf(" ")
+ base.Ctxt.Logf(" ")
}
ic := inlcalls.Calls[idx]
- callee := Ctxt.InlTree.InlinedFunction(ic.InlIndex)
- Ctxt.Logf(" %d: II:%d (%s) V: (", idx, ic.InlIndex, callee.Name)
+ callee := base.Ctxt.InlTree.InlinedFunction(ic.InlIndex)
+ base.Ctxt.Logf(" %d: II:%d (%s) V: (", idx, ic.InlIndex, callee.Name)
for _, f := range ic.InlVars {
- Ctxt.Logf(" %v", f.Name)
+ base.Ctxt.Logf(" %v", f.Name)
}
- Ctxt.Logf(" ) C: (")
+ base.Ctxt.Logf(" ) C: (")
for _, k := range ic.Children {
- Ctxt.Logf(" %v", k)
+ base.Ctxt.Logf(" %v", k)
}
- Ctxt.Logf(" ) R:")
+ base.Ctxt.Logf(" ) R:")
for _, r := range ic.Ranges {
- Ctxt.Logf(" [%d,%d)", r.Start, r.End)
+ base.Ctxt.Logf(" [%d,%d)", r.Start, r.End)
}
- Ctxt.Logf("\n")
+ base.Ctxt.Logf("\n")
for _, k := range ic.Children {
dumpInlCall(inlcalls, k, ilevel+1)
}
if dwv.IsInAbstract {
ia = 1
}
- Ctxt.Logf("V%d: %s CI:%d II:%d IA:%d %s\n", i, dwv.Name, dwv.ChildIndex, dwv.InlIndex-1, ia, typ)
+ base.Ctxt.Logf("V%d: %s CI:%d II:%d IA:%d %s\n", i, dwv.Name, dwv.ChildIndex, dwv.InlIndex-1, ia, typ)
}
}
// Callee
ic := inlCalls.Calls[idx]
- callee := Ctxt.InlTree.InlinedFunction(ic.InlIndex).Name
+ callee := base.Ctxt.InlTree.InlinedFunction(ic.InlIndex).Name
calleeRanges := ic.Ranges
// Caller
parentRanges := []dwarf.Range{dwarf.Range{Start: int64(0), End: funcSize}}
if parentIdx != -1 {
pic := inlCalls.Calls[parentIdx]
- caller = Ctxt.InlTree.InlinedFunction(pic.InlIndex).Name
+ caller = base.Ctxt.InlTree.InlinedFunction(pic.InlIndex).Name
parentRanges = pic.Ranges
}
// Callee ranges contained in caller ranges?
c, m := rangesContainsAll(parentRanges, calleeRanges)
if !c {
- Fatalf("** malformed inlined routine range in %s: caller %s callee %s II=%d %s\n", funcName, caller, callee, idx, m)
+ base.Fatalf("** malformed inlined routine range in %s: caller %s callee %s II=%d %s\n", funcName, caller, callee, idx, m)
}
// Now visit kids
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/syntax"
"cmd/compile/internal/types"
"cmd/internal/obj"
- "encoding/json"
- "io/ioutil"
- "log"
+
"path"
"sort"
"strconv"
"strings"
)
-var embedlist []*Node
-
-var embedCfg struct {
- Patterns map[string][]string
- Files map[string]string
-}
-
-func readEmbedCfg(file string) {
- data, err := ioutil.ReadFile(file)
- if err != nil {
- log.Fatalf("-embedcfg: %v", err)
- }
- if err := json.Unmarshal(data, &embedCfg); err != nil {
- log.Fatalf("%s: %v", file, err)
- }
- if embedCfg.Patterns == nil {
- log.Fatalf("%s: invalid embedcfg: missing Patterns", file)
- }
- if embedCfg.Files == nil {
- log.Fatalf("%s: invalid embedcfg: missing Files", file)
- }
-}
-
const (
embedUnknown = iota
embedBytes
embedFiles
)
-var numLocalEmbed int
-
-func varEmbed(p *noder, names []*Node, typ *Node, exprs []*Node, embeds []PragmaEmbed) (newExprs []*Node) {
+func varEmbed(p *noder, names []*ir.Name, typ ir.Ntype, exprs []ir.Node, embeds []PragmaEmbed) (newExprs []ir.Node) {
haveEmbed := false
for _, decl := range p.file.DeclList {
imp, ok := decl.(*syntax.ImportDecl)
pos := embeds[0].Pos
if !haveEmbed {
- p.yyerrorpos(pos, "invalid go:embed: missing import \"embed\"")
+ p.errorAt(pos, "invalid go:embed: missing import \"embed\"")
return exprs
}
- if embedCfg.Patterns == nil {
- p.yyerrorpos(pos, "invalid go:embed: build system did not supply embed configuration")
+ if base.Flag.Cfg.Embed.Patterns == nil {
+ p.errorAt(pos, "invalid go:embed: build system did not supply embed configuration")
return exprs
}
if len(names) > 1 {
- p.yyerrorpos(pos, "go:embed cannot apply to multiple vars")
+ p.errorAt(pos, "go:embed cannot apply to multiple vars")
return exprs
}
if len(exprs) > 0 {
- p.yyerrorpos(pos, "go:embed cannot apply to var with initializer")
+ p.errorAt(pos, "go:embed cannot apply to var with initializer")
return exprs
}
if typ == nil {
// Should not happen, since len(exprs) == 0 now.
- p.yyerrorpos(pos, "go:embed cannot apply to var without type")
+ p.errorAt(pos, "go:embed cannot apply to var without type")
+ return exprs
+ }
+ if dclcontext != ir.PEXTERN {
+ p.errorAt(pos, "go:embed cannot apply to var inside func")
return exprs
}
- kind := embedKindApprox(typ)
+ v := names[0]
+ Target.Embeds = append(Target.Embeds, v)
+ v.Embed = new([]ir.Embed)
+ for _, e := range embeds {
+ *v.Embed = append(*v.Embed, ir.Embed{Pos: p.makeXPos(e.Pos), Patterns: e.Patterns})
+ }
+ return exprs
+}
+
+func embedFileList(v *ir.Name) []string {
+ kind := embedKind(v.Type())
if kind == embedUnknown {
- p.yyerrorpos(pos, "go:embed cannot apply to var of type %v", typ)
- return exprs
+ base.ErrorfAt(v.Pos(), "go:embed cannot apply to var of type %v", v.Type())
+ return nil
}
// Build list of files to store.
have := make(map[string]bool)
var list []string
- for _, e := range embeds {
+ for _, e := range *v.Embed {
for _, pattern := range e.Patterns {
- files, ok := embedCfg.Patterns[pattern]
+ files, ok := base.Flag.Cfg.Embed.Patterns[pattern]
if !ok {
- p.yyerrorpos(e.Pos, "invalid go:embed: build system did not map pattern: %s", pattern)
+ base.ErrorfAt(e.Pos, "invalid go:embed: build system did not map pattern: %s", pattern)
}
for _, file := range files {
- if embedCfg.Files[file] == "" {
- p.yyerrorpos(e.Pos, "invalid go:embed: build system did not map file: %s", file)
+ if base.Flag.Cfg.Embed.Files[file] == "" {
+ base.ErrorfAt(e.Pos, "invalid go:embed: build system did not map file: %s", file)
continue
}
if !have[file] {
if kind == embedString || kind == embedBytes {
if len(list) > 1 {
- p.yyerrorpos(pos, "invalid go:embed: multiple files for type %v", typ)
- return exprs
+ base.ErrorfAt(v.Pos(), "invalid go:embed: multiple files for type %v", v.Type())
+ return nil
}
}
- v := names[0]
- if dclcontext != PEXTERN {
- numLocalEmbed++
- v = newnamel(v.Pos, lookupN("embed.", numLocalEmbed))
- v.Sym.Def = asTypesNode(v)
- v.Name.Param.Ntype = typ
- v.SetClass(PEXTERN)
- externdcl = append(externdcl, v)
- exprs = []*Node{v}
- }
-
- v.Name.Param.SetEmbedFiles(list)
- embedlist = append(embedlist, v)
- return exprs
+ return list
}
// embedKindApprox determines the kind of embedding variable, approximately.
// The match is approximate because we haven't done scope resolution yet and
// can't tell whether "string" and "byte" really mean "string" and "byte".
// The result must be confirmed later, after type checking, using embedKind.
-func embedKindApprox(typ *Node) int {
- if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == localpkg && myimportpath == "embed")) {
+func embedKindApprox(typ ir.Node) int {
+ if typ.Sym() != nil && typ.Sym().Name == "FS" && (typ.Sym().Pkg.Path == "embed" || (typ.Sym().Pkg == types.LocalPkg && base.Ctxt.Pkgpath == "embed")) {
return embedFiles
}
// These are not guaranteed to match only string and []byte -
// maybe the local package has redefined one of those words.
// But it's the best we can do now during the noder.
// The stricter check happens later, in initEmbed calling embedKind.
- if typ.Sym != nil && typ.Sym.Name == "string" && typ.Sym.Pkg == localpkg {
+ if typ.Sym() != nil && typ.Sym().Name == "string" && typ.Sym().Pkg == types.LocalPkg {
return embedString
}
- if typ.Op == OTARRAY && typ.Left == nil && typ.Right.Sym != nil && typ.Right.Sym.Name == "byte" && typ.Right.Sym.Pkg == localpkg {
- return embedBytes
+ if typ, ok := typ.(*ir.SliceType); ok {
+ if sym := typ.Elem.Sym(); sym != nil && sym.Name == "byte" && sym.Pkg == types.LocalPkg {
+ return embedBytes
+ }
}
return embedUnknown
}
// embedKind determines the kind of embedding variable.
func embedKind(typ *types.Type) int {
- if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == localpkg && myimportpath == "embed")) {
+ if typ.Sym() != nil && typ.Sym().Name == "FS" && (typ.Sym().Pkg.Path == "embed" || (typ.Sym().Pkg == types.LocalPkg && base.Ctxt.Pkgpath == "embed")) {
return embedFiles
}
- if typ == types.Types[TSTRING] {
+ if typ == types.Types[types.TSTRING] {
return embedString
}
- if typ.Sym == nil && typ.IsSlice() && typ.Elem() == types.Bytetype {
+ if typ.Sym() == nil && typ.IsSlice() && typ.Elem() == types.ByteType {
return embedBytes
}
return embedUnknown
}
func dumpembeds() {
- for _, v := range embedlist {
+ for _, v := range Target.Embeds {
initEmbed(v)
}
}
// initEmbed emits the init data for a //go:embed variable,
// which is either a string, a []byte, or an embed.FS.
-func initEmbed(v *Node) {
- files := v.Name.Param.EmbedFiles()
- switch kind := embedKind(v.Type); kind {
+func initEmbed(v *ir.Name) {
+ files := embedFileList(v)
+ switch kind := embedKind(v.Type()); kind {
case embedUnknown:
- yyerrorl(v.Pos, "go:embed cannot apply to var of type %v", v.Type)
+ base.ErrorfAt(v.Pos(), "go:embed cannot apply to var of type %v", v.Type())
case embedString, embedBytes:
file := files[0]
- fsym, size, err := fileStringSym(v.Pos, embedCfg.Files[file], kind == embedString, nil)
+ fsym, size, err := fileStringSym(v.Pos(), base.Flag.Cfg.Embed.Files[file], kind == embedString, nil)
if err != nil {
- yyerrorl(v.Pos, "embed %s: %v", file, err)
+ base.ErrorfAt(v.Pos(), "embed %s: %v", file, err)
}
- sym := v.Sym.Linksym()
+ sym := v.Sym().Linksym()
off := 0
off = dsymptr(sym, off, fsym, 0) // data string
off = duintptr(sym, off, uint64(size)) // len
}
case embedFiles:
- slicedata := Ctxt.Lookup(`"".` + v.Sym.Name + `.files`)
+ slicedata := base.Ctxt.Lookup(`"".` + v.Sym().Name + `.files`)
off := 0
// []files pointed at by Files
off = dsymptr(slicedata, off, slicedata, 3*Widthptr) // []file, pointing just past slice
const hashSize = 16
hash := make([]byte, hashSize)
for _, file := range files {
- off = dsymptr(slicedata, off, stringsym(v.Pos, file), 0) // file string
+ off = dsymptr(slicedata, off, stringsym(v.Pos(), file), 0) // file string
off = duintptr(slicedata, off, uint64(len(file)))
if strings.HasSuffix(file, "/") {
// entry for directory - no data
off = duintptr(slicedata, off, 0)
off += hashSize
} else {
- fsym, size, err := fileStringSym(v.Pos, embedCfg.Files[file], true, hash)
+ fsym, size, err := fileStringSym(v.Pos(), base.Flag.Cfg.Embed.Files[file], true, hash)
if err != nil {
- yyerrorl(v.Pos, "embed %s: %v", file, err)
+ base.ErrorfAt(v.Pos(), "embed %s: %v", file, err)
}
off = dsymptr(slicedata, off, fsym, 0) // data string
off = duintptr(slicedata, off, uint64(size))
- off = int(slicedata.WriteBytes(Ctxt, int64(off), hash))
+ off = int(slicedata.WriteBytes(base.Ctxt, int64(off), hash))
}
}
ggloblsym(slicedata, int32(off), obj.RODATA|obj.LOCAL)
- sym := v.Sym.Linksym()
+ sym := v.Sym().Linksym()
dsymptr(sym, 0, slicedata, 0)
}
}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
- "cmd/compile/internal/types"
- "fmt"
-)
-
-func escapes(all []*Node) {
- visitBottomUp(all, escapeFuncs)
-}
-
-const (
- EscFuncUnknown = 0 + iota
- EscFuncPlanned
- EscFuncStarted
- EscFuncTagged
-)
-
-func min8(a, b int8) int8 {
- if a < b {
- return a
- }
- return b
-}
-
-func max8(a, b int8) int8 {
- if a > b {
- return a
- }
- return b
-}
-
-const (
- EscUnknown = iota
- EscNone // Does not escape to heap, result, or parameters.
- EscHeap // Reachable from the heap
- EscNever // By construction will not escape.
-)
-
-// funcSym returns fn.Func.Nname.Sym if no nils are encountered along the way.
-func funcSym(fn *Node) *types.Sym {
- if fn == nil || fn.Func.Nname == nil {
- return nil
- }
- return fn.Func.Nname.Sym
-}
-
-// Mark labels that have no backjumps to them as not increasing e.loopdepth.
-// Walk hasn't generated (goto|label).Left.Sym.Label yet, so we'll cheat
-// and set it to one of the following two. Then in esc we'll clear it again.
-var (
- looping Node
- nonlooping Node
-)
-
-func isSliceSelfAssign(dst, src *Node) bool {
- // Detect the following special case.
- //
- // func (b *Buffer) Foo() {
- // n, m := ...
- // b.buf = b.buf[n:m]
- // }
- //
- // This assignment is a no-op for escape analysis,
- // it does not store any new pointers into b that were not already there.
- // However, without this special case b will escape, because we assign to OIND/ODOTPTR.
- // Here we assume that the statement will not contain calls,
- // that is, that order will move any calls to init.
- // Otherwise base ONAME value could change between the moments
- // when we evaluate it for dst and for src.
-
- // dst is ONAME dereference.
- if dst.Op != ODEREF && dst.Op != ODOTPTR || dst.Left.Op != ONAME {
- return false
- }
- // src is a slice operation.
- switch src.Op {
- case OSLICE, OSLICE3, OSLICESTR:
- // OK.
- case OSLICEARR, OSLICE3ARR:
- // Since arrays are embedded into containing object,
- // slice of non-pointer array will introduce a new pointer into b that was not already there
- // (pointer to b itself). After such assignment, if b contents escape,
- // b escapes as well. If we ignore such OSLICEARR, we will conclude
- // that b does not escape when b contents do.
- //
- // Pointer to an array is OK since it's not stored inside b directly.
- // For slicing an array (not pointer to array), there is an implicit OADDR.
- // We check that to determine non-pointer array slicing.
- if src.Left.Op == OADDR {
- return false
- }
- default:
- return false
- }
- // slice is applied to ONAME dereference.
- if src.Left.Op != ODEREF && src.Left.Op != ODOTPTR || src.Left.Left.Op != ONAME {
- return false
- }
- // dst and src reference the same base ONAME.
- return dst.Left == src.Left.Left
-}
-
-// isSelfAssign reports whether assignment from src to dst can
-// be ignored by the escape analysis as it's effectively a self-assignment.
-func isSelfAssign(dst, src *Node) bool {
- if isSliceSelfAssign(dst, src) {
- return true
- }
-
- // Detect trivial assignments that assign back to the same object.
- //
- // It covers these cases:
- // val.x = val.y
- // val.x[i] = val.y[j]
- // val.x1.x2 = val.x1.y2
- // ... etc
- //
- // These assignments do not change assigned object lifetime.
-
- if dst == nil || src == nil || dst.Op != src.Op {
- return false
- }
-
- switch dst.Op {
- case ODOT, ODOTPTR:
- // Safe trailing accessors that are permitted to differ.
- case OINDEX:
- if mayAffectMemory(dst.Right) || mayAffectMemory(src.Right) {
- return false
- }
- default:
- return false
- }
-
- // The expression prefix must be both "safe" and identical.
- return samesafeexpr(dst.Left, src.Left)
-}
-
-// mayAffectMemory reports whether evaluation of n may affect the program's
-// memory state. If the expression can't affect memory state, then it can be
-// safely ignored by the escape analysis.
-func mayAffectMemory(n *Node) bool {
- // We may want to use a list of "memory safe" ops instead of generally
- // "side-effect free", which would include all calls and other ops that can
- // allocate or change global state. For now, it's safer to start with the latter.
- //
- // We're ignoring things like division by zero, index out of range,
- // and nil pointer dereference here.
- switch n.Op {
- case ONAME, OCLOSUREVAR, OLITERAL:
- return false
-
- // Left+Right group.
- case OINDEX, OADD, OSUB, OOR, OXOR, OMUL, OLSH, ORSH, OAND, OANDNOT, ODIV, OMOD:
- return mayAffectMemory(n.Left) || mayAffectMemory(n.Right)
-
- // Left group.
- case ODOT, ODOTPTR, ODEREF, OCONVNOP, OCONV, OLEN, OCAP,
- ONOT, OBITNOT, OPLUS, ONEG, OALIGNOF, OOFFSETOF, OSIZEOF:
- return mayAffectMemory(n.Left)
-
- default:
- return true
- }
-}
-
-// heapAllocReason returns the reason the given Node must be heap
-// allocated, or the empty string if it doesn't.
-func heapAllocReason(n *Node) string {
- if n.Type == nil {
- return ""
- }
-
- // Parameters are always passed via the stack.
- if n.Op == ONAME && (n.Class() == PPARAM || n.Class() == PPARAMOUT) {
- return ""
- }
-
- if n.Type.Width > maxStackVarSize {
- return "too large for stack"
- }
-
- if (n.Op == ONEW || n.Op == OPTRLIT) && n.Type.Elem().Width >= maxImplicitStackVarSize {
- return "too large for stack"
- }
-
- if n.Op == OCLOSURE && closureType(n).Size() >= maxImplicitStackVarSize {
- return "too large for stack"
- }
- if n.Op == OCALLPART && partialCallType(n).Size() >= maxImplicitStackVarSize {
- return "too large for stack"
- }
-
- if n.Op == OMAKESLICE {
- r := n.Right
- if r == nil {
- r = n.Left
- }
- if !smallintconst(r) {
- return "non-constant size"
- }
- if t := n.Type; t.Elem().Width != 0 && r.Int64Val() >= maxImplicitStackVarSize/t.Elem().Width {
- return "too large for stack"
- }
- }
-
- return ""
-}
-
-// addrescapes tags node n as having had its address taken
-// by "increasing" the "value" of n.Esc to EscHeap.
-// Storage is allocated as necessary to allow the address
-// to be taken.
-func addrescapes(n *Node) {
- switch n.Op {
- default:
- // Unexpected Op, probably due to a previous type error. Ignore.
-
- case ODEREF, ODOTPTR:
- // Nothing to do.
-
- case ONAME:
- if n == nodfp {
- break
- }
-
- // if this is a tmpname (PAUTO), it was tagged by tmpname as not escaping.
- // on PPARAM it means something different.
- if n.Class() == PAUTO && n.Esc == EscNever {
- break
- }
-
- // If a closure reference escapes, mark the outer variable as escaping.
- if n.Name.IsClosureVar() {
- addrescapes(n.Name.Defn)
- break
- }
-
- if n.Class() != PPARAM && n.Class() != PPARAMOUT && n.Class() != PAUTO {
- break
- }
-
- // This is a plain parameter or local variable that needs to move to the heap,
- // but possibly for the function outside the one we're compiling.
- // That is, if we have:
- //
- // func f(x int) {
- // func() {
- // global = &x
- // }
- // }
- //
- // then we're analyzing the inner closure but we need to move x to the
- // heap in f, not in the inner closure. Flip over to f before calling moveToHeap.
- oldfn := Curfn
- Curfn = n.Name.Curfn
- if Curfn.Func.Closure != nil && Curfn.Op == OCLOSURE {
- Curfn = Curfn.Func.Closure
- }
- ln := lineno
- lineno = Curfn.Pos
- moveToHeap(n)
- Curfn = oldfn
- lineno = ln
-
- // ODOTPTR has already been introduced,
- // so these are the non-pointer ODOT and OINDEX.
- // In &x[0], if x is a slice, then x does not
- // escape--the pointer inside x does, but that
- // is always a heap pointer anyway.
- case ODOT, OINDEX, OPAREN, OCONVNOP:
- if !n.Left.Type.IsSlice() {
- addrescapes(n.Left)
- }
- }
-}
-
-// moveToHeap records the parameter or local variable n as moved to the heap.
-func moveToHeap(n *Node) {
- if Debug.r != 0 {
- Dump("MOVE", n)
- }
- if compiling_runtime {
- yyerror("%v escapes to heap, not allowed in runtime", n)
- }
- if n.Class() == PAUTOHEAP {
- Dump("n", n)
- Fatalf("double move to heap")
- }
-
- // Allocate a local stack variable to hold the pointer to the heap copy.
- // temp will add it to the function declaration list automatically.
- heapaddr := temp(types.NewPtr(n.Type))
- heapaddr.Sym = lookup("&" + n.Sym.Name)
- heapaddr.Orig.Sym = heapaddr.Sym
- heapaddr.Pos = n.Pos
-
- // Unset AutoTemp to persist the &foo variable name through SSA to
- // liveness analysis.
- // TODO(mdempsky/drchase): Cleaner solution?
- heapaddr.Name.SetAutoTemp(false)
-
- // Parameters have a local stack copy used at function start/end
- // in addition to the copy in the heap that may live longer than
- // the function.
- if n.Class() == PPARAM || n.Class() == PPARAMOUT {
- if n.Xoffset == BADWIDTH {
- Fatalf("addrescapes before param assignment")
- }
-
- // We rewrite n below to be a heap variable (indirection of heapaddr).
- // Preserve a copy so we can still write code referring to the original,
- // and substitute that copy into the function declaration list
- // so that analyses of the local (on-stack) variables use it.
- stackcopy := newname(n.Sym)
- stackcopy.Type = n.Type
- stackcopy.Xoffset = n.Xoffset
- stackcopy.SetClass(n.Class())
- stackcopy.Name.Param.Heapaddr = heapaddr
- if n.Class() == PPARAMOUT {
- // Make sure the pointer to the heap copy is kept live throughout the function.
- // The function could panic at any point, and then a defer could recover.
- // Thus, we need the pointer to the heap copy always available so the
- // post-deferreturn code can copy the return value back to the stack.
- // See issue 16095.
- heapaddr.Name.SetIsOutputParamHeapAddr(true)
- }
- n.Name.Param.Stackcopy = stackcopy
-
- // Substitute the stackcopy into the function variable list so that
- // liveness and other analyses use the underlying stack slot
- // and not the now-pseudo-variable n.
- found := false
- for i, d := range Curfn.Func.Dcl {
- if d == n {
- Curfn.Func.Dcl[i] = stackcopy
- found = true
- break
- }
- // Parameters are before locals, so can stop early.
- // This limits the search even in functions with many local variables.
- if d.Class() == PAUTO {
- break
- }
- }
- if !found {
- Fatalf("cannot find %v in local variable list", n)
- }
- Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
- }
-
- // Modify n in place so that uses of n now mean indirection of the heapaddr.
- n.SetClass(PAUTOHEAP)
- n.Xoffset = 0
- n.Name.Param.Heapaddr = heapaddr
- n.Esc = EscHeap
- if Debug.m != 0 {
- Warnl(n.Pos, "moved to heap: %v", n)
- }
-}
-
-// This special tag is applied to uintptr variables
-// that we believe may hold unsafe.Pointers for
-// calls into assembly functions.
-const unsafeUintptrTag = "unsafe-uintptr"
-
-// This special tag is applied to uintptr parameters of functions
-// marked go:uintptrescapes.
-const uintptrEscapesTag = "uintptr-escapes"
-
-func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string {
- name := func() string {
- if f.Sym != nil {
- return f.Sym.Name
- }
- return fmt.Sprintf("arg#%d", narg)
- }
-
- if fn.Nbody.Len() == 0 {
- // Assume that uintptr arguments must be held live across the call.
- // This is most important for syscall.Syscall.
- // See golang.org/issue/13372.
- // This really doesn't have much to do with escape analysis per se,
- // but we are reusing the ability to annotate an individual function
- // argument and pass those annotations along to importing code.
- if f.Type.IsUintptr() {
- if Debug.m != 0 {
- Warnl(f.Pos, "assuming %v is unsafe uintptr", name())
- }
- return unsafeUintptrTag
- }
-
- if !f.Type.HasPointers() { // don't bother tagging for scalars
- return ""
- }
-
- var esc EscLeaks
-
- // External functions are assumed unsafe, unless
- // //go:noescape is given before the declaration.
- if fn.Func.Pragma&Noescape != 0 {
- if Debug.m != 0 && f.Sym != nil {
- Warnl(f.Pos, "%v does not escape", name())
- }
- } else {
- if Debug.m != 0 && f.Sym != nil {
- Warnl(f.Pos, "leaking param: %v", name())
- }
- esc.AddHeap(0)
- }
-
- return esc.Encode()
- }
-
- if fn.Func.Pragma&UintptrEscapes != 0 {
- if f.Type.IsUintptr() {
- if Debug.m != 0 {
- Warnl(f.Pos, "marking %v as escaping uintptr", name())
- }
- return uintptrEscapesTag
- }
- if f.IsDDD() && f.Type.Elem().IsUintptr() {
- // final argument is ...uintptr.
- if Debug.m != 0 {
- Warnl(f.Pos, "marking %v as escaping ...uintptr", name())
- }
- return uintptrEscapesTag
- }
- }
-
- if !f.Type.HasPointers() { // don't bother tagging for scalars
- return ""
- }
-
- // Unnamed parameters are unused and therefore do not escape.
- if f.Sym == nil || f.Sym.IsBlank() {
- var esc EscLeaks
- return esc.Encode()
- }
-
- n := asNode(f.Nname)
- loc := e.oldLoc(n)
- esc := loc.paramEsc
- esc.Optimize()
-
- if Debug.m != 0 && !loc.escapes {
- if esc.Empty() {
- Warnl(f.Pos, "%v does not escape", name())
- }
- if x := esc.Heap(); x >= 0 {
- if x == 0 {
- Warnl(f.Pos, "leaking param: %v", name())
- } else {
- // TODO(mdempsky): Mention level=x like below?
- Warnl(f.Pos, "leaking param content: %v", name())
- }
- }
- for i := 0; i < numEscResults; i++ {
- if x := esc.Result(i); x >= 0 {
- res := fn.Type.Results().Field(i).Sym
- Warnl(f.Pos, "leaking param: %v to result %v level=%d", name(), res, x)
- }
- }
- }
-
- return esc.Encode()
-}
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/types"
"cmd/internal/src"
type Escape struct {
allLocs []*EscLocation
+ labels map[*types.Sym]labelState // known labels
- curfn *Node
+ curfn *ir.Func
// loopDepth counts the current loop nesting depth within
// curfn. It increments within each "for" loop and at each
// An EscLocation represents an abstract location that stores a Go
// variable.
type EscLocation struct {
- n *Node // represented variable or expression, if any
- curfn *Node // enclosing function
+ n ir.Node // represented variable or expression, if any
+ curfn *ir.Func // enclosing function
edges []EscEdge // incoming edges
loopDepth int // loopDepth at declaration
notes *EscNote
}
+// escFmt is called from node printing to print information about escape analysis results.
+func escFmt(n ir.Node) string {
+ text := ""
+ switch n.Esc() {
+ case EscUnknown:
+ break
+
+ case EscHeap:
+ text = "esc(h)"
+
+ case EscNone:
+ text = "esc(no)"
+
+ case EscNever:
+ text = "esc(N)"
+
+ default:
+ text = fmt.Sprintf("esc(%d)", n.Esc())
+ }
+
+ if e, ok := n.Opt().(*EscLocation); ok && e.loopDepth != 0 {
+ if text != "" {
+ text += " "
+ }
+ text += fmt.Sprintf("ld(%d)", e.loopDepth)
+ }
+ return text
+}
+
// escapeFuncs performs escape analysis on a minimal batch of
// functions.
-func escapeFuncs(fns []*Node, recursive bool) {
+func escapeFuncs(fns []*ir.Func, recursive bool) {
for _, fn := range fns {
- if fn.Op != ODCLFUNC {
- Fatalf("unexpected node: %v", fn)
+ if fn.Op() != ir.ODCLFUNC {
+ base.Fatalf("unexpected node: %v", fn)
}
}
e.finish(fns)
}
-func (e *Escape) initFunc(fn *Node) {
- if fn.Op != ODCLFUNC || fn.Esc != EscFuncUnknown {
- Fatalf("unexpected node: %v", fn)
+func (e *Escape) initFunc(fn *ir.Func) {
+ if fn.Esc() != EscFuncUnknown {
+ base.Fatalf("unexpected node: %v", fn)
}
- fn.Esc = EscFuncPlanned
- if Debug.m > 3 {
- Dump("escAnalyze", fn)
+ fn.SetEsc(EscFuncPlanned)
+ if base.Flag.LowerM > 3 {
+ ir.Dump("escAnalyze", fn)
}
e.curfn = fn
e.loopDepth = 1
// Allocate locations for local variables.
- for _, dcl := range fn.Func.Dcl {
- if dcl.Op == ONAME {
+ for _, dcl := range fn.Dcl {
+ if dcl.Op() == ir.ONAME {
e.newLoc(dcl, false)
}
}
}
-func (e *Escape) walkFunc(fn *Node) {
- fn.Esc = EscFuncStarted
+func (e *Escape) walkFunc(fn *ir.Func) {
+ fn.SetEsc(EscFuncStarted)
// Identify labels that mark the head of an unstructured loop.
- inspectList(fn.Nbody, func(n *Node) bool {
- switch n.Op {
- case OLABEL:
- n.Sym.Label = asTypesNode(&nonlooping)
+ ir.Visit(fn, func(n ir.Node) {
+ switch n.Op() {
+ case ir.OLABEL:
+ n := n.(*ir.LabelStmt)
+ if e.labels == nil {
+ e.labels = make(map[*types.Sym]labelState)
+ }
+ e.labels[n.Sym()] = nonlooping
- case OGOTO:
+ case ir.OGOTO:
// If we visited the label before the goto,
// then this is a looping label.
- if n.Sym.Label == asTypesNode(&nonlooping) {
- n.Sym.Label = asTypesNode(&looping)
+ n := n.(*ir.BranchStmt)
+ if e.labels[n.Sym()] == nonlooping {
+ e.labels[n.Sym()] = looping
}
}
-
- return true
})
e.curfn = fn
e.loopDepth = 1
- e.block(fn.Nbody)
+ e.block(fn.Body())
+
+ if len(e.labels) != 0 {
+ base.FatalfAt(fn.Pos(), "leftover labels after walkFunc")
+ }
}
// Below we implement the methods for walking the AST and recording
// }
// stmt evaluates a single Go statement.
-func (e *Escape) stmt(n *Node) {
+func (e *Escape) stmt(n ir.Node) {
if n == nil {
return
}
lno := setlineno(n)
defer func() {
- lineno = lno
+ base.Pos = lno
}()
- if Debug.m > 2 {
- fmt.Printf("%v:[%d] %v stmt: %v\n", linestr(lineno), e.loopDepth, funcSym(e.curfn), n)
+ if base.Flag.LowerM > 2 {
+ fmt.Printf("%v:[%d] %v stmt: %v\n", base.FmtPos(base.Pos), e.loopDepth, funcSym(e.curfn), n)
}
- e.stmts(n.Ninit)
+ e.stmts(n.Init())
- switch n.Op {
+ switch n.Op() {
default:
- Fatalf("unexpected stmt: %v", n)
+ base.Fatalf("unexpected stmt: %v", n)
- case ODCLCONST, ODCLTYPE, OEMPTY, OFALL, OINLMARK:
+ case ir.ODCLCONST, ir.ODCLTYPE, ir.OFALL, ir.OINLMARK:
// nop
- case OBREAK, OCONTINUE, OGOTO:
+ case ir.OBREAK, ir.OCONTINUE, ir.OGOTO:
// TODO(mdempsky): Handle dead code?
- case OBLOCK:
- e.stmts(n.List)
+ case ir.OBLOCK:
+ n := n.(*ir.BlockStmt)
+ e.stmts(n.List())
- case ODCL:
+ case ir.ODCL:
// Record loop depth at declaration.
- if !n.Left.isBlank() {
- e.dcl(n.Left)
+ n := n.(*ir.Decl)
+ if !ir.IsBlank(n.Left()) {
+ e.dcl(n.Left())
}
- case OLABEL:
- switch asNode(n.Sym.Label) {
- case &nonlooping:
- if Debug.m > 2 {
- fmt.Printf("%v:%v non-looping label\n", linestr(lineno), n)
+ case ir.OLABEL:
+ n := n.(*ir.LabelStmt)
+ switch e.labels[n.Sym()] {
+ case nonlooping:
+ if base.Flag.LowerM > 2 {
+ fmt.Printf("%v:%v non-looping label\n", base.FmtPos(base.Pos), n)
}
- case &looping:
- if Debug.m > 2 {
- fmt.Printf("%v: %v looping label\n", linestr(lineno), n)
+ case looping:
+ if base.Flag.LowerM > 2 {
+ fmt.Printf("%v: %v looping label\n", base.FmtPos(base.Pos), n)
}
e.loopDepth++
default:
- Fatalf("label missing tag")
+ base.Fatalf("label missing tag")
}
- n.Sym.Label = nil
+ delete(e.labels, n.Sym())
- case OIF:
- e.discard(n.Left)
- e.block(n.Nbody)
- e.block(n.Rlist)
+ case ir.OIF:
+ n := n.(*ir.IfStmt)
+ e.discard(n.Left())
+ e.block(n.Body())
+ e.block(n.Rlist())
- case OFOR, OFORUNTIL:
+ case ir.OFOR, ir.OFORUNTIL:
+ n := n.(*ir.ForStmt)
e.loopDepth++
- e.discard(n.Left)
- e.stmt(n.Right)
- e.block(n.Nbody)
+ e.discard(n.Left())
+ e.stmt(n.Right())
+ e.block(n.Body())
e.loopDepth--
- case ORANGE:
+ case ir.ORANGE:
// for List = range Right { Nbody }
+ n := n.(*ir.RangeStmt)
e.loopDepth++
- ks := e.addrs(n.List)
- e.block(n.Nbody)
+ ks := e.addrs(n.List())
+ e.block(n.Body())
e.loopDepth--
// Right is evaluated outside the loop.
k := e.discardHole()
if len(ks) >= 2 {
- if n.Right.Type.IsArray() {
+ if n.Right().Type().IsArray() {
k = ks[1].note(n, "range")
} else {
k = ks[1].deref(n, "range-deref")
}
}
- e.expr(e.later(k), n.Right)
+ e.expr(e.later(k), n.Right())
- case OSWITCH:
- typesw := n.Left != nil && n.Left.Op == OTYPESW
+ case ir.OSWITCH:
+ n := n.(*ir.SwitchStmt)
+ typesw := n.Left() != nil && n.Left().Op() == ir.OTYPESW
var ks []EscHole
- for _, cas := range n.List.Slice() { // cases
- if typesw && n.Left.Left != nil {
- cv := cas.Rlist.First()
+ for _, cas := range n.List().Slice() { // cases
+ cas := cas.(*ir.CaseStmt)
+ if typesw && n.Left().(*ir.TypeSwitchGuard).Left() != nil {
+ cv := cas.Rlist().First()
k := e.dcl(cv) // type switch variables have no ODCL.
- if cv.Type.HasPointers() {
- ks = append(ks, k.dotType(cv.Type, cas, "switch case"))
+ if cv.Type().HasPointers() {
+ ks = append(ks, k.dotType(cv.Type(), cas, "switch case"))
}
}
- e.discards(cas.List)
- e.block(cas.Nbody)
+ e.discards(cas.List())
+ e.block(cas.Body())
}
if typesw {
- e.expr(e.teeHole(ks...), n.Left.Right)
+ e.expr(e.teeHole(ks...), n.Left().(*ir.TypeSwitchGuard).Right())
} else {
- e.discard(n.Left)
+ e.discard(n.Left())
}
- case OSELECT:
- for _, cas := range n.List.Slice() {
- e.stmt(cas.Left)
- e.block(cas.Nbody)
+ case ir.OSELECT:
+ n := n.(*ir.SelectStmt)
+ for _, cas := range n.List().Slice() {
+ cas := cas.(*ir.CaseStmt)
+ e.stmt(cas.Left())
+ e.block(cas.Body())
}
- case OSELRECV:
- e.assign(n.Left, n.Right, "selrecv", n)
- case OSELRECV2:
- e.assign(n.Left, n.Right, "selrecv", n)
- e.assign(n.List.First(), nil, "selrecv", n)
- case ORECV:
+ case ir.OSELRECV2:
+ n := n.(*ir.AssignListStmt)
+ e.assign(n.List().First(), n.Rlist().First(), "selrecv", n)
+ e.assign(n.List().Second(), nil, "selrecv", n)
+ case ir.ORECV:
// TODO(mdempsky): Consider e.discard(n.Left).
+ n := n.(*ir.UnaryExpr)
e.exprSkipInit(e.discardHole(), n) // already visited n.Ninit
- case OSEND:
- e.discard(n.Left)
- e.assignHeap(n.Right, "send", n)
-
- case OAS, OASOP:
- e.assign(n.Left, n.Right, "assign", n)
-
- case OAS2:
- for i, nl := range n.List.Slice() {
- e.assign(nl, n.Rlist.Index(i), "assign-pair", n)
- }
-
- case OAS2DOTTYPE: // v, ok = x.(type)
- e.assign(n.List.First(), n.Right, "assign-pair-dot-type", n)
- e.assign(n.List.Second(), nil, "assign-pair-dot-type", n)
- case OAS2MAPR: // v, ok = m[k]
- e.assign(n.List.First(), n.Right, "assign-pair-mapr", n)
- e.assign(n.List.Second(), nil, "assign-pair-mapr", n)
- case OAS2RECV: // v, ok = <-ch
- e.assign(n.List.First(), n.Right, "assign-pair-receive", n)
- e.assign(n.List.Second(), nil, "assign-pair-receive", n)
-
- case OAS2FUNC:
- e.stmts(n.Right.Ninit)
- e.call(e.addrs(n.List), n.Right, nil)
- case ORETURN:
- results := e.curfn.Type.Results().FieldSlice()
- for i, v := range n.List.Slice() {
- e.assign(asNode(results[i].Nname), v, "return", n)
- }
- case OCALLFUNC, OCALLMETH, OCALLINTER, OCLOSE, OCOPY, ODELETE, OPANIC, OPRINT, OPRINTN, ORECOVER:
+ case ir.OSEND:
+ n := n.(*ir.SendStmt)
+ e.discard(n.Left())
+ e.assignHeap(n.Right(), "send", n)
+
+ case ir.OAS:
+ n := n.(*ir.AssignStmt)
+ e.assign(n.Left(), n.Right(), "assign", n)
+ case ir.OASOP:
+ n := n.(*ir.AssignOpStmt)
+ e.assign(n.Left(), n.Right(), "assign", n)
+ case ir.OAS2:
+ n := n.(*ir.AssignListStmt)
+ for i, nl := range n.List().Slice() {
+ e.assign(nl, n.Rlist().Index(i), "assign-pair", n)
+ }
+
+ case ir.OAS2DOTTYPE: // v, ok = x.(type)
+ n := n.(*ir.AssignListStmt)
+ e.assign(n.List().First(), n.Rlist().First(), "assign-pair-dot-type", n)
+ e.assign(n.List().Second(), nil, "assign-pair-dot-type", n)
+ case ir.OAS2MAPR: // v, ok = m[k]
+ n := n.(*ir.AssignListStmt)
+ e.assign(n.List().First(), n.Rlist().First(), "assign-pair-mapr", n)
+ e.assign(n.List().Second(), nil, "assign-pair-mapr", n)
+ case ir.OAS2RECV: // v, ok = <-ch
+ n := n.(*ir.AssignListStmt)
+ e.assign(n.List().First(), n.Rlist().First(), "assign-pair-receive", n)
+ e.assign(n.List().Second(), nil, "assign-pair-receive", n)
+
+ case ir.OAS2FUNC:
+ n := n.(*ir.AssignListStmt)
+ e.stmts(n.Rlist().First().Init())
+ e.call(e.addrs(n.List()), n.Rlist().First(), nil)
+ case ir.ORETURN:
+ n := n.(*ir.ReturnStmt)
+ results := e.curfn.Type().Results().FieldSlice()
+ for i, v := range n.List().Slice() {
+ e.assign(ir.AsNode(results[i].Nname), v, "return", n)
+ }
+ case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OCLOSE, ir.OCOPY, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
e.call(nil, n, nil)
- case OGO, ODEFER:
- e.stmts(n.Left.Ninit)
- e.call(nil, n.Left, n)
+ case ir.OGO, ir.ODEFER:
+ n := n.(*ir.GoDeferStmt)
+ e.stmts(n.Left().Init())
+ e.call(nil, n.Left(), n)
- case ORETJMP:
+ case ir.ORETJMP:
// TODO(mdempsky): What do? esc.go just ignores it.
}
}
-func (e *Escape) stmts(l Nodes) {
+func (e *Escape) stmts(l ir.Nodes) {
for _, n := range l.Slice() {
e.stmt(n)
}
}
// block is like stmts, but preserves loopDepth.
-func (e *Escape) block(l Nodes) {
+func (e *Escape) block(l ir.Nodes) {
old := e.loopDepth
e.stmts(l)
e.loopDepth = old
// expr models evaluating an expression n and flowing the result into
// hole k.
-func (e *Escape) expr(k EscHole, n *Node) {
+func (e *Escape) expr(k EscHole, n ir.Node) {
if n == nil {
return
}
- e.stmts(n.Ninit)
+ e.stmts(n.Init())
e.exprSkipInit(k, n)
}
-func (e *Escape) exprSkipInit(k EscHole, n *Node) {
+func (e *Escape) exprSkipInit(k EscHole, n ir.Node) {
if n == nil {
return
}
lno := setlineno(n)
defer func() {
- lineno = lno
+ base.Pos = lno
}()
uintptrEscapesHack := k.uintptrEscapesHack
k.uintptrEscapesHack = false
- if uintptrEscapesHack && n.Op == OCONVNOP && n.Left.Type.IsUnsafePtr() {
+ if uintptrEscapesHack && n.Op() == ir.OCONVNOP && n.(*ir.ConvExpr).Left().Type().IsUnsafePtr() {
// nop
- } else if k.derefs >= 0 && !n.Type.HasPointers() {
+ } else if k.derefs >= 0 && !n.Type().HasPointers() {
k = e.discardHole()
}
- switch n.Op {
+ switch n.Op() {
default:
- Fatalf("unexpected expr: %v", n)
+ base.Fatalf("unexpected expr: %v", n)
- case OLITERAL, OGETG, OCLOSUREVAR, OTYPE:
+ case ir.OLITERAL, ir.ONIL, ir.OGETG, ir.OCLOSUREREAD, ir.OTYPE, ir.OMETHEXPR:
// nop
- case ONAME:
- if n.Class() == PFUNC || n.Class() == PEXTERN {
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ if n.Class() == ir.PFUNC || n.Class() == ir.PEXTERN {
return
}
e.flow(k, e.oldLoc(n))
- case OPLUS, ONEG, OBITNOT, ONOT:
- e.discard(n.Left)
- case OADD, OSUB, OOR, OXOR, OMUL, ODIV, OMOD, OLSH, ORSH, OAND, OANDNOT, OEQ, ONE, OLT, OLE, OGT, OGE, OANDAND, OOROR:
- e.discard(n.Left)
- e.discard(n.Right)
-
- case OADDR:
- e.expr(k.addr(n, "address-of"), n.Left) // "address-of"
- case ODEREF:
- e.expr(k.deref(n, "indirection"), n.Left) // "indirection"
- case ODOT, ODOTMETH, ODOTINTER:
- e.expr(k.note(n, "dot"), n.Left)
- case ODOTPTR:
- e.expr(k.deref(n, "dot of pointer"), n.Left) // "dot of pointer"
- case ODOTTYPE, ODOTTYPE2:
- e.expr(k.dotType(n.Type, n, "dot"), n.Left)
- case OINDEX:
- if n.Left.Type.IsArray() {
- e.expr(k.note(n, "fixed-array-index-of"), n.Left)
+ case ir.ONAMEOFFSET:
+ n := n.(*ir.NameOffsetExpr)
+ e.expr(k, n.Name_)
+
+ case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT:
+ n := n.(*ir.UnaryExpr)
+ e.discard(n.Left())
+ case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
+ n := n.(*ir.BinaryExpr)
+ e.discard(n.Left())
+ e.discard(n.Right())
+ case ir.OANDAND, ir.OOROR:
+ n := n.(*ir.LogicalExpr)
+ e.discard(n.Left())
+ e.discard(n.Right())
+ case ir.OADDR:
+ n := n.(*ir.AddrExpr)
+ e.expr(k.addr(n, "address-of"), n.Left()) // "address-of"
+ case ir.ODEREF:
+ n := n.(*ir.StarExpr)
+ e.expr(k.deref(n, "indirection"), n.Left()) // "indirection"
+ case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER:
+ n := n.(*ir.SelectorExpr)
+ e.expr(k.note(n, "dot"), n.Left())
+ case ir.ODOTPTR:
+ n := n.(*ir.SelectorExpr)
+ e.expr(k.deref(n, "dot of pointer"), n.Left()) // "dot of pointer"
+ case ir.ODOTTYPE, ir.ODOTTYPE2:
+ n := n.(*ir.TypeAssertExpr)
+ e.expr(k.dotType(n.Type(), n, "dot"), n.Left())
+ case ir.OINDEX:
+ n := n.(*ir.IndexExpr)
+ if n.Left().Type().IsArray() {
+ e.expr(k.note(n, "fixed-array-index-of"), n.Left())
} else {
// TODO(mdempsky): Fix why reason text.
- e.expr(k.deref(n, "dot of pointer"), n.Left)
- }
- e.discard(n.Right)
- case OINDEXMAP:
- e.discard(n.Left)
- e.discard(n.Right)
- case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR, OSLICESTR:
- e.expr(k.note(n, "slice"), n.Left)
+ e.expr(k.deref(n, "dot of pointer"), n.Left())
+ }
+ e.discard(n.Right())
+ case ir.OINDEXMAP:
+ n := n.(*ir.IndexExpr)
+ e.discard(n.Left())
+ e.discard(n.Right())
+ case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR:
+ n := n.(*ir.SliceExpr)
+ e.expr(k.note(n, "slice"), n.Left())
low, high, max := n.SliceBounds()
e.discard(low)
e.discard(high)
e.discard(max)
- case OCONV, OCONVNOP:
- if checkPtr(e.curfn, 2) && n.Type.IsUnsafePtr() && n.Left.Type.IsPtr() {
+ case ir.OCONV, ir.OCONVNOP:
+ n := n.(*ir.ConvExpr)
+ if checkPtr(e.curfn, 2) && n.Type().IsUnsafePtr() && n.Left().Type().IsPtr() {
// When -d=checkptr=2 is enabled, treat
// conversions to unsafe.Pointer as an
// escaping operation. This allows better
// runtime instrumentation, since we can more
// easily detect object boundaries on the heap
// than the stack.
- e.assignHeap(n.Left, "conversion to unsafe.Pointer", n)
- } else if n.Type.IsUnsafePtr() && n.Left.Type.IsUintptr() {
- e.unsafeValue(k, n.Left)
+ e.assignHeap(n.Left(), "conversion to unsafe.Pointer", n)
+ } else if n.Type().IsUnsafePtr() && n.Left().Type().IsUintptr() {
+ e.unsafeValue(k, n.Left())
} else {
- e.expr(k, n.Left)
+ e.expr(k, n.Left())
}
- case OCONVIFACE:
- if !n.Left.Type.IsInterface() && !isdirectiface(n.Left.Type) {
+ case ir.OCONVIFACE:
+ n := n.(*ir.ConvExpr)
+ if !n.Left().Type().IsInterface() && !isdirectiface(n.Left().Type()) {
k = e.spill(k, n)
}
- e.expr(k.note(n, "interface-converted"), n.Left)
+ e.expr(k.note(n, "interface-converted"), n.Left())
- case ORECV:
- e.discard(n.Left)
+ case ir.ORECV:
+ n := n.(*ir.UnaryExpr)
+ e.discard(n.Left())
- case OCALLMETH, OCALLFUNC, OCALLINTER, OLEN, OCAP, OCOMPLEX, OREAL, OIMAG, OAPPEND, OCOPY:
+ case ir.OCALLMETH, ir.OCALLFUNC, ir.OCALLINTER, ir.OLEN, ir.OCAP, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCOPY:
e.call([]EscHole{k}, n, nil)
- case ONEW:
+ case ir.ONEW:
+ n := n.(*ir.UnaryExpr)
e.spill(k, n)
- case OMAKESLICE:
+ case ir.OMAKESLICE:
+ n := n.(*ir.MakeExpr)
e.spill(k, n)
- e.discard(n.Left)
- e.discard(n.Right)
- case OMAKECHAN:
- e.discard(n.Left)
- case OMAKEMAP:
+ e.discard(n.Left())
+ e.discard(n.Right())
+ case ir.OMAKECHAN:
+ n := n.(*ir.MakeExpr)
+ e.discard(n.Left())
+ case ir.OMAKEMAP:
+ n := n.(*ir.MakeExpr)
e.spill(k, n)
- e.discard(n.Left)
+ e.discard(n.Left())
- case ORECOVER:
+ case ir.ORECOVER:
// nop
- case OCALLPART:
+ case ir.OCALLPART:
// Flow the receiver argument to both the closure and
// to the receiver parameter.
+ n := n.(*ir.CallPartExpr)
closureK := e.spill(k, n)
m := callpartMethod(n)
// parameters all flow to the heap.
//
// TODO(mdempsky): Change ks into a callback, so that
- // we don't have to create this dummy slice?
+ // we don't have to create this slice?
var ks []EscHole
for i := m.Type.NumResults(); i > 0; i-- {
ks = append(ks, e.heapHole())
}
- paramK := e.tagHole(ks, asNode(m.Type.Nname()), m.Type.Recv())
+ name, _ := m.Nname.(*ir.Name)
+ paramK := e.tagHole(ks, name, m.Type.Recv())
- e.expr(e.teeHole(paramK, closureK), n.Left)
+ e.expr(e.teeHole(paramK, closureK), n.Left())
- case OPTRLIT:
- e.expr(e.spill(k, n), n.Left)
+ case ir.OPTRLIT:
+ n := n.(*ir.AddrExpr)
+ e.expr(e.spill(k, n), n.Left())
- case OARRAYLIT:
- for _, elt := range n.List.Slice() {
- if elt.Op == OKEY {
- elt = elt.Right
+ case ir.OARRAYLIT:
+ n := n.(*ir.CompLitExpr)
+ for _, elt := range n.List().Slice() {
+ if elt.Op() == ir.OKEY {
+ elt = elt.(*ir.KeyExpr).Right()
}
e.expr(k.note(n, "array literal element"), elt)
}
- case OSLICELIT:
+ case ir.OSLICELIT:
+ n := n.(*ir.CompLitExpr)
k = e.spill(k, n)
k.uintptrEscapesHack = uintptrEscapesHack // for ...uintptr parameters
- for _, elt := range n.List.Slice() {
- if elt.Op == OKEY {
- elt = elt.Right
+ for _, elt := range n.List().Slice() {
+ if elt.Op() == ir.OKEY {
+ elt = elt.(*ir.KeyExpr).Right()
}
e.expr(k.note(n, "slice-literal-element"), elt)
}
- case OSTRUCTLIT:
- for _, elt := range n.List.Slice() {
- e.expr(k.note(n, "struct literal element"), elt.Left)
+ case ir.OSTRUCTLIT:
+ n := n.(*ir.CompLitExpr)
+ for _, elt := range n.List().Slice() {
+ e.expr(k.note(n, "struct literal element"), elt.(*ir.StructKeyExpr).Left())
}
- case OMAPLIT:
+ case ir.OMAPLIT:
+ n := n.(*ir.CompLitExpr)
e.spill(k, n)
// Map keys and values are always stored in the heap.
- for _, elt := range n.List.Slice() {
- e.assignHeap(elt.Left, "map literal key", n)
- e.assignHeap(elt.Right, "map literal value", n)
+ for _, elt := range n.List().Slice() {
+ elt := elt.(*ir.KeyExpr)
+ e.assignHeap(elt.Left(), "map literal key", n)
+ e.assignHeap(elt.Right(), "map literal value", n)
}
- case OCLOSURE:
+ case ir.OCLOSURE:
+ n := n.(*ir.ClosureExpr)
k = e.spill(k, n)
// Link addresses of captured variables to closure.
- for _, v := range n.Func.Closure.Func.Cvars.Slice() {
- if v.Op == OXXX { // unnamed out argument; see dcl.go:/^funcargs
- continue
- }
-
+ for _, v := range n.Func().ClosureVars {
k := k
- if !v.Name.Byval() {
+ if !v.Byval() {
k = k.addr(v, "reference")
}
- e.expr(k.note(n, "captured by a closure"), v.Name.Defn)
+ e.expr(k.note(n, "captured by a closure"), v.Defn)
}
- case ORUNES2STR, OBYTES2STR, OSTR2RUNES, OSTR2BYTES, ORUNESTR:
+ case ir.ORUNES2STR, ir.OBYTES2STR, ir.OSTR2RUNES, ir.OSTR2BYTES, ir.ORUNESTR:
+ n := n.(*ir.ConvExpr)
e.spill(k, n)
- e.discard(n.Left)
+ e.discard(n.Left())
- case OADDSTR:
+ case ir.OADDSTR:
+ n := n.(*ir.AddStringExpr)
e.spill(k, n)
// Arguments of OADDSTR never escape;
// runtime.concatstrings makes sure of that.
- e.discards(n.List)
+ e.discards(n.List())
}
}
// unsafeValue evaluates a uintptr-typed arithmetic expression looking
// for conversions from an unsafe.Pointer.
-func (e *Escape) unsafeValue(k EscHole, n *Node) {
- if n.Type.Etype != TUINTPTR {
- Fatalf("unexpected type %v for %v", n.Type, n)
+func (e *Escape) unsafeValue(k EscHole, n ir.Node) {
+ if n.Type().Kind() != types.TUINTPTR {
+ base.Fatalf("unexpected type %v for %v", n.Type(), n)
}
- e.stmts(n.Ninit)
+ e.stmts(n.Init())
- switch n.Op {
- case OCONV, OCONVNOP:
- if n.Left.Type.IsUnsafePtr() {
- e.expr(k, n.Left)
+ switch n.Op() {
+ case ir.OCONV, ir.OCONVNOP:
+ n := n.(*ir.ConvExpr)
+ if n.Left().Type().IsUnsafePtr() {
+ e.expr(k, n.Left())
} else {
- e.discard(n.Left)
+ e.discard(n.Left())
}
- case ODOTPTR:
+ case ir.ODOTPTR:
+ n := n.(*ir.SelectorExpr)
if isReflectHeaderDataField(n) {
- e.expr(k.deref(n, "reflect.Header.Data"), n.Left)
+ e.expr(k.deref(n, "reflect.Header.Data"), n.Left())
} else {
- e.discard(n.Left)
- }
- case OPLUS, ONEG, OBITNOT:
- e.unsafeValue(k, n.Left)
- case OADD, OSUB, OOR, OXOR, OMUL, ODIV, OMOD, OAND, OANDNOT:
- e.unsafeValue(k, n.Left)
- e.unsafeValue(k, n.Right)
- case OLSH, ORSH:
- e.unsafeValue(k, n.Left)
+ e.discard(n.Left())
+ }
+ case ir.OPLUS, ir.ONEG, ir.OBITNOT:
+ n := n.(*ir.UnaryExpr)
+ e.unsafeValue(k, n.Left())
+ case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OAND, ir.OANDNOT:
+ n := n.(*ir.BinaryExpr)
+ e.unsafeValue(k, n.Left())
+ e.unsafeValue(k, n.Right())
+ case ir.OLSH, ir.ORSH:
+ n := n.(*ir.BinaryExpr)
+ e.unsafeValue(k, n.Left())
// RHS need not be uintptr-typed (#32959) and can't meaningfully
// flow pointers anyway.
- e.discard(n.Right)
+ e.discard(n.Right())
default:
e.exprSkipInit(e.discardHole(), n)
}
// discard evaluates an expression n for side-effects, but discards
// its value.
-func (e *Escape) discard(n *Node) {
+func (e *Escape) discard(n ir.Node) {
e.expr(e.discardHole(), n)
}
-func (e *Escape) discards(l Nodes) {
+func (e *Escape) discards(l ir.Nodes) {
for _, n := range l.Slice() {
e.discard(n)
}
// addr evaluates an addressable expression n and returns an EscHole
// that represents storing into the represented location.
-func (e *Escape) addr(n *Node) EscHole {
- if n == nil || n.isBlank() {
- // Can happen at least in OSELRECV.
- // TODO(mdempsky): Anywhere else?
+func (e *Escape) addr(n ir.Node) EscHole {
+ if n == nil || ir.IsBlank(n) {
+ // Can happen in select case, range, maybe others.
return e.discardHole()
}
k := e.heapHole()
- switch n.Op {
+ switch n.Op() {
default:
- Fatalf("unexpected addr: %v", n)
- case ONAME:
- if n.Class() == PEXTERN {
+ base.Fatalf("unexpected addr: %v", n)
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ if n.Class() == ir.PEXTERN {
break
}
k = e.oldLoc(n).asHole()
- case ODOT:
- k = e.addr(n.Left)
- case OINDEX:
- e.discard(n.Right)
- if n.Left.Type.IsArray() {
- k = e.addr(n.Left)
+ case ir.ONAMEOFFSET:
+ n := n.(*ir.NameOffsetExpr)
+ e.addr(n.Name_)
+ case ir.ODOT:
+ n := n.(*ir.SelectorExpr)
+ k = e.addr(n.Left())
+ case ir.OINDEX:
+ n := n.(*ir.IndexExpr)
+ e.discard(n.Right())
+ if n.Left().Type().IsArray() {
+ k = e.addr(n.Left())
} else {
- e.discard(n.Left)
+ e.discard(n.Left())
}
- case ODEREF, ODOTPTR:
+ case ir.ODEREF, ir.ODOTPTR:
e.discard(n)
- case OINDEXMAP:
- e.discard(n.Left)
- e.assignHeap(n.Right, "key of map put", n)
+ case ir.OINDEXMAP:
+ n := n.(*ir.IndexExpr)
+ e.discard(n.Left())
+ e.assignHeap(n.Right(), "key of map put", n)
}
- if !n.Type.HasPointers() {
+ if !n.Type().HasPointers() {
k = e.discardHole()
}
return k
}
-func (e *Escape) addrs(l Nodes) []EscHole {
+func (e *Escape) addrs(l ir.Nodes) []EscHole {
var ks []EscHole
for _, n := range l.Slice() {
ks = append(ks, e.addr(n))
}
// assign evaluates the assignment dst = src.
-func (e *Escape) assign(dst, src *Node, why string, where *Node) {
+func (e *Escape) assign(dst, src ir.Node, why string, where ir.Node) {
// Filter out some no-op assignments for escape analysis.
ignore := dst != nil && src != nil && isSelfAssign(dst, src)
- if ignore && Debug.m != 0 {
- Warnl(where.Pos, "%v ignoring self-assignment in %S", funcSym(e.curfn), where)
+ if ignore && base.Flag.LowerM != 0 {
+ base.WarnfAt(where.Pos(), "%v ignoring self-assignment in %v", funcSym(e.curfn), where)
}
k := e.addr(dst)
- if dst != nil && dst.Op == ODOTPTR && isReflectHeaderDataField(dst) {
+ if dst != nil && dst.Op() == ir.ODOTPTR && isReflectHeaderDataField(dst) {
e.unsafeValue(e.heapHole().note(where, why), src)
} else {
if ignore {
}
}
-func (e *Escape) assignHeap(src *Node, why string, where *Node) {
+func (e *Escape) assignHeap(src ir.Node, why string, where ir.Node) {
e.expr(e.heapHole().note(where, why), src)
}
// call evaluates a call expressions, including builtin calls. ks
// should contain the holes representing where the function callee's
// results flows; where is the OGO/ODEFER context of the call, if any.
-func (e *Escape) call(ks []EscHole, call, where *Node) {
- topLevelDefer := where != nil && where.Op == ODEFER && e.loopDepth == 1
+func (e *Escape) call(ks []EscHole, call, where ir.Node) {
+ topLevelDefer := where != nil && where.Op() == ir.ODEFER && e.loopDepth == 1
if topLevelDefer {
// force stack allocation of defer record, unless
// open-coded defers are used (see ssa.go)
- where.Esc = EscNever
+ where.SetEsc(EscNever)
}
- argument := func(k EscHole, arg *Node) {
+ argument := func(k EscHole, arg ir.Node) {
if topLevelDefer {
// Top level defers arguments don't escape to
// heap, but they do need to last until end of
e.expr(k.note(call, "call parameter"), arg)
}
- switch call.Op {
+ switch call.Op() {
default:
- Fatalf("unexpected call op: %v", call.Op)
+ ir.Dump("esc", call)
+ base.Fatalf("unexpected call op: %v", call.Op())
- case OCALLFUNC, OCALLMETH, OCALLINTER:
+ case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
+ call := call.(*ir.CallExpr)
fixVariadicCall(call)
// Pick out the function callee, if statically known.
- var fn *Node
- switch call.Op {
- case OCALLFUNC:
- switch v := staticValue(call.Left); {
- case v.Op == ONAME && v.Class() == PFUNC:
- fn = v
- case v.Op == OCLOSURE:
- fn = v.Func.Closure.Func.Nname
+ var fn *ir.Name
+ switch call.Op() {
+ case ir.OCALLFUNC:
+ switch v := staticValue(call.Left()); {
+ case v.Op() == ir.ONAME && v.(*ir.Name).Class() == ir.PFUNC:
+ fn = v.(*ir.Name)
+ case v.Op() == ir.OCLOSURE:
+ fn = v.(*ir.ClosureExpr).Func().Nname
}
- case OCALLMETH:
- fn = asNode(call.Left.Type.FuncType().Nname)
+ case ir.OCALLMETH:
+ fn = methodExprName(call.Left())
}
- fntype := call.Left.Type
+ fntype := call.Left().Type()
if fn != nil {
- fntype = fn.Type
+ fntype = fn.Type()
}
if ks != nil && fn != nil && e.inMutualBatch(fn) {
- for i, result := range fn.Type.Results().FieldSlice() {
- e.expr(ks[i], asNode(result.Nname))
+ for i, result := range fn.Type().Results().FieldSlice() {
+ e.expr(ks[i], ir.AsNode(result.Nname))
}
}
if r := fntype.Recv(); r != nil {
- argument(e.tagHole(ks, fn, r), call.Left.Left)
+ argument(e.tagHole(ks, fn, r), call.Left().(*ir.SelectorExpr).Left())
} else {
// Evaluate callee function expression.
- argument(e.discardHole(), call.Left)
+ argument(e.discardHole(), call.Left())
}
- args := call.List.Slice()
+ args := call.List().Slice()
for i, param := range fntype.Params().FieldSlice() {
argument(e.tagHole(ks, fn, param), args[i])
}
- case OAPPEND:
- args := call.List.Slice()
+ case ir.OAPPEND:
+ call := call.(*ir.CallExpr)
+ args := call.List().Slice()
// Appendee slice may flow directly to the result, if
// it has enough capacity. Alternatively, a new heap
// slice might be allocated, and all slice elements
// might flow to heap.
appendeeK := ks[0]
- if args[0].Type.Elem().HasPointers() {
+ if args[0].Type().Elem().HasPointers() {
appendeeK = e.teeHole(appendeeK, e.heapHole().deref(call, "appendee slice"))
}
argument(appendeeK, args[0])
if call.IsDDD() {
appendedK := e.discardHole()
- if args[1].Type.IsSlice() && args[1].Type.Elem().HasPointers() {
+ if args[1].Type().IsSlice() && args[1].Type().Elem().HasPointers() {
appendedK = e.heapHole().deref(call, "appended slice...")
}
argument(appendedK, args[1])
}
}
- case OCOPY:
- argument(e.discardHole(), call.Left)
+ case ir.OCOPY:
+ call := call.(*ir.BinaryExpr)
+ argument(e.discardHole(), call.Left())
copiedK := e.discardHole()
- if call.Right.Type.IsSlice() && call.Right.Type.Elem().HasPointers() {
+ if call.Right().Type().IsSlice() && call.Right().Type().Elem().HasPointers() {
copiedK = e.heapHole().deref(call, "copied slice")
}
- argument(copiedK, call.Right)
-
- case OPANIC:
- argument(e.heapHole(), call.Left)
-
- case OCOMPLEX:
- argument(e.discardHole(), call.Left)
- argument(e.discardHole(), call.Right)
- case ODELETE, OPRINT, OPRINTN, ORECOVER:
- for _, arg := range call.List.Slice() {
+ argument(copiedK, call.Right())
+
+ case ir.OPANIC:
+ call := call.(*ir.UnaryExpr)
+ argument(e.heapHole(), call.Left())
+
+ case ir.OCOMPLEX:
+ call := call.(*ir.BinaryExpr)
+ argument(e.discardHole(), call.Left())
+ argument(e.discardHole(), call.Right())
+ case ir.ODELETE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
+ call := call.(*ir.CallExpr)
+ for _, arg := range call.List().Slice() {
argument(e.discardHole(), arg)
}
- case OLEN, OCAP, OREAL, OIMAG, OCLOSE:
- argument(e.discardHole(), call.Left)
+ case ir.OLEN, ir.OCAP, ir.OREAL, ir.OIMAG, ir.OCLOSE:
+ call := call.(*ir.UnaryExpr)
+ argument(e.discardHole(), call.Left())
}
}
// ks should contain the holes representing where the function
// callee's results flows. fn is the statically-known callee function,
// if any.
-func (e *Escape) tagHole(ks []EscHole, fn *Node, param *types.Field) EscHole {
+func (e *Escape) tagHole(ks []EscHole, fn *ir.Name, param *types.Field) EscHole {
// If this is a dynamic call, we can't rely on param.Note.
if fn == nil {
return e.heapHole()
}
if e.inMutualBatch(fn) {
- return e.addr(asNode(param.Nname))
+ return e.addr(ir.AsNode(param.Nname))
}
// Call to previously tagged function.
// fn has not yet been analyzed, so its parameters and results
// should be incorporated directly into the flow graph instead of
// relying on its escape analysis tagging.
-func (e *Escape) inMutualBatch(fn *Node) bool {
- if fn.Name.Defn != nil && fn.Name.Defn.Esc < EscFuncTagged {
- if fn.Name.Defn.Esc == EscFuncUnknown {
- Fatalf("graph inconsistency")
+func (e *Escape) inMutualBatch(fn *ir.Name) bool {
+ if fn.Defn != nil && fn.Defn.Esc() < EscFuncTagged {
+ if fn.Defn.Esc() == EscFuncUnknown {
+ base.Fatalf("graph inconsistency")
}
return true
}
type EscNote struct {
next *EscNote
- where *Node
+ where ir.Node
why string
}
-func (k EscHole) note(where *Node, why string) EscHole {
+func (k EscHole) note(where ir.Node, why string) EscHole {
if where == nil || why == "" {
- Fatalf("note: missing where/why")
+ base.Fatalf("note: missing where/why")
}
- if Debug.m >= 2 || logopt.Enabled() {
+ if base.Flag.LowerM >= 2 || logopt.Enabled() {
k.notes = &EscNote{
next: k.notes,
where: where,
func (k EscHole) shift(delta int) EscHole {
k.derefs += delta
if k.derefs < -1 {
- Fatalf("derefs underflow: %v", k.derefs)
+ base.Fatalf("derefs underflow: %v", k.derefs)
}
return k
}
-func (k EscHole) deref(where *Node, why string) EscHole { return k.shift(1).note(where, why) }
-func (k EscHole) addr(where *Node, why string) EscHole { return k.shift(-1).note(where, why) }
+func (k EscHole) deref(where ir.Node, why string) EscHole { return k.shift(1).note(where, why) }
+func (k EscHole) addr(where ir.Node, why string) EscHole { return k.shift(-1).note(where, why) }
-func (k EscHole) dotType(t *types.Type, where *Node, why string) EscHole {
+func (k EscHole) dotType(t *types.Type, where ir.Node, why string) EscHole {
if !t.IsInterface() && !isdirectiface(t) {
k = k.shift(1)
}
// *ltmp" and "l2 = ltmp" and return "ltmp = &_"
// instead.
if k.derefs < 0 {
- Fatalf("teeHole: negative derefs")
+ base.Fatalf("teeHole: negative derefs")
}
e.flow(k, loc)
return loc.asHole()
}
-func (e *Escape) dcl(n *Node) EscHole {
+func (e *Escape) dcl(n ir.Node) EscHole {
loc := e.oldLoc(n)
loc.loopDepth = e.loopDepth
return loc.asHole()
// spill allocates a new location associated with expression n, flows
// its address to k, and returns a hole that flows values to it. It's
// intended for use with most expressions that allocate storage.
-func (e *Escape) spill(k EscHole, n *Node) EscHole {
+func (e *Escape) spill(k EscHole, n ir.Node) EscHole {
loc := e.newLoc(n, true)
e.flow(k.addr(n, "spill"), loc)
return loc.asHole()
// canonicalNode returns the canonical *Node that n logically
// represents.
-func canonicalNode(n *Node) *Node {
- if n != nil && n.Op == ONAME && n.Name.IsClosureVar() {
- n = n.Name.Defn
- if n.Name.IsClosureVar() {
- Fatalf("still closure var")
+func canonicalNode(n ir.Node) ir.Node {
+ if n != nil && n.Op() == ir.ONAME && n.Name().IsClosureVar() {
+ n = n.Name().Defn
+ if n.Name().IsClosureVar() {
+ base.Fatalf("still closure var")
}
}
return n
}
-func (e *Escape) newLoc(n *Node, transient bool) *EscLocation {
+func (e *Escape) newLoc(n ir.Node, transient bool) *EscLocation {
if e.curfn == nil {
- Fatalf("e.curfn isn't set")
+ base.Fatalf("e.curfn isn't set")
}
- if n != nil && n.Type != nil && n.Type.NotInHeap() {
- yyerrorl(n.Pos, "%v is incomplete (or unallocatable); stack allocation disallowed", n.Type)
+ if n != nil && n.Type() != nil && n.Type().NotInHeap() {
+ base.ErrorfAt(n.Pos(), "%v is incomplete (or unallocatable); stack allocation disallowed", n.Type())
}
n = canonicalNode(n)
}
e.allLocs = append(e.allLocs, loc)
if n != nil {
- if n.Op == ONAME && n.Name.Curfn != e.curfn {
- Fatalf("curfn mismatch: %v != %v", n.Name.Curfn, e.curfn)
+ if n.Op() == ir.ONAME && n.Name().Curfn != e.curfn {
+ n := n.(*ir.Name)
+ base.Fatalf("curfn mismatch: %v != %v", n.Name().Curfn, e.curfn)
}
- if n.HasOpt() {
- Fatalf("%v already has a location", n)
+ if n.Opt() != nil {
+ base.Fatalf("%v already has a location", n)
}
n.SetOpt(loc)
return loc
}
-func (e *Escape) oldLoc(n *Node) *EscLocation {
+func (e *Escape) oldLoc(n ir.Node) *EscLocation {
n = canonicalNode(n)
return n.Opt().(*EscLocation)
}
return
}
if dst.escapes && k.derefs < 0 { // dst = &src
- if Debug.m >= 2 || logopt.Enabled() {
- pos := linestr(src.n.Pos)
- if Debug.m >= 2 {
+ if base.Flag.LowerM >= 2 || logopt.Enabled() {
+ pos := base.FmtPos(src.n.Pos())
+ if base.Flag.LowerM >= 2 {
fmt.Printf("%s: %v escapes to heap:\n", pos, src.n)
}
explanation := e.explainFlow(pos, dst, src, k.derefs, k.notes, []*logopt.LoggedOpt{})
if logopt.Enabled() {
- logopt.LogOpt(src.n.Pos, "escapes", "escape", e.curfn.funcname(), fmt.Sprintf("%v escapes to heap", src.n), explanation)
+ logopt.LogOpt(src.n.Pos(), "escapes", "escape", ir.FuncName(e.curfn), fmt.Sprintf("%v escapes to heap", src.n), explanation)
}
}
l := todo[len(todo)-1]
todo = todo[:len(todo)-1]
- base := l.derefs
+ derefs := l.derefs
// If l.derefs < 0, then l's address flows to root.
- addressOf := base < 0
+ addressOf := derefs < 0
if addressOf {
// For a flow path like "root = &l; l = x",
// l's address flows to root, but x's does
// not. We recognize this by lower bounding
- // base at 0.
- base = 0
+ // derefs at 0.
+ derefs = 0
// If l's address flows to a non-transient
// location, then l can't be transiently
// corresponding result parameter, then record
// that value flow for tagging the function
// later.
- if l.isName(PPARAM) {
- if (logopt.Enabled() || Debug.m >= 2) && !l.escapes {
- if Debug.m >= 2 {
- fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", linestr(l.n.Pos), l.n, e.explainLoc(root), base)
+ if l.isName(ir.PPARAM) {
+ if (logopt.Enabled() || base.Flag.LowerM >= 2) && !l.escapes {
+ if base.Flag.LowerM >= 2 {
+ fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", base.FmtPos(l.n.Pos()), l.n, e.explainLoc(root), derefs)
}
explanation := e.explainPath(root, l)
if logopt.Enabled() {
- logopt.LogOpt(l.n.Pos, "leak", "escape", e.curfn.funcname(),
- fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, e.explainLoc(root), base), explanation)
+ logopt.LogOpt(l.n.Pos(), "leak", "escape", ir.FuncName(e.curfn),
+ fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, e.explainLoc(root), derefs), explanation)
}
}
- l.leakTo(root, base)
+ l.leakTo(root, derefs)
}
// If l's address flows somewhere that
// outlives it, then l needs to be heap
// allocated.
if addressOf && !l.escapes {
- if logopt.Enabled() || Debug.m >= 2 {
- if Debug.m >= 2 {
- fmt.Printf("%s: %v escapes to heap:\n", linestr(l.n.Pos), l.n)
+ if logopt.Enabled() || base.Flag.LowerM >= 2 {
+ if base.Flag.LowerM >= 2 {
+ fmt.Printf("%s: %v escapes to heap:\n", base.FmtPos(l.n.Pos()), l.n)
}
explanation := e.explainPath(root, l)
if logopt.Enabled() {
- logopt.LogOpt(l.n.Pos, "escape", "escape", e.curfn.funcname(), fmt.Sprintf("%v escapes to heap", l.n), explanation)
+ logopt.LogOpt(l.n.Pos(), "escape", "escape", ir.FuncName(e.curfn), fmt.Sprintf("%v escapes to heap", l.n), explanation)
}
}
l.escapes = true
if edge.src.escapes {
continue
}
- derefs := base + edge.derefs
- if edge.src.walkgen != walkgen || edge.src.derefs > derefs {
+ d := derefs + edge.derefs
+ if edge.src.walkgen != walkgen || edge.src.derefs > d {
edge.src.walkgen = walkgen
- edge.src.derefs = derefs
+ edge.src.derefs = d
edge.src.dst = l
edge.src.dstEdgeIdx = i
todo = append(todo, edge.src)
// explainPath prints an explanation of how src flows to the walk root.
func (e *Escape) explainPath(root, src *EscLocation) []*logopt.LoggedOpt {
visited := make(map[*EscLocation]bool)
- pos := linestr(src.n.Pos)
+ pos := base.FmtPos(src.n.Pos())
var explanation []*logopt.LoggedOpt
for {
// Prevent infinite loop.
if visited[src] {
- if Debug.m >= 2 {
+ if base.Flag.LowerM >= 2 {
fmt.Printf("%s: warning: truncated explanation due to assignment cycle; see golang.org/issue/35518\n", pos)
}
break
dst := src.dst
edge := &dst.edges[src.dstEdgeIdx]
if edge.src != src {
- Fatalf("path inconsistency: %v != %v", edge.src, src)
+ base.Fatalf("path inconsistency: %v != %v", edge.src, src)
}
explanation = e.explainFlow(pos, dst, src, edge.derefs, edge.notes, explanation)
if derefs >= 0 {
ops = strings.Repeat("*", derefs)
}
- print := Debug.m >= 2
+ print := base.Flag.LowerM >= 2
flow := fmt.Sprintf(" flow: %s = %s%v:", e.explainLoc(dst), ops, e.explainLoc(srcloc))
if print {
if logopt.Enabled() {
var epos src.XPos
if notes != nil {
- epos = notes.where.Pos
+ epos = notes.where.Pos()
} else if srcloc != nil && srcloc.n != nil {
- epos = srcloc.n.Pos
+ epos = srcloc.n.Pos()
}
- explanation = append(explanation, logopt.NewLoggedOpt(epos, "escflow", "escape", e.curfn.funcname(), flow))
+ explanation = append(explanation, logopt.NewLoggedOpt(epos, "escflow", "escape", ir.FuncName(e.curfn), flow))
}
for note := notes; note != nil; note = note.next {
if print {
- fmt.Printf("%s: from %v (%v) at %s\n", pos, note.where, note.why, linestr(note.where.Pos))
+ fmt.Printf("%s: from %v (%v) at %s\n", pos, note.where, note.why, base.FmtPos(note.where.Pos()))
}
if logopt.Enabled() {
- explanation = append(explanation, logopt.NewLoggedOpt(note.where.Pos, "escflow", "escape", e.curfn.funcname(),
+ explanation = append(explanation, logopt.NewLoggedOpt(note.where.Pos(), "escflow", "escape", ir.FuncName(e.curfn),
fmt.Sprintf(" from %v (%v)", note.where, note.why)))
}
}
// TODO(mdempsky): Omit entirely.
return "{temp}"
}
- if l.n.Op == ONAME {
+ if l.n.Op() == ir.ONAME {
return fmt.Sprintf("%v", l.n)
}
return fmt.Sprintf("{storage for %v}", l.n)
// We don't know what callers do with returned values, so
// pessimistically we need to assume they flow to the heap and
// outlive everything too.
- if l.isName(PPARAMOUT) {
+ if l.isName(ir.PPARAMOUT) {
// Exception: Directly called closures can return
// locations allocated outside of them without forcing
// them to the heap. For example:
//
// var u int // okay to stack allocate
// *(func() *int { return &u }()) = 42
- if containsClosure(other.curfn, l.curfn) && l.curfn.Func.Closure.Func.Top&ctxCallee != 0 {
+ if containsClosure(other.curfn, l.curfn) && l.curfn.ClosureCalled() {
return false
}
}
// containsClosure reports whether c is a closure contained within f.
-func containsClosure(f, c *Node) bool {
- if f.Op != ODCLFUNC || c.Op != ODCLFUNC {
- Fatalf("bad containsClosure: %v, %v", f, c)
- }
-
+func containsClosure(f, c *ir.Func) bool {
// Common case.
if f == c {
return false
// Closures within function Foo are named like "Foo.funcN..."
// TODO(mdempsky): Better way to recognize this.
- fn := f.Func.Nname.Sym.Name
- cn := c.Func.Nname.Sym.Name
+ fn := f.Sym().Name
+ cn := c.Sym().Name
return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.'
}
func (l *EscLocation) leakTo(sink *EscLocation, derefs int) {
// If sink is a result parameter and we can fit return bits
// into the escape analysis tag, then record a return leak.
- if sink.isName(PPARAMOUT) && sink.curfn == l.curfn {
+ if sink.isName(ir.PPARAMOUT) && sink.curfn == l.curfn {
// TODO(mdempsky): Eliminate dependency on Vargen here.
- ri := int(sink.n.Name.Vargen) - 1
+ ri := int(sink.n.Name().Vargen) - 1
if ri < numEscResults {
// Leak to result parameter.
l.paramEsc.AddResult(ri, derefs)
l.paramEsc.AddHeap(derefs)
}
-func (e *Escape) finish(fns []*Node) {
+func (e *Escape) finish(fns []*ir.Func) {
// Record parameter tags for package export data.
for _, fn := range fns {
- fn.Esc = EscFuncTagged
+ fn.SetEsc(EscFuncTagged)
narg := 0
for _, fs := range &types.RecvsParams {
- for _, f := range fs(fn.Type).Fields().Slice() {
+ for _, f := range fs(fn.Type()).Fields().Slice() {
narg++
f.Note = e.paramTag(fn, narg, f)
}
// Update n.Esc based on escape analysis results.
if loc.escapes {
- if n.Op != ONAME {
- if Debug.m != 0 {
- Warnl(n.Pos, "%S escapes to heap", n)
+ if n.Op() != ir.ONAME {
+ if base.Flag.LowerM != 0 {
+ base.WarnfAt(n.Pos(), "%v escapes to heap", n)
}
if logopt.Enabled() {
- logopt.LogOpt(n.Pos, "escape", "escape", e.curfn.funcname())
+ logopt.LogOpt(n.Pos(), "escape", "escape", ir.FuncName(e.curfn))
}
}
- n.Esc = EscHeap
+ n.SetEsc(EscHeap)
addrescapes(n)
} else {
- if Debug.m != 0 && n.Op != ONAME {
- Warnl(n.Pos, "%S does not escape", n)
+ if base.Flag.LowerM != 0 && n.Op() != ir.ONAME {
+ base.WarnfAt(n.Pos(), "%v does not escape", n)
}
- n.Esc = EscNone
+ n.SetEsc(EscNone)
if loc.transient {
- n.SetTransient(true)
+ switch n.Op() {
+ case ir.OCLOSURE:
+ n := n.(*ir.ClosureExpr)
+ n.SetTransient(true)
+ case ir.OCALLPART:
+ n := n.(*ir.CallPartExpr)
+ n.SetTransient(true)
+ case ir.OSLICELIT:
+ n := n.(*ir.CompLitExpr)
+ n.SetTransient(true)
+ }
}
}
}
}
-func (l *EscLocation) isName(c Class) bool {
- return l.n != nil && l.n.Op == ONAME && l.n.Class() == c
+func (l *EscLocation) isName(c ir.Class) bool {
+ return l.n != nil && l.n.Op() == ir.ONAME && l.n.(*ir.Name).Class() == c
}
const numEscResults = 7
func (l *EscLeaks) set(i, derefs int) {
v := derefs + 1
if v < 0 {
- Fatalf("invalid derefs count: %v", derefs)
+ base.Fatalf("invalid derefs count: %v", derefs)
}
if v > math.MaxUint8 {
v = math.MaxUint8
copy(l[:], s[4:])
return l
}
+
+// escapes performs escape analysis on the given top-level declarations,
+// visiting functions bottom-up over the call graph (via visitBottomUp,
+// with escapeFuncs applied to each strongly connected batch).
+func escapes(all []ir.Node) {
+ visitBottomUp(all, escapeFuncs)
+}
+
+// Progress states for escape analysis of a single function.
+// Escape.finish marks fully analyzed functions as EscFuncTagged.
+const (
+ EscFuncUnknown = 0 + iota
+ EscFuncPlanned
+ EscFuncStarted
+ EscFuncTagged // parameter tags recorded for export data (see Escape.finish)
+)
+
+// min8 returns the smaller of the two int8 values a and b.
+func min8(a, b int8) int8 {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+// max8 returns the larger of the two int8 values a and b.
+func max8(a, b int8) int8 {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+// Per-node escape results, recorded via Node.SetEsc (see Escape.finish
+// and moveToHeap).
+const (
+ EscUnknown = iota
+ EscNone // Does not escape to heap, result, or parameters.
+ EscHeap // Reachable from the heap
+ EscNever // By construction will not escape.
+)
+
+// funcSym returns fn.Nname.Sym if no nils are encountered along the way.
+func funcSym(fn *ir.Func) *types.Sym {
+ if fn == nil || fn.Nname == nil {
+ return nil
+ }
+ return fn.Sym() // fn.Sym resolves through fn.Nname, which is non-nil here
+}
+
+// Mark labels that have no backjumps to them as not increasing e.loopdepth.
+type labelState int
+
+const (
+ looping labelState = 1 + iota // label may be the target of a backward jump
+ nonlooping // label has no backward jumps to it
+)
+
+// isSliceSelfAssign reports whether the assignment dst = src is a
+// self-slicing of the form x.buf = x.buf[n:m] (with x an ONAME
+// dereference on both sides), which escape analysis may ignore.
+func isSliceSelfAssign(dst, src ir.Node) bool {
+ // Detect the following special case.
+ //
+ // func (b *Buffer) Foo() {
+ // n, m := ...
+ // b.buf = b.buf[n:m]
+ // }
+ //
+ // This assignment is a no-op for escape analysis,
+ // it does not store any new pointers into b that were not already there.
+ // However, without this special case b will escape, because we assign to OIND/ODOTPTR.
+ // Here we assume that the statement will not contain calls,
+ // that is, that order will move any calls to init.
+ // Otherwise base ONAME value could change between the moments
+ // when we evaluate it for dst and for src.
+
+ // dst is ONAME dereference.
+ var dstX ir.Node
+ switch dst.Op() {
+ default:
+ return false
+ case ir.ODEREF:
+ dst := dst.(*ir.StarExpr)
+ dstX = dst.Left()
+ case ir.ODOTPTR:
+ dst := dst.(*ir.SelectorExpr)
+ dstX = dst.Left()
+ }
+ if dstX.Op() != ir.ONAME {
+ return false
+ }
+ // src is a slice operation.
+ switch src.Op() {
+ case ir.OSLICE, ir.OSLICE3, ir.OSLICESTR:
+ // OK.
+ case ir.OSLICEARR, ir.OSLICE3ARR:
+ // Since arrays are embedded into containing object,
+ // slice of non-pointer array will introduce a new pointer into b that was not already there
+ // (pointer to b itself). After such assignment, if b contents escape,
+ // b escapes as well. If we ignore such OSLICEARR, we will conclude
+ // that b does not escape when b contents do.
+ //
+ // Pointer to an array is OK since it's not stored inside b directly.
+ // For slicing an array (not pointer to array), there is an implicit OADDR.
+ // We check that to determine non-pointer array slicing.
+ src := src.(*ir.SliceExpr)
+ if src.Left().Op() == ir.OADDR {
+ return false
+ }
+ default:
+ return false
+ }
+ // slice is applied to ONAME dereference.
+ var baseX ir.Node
+ switch base := src.(*ir.SliceExpr).Left(); base.Op() {
+ default:
+ return false
+ case ir.ODEREF:
+ base := base.(*ir.StarExpr)
+ baseX = base.Left()
+ case ir.ODOTPTR:
+ base := base.(*ir.SelectorExpr)
+ baseX = base.Left()
+ }
+ if baseX.Op() != ir.ONAME {
+ return false
+ }
+ // dst and src reference the same base ONAME.
+ return dstX.(*ir.Name) == baseX.(*ir.Name)
+}
+
+// isSelfAssign reports whether assignment from src to dst can
+// be ignored by the escape analysis as it's effectively a self-assignment.
+func isSelfAssign(dst, src ir.Node) bool {
+ if isSliceSelfAssign(dst, src) {
+ return true
+ }
+
+ // Detect trivial assignments that assign back to the same object.
+ //
+ // It covers these cases:
+ // val.x = val.y
+ // val.x[i] = val.y[j]
+ // val.x1.x2 = val.x1.y2
+ // ... etc
+ //
+ // These assignments do not change assigned object lifetime.
+
+ if dst == nil || src == nil || dst.Op() != src.Op() {
+ return false
+ }
+
+ // The expression prefix must be both "safe" and identical.
+ switch dst.Op() {
+ case ir.ODOT, ir.ODOTPTR:
+ // Safe trailing accessors that are permitted to differ.
+ dst := dst.(*ir.SelectorExpr)
+ src := src.(*ir.SelectorExpr)
+ return samesafeexpr(dst.Left(), src.Left())
+ case ir.OINDEX:
+ dst := dst.(*ir.IndexExpr)
+ src := src.(*ir.IndexExpr)
+ // The index expressions themselves must also be free of
+ // memory side effects for the assignment to be a no-op.
+ if mayAffectMemory(dst.Right()) || mayAffectMemory(src.Right()) {
+ return false
+ }
+ return samesafeexpr(dst.Left(), src.Left())
+ default:
+ return false
+ }
+}
+
+// mayAffectMemory reports whether evaluation of n may affect the program's
+// memory state. If the expression can't affect memory state, then it can be
+// safely ignored by the escape analysis.
+func mayAffectMemory(n ir.Node) bool {
+ // We may want to use a list of "memory safe" ops instead of generally
+ // "side-effect free", which would include all calls and other ops that can
+ // allocate or change global state. For now, it's safer to start with the latter.
+ //
+ // We're ignoring things like division by zero, index out of range,
+ // and nil pointer dereference here.
+
+ // TODO(rsc): It seems like it should be possible to replace this with
+ // an ir.Any looking for any op that's not the ones in the case statement.
+ // But that produces changes in the compiled output detected by buildall.
+ switch n.Op() {
+ case ir.ONAME, ir.OCLOSUREREAD, ir.OLITERAL, ir.ONIL:
+ return false
+
+ case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD:
+ n := n.(*ir.BinaryExpr)
+ return mayAffectMemory(n.Left()) || mayAffectMemory(n.Right())
+
+ case ir.OINDEX:
+ n := n.(*ir.IndexExpr)
+ return mayAffectMemory(n.Left()) || mayAffectMemory(n.Right())
+
+ case ir.OCONVNOP, ir.OCONV:
+ n := n.(*ir.ConvExpr)
+ return mayAffectMemory(n.Left())
+
+ case ir.OLEN, ir.OCAP, ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
+ n := n.(*ir.UnaryExpr)
+ return mayAffectMemory(n.Left())
+
+ case ir.ODOT, ir.ODOTPTR:
+ n := n.(*ir.SelectorExpr)
+ return mayAffectMemory(n.Left())
+
+ case ir.ODEREF:
+ n := n.(*ir.StarExpr)
+ return mayAffectMemory(n.Left())
+
+ // Any op not whitelisted above is conservatively assumed to
+ // affect memory.
+ default:
+ return true
+ }
+}
+
+// heapAllocReason returns the reason the given Node must be heap
+// allocated, or the empty string if it doesn't.
+func heapAllocReason(n ir.Node) string {
+ if n.Type() == nil {
+ return ""
+ }
+
+ // Parameters are always passed via the stack.
+ if n.Op() == ir.ONAME {
+ n := n.(*ir.Name)
+ if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT {
+ return ""
+ }
+ }
+
+ if n.Type().Width > maxStackVarSize {
+ return "too large for stack"
+ }
+
+ // Implicit allocations (new, &T{}, closures, method values) use the
+ // smaller implicit-stack-variable limit.
+ if (n.Op() == ir.ONEW || n.Op() == ir.OPTRLIT) && n.Type().Elem().Width >= maxImplicitStackVarSize {
+ return "too large for stack"
+ }
+
+ if n.Op() == ir.OCLOSURE && closureType(n.(*ir.ClosureExpr)).Size() >= maxImplicitStackVarSize {
+ return "too large for stack"
+ }
+ if n.Op() == ir.OCALLPART && partialCallType(n.(*ir.CallPartExpr)).Size() >= maxImplicitStackVarSize {
+ return "too large for stack"
+ }
+
+ if n.Op() == ir.OMAKESLICE {
+ n := n.(*ir.MakeExpr)
+ // r is the second make() argument if present, otherwise the
+ // first (presumably cap then len — TODO(review): confirm operand order).
+ r := n.Right()
+ if r == nil {
+ r = n.Left()
+ }
+ if !smallintconst(r) {
+ return "non-constant size"
+ }
+ if t := n.Type(); t.Elem().Width != 0 && ir.Int64Val(r) >= maxImplicitStackVarSize/t.Elem().Width {
+ return "too large for stack"
+ }
+ }
+
+ return ""
+}
+
+// addrescapes tags node n as having had its address taken
+// by "increasing" the "value" of n.Esc to EscHeap.
+// Storage is allocated as necessary to allow the address
+// to be taken.
+func addrescapes(n ir.Node) {
+ switch n.Op() {
+ default:
+ // Unexpected Op, probably due to a previous type error. Ignore.
+
+ case ir.ODEREF, ir.ODOTPTR:
+ // Nothing to do.
+
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ // NOTE(review): nodfp appears to be a special frame pseudo-variable
+ // that must never move to the heap — confirm against its declaration.
+ if n == nodfp {
+ break
+ }
+
+ // if this is a tmpname (PAUTO), it was tagged by tmpname as not escaping.
+ // on PPARAM it means something different.
+ if n.Class() == ir.PAUTO && n.Esc() == EscNever {
+ break
+ }
+
+ // If a closure reference escapes, mark the outer variable as escaping.
+ if n.IsClosureVar() {
+ addrescapes(n.Defn)
+ break
+ }
+
+ if n.Class() != ir.PPARAM && n.Class() != ir.PPARAMOUT && n.Class() != ir.PAUTO {
+ break
+ }
+
+ // This is a plain parameter or local variable that needs to move to the heap,
+ // but possibly for the function outside the one we're compiling.
+ // That is, if we have:
+ //
+ // func f(x int) {
+ // func() {
+ // global = &x
+ // }
+ // }
+ //
+ // then we're analyzing the inner closure but we need to move x to the
+ // heap in f, not in the inner closure. Flip over to f before calling moveToHeap.
+ oldfn := Curfn
+ Curfn = n.Curfn
+ ln := base.Pos
+ base.Pos = Curfn.Pos()
+ moveToHeap(n)
+ Curfn = oldfn
+ base.Pos = ln
+
+ // ODOTPTR has already been introduced,
+ // so these are the non-pointer ODOT and OINDEX.
+ // In &x[0], if x is a slice, then x does not
+ // escape--the pointer inside x does, but that
+ // is always a heap pointer anyway.
+ case ir.ODOT:
+ n := n.(*ir.SelectorExpr)
+ addrescapes(n.Left())
+ case ir.OINDEX:
+ n := n.(*ir.IndexExpr)
+ if !n.Left().Type().IsSlice() {
+ addrescapes(n.Left())
+ }
+ // Address-taking looks through parentheses and no-op conversions.
+ case ir.OPAREN:
+ n := n.(*ir.ParenExpr)
+ addrescapes(n.Left())
+ case ir.OCONVNOP:
+ n := n.(*ir.ConvExpr)
+ addrescapes(n.Left())
+ }
+}
+
+// moveToHeap records the parameter or local variable n as moved to the heap.
+func moveToHeap(n *ir.Name) {
+ // Debugging: -r dumps the node being moved.
+ if base.Flag.LowerR != 0 {
+ ir.Dump("MOVE", n)
+ }
+ if base.Flag.CompilingRuntime {
+ base.Errorf("%v escapes to heap, not allowed in runtime", n)
+ }
+ if n.Class() == ir.PAUTOHEAP {
+ ir.Dump("n", n)
+ base.Fatalf("double move to heap")
+ }
+
+ // Allocate a local stack variable to hold the pointer to the heap copy.
+ // temp will add it to the function declaration list automatically.
+ heapaddr := temp(types.NewPtr(n.Type()))
+ heapaddr.SetSym(lookup("&" + n.Sym().Name))
+ heapaddr.SetPos(n.Pos())
+
+ // Unset AutoTemp to persist the &foo variable name through SSA to
+ // liveness analysis.
+ // TODO(mdempsky/drchase): Cleaner solution?
+ heapaddr.SetAutoTemp(false)
+
+ // Parameters have a local stack copy used at function start/end
+ // in addition to the copy in the heap that may live longer than
+ // the function.
+ if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT {
+ if n.FrameOffset() == types.BADWIDTH {
+ base.Fatalf("addrescapes before param assignment")
+ }
+
+ // We rewrite n below to be a heap variable (indirection of heapaddr).
+ // Preserve a copy so we can still write code referring to the original,
+ // and substitute that copy into the function declaration list
+ // so that analyses of the local (on-stack) variables use it.
+ stackcopy := NewName(n.Sym())
+ stackcopy.SetType(n.Type())
+ stackcopy.SetFrameOffset(n.FrameOffset())
+ stackcopy.SetClass(n.Class())
+ stackcopy.Heapaddr = heapaddr
+ if n.Class() == ir.PPARAMOUT {
+ // Make sure the pointer to the heap copy is kept live throughout the function.
+ // The function could panic at any point, and then a defer could recover.
+ // Thus, we need the pointer to the heap copy always available so the
+ // post-deferreturn code can copy the return value back to the stack.
+ // See issue 16095.
+ heapaddr.SetIsOutputParamHeapAddr(true)
+ }
+ n.Stackcopy = stackcopy
+
+ // Substitute the stackcopy into the function variable list so that
+ // liveness and other analyses use the underlying stack slot
+ // and not the now-pseudo-variable n.
+ found := false
+ for i, d := range Curfn.Dcl {
+ if d == n {
+ Curfn.Dcl[i] = stackcopy
+ found = true
+ break
+ }
+ // Parameters are before locals, so can stop early.
+ // This limits the search even in functions with many local variables.
+ if d.Class() == ir.PAUTO {
+ break
+ }
+ }
+ if !found {
+ base.Fatalf("cannot find %v in local variable list", n)
+ }
+ Curfn.Dcl = append(Curfn.Dcl, n)
+ }
+
+ // Modify n in place so that uses of n now mean indirection of the heapaddr.
+ n.SetClass(ir.PAUTOHEAP)
+ n.SetFrameOffset(0)
+ n.Heapaddr = heapaddr
+ n.SetEsc(EscHeap)
+ if base.Flag.LowerM != 0 {
+ base.WarnfAt(n.Pos(), "moved to heap: %v", n)
+ }
+}
+
+// This special tag is applied to uintptr variables
+// that we believe may hold unsafe.Pointers for
+// calls into assembly functions.
+// It is returned by Escape.paramTag for uintptr arguments of body-less functions.
+const unsafeUintptrTag = "unsafe-uintptr"
+
+// This special tag is applied to uintptr parameters of functions
+// marked go:uintptrescapes.
+// It is returned by Escape.paramTag.
+const uintptrEscapesTag = "uintptr-escapes"
+
+// paramTag returns the escape-analysis tag to record in export data for
+// the narg'th parameter f of fn (Escape.finish stores the result in
+// f.Note), or "" when no tag is needed.
+func (e *Escape) paramTag(fn *ir.Func, narg int, f *types.Field) string {
+ // name returns a human-readable identifier for f, for -m diagnostics.
+ name := func() string {
+ if f.Sym != nil {
+ return f.Sym.Name
+ }
+ return fmt.Sprintf("arg#%d", narg)
+ }
+
+ // Functions with no body (e.g. assembly or external declarations)
+ // get conservative tags, since there is nothing to analyze.
+ if fn.Body().Len() == 0 {
+ // Assume that uintptr arguments must be held live across the call.
+ // This is most important for syscall.Syscall.
+ // See golang.org/issue/13372.
+ // This really doesn't have much to do with escape analysis per se,
+ // but we are reusing the ability to annotate an individual function
+ // argument and pass those annotations along to importing code.
+ if f.Type.IsUintptr() {
+ if base.Flag.LowerM != 0 {
+ base.WarnfAt(f.Pos, "assuming %v is unsafe uintptr", name())
+ }
+ return unsafeUintptrTag
+ }
+
+ if !f.Type.HasPointers() { // don't bother tagging for scalars
+ return ""
+ }
+
+ var esc EscLeaks
+
+ // External functions are assumed unsafe, unless
+ // //go:noescape is given before the declaration.
+ if fn.Func().Pragma&ir.Noescape != 0 {
+ if base.Flag.LowerM != 0 && f.Sym != nil {
+ base.WarnfAt(f.Pos, "%v does not escape", name())
+ }
+ } else {
+ if base.Flag.LowerM != 0 && f.Sym != nil {
+ base.WarnfAt(f.Pos, "leaking param: %v", name())
+ }
+ esc.AddHeap(0)
+ }
+
+ return esc.Encode()
+ }
+
+ if fn.Func().Pragma&ir.UintptrEscapes != 0 {
+ if f.Type.IsUintptr() {
+ if base.Flag.LowerM != 0 {
+ base.WarnfAt(f.Pos, "marking %v as escaping uintptr", name())
+ }
+ return uintptrEscapesTag
+ }
+ if f.IsDDD() && f.Type.Elem().IsUintptr() {
+ // final argument is ...uintptr.
+ if base.Flag.LowerM != 0 {
+ base.WarnfAt(f.Pos, "marking %v as escaping ...uintptr", name())
+ }
+ return uintptrEscapesTag
+ }
+ }
+
+ if !f.Type.HasPointers() { // don't bother tagging for scalars
+ return ""
+ }
+
+ // Unnamed parameters are unused and therefore do not escape.
+ if f.Sym == nil || f.Sym.IsBlank() {
+ var esc EscLeaks
+ return esc.Encode()
+ }
+
+ // Normal case: report the leaks recorded for the parameter's location
+ // during analysis.
+ n := ir.AsNode(f.Nname)
+ loc := e.oldLoc(n)
+ esc := loc.paramEsc
+ esc.Optimize()
+
+ if base.Flag.LowerM != 0 && !loc.escapes {
+ if esc.Empty() {
+ base.WarnfAt(f.Pos, "%v does not escape", name())
+ }
+ if x := esc.Heap(); x >= 0 {
+ if x == 0 {
+ base.WarnfAt(f.Pos, "leaking param: %v", name())
+ } else {
+ // TODO(mdempsky): Mention level=x like below?
+ base.WarnfAt(f.Pos, "leaking param content: %v", name())
+ }
+ }
+ for i := 0; i < numEscResults; i++ {
+ if x := esc.Result(i); x >= 0 {
+ res := fn.Type().Results().Field(i).Sym
+ base.WarnfAt(f.Pos, "leaking param: %v to result %v level=%d", name(), res, x)
+ }
+ }
+ }
+
+ return esc.Encode()
+}
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/bio"
"cmd/internal/src"
"fmt"
-)
-
-var (
- Debug_export int // if set, print debugging information about export data
+ "go/constant"
)
func exportf(bout *bio.Writer, format string, args ...interface{}) {
fmt.Fprintf(bout, format, args...)
- if Debug_export != 0 {
+ if base.Debug.Export != 0 {
fmt.Printf(format, args...)
}
}
-var asmlist []*Node
-
// exportsym marks n for export (or reexport).
-func exportsym(n *Node) {
- if n.Sym.OnExportList() {
+func exportsym(n *ir.Name) {
+ if n.Sym().OnExportList() {
return
}
- n.Sym.SetOnExportList(true)
+ n.Sym().SetOnExportList(true)
- if Debug.E != 0 {
- fmt.Printf("export symbol %v\n", n.Sym)
+ if base.Flag.E != 0 {
+ fmt.Printf("export symbol %v\n", n.Sym())
}
- exportlist = append(exportlist, n)
+ Target.Exports = append(Target.Exports, n)
}
func initname(s string) bool {
return s == "init"
}
-func autoexport(n *Node, ctxt Class) {
- if n.Sym.Pkg != localpkg {
+func autoexport(n *ir.Name, ctxt ir.Class) {
+ if n.Sym().Pkg != types.LocalPkg {
return
}
- if (ctxt != PEXTERN && ctxt != PFUNC) || dclcontext != PEXTERN {
+ if (ctxt != ir.PEXTERN && ctxt != ir.PFUNC) || dclcontext != ir.PEXTERN {
return
}
- if n.Type != nil && n.Type.IsKind(TFUNC) && n.IsMethod() {
+ if n.Type() != nil && n.Type().IsKind(types.TFUNC) && ir.IsMethod(n) {
return
}
- if types.IsExported(n.Sym.Name) || initname(n.Sym.Name) {
+ if types.IsExported(n.Sym().Name) || initname(n.Sym().Name) {
exportsym(n)
}
- if asmhdr != "" && !n.Sym.Asm() {
- n.Sym.SetAsm(true)
- asmlist = append(asmlist, n)
+ if base.Flag.AsmHdr != "" && !n.Sym().Asm() {
+ n.Sym().SetAsm(true)
+ Target.Asms = append(Target.Asms, n)
}
}
func dumpexport(bout *bio.Writer) {
+ p := &exporter{marked: make(map[*types.Type]bool)}
+ for _, n := range Target.Exports {
+ p.markObject(n)
+ }
+
// The linker also looks for the $$ marker - use char after $$ to distinguish format.
exportf(bout, "\n$$B\n") // indicate binary export format
off := bout.Offset()
size := bout.Offset() - off
exportf(bout, "\n$$\n")
- if Debug_export != 0 {
- fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", myimportpath, size)
+ if base.Debug.Export != 0 {
+ fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", base.Ctxt.Pkgpath, size)
}
}
-func importsym(ipkg *types.Pkg, s *types.Sym, op Op) *Node {
- n := asNode(s.PkgDef())
- if n == nil {
- // iimport should have created a stub ONONAME
- // declaration for all imported symbols. The exception
- // is declarations for Runtimepkg, which are populated
- // by loadsys instead.
- if s.Pkg != Runtimepkg {
- Fatalf("missing ONONAME for %v\n", s)
- }
-
- n = dclname(s)
- s.SetPkgDef(asTypesNode(n))
- s.Importdef = ipkg
- }
- if n.Op != ONONAME && n.Op != op {
- redeclare(lineno, s, fmt.Sprintf("during import %q", ipkg.Path))
+func importsym(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class) *ir.Name {
+ if n := s.PkgDef(); n != nil {
+ base.Fatalf("importsym of symbol that already exists: %v", n)
}
+
+ n := ir.NewDeclNameAt(pos, op, s)
+ n.SetClass(ctxt) // TODO(mdempsky): Move this into NewDeclNameAt too?
+ s.SetPkgDef(n)
+ s.Importdef = ipkg
return n
}
// importtype returns the named type declared by symbol s.
// If no such type has been declared yet, a forward declaration is returned.
// ipkg is the package being imported
-func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *types.Type {
- n := importsym(ipkg, s, OTYPE)
- if n.Op != OTYPE {
- t := types.New(TFORW)
- t.Sym = s
- t.Nod = asTypesNode(n)
-
- n.Op = OTYPE
- n.Pos = pos
- n.Type = t
- n.SetClass(PEXTERN)
- }
-
- t := n.Type
- if t == nil {
- Fatalf("importtype %v", s)
- }
- return t
+func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *ir.Name {
+ n := importsym(ipkg, pos, s, ir.OTYPE, ir.PEXTERN)
+ n.SetType(types.NewNamed(n))
+ return n
}
// importobj declares symbol s as an imported object representable by op.
// ipkg is the package being imported
-func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op Op, ctxt Class, t *types.Type) *Node {
- n := importsym(ipkg, s, op)
- if n.Op != ONONAME {
- if n.Op == op && (n.Class() != ctxt || !types.Identical(n.Type, t)) {
- redeclare(lineno, s, fmt.Sprintf("during import %q", ipkg.Path))
- }
- return nil
- }
-
- n.Op = op
- n.Pos = pos
- n.SetClass(ctxt)
- if ctxt == PFUNC {
- n.Sym.SetFunc(true)
+func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class, t *types.Type) *ir.Name {
+ n := importsym(ipkg, pos, s, op, ctxt)
+ n.SetType(t)
+ if ctxt == ir.PFUNC {
+ n.Sym().SetFunc(true)
}
- n.Type = t
return n
}
// importconst declares symbol s as an imported constant with type t and value val.
// ipkg is the package being imported
-func importconst(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type, val Val) {
- n := importobj(ipkg, pos, s, OLITERAL, PEXTERN, t)
- if n == nil { // TODO: Check that value matches.
- return
- }
-
+func importconst(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type, val constant.Value) *ir.Name {
+ n := importobj(ipkg, pos, s, ir.OLITERAL, ir.PEXTERN, t)
n.SetVal(val)
-
- if Debug.E != 0 {
- fmt.Printf("import const %v %L = %v\n", s, t, val)
- }
+ return n
}
// importfunc declares symbol s as an imported function with type t.
// ipkg is the package being imported
-func importfunc(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
- n := importobj(ipkg, pos, s, ONAME, PFUNC, t)
- if n == nil {
- return
- }
+func importfunc(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
+ n := importobj(ipkg, pos, s, ir.ONAME, ir.PFUNC, t)
- n.Func = new(Func)
- t.SetNname(asTypesNode(n))
+ fn := ir.NewFunc(pos)
+ fn.SetType(t)
+ n.SetFunc(fn)
+ fn.Nname = n
- if Debug.E != 0 {
- fmt.Printf("import func %v%S\n", s, t)
- }
+ return n
}
// importvar declares symbol s as an imported variable with type t.
// ipkg is the package being imported
-func importvar(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
- n := importobj(ipkg, pos, s, ONAME, PEXTERN, t)
- if n == nil {
- return
- }
-
- if Debug.E != 0 {
- fmt.Printf("import var %v %L\n", s, t)
- }
+func importvar(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
+ return importobj(ipkg, pos, s, ir.ONAME, ir.PEXTERN, t)
}
// importalias declares symbol s as an imported type alias with type t.
// ipkg is the package being imported
-func importalias(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
- n := importobj(ipkg, pos, s, OTYPE, PEXTERN, t)
- if n == nil {
- return
- }
-
- if Debug.E != 0 {
- fmt.Printf("import type %v = %L\n", s, t)
- }
+func importalias(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
+ return importobj(ipkg, pos, s, ir.OTYPE, ir.PEXTERN, t)
}
func dumpasmhdr() {
- b, err := bio.Create(asmhdr)
+ b, err := bio.Create(base.Flag.AsmHdr)
 if err != nil {
- Fatalf("%v", err)
+ base.Fatalf("%v", err)
 }
- fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", localpkg.Name)
- for _, n := range asmlist {
- if n.Sym.IsBlank() {
+ fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", types.LocalPkg.Name)
+ // Walk the symbols queued for the -asmhdr output (formerly asmlist).
+ for _, n := range Target.Asms {
+ if n.Sym().IsBlank() {
 continue
 }
- switch n.Op {
- case OLITERAL:
- t := n.Val().Ctype()
- if t == CTFLT || t == CTCPLX {
+ switch n.Op() {
+ case ir.OLITERAL:
+ t := n.Val().Kind()
+ // Float and complex constants are skipped: no #define is emitted for them.
+ if t == constant.Float || t == constant.Complex {
 break
 }
- fmt.Fprintf(b, "#define const_%s %#v\n", n.Sym.Name, n.Val())
+ fmt.Fprintf(b, "#define const_%s %#v\n", n.Sym().Name, n.Val())
- case OTYPE:
- t := n.Type
+ case ir.OTYPE:
+ t := n.Type()
+ // Only plain (non-map-internal, non-funarg) struct types get size/offset defines.
 if !t.IsStruct() || t.StructType().Map != nil || t.IsFuncArgStruct() {
 break
 }
- fmt.Fprintf(b, "#define %s__size %d\n", n.Sym.Name, int(t.Width))
+ fmt.Fprintf(b, "#define %s__size %d\n", n.Sym().Name, int(t.Width))
 for _, f := range t.Fields().Slice() {
 if !f.Sym.IsBlank() {
- fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym.Name, f.Sym.Name, int(f.Offset))
+ fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym().Name, f.Sym.Name, int(f.Offset))
 }
 }
 }
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
- "bytes"
- "cmd/compile/internal/types"
- "cmd/internal/src"
- "fmt"
- "io"
- "strconv"
- "strings"
- "sync"
- "unicode/utf8"
-)
-
-// A FmtFlag value is a set of flags (or 0).
-// They control how the Xconv functions format their values.
-// See the respective function's documentation for details.
-type FmtFlag int
-
-const ( // fmt.Format flag/prec or verb
- FmtLeft FmtFlag = 1 << iota // '-'
- FmtSharp // '#'
- FmtSign // '+'
- FmtUnsigned // internal use only (historic: u flag)
- FmtShort // verb == 'S' (historic: h flag)
- FmtLong // verb == 'L' (historic: l flag)
- FmtComma // '.' (== hasPrec) (historic: , flag)
- FmtByte // '0' (historic: hh flag)
-)
-
-// fmtFlag computes the (internal) FmtFlag
-// value given the fmt.State and format verb.
-func fmtFlag(s fmt.State, verb rune) FmtFlag {
- var flag FmtFlag
- if s.Flag('-') {
- flag |= FmtLeft
- }
- if s.Flag('#') {
- flag |= FmtSharp
- }
- if s.Flag('+') {
- flag |= FmtSign
- }
- if s.Flag(' ') {
- Fatalf("FmtUnsigned in format string")
- }
- if _, ok := s.Precision(); ok {
- flag |= FmtComma
- }
- if s.Flag('0') {
- flag |= FmtByte
- }
- switch verb {
- case 'S':
- flag |= FmtShort
- case 'L':
- flag |= FmtLong
- }
- return flag
-}
-
-// Format conversions:
-// TODO(gri) verify these; eliminate those not used anymore
-//
-// %v Op Node opcodes
-// Flags: #: print Go syntax (automatic unless mode == FDbg)
-//
-// %j *Node Node details
-// Flags: 0: suppresses things not relevant until walk
-//
-// %v *Val Constant values
-//
-// %v *types.Sym Symbols
-// %S unqualified identifier in any mode
-// Flags: +,- #: mode (see below)
-// 0: in export mode: unqualified identifier if exported, qualified if not
-//
-// %v *types.Type Types
-// %S omit "func" and receiver in function types
-// %L definition instead of name.
-// Flags: +,- #: mode (see below)
-// ' ' (only in -/Sym mode) print type identifiers wit package name instead of prefix.
-//
-// %v *Node Nodes
-// %S (only in +/debug mode) suppress recursion
-// %L (only in Error mode) print "foo (type Bar)"
-// Flags: +,- #: mode (see below)
-//
-// %v Nodes Node lists
-// Flags: those of *Node
-// .: separate items with ',' instead of ';'
-
-// *types.Sym, *types.Type, and *Node types use the flags below to set the format mode
-const (
- FErr fmtMode = iota
- FDbg
- FTypeId
- FTypeIdName // same as FTypeId, but use package name instead of prefix
-)
-
-// The mode flags '+', '-', and '#' are sticky; they persist through
-// recursions of *Node, *types.Type, and *types.Sym values. The ' ' flag is
-// sticky only on *types.Type recursions and only used in %-/*types.Sym mode.
-//
-// Example: given a *types.Sym: %+v %#v %-v print an identifier properly qualified for debug/export/internal mode
-
-// Useful format combinations:
-// TODO(gri): verify these
-//
-// *Node, Nodes:
-// %+v multiline recursive debug dump of *Node/Nodes
-// %+S non-recursive debug dump
-//
-// *Node:
-// %#v Go format
-// %L "foo (type Bar)" for error messages
-//
-// *types.Type:
-// %#v Go format
-// %#L type definition instead of name
-// %#S omit "func" and receiver in function signature
-//
-// %-v type identifiers
-// %-S type identifiers without "func" and arg names in type signatures (methodsym)
-// %- v type identifiers with package name instead of prefix (typesym, dcommontype, typehash)
-
-// update returns the results of applying f to mode.
-func (f FmtFlag) update(mode fmtMode) (FmtFlag, fmtMode) {
- switch {
- case f&FmtSign != 0:
- mode = FDbg
- case f&FmtSharp != 0:
- // ignore (textual export format no longer supported)
- case f&FmtUnsigned != 0:
- mode = FTypeIdName
- case f&FmtLeft != 0:
- mode = FTypeId
- }
-
- f &^= FmtSharp | FmtLeft | FmtSign
- return f, mode
-}
-
-var goopnames = []string{
- OADDR: "&",
- OADD: "+",
- OADDSTR: "+",
- OALIGNOF: "unsafe.Alignof",
- OANDAND: "&&",
- OANDNOT: "&^",
- OAND: "&",
- OAPPEND: "append",
- OAS: "=",
- OAS2: "=",
- OBREAK: "break",
- OCALL: "function call", // not actual syntax
- OCAP: "cap",
- OCASE: "case",
- OCLOSE: "close",
- OCOMPLEX: "complex",
- OBITNOT: "^",
- OCONTINUE: "continue",
- OCOPY: "copy",
- ODELETE: "delete",
- ODEFER: "defer",
- ODIV: "/",
- OEQ: "==",
- OFALL: "fallthrough",
- OFOR: "for",
- OFORUNTIL: "foruntil", // not actual syntax; used to avoid off-end pointer live on backedge.892
- OGE: ">=",
- OGOTO: "goto",
- OGT: ">",
- OIF: "if",
- OIMAG: "imag",
- OINLMARK: "inlmark",
- ODEREF: "*",
- OLEN: "len",
- OLE: "<=",
- OLSH: "<<",
- OLT: "<",
- OMAKE: "make",
- ONEG: "-",
- OMOD: "%",
- OMUL: "*",
- ONEW: "new",
- ONE: "!=",
- ONOT: "!",
- OOFFSETOF: "unsafe.Offsetof",
- OOROR: "||",
- OOR: "|",
- OPANIC: "panic",
- OPLUS: "+",
- OPRINTN: "println",
- OPRINT: "print",
- ORANGE: "range",
- OREAL: "real",
- ORECV: "<-",
- ORECOVER: "recover",
- ORETURN: "return",
- ORSH: ">>",
- OSELECT: "select",
- OSEND: "<-",
- OSIZEOF: "unsafe.Sizeof",
- OSUB: "-",
- OSWITCH: "switch",
- OXOR: "^",
-}
-
-func (o Op) GoString() string {
- return fmt.Sprintf("%#v", o)
-}
-
-func (o Op) format(s fmt.State, verb rune, mode fmtMode) {
- switch verb {
- case 'v':
- o.oconv(s, fmtFlag(s, verb), mode)
-
- default:
- fmt.Fprintf(s, "%%!%c(Op=%d)", verb, int(o))
- }
-}
-
-func (o Op) oconv(s fmt.State, flag FmtFlag, mode fmtMode) {
- if flag&FmtSharp != 0 || mode != FDbg {
- if int(o) < len(goopnames) && goopnames[o] != "" {
- fmt.Fprint(s, goopnames[o])
- return
- }
- }
-
- // 'o.String()' instead of just 'o' to avoid infinite recursion
- fmt.Fprint(s, o.String())
-}
-
-type (
- fmtMode int
-
- fmtNodeErr Node
- fmtNodeDbg Node
- fmtNodeTypeId Node
- fmtNodeTypeIdName Node
-
- fmtOpErr Op
- fmtOpDbg Op
- fmtOpTypeId Op
- fmtOpTypeIdName Op
-
- fmtTypeErr types.Type
- fmtTypeDbg types.Type
- fmtTypeTypeId types.Type
- fmtTypeTypeIdName types.Type
-
- fmtSymErr types.Sym
- fmtSymDbg types.Sym
- fmtSymTypeId types.Sym
- fmtSymTypeIdName types.Sym
-
- fmtNodesErr Nodes
- fmtNodesDbg Nodes
- fmtNodesTypeId Nodes
- fmtNodesTypeIdName Nodes
-)
-
-func (n *fmtNodeErr) Format(s fmt.State, verb rune) { (*Node)(n).format(s, verb, FErr) }
-func (n *fmtNodeDbg) Format(s fmt.State, verb rune) { (*Node)(n).format(s, verb, FDbg) }
-func (n *fmtNodeTypeId) Format(s fmt.State, verb rune) { (*Node)(n).format(s, verb, FTypeId) }
-func (n *fmtNodeTypeIdName) Format(s fmt.State, verb rune) { (*Node)(n).format(s, verb, FTypeIdName) }
-func (n *Node) Format(s fmt.State, verb rune) { n.format(s, verb, FErr) }
-
-func (o fmtOpErr) Format(s fmt.State, verb rune) { Op(o).format(s, verb, FErr) }
-func (o fmtOpDbg) Format(s fmt.State, verb rune) { Op(o).format(s, verb, FDbg) }
-func (o fmtOpTypeId) Format(s fmt.State, verb rune) { Op(o).format(s, verb, FTypeId) }
-func (o fmtOpTypeIdName) Format(s fmt.State, verb rune) { Op(o).format(s, verb, FTypeIdName) }
-func (o Op) Format(s fmt.State, verb rune) { o.format(s, verb, FErr) }
-
-func (t *fmtTypeErr) Format(s fmt.State, verb rune) { typeFormat((*types.Type)(t), s, verb, FErr) }
-func (t *fmtTypeDbg) Format(s fmt.State, verb rune) { typeFormat((*types.Type)(t), s, verb, FDbg) }
-func (t *fmtTypeTypeId) Format(s fmt.State, verb rune) {
- typeFormat((*types.Type)(t), s, verb, FTypeId)
-}
-func (t *fmtTypeTypeIdName) Format(s fmt.State, verb rune) {
- typeFormat((*types.Type)(t), s, verb, FTypeIdName)
-}
-
-// func (t *types.Type) Format(s fmt.State, verb rune) // in package types
-
-func (y *fmtSymErr) Format(s fmt.State, verb rune) { symFormat((*types.Sym)(y), s, verb, FErr) }
-func (y *fmtSymDbg) Format(s fmt.State, verb rune) { symFormat((*types.Sym)(y), s, verb, FDbg) }
-func (y *fmtSymTypeId) Format(s fmt.State, verb rune) { symFormat((*types.Sym)(y), s, verb, FTypeId) }
-func (y *fmtSymTypeIdName) Format(s fmt.State, verb rune) {
- symFormat((*types.Sym)(y), s, verb, FTypeIdName)
-}
-
-// func (y *types.Sym) Format(s fmt.State, verb rune) // in package types { y.format(s, verb, FErr) }
-
-func (n fmtNodesErr) Format(s fmt.State, verb rune) { (Nodes)(n).format(s, verb, FErr) }
-func (n fmtNodesDbg) Format(s fmt.State, verb rune) { (Nodes)(n).format(s, verb, FDbg) }
-func (n fmtNodesTypeId) Format(s fmt.State, verb rune) { (Nodes)(n).format(s, verb, FTypeId) }
-func (n fmtNodesTypeIdName) Format(s fmt.State, verb rune) { (Nodes)(n).format(s, verb, FTypeIdName) }
-func (n Nodes) Format(s fmt.State, verb rune) { n.format(s, verb, FErr) }
-
-func (m fmtMode) Fprintf(s fmt.State, format string, args ...interface{}) {
- m.prepareArgs(args)
- fmt.Fprintf(s, format, args...)
-}
-
-func (m fmtMode) Sprintf(format string, args ...interface{}) string {
- m.prepareArgs(args)
- return fmt.Sprintf(format, args...)
-}
-
-func (m fmtMode) Sprint(args ...interface{}) string {
- m.prepareArgs(args)
- return fmt.Sprint(args...)
-}
-
-func (m fmtMode) prepareArgs(args []interface{}) {
- switch m {
- case FErr:
- for i, arg := range args {
- switch arg := arg.(type) {
- case Op:
- args[i] = fmtOpErr(arg)
- case *Node:
- args[i] = (*fmtNodeErr)(arg)
- case *types.Type:
- args[i] = (*fmtTypeErr)(arg)
- case *types.Sym:
- args[i] = (*fmtSymErr)(arg)
- case Nodes:
- args[i] = fmtNodesErr(arg)
- case Val, int32, int64, string, types.EType:
- // OK: printing these types doesn't depend on mode
- default:
- Fatalf("mode.prepareArgs type %T", arg)
- }
- }
- case FDbg:
- for i, arg := range args {
- switch arg := arg.(type) {
- case Op:
- args[i] = fmtOpDbg(arg)
- case *Node:
- args[i] = (*fmtNodeDbg)(arg)
- case *types.Type:
- args[i] = (*fmtTypeDbg)(arg)
- case *types.Sym:
- args[i] = (*fmtSymDbg)(arg)
- case Nodes:
- args[i] = fmtNodesDbg(arg)
- case Val, int32, int64, string, types.EType:
- // OK: printing these types doesn't depend on mode
- default:
- Fatalf("mode.prepareArgs type %T", arg)
- }
- }
- case FTypeId:
- for i, arg := range args {
- switch arg := arg.(type) {
- case Op:
- args[i] = fmtOpTypeId(arg)
- case *Node:
- args[i] = (*fmtNodeTypeId)(arg)
- case *types.Type:
- args[i] = (*fmtTypeTypeId)(arg)
- case *types.Sym:
- args[i] = (*fmtSymTypeId)(arg)
- case Nodes:
- args[i] = fmtNodesTypeId(arg)
- case Val, int32, int64, string, types.EType:
- // OK: printing these types doesn't depend on mode
- default:
- Fatalf("mode.prepareArgs type %T", arg)
- }
- }
- case FTypeIdName:
- for i, arg := range args {
- switch arg := arg.(type) {
- case Op:
- args[i] = fmtOpTypeIdName(arg)
- case *Node:
- args[i] = (*fmtNodeTypeIdName)(arg)
- case *types.Type:
- args[i] = (*fmtTypeTypeIdName)(arg)
- case *types.Sym:
- args[i] = (*fmtSymTypeIdName)(arg)
- case Nodes:
- args[i] = fmtNodesTypeIdName(arg)
- case Val, int32, int64, string, types.EType:
- // OK: printing these types doesn't depend on mode
- default:
- Fatalf("mode.prepareArgs type %T", arg)
- }
- }
- default:
- Fatalf("mode.prepareArgs mode %d", m)
- }
-}
-
-func (n *Node) format(s fmt.State, verb rune, mode fmtMode) {
- switch verb {
- case 'v', 'S', 'L':
- n.nconv(s, fmtFlag(s, verb), mode)
-
- case 'j':
- n.jconv(s, fmtFlag(s, verb))
-
- default:
- fmt.Fprintf(s, "%%!%c(*Node=%p)", verb, n)
- }
-}
-
-// *Node details
-func (n *Node) jconv(s fmt.State, flag FmtFlag) {
- c := flag & FmtShort
-
- // Useful to see which nodes in a Node Dump/dumplist are actually identical
- if Debug_dumpptrs != 0 {
- fmt.Fprintf(s, " p(%p)", n)
- }
- if c == 0 && n.Name != nil && n.Name.Vargen != 0 {
- fmt.Fprintf(s, " g(%d)", n.Name.Vargen)
- }
-
- if Debug_dumpptrs != 0 && c == 0 && n.Name != nil && n.Name.Defn != nil {
- // Useful to see where Defn is set and what node it points to
- fmt.Fprintf(s, " defn(%p)", n.Name.Defn)
- }
-
- if n.Pos.IsKnown() {
- pfx := ""
- switch n.Pos.IsStmt() {
- case src.PosNotStmt:
- pfx = "_" // "-" would be confusing
- case src.PosIsStmt:
- pfx = "+"
- }
- fmt.Fprintf(s, " l(%s%d)", pfx, n.Pos.Line())
- }
-
- if c == 0 && n.Xoffset != BADWIDTH {
- fmt.Fprintf(s, " x(%d)", n.Xoffset)
- }
-
- if n.Class() != 0 {
- fmt.Fprintf(s, " class(%v)", n.Class())
- }
-
- if n.Colas() {
- fmt.Fprintf(s, " colas(%v)", n.Colas())
- }
-
- switch n.Esc {
- case EscUnknown:
- break
-
- case EscHeap:
- fmt.Fprint(s, " esc(h)")
-
- case EscNone:
- fmt.Fprint(s, " esc(no)")
-
- case EscNever:
- if c == 0 {
- fmt.Fprint(s, " esc(N)")
- }
-
- default:
- fmt.Fprintf(s, " esc(%d)", n.Esc)
- }
-
- if e, ok := n.Opt().(*EscLocation); ok && e.loopDepth != 0 {
- fmt.Fprintf(s, " ld(%d)", e.loopDepth)
- }
-
- if c == 0 && n.Typecheck() != 0 {
- fmt.Fprintf(s, " tc(%d)", n.Typecheck())
- }
-
- if n.IsDDD() {
- fmt.Fprintf(s, " isddd(%v)", n.IsDDD())
- }
-
- if n.Implicit() {
- fmt.Fprintf(s, " implicit(%v)", n.Implicit())
- }
-
- if n.Embedded() {
- fmt.Fprintf(s, " embedded")
- }
-
- if n.Op == ONAME {
- if n.Name.Addrtaken() {
- fmt.Fprint(s, " addrtaken")
- }
- if n.Name.Assigned() {
- fmt.Fprint(s, " assigned")
- }
- if n.Name.IsClosureVar() {
- fmt.Fprint(s, " closurevar")
- }
- if n.Name.Captured() {
- fmt.Fprint(s, " captured")
- }
- if n.Name.IsOutputParamHeapAddr() {
- fmt.Fprint(s, " outputparamheapaddr")
- }
- }
- if n.Bounded() {
- fmt.Fprint(s, " bounded")
- }
- if n.NonNil() {
- fmt.Fprint(s, " nonnil")
- }
-
- if c == 0 && n.HasCall() {
- fmt.Fprint(s, " hascall")
- }
-
- if c == 0 && n.Name != nil && n.Name.Used() {
- fmt.Fprint(s, " used")
- }
-}
-
-func (v Val) Format(s fmt.State, verb rune) {
- switch verb {
- case 'v':
- v.vconv(s, fmtFlag(s, verb))
-
- default:
- fmt.Fprintf(s, "%%!%c(Val=%T)", verb, v)
- }
-}
-
-func (v Val) vconv(s fmt.State, flag FmtFlag) {
- switch u := v.U.(type) {
- case *Mpint:
- if !u.Rune {
- if flag&FmtSharp != 0 {
- fmt.Fprint(s, u.String())
- return
- }
- fmt.Fprint(s, u.GoString())
- return
- }
-
- switch x := u.Int64(); {
- case ' ' <= x && x < utf8.RuneSelf && x != '\\' && x != '\'':
- fmt.Fprintf(s, "'%c'", int(x))
-
- case 0 <= x && x < 1<<16:
- fmt.Fprintf(s, "'\\u%04x'", uint(int(x)))
-
- case 0 <= x && x <= utf8.MaxRune:
- fmt.Fprintf(s, "'\\U%08x'", uint64(x))
-
- default:
- fmt.Fprintf(s, "('\\x00' + %v)", u)
- }
-
- case *Mpflt:
- if flag&FmtSharp != 0 {
- fmt.Fprint(s, u.String())
- return
- }
- fmt.Fprint(s, u.GoString())
- return
-
- case *Mpcplx:
- if flag&FmtSharp != 0 {
- fmt.Fprint(s, u.String())
- return
- }
- fmt.Fprint(s, u.GoString())
- return
-
- case string:
- fmt.Fprint(s, strconv.Quote(u))
-
- case bool:
- fmt.Fprint(s, u)
-
- case *NilVal:
- fmt.Fprint(s, "nil")
-
- default:
- fmt.Fprintf(s, "<ctype=%d>", v.Ctype())
- }
-}
-
-/*
-s%,%,\n%g
-s%\n+%\n%g
-s%^[ ]*T%%g
-s%,.*%%g
-s%.+% [T&] = "&",%g
-s%^ ........*\]%&~%g
-s%~ %%g
-*/
-
-func symfmt(b *bytes.Buffer, s *types.Sym, flag FmtFlag, mode fmtMode) {
- if flag&FmtShort == 0 {
- switch mode {
- case FErr: // This is for the user
- if s.Pkg == builtinpkg || s.Pkg == localpkg {
- b.WriteString(s.Name)
- return
- }
-
- // If the name was used by multiple packages, display the full path,
- if s.Pkg.Name != "" && numImport[s.Pkg.Name] > 1 {
- fmt.Fprintf(b, "%q.%s", s.Pkg.Path, s.Name)
- return
- }
- b.WriteString(s.Pkg.Name)
- b.WriteByte('.')
- b.WriteString(s.Name)
- return
-
- case FDbg:
- b.WriteString(s.Pkg.Name)
- b.WriteByte('.')
- b.WriteString(s.Name)
- return
-
- case FTypeIdName:
- // dcommontype, typehash
- b.WriteString(s.Pkg.Name)
- b.WriteByte('.')
- b.WriteString(s.Name)
- return
-
- case FTypeId:
- // (methodsym), typesym, weaksym
- b.WriteString(s.Pkg.Prefix)
- b.WriteByte('.')
- b.WriteString(s.Name)
- return
- }
- }
-
- if flag&FmtByte != 0 {
- // FmtByte (hh) implies FmtShort (h)
- // skip leading "type." in method name
- name := s.Name
- if i := strings.LastIndex(name, "."); i >= 0 {
- name = name[i+1:]
- }
-
- if mode == FDbg {
- fmt.Fprintf(b, "@%q.%s", s.Pkg.Path, name)
- return
- }
-
- b.WriteString(name)
- return
- }
-
- b.WriteString(s.Name)
-}
-
-var basicnames = []string{
- TINT: "int",
- TUINT: "uint",
- TINT8: "int8",
- TUINT8: "uint8",
- TINT16: "int16",
- TUINT16: "uint16",
- TINT32: "int32",
- TUINT32: "uint32",
- TINT64: "int64",
- TUINT64: "uint64",
- TUINTPTR: "uintptr",
- TFLOAT32: "float32",
- TFLOAT64: "float64",
- TCOMPLEX64: "complex64",
- TCOMPLEX128: "complex128",
- TBOOL: "bool",
- TANY: "any",
- TSTRING: "string",
- TNIL: "nil",
- TIDEAL: "untyped number",
- TBLANK: "blank",
-}
-
-var fmtBufferPool = sync.Pool{
- New: func() interface{} {
- return new(bytes.Buffer)
- },
-}
-
-func tconv(t *types.Type, flag FmtFlag, mode fmtMode) string {
- buf := fmtBufferPool.Get().(*bytes.Buffer)
- buf.Reset()
- defer fmtBufferPool.Put(buf)
-
- tconv2(buf, t, flag, mode, nil)
- return types.InternString(buf.Bytes())
-}
-
-// tconv2 writes a string representation of t to b.
-// flag and mode control exactly what is printed.
-// Any types x that are already in the visited map get printed as @%d where %d=visited[x].
-// See #16897 before changing the implementation of tconv.
-func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited map[*types.Type]int) {
- if off, ok := visited[t]; ok {
- // We've seen this type before, so we're trying to print it recursively.
- // Print a reference to it instead.
- fmt.Fprintf(b, "@%d", off)
- return
- }
- if t == nil {
- b.WriteString("<T>")
- return
- }
- if t.Etype == types.TSSA {
- b.WriteString(t.Extra.(string))
- return
- }
- if t.Etype == types.TTUPLE {
- b.WriteString(t.FieldType(0).String())
- b.WriteByte(',')
- b.WriteString(t.FieldType(1).String())
- return
- }
-
- if t.Etype == types.TRESULTS {
- tys := t.Extra.(*types.Results).Types
- for i, et := range tys {
- if i > 0 {
- b.WriteByte(',')
- }
- b.WriteString(et.String())
- }
- return
- }
-
- flag, mode = flag.update(mode)
- if mode == FTypeIdName {
- flag |= FmtUnsigned
- }
- if t == types.Bytetype || t == types.Runetype {
- // in %-T mode collapse rune and byte with their originals.
- switch mode {
- case FTypeIdName, FTypeId:
- t = types.Types[t.Etype]
- default:
- sconv2(b, t.Sym, FmtShort, mode)
- return
- }
- }
- if t == types.Errortype {
- b.WriteString("error")
- return
- }
-
- // Unless the 'L' flag was specified, if the type has a name, just print that name.
- if flag&FmtLong == 0 && t.Sym != nil && t != types.Types[t.Etype] {
- switch mode {
- case FTypeId, FTypeIdName:
- if flag&FmtShort != 0 {
- if t.Vargen != 0 {
- sconv2(b, t.Sym, FmtShort, mode)
- fmt.Fprintf(b, "·%d", t.Vargen)
- return
- }
- sconv2(b, t.Sym, FmtShort, mode)
- return
- }
-
- if mode == FTypeIdName {
- sconv2(b, t.Sym, FmtUnsigned, mode)
- return
- }
-
- if t.Sym.Pkg == localpkg && t.Vargen != 0 {
- b.WriteString(mode.Sprintf("%v·%d", t.Sym, t.Vargen))
- return
- }
- }
-
- sconv2(b, t.Sym, 0, mode)
- return
- }
-
- if int(t.Etype) < len(basicnames) && basicnames[t.Etype] != "" {
- var name string
- switch t {
- case types.UntypedBool:
- name = "untyped bool"
- case types.UntypedString:
- name = "untyped string"
- case types.UntypedInt:
- name = "untyped int"
- case types.UntypedRune:
- name = "untyped rune"
- case types.UntypedFloat:
- name = "untyped float"
- case types.UntypedComplex:
- name = "untyped complex"
- default:
- name = basicnames[t.Etype]
- }
- b.WriteString(name)
- return
- }
-
- if mode == FDbg {
- b.WriteString(t.Etype.String())
- b.WriteByte('-')
- tconv2(b, t, flag, FErr, visited)
- return
- }
-
- // At this point, we might call tconv2 recursively. Add the current type to the visited list so we don't
- // try to print it recursively.
- // We record the offset in the result buffer where the type's text starts. This offset serves as a reference
- // point for any later references to the same type.
- // Note that we remove the type from the visited map as soon as the recursive call is done.
- // This prevents encoding types like map[*int]*int as map[*int]@4. (That encoding would work,
- // but I'd like to use the @ notation only when strictly necessary.)
- if visited == nil {
- visited = map[*types.Type]int{}
- }
- visited[t] = b.Len()
- defer delete(visited, t)
-
- switch t.Etype {
- case TPTR:
- b.WriteByte('*')
- switch mode {
- case FTypeId, FTypeIdName:
- if flag&FmtShort != 0 {
- tconv2(b, t.Elem(), FmtShort, mode, visited)
- return
- }
- }
- tconv2(b, t.Elem(), 0, mode, visited)
-
- case TARRAY:
- b.WriteByte('[')
- b.WriteString(strconv.FormatInt(t.NumElem(), 10))
- b.WriteByte(']')
- tconv2(b, t.Elem(), 0, mode, visited)
-
- case TSLICE:
- b.WriteString("[]")
- tconv2(b, t.Elem(), 0, mode, visited)
-
- case TCHAN:
- switch t.ChanDir() {
- case types.Crecv:
- b.WriteString("<-chan ")
- tconv2(b, t.Elem(), 0, mode, visited)
- case types.Csend:
- b.WriteString("chan<- ")
- tconv2(b, t.Elem(), 0, mode, visited)
- default:
- b.WriteString("chan ")
- if t.Elem() != nil && t.Elem().IsChan() && t.Elem().Sym == nil && t.Elem().ChanDir() == types.Crecv {
- b.WriteByte('(')
- tconv2(b, t.Elem(), 0, mode, visited)
- b.WriteByte(')')
- } else {
- tconv2(b, t.Elem(), 0, mode, visited)
- }
- }
-
- case TMAP:
- b.WriteString("map[")
- tconv2(b, t.Key(), 0, mode, visited)
- b.WriteByte(']')
- tconv2(b, t.Elem(), 0, mode, visited)
-
- case TINTER:
- if t.IsEmptyInterface() {
- b.WriteString("interface {}")
- break
- }
- b.WriteString("interface {")
- for i, f := range t.Fields().Slice() {
- if i != 0 {
- b.WriteByte(';')
- }
- b.WriteByte(' ')
- switch {
- case f.Sym == nil:
- // Check first that a symbol is defined for this type.
- // Wrong interface definitions may have types lacking a symbol.
- break
- case types.IsExported(f.Sym.Name):
- sconv2(b, f.Sym, FmtShort, mode)
- default:
- flag1 := FmtLeft
- if flag&FmtUnsigned != 0 {
- flag1 = FmtUnsigned
- }
- sconv2(b, f.Sym, flag1, mode)
- }
- tconv2(b, f.Type, FmtShort, mode, visited)
- }
- if t.NumFields() != 0 {
- b.WriteByte(' ')
- }
- b.WriteByte('}')
-
- case TFUNC:
- if flag&FmtShort != 0 {
- // no leading func
- } else {
- if t.Recv() != nil {
- b.WriteString("method")
- tconv2(b, t.Recvs(), 0, mode, visited)
- b.WriteByte(' ')
- }
- b.WriteString("func")
- }
- tconv2(b, t.Params(), 0, mode, visited)
-
- switch t.NumResults() {
- case 0:
- // nothing to do
-
- case 1:
- b.WriteByte(' ')
- tconv2(b, t.Results().Field(0).Type, 0, mode, visited) // struct->field->field's type
-
- default:
- b.WriteByte(' ')
- tconv2(b, t.Results(), 0, mode, visited)
- }
-
- case TSTRUCT:
- if m := t.StructType().Map; m != nil {
- mt := m.MapType()
- // Format the bucket struct for map[x]y as map.bucket[x]y.
- // This avoids a recursive print that generates very long names.
- switch t {
- case mt.Bucket:
- b.WriteString("map.bucket[")
- case mt.Hmap:
- b.WriteString("map.hdr[")
- case mt.Hiter:
- b.WriteString("map.iter[")
- default:
- Fatalf("unknown internal map type")
- }
- tconv2(b, m.Key(), 0, mode, visited)
- b.WriteByte(']')
- tconv2(b, m.Elem(), 0, mode, visited)
- break
- }
-
- if funarg := t.StructType().Funarg; funarg != types.FunargNone {
- b.WriteByte('(')
- var flag1 FmtFlag
- switch mode {
- case FTypeId, FTypeIdName, FErr:
- // no argument names on function signature, and no "noescape"/"nosplit" tags
- flag1 = FmtShort
- }
- for i, f := range t.Fields().Slice() {
- if i != 0 {
- b.WriteString(", ")
- }
- fldconv(b, f, flag1, mode, visited, funarg)
- }
- b.WriteByte(')')
- } else {
- b.WriteString("struct {")
- for i, f := range t.Fields().Slice() {
- if i != 0 {
- b.WriteByte(';')
- }
- b.WriteByte(' ')
- fldconv(b, f, FmtLong, mode, visited, funarg)
- }
- if t.NumFields() != 0 {
- b.WriteByte(' ')
- }
- b.WriteByte('}')
- }
-
- case TFORW:
- b.WriteString("undefined")
- if t.Sym != nil {
- b.WriteByte(' ')
- sconv2(b, t.Sym, 0, mode)
- }
-
- case TUNSAFEPTR:
- b.WriteString("unsafe.Pointer")
-
- case Txxx:
- b.WriteString("Txxx")
- default:
- // Don't know how to handle - fall back to detailed prints.
- b.WriteString(mode.Sprintf("%v <%v>", t.Etype, t.Sym))
- }
-}
-
-// Statements which may be rendered with a simplestmt as init.
-func stmtwithinit(op Op) bool {
- switch op {
- case OIF, OFOR, OFORUNTIL, OSWITCH:
- return true
- }
-
- return false
-}
-
-func (n *Node) stmtfmt(s fmt.State, mode fmtMode) {
- // some statements allow for an init, but at most one,
- // but we may have an arbitrary number added, eg by typecheck
- // and inlining. If it doesn't fit the syntax, emit an enclosing
- // block starting with the init statements.
-
- // if we can just say "for" n->ninit; ... then do so
- simpleinit := n.Ninit.Len() == 1 && n.Ninit.First().Ninit.Len() == 0 && stmtwithinit(n.Op)
-
- // otherwise, print the inits as separate statements
- complexinit := n.Ninit.Len() != 0 && !simpleinit && (mode != FErr)
-
- // but if it was for if/for/switch, put in an extra surrounding block to limit the scope
- extrablock := complexinit && stmtwithinit(n.Op)
-
- if extrablock {
- fmt.Fprint(s, "{")
- }
-
- if complexinit {
- mode.Fprintf(s, " %v; ", n.Ninit)
- }
-
- switch n.Op {
- case ODCL:
- mode.Fprintf(s, "var %v %v", n.Left.Sym, n.Left.Type)
-
- case ODCLFIELD:
- if n.Sym != nil {
- mode.Fprintf(s, "%v %v", n.Sym, n.Left)
- } else {
- mode.Fprintf(s, "%v", n.Left)
- }
-
- // Don't export "v = <N>" initializing statements, hope they're always
- // preceded by the DCL which will be re-parsed and typechecked to reproduce
- // the "v = <N>" again.
- case OAS:
- if n.Colas() && !complexinit {
- mode.Fprintf(s, "%v := %v", n.Left, n.Right)
- } else {
- mode.Fprintf(s, "%v = %v", n.Left, n.Right)
- }
-
- case OASOP:
- if n.Implicit() {
- if n.SubOp() == OADD {
- mode.Fprintf(s, "%v++", n.Left)
- } else {
- mode.Fprintf(s, "%v--", n.Left)
- }
- break
- }
-
- mode.Fprintf(s, "%v %#v= %v", n.Left, n.SubOp(), n.Right)
-
- case OAS2:
- if n.Colas() && !complexinit {
- mode.Fprintf(s, "%.v := %.v", n.List, n.Rlist)
- break
- }
- fallthrough
-
- case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
- mode.Fprintf(s, "%.v = %v", n.List, n.Right)
-
- case ORETURN:
- mode.Fprintf(s, "return %.v", n.List)
-
- case ORETJMP:
- mode.Fprintf(s, "retjmp %v", n.Sym)
-
- case OINLMARK:
- mode.Fprintf(s, "inlmark %d", n.Xoffset)
-
- case OGO:
- mode.Fprintf(s, "go %v", n.Left)
-
- case ODEFER:
- mode.Fprintf(s, "defer %v", n.Left)
-
- case OIF:
- if simpleinit {
- mode.Fprintf(s, "if %v; %v { %v }", n.Ninit.First(), n.Left, n.Nbody)
- } else {
- mode.Fprintf(s, "if %v { %v }", n.Left, n.Nbody)
- }
- if n.Rlist.Len() != 0 {
- mode.Fprintf(s, " else { %v }", n.Rlist)
- }
-
- case OFOR, OFORUNTIL:
- opname := "for"
- if n.Op == OFORUNTIL {
- opname = "foruntil"
- }
- if mode == FErr { // TODO maybe only if FmtShort, same below
- fmt.Fprintf(s, "%s loop", opname)
- break
- }
-
- fmt.Fprint(s, opname)
- if simpleinit {
- mode.Fprintf(s, " %v;", n.Ninit.First())
- } else if n.Right != nil {
- fmt.Fprint(s, " ;")
- }
-
- if n.Left != nil {
- mode.Fprintf(s, " %v", n.Left)
- }
-
- if n.Right != nil {
- mode.Fprintf(s, "; %v", n.Right)
- } else if simpleinit {
- fmt.Fprint(s, ";")
- }
-
- if n.Op == OFORUNTIL && n.List.Len() != 0 {
- mode.Fprintf(s, "; %v", n.List)
- }
-
- mode.Fprintf(s, " { %v }", n.Nbody)
-
- case ORANGE:
- if mode == FErr {
- fmt.Fprint(s, "for loop")
- break
- }
-
- if n.List.Len() == 0 {
- mode.Fprintf(s, "for range %v { %v }", n.Right, n.Nbody)
- break
- }
-
- mode.Fprintf(s, "for %.v = range %v { %v }", n.List, n.Right, n.Nbody)
-
- case OSELECT, OSWITCH:
- if mode == FErr {
- mode.Fprintf(s, "%v statement", n.Op)
- break
- }
-
- mode.Fprintf(s, "%#v", n.Op)
- if simpleinit {
- mode.Fprintf(s, " %v;", n.Ninit.First())
- }
- if n.Left != nil {
- mode.Fprintf(s, " %v ", n.Left)
- }
-
- mode.Fprintf(s, " { %v }", n.List)
-
- case OCASE:
- if n.List.Len() != 0 {
- mode.Fprintf(s, "case %.v", n.List)
- } else {
- fmt.Fprint(s, "default")
- }
- mode.Fprintf(s, ": %v", n.Nbody)
-
- case OBREAK, OCONTINUE, OGOTO, OFALL:
- if n.Sym != nil {
- mode.Fprintf(s, "%#v %v", n.Op, n.Sym)
- } else {
- mode.Fprintf(s, "%#v", n.Op)
- }
-
- case OEMPTY:
- break
-
- case OLABEL:
- mode.Fprintf(s, "%v: ", n.Sym)
- }
-
- if extrablock {
- fmt.Fprint(s, "}")
- }
-}
-
-var opprec = []int{
- OALIGNOF: 8,
- OAPPEND: 8,
- OBYTES2STR: 8,
- OARRAYLIT: 8,
- OSLICELIT: 8,
- ORUNES2STR: 8,
- OCALLFUNC: 8,
- OCALLINTER: 8,
- OCALLMETH: 8,
- OCALL: 8,
- OCAP: 8,
- OCLOSE: 8,
- OCONVIFACE: 8,
- OCONVNOP: 8,
- OCONV: 8,
- OCOPY: 8,
- ODELETE: 8,
- OGETG: 8,
- OLEN: 8,
- OLITERAL: 8,
- OMAKESLICE: 8,
- OMAKESLICECOPY: 8,
- OMAKE: 8,
- OMAPLIT: 8,
- ONAME: 8,
- ONEW: 8,
- ONONAME: 8,
- OOFFSETOF: 8,
- OPACK: 8,
- OPANIC: 8,
- OPAREN: 8,
- OPRINTN: 8,
- OPRINT: 8,
- ORUNESTR: 8,
- OSIZEOF: 8,
- OSTR2BYTES: 8,
- OSTR2RUNES: 8,
- OSTRUCTLIT: 8,
- OTARRAY: 8,
- OTCHAN: 8,
- OTFUNC: 8,
- OTINTER: 8,
- OTMAP: 8,
- OTSTRUCT: 8,
- OINDEXMAP: 8,
- OINDEX: 8,
- OSLICE: 8,
- OSLICESTR: 8,
- OSLICEARR: 8,
- OSLICE3: 8,
- OSLICE3ARR: 8,
- OSLICEHEADER: 8,
- ODOTINTER: 8,
- ODOTMETH: 8,
- ODOTPTR: 8,
- ODOTTYPE2: 8,
- ODOTTYPE: 8,
- ODOT: 8,
- OXDOT: 8,
- OCALLPART: 8,
- OPLUS: 7,
- ONOT: 7,
- OBITNOT: 7,
- ONEG: 7,
- OADDR: 7,
- ODEREF: 7,
- ORECV: 7,
- OMUL: 6,
- ODIV: 6,
- OMOD: 6,
- OLSH: 6,
- ORSH: 6,
- OAND: 6,
- OANDNOT: 6,
- OADD: 5,
- OSUB: 5,
- OOR: 5,
- OXOR: 5,
- OEQ: 4,
- OLT: 4,
- OLE: 4,
- OGE: 4,
- OGT: 4,
- ONE: 4,
- OSEND: 3,
- OANDAND: 2,
- OOROR: 1,
-
- // Statements handled by stmtfmt
- OAS: -1,
- OAS2: -1,
- OAS2DOTTYPE: -1,
- OAS2FUNC: -1,
- OAS2MAPR: -1,
- OAS2RECV: -1,
- OASOP: -1,
- OBREAK: -1,
- OCASE: -1,
- OCONTINUE: -1,
- ODCL: -1,
- ODCLFIELD: -1,
- ODEFER: -1,
- OEMPTY: -1,
- OFALL: -1,
- OFOR: -1,
- OFORUNTIL: -1,
- OGOTO: -1,
- OIF: -1,
- OLABEL: -1,
- OGO: -1,
- ORANGE: -1,
- ORETURN: -1,
- OSELECT: -1,
- OSWITCH: -1,
-
- OEND: 0,
-}
-
-func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) {
- for n != nil && n.Implicit() && (n.Op == ODEREF || n.Op == OADDR) {
- n = n.Left
- }
-
- if n == nil {
- fmt.Fprint(s, "<N>")
- return
- }
-
- nprec := opprec[n.Op]
- if n.Op == OTYPE && n.Sym != nil {
- nprec = 8
- }
-
- if prec > nprec {
- mode.Fprintf(s, "(%v)", n)
- return
- }
-
- switch n.Op {
- case OPAREN:
- mode.Fprintf(s, "(%v)", n.Left)
-
- case OLITERAL: // this is a bit of a mess
- if mode == FErr {
- if n.Orig != nil && n.Orig != n {
- n.Orig.exprfmt(s, prec, mode)
- return
- }
- if n.Sym != nil {
- fmt.Fprint(s, smodeString(n.Sym, mode))
- return
- }
- }
- if n.Val().Ctype() == CTNIL && n.Orig != nil && n.Orig != n {
- n.Orig.exprfmt(s, prec, mode)
- return
- }
- if n.Type != nil && !n.Type.IsUntyped() {
- // Need parens when type begins with what might
- // be misinterpreted as a unary operator: * or <-.
- if n.Type.IsPtr() || (n.Type.IsChan() && n.Type.ChanDir() == types.Crecv) {
- mode.Fprintf(s, "(%v)(%v)", n.Type, n.Val())
- return
- } else {
- mode.Fprintf(s, "%v(%v)", n.Type, n.Val())
- return
- }
- }
-
- mode.Fprintf(s, "%v", n.Val())
-
- // Special case: name used as local variable in export.
- // _ becomes ~b%d internally; print as _ for export
- case ONAME:
- if mode == FErr && n.Sym != nil && n.Sym.Name[0] == '~' && n.Sym.Name[1] == 'b' {
- fmt.Fprint(s, "_")
- return
- }
- fallthrough
- case OPACK, ONONAME:
- fmt.Fprint(s, smodeString(n.Sym, mode))
-
- case OTYPE:
- if n.Type == nil && n.Sym != nil {
- fmt.Fprint(s, smodeString(n.Sym, mode))
- return
- }
- mode.Fprintf(s, "%v", n.Type)
-
- case OTARRAY:
- if n.Left != nil {
- mode.Fprintf(s, "[%v]%v", n.Left, n.Right)
- return
- }
- mode.Fprintf(s, "[]%v", n.Right) // happens before typecheck
-
- case OTMAP:
- mode.Fprintf(s, "map[%v]%v", n.Left, n.Right)
-
- case OTCHAN:
- switch n.TChanDir() {
- case types.Crecv:
- mode.Fprintf(s, "<-chan %v", n.Left)
-
- case types.Csend:
- mode.Fprintf(s, "chan<- %v", n.Left)
-
- default:
- if n.Left != nil && n.Left.Op == OTCHAN && n.Left.Sym == nil && n.Left.TChanDir() == types.Crecv {
- mode.Fprintf(s, "chan (%v)", n.Left)
- } else {
- mode.Fprintf(s, "chan %v", n.Left)
- }
- }
-
- case OTSTRUCT:
- fmt.Fprint(s, "<struct>")
-
- case OTINTER:
- fmt.Fprint(s, "<inter>")
-
- case OTFUNC:
- fmt.Fprint(s, "<func>")
-
- case OCLOSURE:
- if mode == FErr {
- fmt.Fprint(s, "func literal")
- return
- }
- if n.Nbody.Len() != 0 {
- mode.Fprintf(s, "%v { %v }", n.Type, n.Nbody)
- return
- }
- mode.Fprintf(s, "%v { %v }", n.Type, n.Func.Closure.Nbody)
-
- case OCOMPLIT:
- if mode == FErr {
- if n.Implicit() {
- mode.Fprintf(s, "... argument")
- return
- }
- if n.Right != nil {
- mode.Fprintf(s, "%v{%s}", n.Right, ellipsisIf(n.List.Len() != 0))
- return
- }
-
- fmt.Fprint(s, "composite literal")
- return
- }
- mode.Fprintf(s, "(%v{ %.v })", n.Right, n.List)
-
- case OPTRLIT:
- mode.Fprintf(s, "&%v", n.Left)
-
- case OSTRUCTLIT, OARRAYLIT, OSLICELIT, OMAPLIT:
- if mode == FErr {
- mode.Fprintf(s, "%v{%s}", n.Type, ellipsisIf(n.List.Len() != 0))
- return
- }
- mode.Fprintf(s, "(%v{ %.v })", n.Type, n.List)
-
- case OKEY:
- if n.Left != nil && n.Right != nil {
- mode.Fprintf(s, "%v:%v", n.Left, n.Right)
- return
- }
-
- if n.Left == nil && n.Right != nil {
- mode.Fprintf(s, ":%v", n.Right)
- return
- }
- if n.Left != nil && n.Right == nil {
- mode.Fprintf(s, "%v:", n.Left)
- return
- }
- fmt.Fprint(s, ":")
-
- case OSTRUCTKEY:
- mode.Fprintf(s, "%v:%v", n.Sym, n.Left)
-
- case OCALLPART:
- n.Left.exprfmt(s, nprec, mode)
- if n.Right == nil || n.Right.Sym == nil {
- fmt.Fprint(s, ".<nil>")
- return
- }
- mode.Fprintf(s, ".%0S", n.Right.Sym)
-
- case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH:
- n.Left.exprfmt(s, nprec, mode)
- if n.Sym == nil {
- fmt.Fprint(s, ".<nil>")
- return
- }
- mode.Fprintf(s, ".%0S", n.Sym)
-
- case ODOTTYPE, ODOTTYPE2:
- n.Left.exprfmt(s, nprec, mode)
- if n.Right != nil {
- mode.Fprintf(s, ".(%v)", n.Right)
- return
- }
- mode.Fprintf(s, ".(%v)", n.Type)
-
- case OINDEX, OINDEXMAP:
- n.Left.exprfmt(s, nprec, mode)
- mode.Fprintf(s, "[%v]", n.Right)
-
- case OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR:
- n.Left.exprfmt(s, nprec, mode)
- fmt.Fprint(s, "[")
- low, high, max := n.SliceBounds()
- if low != nil {
- fmt.Fprint(s, low.modeString(mode))
- }
- fmt.Fprint(s, ":")
- if high != nil {
- fmt.Fprint(s, high.modeString(mode))
- }
- if n.Op.IsSlice3() {
- fmt.Fprint(s, ":")
- if max != nil {
- fmt.Fprint(s, max.modeString(mode))
- }
- }
- fmt.Fprint(s, "]")
-
- case OSLICEHEADER:
- if n.List.Len() != 2 {
- Fatalf("bad OSLICEHEADER list length %d", n.List.Len())
- }
- mode.Fprintf(s, "sliceheader{%v,%v,%v}", n.Left, n.List.First(), n.List.Second())
-
- case OCOMPLEX, OCOPY:
- if n.Left != nil {
- mode.Fprintf(s, "%#v(%v, %v)", n.Op, n.Left, n.Right)
- } else {
- mode.Fprintf(s, "%#v(%.v)", n.Op, n.List)
- }
-
- case OCONV,
- OCONVIFACE,
- OCONVNOP,
- OBYTES2STR,
- ORUNES2STR,
- OSTR2BYTES,
- OSTR2RUNES,
- ORUNESTR:
- if n.Type == nil || n.Type.Sym == nil {
- mode.Fprintf(s, "(%v)", n.Type)
- } else {
- mode.Fprintf(s, "%v", n.Type)
- }
- if n.Left != nil {
- mode.Fprintf(s, "(%v)", n.Left)
- } else {
- mode.Fprintf(s, "(%.v)", n.List)
- }
-
- case OREAL,
- OIMAG,
- OAPPEND,
- OCAP,
- OCLOSE,
- ODELETE,
- OLEN,
- OMAKE,
- ONEW,
- OPANIC,
- ORECOVER,
- OALIGNOF,
- OOFFSETOF,
- OSIZEOF,
- OPRINT,
- OPRINTN:
- if n.Left != nil {
- mode.Fprintf(s, "%#v(%v)", n.Op, n.Left)
- return
- }
- if n.IsDDD() {
- mode.Fprintf(s, "%#v(%.v...)", n.Op, n.List)
- return
- }
- mode.Fprintf(s, "%#v(%.v)", n.Op, n.List)
-
- case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH, OGETG:
- n.Left.exprfmt(s, nprec, mode)
- if n.IsDDD() {
- mode.Fprintf(s, "(%.v...)", n.List)
- return
- }
- mode.Fprintf(s, "(%.v)", n.List)
-
- case OMAKEMAP, OMAKECHAN, OMAKESLICE:
- if n.List.Len() != 0 { // pre-typecheck
- mode.Fprintf(s, "make(%v, %.v)", n.Type, n.List)
- return
- }
- if n.Right != nil {
- mode.Fprintf(s, "make(%v, %v, %v)", n.Type, n.Left, n.Right)
- return
- }
- if n.Left != nil && (n.Op == OMAKESLICE || !n.Left.Type.IsUntyped()) {
- mode.Fprintf(s, "make(%v, %v)", n.Type, n.Left)
- return
- }
- mode.Fprintf(s, "make(%v)", n.Type)
-
- case OMAKESLICECOPY:
- mode.Fprintf(s, "makeslicecopy(%v, %v, %v)", n.Type, n.Left, n.Right)
-
- case OPLUS, ONEG, OADDR, OBITNOT, ODEREF, ONOT, ORECV:
- // Unary
- mode.Fprintf(s, "%#v", n.Op)
- if n.Left != nil && n.Left.Op == n.Op {
- fmt.Fprint(s, " ")
- }
- n.Left.exprfmt(s, nprec+1, mode)
-
- // Binary
- case OADD,
- OAND,
- OANDAND,
- OANDNOT,
- ODIV,
- OEQ,
- OGE,
- OGT,
- OLE,
- OLT,
- OLSH,
- OMOD,
- OMUL,
- ONE,
- OOR,
- OOROR,
- ORSH,
- OSEND,
- OSUB,
- OXOR:
- n.Left.exprfmt(s, nprec, mode)
- mode.Fprintf(s, " %#v ", n.Op)
- n.Right.exprfmt(s, nprec+1, mode)
-
- case OADDSTR:
- for i, n1 := range n.List.Slice() {
- if i != 0 {
- fmt.Fprint(s, " + ")
- }
- n1.exprfmt(s, nprec, mode)
- }
- case ODDD:
- mode.Fprintf(s, "...")
- default:
- mode.Fprintf(s, "<node %v>", n.Op)
- }
-}
-
-func (n *Node) nodefmt(s fmt.State, flag FmtFlag, mode fmtMode) {
- t := n.Type
-
- // We almost always want the original.
- // TODO(gri) Why the special case for OLITERAL?
- if n.Op != OLITERAL && n.Orig != nil {
- n = n.Orig
- }
-
- if flag&FmtLong != 0 && t != nil {
- if t.Etype == TNIL {
- fmt.Fprint(s, "nil")
- } else if n.Op == ONAME && n.Name.AutoTemp() {
- mode.Fprintf(s, "%v value", t)
- } else {
- mode.Fprintf(s, "%v (type %v)", n, t)
- }
- return
- }
-
- // TODO inlining produces expressions with ninits. we can't print these yet.
-
- if opprec[n.Op] < 0 {
- n.stmtfmt(s, mode)
- return
- }
-
- n.exprfmt(s, 0, mode)
-}
-
-func (n *Node) nodedump(s fmt.State, flag FmtFlag, mode fmtMode) {
- recur := flag&FmtShort == 0
-
- if recur {
- indent(s)
- if dumpdepth > 40 {
- fmt.Fprint(s, "...")
- return
- }
-
- if n.Ninit.Len() != 0 {
- mode.Fprintf(s, "%v-init%v", n.Op, n.Ninit)
- indent(s)
- }
- }
-
- switch n.Op {
- default:
- mode.Fprintf(s, "%v%j", n.Op, n)
-
- case OLITERAL:
- mode.Fprintf(s, "%v-%v%j", n.Op, n.Val(), n)
-
- case ONAME, ONONAME:
- if n.Sym != nil {
- mode.Fprintf(s, "%v-%v%j", n.Op, n.Sym, n)
- } else {
- mode.Fprintf(s, "%v%j", n.Op, n)
- }
- if recur && n.Type == nil && n.Name != nil && n.Name.Param != nil && n.Name.Param.Ntype != nil {
- indent(s)
- mode.Fprintf(s, "%v-ntype%v", n.Op, n.Name.Param.Ntype)
- }
-
- case OASOP:
- mode.Fprintf(s, "%v-%v%j", n.Op, n.SubOp(), n)
-
- case OTYPE:
- mode.Fprintf(s, "%v %v%j type=%v", n.Op, n.Sym, n, n.Type)
- if recur && n.Type == nil && n.Name != nil && n.Name.Param != nil && n.Name.Param.Ntype != nil {
- indent(s)
- mode.Fprintf(s, "%v-ntype%v", n.Op, n.Name.Param.Ntype)
- }
- }
-
- if n.Op == OCLOSURE && n.Func.Closure != nil && n.Func.Closure.Func.Nname.Sym != nil {
- mode.Fprintf(s, " fnName %v", n.Func.Closure.Func.Nname.Sym)
- }
- if n.Sym != nil && n.Op != ONAME {
- mode.Fprintf(s, " %v", n.Sym)
- }
-
- if n.Type != nil {
- mode.Fprintf(s, " %v", n.Type)
- }
-
- if recur {
- if n.Left != nil {
- mode.Fprintf(s, "%v", n.Left)
- }
- if n.Right != nil {
- mode.Fprintf(s, "%v", n.Right)
- }
- if n.Func != nil && n.Func.Closure != nil && n.Func.Closure.Nbody.Len() != 0 {
- indent(s)
- // The function associated with a closure
- mode.Fprintf(s, "%v-clofunc%v", n.Op, n.Func.Closure)
- }
- if n.Func != nil && n.Func.Dcl != nil && len(n.Func.Dcl) != 0 {
- indent(s)
- // The dcls for a func or closure
- mode.Fprintf(s, "%v-dcl%v", n.Op, asNodes(n.Func.Dcl))
- }
- if n.List.Len() != 0 {
- indent(s)
- mode.Fprintf(s, "%v-list%v", n.Op, n.List)
- }
-
- if n.Rlist.Len() != 0 {
- indent(s)
- mode.Fprintf(s, "%v-rlist%v", n.Op, n.Rlist)
- }
-
- if n.Nbody.Len() != 0 {
- indent(s)
- mode.Fprintf(s, "%v-body%v", n.Op, n.Nbody)
- }
- }
-}
-
-// "%S" suppresses qualifying with package
-func symFormat(s *types.Sym, f fmt.State, verb rune, mode fmtMode) {
- switch verb {
- case 'v', 'S':
- fmt.Fprint(f, sconv(s, fmtFlag(f, verb), mode))
-
- default:
- fmt.Fprintf(f, "%%!%c(*types.Sym=%p)", verb, s)
- }
-}
-
-func smodeString(s *types.Sym, mode fmtMode) string { return sconv(s, 0, mode) }
-
-// See #16897 before changing the implementation of sconv.
-func sconv(s *types.Sym, flag FmtFlag, mode fmtMode) string {
- if flag&FmtLong != 0 {
- panic("linksymfmt")
- }
-
- if s == nil {
- return "<S>"
- }
-
- if s.Name == "_" {
- return "_"
- }
- buf := fmtBufferPool.Get().(*bytes.Buffer)
- buf.Reset()
- defer fmtBufferPool.Put(buf)
-
- flag, mode = flag.update(mode)
- symfmt(buf, s, flag, mode)
- return types.InternString(buf.Bytes())
-}
-
-func sconv2(b *bytes.Buffer, s *types.Sym, flag FmtFlag, mode fmtMode) {
- if flag&FmtLong != 0 {
- panic("linksymfmt")
- }
- if s == nil {
- b.WriteString("<S>")
- return
- }
- if s.Name == "_" {
- b.WriteString("_")
- return
- }
-
- flag, mode = flag.update(mode)
- symfmt(b, s, flag, mode)
-}
-
-func fldconv(b *bytes.Buffer, f *types.Field, flag FmtFlag, mode fmtMode, visited map[*types.Type]int, funarg types.Funarg) {
- if f == nil {
- b.WriteString("<T>")
- return
- }
- flag, mode = flag.update(mode)
- if mode == FTypeIdName {
- flag |= FmtUnsigned
- }
-
- var name string
- if flag&FmtShort == 0 {
- s := f.Sym
-
- // Take the name from the original.
- if mode == FErr {
- s = origSym(s)
- }
-
- if s != nil && f.Embedded == 0 {
- if funarg != types.FunargNone {
- name = asNode(f.Nname).modeString(mode)
- } else if flag&FmtLong != 0 {
- name = mode.Sprintf("%0S", s)
- if !types.IsExported(name) && flag&FmtUnsigned == 0 {
- name = smodeString(s, mode) // qualify non-exported names (used on structs, not on funarg)
- }
- } else {
- name = smodeString(s, mode)
- }
- }
- }
-
- if name != "" {
- b.WriteString(name)
- b.WriteString(" ")
- }
-
- if f.IsDDD() {
- var et *types.Type
- if f.Type != nil {
- et = f.Type.Elem()
- }
- b.WriteString("...")
- tconv2(b, et, 0, mode, visited)
- } else {
- tconv2(b, f.Type, 0, mode, visited)
- }
-
- if flag&FmtShort == 0 && funarg == types.FunargNone && f.Note != "" {
- b.WriteString(" ")
- b.WriteString(strconv.Quote(f.Note))
- }
-}
-
-// "%L" print definition, not name
-// "%S" omit 'func' and receiver from function types, short type names
-func typeFormat(t *types.Type, s fmt.State, verb rune, mode fmtMode) {
- switch verb {
- case 'v', 'S', 'L':
- fmt.Fprint(s, tconv(t, fmtFlag(s, verb), mode))
- default:
- fmt.Fprintf(s, "%%!%c(*Type=%p)", verb, t)
- }
-}
-
-func (n *Node) String() string { return fmt.Sprint(n) }
-func (n *Node) modeString(mode fmtMode) string { return mode.Sprint(n) }
-
-// "%L" suffix with "(type %T)" where possible
-// "%+S" in debug mode, don't recurse, no multiline output
-func (n *Node) nconv(s fmt.State, flag FmtFlag, mode fmtMode) {
- if n == nil {
- fmt.Fprint(s, "<N>")
- return
- }
-
- flag, mode = flag.update(mode)
-
- switch mode {
- case FErr:
- n.nodefmt(s, flag, mode)
-
- case FDbg:
- dumpdepth++
- n.nodedump(s, flag, mode)
- dumpdepth--
-
- default:
- Fatalf("unhandled %%N mode: %d", mode)
- }
-}
-
-func (l Nodes) format(s fmt.State, verb rune, mode fmtMode) {
- switch verb {
- case 'v':
- l.hconv(s, fmtFlag(s, verb), mode)
-
- default:
- fmt.Fprintf(s, "%%!%c(Nodes)", verb)
- }
-}
-
-func (n Nodes) String() string {
- return fmt.Sprint(n)
-}
-
-// Flags: all those of %N plus '.': separate with comma's instead of semicolons.
-func (l Nodes) hconv(s fmt.State, flag FmtFlag, mode fmtMode) {
- if l.Len() == 0 && mode == FDbg {
- fmt.Fprint(s, "<nil>")
- return
- }
-
- flag, mode = flag.update(mode)
- sep := "; "
- if mode == FDbg {
- sep = "\n"
- } else if flag&FmtComma != 0 {
- sep = ", "
- }
-
- for i, n := range l.Slice() {
- fmt.Fprint(s, n.modeString(mode))
- if i+1 < l.Len() {
- fmt.Fprint(s, sep)
- }
- }
-}
-
-func dumplist(s string, l Nodes) {
- fmt.Printf("%s%+v\n", s, l)
-}
-
-func fdumplist(w io.Writer, s string, l Nodes) {
- fmt.Fprintf(w, "%s%+v\n", s, l)
-}
-
-func Dump(s string, n *Node) {
- fmt.Printf("%s [%p]%+v\n", s, n, n)
-}
-
-// TODO(gri) make variable local somehow
-var dumpdepth int
-
-// indent prints indentation to s.
-func indent(s fmt.State) {
- fmt.Fprint(s, "\n")
- for i := 0; i < dumpdepth; i++ {
- fmt.Fprint(s, ". ")
- }
-}
-
-func ellipsisIf(b bool) string {
- if b {
- return "..."
- }
- return ""
-}
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
// isParamStackCopy reports whether this is the on-stack copy of a
// function parameter that moved to the heap.
-func (n *Node) isParamStackCopy() bool {
- return n.Op == ONAME && (n.Class() == PPARAM || n.Class() == PPARAMOUT) && n.Name.Param.Heapaddr != nil
+func isParamStackCopy(n ir.Node) bool {
+ if n.Op() != ir.ONAME {
+ return false
+ }
+ name := n.(*ir.Name)
+ return (name.Class() == ir.PPARAM || name.Class() == ir.PPARAMOUT) && name.Heapaddr != nil
}
// isParamHeapCopy reports whether this is the on-heap copy of
// a function parameter that moved to the heap.
-func (n *Node) isParamHeapCopy() bool {
- return n.Op == ONAME && n.Class() == PAUTOHEAP && n.Name.Param.Stackcopy != nil
+func isParamHeapCopy(n ir.Node) bool {
+ if n.Op() != ir.ONAME {
+ return false
+ }
+ name := n.(*ir.Name)
+ return name.Class() == ir.PAUTOHEAP && name.Name().Stackcopy != nil
}
// autotmpname returns the name for an autotmp variable numbered n.
}
// make a new Node off the books
-func tempAt(pos src.XPos, curfn *Node, t *types.Type) *Node {
+func tempAt(pos src.XPos, curfn *ir.Func, t *types.Type) *ir.Name {
if curfn == nil {
- Fatalf("no curfn for tempAt")
+ base.Fatalf("no curfn for tempAt")
}
- if curfn.Func.Closure != nil && curfn.Op == OCLOSURE {
- Dump("tempAt", curfn)
- Fatalf("adding tempAt to wrong closure function")
+ if curfn.Op() == ir.OCLOSURE {
+ ir.Dump("tempAt", curfn)
+ base.Fatalf("adding tempAt to wrong closure function")
}
if t == nil {
- Fatalf("tempAt called with nil type")
+ base.Fatalf("tempAt called with nil type")
}
s := &types.Sym{
- Name: autotmpname(len(curfn.Func.Dcl)),
- Pkg: localpkg,
+ Name: autotmpname(len(curfn.Dcl)),
+ Pkg: types.LocalPkg,
}
- n := newnamel(pos, s)
- s.Def = asTypesNode(n)
- n.Type = t
- n.SetClass(PAUTO)
- n.Esc = EscNever
- n.Name.Curfn = curfn
- n.Name.SetUsed(true)
- n.Name.SetAutoTemp(true)
- curfn.Func.Dcl = append(curfn.Func.Dcl, n)
+ n := ir.NewNameAt(pos, s)
+ s.Def = n
+ n.SetType(t)
+ n.SetClass(ir.PAUTO)
+ n.SetEsc(EscNever)
+ n.Curfn = curfn
+ n.SetUsed(true)
+ n.SetAutoTemp(true)
+ curfn.Dcl = append(curfn.Dcl, n)
dowidth(t)
- return n.Orig
+ return n
}
-func temp(t *types.Type) *Node {
- return tempAt(lineno, Curfn, t)
+func temp(t *types.Type) *ir.Name {
+ return tempAt(base.Pos, Curfn, t)
}
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/obj"
"sync"
)
-const (
- BADWIDTH = types.BADWIDTH
-)
-
var (
// maximum size variable which we will allocate on the stack.
// This limit is for explicit variable declarations like "var x T" or "x := ...".
// isRuntimePkg reports whether p is package runtime.
func isRuntimePkg(p *types.Pkg) bool {
- if compiling_runtime && p == localpkg {
+ if base.Flag.CompilingRuntime && p == types.LocalPkg {
return true
}
return p.Path == "runtime"
// isReflectPkg reports whether p is package reflect.
func isReflectPkg(p *types.Pkg) bool {
- if p == localpkg {
- return myimportpath == "reflect"
+ if p == types.LocalPkg {
+ return base.Ctxt.Pkgpath == "reflect"
}
return p.Path == "reflect"
}
-// The Class of a variable/function describes the "storage class"
-// of a variable or function. During parsing, storage classes are
-// called declaration contexts.
-type Class uint8
-
-//go:generate stringer -type=Class
-const (
- Pxxx Class = iota // no class; used during ssa conversion to indicate pseudo-variables
- PEXTERN // global variables
- PAUTO // local variables
- PAUTOHEAP // local variables or parameters moved to heap
- PPARAM // input arguments
- PPARAMOUT // output results
- PFUNC // global functions
-
- // Careful: Class is stored in three bits in Node.flags.
- _ = uint((1 << 3) - iota) // static assert for iota <= (1 << 3)
-)
-
// Slices in the runtime are represented by three components:
//
// type slice struct {
var pragcgobuf [][]string
-var outfile string
-var linkobj string
-
-// nerrors is the number of compiler errors reported
-// since the last call to saveerrors.
-var nerrors int
-
-// nsavederrors is the total number of compiler errors
-// reported before the last call to saveerrors.
-var nsavederrors int
-
-var nsyntaxerrors int
-
var decldepth int32
-var nolocalimports bool
-
-// gc debug flags
-type DebugFlags struct {
- P, B, C, E,
- K, L, N, S,
- W, e, h, j,
- l, m, r, w int
-}
-
-var Debug DebugFlags
-
-var debugstr string
-
-var Debug_checknil int
-var Debug_typeassert int
-
-var localpkg *types.Pkg // package being compiled
-
var inimport bool // set during import
var itabpkg *types.Pkg // fake pkg for itab entries
var zerosize int64
-var myimportpath string
-
-var localimport string
-
-var asmhdr string
-
-var simtype [NTYPE]types.EType
+var simtype [types.NTYPE]types.Kind
var (
- isInt [NTYPE]bool
- isFloat [NTYPE]bool
- isComplex [NTYPE]bool
- issimple [NTYPE]bool
+ isInt [types.NTYPE]bool
+ isFloat [types.NTYPE]bool
+ isComplex [types.NTYPE]bool
+ issimple [types.NTYPE]bool
)
var (
- okforeq [NTYPE]bool
- okforadd [NTYPE]bool
- okforand [NTYPE]bool
- okfornone [NTYPE]bool
- okforcmp [NTYPE]bool
- okforbool [NTYPE]bool
- okforcap [NTYPE]bool
- okforlen [NTYPE]bool
- okforarith [NTYPE]bool
- okforconst [NTYPE]bool
+ okforeq [types.NTYPE]bool
+ okforadd [types.NTYPE]bool
+ okforand [types.NTYPE]bool
+ okfornone [types.NTYPE]bool
+ okforbool [types.NTYPE]bool
+ okforcap [types.NTYPE]bool
+ okforlen [types.NTYPE]bool
+ okforarith [types.NTYPE]bool
)
+var okforcmp [types.NTYPE]bool
+
var (
- okfor [OEND][]bool
- iscmp [OEND]bool
+ okfor [ir.OEND][]bool
+ iscmp [ir.OEND]bool
)
-var minintval [NTYPE]*Mpint
-
-var maxintval [NTYPE]*Mpint
-
-var minfltval [NTYPE]*Mpflt
-
-var maxfltval [NTYPE]*Mpflt
-
-var xtop []*Node
-
-var exportlist []*Node
-
-var importlist []*Node // imported functions and methods with inlinable bodies
-
var (
funcsymsmu sync.Mutex // protects funcsyms and associated package lookups (see func funcsym)
funcsyms []*types.Sym
)
-var dclcontext Class // PEXTERN/PAUTO
+var dclcontext ir.Class // PEXTERN/PAUTO
-var Curfn *Node
+var Curfn *ir.Func
var Widthptr int
var Widthreg int
-var nblank *Node
-
var typecheckok bool
-var compiling_runtime bool
-
-// Compiling the standard library
-var compiling_std bool
-
-var use_writebarrier bool
-
-var pure_go bool
-
-var flag_installsuffix string
-
-var flag_race bool
-
-var flag_msan bool
-
-var flagDWARF bool
-
// Whether we are adding any sort of code instrumentation, such as
// when the race detector is enabled.
var instrumenting bool
-// Whether we are tracking lexical scopes for DWARF.
-var trackScopes bool
-
-// Controls generation of DWARF inlined instance records. Zero
-// disables, 1 emits inlined routines but suppresses var info,
-// and 2 emits inlined routines with tracking of formals/locals.
-var genDwarfInline int
-
-var debuglive int
-
-var Ctxt *obj.Link
-
-var writearchive bool
-
-var nodfp *Node
-
-var disable_checknil int
+var nodfp *ir.Name
var autogeneratedPos src.XPos
var thearch Arch
var (
- staticuint64s,
- zerobase *Node
+ staticuint64s *ir.Name
+ zerobase *ir.Name
assertE2I,
assertE2I2,
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
+ "fmt"
+ "os"
)
var sharedProgArray = new([10000]obj.Prog) // *T instead of T to work around issue 19839
next *obj.Prog // next Prog
pc int64 // virtual PC; count of Progs
pos src.XPos // position to use for new Progs
- curfn *Node // fn these Progs are for
+ curfn *ir.Func // fn these Progs are for
progcache []obj.Prog // local progcache
cacheidx int // first free element of progcache
// newProgs returns a new Progs for fn.
// worker indicates which of the backend workers will use the Progs.
-func newProgs(fn *Node, worker int) *Progs {
+func newProgs(fn *ir.Func, worker int) *Progs {
pp := new(Progs)
- if Ctxt.CanReuseProgs() {
- sz := len(sharedProgArray) / nBackendWorkers
+ if base.Ctxt.CanReuseProgs() {
+ sz := len(sharedProgArray) / base.Flag.LowerC
pp.progcache = sharedProgArray[sz*worker : sz*(worker+1)]
}
pp.curfn = fn
pp.next = pp.NewProg()
pp.clearp(pp.next)
- pp.pos = fn.Pos
+ pp.pos = fn.Pos()
pp.settext(fn)
// PCDATA tables implicitly start with index -1.
pp.prevLive = LivenessIndex{-1, false}
} else {
p = new(obj.Prog)
}
- p.Ctxt = Ctxt
+ p.Ctxt = base.Ctxt
return p
}
// Flush converts from pp to machine code.
func (pp *Progs) Flush() {
plist := &obj.Plist{Firstpc: pp.Text, Curfn: pp.curfn}
- obj.Flushplist(Ctxt, plist, pp.NewProg, myimportpath)
+ obj.Flushplist(base.Ctxt, plist, pp.NewProg, base.Ctxt.Pkgpath)
}
// Free clears pp and any associated resources.
func (pp *Progs) Free() {
- if Ctxt.CanReuseProgs() {
+ if base.Ctxt.CanReuseProgs() {
// Clear progs to enable GC and avoid abuse.
s := pp.progcache[:pp.cacheidx]
for i := range s {
pp.clearp(pp.next)
p.Link = pp.next
- if !pp.pos.IsKnown() && Debug.K != 0 {
- Warn("prog: unknown position (line 0)")
+ if !pp.pos.IsKnown() && base.Flag.K != 0 {
+ base.Warn("prog: unknown position (line 0)")
}
p.As = as
return q
}
-func (pp *Progs) settext(fn *Node) {
+func (pp *Progs) settext(fn *ir.Func) {
if pp.Text != nil {
- Fatalf("Progs.settext called twice")
+ base.Fatalf("Progs.settext called twice")
}
ptxt := pp.Prog(obj.ATEXT)
pp.Text = ptxt
- fn.Func.lsym.Func().Text = ptxt
+ fn.LSym.Func().Text = ptxt
ptxt.From.Type = obj.TYPE_MEM
ptxt.From.Name = obj.NAME_EXTERN
- ptxt.From.Sym = fn.Func.lsym
+ ptxt.From.Sym = fn.LSym
+}
+
+// makeABIWrapper creates a new function that wraps a cross-ABI call
+// to "f". The wrapper is marked as an ABIWRAPPER.
+func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) {
+
+ // Q: is this needed?
+ savepos := base.Pos
+ savedclcontext := dclcontext
+ savedcurfn := Curfn
+
+ base.Pos = autogeneratedPos
+ dclcontext = ir.PEXTERN
+
+ // At the moment we don't support wrapping a method, we'd need machinery
+ // below to handle the receiver. Panic if we see this scenario.
+ ft := f.Nname.Ntype.Type()
+ if ft.NumRecvs() != 0 {
+ panic("makeABIWrapper support for wrapping methods not implemented")
+ }
+
+ // Manufacture a new func type to use for the wrapper.
+ var noReceiver *ir.Field
+ tfn := ir.NewFuncType(base.Pos,
+ noReceiver,
+ structargs(ft.Params(), true),
+ structargs(ft.Results(), false))
+
+ // Reuse f's types.Sym to create a new ODCLFUNC/function.
+ fn := dclfunc(f.Nname.Sym(), tfn)
+ fn.SetDupok(true)
+ fn.SetWrapper(true) // ignore frame for panic+recover matching
+
+ // Select LSYM now.
+ asym := base.Ctxt.LookupABI(f.LSym.Name, wrapperABI)
+ asym.Type = objabi.STEXT
+ if fn.LSym != nil {
+ panic("unexpected")
+ }
+ fn.LSym = asym
+
+ // ABI0-to-ABIInternal wrappers will be mainly loading params from
+ // stack into registers (and/or storing stack locations back to
+ // registers after the wrapped call); in most cases they won't
+ // need to allocate stack space, so it should be OK to mark them
+ // as NOSPLIT in these cases. In addition, my assumption is that
+ // functions written in assembly are NOSPLIT in most (but not all)
+ // cases. In the case of an ABIInternal target that has too many
+ // parameters to fit into registers, the wrapper would need to
+ // allocate stack space, but this seems like an unlikely scenario.
+ // Hence: mark these wrappers NOSPLIT.
+ //
+ // ABIInternal-to-ABI0 wrappers on the other hand will be taking
+ // things in registers and pushing them onto the stack prior to
+ // the ABI0 call, meaning that they will always need to allocate
+ // stack space. If the compiler marks them as NOSPLIT this seems
+ // as though it could lead to situations where the linker's
+ // nosplit-overflow analysis would trigger a link failure. On the
+ // other hand if they are not tagged NOSPLIT then this could cause
+ // problems when building the runtime (since there may be calls to
+ // asm routine in cases where it's not safe to grow the stack). In
+ // most cases the wrapper would be (in effect) inlined, but are
+ // there (perhaps) indirect calls from the runtime that could run
+ // into trouble here.
+ // FIXME: at the moment all.bash does not pass when I leave out
+ // NOSPLIT for these wrappers, so all are currently tagged with NOSPLIT.
+ setupTextLSym(fn, obj.NOSPLIT|obj.ABIWRAPPER)
+
+ // Generate call. Use tail call if no params and no returns,
+ // but a regular call otherwise.
+ //
+ // Note: ideally we would be using a tail call in cases where
+ // there are params but no returns for ABI0->ABIInternal wrappers,
+ // provided that all params fit into registers (e.g. we don't have
+ // to allocate any stack space). Doing this will require some
+ // extra work in typecheck/walk/ssa, might want to add a new node
+ // OTAILCALL or something to this effect.
+ var call ir.Node
+ if tfn.Type().NumResults() == 0 && tfn.Type().NumParams() == 0 && tfn.Type().NumRecvs() == 0 {
+ call = nodSym(ir.ORETJMP, nil, f.Nname.Sym())
+ } else {
+ call = ir.Nod(ir.OCALL, f.Nname, nil)
+ call.PtrList().Set(paramNnames(tfn.Type()))
+ call.SetIsDDD(tfn.Type().IsVariadic())
+ if tfn.Type().NumResults() > 0 {
+ n := ir.Nod(ir.ORETURN, nil, nil)
+ n.PtrList().Set1(call)
+ call = n
+ }
+ }
+ fn.PtrBody().Append(call)
+
+ funcbody()
+ if base.Debug.DclStack != 0 {
+ testdclstack()
+ }
+
+ typecheckFunc(fn)
+ Curfn = fn
+ typecheckslice(fn.Body().Slice(), ctxStmt)
+
+ escapeFuncs([]*ir.Func{fn}, false)
+
+ Target.Decls = append(Target.Decls, fn)
+
+ // Restore previous context.
+ base.Pos = savepos
+ dclcontext = savedclcontext
+ Curfn = savedcurfn
}
// initLSym defines f's obj.LSym and initializes it based on the
//
// initLSym must be called exactly once per function and must be
// called for both functions with bodies and functions without bodies.
-func (f *Func) initLSym(hasBody bool) {
- if f.lsym != nil {
- Fatalf("Func.initLSym called twice")
+// For body-less functions, we only create the LSym; for functions
+// with bodies call a helper to set up / populate the LSym.
+func initLSym(f *ir.Func, hasBody bool) {
+ // FIXME: for new-style ABI wrappers, we set up the lsym at the
+ // point the wrapper is created.
+ if f.LSym != nil && base.Flag.ABIWrap {
+ return
+ }
+ selectLSym(f, hasBody)
+ if hasBody {
+ setupTextLSym(f, 0)
}
+}
- if nam := f.Nname; !nam.isBlank() {
- f.lsym = nam.Sym.Linksym()
- if f.Pragma&Systemstack != 0 {
- f.lsym.Set(obj.AttrCFunc, true)
- }
+// selectLSym sets up the LSym for a given function, and
+// makes calls to helpers to create ABI wrappers if needed.
+func selectLSym(f *ir.Func, hasBody bool) {
+ if f.LSym != nil {
+ base.Fatalf("Func.initLSym called twice")
+ }
- var aliasABI obj.ABI
- needABIAlias := false
- defABI, hasDefABI := symabiDefs[f.lsym.Name]
+ if nam := f.Nname; !ir.IsBlank(nam) {
+
+ var wrapperABI obj.ABI
+ needABIWrapper := false
+ defABI, hasDefABI := symabiDefs[nam.Sym().LinksymName()]
if hasDefABI && defABI == obj.ABI0 {
// Symbol is defined as ABI0. Create an
// Internal -> ABI0 wrapper.
- f.lsym.SetABI(obj.ABI0)
- needABIAlias, aliasABI = true, obj.ABIInternal
+ f.LSym = nam.Sym().LinksymABI0()
+ needABIWrapper, wrapperABI = true, obj.ABIInternal
} else {
+ f.LSym = nam.Sym().Linksym()
// No ABI override. Check that the symbol is
// using the expected ABI.
want := obj.ABIInternal
- if f.lsym.ABI() != want {
- Fatalf("function symbol %s has the wrong ABI %v, expected %v", f.lsym.Name, f.lsym.ABI(), want)
+ if f.LSym.ABI() != want {
+ base.Fatalf("function symbol %s has the wrong ABI %v, expected %v", f.LSym.Name, f.LSym.ABI(), want)
}
}
+ if f.Pragma&ir.Systemstack != 0 {
+ f.LSym.Set(obj.AttrCFunc, true)
+ }
- isLinknameExported := nam.Sym.Linkname != "" && (hasBody || hasDefABI)
- if abi, ok := symabiRefs[f.lsym.Name]; (ok && abi == obj.ABI0) || isLinknameExported {
+ isLinknameExported := nam.Sym().Linkname != "" && (hasBody || hasDefABI)
+ if abi, ok := symabiRefs[f.LSym.Name]; (ok && abi == obj.ABI0) || isLinknameExported {
// Either 1) this symbol is definitely
// referenced as ABI0 from this package; or 2)
// this symbol is defined in this package but
// since other packages may "pull" symbols
// using linkname and we don't want to create
// duplicate ABI wrappers.
- if f.lsym.ABI() != obj.ABI0 {
- needABIAlias, aliasABI = true, obj.ABI0
+ if f.LSym.ABI() != obj.ABI0 {
+ needABIWrapper, wrapperABI = true, obj.ABI0
}
}
- if needABIAlias {
- // These LSyms have the same name as the
- // native function, so we create them directly
- // rather than looking them up. The uniqueness
- // of f.lsym ensures uniqueness of asym.
- asym := &obj.LSym{
- Name: f.lsym.Name,
- Type: objabi.SABIALIAS,
- R: []obj.Reloc{{Sym: f.lsym}}, // 0 size, so "informational"
+ if needABIWrapper {
+ if !useABIWrapGen(f) {
+ // Fallback: use alias instead. FIXME.
+
+ // These LSyms have the same name as the
+ // native function, so we create them directly
+ // rather than looking them up. The uniqueness
+ // of f.lsym ensures uniqueness of asym.
+ asym := &obj.LSym{
+ Name: f.LSym.Name,
+ Type: objabi.SABIALIAS,
+ R: []obj.Reloc{{Sym: f.LSym}}, // 0 size, so "informational"
+ }
+ asym.SetABI(wrapperABI)
+ asym.Set(obj.AttrDuplicateOK, true)
+ base.Ctxt.ABIAliases = append(base.Ctxt.ABIAliases, asym)
+ } else {
+ if base.Debug.ABIWrap != 0 {
+ fmt.Fprintf(os.Stderr, "=-= %v to %v wrapper for %s.%s\n",
+ wrapperABI, 1-wrapperABI, types.LocalPkg.Path, f.LSym.Name)
+ }
+ makeABIWrapper(f, wrapperABI)
}
- asym.SetABI(aliasABI)
- asym.Set(obj.AttrDuplicateOK, true)
- Ctxt.ABIAliases = append(Ctxt.ABIAliases, asym)
}
}
+}
- if !hasBody {
- // For body-less functions, we only create the LSym.
- return
- }
-
- var flag int
+// setupTextLSym initializes the LSym for a with-body text symbol.
+func setupTextLSym(f *ir.Func, flag int) {
if f.Dupok() {
flag |= obj.DUPOK
}
if f.Needctxt() {
flag |= obj.NEEDCTXT
}
- if f.Pragma&Nosplit != 0 {
+ if f.Pragma&ir.Nosplit != 0 {
flag |= obj.NOSPLIT
}
if f.ReflectMethod() {
// Clumsy but important.
// See test/recover.go for test cases and src/reflect/value.go
// for the actual functions being considered.
- if myimportpath == "reflect" {
- switch f.Nname.Sym.Name {
+ if base.Ctxt.Pkgpath == "reflect" {
+ switch f.Sym().Name {
case "callReflect", "callMethod":
flag |= obj.WRAPPER
}
}
- Ctxt.InitTextSym(f.lsym, flag)
+ base.Ctxt.InitTextSym(f.LSym, flag)
}
-func ggloblnod(nam *Node) {
- s := nam.Sym.Linksym()
+func ggloblnod(nam ir.Node) {
+ s := nam.Sym().Linksym()
s.Gotype = ngotype(nam).Linksym()
flags := 0
- if nam.Name.Readonly() {
+ if nam.Name().Readonly() {
flags = obj.RODATA
}
- if nam.Type != nil && !nam.Type.HasPointers() {
+ if nam.Type() != nil && !nam.Type().HasPointers() {
flags |= obj.NOPTR
}
- Ctxt.Globl(s, nam.Type.Width, flags)
- if nam.Name.LibfuzzerExtraCounter() {
+ base.Ctxt.Globl(s, nam.Type().Width, flags)
+ if nam.Name().LibfuzzerExtraCounter() {
s.Type = objabi.SLIBFUZZER_EXTRA_COUNTER
}
- if nam.Sym.Linkname != "" {
+ if nam.Sym().Linkname != "" {
// Make sure linkname'd symbol is non-package. When a symbol is
// both imported and linkname'd, s.Pkg may not set to "_" in
// types.Sym.Linksym because LSym already exists. Set it here.
s.Set(obj.AttrLocal, true)
flags &^= obj.LOCAL
}
- Ctxt.Globl(s, int64(width), int(flags))
+ base.Ctxt.Globl(s, int64(width), int(flags))
}
func Addrconst(a *obj.Addr, v int64) {
- a.Sym = nil
- a.Type = obj.TYPE_CONST
- a.Offset = v
+ a.SetConst(v)
}
func Patch(p *obj.Prog, to *obj.Prog) {
- if p.To.Type != obj.TYPE_BRANCH {
- Fatalf("patch: not a branch")
- }
p.To.SetTarget(to)
- p.To.Offset = to.Pc
}
import (
"bufio"
"bytes"
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/goobj"
"cmd/internal/src"
"crypto/md5"
"encoding/binary"
"fmt"
+ "go/constant"
"io"
"math/big"
"sort"
)
func iexport(out *bufio.Writer) {
- // Mark inline bodies that are reachable through exported types.
- // (Phase 0 of bexport.go.)
- {
- // TODO(mdempsky): Separate from bexport logic.
- p := &exporter{marked: make(map[*types.Type]bool)}
- for _, n := range exportlist {
- sym := n.Sym
- p.markType(asNode(sym.Def).Type)
- }
- }
-
p := iexporter{
allPkgs: map[*types.Pkg]bool{},
stringIndex: map[string]uint64{},
- declIndex: map[*Node]uint64{},
- inlineIndex: map[*Node]uint64{},
+ declIndex: map[*types.Sym]uint64{},
+ inlineIndex: map[*types.Sym]uint64{},
typIndex: map[*types.Type]uint64{},
}
p.typIndex[pt] = uint64(i)
}
if len(p.typIndex) > predeclReserved {
- Fatalf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved)
+ base.Fatalf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved)
}
// Initialize work queue with exported declarations.
- for _, n := range exportlist {
+ for _, n := range Target.Exports {
p.pushDecl(n)
}
// Loop until no more work. We use a queue because while
// writing out inline bodies, we may discover additional
// declarations that are needed.
- for !p.declTodo.empty() {
- p.doDecl(p.declTodo.popLeft())
+ for !p.declTodo.Empty() {
+ p.doDecl(p.declTodo.PopLeft())
}
// Append indices to data0 section.
w.writeIndex(p.inlineIndex, false)
w.flush()
+ if *base.Flag.LowerV {
+ fmt.Printf("export: hdr strings %v, data %v, index %v\n", p.strings.Len(), dataLen, p.data0.Len())
+ }
+
// Assemble header.
var hdr intWriter
hdr.WriteByte('i')
// Add fingerprint (used by linker object file).
// Attach this to the end, so tools (e.g. gcimporter) don't care.
- copy(Ctxt.Fingerprint[:], h.Sum(nil)[:])
- out.Write(Ctxt.Fingerprint[:])
+ copy(base.Ctxt.Fingerprint[:], h.Sum(nil)[:])
+ out.Write(base.Ctxt.Fingerprint[:])
}
-// writeIndex writes out an object index. mainIndex indicates whether
+// writeIndex writes out a symbol index. mainIndex indicates whether
// we're writing out the main index, which is also read by
// non-compiler tools and includes a complete package description
// (i.e., name and height).
-func (w *exportWriter) writeIndex(index map[*Node]uint64, mainIndex bool) {
- // Build a map from packages to objects from that package.
- pkgObjs := map[*types.Pkg][]*Node{}
+func (w *exportWriter) writeIndex(index map[*types.Sym]uint64, mainIndex bool) {
+ // Build a map from packages to symbols from that package.
+ pkgSyms := map[*types.Pkg][]*types.Sym{}
// For the main index, make sure to include every package that
// we reference, even if we're not exporting (or reexporting)
// any symbols from it.
if mainIndex {
- pkgObjs[localpkg] = nil
+ pkgSyms[types.LocalPkg] = nil
for pkg := range w.p.allPkgs {
- pkgObjs[pkg] = nil
+ pkgSyms[pkg] = nil
}
}
- for n := range index {
- pkgObjs[n.Sym.Pkg] = append(pkgObjs[n.Sym.Pkg], n)
+ // Group symbols by package.
+ for sym := range index {
+ pkgSyms[sym.Pkg] = append(pkgSyms[sym.Pkg], sym)
}
+ // Sort packages by path.
var pkgs []*types.Pkg
- for pkg, objs := range pkgObjs {
+ for pkg := range pkgSyms {
pkgs = append(pkgs, pkg)
-
- sort.Slice(objs, func(i, j int) bool {
- return objs[i].Sym.Name < objs[j].Sym.Name
- })
}
-
sort.Slice(pkgs, func(i, j int) bool {
return pkgs[i].Path < pkgs[j].Path
})
w.uint64(uint64(pkg.Height))
}
- objs := pkgObjs[pkg]
- w.uint64(uint64(len(objs)))
- for _, n := range objs {
- w.string(n.Sym.Name)
- w.uint64(index[n])
+ // Sort symbols within a package by name.
+ syms := pkgSyms[pkg]
+ sort.Slice(syms, func(i, j int) bool {
+ return syms[i].Name < syms[j].Name
+ })
+
+ w.uint64(uint64(len(syms)))
+ for _, sym := range syms {
+ w.string(sym.Name)
+ w.uint64(index[sym])
}
}
}
// main index.
allPkgs map[*types.Pkg]bool
- declTodo nodeQueue
+ declTodo ir.NameQueue
strings intWriter
stringIndex map[string]uint64
data0 intWriter
- declIndex map[*Node]uint64
- inlineIndex map[*Node]uint64
+ declIndex map[*types.Sym]uint64
+ inlineIndex map[*types.Sym]uint64
typIndex map[*types.Type]uint64
}
off = uint64(p.strings.Len())
p.stringIndex[s] = off
+ if *base.Flag.LowerV {
+ fmt.Printf("export: str %v %.40q\n", off, s)
+ }
+
p.strings.uint64(uint64(len(s)))
p.strings.WriteString(s)
}
}
// pushDecl adds n to the declaration work queue, if not already present.
-func (p *iexporter) pushDecl(n *Node) {
- if n.Sym == nil || asNode(n.Sym.Def) != n && n.Op != OTYPE {
- Fatalf("weird Sym: %v, %v", n, n.Sym)
+func (p *iexporter) pushDecl(n *ir.Name) {
+ if n.Sym() == nil || n.Sym().Def != n && n.Op() != ir.OTYPE {
+ base.Fatalf("weird Sym: %v, %v", n, n.Sym())
}
// Don't export predeclared declarations.
- if n.Sym.Pkg == builtinpkg || n.Sym.Pkg == unsafepkg {
+ if n.Sym().Pkg == types.BuiltinPkg || n.Sym().Pkg == unsafepkg {
return
}
- if _, ok := p.declIndex[n]; ok {
+ if _, ok := p.declIndex[n.Sym()]; ok {
return
}
- p.declIndex[n] = ^uint64(0) // mark n present in work queue
- p.declTodo.pushRight(n)
+ p.declIndex[n.Sym()] = ^uint64(0) // mark n present in work queue
+ p.declTodo.PushRight(n)
}
// exportWriter handles writing out individual data section chunks.
prevColumn int64
}
-func (p *iexporter) doDecl(n *Node) {
+func (p *iexporter) doDecl(n *ir.Name) {
w := p.newWriter()
- w.setPkg(n.Sym.Pkg, false)
+ w.setPkg(n.Sym().Pkg, false)
- switch n.Op {
- case ONAME:
+ switch n.Op() {
+ case ir.ONAME:
switch n.Class() {
- case PEXTERN:
+ case ir.PEXTERN:
// Variable.
w.tag('V')
- w.pos(n.Pos)
- w.typ(n.Type)
+ w.pos(n.Pos())
+ w.typ(n.Type())
w.varExt(n)
- case PFUNC:
- if n.IsMethod() {
- Fatalf("unexpected method: %v", n)
+ case ir.PFUNC:
+ if ir.IsMethod(n) {
+ base.Fatalf("unexpected method: %v", n)
}
// Function.
w.tag('F')
- w.pos(n.Pos)
- w.signature(n.Type)
+ w.pos(n.Pos())
+ w.signature(n.Type())
w.funcExt(n)
default:
- Fatalf("unexpected class: %v, %v", n, n.Class())
+ base.Fatalf("unexpected class: %v, %v", n, n.Class())
}
- case OLITERAL:
+ case ir.OLITERAL:
// Constant.
- n = typecheck(n, ctxExpr)
+ // TODO(mdempsky): Do we still need this typecheck? If so, why?
+ n = typecheck(n, ctxExpr).(*ir.Name)
w.tag('C')
- w.pos(n.Pos)
- w.value(n.Type, n.Val())
+ w.pos(n.Pos())
+ w.value(n.Type(), n.Val())
- case OTYPE:
- if IsAlias(n.Sym) {
+ case ir.OTYPE:
+ if IsAlias(n.Sym()) {
// Alias.
w.tag('A')
- w.pos(n.Pos)
- w.typ(n.Type)
+ w.pos(n.Pos())
+ w.typ(n.Type())
break
}
// Defined type.
w.tag('T')
- w.pos(n.Pos)
+ w.pos(n.Pos())
- underlying := n.Type.Orig
- if underlying == types.Errortype.Orig {
+ underlying := n.Type().Underlying()
+ if underlying == types.ErrorType.Underlying() {
// For "type T error", use error as the
// underlying type instead of error's own
// underlying anonymous interface. This
// ensures consistency with how importers may
// declare error (e.g., go/types uses nil Pkg
// for predeclared objects).
- underlying = types.Errortype
+ underlying = types.ErrorType
}
w.typ(underlying)
- t := n.Type
+ t := n.Type()
if t.IsInterface() {
w.typeExt(t)
break
}
default:
- Fatalf("unexpected node: %v", n)
+ base.Fatalf("unexpected node: %v", n)
}
- p.declIndex[n] = w.flush()
+ w.finish("dcl", p.declIndex, n.Sym())
}
func (w *exportWriter) tag(tag byte) {
w.data.WriteByte(tag)
}
-func (p *iexporter) doInline(f *Node) {
+func (w *exportWriter) finish(what string, index map[*types.Sym]uint64, sym *types.Sym) {
+ off := w.flush()
+ if *base.Flag.LowerV {
+ fmt.Printf("export: %v %v %v\n", what, off, sym)
+ }
+ index[sym] = off
+}
+
+func (p *iexporter) doInline(f *ir.Name) {
w := p.newWriter()
w.setPkg(fnpkg(f), false)
- w.stmtList(asNodes(f.Func.Inl.Body))
+ w.stmtList(ir.AsNodes(f.Func().Inl.Body))
- p.inlineIndex[f] = w.flush()
+ w.finish("inl", p.inlineIndex, f.Sym())
}
func (w *exportWriter) pos(pos src.XPos) {
- p := Ctxt.PosTable.Pos(pos)
+ p := base.Ctxt.PosTable.Pos(pos)
file := p.Base().AbsFilename()
line := int64(p.RelLine())
column := int64(p.RelCol())
w.string(pkg.Path)
}
-func (w *exportWriter) qualifiedIdent(n *Node) {
+func (w *exportWriter) qualifiedIdent(n ir.Node) {
// Ensure any referenced declarations are written out too.
- w.p.pushDecl(n)
+ w.p.pushDecl(n.Name())
- s := n.Sym
+ s := n.Sym()
w.string(s.Name)
w.pkg(s.Pkg)
}
func (w *exportWriter) selector(s *types.Sym) {
if w.currPkg == nil {
- Fatalf("missing currPkg")
+ base.Fatalf("missing currPkg")
}
// Method selectors are rewritten into method symbols (of the
} else {
pkg := w.currPkg
if types.IsExported(name) {
- pkg = localpkg
+ pkg = types.LocalPkg
}
if s.Pkg != pkg {
- Fatalf("package mismatch in selector: %v in package %q, but want %q", s, s.Pkg.Path, pkg.Path)
+ base.Fatalf("package mismatch in selector: %v in package %q, but want %q", s, s.Pkg.Path, pkg.Path)
}
}
if !ok {
w := p.newWriter()
w.doTyp(t)
- off = predeclReserved + w.flush()
+ rawOff := w.flush()
+ if *base.Flag.LowerV {
+ fmt.Printf("export: typ %v %v\n", rawOff, t)
+ }
+ off = predeclReserved + rawOff
p.typIndex[t] = off
}
return off
}
func (w *exportWriter) doTyp(t *types.Type) {
- if t.Sym != nil {
- if t.Sym.Pkg == builtinpkg || t.Sym.Pkg == unsafepkg {
- Fatalf("builtin type missing from typIndex: %v", t)
+ if t.Sym() != nil {
+ if t.Sym().Pkg == types.BuiltinPkg || t.Sym().Pkg == unsafepkg {
+ base.Fatalf("builtin type missing from typIndex: %v", t)
}
w.startType(definedType)
- w.qualifiedIdent(typenod(t))
+ w.qualifiedIdent(t.Obj().(*ir.Name))
return
}
- switch t.Etype {
- case TPTR:
+ switch t.Kind() {
+ case types.TPTR:
w.startType(pointerType)
w.typ(t.Elem())
- case TSLICE:
+ case types.TSLICE:
w.startType(sliceType)
w.typ(t.Elem())
- case TARRAY:
+ case types.TARRAY:
w.startType(arrayType)
w.uint64(uint64(t.NumElem()))
w.typ(t.Elem())
- case TCHAN:
+ case types.TCHAN:
w.startType(chanType)
w.uint64(uint64(t.ChanDir()))
w.typ(t.Elem())
- case TMAP:
+ case types.TMAP:
w.startType(mapType)
w.typ(t.Key())
w.typ(t.Elem())
- case TFUNC:
+ case types.TFUNC:
w.startType(signatureType)
w.setPkg(t.Pkg(), true)
w.signature(t)
- case TSTRUCT:
+ case types.TSTRUCT:
w.startType(structType)
w.setPkg(t.Pkg(), true)
w.string(f.Note)
}
- case TINTER:
+ case types.TINTER:
var embeddeds, methods []*types.Field
for _, m := range t.Methods().Slice() {
if m.Sym != nil {
}
default:
- Fatalf("unexpected type: %v", t)
+ base.Fatalf("unexpected type: %v", t)
}
}
func (w *exportWriter) setPkg(pkg *types.Pkg, write bool) {
- if pkg == nil {
- // TODO(mdempsky): Proactively set Pkg for types and
- // remove this fallback logic.
- pkg = localpkg
+ if pkg == types.NoPkg {
+ base.Fatalf("missing pkg")
}
if write {
func (w *exportWriter) param(f *types.Field) {
w.pos(f.Pos)
- w.localIdent(origSym(f.Sym), 0)
+ w.localIdent(types.OrigSym(f.Sym), 0)
w.typ(f.Type)
}
-func constTypeOf(typ *types.Type) Ctype {
+func constTypeOf(typ *types.Type) constant.Kind {
switch typ {
case types.UntypedInt, types.UntypedRune:
- return CTINT
+ return constant.Int
case types.UntypedFloat:
- return CTFLT
+ return constant.Float
case types.UntypedComplex:
- return CTCPLX
- }
-
- switch typ.Etype {
- case TCHAN, TFUNC, TMAP, TNIL, TINTER, TPTR, TSLICE, TUNSAFEPTR:
- return CTNIL
- case TBOOL:
- return CTBOOL
- case TSTRING:
- return CTSTR
- case TINT, TINT8, TINT16, TINT32, TINT64,
- TUINT, TUINT8, TUINT16, TUINT32, TUINT64, TUINTPTR:
- return CTINT
- case TFLOAT32, TFLOAT64:
- return CTFLT
- case TCOMPLEX64, TCOMPLEX128:
- return CTCPLX
- }
-
- Fatalf("unexpected constant type: %v", typ)
+ return constant.Complex
+ }
+
+ switch typ.Kind() {
+ case types.TBOOL:
+ return constant.Bool
+ case types.TSTRING:
+ return constant.String
+ case types.TINT, types.TINT8, types.TINT16, types.TINT32, types.TINT64,
+ types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINTPTR:
+ return constant.Int
+ case types.TFLOAT32, types.TFLOAT64:
+ return constant.Float
+ case types.TCOMPLEX64, types.TCOMPLEX128:
+ return constant.Complex
+ }
+
+ base.Fatalf("unexpected constant type: %v", typ)
return 0
}
-func (w *exportWriter) value(typ *types.Type, v Val) {
- if vt := idealType(v.Ctype()); typ.IsUntyped() && typ != vt {
- Fatalf("exporter: untyped type mismatch, have: %v, want: %v", typ, vt)
- }
+func (w *exportWriter) value(typ *types.Type, v constant.Value) {
+ ir.AssertValidTypeForConst(typ, v)
w.typ(typ)
// Each type has only one admissible constant representation,
// and provides a useful consistency check.
switch constTypeOf(typ) {
- case CTNIL:
- // Only one value; nothing to encode.
- _ = v.U.(*NilVal)
- case CTBOOL:
- w.bool(v.U.(bool))
- case CTSTR:
- w.string(v.U.(string))
- case CTINT:
- w.mpint(&v.U.(*Mpint).Val, typ)
- case CTFLT:
- w.mpfloat(&v.U.(*Mpflt).Val, typ)
- case CTCPLX:
- x := v.U.(*Mpcplx)
- w.mpfloat(&x.Real.Val, typ)
- w.mpfloat(&x.Imag.Val, typ)
+ case constant.Bool:
+ w.bool(constant.BoolVal(v))
+ case constant.String:
+ w.string(constant.StringVal(v))
+ case constant.Int:
+ w.mpint(v, typ)
+ case constant.Float:
+ w.mpfloat(v, typ)
+ case constant.Complex:
+ w.mpfloat(constant.Real(v), typ)
+ w.mpfloat(constant.Imag(v), typ)
}
}
return true, Mpprec / 8
}
- switch typ.Etype {
- case TFLOAT32, TCOMPLEX64:
+ switch typ.Kind() {
+ case types.TFLOAT32, types.TCOMPLEX64:
return true, 3
- case TFLOAT64, TCOMPLEX128:
+ case types.TFLOAT64, types.TCOMPLEX128:
return true, 7
}
// The go/types API doesn't expose sizes to importers, so they
// don't know how big these types are.
- switch typ.Etype {
- case TINT, TUINT, TUINTPTR:
+ switch typ.Kind() {
+ case types.TINT, types.TUINT, types.TUINTPTR:
maxBytes = 8
}
// single byte.
//
// TODO(mdempsky): Is this level of complexity really worthwhile?
-func (w *exportWriter) mpint(x *big.Int, typ *types.Type) {
+func (w *exportWriter) mpint(x constant.Value, typ *types.Type) {
signed, maxBytes := intSize(typ)
- negative := x.Sign() < 0
+ negative := constant.Sign(x) < 0
if !signed && negative {
- Fatalf("negative unsigned integer; type %v, value %v", typ, x)
+ base.Fatalf("negative unsigned integer; type %v, value %v", typ, x)
+ }
+
+ b := constant.Bytes(x) // little endian
+ for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 {
+ b[i], b[j] = b[j], b[i]
}
- b := x.Bytes()
if len(b) > 0 && b[0] == 0 {
- Fatalf("leading zeros")
+ base.Fatalf("leading zeros")
}
if uint(len(b)) > maxBytes {
- Fatalf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x)
+ base.Fatalf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x)
}
maxSmall := 256 - maxBytes
}
}
if n < maxSmall || n >= 256 {
- Fatalf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n)
+ base.Fatalf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n)
}
w.data.WriteByte(byte(n))
// mantissa is an integer. The value is written out as mantissa (as a
// multi-precision integer) and then the exponent, except exponent is
// omitted if mantissa is zero.
-func (w *exportWriter) mpfloat(f *big.Float, typ *types.Type) {
+func (w *exportWriter) mpfloat(v constant.Value, typ *types.Type) {
+ f := bigFloatVal(v)
if f.IsInf() {
- Fatalf("infinite constant")
+ base.Fatalf("infinite constant")
}
// Break into f = mant × 2**exp, with 0.5 <= mant < 1.
manti, acc := mant.Int(nil)
if acc != big.Exact {
- Fatalf("mantissa scaling failed for %f (%s)", f, acc)
+ base.Fatalf("mantissa scaling failed for %f (%s)", f, acc)
}
- w.mpint(manti, typ)
+ w.mpint(makeInt(manti), typ)
if manti.Sign() != 0 {
w.int64(exp)
}
// Compiler-specific extensions.
-func (w *exportWriter) varExt(n *Node) {
- w.linkname(n.Sym)
- w.symIdx(n.Sym)
+func (w *exportWriter) varExt(n ir.Node) {
+ w.linkname(n.Sym())
+ w.symIdx(n.Sym())
}
-func (w *exportWriter) funcExt(n *Node) {
- w.linkname(n.Sym)
- w.symIdx(n.Sym)
+func (w *exportWriter) funcExt(n *ir.Name) {
+ w.linkname(n.Sym())
+ w.symIdx(n.Sym())
// Escape analysis.
for _, fs := range &types.RecvsParams {
- for _, f := range fs(n.Type).FieldSlice() {
+ for _, f := range fs(n.Type()).FieldSlice() {
w.string(f.Note)
}
}
// Inline body.
- if n.Func.Inl != nil {
- w.uint64(1 + uint64(n.Func.Inl.Cost))
- if n.Func.ExportInline() {
+ if n.Func().Inl != nil {
+ w.uint64(1 + uint64(n.Func().Inl.Cost))
+ if n.Func().ExportInline() {
w.p.doInline(n)
}
// Endlineno for inlined function.
- if n.Name.Defn != nil {
- w.pos(n.Name.Defn.Func.Endlineno)
- } else {
- // When the exported node was defined externally,
- // e.g. io exports atomic.(*Value).Load or bytes exports errors.New.
- // Keep it as we don't distinguish this case in iimport.go.
- w.pos(n.Func.Endlineno)
- }
+ w.pos(n.Func().Endlineno)
} else {
w.uint64(0)
}
func (w *exportWriter) methExt(m *types.Field) {
w.bool(m.Nointerface())
- w.funcExt(asNode(m.Type.Nname()))
+ w.funcExt(m.Nname.(*ir.Name))
}
func (w *exportWriter) linkname(s *types.Sym) {
// Inline bodies.
-func (w *exportWriter) stmtList(list Nodes) {
+func (w *exportWriter) stmtList(list ir.Nodes) {
for _, n := range list.Slice() {
w.node(n)
}
- w.op(OEND)
+ w.op(ir.OEND)
}
-func (w *exportWriter) node(n *Node) {
- if opprec[n.Op] < 0 {
+func (w *exportWriter) node(n ir.Node) {
+ if ir.OpPrec[n.Op()] < 0 {
w.stmt(n)
} else {
w.expr(n)
// Caution: stmt will emit more than one node for statement nodes n that have a non-empty
// n.Ninit and where n cannot have a natural init section (such as in "if", "for", etc.).
-func (w *exportWriter) stmt(n *Node) {
- if n.Ninit.Len() > 0 && !stmtwithinit(n.Op) {
+func (w *exportWriter) stmt(n ir.Node) {
+ if n.Init().Len() > 0 && !ir.StmtWithInit(n.Op()) {
// can't use stmtList here since we don't want the final OEND
- for _, n := range n.Ninit.Slice() {
+ for _, n := range n.Init().Slice() {
w.stmt(n)
}
}
- switch op := n.Op; op {
- case ODCL:
- w.op(ODCL)
- w.pos(n.Left.Pos)
- w.localName(n.Left)
- w.typ(n.Left.Type)
+ switch n.Op() {
+ case ir.OBLOCK:
+ // No OBLOCK in export data.
+ // Inline content into this statement list,
+ // like the init list above.
+ // (At the moment neither the parser nor the typechecker
+ // generate OBLOCK nodes except to denote an empty
+ // function body, although that may change.)
+ for _, n := range n.List().Slice() {
+ w.stmt(n)
+ }
- // case ODCLFIELD:
- // unimplemented - handled by default case
+ case ir.ODCL:
+ w.op(ir.ODCL)
+ w.pos(n.Left().Pos())
+ w.localName(n.Left().(*ir.Name))
+ w.typ(n.Left().Type())
- case OAS:
+ case ir.OAS:
// Don't export "v = <N>" initializing statements, hope they're always
// preceded by the DCL which will be re-parsed and typecheck to reproduce
// the "v = <N>" again.
- if n.Right != nil {
- w.op(OAS)
- w.pos(n.Pos)
- w.expr(n.Left)
- w.expr(n.Right)
+ if n.Right() != nil {
+ w.op(ir.OAS)
+ w.pos(n.Pos())
+ w.expr(n.Left())
+ w.expr(n.Right())
}
- case OASOP:
- w.op(OASOP)
- w.pos(n.Pos)
- w.op(n.SubOp())
- w.expr(n.Left)
+ case ir.OASOP:
+ n := n.(*ir.AssignOpStmt)
+ w.op(ir.OASOP)
+ w.pos(n.Pos())
+ w.op(n.AsOp)
+ w.expr(n.Left())
if w.bool(!n.Implicit()) {
- w.expr(n.Right)
+ w.expr(n.Right())
}
- case OAS2:
- w.op(OAS2)
- w.pos(n.Pos)
- w.exprList(n.List)
- w.exprList(n.Rlist)
+ case ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
+ w.op(ir.OAS2)
+ w.pos(n.Pos())
+ w.exprList(n.List())
+ w.exprList(n.Rlist())
- case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
- w.op(OAS2)
- w.pos(n.Pos)
- w.exprList(n.List)
- w.exprList(asNodes([]*Node{n.Right}))
-
- case ORETURN:
- w.op(ORETURN)
- w.pos(n.Pos)
- w.exprList(n.List)
+ case ir.ORETURN:
+ w.op(ir.ORETURN)
+ w.pos(n.Pos())
+ w.exprList(n.List())
// case ORETJMP:
// unreachable - generated by compiler for trampoline routines
- case OGO, ODEFER:
- w.op(op)
- w.pos(n.Pos)
- w.expr(n.Left)
-
- case OIF:
- w.op(OIF)
- w.pos(n.Pos)
- w.stmtList(n.Ninit)
- w.expr(n.Left)
- w.stmtList(n.Nbody)
- w.stmtList(n.Rlist)
-
- case OFOR:
- w.op(OFOR)
- w.pos(n.Pos)
- w.stmtList(n.Ninit)
- w.exprsOrNil(n.Left, n.Right)
- w.stmtList(n.Nbody)
-
- case ORANGE:
- w.op(ORANGE)
- w.pos(n.Pos)
- w.stmtList(n.List)
- w.expr(n.Right)
- w.stmtList(n.Nbody)
-
- case OSELECT, OSWITCH:
- w.op(op)
- w.pos(n.Pos)
- w.stmtList(n.Ninit)
- w.exprsOrNil(n.Left, nil)
+ case ir.OGO, ir.ODEFER:
+ w.op(n.Op())
+ w.pos(n.Pos())
+ w.expr(n.Left())
+
+ case ir.OIF:
+ w.op(ir.OIF)
+ w.pos(n.Pos())
+ w.stmtList(n.Init())
+ w.expr(n.Left())
+ w.stmtList(n.Body())
+ w.stmtList(n.Rlist())
+
+ case ir.OFOR:
+ w.op(ir.OFOR)
+ w.pos(n.Pos())
+ w.stmtList(n.Init())
+ w.exprsOrNil(n.Left(), n.Right())
+ w.stmtList(n.Body())
+
+ case ir.ORANGE:
+ w.op(ir.ORANGE)
+ w.pos(n.Pos())
+ w.stmtList(n.List())
+ w.expr(n.Right())
+ w.stmtList(n.Body())
+
+ case ir.OSELECT:
+ w.op(n.Op())
+ w.pos(n.Pos())
+ w.stmtList(n.Init())
+ w.exprsOrNil(nil, nil) // TODO(rsc): Delete (and fix importer).
+ w.caseList(n)
+
+ case ir.OSWITCH:
+ w.op(n.Op())
+ w.pos(n.Pos())
+ w.stmtList(n.Init())
+ w.exprsOrNil(n.Left(), nil)
w.caseList(n)
// case OCASE:
// handled by caseList
- case OFALL:
- w.op(OFALL)
- w.pos(n.Pos)
-
- case OBREAK, OCONTINUE:
- w.op(op)
- w.pos(n.Pos)
- w.exprsOrNil(n.Left, nil)
+ case ir.OFALL:
+ w.op(ir.OFALL)
+ w.pos(n.Pos())
- case OEMPTY:
- // nothing to emit
-
- case OGOTO, OLABEL:
- w.op(op)
- w.pos(n.Pos)
- w.string(n.Sym.Name)
+ case ir.OBREAK, ir.OCONTINUE, ir.OGOTO, ir.OLABEL:
+ w.op(n.Op())
+ w.pos(n.Pos())
+ label := ""
+ if sym := n.Sym(); sym != nil {
+ label = sym.Name
+ }
+ w.string(label)
default:
- Fatalf("exporter: CANNOT EXPORT: %v\nPlease notify gri@\n", n.Op)
+ base.Fatalf("exporter: CANNOT EXPORT: %v\nPlease notify gri@\n", n.Op())
}
}
-func (w *exportWriter) caseList(sw *Node) {
- namedTypeSwitch := sw.Op == OSWITCH && sw.Left != nil && sw.Left.Op == OTYPESW && sw.Left.Left != nil
+func isNamedTypeSwitch(n ir.Node) bool {
+ if n.Op() != ir.OSWITCH {
+ return false
+ }
+ sw := n.(*ir.SwitchStmt)
+ if sw.Left() == nil || sw.Left().Op() != ir.OTYPESW {
+ return false
+ }
+ guard := sw.Left().(*ir.TypeSwitchGuard)
+ return guard.Left() != nil
+}
+
+func (w *exportWriter) caseList(sw ir.Node) {
+ namedTypeSwitch := isNamedTypeSwitch(sw)
- cases := sw.List.Slice()
+ var cases []ir.Node
+ if sw.Op() == ir.OSWITCH {
+ cases = sw.(*ir.SwitchStmt).List().Slice()
+ } else {
+ cases = sw.(*ir.SelectStmt).List().Slice()
+ }
w.uint64(uint64(len(cases)))
for _, cas := range cases {
- if cas.Op != OCASE {
- Fatalf("expected OCASE, got %v", cas)
- }
- w.pos(cas.Pos)
- w.stmtList(cas.List)
+ cas := cas.(*ir.CaseStmt)
+ w.pos(cas.Pos())
+ w.stmtList(cas.List())
if namedTypeSwitch {
- w.localName(cas.Rlist.First())
+ w.localName(cas.Rlist().First().(*ir.Name))
}
- w.stmtList(cas.Nbody)
+ w.stmtList(cas.Body())
}
}
-func (w *exportWriter) exprList(list Nodes) {
+func (w *exportWriter) exprList(list ir.Nodes) {
for _, n := range list.Slice() {
w.expr(n)
}
- w.op(OEND)
+ w.op(ir.OEND)
}
-func (w *exportWriter) expr(n *Node) {
- // from nodefmt (fmt.go)
- //
- // nodefmt reverts nodes back to their original - we don't need to do
- // it because we are not bound to produce valid Go syntax when exporting
- //
- // if (fmtmode != FExp || n.Op != OLITERAL) && n.Orig != nil {
- // n = n.Orig
- // }
-
- // from exprfmt (fmt.go)
- for n.Op == OPAREN || n.Implicit() && (n.Op == ODEREF || n.Op == OADDR || n.Op == ODOT || n.Op == ODOTPTR) {
- n = n.Left
+func simplifyForExport(n ir.Node) ir.Node {
+ switch n.Op() {
+ case ir.OPAREN:
+ return simplifyForExport(n.Left())
+ case ir.ODEREF:
+ if n.Implicit() {
+ return simplifyForExport(n.Left())
+ }
+ case ir.OADDR:
+ if n.Implicit() {
+ return simplifyForExport(n.Left())
+ }
+ case ir.ODOT, ir.ODOTPTR:
+ if n.Implicit() {
+ return simplifyForExport(n.Left())
+ }
}
+ return n
+}
- switch op := n.Op; op {
+func (w *exportWriter) expr(n ir.Node) {
+ n = simplifyForExport(n)
+ switch n.Op() {
// expressions
// (somewhat closely following the structure of exprfmt in fmt.go)
- case OLITERAL:
- if n.Val().Ctype() == CTNIL && n.Orig != nil && n.Orig != n {
- w.expr(n.Orig)
- break
+ case ir.ONIL:
+ if !n.Type().HasNil() {
+ base.Fatalf("unexpected type for nil: %v", n.Type())
}
- w.op(OLITERAL)
- w.pos(n.Pos)
- w.value(n.Type, n.Val())
+ w.op(ir.ONIL)
+ w.pos(n.Pos())
+ w.typ(n.Type())
+
+ case ir.OLITERAL:
+ w.op(ir.OLITERAL)
+ w.pos(n.Pos())
+ w.value(n.Type(), n.Val())
- case ONAME:
+ case ir.OMETHEXPR:
// Special case: explicit name of func (*T) method(...) is turned into pkg.(*T).method,
// but for export, this should be rendered as (*pkg.T).meth.
// These nodes have the special property that they are names with a left OTYPE and a right ONAME.
- if n.isMethodExpression() {
- w.op(OXDOT)
- w.pos(n.Pos)
- w.expr(n.Left) // n.Left.Op == OTYPE
- w.selector(n.Right.Sym)
- break
- }
-
+ n := n.(*ir.MethodExpr)
+ w.op(ir.OXDOT)
+ w.pos(n.Pos())
+ w.op(ir.OTYPE)
+ w.typ(n.T) // receiver type; written in place of the former OTYPE left operand
+ w.selector(n.Method.Sym)
+
+ case ir.ONAME:
// Package scope name.
- if (n.Class() == PEXTERN || n.Class() == PFUNC) && !n.isBlank() {
- w.op(ONONAME)
+ n := n.(*ir.Name)
+ if (n.Class() == ir.PEXTERN || n.Class() == ir.PFUNC) && !ir.IsBlank(n) {
+ w.op(ir.ONONAME)
w.qualifiedIdent(n)
break
}
// Function scope name.
- w.op(ONAME)
+ w.op(ir.ONAME)
w.localName(n)
// case OPACK, ONONAME:
// should have been resolved by typechecking - handled by default case
- case OTYPE:
- w.op(OTYPE)
- w.typ(n.Type)
+ case ir.OTYPE:
+ w.op(ir.OTYPE)
+ w.typ(n.Type())
- case OTYPESW:
- w.op(OTYPESW)
- w.pos(n.Pos)
+ case ir.OTYPESW:
+ w.op(ir.OTYPESW)
+ w.pos(n.Pos())
var s *types.Sym
- if n.Left != nil {
- if n.Left.Op != ONONAME {
- Fatalf("expected ONONAME, got %v", n.Left)
+ if n.Left() != nil {
+ if n.Left().Op() != ir.ONONAME {
+ base.Fatalf("expected ONONAME, got %v", n.Left())
}
- s = n.Left.Sym
+ s = n.Left().Sym()
}
w.localIdent(s, 0) // declared pseudo-variable, if any
- w.exprsOrNil(n.Right, nil)
+ w.exprsOrNil(n.Right(), nil)
// case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC:
// should have been resolved by typechecking - handled by default case
// case OCOMPLIT:
// should have been resolved by typechecking - handled by default case
- case OPTRLIT:
- w.op(OADDR)
- w.pos(n.Pos)
- w.expr(n.Left)
+ case ir.OPTRLIT:
+ w.op(ir.OADDR)
+ w.pos(n.Pos())
+ w.expr(n.Left())
- case OSTRUCTLIT:
- w.op(OSTRUCTLIT)
- w.pos(n.Pos)
- w.typ(n.Type)
- w.elemList(n.List) // special handling of field names
+ case ir.OSTRUCTLIT:
+ w.op(ir.OSTRUCTLIT)
+ w.pos(n.Pos())
+ w.typ(n.Type())
+ w.fieldList(n.List()) // special handling of field names
- case OARRAYLIT, OSLICELIT, OMAPLIT:
- w.op(OCOMPLIT)
- w.pos(n.Pos)
- w.typ(n.Type)
- w.exprList(n.List)
+ case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT:
+ w.op(ir.OCOMPLIT)
+ w.pos(n.Pos())
+ w.typ(n.Type())
+ w.exprList(n.List())
- case OKEY:
- w.op(OKEY)
- w.pos(n.Pos)
- w.exprsOrNil(n.Left, n.Right)
+ case ir.OKEY:
+ w.op(ir.OKEY)
+ w.pos(n.Pos())
+ w.exprsOrNil(n.Left(), n.Right())
// case OSTRUCTKEY:
// unreachable - handled in case OSTRUCTLIT by elemList
- case OCALLPART:
+ case ir.OCALLPART:
// An OCALLPART is an OXDOT before type checking.
- w.op(OXDOT)
- w.pos(n.Pos)
- w.expr(n.Left)
- // Right node should be ONAME
- w.selector(n.Right.Sym)
-
- case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH:
- w.op(OXDOT)
- w.pos(n.Pos)
- w.expr(n.Left)
- w.selector(n.Sym)
-
- case ODOTTYPE, ODOTTYPE2:
- w.op(ODOTTYPE)
- w.pos(n.Pos)
- w.expr(n.Left)
- w.typ(n.Type)
-
- case OINDEX, OINDEXMAP:
- w.op(OINDEX)
- w.pos(n.Pos)
- w.expr(n.Left)
- w.expr(n.Right)
-
- case OSLICE, OSLICESTR, OSLICEARR:
- w.op(OSLICE)
- w.pos(n.Pos)
- w.expr(n.Left)
+ w.op(ir.OXDOT)
+ w.pos(n.Pos())
+ w.expr(n.Left())
+ w.selector(n.Sym())
+
+ case ir.OXDOT, ir.ODOT, ir.ODOTPTR, ir.ODOTINTER, ir.ODOTMETH:
+ w.op(ir.OXDOT)
+ w.pos(n.Pos())
+ w.expr(n.Left())
+ w.selector(n.Sym())
+
+ case ir.ODOTTYPE, ir.ODOTTYPE2:
+ w.op(ir.ODOTTYPE)
+ w.pos(n.Pos())
+ w.expr(n.Left())
+ w.typ(n.Type())
+
+ case ir.OINDEX, ir.OINDEXMAP:
+ w.op(ir.OINDEX)
+ w.pos(n.Pos())
+ w.expr(n.Left())
+ w.expr(n.Right())
+
+ case ir.OSLICE, ir.OSLICESTR, ir.OSLICEARR:
+ w.op(ir.OSLICE)
+ w.pos(n.Pos())
+ w.expr(n.Left())
low, high, _ := n.SliceBounds()
w.exprsOrNil(low, high)
- case OSLICE3, OSLICE3ARR:
- w.op(OSLICE3)
- w.pos(n.Pos)
- w.expr(n.Left)
+ case ir.OSLICE3, ir.OSLICE3ARR:
+ w.op(ir.OSLICE3)
+ w.pos(n.Pos())
+ w.expr(n.Left())
low, high, max := n.SliceBounds()
w.exprsOrNil(low, high)
w.expr(max)
- case OCOPY, OCOMPLEX:
+ case ir.OCOPY, ir.OCOMPLEX:
// treated like other builtin calls (see e.g., OREAL)
- w.op(op)
- w.pos(n.Pos)
- w.expr(n.Left)
- w.expr(n.Right)
- w.op(OEND)
-
- case OCONV, OCONVIFACE, OCONVNOP, OBYTES2STR, ORUNES2STR, OSTR2BYTES, OSTR2RUNES, ORUNESTR:
- w.op(OCONV)
- w.pos(n.Pos)
- w.expr(n.Left)
- w.typ(n.Type)
-
- case OREAL, OIMAG, OAPPEND, OCAP, OCLOSE, ODELETE, OLEN, OMAKE, ONEW, OPANIC, ORECOVER, OPRINT, OPRINTN:
- w.op(op)
- w.pos(n.Pos)
- if n.Left != nil {
- w.expr(n.Left)
- w.op(OEND)
- } else {
- w.exprList(n.List) // emits terminating OEND
- }
+ w.op(n.Op())
+ w.pos(n.Pos())
+ w.expr(n.Left())
+ w.expr(n.Right())
+ w.op(ir.OEND)
+
+ case ir.OCONV, ir.OCONVIFACE, ir.OCONVNOP, ir.OBYTES2STR, ir.ORUNES2STR, ir.OSTR2BYTES, ir.OSTR2RUNES, ir.ORUNESTR:
+ w.op(ir.OCONV)
+ w.pos(n.Pos())
+ w.expr(n.Left())
+ w.typ(n.Type())
+
+ case ir.OREAL, ir.OIMAG, ir.OCAP, ir.OCLOSE, ir.OLEN, ir.ONEW, ir.OPANIC:
+ w.op(n.Op())
+ w.pos(n.Pos())
+ w.expr(n.Left())
+ w.op(ir.OEND)
+
+ case ir.OAPPEND, ir.ODELETE, ir.ORECOVER, ir.OPRINT, ir.OPRINTN:
+ w.op(n.Op())
+ w.pos(n.Pos())
+ w.exprList(n.List()) // emits terminating OEND
// only append() calls may contain '...' arguments
- if op == OAPPEND {
+ if n.Op() == ir.OAPPEND {
w.bool(n.IsDDD())
} else if n.IsDDD() {
- Fatalf("exporter: unexpected '...' with %v call", op)
+ base.Fatalf("exporter: unexpected '...' with %v call", n.Op())
}
- case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER, OGETG:
- w.op(OCALL)
- w.pos(n.Pos)
- w.stmtList(n.Ninit)
- w.expr(n.Left)
- w.exprList(n.List)
+ case ir.OCALL, ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OGETG:
+ w.op(ir.OCALL)
+ w.pos(n.Pos())
+ w.stmtList(n.Init())
+ w.expr(n.Left())
+ w.exprList(n.List())
w.bool(n.IsDDD())
- case OMAKEMAP, OMAKECHAN, OMAKESLICE:
- w.op(op) // must keep separate from OMAKE for importer
- w.pos(n.Pos)
- w.typ(n.Type)
+ case ir.OMAKEMAP, ir.OMAKECHAN, ir.OMAKESLICE:
+ w.op(n.Op()) // must keep separate from OMAKE for importer
+ w.pos(n.Pos())
+ w.typ(n.Type())
switch {
default:
// empty list
- w.op(OEND)
- case n.List.Len() != 0: // pre-typecheck
- w.exprList(n.List) // emits terminating OEND
- case n.Right != nil:
- w.expr(n.Left)
- w.expr(n.Right)
- w.op(OEND)
- case n.Left != nil && (n.Op == OMAKESLICE || !n.Left.Type.IsUntyped()):
- w.expr(n.Left)
- w.op(OEND)
+ w.op(ir.OEND)
+ case n.Right() != nil:
+ w.expr(n.Left())
+ w.expr(n.Right())
+ w.op(ir.OEND)
+ case n.Left() != nil && (n.Op() == ir.OMAKESLICE || !n.Left().Type().IsUntyped()):
+ w.expr(n.Left())
+ w.op(ir.OEND)
}
// unary expressions
- case OPLUS, ONEG, OADDR, OBITNOT, ODEREF, ONOT, ORECV:
- w.op(op)
- w.pos(n.Pos)
- w.expr(n.Left)
+ case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT, ir.ORECV:
+ w.op(n.Op())
+ w.pos(n.Pos())
+ w.expr(n.Left())
+
+ case ir.OADDR:
+ w.op(n.Op())
+ w.pos(n.Pos())
+ w.expr(n.Left())
+
+ case ir.ODEREF:
+ w.op(n.Op())
+ w.pos(n.Pos())
+ w.expr(n.Left())
+
+ case ir.OSEND:
+ w.op(n.Op())
+ w.pos(n.Pos())
+ w.expr(n.Left())
+ w.expr(n.Right())
// binary expressions
- case OADD, OAND, OANDAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE, OLT,
- OLSH, OMOD, OMUL, ONE, OOR, OOROR, ORSH, OSEND, OSUB, OXOR:
- w.op(op)
- w.pos(n.Pos)
- w.expr(n.Left)
- w.expr(n.Right)
-
- case OADDSTR:
- w.op(OADDSTR)
- w.pos(n.Pos)
- w.exprList(n.List)
-
- case ODCLCONST:
+ case ir.OADD, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OEQ, ir.OGE, ir.OGT, ir.OLE, ir.OLT,
+ ir.OLSH, ir.OMOD, ir.OMUL, ir.ONE, ir.OOR, ir.ORSH, ir.OSUB, ir.OXOR:
+ w.op(n.Op())
+ w.pos(n.Pos())
+ w.expr(n.Left())
+ w.expr(n.Right())
+
+ case ir.OANDAND, ir.OOROR:
+ w.op(n.Op())
+ w.pos(n.Pos())
+ w.expr(n.Left())
+ w.expr(n.Right())
+
+ case ir.OADDSTR:
+ w.op(ir.OADDSTR)
+ w.pos(n.Pos())
+ w.exprList(n.List())
+
+ case ir.ODCLCONST:
// if exporting, DCLCONST should just be removed as its usage
// has already been replaced with literals
default:
- Fatalf("cannot export %v (%d) node\n"+
- "\t==> please file an issue and assign to gri@", n.Op, int(n.Op))
+ base.Fatalf("cannot export %v (%d) node\n"+
+ "\t==> please file an issue and assign to gri@", n.Op(), int(n.Op()))
}
}
-func (w *exportWriter) op(op Op) {
+func (w *exportWriter) op(op ir.Op) {
w.uint64(uint64(op))
}
-func (w *exportWriter) exprsOrNil(a, b *Node) {
+func (w *exportWriter) exprsOrNil(a, b ir.Node) {
ab := 0
if a != nil {
ab |= 1
}
}
-func (w *exportWriter) elemList(list Nodes) {
+func (w *exportWriter) fieldList(list ir.Nodes) {
w.uint64(uint64(list.Len()))
for _, n := range list.Slice() {
- w.selector(n.Sym)
- w.expr(n.Left)
+ n := n.(*ir.StructKeyExpr)
+ w.selector(n.Sym())
+ w.expr(n.Left())
}
}
-func (w *exportWriter) localName(n *Node) {
+func (w *exportWriter) localName(n *ir.Name) {
// Escape analysis happens after inline bodies are saved, but
// we're using the same ONAME nodes, so we might still see
// PAUTOHEAP here.
// PPARAM/PPARAMOUT, because we only want to include vargen in
// non-param names.
var v int32
- if n.Class() == PAUTO || (n.Class() == PAUTOHEAP && n.Name.Param.Stackcopy == nil) {
- v = n.Name.Vargen
+ if n.Class() == ir.PAUTO || (n.Class() == ir.PAUTOHEAP && n.Name().Stackcopy == nil) {
+ v = n.Name().Vargen
}
- w.localIdent(n.Sym, v)
+ w.localIdent(n.Sym(), v)
}
func (w *exportWriter) localIdent(s *types.Sym, v int32) {
// TODO(mdempsky): Fix autotmp hack.
if i := strings.LastIndex(name, "."); i >= 0 && !strings.HasPrefix(name, ".autotmp_") {
- Fatalf("unexpected dot in identifier: %v", name)
+ base.Fatalf("unexpected dot in identifier: %v", name)
}
if v > 0 {
if strings.Contains(name, "·") {
- Fatalf("exporter: unexpected · in symbol name")
+ base.Fatalf("exporter: unexpected · in symbol name")
}
name = fmt.Sprintf("%s·%d", name, v)
}
if !types.IsExported(name) && s.Pkg != w.currPkg {
- Fatalf("weird package in name: %v => %v, not %q", s, name, w.currPkg.Path)
+ base.Fatalf("weird package in name: %v => %v, not %q", s, name, w.currPkg.Path)
}
w.string(name)
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/bio"
"cmd/internal/goobj"
"cmd/internal/src"
"encoding/binary"
"fmt"
+ "go/constant"
"io"
"math/big"
"os"
inlineImporter = map[*types.Sym]iimporterAndOffset{}
)
-func expandDecl(n *Node) {
- if n.Op != ONONAME {
- return
+func expandDecl(n ir.Node) ir.Node {
+ if n, ok := n.(*ir.Name); ok {
+ return n
+ }
+
+ id := n.(*ir.Ident)
+ if n := id.Sym().PkgDef(); n != nil {
+ return n.(*ir.Name)
}
- r := importReaderFor(n, declImporter)
+ r := importReaderFor(id.Sym(), declImporter)
if r == nil {
// Can happen if user tries to reference an undeclared name.
- return
+ return n
}
- r.doDecl(n)
+ return r.doDecl(n.Sym())
}
-func expandInline(fn *Node) {
- if fn.Func.Inl.Body != nil {
+func expandInline(fn *ir.Func) {
+ if fn.Inl.Body != nil {
return
}
- r := importReaderFor(fn, inlineImporter)
+ r := importReaderFor(fn.Nname.Sym(), inlineImporter)
if r == nil {
- Fatalf("missing import reader for %v", fn)
+ base.Fatalf("missing import reader for %v", fn)
}
r.doInline(fn)
}
-func importReaderFor(n *Node, importers map[*types.Sym]iimporterAndOffset) *importReader {
- x, ok := importers[n.Sym]
+func importReaderFor(sym *types.Sym, importers map[*types.Sym]iimporterAndOffset) *importReader {
+ x, ok := importers[sym]
if !ok {
return nil
}
- return x.p.newReader(x.off, n.Sym.Pkg)
+ return x.p.newReader(x.off, sym.Pkg)
}
type intReader struct {
func (r *intReader) int64() int64 {
i, err := binary.ReadVarint(r.Reader)
if err != nil {
- yyerror("import %q: read error: %v", r.pkg.Path, err)
- errorexit()
+ base.Errorf("import %q: read error: %v", r.pkg.Path, err)
+ base.ErrorExit()
}
return i
}
func (r *intReader) uint64() uint64 {
i, err := binary.ReadUvarint(r.Reader)
if err != nil {
- yyerror("import %q: read error: %v", r.pkg.Path, err)
- errorexit()
+ base.Errorf("import %q: read error: %v", r.pkg.Path, err)
+ base.ErrorExit()
}
return i
}
func iimport(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType) {
- ir := &intReader{in, pkg}
+ ird := &intReader{in, pkg}
- version := ir.uint64()
+ version := ird.uint64()
if version != iexportVersion {
- yyerror("import %q: unknown export format version %d", pkg.Path, version)
- errorexit()
+ base.Errorf("import %q: unknown export format version %d", pkg.Path, version)
+ base.ErrorExit()
}
- sLen := ir.uint64()
- dLen := ir.uint64()
+ sLen := ird.uint64()
+ dLen := ird.uint64()
// Map string (and data) section into memory as a single large
// string. This reduces heap fragmentation and allows
// returning individual substrings very efficiently.
data, err := mapFile(in.File(), in.Offset(), int64(sLen+dLen))
if err != nil {
- yyerror("import %q: mapping input: %v", pkg.Path, err)
- errorexit()
+ base.Errorf("import %q: mapping input: %v", pkg.Path, err)
+ base.ErrorExit()
}
stringData := data[:sLen]
declData := data[sLen:]
}
// Declaration index.
- for nPkgs := ir.uint64(); nPkgs > 0; nPkgs-- {
- pkg := p.pkgAt(ir.uint64())
- pkgName := p.stringAt(ir.uint64())
- pkgHeight := int(ir.uint64())
+ for nPkgs := ird.uint64(); nPkgs > 0; nPkgs-- {
+ pkg := p.pkgAt(ird.uint64())
+ pkgName := p.stringAt(ird.uint64())
+ pkgHeight := int(ird.uint64())
if pkg.Name == "" {
pkg.Name = pkgName
pkg.Height = pkgHeight
- numImport[pkgName]++
+ types.NumImport[pkgName]++
// TODO(mdempsky): This belongs somewhere else.
- pkg.Lookup("_").Def = asTypesNode(nblank)
+ pkg.Lookup("_").Def = ir.BlankNode
} else {
if pkg.Name != pkgName {
- Fatalf("conflicting package names %v and %v for path %q", pkg.Name, pkgName, pkg.Path)
+ base.Fatalf("conflicting package names %v and %v for path %q", pkg.Name, pkgName, pkg.Path)
}
if pkg.Height != pkgHeight {
- Fatalf("conflicting package heights %v and %v for path %q", pkg.Height, pkgHeight, pkg.Path)
+ base.Fatalf("conflicting package heights %v and %v for path %q", pkg.Height, pkgHeight, pkg.Path)
}
}
- for nSyms := ir.uint64(); nSyms > 0; nSyms-- {
- s := pkg.Lookup(p.stringAt(ir.uint64()))
- off := ir.uint64()
-
- if _, ok := declImporter[s]; ok {
- continue
- }
- declImporter[s] = iimporterAndOffset{p, off}
+ for nSyms := ird.uint64(); nSyms > 0; nSyms-- {
+ s := pkg.Lookup(p.stringAt(ird.uint64()))
+ off := ird.uint64()
- // Create stub declaration. If used, this will
- // be overwritten by expandDecl.
- if s.Def != nil {
- Fatalf("unexpected definition for %v: %v", s, asNode(s.Def))
+ if _, ok := declImporter[s]; !ok {
+ declImporter[s] = iimporterAndOffset{p, off}
}
- s.Def = asTypesNode(npos(src.NoXPos, dclname(s)))
}
}
// Inline body index.
- for nPkgs := ir.uint64(); nPkgs > 0; nPkgs-- {
- pkg := p.pkgAt(ir.uint64())
+ for nPkgs := ird.uint64(); nPkgs > 0; nPkgs-- {
+ pkg := p.pkgAt(ird.uint64())
- for nSyms := ir.uint64(); nSyms > 0; nSyms-- {
- s := pkg.Lookup(p.stringAt(ir.uint64()))
- off := ir.uint64()
+ for nSyms := ird.uint64(); nSyms > 0; nSyms-- {
+ s := pkg.Lookup(p.stringAt(ird.uint64()))
+ off := ird.uint64()
- if _, ok := inlineImporter[s]; ok {
- continue
+ if _, ok := inlineImporter[s]; !ok {
+ inlineImporter[s] = iimporterAndOffset{p, off}
}
- inlineImporter[s] = iimporterAndOffset{p, off}
}
}
// Fingerprint.
_, err = io.ReadFull(in, fingerprint[:])
if err != nil {
- yyerror("import %s: error reading fingerprint", pkg.Path)
- errorexit()
+ base.Errorf("import %s: error reading fingerprint", pkg.Path)
+ base.ErrorExit()
}
return fingerprint
}
slen, n := binary.Uvarint(x[:n])
if n <= 0 {
- Fatalf("varint failed")
+ base.Fatalf("varint failed")
}
spos := off + uint64(n)
return p.stringData[spos : spos+slen]
r.currPkg = r.pkg()
}
-func (r *importReader) doDecl(n *Node) {
- if n.Op != ONONAME {
- Fatalf("doDecl: unexpected Op for %v: %v", n.Sym, n.Op)
- }
-
+func (r *importReader) doDecl(sym *types.Sym) *ir.Name {
tag := r.byte()
pos := r.pos()
case 'A':
typ := r.typ()
- importalias(r.p.ipkg, pos, n.Sym, typ)
+ return importalias(r.p.ipkg, pos, sym, typ)
case 'C':
- typ, val := r.value()
+ typ := r.typ()
+ val := r.value(typ)
- importconst(r.p.ipkg, pos, n.Sym, typ, val)
+ return importconst(r.p.ipkg, pos, sym, typ, val)
case 'F':
typ := r.signature(nil)
- importfunc(r.p.ipkg, pos, n.Sym, typ)
+ n := importfunc(r.p.ipkg, pos, sym, typ)
r.funcExt(n)
+ return n
case 'T':
// Types can be recursive. We need to setup a stub
// declaration before recursing.
- t := importtype(r.p.ipkg, pos, n.Sym)
+ n := importtype(r.p.ipkg, pos, sym)
+ t := n.Type()
// We also need to defer width calculations until
// after the underlying type has been assigned.
defercheckwidth()
underlying := r.typ()
- setUnderlying(t, underlying)
+ t.SetUnderlying(underlying)
resumecheckwidth()
if underlying.IsInterface() {
r.typeExt(t)
- break
+ return n
}
ms := make([]*types.Field, r.uint64())
recv := r.param()
mtyp := r.signature(recv)
- f := types.NewField()
- f.Pos = mpos
- f.Sym = msym
- f.Type = mtyp
- ms[i] = f
-
- m := newfuncnamel(mpos, methodSym(recv.Type, msym))
- m.Type = mtyp
- m.SetClass(PFUNC)
+ fn := ir.NewFunc(mpos)
+ fn.SetType(mtyp)
+ m := newFuncNameAt(mpos, methodSym(recv.Type, msym), fn)
+ m.SetType(mtyp)
+ m.SetClass(ir.PFUNC)
// methodSym already marked m.Sym as a function.
- // (comment from parser.go)
- // inl.C's inlnode in on a dotmeth node expects to find the inlineable body as
- // (dotmeth's type).Nname.Inl, and dotmeth's type has been pulled
- // out by typecheck's lookdot as this $$.ttype. So by providing
- // this back link here we avoid special casing there.
- mtyp.SetNname(asTypesNode(m))
+ f := types.NewField(mpos, msym, mtyp)
+ f.Nname = m
+ ms[i] = f
}
t.Methods().Set(ms)
for _, m := range ms {
r.methExt(m)
}
+ return n
case 'V':
typ := r.typ()
- importvar(r.p.ipkg, pos, n.Sym, typ)
+ n := importvar(r.p.ipkg, pos, sym, typ)
r.varExt(n)
+ return n
default:
- Fatalf("unexpected tag: %v", tag)
+ base.Fatalf("unexpected tag: %v", tag)
+ panic("unreachable")
}
}
-func (p *importReader) value() (typ *types.Type, v Val) {
- typ = p.typ()
-
+func (p *importReader) value(typ *types.Type) constant.Value {
switch constTypeOf(typ) {
- case CTNIL:
- v.U = &NilVal{}
- case CTBOOL:
- v.U = p.bool()
- case CTSTR:
- v.U = p.string()
- case CTINT:
- x := new(Mpint)
- x.Rune = typ == types.UntypedRune
- p.mpint(&x.Val, typ)
- v.U = x
- case CTFLT:
- x := newMpflt()
- p.float(x, typ)
- v.U = x
- case CTCPLX:
- x := newMpcmplx()
- p.float(&x.Real, typ)
- p.float(&x.Imag, typ)
- v.U = x
+ case constant.Bool:
+ return constant.MakeBool(p.bool())
+ case constant.String:
+ return constant.MakeString(p.string())
+ case constant.Int:
+ var i big.Int
+ p.mpint(&i, typ)
+ return makeInt(&i)
+ case constant.Float:
+ return p.float(typ)
+ case constant.Complex:
+ return makeComplex(p.float(typ), p.float(typ))
}
- return
+
+ base.Fatalf("unexpected value type: %v", typ)
+ panic("unreachable")
}
func (p *importReader) mpint(x *big.Int, typ *types.Type) {
v = -(n &^ 1) >> 1
}
if v < 1 || uint(v) > maxBytes {
- Fatalf("weird decoding: %v, %v => %v", n, signed, v)
+ base.Fatalf("weird decoding: %v, %v => %v", n, signed, v)
}
b := make([]byte, v)
p.Read(b)
}
}
-func (p *importReader) float(x *Mpflt, typ *types.Type) {
+func (p *importReader) float(typ *types.Type) constant.Value {
var mant big.Int
p.mpint(&mant, typ)
- m := x.Val.SetInt(&mant)
- if m.Sign() == 0 {
- return
+ var f big.Float
+ f.SetInt(&mant)
+ if f.Sign() != 0 {
+ f.SetMantExp(&f, int(p.int64()))
}
- m.SetMantExp(m, int(p.int64()))
+ return constant.Make(&f)
}
func (r *importReader) ident() *types.Sym {
}
pkg := r.currPkg
if types.IsExported(name) {
- pkg = localpkg
+ pkg = types.LocalPkg
}
return pkg.Lookup(name)
}
-func (r *importReader) qualifiedIdent() *types.Sym {
+func (r *importReader) qualifiedIdent() *ir.Ident {
name := r.string()
pkg := r.pkg()
- return pkg.Lookup(name)
+ sym := pkg.Lookup(name)
+ return ir.NewIdent(src.NoXPos, sym)
}
func (r *importReader) pos() src.XPos {
}
if r.prevBase == nil {
- Fatalf("missing posbase")
+ base.Fatalf("missing posbase")
}
pos := src.MakePos(r.prevBase, uint(r.prevLine), uint(r.prevColumn))
- return Ctxt.PosTable.XPos(pos)
+ return base.Ctxt.PosTable.XPos(pos)
}
func (r *importReader) typ() *types.Type {
t, ok := p.typCache[off]
if !ok {
if off < predeclReserved {
- Fatalf("predeclared type missing from cache: %d", off)
+ base.Fatalf("predeclared type missing from cache: %d", off)
}
t = p.newReader(off-predeclReserved, nil).typ1()
p.typCache[off] = t
func (r *importReader) typ1() *types.Type {
switch k := r.kind(); k {
default:
- Fatalf("unexpected kind tag in %q: %v", r.p.ipkg.Path, k)
+ base.Fatalf("unexpected kind tag in %q: %v", r.p.ipkg.Path, k)
return nil
case definedType:
// support inlining functions with local defined
// types. Therefore, this must be a package-scope
// type.
- n := asNode(r.qualifiedIdent().PkgDef())
- if n.Op == ONONAME {
- expandDecl(n)
- }
- if n.Op != OTYPE {
- Fatalf("expected OTYPE, got %v: %v, %v", n.Op, n.Sym, n)
+ n := expandDecl(r.qualifiedIdent())
+ if n.Op() != ir.OTYPE {
+ base.Fatalf("expected OTYPE, got %v: %v, %v", n.Op(), n.Sym(), n)
}
- return n.Type
+ return n.Type()
case pointerType:
return types.NewPtr(r.typ())
case sliceType:
emb := r.bool()
note := r.string()
- f := types.NewField()
- f.Pos = pos
- f.Sym = sym
- f.Type = typ
+ f := types.NewField(pos, sym, typ)
if emb {
f.Embedded = 1
}
fs[i] = f
}
- t := types.New(TSTRUCT)
- t.SetPkg(r.currPkg)
- t.SetFields(fs)
- return t
+ return types.NewStruct(r.currPkg, fs)
case interfaceType:
r.setPkg()
pos := r.pos()
typ := r.typ()
- f := types.NewField()
- f.Pos = pos
- f.Type = typ
- embeddeds[i] = f
+ embeddeds[i] = types.NewField(pos, nil, typ)
}
methods := make([]*types.Field, r.uint64())
sym := r.ident()
typ := r.signature(fakeRecvField())
- f := types.NewField()
- f.Pos = pos
- f.Sym = sym
- f.Type = typ
- methods[i] = f
+ methods[i] = types.NewField(pos, sym, typ)
}
- t := types.New(TINTER)
- t.SetPkg(r.currPkg)
- t.SetInterface(append(embeddeds, methods...))
+ t := types.NewInterface(r.currPkg, append(embeddeds, methods...))
// Ensure we expand the interface in the frontend (#25055).
checkwidth(t)
if n := len(params); n > 0 {
params[n-1].SetIsDDD(r.bool())
}
- t := functypefield(recv, params, results)
- t.SetPkg(r.currPkg)
- return t
+ return types.NewSignature(r.currPkg, recv, params, results)
}
func (r *importReader) paramList() []*types.Field {
}
func (r *importReader) param() *types.Field {
- f := types.NewField()
- f.Pos = r.pos()
- f.Sym = r.ident()
- f.Type = r.typ()
- return f
+ return types.NewField(r.pos(), r.ident(), r.typ())
}
func (r *importReader) bool() bool {
func (r *importReader) int64() int64 {
n, err := binary.ReadVarint(r)
if err != nil {
- Fatalf("readVarint: %v", err)
+ base.Fatalf("readVarint: %v", err)
}
return n
}
func (r *importReader) uint64() uint64 {
n, err := binary.ReadUvarint(r)
if err != nil {
- Fatalf("readVarint: %v", err)
+ base.Fatalf("readVarint: %v", err)
}
return n
}
func (r *importReader) byte() byte {
x, err := r.ReadByte()
if err != nil {
- Fatalf("declReader.ReadByte: %v", err)
+ base.Fatalf("declReader.ReadByte: %v", err)
}
return x
}
// Compiler-specific extensions.
-func (r *importReader) varExt(n *Node) {
- r.linkname(n.Sym)
- r.symIdx(n.Sym)
+func (r *importReader) varExt(n ir.Node) {
+ r.linkname(n.Sym())
+ r.symIdx(n.Sym())
}
-func (r *importReader) funcExt(n *Node) {
- r.linkname(n.Sym)
- r.symIdx(n.Sym)
+func (r *importReader) funcExt(n *ir.Name) {
+ r.linkname(n.Sym())
+ r.symIdx(n.Sym())
// Escape analysis.
for _, fs := range &types.RecvsParams {
- for _, f := range fs(n.Type).FieldSlice() {
+ for _, f := range fs(n.Type()).FieldSlice() {
f.Note = r.string()
}
}
// Inline body.
if u := r.uint64(); u > 0 {
- n.Func.Inl = &Inline{
+ n.Func().Inl = &ir.Inline{
Cost: int32(u - 1),
}
- n.Func.Endlineno = r.pos()
+ n.Func().Endlineno = r.pos()
}
}
if r.bool() {
m.SetNointerface(true)
}
- r.funcExt(asNode(m.Type.Nname()))
+ r.funcExt(m.Nname.(*ir.Name))
}
func (r *importReader) linkname(s *types.Sym) {
idx := int32(r.int64())
if idx != -1 {
if s.Linkname != "" {
- Fatalf("bad index for linknamed symbol: %v %d\n", lsym, idx)
+ base.Fatalf("bad index for linknamed symbol: %v %d\n", lsym, idx)
}
lsym.SymIdx = idx
lsym.Set(obj.AttrIndexed, true)
// so we can use index to reference the symbol.
var typeSymIdx = make(map[*types.Type][2]int64)
-func (r *importReader) doInline(n *Node) {
- if len(n.Func.Inl.Body) != 0 {
- Fatalf("%v already has inline body", n)
+func BaseTypeIndex(t *types.Type) int64 {
+ tbase := t
+ if t.IsPtr() && t.Sym() == nil && t.Elem().Sym() != nil {
+ tbase = t.Elem()
+ }
+ i, ok := typeSymIdx[tbase]
+ if !ok {
+ return -1
+ }
+ if t != tbase {
+ return i[1]
+ }
+ return i[0]
+}
+
+func (r *importReader) doInline(fn *ir.Func) {
+ if len(fn.Inl.Body) != 0 {
+ base.Fatalf("%v already has inline body", fn)
}
- funchdr(n)
+ funchdr(fn)
body := r.stmtList()
funcbody()
if body == nil {
// (not doing so can cause significant performance
// degradation due to unnecessary calls to empty
// functions).
- body = []*Node{}
+ body = []ir.Node{}
}
- n.Func.Inl.Body = body
+ fn.Inl.Body = body
- importlist = append(importlist, n)
+ importlist = append(importlist, fn)
- if Debug.E > 0 && Debug.m > 2 {
- if Debug.m > 3 {
- fmt.Printf("inl body for %v %#v: %+v\n", n, n.Type, asNodes(n.Func.Inl.Body))
+ if base.Flag.E > 0 && base.Flag.LowerM > 2 {
+ if base.Flag.LowerM > 3 {
+ fmt.Printf("inl body for %v %v: %+v\n", fn, fn.Type(), ir.AsNodes(fn.Inl.Body))
} else {
- fmt.Printf("inl body for %v %#v: %v\n", n, n.Type, asNodes(n.Func.Inl.Body))
+ fmt.Printf("inl body for %v %v: %v\n", fn, fn.Type(), ir.AsNodes(fn.Inl.Body))
}
}
}
// unrefined nodes (since this is what the importer uses). The respective case
// entries are unreachable in the importer.
-func (r *importReader) stmtList() []*Node {
- var list []*Node
+func (r *importReader) stmtList() []ir.Node {
+ var list []ir.Node
for {
n := r.node()
if n == nil {
break
}
- // OBLOCK nodes may be created when importing ODCL nodes - unpack them
- if n.Op == OBLOCK {
- list = append(list, n.List.Slice()...)
+ // OBLOCK nodes are not written to the import data directly,
+ // but the handling of ODCL calls liststmt, which creates one.
+ // Inline them into the statement list.
+ if n.Op() == ir.OBLOCK {
+ list = append(list, n.List().Slice()...)
} else {
list = append(list, n)
}
return list
}
-func (r *importReader) caseList(sw *Node) []*Node {
- namedTypeSwitch := sw.Op == OSWITCH && sw.Left != nil && sw.Left.Op == OTYPESW && sw.Left.Left != nil
+func (r *importReader) caseList(sw ir.Node) []ir.Node {
+ namedTypeSwitch := isNamedTypeSwitch(sw)
- cases := make([]*Node, r.uint64())
+ cases := make([]ir.Node, r.uint64())
for i := range cases {
- cas := nodl(r.pos(), OCASE, nil, nil)
- cas.List.Set(r.stmtList())
+ cas := ir.NodAt(r.pos(), ir.OCASE, nil, nil)
+ cas.PtrList().Set(r.stmtList())
if namedTypeSwitch {
// Note: per-case variables will have distinct, dotted
// names after import. That's okay: swt.go only needs
// Sym for diagnostics anyway.
- caseVar := newnamel(cas.Pos, r.ident())
+ caseVar := ir.NewNameAt(cas.Pos(), r.ident())
declare(caseVar, dclcontext)
- cas.Rlist.Set1(caseVar)
- caseVar.Name.Defn = sw.Left
+ cas.PtrRlist().Set1(caseVar)
+ caseVar.Defn = sw.(*ir.SwitchStmt).Left()
}
- cas.Nbody.Set(r.stmtList())
+ cas.PtrBody().Set(r.stmtList())
cases[i] = cas
}
return cases
}
-func (r *importReader) exprList() []*Node {
- var list []*Node
+func (r *importReader) exprList() []ir.Node {
+ var list []ir.Node
for {
n := r.expr()
if n == nil {
return list
}
-func (r *importReader) expr() *Node {
+func (r *importReader) expr() ir.Node {
n := r.node()
- if n != nil && n.Op == OBLOCK {
- Fatalf("unexpected block node: %v", n)
+ if n != nil && n.Op() == ir.OBLOCK {
+ base.Fatalf("unexpected block node: %v", n)
}
return n
}
// TODO(gri) split into expr and stmt
-func (r *importReader) node() *Node {
+func (r *importReader) node() ir.Node {
switch op := r.op(); op {
// expressions
// case OPAREN:
// unreachable - unpacked by exporter
- case OLITERAL:
+ case ir.ONIL:
pos := r.pos()
- typ, val := r.value()
+ typ := r.typ()
+
+ n := npos(pos, nodnil())
+ n.SetType(typ)
+ return n
+
+ case ir.OLITERAL:
+ pos := r.pos()
+ typ := r.typ()
- n := npos(pos, nodlit(val))
- n.Type = typ
+ n := npos(pos, ir.NewLiteral(r.value(typ)))
+ n.SetType(typ)
return n
- case ONONAME:
- return mkname(r.qualifiedIdent())
+ case ir.ONONAME:
+ return r.qualifiedIdent()
- case ONAME:
- return mkname(r.ident())
+ case ir.ONAME:
+ return r.ident().Def.(*ir.Name)
// case OPACK, ONONAME:
// unreachable - should have been resolved by typechecking
- case OTYPE:
- return typenod(r.typ())
+ case ir.OTYPE:
+ return ir.TypeNode(r.typ())
- case OTYPESW:
- n := nodl(r.pos(), OTYPESW, nil, nil)
+ case ir.OTYPESW:
+ pos := r.pos()
+ var tag *ir.Ident
if s := r.ident(); s != nil {
- n.Left = npos(n.Pos, newnoname(s))
+ tag = ir.NewIdent(pos, s)
}
- n.Right, _ = r.exprsOrNil()
- return n
+ expr, _ := r.exprsOrNil()
+ return ir.NewTypeSwitchGuard(pos, tag, expr)
// case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC:
// unreachable - should have been resolved by typechecking
// case OPTRLIT:
// unreachable - mapped to case OADDR below by exporter
- case OSTRUCTLIT:
+ case ir.OSTRUCTLIT:
// TODO(mdempsky): Export position information for OSTRUCTKEY nodes.
- savedlineno := lineno
- lineno = r.pos()
- n := nodl(lineno, OCOMPLIT, nil, typenod(r.typ()))
- n.List.Set(r.elemList()) // special handling of field names
- lineno = savedlineno
+ savedlineno := base.Pos
+ base.Pos = r.pos()
+ n := ir.NodAt(base.Pos, ir.OCOMPLIT, nil, ir.TypeNode(r.typ()))
+ n.PtrList().Set(r.elemList()) // special handling of field names
+ base.Pos = savedlineno
return n
// case OARRAYLIT, OSLICELIT, OMAPLIT:
// unreachable - mapped to case OCOMPLIT below by exporter
- case OCOMPLIT:
- n := nodl(r.pos(), OCOMPLIT, nil, typenod(r.typ()))
- n.List.Set(r.exprList())
+ case ir.OCOMPLIT:
+ n := ir.NodAt(r.pos(), ir.OCOMPLIT, nil, ir.TypeNode(r.typ()))
+ n.PtrList().Set(r.exprList())
return n
- case OKEY:
+ case ir.OKEY:
pos := r.pos()
left, right := r.exprsOrNil()
- return nodl(pos, OKEY, left, right)
+ return ir.NodAt(pos, ir.OKEY, left, right)
// case OSTRUCTKEY:
// unreachable - handled in case OSTRUCTLIT by elemList
// case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH:
// unreachable - mapped to case OXDOT below by exporter
- case OXDOT:
+ case ir.OXDOT:
// see parser.new_dotname
- return npos(r.pos(), nodSym(OXDOT, r.expr(), r.ident()))
+ return npos(r.pos(), nodSym(ir.OXDOT, r.expr(), r.ident()))
// case ODOTTYPE, ODOTTYPE2:
// unreachable - mapped to case ODOTTYPE below by exporter
- case ODOTTYPE:
- n := nodl(r.pos(), ODOTTYPE, r.expr(), nil)
- n.Type = r.typ()
+ case ir.ODOTTYPE:
+ n := ir.NodAt(r.pos(), ir.ODOTTYPE, r.expr(), nil)
+ n.SetType(r.typ())
return n
// case OINDEX, OINDEXMAP, OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR:
// unreachable - mapped to cases below by exporter
- case OINDEX:
- return nodl(r.pos(), op, r.expr(), r.expr())
+ case ir.OINDEX:
+ return ir.NodAt(r.pos(), ir.OINDEX, r.expr(), r.expr())
- case OSLICE, OSLICE3:
- n := nodl(r.pos(), op, r.expr(), nil)
+ case ir.OSLICE, ir.OSLICE3:
+ n := ir.NewSliceExpr(r.pos(), op, r.expr())
low, high := r.exprsOrNil()
- var max *Node
- if n.Op.IsSlice3() {
+ var max ir.Node
+ if n.Op().IsSlice3() {
max = r.expr()
}
n.SetSliceBounds(low, high, max)
// case OCONV, OCONVIFACE, OCONVNOP, OBYTES2STR, ORUNES2STR, OSTR2BYTES, OSTR2RUNES, ORUNESTR:
// unreachable - mapped to OCONV case below by exporter
- case OCONV:
- n := nodl(r.pos(), OCONV, r.expr(), nil)
- n.Type = r.typ()
+ case ir.OCONV:
+ n := ir.NodAt(r.pos(), ir.OCONV, r.expr(), nil)
+ n.SetType(r.typ())
return n
- case OCOPY, OCOMPLEX, OREAL, OIMAG, OAPPEND, OCAP, OCLOSE, ODELETE, OLEN, OMAKE, ONEW, OPANIC, ORECOVER, OPRINT, OPRINTN:
- n := npos(r.pos(), builtinCall(op))
- n.List.Set(r.exprList())
- if op == OAPPEND {
+ case ir.OCOPY, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCAP, ir.OCLOSE, ir.ODELETE, ir.OLEN, ir.OMAKE, ir.ONEW, ir.OPANIC, ir.ORECOVER, ir.OPRINT, ir.OPRINTN:
+ n := builtinCall(r.pos(), op)
+ n.PtrList().Set(r.exprList())
+ if op == ir.OAPPEND {
n.SetIsDDD(r.bool())
}
return n
- // case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER, OGETG:
+ // case OCALLFUNC, OCALLMETH, OCALLINTER, OGETG:
// unreachable - mapped to OCALL case below by exporter
- case OCALL:
- n := nodl(r.pos(), OCALL, nil, nil)
- n.Ninit.Set(r.stmtList())
- n.Left = r.expr()
- n.List.Set(r.exprList())
+ case ir.OCALL:
+ n := ir.NodAt(r.pos(), ir.OCALL, nil, nil)
+ n.PtrInit().Set(r.stmtList())
+ n.SetLeft(r.expr())
+ n.PtrList().Set(r.exprList())
n.SetIsDDD(r.bool())
return n
- case OMAKEMAP, OMAKECHAN, OMAKESLICE:
- n := npos(r.pos(), builtinCall(OMAKE))
- n.List.Append(typenod(r.typ()))
- n.List.Append(r.exprList()...)
+ case ir.OMAKEMAP, ir.OMAKECHAN, ir.OMAKESLICE:
+ n := builtinCall(r.pos(), ir.OMAKE)
+ n.PtrList().Append(ir.TypeNode(r.typ()))
+ n.PtrList().Append(r.exprList()...)
return n
// unary expressions
- case OPLUS, ONEG, OADDR, OBITNOT, ODEREF, ONOT, ORECV:
- return nodl(r.pos(), op, r.expr(), nil)
+ case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT, ir.ORECV:
+ return ir.NewUnaryExpr(r.pos(), op, r.expr())
+
+ case ir.OADDR:
+ return nodAddrAt(r.pos(), r.expr())
+
+ case ir.ODEREF:
+ return ir.NewStarExpr(r.pos(), r.expr())
// binary expressions
- case OADD, OAND, OANDAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE, OLT,
- OLSH, OMOD, OMUL, ONE, OOR, OOROR, ORSH, OSEND, OSUB, OXOR:
- return nodl(r.pos(), op, r.expr(), r.expr())
+ case ir.OADD, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OEQ, ir.OGE, ir.OGT, ir.OLE, ir.OLT,
+ ir.OLSH, ir.OMOD, ir.OMUL, ir.ONE, ir.OOR, ir.ORSH, ir.OSUB, ir.OXOR:
+ return ir.NewBinaryExpr(r.pos(), op, r.expr(), r.expr())
+
+ case ir.OANDAND, ir.OOROR:
+ return ir.NewLogicalExpr(r.pos(), op, r.expr(), r.expr())
+
+ case ir.OSEND:
+ return ir.NewSendStmt(r.pos(), r.expr(), r.expr())
- case OADDSTR:
+ case ir.OADDSTR:
pos := r.pos()
list := r.exprList()
x := npos(pos, list[0])
for _, y := range list[1:] {
- x = nodl(pos, OADD, x, y)
+ x = ir.NodAt(pos, ir.OADD, x, y)
}
return x
// --------------------------------------------------------------------
// statements
- case ODCL:
+ case ir.ODCL:
pos := r.pos()
- lhs := npos(pos, dclname(r.ident()))
- typ := typenod(r.typ())
- return npos(pos, liststmt(variter([]*Node{lhs}, typ, nil))) // TODO(gri) avoid list creation
+ lhs := ir.NewDeclNameAt(pos, ir.ONAME, r.ident())
+ lhs.SetType(r.typ())
- // case ODCLFIELD:
- // unimplemented
+ declare(lhs, ir.PAUTO)
+
+ var stmts ir.Nodes
+ stmts.Append(ir.Nod(ir.ODCL, lhs, nil))
+ stmts.Append(ir.Nod(ir.OAS, lhs, nil))
+ return npos(pos, liststmt(stmts.Slice()))
// case OAS, OASWB:
// unreachable - mapped to OAS case below by exporter
- case OAS:
- return nodl(r.pos(), OAS, r.expr(), r.expr())
+ case ir.OAS:
+ return ir.NodAt(r.pos(), ir.OAS, r.expr(), r.expr())
- case OASOP:
- n := nodl(r.pos(), OASOP, nil, nil)
+ case ir.OASOP:
+ n := ir.NodAt(r.pos(), ir.OASOP, nil, nil)
n.SetSubOp(r.op())
- n.Left = r.expr()
+ n.SetLeft(r.expr())
if !r.bool() {
- n.Right = nodintconst(1)
+ n.SetRight(nodintconst(1))
n.SetImplicit(true)
} else {
- n.Right = r.expr()
+ n.SetRight(r.expr())
}
return n
// case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
// unreachable - mapped to OAS2 case below by exporter
- case OAS2:
- n := nodl(r.pos(), OAS2, nil, nil)
- n.List.Set(r.exprList())
- n.Rlist.Set(r.exprList())
+ case ir.OAS2:
+ n := ir.NodAt(r.pos(), ir.OAS2, nil, nil)
+ n.PtrList().Set(r.exprList())
+ n.PtrRlist().Set(r.exprList())
return n
- case ORETURN:
- n := nodl(r.pos(), ORETURN, nil, nil)
- n.List.Set(r.exprList())
+ case ir.ORETURN:
+ n := ir.NodAt(r.pos(), ir.ORETURN, nil, nil)
+ n.PtrList().Set(r.exprList())
return n
// case ORETJMP:
// unreachable - generated by compiler for trampolin routines (not exported)
- case OGO, ODEFER:
- return nodl(r.pos(), op, r.expr(), nil)
+ case ir.OGO, ir.ODEFER:
+ return ir.NewGoDeferStmt(r.pos(), op, r.expr())
- case OIF:
- n := nodl(r.pos(), OIF, nil, nil)
- n.Ninit.Set(r.stmtList())
- n.Left = r.expr()
- n.Nbody.Set(r.stmtList())
- n.Rlist.Set(r.stmtList())
+ case ir.OIF:
+ n := ir.NodAt(r.pos(), ir.OIF, nil, nil)
+ n.PtrInit().Set(r.stmtList())
+ n.SetLeft(r.expr())
+ n.PtrBody().Set(r.stmtList())
+ n.PtrRlist().Set(r.stmtList())
return n
- case OFOR:
- n := nodl(r.pos(), OFOR, nil, nil)
- n.Ninit.Set(r.stmtList())
- n.Left, n.Right = r.exprsOrNil()
- n.Nbody.Set(r.stmtList())
+ case ir.OFOR:
+ n := ir.NodAt(r.pos(), ir.OFOR, nil, nil)
+ n.PtrInit().Set(r.stmtList())
+ left, right := r.exprsOrNil()
+ n.SetLeft(left)
+ n.SetRight(right)
+ n.PtrBody().Set(r.stmtList())
return n
- case ORANGE:
- n := nodl(r.pos(), ORANGE, nil, nil)
- n.List.Set(r.stmtList())
- n.Right = r.expr()
- n.Nbody.Set(r.stmtList())
+ case ir.ORANGE:
+ n := ir.NodAt(r.pos(), ir.ORANGE, nil, nil)
+ n.PtrList().Set(r.stmtList())
+ n.SetRight(r.expr())
+ n.PtrBody().Set(r.stmtList())
return n
- case OSELECT, OSWITCH:
- n := nodl(r.pos(), op, nil, nil)
- n.Ninit.Set(r.stmtList())
- n.Left, _ = r.exprsOrNil()
- n.List.Set(r.caseList(n))
+ case ir.OSELECT:
+ n := ir.NodAt(r.pos(), ir.OSELECT, nil, nil)
+ n.PtrInit().Set(r.stmtList())
+ r.exprsOrNil() // TODO(rsc): Delete (and fix exporter). These are always nil.
+ n.PtrList().Set(r.caseList(n))
+ return n
+
+ case ir.OSWITCH:
+ n := ir.NodAt(r.pos(), ir.OSWITCH, nil, nil)
+ n.PtrInit().Set(r.stmtList())
+ left, _ := r.exprsOrNil()
+ n.SetLeft(left)
+ n.PtrList().Set(r.caseList(n))
return n
// case OCASE:
// handled by caseList
- case OFALL:
- n := nodl(r.pos(), OFALL, nil, nil)
+ case ir.OFALL:
+ n := ir.NodAt(r.pos(), ir.OFALL, nil, nil)
return n
- case OBREAK, OCONTINUE:
- pos := r.pos()
- left, _ := r.exprsOrNil()
- if left != nil {
- left = newname(left.Sym)
- }
- return nodl(pos, op, left, nil)
-
// case OEMPTY:
// unreachable - not emitted by exporter
- case OGOTO, OLABEL:
- n := nodl(r.pos(), op, nil, nil)
- n.Sym = lookup(r.string())
- return n
+ case ir.OBREAK, ir.OCONTINUE, ir.OGOTO:
+ var sym *types.Sym
+ pos := r.pos()
+ if label := r.string(); label != "" {
+ sym = lookup(label)
+ }
+ return ir.NewBranchStmt(pos, op, sym)
+
+ case ir.OLABEL:
+ return ir.NewLabelStmt(r.pos(), lookup(r.string()))
- case OEND:
+ case ir.OEND:
return nil
default:
- Fatalf("cannot import %v (%d) node\n"+
+ base.Fatalf("cannot import %v (%d) node\n"+
"\t==> please file an issue and assign to gri@", op, int(op))
panic("unreachable") // satisfy compiler
}
}
-func (r *importReader) op() Op {
- return Op(r.uint64())
+func (r *importReader) op() ir.Op {
+ return ir.Op(r.uint64())
}
-func (r *importReader) elemList() []*Node {
+func (r *importReader) elemList() []ir.Node {
c := r.uint64()
- list := make([]*Node, c)
+ list := make([]ir.Node, c)
for i := range list {
s := r.ident()
- list[i] = nodSym(OSTRUCTKEY, r.expr(), s)
+ list[i] = nodSym(ir.OSTRUCTKEY, r.expr(), s)
}
return list
}
-func (r *importReader) exprsOrNil() (a, b *Node) {
+func (r *importReader) exprsOrNil() (a, b ir.Node) {
ab := r.uint64()
if ab&1 != 0 {
a = r.expr()
}
return
}
+
+func builtinCall(pos src.XPos, op ir.Op) *ir.CallExpr {
+ return ir.NewCallExpr(pos, ir.OCALL, ir.NewIdent(base.Pos, types.BuiltinPkg.Lookup(ir.OpNames[op])), nil)
+}
+
+func npos(pos src.XPos, n ir.Node) ir.Node {
+ n.SetPos(pos)
+ return n
+}
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/obj"
)
// the name, normally "pkg.init", is altered to "pkg.init.0".
var renameinitgen int
-// Dummy function for autotmps generated during typechecking.
-var dummyInitFn = nod(ODCLFUNC, nil, nil)
+// Function collecting autotmps generated during typechecking,
+// to be included in the package-level init function.
+var initTodo = ir.NewFunc(base.Pos)
func renameinit() *types.Sym {
s := lookupN("init.", renameinitgen)
return s
}
-// fninit makes an initialization record for the package.
+// fninit makes and returns an initialization record for the package.
// See runtime/proc.go:initTask for its layout.
// The 3 tasks for initialization are:
// 1) Initialize all of the packages the current package depends on.
// 2) Initialize all the variables that have initializers.
// 3) Run any init functions.
-func fninit(n []*Node) {
- nf := initOrder(n)
+func fninit() *ir.Name {
+ nf := initOrder(Target.Decls)
var deps []*obj.LSym // initTask records for packages the current package depends on
var fns []*obj.LSym // functions to call for package initialization
// Find imported packages with init tasks.
- for _, s := range types.InitSyms {
- deps = append(deps, s.Linksym())
+ for _, pkg := range Target.Imports {
+ n := resolve(ir.NewIdent(base.Pos, pkg.Lookup(".inittask")))
+ if n.Op() == ir.ONONAME {
+ continue
+ }
+ if n.Op() != ir.ONAME || n.(*ir.Name).Class() != ir.PEXTERN {
+ base.Fatalf("bad inittask: %v", n)
+ }
+ deps = append(deps, n.(*ir.Name).Sym().Linksym())
}
// Make a function that contains all the initialization statements.
if len(nf) > 0 {
- lineno = nf[0].Pos // prolog/epilog gets line number of first init stmt
+ base.Pos = nf[0].Pos() // prolog/epilog gets line number of first init stmt
initializers := lookup("init")
- fn := dclfunc(initializers, nod(OTFUNC, nil, nil))
- for _, dcl := range dummyInitFn.Func.Dcl {
- dcl.Name.Curfn = fn
+ fn := dclfunc(initializers, ir.NewFuncType(base.Pos, nil, nil, nil))
+ for _, dcl := range initTodo.Dcl {
+ dcl.Curfn = fn
}
- fn.Func.Dcl = append(fn.Func.Dcl, dummyInitFn.Func.Dcl...)
- dummyInitFn.Func.Dcl = nil
+ fn.Dcl = append(fn.Dcl, initTodo.Dcl...)
+ initTodo.Dcl = nil
- fn.Nbody.Set(nf)
+ fn.PtrBody().Set(nf)
funcbody()
- fn = typecheck(fn, ctxStmt)
+ typecheckFunc(fn)
Curfn = fn
typecheckslice(nf, ctxStmt)
Curfn = nil
- xtop = append(xtop, fn)
+ Target.Decls = append(Target.Decls, fn)
fns = append(fns, initializers.Linksym())
}
- if dummyInitFn.Func.Dcl != nil {
- // We only generate temps using dummyInitFn if there
+ if initTodo.Dcl != nil {
+ // We only generate temps using initTodo if there
// are package-scope initialization statements, so
// something's weird if we get here.
- Fatalf("dummyInitFn still has declarations")
+ base.Fatalf("initTodo still has declarations")
}
- dummyInitFn = nil
+ initTodo = nil
// Record user init functions.
- for i := 0; i < renameinitgen; i++ {
- s := lookupN("init.", i)
- fn := asNode(s.Def).Name.Defn
+ for _, fn := range Target.Inits {
// Skip init functions with empty bodies.
- if fn.Nbody.Len() == 1 && fn.Nbody.First().Op == OEMPTY {
- continue
+ if fn.Body().Len() == 1 {
+ if stmt := fn.Body().First(); stmt.Op() == ir.OBLOCK && stmt.(*ir.BlockStmt).List().Len() == 0 {
+ continue
+ }
}
- fns = append(fns, s.Linksym())
+ fns = append(fns, fn.Nname.Sym().Linksym())
}
- if len(deps) == 0 && len(fns) == 0 && localpkg.Name != "main" && localpkg.Name != "runtime" {
- return // nothing to initialize
+ if len(deps) == 0 && len(fns) == 0 && types.LocalPkg.Name != "main" && types.LocalPkg.Name != "runtime" {
+ return nil // nothing to initialize
}
// Make an .inittask structure.
sym := lookup(".inittask")
- nn := newname(sym)
- nn.Type = types.Types[TUINT8] // dummy type
- nn.SetClass(PEXTERN)
- sym.Def = asTypesNode(nn)
- exportsym(nn)
+ task := NewName(sym)
+ task.SetType(types.Types[types.TUINT8]) // fake type
+ task.SetClass(ir.PEXTERN)
+ sym.Def = task
lsym := sym.Linksym()
ot := 0
ot = duintptr(lsym, ot, 0) // state: not initialized yet
// An initTask has pointers, but none into the Go heap.
// It's not quite read only, the state field must be modifiable.
ggloblsym(lsym, int32(ot), obj.NOPTR)
+ return task
}
"bytes"
"container/heap"
"fmt"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
)
// Package initialization
type InitOrder struct {
// blocking maps initialization assignments to the assignments
// that depend on it.
- blocking map[*Node][]*Node
+ blocking map[ir.Node][]ir.Node
// ready is the queue of Pending initialization assignments
// that are ready for initialization.
ready declOrder
+
+ order map[ir.Node]int
}
// initOrder computes initialization order for a list l of
// package-level declarations (in declaration order) and outputs the
// corresponding list of statements to include in the init() function
// body.
-func initOrder(l []*Node) []*Node {
+func initOrder(l []ir.Node) []ir.Node {
s := InitSchedule{
- initplans: make(map[*Node]*InitPlan),
- inittemps: make(map[*Node]*Node),
+ initplans: make(map[ir.Node]*InitPlan),
+ inittemps: make(map[ir.Node]*ir.Name),
}
o := InitOrder{
- blocking: make(map[*Node][]*Node),
+ blocking: make(map[ir.Node][]ir.Node),
+ order: make(map[ir.Node]int),
}
// Process all package-level assignment in declaration order.
for _, n := range l {
- switch n.Op {
- case OAS, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
+ switch n.Op() {
+ case ir.OAS, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
o.processAssign(n)
o.flushReady(s.staticInit)
- case ODCLCONST, ODCLFUNC, ODCLTYPE:
+ case ir.ODCLCONST, ir.ODCLFUNC, ir.ODCLTYPE:
// nop
default:
- Fatalf("unexpected package-level statement: %v", n)
+ base.Fatalf("unexpected package-level statement: %v", n)
}
}
// Check that all assignments are now Done; if not, there must
// have been a dependency cycle.
for _, n := range l {
- switch n.Op {
- case OAS, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
- if n.Initorder() != InitDone {
+ switch n.Op() {
+ case ir.OAS, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
+ if o.order[n] != orderDone {
// If there have already been errors
// printed, those errors may have
// confused us and there might not be
// a loop. Let the user fix those
// first.
- if nerrors > 0 {
- errorexit()
- }
+ base.ExitIfErrors()
- findInitLoopAndExit(firstLHS(n), new([]*Node))
- Fatalf("initialization unfinished, but failed to identify loop")
+ o.findInitLoopAndExit(firstLHS(n), new([]*ir.Name))
+ base.Fatalf("initialization unfinished, but failed to identify loop")
}
}
}
// Invariant consistency check. If this is non-zero, then we
// should have found a cycle above.
if len(o.blocking) != 0 {
- Fatalf("expected empty map: %v", o.blocking)
+ base.Fatalf("expected empty map: %v", o.blocking)
}
return s.out
}
-func (o *InitOrder) processAssign(n *Node) {
- if n.Initorder() != InitNotStarted || n.Xoffset != BADWIDTH {
- Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Xoffset)
+func (o *InitOrder) processAssign(n ir.Node) {
+ if _, ok := o.order[n]; ok {
+ base.Fatalf("unexpected state: %v, %v", n, o.order[n])
}
-
- n.SetInitorder(InitPending)
- n.Xoffset = 0
+ o.order[n] = 0
// Compute number of variable dependencies and build the
// inverse dependency ("blocking") graph.
for dep := range collectDeps(n, true) {
- defn := dep.Name.Defn
+ defn := dep.Defn
// Skip dependencies on functions (PFUNC) and
// variables already initialized (InitDone).
- if dep.Class() != PEXTERN || defn.Initorder() == InitDone {
+ if dep.Class() != ir.PEXTERN || o.order[defn] == orderDone {
continue
}
- n.Xoffset++
+ o.order[n]++
o.blocking[defn] = append(o.blocking[defn], n)
}
- if n.Xoffset == 0 {
+ if o.order[n] == 0 {
heap.Push(&o.ready, n)
}
}
+// orderDone is the value in InitOrder.order marking an assignment whose
+// initialization has completed (set in flushReady; tested in initOrder,
+// processAssign, and findInitLoopAndExit). Pending assignments instead
+// hold a non-negative count of unsatisfied dependencies, so a large
+// negative sentinel cannot collide with any in-progress count.
+const orderDone = -1000
+
// flushReady repeatedly applies initialize to the earliest (in
// declaration order) assignment ready for initialization and updates
// the inverse dependency ("blocking") graph.
-func (o *InitOrder) flushReady(initialize func(*Node)) {
+func (o *InitOrder) flushReady(initialize func(ir.Node)) {
for o.ready.Len() != 0 {
- n := heap.Pop(&o.ready).(*Node)
- if n.Initorder() != InitPending || n.Xoffset != 0 {
- Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Xoffset)
+ n := heap.Pop(&o.ready).(ir.Node)
+ if order, ok := o.order[n]; !ok || order != 0 {
+ base.Fatalf("unexpected state: %v, %v, %v", n, ok, order)
}
initialize(n)
- n.SetInitorder(InitDone)
- n.Xoffset = BADWIDTH
+ o.order[n] = orderDone
blocked := o.blocking[n]
delete(o.blocking, n)
for _, m := range blocked {
- m.Xoffset--
- if m.Xoffset == 0 {
+ if o.order[m]--; o.order[m] == 0 {
heap.Push(&o.ready, m)
}
}
// path points to a slice used for tracking the sequence of
// variables/functions visited. Using a pointer to a slice allows the
// slice capacity to grow and limit reallocations.
-func findInitLoopAndExit(n *Node, path *[]*Node) {
+func (o *InitOrder) findInitLoopAndExit(n *ir.Name, path *[]*ir.Name) {
// We implement a simple DFS loop-finding algorithm. This
// could be faster, but initialization cycles are rare.
// There might be multiple loops involving n; by sorting
// references, we deterministically pick the one reported.
- refers := collectDeps(n.Name.Defn, false).Sorted(func(ni, nj *Node) bool {
- return ni.Pos.Before(nj.Pos)
+ refers := collectDeps(n.Name().Defn, false).Sorted(func(ni, nj *ir.Name) bool {
+ return ni.Pos().Before(nj.Pos())
})
*path = append(*path, n)
for _, ref := range refers {
// Short-circuit variables that were initialized.
- if ref.Class() == PEXTERN && ref.Name.Defn.Initorder() == InitDone {
+ if ref.Class() == ir.PEXTERN && o.order[ref.Defn] == orderDone {
continue
}
- findInitLoopAndExit(ref, path)
+ o.findInitLoopAndExit(ref, path)
}
*path = (*path)[:len(*path)-1]
}
// reportInitLoopAndExit reports an initialization loop as an error
// and exits. However, if l is not actually an initialization loop, it
// simply returns instead.
-func reportInitLoopAndExit(l []*Node) {
+func reportInitLoopAndExit(l []*ir.Name) {
// Rotate loop so that the earliest variable declaration is at
// the start.
i := -1
for j, n := range l {
- if n.Class() == PEXTERN && (i == -1 || n.Pos.Before(l[i].Pos)) {
+ if n.Class() == ir.PEXTERN && (i == -1 || n.Pos().Before(l[i].Pos())) {
i = j
}
}
var msg bytes.Buffer
fmt.Fprintf(&msg, "initialization loop:\n")
for _, n := range l {
- fmt.Fprintf(&msg, "\t%v: %v refers to\n", n.Line(), n)
+ fmt.Fprintf(&msg, "\t%v: %v refers to\n", ir.Line(n), n)
}
- fmt.Fprintf(&msg, "\t%v: %v", l[0].Line(), l[0])
+ fmt.Fprintf(&msg, "\t%v: %v", ir.Line(l[0]), l[0])
- yyerrorl(l[0].Pos, msg.String())
- errorexit()
+ base.ErrorfAt(l[0].Pos(), msg.String())
+ base.ErrorExit()
}
// collectDeps returns all of the package-level functions and
// variables that declaration n depends on. If transitive is true,
// then it also includes the transitive dependencies of any depended
// upon functions (but not variables).
-func collectDeps(n *Node, transitive bool) NodeSet {
+func collectDeps(n ir.Node, transitive bool) ir.NameSet {
d := initDeps{transitive: transitive}
- switch n.Op {
- case OAS:
- d.inspect(n.Right)
- case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
- d.inspect(n.Right)
- case ODCLFUNC:
- d.inspectList(n.Nbody)
+ switch n.Op() {
+ case ir.OAS:
+ d.inspect(n.Right())
+ case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
+ d.inspect(n.Rlist().First())
+ case ir.ODCLFUNC:
+ d.inspectList(n.Body())
default:
- Fatalf("unexpected Op: %v", n.Op)
+ base.Fatalf("unexpected Op: %v", n.Op())
}
return d.seen
}
type initDeps struct {
transitive bool
- seen NodeSet
+ seen ir.NameSet
+ cvisit func(ir.Node)
+}
+
+// cachedVisit returns d.visit as a func value, allocating the method-value
+// closure at most once (in d.cvisit) so that repeated inspect/inspectList
+// calls do not re-allocate it.
+func (d *initDeps) cachedVisit() func(ir.Node) {
+	if d.cvisit == nil {
+		d.cvisit = d.visit // cache closure
+	}
+	return d.cvisit
+}
-func (d *initDeps) inspect(n *Node) { inspect(n, d.visit) }
-func (d *initDeps) inspectList(l Nodes) { inspectList(l, d.visit) }
+func (d *initDeps) inspect(n ir.Node) { ir.Visit(n, d.cachedVisit()) }
+func (d *initDeps) inspectList(l ir.Nodes) { ir.VisitList(l, d.cachedVisit()) }
// visit calls foundDep on any package-level functions or variables
// referenced by n, if any.
-func (d *initDeps) visit(n *Node) bool {
- switch n.Op {
- case ONAME:
- if n.isMethodExpression() {
- d.foundDep(asNode(n.Type.FuncType().Nname))
- return false
- }
+func (d *initDeps) visit(n ir.Node) {
+ switch n.Op() {
+ case ir.OMETHEXPR:
+ d.foundDep(methodExprName(n))
+ case ir.ONAME:
+ n := n.(*ir.Name)
switch n.Class() {
- case PEXTERN, PFUNC:
+ case ir.PEXTERN, ir.PFUNC:
d.foundDep(n)
}
- case OCLOSURE:
- d.inspectList(n.Func.Closure.Nbody)
+ case ir.OCLOSURE:
+ n := n.(*ir.ClosureExpr)
+ d.inspectList(n.Func().Body())
- case ODOTMETH, OCALLPART:
- d.foundDep(asNode(n.Type.FuncType().Nname))
+ case ir.ODOTMETH, ir.OCALLPART:
+ d.foundDep(methodExprName(n))
}
-
- return true
}
// foundDep records that we've found a dependency on n by adding it to
// seen.
-func (d *initDeps) foundDep(n *Node) {
+func (d *initDeps) foundDep(n *ir.Name) {
// Can happen with method expressions involving interface
// types; e.g., fixedbugs/issue4495.go.
if n == nil {
// Names without definitions aren't interesting as far as
// initialization ordering goes.
- if n.Name.Defn == nil {
+ if n.Defn == nil {
return
}
return
}
d.seen.Add(n)
- if d.transitive && n.Class() == PFUNC {
- d.inspectList(n.Name.Defn.Nbody)
+ if d.transitive && n.Class() == ir.PFUNC {
+ d.inspectList(n.Defn.(*ir.Func).Body())
}
}
// an OAS node's Pos may not be unique. For example, given the
// declaration "var a, b = f(), g()", "a" must be ordered before "b",
// but both OAS nodes use the "=" token's position as their Pos.
-type declOrder []*Node
+type declOrder []ir.Node
-func (s declOrder) Len() int { return len(s) }
-func (s declOrder) Less(i, j int) bool { return firstLHS(s[i]).Pos.Before(firstLHS(s[j]).Pos) }
-func (s declOrder) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s declOrder) Len() int { return len(s) }
+func (s declOrder) Less(i, j int) bool {
+ return firstLHS(s[i]).Pos().Before(firstLHS(s[j]).Pos())
+}
+func (s declOrder) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s *declOrder) Push(x interface{}) { *s = append(*s, x.(*Node)) }
+func (s *declOrder) Push(x interface{}) { *s = append(*s, x.(ir.Node)) }
func (s *declOrder) Pop() interface{} {
n := (*s)[len(*s)-1]
*s = (*s)[:len(*s)-1]
// firstLHS returns the first expression on the left-hand side of
// assignment n.
-func firstLHS(n *Node) *Node {
- switch n.Op {
- case OAS:
- return n.Left
- case OAS2DOTTYPE, OAS2FUNC, OAS2RECV, OAS2MAPR:
- return n.List.First()
+func firstLHS(n ir.Node) *ir.Name {
+ switch n.Op() {
+ case ir.OAS:
+ return n.Left().Name()
+ case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2RECV, ir.OAS2MAPR:
+ return n.List().First().Name()
}
- Fatalf("unexpected Op: %v", n.Op)
+ base.Fatalf("unexpected Op: %v", n.Op())
return nil
}
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
+ "errors"
"fmt"
+ "go/constant"
"strings"
)
+// IsIntrinsicCall reports whether the compiler back end will treat the call as an intrinsic operation.
+var IsIntrinsicCall = func(*ir.CallExpr) bool { return false }
+
// Inlining budget parameters, gathered in one place
const (
inlineMaxBudget = 80
inlineBigFunctionMaxCost = 20 // Max cost of inlinee when inlining into a "big" function.
)
+// InlinePackage runs the inlining pass over the whole package: it walks
+// Target.Decls bottom-up (callees before callers, grouped into strongly
+// connected components by visitBottomUp), marks each eligible function as
+// inlinable via caninl, and then rewrites inlinable calls inside it via
+// inlcalls. A cycle confined to a single non-closure function is never
+// marked inlinable; cycles spanning multiple functions are allowed.
+func InlinePackage() {
+	// Find functions that can be inlined and clone them before walk expands them.
+	visitBottomUp(Target.Decls, func(list []*ir.Func, recursive bool) {
+		numfns := numNonClosures(list)
+		for _, n := range list {
+			if !recursive || numfns > 1 {
+				// We allow inlining if there is no
+				// recursion, or the recursion cycle is
+				// across more than one function.
+				caninl(n)
+			} else {
+				if base.Flag.LowerM > 1 {
+					fmt.Printf("%v: cannot inline %v: recursive\n", ir.Line(n), n.Nname)
+				}
+			}
+			// Rewrite calls inside n regardless of whether n itself
+			// was marked inlinable.
+			inlcalls(n)
+		}
+	})
+}
+
// Get the function's package. For ordinary functions it's on the ->sym, but for imported methods
// the ->sym can be re-used in the local package, so peel it off the receiver's type.
-func fnpkg(fn *Node) *types.Pkg {
- if fn.IsMethod() {
+func fnpkg(fn *ir.Name) *types.Pkg {
+ if ir.IsMethod(fn) {
// method
- rcvr := fn.Type.Recv().Type
+ rcvr := fn.Type().Recv().Type
if rcvr.IsPtr() {
rcvr = rcvr.Elem()
}
- if rcvr.Sym == nil {
- Fatalf("receiver with no sym: [%v] %L (%v)", fn.Sym, fn, rcvr)
+ if rcvr.Sym() == nil {
+ base.Fatalf("receiver with no sym: [%v] %L (%v)", fn.Sym(), fn, rcvr)
}
- return rcvr.Sym.Pkg
+ return rcvr.Sym().Pkg
}
// non-method
- return fn.Sym.Pkg
+ return fn.Sym().Pkg
}
// Lazy typechecking of imported bodies. For local functions, caninl will set ->typecheck
// because they're a copy of an already checked body.
-func typecheckinl(fn *Node) {
- lno := setlineno(fn)
+func typecheckinl(fn *ir.Func) {
+ lno := setlineno(fn.Nname)
expandInline(fn)
// their bodies may refer to unsafe as long as the package
// was marked safe during import (which was checked then).
// the ->inl of a local function has been typechecked before caninl copied it.
- pkg := fnpkg(fn)
+ pkg := fnpkg(fn.Nname)
- if pkg == localpkg || pkg == nil {
+ if pkg == types.LocalPkg || pkg == nil {
return // typecheckinl on local function
}
- if Debug.m > 2 || Debug_export != 0 {
- fmt.Printf("typecheck import [%v] %L { %#v }\n", fn.Sym, fn, asNodes(fn.Func.Inl.Body))
+ if base.Flag.LowerM > 2 || base.Debug.Export != 0 {
+ fmt.Printf("typecheck import [%v] %L { %v }\n", fn.Sym(), fn, ir.AsNodes(fn.Inl.Body))
}
savefn := Curfn
Curfn = fn
- typecheckslice(fn.Func.Inl.Body, ctxStmt)
+ typecheckslice(fn.Inl.Body, ctxStmt)
Curfn = savefn
// During expandInline (which imports fn.Func.Inl.Body),
// to fn.Func.Inl.Dcl for consistency with how local functions
// behave. (Append because typecheckinl may be called multiple
// times.)
- fn.Func.Inl.Dcl = append(fn.Func.Inl.Dcl, fn.Func.Dcl...)
- fn.Func.Dcl = nil
+ fn.Inl.Dcl = append(fn.Inl.Dcl, fn.Dcl...)
+ fn.Dcl = nil
- lineno = lno
+ base.Pos = lno
}
// Caninl determines whether fn is inlineable.
// If so, caninl saves fn->nbody in fn->inl and substitutes it with a copy.
// fn and ->nbody will already have been typechecked.
-func caninl(fn *Node) {
- if fn.Op != ODCLFUNC {
- Fatalf("caninl %v", fn)
- }
- if fn.Func.Nname == nil {
- Fatalf("caninl no nname %+v", fn)
+func caninl(fn *ir.Func) {
+ if fn.Nname == nil {
+ base.Fatalf("caninl no nname %+v", fn)
}
var reason string // reason, if any, that the function was not inlined
- if Debug.m > 1 || logopt.Enabled() {
+ if base.Flag.LowerM > 1 || logopt.Enabled() {
defer func() {
if reason != "" {
- if Debug.m > 1 {
- fmt.Printf("%v: cannot inline %v: %s\n", fn.Line(), fn.Func.Nname, reason)
+ if base.Flag.LowerM > 1 {
+ fmt.Printf("%v: cannot inline %v: %s\n", ir.Line(fn), fn.Nname, reason)
}
if logopt.Enabled() {
- logopt.LogOpt(fn.Pos, "cannotInlineFunction", "inline", fn.funcname(), reason)
+ logopt.LogOpt(fn.Pos(), "cannotInlineFunction", "inline", ir.FuncName(fn), reason)
}
}
}()
}
// If marked "go:noinline", don't inline
- if fn.Func.Pragma&Noinline != 0 {
+ if fn.Pragma&ir.Noinline != 0 {
reason = "marked go:noinline"
return
}
// If marked "go:norace" and -race compilation, don't inline.
- if flag_race && fn.Func.Pragma&Norace != 0 {
+ if base.Flag.Race && fn.Pragma&ir.Norace != 0 {
reason = "marked go:norace with -race compilation"
return
}
// If marked "go:nocheckptr" and -d checkptr compilation, don't inline.
- if Debug_checkptr != 0 && fn.Func.Pragma&NoCheckPtr != 0 {
+ if base.Debug.Checkptr != 0 && fn.Pragma&ir.NoCheckPtr != 0 {
reason = "marked go:nocheckptr"
return
}
// If marked "go:cgo_unsafe_args", don't inline, since the
// function makes assumptions about its argument frame layout.
- if fn.Func.Pragma&CgoUnsafeArgs != 0 {
+ if fn.Pragma&ir.CgoUnsafeArgs != 0 {
reason = "marked go:cgo_unsafe_args"
return
}
// If marked as "go:uintptrescapes", don't inline, since the
// escape information is lost during inlining.
- if fn.Func.Pragma&UintptrEscapes != 0 {
+ if fn.Pragma&ir.UintptrEscapes != 0 {
reason = "marked as having an escaping uintptr argument"
return
}
// granularity, so inlining yeswritebarrierrec functions can
// confuse it (#22342). As a workaround, disallow inlining
// them for now.
- if fn.Func.Pragma&Yeswritebarrierrec != 0 {
+ if fn.Pragma&ir.Yeswritebarrierrec != 0 {
reason = "marked go:yeswritebarrierrec"
return
}
// If fn has no body (is defined outside of Go), cannot inline it.
- if fn.Nbody.Len() == 0 {
+ if fn.Body().Len() == 0 {
reason = "no function body"
return
}
if fn.Typecheck() == 0 {
- Fatalf("caninl on non-typechecked function %v", fn)
+ base.Fatalf("caninl on non-typechecked function %v", fn)
}
- n := fn.Func.Nname
- if n.Func.InlinabilityChecked() {
+ n := fn.Nname
+ if n.Func().InlinabilityChecked() {
return
}
- defer n.Func.SetInlinabilityChecked(true)
+ defer n.Func().SetInlinabilityChecked(true)
cc := int32(inlineExtraCallCost)
- if Debug.l == 4 {
+ if base.Flag.LowerL == 4 {
cc = 1 // this appears to yield better performance than 0.
}
visitor := hairyVisitor{
budget: inlineMaxBudget,
extraCallCost: cc,
- usedLocals: make(map[*Node]bool),
+ usedLocals: make(map[*ir.Name]bool),
}
- if visitor.visitList(fn.Nbody) {
+ if visitor.tooHairy(fn) {
reason = visitor.reason
return
}
- if visitor.budget < 0 {
- reason = fmt.Sprintf("function too complex: cost %d exceeds budget %d", inlineMaxBudget-visitor.budget, inlineMaxBudget)
- return
- }
- n.Func.Inl = &Inline{
+ n.Func().Inl = &ir.Inline{
Cost: inlineMaxBudget - visitor.budget,
- Dcl: inlcopylist(pruneUnusedAutos(n.Name.Defn.Func.Dcl, &visitor)),
- Body: inlcopylist(fn.Nbody.Slice()),
+ Dcl: pruneUnusedAutos(n.Defn.(*ir.Func).Func().Dcl, &visitor),
+ Body: ir.DeepCopyList(src.NoXPos, fn.Body().Slice()),
}
- // hack, TODO, check for better way to link method nodes back to the thing with the ->inl
- // this is so export can find the body of a method
- fn.Type.FuncType().Nname = asTypesNode(n)
-
- if Debug.m > 1 {
- fmt.Printf("%v: can inline %#v with cost %d as: %#v { %#v }\n", fn.Line(), n, inlineMaxBudget-visitor.budget, fn.Type, asNodes(n.Func.Inl.Body))
- } else if Debug.m != 0 {
- fmt.Printf("%v: can inline %v\n", fn.Line(), n)
+ if base.Flag.LowerM > 1 {
+ fmt.Printf("%v: can inline %v with cost %d as: %v { %v }\n", ir.Line(fn), n, inlineMaxBudget-visitor.budget, fn.Type(), ir.AsNodes(n.Func().Inl.Body))
+ } else if base.Flag.LowerM != 0 {
+ fmt.Printf("%v: can inline %v\n", ir.Line(fn), n)
}
if logopt.Enabled() {
- logopt.LogOpt(fn.Pos, "canInlineFunction", "inline", fn.funcname(), fmt.Sprintf("cost: %d", inlineMaxBudget-visitor.budget))
+ logopt.LogOpt(fn.Pos(), "canInlineFunction", "inline", ir.FuncName(fn), fmt.Sprintf("cost: %d", inlineMaxBudget-visitor.budget))
}
}
// inlFlood marks n's inline body for export and recursively ensures
// all called functions are marked too.
-func inlFlood(n *Node) {
+func inlFlood(n *ir.Name, exportsym func(*ir.Name)) {
if n == nil {
return
}
- if n.Op != ONAME || n.Class() != PFUNC {
- Fatalf("inlFlood: unexpected %v, %v, %v", n, n.Op, n.Class())
+ if n.Op() != ir.ONAME || n.Class() != ir.PFUNC {
+ base.Fatalf("inlFlood: unexpected %v, %v, %v", n, n.Op(), n.Class())
}
- if n.Func == nil {
- Fatalf("inlFlood: missing Func on %v", n)
+ fn := n.Func()
+ if fn == nil {
+ base.Fatalf("inlFlood: missing Func on %v", n)
}
- if n.Func.Inl == nil {
+ if fn.Inl == nil {
return
}
- if n.Func.ExportInline() {
+ if fn.ExportInline() {
return
}
- n.Func.SetExportInline(true)
+ fn.SetExportInline(true)
- typecheckinl(n)
+ typecheckinl(fn)
// Recursively identify all referenced functions for
// reexport. We want to include even non-called functions,
// because after inlining they might be callable.
- inspectList(asNodes(n.Func.Inl.Body), func(n *Node) bool {
- switch n.Op {
- case ONAME:
+ ir.VisitList(ir.AsNodes(fn.Inl.Body), func(n ir.Node) {
+ switch n.Op() {
+ case ir.OMETHEXPR, ir.ODOTMETH:
+ inlFlood(methodExprName(n), exportsym)
+
+ case ir.ONAME:
+ n := n.(*ir.Name)
switch n.Class() {
- case PFUNC:
- if n.isMethodExpression() {
- inlFlood(asNode(n.Type.Nname()))
- } else {
- inlFlood(n)
- exportsym(n)
- }
- case PEXTERN:
+ case ir.PFUNC:
+ inlFlood(n, exportsym)
+ exportsym(n)
+ case ir.PEXTERN:
exportsym(n)
}
- case ODOTMETH:
- fn := asNode(n.Type.Nname())
- inlFlood(fn)
-
- case OCALLPART:
+ case ir.OCALLPART:
// Okay, because we don't yet inline indirect
// calls to method values.
- case OCLOSURE:
+ case ir.OCLOSURE:
// If the closure is inlinable, we'll need to
// flood it too. But today we don't support
// inlining functions that contain closures.
//
// When we do, we'll probably want:
// inlFlood(n.Func.Closure.Func.Nname)
- Fatalf("unexpected closure in inlinable function")
+ base.Fatalf("unexpected closure in inlinable function")
}
- return true
})
}
budget int32
reason string
extraCallCost int32
- usedLocals map[*Node]bool
+ usedLocals map[*ir.Name]bool
+ do func(ir.Node) error
}
-// Look for anything we want to punt on.
-func (v *hairyVisitor) visitList(ll Nodes) bool {
- for _, n := range ll.Slice() {
- if v.visit(n) {
- return true
- }
+// errBudget is returned by doNode when the inlining cost budget is exhausted.
+var errBudget = errors.New("too expensive")
+
+// tooHairy reports whether fn is too complex to inline. It walks fn's
+// children with v.doNode; a non-nil error from the walk vetoes inlining
+// outright, and a negative remaining budget afterwards means the function
+// exceeded the cost limit. In either case the explanation is recorded in
+// v.reason.
+func (v *hairyVisitor) tooHairy(fn *ir.Func) bool {
+	v.do = v.doNode // cache closure
+
+	err := ir.DoChildren(fn, v.do)
+	if err != nil {
+		v.reason = err.Error()
+		return true
+	}
+	if v.budget < 0 {
+		v.reason = fmt.Sprintf("function too complex: cost %d exceeds budget %d", inlineMaxBudget-v.budget, inlineMaxBudget)
+		return true
+	}
	return false
}
-func (v *hairyVisitor) visit(n *Node) bool {
+func (v *hairyVisitor) doNode(n ir.Node) error {
if n == nil {
- return false
+ return nil
}
- switch n.Op {
+ switch n.Op() {
// Call is okay if inlinable and we have the budget for the body.
- case OCALLFUNC:
+ case ir.OCALLFUNC:
+ n := n.(*ir.CallExpr)
// Functions that call runtime.getcaller{pc,sp} can not be inlined
// because getcaller{pc,sp} expect a pointer to the caller's first argument.
//
// runtime.throw is a "cheap call" like panic in normal code.
- if n.Left.Op == ONAME && n.Left.Class() == PFUNC && isRuntimePkg(n.Left.Sym.Pkg) {
- fn := n.Left.Sym.Name
- if fn == "getcallerpc" || fn == "getcallersp" {
- v.reason = "call to " + fn
- return true
- }
- if fn == "throw" {
- v.budget -= inlineExtraThrowCost
- break
+ if n.Left().Op() == ir.ONAME {
+ name := n.Left().(*ir.Name)
+ if name.Class() == ir.PFUNC && isRuntimePkg(name.Sym().Pkg) {
+ fn := name.Sym().Name
+ if fn == "getcallerpc" || fn == "getcallersp" {
+ return errors.New("call to " + fn)
+ }
+ if fn == "throw" {
+ v.budget -= inlineExtraThrowCost
+ break
+ }
}
}
- if isIntrinsicCall(n) {
+ if IsIntrinsicCall(n) {
// Treat like any other node.
break
}
- if fn := inlCallee(n.Left); fn != nil && fn.Func.Inl != nil {
- v.budget -= fn.Func.Inl.Cost
+ if fn := inlCallee(n.Left()); fn != nil && fn.Inl != nil {
+ v.budget -= fn.Inl.Cost
break
}
v.budget -= v.extraCallCost
// Call is okay if inlinable and we have the budget for the body.
- case OCALLMETH:
- t := n.Left.Type
+ case ir.OCALLMETH:
+ t := n.Left().Type()
if t == nil {
- Fatalf("no function type for [%p] %+v\n", n.Left, n.Left)
- }
- if t.Nname() == nil {
- Fatalf("no function definition for [%p] %+v\n", t, t)
+ base.Fatalf("no function type for [%p] %+v\n", n.Left(), n.Left())
}
- if isRuntimePkg(n.Left.Sym.Pkg) {
- fn := n.Left.Sym.Name
+ if isRuntimePkg(n.Left().Sym().Pkg) {
+ fn := n.Left().Sym().Name
if fn == "heapBits.nextArena" {
// Special case: explicitly allow
// mid-stack inlining of
break
}
}
- if inlfn := asNode(t.FuncType().Nname).Func; inlfn.Inl != nil {
+ if inlfn := methodExprName(n.Left()).Func(); inlfn.Inl != nil {
v.budget -= inlfn.Inl.Cost
break
}
v.budget -= v.extraCallCost
// Things that are too hairy, irrespective of the budget
- case OCALL, OCALLINTER:
+ case ir.OCALL, ir.OCALLINTER:
// Call cost for non-leaf inlining.
v.budget -= v.extraCallCost
- case OPANIC:
+ case ir.OPANIC:
v.budget -= inlineExtraPanicCost
- case ORECOVER:
+ case ir.ORECOVER:
// recover matches the argument frame pointer to find
// the right panic value, so it needs an argument frame.
- v.reason = "call to recover"
- return true
-
- case OCLOSURE,
- ORANGE,
- OSELECT,
- OGO,
- ODEFER,
- ODCLTYPE, // can't print yet
- ORETJMP:
- v.reason = "unhandled op " + n.Op.String()
- return true
-
- case OAPPEND:
+ return errors.New("call to recover")
+
+ case ir.OCLOSURE,
+ ir.ORANGE,
+ ir.OSELECT,
+ ir.OGO,
+ ir.ODEFER,
+ ir.ODCLTYPE, // can't print yet
+ ir.ORETJMP:
+ return errors.New("unhandled op " + n.Op().String())
+
+ case ir.OAPPEND:
v.budget -= inlineExtraAppendCost
- case ODCLCONST, OEMPTY, OFALL:
+ case ir.ODCLCONST, ir.OFALL:
// These nodes don't produce code; omit from inlining budget.
- return false
+ return nil
- case OLABEL:
- // TODO(mdempsky): Add support for inlining labeled control statements.
- if n.labeledControl() != nil {
- v.reason = "labeled control"
- return true
+ case ir.OFOR, ir.OFORUNTIL:
+ if n.Sym() != nil {
+ return errors.New("labeled control")
+ }
+ case ir.OSWITCH:
+ if n.Sym() != nil {
+ return errors.New("labeled control")
}
+ // case ir.ORANGE, ir.OSELECT in "unhandled" above
- case OBREAK, OCONTINUE:
- if n.Sym != nil {
- // Should have short-circuited due to labeledControl above.
- Fatalf("unexpected labeled break/continue: %v", n)
+ case ir.OBREAK, ir.OCONTINUE:
+ if n.Sym() != nil {
+ // Should have short-circuited due to labeled control error above.
+ base.Fatalf("unexpected labeled break/continue: %v", n)
}
- case OIF:
- if Isconst(n.Left, CTBOOL) {
+ case ir.OIF:
+ if ir.IsConst(n.Left(), constant.Bool) {
// This if and the condition cost nothing.
- return v.visitList(n.Ninit) || v.visitList(n.Nbody) ||
- v.visitList(n.Rlist)
+ // TODO(rsc): It seems strange that we visit the dead branch.
+ if err := ir.DoList(n.Init(), v.do); err != nil {
+ return err
+ }
+ if err := ir.DoList(n.Body(), v.do); err != nil {
+ return err
+ }
+ if err := ir.DoList(n.Rlist(), v.do); err != nil {
+ return err
+ }
+ return nil
}
- case ONAME:
- if n.Class() == PAUTO {
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ if n.Class() == ir.PAUTO {
v.usedLocals[n] = true
}
+ case ir.OBLOCK:
+ // The only OBLOCK we should see at this point is an empty one.
+		// In any event, let the ir.DoChildren call below take care of its statements,
+ // and don't charge for the OBLOCK itself. The ++ undoes the -- below.
+ v.budget++
+
+ case ir.OCALLPART, ir.OSLICELIT:
+ v.budget-- // Hack for toolstash -cmp.
}
v.budget--
// When debugging, don't stop early, to get full cost of inlining this function
- if v.budget < 0 && Debug.m < 2 && !logopt.Enabled() {
- return true
+ if v.budget < 0 && base.Flag.LowerM < 2 && !logopt.Enabled() {
+ return errBudget
}
- return v.visit(n.Left) || v.visit(n.Right) ||
- v.visitList(n.List) || v.visitList(n.Rlist) ||
- v.visitList(n.Ninit) || v.visitList(n.Nbody)
-}
-
-// inlcopylist (together with inlcopy) recursively copies a list of nodes, except
-// that it keeps the same ONAME, OTYPE, and OLITERAL nodes. It is used for copying
-// the body and dcls of an inlineable function.
-func inlcopylist(ll []*Node) []*Node {
- s := make([]*Node, 0, len(ll))
- for _, n := range ll {
- s = append(s, inlcopy(n))
- }
- return s
+ return ir.DoChildren(n, v.do)
}
-func inlcopy(n *Node) *Node {
- if n == nil {
- return nil
- }
-
- switch n.Op {
- case ONAME, OTYPE, OLITERAL:
- return n
- }
-
- m := n.copy()
- if n.Op != OCALLPART && m.Func != nil {
- Fatalf("unexpected Func: %v", m)
- }
- m.Left = inlcopy(n.Left)
- m.Right = inlcopy(n.Right)
- m.List.Set(inlcopylist(n.List.Slice()))
- m.Rlist.Set(inlcopylist(n.Rlist.Slice()))
- m.Ninit.Set(inlcopylist(n.Ninit.Slice()))
- m.Nbody.Set(inlcopylist(n.Nbody.Slice()))
-
- return m
-}
-
-func countNodes(n *Node) int {
- if n == nil {
- return 0
- }
- cnt := 1
- cnt += countNodes(n.Left)
- cnt += countNodes(n.Right)
- for _, n1 := range n.Ninit.Slice() {
- cnt += countNodes(n1)
- }
- for _, n1 := range n.Nbody.Slice() {
- cnt += countNodes(n1)
- }
- for _, n1 := range n.List.Slice() {
- cnt += countNodes(n1)
- }
- for _, n1 := range n.Rlist.Slice() {
- cnt += countNodes(n1)
- }
- return cnt
+// isBigFunc reports whether fn contains at least inlineBigFunctionNodes
+// IR nodes. ir.Any stops as soon as the countdown hits zero, so large
+// functions are detected without walking their entire bodies.
+func isBigFunc(fn *ir.Func) bool {
+	budget := inlineBigFunctionNodes
+	return ir.Any(fn, func(n ir.Node) bool {
+		budget--
+		return budget <= 0
+	})
}
// Inlcalls/nodelist/node walks fn's statements and expressions and substitutes any
// calls made to inlineable functions. This is the external entry point.
-func inlcalls(fn *Node) {
+func inlcalls(fn *ir.Func) {
savefn := Curfn
Curfn = fn
maxCost := int32(inlineMaxBudget)
- if countNodes(fn) >= inlineBigFunctionNodes {
+ if isBigFunc(fn) {
maxCost = inlineBigFunctionMaxCost
}
// Map to keep track of functions that have been inlined at a particular
// but allow inlining if there is a recursion cycle of many functions.
// Most likely, the inlining will stop before we even hit the beginning of
// the cycle again, but the map catches the unusual case.
- inlMap := make(map[*Node]bool)
- fn = inlnode(fn, maxCost, inlMap)
- if fn != Curfn {
- Fatalf("inlnode replaced curfn")
+ inlMap := make(map[*ir.Func]bool)
+ var edit func(ir.Node) ir.Node
+ edit = func(n ir.Node) ir.Node {
+ return inlnode(n, maxCost, inlMap, edit)
}
+ ir.EditChildren(fn, edit)
Curfn = savefn
}
// Turn an OINLCALL into a statement.
-func inlconv2stmt(n *Node) {
- n.Op = OBLOCK
-
- // n->ninit stays
- n.List.Set(n.Nbody.Slice())
-
- n.Nbody.Set(nil)
- n.Rlist.Set(nil)
+func inlconv2stmt(inlcall *ir.InlinedCallExpr) ir.Node {
+ n := ir.NodAt(inlcall.Pos(), ir.OBLOCK, nil, nil)
+ n.SetList(inlcall.Init())
+ n.PtrList().AppendNodes(inlcall.PtrBody())
+ return n
}
// Turn an OINLCALL into a single valued expression.
// The result of inlconv2expr MUST be assigned back to n, e.g.
// n.Left = inlconv2expr(n.Left)
-func inlconv2expr(n *Node) *Node {
- r := n.Rlist.First()
- return addinit(r, append(n.Ninit.Slice(), n.Nbody.Slice()...))
+func inlconv2expr(n *ir.InlinedCallExpr) ir.Node {
+ r := n.Rlist().First()
+ return initExpr(append(n.Init().Slice(), n.Body().Slice()...), r)
}
// Turn the rlist (with the return values) of the OINLCALL in
// n into an expression list lumping the ninit and body
// containing the inlined statements on the first list element so
-// order will be preserved Used in return, oas2func and call
+// order will be preserved. Used in return, oas2func and call
// statements.
-func inlconv2list(n *Node) []*Node {
- if n.Op != OINLCALL || n.Rlist.Len() == 0 {
- Fatalf("inlconv2list %+v\n", n)
+func inlconv2list(n *ir.InlinedCallExpr) []ir.Node {
+ if n.Op() != ir.OINLCALL || n.Rlist().Len() == 0 {
+ base.Fatalf("inlconv2list %+v\n", n)
}
- s := n.Rlist.Slice()
- s[0] = addinit(s[0], append(n.Ninit.Slice(), n.Nbody.Slice()...))
+ s := n.Rlist().Slice()
+ s[0] = initExpr(append(n.Init().Slice(), n.Body().Slice()...), s[0])
return s
}
-func inlnodelist(l Nodes, maxCost int32, inlMap map[*Node]bool) {
- s := l.Slice()
- for i := range s {
- s[i] = inlnode(s[i], maxCost, inlMap)
- }
-}
-
// inlnode recurses over the tree to find inlineable calls, which will
// be turned into OINLCALLs by mkinlcall. When the recursion comes
// back up will examine left, right, list, rlist, ninit, ntest, nincr,
// shorter and less complicated.
// The result of inlnode MUST be assigned back to n, e.g.
// n.Left = inlnode(n.Left)
-func inlnode(n *Node, maxCost int32, inlMap map[*Node]bool) *Node {
+func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.Node) ir.Node) ir.Node {
if n == nil {
return n
}
- switch n.Op {
- case ODEFER, OGO:
- switch n.Left.Op {
- case OCALLFUNC, OCALLMETH:
- n.Left.SetNoInline(true)
+ switch n.Op() {
+ case ir.ODEFER, ir.OGO:
+ switch call := n.Left(); call.Op() {
+ case ir.OCALLFUNC, ir.OCALLMETH:
+ call.SetNoInline(true)
}
// TODO do them here (or earlier),
// so escape analysis can avoid more heapmoves.
- case OCLOSURE:
+ case ir.OCLOSURE:
return n
- case OCALLMETH:
+ case ir.OCALLMETH:
// Prevent inlining some reflect.Value methods when using checkptr,
// even when package reflect was compiled without it (#35073).
- if s := n.Left.Sym; Debug_checkptr != 0 && isReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") {
+ if s := n.Left().Sym(); base.Debug.Checkptr != 0 && isReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") {
return n
}
}
lno := setlineno(n)
- inlnodelist(n.Ninit, maxCost, inlMap)
- for _, n1 := range n.Ninit.Slice() {
- if n1.Op == OINLCALL {
- inlconv2stmt(n1)
- }
- }
-
- n.Left = inlnode(n.Left, maxCost, inlMap)
- if n.Left != nil && n.Left.Op == OINLCALL {
- n.Left = inlconv2expr(n.Left)
- }
-
- n.Right = inlnode(n.Right, maxCost, inlMap)
- if n.Right != nil && n.Right.Op == OINLCALL {
- if n.Op == OFOR || n.Op == OFORUNTIL {
- inlconv2stmt(n.Right)
- } else if n.Op == OAS2FUNC {
- n.Rlist.Set(inlconv2list(n.Right))
- n.Right = nil
- n.Op = OAS2
- n.SetTypecheck(0)
- n = typecheck(n, ctxStmt)
- } else {
- n.Right = inlconv2expr(n.Right)
- }
- }
-
- inlnodelist(n.List, maxCost, inlMap)
- if n.Op == OBLOCK {
- for _, n2 := range n.List.Slice() {
- if n2.Op == OINLCALL {
- inlconv2stmt(n2)
- }
- }
- } else {
- s := n.List.Slice()
- for i1, n1 := range s {
- if n1 != nil && n1.Op == OINLCALL {
- s[i1] = inlconv2expr(s[i1])
- }
- }
- }
-
- inlnodelist(n.Rlist, maxCost, inlMap)
- s := n.Rlist.Slice()
- for i1, n1 := range s {
- if n1.Op == OINLCALL {
- if n.Op == OIF {
- inlconv2stmt(n1)
- } else {
- s[i1] = inlconv2expr(s[i1])
- }
- }
- }
+ ir.EditChildren(n, edit)
- inlnodelist(n.Nbody, maxCost, inlMap)
- for _, n := range n.Nbody.Slice() {
- if n.Op == OINLCALL {
- inlconv2stmt(n)
+ if as := n; as.Op() == ir.OAS2FUNC {
+ if as.Rlist().First().Op() == ir.OINLCALL {
+ as.PtrRlist().Set(inlconv2list(as.Rlist().First().(*ir.InlinedCallExpr)))
+ as.SetOp(ir.OAS2)
+ as.SetTypecheck(0)
+ n = typecheck(as, ctxStmt)
}
}
// with all the branches out of the way, it is now time to
// transmogrify this node itself unless inhibited by the
// switch at the top of this function.
- switch n.Op {
- case OCALLFUNC, OCALLMETH:
+ switch n.Op() {
+ case ir.OCALLFUNC, ir.OCALLMETH:
if n.NoInline() {
return n
}
}
- switch n.Op {
- case OCALLFUNC:
- if Debug.m > 3 {
- fmt.Printf("%v:call to func %+v\n", n.Line(), n.Left)
+ var call *ir.CallExpr
+ switch n.Op() {
+ case ir.OCALLFUNC:
+ call = n.(*ir.CallExpr)
+ if base.Flag.LowerM > 3 {
+ fmt.Printf("%v:call to func %+v\n", ir.Line(n), call.Left())
}
- if isIntrinsicCall(n) {
+ if IsIntrinsicCall(call) {
break
}
- if fn := inlCallee(n.Left); fn != nil && fn.Func.Inl != nil {
- n = mkinlcall(n, fn, maxCost, inlMap)
+ if fn := inlCallee(call.Left()); fn != nil && fn.Inl != nil {
+ n = mkinlcall(call, fn, maxCost, inlMap, edit)
}
- case OCALLMETH:
- if Debug.m > 3 {
- fmt.Printf("%v:call to meth %L\n", n.Line(), n.Left.Right)
+ case ir.OCALLMETH:
+ call = n.(*ir.CallExpr)
+ if base.Flag.LowerM > 3 {
+ fmt.Printf("%v:call to meth %v\n", ir.Line(n), call.Left().(*ir.SelectorExpr).Sel)
}
// typecheck should have resolved ODOTMETH->type, whose nname points to the actual function.
- if n.Left.Type == nil {
- Fatalf("no function type for [%p] %+v\n", n.Left, n.Left)
+ if call.Left().Type() == nil {
+ base.Fatalf("no function type for [%p] %+v\n", call.Left(), call.Left())
}
- if n.Left.Type.Nname() == nil {
- Fatalf("no function definition for [%p] %+v\n", n.Left.Type, n.Left.Type)
- }
+ n = mkinlcall(call, methodExprName(call.Left()).Func(), maxCost, inlMap, edit)
+ }
+
+ base.Pos = lno
- n = mkinlcall(n, asNode(n.Left.Type.FuncType().Nname), maxCost, inlMap)
+ if n.Op() == ir.OINLCALL {
+ ic := n.(*ir.InlinedCallExpr)
+ switch call.Use {
+ default:
+ ir.Dump("call", call)
+ base.Fatalf("call missing use")
+ case ir.CallUseExpr:
+ n = inlconv2expr(ic)
+ case ir.CallUseStmt:
+ n = inlconv2stmt(ic)
+ case ir.CallUseList:
+ // leave for caller to convert
+ }
}
- lineno = lno
return n
}
// inlCallee takes a function-typed expression and returns the underlying function ONAME
// that it refers to if statically known. Otherwise, it returns nil.
-func inlCallee(fn *Node) *Node {
+func inlCallee(fn ir.Node) *ir.Func {
fn = staticValue(fn)
- switch {
- case fn.Op == ONAME && fn.Class() == PFUNC:
- if fn.isMethodExpression() {
- n := asNode(fn.Type.Nname())
- // Check that receiver type matches fn.Left.
- // TODO(mdempsky): Handle implicit dereference
- // of pointer receiver argument?
- if n == nil || !types.Identical(n.Type.Recv().Type, fn.Left.Type) {
- return nil
- }
- return n
- }
- return fn
- case fn.Op == OCLOSURE:
- c := fn.Func.Closure
+ switch fn.Op() {
+ case ir.OMETHEXPR:
+ fn := fn.(*ir.MethodExpr)
+ n := methodExprName(fn)
+ // Check that receiver type matches fn.Left.
+ // TODO(mdempsky): Handle implicit dereference
+ // of pointer receiver argument?
+ if n == nil || !types.Identical(n.Type().Recv().Type, fn.T) {
+ return nil
+ }
+ return n.Func()
+ case ir.ONAME:
+ if fn.Class() == ir.PFUNC {
+ return fn.Func()
+ }
+ case ir.OCLOSURE:
+ fn := fn.(*ir.ClosureExpr)
+ c := fn.Func()
caninl(c)
- return c.Func.Nname
+ return c
}
return nil
}
-func staticValue(n *Node) *Node {
+func staticValue(n ir.Node) ir.Node {
for {
- if n.Op == OCONVNOP {
- n = n.Left
+ if n.Op() == ir.OCONVNOP {
+ n = n.(*ir.ConvExpr).Left()
continue
}
// staticValue1 implements a simple SSA-like optimization. If n is a local variable
// that is initialized and never reassigned, staticValue1 returns the initializer
// expression. Otherwise, it returns nil.
-func staticValue1(n *Node) *Node {
- if n.Op != ONAME || n.Class() != PAUTO || n.Name.Addrtaken() {
+func staticValue1(nn ir.Node) ir.Node {
+ if nn.Op() != ir.ONAME {
+ return nil
+ }
+ n := nn.(*ir.Name)
+ if n.Class() != ir.PAUTO || n.Name().Addrtaken() {
return nil
}
- defn := n.Name.Defn
+ defn := n.Name().Defn
if defn == nil {
return nil
}
- var rhs *Node
+ var rhs ir.Node
FindRHS:
- switch defn.Op {
- case OAS:
- rhs = defn.Right
- case OAS2:
- for i, lhs := range defn.List.Slice() {
+ switch defn.Op() {
+ case ir.OAS:
+ rhs = defn.Right()
+ case ir.OAS2:
+ for i, lhs := range defn.List().Slice() {
if lhs == n {
- rhs = defn.Rlist.Index(i)
+ rhs = defn.Rlist().Index(i)
break FindRHS
}
}
- Fatalf("%v missing from LHS of %v", n, defn)
+ base.Fatalf("%v missing from LHS of %v", n, defn)
default:
return nil
}
if rhs == nil {
- Fatalf("RHS is nil: %v", defn)
+ base.Fatalf("RHS is nil: %v", defn)
}
- unsafe, _ := reassigned(n)
- if unsafe {
+ if reassigned(n) {
return nil
}
// useful for -m output documenting the reason for inhibited optimizations.
// NB: global variables are always considered to be re-assigned.
// TODO: handle initial declaration not including an assignment and followed by a single assignment?
-func reassigned(n *Node) (bool, *Node) {
- if n.Op != ONAME {
- Fatalf("reassigned %v", n)
+func reassigned(name *ir.Name) bool {
+ if name.Op() != ir.ONAME {
+ base.Fatalf("reassigned %v", name)
}
// no way to reliably check for no-reassignment of globals, assume it can be
- if n.Name.Curfn == nil {
- return true, nil
- }
- f := n.Name.Curfn
- // There just might be a good reason for this although this can be pretty surprising:
- // local variables inside a closure have Curfn pointing to the OCLOSURE node instead
- // of the corresponding ODCLFUNC.
- // We need to walk the function body to check for reassignments so we follow the
- // linkage to the ODCLFUNC node as that is where body is held.
- if f.Op == OCLOSURE {
- f = f.Func.Closure
- }
- v := reassignVisitor{name: n}
- a := v.visitList(f.Nbody)
- return a != nil, a
-}
-
-type reassignVisitor struct {
- name *Node
-}
-
-func (v *reassignVisitor) visit(n *Node) *Node {
- if n == nil {
- return nil
+ if name.Curfn == nil {
+ return true
}
- switch n.Op {
- case OAS, OSELRECV:
- if n.Left == v.name && n != v.name.Name.Defn {
- return n
- }
- case OAS2, OAS2FUNC, OAS2MAPR, OAS2DOTTYPE, OAS2RECV:
- for _, p := range n.List.Slice() {
- if p == v.name && n != v.name.Name.Defn {
- return n
+ return ir.Any(name.Curfn, func(n ir.Node) bool {
+ switch n.Op() {
+ case ir.OAS:
+ if n.Left() == name && n != name.Defn {
+ return true
+ }
+ case ir.OAS2, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2DOTTYPE, ir.OAS2RECV, ir.OSELRECV2:
+ for _, p := range n.List().Slice() {
+ if p == name && n != name.Defn {
+ return true
+ }
}
}
- case OSELRECV2:
- if (n.Left == v.name || n.List.First() == v.name) && n != v.name.Name.Defn {
- return n
- }
- }
- if a := v.visit(n.Left); a != nil {
- return a
- }
- if a := v.visit(n.Right); a != nil {
- return a
- }
- if a := v.visitList(n.List); a != nil {
- return a
- }
- if a := v.visitList(n.Rlist); a != nil {
- return a
- }
- if a := v.visitList(n.Ninit); a != nil {
- return a
- }
- if a := v.visitList(n.Nbody); a != nil {
- return a
- }
- return nil
-}
-
-func (v *reassignVisitor) visitList(l Nodes) *Node {
- for _, n := range l.Slice() {
- if a := v.visit(n); a != nil {
- return a
- }
- }
- return nil
+ return false
+ })
}
-func inlParam(t *types.Field, as *Node, inlvars map[*Node]*Node) *Node {
- n := asNode(t.Nname)
- if n == nil || n.isBlank() {
- return nblank
+func inlParam(t *types.Field, as ir.Node, inlvars map[*ir.Name]ir.Node) ir.Node {
+ n := ir.AsNode(t.Nname)
+ if n == nil || ir.IsBlank(n) {
+ return ir.BlankNode
}
- inlvar := inlvars[n]
+ inlvar := inlvars[n.(*ir.Name)]
if inlvar == nil {
- Fatalf("missing inlvar for %v", n)
+ base.Fatalf("missing inlvar for %v", n)
}
- as.Ninit.Append(nod(ODCL, inlvar, nil))
- inlvar.Name.Defn = as
+ as.PtrInit().Append(ir.Nod(ir.ODCL, inlvar, nil))
+ inlvar.Name().Defn = as
return inlvar
}
var inlgen int
+// SSADumpInline gives the SSA back end a chance to dump the function
+// when producing output for debugging the compiler itself.
+var SSADumpInline = func(*ir.Func) {}
+
// If n is a call node (OCALLFUNC or OCALLMETH), and fn is an ONAME node for a
// function with an inlinable body, return an OINLCALL node that can replace n.
// The returned node's Ninit has the parameter assignments, the Nbody is the
// parameters.
// The result of mkinlcall MUST be assigned back to n, e.g.
// n.Left = mkinlcall(n.Left, fn, isddd)
-func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
- if fn.Func.Inl == nil {
+func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.Node) ir.Node) ir.Node {
+ if fn.Inl == nil {
if logopt.Enabled() {
- logopt.LogOpt(n.Pos, "cannotInlineCall", "inline", Curfn.funcname(),
- fmt.Sprintf("%s cannot be inlined", fn.pkgFuncName()))
+ logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(Curfn),
+ fmt.Sprintf("%s cannot be inlined", ir.PkgFuncName(fn)))
}
return n
}
- if fn.Func.Inl.Cost > maxCost {
+ if fn.Inl.Cost > maxCost {
// The inlined function body is too big. Typically we use this check to restrict
// inlining into very big functions. See issue 26546 and 17566.
if logopt.Enabled() {
- logopt.LogOpt(n.Pos, "cannotInlineCall", "inline", Curfn.funcname(),
- fmt.Sprintf("cost %d of %s exceeds max large caller cost %d", fn.Func.Inl.Cost, fn.pkgFuncName(), maxCost))
+ logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(Curfn),
+ fmt.Sprintf("cost %d of %s exceeds max large caller cost %d", fn.Inl.Cost, ir.PkgFuncName(fn), maxCost))
}
return n
}
- if fn == Curfn || fn.Name.Defn == Curfn {
+ if fn == Curfn {
// Can't recursively inline a function into itself.
if logopt.Enabled() {
- logopt.LogOpt(n.Pos, "cannotInlineCall", "inline", fmt.Sprintf("recursive call to %s", Curfn.funcname()))
+ logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", fmt.Sprintf("recursive call to %s", ir.FuncName(Curfn)))
}
return n
}
- if instrumenting && isRuntimePkg(fn.Sym.Pkg) {
+ if instrumenting && isRuntimePkg(fn.Sym().Pkg) {
// Runtime package must not be instrumented.
// Instrument skips runtime package. However, some runtime code can be
// inlined into other packages and instrumented there. To avoid this,
}
if inlMap[fn] {
- if Debug.m > 1 {
- fmt.Printf("%v: cannot inline %v into %v: repeated recursive cycle\n", n.Line(), fn, Curfn.funcname())
+ if base.Flag.LowerM > 1 {
+ fmt.Printf("%v: cannot inline %v into %v: repeated recursive cycle\n", ir.Line(n), fn, ir.FuncName(Curfn))
}
return n
}
defer func() {
inlMap[fn] = false
}()
- if Debug_typecheckinl == 0 {
+ if base.Debug.TypecheckInl == 0 {
typecheckinl(fn)
}
// We have a function node, and it has an inlineable body.
- if Debug.m > 1 {
- fmt.Printf("%v: inlining call to %v %#v { %#v }\n", n.Line(), fn.Sym, fn.Type, asNodes(fn.Func.Inl.Body))
- } else if Debug.m != 0 {
- fmt.Printf("%v: inlining call to %v\n", n.Line(), fn)
+ if base.Flag.LowerM > 1 {
+ fmt.Printf("%v: inlining call to %v %v { %v }\n", ir.Line(n), fn.Sym(), fn.Type(), ir.AsNodes(fn.Inl.Body))
+ } else if base.Flag.LowerM != 0 {
+ fmt.Printf("%v: inlining call to %v\n", ir.Line(n), fn)
}
- if Debug.m > 2 {
- fmt.Printf("%v: Before inlining: %+v\n", n.Line(), n)
+ if base.Flag.LowerM > 2 {
+ fmt.Printf("%v: Before inlining: %+v\n", ir.Line(n), n)
}
- if ssaDump != "" && ssaDump == Curfn.funcname() {
- ssaDumpInlined = append(ssaDumpInlined, fn)
- }
+ SSADumpInline(fn)
- ninit := n.Ninit
+ ninit := n.Init()
// For normal function calls, the function callee expression
// may contain side effects (e.g., added by addinit during
// inlconv2expr or inlconv2list). Make sure to preserve these,
// if necessary (#42703).
- if n.Op == OCALLFUNC {
- callee := n.Left
- for callee.Op == OCONVNOP {
- ninit.AppendNodes(&callee.Ninit)
- callee = callee.Left
+ if n.Op() == ir.OCALLFUNC {
+ callee := n.Left()
+ for callee.Op() == ir.OCONVNOP {
+ conv := callee.(*ir.ConvExpr)
+ ninit.AppendNodes(conv.PtrInit())
+ callee = conv.Left()
}
- if callee.Op != ONAME && callee.Op != OCLOSURE {
- Fatalf("unexpected callee expression: %v", callee)
+ if callee.Op() != ir.ONAME && callee.Op() != ir.OCLOSURE && callee.Op() != ir.OMETHEXPR {
+ base.Fatalf("unexpected callee expression: %v", callee)
}
}
// Make temp names to use instead of the originals.
- inlvars := make(map[*Node]*Node)
+ inlvars := make(map[*ir.Name]ir.Node)
// record formals/locals for later post-processing
- var inlfvars []*Node
+ var inlfvars []ir.Node
// Handle captured variables when inlining closures.
- if fn.Name.Defn != nil {
- if c := fn.Name.Defn.Func.Closure; c != nil {
- for _, v := range c.Func.Closure.Func.Cvars.Slice() {
- if v.Op == OXXX {
- continue
- }
+ if c := fn.OClosure; c != nil {
+ for _, v := range fn.ClosureVars {
+ if v.Op() == ir.OXXX {
+ continue
+ }
- o := v.Name.Param.Outer
- // make sure the outer param matches the inlining location
- // NB: if we enabled inlining of functions containing OCLOSURE or refined
- // the reassigned check via some sort of copy propagation this would most
- // likely need to be changed to a loop to walk up to the correct Param
- if o == nil || (o.Name.Curfn != Curfn && o.Name.Curfn.Func.Closure != Curfn) {
- Fatalf("%v: unresolvable capture %v %v\n", n.Line(), fn, v)
- }
+ o := v.Outer
+ // make sure the outer param matches the inlining location
+ // NB: if we enabled inlining of functions containing OCLOSURE or refined
+ // the reassigned check via some sort of copy propagation this would most
+ // likely need to be changed to a loop to walk up to the correct Param
+ if o == nil || o.Curfn != Curfn {
+ base.Fatalf("%v: unresolvable capture %v %v\n", ir.Line(n), fn, v)
+ }
- if v.Name.Byval() {
- iv := typecheck(inlvar(v), ctxExpr)
- ninit.Append(nod(ODCL, iv, nil))
- ninit.Append(typecheck(nod(OAS, iv, o), ctxStmt))
- inlvars[v] = iv
- } else {
- addr := newname(lookup("&" + v.Sym.Name))
- addr.Type = types.NewPtr(v.Type)
- ia := typecheck(inlvar(addr), ctxExpr)
- ninit.Append(nod(ODCL, ia, nil))
- ninit.Append(typecheck(nod(OAS, ia, nod(OADDR, o, nil)), ctxStmt))
- inlvars[addr] = ia
-
- // When capturing by reference, all occurrence of the captured var
- // must be substituted with dereference of the temporary address
- inlvars[v] = typecheck(nod(ODEREF, ia, nil), ctxExpr)
- }
+ if v.Byval() {
+ iv := typecheck(inlvar(v), ctxExpr)
+ ninit.Append(ir.Nod(ir.ODCL, iv, nil))
+ ninit.Append(typecheck(ir.Nod(ir.OAS, iv, o), ctxStmt))
+ inlvars[v] = iv
+ } else {
+ addr := NewName(lookup("&" + v.Sym().Name))
+ addr.SetType(types.NewPtr(v.Type()))
+ ia := typecheck(inlvar(addr), ctxExpr)
+ ninit.Append(ir.Nod(ir.ODCL, ia, nil))
+ ninit.Append(typecheck(ir.Nod(ir.OAS, ia, nodAddr(o)), ctxStmt))
+ inlvars[addr] = ia
+
+ // When capturing by reference, all occurrences of the captured var
+ // must be substituted with dereference of the temporary address
+ inlvars[v] = typecheck(ir.Nod(ir.ODEREF, ia, nil), ctxExpr)
}
}
}
- for _, ln := range fn.Func.Inl.Dcl {
- if ln.Op != ONAME {
+ for _, ln := range fn.Inl.Dcl {
+ if ln.Op() != ir.ONAME {
continue
}
- if ln.Class() == PPARAMOUT { // return values handled below.
+ if ln.Class() == ir.PPARAMOUT { // return values handled below.
continue
}
- if ln.isParamStackCopy() { // ignore the on-stack copy of a parameter that moved to the heap
+ if isParamStackCopy(ln) { // ignore the on-stack copy of a parameter that moved to the heap
// TODO(mdempsky): Remove once I'm confident
// this never actually happens. We currently
// perform inlining before escape analysis, so
// nothing should have moved to the heap yet.
- Fatalf("impossible: %v", ln)
+ base.Fatalf("impossible: %v", ln)
}
inlf := typecheck(inlvar(ln), ctxExpr)
inlvars[ln] = inlf
- if genDwarfInline > 0 {
- if ln.Class() == PPARAM {
- inlf.Name.SetInlFormal(true)
+ if base.Flag.GenDwarfInl > 0 {
+ if ln.Class() == ir.PPARAM {
+ inlf.Name().SetInlFormal(true)
} else {
- inlf.Name.SetInlLocal(true)
+ inlf.Name().SetInlLocal(true)
}
- inlf.Pos = ln.Pos
+ inlf.SetPos(ln.Pos())
inlfvars = append(inlfvars, inlf)
}
}
nreturns := 0
- inspectList(asNodes(fn.Func.Inl.Body), func(n *Node) bool {
- if n != nil && n.Op == ORETURN {
+ ir.VisitList(ir.AsNodes(fn.Inl.Body), func(n ir.Node) {
+ if n != nil && n.Op() == ir.ORETURN {
nreturns++
}
- return true
})
// We can delay declaring+initializing result parameters if:
delayretvars := nreturns == 1
// temporaries for return values.
- var retvars []*Node
- for i, t := range fn.Type.Results().Fields().Slice() {
- var m *Node
- if n := asNode(t.Nname); n != nil && !n.isBlank() && !strings.HasPrefix(n.Sym.Name, "~r") {
+ var retvars []ir.Node
+ for i, t := range fn.Type().Results().Fields().Slice() {
+ var m ir.Node
+ if n := ir.AsNode(t.Nname); n != nil && !ir.IsBlank(n) && !strings.HasPrefix(n.Sym().Name, "~r") {
+ n := n.(*ir.Name)
m = inlvar(n)
m = typecheck(m, ctxExpr)
inlvars[n] = m
m = retvar(t, i)
}
- if genDwarfInline > 0 {
+ if base.Flag.GenDwarfInl > 0 {
// Don't update the src.Pos on a return variable if it
// was manufactured by the inliner (e.g. "~R2"); such vars
// were not part of the original callee.
- if !strings.HasPrefix(m.Sym.Name, "~R") {
- m.Name.SetInlFormal(true)
- m.Pos = t.Pos
+ if !strings.HasPrefix(m.Sym().Name, "~R") {
+ m.Name().SetInlFormal(true)
+ m.SetPos(t.Pos)
inlfvars = append(inlfvars, m)
}
}
}
// Assign arguments to the parameters' temp names.
- as := nod(OAS2, nil, nil)
+ as := ir.Nod(ir.OAS2, nil, nil)
as.SetColas(true)
- if n.Op == OCALLMETH {
- if n.Left.Left == nil {
- Fatalf("method call without receiver: %+v", n)
+ if n.Op() == ir.OCALLMETH {
+ sel := n.Left().(*ir.SelectorExpr)
+ if sel.Left() == nil {
+ base.Fatalf("method call without receiver: %+v", n)
}
- as.Rlist.Append(n.Left.Left)
+ as.PtrRlist().Append(sel.Left())
}
- as.Rlist.Append(n.List.Slice()...)
+ as.PtrRlist().Append(n.List().Slice()...)
// For non-dotted calls to variadic functions, we assign the
// variadic parameter's temp name separately.
- var vas *Node
+ var vas *ir.AssignStmt
- if recv := fn.Type.Recv(); recv != nil {
- as.List.Append(inlParam(recv, as, inlvars))
+ if recv := fn.Type().Recv(); recv != nil {
+ as.PtrList().Append(inlParam(recv, as, inlvars))
}
- for _, param := range fn.Type.Params().Fields().Slice() {
+ for _, param := range fn.Type().Params().Fields().Slice() {
// For ordinary parameters or variadic parameters in
// dotted calls, just add the variable to the
// assignment list, and we're done.
if !param.IsDDD() || n.IsDDD() {
- as.List.Append(inlParam(param, as, inlvars))
+ as.PtrList().Append(inlParam(param, as, inlvars))
continue
}
// Otherwise, we need to collect the remaining values
// to pass as a slice.
- x := as.List.Len()
- for as.List.Len() < as.Rlist.Len() {
- as.List.Append(argvar(param.Type, as.List.Len()))
+ x := as.List().Len()
+ for as.List().Len() < as.Rlist().Len() {
+ as.PtrList().Append(argvar(param.Type, as.List().Len()))
}
- varargs := as.List.Slice()[x:]
+ varargs := as.List().Slice()[x:]
- vas = nod(OAS, nil, nil)
- vas.Left = inlParam(param, vas, inlvars)
+ vas = ir.NewAssignStmt(base.Pos, nil, nil)
+ vas.SetLeft(inlParam(param, vas, inlvars))
if len(varargs) == 0 {
- vas.Right = nodnil()
- vas.Right.Type = param.Type
+ vas.SetRight(nodnil())
+ vas.Right().SetType(param.Type)
} else {
- vas.Right = nod(OCOMPLIT, nil, typenod(param.Type))
- vas.Right.List.Set(varargs)
+ lit := ir.Nod(ir.OCOMPLIT, nil, ir.TypeNode(param.Type))
+ lit.PtrList().Set(varargs)
+ vas.SetRight(lit)
}
}
- if as.Rlist.Len() != 0 {
- as = typecheck(as, ctxStmt)
- ninit.Append(as)
+ if as.Rlist().Len() != 0 {
+ ninit.Append(typecheck(as, ctxStmt))
}
if vas != nil {
- vas = typecheck(vas, ctxStmt)
- ninit.Append(vas)
+ ninit.Append(typecheck(vas, ctxStmt))
}
if !delayretvars {
// Zero the return parameters.
for _, n := range retvars {
- ninit.Append(nod(ODCL, n, nil))
- ras := nod(OAS, n, nil)
- ras = typecheck(ras, ctxStmt)
- ninit.Append(ras)
+ ninit.Append(ir.Nod(ir.ODCL, n, nil))
+ ras := ir.Nod(ir.OAS, n, nil)
+ ninit.Append(typecheck(ras, ctxStmt))
}
}
inlgen++
parent := -1
- if b := Ctxt.PosTable.Pos(n.Pos).Base(); b != nil {
+ if b := base.Ctxt.PosTable.Pos(n.Pos()).Base(); b != nil {
parent = b.InliningIndex()
}
- newIndex := Ctxt.InlTree.Add(parent, n.Pos, fn.Sym.Linksym())
+
+ sym := fn.Sym().Linksym()
+ newIndex := base.Ctxt.InlTree.Add(parent, n.Pos(), sym)
// Add an inline mark just before the inlined body.
// This mark is inline in the code so that it's a reasonable spot
// to put a breakpoint. Not sure if that's really necessary or not
// (in which case it could go at the end of the function instead).
// Note issue 28603.
- inlMark := nod(OINLMARK, nil, nil)
- inlMark.Pos = n.Pos.WithIsStmt()
- inlMark.Xoffset = int64(newIndex)
+ inlMark := ir.Nod(ir.OINLMARK, nil, nil)
+ inlMark.SetPos(n.Pos().WithIsStmt())
+ inlMark.SetOffset(int64(newIndex))
ninit.Append(inlMark)
- if genDwarfInline > 0 {
- if !fn.Sym.Linksym().WasInlined() {
- Ctxt.DwFixups.SetPrecursorFunc(fn.Sym.Linksym(), fn)
- fn.Sym.Linksym().Set(obj.AttrWasInlined, true)
+ if base.Flag.GenDwarfInl > 0 {
+ if !sym.WasInlined() {
+ base.Ctxt.DwFixups.SetPrecursorFunc(sym, fn)
+ sym.Set(obj.AttrWasInlined, true)
}
}
bases: make(map[*src.PosBase]*src.PosBase),
newInlIndex: newIndex,
}
+ subst.edit = subst.node
- body := subst.list(asNodes(fn.Func.Inl.Body))
+ body := subst.list(ir.AsNodes(fn.Inl.Body))
- lab := nodSym(OLABEL, nil, retlabel)
+ lab := nodSym(ir.OLABEL, nil, retlabel)
body = append(body, lab)
typecheckslice(body, ctxStmt)
- if genDwarfInline > 0 {
+ if base.Flag.GenDwarfInl > 0 {
for _, v := range inlfvars {
- v.Pos = subst.updatedPos(v.Pos)
+ v.SetPos(subst.updatedPos(v.Pos()))
}
}
//dumplist("ninit post", ninit);
- call := nod(OINLCALL, nil, nil)
- call.Ninit.Set(ninit.Slice())
- call.Nbody.Set(body)
- call.Rlist.Set(retvars)
- call.Type = n.Type
+ call := ir.Nod(ir.OINLCALL, nil, nil)
+ call.PtrInit().Set(ninit.Slice())
+ call.PtrBody().Set(body)
+ call.PtrRlist().Set(retvars)
+ call.SetType(n.Type())
call.SetTypecheck(1)
// transitive inlining
// instead we emit the things that the body needs
// and each use must redo the inlining.
// luckily these are small.
- inlnodelist(call.Nbody, maxCost, inlMap)
- for _, n := range call.Nbody.Slice() {
- if n.Op == OINLCALL {
- inlconv2stmt(n)
- }
- }
+ ir.EditChildren(call, edit)
- if Debug.m > 2 {
- fmt.Printf("%v: After inlining %+v\n\n", call.Line(), call)
+ if base.Flag.LowerM > 2 {
+ fmt.Printf("%v: After inlining %+v\n\n", ir.Line(call), call)
}
return call
// Every time we expand a function we generate a new set of tmpnames,
// PAUTO's in the calling functions, and link them off of the
// PPARAM's, PAUTOS and PPARAMOUTs of the called function.
-func inlvar(var_ *Node) *Node {
- if Debug.m > 3 {
+func inlvar(var_ ir.Node) ir.Node {
+ if base.Flag.LowerM > 3 {
fmt.Printf("inlvar %+v\n", var_)
}
- n := newname(var_.Sym)
- n.Type = var_.Type
- n.SetClass(PAUTO)
- n.Name.SetUsed(true)
- n.Name.Curfn = Curfn // the calling function, not the called one
- n.Name.SetAddrtaken(var_.Name.Addrtaken())
+ n := NewName(var_.Sym())
+ n.SetType(var_.Type())
+ n.SetClass(ir.PAUTO)
+ n.SetUsed(true)
+ n.Curfn = Curfn // the calling function, not the called one
+ n.SetAddrtaken(var_.Name().Addrtaken())
- Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
+ Curfn.Dcl = append(Curfn.Dcl, n)
return n
}
// Synthesize a variable to store the inlined function's results in.
-func retvar(t *types.Field, i int) *Node {
- n := newname(lookupN("~R", i))
- n.Type = t.Type
- n.SetClass(PAUTO)
- n.Name.SetUsed(true)
- n.Name.Curfn = Curfn // the calling function, not the called one
- Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
+func retvar(t *types.Field, i int) ir.Node {
+ n := NewName(lookupN("~R", i))
+ n.SetType(t.Type)
+ n.SetClass(ir.PAUTO)
+ n.SetUsed(true)
+ n.Curfn = Curfn // the calling function, not the called one
+ Curfn.Dcl = append(Curfn.Dcl, n)
return n
}
// Synthesize a variable to store the inlined function's arguments
// when they come from a multiple return call.
-func argvar(t *types.Type, i int) *Node {
- n := newname(lookupN("~arg", i))
- n.Type = t.Elem()
- n.SetClass(PAUTO)
- n.Name.SetUsed(true)
- n.Name.Curfn = Curfn // the calling function, not the called one
- Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
+func argvar(t *types.Type, i int) ir.Node {
+ n := NewName(lookupN("~arg", i))
+ n.SetType(t.Elem())
+ n.SetClass(ir.PAUTO)
+ n.SetUsed(true)
+ n.Curfn = Curfn // the calling function, not the called one
+ Curfn.Dcl = append(Curfn.Dcl, n)
return n
}
retlabel *types.Sym
// Temporary result variables.
- retvars []*Node
+ retvars []ir.Node
// Whether result variables should be initialized at the
// "return" statement.
delayretvars bool
- inlvars map[*Node]*Node
+ inlvars map[*ir.Name]ir.Node
// bases maps from original PosBase to PosBase with an extra
// inlined call frame.
// newInlIndex is the index of the inlined call frame to
// insert for inlined nodes.
newInlIndex int
+
+ edit func(ir.Node) ir.Node // cached copy of subst.node method value closure
}
// list inlines a list of nodes.
-func (subst *inlsubst) list(ll Nodes) []*Node {
- s := make([]*Node, 0, ll.Len())
+func (subst *inlsubst) list(ll ir.Nodes) []ir.Node {
+ s := make([]ir.Node, 0, ll.Len())
for _, n := range ll.Slice() {
s = append(s, subst.node(n))
}
// inlined function, substituting references to input/output
// parameters with ones to the tmpnames, and substituting returns with
// assignments to the output.
-func (subst *inlsubst) node(n *Node) *Node {
+func (subst *inlsubst) node(n ir.Node) ir.Node {
if n == nil {
return nil
}
- switch n.Op {
- case ONAME:
+ switch n.Op() {
+ case ir.ONAME:
+ n := n.(*ir.Name)
if inlvar := subst.inlvars[n]; inlvar != nil { // These will be set during inlnode
- if Debug.m > 2 {
+ if base.Flag.LowerM > 2 {
fmt.Printf("substituting name %+v -> %+v\n", n, inlvar)
}
return inlvar
}
- if Debug.m > 2 {
+ if base.Flag.LowerM > 2 {
fmt.Printf("not substituting name %+v\n", n)
}
return n
- case OLITERAL, OTYPE:
+ case ir.OMETHEXPR:
+ return n
+
+ case ir.OLITERAL, ir.ONIL, ir.OTYPE:
// If n is a named constant or type, we can continue
// using it in the inline copy. Otherwise, make a copy
// so we can update the line number.
- if n.Sym != nil {
+ if n.Sym() != nil {
+ return n
+ }
+ if n, ok := n.(*ir.Name); ok && n.Op() == ir.OLITERAL {
+ // This happens for an unnamed OLITERAL,
+ // which should really not be a *Name, but for now it is.
+ // ir.Copy(n) is not allowed generally and would panic below,
+ // but it's OK in this situation.
+ n = n.CloneName()
+ n.SetPos(subst.updatedPos(n.Pos()))
return n
}
- // Since we don't handle bodies with closures, this return is guaranteed to belong to the current inlined function.
-
- // dump("Return before substitution", n);
- case ORETURN:
- m := nodSym(OGOTO, nil, subst.retlabel)
- m.Ninit.Set(subst.list(n.Ninit))
-
- if len(subst.retvars) != 0 && n.List.Len() != 0 {
- as := nod(OAS2, nil, nil)
+ case ir.ORETURN:
+ // Since we don't handle bodies with closures,
+ // this return is guaranteed to belong to the current inlined function.
+ init := subst.list(n.Init())
+ if len(subst.retvars) != 0 && n.List().Len() != 0 {
+ as := ir.Nod(ir.OAS2, nil, nil)
// Make a shallow copy of retvars.
// Otherwise OINLCALL.Rlist will be the same list,
// and later walk and typecheck may clobber it.
for _, n := range subst.retvars {
- as.List.Append(n)
+ as.PtrList().Append(n)
}
- as.Rlist.Set(subst.list(n.List))
+ as.PtrRlist().Set(subst.list(n.List()))
if subst.delayretvars {
- for _, n := range as.List.Slice() {
- as.Ninit.Append(nod(ODCL, n, nil))
- n.Name.Defn = as
+ for _, n := range as.List().Slice() {
+ as.PtrInit().Append(ir.Nod(ir.ODCL, n, nil))
+ n.Name().Defn = as
}
}
- as = typecheck(as, ctxStmt)
- m.Ninit.Append(as)
+ init = append(init, typecheck(as, ctxStmt))
}
+ init = append(init, nodSym(ir.OGOTO, nil, subst.retlabel))
+ typecheckslice(init, ctxStmt)
+ return ir.NewBlockStmt(base.Pos, init)
- typecheckslice(m.Ninit.Slice(), ctxStmt)
- m = typecheck(m, ctxStmt)
-
- // dump("Return after substitution", m);
+ case ir.OGOTO:
+ m := ir.Copy(n).(*ir.BranchStmt)
+ m.SetPos(subst.updatedPos(m.Pos()))
+ m.PtrInit().Set(nil)
+ p := fmt.Sprintf("%s·%d", n.Sym().Name, inlgen)
+ m.SetSym(lookup(p))
return m
- case OGOTO, OLABEL:
- m := n.copy()
- m.Pos = subst.updatedPos(m.Pos)
- m.Ninit.Set(nil)
- p := fmt.Sprintf("%s·%d", n.Sym.Name, inlgen)
- m.Sym = lookup(p)
-
+ case ir.OLABEL:
+ m := ir.Copy(n).(*ir.LabelStmt)
+ m.SetPos(subst.updatedPos(m.Pos()))
+ m.PtrInit().Set(nil)
+ p := fmt.Sprintf("%s·%d", n.Sym().Name, inlgen)
+ m.SetSym(lookup(p))
return m
}
- m := n.copy()
- m.Pos = subst.updatedPos(m.Pos)
- m.Ninit.Set(nil)
-
- if n.Op == OCLOSURE {
- Fatalf("cannot inline function containing closure: %+v", n)
+ if n.Op() == ir.OCLOSURE {
+ base.Fatalf("cannot inline function containing closure: %+v", n)
}
- m.Left = subst.node(n.Left)
- m.Right = subst.node(n.Right)
- m.List.Set(subst.list(n.List))
- m.Rlist.Set(subst.list(n.Rlist))
- m.Ninit.Set(append(m.Ninit.Slice(), subst.list(n.Ninit)...))
- m.Nbody.Set(subst.list(n.Nbody))
-
+ m := ir.Copy(n)
+ m.SetPos(subst.updatedPos(m.Pos()))
+ ir.EditChildren(m, subst.edit)
return m
}
func (subst *inlsubst) updatedPos(xpos src.XPos) src.XPos {
- pos := Ctxt.PosTable.Pos(xpos)
+ pos := base.Ctxt.PosTable.Pos(xpos)
oldbase := pos.Base() // can be nil
newbase := subst.bases[oldbase]
if newbase == nil {
subst.bases[oldbase] = newbase
}
pos.SetBase(newbase)
- return Ctxt.PosTable.XPos(pos)
+ return base.Ctxt.PosTable.XPos(pos)
}
-func pruneUnusedAutos(ll []*Node, vis *hairyVisitor) []*Node {
- s := make([]*Node, 0, len(ll))
+func pruneUnusedAutos(ll []*ir.Name, vis *hairyVisitor) []*ir.Name {
+ s := make([]*ir.Name, 0, len(ll))
for _, n := range ll {
- if n.Class() == PAUTO {
+ if n.Class() == ir.PAUTO {
if _, found := vis.usedLocals[n]; !found {
continue
}
// devirtualize replaces interface method calls within fn with direct
// concrete-type method calls where applicable.
-func devirtualize(fn *Node) {
+func devirtualize(fn *ir.Func) {
Curfn = fn
- inspectList(fn.Nbody, func(n *Node) bool {
- if n.Op == OCALLINTER {
- devirtualizeCall(n)
+ ir.VisitList(fn.Body(), func(n ir.Node) {
+ if n.Op() == ir.OCALLINTER {
+ devirtualizeCall(n.(*ir.CallExpr))
}
- return true
})
}
-func devirtualizeCall(call *Node) {
- recv := staticValue(call.Left.Left)
- if recv.Op != OCONVIFACE {
+func devirtualizeCall(call *ir.CallExpr) {
+ sel := call.Left().(*ir.SelectorExpr)
+ r := staticValue(sel.Left())
+ if r.Op() != ir.OCONVIFACE {
return
}
+ recv := r.(*ir.ConvExpr)
- typ := recv.Left.Type
+ typ := recv.Left().Type()
if typ.IsInterface() {
return
}
- x := nodl(call.Left.Pos, ODOTTYPE, call.Left.Left, nil)
- x.Type = typ
- x = nodlSym(call.Left.Pos, OXDOT, x, call.Left.Sym)
- x = typecheck(x, ctxExpr|ctxCallee)
- switch x.Op {
- case ODOTMETH:
- if Debug.m != 0 {
- Warnl(call.Pos, "devirtualizing %v to %v", call.Left, typ)
+ dt := ir.NodAt(sel.Pos(), ir.ODOTTYPE, sel.Left(), nil)
+ dt.SetType(typ)
+ x := typecheck(nodlSym(sel.Pos(), ir.OXDOT, dt, sel.Sym()), ctxExpr|ctxCallee)
+ switch x.Op() {
+ case ir.ODOTMETH:
+ if base.Flag.LowerM != 0 {
+ base.WarnfAt(call.Pos(), "devirtualizing %v to %v", sel, typ)
}
- call.Op = OCALLMETH
- call.Left = x
- case ODOTINTER:
+ call.SetOp(ir.OCALLMETH)
+ call.SetLeft(x)
+ case ir.ODOTINTER:
// Promoted method from embedded interface-typed field (#42279).
- if Debug.m != 0 {
- Warnl(call.Pos, "partially devirtualizing %v to %v", call.Left, typ)
+ if base.Flag.LowerM != 0 {
+ base.WarnfAt(call.Pos(), "partially devirtualizing %v to %v", sel, typ)
}
- call.Op = OCALLINTER
- call.Left = x
+ call.SetOp(ir.OCALLINTER)
+ call.SetLeft(x)
default:
// TODO(mdempsky): Turn back into Fatalf after more testing.
- if Debug.m != 0 {
- Warnl(call.Pos, "failed to devirtualize %v (%v)", x, x.Op)
+ if base.Flag.LowerM != 0 {
+ base.WarnfAt(call.Pos(), "failed to devirtualize %v (%v)", x, x.Op())
}
return
}
// Receiver parameter size may have changed; need to update
// call.Type to get correct stack offsets for result
// parameters.
- checkwidth(x.Type)
- switch ft := x.Type; ft.NumResults() {
+ checkwidth(x.Type())
+ switch ft := x.Type(); ft.NumResults() {
case 0:
case 1:
- call.Type = ft.Results().Field(0).Type
+ call.SetType(ft.Results().Field(0).Type)
default:
- call.Type = ft.Results()
+ call.SetType(ft.Results())
}
}
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/syntax"
"cmd/internal/objabi"
"cmd/internal/src"
"strings"
)
-// lineno is the source position at the start of the most recently lexed token.
-// TODO(gri) rename and eventually remove
-var lineno src.XPos
-
-func makePos(base *src.PosBase, line, col uint) src.XPos {
- return Ctxt.PosTable.XPos(src.MakePos(base, line, col))
+func makePos(b *src.PosBase, line, col uint) src.XPos {
+ return base.Ctxt.PosTable.XPos(src.MakePos(b, line, col))
}
func isSpace(c rune) bool {
return len(s) >= 2 && s[0] == '"' && s[len(s)-1] == '"'
}
-type PragmaFlag int16
-
const (
- // Func pragmas.
- Nointerface PragmaFlag = 1 << iota
- Noescape // func parameters don't escape
- Norace // func must not have race detector annotations
- Nosplit // func should not execute on separate stack
- Noinline // func should not be inlined
- NoCheckPtr // func should not be instrumented by checkptr
- CgoUnsafeArgs // treat a pointer to one arg as a pointer to them all
- UintptrEscapes // pointers converted to uintptr escape
-
- // Runtime-only func pragmas.
- // See ../../../../runtime/README.md for detailed descriptions.
- Systemstack // func must run on system stack
- Nowritebarrier // emit compiler error instead of write barrier
- Nowritebarrierrec // error on write barrier in this or recursive callees
- Yeswritebarrierrec // cancels Nowritebarrierrec in this function and callees
-
- // Runtime and cgo type pragmas
- NotInHeap // values of this type must not be heap allocated
-
- // Go command pragmas
- GoBuildPragma
-)
-
-const (
- FuncPragmas = Nointerface |
- Noescape |
- Norace |
- Nosplit |
- Noinline |
- NoCheckPtr |
- CgoUnsafeArgs |
- UintptrEscapes |
- Systemstack |
- Nowritebarrier |
- Nowritebarrierrec |
- Yeswritebarrierrec
-
- TypePragmas = NotInHeap
+ FuncPragmas = ir.Nointerface |
+ ir.Noescape |
+ ir.Norace |
+ ir.Nosplit |
+ ir.Noinline |
+ ir.NoCheckPtr |
+ ir.CgoUnsafeArgs |
+ ir.UintptrEscapes |
+ ir.Systemstack |
+ ir.Nowritebarrier |
+ ir.Nowritebarrierrec |
+ ir.Yeswritebarrierrec
+
+ TypePragmas = ir.NotInHeap
)
-func pragmaFlag(verb string) PragmaFlag {
+func pragmaFlag(verb string) ir.PragmaFlag {
switch verb {
case "go:build":
- return GoBuildPragma
+ return ir.GoBuildPragma
case "go:nointerface":
if objabi.Fieldtrack_enabled != 0 {
- return Nointerface
+ return ir.Nointerface
}
case "go:noescape":
- return Noescape
+ return ir.Noescape
case "go:norace":
- return Norace
+ return ir.Norace
case "go:nosplit":
- return Nosplit | NoCheckPtr // implies NoCheckPtr (see #34972)
+ return ir.Nosplit | ir.NoCheckPtr // implies NoCheckPtr (see #34972)
case "go:noinline":
- return Noinline
+ return ir.Noinline
case "go:nocheckptr":
- return NoCheckPtr
+ return ir.NoCheckPtr
case "go:systemstack":
- return Systemstack
+ return ir.Systemstack
case "go:nowritebarrier":
- return Nowritebarrier
+ return ir.Nowritebarrier
case "go:nowritebarrierrec":
- return Nowritebarrierrec | Nowritebarrier // implies Nowritebarrier
+ return ir.Nowritebarrierrec | ir.Nowritebarrier // implies Nowritebarrier
case "go:yeswritebarrierrec":
- return Yeswritebarrierrec
+ return ir.Yeswritebarrierrec
case "go:cgo_unsafe_args":
- return CgoUnsafeArgs | NoCheckPtr // implies NoCheckPtr (see #34968)
+ return ir.CgoUnsafeArgs | ir.NoCheckPtr // implies NoCheckPtr (see #34968)
case "go:uintptrescapes":
// For the next function declared in the file
// any uintptr arguments may be pointer values
// call. The conversion to uintptr must appear
// in the argument list.
// Used in syscall/dll_windows.go.
- return UintptrEscapes
+ return ir.UintptrEscapes
case "go:notinheap":
- return NotInHeap
+ return ir.NotInHeap
}
return 0
}
import (
"bufio"
"bytes"
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
- "cmd/internal/sys"
"flag"
"fmt"
+ "go/constant"
"internal/goversion"
"io"
"io/ioutil"
"strings"
)
-var (
- buildid string
- spectre string
- spectreIndex bool
-)
-
-var (
- Debug_append int
- Debug_checkptr int
- Debug_closure int
- Debug_compilelater int
- debug_dclstack int
- Debug_dumpptrs int
- Debug_libfuzzer int
- Debug_panic int
- Debug_slice int
- Debug_vlog bool
- Debug_wb int
- Debug_pctab string
- Debug_locationlist int
- Debug_typecheckinl int
- Debug_gendwarfinl int
- Debug_softfloat int
- Debug_defer int
-)
-
-// Debug arguments.
-// These can be specified with the -d flag, as in "-d nil"
-// to set the debug_checknil variable.
-// Multiple options can be comma-separated.
-// Each option accepts an optional argument, as in "gcprog=2"
-var debugtab = []struct {
- name string
- help string
- val interface{} // must be *int or *string
-}{
- {"append", "print information about append compilation", &Debug_append},
- {"checkptr", "instrument unsafe pointer conversions", &Debug_checkptr},
- {"closure", "print information about closure compilation", &Debug_closure},
- {"compilelater", "compile functions as late as possible", &Debug_compilelater},
- {"disablenil", "disable nil checks", &disable_checknil},
- {"dclstack", "run internal dclstack check", &debug_dclstack},
- {"dumpptrs", "show Node pointer values in Dump/dumplist output", &Debug_dumpptrs},
- {"gcprog", "print dump of GC programs", &Debug_gcprog},
- {"libfuzzer", "coverage instrumentation for libfuzzer", &Debug_libfuzzer},
- {"nil", "print information about nil checks", &Debug_checknil},
- {"panic", "do not hide any compiler panic", &Debug_panic},
- {"slice", "print information about slice compilation", &Debug_slice},
- {"typeassert", "print information about type assertion inlining", &Debug_typeassert},
- {"wb", "print information about write barriers", &Debug_wb},
- {"export", "print export data", &Debug_export},
- {"pctab", "print named pc-value table", &Debug_pctab},
- {"locationlists", "print information about DWARF location list creation", &Debug_locationlist},
- {"typecheckinl", "eager typechecking of inline function bodies", &Debug_typecheckinl},
- {"dwarfinl", "print information about DWARF inlined function creation", &Debug_gendwarfinl},
- {"softfloat", "force compiler to emit soft-float code", &Debug_softfloat},
- {"defer", "print information about defer compilation", &Debug_defer},
- {"fieldtrack", "enable fieldtracking", &objabi.Fieldtrack_enabled},
-}
-
-const debugHelpHeader = `usage: -d arg[,arg]* and arg is <key>[=<value>]
-
-<key> is one of:
-
-`
-
-const debugHelpFooter = `
-<value> is key-specific.
-
-Key "checkptr" supports values:
- "0": instrumentation disabled
- "1": conversions involving unsafe.Pointer are instrumented
- "2": conversions to unsafe.Pointer force heap allocation
-
-Key "pctab" supports values:
- "pctospadj", "pctofile", "pctoline", "pctoinline", "pctopcdata"
-`
-
-func usage() {
- fmt.Fprintf(os.Stderr, "usage: compile [options] file.go...\n")
- objabi.Flagprint(os.Stderr)
- Exit(2)
-}
-
func hidePanic() {
- if Debug_panic == 0 && nsavederrors+nerrors > 0 {
+ if base.Debug.Panic == 0 && base.Errors() > 0 {
// If we've already complained about things
// in the program, don't bother complaining
// about a panic too; let the user clean up
// the code and try again.
if err := recover(); err != nil {
- errorexit()
+ if err == "-h" {
+ panic(err)
+ }
+ base.ErrorExit()
}
}
}
-// supportsDynlink reports whether or not the code generator for the given
-// architecture supports the -shared and -dynlink flags.
-func supportsDynlink(arch *sys.Arch) bool {
- return arch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X)
-}
-
-// timing data for compiler phases
-var timings Timings
-var benchfile string
-
-var nowritebarrierrecCheck *nowritebarrierrecChecker
+// Target is the package being compiled.
+var Target *ir.Package
// Main parses flags and Go source files specified in the command-line
// arguments, type-checks the parsed Go package, compiles functions to machine
archInit(&thearch)
- Ctxt = obj.Linknew(thearch.LinkArch)
- Ctxt.DiagFunc = yyerror
- Ctxt.DiagFlush = flusherrors
- Ctxt.Bso = bufio.NewWriter(os.Stdout)
+ base.Ctxt = obj.Linknew(thearch.LinkArch)
+ base.Ctxt.DiagFunc = base.Errorf
+ base.Ctxt.DiagFlush = base.FlushErrors
+ base.Ctxt.Bso = bufio.NewWriter(os.Stdout)
// UseBASEntries is preferred because it shaves about 2% off build time, but LLDB, dsymutil, and dwarfdump
// on Darwin don't support it properly, especially since macOS 10.14 (Mojave). This is exposed as a flag
// to allow testing with LLVM tools on Linux, and to help with reporting this bug to the LLVM project.
// See bugs 31188 and 21945 (CLs 170638, 98075, 72371).
- Ctxt.UseBASEntries = Ctxt.Headtype != objabi.Hdarwin
+ base.Ctxt.UseBASEntries = base.Ctxt.Headtype != objabi.Hdarwin
- localpkg = types.NewPkg("", "")
- localpkg.Prefix = "\"\""
+ types.LocalPkg = types.NewPkg("", "")
+ types.LocalPkg.Prefix = "\"\""
// We won't know localpkg's height until after import
// processing. In the mean time, set to MaxPkgHeight to ensure
// height comparisons at least work until then.
- localpkg.Height = types.MaxPkgHeight
+ types.LocalPkg.Height = types.MaxPkgHeight
// pseudo-package, for scoping
- builtinpkg = types.NewPkg("go.builtin", "") // TODO(gri) name this package go.builtin?
- builtinpkg.Prefix = "go.builtin" // not go%2ebuiltin
+ types.BuiltinPkg = types.NewPkg("go.builtin", "") // TODO(gri) name this package go.builtin?
+ types.BuiltinPkg.Prefix = "go.builtin" // not go%2ebuiltin
// pseudo-package, accessed by import "unsafe"
unsafepkg = types.NewPkg("unsafe", "unsafe")
// pseudo-package used for methods with anonymous receivers
gopkg = types.NewPkg("go", "")
- Wasm := objabi.GOARCH == "wasm"
-
- // Whether the limit for stack-allocated objects is much smaller than normal.
- // This can be helpful for diagnosing certain causes of GC latency. See #27732.
- smallFrames := false
- jsonLogOpt := ""
-
- flag.BoolVar(&compiling_runtime, "+", false, "compiling runtime")
- flag.BoolVar(&compiling_std, "std", false, "compiling standard library")
- flag.StringVar(&localimport, "D", "", "set relative `path` for local imports")
-
- objabi.Flagcount("%", "debug non-static initializers", &Debug.P)
- objabi.Flagcount("B", "disable bounds checking", &Debug.B)
- objabi.Flagcount("C", "disable printing of columns in error messages", &Debug.C)
- objabi.Flagcount("E", "debug symbol export", &Debug.E)
- objabi.Flagcount("K", "debug missing line numbers", &Debug.K)
- objabi.Flagcount("L", "show full file names in error messages", &Debug.L)
- objabi.Flagcount("N", "disable optimizations", &Debug.N)
- objabi.Flagcount("S", "print assembly listing", &Debug.S)
- objabi.Flagcount("W", "debug parse tree after type checking", &Debug.W)
- objabi.Flagcount("e", "no limit on number of errors reported", &Debug.e)
- objabi.Flagcount("h", "halt on error", &Debug.h)
- objabi.Flagcount("j", "debug runtime-initialized variables", &Debug.j)
- objabi.Flagcount("l", "disable inlining", &Debug.l)
- objabi.Flagcount("m", "print optimization decisions", &Debug.m)
- objabi.Flagcount("r", "debug generated wrappers", &Debug.r)
- objabi.Flagcount("w", "debug type checking", &Debug.w)
-
- objabi.Flagfn1("I", "add `directory` to import search path", addidir)
- objabi.AddVersionFlag() // -V
- flag.StringVar(&asmhdr, "asmhdr", "", "write assembly header to `file`")
- flag.StringVar(&buildid, "buildid", "", "record `id` as the build id in the export metadata")
- flag.IntVar(&nBackendWorkers, "c", 1, "concurrency during compilation, 1 means no concurrency")
- flag.BoolVar(&pure_go, "complete", false, "compiling complete package (no C or assembly)")
- flag.StringVar(&debugstr, "d", "", "print debug information about items in `list`; try -d help")
- flag.BoolVar(&flagDWARF, "dwarf", !Wasm, "generate DWARF symbols")
- flag.BoolVar(&Ctxt.Flag_locationlists, "dwarflocationlists", true, "add location lists to DWARF in optimized mode")
- flag.IntVar(&genDwarfInline, "gendwarfinl", 2, "generate DWARF inline info records")
- objabi.Flagfn1("embedcfg", "read go:embed configuration from `file`", readEmbedCfg)
- objabi.Flagfn1("importmap", "add `definition` of the form source=actual to import map", addImportMap)
- objabi.Flagfn1("importcfg", "read import configuration from `file`", readImportCfg)
- flag.StringVar(&flag_installsuffix, "installsuffix", "", "set pkg directory `suffix`")
- flag.StringVar(&flag_lang, "lang", "", "release to compile for")
- flag.StringVar(&linkobj, "linkobj", "", "write linker-specific object to `file`")
- objabi.Flagcount("live", "debug liveness analysis", &debuglive)
- if sys.MSanSupported(objabi.GOOS, objabi.GOARCH) {
- flag.BoolVar(&flag_msan, "msan", false, "build code compatible with C/C++ memory sanitizer")
- }
- flag.BoolVar(&nolocalimports, "nolocalimports", false, "reject local (relative) imports")
- flag.StringVar(&outfile, "o", "", "write output to `file`")
- flag.StringVar(&myimportpath, "p", "", "set expected package import `path`")
- flag.BoolVar(&writearchive, "pack", false, "write to file.a instead of file.o")
- if sys.RaceDetectorSupported(objabi.GOOS, objabi.GOARCH) {
- flag.BoolVar(&flag_race, "race", false, "enable race detector")
- }
- flag.StringVar(&spectre, "spectre", spectre, "enable spectre mitigations in `list` (all, index, ret)")
- if enableTrace {
- flag.BoolVar(&trace, "t", false, "trace type-checking")
- }
- flag.StringVar(&pathPrefix, "trimpath", "", "remove `prefix` from recorded source file paths")
- flag.BoolVar(&Debug_vlog, "v", false, "increase debug verbosity")
- flag.BoolVar(&use_writebarrier, "wb", true, "enable write barrier")
- var flag_shared bool
- var flag_dynlink bool
- if supportsDynlink(thearch.LinkArch.Arch) {
- flag.BoolVar(&flag_shared, "shared", false, "generate code that can be linked into a shared library")
- flag.BoolVar(&flag_dynlink, "dynlink", false, "support references to Go symbols defined in other shared libraries")
- flag.BoolVar(&Ctxt.Flag_linkshared, "linkshared", false, "generate code that will be linked against Go shared libraries")
- }
- flag.StringVar(&cpuprofile, "cpuprofile", "", "write cpu profile to `file`")
- flag.StringVar(&memprofile, "memprofile", "", "write memory profile to `file`")
- flag.Int64Var(&memprofilerate, "memprofilerate", 0, "set runtime.MemProfileRate to `rate`")
- var goversion string
- flag.StringVar(&goversion, "goversion", "", "required version of the runtime")
- var symabisPath string
- flag.StringVar(&symabisPath, "symabis", "", "read symbol ABIs from `file`")
- flag.StringVar(&traceprofile, "traceprofile", "", "write an execution trace to `file`")
- flag.StringVar(&blockprofile, "blockprofile", "", "write block profile to `file`")
- flag.StringVar(&mutexprofile, "mutexprofile", "", "write mutex profile to `file`")
- flag.StringVar(&benchfile, "bench", "", "append benchmark times to `file`")
- flag.BoolVar(&smallFrames, "smallframes", false, "reduce the size limit for stack allocated objects")
- flag.BoolVar(&Ctxt.UseBASEntries, "dwarfbasentries", Ctxt.UseBASEntries, "use base address selection entries in DWARF")
- flag.StringVar(&jsonLogOpt, "json", "", "version,destination for JSON compiler/optimizer logging")
-
- objabi.Flagparse(usage)
-
- Ctxt.Pkgpath = myimportpath
-
- for _, f := range strings.Split(spectre, ",") {
- f = strings.TrimSpace(f)
- switch f {
- default:
- log.Fatalf("unknown setting -spectre=%s", f)
- case "":
- // nothing
- case "all":
- spectreIndex = true
- Ctxt.Retpoline = true
- case "index":
- spectreIndex = true
- case "ret":
- Ctxt.Retpoline = true
- }
- }
-
- if spectreIndex {
- switch objabi.GOARCH {
- case "amd64":
- // ok
- default:
- log.Fatalf("GOARCH=%s does not support -spectre=index", objabi.GOARCH)
- }
- }
+ base.DebugSSA = ssa.PhaseOption
+ base.ParseFlags()
// Record flags that affect the build result. (And don't
// record flags that don't, since that would cause spurious
// changes in the binary.)
recordFlags("B", "N", "l", "msan", "race", "shared", "dynlink", "dwarflocationlists", "dwarfbasentries", "smallframes", "spectre")
- if smallFrames {
+ if !enableTrace && base.Flag.LowerT {
+ log.Fatalf("compiler not built with support for -t")
+ }
+
+ // Enable inlining (after recordFlags, to avoid recording the rewritten -l). For now:
+ // default: inlining on. (Flag.LowerL == 1)
+ // -l: inlining off (Flag.LowerL == 0)
+ // -l=2, -l=3: inlining on again, with extra debugging (Flag.LowerL > 1)
+ if base.Flag.LowerL <= 1 {
+ base.Flag.LowerL = 1 - base.Flag.LowerL
+ }
+
+ if base.Flag.SmallFrames {
maxStackVarSize = 128 * 1024
maxImplicitStackVarSize = 16 * 1024
}
- Ctxt.Flag_shared = flag_dynlink || flag_shared
- Ctxt.Flag_dynlink = flag_dynlink
- Ctxt.Flag_optimize = Debug.N == 0
-
- Ctxt.Debugasm = Debug.S
- Ctxt.Debugvlog = Debug_vlog
- if flagDWARF {
- Ctxt.DebugInfo = debuginfo
- Ctxt.GenAbstractFunc = genAbstractFunc
- Ctxt.DwFixups = obj.NewDwarfFixupTable(Ctxt)
+ if base.Flag.Dwarf {
+ base.Ctxt.DebugInfo = debuginfo
+ base.Ctxt.GenAbstractFunc = genAbstractFunc
+ base.Ctxt.DwFixups = obj.NewDwarfFixupTable(base.Ctxt)
} else {
// turn off inline generation if no dwarf at all
- genDwarfInline = 0
- Ctxt.Flag_locationlists = false
+ base.Flag.GenDwarfInl = 0
+ base.Ctxt.Flag_locationlists = false
}
-
- if flag.NArg() < 1 && debugstr != "help" && debugstr != "ssa/help" {
- usage()
- }
-
- if goversion != "" && goversion != runtime.Version() {
- fmt.Printf("compile: version %q does not match go tool version %q\n", runtime.Version(), goversion)
- Exit(2)
+ if base.Ctxt.Flag_locationlists && len(base.Ctxt.Arch.DWARFRegisters) == 0 {
+ log.Fatalf("location lists requested but register mapping not available on %v", base.Ctxt.Arch.Name)
}
checkLang()
- if symabisPath != "" {
- readSymABIs(symabisPath, myimportpath)
+ if base.Flag.SymABIs != "" {
+ readSymABIs(base.Flag.SymABIs, base.Ctxt.Pkgpath)
}
- thearch.LinkArch.Init(Ctxt)
-
- if outfile == "" {
- p := flag.Arg(0)
- if i := strings.LastIndex(p, "/"); i >= 0 {
- p = p[i+1:]
- }
- if runtime.GOOS == "windows" {
- if i := strings.LastIndex(p, `\`); i >= 0 {
- p = p[i+1:]
- }
- }
- if i := strings.LastIndex(p, "."); i >= 0 {
- p = p[:i]
- }
- suffix := ".o"
- if writearchive {
- suffix = ".a"
- }
- outfile = p + suffix
+ if ispkgin(omit_pkgs) {
+ base.Flag.Race = false
+ base.Flag.MSan = false
}
+ thearch.LinkArch.Init(base.Ctxt)
startProfile()
-
- if flag_race && flag_msan {
- log.Fatal("cannot use both -race and -msan")
- }
- if flag_race || flag_msan {
- // -race and -msan imply -d=checkptr for now.
- Debug_checkptr = 1
- }
- if ispkgin(omit_pkgs) {
- flag_race = false
- flag_msan = false
- }
- if flag_race {
+ if base.Flag.Race {
racepkg = types.NewPkg("runtime/race", "")
}
- if flag_msan {
+ if base.Flag.MSan {
msanpkg = types.NewPkg("runtime/msan", "")
}
- if flag_race || flag_msan {
+ if base.Flag.Race || base.Flag.MSan {
instrumenting = true
}
-
- if compiling_runtime && Debug.N != 0 {
- log.Fatal("cannot disable optimizations while compiling runtime")
- }
- if nBackendWorkers < 1 {
- log.Fatalf("-c must be at least 1, got %d", nBackendWorkers)
- }
- if nBackendWorkers > 1 && !concurrentBackendAllowed() {
- log.Fatalf("cannot use concurrent backend compilation with provided flags; invoked as %v", os.Args)
- }
- if Ctxt.Flag_locationlists && len(Ctxt.Arch.DWARFRegisters) == 0 {
- log.Fatalf("location lists requested but register mapping not available on %v", Ctxt.Arch.Name)
- }
-
- // parse -d argument
- if debugstr != "" {
- Split:
- for _, name := range strings.Split(debugstr, ",") {
- if name == "" {
- continue
- }
- // display help about the -d option itself and quit
- if name == "help" {
- fmt.Print(debugHelpHeader)
- maxLen := len("ssa/help")
- for _, t := range debugtab {
- if len(t.name) > maxLen {
- maxLen = len(t.name)
- }
- }
- for _, t := range debugtab {
- fmt.Printf("\t%-*s\t%s\n", maxLen, t.name, t.help)
- }
- // ssa options have their own help
- fmt.Printf("\t%-*s\t%s\n", maxLen, "ssa/help", "print help about SSA debugging")
- fmt.Print(debugHelpFooter)
- os.Exit(0)
- }
- val, valstring, haveInt := 1, "", true
- if i := strings.IndexAny(name, "=:"); i >= 0 {
- var err error
- name, valstring = name[:i], name[i+1:]
- val, err = strconv.Atoi(valstring)
- if err != nil {
- val, haveInt = 1, false
- }
- }
- for _, t := range debugtab {
- if t.name != name {
- continue
- }
- switch vp := t.val.(type) {
- case nil:
- // Ignore
- case *string:
- *vp = valstring
- case *int:
- if !haveInt {
- log.Fatalf("invalid debug value %v", name)
- }
- *vp = val
- default:
- panic("bad debugtab type")
- }
- continue Split
- }
- // special case for ssa for now
- if strings.HasPrefix(name, "ssa/") {
- // expect form ssa/phase/flag
- // e.g. -d=ssa/generic_cse/time
- // _ in phase name also matches space
- phase := name[4:]
- flag := "debug" // default flag is debug
- if i := strings.Index(phase, "/"); i >= 0 {
- flag = phase[i+1:]
- phase = phase[:i]
- }
- err := ssa.PhaseOption(phase, flag, val, valstring)
- if err != "" {
- log.Fatalf(err)
- }
- continue Split
- }
- log.Fatalf("unknown debug key -d %s\n", name)
- }
- }
-
- if compiling_runtime {
- // Runtime can't use -d=checkptr, at least not yet.
- Debug_checkptr = 0
-
- // Fuzzing the runtime isn't interesting either.
- Debug_libfuzzer = 0
- }
-
- // set via a -d flag
- Ctxt.Debugpcln = Debug_pctab
- if flagDWARF {
- dwarf.EnableLogging(Debug_gendwarfinl != 0)
+ if base.Flag.Dwarf {
+ dwarf.EnableLogging(base.Debug.DwarfInl != 0)
}
-
- if Debug_softfloat != 0 {
+ if base.Debug.SoftFloat != 0 {
thearch.SoftFloat = true
}
- // enable inlining. for now:
- // default: inlining on. (Debug.l == 1)
- // -l: inlining off (Debug.l == 0)
- // -l=2, -l=3: inlining on again, with extra debugging (Debug.l > 1)
- if Debug.l <= 1 {
- Debug.l = 1 - Debug.l
+ if base.Flag.JSON != "" { // parse version,destination from json logging optimization.
+ logopt.LogJsonOption(base.Flag.JSON)
}
- if jsonLogOpt != "" { // parse version,destination from json logging optimization.
- logopt.LogJsonOption(jsonLogOpt)
- }
-
- ssaDump = os.Getenv("GOSSAFUNC")
- ssaDir = os.Getenv("GOSSADIR")
- if ssaDump != "" {
- if strings.HasSuffix(ssaDump, "+") {
- ssaDump = ssaDump[:len(ssaDump)-1]
- ssaDumpStdout = true
- }
- spl := strings.Split(ssaDump, ":")
- if len(spl) > 1 {
- ssaDump = spl[0]
- ssaDumpCFG = spl[1]
- }
- }
-
- trackScopes = flagDWARF
+ ir.EscFmt = escFmt
+ IsIntrinsicCall = isIntrinsicCall
+ SSADumpInline = ssaDumpInline
+ initSSAEnv()
+ initSSATables()
Widthptr = thearch.LinkArch.PtrSize
Widthreg = thearch.LinkArch.RegSize
-
- // initialize types package
- // (we need to do this to break dependencies that otherwise
- // would lead to import cycles)
- types.Widthptr = Widthptr
- types.Dowidth = dowidth
- types.Fatalf = Fatalf
- types.Sconv = func(s *types.Sym, flag, mode int) string {
- return sconv(s, FmtFlag(flag), fmtMode(mode))
- }
- types.Tconv = func(t *types.Type, flag, mode int) string {
- return tconv(t, FmtFlag(flag), fmtMode(mode))
- }
- types.FormatSym = func(sym *types.Sym, s fmt.State, verb rune, mode int) {
- symFormat(sym, s, verb, fmtMode(mode))
- }
- types.FormatType = func(t *types.Type, s fmt.State, verb rune, mode int) {
- typeFormat(t, s, verb, fmtMode(mode))
- }
+ MaxWidth = thearch.MAXWIDTH
types.TypeLinkSym = func(t *types.Type) *obj.LSym {
return typenamesym(t).Linksym()
}
- types.FmtLeft = int(FmtLeft)
- types.FmtUnsigned = int(FmtUnsigned)
- types.FErr = int(FErr)
- types.Ctxt = Ctxt
- initUniverse()
+ Target = new(ir.Package)
- dclcontext = PEXTERN
- nerrors = 0
+ NeedFuncSym = makefuncsym
+ NeedITab = func(t, iface *types.Type) { itabname(t, iface) }
+ NeedRuntimeType = addsignat // TODO(rsc): typenamesym for lock?
autogeneratedPos = makePos(src.NewFileBase("<autogenerated>", "<autogenerated>"), 1, 0)
- timings.Start("fe", "loadsys")
- loadsys()
+ types.TypeLinkSym = func(t *types.Type) *obj.LSym {
+ return typenamesym(t).Linksym()
+ }
+ TypecheckInit()
+ // Parse input.
timings.Start("fe", "parse")
lines := parseFiles(flag.Args())
+ cgoSymABIs()
timings.Stop()
timings.AddEvent(int64(lines), "lines")
-
- finishUniverse()
-
recordPackageName()
- typecheckok = true
-
- // Process top-level declarations in phases.
-
- // Phase 1: const, type, and names and types of funcs.
- // This will gather all the information about types
- // and methods but doesn't depend on any of it.
- //
- // We also defer type alias declarations until phase 2
- // to avoid cycles like #18640.
- // TODO(gri) Remove this again once we have a fix for #25838.
-
- // Don't use range--typecheck can add closures to xtop.
- timings.Start("fe", "typecheck", "top1")
- for i := 0; i < len(xtop); i++ {
- n := xtop[i]
- if op := n.Op; op != ODCL && op != OAS && op != OAS2 && (op != ODCLTYPE || !n.Left.Name.Param.Alias()) {
- xtop[i] = typecheck(n, ctxStmt)
- }
- }
+ // Typecheck.
+ TypecheckPackage()
- // Phase 2: Variable assignments.
- // To check interface assignments, depends on phase 1.
+ // With all user code typechecked, it's now safe to verify unused dot imports.
+ checkDotImports()
+ base.ExitIfErrors()
- // Don't use range--typecheck can add closures to xtop.
- timings.Start("fe", "typecheck", "top2")
- for i := 0; i < len(xtop); i++ {
- n := xtop[i]
- if op := n.Op; op == ODCL || op == OAS || op == OAS2 || op == ODCLTYPE && n.Left.Name.Param.Alias() {
- xtop[i] = typecheck(n, ctxStmt)
- }
+ // Build init task.
+ if initTask := fninit(); initTask != nil {
+ exportsym(initTask)
}
- // Phase 3: Type check function bodies.
- // Don't use range--typecheck can add closures to xtop.
- timings.Start("fe", "typecheck", "func")
- var fcount int64
- for i := 0; i < len(xtop); i++ {
- n := xtop[i]
- if n.Op == ODCLFUNC {
- Curfn = n
- decldepth = 1
- saveerrors()
- typecheckslice(Curfn.Nbody.Slice(), ctxStmt)
- checkreturn(Curfn)
- if nerrors != 0 {
- Curfn.Nbody.Set(nil) // type errors; do not compile
- }
- // Now that we've checked whether n terminates,
- // we can eliminate some obviously dead code.
- deadcode(Curfn)
- fcount++
- }
- }
- // With all types checked, it's now safe to verify map keys. One single
- // check past phase 9 isn't sufficient, as we may exit with other errors
- // before then, thus skipping map key errors.
- checkMapKeys()
- timings.AddEvent(fcount, "funcs")
-
- if nsavederrors+nerrors != 0 {
- errorexit()
- }
-
- fninit(xtop)
-
- // Phase 4: Decide how to capture closed variables.
- // This needs to run before escape analysis,
- // because variables captured by value do not escape.
- timings.Start("fe", "capturevars")
- for _, n := range xtop {
- if n.Op == ODCLFUNC && n.Func.Closure != nil {
- Curfn = n
- capturevars(n)
- }
- }
- capturevarscomplete = true
-
- Curfn = nil
-
- if nsavederrors+nerrors != 0 {
- errorexit()
- }
-
- // Phase 5: Inlining
+ // Inlining
timings.Start("fe", "inlining")
- if Debug_typecheckinl != 0 {
- // Typecheck imported function bodies if Debug.l > 1,
- // otherwise lazily when used or re-exported.
- for _, n := range importlist {
- if n.Func.Inl != nil {
- saveerrors()
- typecheckinl(n)
- }
- }
-
- if nsavederrors+nerrors != 0 {
- errorexit()
- }
+ if base.Flag.LowerL != 0 {
+ InlinePackage()
}
- if Debug.l != 0 {
- // Find functions that can be inlined and clone them before walk expands them.
- visitBottomUp(xtop, func(list []*Node, recursive bool) {
- numfns := numNonClosures(list)
- for _, n := range list {
- if !recursive || numfns > 1 {
- // We allow inlining if there is no
- // recursion, or the recursion cycle is
- // across more than one function.
- caninl(n)
- } else {
- if Debug.m > 1 {
- fmt.Printf("%v: cannot inline %v: recursive\n", n.Line(), n.Func.Nname)
- }
- }
- inlcalls(n)
- }
- })
- }
-
- for _, n := range xtop {
- if n.Op == ODCLFUNC {
- devirtualize(n)
+ // Devirtualize.
+ for _, n := range Target.Decls {
+ if n.Op() == ir.ODCLFUNC {
+ devirtualize(n.(*ir.Func))
}
}
Curfn = nil
- // Phase 6: Escape analysis.
+ // Escape analysis.
// Required for moving heap allocations onto stack,
// which in turn is required by the closure implementation,
// which stores the addresses of stack variables into the closure.
// Large values are also moved off stack in escape analysis;
// because large values may contain pointers, it must happen early.
timings.Start("fe", "escapes")
- escapes(xtop)
+ escapes(Target.Decls)
// Collect information for go:nowritebarrierrec
// checking. This must happen before transformclosure.
// We'll do the final check after write barriers are
// inserted.
- if compiling_runtime {
- nowritebarrierrecCheck = newNowritebarrierrecChecker()
+ if base.Flag.CompilingRuntime {
+ EnableNoWriteBarrierRecCheck()
}
- // Phase 7: Transform closure bodies to properly reference captured variables.
+ // Transform closure bodies to properly reference captured variables.
// This needs to happen before walk, because closures must be transformed
// before walk reaches a call of a closure.
timings.Start("fe", "xclosures")
- for _, n := range xtop {
- if n.Op == ODCLFUNC && n.Func.Closure != nil {
- Curfn = n
- transformclosure(n)
+ for _, n := range Target.Decls {
+ if n.Op() == ir.ODCLFUNC {
+ n := n.(*ir.Func)
+ if n.Func().OClosure != nil {
+ Curfn = n
+ transformclosure(n)
+ }
}
}
Curfn = nil
peekitabs()
- // Phase 8: Compile top level functions.
- // Don't use range--walk can add functions to xtop.
+ // Compile top level functions.
+ // Don't use range--walk can add functions to Target.Decls.
timings.Start("be", "compilefuncs")
- fcount = 0
- for i := 0; i < len(xtop); i++ {
- n := xtop[i]
- if n.Op == ODCLFUNC {
- funccompile(n)
+ fcount := int64(0)
+ for i := 0; i < len(Target.Decls); i++ {
+ n := Target.Decls[i]
+ if n.Op() == ir.ODCLFUNC {
+ funccompile(n.(*ir.Func))
fcount++
}
}
compileFunctions()
- if nowritebarrierrecCheck != nil {
- // Write barriers are now known. Check the
- // call graph.
- nowritebarrierrecCheck.check()
- nowritebarrierrecCheck = nil
+ if base.Flag.CompilingRuntime {
+ // Write barriers are now known. Check the call graph.
+ NoWriteBarrierRecCheck()
}
// Finalize DWARF inline routine DIEs, then explicitly turn off
// DWARF inlining gen so as to avoid problems with generated
// method wrappers.
- if Ctxt.DwFixups != nil {
- Ctxt.DwFixups.Finalize(myimportpath, Debug_gendwarfinl != 0)
- Ctxt.DwFixups = nil
- genDwarfInline = 0
- }
-
- // Phase 9: Check external declarations.
- timings.Start("be", "externaldcls")
- for i, n := range externdcl {
- if n.Op == ONAME {
- externdcl[i] = typecheck(externdcl[i], ctxExpr)
- }
- }
- // Check the map keys again, since we typechecked the external
- // declarations.
- checkMapKeys()
-
- if nerrors+nsavederrors != 0 {
- errorexit()
+ if base.Ctxt.DwFixups != nil {
+ base.Ctxt.DwFixups.Finalize(base.Ctxt.Pkgpath, base.Debug.DwarfInl != 0)
+ base.Ctxt.DwFixups = nil
+ base.Flag.GenDwarfInl = 0
}
// Write object data to disk.
timings.Start("be", "dumpobj")
dumpdata()
- Ctxt.NumberSyms()
+ base.Ctxt.NumberSyms()
dumpobj()
- if asmhdr != "" {
+ if base.Flag.AsmHdr != "" {
dumpasmhdr()
}
+ CheckLargeStacks()
+ CheckFuncStack()
+
+ if len(compilequeue) != 0 {
+ base.Fatalf("%d uncompiled functions", len(compilequeue))
+ }
+
+ logopt.FlushLoggedOpts(base.Ctxt, base.Ctxt.Pkgpath)
+ base.ExitIfErrors()
+
+ base.FlushErrors()
+ timings.Stop()
+
+ if base.Flag.Bench != "" {
+ if err := writebench(base.Flag.Bench); err != nil {
+ log.Fatalf("cannot write benchmark data: %v", err)
+ }
+ }
+}
+
+func CheckLargeStacks() {
// Check whether any of the functions we have compiled have gigantic stack frames.
sort.Slice(largeStackFrames, func(i, j int) bool {
return largeStackFrames[i].pos.Before(largeStackFrames[j].pos)
})
for _, large := range largeStackFrames {
if large.callee != 0 {
- yyerrorl(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args + %d MB callee", large.locals>>20, large.args>>20, large.callee>>20)
+ base.ErrorfAt(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args + %d MB callee", large.locals>>20, large.args>>20, large.callee>>20)
} else {
- yyerrorl(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args", large.locals>>20, large.args>>20)
+ base.ErrorfAt(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args", large.locals>>20, large.args>>20)
}
}
+}
- if len(funcStack) != 0 {
- Fatalf("funcStack is non-empty: %v", len(funcStack))
- }
- if len(compilequeue) != 0 {
- Fatalf("%d uncompiled functions", len(compilequeue))
- }
-
- logopt.FlushLoggedOpts(Ctxt, myimportpath)
-
- if nerrors+nsavederrors != 0 {
- errorexit()
- }
-
- flusherrors()
- timings.Stop()
-
- if benchfile != "" {
- if err := writebench(benchfile); err != nil {
- log.Fatalf("cannot write benchmark data: %v", err)
+func cgoSymABIs() {
+ // The linker expects an ABI0 wrapper for all cgo-exported
+ // functions.
+ for _, prag := range Target.CgoPragmas {
+ switch prag[0] {
+ case "cgo_export_static", "cgo_export_dynamic":
+ if symabiRefs == nil {
+ symabiRefs = make(map[string]obj.ABI)
+ }
+ symabiRefs[prag[1]] = obj.ABI0
}
}
}
// numNonClosures returns the number of functions in list which are not closures.
-func numNonClosures(list []*Node) int {
+func numNonClosures(list []*ir.Func) int {
count := 0
- for _, n := range list {
- if n.Func.Closure == nil {
+ for _, fn := range list {
+ if fn.OClosure == nil {
count++
}
}
fmt.Fprintln(&buf, "commit:", objabi.Version)
fmt.Fprintln(&buf, "goos:", runtime.GOOS)
fmt.Fprintln(&buf, "goarch:", runtime.GOARCH)
- timings.Write(&buf, "BenchmarkCompile:"+myimportpath+":")
+ timings.Write(&buf, "BenchmarkCompile:"+base.Ctxt.Pkgpath+":")
n, err := f.Write(buf.Bytes())
if err != nil {
return f.Close()
}
-var (
- importMap = map[string]string{}
- packageFile map[string]string // nil means not in use
-)
-
-func addImportMap(s string) {
- if strings.Count(s, "=") != 1 {
- log.Fatal("-importmap argument must be of the form source=actual")
- }
- i := strings.Index(s, "=")
- source, actual := s[:i], s[i+1:]
- if source == "" || actual == "" {
- log.Fatal("-importmap argument must be of the form source=actual; source and actual must be non-empty")
- }
- importMap[source] = actual
-}
-
-func readImportCfg(file string) {
- packageFile = map[string]string{}
- data, err := ioutil.ReadFile(file)
- if err != nil {
- log.Fatalf("-importcfg: %v", err)
- }
-
- for lineNum, line := range strings.Split(string(data), "\n") {
- lineNum++ // 1-based
- line = strings.TrimSpace(line)
- if line == "" || strings.HasPrefix(line, "#") {
- continue
- }
-
- var verb, args string
- if i := strings.Index(line, " "); i < 0 {
- verb = line
- } else {
- verb, args = line[:i], strings.TrimSpace(line[i+1:])
- }
- var before, after string
- if i := strings.Index(args, "="); i >= 0 {
- before, after = args[:i], args[i+1:]
- }
- switch verb {
- default:
- log.Fatalf("%s:%d: unknown directive %q", file, lineNum, verb)
- case "importmap":
- if before == "" || after == "" {
- log.Fatalf(`%s:%d: invalid importmap: syntax is "importmap old=new"`, file, lineNum)
- }
- importMap[before] = after
- case "packagefile":
- if before == "" || after == "" {
- log.Fatalf(`%s:%d: invalid packagefile: syntax is "packagefile path=filename"`, file, lineNum)
- }
- packageFile[before] = after
- }
- }
-}
-
// symabiDefs and symabiRefs record the defined and referenced ABIs of
// symbols required by non-Go code. These are keyed by link symbol
// name, where the local package prefix is always `"".`
}
}
-func saveerrors() {
- nsavederrors += nerrors
- nerrors = 0
-}
-
func arsize(b *bufio.Reader, name string) int {
var buf [ArhdrSize]byte
if _, err := io.ReadFull(b, buf[:]); err != nil {
return i
}
-var idirs []string
-
-func addidir(dir string) {
- if dir != "" {
- idirs = append(idirs, dir)
- }
-}
-
func isDriveLetter(b byte) bool {
return 'a' <= b && b <= 'z' || 'A' <= b && b <= 'Z'
}
func findpkg(name string) (file string, ok bool) {
if islocalname(name) {
- if nolocalimports {
+ if base.Flag.NoLocalImports {
return "", false
}
- if packageFile != nil {
- file, ok = packageFile[name]
+ if base.Flag.Cfg.PackageFile != nil {
+ file, ok = base.Flag.Cfg.PackageFile[name]
return file, ok
}
// don't want to see "encoding/../encoding/base64"
// as different from "encoding/base64".
if q := path.Clean(name); q != name {
- yyerror("non-canonical import path %q (should be %q)", name, q)
+ base.Errorf("non-canonical import path %q (should be %q)", name, q)
return "", false
}
- if packageFile != nil {
- file, ok = packageFile[name]
+ if base.Flag.Cfg.PackageFile != nil {
+ file, ok = base.Flag.Cfg.PackageFile[name]
return file, ok
}
- for _, dir := range idirs {
+ for _, dir := range base.Flag.Cfg.ImportDirs {
file = fmt.Sprintf("%s/%s.a", dir, name)
if _, err := os.Stat(file); err == nil {
return file, true
if objabi.GOROOT != "" {
suffix := ""
suffixsep := ""
- if flag_installsuffix != "" {
+ if base.Flag.InstallSuffix != "" {
suffixsep = "_"
- suffix = flag_installsuffix
- } else if flag_race {
+ suffix = base.Flag.InstallSuffix
+ } else if base.Flag.Race {
suffixsep = "_"
suffix = "race"
- } else if flag_msan {
+ } else if base.Flag.MSan {
suffixsep = "_"
suffix = "msan"
}
case varTag:
importvar(Runtimepkg, src.NoXPos, sym, typ)
default:
- Fatalf("unhandled declaration tag %v", d.tag)
+ base.Fatalf("unhandled declaration tag %v", d.tag)
}
}
// imported so far.
var myheight int
-func importfile(f *Val) *types.Pkg {
- path_, ok := f.U.(string)
- if !ok {
- yyerror("import path must be a string")
+func importfile(f constant.Value) *types.Pkg {
+ if f.Kind() != constant.String {
+ base.Errorf("import path must be a string")
return nil
}
+ path_ := constant.StringVal(f)
if len(path_) == 0 {
- yyerror("import path is empty")
+ base.Errorf("import path is empty")
return nil
}
// the main package, just as we reserve the import
// path "math" to identify the standard math package.
if path_ == "main" {
- yyerror("cannot import \"main\"")
- errorexit()
+ base.Errorf("cannot import \"main\"")
+ base.ErrorExit()
}
- if myimportpath != "" && path_ == myimportpath {
- yyerror("import %q while compiling that package (import cycle)", path_)
- errorexit()
+ if base.Ctxt.Pkgpath != "" && path_ == base.Ctxt.Pkgpath {
+ base.Errorf("import %q while compiling that package (import cycle)", path_)
+ base.ErrorExit()
}
- if mapped, ok := importMap[path_]; ok {
+ if mapped, ok := base.Flag.Cfg.ImportMap[path_]; ok {
path_ = mapped
}
if islocalname(path_) {
if path_[0] == '/' {
- yyerror("import path cannot be absolute path")
+ base.Errorf("import path cannot be absolute path")
return nil
}
- prefix := Ctxt.Pathname
- if localimport != "" {
- prefix = localimport
+ prefix := base.Ctxt.Pathname
+ if base.Flag.D != "" {
+ prefix = base.Flag.D
}
path_ = path.Join(prefix, path_)
file, found := findpkg(path_)
if !found {
- yyerror("can't find import: %q", path_)
- errorexit()
+ base.Errorf("can't find import: %q", path_)
+ base.ErrorExit()
}
importpkg := types.NewPkg(path_, "")
imp, err := bio.Open(file)
if err != nil {
- yyerror("can't open import: %q: %v", path_, err)
- errorexit()
+ base.Errorf("can't open import: %q: %v", path_, err)
+ base.ErrorExit()
}
defer imp.Close()
// check object header
p, err := imp.ReadString('\n')
if err != nil {
- yyerror("import %s: reading input: %v", file, err)
- errorexit()
+ base.Errorf("import %s: reading input: %v", file, err)
+ base.ErrorExit()
}
if p == "!<arch>\n" { // package archive
// package export block should be first
sz := arsize(imp.Reader, "__.PKGDEF")
if sz <= 0 {
- yyerror("import %s: not a package file", file)
- errorexit()
+ base.Errorf("import %s: not a package file", file)
+ base.ErrorExit()
}
p, err = imp.ReadString('\n')
if err != nil {
- yyerror("import %s: reading input: %v", file, err)
- errorexit()
+ base.Errorf("import %s: reading input: %v", file, err)
+ base.ErrorExit()
}
}
if !strings.HasPrefix(p, "go object ") {
- yyerror("import %s: not a go object file: %s", file, p)
- errorexit()
+ base.Errorf("import %s: not a go object file: %s", file, p)
+ base.ErrorExit()
}
q := fmt.Sprintf("%s %s %s %s\n", objabi.GOOS, objabi.GOARCH, objabi.Version, objabi.Expstring())
if p[10:] != q {
- yyerror("import %s: object is [%s] expected [%s]", file, p[10:], q)
- errorexit()
+ base.Errorf("import %s: object is [%s] expected [%s]", file, p[10:], q)
+ base.ErrorExit()
}
// process header lines
for {
p, err = imp.ReadString('\n')
if err != nil {
- yyerror("import %s: reading input: %v", file, err)
- errorexit()
+ base.Errorf("import %s: reading input: %v", file, err)
+ base.ErrorExit()
}
if p == "\n" {
break // header ends with blank line
}
}
- // In the importfile, if we find:
- // $$\n (textual format): not supported anymore
- // $$B\n (binary format) : import directly, then feed the lexer a dummy statement
+ // Expect $$B\n to signal binary import format.
// look for $$
var c byte
var fingerprint goobj.FingerprintType
switch c {
case '\n':
- yyerror("cannot import %s: old export format no longer supported (recompile library)", path_)
+ base.Errorf("cannot import %s: old export format no longer supported (recompile library)", path_)
return nil
case 'B':
- if Debug_export != 0 {
+ if base.Debug.Export != 0 {
fmt.Printf("importing %s (%s)\n", path_, file)
}
imp.ReadByte() // skip \n after $$B
c, err = imp.ReadByte()
if err != nil {
- yyerror("import %s: reading input: %v", file, err)
- errorexit()
+ base.Errorf("import %s: reading input: %v", file, err)
+ base.ErrorExit()
}
// Indexed format is distinguished by an 'i' byte,
// whereas previous export formats started with 'c', 'd', or 'v'.
if c != 'i' {
- yyerror("import %s: unexpected package format byte: %v", file, c)
- errorexit()
+ base.Errorf("import %s: unexpected package format byte: %v", file, c)
+ base.ErrorExit()
}
fingerprint = iimport(importpkg, imp)
default:
- yyerror("no import in %q", path_)
- errorexit()
+ base.Errorf("no import in %q", path_)
+ base.ErrorExit()
}
// assume files move (get installed) so don't record the full path
- if packageFile != nil {
+ if base.Flag.Cfg.PackageFile != nil {
// If using a packageFile map, assume path_ can be recorded directly.
- Ctxt.AddImport(path_, fingerprint)
+ base.Ctxt.AddImport(path_, fingerprint)
} else {
// For file "/Users/foo/go/pkg/darwin_amd64/math.a" record "math.a".
- Ctxt.AddImport(file[len(file)-len(path_)-len(".a"):], fingerprint)
+ base.Ctxt.AddImport(file[len(file)-len(path_)-len(".a"):], fingerprint)
}
if importpkg.Height >= myheight {
elem = elem[i+1:]
}
if name == "" || elem == name {
- yyerrorl(lineno, "imported and not used: %q", path)
+ base.ErrorfAt(lineno, "imported and not used: %q", path)
} else {
- yyerrorl(lineno, "imported and not used: %q as %s", path, name)
+ base.ErrorfAt(lineno, "imported and not used: %q as %s", path, name)
}
}
func mkpackage(pkgname string) {
- if localpkg.Name == "" {
+ if types.LocalPkg.Name == "" {
if pkgname == "_" {
- yyerror("invalid package name _")
+ base.Errorf("invalid package name _")
}
- localpkg.Name = pkgname
+ types.LocalPkg.Name = pkgname
} else {
- if pkgname != localpkg.Name {
- yyerror("package %s; expected %s", pkgname, localpkg.Name)
+ if pkgname != types.LocalPkg.Name {
+ base.Errorf("package %s; expected %s", pkgname, types.LocalPkg.Name)
}
}
}
}
var unused []importedPkg
- for _, s := range localpkg.Syms {
- n := asNode(s.Def)
+ for _, s := range types.LocalPkg.Syms {
+ n := ir.AsNode(s.Def)
if n == nil {
continue
}
- if n.Op == OPACK {
+ if n.Op() == ir.OPACK {
// throw away top-level package name left over
// from previous file.
// leave s->block set to cause redeclaration
// errors if a conflicting top-level name is
// introduced by a different file.
- if !n.Name.Used() && nsyntaxerrors == 0 {
- unused = append(unused, importedPkg{n.Pos, n.Name.Pkg.Path, s.Name})
+ p := n.(*ir.PkgName)
+ if !p.Used && base.SyntaxErrors() == 0 {
+ unused = append(unused, importedPkg{p.Pos(), p.Pkg.Path, s.Name})
}
s.Def = nil
continue
if IsAlias(s) {
// throw away top-level name left over
// from previous import . "x"
- if n.Name != nil && n.Name.Pack != nil && !n.Name.Pack.Name.Used() && nsyntaxerrors == 0 {
- unused = append(unused, importedPkg{n.Name.Pack.Pos, n.Name.Pack.Name.Pkg.Path, ""})
- n.Name.Pack.Name.SetUsed(true)
- }
+ // We'll report errors after type checking in checkDotImports.
s.Def = nil
continue
}
}
func IsAlias(sym *types.Sym) bool {
- return sym.Def != nil && asNode(sym.Def).Sym != sym
-}
-
-// By default, assume any debug flags are incompatible with concurrent
-// compilation. A few are safe and potentially in common use for
-// normal compiles, though; return true for those.
-func concurrentFlagOk() bool {
- // Report whether any debug flag that would prevent concurrent
- // compilation is set, by zeroing out the allowed ones and then
- // checking if the resulting struct is zero.
- d := Debug
- d.B = 0 // disable bounds checking
- d.C = 0 // disable printing of columns in error messages
- d.e = 0 // no limit on errors; errors all come from non-concurrent code
- d.N = 0 // disable optimizations
- d.l = 0 // disable inlining
- d.w = 0 // all printing happens before compilation
- d.W = 0 // all printing happens before compilation
- d.S = 0 // printing disassembly happens at the end (but see concurrentBackendAllowed below)
-
- return d == DebugFlags{}
-}
-
-func concurrentBackendAllowed() bool {
- if !concurrentFlagOk() {
- return false
- }
-
- // Debug.S by itself is ok, because all printing occurs
- // while writing the object file, and that is non-concurrent.
- // Adding Debug_vlog, however, causes Debug.S to also print
- // while flushing the plist, which happens concurrently.
- if Debug_vlog || debugstr != "" || debuglive > 0 {
- return false
- }
- // TODO: Test and delete this condition.
- if objabi.Fieldtrack_enabled != 0 {
- return false
- }
- // TODO: fix races and enable the following flags
- if Ctxt.Flag_shared || Ctxt.Flag_dynlink || flag_race {
- return false
- }
- return true
+ return sym.Def != nil && sym.Def.Sym() != sym
}
// recordFlags records the specified command-line flags to be placed
// in the DWARF info.
func recordFlags(flags ...string) {
- if myimportpath == "" {
+ if base.Ctxt.Pkgpath == "" {
// We can't record the flags if we don't know what the
// package name is.
return
if cmd.Len() == 0 {
return
}
- s := Ctxt.Lookup(dwarf.CUInfoPrefix + "producer." + myimportpath)
+ s := base.Ctxt.Lookup(dwarf.CUInfoPrefix + "producer." + base.Ctxt.Pkgpath)
s.Type = objabi.SDWARFCUINFO
// Sometimes (for example when building tests) we can link
// together two package main archives. So allow dups.
s.Set(obj.AttrDuplicateOK, true)
- Ctxt.Data = append(Ctxt.Data, s)
+ base.Ctxt.Data = append(base.Ctxt.Data, s)
s.P = cmd.Bytes()[1:]
}
// recordPackageName records the name of the package being
// compiled, so that the linker can save it in the compile unit's DIE.
func recordPackageName() {
- s := Ctxt.Lookup(dwarf.CUInfoPrefix + "packagename." + myimportpath)
+ s := base.Ctxt.Lookup(dwarf.CUInfoPrefix + "packagename." + base.Ctxt.Pkgpath)
s.Type = objabi.SDWARFCUINFO
// Sometimes (for example when building tests) we can link
// together two package main archives. So allow dups.
s.Set(obj.AttrDuplicateOK, true)
- Ctxt.Data = append(Ctxt.Data, s)
- s.P = []byte(localpkg.Name)
+ base.Ctxt.Data = append(base.Ctxt.Data, s)
+ s.P = []byte(types.LocalPkg.Name)
}
-// flag_lang is the language version we are compiling for, set by the -lang flag.
-var flag_lang string
-
// currentLang returns the current language version.
func currentLang() string {
return fmt.Sprintf("go1.%d", goversion.Version)
// any language version is supported.
var langWant lang
-// langSupported reports whether language version major.minor is
-// supported in a particular package.
-func langSupported(major, minor int, pkg *types.Pkg) bool {
+// AllowsGoVersion reports whether a particular package
+// is allowed to use Go version major.minor.
+// We assume the imported packages have all been checked,
+// so we only have to check the local package against the -lang flag.
+func AllowsGoVersion(pkg *types.Pkg, major, minor int) bool {
if pkg == nil {
// TODO(mdempsky): Set Pkg for local types earlier.
- pkg = localpkg
+ pkg = types.LocalPkg
}
- if pkg != localpkg {
+ if pkg != types.LocalPkg {
// Assume imported packages passed type-checking.
return true
}
-
if langWant.major == 0 && langWant.minor == 0 {
return true
}
return langWant.major > major || (langWant.major == major && langWant.minor >= minor)
}
+func langSupported(major, minor int, pkg *types.Pkg) bool {
+ return AllowsGoVersion(pkg, major, minor)
+}
+
// checkLang verifies that the -lang flag holds a valid value, and
// exits if not. It initializes data used by langSupported.
func checkLang() {
- if flag_lang == "" {
+ if base.Flag.Lang == "" {
return
}
var err error
- langWant, err = parseLang(flag_lang)
+ langWant, err = parseLang(base.Flag.Lang)
if err != nil {
- log.Fatalf("invalid value %q for -lang: %v", flag_lang, err)
+ log.Fatalf("invalid value %q for -lang: %v", base.Flag.Lang, err)
}
- if def := currentLang(); flag_lang != def {
+ if def := currentLang(); base.Flag.Lang != def {
defVers, err := parseLang(def)
if err != nil {
log.Fatalf("internal error parsing default lang %q: %v", def, err)
}
if langWant.major > defVers.major || (langWant.major == defVers.major && langWant.minor > defVers.minor) {
- log.Fatalf("invalid value %q for -lang: max known version is %q", flag_lang, def)
+ log.Fatalf("invalid value %q for -lang: max known version is %q", base.Flag.Lang, def)
}
}
}
}
return lang{major: major, minor: minor}, nil
}
+
+// useABIWrapGen reports whether the compiler should generate an
+// ABI wrapper for the function 'f'.
+func useABIWrapGen(f *ir.Func) bool {
+ if !base.Flag.ABIWrap {
+ return false
+ }
+
+ // Support limit option for bisecting.
+ if base.Flag.ABIWrapLimit == 1 {
+ return false
+ }
+ if base.Flag.ABIWrapLimit < 1 {
+ return true
+ }
+ base.Flag.ABIWrapLimit--
+ if base.Debug.ABIWrap != 0 && base.Flag.ABIWrapLimit == 1 {
+ fmt.Fprintf(os.Stderr, "=-= limit reached after new wrapper for %s\n",
+ f.LSym.Name)
+ }
+
+ return true
+}
fmt.Fprintln(&b)
fmt.Fprintln(&b, "package gc")
fmt.Fprintln(&b)
- fmt.Fprintln(&b, `import "cmd/compile/internal/types"`)
+ fmt.Fprintln(&b, `import (`)
+ fmt.Fprintln(&b, ` "cmd/compile/internal/ir"`)
+ fmt.Fprintln(&b, ` "cmd/compile/internal/types"`)
+ fmt.Fprintln(&b, `)`)
mkbuiltin(&b, "runtime")
case *ast.Ident:
switch t.Name {
case "byte":
- return "types.Bytetype"
+ return "types.ByteType"
case "rune":
- return "types.Runetype"
+ return "types.RuneType"
}
- return fmt.Sprintf("types.Types[T%s]", strings.ToUpper(t.Name))
+ return fmt.Sprintf("types.Types[types.T%s]", strings.ToUpper(t.Name))
case *ast.SelectorExpr:
if t.X.(*ast.Ident).Name != "unsafe" || t.Sel.Name != "Pointer" {
log.Fatalf("unhandled type: %#v", t)
}
- return "types.Types[TUNSAFEPTR]"
+ return "types.Types[types.TUNSAFEPTR]"
case *ast.ArrayType:
if t.Len == nil {
if len(t.Methods.List) != 0 {
log.Fatal("non-empty interfaces unsupported")
}
- return "types.Types[TINTER]"
+ return "types.Types[types.TINTER]"
case *ast.MapType:
return fmt.Sprintf("types.NewMap(%s, %s)", i.subtype(t.Key), i.subtype(t.Value))
case *ast.StarExpr:
}
}
}
- return fmt.Sprintf("[]*Node{%s}", strings.Join(res, ", "))
+ return fmt.Sprintf("[]*ir.Field{%s}", strings.Join(res, ", "))
}
func intconst(e ast.Expr) int64 {
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
- "fmt"
- "math"
- "math/big"
-)
-
-// implements float arithmetic
-
-const (
- // Maximum size in bits for Mpints before signalling
- // overflow and also mantissa precision for Mpflts.
- Mpprec = 512
- // Turn on for constant arithmetic debugging output.
- Mpdebug = false
-)
-
-// Mpflt represents a floating-point constant.
-type Mpflt struct {
- Val big.Float
-}
-
-// Mpcplx represents a complex constant.
-type Mpcplx struct {
- Real Mpflt
- Imag Mpflt
-}
-
-// Use newMpflt (not new(Mpflt)!) to get the correct default precision.
-func newMpflt() *Mpflt {
- var a Mpflt
- a.Val.SetPrec(Mpprec)
- return &a
-}
-
-// Use newMpcmplx (not new(Mpcplx)!) to get the correct default precision.
-func newMpcmplx() *Mpcplx {
- var a Mpcplx
- a.Real = *newMpflt()
- a.Imag = *newMpflt()
- return &a
-}
-
-func (a *Mpflt) SetInt(b *Mpint) {
- if b.checkOverflow(0) {
- // sign doesn't really matter but copy anyway
- a.Val.SetInf(b.Val.Sign() < 0)
- return
- }
- a.Val.SetInt(&b.Val)
-}
-
-func (a *Mpflt) Set(b *Mpflt) {
- a.Val.Set(&b.Val)
-}
-
-func (a *Mpflt) Add(b *Mpflt) {
- if Mpdebug {
- fmt.Printf("\n%v + %v", a, b)
- }
-
- a.Val.Add(&a.Val, &b.Val)
-
- if Mpdebug {
- fmt.Printf(" = %v\n\n", a)
- }
-}
-
-func (a *Mpflt) AddFloat64(c float64) {
- var b Mpflt
-
- b.SetFloat64(c)
- a.Add(&b)
-}
-
-func (a *Mpflt) Sub(b *Mpflt) {
- if Mpdebug {
- fmt.Printf("\n%v - %v", a, b)
- }
-
- a.Val.Sub(&a.Val, &b.Val)
-
- if Mpdebug {
- fmt.Printf(" = %v\n\n", a)
- }
-}
-
-func (a *Mpflt) Mul(b *Mpflt) {
- if Mpdebug {
- fmt.Printf("%v\n * %v\n", a, b)
- }
-
- a.Val.Mul(&a.Val, &b.Val)
-
- if Mpdebug {
- fmt.Printf(" = %v\n\n", a)
- }
-}
-
-func (a *Mpflt) MulFloat64(c float64) {
- var b Mpflt
-
- b.SetFloat64(c)
- a.Mul(&b)
-}
-
-func (a *Mpflt) Quo(b *Mpflt) {
- if Mpdebug {
- fmt.Printf("%v\n / %v\n", a, b)
- }
-
- a.Val.Quo(&a.Val, &b.Val)
-
- if Mpdebug {
- fmt.Printf(" = %v\n\n", a)
- }
-}
-
-func (a *Mpflt) Cmp(b *Mpflt) int {
- return a.Val.Cmp(&b.Val)
-}
-
-func (a *Mpflt) CmpFloat64(c float64) int {
- if c == 0 {
- return a.Val.Sign() // common case shortcut
- }
- return a.Val.Cmp(big.NewFloat(c))
-}
-
-func (a *Mpflt) Float64() float64 {
- x, _ := a.Val.Float64()
-
- // check for overflow
- if math.IsInf(x, 0) && nsavederrors+nerrors == 0 {
- Fatalf("ovf in Mpflt Float64")
- }
-
- return x + 0 // avoid -0 (should not be needed, but be conservative)
-}
-
-func (a *Mpflt) Float32() float64 {
- x32, _ := a.Val.Float32()
- x := float64(x32)
-
- // check for overflow
- if math.IsInf(x, 0) && nsavederrors+nerrors == 0 {
- Fatalf("ovf in Mpflt Float32")
- }
-
- return x + 0 // avoid -0 (should not be needed, but be conservative)
-}
-
-func (a *Mpflt) SetFloat64(c float64) {
- if Mpdebug {
- fmt.Printf("\nconst %g", c)
- }
-
- // convert -0 to 0
- if c == 0 {
- c = 0
- }
- a.Val.SetFloat64(c)
-
- if Mpdebug {
- fmt.Printf(" = %v\n", a)
- }
-}
-
-func (a *Mpflt) Neg() {
- // avoid -0
- if a.Val.Sign() != 0 {
- a.Val.Neg(&a.Val)
- }
-}
-
-func (a *Mpflt) SetString(as string) {
- f, _, err := a.Val.Parse(as, 0)
- if err != nil {
- yyerror("malformed constant: %s (%v)", as, err)
- a.Val.SetFloat64(0)
- return
- }
-
- if f.IsInf() {
- yyerror("constant too large: %s", as)
- a.Val.SetFloat64(0)
- return
- }
-
- // -0 becomes 0
- if f.Sign() == 0 && f.Signbit() {
- a.Val.SetFloat64(0)
- }
-}
-
-func (f *Mpflt) String() string {
- return f.Val.Text('b', 0)
-}
-
-func (fvp *Mpflt) GoString() string {
- // determine sign
- sign := ""
- f := &fvp.Val
- if f.Sign() < 0 {
- sign = "-"
- f = new(big.Float).Abs(f)
- }
-
- // Don't try to convert infinities (will not terminate).
- if f.IsInf() {
- return sign + "Inf"
- }
-
- // Use exact fmt formatting if in float64 range (common case):
- // proceed if f doesn't underflow to 0 or overflow to inf.
- if x, _ := f.Float64(); f.Sign() == 0 == (x == 0) && !math.IsInf(x, 0) {
- return fmt.Sprintf("%s%.6g", sign, x)
- }
-
- // Out of float64 range. Do approximate manual to decimal
- // conversion to avoid precise but possibly slow Float
- // formatting.
- // f = mant * 2**exp
- var mant big.Float
- exp := f.MantExp(&mant) // 0.5 <= mant < 1.0
-
- // approximate float64 mantissa m and decimal exponent d
- // f ~ m * 10**d
- m, _ := mant.Float64() // 0.5 <= m < 1.0
- d := float64(exp) * (math.Ln2 / math.Ln10) // log_10(2)
-
- // adjust m for truncated (integer) decimal exponent e
- e := int64(d)
- m *= math.Pow(10, d-float64(e))
-
- // ensure 1 <= m < 10
- switch {
- case m < 1-0.5e-6:
- // The %.6g format below rounds m to 5 digits after the
- // decimal point. Make sure that m*10 < 10 even after
- // rounding up: m*10 + 0.5e-5 < 10 => m < 1 - 0.5e6.
- m *= 10
- e--
- case m >= 10:
- m /= 10
- e++
- }
-
- return fmt.Sprintf("%s%.6ge%+d", sign, m, e)
-}
-
-// complex multiply v *= rv
-// (a, b) * (c, d) = (a*c - b*d, b*c + a*d)
-func (v *Mpcplx) Mul(rv *Mpcplx) {
- var ac, ad, bc, bd Mpflt
-
- ac.Set(&v.Real)
- ac.Mul(&rv.Real) // ac
-
- bd.Set(&v.Imag)
- bd.Mul(&rv.Imag) // bd
-
- bc.Set(&v.Imag)
- bc.Mul(&rv.Real) // bc
-
- ad.Set(&v.Real)
- ad.Mul(&rv.Imag) // ad
-
- v.Real.Set(&ac)
- v.Real.Sub(&bd) // ac-bd
-
- v.Imag.Set(&bc)
- v.Imag.Add(&ad) // bc+ad
-}
-
-// complex divide v /= rv
-// (a, b) / (c, d) = ((a*c + b*d), (b*c - a*d))/(c*c + d*d)
-func (v *Mpcplx) Div(rv *Mpcplx) bool {
- if rv.Real.CmpFloat64(0) == 0 && rv.Imag.CmpFloat64(0) == 0 {
- return false
- }
-
- var ac, ad, bc, bd, cc_plus_dd Mpflt
-
- cc_plus_dd.Set(&rv.Real)
- cc_plus_dd.Mul(&rv.Real) // cc
-
- ac.Set(&rv.Imag)
- ac.Mul(&rv.Imag) // dd
- cc_plus_dd.Add(&ac) // cc+dd
-
- // We already checked that c and d are not both zero, but we can't
- // assume that c²+d² != 0 follows, because for tiny values of c
- // and/or d c²+d² can underflow to zero. Check that c²+d² is
- // nonzero, return if it's not.
- if cc_plus_dd.CmpFloat64(0) == 0 {
- return false
- }
-
- ac.Set(&v.Real)
- ac.Mul(&rv.Real) // ac
-
- bd.Set(&v.Imag)
- bd.Mul(&rv.Imag) // bd
-
- bc.Set(&v.Imag)
- bc.Mul(&rv.Real) // bc
-
- ad.Set(&v.Real)
- ad.Mul(&rv.Imag) // ad
-
- v.Real.Set(&ac)
- v.Real.Add(&bd) // ac+bd
- v.Real.Quo(&cc_plus_dd) // (ac+bd)/(cc+dd)
-
- v.Imag.Set(&bc)
- v.Imag.Sub(&ad) // bc-ad
- v.Imag.Quo(&cc_plus_dd) // (bc+ad)/(cc+dd)
-
- return true
-}
-
-func (v *Mpcplx) String() string {
- return fmt.Sprintf("(%s+%si)", v.Real.String(), v.Imag.String())
-}
-
-func (v *Mpcplx) GoString() string {
- var re string
- sre := v.Real.CmpFloat64(0)
- if sre != 0 {
- re = v.Real.GoString()
- }
-
- var im string
- sim := v.Imag.CmpFloat64(0)
- if sim != 0 {
- im = v.Imag.GoString()
- }
-
- switch {
- case sre == 0 && sim == 0:
- return "0"
- case sre == 0:
- return im + "i"
- case sim == 0:
- return re
- case sim < 0:
- return fmt.Sprintf("(%s%si)", re, im)
- default:
- return fmt.Sprintf("(%s+%si)", re, im)
- }
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
- "fmt"
- "math/big"
-)
-
-// implements integer arithmetic
-
-// Mpint represents an integer constant.
-type Mpint struct {
- Val big.Int
- Ovf bool // set if Val overflowed compiler limit (sticky)
- Rune bool // set if syntax indicates default type rune
-}
-
-func (a *Mpint) SetOverflow() {
- a.Val.SetUint64(1) // avoid spurious div-zero errors
- a.Ovf = true
-}
-
-func (a *Mpint) checkOverflow(extra int) bool {
- // We don't need to be precise here, any reasonable upper limit would do.
- // For now, use existing limit so we pass all the tests unchanged.
- if a.Val.BitLen()+extra > Mpprec {
- a.SetOverflow()
- }
- return a.Ovf
-}
-
-func (a *Mpint) Set(b *Mpint) {
- a.Val.Set(&b.Val)
-}
-
-func (a *Mpint) SetFloat(b *Mpflt) bool {
- // avoid converting huge floating-point numbers to integers
- // (2*Mpprec is large enough to permit all tests to pass)
- if b.Val.MantExp(nil) > 2*Mpprec {
- a.SetOverflow()
- return false
- }
-
- if _, acc := b.Val.Int(&a.Val); acc == big.Exact {
- return true
- }
-
- const delta = 16 // a reasonably small number of bits > 0
- var t big.Float
- t.SetPrec(Mpprec - delta)
-
- // try rounding down a little
- t.SetMode(big.ToZero)
- t.Set(&b.Val)
- if _, acc := t.Int(&a.Val); acc == big.Exact {
- return true
- }
-
- // try rounding up a little
- t.SetMode(big.AwayFromZero)
- t.Set(&b.Val)
- if _, acc := t.Int(&a.Val); acc == big.Exact {
- return true
- }
-
- a.Ovf = false
- return false
-}
-
-func (a *Mpint) Add(b *Mpint) {
- if a.Ovf || b.Ovf {
- if nsavederrors+nerrors == 0 {
- Fatalf("ovf in Mpint Add")
- }
- a.SetOverflow()
- return
- }
-
- a.Val.Add(&a.Val, &b.Val)
-
- if a.checkOverflow(0) {
- yyerror("constant addition overflow")
- }
-}
-
-func (a *Mpint) Sub(b *Mpint) {
- if a.Ovf || b.Ovf {
- if nsavederrors+nerrors == 0 {
- Fatalf("ovf in Mpint Sub")
- }
- a.SetOverflow()
- return
- }
-
- a.Val.Sub(&a.Val, &b.Val)
-
- if a.checkOverflow(0) {
- yyerror("constant subtraction overflow")
- }
-}
-
-func (a *Mpint) Mul(b *Mpint) {
- if a.Ovf || b.Ovf {
- if nsavederrors+nerrors == 0 {
- Fatalf("ovf in Mpint Mul")
- }
- a.SetOverflow()
- return
- }
-
- a.Val.Mul(&a.Val, &b.Val)
-
- if a.checkOverflow(0) {
- yyerror("constant multiplication overflow")
- }
-}
-
-func (a *Mpint) Quo(b *Mpint) {
- if a.Ovf || b.Ovf {
- if nsavederrors+nerrors == 0 {
- Fatalf("ovf in Mpint Quo")
- }
- a.SetOverflow()
- return
- }
-
- a.Val.Quo(&a.Val, &b.Val)
-
- if a.checkOverflow(0) {
- // can only happen for div-0 which should be checked elsewhere
- yyerror("constant division overflow")
- }
-}
-
-func (a *Mpint) Rem(b *Mpint) {
- if a.Ovf || b.Ovf {
- if nsavederrors+nerrors == 0 {
- Fatalf("ovf in Mpint Rem")
- }
- a.SetOverflow()
- return
- }
-
- a.Val.Rem(&a.Val, &b.Val)
-
- if a.checkOverflow(0) {
- // should never happen
- yyerror("constant modulo overflow")
- }
-}
-
-func (a *Mpint) Or(b *Mpint) {
- if a.Ovf || b.Ovf {
- if nsavederrors+nerrors == 0 {
- Fatalf("ovf in Mpint Or")
- }
- a.SetOverflow()
- return
- }
-
- a.Val.Or(&a.Val, &b.Val)
-}
-
-func (a *Mpint) And(b *Mpint) {
- if a.Ovf || b.Ovf {
- if nsavederrors+nerrors == 0 {
- Fatalf("ovf in Mpint And")
- }
- a.SetOverflow()
- return
- }
-
- a.Val.And(&a.Val, &b.Val)
-}
-
-func (a *Mpint) AndNot(b *Mpint) {
- if a.Ovf || b.Ovf {
- if nsavederrors+nerrors == 0 {
- Fatalf("ovf in Mpint AndNot")
- }
- a.SetOverflow()
- return
- }
-
- a.Val.AndNot(&a.Val, &b.Val)
-}
-
-func (a *Mpint) Xor(b *Mpint) {
- if a.Ovf || b.Ovf {
- if nsavederrors+nerrors == 0 {
- Fatalf("ovf in Mpint Xor")
- }
- a.SetOverflow()
- return
- }
-
- a.Val.Xor(&a.Val, &b.Val)
-}
-
-func (a *Mpint) Lsh(b *Mpint) {
- if a.Ovf || b.Ovf {
- if nsavederrors+nerrors == 0 {
- Fatalf("ovf in Mpint Lsh")
- }
- a.SetOverflow()
- return
- }
-
- s := b.Int64()
- if s < 0 || s >= Mpprec {
- msg := "shift count too large"
- if s < 0 {
- msg = "invalid negative shift count"
- }
- yyerror("%s: %d", msg, s)
- a.SetInt64(0)
- return
- }
-
- if a.checkOverflow(int(s)) {
- yyerror("constant shift overflow")
- return
- }
- a.Val.Lsh(&a.Val, uint(s))
-}
-
-func (a *Mpint) Rsh(b *Mpint) {
- if a.Ovf || b.Ovf {
- if nsavederrors+nerrors == 0 {
- Fatalf("ovf in Mpint Rsh")
- }
- a.SetOverflow()
- return
- }
-
- s := b.Int64()
- if s < 0 {
- yyerror("invalid negative shift count: %d", s)
- if a.Val.Sign() < 0 {
- a.SetInt64(-1)
- } else {
- a.SetInt64(0)
- }
- return
- }
-
- a.Val.Rsh(&a.Val, uint(s))
-}
-
-func (a *Mpint) Cmp(b *Mpint) int {
- return a.Val.Cmp(&b.Val)
-}
-
-func (a *Mpint) CmpInt64(c int64) int {
- if c == 0 {
- return a.Val.Sign() // common case shortcut
- }
- return a.Val.Cmp(big.NewInt(c))
-}
-
-func (a *Mpint) Neg() {
- a.Val.Neg(&a.Val)
-}
-
-func (a *Mpint) Int64() int64 {
- if a.Ovf {
- if nsavederrors+nerrors == 0 {
- Fatalf("constant overflow")
- }
- return 0
- }
-
- return a.Val.Int64()
-}
-
-func (a *Mpint) SetInt64(c int64) {
- a.Val.SetInt64(c)
-}
-
-func (a *Mpint) SetString(as string) {
- _, ok := a.Val.SetString(as, 0)
- if !ok {
- // The lexer checks for correct syntax of the literal
- // and reports detailed errors. Thus SetString should
- // never fail (in theory it might run out of memory,
- // but that wouldn't be reported as an error here).
- Fatalf("malformed integer constant: %s", as)
- return
- }
- if a.checkOverflow(0) {
- yyerror("constant too large: %s", as)
- }
-}
-
-func (a *Mpint) GoString() string {
- return a.Val.String()
-}
-
-func (a *Mpint) String() string {
- return fmt.Sprintf("%#x", &a.Val)
-}
import (
"fmt"
+ "go/constant"
+ "go/token"
"os"
"path/filepath"
"runtime"
"unicode"
"unicode/utf8"
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/syntax"
"cmd/compile/internal/types"
- "cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
)
// parseFiles concurrently parses files into *syntax.File structures.
// Each declaration in every *syntax.File is converted to a syntax tree
-// and its root represented by *Node is appended to xtop.
+// and its root represented by *Node is appended to Target.Decls.
// Returns the total count of parsed lines.
func parseFiles(filenames []string) uint {
noders := make([]*noder, 0, len(filenames))
for _, filename := range filenames {
p := &noder{
- basemap: make(map[*syntax.PosBase]*src.PosBase),
- err: make(chan syntax.Error),
+ basemap: make(map[*syntax.PosBase]*src.PosBase),
+ err: make(chan syntax.Error),
+ trackScopes: base.Flag.Dwarf,
}
noders = append(noders, p)
var lines uint
for _, p := range noders {
for e := range p.err {
- p.yyerrorpos(e.Pos, "%s", e.Msg)
+ p.errorAt(e.Pos, "%s", e.Msg)
}
p.node()
lines += p.file.Lines
p.file = nil // release memory
- if nsyntaxerrors != 0 {
- errorexit()
+ if base.SyntaxErrors() != 0 {
+ base.ErrorExit()
}
// Always run testdclstack here, even when debug_dclstack is not set, as a sanity measure.
testdclstack()
}
- localpkg.Height = myheight
+ for _, p := range noders {
+ p.processPragmas()
+ }
+
+ types.LocalPkg.Height = myheight
return lines
}
}
func (p *noder) makeXPos(pos syntax.Pos) (_ src.XPos) {
- return Ctxt.PosTable.XPos(src.MakePos(p.makeSrcPosBase(pos.Base()), pos.Line(), pos.Col()))
+ return base.Ctxt.PosTable.XPos(src.MakePos(p.makeSrcPosBase(pos.Base()), pos.Line(), pos.Col()))
}
-func (p *noder) yyerrorpos(pos syntax.Pos, format string, args ...interface{}) {
- yyerrorl(p.makeXPos(pos), format, args...)
+func (p *noder) errorAt(pos syntax.Pos, format string, args ...interface{}) {
+ base.ErrorfAt(p.makeXPos(pos), format, args...)
}
-var pathPrefix string
-
// TODO(gri) Can we eliminate fileh in favor of absFilename?
func fileh(name string) string {
- return objabi.AbsFile("", name, pathPrefix)
+ return objabi.AbsFile("", name, base.Flag.TrimPath)
}
func absFilename(name string) string {
- return objabi.AbsFile(Ctxt.Pathname, name, pathPrefix)
+ return objabi.AbsFile(base.Ctxt.Pathname, name, base.Flag.TrimPath)
}
// noder transforms package syntax's AST into a Node tree.
linknames []linkname
pragcgobuf [][]string
err chan syntax.Error
- scope ScopeID
+ scope ir.ScopeID
importedUnsafe bool
importedEmbed bool
// scopeVars is a stack tracking the number of variables declared in the
// current function at the moment each open scope was opened.
- scopeVars []int
+ trackScopes bool
+ scopeVars []int
lastCloseScopePos syntax.Pos
}
-func (p *noder) funcBody(fn *Node, block *syntax.BlockStmt) {
+func (p *noder) funcBody(fn *ir.Func, block *syntax.BlockStmt) {
oldScope := p.scope
p.scope = 0
funchdr(fn)
if block != nil {
body := p.stmts(block.List)
if body == nil {
- body = []*Node{nod(OEMPTY, nil, nil)}
+ body = []ir.Node{ir.Nod(ir.OBLOCK, nil, nil)}
}
- fn.Nbody.Set(body)
+ fn.PtrBody().Set(body)
- lineno = p.makeXPos(block.Rbrace)
- fn.Func.Endlineno = lineno
+ base.Pos = p.makeXPos(block.Rbrace)
+ fn.Endlineno = base.Pos
}
funcbody()
func (p *noder) openScope(pos syntax.Pos) {
types.Markdcl()
- if trackScopes {
- Curfn.Func.Parents = append(Curfn.Func.Parents, p.scope)
- p.scopeVars = append(p.scopeVars, len(Curfn.Func.Dcl))
- p.scope = ScopeID(len(Curfn.Func.Parents))
+ if p.trackScopes {
+ Curfn.Parents = append(Curfn.Parents, p.scope)
+ p.scopeVars = append(p.scopeVars, len(Curfn.Dcl))
+ p.scope = ir.ScopeID(len(Curfn.Parents))
p.markScope(pos)
}
p.lastCloseScopePos = pos
types.Popdcl()
- if trackScopes {
+ if p.trackScopes {
scopeVars := p.scopeVars[len(p.scopeVars)-1]
p.scopeVars = p.scopeVars[:len(p.scopeVars)-1]
- if scopeVars == len(Curfn.Func.Dcl) {
+ if scopeVars == len(Curfn.Dcl) {
// no variables were declared in this scope, so we can retract it.
- if int(p.scope) != len(Curfn.Func.Parents) {
- Fatalf("scope tracking inconsistency, no variables declared but scopes were not retracted")
+ if int(p.scope) != len(Curfn.Parents) {
+ base.Fatalf("scope tracking inconsistency, no variables declared but scopes were not retracted")
}
- p.scope = Curfn.Func.Parents[p.scope-1]
- Curfn.Func.Parents = Curfn.Func.Parents[:len(Curfn.Func.Parents)-1]
+ p.scope = Curfn.Parents[p.scope-1]
+ Curfn.Parents = Curfn.Parents[:len(Curfn.Parents)-1]
- nmarks := len(Curfn.Func.Marks)
- Curfn.Func.Marks[nmarks-1].Scope = p.scope
- prevScope := ScopeID(0)
+ nmarks := len(Curfn.Marks)
+ Curfn.Marks[nmarks-1].Scope = p.scope
+ prevScope := ir.ScopeID(0)
if nmarks >= 2 {
- prevScope = Curfn.Func.Marks[nmarks-2].Scope
+ prevScope = Curfn.Marks[nmarks-2].Scope
}
- if Curfn.Func.Marks[nmarks-1].Scope == prevScope {
- Curfn.Func.Marks = Curfn.Func.Marks[:nmarks-1]
+ if Curfn.Marks[nmarks-1].Scope == prevScope {
+ Curfn.Marks = Curfn.Marks[:nmarks-1]
}
return
}
- p.scope = Curfn.Func.Parents[p.scope-1]
+ p.scope = Curfn.Parents[p.scope-1]
p.markScope(pos)
}
func (p *noder) markScope(pos syntax.Pos) {
xpos := p.makeXPos(pos)
- if i := len(Curfn.Func.Marks); i > 0 && Curfn.Func.Marks[i-1].Pos == xpos {
- Curfn.Func.Marks[i-1].Scope = p.scope
+ if i := len(Curfn.Marks); i > 0 && Curfn.Marks[i-1].Pos == xpos {
+ Curfn.Marks[i-1].Scope = p.scope
} else {
- Curfn.Func.Marks = append(Curfn.Func.Marks, Mark{xpos, p.scope})
+ Curfn.Marks = append(Curfn.Marks, ir.Mark{Pos: xpos, Scope: p.scope})
}
}
mkpackage(p.file.PkgName.Value)
if pragma, ok := p.file.Pragma.(*Pragma); ok {
- pragma.Flag &^= GoBuildPragma
+ pragma.Flag &^= ir.GoBuildPragma
p.checkUnused(pragma)
}
- xtop = append(xtop, p.decls(p.file.DeclList)...)
+ Target.Decls = append(Target.Decls, p.decls(p.file.DeclList)...)
+
+ base.Pos = src.NoXPos
+ clearImports()
+}
- for _, n := range p.linknames {
+func (p *noder) processPragmas() {
+ for _, l := range p.linknames {
if !p.importedUnsafe {
- p.yyerrorpos(n.pos, "//go:linkname only allowed in Go files that import \"unsafe\"")
+ p.errorAt(l.pos, "//go:linkname only allowed in Go files that import \"unsafe\"")
continue
}
- s := lookup(n.local)
- if n.remote != "" {
- s.Linkname = n.remote
- } else {
- // Use the default object symbol name if the
- // user didn't provide one.
- if myimportpath == "" {
- p.yyerrorpos(n.pos, "//go:linkname requires linkname argument or -p compiler flag")
- } else {
- s.Linkname = objabi.PathToPrefix(myimportpath) + "." + n.local
- }
+ n := ir.AsNode(lookup(l.local).Def)
+ if n == nil || n.Op() != ir.ONAME {
+ // TODO(mdempsky): Change to p.errorAt before Go 1.17 release.
+ // base.WarnfAt(p.makeXPos(l.pos), "//go:linkname must refer to declared function or variable (will be an error in Go 1.17)")
+ continue
}
- }
-
- // The linker expects an ABI0 wrapper for all cgo-exported
- // functions.
- for _, prag := range p.pragcgobuf {
- switch prag[0] {
- case "cgo_export_static", "cgo_export_dynamic":
- if symabiRefs == nil {
- symabiRefs = make(map[string]obj.ABI)
- }
- symabiRefs[prag[1]] = obj.ABI0
+ if n.Sym().Linkname != "" {
+ p.errorAt(l.pos, "duplicate //go:linkname for %s", l.local)
+ continue
}
+ n.Sym().Linkname = l.remote
}
-
- pragcgobuf = append(pragcgobuf, p.pragcgobuf...)
- lineno = src.NoXPos
- clearImports()
+ Target.CgoPragmas = append(Target.CgoPragmas, p.pragcgobuf...)
}
-func (p *noder) decls(decls []syntax.Decl) (l []*Node) {
+func (p *noder) decls(decls []syntax.Decl) (l []ir.Node) {
var cs constState
for _, decl := range decls {
p.checkUnused(pragma)
}
- val := p.basicLit(imp.Path)
- ipkg := importfile(&val)
+ ipkg := importfile(p.basicLit(imp.Path))
if ipkg == nil {
- if nerrors == 0 {
- Fatalf("phase error in import")
+ if base.Errors() == 0 {
+ base.Fatalf("phase error in import")
}
return
}
p.importedEmbed = true
}
+ if !ipkg.Direct {
+ Target.Imports = append(Target.Imports, ipkg)
+ }
ipkg.Direct = true
var my *types.Sym
my = lookup(ipkg.Name)
}
- pack := p.nod(imp, OPACK, nil, nil)
- pack.Sym = my
- pack.Name.Pkg = ipkg
+ pack := ir.NewPkgName(p.pos(imp), my, ipkg)
switch my.Name {
case ".":
- importdot(ipkg, pack)
+ importDot(pack)
return
case "init":
- yyerrorl(pack.Pos, "cannot import package as init - init must be a func")
+ base.ErrorfAt(pack.Pos(), "cannot import package as init - init must be a func")
return
case "_":
return
}
if my.Def != nil {
- redeclare(pack.Pos, my, "as imported package name")
+ redeclare(pack.Pos(), my, "as imported package name")
}
- my.Def = asTypesNode(pack)
- my.Lastlineno = pack.Pos
+ my.Def = pack
+ my.Lastlineno = pack.Pos()
my.Block = 1 // at top level
}
-func (p *noder) varDecl(decl *syntax.VarDecl) []*Node {
- names := p.declNames(decl.NameList)
+func (p *noder) varDecl(decl *syntax.VarDecl) []ir.Node {
+ names := p.declNames(ir.ONAME, decl.NameList)
typ := p.typeExprOrNil(decl.Type)
- var exprs []*Node
+ var exprs []ir.Node
if decl.Values != nil {
exprs = p.exprList(decl.Values)
}
// so at that point it hasn't seen the imports.
// We're left to check now, just before applying the //go:embed lines.
for _, e := range pragma.Embeds {
- p.yyerrorpos(e.Pos, "//go:embed only allowed in Go files that import \"embed\"")
+ p.errorAt(e.Pos, "//go:embed only allowed in Go files that import \"embed\"")
}
} else {
exprs = varEmbed(p, names, typ, exprs, pragma.Embeds)
// constant declarations are handled correctly (e.g., issue 15550).
type constState struct {
group *syntax.Group
- typ *Node
- values []*Node
+ typ ir.Ntype
+ values []ir.Node
iota int64
}
-func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []*Node {
+func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []ir.Node {
if decl.Group == nil || decl.Group != cs.group {
*cs = constState{
group: decl.Group,
p.checkUnused(pragma)
}
- names := p.declNames(decl.NameList)
+ names := p.declNames(ir.OLITERAL, decl.NameList)
typ := p.typeExprOrNil(decl.Type)
- var values []*Node
+ var values []ir.Node
if decl.Values != nil {
values = p.exprList(decl.Values)
cs.typ, cs.values = typ, values
} else {
if typ != nil {
- yyerror("const declaration cannot have type without expression")
+ base.Errorf("const declaration cannot have type without expression")
}
typ, values = cs.typ, cs.values
}
- nn := make([]*Node, 0, len(names))
+ nn := make([]ir.Node, 0, len(names))
for i, n := range names {
if i >= len(values) {
- yyerror("missing value in const declaration")
+ base.Errorf("missing value in const declaration")
break
}
v := values[i]
if decl.Values == nil {
- v = treecopy(v, n.Pos)
+ v = ir.DeepCopy(n.Pos(), v)
}
-
- n.Op = OLITERAL
declare(n, dclcontext)
- n.Name.Param.Ntype = typ
- n.Name.Defn = v
+ n.Ntype = typ
+ n.Defn = v
n.SetIota(cs.iota)
- nn = append(nn, p.nod(decl, ODCLCONST, n, nil))
+ nn = append(nn, p.nod(decl, ir.ODCLCONST, n, nil))
}
if len(values) > len(names) {
- yyerror("extra expression in const declaration")
+ base.Errorf("extra expression in const declaration")
}
cs.iota++
return nn
}
-func (p *noder) typeDecl(decl *syntax.TypeDecl) *Node {
- n := p.declName(decl.Name)
- n.Op = OTYPE
+func (p *noder) typeDecl(decl *syntax.TypeDecl) ir.Node {
+ n := p.declName(ir.OTYPE, decl.Name)
declare(n, dclcontext)
// decl.Type may be nil but in that case we got a syntax error during parsing
typ := p.typeExprOrNil(decl.Type)
- param := n.Name.Param
- param.Ntype = typ
- param.SetAlias(decl.Alias)
+ n.Ntype = typ
+ n.SetAlias(decl.Alias)
if pragma, ok := decl.Pragma.(*Pragma); ok {
if !decl.Alias {
- param.SetPragma(pragma.Flag & TypePragmas)
+ n.SetPragma(pragma.Flag & TypePragmas)
pragma.Flag &^= TypePragmas
}
p.checkUnused(pragma)
}
- nod := p.nod(decl, ODCLTYPE, n, nil)
- if param.Alias() && !langSupported(1, 9, localpkg) {
- yyerrorl(nod.Pos, "type aliases only supported as of -lang=go1.9")
+ nod := p.nod(decl, ir.ODCLTYPE, n, nil)
+ if n.Alias() && !langSupported(1, 9, types.LocalPkg) {
+ base.ErrorfAt(nod.Pos(), "type aliases only supported as of -lang=go1.9")
}
return nod
}
-func (p *noder) declNames(names []*syntax.Name) []*Node {
- nodes := make([]*Node, 0, len(names))
+func (p *noder) declNames(op ir.Op, names []*syntax.Name) []*ir.Name {
+ nodes := make([]*ir.Name, 0, len(names))
for _, name := range names {
- nodes = append(nodes, p.declName(name))
+ nodes = append(nodes, p.declName(op, name))
}
return nodes
}
-func (p *noder) declName(name *syntax.Name) *Node {
- n := dclname(p.name(name))
- n.Pos = p.pos(name)
- return n
+func (p *noder) declName(op ir.Op, name *syntax.Name) *ir.Name {
+ return ir.NewDeclNameAt(p.pos(name), op, p.name(name))
}
-func (p *noder) funcDecl(fun *syntax.FuncDecl) *Node {
+func (p *noder) funcDecl(fun *syntax.FuncDecl) ir.Node {
name := p.name(fun.Name)
t := p.signature(fun.Recv, fun.Type)
- f := p.nod(fun, ODCLFUNC, nil, nil)
+ f := ir.NewFunc(p.pos(fun))
if fun.Recv == nil {
if name.Name == "init" {
name = renameinit()
- if t.List.Len() > 0 || t.Rlist.Len() > 0 {
- yyerrorl(f.Pos, "func init must have no arguments and no return values")
+ if len(t.Params) > 0 || len(t.Results) > 0 {
+ base.ErrorfAt(f.Pos(), "func init must have no arguments and no return values")
}
+ Target.Inits = append(Target.Inits, f)
}
- if localpkg.Name == "main" && name.Name == "main" {
- if t.List.Len() > 0 || t.Rlist.Len() > 0 {
- yyerrorl(f.Pos, "func main must have no arguments and no return values")
+ if types.LocalPkg.Name == "main" && name.Name == "main" {
+ if len(t.Params) > 0 || len(t.Results) > 0 {
+ base.ErrorfAt(f.Pos(), "func main must have no arguments and no return values")
}
}
} else {
- f.Func.Shortname = name
- name = nblank.Sym // filled in by typecheckfunc
+ f.Shortname = name
+ name = ir.BlankNode.Sym() // filled in by typecheckfunc
}
- f.Func.Nname = newfuncnamel(p.pos(fun.Name), name)
- f.Func.Nname.Name.Defn = f
- f.Func.Nname.Name.Param.Ntype = t
+ f.Nname = newFuncNameAt(p.pos(fun.Name), name, f)
+ f.Nname.Defn = f
+ f.Nname.Ntype = t
if pragma, ok := fun.Pragma.(*Pragma); ok {
- f.Func.Pragma = pragma.Flag & FuncPragmas
- if pragma.Flag&Systemstack != 0 && pragma.Flag&Nosplit != 0 {
- yyerrorl(f.Pos, "go:nosplit and go:systemstack cannot be combined")
+ f.Pragma = pragma.Flag & FuncPragmas
+ if pragma.Flag&ir.Systemstack != 0 && pragma.Flag&ir.Nosplit != 0 {
+ base.ErrorfAt(f.Pos(), "go:nosplit and go:systemstack cannot be combined")
}
pragma.Flag &^= FuncPragmas
p.checkUnused(pragma)
}
if fun.Recv == nil {
- declare(f.Func.Nname, PFUNC)
+ declare(f.Nname, ir.PFUNC)
}
p.funcBody(f, fun.Body)
if fun.Body != nil {
- if f.Func.Pragma&Noescape != 0 {
- yyerrorl(f.Pos, "can only use //go:noescape with external func implementations")
+ if f.Pragma&ir.Noescape != 0 {
+ base.ErrorfAt(f.Pos(), "can only use //go:noescape with external func implementations")
}
} else {
- if pure_go || strings.HasPrefix(f.funcname(), "init.") {
+ if base.Flag.Complete || strings.HasPrefix(ir.FuncName(f), "init.") {
// Linknamed functions are allowed to have no body. Hopefully
// the linkname target has a body. See issue 23311.
isLinknamed := false
for _, n := range p.linknames {
- if f.funcname() == n.local {
+ if ir.FuncName(f) == n.local {
isLinknamed = true
break
}
}
if !isLinknamed {
- yyerrorl(f.Pos, "missing function body")
+ base.ErrorfAt(f.Pos(), "missing function body")
}
}
}
return f
}
-func (p *noder) signature(recv *syntax.Field, typ *syntax.FuncType) *Node {
- n := p.nod(typ, OTFUNC, nil, nil)
+func (p *noder) signature(recv *syntax.Field, typ *syntax.FuncType) *ir.FuncType {
+ var rcvr *ir.Field
if recv != nil {
- n.Left = p.param(recv, false, false)
+ rcvr = p.param(recv, false, false)
}
- n.List.Set(p.params(typ.ParamList, true))
- n.Rlist.Set(p.params(typ.ResultList, false))
- return n
+ return ir.NewFuncType(p.pos(typ), rcvr,
+ p.params(typ.ParamList, true),
+ p.params(typ.ResultList, false))
}
-func (p *noder) params(params []*syntax.Field, dddOk bool) []*Node {
- nodes := make([]*Node, 0, len(params))
+func (p *noder) params(params []*syntax.Field, dddOk bool) []*ir.Field {
+ nodes := make([]*ir.Field, 0, len(params))
for i, param := range params {
p.setlineno(param)
nodes = append(nodes, p.param(param, dddOk, i+1 == len(params)))
return nodes
}
-func (p *noder) param(param *syntax.Field, dddOk, final bool) *Node {
+func (p *noder) param(param *syntax.Field, dddOk, final bool) *ir.Field {
var name *types.Sym
if param.Name != nil {
name = p.name(param.Name)
}
typ := p.typeExpr(param.Type)
- n := p.nodSym(param, ODCLFIELD, typ, name)
+ n := ir.NewField(p.pos(param), name, typ, nil)
// rewrite ...T parameter
- if typ.Op == ODDD {
+ if typ, ok := typ.(*ir.SliceType); ok && typ.DDD {
if !dddOk {
// We mark these as syntax errors to get automatic elimination
- // of multiple such errors per line (see yyerrorl in subr.go).
- yyerror("syntax error: cannot use ... in receiver or result parameter list")
+ // of multiple such errors per line (see ErrorfAt in subr.go).
+ base.Errorf("syntax error: cannot use ... in receiver or result parameter list")
} else if !final {
if param.Name == nil {
- yyerror("syntax error: cannot use ... with non-final parameter")
+ base.Errorf("syntax error: cannot use ... with non-final parameter")
} else {
- p.yyerrorpos(param.Name.Pos(), "syntax error: cannot use ... with non-final parameter %s", param.Name.Value)
+ p.errorAt(param.Name.Pos(), "syntax error: cannot use ... with non-final parameter %s", param.Name.Value)
}
}
- typ.Op = OTARRAY
- typ.Right = typ.Left
- typ.Left = nil
- n.SetIsDDD(true)
- if n.Left != nil {
- n.Left.SetIsDDD(true)
- }
+ typ.DDD = false
+ n.IsDDD = true
}
return n
}
-func (p *noder) exprList(expr syntax.Expr) []*Node {
+func (p *noder) exprList(expr syntax.Expr) []ir.Node {
if list, ok := expr.(*syntax.ListExpr); ok {
return p.exprs(list.ElemList)
}
- return []*Node{p.expr(expr)}
+ return []ir.Node{p.expr(expr)}
}
-func (p *noder) exprs(exprs []syntax.Expr) []*Node {
- nodes := make([]*Node, 0, len(exprs))
+func (p *noder) exprs(exprs []syntax.Expr) []ir.Node {
+ nodes := make([]ir.Node, 0, len(exprs))
for _, expr := range exprs {
nodes = append(nodes, p.expr(expr))
}
return nodes
}
-func (p *noder) expr(expr syntax.Expr) *Node {
+func (p *noder) expr(expr syntax.Expr) ir.Node {
p.setlineno(expr)
switch expr := expr.(type) {
case nil, *syntax.BadExpr:
case *syntax.Name:
return p.mkname(expr)
case *syntax.BasicLit:
- n := nodlit(p.basicLit(expr))
+ n := ir.NewLiteral(p.basicLit(expr))
+ if expr.Kind == syntax.RuneLit {
+ n.SetType(types.UntypedRune)
+ }
n.SetDiag(expr.Bad) // avoid follow-on errors if there was a syntax error
return n
case *syntax.CompositeLit:
- n := p.nod(expr, OCOMPLIT, nil, nil)
+ n := p.nod(expr, ir.OCOMPLIT, nil, nil)
if expr.Type != nil {
- n.Right = p.expr(expr.Type)
+ n.SetRight(p.expr(expr.Type))
}
l := p.exprs(expr.ElemList)
for i, e := range l {
l[i] = p.wrapname(expr.ElemList[i], e)
}
- n.List.Set(l)
- lineno = p.makeXPos(expr.Rbrace)
+ n.PtrList().Set(l)
+ base.Pos = p.makeXPos(expr.Rbrace)
return n
case *syntax.KeyValueExpr:
// use position of expr.Key rather than of expr (which has position of ':')
- return p.nod(expr.Key, OKEY, p.expr(expr.Key), p.wrapname(expr.Value, p.expr(expr.Value)))
+ return p.nod(expr.Key, ir.OKEY, p.expr(expr.Key), p.wrapname(expr.Value, p.expr(expr.Value)))
case *syntax.FuncLit:
return p.funcLit(expr)
case *syntax.ParenExpr:
- return p.nod(expr, OPAREN, p.expr(expr.X), nil)
+ return p.nod(expr, ir.OPAREN, p.expr(expr.X), nil)
case *syntax.SelectorExpr:
// parser.new_dotname
obj := p.expr(expr.X)
- if obj.Op == OPACK {
- obj.Name.SetUsed(true)
- return importName(obj.Name.Pkg.Lookup(expr.Sel.Value))
+ if obj.Op() == ir.OPACK {
+ pack := obj.(*ir.PkgName)
+ pack.Used = true
+ return importName(pack.Pkg.Lookup(expr.Sel.Value))
}
- n := nodSym(OXDOT, obj, p.name(expr.Sel))
- n.Pos = p.pos(expr) // lineno may have been changed by p.expr(expr.X)
+ n := nodSym(ir.OXDOT, obj, p.name(expr.Sel))
+ n.SetPos(p.pos(expr)) // lineno may have been changed by p.expr(expr.X)
return n
case *syntax.IndexExpr:
- return p.nod(expr, OINDEX, p.expr(expr.X), p.expr(expr.Index))
+ return p.nod(expr, ir.OINDEX, p.expr(expr.X), p.expr(expr.Index))
case *syntax.SliceExpr:
- op := OSLICE
+ op := ir.OSLICE
if expr.Full {
- op = OSLICE3
+ op = ir.OSLICE3
}
- n := p.nod(expr, op, p.expr(expr.X), nil)
- var index [3]*Node
+ n := ir.NewSliceExpr(p.pos(expr), op, p.expr(expr.X))
+ var index [3]ir.Node
for i, x := range &expr.Index {
if x != nil {
index[i] = p.expr(x)
n.SetSliceBounds(index[0], index[1], index[2])
return n
case *syntax.AssertExpr:
- return p.nod(expr, ODOTTYPE, p.expr(expr.X), p.typeExpr(expr.Type))
+ return p.nod(expr, ir.ODOTTYPE, p.expr(expr.X), p.typeExpr(expr.Type))
case *syntax.Operation:
if expr.Op == syntax.Add && expr.Y != nil {
return p.sum(expr)
}
x := p.expr(expr.X)
if expr.Y == nil {
- return p.nod(expr, p.unOp(expr.Op), x, nil)
+ pos, op := p.pos(expr), p.unOp(expr.Op)
+ switch op {
+ case ir.OADDR:
+ return nodAddrAt(pos, x)
+ case ir.ODEREF:
+ return ir.NewStarExpr(pos, x)
+ }
+ return ir.NewUnaryExpr(pos, op, x)
+ }
+
+ pos, op, y := p.pos(expr), p.binOp(expr.Op), p.expr(expr.Y)
+ switch op {
+ case ir.OANDAND, ir.OOROR:
+ return ir.NewLogicalExpr(pos, op, x, y)
}
- return p.nod(expr, p.binOp(expr.Op), x, p.expr(expr.Y))
+ return ir.NewBinaryExpr(pos, op, x, y)
case *syntax.CallExpr:
- n := p.nod(expr, OCALL, p.expr(expr.Fun), nil)
- n.List.Set(p.exprs(expr.ArgList))
+ n := p.nod(expr, ir.OCALL, p.expr(expr.Fun), nil)
+ n.PtrList().Set(p.exprs(expr.ArgList))
n.SetIsDDD(expr.HasDots)
return n
case *syntax.ArrayType:
- var len *Node
+ var len ir.Node
if expr.Len != nil {
len = p.expr(expr.Len)
- } else {
- len = p.nod(expr, ODDD, nil, nil)
}
- return p.nod(expr, OTARRAY, len, p.typeExpr(expr.Elem))
+ return ir.NewArrayType(p.pos(expr), len, p.typeExpr(expr.Elem))
case *syntax.SliceType:
- return p.nod(expr, OTARRAY, nil, p.typeExpr(expr.Elem))
+ return ir.NewSliceType(p.pos(expr), p.typeExpr(expr.Elem))
case *syntax.DotsType:
- return p.nod(expr, ODDD, p.typeExpr(expr.Elem), nil)
+ t := ir.NewSliceType(p.pos(expr), p.typeExpr(expr.Elem))
+ t.DDD = true
+ return t
case *syntax.StructType:
return p.structType(expr)
case *syntax.InterfaceType:
case *syntax.FuncType:
return p.signature(nil, expr)
case *syntax.MapType:
- return p.nod(expr, OTMAP, p.typeExpr(expr.Key), p.typeExpr(expr.Value))
+ return ir.NewMapType(p.pos(expr),
+ p.typeExpr(expr.Key), p.typeExpr(expr.Value))
case *syntax.ChanType:
- n := p.nod(expr, OTCHAN, p.typeExpr(expr.Elem), nil)
- n.SetTChanDir(p.chanDir(expr.Dir))
- return n
+ return ir.NewChanType(p.pos(expr),
+ p.typeExpr(expr.Elem), p.chanDir(expr.Dir))
case *syntax.TypeSwitchGuard:
- n := p.nod(expr, OTYPESW, nil, p.expr(expr.X))
+ var tag *ir.Ident
if expr.Lhs != nil {
- n.Left = p.declName(expr.Lhs)
- if n.Left.isBlank() {
- yyerror("invalid variable name %v in type switch", n.Left)
+ tag = ir.NewIdent(p.pos(expr.Lhs), p.name(expr.Lhs))
+ if ir.IsBlank(tag) {
+ base.Errorf("invalid variable name %v in type switch", tag)
}
}
- return n
+ return ir.NewTypeSwitchGuard(p.pos(expr), tag, p.expr(expr.X))
}
panic("unhandled Expr")
}
// sum efficiently handles very large summation expressions (such as
// in issue #16394). In particular, it avoids left recursion and
// collapses string literals.
-func (p *noder) sum(x syntax.Expr) *Node {
+func (p *noder) sum(x syntax.Expr) ir.Node {
// While we need to handle long sums with asymptotic
// efficiency, the vast majority of sums are very small: ~95%
// have only 2 or 3 operands, and ~99% of string literals are
// handle correctly. For now, we avoid these problems by
// treating named string constants the same as non-constant
// operands.
- var nstr *Node
+ var nstr ir.Node
chunks := make([]string, 0, 1)
n := p.expr(x)
- if Isconst(n, CTSTR) && n.Sym == nil {
+ if ir.IsConst(n, constant.String) && n.Sym() == nil {
nstr = n
- chunks = append(chunks, nstr.StringVal())
+ chunks = append(chunks, ir.StringVal(nstr))
}
for i := len(adds) - 1; i >= 0; i-- {
add := adds[i]
r := p.expr(add.Y)
- if Isconst(r, CTSTR) && r.Sym == nil {
+ if ir.IsConst(r, constant.String) && r.Sym() == nil {
if nstr != nil {
// Collapse r into nstr instead of adding to n.
- chunks = append(chunks, r.StringVal())
+ chunks = append(chunks, ir.StringVal(r))
continue
}
nstr = r
- chunks = append(chunks, nstr.StringVal())
+ chunks = append(chunks, ir.StringVal(nstr))
} else {
if len(chunks) > 1 {
- nstr.SetVal(Val{U: strings.Join(chunks, "")})
+ nstr.SetVal(constant.MakeString(strings.Join(chunks, "")))
}
nstr = nil
chunks = chunks[:0]
}
- n = p.nod(add, OADD, n, r)
+ n = p.nod(add, ir.OADD, n, r)
}
if len(chunks) > 1 {
- nstr.SetVal(Val{U: strings.Join(chunks, "")})
+ nstr.SetVal(constant.MakeString(strings.Join(chunks, "")))
}
return n
}
-func (p *noder) typeExpr(typ syntax.Expr) *Node {
+func (p *noder) typeExpr(typ syntax.Expr) ir.Ntype {
// TODO(mdempsky): Be stricter? typecheck should handle errors anyway.
- return p.expr(typ)
+ n := p.expr(typ)
+ if n == nil {
+ return nil
+ }
+ if _, ok := n.(ir.Ntype); !ok {
+ ir.Dump("NOT NTYPE", n)
+ }
+ return n.(ir.Ntype)
}
-func (p *noder) typeExprOrNil(typ syntax.Expr) *Node {
+func (p *noder) typeExprOrNil(typ syntax.Expr) ir.Ntype {
if typ != nil {
- return p.expr(typ)
+ return p.typeExpr(typ)
}
return nil
}
panic("unhandled ChanDir")
}
-func (p *noder) structType(expr *syntax.StructType) *Node {
- l := make([]*Node, 0, len(expr.FieldList))
+func (p *noder) structType(expr *syntax.StructType) ir.Node {
+ l := make([]*ir.Field, 0, len(expr.FieldList))
for i, field := range expr.FieldList {
p.setlineno(field)
- var n *Node
+ var n *ir.Field
if field.Name == nil {
n = p.embedded(field.Type)
} else {
- n = p.nodSym(field, ODCLFIELD, p.typeExpr(field.Type), p.name(field.Name))
+ n = ir.NewField(p.pos(field), p.name(field.Name), p.typeExpr(field.Type), nil)
}
if i < len(expr.TagList) && expr.TagList[i] != nil {
- n.SetVal(p.basicLit(expr.TagList[i]))
+ n.Note = constant.StringVal(p.basicLit(expr.TagList[i]))
}
l = append(l, n)
}
p.setlineno(expr)
- n := p.nod(expr, OTSTRUCT, nil, nil)
- n.List.Set(l)
- return n
+ return ir.NewStructType(p.pos(expr), l)
}
-func (p *noder) interfaceType(expr *syntax.InterfaceType) *Node {
- l := make([]*Node, 0, len(expr.MethodList))
+func (p *noder) interfaceType(expr *syntax.InterfaceType) ir.Node {
+ l := make([]*ir.Field, 0, len(expr.MethodList))
for _, method := range expr.MethodList {
p.setlineno(method)
- var n *Node
+ var n *ir.Field
if method.Name == nil {
- n = p.nodSym(method, ODCLFIELD, importName(p.packname(method.Type)), nil)
+ n = ir.NewField(p.pos(method), nil, importName(p.packname(method.Type)).(ir.Ntype), nil)
} else {
mname := p.name(method.Name)
- sig := p.typeExpr(method.Type)
- sig.Left = fakeRecv()
- n = p.nodSym(method, ODCLFIELD, sig, mname)
- ifacedcl(n)
+ if mname.IsBlank() {
+ base.Errorf("methods must have a unique non-blank name")
+ continue
+ }
+ sig := p.typeExpr(method.Type).(*ir.FuncType)
+ sig.Recv = fakeRecv()
+ n = ir.NewField(p.pos(method), mname, sig, nil)
}
l = append(l, n)
}
- n := p.nod(expr, OTINTER, nil, nil)
- n.List.Set(l)
- return n
+ return ir.NewInterfaceType(p.pos(expr), l)
}
func (p *noder) packname(expr syntax.Expr) *types.Sym {
switch expr := expr.(type) {
case *syntax.Name:
name := p.name(expr)
- if n := oldname(name); n.Name != nil && n.Name.Pack != nil {
- n.Name.Pack.Name.SetUsed(true)
+ if n := oldname(name); n.Name() != nil && n.Name().PkgName != nil {
+ n.Name().PkgName.Used = true
}
return name
case *syntax.SelectorExpr:
name := p.name(expr.X.(*syntax.Name))
- def := asNode(name.Def)
+ def := ir.AsNode(name.Def)
if def == nil {
- yyerror("undefined: %v", name)
+ base.Errorf("undefined: %v", name)
return name
}
var pkg *types.Pkg
- if def.Op != OPACK {
- yyerror("%v is not a package", name)
- pkg = localpkg
+ if def.Op() != ir.OPACK {
+ base.Errorf("%v is not a package", name)
+ pkg = types.LocalPkg
} else {
- def.Name.SetUsed(true)
- pkg = def.Name.Pkg
+ def := def.(*ir.PkgName)
+ def.Used = true
+ pkg = def.Pkg
}
return pkg.Lookup(expr.Sel.Value)
}
panic(fmt.Sprintf("unexpected packname: %#v", expr))
}
-func (p *noder) embedded(typ syntax.Expr) *Node {
+func (p *noder) embedded(typ syntax.Expr) *ir.Field {
op, isStar := typ.(*syntax.Operation)
if isStar {
if op.Op != syntax.Mul || op.Y != nil {
}
sym := p.packname(typ)
- n := p.nodSym(typ, ODCLFIELD, importName(sym), lookup(sym.Name))
- n.SetEmbedded(true)
+ n := ir.NewField(p.pos(typ), lookup(sym.Name), importName(sym).(ir.Ntype), nil)
+ n.Embedded = true
if isStar {
- n.Left = p.nod(op, ODEREF, n.Left, nil)
+ n.Ntype = ir.NewStarExpr(p.pos(op), n.Ntype)
}
return n
}
-func (p *noder) stmts(stmts []syntax.Stmt) []*Node {
+func (p *noder) stmts(stmts []syntax.Stmt) []ir.Node {
return p.stmtsFall(stmts, false)
}
-func (p *noder) stmtsFall(stmts []syntax.Stmt, fallOK bool) []*Node {
- var nodes []*Node
+func (p *noder) stmtsFall(stmts []syntax.Stmt, fallOK bool) []ir.Node {
+ var nodes []ir.Node
for i, stmt := range stmts {
s := p.stmtFall(stmt, fallOK && i+1 == len(stmts))
if s == nil {
- } else if s.Op == OBLOCK && s.Ninit.Len() == 0 {
- nodes = append(nodes, s.List.Slice()...)
+ } else if s.Op() == ir.OBLOCK && s.(*ir.BlockStmt).List().Len() > 0 {
+ // Inline non-empty block.
+ // Empty blocks must be preserved for checkreturn.
+ nodes = append(nodes, s.(*ir.BlockStmt).List().Slice()...)
} else {
nodes = append(nodes, s)
}
return nodes
}
-func (p *noder) stmt(stmt syntax.Stmt) *Node {
+func (p *noder) stmt(stmt syntax.Stmt) ir.Node {
return p.stmtFall(stmt, false)
}
-func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) *Node {
+func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) ir.Node {
p.setlineno(stmt)
switch stmt := stmt.(type) {
case *syntax.EmptyStmt:
l := p.blockStmt(stmt)
if len(l) == 0 {
// TODO(mdempsky): Line number?
- return nod(OEMPTY, nil, nil)
+ return ir.Nod(ir.OBLOCK, nil, nil)
}
return liststmt(l)
case *syntax.ExprStmt:
return p.wrapname(stmt, p.expr(stmt.X))
case *syntax.SendStmt:
- return p.nod(stmt, OSEND, p.expr(stmt.Chan), p.expr(stmt.Value))
+ return p.nod(stmt, ir.OSEND, p.expr(stmt.Chan), p.expr(stmt.Value))
case *syntax.DeclStmt:
return liststmt(p.decls(stmt.DeclList))
case *syntax.AssignStmt:
if stmt.Op != 0 && stmt.Op != syntax.Def {
- n := p.nod(stmt, OASOP, p.expr(stmt.Lhs), p.expr(stmt.Rhs))
+ n := ir.NewAssignOpStmt(p.pos(stmt), p.binOp(stmt.Op), p.expr(stmt.Lhs), p.expr(stmt.Rhs))
n.SetImplicit(stmt.Rhs == syntax.ImplicitOne)
- n.SetSubOp(p.binOp(stmt.Op))
return n
}
- n := p.nod(stmt, OAS, nil, nil) // assume common case
-
rhs := p.exprList(stmt.Rhs)
- lhs := p.assignList(stmt.Lhs, n, stmt.Op == syntax.Def)
-
- if len(lhs) == 1 && len(rhs) == 1 {
- // common case
- n.Left = lhs[0]
- n.Right = rhs[0]
- } else {
- n.Op = OAS2
- n.List.Set(lhs)
- n.Rlist.Set(rhs)
+ if list, ok := stmt.Lhs.(*syntax.ListExpr); ok && len(list.ElemList) != 1 || len(rhs) != 1 {
+ n := p.nod(stmt, ir.OAS2, nil, nil)
+ n.SetColas(stmt.Op == syntax.Def)
+ n.PtrList().Set(p.assignList(stmt.Lhs, n, n.Colas()))
+ n.PtrRlist().Set(rhs)
+ return n
}
+
+ n := p.nod(stmt, ir.OAS, nil, nil)
+ n.SetColas(stmt.Op == syntax.Def)
+ n.SetLeft(p.assignList(stmt.Lhs, n, n.Colas())[0])
+ n.SetRight(rhs[0])
return n
case *syntax.BranchStmt:
- var op Op
+ var op ir.Op
switch stmt.Tok {
case syntax.Break:
- op = OBREAK
+ op = ir.OBREAK
case syntax.Continue:
- op = OCONTINUE
+ op = ir.OCONTINUE
case syntax.Fallthrough:
if !fallOK {
- yyerror("fallthrough statement out of place")
+ base.Errorf("fallthrough statement out of place")
}
- op = OFALL
+ op = ir.OFALL
case syntax.Goto:
- op = OGOTO
+ op = ir.OGOTO
default:
panic("unhandled BranchStmt")
}
- n := p.nod(stmt, op, nil, nil)
+ var sym *types.Sym
if stmt.Label != nil {
- n.Sym = p.name(stmt.Label)
+ sym = p.name(stmt.Label)
}
- return n
+ return ir.NewBranchStmt(p.pos(stmt), op, sym)
case *syntax.CallStmt:
- var op Op
+ var op ir.Op
switch stmt.Tok {
case syntax.Defer:
- op = ODEFER
+ op = ir.ODEFER
case syntax.Go:
- op = OGO
+ op = ir.OGO
default:
panic("unhandled CallStmt")
}
- return p.nod(stmt, op, p.expr(stmt.Call), nil)
+ return ir.NewGoDeferStmt(p.pos(stmt), op, p.expr(stmt.Call))
case *syntax.ReturnStmt:
- var results []*Node
+ var results []ir.Node
if stmt.Results != nil {
results = p.exprList(stmt.Results)
}
- n := p.nod(stmt, ORETURN, nil, nil)
- n.List.Set(results)
- if n.List.Len() == 0 && Curfn != nil {
- for _, ln := range Curfn.Func.Dcl {
- if ln.Class() == PPARAM {
+ n := p.nod(stmt, ir.ORETURN, nil, nil)
+ n.PtrList().Set(results)
+ if n.List().Len() == 0 && Curfn != nil {
+ for _, ln := range Curfn.Dcl {
+ if ln.Class() == ir.PPARAM {
continue
}
- if ln.Class() != PPARAMOUT {
+ if ln.Class() != ir.PPARAMOUT {
break
}
- if asNode(ln.Sym.Def) != ln {
- yyerror("%s is shadowed during return", ln.Sym.Name)
+ if ln.Sym().Def != ln {
+ base.Errorf("%s is shadowed during return", ln.Sym().Name)
}
}
}
panic("unhandled Stmt")
}
-func (p *noder) assignList(expr syntax.Expr, defn *Node, colas bool) []*Node {
+func (p *noder) assignList(expr syntax.Expr, defn ir.Node, colas bool) []ir.Node {
if !colas {
return p.exprList(expr)
}
- defn.SetColas(true)
-
var exprs []syntax.Expr
if list, ok := expr.(*syntax.ListExpr); ok {
exprs = list.ElemList
exprs = []syntax.Expr{expr}
}
- res := make([]*Node, len(exprs))
+ res := make([]ir.Node, len(exprs))
seen := make(map[*types.Sym]bool, len(exprs))
newOrErr := false
for i, expr := range exprs {
p.setlineno(expr)
- res[i] = nblank
+ res[i] = ir.BlankNode
name, ok := expr.(*syntax.Name)
if !ok {
- p.yyerrorpos(expr.Pos(), "non-name %v on left side of :=", p.expr(expr))
+ p.errorAt(expr.Pos(), "non-name %v on left side of :=", p.expr(expr))
newOrErr = true
continue
}
}
if seen[sym] {
- p.yyerrorpos(expr.Pos(), "%v repeated on left side of :=", sym)
+ p.errorAt(expr.Pos(), "%v repeated on left side of :=", sym)
newOrErr = true
continue
}
}
newOrErr = true
- n := newname(sym)
+ n := NewName(sym)
declare(n, dclcontext)
- n.Name.Defn = defn
- defn.Ninit.Append(nod(ODCL, n, nil))
+ n.Defn = defn
+ defn.PtrInit().Append(ir.Nod(ir.ODCL, n, nil))
res[i] = n
}
if !newOrErr {
- yyerrorl(defn.Pos, "no new variables on left side of :=")
+ base.ErrorfAt(defn.Pos(), "no new variables on left side of :=")
}
return res
}
-func (p *noder) blockStmt(stmt *syntax.BlockStmt) []*Node {
+func (p *noder) blockStmt(stmt *syntax.BlockStmt) []ir.Node {
p.openScope(stmt.Pos())
nodes := p.stmts(stmt.List)
p.closeScope(stmt.Rbrace)
return nodes
}
-func (p *noder) ifStmt(stmt *syntax.IfStmt) *Node {
+func (p *noder) ifStmt(stmt *syntax.IfStmt) ir.Node {
p.openScope(stmt.Pos())
- n := p.nod(stmt, OIF, nil, nil)
+ n := p.nod(stmt, ir.OIF, nil, nil)
if stmt.Init != nil {
- n.Ninit.Set1(p.stmt(stmt.Init))
+ n.PtrInit().Set1(p.stmt(stmt.Init))
}
if stmt.Cond != nil {
- n.Left = p.expr(stmt.Cond)
+ n.SetLeft(p.expr(stmt.Cond))
}
- n.Nbody.Set(p.blockStmt(stmt.Then))
+ n.PtrBody().Set(p.blockStmt(stmt.Then))
if stmt.Else != nil {
e := p.stmt(stmt.Else)
- if e.Op == OBLOCK && e.Ninit.Len() == 0 {
- n.Rlist.Set(e.List.Slice())
+ if e.Op() == ir.OBLOCK {
+ n.PtrRlist().Set(e.List().Slice())
} else {
- n.Rlist.Set1(e)
+ n.PtrRlist().Set1(e)
}
}
p.closeAnotherScope()
return n
}
-func (p *noder) forStmt(stmt *syntax.ForStmt) *Node {
+func (p *noder) forStmt(stmt *syntax.ForStmt) ir.Node {
p.openScope(stmt.Pos())
- var n *Node
if r, ok := stmt.Init.(*syntax.RangeClause); ok {
if stmt.Cond != nil || stmt.Post != nil {
panic("unexpected RangeClause")
}
- n = p.nod(r, ORANGE, nil, p.expr(r.X))
+ n := p.nod(r, ir.ORANGE, nil, p.expr(r.X))
if r.Lhs != nil {
- n.List.Set(p.assignList(r.Lhs, n, r.Def))
- }
- } else {
- n = p.nod(stmt, OFOR, nil, nil)
- if stmt.Init != nil {
- n.Ninit.Set1(p.stmt(stmt.Init))
- }
- if stmt.Cond != nil {
- n.Left = p.expr(stmt.Cond)
- }
- if stmt.Post != nil {
- n.Right = p.stmt(stmt.Post)
+ n.SetColas(r.Def)
+ n.PtrList().Set(p.assignList(r.Lhs, n, n.Colas()))
}
+ n.PtrBody().Set(p.blockStmt(stmt.Body))
+ p.closeAnotherScope()
+ return n
}
- n.Nbody.Set(p.blockStmt(stmt.Body))
+
+ n := p.nod(stmt, ir.OFOR, nil, nil)
+ if stmt.Init != nil {
+ n.PtrInit().Set1(p.stmt(stmt.Init))
+ }
+ if stmt.Cond != nil {
+ n.SetLeft(p.expr(stmt.Cond))
+ }
+ if stmt.Post != nil {
+ n.SetRight(p.stmt(stmt.Post))
+ }
+ n.PtrBody().Set(p.blockStmt(stmt.Body))
p.closeAnotherScope()
return n
}
-func (p *noder) switchStmt(stmt *syntax.SwitchStmt) *Node {
+func (p *noder) switchStmt(stmt *syntax.SwitchStmt) ir.Node {
p.openScope(stmt.Pos())
- n := p.nod(stmt, OSWITCH, nil, nil)
+ n := p.nod(stmt, ir.OSWITCH, nil, nil)
if stmt.Init != nil {
- n.Ninit.Set1(p.stmt(stmt.Init))
+ n.PtrInit().Set1(p.stmt(stmt.Init))
}
if stmt.Tag != nil {
- n.Left = p.expr(stmt.Tag)
+ n.SetLeft(p.expr(stmt.Tag))
}
- tswitch := n.Left
- if tswitch != nil && tswitch.Op != OTYPESW {
- tswitch = nil
+ var tswitch *ir.TypeSwitchGuard
+ if l := n.Left(); l != nil && l.Op() == ir.OTYPESW {
+ tswitch = l.(*ir.TypeSwitchGuard)
}
- n.List.Set(p.caseClauses(stmt.Body, tswitch, stmt.Rbrace))
+ n.PtrList().Set(p.caseClauses(stmt.Body, tswitch, stmt.Rbrace))
p.closeScope(stmt.Rbrace)
return n
}
-func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *Node, rbrace syntax.Pos) []*Node {
- nodes := make([]*Node, 0, len(clauses))
+func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *ir.TypeSwitchGuard, rbrace syntax.Pos) []ir.Node {
+ nodes := make([]ir.Node, 0, len(clauses))
for i, clause := range clauses {
p.setlineno(clause)
if i > 0 {
}
p.openScope(clause.Pos())
- n := p.nod(clause, OCASE, nil, nil)
+ n := p.nod(clause, ir.OCASE, nil, nil)
if clause.Cases != nil {
- n.List.Set(p.exprList(clause.Cases))
+ n.PtrList().Set(p.exprList(clause.Cases))
}
- if tswitch != nil && tswitch.Left != nil {
- nn := newname(tswitch.Left.Sym)
+ if tswitch != nil && tswitch.Left() != nil {
+ nn := NewName(tswitch.Left().Sym())
declare(nn, dclcontext)
- n.Rlist.Set1(nn)
+ n.PtrRlist().Set1(nn)
// keep track of the instances for reporting unused
- nn.Name.Defn = tswitch
+ nn.Defn = tswitch
}
// Trim trailing empty statements. We omit them from
body = body[:len(body)-1]
}
- n.Nbody.Set(p.stmtsFall(body, true))
- if l := n.Nbody.Len(); l > 0 && n.Nbody.Index(l-1).Op == OFALL {
+ n.PtrBody().Set(p.stmtsFall(body, true))
+ if l := n.Body().Len(); l > 0 && n.Body().Index(l-1).Op() == ir.OFALL {
if tswitch != nil {
- yyerror("cannot fallthrough in type switch")
+ base.Errorf("cannot fallthrough in type switch")
}
if i+1 == len(clauses) {
- yyerror("cannot fallthrough final case in switch")
+ base.Errorf("cannot fallthrough final case in switch")
}
}
return nodes
}
-func (p *noder) selectStmt(stmt *syntax.SelectStmt) *Node {
- n := p.nod(stmt, OSELECT, nil, nil)
- n.List.Set(p.commClauses(stmt.Body, stmt.Rbrace))
+func (p *noder) selectStmt(stmt *syntax.SelectStmt) ir.Node {
+ n := p.nod(stmt, ir.OSELECT, nil, nil)
+ n.PtrList().Set(p.commClauses(stmt.Body, stmt.Rbrace))
return n
}
-func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []*Node {
- nodes := make([]*Node, 0, len(clauses))
+func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []ir.Node {
+ nodes := make([]ir.Node, 0, len(clauses))
for i, clause := range clauses {
p.setlineno(clause)
if i > 0 {
}
p.openScope(clause.Pos())
- n := p.nod(clause, OCASE, nil, nil)
+ n := p.nod(clause, ir.OCASE, nil, nil)
if clause.Comm != nil {
- n.List.Set1(p.stmt(clause.Comm))
+ n.PtrList().Set1(p.stmt(clause.Comm))
}
- n.Nbody.Set(p.stmts(clause.Body))
+ n.PtrBody().Set(p.stmts(clause.Body))
nodes = append(nodes, n)
}
if len(clauses) > 0 {
return nodes
}
-func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) *Node {
- lhs := p.nodSym(label, OLABEL, nil, p.name(label.Label))
+func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) ir.Node {
+ sym := p.name(label.Label)
+ lhs := p.nodSym(label, ir.OLABEL, nil, sym)
- var ls *Node
+ var ls ir.Node
if label.Stmt != nil { // TODO(mdempsky): Should always be present.
ls = p.stmtFall(label.Stmt, fallOK)
+ // Attach label directly to control statement too.
+ if ls != nil {
+ switch ls.Op() {
+ case ir.OFOR:
+ ls.SetSym(sym)
+ case ir.ORANGE:
+ ls.SetSym(sym)
+ case ir.OSWITCH:
+ ls.SetSym(sym)
+ case ir.OSELECT:
+ ls.SetSym(sym)
+ }
+ }
}
- lhs.Name.Defn = ls
- l := []*Node{lhs}
+ l := []ir.Node{lhs}
if ls != nil {
- if ls.Op == OBLOCK && ls.Ninit.Len() == 0 {
- l = append(l, ls.List.Slice()...)
+ if ls.Op() == ir.OBLOCK {
+ l = append(l, ls.List().Slice()...)
} else {
l = append(l, ls)
}
return liststmt(l)
}
-var unOps = [...]Op{
- syntax.Recv: ORECV,
- syntax.Mul: ODEREF,
- syntax.And: OADDR,
+var unOps = [...]ir.Op{
+ syntax.Recv: ir.ORECV,
+ syntax.Mul: ir.ODEREF,
+ syntax.And: ir.OADDR,
- syntax.Not: ONOT,
- syntax.Xor: OBITNOT,
- syntax.Add: OPLUS,
- syntax.Sub: ONEG,
+ syntax.Not: ir.ONOT,
+ syntax.Xor: ir.OBITNOT,
+ syntax.Add: ir.OPLUS,
+ syntax.Sub: ir.ONEG,
}
-func (p *noder) unOp(op syntax.Operator) Op {
+func (p *noder) unOp(op syntax.Operator) ir.Op {
if uint64(op) >= uint64(len(unOps)) || unOps[op] == 0 {
panic("invalid Operator")
}
return unOps[op]
}
-var binOps = [...]Op{
- syntax.OrOr: OOROR,
- syntax.AndAnd: OANDAND,
+var binOps = [...]ir.Op{
+ syntax.OrOr: ir.OOROR,
+ syntax.AndAnd: ir.OANDAND,
- syntax.Eql: OEQ,
- syntax.Neq: ONE,
- syntax.Lss: OLT,
- syntax.Leq: OLE,
- syntax.Gtr: OGT,
- syntax.Geq: OGE,
+ syntax.Eql: ir.OEQ,
+ syntax.Neq: ir.ONE,
+ syntax.Lss: ir.OLT,
+ syntax.Leq: ir.OLE,
+ syntax.Gtr: ir.OGT,
+ syntax.Geq: ir.OGE,
- syntax.Add: OADD,
- syntax.Sub: OSUB,
- syntax.Or: OOR,
- syntax.Xor: OXOR,
+ syntax.Add: ir.OADD,
+ syntax.Sub: ir.OSUB,
+ syntax.Or: ir.OOR,
+ syntax.Xor: ir.OXOR,
- syntax.Mul: OMUL,
- syntax.Div: ODIV,
- syntax.Rem: OMOD,
- syntax.And: OAND,
- syntax.AndNot: OANDNOT,
- syntax.Shl: OLSH,
- syntax.Shr: ORSH,
+ syntax.Mul: ir.OMUL,
+ syntax.Div: ir.ODIV,
+ syntax.Rem: ir.OMOD,
+ syntax.And: ir.OAND,
+ syntax.AndNot: ir.OANDNOT,
+ syntax.Shl: ir.OLSH,
+ syntax.Shr: ir.ORSH,
}
-func (p *noder) binOp(op syntax.Operator) Op {
+func (p *noder) binOp(op syntax.Operator) ir.Op {
if uint64(op) >= uint64(len(binOps)) || binOps[op] == 0 {
panic("invalid Operator")
}
// literal is not compatible with the current language version.
func checkLangCompat(lit *syntax.BasicLit) {
s := lit.Value
- if len(s) <= 2 || langSupported(1, 13, localpkg) {
+ if len(s) <= 2 || langSupported(1, 13, types.LocalPkg) {
return
}
// len(s) > 2
if strings.Contains(s, "_") {
- yyerrorv("go1.13", "underscores in numeric literals")
+ base.ErrorfVers("go1.13", "underscores in numeric literals")
return
}
if s[0] != '0' {
return
}
- base := s[1]
- if base == 'b' || base == 'B' {
- yyerrorv("go1.13", "binary literals")
+ radix := s[1]
+ if radix == 'b' || radix == 'B' {
+ base.ErrorfVers("go1.13", "binary literals")
return
}
- if base == 'o' || base == 'O' {
- yyerrorv("go1.13", "0o/0O-style octal literals")
+ if radix == 'o' || radix == 'O' {
+ base.ErrorfVers("go1.13", "0o/0O-style octal literals")
return
}
- if lit.Kind != syntax.IntLit && (base == 'x' || base == 'X') {
- yyerrorv("go1.13", "hexadecimal floating-point literals")
+ if lit.Kind != syntax.IntLit && (radix == 'x' || radix == 'X') {
+ base.ErrorfVers("go1.13", "hexadecimal floating-point literals")
}
}
-func (p *noder) basicLit(lit *syntax.BasicLit) Val {
+func (p *noder) basicLit(lit *syntax.BasicLit) constant.Value {
// We don't use the errors of the conversion routines to determine
// if a literal string is valid because the conversion routines may
// accept a wider syntax than the language permits. Rely on lit.Bad
// instead.
- switch s := lit.Value; lit.Kind {
- case syntax.IntLit:
- checkLangCompat(lit)
- x := new(Mpint)
- if !lit.Bad {
- x.SetString(s)
- }
- return Val{U: x}
-
- case syntax.FloatLit:
- checkLangCompat(lit)
- x := newMpflt()
- if !lit.Bad {
- x.SetString(s)
- }
- return Val{U: x}
+ if lit.Bad {
+ return constant.MakeUnknown()
+ }
- case syntax.ImagLit:
+ switch lit.Kind {
+ case syntax.IntLit, syntax.FloatLit, syntax.ImagLit:
checkLangCompat(lit)
- x := newMpcmplx()
- if !lit.Bad {
- x.Imag.SetString(strings.TrimSuffix(s, "i"))
- }
- return Val{U: x}
-
- case syntax.RuneLit:
- x := new(Mpint)
- x.Rune = true
- if !lit.Bad {
- u, _ := strconv.Unquote(s)
- var r rune
- if len(u) == 1 {
- r = rune(u[0])
- } else {
- r, _ = utf8.DecodeRuneInString(u)
- }
- x.SetInt64(int64(r))
- }
- return Val{U: x}
+ }
- case syntax.StringLit:
- var x string
- if !lit.Bad {
- if len(s) > 0 && s[0] == '`' {
- // strip carriage returns from raw string
- s = strings.Replace(s, "\r", "", -1)
- }
- x, _ = strconv.Unquote(s)
- }
- return Val{U: x}
+ v := constant.MakeFromLiteral(lit.Value, tokenForLitKind[lit.Kind], 0)
+ if v.Kind() == constant.Unknown {
+ // TODO(mdempsky): Better error message?
+ p.errorAt(lit.Pos(), "malformed constant: %s", lit.Value)
+ }
- default:
- panic("unhandled BasicLit kind")
+ // go/constant uses big.Rat by default, which is more precise, but
+ // causes toolstash -cmp and some tests to fail. For now, convert
+ // to big.Float to match cmd/compile's historical precision.
+ // TODO(mdempsky): Remove.
+ if v.Kind() == constant.Float {
+ v = constant.Make(bigFloatVal(v))
}
+
+ return v
+}
+
+var tokenForLitKind = [...]token.Token{
+ syntax.IntLit: token.INT,
+ syntax.RuneLit: token.CHAR,
+ syntax.FloatLit: token.FLOAT,
+ syntax.ImagLit: token.IMAG,
+ syntax.StringLit: token.STRING,
}
func (p *noder) name(name *syntax.Name) *types.Sym {
return lookup(name.Value)
}
-func (p *noder) mkname(name *syntax.Name) *Node {
+func (p *noder) mkname(name *syntax.Name) ir.Node {
// TODO(mdempsky): Set line number?
return mkname(p.name(name))
}
-func (p *noder) wrapname(n syntax.Node, x *Node) *Node {
+func (p *noder) wrapname(n syntax.Node, x ir.Node) ir.Node {
// These nodes do not carry line numbers.
// Introduce a wrapper node to give them the correct line.
- switch x.Op {
- case OTYPE, OLITERAL:
- if x.Sym == nil {
+ switch x.Op() {
+ case ir.OTYPE, ir.OLITERAL:
+ if x.Sym() == nil {
break
}
fallthrough
- case ONAME, ONONAME, OPACK:
- x = p.nod(n, OPAREN, x, nil)
- x.SetImplicit(true)
+ case ir.ONAME, ir.ONONAME, ir.OPACK:
+ p := p.nod(n, ir.OPAREN, x, nil)
+ p.SetImplicit(true)
+ return p
}
return x
}
-func (p *noder) nod(orig syntax.Node, op Op, left, right *Node) *Node {
- return nodl(p.pos(orig), op, left, right)
+func (p *noder) nod(orig syntax.Node, op ir.Op, left, right ir.Node) ir.Node {
+ return ir.NodAt(p.pos(orig), op, left, right)
}
-func (p *noder) nodSym(orig syntax.Node, op Op, left *Node, sym *types.Sym) *Node {
+func (p *noder) nodSym(orig syntax.Node, op ir.Op, left ir.Node, sym *types.Sym) ir.Node {
n := nodSym(op, left, sym)
- n.Pos = p.pos(orig)
+ n.SetPos(p.pos(orig))
return n
}
func (p *noder) pos(n syntax.Node) src.XPos {
// TODO(gri): orig.Pos() should always be known - fix package syntax
- xpos := lineno
+ xpos := base.Pos
if pos := n.Pos(); pos.IsKnown() {
xpos = p.makeXPos(pos)
}
func (p *noder) setlineno(n syntax.Node) {
if n != nil {
- lineno = p.pos(n)
+ base.Pos = p.pos(n)
}
}
// *Pragma is the value stored in a syntax.Pragma during parsing.
type Pragma struct {
- Flag PragmaFlag // collected bits
- Pos []PragmaPos // position of each individual flag
+ Flag ir.PragmaFlag // collected bits
+ Pos []PragmaPos // position of each individual flag
Embeds []PragmaEmbed
}
type PragmaPos struct {
- Flag PragmaFlag
+ Flag ir.PragmaFlag
Pos syntax.Pos
}
func (p *noder) checkUnused(pragma *Pragma) {
for _, pos := range pragma.Pos {
if pos.Flag&pragma.Flag != 0 {
- p.yyerrorpos(pos.Pos, "misplaced compiler directive")
+ p.errorAt(pos.Pos, "misplaced compiler directive")
}
}
if len(pragma.Embeds) > 0 {
for _, e := range pragma.Embeds {
- p.yyerrorpos(e.Pos, "misplaced go:embed directive")
+ p.errorAt(e.Pos, "misplaced go:embed directive")
}
}
}
var target string
if len(f) == 3 {
target = f[2]
+ } else if base.Ctxt.Pkgpath != "" {
+ // Use the default object symbol name if the
+ // user didn't provide one.
+ target = objabi.PathToPrefix(base.Ctxt.Pkgpath) + "." + f[1]
+ } else {
+ p.error(syntax.Error{Pos: pos, Msg: "//go:linkname requires linkname argument or -p compiler flag"})
+ break
}
p.linknames = append(p.linknames, linkname{pos, f[1], target})
// For security, we disallow //go:cgo_* directives other
// than cgo_import_dynamic outside cgo-generated files.
// Exception: they are allowed in the standard library, for runtime and syscall.
- if !isCgoGeneratedFile(pos) && !compiling_std {
+ if !isCgoGeneratedFile(pos) && !base.Flag.Std {
p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s only allowed in cgo-generated code", text)})
}
p.pragcgo(pos, text)
verb = verb[:i]
}
flag := pragmaFlag(verb)
- const runtimePragmas = Systemstack | Nowritebarrier | Nowritebarrierrec | Yeswritebarrierrec
- if !compiling_runtime && flag&runtimePragmas != 0 {
+ const runtimePragmas = ir.Systemstack | ir.Nowritebarrier | ir.Nowritebarrierrec | ir.Yeswritebarrierrec
+ if !base.Flag.CompilingRuntime && flag&runtimePragmas != 0 {
p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s only allowed in runtime", verb)})
}
- if flag == 0 && !allowedStdPragmas[verb] && compiling_std {
+ if flag == 0 && !allowedStdPragmas[verb] && base.Flag.Std {
p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s is not allowed in the standard library", verb)})
}
pragma.Flag |= flag
return '0' <= c && c <= '9' || 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || c == '.' || c == '_' || c == '/' || c >= utf8.RuneSelf
}
-func mkname(sym *types.Sym) *Node {
+func mkname(sym *types.Sym) ir.Node {
n := oldname(sym)
- if n.Name != nil && n.Name.Pack != nil {
- n.Name.Pack.Name.SetUsed(true)
+ if n.Name() != nil && n.Name().PkgName != nil {
+ n.Name().PkgName.Used = true
}
return n
}
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/bio"
"cmd/internal/obj"
"crypto/sha256"
"encoding/json"
"fmt"
+ "go/constant"
"io"
"io/ioutil"
"os"
)
func dumpobj() {
- if linkobj == "" {
- dumpobj1(outfile, modeCompilerObj|modeLinkerObj)
+ if base.Flag.LinkObj == "" {
+ dumpobj1(base.Flag.LowerO, modeCompilerObj|modeLinkerObj)
return
}
- dumpobj1(outfile, modeCompilerObj)
- dumpobj1(linkobj, modeLinkerObj)
+ dumpobj1(base.Flag.LowerO, modeCompilerObj)
+ dumpobj1(base.Flag.LinkObj, modeLinkerObj)
}
func dumpobj1(outfile string, mode int) {
bout, err := bio.Create(outfile)
if err != nil {
- flusherrors()
+ base.FlushErrors()
fmt.Printf("can't create %s: %v\n", outfile, err)
- errorexit()
+ base.ErrorExit()
}
defer bout.Close()
bout.WriteString("!<arch>\n")
func printObjHeader(bout *bio.Writer) {
fmt.Fprintf(bout, "go object %s %s %s %s\n", objabi.GOOS, objabi.GOARCH, objabi.Version, objabi.Expstring())
- if buildid != "" {
- fmt.Fprintf(bout, "build id %q\n", buildid)
+ if base.Flag.BuildID != "" {
+ fmt.Fprintf(bout, "build id %q\n", base.Flag.BuildID)
}
- if localpkg.Name == "main" {
+ if types.LocalPkg.Name == "main" {
fmt.Fprintf(bout, "main\n")
}
fmt.Fprintf(bout, "\n") // header ends with blank line
}
func dumpdata() {
- externs := len(externdcl)
- xtops := len(xtop)
+ numExterns := len(Target.Externs)
+ numDecls := len(Target.Decls)
- dumpglobls()
+ dumpglobls(Target.Externs)
+ dumpfuncsyms()
addptabs()
- exportlistLen := len(exportlist)
- addsignats(externdcl)
+ numExports := len(Target.Exports)
+ addsignats(Target.Externs)
dumpsignats()
dumptabs()
- ptabsLen := len(ptabs)
- itabsLen := len(itabs)
+ numPTabs, numITabs := CountTabs()
dumpimportstrings()
dumpbasictypes()
dumpembeds()
// In the typical case, we loop 0 or 1 times.
// It was not until issue 24761 that we found any code that required a loop at all.
for {
- for i := xtops; i < len(xtop); i++ {
- n := xtop[i]
- if n.Op == ODCLFUNC {
- funccompile(n)
+ for i := numDecls; i < len(Target.Decls); i++ {
+ n := Target.Decls[i]
+ if n.Op() == ir.ODCLFUNC {
+ funccompile(n.(*ir.Func))
}
}
- xtops = len(xtop)
+ numDecls = len(Target.Decls)
compileFunctions()
dumpsignats()
- if xtops == len(xtop) {
+ if numDecls == len(Target.Decls) {
break
}
}
// Dump extra globals.
- tmp := externdcl
-
- if externdcl != nil {
- externdcl = externdcl[externs:]
- }
- dumpglobls()
- externdcl = tmp
+ dumpglobls(Target.Externs[numExterns:])
if zerosize > 0 {
zero := mappkg.Lookup("zero")
addGCLocals()
- if exportlistLen != len(exportlist) {
- Fatalf("exportlist changed after compile functions loop")
+ if numExports != len(Target.Exports) {
+ base.Fatalf("Target.Exports changed after compile functions loop")
}
- if ptabsLen != len(ptabs) {
- Fatalf("ptabs changed after compile functions loop")
+ newNumPTabs, newNumITabs := CountTabs()
+ if newNumPTabs != numPTabs {
+ base.Fatalf("ptabs changed after compile functions loop")
}
- if itabsLen != len(itabs) {
- Fatalf("itabs changed after compile functions loop")
+ if newNumITabs != numITabs {
+ base.Fatalf("itabs changed after compile functions loop")
}
}
func dumpLinkerObj(bout *bio.Writer) {
printObjHeader(bout)
- if len(pragcgobuf) != 0 {
+ if len(Target.CgoPragmas) != 0 {
// write empty export section; must be before cgo section
fmt.Fprintf(bout, "\n$$\n\n$$\n\n")
fmt.Fprintf(bout, "\n$$ // cgo\n")
- if err := json.NewEncoder(bout).Encode(pragcgobuf); err != nil {
- Fatalf("serializing pragcgobuf: %v", err)
+ if err := json.NewEncoder(bout).Encode(Target.CgoPragmas); err != nil {
+ base.Fatalf("serializing pragcgobuf: %v", err)
}
fmt.Fprintf(bout, "\n$$\n\n")
}
fmt.Fprintf(bout, "\n!\n")
- obj.WriteObjFile(Ctxt, bout)
+ obj.WriteObjFile(base.Ctxt, bout)
}
func addptabs() {
- if !Ctxt.Flag_dynlink || localpkg.Name != "main" {
+ if !base.Ctxt.Flag_dynlink || types.LocalPkg.Name != "main" {
return
}
- for _, exportn := range exportlist {
- s := exportn.Sym
- n := asNode(s.Def)
- if n == nil {
+ for _, exportn := range Target.Exports {
+ s := exportn.Sym()
+ nn := ir.AsNode(s.Def)
+ if nn == nil {
continue
}
- if n.Op != ONAME {
+ if nn.Op() != ir.ONAME {
continue
}
+ n := nn.(*ir.Name)
if !types.IsExported(s.Name) {
continue
}
if s.Pkg.Name != "main" {
continue
}
- if n.Type.Etype == TFUNC && n.Class() == PFUNC {
+ if n.Type().Kind() == types.TFUNC && n.Class() == ir.PFUNC {
// function
- ptabs = append(ptabs, ptabEntry{s: s, t: asNode(s.Def).Type})
+ ptabs = append(ptabs, ptabEntry{s: s, t: s.Def.Type()})
} else {
// variable
- ptabs = append(ptabs, ptabEntry{s: s, t: types.NewPtr(asNode(s.Def).Type)})
+ ptabs = append(ptabs, ptabEntry{s: s, t: types.NewPtr(s.Def.Type())})
}
}
}
-func dumpGlobal(n *Node) {
- if n.Type == nil {
- Fatalf("external %v nil type\n", n)
+func dumpGlobal(n *ir.Name) {
+ if n.Type() == nil {
+ base.Fatalf("external %v nil type\n", n)
}
- if n.Class() == PFUNC {
+ if n.Class() == ir.PFUNC {
return
}
- if n.Sym.Pkg != localpkg {
+ if n.Sym().Pkg != types.LocalPkg {
return
}
- dowidth(n.Type)
+ dowidth(n.Type())
ggloblnod(n)
}
-func dumpGlobalConst(n *Node) {
+func dumpGlobalConst(n ir.Node) {
// only export typed constants
- t := n.Type
+ t := n.Type()
if t == nil {
return
}
- if n.Sym.Pkg != localpkg {
+ if n.Sym().Pkg != types.LocalPkg {
return
}
// only export integer constants for now
- switch t.Etype {
- case TINT8:
- case TINT16:
- case TINT32:
- case TINT64:
- case TINT:
- case TUINT8:
- case TUINT16:
- case TUINT32:
- case TUINT64:
- case TUINT:
- case TUINTPTR:
- // ok
- case TIDEAL:
- if !Isconst(n, CTINT) {
- return
- }
- x := n.Val().U.(*Mpint)
- if x.Cmp(minintval[TINT]) < 0 || x.Cmp(maxintval[TINT]) > 0 {
+ if !t.IsInteger() {
+ return
+ }
+ v := n.Val()
+ if t.IsUntyped() {
+ // Export untyped integers as int (if they fit).
+ t = types.Types[types.TINT]
+ if doesoverflow(v, t) {
return
}
- // Ideal integers we export as int (if they fit).
- t = types.Types[TINT]
- default:
- return
}
- Ctxt.DwarfIntConst(myimportpath, n.Sym.Name, typesymname(t), n.Int64Val())
+ base.Ctxt.DwarfIntConst(base.Ctxt.Pkgpath, n.Sym().Name, typesymname(t), ir.IntVal(t, v))
}
-func dumpglobls() {
+func dumpglobls(externs []ir.Node) {
// add globals
- for _, n := range externdcl {
- switch n.Op {
- case ONAME:
- dumpGlobal(n)
- case OLITERAL:
+ for _, n := range externs {
+ switch n.Op() {
+ case ir.ONAME:
+ dumpGlobal(n.(*ir.Name))
+ case ir.OLITERAL:
dumpGlobalConst(n)
}
}
+}
+func dumpfuncsyms() {
sort.Slice(funcsyms, func(i, j int) bool {
return funcsyms[i].LinksymName() < funcsyms[j].LinksymName()
})
dsymptr(sf, 0, s.Linksym(), 0)
ggloblsym(sf, int32(Widthptr), obj.DUPOK|obj.RODATA)
}
-
- // Do not reprocess funcsyms on next dumpglobls call.
- funcsyms = nil
}
// addGCLocals adds gcargs, gclocals, gcregs, and stack object symbols to Ctxt.Data.
// This is done during the sequential phase after compilation, since
// global symbols can't be declared during parallel compilation.
func addGCLocals() {
- for _, s := range Ctxt.Text {
+ for _, s := range base.Ctxt.Text {
fn := s.Func()
if fn == nil {
continue
func duintxx(s *obj.LSym, off int, v uint64, wid int) int {
if off&(wid-1) != 0 {
- Fatalf("duintxxLSym: misaligned: v=%d wid=%d off=%d", v, wid, off)
+ base.Fatalf("duintxxLSym: misaligned: v=%d wid=%d off=%d", v, wid, off)
}
- s.WriteInt(Ctxt, int64(off), wid, int64(v))
+ s.WriteInt(base.Ctxt, int64(off), wid, int64(v))
return off + wid
}
symname = strconv.Quote(s)
}
- symdata := Ctxt.Lookup(stringSymPrefix + symname)
+ symdata := base.Ctxt.Lookup(stringSymPrefix + symname)
if !symdata.OnList() {
off := dstringdata(symdata, 0, s, pos, "string")
ggloblsym(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL)
if readonly {
sym = stringsym(pos, string(data))
} else {
- sym = slicedata(pos, string(data)).Sym.Linksym()
+ sym = slicedata(pos, string(data)).Sym().Linksym()
}
if len(hash) > 0 {
sum := sha256.Sum256(data)
var symdata *obj.LSym
if readonly {
symname := fmt.Sprintf(stringSymPattern, size, sum)
- symdata = Ctxt.Lookup(stringSymPrefix + symname)
+ symdata = base.Ctxt.Lookup(stringSymPrefix + symname)
if !symdata.OnList() {
info := symdata.NewFileInfo()
info.Name = file
} else {
// Emit a zero-length data symbol
// and then fix up length and content to use file.
- symdata = slicedata(pos, "").Sym.Linksym()
+ symdata = slicedata(pos, "").Sym().Linksym()
symdata.Size = size
symdata.Type = objabi.SNOPTRDATA
info := symdata.NewFileInfo()
var slicedataGen int
-func slicedata(pos src.XPos, s string) *Node {
+func slicedata(pos src.XPos, s string) *ir.Name {
slicedataGen++
symname := fmt.Sprintf(".gobytes.%d", slicedataGen)
- sym := localpkg.Lookup(symname)
- symnode := newname(sym)
- sym.Def = asTypesNode(symnode)
+ sym := types.LocalPkg.Lookup(symname)
+ symnode := NewName(sym)
+ sym.Def = symnode
lsym := sym.Linksym()
off := dstringdata(lsym, 0, s, pos, "slice")
return symnode
}
-func slicebytes(nam *Node, s string) {
- if nam.Op != ONAME {
- Fatalf("slicebytes %v", nam)
+func slicebytes(nam *ir.Name, off int64, s string) {
+ if nam.Op() != ir.ONAME {
+ base.Fatalf("slicebytes %v", nam)
}
- slicesym(nam, slicedata(nam.Pos, s), int64(len(s)))
+ slicesym(nam, off, slicedata(nam.Pos(), s), int64(len(s)))
}
func dstringdata(s *obj.LSym, off int, t string, pos src.XPos, what string) int {
// causing a cryptic error message by the linker. Check for oversize objects here
// and provide a useful error message instead.
if int64(len(t)) > 2e9 {
- yyerrorl(pos, "%v with length %v is too big", what, len(t))
+ base.ErrorfAt(pos, "%v with length %v is too big", what, len(t))
return 0
}
- s.WriteString(Ctxt, int64(off), len(t), t)
+ s.WriteString(base.Ctxt, int64(off), len(t), t)
return off + len(t)
}
func dsymptr(s *obj.LSym, off int, x *obj.LSym, xoff int) int {
off = int(Rnd(int64(off), int64(Widthptr)))
- s.WriteAddr(Ctxt, int64(off), Widthptr, x, int64(xoff))
+ s.WriteAddr(base.Ctxt, int64(off), Widthptr, x, int64(xoff))
off += Widthptr
return off
}
func dsymptrOff(s *obj.LSym, off int, x *obj.LSym) int {
- s.WriteOff(Ctxt, int64(off), x, 0)
+ s.WriteOff(base.Ctxt, int64(off), x, 0)
off += 4
return off
}
func dsymptrWeakOff(s *obj.LSym, off int, x *obj.LSym) int {
- s.WriteWeakOff(Ctxt, int64(off), x, 0)
+ s.WriteWeakOff(base.Ctxt, int64(off), x, 0)
off += 4
return off
}
-// slicesym writes a static slice symbol {&arr, lencap, lencap} to n.
-// arr must be an ONAME. slicesym does not modify n.
-func slicesym(n, arr *Node, lencap int64) {
- s := n.Sym.Linksym()
- base := n.Xoffset
- if arr.Op != ONAME {
- Fatalf("slicesym non-name arr %v", arr)
+// slicesym writes a static slice symbol {&arr, lencap, lencap} to n+noff.
+// slicesym does not modify n.
+func slicesym(n *ir.Name, noff int64, arr *ir.Name, lencap int64) {
+ s := n.Sym().Linksym()
+ if arr.Op() != ir.ONAME {
+ base.Fatalf("slicesym non-name arr %v", arr)
}
- s.WriteAddr(Ctxt, base, Widthptr, arr.Sym.Linksym(), arr.Xoffset)
- s.WriteInt(Ctxt, base+sliceLenOffset, Widthptr, lencap)
- s.WriteInt(Ctxt, base+sliceCapOffset, Widthptr, lencap)
+ s.WriteAddr(base.Ctxt, noff, Widthptr, arr.Sym().Linksym(), 0)
+ s.WriteInt(base.Ctxt, noff+sliceLenOffset, Widthptr, lencap)
+ s.WriteInt(base.Ctxt, noff+sliceCapOffset, Widthptr, lencap)
}
// addrsym writes the static address of a to n. a must be an ONAME.
// Neither n nor a is modified.
-func addrsym(n, a *Node) {
- if n.Op != ONAME {
- Fatalf("addrsym n op %v", n.Op)
+func addrsym(n *ir.Name, noff int64, a *ir.Name, aoff int64) {
+ if n.Op() != ir.ONAME {
+ base.Fatalf("addrsym n op %v", n.Op())
}
- if n.Sym == nil {
- Fatalf("addrsym nil n sym")
+ if n.Sym() == nil {
+ base.Fatalf("addrsym nil n sym")
}
- if a.Op != ONAME {
- Fatalf("addrsym a op %v", a.Op)
+ if a.Op() != ir.ONAME {
+ base.Fatalf("addrsym a op %v", a.Op())
}
- s := n.Sym.Linksym()
- s.WriteAddr(Ctxt, n.Xoffset, Widthptr, a.Sym.Linksym(), a.Xoffset)
+ s := n.Sym().Linksym()
+ s.WriteAddr(base.Ctxt, noff, Widthptr, a.Sym().Linksym(), aoff)
}
// pfuncsym writes the static address of f to n. f must be a global function.
// Neither n nor f is modified.
-func pfuncsym(n, f *Node) {
- if n.Op != ONAME {
- Fatalf("pfuncsym n op %v", n.Op)
+func pfuncsym(n *ir.Name, noff int64, f *ir.Name) {
+ if n.Op() != ir.ONAME {
+ base.Fatalf("pfuncsym n op %v", n.Op())
}
- if n.Sym == nil {
- Fatalf("pfuncsym nil n sym")
+ if n.Sym() == nil {
+ base.Fatalf("pfuncsym nil n sym")
}
- if f.Class() != PFUNC {
- Fatalf("pfuncsym class not PFUNC %d", f.Class())
+ if f.Class() != ir.PFUNC {
+ base.Fatalf("pfuncsym class not PFUNC %d", f.Class())
}
- s := n.Sym.Linksym()
- s.WriteAddr(Ctxt, n.Xoffset, Widthptr, funcsym(f.Sym).Linksym(), f.Xoffset)
+ s := n.Sym().Linksym()
+ s.WriteAddr(base.Ctxt, noff, Widthptr, funcsym(f.Sym()).Linksym(), 0)
}
// litsym writes the static literal c to n.
// Neither n nor c is modified.
-func litsym(n, c *Node, wid int) {
- if n.Op != ONAME {
- Fatalf("litsym n op %v", n.Op)
- }
- if c.Op != OLITERAL {
- Fatalf("litsym c op %v", c.Op)
- }
- if n.Sym == nil {
- Fatalf("litsym nil n sym")
- }
- s := n.Sym.Linksym()
- switch u := c.Val().U.(type) {
- case bool:
- i := int64(obj.Bool2int(u))
- s.WriteInt(Ctxt, n.Xoffset, wid, i)
-
- case *Mpint:
- s.WriteInt(Ctxt, n.Xoffset, wid, u.Int64())
-
- case *Mpflt:
- f := u.Float64()
- switch n.Type.Etype {
- case TFLOAT32:
- s.WriteFloat32(Ctxt, n.Xoffset, float32(f))
- case TFLOAT64:
- s.WriteFloat64(Ctxt, n.Xoffset, f)
+func litsym(n *ir.Name, noff int64, c ir.Node, wid int) {
+ if n.Op() != ir.ONAME {
+ base.Fatalf("litsym n op %v", n.Op())
+ }
+ if n.Sym() == nil {
+ base.Fatalf("litsym nil n sym")
+ }
+ if c.Op() == ir.ONIL {
+ return
+ }
+ if c.Op() != ir.OLITERAL {
+ base.Fatalf("litsym c op %v", c.Op())
+ }
+ s := n.Sym().Linksym()
+ switch u := c.Val(); u.Kind() {
+ case constant.Bool:
+ i := int64(obj.Bool2int(constant.BoolVal(u)))
+ s.WriteInt(base.Ctxt, noff, wid, i)
+
+ case constant.Int:
+ s.WriteInt(base.Ctxt, noff, wid, ir.IntVal(c.Type(), u))
+
+ case constant.Float:
+ f, _ := constant.Float64Val(u)
+ switch c.Type().Kind() {
+ case types.TFLOAT32:
+ s.WriteFloat32(base.Ctxt, noff, float32(f))
+ case types.TFLOAT64:
+ s.WriteFloat64(base.Ctxt, noff, f)
}
- case *Mpcplx:
- r := u.Real.Float64()
- i := u.Imag.Float64()
- switch n.Type.Etype {
- case TCOMPLEX64:
- s.WriteFloat32(Ctxt, n.Xoffset, float32(r))
- s.WriteFloat32(Ctxt, n.Xoffset+4, float32(i))
- case TCOMPLEX128:
- s.WriteFloat64(Ctxt, n.Xoffset, r)
- s.WriteFloat64(Ctxt, n.Xoffset+8, i)
+ case constant.Complex:
+ re, _ := constant.Float64Val(constant.Real(u))
+ im, _ := constant.Float64Val(constant.Imag(u))
+ switch c.Type().Kind() {
+ case types.TCOMPLEX64:
+ s.WriteFloat32(base.Ctxt, noff, float32(re))
+ s.WriteFloat32(base.Ctxt, noff+4, float32(im))
+ case types.TCOMPLEX128:
+ s.WriteFloat64(base.Ctxt, noff, re)
+ s.WriteFloat64(base.Ctxt, noff+8, im)
}
- case string:
- symdata := stringsym(n.Pos, u)
- s.WriteAddr(Ctxt, n.Xoffset, Widthptr, symdata, 0)
- s.WriteInt(Ctxt, n.Xoffset+int64(Widthptr), Widthptr, int64(len(u)))
+ case constant.String:
+ i := constant.StringVal(u)
+ symdata := stringsym(n.Pos(), i)
+ s.WriteAddr(base.Ctxt, noff, Widthptr, symdata, 0)
+ s.WriteInt(base.Ctxt, noff+int64(Widthptr), Widthptr, int64(len(i)))
default:
- Fatalf("litsym unhandled OLITERAL %v", c)
+ base.Fatalf("litsym unhandled OLITERAL %v", c)
}
}
+++ /dev/null
-// Code generated by "stringer -type=Op -trimprefix=O"; DO NOT EDIT.
-
-package gc
-
-import "strconv"
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[OXXX-0]
- _ = x[ONAME-1]
- _ = x[ONONAME-2]
- _ = x[OTYPE-3]
- _ = x[OPACK-4]
- _ = x[OLITERAL-5]
- _ = x[OADD-6]
- _ = x[OSUB-7]
- _ = x[OOR-8]
- _ = x[OXOR-9]
- _ = x[OADDSTR-10]
- _ = x[OADDR-11]
- _ = x[OANDAND-12]
- _ = x[OAPPEND-13]
- _ = x[OBYTES2STR-14]
- _ = x[OBYTES2STRTMP-15]
- _ = x[ORUNES2STR-16]
- _ = x[OSTR2BYTES-17]
- _ = x[OSTR2BYTESTMP-18]
- _ = x[OSTR2RUNES-19]
- _ = x[OAS-20]
- _ = x[OAS2-21]
- _ = x[OAS2DOTTYPE-22]
- _ = x[OAS2FUNC-23]
- _ = x[OAS2MAPR-24]
- _ = x[OAS2RECV-25]
- _ = x[OASOP-26]
- _ = x[OCALL-27]
- _ = x[OCALLFUNC-28]
- _ = x[OCALLMETH-29]
- _ = x[OCALLINTER-30]
- _ = x[OCALLPART-31]
- _ = x[OCAP-32]
- _ = x[OCLOSE-33]
- _ = x[OCLOSURE-34]
- _ = x[OCOMPLIT-35]
- _ = x[OMAPLIT-36]
- _ = x[OSTRUCTLIT-37]
- _ = x[OARRAYLIT-38]
- _ = x[OSLICELIT-39]
- _ = x[OPTRLIT-40]
- _ = x[OCONV-41]
- _ = x[OCONVIFACE-42]
- _ = x[OCONVNOP-43]
- _ = x[OCOPY-44]
- _ = x[ODCL-45]
- _ = x[ODCLFUNC-46]
- _ = x[ODCLFIELD-47]
- _ = x[ODCLCONST-48]
- _ = x[ODCLTYPE-49]
- _ = x[ODELETE-50]
- _ = x[ODOT-51]
- _ = x[ODOTPTR-52]
- _ = x[ODOTMETH-53]
- _ = x[ODOTINTER-54]
- _ = x[OXDOT-55]
- _ = x[ODOTTYPE-56]
- _ = x[ODOTTYPE2-57]
- _ = x[OEQ-58]
- _ = x[ONE-59]
- _ = x[OLT-60]
- _ = x[OLE-61]
- _ = x[OGE-62]
- _ = x[OGT-63]
- _ = x[ODEREF-64]
- _ = x[OINDEX-65]
- _ = x[OINDEXMAP-66]
- _ = x[OKEY-67]
- _ = x[OSTRUCTKEY-68]
- _ = x[OLEN-69]
- _ = x[OMAKE-70]
- _ = x[OMAKECHAN-71]
- _ = x[OMAKEMAP-72]
- _ = x[OMAKESLICE-73]
- _ = x[OMAKESLICECOPY-74]
- _ = x[OMUL-75]
- _ = x[ODIV-76]
- _ = x[OMOD-77]
- _ = x[OLSH-78]
- _ = x[ORSH-79]
- _ = x[OAND-80]
- _ = x[OANDNOT-81]
- _ = x[ONEW-82]
- _ = x[ONEWOBJ-83]
- _ = x[ONOT-84]
- _ = x[OBITNOT-85]
- _ = x[OPLUS-86]
- _ = x[ONEG-87]
- _ = x[OOROR-88]
- _ = x[OPANIC-89]
- _ = x[OPRINT-90]
- _ = x[OPRINTN-91]
- _ = x[OPAREN-92]
- _ = x[OSEND-93]
- _ = x[OSLICE-94]
- _ = x[OSLICEARR-95]
- _ = x[OSLICESTR-96]
- _ = x[OSLICE3-97]
- _ = x[OSLICE3ARR-98]
- _ = x[OSLICEHEADER-99]
- _ = x[ORECOVER-100]
- _ = x[ORECV-101]
- _ = x[ORUNESTR-102]
- _ = x[OSELRECV-103]
- _ = x[OSELRECV2-104]
- _ = x[OIOTA-105]
- _ = x[OREAL-106]
- _ = x[OIMAG-107]
- _ = x[OCOMPLEX-108]
- _ = x[OALIGNOF-109]
- _ = x[OOFFSETOF-110]
- _ = x[OSIZEOF-111]
- _ = x[OBLOCK-112]
- _ = x[OBREAK-113]
- _ = x[OCASE-114]
- _ = x[OCONTINUE-115]
- _ = x[ODEFER-116]
- _ = x[OEMPTY-117]
- _ = x[OFALL-118]
- _ = x[OFOR-119]
- _ = x[OFORUNTIL-120]
- _ = x[OGOTO-121]
- _ = x[OIF-122]
- _ = x[OLABEL-123]
- _ = x[OGO-124]
- _ = x[ORANGE-125]
- _ = x[ORETURN-126]
- _ = x[OSELECT-127]
- _ = x[OSWITCH-128]
- _ = x[OTYPESW-129]
- _ = x[OTCHAN-130]
- _ = x[OTMAP-131]
- _ = x[OTSTRUCT-132]
- _ = x[OTINTER-133]
- _ = x[OTFUNC-134]
- _ = x[OTARRAY-135]
- _ = x[ODDD-136]
- _ = x[OINLCALL-137]
- _ = x[OEFACE-138]
- _ = x[OITAB-139]
- _ = x[OIDATA-140]
- _ = x[OSPTR-141]
- _ = x[OCLOSUREVAR-142]
- _ = x[OCFUNC-143]
- _ = x[OCHECKNIL-144]
- _ = x[OVARDEF-145]
- _ = x[OVARKILL-146]
- _ = x[OVARLIVE-147]
- _ = x[ORESULT-148]
- _ = x[OINLMARK-149]
- _ = x[ORETJMP-150]
- _ = x[OGETG-151]
- _ = x[OEND-152]
-}
-
-const _Op_name = "XXXNAMENONAMETYPEPACKLITERALADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLFIELDDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECVSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFBLOCKBREAKCASECONTINUEDEFEREMPTYFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYDDDINLCALLEFACEITABIDATASPTRCLOSUREVARCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKRETJMPGETGEND"
-
-var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 36, 39, 45, 49, 55, 61, 70, 82, 91, 100, 112, 121, 123, 126, 136, 143, 150, 157, 161, 165, 173, 181, 190, 198, 201, 206, 213, 220, 226, 235, 243, 251, 257, 261, 270, 277, 281, 284, 291, 299, 307, 314, 320, 323, 329, 336, 344, 348, 355, 363, 365, 367, 369, 371, 373, 375, 380, 385, 393, 396, 405, 408, 412, 420, 427, 436, 449, 452, 455, 458, 461, 464, 467, 473, 476, 482, 485, 491, 495, 498, 502, 507, 512, 518, 523, 527, 532, 540, 548, 554, 563, 574, 581, 585, 592, 599, 607, 611, 615, 619, 626, 633, 641, 647, 652, 657, 661, 669, 674, 679, 683, 686, 694, 698, 700, 705, 707, 712, 718, 724, 730, 736, 741, 745, 752, 758, 763, 769, 772, 779, 784, 788, 793, 797, 807, 812, 820, 826, 833, 840, 846, 853, 859, 863, 866}
-
-func (i Op) String() string {
- if i >= Op(len(_Op_index)-1) {
- return "Op(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _Op_name[_Op_index[i]:_Op_index[i+1]]
-}
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
// Order holds state during the ordering process.
type Order struct {
- out []*Node // list of generated statements
- temp []*Node // stack of temporary variables
- free map[string][]*Node // free list of unused temporaries, by type.LongString().
+ out []ir.Node // list of generated statements
+ temp []*ir.Name // stack of temporary variables
+ free map[string][]*ir.Name // free list of unused temporaries, by type.LongString().
+ edit func(ir.Node) ir.Node // cached closure of o.exprNoLHS
}
// Order rewrites fn.Nbody to apply the ordering constraints
// described in the comment at the top of the file.
-func order(fn *Node) {
- if Debug.W > 1 {
- s := fmt.Sprintf("\nbefore order %v", fn.Func.Nname.Sym)
- dumplist(s, fn.Nbody)
+func order(fn *ir.Func) {
+ if base.Flag.W > 1 {
+ s := fmt.Sprintf("\nbefore order %v", fn.Sym())
+ ir.DumpList(s, fn.Body())
}
- orderBlock(&fn.Nbody, map[string][]*Node{})
+ orderBlock(fn.PtrBody(), map[string][]*ir.Name{})
+}
+
+// append typechecks stmt and appends it to out.
+func (o *Order) append(stmt ir.Node) {
+ o.out = append(o.out, typecheck(stmt, ctxStmt))
}
// newTemp allocates a new temporary with the given type,
// pushes it onto the temp stack, and returns it.
// If clear is true, newTemp emits code to zero the temporary.
-func (o *Order) newTemp(t *types.Type, clear bool) *Node {
- var v *Node
+func (o *Order) newTemp(t *types.Type, clear bool) *ir.Name {
+ var v *ir.Name
// Note: LongString is close to the type equality we want,
// but not exactly. We still need to double-check with types.Identical.
key := t.LongString()
a := o.free[key]
for i, n := range a {
- if types.Identical(t, n.Type) {
+ if types.Identical(t, n.Type()) {
v = a[i]
a[i] = a[len(a)-1]
a = a[:len(a)-1]
v = temp(t)
}
if clear {
- a := nod(OAS, v, nil)
- a = typecheck(a, ctxStmt)
- o.out = append(o.out, a)
+ o.append(ir.Nod(ir.OAS, v, nil))
}
o.temp = append(o.temp, v)
// copyExpr behaves like newTemp but also emits
// code to initialize the temporary to the value n.
-//
-// The clear argument is provided for use when the evaluation
-// of tmp = n turns into a function call that is passed a pointer
-// to the temporary as the output space. If the call blocks before
-// tmp has been written, the garbage collector will still treat the
-// temporary as live, so we must zero it before entering that call.
+func (o *Order) copyExpr(n ir.Node) ir.Node {
+ return o.copyExpr1(n, false)
+}
+
+// copyExprClear is like copyExpr but clears the temp before assignment.
+// It is provided for use when the evaluation of tmp = n turns into
+// a function call that is passed a pointer to the temporary as the output space.
+// If the call blocks before tmp has been written,
+// the garbage collector will still treat the temporary as live,
+// so we must zero it before entering that call.
// Today, this only happens for channel receive operations.
// (The other candidate would be map access, but map access
// returns a pointer to the result data instead of taking a pointer
// to be filled in.)
-func (o *Order) copyExpr(n *Node, t *types.Type, clear bool) *Node {
+func (o *Order) copyExprClear(n ir.Node) *ir.Name {
+ return o.copyExpr1(n, true)
+}
+
+func (o *Order) copyExpr1(n ir.Node, clear bool) *ir.Name {
+ t := n.Type()
v := o.newTemp(t, clear)
- a := nod(OAS, v, n)
- a = typecheck(a, ctxStmt)
- o.out = append(o.out, a)
+ o.append(ir.Nod(ir.OAS, v, n))
return v
}
// The definition of cheap is that n is a variable or constant.
// If not, cheapExpr allocates a new tmp, emits tmp = n,
// and then returns tmp.
-func (o *Order) cheapExpr(n *Node) *Node {
+func (o *Order) cheapExpr(n ir.Node) ir.Node {
if n == nil {
return nil
}
- switch n.Op {
- case ONAME, OLITERAL:
+ switch n.Op() {
+ case ir.ONAME, ir.OLITERAL, ir.ONIL:
return n
- case OLEN, OCAP:
- l := o.cheapExpr(n.Left)
- if l == n.Left {
+ case ir.OLEN, ir.OCAP:
+ l := o.cheapExpr(n.Left())
+ if l == n.Left() {
return n
}
- a := n.sepcopy()
- a.Left = l
+ a := ir.SepCopy(n).(*ir.UnaryExpr)
+ a.SetLeft(l)
return typecheck(a, ctxExpr)
}
- return o.copyExpr(n, n.Type, false)
+ return o.copyExpr(n)
}
// safeExpr returns a safe version of n.
// as assigning to the original n.
//
// The intended use is to apply to x when rewriting x += y into x = x + y.
-func (o *Order) safeExpr(n *Node) *Node {
- switch n.Op {
- case ONAME, OLITERAL:
+func (o *Order) safeExpr(n ir.Node) ir.Node {
+ switch n.Op() {
+ case ir.ONAME, ir.OLITERAL, ir.ONIL:
return n
- case ODOT, OLEN, OCAP:
- l := o.safeExpr(n.Left)
- if l == n.Left {
+ case ir.OLEN, ir.OCAP:
+ l := o.safeExpr(n.Left())
+ if l == n.Left() {
+ return n
+ }
+ a := ir.SepCopy(n).(*ir.UnaryExpr)
+ a.SetLeft(l)
+ return typecheck(a, ctxExpr)
+
+ case ir.ODOT:
+ l := o.safeExpr(n.Left())
+ if l == n.Left() {
+ return n
+ }
+ a := ir.SepCopy(n).(*ir.SelectorExpr)
+ a.SetLeft(l)
+ return typecheck(a, ctxExpr)
+
+ case ir.ODOTPTR:
+ l := o.cheapExpr(n.Left())
+ if l == n.Left() {
return n
}
- a := n.sepcopy()
- a.Left = l
+ a := ir.SepCopy(n).(*ir.SelectorExpr)
+ a.SetLeft(l)
return typecheck(a, ctxExpr)
- case ODOTPTR, ODEREF:
- l := o.cheapExpr(n.Left)
- if l == n.Left {
+ case ir.ODEREF:
+ l := o.cheapExpr(n.Left())
+ if l == n.Left() {
return n
}
- a := n.sepcopy()
- a.Left = l
+ a := ir.SepCopy(n).(*ir.StarExpr)
+ a.SetLeft(l)
return typecheck(a, ctxExpr)
- case OINDEX, OINDEXMAP:
- var l *Node
- if n.Left.Type.IsArray() {
- l = o.safeExpr(n.Left)
+ case ir.OINDEX, ir.OINDEXMAP:
+ var l ir.Node
+ if n.Left().Type().IsArray() {
+ l = o.safeExpr(n.Left())
} else {
- l = o.cheapExpr(n.Left)
+ l = o.cheapExpr(n.Left())
}
- r := o.cheapExpr(n.Right)
- if l == n.Left && r == n.Right {
+ r := o.cheapExpr(n.Right())
+ if l == n.Left() && r == n.Right() {
return n
}
- a := n.sepcopy()
- a.Left = l
- a.Right = r
+ a := ir.SepCopy(n).(*ir.IndexExpr)
+ a.SetLeft(l)
+ a.SetRight(r)
return typecheck(a, ctxExpr)
default:
- Fatalf("order.safeExpr %v", n.Op)
+ base.Fatalf("order.safeExpr %v", n.Op())
return nil // not reached
}
}
// of ordinary stack variables, those are not 'isaddrokay'. Temporaries are okay,
// because we emit explicit VARKILL instructions marking the end of those
// temporaries' lifetimes.
-func isaddrokay(n *Node) bool {
- return islvalue(n) && (n.Op != ONAME || n.Class() == PEXTERN || n.IsAutoTmp())
+func isaddrokay(n ir.Node) bool {
+ return islvalue(n) && (n.Op() != ir.ONAME || n.(*ir.Name).Class() == ir.PEXTERN || ir.IsAutoTmp(n))
}
// addrTemp ensures that n is okay to pass by address to runtime routines.
// tmp = n, and then returns tmp.
// The result of addrTemp MUST be assigned back to n, e.g.
// n.Left = o.addrTemp(n.Left)
-func (o *Order) addrTemp(n *Node) *Node {
- if consttype(n) != CTxxx {
+func (o *Order) addrTemp(n ir.Node) ir.Node {
+ if n.Op() == ir.OLITERAL || n.Op() == ir.ONIL {
// TODO: expand this to all static composite literal nodes?
n = defaultlit(n, nil)
- dowidth(n.Type)
- vstat := readonlystaticname(n.Type)
+ dowidth(n.Type())
+ vstat := readonlystaticname(n.Type())
var s InitSchedule
- s.staticassign(vstat, n)
+ s.staticassign(vstat, 0, n, n.Type())
if s.out != nil {
- Fatalf("staticassign of const generated code: %+v", n)
+ base.Fatalf("staticassign of const generated code: %+v", n)
}
- vstat = typecheck(vstat, ctxExpr)
+ vstat = typecheck(vstat, ctxExpr).(*ir.Name)
return vstat
}
if isaddrokay(n) {
return n
}
- return o.copyExpr(n, n.Type, false)
+ return o.copyExpr(n)
}
// mapKeyTemp prepares n to be a key in a map runtime call and returns n.
// It should only be used for map runtime calls which have *_fast* versions.
-func (o *Order) mapKeyTemp(t *types.Type, n *Node) *Node {
+func (o *Order) mapKeyTemp(t *types.Type, n ir.Node) ir.Node {
// Most map calls need to take the address of the key.
// Exception: map*_fast* calls. See golang.org/issue/19015.
if mapfast(t) == mapslow {
// It would be nice to handle these generally, but because
// []byte keys are not allowed in maps, the use of string(k)
// comes up in important cases in practice. See issue 3512.
-func mapKeyReplaceStrConv(n *Node) bool {
+func mapKeyReplaceStrConv(n ir.Node) bool {
var replaced bool
- switch n.Op {
- case OBYTES2STR:
- n.Op = OBYTES2STRTMP
+ switch n.Op() {
+ case ir.OBYTES2STR:
+ n.SetOp(ir.OBYTES2STRTMP)
replaced = true
- case OSTRUCTLIT:
- for _, elem := range n.List.Slice() {
- if mapKeyReplaceStrConv(elem.Left) {
+ case ir.OSTRUCTLIT:
+ for _, elem := range n.List().Slice() {
+ elem := elem.(*ir.StructKeyExpr)
+ if mapKeyReplaceStrConv(elem.Left()) {
replaced = true
}
}
- case OARRAYLIT:
- for _, elem := range n.List.Slice() {
- if elem.Op == OKEY {
- elem = elem.Right
+ case ir.OARRAYLIT:
+ for _, elem := range n.List().Slice() {
+ if elem.Op() == ir.OKEY {
+ elem = elem.(*ir.KeyExpr).Right()
}
if mapKeyReplaceStrConv(elem) {
replaced = true
// which must have been returned by markTemp.
func (o *Order) popTemp(mark ordermarker) {
for _, n := range o.temp[mark:] {
- key := n.Type.LongString()
+ key := n.Type().LongString()
o.free[key] = append(o.free[key], n)
}
o.temp = o.temp[:mark]
// cleanTempNoPop emits VARKILL instructions to *out
// for each temporary above the mark on the temporary stack.
// It does not pop the temporaries from the stack.
-func (o *Order) cleanTempNoPop(mark ordermarker) []*Node {
- var out []*Node
+func (o *Order) cleanTempNoPop(mark ordermarker) []ir.Node {
+ var out []ir.Node
for i := len(o.temp) - 1; i >= int(mark); i-- {
n := o.temp[i]
- kill := nod(OVARKILL, n, nil)
- kill = typecheck(kill, ctxStmt)
- out = append(out, kill)
+ out = append(out, typecheck(ir.Nod(ir.OVARKILL, n, nil), ctxStmt))
}
return out
}
}
// stmtList orders each of the statements in the list.
-func (o *Order) stmtList(l Nodes) {
+func (o *Order) stmtList(l ir.Nodes) {
s := l.Slice()
for i := range s {
orderMakeSliceCopy(s[i:])
// m = OMAKESLICE([]T, x); OCOPY(m, s)
// and rewrites it to:
// m = OMAKESLICECOPY([]T, x, s); nil
-func orderMakeSliceCopy(s []*Node) {
- if Debug.N != 0 || instrumenting {
+func orderMakeSliceCopy(s []ir.Node) {
+ if base.Flag.N != 0 || instrumenting {
return
}
-
- if len(s) < 2 {
+ if len(s) < 2 || s[0] == nil || s[0].Op() != ir.OAS || s[1] == nil || s[1].Op() != ir.OCOPY {
return
}
- asn := s[0]
- copyn := s[1]
-
- if asn == nil || asn.Op != OAS {
- return
- }
- if asn.Left.Op != ONAME {
- return
- }
- if asn.Left.isBlank() {
- return
- }
- maken := asn.Right
- if maken == nil || maken.Op != OMAKESLICE {
- return
- }
- if maken.Esc == EscNone {
- return
- }
- if maken.Left == nil || maken.Right != nil {
- return
- }
- if copyn.Op != OCOPY {
- return
- }
- if copyn.Left.Op != ONAME {
- return
- }
- if asn.Left.Sym != copyn.Left.Sym {
- return
- }
- if copyn.Right.Op != ONAME {
+ as := s[0].(*ir.AssignStmt)
+ cp := s[1].(*ir.BinaryExpr)
+ if as.Right() == nil || as.Right().Op() != ir.OMAKESLICE || ir.IsBlank(as.Left()) ||
+ as.Left().Op() != ir.ONAME || cp.Left().Op() != ir.ONAME || cp.Right().Op() != ir.ONAME ||
+ as.Left().Name() != cp.Left().Name() || cp.Left().Name() == cp.Right().Name() {
+ // The line above this one is correct with the differing equality operators:
+ // we want as.Left() and cp.Left() to be the same name,
+ // but we want the initial data (cp.Right()) to come from a different name.
return
}
- if copyn.Left.Sym == copyn.Right.Sym {
+ mk := as.Right().(*ir.MakeExpr)
+ if mk.Esc() == EscNone || mk.Left() == nil || mk.Right() != nil {
return
}
-
- maken.Op = OMAKESLICECOPY
- maken.Right = copyn.Right
+ mk.SetOp(ir.OMAKESLICECOPY)
+ mk.SetRight(cp.Right())
// Set bounded when m = OMAKESLICE([]T, len(s)); OCOPY(m, s)
- maken.SetBounded(maken.Left.Op == OLEN && samesafeexpr(maken.Left.Left, copyn.Right))
-
- maken = typecheck(maken, ctxExpr)
-
+ mk.SetBounded(mk.Left().Op() == ir.OLEN && samesafeexpr(mk.Left().(*ir.UnaryExpr).Left(), cp.Right()))
+ as.SetRight(typecheck(mk, ctxExpr))
s[1] = nil // remove separate copy call
-
- return
}
// edge inserts coverage instrumentation for libfuzzer.
func (o *Order) edge() {
- if Debug_libfuzzer == 0 {
+ if base.Debug.Libfuzzer == 0 {
return
}
// Create a new uint8 counter to be allocated in section
// __libfuzzer_extra_counters.
- counter := staticname(types.Types[TUINT8])
- counter.Name.SetLibfuzzerExtraCounter(true)
+ counter := staticname(types.Types[types.TUINT8])
+ counter.Name().SetLibfuzzerExtraCounter(true)
// counter += 1
- incr := nod(OASOP, counter, nodintconst(1))
- incr.SetSubOp(OADD)
- incr = typecheck(incr, ctxStmt)
-
- o.out = append(o.out, incr)
+ incr := ir.NewAssignOpStmt(base.Pos, ir.OADD, counter, nodintconst(1))
+ o.append(incr)
}
// orderBlock orders the block of statements in n into a new slice,
// and then replaces the old slice in n with the new slice.
// free is a map that can be used to obtain temporary variables by type.
-func orderBlock(n *Nodes, free map[string][]*Node) {
+func orderBlock(n *ir.Nodes, free map[string][]*ir.Name) {
var order Order
order.free = free
mark := order.markTemp()
// leaves them as the init list of the final *np.
// The result of exprInPlace MUST be assigned back to n, e.g.
// n.Left = o.exprInPlace(n.Left)
-func (o *Order) exprInPlace(n *Node) *Node {
+func (o *Order) exprInPlace(n ir.Node) ir.Node {
var order Order
order.free = o.free
n = order.expr(n, nil)
- n = addinit(n, order.out)
+ n = initExpr(order.out, n)
// insert new temporaries from order
// at head of outer list.
// The result of orderStmtInPlace MUST be assigned back to n, e.g.
// n.Left = orderStmtInPlace(n.Left)
// free is a map that can be used to obtain temporary variables by type.
-func orderStmtInPlace(n *Node, free map[string][]*Node) *Node {
+func orderStmtInPlace(n ir.Node, free map[string][]*ir.Name) ir.Node {
var order Order
order.free = free
mark := order.markTemp()
}
// init moves n's init list to o.out.
-func (o *Order) init(n *Node) {
- if n.mayBeShared() {
+func (o *Order) init(n ir.Node) {
+ if ir.MayBeShared(n) {
// For concurrency safety, don't mutate potentially shared nodes.
// First, ensure that no work is required here.
- if n.Ninit.Len() > 0 {
- Fatalf("order.init shared node with ninit")
+ if n.Init().Len() > 0 {
+ base.Fatalf("order.init shared node with ninit")
}
return
}
- o.stmtList(n.Ninit)
- n.Ninit.Set(nil)
+ o.stmtList(n.Init())
+ n.PtrInit().Set(nil)
}
// call orders the call expression n.
// n.Op is OCALLMETH/OCALLFUNC/OCALLINTER or a builtin like OCOPY.
-func (o *Order) call(n *Node) {
- if n.Ninit.Len() > 0 {
- // Caller should have already called o.init(n).
- Fatalf("%v with unexpected ninit", n.Op)
+func (o *Order) call(nn ir.Node) {
+ if nn.Init().Len() > 0 {
+ // Caller should have already called o.init(nn).
+ base.Fatalf("%v with unexpected ninit", nn.Op())
}
// Builtin functions.
- if n.Op != OCALLFUNC && n.Op != OCALLMETH && n.Op != OCALLINTER {
- n.Left = o.expr(n.Left, nil)
- n.Right = o.expr(n.Right, nil)
- o.exprList(n.List)
+ if nn.Op() != ir.OCALLFUNC && nn.Op() != ir.OCALLMETH && nn.Op() != ir.OCALLINTER {
+ switch n := nn.(type) {
+ default:
+ base.Fatalf("unexpected call: %+v", n)
+ case *ir.UnaryExpr:
+ n.SetLeft(o.expr(n.Left(), nil))
+ case *ir.ConvExpr:
+ n.SetLeft(o.expr(n.Left(), nil))
+ case *ir.BinaryExpr:
+ n.SetLeft(o.expr(n.Left(), nil))
+ n.SetRight(o.expr(n.Right(), nil))
+ case *ir.MakeExpr:
+ n.SetLeft(o.expr(n.Left(), nil))
+ n.SetRight(o.expr(n.Right(), nil))
+ case *ir.CallExpr:
+ o.exprList(n.List())
+ }
return
}
+ n := nn.(*ir.CallExpr)
fixVariadicCall(n)
- n.Left = o.expr(n.Left, nil)
- o.exprList(n.List)
+ n.SetLeft(o.expr(n.Left(), nil))
+ o.exprList(n.List())
- if n.Op == OCALLINTER {
+ if n.Op() == ir.OCALLINTER {
return
}
- keepAlive := func(arg *Node) {
+ keepAlive := func(arg ir.Node) {
// If the argument is really a pointer being converted to uintptr,
// arrange for the pointer to be kept alive until the call returns,
// by copying it into a temp and marking that temp
// still alive when we pop the temp stack.
- if arg.Op == OCONVNOP && arg.Left.Type.IsUnsafePtr() {
- x := o.copyExpr(arg.Left, arg.Left.Type, false)
- arg.Left = x
- x.Name.SetAddrtaken(true) // ensure SSA keeps the x variable
- n.Nbody.Append(typecheck(nod(OVARLIVE, x, nil), ctxStmt))
+ if arg.Op() == ir.OCONVNOP {
+ if arg.Left().Type().IsUnsafePtr() {
+ x := o.copyExpr(arg.Left())
+ arg.SetLeft(x)
+ x.Name().SetAddrtaken(true) // ensure SSA keeps the x variable
+ n.PtrBody().Append(typecheck(ir.Nod(ir.OVARLIVE, x, nil), ctxStmt))
+ }
}
}
// Check for "unsafe-uintptr" tag provided by escape analysis.
- for i, param := range n.Left.Type.Params().FieldSlice() {
+ for i, param := range n.Left().Type().Params().FieldSlice() {
if param.Note == unsafeUintptrTag || param.Note == uintptrEscapesTag {
- if arg := n.List.Index(i); arg.Op == OSLICELIT {
- for _, elt := range arg.List.Slice() {
+ if arg := n.List().Index(i); arg.Op() == ir.OSLICELIT {
+ for _, elt := range arg.List().Slice() {
keepAlive(elt)
}
} else {
// cases they are also typically registerizable, so not much harm done.
// And this only applies to the multiple-assignment form.
// We could do a more precise analysis if needed, like in walk.go.
-func (o *Order) mapAssign(n *Node) {
- switch n.Op {
+func (o *Order) mapAssign(n ir.Node) {
+ switch n.Op() {
default:
- Fatalf("order.mapAssign %v", n.Op)
-
- case OAS, OASOP:
- if n.Left.Op == OINDEXMAP {
- // Make sure we evaluate the RHS before starting the map insert.
- // We need to make sure the RHS won't panic. See issue 22881.
- if n.Right.Op == OAPPEND {
- s := n.Right.List.Slice()[1:]
- for i, n := range s {
- s[i] = o.cheapExpr(n)
- }
- } else {
- n.Right = o.cheapExpr(n.Right)
- }
+ base.Fatalf("order.mapAssign %v", n.Op())
+
+ case ir.OAS:
+ if n.Left().Op() == ir.OINDEXMAP {
+ n.SetRight(o.safeMapRHS(n.Right()))
+ }
+ o.out = append(o.out, n)
+ case ir.OASOP:
+ if n.Left().Op() == ir.OINDEXMAP {
+ n.SetRight(o.safeMapRHS(n.Right()))
}
o.out = append(o.out, n)
- case OAS2, OAS2DOTTYPE, OAS2MAPR, OAS2FUNC:
- var post []*Node
- for i, m := range n.List.Slice() {
+ case ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2MAPR, ir.OAS2FUNC:
+ var post []ir.Node
+ for i, m := range n.List().Slice() {
switch {
- case m.Op == OINDEXMAP:
- if !m.Left.IsAutoTmp() {
- m.Left = o.copyExpr(m.Left, m.Left.Type, false)
+ case m.Op() == ir.OINDEXMAP:
+ m := m.(*ir.IndexExpr)
+ if !ir.IsAutoTmp(m.Left()) {
+ m.SetLeft(o.copyExpr(m.Left()))
}
- if !m.Right.IsAutoTmp() {
- m.Right = o.copyExpr(m.Right, m.Right.Type, false)
+ if !ir.IsAutoTmp(m.Right()) {
+ m.SetRight(o.copyExpr(m.Right()))
}
fallthrough
- case instrumenting && n.Op == OAS2FUNC && !m.isBlank():
- t := o.newTemp(m.Type, false)
- n.List.SetIndex(i, t)
- a := nod(OAS, m, t)
- a = typecheck(a, ctxStmt)
- post = append(post, a)
+ case instrumenting && n.Op() == ir.OAS2FUNC && !ir.IsBlank(m):
+ t := o.newTemp(m.Type(), false)
+ n.List().SetIndex(i, t)
+ a := ir.Nod(ir.OAS, m, t)
+ post = append(post, typecheck(a, ctxStmt))
}
}
}
}
+func (o *Order) safeMapRHS(r ir.Node) ir.Node {
+ // Make sure we evaluate the RHS before starting the map insert.
+ // We need to make sure the RHS won't panic. See issue 22881.
+ if r.Op() == ir.OAPPEND {
+ s := r.List().Slice()[1:]
+ for i, n := range s {
+ s[i] = o.cheapExpr(n)
+ }
+ return r
+ }
+ return o.cheapExpr(r)
+}
+
// stmt orders the statement n, appending to o.out.
// Temporaries created during the statement are cleaned
// up using VARKILL instructions as possible.
-func (o *Order) stmt(n *Node) {
+func (o *Order) stmt(n ir.Node) {
if n == nil {
return
}
lno := setlineno(n)
o.init(n)
- switch n.Op {
+ switch n.Op() {
default:
- Fatalf("order.stmt %v", n.Op)
+ base.Fatalf("order.stmt %v", n.Op())
- case OVARKILL, OVARLIVE, OINLMARK:
+ case ir.OVARKILL, ir.OVARLIVE, ir.OINLMARK:
o.out = append(o.out, n)
- case OAS:
+ case ir.OAS:
t := o.markTemp()
- n.Left = o.expr(n.Left, nil)
- n.Right = o.expr(n.Right, n.Left)
+ n.SetLeft(o.expr(n.Left(), nil))
+ n.SetRight(o.expr(n.Right(), n.Left()))
o.mapAssign(n)
o.cleanTemp(t)
- case OASOP:
+ case ir.OASOP:
t := o.markTemp()
- n.Left = o.expr(n.Left, nil)
- n.Right = o.expr(n.Right, nil)
+ n.SetLeft(o.expr(n.Left(), nil))
+ n.SetRight(o.expr(n.Right(), nil))
- if instrumenting || n.Left.Op == OINDEXMAP && (n.SubOp() == ODIV || n.SubOp() == OMOD) {
+ if instrumenting || n.Left().Op() == ir.OINDEXMAP && (n.SubOp() == ir.ODIV || n.SubOp() == ir.OMOD) {
// Rewrite m[k] op= r into m[k] = m[k] op r so
// that we can ensure that if op panics
// because r is zero, the panic happens before
// the map assignment.
-
- n.Left = o.safeExpr(n.Left)
-
- l := treecopy(n.Left, src.NoXPos)
- if l.Op == OINDEXMAP {
- l.SetIndexMapLValue(false)
+ // DeepCopy is a big hammer here, but safeExpr
+ // makes sure there is nothing too deep being copied.
+ l1 := o.safeExpr(n.Left())
+ l2 := ir.DeepCopy(src.NoXPos, l1)
+ if l2.Op() == ir.OINDEXMAP {
+ l2.SetIndexMapLValue(false)
}
- l = o.copyExpr(l, n.Left.Type, false)
- n.Right = nod(n.SubOp(), l, n.Right)
- n.Right = typecheck(n.Right, ctxExpr)
- n.Right = o.expr(n.Right, nil)
-
- n.Op = OAS
- n.ResetAux()
+ l2 = o.copyExpr(l2)
+ r := o.expr(typecheck(ir.NewBinaryExpr(n.Pos(), n.SubOp(), l2, n.Right()), ctxExpr), nil)
+ as := typecheck(ir.NodAt(n.Pos(), ir.OAS, l1, r), ctxStmt)
+ o.mapAssign(as)
+ o.cleanTemp(t)
+ return
}
o.mapAssign(n)
o.cleanTemp(t)
- case OAS2:
+ case ir.OAS2:
t := o.markTemp()
- o.exprList(n.List)
- o.exprList(n.Rlist)
+ o.exprList(n.List())
+ o.exprList(n.Rlist())
o.mapAssign(n)
o.cleanTemp(t)
// Special: avoid copy of func call n.Right
- case OAS2FUNC:
+ case ir.OAS2FUNC:
+ n := n.(*ir.AssignListStmt)
t := o.markTemp()
- o.exprList(n.List)
- o.init(n.Right)
- o.call(n.Right)
+ o.exprList(n.List())
+ o.init(n.Rlist().First())
+ o.call(n.Rlist().First())
o.as2(n)
o.cleanTemp(t)
//
// OAS2MAPR: make sure key is addressable if needed,
// and make sure OINDEXMAP is not copied out.
- case OAS2DOTTYPE, OAS2RECV, OAS2MAPR:
+ case ir.OAS2DOTTYPE, ir.OAS2RECV, ir.OAS2MAPR:
+ n := n.(*ir.AssignListStmt)
t := o.markTemp()
- o.exprList(n.List)
-
- switch r := n.Right; r.Op {
- case ODOTTYPE2, ORECV:
- r.Left = o.expr(r.Left, nil)
- case OINDEXMAP:
- r.Left = o.expr(r.Left, nil)
- r.Right = o.expr(r.Right, nil)
+ o.exprList(n.List())
+
+ switch r := n.Rlist().First(); r.Op() {
+ case ir.ODOTTYPE2:
+ r.SetLeft(o.expr(r.Left(), nil))
+ case ir.ORECV:
+ r.SetLeft(o.expr(r.Left(), nil))
+ case ir.OINDEXMAP:
+ r.SetLeft(o.expr(r.Left(), nil))
+ r.SetRight(o.expr(r.Right(), nil))
// See similar conversion for OINDEXMAP below.
- _ = mapKeyReplaceStrConv(r.Right)
- r.Right = o.mapKeyTemp(r.Left.Type, r.Right)
+ _ = mapKeyReplaceStrConv(r.Right())
+ r.SetRight(o.mapKeyTemp(r.Left().Type(), r.Right()))
default:
- Fatalf("order.stmt: %v", r.Op)
+ base.Fatalf("order.stmt: %v", r.Op())
}
o.okAs2(n)
o.cleanTemp(t)
// Special: does not save n onto out.
- case OBLOCK, OEMPTY:
- o.stmtList(n.List)
+ case ir.OBLOCK:
+ o.stmtList(n.List())
// Special: n->left is not an expression; save as is.
- case OBREAK,
- OCONTINUE,
- ODCL,
- ODCLCONST,
- ODCLTYPE,
- OFALL,
- OGOTO,
- OLABEL,
- ORETJMP:
+ case ir.OBREAK,
+ ir.OCONTINUE,
+ ir.ODCL,
+ ir.ODCLCONST,
+ ir.ODCLTYPE,
+ ir.OFALL,
+ ir.OGOTO,
+ ir.OLABEL,
+ ir.ORETJMP:
o.out = append(o.out, n)
// Special: handle call arguments.
- case OCALLFUNC, OCALLINTER, OCALLMETH:
+ case ir.OCALLFUNC, ir.OCALLINTER, ir.OCALLMETH:
t := o.markTemp()
o.call(n)
o.out = append(o.out, n)
o.cleanTemp(t)
- case OCLOSE,
- OCOPY,
- OPRINT,
- OPRINTN,
- ORECOVER,
- ORECV:
+ case ir.OCLOSE, ir.ORECV:
+ t := o.markTemp()
+ n.SetLeft(o.expr(n.Left(), nil))
+ o.out = append(o.out, n)
+ o.cleanTemp(t)
+
+ case ir.OCOPY:
+ t := o.markTemp()
+ n.SetLeft(o.expr(n.Left(), nil))
+ n.SetRight(o.expr(n.Right(), nil))
+ o.out = append(o.out, n)
+ o.cleanTemp(t)
+
+ case ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
t := o.markTemp()
- n.Left = o.expr(n.Left, nil)
- n.Right = o.expr(n.Right, nil)
- o.exprList(n.List)
- o.exprList(n.Rlist)
+ o.exprList(n.List())
o.out = append(o.out, n)
o.cleanTemp(t)
// Special: order arguments to inner call but not call itself.
- case ODEFER, OGO:
+ case ir.ODEFER, ir.OGO:
t := o.markTemp()
- o.init(n.Left)
- o.call(n.Left)
+ o.init(n.Left())
+ o.call(n.Left())
o.out = append(o.out, n)
o.cleanTemp(t)
- case ODELETE:
+ case ir.ODELETE:
t := o.markTemp()
- n.List.SetFirst(o.expr(n.List.First(), nil))
- n.List.SetSecond(o.expr(n.List.Second(), nil))
- n.List.SetSecond(o.mapKeyTemp(n.List.First().Type, n.List.Second()))
+ n.List().SetFirst(o.expr(n.List().First(), nil))
+ n.List().SetSecond(o.expr(n.List().Second(), nil))
+ n.List().SetSecond(o.mapKeyTemp(n.List().First().Type(), n.List().Second()))
o.out = append(o.out, n)
o.cleanTemp(t)
// Clean temporaries from condition evaluation at
// beginning of loop body and after for statement.
- case OFOR:
+ case ir.OFOR:
t := o.markTemp()
- n.Left = o.exprInPlace(n.Left)
- n.Nbody.Prepend(o.cleanTempNoPop(t)...)
- orderBlock(&n.Nbody, o.free)
- n.Right = orderStmtInPlace(n.Right, o.free)
+ n.SetLeft(o.exprInPlace(n.Left()))
+ n.PtrBody().Prepend(o.cleanTempNoPop(t)...)
+ orderBlock(n.PtrBody(), o.free)
+ n.SetRight(orderStmtInPlace(n.Right(), o.free))
o.out = append(o.out, n)
o.cleanTemp(t)
// Clean temporaries from condition at
// beginning of both branches.
- case OIF:
+ case ir.OIF:
t := o.markTemp()
- n.Left = o.exprInPlace(n.Left)
- n.Nbody.Prepend(o.cleanTempNoPop(t)...)
- n.Rlist.Prepend(o.cleanTempNoPop(t)...)
+ n.SetLeft(o.exprInPlace(n.Left()))
+ n.PtrBody().Prepend(o.cleanTempNoPop(t)...)
+ n.PtrRlist().Prepend(o.cleanTempNoPop(t)...)
o.popTemp(t)
- orderBlock(&n.Nbody, o.free)
- orderBlock(&n.Rlist, o.free)
+ orderBlock(n.PtrBody(), o.free)
+ orderBlock(n.PtrRlist(), o.free)
o.out = append(o.out, n)
// Special: argument will be converted to interface using convT2E
// so make sure it is an addressable temporary.
- case OPANIC:
+ case ir.OPANIC:
t := o.markTemp()
- n.Left = o.expr(n.Left, nil)
- if !n.Left.Type.IsInterface() {
- n.Left = o.addrTemp(n.Left)
+ n.SetLeft(o.expr(n.Left(), nil))
+ if !n.Left().Type().IsInterface() {
+ n.SetLeft(o.addrTemp(n.Left()))
}
o.out = append(o.out, n)
o.cleanTemp(t)
- case ORANGE:
+ case ir.ORANGE:
// n.Right is the expression being ranged over.
// order it, and then make a copy if we need one.
// We almost always do, to ensure that we don't
// Mark []byte(str) range expression to reuse string backing storage.
// It is safe because the storage cannot be mutated.
- if n.Right.Op == OSTR2BYTES {
- n.Right.Op = OSTR2BYTESTMP
+ n := n.(*ir.RangeStmt)
+ if n.Right().Op() == ir.OSTR2BYTES {
+ n.Right().(*ir.ConvExpr).SetOp(ir.OSTR2BYTESTMP)
}
t := o.markTemp()
- n.Right = o.expr(n.Right, nil)
+ n.SetRight(o.expr(n.Right(), nil))
orderBody := true
- switch n.Type.Etype {
+ switch n.Type().Kind() {
default:
- Fatalf("order.stmt range %v", n.Type)
+ base.Fatalf("order.stmt range %v", n.Type())
- case TARRAY, TSLICE:
- if n.List.Len() < 2 || n.List.Second().isBlank() {
+ case types.TARRAY, types.TSLICE:
+ if n.List().Len() < 2 || ir.IsBlank(n.List().Second()) {
// for i := range x will only use x once, to compute len(x).
// No need to copy it.
break
}
fallthrough
- case TCHAN, TSTRING:
+ case types.TCHAN, types.TSTRING:
// chan, string, slice, array ranges use value multiple times.
// make copy.
- r := n.Right
+ r := n.Right()
- if r.Type.IsString() && r.Type != types.Types[TSTRING] {
- r = nod(OCONV, r, nil)
- r.Type = types.Types[TSTRING]
+ if r.Type().IsString() && r.Type() != types.Types[types.TSTRING] {
+ r = ir.Nod(ir.OCONV, r, nil)
+ r.SetType(types.Types[types.TSTRING])
r = typecheck(r, ctxExpr)
}
- n.Right = o.copyExpr(r, r.Type, false)
+ n.SetRight(o.copyExpr(r))
- case TMAP:
+ case types.TMAP:
if isMapClear(n) {
// Preserve the body of the map clear pattern so it can
// be detected during walk. The loop body will not be used
// copy the map value in case it is a map literal.
// TODO(rsc): Make tmp = literal expressions reuse tmp.
// For maps tmp is just one word so it hardly matters.
- r := n.Right
- n.Right = o.copyExpr(r, r.Type, false)
+ r := n.Right()
+ n.SetRight(o.copyExpr(r))
- // prealloc[n] is the temp for the iterator.
+ // n.Prealloc is the temp for the iterator.
// hiter contains pointers and needs to be zeroed.
- prealloc[n] = o.newTemp(hiter(n.Type), true)
+ n.Prealloc = o.newTemp(hiter(n.Type()), true)
}
- o.exprListInPlace(n.List)
+ o.exprListInPlace(n.List())
if orderBody {
- orderBlock(&n.Nbody, o.free)
+ orderBlock(n.PtrBody(), o.free)
}
o.out = append(o.out, n)
o.cleanTemp(t)
- case ORETURN:
- o.exprList(n.List)
+ case ir.ORETURN:
+ o.exprList(n.List())
o.out = append(o.out, n)
// Special: clean case temporaries in each block entry.
// reordered after the channel evaluation for a different
// case (if p were nil, then the timing of the fault would
// give this away).
- case OSELECT:
+ case ir.OSELECT:
t := o.markTemp()
-
- for _, n2 := range n.List.Slice() {
- if n2.Op != OCASE {
- Fatalf("order select case %v", n2.Op)
- }
- r := n2.Left
- setlineno(n2)
+ for _, ncas := range n.List().Slice() {
+ ncas := ncas.(*ir.CaseStmt)
+ r := ncas.Left()
+ setlineno(ncas)
// Append any new body prologue to ninit.
// The next loop will insert ninit into nbody.
- if n2.Ninit.Len() != 0 {
- Fatalf("order select ninit")
+ if ncas.Init().Len() != 0 {
+ base.Fatalf("order select ninit")
}
if r == nil {
continue
}
- switch r.Op {
+ switch r.Op() {
default:
- Dump("select case", r)
- Fatalf("unknown op in select %v", r.Op)
-
- // If this is case x := <-ch or case x, y := <-ch, the case has
- // the ODCL nodes to declare x and y. We want to delay that
- // declaration (and possible allocation) until inside the case body.
- // Delete the ODCL nodes here and recreate them inside the body below.
- case OSELRECV, OSELRECV2:
- if r.Colas() {
- i := 0
- if r.Ninit.Len() != 0 && r.Ninit.First().Op == ODCL && r.Ninit.First().Left == r.Left {
- i++
- }
- if i < r.Ninit.Len() && r.Ninit.Index(i).Op == ODCL && r.List.Len() != 0 && r.Ninit.Index(i).Left == r.List.First() {
- i++
- }
- if i >= r.Ninit.Len() {
- r.Ninit.Set(nil)
- }
- }
+ ir.Dump("select case", r)
+ base.Fatalf("unknown op in select %v", r.Op())
- if r.Ninit.Len() != 0 {
- dumplist("ninit", r.Ninit)
- Fatalf("ninit on select recv")
- }
-
- // case x = <-c
+ case ir.OSELRECV2:
// case x, ok = <-c
- // r->left is x, r->ntest is ok, r->right is ORECV, r->right->left is c.
- // r->left == N means 'case <-c'.
- // c is always evaluated; x and ok are only evaluated when assigned.
- r.Right.Left = o.expr(r.Right.Left, nil)
-
- if !r.Right.Left.IsAutoTmp() {
- r.Right.Left = o.copyExpr(r.Right.Left, r.Right.Left.Type, false)
+ r := r.(*ir.AssignListStmt)
+ recv := r.Rlist().First().(*ir.UnaryExpr)
+ recv.SetLeft(o.expr(recv.Left(), nil))
+ if !ir.IsAutoTmp(recv.Left()) {
+ recv.SetLeft(o.copyExpr(recv.Left()))
}
-
- // Introduce temporary for receive and move actual copy into case body.
- // avoids problems with target being addressed, as usual.
- // NOTE: If we wanted to be clever, we could arrange for just one
- // temporary per distinct type, sharing the temp among all receives
- // with that temp. Similarly one ok bool could be shared among all
- // the x,ok receives. Not worth doing until there's a clear need.
- if r.Left != nil && r.Left.isBlank() {
- r.Left = nil
- }
- if r.Left != nil {
- // use channel element type for temporary to avoid conversions,
- // such as in case interfacevalue = <-intchan.
- // the conversion happens in the OAS instead.
- tmp1 := r.Left
-
- if r.Colas() {
- tmp2 := nod(ODCL, tmp1, nil)
- tmp2 = typecheck(tmp2, ctxStmt)
- n2.Ninit.Append(tmp2)
+ init := r.PtrInit().Slice()
+ r.PtrInit().Set(nil)
+
+ colas := r.Colas()
+ do := func(i int, t *types.Type) {
+ n := r.List().Index(i)
+ if ir.IsBlank(n) {
+ return
}
-
- r.Left = o.newTemp(r.Right.Left.Type.Elem(), r.Right.Left.Type.Elem().HasPointers())
- tmp2 := nod(OAS, tmp1, r.Left)
- tmp2 = typecheck(tmp2, ctxStmt)
- n2.Ninit.Append(tmp2)
- }
-
- if r.List.Len() != 0 && r.List.First().isBlank() {
- r.List.Set(nil)
- }
- if r.List.Len() != 0 {
- tmp1 := r.List.First()
- if r.Colas() {
- tmp2 := nod(ODCL, tmp1, nil)
- tmp2 = typecheck(tmp2, ctxStmt)
- n2.Ninit.Append(tmp2)
+ // If this is case x := <-ch or case x, y := <-ch, the case has
+ // the ODCL nodes to declare x and y. We want to delay that
+ // declaration (and possible allocation) until inside the case body.
+ // Delete the ODCL nodes here and recreate them inside the body below.
+ if colas {
+ if len(init) > 0 && init[0].Op() == ir.ODCL && init[0].(*ir.Decl).Left() == n {
+ init = init[1:]
+ }
+ dcl := typecheck(ir.Nod(ir.ODCL, n, nil), ctxStmt)
+ ncas.PtrInit().Append(dcl)
}
-
- r.List.Set1(o.newTemp(types.Types[TBOOL], false))
- tmp2 := okas(tmp1, r.List.First())
- tmp2 = typecheck(tmp2, ctxStmt)
- n2.Ninit.Append(tmp2)
+ tmp := o.newTemp(t, t.HasPointers())
+ as := typecheck(ir.Nod(ir.OAS, n, conv(tmp, n.Type())), ctxStmt)
+ ncas.PtrInit().Append(as)
+ r.PtrList().SetIndex(i, tmp)
}
- orderBlock(&n2.Ninit, o.free)
+ do(0, recv.Left().Type().Elem())
+ do(1, types.Types[types.TBOOL])
+ if len(init) != 0 {
+ ir.DumpList("ninit", r.Init())
+ base.Fatalf("ninit on select recv")
+ }
+ orderBlock(ncas.PtrInit(), o.free)
- case OSEND:
- if r.Ninit.Len() != 0 {
- dumplist("ninit", r.Ninit)
- Fatalf("ninit on select send")
+ case ir.OSEND:
+ if r.Init().Len() != 0 {
+ ir.DumpList("ninit", r.Init())
+ base.Fatalf("ninit on select send")
}
// case c <- x
// r->left is c, r->right is x, both are always evaluated.
- r.Left = o.expr(r.Left, nil)
+ r.SetLeft(o.expr(r.Left(), nil))
- if !r.Left.IsAutoTmp() {
- r.Left = o.copyExpr(r.Left, r.Left.Type, false)
+ if !ir.IsAutoTmp(r.Left()) {
+ r.SetLeft(o.copyExpr(r.Left()))
}
- r.Right = o.expr(r.Right, nil)
- if !r.Right.IsAutoTmp() {
- r.Right = o.copyExpr(r.Right, r.Right.Type, false)
+ r.SetRight(o.expr(r.Right(), nil))
+ if !ir.IsAutoTmp(r.Right()) {
+ r.SetRight(o.copyExpr(r.Right()))
}
}
}
// Now that we have accumulated all the temporaries, clean them.
// Also insert any ninit queued during the previous loop.
// (The temporary cleaning must follow that ninit work.)
- for _, n3 := range n.List.Slice() {
- orderBlock(&n3.Nbody, o.free)
- n3.Nbody.Prepend(o.cleanTempNoPop(t)...)
+ for _, cas := range n.List().Slice() {
+ cas := cas.(*ir.CaseStmt)
+ orderBlock(cas.PtrBody(), o.free)
+ cas.PtrBody().Prepend(o.cleanTempNoPop(t)...)
// TODO(mdempsky): Is this actually necessary?
// walkselect appears to walk Ninit.
- n3.Nbody.Prepend(n3.Ninit.Slice()...)
- n3.Ninit.Set(nil)
+ cas.PtrBody().Prepend(cas.Init().Slice()...)
+ cas.PtrInit().Set(nil)
}
o.out = append(o.out, n)
o.popTemp(t)
// Special: value being sent is passed as a pointer; make it addressable.
- case OSEND:
+ case ir.OSEND:
t := o.markTemp()
- n.Left = o.expr(n.Left, nil)
- n.Right = o.expr(n.Right, nil)
+ n.SetLeft(o.expr(n.Left(), nil))
+ n.SetRight(o.expr(n.Right(), nil))
if instrumenting {
// Force copying to the stack so that (chan T)(nil) <- x
// is still instrumented as a read of x.
- n.Right = o.copyExpr(n.Right, n.Right.Type, false)
+ n.SetRight(o.copyExpr(n.Right()))
} else {
- n.Right = o.addrTemp(n.Right)
+ n.SetRight(o.addrTemp(n.Right()))
}
o.out = append(o.out, n)
o.cleanTemp(t)
// the if-else chain instead.)
// For now just clean all the temporaries at the end.
// In practice that's fine.
- case OSWITCH:
- if Debug_libfuzzer != 0 && !hasDefaultCase(n) {
+ case ir.OSWITCH:
+ n := n.(*ir.SwitchStmt)
+ if base.Debug.Libfuzzer != 0 && !hasDefaultCase(n) {
// Add empty "default:" case for instrumentation.
- n.List.Append(nod(OCASE, nil, nil))
+ n.PtrList().Append(ir.Nod(ir.OCASE, nil, nil))
}
t := o.markTemp()
- n.Left = o.expr(n.Left, nil)
- for _, ncas := range n.List.Slice() {
- if ncas.Op != OCASE {
- Fatalf("order switch case %v", ncas.Op)
- }
- o.exprListInPlace(ncas.List)
- orderBlock(&ncas.Nbody, o.free)
+ n.SetLeft(o.expr(n.Left(), nil))
+ for _, ncas := range n.List().Slice() {
+ ncas := ncas.(*ir.CaseStmt)
+ o.exprListInPlace(ncas.List())
+ orderBlock(ncas.PtrBody(), o.free)
}
o.out = append(o.out, n)
o.cleanTemp(t)
}
- lineno = lno
+ base.Pos = lno
}
-func hasDefaultCase(n *Node) bool {
- for _, ncas := range n.List.Slice() {
- if ncas.Op != OCASE {
- Fatalf("expected case, found %v", ncas.Op)
- }
- if ncas.List.Len() == 0 {
+func hasDefaultCase(n *ir.SwitchStmt) bool {
+ for _, ncas := range n.List().Slice() {
+ ncas := ncas.(*ir.CaseStmt)
+ if ncas.List().Len() == 0 {
return true
}
}
}
// exprList orders the expression list l into o.
-func (o *Order) exprList(l Nodes) {
+func (o *Order) exprList(l ir.Nodes) {
s := l.Slice()
for i := range s {
s[i] = o.expr(s[i], nil)
// exprListInPlace orders the expression list l but saves
// the side effects on the individual expression ninit lists.
-func (o *Order) exprListInPlace(l Nodes) {
+func (o *Order) exprListInPlace(l ir.Nodes) {
s := l.Slice()
for i := range s {
s[i] = o.exprInPlace(s[i])
}
}
-// prealloc[x] records the allocation to use for x.
-var prealloc = map[*Node]*Node{}
+func (o *Order) exprNoLHS(n ir.Node) ir.Node {
+ return o.expr(n, nil)
+}
// expr orders a single expression, appending side
// effects to o.out as needed.
// to avoid copying the result of the expression to a temporary.)
// The result of expr MUST be assigned back to n, e.g.
// n.Left = o.expr(n.Left, lhs)
-func (o *Order) expr(n, lhs *Node) *Node {
+func (o *Order) expr(n, lhs ir.Node) ir.Node {
if n == nil {
return n
}
-
lno := setlineno(n)
+ n = o.expr1(n, lhs)
+ base.Pos = lno
+ return n
+}
+
+func (o *Order) expr1(n, lhs ir.Node) ir.Node {
o.init(n)
- switch n.Op {
+ switch n.Op() {
default:
- n.Left = o.expr(n.Left, nil)
- n.Right = o.expr(n.Right, nil)
- o.exprList(n.List)
- o.exprList(n.Rlist)
+ if o.edit == nil {
+ o.edit = o.exprNoLHS // create closure once
+ }
+ ir.EditChildren(n, o.edit)
+ return n
// Addition of strings turns into a function call.
// Allocate a temporary to hold the strings.
// Fewer than 5 strings use direct runtime helpers.
- case OADDSTR:
- o.exprList(n.List)
+ case ir.OADDSTR:
+ n := n.(*ir.AddStringExpr)
+ o.exprList(n.List())
- if n.List.Len() > 5 {
- t := types.NewArray(types.Types[TSTRING], int64(n.List.Len()))
- prealloc[n] = o.newTemp(t, false)
+ if n.List().Len() > 5 {
+ t := types.NewArray(types.Types[types.TSTRING], int64(n.List().Len()))
+ n.Prealloc = o.newTemp(t, false)
}
// Mark string(byteSlice) arguments to reuse byteSlice backing
hasbyte := false
haslit := false
- for _, n1 := range n.List.Slice() {
- hasbyte = hasbyte || n1.Op == OBYTES2STR
- haslit = haslit || n1.Op == OLITERAL && len(n1.StringVal()) != 0
+ for _, n1 := range n.List().Slice() {
+ hasbyte = hasbyte || n1.Op() == ir.OBYTES2STR
+ haslit = haslit || n1.Op() == ir.OLITERAL && len(ir.StringVal(n1)) != 0
}
if haslit && hasbyte {
- for _, n2 := range n.List.Slice() {
- if n2.Op == OBYTES2STR {
- n2.Op = OBYTES2STRTMP
+ for _, n2 := range n.List().Slice() {
+ if n2.Op() == ir.OBYTES2STR {
+ n2.SetOp(ir.OBYTES2STRTMP)
}
}
}
+ return n
- case OINDEXMAP:
- n.Left = o.expr(n.Left, nil)
- n.Right = o.expr(n.Right, nil)
+ case ir.OINDEXMAP:
+ n.SetLeft(o.expr(n.Left(), nil))
+ n.SetRight(o.expr(n.Right(), nil))
needCopy := false
if !n.IndexMapLValue() {
// can not be changed before the map index by forcing
// the map index to happen immediately following the
// conversions. See copyExpr a few lines below.
- needCopy = mapKeyReplaceStrConv(n.Right)
+ needCopy = mapKeyReplaceStrConv(n.Right())
if instrumenting {
- // Race detector needs the copy so it can
- // call treecopy on the result.
+ // Race detector needs the copy.
needCopy = true
}
}
// key must be addressable
- n.Right = o.mapKeyTemp(n.Left.Type, n.Right)
+ n.SetRight(o.mapKeyTemp(n.Left().Type(), n.Right()))
if needCopy {
- n = o.copyExpr(n, n.Type, false)
+ return o.copyExpr(n)
}
+ return n
// concrete type (not interface) argument might need an addressable
// temporary to pass to the runtime conversion routine.
- case OCONVIFACE:
- n.Left = o.expr(n.Left, nil)
- if n.Left.Type.IsInterface() {
- break
+ case ir.OCONVIFACE:
+ n.SetLeft(o.expr(n.Left(), nil))
+ if n.Left().Type().IsInterface() {
+ return n
}
- if _, needsaddr := convFuncName(n.Left.Type, n.Type); needsaddr || isStaticCompositeLiteral(n.Left) {
+ if _, needsaddr := convFuncName(n.Left().Type(), n.Type()); needsaddr || isStaticCompositeLiteral(n.Left()) {
// Need a temp if we need to pass the address to the conversion function.
// We also process static composite literal node here, making a named static global
// whose address we can put directly in an interface (see OCONVIFACE case in walk).
- n.Left = o.addrTemp(n.Left)
+ n.SetLeft(o.addrTemp(n.Left()))
}
+ return n
- case OCONVNOP:
- if n.Type.IsKind(TUNSAFEPTR) && n.Left.Type.IsKind(TUINTPTR) && (n.Left.Op == OCALLFUNC || n.Left.Op == OCALLINTER || n.Left.Op == OCALLMETH) {
+ case ir.OCONVNOP:
+ if n.Type().IsKind(types.TUNSAFEPTR) && n.Left().Type().IsKind(types.TUINTPTR) && (n.Left().Op() == ir.OCALLFUNC || n.Left().Op() == ir.OCALLINTER || n.Left().Op() == ir.OCALLMETH) {
+ call := n.Left().(*ir.CallExpr)
// When reordering unsafe.Pointer(f()) into a separate
// statement, the conversion and function call must stay
// together. See golang.org/issue/15329.
- o.init(n.Left)
- o.call(n.Left)
- if lhs == nil || lhs.Op != ONAME || instrumenting {
- n = o.copyExpr(n, n.Type, false)
+ o.init(call)
+ o.call(call)
+ if lhs == nil || lhs.Op() != ir.ONAME || instrumenting {
+ return o.copyExpr(n)
}
} else {
- n.Left = o.expr(n.Left, nil)
+ n.SetLeft(o.expr(n.Left(), nil))
}
+ return n
- case OANDAND, OOROR:
+ case ir.OANDAND, ir.OOROR:
// ... = LHS && RHS
//
// var r bool
// }
// ... = r
- r := o.newTemp(n.Type, false)
+ r := o.newTemp(n.Type(), false)
// Evaluate left-hand side.
- lhs := o.expr(n.Left, nil)
- o.out = append(o.out, typecheck(nod(OAS, r, lhs), ctxStmt))
+ lhs := o.expr(n.Left(), nil)
+ o.out = append(o.out, typecheck(ir.Nod(ir.OAS, r, lhs), ctxStmt))
// Evaluate right-hand side, save generated code.
saveout := o.out
o.out = nil
t := o.markTemp()
o.edge()
- rhs := o.expr(n.Right, nil)
- o.out = append(o.out, typecheck(nod(OAS, r, rhs), ctxStmt))
+ rhs := o.expr(n.Right(), nil)
+ o.out = append(o.out, typecheck(ir.Nod(ir.OAS, r, rhs), ctxStmt))
o.cleanTemp(t)
gen := o.out
o.out = saveout
// If left-hand side doesn't cause a short-circuit, issue right-hand side.
- nif := nod(OIF, r, nil)
- if n.Op == OANDAND {
- nif.Nbody.Set(gen)
+ nif := ir.Nod(ir.OIF, r, nil)
+ if n.Op() == ir.OANDAND {
+ nif.PtrBody().Set(gen)
} else {
- nif.Rlist.Set(gen)
+ nif.PtrRlist().Set(gen)
}
o.out = append(o.out, nif)
- n = r
-
- case OCALLFUNC,
- OCALLINTER,
- OCALLMETH,
- OCAP,
- OCOMPLEX,
- OCOPY,
- OIMAG,
- OLEN,
- OMAKECHAN,
- OMAKEMAP,
- OMAKESLICE,
- OMAKESLICECOPY,
- ONEW,
- OREAL,
- ORECOVER,
- OSTR2BYTES,
- OSTR2BYTESTMP,
- OSTR2RUNES:
+ return r
+
+ case ir.OCALLFUNC,
+ ir.OCALLINTER,
+ ir.OCALLMETH,
+ ir.OCAP,
+ ir.OCOMPLEX,
+ ir.OCOPY,
+ ir.OIMAG,
+ ir.OLEN,
+ ir.OMAKECHAN,
+ ir.OMAKEMAP,
+ ir.OMAKESLICE,
+ ir.OMAKESLICECOPY,
+ ir.ONEW,
+ ir.OREAL,
+ ir.ORECOVER,
+ ir.OSTR2BYTES,
+ ir.OSTR2BYTESTMP,
+ ir.OSTR2RUNES:
if isRuneCount(n) {
// len([]rune(s)) is rewritten to runtime.countrunes(s) later.
- n.Left.Left = o.expr(n.Left.Left, nil)
+ conv := n.(*ir.UnaryExpr).Left().(*ir.ConvExpr)
+ conv.SetLeft(o.expr(conv.Left(), nil))
} else {
o.call(n)
}
- if lhs == nil || lhs.Op != ONAME || instrumenting {
- n = o.copyExpr(n, n.Type, false)
+ if lhs == nil || lhs.Op() != ir.ONAME || instrumenting {
+ return o.copyExpr(n)
}
+ return n
- case OAPPEND:
+ case ir.OAPPEND:
// Check for append(x, make([]T, y)...) .
if isAppendOfMake(n) {
- n.List.SetFirst(o.expr(n.List.First(), nil)) // order x
- n.List.Second().Left = o.expr(n.List.Second().Left, nil) // order y
+ n.List().SetFirst(o.expr(n.List().First(), nil)) // order x
+ mk := n.List().Second().(*ir.MakeExpr)
+ mk.SetLeft(o.expr(mk.Left(), nil)) // order y
} else {
- o.exprList(n.List)
+ o.exprList(n.List())
}
- if lhs == nil || lhs.Op != ONAME && !samesafeexpr(lhs, n.List.First()) {
- n = o.copyExpr(n, n.Type, false)
+ if lhs == nil || lhs.Op() != ir.ONAME && !samesafeexpr(lhs, n.List().First()) {
+ return o.copyExpr(n)
}
+ return n
- case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR:
- n.Left = o.expr(n.Left, nil)
+ case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR:
+ n.SetLeft(o.expr(n.Left(), nil))
low, high, max := n.SliceBounds()
low = o.expr(low, nil)
low = o.cheapExpr(low)
max = o.expr(max, nil)
max = o.cheapExpr(max)
n.SetSliceBounds(low, high, max)
- if lhs == nil || lhs.Op != ONAME && !samesafeexpr(lhs, n.Left) {
- n = o.copyExpr(n, n.Type, false)
+ if lhs == nil || lhs.Op() != ir.ONAME && !samesafeexpr(lhs, n.Left()) {
+ return o.copyExpr(n)
}
+ return n
- case OCLOSURE:
- if n.Transient() && n.Func.Closure.Func.Cvars.Len() > 0 {
- prealloc[n] = o.newTemp(closureType(n), false)
+ case ir.OCLOSURE:
+ n := n.(*ir.ClosureExpr)
+ if n.Transient() && len(n.Func().ClosureVars) > 0 {
+ n.Prealloc = o.newTemp(closureType(n), false)
}
+ return n
- case OSLICELIT, OCALLPART:
- n.Left = o.expr(n.Left, nil)
- n.Right = o.expr(n.Right, nil)
- o.exprList(n.List)
- o.exprList(n.Rlist)
+ case ir.OCALLPART:
+ n := n.(*ir.CallPartExpr)
+ n.SetLeft(o.expr(n.Left(), nil))
if n.Transient() {
- var t *types.Type
- switch n.Op {
- case OSLICELIT:
- t = types.NewArray(n.Type.Elem(), n.Right.Int64Val())
- case OCALLPART:
- t = partialCallType(n)
- }
- prealloc[n] = o.newTemp(t, false)
+ t := partialCallType(n)
+ n.Prealloc = o.newTemp(t, false)
}
+ return n
- case ODOTTYPE, ODOTTYPE2:
- n.Left = o.expr(n.Left, nil)
- if !isdirectiface(n.Type) || instrumenting {
- n = o.copyExpr(n, n.Type, true)
+ case ir.OSLICELIT:
+ n := n.(*ir.CompLitExpr)
+ o.exprList(n.List())
+ if n.Transient() {
+ t := types.NewArray(n.Type().Elem(), n.Len)
+ n.Prealloc = o.newTemp(t, false)
}
+ return n
- case ORECV:
- n.Left = o.expr(n.Left, nil)
- n = o.copyExpr(n, n.Type, true)
+ case ir.ODOTTYPE, ir.ODOTTYPE2:
+ n.SetLeft(o.expr(n.Left(), nil))
+ if !isdirectiface(n.Type()) || instrumenting {
+ return o.copyExprClear(n)
+ }
+ return n
- case OEQ, ONE, OLT, OLE, OGT, OGE:
- n.Left = o.expr(n.Left, nil)
- n.Right = o.expr(n.Right, nil)
+ case ir.ORECV:
+ n.SetLeft(o.expr(n.Left(), nil))
+ return o.copyExprClear(n)
- t := n.Left.Type
+ case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
+ n.SetLeft(o.expr(n.Left(), nil))
+ n.SetRight(o.expr(n.Right(), nil))
+
+ t := n.Left().Type()
switch {
case t.IsString():
// Mark string(byteSlice) arguments to reuse byteSlice backing
// buffer during conversion. String comparison does not
// memorize the strings for later use, so it is safe.
- if n.Left.Op == OBYTES2STR {
- n.Left.Op = OBYTES2STRTMP
+ if n.Left().Op() == ir.OBYTES2STR {
+ n.Left().(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP)
}
- if n.Right.Op == OBYTES2STR {
- n.Right.Op = OBYTES2STRTMP
+ if n.Right().Op() == ir.OBYTES2STR {
+ n.Right().(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP)
}
case t.IsStruct() || t.IsArray():
// for complex comparisons, we need both args to be
// addressable so we can pass them to the runtime.
- n.Left = o.addrTemp(n.Left)
- n.Right = o.addrTemp(n.Right)
+ n.SetLeft(o.addrTemp(n.Left()))
+ n.SetRight(o.addrTemp(n.Right()))
}
- case OMAPLIT:
+ return n
+
+ case ir.OMAPLIT:
// Order map by converting:
// map[int]int{
// a(): b(),
// Without this special case, order would otherwise compute all
// the keys and values before storing any of them to the map.
// See issue 26552.
- entries := n.List.Slice()
+ entries := n.List().Slice()
statics := entries[:0]
- var dynamics []*Node
+ var dynamics []*ir.KeyExpr
for _, r := range entries {
- if r.Op != OKEY {
- Fatalf("OMAPLIT entry not OKEY: %v\n", r)
- }
+ r := r.(*ir.KeyExpr)
- if !isStaticCompositeLiteral(r.Left) || !isStaticCompositeLiteral(r.Right) {
+ if !isStaticCompositeLiteral(r.Left()) || !isStaticCompositeLiteral(r.Right()) {
dynamics = append(dynamics, r)
continue
}
// Recursively ordering some static entries can change them to dynamic;
// e.g., OCONVIFACE nodes. See #31777.
- r = o.expr(r, nil)
- if !isStaticCompositeLiteral(r.Left) || !isStaticCompositeLiteral(r.Right) {
+ r = o.expr(r, nil).(*ir.KeyExpr)
+ if !isStaticCompositeLiteral(r.Left()) || !isStaticCompositeLiteral(r.Right()) {
dynamics = append(dynamics, r)
continue
}
statics = append(statics, r)
}
- n.List.Set(statics)
+ n.PtrList().Set(statics)
if len(dynamics) == 0 {
- break
+ return n
}
// Emit the creation of the map (with all its static entries).
- m := o.newTemp(n.Type, false)
- as := nod(OAS, m, n)
+ m := o.newTemp(n.Type(), false)
+ as := ir.Nod(ir.OAS, m, n)
typecheck(as, ctxStmt)
o.stmt(as)
- n = m
// Emit eval+insert of dynamic entries, one at a time.
for _, r := range dynamics {
- as := nod(OAS, nod(OINDEX, n, r.Left), r.Right)
+ as := ir.Nod(ir.OAS, ir.Nod(ir.OINDEX, m, r.Left()), r.Right())
typecheck(as, ctxStmt) // Note: this converts the OINDEX to an OINDEXMAP
o.stmt(as)
}
+ return m
}
- lineno = lno
- return n
-}
-
-// okas creates and returns an assignment of val to ok,
-// including an explicit conversion if necessary.
-func okas(ok, val *Node) *Node {
- if !ok.isBlank() {
- val = conv(val, ok.Type)
- }
- return nod(OAS, ok, val)
+ // No return - type-assertions above. Each case must return for itself.
}
// as2 orders OAS2XXXX nodes. It creates temporaries to ensure left-to-right assignment.
// tmp1, tmp2, tmp3 = ...
// a, b, a = tmp1, tmp2, tmp3
// This is necessary to ensure left to right assignment order.
-func (o *Order) as2(n *Node) {
- tmplist := []*Node{}
- left := []*Node{}
- for ni, l := range n.List.Slice() {
- if !l.isBlank() {
- tmp := o.newTemp(l.Type, l.Type.HasPointers())
- n.List.SetIndex(ni, tmp)
+func (o *Order) as2(n *ir.AssignListStmt) {
+ tmplist := []ir.Node{}
+ left := []ir.Node{}
+ for ni, l := range n.List().Slice() {
+ if !ir.IsBlank(l) {
+ tmp := o.newTemp(l.Type(), l.Type().HasPointers())
+ n.List().SetIndex(ni, tmp)
tmplist = append(tmplist, tmp)
left = append(left, l)
}
o.out = append(o.out, n)
- as := nod(OAS2, nil, nil)
- as.List.Set(left)
- as.Rlist.Set(tmplist)
- as = typecheck(as, ctxStmt)
- o.stmt(as)
+ as := ir.Nod(ir.OAS2, nil, nil)
+ as.PtrList().Set(left)
+ as.PtrRlist().Set(tmplist)
+ o.stmt(typecheck(as, ctxStmt))
}
// okAs2 orders OAS2XXX with ok.
// Just like as2, this also adds temporaries to ensure left-to-right assignment.
-func (o *Order) okAs2(n *Node) {
- var tmp1, tmp2 *Node
- if !n.List.First().isBlank() {
- typ := n.Right.Type
+func (o *Order) okAs2(n *ir.AssignListStmt) {
+ var tmp1, tmp2 ir.Node
+ if !ir.IsBlank(n.List().First()) {
+ typ := n.Rlist().First().Type()
tmp1 = o.newTemp(typ, typ.HasPointers())
}
- if !n.List.Second().isBlank() {
- tmp2 = o.newTemp(types.Types[TBOOL], false)
+ if !ir.IsBlank(n.List().Second()) {
+ tmp2 = o.newTemp(types.Types[types.TBOOL], false)
}
o.out = append(o.out, n)
if tmp1 != nil {
- r := nod(OAS, n.List.First(), tmp1)
- r = typecheck(r, ctxStmt)
- o.mapAssign(r)
- n.List.SetFirst(tmp1)
+ r := ir.Nod(ir.OAS, n.List().First(), tmp1)
+ o.mapAssign(typecheck(r, ctxStmt))
+ n.List().SetFirst(tmp1)
}
if tmp2 != nil {
- r := okas(n.List.Second(), tmp2)
- r = typecheck(r, ctxStmt)
- o.mapAssign(r)
- n.List.SetSecond(tmp2)
+ r := ir.Nod(ir.OAS, n.List().Second(), conv(tmp2, n.List().Second().Type()))
+ o.mapAssign(typecheck(r, ctxStmt))
+ n.List().SetSecond(tmp2)
}
}
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/dwarf"
// "Portable" code generation.
var (
- nBackendWorkers int // number of concurrent backend workers, set by a compiler flag
- compilequeue []*Node // functions waiting to be compiled
+ compilequeue []*ir.Func // functions waiting to be compiled
)
-func emitptrargsmap(fn *Node) {
- if fn.funcname() == "_" || fn.Func.Nname.Sym.Linkname != "" {
+func emitptrargsmap(fn *ir.Func) {
+ if ir.FuncName(fn) == "_" || fn.Sym().Linkname != "" {
return
}
- lsym := Ctxt.Lookup(fn.Func.lsym.Name + ".args_stackmap")
-
- nptr := int(fn.Type.ArgWidth() / int64(Widthptr))
+ lsym := base.Ctxt.Lookup(fn.LSym.Name + ".args_stackmap")
+ nptr := int(fn.Type().ArgWidth() / int64(Widthptr))
bv := bvalloc(int32(nptr) * 2)
nbitmap := 1
- if fn.Type.NumResults() > 0 {
+ if fn.Type().NumResults() > 0 {
nbitmap = 2
}
off := duint32(lsym, 0, uint32(nbitmap))
off = duint32(lsym, off, uint32(bv.n))
- if fn.IsMethod() {
- onebitwalktype1(fn.Type.Recvs(), 0, bv)
+ if ir.IsMethod(fn) {
+ onebitwalktype1(fn.Type().Recvs(), 0, bv)
}
- if fn.Type.NumParams() > 0 {
- onebitwalktype1(fn.Type.Params(), 0, bv)
+ if fn.Type().NumParams() > 0 {
+ onebitwalktype1(fn.Type().Params(), 0, bv)
}
off = dbvec(lsym, off, bv)
- if fn.Type.NumResults() > 0 {
- onebitwalktype1(fn.Type.Results(), 0, bv)
+ if fn.Type().NumResults() > 0 {
+ onebitwalktype1(fn.Type().Results(), 0, bv)
off = dbvec(lsym, off, bv)
}
// really means, in memory, things with pointers needing zeroing at
// the top of the stack and increasing in size.
// Non-autos sort on offset.
-func cmpstackvarlt(a, b *Node) bool {
- if (a.Class() == PAUTO) != (b.Class() == PAUTO) {
- return b.Class() == PAUTO
+func cmpstackvarlt(a, b *ir.Name) bool {
+ if (a.Class() == ir.PAUTO) != (b.Class() == ir.PAUTO) {
+ return b.Class() == ir.PAUTO
}
- if a.Class() != PAUTO {
- return a.Xoffset < b.Xoffset
+ if a.Class() != ir.PAUTO {
+ return a.FrameOffset() < b.FrameOffset()
}
- if a.Name.Used() != b.Name.Used() {
- return a.Name.Used()
+ if a.Used() != b.Used() {
+ return a.Used()
}
- ap := a.Type.HasPointers()
- bp := b.Type.HasPointers()
+ ap := a.Type().HasPointers()
+ bp := b.Type().HasPointers()
if ap != bp {
return ap
}
- ap = a.Name.Needzero()
- bp = b.Name.Needzero()
+ ap = a.Needzero()
+ bp = b.Needzero()
if ap != bp {
return ap
}
- if a.Type.Width != b.Type.Width {
- return a.Type.Width > b.Type.Width
+ if a.Type().Width != b.Type().Width {
+ return a.Type().Width > b.Type().Width
}
- return a.Sym.Name < b.Sym.Name
+ return a.Sym().Name < b.Sym().Name
}
// byStackvar implements sort.Interface for []*Node using cmpstackvarlt.
-type byStackVar []*Node
+type byStackVar []*ir.Name
func (s byStackVar) Len() int { return len(s) }
func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) }
func (s *ssafn) AllocFrame(f *ssa.Func) {
s.stksize = 0
s.stkptrsize = 0
- fn := s.curfn.Func
+ fn := s.curfn
// Mark the PAUTO's unused.
for _, ln := range fn.Dcl {
- if ln.Class() == PAUTO {
- ln.Name.SetUsed(false)
+ if ln.Class() == ir.PAUTO {
+ ln.SetUsed(false)
}
}
for _, l := range f.RegAlloc {
if ls, ok := l.(ssa.LocalSlot); ok {
- ls.N.(*Node).Name.SetUsed(true)
+ ls.N.Name().SetUsed(true)
}
}
scratchUsed := false
for _, b := range f.Blocks {
for _, v := range b.Values {
- if n, ok := v.Aux.(*Node); ok {
+ if n, ok := v.Aux.(*ir.Name); ok {
switch n.Class() {
- case PPARAM, PPARAMOUT:
+ case ir.PPARAM, ir.PPARAMOUT:
// Don't modify nodfp; it is a global.
if n != nodfp {
- n.Name.SetUsed(true)
+ n.Name().SetUsed(true)
}
- case PAUTO:
- n.Name.SetUsed(true)
+ case ir.PAUTO:
+ n.Name().SetUsed(true)
}
}
if !scratchUsed {
}
if f.Config.NeedsFpScratch && scratchUsed {
- s.scratchFpMem = tempAt(src.NoXPos, s.curfn, types.Types[TUINT64])
+ s.scratchFpMem = tempAt(src.NoXPos, s.curfn, types.Types[types.TUINT64])
}
sort.Sort(byStackVar(fn.Dcl))
// Reassign stack offsets of the locals that are used.
lastHasPtr := false
for i, n := range fn.Dcl {
- if n.Op != ONAME || n.Class() != PAUTO {
+ if n.Op() != ir.ONAME || n.Class() != ir.PAUTO {
continue
}
- if !n.Name.Used() {
+ if !n.Used() {
fn.Dcl = fn.Dcl[:i]
break
}
- dowidth(n.Type)
- w := n.Type.Width
- if w >= thearch.MAXWIDTH || w < 0 {
- Fatalf("bad width")
+ dowidth(n.Type())
+ w := n.Type().Width
+ if w >= MaxWidth || w < 0 {
+ base.Fatalf("bad width")
}
if w == 0 && lastHasPtr {
// Pad between a pointer-containing object and a zero-sized object.
w = 1
}
s.stksize += w
- s.stksize = Rnd(s.stksize, int64(n.Type.Align))
- if n.Type.HasPointers() {
+ s.stksize = Rnd(s.stksize, int64(n.Type().Align))
+ if n.Type().HasPointers() {
s.stkptrsize = s.stksize
lastHasPtr = true
} else {
if thearch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) {
s.stksize = Rnd(s.stksize, int64(Widthptr))
}
- n.Xoffset = -s.stksize
+ n.SetFrameOffset(-s.stksize)
}
s.stksize = Rnd(s.stksize, int64(Widthreg))
s.stkptrsize = Rnd(s.stkptrsize, int64(Widthreg))
}
-func funccompile(fn *Node) {
+func funccompile(fn *ir.Func) {
if Curfn != nil {
- Fatalf("funccompile %v inside %v", fn.Func.Nname.Sym, Curfn.Func.Nname.Sym)
+ base.Fatalf("funccompile %v inside %v", fn.Sym(), Curfn.Sym())
}
- if fn.Type == nil {
- if nerrors == 0 {
- Fatalf("funccompile missing type")
+ if fn.Type() == nil {
+ if base.Errors() == 0 {
+ base.Fatalf("funccompile missing type")
}
return
}
// assign parameter offsets
- dowidth(fn.Type)
+ dowidth(fn.Type())
- if fn.Nbody.Len() == 0 {
+ if fn.Body().Len() == 0 {
// Initialize ABI wrappers if necessary.
- fn.Func.initLSym(false)
+ initLSym(fn, false)
emitptrargsmap(fn)
return
}
- dclcontext = PAUTO
+ dclcontext = ir.PAUTO
Curfn = fn
-
compile(fn)
-
Curfn = nil
- dclcontext = PEXTERN
+ dclcontext = ir.PEXTERN
}
-func compile(fn *Node) {
- saveerrors()
-
- order(fn)
- if nerrors != 0 {
- return
- }
-
+func compile(fn *ir.Func) {
// Set up the function's LSym early to avoid data races with the assemblers.
// Do this before walk, as walk needs the LSym to set attributes/relocations
// (e.g. in markTypeUsedInInterface).
- fn.Func.initLSym(true)
+ initLSym(fn, true)
+ errorsBefore := base.Errors()
walk(fn)
- if nerrors != 0 {
+ if base.Errors() > errorsBefore {
return
}
- if instrumenting {
- instrument(fn)
- }
// From this point, there should be no uses of Curfn. Enforce that.
Curfn = nil
- if fn.funcname() == "_" {
+ if ir.FuncName(fn) == "_" {
// We don't need to generate code for this function, just report errors in its body.
// At this point we've generated any errors needed.
// (Beyond here we generate only non-spec errors, like "stack frame too large".)
// be types of stack objects. We need to do this here
// because symbols must be allocated before the parallel
// phase of the compiler.
- for _, n := range fn.Func.Dcl {
+ for _, n := range fn.Dcl {
switch n.Class() {
- case PPARAM, PPARAMOUT, PAUTO:
- if livenessShouldTrack(n) && n.Name.Addrtaken() {
- dtypesym(n.Type)
+ case ir.PPARAM, ir.PPARAMOUT, ir.PAUTO:
+ if livenessShouldTrack(n) && n.Addrtaken() {
+ dtypesym(n.Type())
// Also make sure we allocate a linker symbol
// for the stack object data, for the same reason.
- if fn.Func.lsym.Func().StackObjects == nil {
- fn.Func.lsym.Func().StackObjects = Ctxt.Lookup(fn.Func.lsym.Name + ".stkobj")
+ if fn.LSym.Func().StackObjects == nil {
+ fn.LSym.Func().StackObjects = base.Ctxt.Lookup(fn.LSym.Name + ".stkobj")
}
}
}
// If functions are not compiled immediately,
// they are enqueued in compilequeue,
// which is drained by compileFunctions.
-func compilenow(fn *Node) bool {
+func compilenow(fn *ir.Func) bool {
// Issue 38068: if this function is a method AND an inline
// candidate AND was not inlined (yet), put it onto the compile
// queue instead of compiling it immediately. This is in case we
// wind up inlining it into a method wrapper that is generated by
- // compiling a function later on in the xtop list.
- if fn.IsMethod() && isInlinableButNotInlined(fn) {
+ // compiling a function later on in the Target.Decls list.
+ if ir.IsMethod(fn) && isInlinableButNotInlined(fn) {
return false
}
- return nBackendWorkers == 1 && Debug_compilelater == 0
+ return base.Flag.LowerC == 1 && base.Debug.CompileLater == 0
}
// isInlinableButNotInlined returns true if 'fn' was marked as an
// inline candidate but then never inlined (presumably because we
// found no call sites).
-func isInlinableButNotInlined(fn *Node) bool {
- if fn.Func.Nname.Func.Inl == nil {
+func isInlinableButNotInlined(fn *ir.Func) bool {
+ if fn.Inl == nil {
return false
}
- if fn.Sym == nil {
+ if fn.Sym() == nil {
return true
}
- return !fn.Sym.Linksym().WasInlined()
+ return !fn.Sym().Linksym().WasInlined()
}
const maxStackSize = 1 << 30
// uses it to generate a plist,
// and flushes that plist to machine code.
// worker indicates which of the backend workers is doing the processing.
-func compileSSA(fn *Node, worker int) {
+func compileSSA(fn *ir.Func, worker int) {
f := buildssa(fn, worker)
// Note: check arg size to fix issue 25507.
- if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type.ArgWidth() >= maxStackSize {
+ if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type().ArgWidth() >= maxStackSize {
largeStackFramesMu.Lock()
- largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: fn.Type.ArgWidth(), pos: fn.Pos})
+ largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: fn.Type().ArgWidth(), pos: fn.Pos()})
largeStackFramesMu.Unlock()
return
}
if pp.Text.To.Offset >= maxStackSize {
largeStackFramesMu.Lock()
locals := f.Frontend().(*ssafn).stksize
- largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: fn.Type.ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos})
+ largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: fn.Type().ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos()})
largeStackFramesMu.Unlock()
return
}
pp.Flush() // assemble, fill in boilerplate, etc.
// fieldtrack must be called after pp.Flush. See issue 20014.
- fieldtrack(pp.Text.From.Sym, fn.Func.FieldTrack)
+ fieldtrack(pp.Text.From.Sym, fn.FieldTrack)
}
func init() {
sizeCalculationDisabled = true // not safe to calculate sizes concurrently
if race.Enabled {
// Randomize compilation order to try to shake out races.
- tmp := make([]*Node, len(compilequeue))
+ tmp := make([]*ir.Func, len(compilequeue))
perm := rand.Perm(len(compilequeue))
for i, v := range perm {
tmp[v] = compilequeue[i]
// since they're most likely to be the slowest.
// This helps avoid stragglers.
sort.Slice(compilequeue, func(i, j int) bool {
- return compilequeue[i].Nbody.Len() > compilequeue[j].Nbody.Len()
+ return compilequeue[i].Body().Len() > compilequeue[j].Body().Len()
})
}
var wg sync.WaitGroup
- Ctxt.InParallel = true
- c := make(chan *Node, nBackendWorkers)
- for i := 0; i < nBackendWorkers; i++ {
+ base.Ctxt.InParallel = true
+ c := make(chan *ir.Func, base.Flag.LowerC)
+ for i := 0; i < base.Flag.LowerC; i++ {
wg.Add(1)
go func(worker int) {
for fn := range c {
close(c)
compilequeue = nil
wg.Wait()
- Ctxt.InParallel = false
+ base.Ctxt.InParallel = false
sizeCalculationDisabled = false
}
}
func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) {
- fn := curfn.(*Node)
- if fn.Func.Nname != nil {
- if expect := fn.Func.Nname.Sym.Linksym(); fnsym != expect {
- Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
- }
- }
-
- var apdecls []*Node
+ fn := curfn.(*ir.Func)
+
+ if fn.Nname != nil {
+ expect := fn.Sym().Linksym()
+ if fnsym.ABI() == obj.ABI0 {
+ expect = fn.Sym().LinksymABI0()
+ }
+ if fnsym != expect {
+ base.Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
+ }
+ }
+
+ // Back when there were two different *Funcs for a function, this code
+ // was not consistent about whether a particular *Node being processed
+ // was an ODCLFUNC or ONAME node. Partly this is because inlined function
+ // bodies have no ODCLFUNC node, which was its own inconsistency.
+ // In any event, the handling of the two different nodes for DWARF purposes
+ // was subtly different, likely in unintended ways. CL 272253 merged the
+ // two nodes' Func fields, so that code sees the same *Func whether it is
+ // holding the ODCLFUNC or the ONAME. This resulted in changes in the
+ // DWARF output. To preserve the existing DWARF output and leave an
+ // intentional change for a future CL, this code does the following when
+ // fn.Op == ONAME:
+ //
+ // 1. Disallow use of createComplexVars in createDwarfVars.
+ // It was not possible to reach that code for an ONAME before,
+ // because the DebugInfo was set only on the ODCLFUNC Func.
+ // Calling into it in the ONAME case causes an index out of bounds panic.
+ //
+ // 2. Do not populate apdecls. fn.Func.Dcl was in the ODCLFUNC Func,
+ // not the ONAME Func. Populating apdecls for the ONAME case results
+ // in selected being populated after createSimpleVars is called in
+ // createDwarfVars, and then that causes the loop to skip all the entries
+ // in dcl, meaning that the RecordAutoType calls don't happen.
+ //
+ // These two adjustments keep toolstash -cmp working for now.
+ // Deciding the right answer is, as they say, future work.
+ //
+ // We can tell the difference between the old ODCLFUNC and ONAME
+ // cases by looking at the infosym.Name. If it's empty, DebugInfo is
+ // being called from (*obj.Link).populateDWARF, which used to use
+ // the ODCLFUNC. If it's non-empty (the name will end in $abstract),
+ // DebugInfo is being called from (*obj.Link).DwarfAbstractFunc,
+ // which used to use the ONAME form.
+ isODCLFUNC := infosym.Name == ""
+
+ var apdecls []*ir.Name
// Populate decls for fn.
- for _, n := range fn.Func.Dcl {
- if n.Op != ONAME { // might be OTYPE or OLITERAL
- continue
- }
- switch n.Class() {
- case PAUTO:
- if !n.Name.Used() {
- // Text == nil -> generating abstract function
- if fnsym.Func().Text != nil {
- Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)")
+ if isODCLFUNC {
+ for _, n := range fn.Dcl {
+ if n.Op() != ir.ONAME { // might be OTYPE or OLITERAL
+ continue
+ }
+ switch n.Class() {
+ case ir.PAUTO:
+ if !n.Used() {
+ // Text == nil -> generating abstract function
+ if fnsym.Func().Text != nil {
+ base.Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)")
+ }
+ continue
}
+ case ir.PPARAM, ir.PPARAMOUT:
+ default:
continue
}
- case PPARAM, PPARAMOUT:
- default:
- continue
+ apdecls = append(apdecls, n)
+ fnsym.Func().RecordAutoType(ngotype(n).Linksym())
}
- apdecls = append(apdecls, n)
- fnsym.Func().RecordAutoType(ngotype(n).Linksym())
}
- decls, dwarfVars := createDwarfVars(fnsym, fn.Func, apdecls)
+ decls, dwarfVars := createDwarfVars(fnsym, isODCLFUNC, fn, apdecls)
// For each type referenced by the functions auto vars but not
- // already referenced by a dwarf var, attach a dummy relocation to
+ // already referenced by a dwarf var, attach an R_USETYPE relocation to
// the function symbol to insure that the type included in DWARF
// processing during linking.
typesyms := []*obj.LSym{}
}
fnsym.Func().Autot = nil
- var varScopes []ScopeID
+ var varScopes []ir.ScopeID
for _, decl := range decls {
pos := declPos(decl)
- varScopes = append(varScopes, findScope(fn.Func.Marks, pos))
+ varScopes = append(varScopes, findScope(fn.Marks, pos))
}
scopes := assembleScopes(fnsym, fn, dwarfVars, varScopes)
var inlcalls dwarf.InlCalls
- if genDwarfInline > 0 {
+ if base.Flag.GenDwarfInl > 0 {
inlcalls = assembleInlines(fnsym, dwarfVars)
}
return scopes, inlcalls
}
-func declPos(decl *Node) src.XPos {
- if decl.Name.Defn != nil && (decl.Name.Captured() || decl.Name.Byval()) {
+func declPos(decl *ir.Name) src.XPos {
+ if decl.Name().Defn != nil && (decl.Name().Captured() || decl.Name().Byval()) {
// It's not clear which position is correct for captured variables here:
// * decl.Pos is the wrong position for captured variables, in the inner
// function, but it is the right position in the outer function.
// case statement.
// This code is probably wrong for type switch variables that are also
// captured.
- return decl.Name.Defn.Pos
+ return decl.Name().Defn.Pos()
}
- return decl.Pos
+ return decl.Pos()
}
// createSimpleVars creates a DWARF entry for every variable declared in the
// function, claiming that they are permanently on the stack.
-func createSimpleVars(fnsym *obj.LSym, apDecls []*Node) ([]*Node, []*dwarf.Var, map[*Node]bool) {
+func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var, map[*ir.Name]bool) {
var vars []*dwarf.Var
- var decls []*Node
- selected := make(map[*Node]bool)
+ var decls []*ir.Name
+ selected := make(map[*ir.Name]bool)
for _, n := range apDecls {
- if n.IsAutoTmp() {
+ if ir.IsAutoTmp(n) {
continue
}
return decls, vars, selected
}
-func createSimpleVar(fnsym *obj.LSym, n *Node) *dwarf.Var {
+func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var {
var abbrev int
- offs := n.Xoffset
+ var offs int64
switch n.Class() {
- case PAUTO:
+ case ir.PAUTO:
+ offs = n.FrameOffset()
abbrev = dwarf.DW_ABRV_AUTO
- if Ctxt.FixedFrameSize() == 0 {
+ if base.Ctxt.FixedFrameSize() == 0 {
offs -= int64(Widthptr)
}
if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
offs -= int64(Widthptr)
}
- case PPARAM, PPARAMOUT:
+ case ir.PPARAM, ir.PPARAMOUT:
abbrev = dwarf.DW_ABRV_PARAM
- offs += Ctxt.FixedFrameSize()
+ offs = n.FrameOffset() + base.Ctxt.FixedFrameSize()
default:
- Fatalf("createSimpleVar unexpected class %v for node %v", n.Class(), n)
+ base.Fatalf("createSimpleVar unexpected class %v for node %v", n.Class(), n)
}
- typename := dwarf.InfoPrefix + typesymname(n.Type)
+ typename := dwarf.InfoPrefix + typesymname(n.Type())
delete(fnsym.Func().Autot, ngotype(n).Linksym())
inlIndex := 0
- if genDwarfInline > 1 {
- if n.Name.InlFormal() || n.Name.InlLocal() {
- inlIndex = posInlIndex(n.Pos) + 1
- if n.Name.InlFormal() {
+ if base.Flag.GenDwarfInl > 1 {
+ if n.Name().InlFormal() || n.Name().InlLocal() {
+ inlIndex = posInlIndex(n.Pos()) + 1
+ if n.Name().InlFormal() {
abbrev = dwarf.DW_ABRV_PARAM
}
}
}
- declpos := Ctxt.InnermostPos(declPos(n))
+ declpos := base.Ctxt.InnermostPos(declPos(n))
return &dwarf.Var{
- Name: n.Sym.Name,
- IsReturnValue: n.Class() == PPARAMOUT,
- IsInlFormal: n.Name.InlFormal(),
+ Name: n.Sym().Name,
+ IsReturnValue: n.Class() == ir.PPARAMOUT,
+ IsInlFormal: n.Name().InlFormal(),
Abbrev: abbrev,
StackOffset: int32(offs),
- Type: Ctxt.Lookup(typename),
+ Type: base.Ctxt.Lookup(typename),
DeclFile: declpos.RelFilename(),
DeclLine: declpos.RelLine(),
DeclCol: declpos.Col(),
// createComplexVars creates recomposed DWARF vars with location lists,
// suitable for describing optimized code.
-func createComplexVars(fnsym *obj.LSym, fn *Func) ([]*Node, []*dwarf.Var, map[*Node]bool) {
- debugInfo := fn.DebugInfo
+func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Name, []*dwarf.Var, map[*ir.Name]bool) {
+ debugInfo := fn.DebugInfo.(*ssa.FuncDebug)
// Produce a DWARF variable entry for each user variable.
- var decls []*Node
+ var decls []*ir.Name
var vars []*dwarf.Var
- ssaVars := make(map[*Node]bool)
+ ssaVars := make(map[*ir.Name]bool)
for varID, dvar := range debugInfo.Vars {
- n := dvar.(*Node)
+ n := dvar
ssaVars[n] = true
for _, slot := range debugInfo.VarSlots[varID] {
- ssaVars[debugInfo.Slots[slot].N.(*Node)] = true
+ ssaVars[debugInfo.Slots[slot].N] = true
}
if dvar := createComplexVar(fnsym, fn, ssa.VarID(varID)); dvar != nil {
// createDwarfVars process fn, returning a list of DWARF variables and the
// Nodes they represent.
-func createDwarfVars(fnsym *obj.LSym, fn *Func, apDecls []*Node) ([]*Node, []*dwarf.Var) {
+func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var) {
// Collect a raw list of DWARF vars.
var vars []*dwarf.Var
- var decls []*Node
- var selected map[*Node]bool
- if Ctxt.Flag_locationlists && Ctxt.Flag_optimize && fn.DebugInfo != nil {
+ var decls []*ir.Name
+ var selected map[*ir.Name]bool
+ if base.Ctxt.Flag_locationlists && base.Ctxt.Flag_optimize && fn.DebugInfo != nil && complexOK {
decls, vars, selected = createComplexVars(fnsym, fn)
} else {
decls, vars, selected = createSimpleVars(fnsym, apDecls)
if _, found := selected[n]; found {
continue
}
- c := n.Sym.Name[0]
- if c == '.' || n.Type.IsUntyped() {
+ c := n.Sym().Name[0]
+ if c == '.' || n.Type().IsUntyped() {
continue
}
- if n.Class() == PPARAM && !canSSAType(n.Type) {
+ if n.Class() == ir.PPARAM && !canSSAType(n.Type()) {
// SSA-able args get location lists, and may move in and
// out of registers, so those are handled elsewhere.
// Autos and named output params seem to get handled
decls = append(decls, n)
continue
}
- typename := dwarf.InfoPrefix + typesymname(n.Type)
+ typename := dwarf.InfoPrefix + typesymname(n.Type())
decls = append(decls, n)
abbrev := dwarf.DW_ABRV_AUTO_LOCLIST
- isReturnValue := (n.Class() == PPARAMOUT)
- if n.Class() == PPARAM || n.Class() == PPARAMOUT {
+ isReturnValue := (n.Class() == ir.PPARAMOUT)
+ if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
- } else if n.Class() == PAUTOHEAP {
+ } else if n.Class() == ir.PAUTOHEAP {
// If dcl in question has been promoted to heap, do a bit
// of extra work to recover original class (auto or param);
// see issue 30908. This insures that we get the proper
// misleading location for the param (we want pointer-to-heap
// and not stack).
// TODO(thanm): generate a better location expression
- stackcopy := n.Name.Param.Stackcopy
- if stackcopy != nil && (stackcopy.Class() == PPARAM || stackcopy.Class() == PPARAMOUT) {
+ stackcopy := n.Name().Stackcopy
+ if stackcopy != nil && (stackcopy.Class() == ir.PPARAM || stackcopy.Class() == ir.PPARAMOUT) {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
- isReturnValue = (stackcopy.Class() == PPARAMOUT)
+ isReturnValue = (stackcopy.Class() == ir.PPARAMOUT)
}
}
inlIndex := 0
- if genDwarfInline > 1 {
- if n.Name.InlFormal() || n.Name.InlLocal() {
- inlIndex = posInlIndex(n.Pos) + 1
- if n.Name.InlFormal() {
+ if base.Flag.GenDwarfInl > 1 {
+ if n.Name().InlFormal() || n.Name().InlLocal() {
+ inlIndex = posInlIndex(n.Pos()) + 1
+ if n.Name().InlFormal() {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
}
}
}
- declpos := Ctxt.InnermostPos(n.Pos)
+ declpos := base.Ctxt.InnermostPos(n.Pos())
vars = append(vars, &dwarf.Var{
- Name: n.Sym.Name,
+ Name: n.Sym().Name,
IsReturnValue: isReturnValue,
Abbrev: abbrev,
- StackOffset: int32(n.Xoffset),
- Type: Ctxt.Lookup(typename),
+ StackOffset: int32(n.FrameOffset()),
+ Type: base.Ctxt.Lookup(typename),
DeclFile: declpos.RelFilename(),
DeclLine: declpos.RelLine(),
DeclCol: declpos.Col(),
// function that is not local to the package being compiled, then the
// names of the variables may have been "versioned" to avoid conflicts
// with local vars; disregard this versioning when sorting.
-func preInliningDcls(fnsym *obj.LSym) []*Node {
- fn := Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*Node)
- var rdcl []*Node
- for _, n := range fn.Func.Inl.Dcl {
- c := n.Sym.Name[0]
+func preInliningDcls(fnsym *obj.LSym) []*ir.Name {
+ fn := base.Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*ir.Func)
+ var rdcl []*ir.Name
+ for _, n := range fn.Inl.Dcl {
+ c := n.Sym().Name[0]
// Avoid reporting "_" parameters, since if there are more than
// one, it can result in a collision later on, as in #23179.
- if unversion(n.Sym.Name) == "_" || c == '.' || n.Type.IsUntyped() {
+ if unversion(n.Sym().Name) == "_" || c == '.' || n.Type().IsUntyped() {
continue
}
rdcl = append(rdcl, n)
// stack pointer, suitable for use in a DWARF location entry. This has nothing
// to do with its offset in the user variable.
func stackOffset(slot ssa.LocalSlot) int32 {
- n := slot.N.(*Node)
- var base int64
+ n := slot.N
+ var off int64
switch n.Class() {
- case PAUTO:
- if Ctxt.FixedFrameSize() == 0 {
- base -= int64(Widthptr)
+ case ir.PAUTO:
+ off = n.FrameOffset()
+ if base.Ctxt.FixedFrameSize() == 0 {
+ off -= int64(Widthptr)
}
if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
// There is a word space for FP on ARM64 even if the frame pointer is disabled
- base -= int64(Widthptr)
+ off -= int64(Widthptr)
}
- case PPARAM, PPARAMOUT:
- base += Ctxt.FixedFrameSize()
+ case ir.PPARAM, ir.PPARAMOUT:
+ off = n.FrameOffset() + base.Ctxt.FixedFrameSize()
}
- return int32(base + n.Xoffset + slot.Off)
+ return int32(off + slot.Off)
}
// createComplexVar builds a single DWARF variable entry and location list.
-func createComplexVar(fnsym *obj.LSym, fn *Func, varID ssa.VarID) *dwarf.Var {
- debug := fn.DebugInfo
- n := debug.Vars[varID].(*Node)
+func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var {
+ debug := fn.DebugInfo.(*ssa.FuncDebug)
+ n := debug.Vars[varID]
var abbrev int
switch n.Class() {
- case PAUTO:
+ case ir.PAUTO:
abbrev = dwarf.DW_ABRV_AUTO_LOCLIST
- case PPARAM, PPARAMOUT:
+ case ir.PPARAM, ir.PPARAMOUT:
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
default:
return nil
delete(fnsym.Func().Autot, gotype)
typename := dwarf.InfoPrefix + gotype.Name[len("type."):]
inlIndex := 0
- if genDwarfInline > 1 {
- if n.Name.InlFormal() || n.Name.InlLocal() {
- inlIndex = posInlIndex(n.Pos) + 1
- if n.Name.InlFormal() {
+ if base.Flag.GenDwarfInl > 1 {
+ if n.Name().InlFormal() || n.Name().InlLocal() {
+ inlIndex = posInlIndex(n.Pos()) + 1
+ if n.Name().InlFormal() {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
}
}
}
- declpos := Ctxt.InnermostPos(n.Pos)
+ declpos := base.Ctxt.InnermostPos(n.Pos())
dvar := &dwarf.Var{
- Name: n.Sym.Name,
- IsReturnValue: n.Class() == PPARAMOUT,
- IsInlFormal: n.Name.InlFormal(),
+ Name: n.Sym().Name,
+ IsReturnValue: n.Class() == ir.PPARAMOUT,
+ IsInlFormal: n.Name().InlFormal(),
Abbrev: abbrev,
- Type: Ctxt.Lookup(typename),
+ Type: base.Ctxt.Lookup(typename),
// The stack offset is used as a sorting key, so for decomposed
// variables just give it the first one. It's not used otherwise.
// This won't work well if the first slot hasn't been assigned a stack
list := debug.LocationLists[varID]
if len(list) != 0 {
dvar.PutLocationList = func(listSym, startPC dwarf.Sym) {
- debug.PutLocationList(list, Ctxt, listSym.(*obj.LSym), startPC.(*obj.LSym))
+ debug.PutLocationList(list, base.Ctxt, listSym.(*obj.LSym), startPC.(*obj.LSym))
}
}
return dvar
package gc
import (
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
+ "cmd/internal/src"
"reflect"
"sort"
"testing"
)
func typeWithoutPointers() *types.Type {
- t := types.New(TSTRUCT)
- f := &types.Field{Type: types.New(TINT)}
- t.SetFields([]*types.Field{f})
- return t
+ return types.NewStruct(types.NoPkg, []*types.Field{
+ types.NewField(src.NoXPos, nil, types.New(types.TINT)),
+ })
}
func typeWithPointers() *types.Type {
- t := types.New(TSTRUCT)
- f := &types.Field{Type: types.NewPtr(types.New(TINT))}
- t.SetFields([]*types.Field{f})
- return t
+ return types.NewStruct(types.NoPkg, []*types.Field{
+ types.NewField(src.NoXPos, nil, types.NewPtr(types.New(types.TINT))),
+ })
}
-func markUsed(n *Node) *Node {
- n.Name.SetUsed(true)
+func markUsed(n *ir.Name) *ir.Name {
+ n.SetUsed(true)
return n
}
-func markNeedZero(n *Node) *Node {
- n.Name.SetNeedzero(true)
+func markNeedZero(n *ir.Name) *ir.Name {
+ n.SetNeedzero(true)
return n
}
-func nodeWithClass(n Node, c Class) *Node {
- n.SetClass(c)
- n.Name = new(Name)
- return &n
-}
-
// Test all code paths for cmpstackvarlt.
func TestCmpstackvar(t *testing.T) {
+ nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) *ir.Name {
+ if s == nil {
+ s = &types.Sym{Name: "."}
+ }
+ n := NewName(s)
+ n.SetType(t)
+ n.SetFrameOffset(xoffset)
+ n.SetClass(cl)
+ return n
+ }
testdata := []struct {
- a, b *Node
+ a, b *ir.Name
lt bool
}{
{
- nodeWithClass(Node{}, PAUTO),
- nodeWithClass(Node{}, PFUNC),
+ nod(0, nil, nil, ir.PAUTO),
+ nod(0, nil, nil, ir.PFUNC),
false,
},
{
- nodeWithClass(Node{}, PFUNC),
- nodeWithClass(Node{}, PAUTO),
+ nod(0, nil, nil, ir.PFUNC),
+ nod(0, nil, nil, ir.PAUTO),
true,
},
{
- nodeWithClass(Node{Xoffset: 0}, PFUNC),
- nodeWithClass(Node{Xoffset: 10}, PFUNC),
+ nod(0, nil, nil, ir.PFUNC),
+ nod(10, nil, nil, ir.PFUNC),
true,
},
{
- nodeWithClass(Node{Xoffset: 20}, PFUNC),
- nodeWithClass(Node{Xoffset: 10}, PFUNC),
+ nod(20, nil, nil, ir.PFUNC),
+ nod(10, nil, nil, ir.PFUNC),
false,
},
{
- nodeWithClass(Node{Xoffset: 10}, PFUNC),
- nodeWithClass(Node{Xoffset: 10}, PFUNC),
+ nod(10, nil, nil, ir.PFUNC),
+ nod(10, nil, nil, ir.PFUNC),
false,
},
{
- nodeWithClass(Node{Xoffset: 10}, PPARAM),
- nodeWithClass(Node{Xoffset: 20}, PPARAMOUT),
+ nod(10, nil, nil, ir.PPARAM),
+ nod(20, nil, nil, ir.PPARAMOUT),
true,
},
{
- nodeWithClass(Node{Xoffset: 10}, PPARAMOUT),
- nodeWithClass(Node{Xoffset: 20}, PPARAM),
+ nod(10, nil, nil, ir.PPARAMOUT),
+ nod(20, nil, nil, ir.PPARAM),
true,
},
{
- markUsed(nodeWithClass(Node{}, PAUTO)),
- nodeWithClass(Node{}, PAUTO),
+ markUsed(nod(0, nil, nil, ir.PAUTO)),
+ nod(0, nil, nil, ir.PAUTO),
true,
},
{
- nodeWithClass(Node{}, PAUTO),
- markUsed(nodeWithClass(Node{}, PAUTO)),
+ nod(0, nil, nil, ir.PAUTO),
+ markUsed(nod(0, nil, nil, ir.PAUTO)),
false,
},
{
- nodeWithClass(Node{Type: typeWithoutPointers()}, PAUTO),
- nodeWithClass(Node{Type: typeWithPointers()}, PAUTO),
+ nod(0, typeWithoutPointers(), nil, ir.PAUTO),
+ nod(0, typeWithPointers(), nil, ir.PAUTO),
false,
},
{
- nodeWithClass(Node{Type: typeWithPointers()}, PAUTO),
- nodeWithClass(Node{Type: typeWithoutPointers()}, PAUTO),
+ nod(0, typeWithPointers(), nil, ir.PAUTO),
+ nod(0, typeWithoutPointers(), nil, ir.PAUTO),
true,
},
{
- markNeedZero(nodeWithClass(Node{Type: &types.Type{}}, PAUTO)),
- nodeWithClass(Node{Type: &types.Type{}, Name: &Name{}}, PAUTO),
+ markNeedZero(nod(0, &types.Type{}, nil, ir.PAUTO)),
+ nod(0, &types.Type{}, nil, ir.PAUTO),
true,
},
{
- nodeWithClass(Node{Type: &types.Type{}, Name: &Name{}}, PAUTO),
- markNeedZero(nodeWithClass(Node{Type: &types.Type{}}, PAUTO)),
+ nod(0, &types.Type{}, nil, ir.PAUTO),
+ markNeedZero(nod(0, &types.Type{}, nil, ir.PAUTO)),
false,
},
{
- nodeWithClass(Node{Type: &types.Type{Width: 1}, Name: &Name{}}, PAUTO),
- nodeWithClass(Node{Type: &types.Type{Width: 2}, Name: &Name{}}, PAUTO),
+ nod(0, &types.Type{Width: 1}, nil, ir.PAUTO),
+ nod(0, &types.Type{Width: 2}, nil, ir.PAUTO),
false,
},
{
- nodeWithClass(Node{Type: &types.Type{Width: 2}, Name: &Name{}}, PAUTO),
- nodeWithClass(Node{Type: &types.Type{Width: 1}, Name: &Name{}}, PAUTO),
+ nod(0, &types.Type{Width: 2}, nil, ir.PAUTO),
+ nod(0, &types.Type{Width: 1}, nil, ir.PAUTO),
true,
},
{
- nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
- nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO),
+ nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO),
+ nod(0, &types.Type{}, &types.Sym{Name: "xyz"}, ir.PAUTO),
true,
},
{
- nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
- nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
+ nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO),
+ nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO),
false,
},
{
- nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO),
- nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
+ nod(0, &types.Type{}, &types.Sym{Name: "xyz"}, ir.PAUTO),
+ nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO),
false,
},
}
for _, d := range testdata {
got := cmpstackvarlt(d.a, d.b)
if got != d.lt {
- t.Errorf("want %#v < %#v", d.a, d.b)
+ t.Errorf("want %v < %v", d.a, d.b)
}
// If we expect a < b to be true, check that b < a is false.
if d.lt && cmpstackvarlt(d.b, d.a) {
- t.Errorf("unexpected %#v < %#v", d.b, d.a)
+ t.Errorf("unexpected %v < %v", d.b, d.a)
}
}
}
func TestStackvarSort(t *testing.T) {
- inp := []*Node{
- nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
- nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO),
- nodeWithClass(Node{Xoffset: 0, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
- nodeWithClass(Node{Xoffset: 10, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
- nodeWithClass(Node{Xoffset: 20, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
- markUsed(nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO)),
- nodeWithClass(Node{Type: typeWithoutPointers(), Sym: &types.Sym{}}, PAUTO),
- nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO),
- markNeedZero(nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO)),
- nodeWithClass(Node{Type: &types.Type{Width: 1}, Sym: &types.Sym{}}, PAUTO),
- nodeWithClass(Node{Type: &types.Type{Width: 2}, Sym: &types.Sym{}}, PAUTO),
- nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
- nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO),
+ nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) *ir.Name {
+ n := NewName(s)
+ n.SetType(t)
+ n.SetFrameOffset(xoffset)
+ n.SetClass(cl)
+ return n
+ }
+ inp := []*ir.Name{
+ nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC),
+ nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO),
+ nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC),
+ nod(10, &types.Type{}, &types.Sym{}, ir.PFUNC),
+ nod(20, &types.Type{}, &types.Sym{}, ir.PFUNC),
+ markUsed(nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO)),
+ nod(0, typeWithoutPointers(), &types.Sym{}, ir.PAUTO),
+ nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO),
+ markNeedZero(nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO)),
+ nod(0, &types.Type{Width: 1}, &types.Sym{}, ir.PAUTO),
+ nod(0, &types.Type{Width: 2}, &types.Sym{}, ir.PAUTO),
+ nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO),
+ nod(0, &types.Type{}, &types.Sym{Name: "xyz"}, ir.PAUTO),
}
- want := []*Node{
- nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
- nodeWithClass(Node{Xoffset: 0, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
- nodeWithClass(Node{Xoffset: 10, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
- nodeWithClass(Node{Xoffset: 20, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
- markUsed(nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO)),
- markNeedZero(nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO)),
- nodeWithClass(Node{Type: &types.Type{Width: 2}, Sym: &types.Sym{}}, PAUTO),
- nodeWithClass(Node{Type: &types.Type{Width: 1}, Sym: &types.Sym{}}, PAUTO),
- nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO),
- nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO),
- nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
- nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO),
- nodeWithClass(Node{Type: typeWithoutPointers(), Sym: &types.Sym{}}, PAUTO),
+ want := []*ir.Name{
+ nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC),
+ nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC),
+ nod(10, &types.Type{}, &types.Sym{}, ir.PFUNC),
+ nod(20, &types.Type{}, &types.Sym{}, ir.PFUNC),
+ markUsed(nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO)),
+ markNeedZero(nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO)),
+ nod(0, &types.Type{Width: 2}, &types.Sym{}, ir.PAUTO),
+ nod(0, &types.Type{Width: 1}, &types.Sym{}, ir.PAUTO),
+ nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO),
+ nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO),
+ nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO),
+ nod(0, &types.Type{}, &types.Sym{Name: "xyz"}, ir.PAUTO),
+ nod(0, typeWithoutPointers(), &types.Sym{}, ir.PAUTO),
}
sort.Sort(byStackVar(inp))
if !reflect.DeepEqual(want, inp) {
package gc
import (
+ "cmd/compile/internal/ir"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/src"
const debugPhi = false
+// FwdRefAux wraps an arbitrary ir.Node as an ssa.Aux for use with OpFwdRef.
+type FwdRefAux struct {
+ _ [0]func() // ensure ir.Node isn't compared for equality
+ N ir.Node
+}
+
+func (FwdRefAux) CanBeAnSSAAux() {}
+
// insertPhis finds all the places in the function where a phi is
// necessary and inserts them.
// Uses FwdRef ops to find all uses of variables, and s.defvars to find
}
type phiState struct {
- s *state // SSA state
- f *ssa.Func // function to work on
- defvars []map[*Node]*ssa.Value // defined variables at end of each block
+ s *state // SSA state
+ f *ssa.Func // function to work on
+ defvars []map[ir.Node]*ssa.Value // defined variables at end of each block
- varnum map[*Node]int32 // variable numbering
+ varnum map[ir.Node]int32 // variable numbering
// properties of the dominator tree
idom []*ssa.Block // dominator parents
hasDef *sparseSet // has a write of the variable we're processing
// miscellaneous
- placeholder *ssa.Value // dummy value to use as a "not set yet" placeholder.
+ placeholder *ssa.Value // value to use as a "not set yet" placeholder.
}
func (s *phiState) insertPhis() {
// Find all the variables for which we need to match up reads & writes.
// This step prunes any basic-block-only variables from consideration.
// Generate a numbering for these variables.
- s.varnum = map[*Node]int32{}
- var vars []*Node
+ s.varnum = map[ir.Node]int32{}
+ var vars []ir.Node
var vartypes []*types.Type
for _, b := range s.f.Blocks {
for _, v := range b.Values {
if v.Op != ssa.OpFwdRef {
continue
}
- var_ := v.Aux.(*Node)
+ var_ := v.Aux.(FwdRefAux).N
// Optimization: look back 1 block for the definition.
if len(b.Preds) == 1 {
if v.Op == ssa.OpPhi {
v.AuxInt = 0
}
+ // Any remaining FwdRefs are dead code.
+ if v.Op == ssa.OpFwdRef {
+ v.Op = ssa.OpUnknown
+ v.Aux = nil
+ }
}
}
}
-func (s *phiState) insertVarPhis(n int, var_ *Node, defs []*ssa.Block, typ *types.Type) {
+func (s *phiState) insertVarPhis(n int, var_ ir.Node, defs []*ssa.Block, typ *types.Type) {
priq := &s.priq
q := s.q
queued := s.queued
hasPhi.add(c.ID)
v := c.NewValue0I(currentRoot.Pos, ssa.OpPhi, typ, int64(n)) // TODO: line number right?
// Note: we store the variable number in the phi's AuxInt field. Used temporarily by phi building.
- s.s.addNamedValue(var_, v)
+ if var_.Op() == ir.ONAME {
+ s.s.addNamedValue(var_.(*ir.Name), v)
+ }
for range c.Preds {
v.AddArg(s.placeholder) // Actual args will be filled in by resolveFwdRefs.
}
if v.Op != ssa.OpFwdRef {
continue
}
- n := s.varnum[v.Aux.(*Node)]
+ n := s.varnum[v.Aux.(FwdRefAux).N]
v.Op = ssa.OpCopy
v.Aux = nil
v.AddArg(values[n])
// Variant to use for small functions.
type simplePhiState struct {
- s *state // SSA state
- f *ssa.Func // function to work on
- fwdrefs []*ssa.Value // list of FwdRefs to be processed
- defvars []map[*Node]*ssa.Value // defined variables at end of each block
- reachable []bool // which blocks are reachable
+ s *state // SSA state
+ f *ssa.Func // function to work on
+ fwdrefs []*ssa.Value // list of FwdRefs to be processed
+ defvars []map[ir.Node]*ssa.Value // defined variables at end of each block
+ reachable []bool // which blocks are reachable
}
func (s *simplePhiState) insertPhis() {
continue
}
s.fwdrefs = append(s.fwdrefs, v)
- var_ := v.Aux.(*Node)
+ var_ := v.Aux.(FwdRefAux).N
if _, ok := s.defvars[b.ID][var_]; !ok {
s.defvars[b.ID][var_] = v // treat FwdDefs as definitions.
}
v := s.fwdrefs[len(s.fwdrefs)-1]
s.fwdrefs = s.fwdrefs[:len(s.fwdrefs)-1]
b := v.Block
- var_ := v.Aux.(*Node)
+ var_ := v.Aux.(FwdRefAux).N
if b == s.f.Entry {
// No variable should be live at entry.
s.s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, var_, v)
}
// lookupVarOutgoing finds the variable's value at the end of block b.
-func (s *simplePhiState) lookupVarOutgoing(b *ssa.Block, t *types.Type, var_ *Node, line src.XPos) *ssa.Value {
+func (s *simplePhiState) lookupVarOutgoing(b *ssa.Block, t *types.Type, var_ ir.Node, line src.XPos) *ssa.Value {
for {
if v := s.defvars[b.ID][var_]; v != nil {
return v
}
}
// Generate a FwdRef for the variable and return that.
- v := b.NewValue0A(line, ssa.OpFwdRef, t, var_)
+ v := b.NewValue0A(line, ssa.OpFwdRef, t, FwdRefAux{N: var_})
s.defvars[b.ID][var_] = v
- s.s.addNamedValue(var_, v)
+ if var_.Op() == ir.ONAME {
+ s.s.addNamedValue(var_.(*ir.Name), v)
+ }
s.fwdrefs = append(s.fwdrefs, v)
return v
}
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/obj"
// A collection of global state used by liveness analysis.
type Liveness struct {
- fn *Node
+ fn *ir.Func
f *ssa.Func
- vars []*Node
- idx map[*Node]int32
+ vars []*ir.Name
+ idx map[*ir.Name]int32
stkptrsize int64
be []BlockEffects
// nor do we care about non-local variables,
// nor do we care about empty structs (handled by the pointer check),
// nor do we care about the fake PAUTOHEAP variables.
-func livenessShouldTrack(n *Node) bool {
- return n.Op == ONAME && (n.Class() == PAUTO || n.Class() == PPARAM || n.Class() == PPARAMOUT) && n.Type.HasPointers()
+func livenessShouldTrack(nn ir.Node) bool {
+ if nn.Op() != ir.ONAME {
+ return false
+ }
+ n := nn.(*ir.Name)
+ return (n.Class() == ir.PAUTO || n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) && n.Type().HasPointers()
}
// getvariables returns the list of on-stack variables that we need to track
// and a map for looking up indices by *Node.
-func getvariables(fn *Node) ([]*Node, map[*Node]int32) {
- var vars []*Node
- for _, n := range fn.Func.Dcl {
+func getvariables(fn *ir.Func) ([]*ir.Name, map[*ir.Name]int32) {
+ var vars []*ir.Name
+ for _, n := range fn.Dcl {
if livenessShouldTrack(n) {
vars = append(vars, n)
}
}
- idx := make(map[*Node]int32, len(vars))
+ idx := make(map[*ir.Name]int32, len(vars))
for i, n := range vars {
idx[n] = int32(i)
}
func (lv *Liveness) initcache() {
if lv.cache.initialized {
- Fatalf("liveness cache initialized twice")
+ base.Fatalf("liveness cache initialized twice")
return
}
lv.cache.initialized = true
for i, node := range lv.vars {
switch node.Class() {
- case PPARAM:
+ case ir.PPARAM:
// A return instruction with a p.to is a tail return, which brings
// the stack pointer back up (if it ever went down) and then jumps
// to a new function entirely. That form of instruction must read
// function runs.
lv.cache.tailuevar = append(lv.cache.tailuevar, int32(i))
- case PPARAMOUT:
+ case ir.PPARAMOUT:
// All results are live at every return point.
// Note that this point is after escaping return values
// are copied back to the stack using their PAUTOHEAP references.
// If v does not affect any tracked variables, it returns -1, 0.
func (lv *Liveness) valueEffects(v *ssa.Value) (int32, liveEffect) {
n, e := affectedNode(v)
- if e == 0 || n == nil || n.Op != ONAME { // cheapest checks first
+ if e == 0 || n == nil || n.Op() != ir.ONAME { // cheapest checks first
return -1, 0
}
+ nn := n.(*ir.Name)
// AllocFrame has dropped unused variables from
// lv.fn.Func.Dcl, but they might still be referenced by
// OpVarFoo pseudo-ops. Ignore them to prevent "lost track of
// variable" ICEs (issue 19632).
switch v.Op {
case ssa.OpVarDef, ssa.OpVarKill, ssa.OpVarLive, ssa.OpKeepAlive:
- if !n.Name.Used() {
+ if !nn.Name().Used() {
return -1, 0
}
}
if e&(ssa.SymRead|ssa.SymAddr) != 0 {
effect |= uevar
}
- if e&ssa.SymWrite != 0 && (!isfat(n.Type) || v.Op == ssa.OpVarDef) {
+ if e&ssa.SymWrite != 0 && (!isfat(n.Type()) || v.Op == ssa.OpVarDef) {
effect |= varkill
}
return -1, 0
}
- if pos, ok := lv.idx[n]; ok {
+ if pos, ok := lv.idx[nn]; ok {
return pos, effect
}
return -1, 0
}
// affectedNode returns the *Node affected by v
-func affectedNode(v *ssa.Value) (*Node, ssa.SymEffect) {
+func affectedNode(v *ssa.Value) (ir.Node, ssa.SymEffect) {
// Special cases.
switch v.Op {
case ssa.OpLoadReg:
return n, ssa.SymWrite
case ssa.OpVarLive:
- return v.Aux.(*Node), ssa.SymRead
+ return v.Aux.(*ir.Name), ssa.SymRead
case ssa.OpVarDef, ssa.OpVarKill:
- return v.Aux.(*Node), ssa.SymWrite
+ return v.Aux.(*ir.Name), ssa.SymWrite
case ssa.OpKeepAlive:
n, _ := AutoVar(v.Args[0])
return n, ssa.SymRead
case nil, *obj.LSym:
// ok, but no node
return nil, e
- case *Node:
+ case *ir.Name:
return a, e
default:
- Fatalf("weird aux: %s", v.LongString())
+ base.Fatalf("weird aux: %s", v.LongString())
return nil, e
}
}
// Constructs a new liveness structure used to hold the global state of the
// liveness computation. The cfg argument is a slice of *BasicBlocks and the
// vars argument is a slice of *Nodes.
-func newliveness(fn *Node, f *ssa.Func, vars []*Node, idx map[*Node]int32, stkptrsize int64) *Liveness {
+func newliveness(fn *ir.Func, f *ssa.Func, vars []*ir.Name, idx map[*ir.Name]int32, stkptrsize int64) *Liveness {
lv := &Liveness{
fn: fn,
f: f,
// on future calls with the same type t.
func onebitwalktype1(t *types.Type, off int64, bv bvec) {
if t.Align > 0 && off&int64(t.Align-1) != 0 {
- Fatalf("onebitwalktype1: invalid initial alignment: type %v has alignment %d, but offset is %v", t, t.Align, off)
+ base.Fatalf("onebitwalktype1: invalid initial alignment: type %v has alignment %d, but offset is %v", t, t.Align, off)
}
if !t.HasPointers() {
// Note: this case ensures that pointers to go:notinheap types
return
}
- switch t.Etype {
- case TPTR, TUNSAFEPTR, TFUNC, TCHAN, TMAP:
+ switch t.Kind() {
+ case types.TPTR, types.TUNSAFEPTR, types.TFUNC, types.TCHAN, types.TMAP:
if off&int64(Widthptr-1) != 0 {
- Fatalf("onebitwalktype1: invalid alignment, %v", t)
+ base.Fatalf("onebitwalktype1: invalid alignment, %v", t)
}
bv.Set(int32(off / int64(Widthptr))) // pointer
- case TSTRING:
+ case types.TSTRING:
// struct { byte *str; intgo len; }
if off&int64(Widthptr-1) != 0 {
- Fatalf("onebitwalktype1: invalid alignment, %v", t)
+ base.Fatalf("onebitwalktype1: invalid alignment, %v", t)
}
bv.Set(int32(off / int64(Widthptr))) //pointer in first slot
- case TINTER:
+ case types.TINTER:
// struct { Itab *tab; void *data; }
// or, when isnilinter(t)==true:
// struct { Type *type; void *data; }
if off&int64(Widthptr-1) != 0 {
- Fatalf("onebitwalktype1: invalid alignment, %v", t)
+ base.Fatalf("onebitwalktype1: invalid alignment, %v", t)
}
// The first word of an interface is a pointer, but we don't
// treat it as such.
// well as scan itabs to update their itab._type fields).
bv.Set(int32(off/int64(Widthptr) + 1)) // pointer in second slot
- case TSLICE:
+ case types.TSLICE:
// struct { byte *array; uintgo len; uintgo cap; }
if off&int64(Widthptr-1) != 0 {
- Fatalf("onebitwalktype1: invalid TARRAY alignment, %v", t)
+ base.Fatalf("onebitwalktype1: invalid TARRAY alignment, %v", t)
}
bv.Set(int32(off / int64(Widthptr))) // pointer in first slot (BitsPointer)
- case TARRAY:
+ case types.TARRAY:
elt := t.Elem()
if elt.Width == 0 {
// Short-circuit for #20739.
off += elt.Width
}
- case TSTRUCT:
+ case types.TSTRUCT:
for _, f := range t.Fields().Slice() {
onebitwalktype1(f.Type, off+f.Offset, bv)
}
default:
- Fatalf("onebitwalktype1: unexpected type, %v", t)
+ base.Fatalf("onebitwalktype1: unexpected type, %v", t)
}
}
// Generates live pointer value maps for arguments and local variables. The
// this argument and the in arguments are always assumed live. The vars
// argument is a slice of *Nodes.
-func (lv *Liveness) pointerMap(liveout bvec, vars []*Node, args, locals bvec) {
+func (lv *Liveness) pointerMap(liveout bvec, vars []*ir.Name, args, locals bvec) {
for i := int32(0); ; i++ {
i = liveout.Next(i)
if i < 0 {
}
node := vars[i]
switch node.Class() {
- case PAUTO:
- onebitwalktype1(node.Type, node.Xoffset+lv.stkptrsize, locals)
+ case ir.PAUTO:
+ onebitwalktype1(node.Type(), node.FrameOffset()+lv.stkptrsize, locals)
- case PPARAM, PPARAMOUT:
- onebitwalktype1(node.Type, node.Xoffset, args)
+ case ir.PPARAM, ir.PPARAMOUT:
+ onebitwalktype1(node.Type(), node.FrameOffset(), args)
}
}
}
// go:nosplit functions are similar. Since safe points used to
// be coupled with stack checks, go:nosplit often actually
// means "no safe points in this function".
- return compiling_runtime || f.NoSplit
+ return base.Flag.CompilingRuntime || f.NoSplit
}
// markUnsafePoints finds unsafe points and computes lv.unsafePoints.
// pointers to copy values back to the stack).
// TODO: if the output parameter is heap-allocated, then we
// don't need to keep the stack copy live?
- if lv.fn.Func.HasDefer() {
+ if lv.fn.HasDefer() {
for i, n := range lv.vars {
- if n.Class() == PPARAMOUT {
- if n.Name.IsOutputParamHeapAddr() {
+ if n.Class() == ir.PPARAMOUT {
+ if n.Name().IsOutputParamHeapAddr() {
// Just to be paranoid. Heap addresses are PAUTOs.
- Fatalf("variable %v both output param and heap output param", n)
+ base.Fatalf("variable %v both output param and heap output param", n)
}
- if n.Name.Param.Heapaddr != nil {
+ if n.Name().Heapaddr != nil {
// If this variable moved to the heap, then
// its stack copy is not live.
continue
// Note: zeroing is handled by zeroResults in walk.go.
livedefer.Set(int32(i))
}
- if n.Name.IsOutputParamHeapAddr() {
+ if n.Name().IsOutputParamHeapAddr() {
// This variable will be overwritten early in the function
// prologue (from the result of a mallocgc) but we need to
// zero it in case that malloc causes a stack scan.
- n.Name.SetNeedzero(true)
+ n.Name().SetNeedzero(true)
livedefer.Set(int32(i))
}
- if n.Name.OpenDeferSlot() {
+ if n.Name().OpenDeferSlot() {
// Open-coded defer args slots must be live
// everywhere in a function, since a panic can
// occur (almost) anywhere. Because it is live
// everywhere, it must be zeroed on entry.
livedefer.Set(int32(i))
// It was already marked as Needzero when created.
- if !n.Name.Needzero() {
- Fatalf("all pointer-containing defer arg slots should have Needzero set")
+ if !n.Name().Needzero() {
+ base.Fatalf("all pointer-containing defer arg slots should have Needzero set")
}
}
}
if b == lv.f.Entry {
if index != 0 {
- Fatalf("bad index for entry point: %v", index)
+ base.Fatalf("bad index for entry point: %v", index)
}
// Check to make sure only input variables are live.
if !liveout.Get(int32(i)) {
continue
}
- if n.Class() == PPARAM {
+ if n.Class() == ir.PPARAM {
continue // ok
}
- Fatalf("bad live variable at entry of %v: %L", lv.fn.Func.Nname, n)
+ base.Fatalf("bad live variable at entry of %v: %L", lv.fn.Nname, n)
}
// Record live variables.
}
// If we have an open-coded deferreturn call, make a liveness map for it.
- if lv.fn.Func.OpenCodedDeferDisallowed() {
+ if lv.fn.OpenCodedDeferDisallowed() {
lv.livenessMap.deferreturn = LivenessDontCare
} else {
lv.livenessMap.deferreturn = LivenessIndex{
// the only things that can possibly be live are the
// input parameters.
for j, n := range lv.vars {
- if n.Class() != PPARAM && lv.stackMaps[0].Get(int32(j)) {
- lv.f.Fatalf("%v %L recorded as live on entry", lv.fn.Func.Nname, n)
+ if n.Class() != ir.PPARAM && lv.stackMaps[0].Get(int32(j)) {
+ lv.f.Fatalf("%v %L recorded as live on entry", lv.fn.Nname, n)
}
}
}
}
func (lv *Liveness) showlive(v *ssa.Value, live bvec) {
- if debuglive == 0 || lv.fn.funcname() == "init" || strings.HasPrefix(lv.fn.funcname(), ".") {
+ if base.Flag.Live == 0 || ir.FuncName(lv.fn) == "init" || strings.HasPrefix(ir.FuncName(lv.fn), ".") {
return
}
if !(v == nil || v.Op.IsCall()) {
return
}
- pos := lv.fn.Func.Nname.Pos
+ pos := lv.fn.Nname.Pos()
if v != nil {
pos = v.Pos
}
s := "live at "
if v == nil {
- s += fmt.Sprintf("entry to %s:", lv.fn.funcname())
+ s += fmt.Sprintf("entry to %s:", ir.FuncName(lv.fn))
} else if sym, ok := v.Aux.(*ssa.AuxCall); ok && sym.Fn != nil {
fn := sym.Fn.Name
if pos := strings.Index(fn, "."); pos >= 0 {
}
}
- Warnl(pos, s)
+ base.WarnfAt(pos, s)
}
func (lv *Liveness) printbvec(printed bool, name string, live bvec) bool {
if !live.Get(int32(i)) {
continue
}
- fmt.Printf("%s%s", comma, n.Sym.Name)
+ fmt.Printf("%s%s", comma, n.Sym().Name)
comma = ","
}
return true
}
fmt.Printf("%s=", name)
if x {
- fmt.Printf("%s", lv.vars[pos].Sym.Name)
+ fmt.Printf("%s", lv.vars[pos].Sym().Name)
}
return true
// This format synthesizes the information used during the multiple passes
// into a single presentation.
func (lv *Liveness) printDebug() {
- fmt.Printf("liveness: %s\n", lv.fn.funcname())
+ fmt.Printf("liveness: %s\n", ir.FuncName(lv.fn))
for i, b := range lv.f.Blocks {
if i > 0 {
if b == lv.f.Entry {
live := lv.stackMaps[0]
- fmt.Printf("(%s) function entry\n", linestr(lv.fn.Func.Nname.Pos))
+ fmt.Printf("(%s) function entry\n", base.FmtPos(lv.fn.Nname.Pos()))
fmt.Printf("\tlive=")
printed = false
for j, n := range lv.vars {
}
for _, v := range b.Values {
- fmt.Printf("(%s) %v\n", linestr(v.Pos), v.LongString())
+ fmt.Printf("(%s) %v\n", base.FmtPos(v.Pos), v.LongString())
pcdata := lv.livenessMap.Get(v)
// Size args bitmaps to be just large enough to hold the largest pointer.
// First, find the largest Xoffset node we care about.
// (Nodes without pointers aren't in lv.vars; see livenessShouldTrack.)
- var maxArgNode *Node
+ var maxArgNode *ir.Name
for _, n := range lv.vars {
switch n.Class() {
- case PPARAM, PPARAMOUT:
- if maxArgNode == nil || n.Xoffset > maxArgNode.Xoffset {
+ case ir.PPARAM, ir.PPARAMOUT:
+ if maxArgNode == nil || n.FrameOffset() > maxArgNode.FrameOffset() {
maxArgNode = n
}
}
// Next, find the offset of the largest pointer in the largest node.
var maxArgs int64
if maxArgNode != nil {
- maxArgs = maxArgNode.Xoffset + typeptrdata(maxArgNode.Type)
+ maxArgs = maxArgNode.FrameOffset() + typeptrdata(maxArgNode.Type())
}
// Size locals bitmaps to be stkptrsize sized.
// These symbols will be added to Ctxt.Data by addGCLocals
// after parallel compilation is done.
makeSym := func(tmpSym *obj.LSym) *obj.LSym {
- return Ctxt.LookupInit(fmt.Sprintf("gclocals·%x", md5.Sum(tmpSym.P)), func(lsym *obj.LSym) {
+ return base.Ctxt.LookupInit(fmt.Sprintf("gclocals·%x", md5.Sum(tmpSym.P)), func(lsym *obj.LSym) {
lsym.P = tmpSym.P
lsym.Set(obj.AttrContentAddressable, true)
})
// pointer variables in the function and emits a runtime data
// structure read by the garbage collector.
// Returns a map from GC safe points to their corresponding stack map index.
-func liveness(e *ssafn, f *ssa.Func, pp *Progs) LivenessMap {
+func liveness(curfn *ir.Func, f *ssa.Func, stkptrsize int64, pp *Progs) LivenessMap {
// Construct the global liveness state.
- vars, idx := getvariables(e.curfn)
- lv := newliveness(e.curfn, f, vars, idx, e.stkptrsize)
+ vars, idx := getvariables(curfn)
+ lv := newliveness(curfn, f, vars, idx, stkptrsize)
// Run the dataflow framework.
lv.prologue()
lv.solve()
lv.epilogue()
- if debuglive > 0 {
+ if base.Flag.Live > 0 {
lv.showlive(nil, lv.stackMaps[0])
for _, b := range f.Blocks {
for _, val := range b.Values {
}
}
}
- if debuglive >= 2 {
+ if base.Flag.Live >= 2 {
lv.printDebug()
}
}
// Emit the live pointer map data structures
- ls := e.curfn.Func.lsym
+ ls := curfn.LSym
fninfo := ls.Func()
fninfo.GCArgs, fninfo.GCLocals = lv.emit()
// to fully initialize t.
func isfat(t *types.Type) bool {
if t != nil {
- switch t.Etype {
- case TSLICE, TSTRING,
- TINTER: // maybe remove later
+ switch t.Kind() {
+ case types.TSLICE, types.TSTRING,
+ types.TINTER: // maybe remove later
return true
- case TARRAY:
+ case types.TARRAY:
// Array of 1 element, check if element is fat
if t.NumElem() == 1 {
return isfat(t.Elem())
}
return true
- case TSTRUCT:
+ case types.TSTRUCT:
// Struct with 1 field, check if field is fat
if t.NumFields() == 1 {
return isfat(t.Field(0).Type)
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/src"
"cmd/internal/sys"
var norace_inst_pkgs = []string{"sync", "sync/atomic"}
func ispkgin(pkgs []string) bool {
- if myimportpath != "" {
+ if base.Ctxt.Pkgpath != "" {
for _, p := range pkgs {
- if myimportpath == p {
+ if base.Ctxt.Pkgpath == p {
return true
}
}
return false
}
-func instrument(fn *Node) {
- if fn.Func.Pragma&Norace != 0 {
+func instrument(fn *ir.Func) {
+ if fn.Pragma&ir.Norace != 0 || (fn.Sym().Linksym() != nil && fn.Sym().Linksym().ABIWrapper()) {
return
}
- if !flag_race || !ispkgin(norace_inst_pkgs) {
- fn.Func.SetInstrumentBody(true)
+ if !base.Flag.Race || !ispkgin(norace_inst_pkgs) {
+ fn.SetInstrumentBody(true)
}
- if flag_race {
- lno := lineno
- lineno = src.NoXPos
+ if base.Flag.Race {
+ lno := base.Pos
+ base.Pos = src.NoXPos
if thearch.LinkArch.Arch.Family != sys.AMD64 {
- fn.Func.Enter.Prepend(mkcall("racefuncenterfp", nil, nil))
- fn.Func.Exit.Append(mkcall("racefuncexit", nil, nil))
+ fn.Enter.Prepend(mkcall("racefuncenterfp", nil, nil))
+ fn.Exit.Append(mkcall("racefuncexit", nil, nil))
} else {
// nodpc is the PC of the caller as extracted by
// This only works for amd64. This will not
// work on arm or others that might support
// race in the future.
- nodpc := nodfp.copy()
- nodpc.Type = types.Types[TUINTPTR]
- nodpc.Xoffset = int64(-Widthptr)
- fn.Func.Dcl = append(fn.Func.Dcl, nodpc)
- fn.Func.Enter.Prepend(mkcall("racefuncenter", nil, nil, nodpc))
- fn.Func.Exit.Append(mkcall("racefuncexit", nil, nil))
+ nodpc := nodfp.CloneName()
+ nodpc.SetType(types.Types[types.TUINTPTR])
+ nodpc.SetFrameOffset(int64(-Widthptr))
+ fn.Dcl = append(fn.Dcl, nodpc)
+ fn.Enter.Prepend(mkcall("racefuncenter", nil, nil, nodpc))
+ fn.Exit.Append(mkcall("racefuncexit", nil, nil))
}
- lineno = lno
+ base.Pos = lno
}
}
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/sys"
"unicode/utf8"
)
// range
-func typecheckrange(n *Node) {
+func typecheckrange(n *ir.RangeStmt) {
// Typechecking order is important here:
// 0. first typecheck range expression (slice/map/chan),
// it is evaluated only once and so logically it is not part of the loop.
// second half of dance, the first half being typecheckrangeExpr
n.SetTypecheck(1)
- ls := n.List.Slice()
+ ls := n.List().Slice()
for i1, n1 := range ls {
if n1.Typecheck() == 0 {
ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign)
}
decldepth++
- typecheckslice(n.Nbody.Slice(), ctxStmt)
+ typecheckslice(n.Body().Slice(), ctxStmt)
decldepth--
}
-func typecheckrangeExpr(n *Node) {
- n.Right = typecheck(n.Right, ctxExpr)
+func typecheckrangeExpr(n *ir.RangeStmt) {
+ n.SetRight(typecheck(n.Right(), ctxExpr))
- t := n.Right.Type
+ t := n.Right().Type()
if t == nil {
return
}
// delicate little dance. see typecheckas2
- ls := n.List.Slice()
+ ls := n.List().Slice()
for i1, n1 := range ls {
- if n1.Name == nil || n1.Name.Defn != n {
+ if !ir.DeclaredBy(n1, n) {
ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign)
}
}
if t.IsPtr() && t.Elem().IsArray() {
t = t.Elem()
}
- n.Type = t
+ n.SetType(t)
var t1, t2 *types.Type
toomany := false
- switch t.Etype {
+ switch t.Kind() {
default:
- yyerrorl(n.Pos, "cannot range over %L", n.Right)
+ base.ErrorfAt(n.Pos(), "cannot range over %L", n.Right())
return
- case TARRAY, TSLICE:
- t1 = types.Types[TINT]
+ case types.TARRAY, types.TSLICE:
+ t1 = types.Types[types.TINT]
t2 = t.Elem()
- case TMAP:
+ case types.TMAP:
t1 = t.Key()
t2 = t.Elem()
- case TCHAN:
+ case types.TCHAN:
if !t.ChanDir().CanRecv() {
- yyerrorl(n.Pos, "invalid operation: range %v (receive from send-only type %v)", n.Right, n.Right.Type)
+ base.ErrorfAt(n.Pos(), "invalid operation: range %v (receive from send-only type %v)", n.Right(), n.Right().Type())
return
}
t1 = t.Elem()
t2 = nil
- if n.List.Len() == 2 {
+ if n.List().Len() == 2 {
toomany = true
}
- case TSTRING:
- t1 = types.Types[TINT]
- t2 = types.Runetype
+ case types.TSTRING:
+ t1 = types.Types[types.TINT]
+ t2 = types.RuneType
}
- if n.List.Len() > 2 || toomany {
- yyerrorl(n.Pos, "too many variables in range")
+ if n.List().Len() > 2 || toomany {
+ base.ErrorfAt(n.Pos(), "too many variables in range")
}
- var v1, v2 *Node
- if n.List.Len() != 0 {
- v1 = n.List.First()
+ var v1, v2 ir.Node
+ if n.List().Len() != 0 {
+ v1 = n.List().First()
}
- if n.List.Len() > 1 {
- v2 = n.List.Second()
+ if n.List().Len() > 1 {
+ v2 = n.List().Second()
}
// this is not only an optimization but also a requirement in the spec.
// "if the second iteration variable is the blank identifier, the range
// clause is equivalent to the same clause with only the first variable
// present."
- if v2.isBlank() {
+ if ir.IsBlank(v2) {
if v1 != nil {
- n.List.Set1(v1)
+ n.PtrList().Set1(v1)
}
v2 = nil
}
if v1 != nil {
- if v1.Name != nil && v1.Name.Defn == n {
- v1.Type = t1
- } else if v1.Type != nil {
- if op, why := assignop(t1, v1.Type); op == OXXX {
- yyerrorl(n.Pos, "cannot assign type %v to %L in range%s", t1, v1, why)
+ if ir.DeclaredBy(v1, n) {
+ v1.SetType(t1)
+ } else if v1.Type() != nil {
+ if op, why := assignop(t1, v1.Type()); op == ir.OXXX {
+ base.ErrorfAt(n.Pos(), "cannot assign type %v to %L in range%s", t1, v1, why)
}
}
checkassign(n, v1)
}
if v2 != nil {
- if v2.Name != nil && v2.Name.Defn == n {
- v2.Type = t2
- } else if v2.Type != nil {
- if op, why := assignop(t2, v2.Type); op == OXXX {
- yyerrorl(n.Pos, "cannot assign type %v to %L in range%s", t2, v2, why)
+ if ir.DeclaredBy(v2, n) {
+ v2.SetType(t2)
+ } else if v2.Type() != nil {
+ if op, why := assignop(t2, v2.Type()); op == ir.OXXX {
+ base.ErrorfAt(n.Pos(), "cannot assign type %v to %L in range%s", t2, v2, why)
}
}
checkassign(n, v2)
// simpler forms. The result must be assigned back to n.
// Node n may also be modified in place, and may also be
// the returned node.
-func walkrange(n *Node) *Node {
- if isMapClear(n) {
- m := n.Right
+func walkrange(nrange *ir.RangeStmt) ir.Node {
+ if isMapClear(nrange) {
+ m := nrange.Right()
lno := setlineno(m)
- n = mapClear(m)
- lineno = lno
+ n := mapClear(m)
+ base.Pos = lno
return n
}
+ nfor := ir.NodAt(nrange.Pos(), ir.OFOR, nil, nil)
+ nfor.SetInit(nrange.Init())
+ nfor.SetSym(nrange.Sym())
+
// variable name conventions:
// ohv1, hv1, hv2: hidden (old) val 1, 2
// ha, hit: hidden aggregate, iterator
// hb: hidden bool
// a, v1, v2: not hidden aggregate, val 1, 2
- t := n.Type
+ t := nrange.Type()
- a := n.Right
+ a := nrange.Right()
lno := setlineno(a)
- n.Right = nil
- var v1, v2 *Node
- l := n.List.Len()
+ var v1, v2 ir.Node
+ l := nrange.List().Len()
if l > 0 {
- v1 = n.List.First()
+ v1 = nrange.List().First()
}
if l > 1 {
- v2 = n.List.Second()
+ v2 = nrange.List().Second()
}
- if v2.isBlank() {
+ if ir.IsBlank(v2) {
v2 = nil
}
- if v1.isBlank() && v2 == nil {
+ if ir.IsBlank(v1) && v2 == nil {
v1 = nil
}
if v1 == nil && v2 != nil {
- Fatalf("walkrange: v2 != nil while v1 == nil")
+ base.Fatalf("walkrange: v2 != nil while v1 == nil")
}
- // n.List has no meaning anymore, clear it
- // to avoid erroneous processing by racewalk.
- n.List.Set(nil)
-
- var ifGuard *Node
-
- translatedLoopOp := OFOR
+ var ifGuard *ir.IfStmt
- var body []*Node
- var init []*Node
- switch t.Etype {
+ var body []ir.Node
+ var init []ir.Node
+ switch t.Kind() {
default:
- Fatalf("walkrange")
+ base.Fatalf("walkrange")
- case TARRAY, TSLICE:
- if arrayClear(n, v1, v2, a) {
- lineno = lno
- return n
+ case types.TARRAY, types.TSLICE:
+ if nn := arrayClear(nrange, v1, v2, a); nn != nil {
+ base.Pos = lno
+ return nn
}
// order.stmt arranged for a copy of the array/slice variable if needed.
ha := a
- hv1 := temp(types.Types[TINT])
- hn := temp(types.Types[TINT])
+ hv1 := temp(types.Types[types.TINT])
+ hn := temp(types.Types[types.TINT])
- init = append(init, nod(OAS, hv1, nil))
- init = append(init, nod(OAS, hn, nod(OLEN, ha, nil)))
+ init = append(init, ir.Nod(ir.OAS, hv1, nil))
+ init = append(init, ir.Nod(ir.OAS, hn, ir.Nod(ir.OLEN, ha, nil)))
- n.Left = nod(OLT, hv1, hn)
- n.Right = nod(OAS, hv1, nod(OADD, hv1, nodintconst(1)))
+ nfor.SetLeft(ir.Nod(ir.OLT, hv1, hn))
+ nfor.SetRight(ir.Nod(ir.OAS, hv1, ir.Nod(ir.OADD, hv1, nodintconst(1))))
// for range ha { body }
if v1 == nil {
// for v1 := range ha { body }
if v2 == nil {
- body = []*Node{nod(OAS, v1, hv1)}
+ body = []ir.Node{ir.Nod(ir.OAS, v1, hv1)}
break
}
// for v1, v2 := range ha { body }
- if cheapComputableIndex(n.Type.Elem().Width) {
+ if cheapComputableIndex(nrange.Type().Elem().Width) {
// v1, v2 = hv1, ha[hv1]
- tmp := nod(OINDEX, ha, hv1)
+ tmp := ir.Nod(ir.OINDEX, ha, hv1)
tmp.SetBounded(true)
// Use OAS2 to correctly handle assignments
// of the form "v1, a[v1] := range".
- a := nod(OAS2, nil, nil)
- a.List.Set2(v1, v2)
- a.Rlist.Set2(hv1, tmp)
- body = []*Node{a}
+ a := ir.Nod(ir.OAS2, nil, nil)
+ a.PtrList().Set2(v1, v2)
+ a.PtrRlist().Set2(hv1, tmp)
+ body = []ir.Node{a}
break
}
// TODO(austin): OFORUNTIL inhibits bounds-check
// elimination on the index variable (see #20711).
// Enhance the prove pass to understand this.
- ifGuard = nod(OIF, nil, nil)
- ifGuard.Left = nod(OLT, hv1, hn)
- translatedLoopOp = OFORUNTIL
+ ifGuard = ir.NewIfStmt(base.Pos, nil, nil, nil)
+ ifGuard.SetLeft(ir.Nod(ir.OLT, hv1, hn))
+ nfor.SetOp(ir.OFORUNTIL)
- hp := temp(types.NewPtr(n.Type.Elem()))
- tmp := nod(OINDEX, ha, nodintconst(0))
+ hp := temp(types.NewPtr(nrange.Type().Elem()))
+ tmp := ir.Nod(ir.OINDEX, ha, nodintconst(0))
tmp.SetBounded(true)
- init = append(init, nod(OAS, hp, nod(OADDR, tmp, nil)))
+ init = append(init, ir.Nod(ir.OAS, hp, nodAddr(tmp)))
// Use OAS2 to correctly handle assignments
// of the form "v1, a[v1] := range".
- a := nod(OAS2, nil, nil)
- a.List.Set2(v1, v2)
- a.Rlist.Set2(hv1, nod(ODEREF, hp, nil))
+ a := ir.Nod(ir.OAS2, nil, nil)
+ a.PtrList().Set2(v1, v2)
+ a.PtrRlist().Set2(hv1, ir.Nod(ir.ODEREF, hp, nil))
body = append(body, a)
// Advance pointer as part of the late increment.
// This runs *after* the condition check, so we know
// advancing the pointer is safe and won't go past the
// end of the allocation.
- a = nod(OAS, hp, addptr(hp, t.Elem().Width))
- a = typecheck(a, ctxStmt)
- n.List.Set1(a)
+ as := ir.Nod(ir.OAS, hp, addptr(hp, t.Elem().Width))
+ nfor.PtrList().Set1(typecheck(as, ctxStmt))
- case TMAP:
+ case types.TMAP:
// order.stmt allocated the iterator for us.
// we only use a once, so no copy needed.
ha := a
- hit := prealloc[n]
- th := hit.Type
- n.Left = nil
+ hit := nrange.Prealloc
+ th := hit.Type()
keysym := th.Field(0).Sym // depends on layout of iterator struct. See reflect.go:hiter
elemsym := th.Field(1).Sym // ditto
fn := syslook("mapiterinit")
fn = substArgTypes(fn, t.Key(), t.Elem(), th)
- init = append(init, mkcall1(fn, nil, nil, typename(t), ha, nod(OADDR, hit, nil)))
- n.Left = nod(ONE, nodSym(ODOT, hit, keysym), nodnil())
+ init = append(init, mkcall1(fn, nil, nil, typename(t), ha, nodAddr(hit)))
+ nfor.SetLeft(ir.Nod(ir.ONE, nodSym(ir.ODOT, hit, keysym), nodnil()))
fn = syslook("mapiternext")
fn = substArgTypes(fn, th)
- n.Right = mkcall1(fn, nil, nil, nod(OADDR, hit, nil))
+ nfor.SetRight(mkcall1(fn, nil, nil, nodAddr(hit)))
- key := nodSym(ODOT, hit, keysym)
- key = nod(ODEREF, key, nil)
+ key := ir.Nod(ir.ODEREF, nodSym(ir.ODOT, hit, keysym), nil)
if v1 == nil {
body = nil
} else if v2 == nil {
- body = []*Node{nod(OAS, v1, key)}
+ body = []ir.Node{ir.Nod(ir.OAS, v1, key)}
} else {
- elem := nodSym(ODOT, hit, elemsym)
- elem = nod(ODEREF, elem, nil)
- a := nod(OAS2, nil, nil)
- a.List.Set2(v1, v2)
- a.Rlist.Set2(key, elem)
- body = []*Node{a}
+ elem := ir.Nod(ir.ODEREF, nodSym(ir.ODOT, hit, elemsym), nil)
+ a := ir.Nod(ir.OAS2, nil, nil)
+ a.PtrList().Set2(v1, v2)
+ a.PtrRlist().Set2(key, elem)
+ body = []ir.Node{a}
}
- case TCHAN:
+ case types.TCHAN:
// order.stmt arranged for a copy of the channel variable.
ha := a
- n.Left = nil
-
hv1 := temp(t.Elem())
hv1.SetTypecheck(1)
if t.Elem().HasPointers() {
- init = append(init, nod(OAS, hv1, nil))
+ init = append(init, ir.Nod(ir.OAS, hv1, nil))
}
- hb := temp(types.Types[TBOOL])
+ hb := temp(types.Types[types.TBOOL])
- n.Left = nod(ONE, hb, nodbool(false))
- a := nod(OAS2RECV, nil, nil)
+ nfor.SetLeft(ir.Nod(ir.ONE, hb, nodbool(false)))
+ a := ir.Nod(ir.OAS2RECV, nil, nil)
a.SetTypecheck(1)
- a.List.Set2(hv1, hb)
- a.Right = nod(ORECV, ha, nil)
- n.Left.Ninit.Set1(a)
+ a.PtrList().Set2(hv1, hb)
+ a.PtrRlist().Set1(ir.Nod(ir.ORECV, ha, nil))
+ nfor.Left().PtrInit().Set1(a)
if v1 == nil {
body = nil
} else {
- body = []*Node{nod(OAS, v1, hv1)}
+ body = []ir.Node{ir.Nod(ir.OAS, v1, hv1)}
}
// Zero hv1. This prevents hv1 from being the sole, inaccessible
// reference to an otherwise GC-able value during the next channel receive.
// See issue 15281.
- body = append(body, nod(OAS, hv1, nil))
+ body = append(body, ir.Nod(ir.OAS, hv1, nil))
- case TSTRING:
+ case types.TSTRING:
// Transform string range statements like "for v1, v2 = range a" into
//
// ha := a
// order.stmt arranged for a copy of the string variable.
ha := a
- hv1 := temp(types.Types[TINT])
- hv1t := temp(types.Types[TINT])
- hv2 := temp(types.Runetype)
+ hv1 := temp(types.Types[types.TINT])
+ hv1t := temp(types.Types[types.TINT])
+ hv2 := temp(types.RuneType)
// hv1 := 0
- init = append(init, nod(OAS, hv1, nil))
+ init = append(init, ir.Nod(ir.OAS, hv1, nil))
// hv1 < len(ha)
- n.Left = nod(OLT, hv1, nod(OLEN, ha, nil))
+ nfor.SetLeft(ir.Nod(ir.OLT, hv1, ir.Nod(ir.OLEN, ha, nil)))
if v1 != nil {
// hv1t = hv1
- body = append(body, nod(OAS, hv1t, hv1))
+ body = append(body, ir.Nod(ir.OAS, hv1t, hv1))
}
// hv2 := rune(ha[hv1])
- nind := nod(OINDEX, ha, hv1)
+ nind := ir.Nod(ir.OINDEX, ha, hv1)
nind.SetBounded(true)
- body = append(body, nod(OAS, hv2, conv(nind, types.Runetype)))
+ body = append(body, ir.Nod(ir.OAS, hv2, conv(nind, types.RuneType)))
// if hv2 < utf8.RuneSelf
- nif := nod(OIF, nil, nil)
- nif.Left = nod(OLT, hv2, nodintconst(utf8.RuneSelf))
+ nif := ir.Nod(ir.OIF, nil, nil)
+ nif.SetLeft(ir.Nod(ir.OLT, hv2, nodintconst(utf8.RuneSelf)))
// hv1++
- nif.Nbody.Set1(nod(OAS, hv1, nod(OADD, hv1, nodintconst(1))))
+ nif.PtrBody().Set1(ir.Nod(ir.OAS, hv1, ir.Nod(ir.OADD, hv1, nodintconst(1))))
// } else {
- eif := nod(OAS2, nil, nil)
- nif.Rlist.Set1(eif)
+ eif := ir.Nod(ir.OAS2, nil, nil)
+ nif.PtrRlist().Set1(eif)
// hv2, hv1 = decoderune(ha, hv1)
- eif.List.Set2(hv2, hv1)
+ eif.PtrList().Set2(hv2, hv1)
fn := syslook("decoderune")
- eif.Rlist.Set1(mkcall1(fn, fn.Type.Results(), nil, ha, hv1))
+ eif.PtrRlist().Set1(mkcall1(fn, fn.Type().Results(), nil, ha, hv1))
body = append(body, nif)
if v1 != nil {
if v2 != nil {
// v1, v2 = hv1t, hv2
- a := nod(OAS2, nil, nil)
- a.List.Set2(v1, v2)
- a.Rlist.Set2(hv1t, hv2)
+ a := ir.Nod(ir.OAS2, nil, nil)
+ a.PtrList().Set2(v1, v2)
+ a.PtrRlist().Set2(hv1t, hv2)
body = append(body, a)
} else {
// v1 = hv1t
- body = append(body, nod(OAS, v1, hv1t))
+ body = append(body, ir.Nod(ir.OAS, v1, hv1t))
}
}
}
- n.Op = translatedLoopOp
typecheckslice(init, ctxStmt)
if ifGuard != nil {
- ifGuard.Ninit.Append(init...)
- ifGuard = typecheck(ifGuard, ctxStmt)
+ ifGuard.PtrInit().Append(init...)
+ ifGuard = typecheck(ifGuard, ctxStmt).(*ir.IfStmt)
} else {
- n.Ninit.Append(init...)
+ nfor.PtrInit().Append(init...)
}
- typecheckslice(n.Left.Ninit.Slice(), ctxStmt)
+ typecheckslice(nfor.Left().Init().Slice(), ctxStmt)
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- n.Right = typecheck(n.Right, ctxStmt)
+ nfor.SetLeft(typecheck(nfor.Left(), ctxExpr))
+ nfor.SetLeft(defaultlit(nfor.Left(), nil))
+ nfor.SetRight(typecheck(nfor.Right(), ctxStmt))
typecheckslice(body, ctxStmt)
- n.Nbody.Prepend(body...)
+ nfor.PtrBody().Append(body...)
+ nfor.PtrBody().Append(nrange.Body().Slice()...)
+ var n ir.Node = nfor
if ifGuard != nil {
- ifGuard.Nbody.Set1(n)
+ ifGuard.PtrBody().Set1(n)
n = ifGuard
}
n = walkstmt(n)
- lineno = lno
+ base.Pos = lno
return n
}
// }
//
// where == for keys of map m is reflexive.
-func isMapClear(n *Node) bool {
- if Debug.N != 0 || instrumenting {
+func isMapClear(n *ir.RangeStmt) bool {
+ if base.Flag.N != 0 || instrumenting {
return false
}
- if n.Op != ORANGE || n.Type.Etype != TMAP || n.List.Len() != 1 {
+ if n.Op() != ir.ORANGE || n.Type().Kind() != types.TMAP || n.List().Len() != 1 {
return false
}
- k := n.List.First()
- if k == nil || k.isBlank() {
+ k := n.List().First()
+ if k == nil || ir.IsBlank(k) {
return false
}
// Require k to be a new variable name.
- if k.Name == nil || k.Name.Defn != n {
+ if !ir.DeclaredBy(k, n) {
return false
}
- if n.Nbody.Len() != 1 {
+ if n.Body().Len() != 1 {
return false
}
- stmt := n.Nbody.First() // only stmt in body
- if stmt == nil || stmt.Op != ODELETE {
+ stmt := n.Body().First() // only stmt in body
+ if stmt == nil || stmt.Op() != ir.ODELETE {
return false
}
- m := n.Right
- if !samesafeexpr(stmt.List.First(), m) || !samesafeexpr(stmt.List.Second(), k) {
+ m := n.Right()
+ if delete := stmt.(*ir.CallExpr); !samesafeexpr(delete.List().First(), m) || !samesafeexpr(delete.List().Second(), k) {
return false
}
// Keys where equality is not reflexive can not be deleted from maps.
- if !isreflexive(m.Type.Key()) {
+ if !isreflexive(m.Type().Key()) {
return false
}
}
// mapClear constructs a call to runtime.mapclear for the map m.
-func mapClear(m *Node) *Node {
- t := m.Type
+func mapClear(m ir.Node) ir.Node {
+ t := m.Type()
// instantiate mapclear(typ *type, hmap map[any]any)
fn := syslook("mapclear")
fn = substArgTypes(fn, t.Key(), t.Elem())
n := mkcall1(fn, nil, nil, typename(t), m)
-
- n = typecheck(n, ctxStmt)
- n = walkstmt(n)
-
- return n
+ return walkstmt(typecheck(n, ctxStmt))
}
// Lower n into runtime·memclr if possible, for
// in which the evaluation of a is side-effect-free.
//
// Parameters are as in walkrange: "for v1, v2 = range a".
-func arrayClear(n, v1, v2, a *Node) bool {
- if Debug.N != 0 || instrumenting {
- return false
+func arrayClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node {
+ if base.Flag.N != 0 || instrumenting {
+ return nil
}
if v1 == nil || v2 != nil {
- return false
+ return nil
}
- if n.Nbody.Len() != 1 || n.Nbody.First() == nil {
- return false
+ if loop.Body().Len() != 1 || loop.Body().First() == nil {
+ return nil
}
- stmt := n.Nbody.First() // only stmt in body
- if stmt.Op != OAS || stmt.Left.Op != OINDEX {
- return false
+ stmt1 := loop.Body().First() // only stmt in body
+ if stmt1.Op() != ir.OAS {
+ return nil
+ }
+ stmt := stmt1.(*ir.AssignStmt)
+ if stmt.Left().Op() != ir.OINDEX {
+ return nil
}
+ lhs := stmt.Left().(*ir.IndexExpr)
- if !samesafeexpr(stmt.Left.Left, a) || !samesafeexpr(stmt.Left.Right, v1) {
- return false
+ if !samesafeexpr(lhs.Left(), a) || !samesafeexpr(lhs.Right(), v1) {
+ return nil
}
- elemsize := n.Type.Elem().Width
- if elemsize <= 0 || !isZero(stmt.Right) {
- return false
+ elemsize := loop.Type().Elem().Width
+ if elemsize <= 0 || !isZero(stmt.Right()) {
+ return nil
}
// Convert to
// memclr{NoHeap,Has}Pointers(hp, hn)
// i = len(a) - 1
// }
- n.Op = OIF
-
- n.Nbody.Set(nil)
- n.Left = nod(ONE, nod(OLEN, a, nil), nodintconst(0))
+ n := ir.Nod(ir.OIF, nil, nil)
+ n.PtrBody().Set(nil)
+ n.SetLeft(ir.Nod(ir.ONE, ir.Nod(ir.OLEN, a, nil), nodintconst(0)))
// hp = &a[0]
- hp := temp(types.Types[TUNSAFEPTR])
+ hp := temp(types.Types[types.TUNSAFEPTR])
- tmp := nod(OINDEX, a, nodintconst(0))
- tmp.SetBounded(true)
- tmp = nod(OADDR, tmp, nil)
- tmp = convnop(tmp, types.Types[TUNSAFEPTR])
- n.Nbody.Append(nod(OAS, hp, tmp))
+ ix := ir.Nod(ir.OINDEX, a, nodintconst(0))
+ ix.SetBounded(true)
+ addr := convnop(nodAddr(ix), types.Types[types.TUNSAFEPTR])
+ n.PtrBody().Append(ir.Nod(ir.OAS, hp, addr))
// hn = len(a) * sizeof(elem(a))
- hn := temp(types.Types[TUINTPTR])
-
- tmp = nod(OLEN, a, nil)
- tmp = nod(OMUL, tmp, nodintconst(elemsize))
- tmp = conv(tmp, types.Types[TUINTPTR])
- n.Nbody.Append(nod(OAS, hn, tmp))
+ hn := temp(types.Types[types.TUINTPTR])
+ mul := conv(ir.Nod(ir.OMUL, ir.Nod(ir.OLEN, a, nil), nodintconst(elemsize)), types.Types[types.TUINTPTR])
+ n.PtrBody().Append(ir.Nod(ir.OAS, hn, mul))
- var fn *Node
- if a.Type.Elem().HasPointers() {
+ var fn ir.Node
+ if a.Type().Elem().HasPointers() {
// memclrHasPointers(hp, hn)
- Curfn.Func.setWBPos(stmt.Pos)
+ Curfn.SetWBPos(stmt.Pos())
fn = mkcall("memclrHasPointers", nil, nil, hp, hn)
} else {
// memclrNoHeapPointers(hp, hn)
fn = mkcall("memclrNoHeapPointers", nil, nil, hp, hn)
}
- n.Nbody.Append(fn)
+ n.PtrBody().Append(fn)
// i = len(a) - 1
- v1 = nod(OAS, v1, nod(OSUB, nod(OLEN, a, nil), nodintconst(1)))
+ v1 = ir.Nod(ir.OAS, v1, ir.Nod(ir.OSUB, ir.Nod(ir.OLEN, a, nil), nodintconst(1)))
- n.Nbody.Append(v1)
+ n.PtrBody().Append(v1)
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- typecheckslice(n.Nbody.Slice(), ctxStmt)
- n = walkstmt(n)
- return true
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ typecheckslice(n.Body().Slice(), ctxStmt)
+ return walkstmt(n)
}
// addptr returns (*T)(uintptr(p) + n).
-func addptr(p *Node, n int64) *Node {
- t := p.Type
+func addptr(p ir.Node, n int64) ir.Node {
+ t := p.Type()
- p = nod(OCONVNOP, p, nil)
- p.Type = types.Types[TUINTPTR]
+ p = ir.Nod(ir.OCONVNOP, p, nil)
+ p.SetType(types.Types[types.TUINTPTR])
- p = nod(OADD, p, nodintconst(n))
+ p = ir.Nod(ir.OADD, p, nodintconst(n))
- p = nod(OCONVNOP, p, nil)
- p.Type = t
+ p = ir.Nod(ir.OCONVNOP, p, nil)
+ p.SetType(t)
return p
}
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/gcprog"
"cmd/internal/obj"
t *types.Type
}
+func CountTabs() (numPTabs, numITabs int) {
+ return len(ptabs), len(itabs)
+}
+
// runtime interface and reflection data structures
var (
signatmu sync.Mutex // protects signatset and signatslice
func commonSize() int { return 4*Widthptr + 8 + 8 } // Sizeof(runtime._type{})
func uncommonSize(t *types.Type) int { // Sizeof(runtime.uncommontype{})
- if t.Sym == nil && len(methods(t)) == 0 {
+ if t.Sym() == nil && len(methods(t)) == 0 {
return 0
}
return 4 + 2 + 2 + 4 + 4
}
func makefield(name string, t *types.Type) *types.Field {
- f := types.NewField()
- f.Type = t
- f.Sym = (*types.Pkg)(nil).Lookup(name)
- return f
+ sym := (*types.Pkg)(nil).Lookup(name)
+ return types.NewField(src.NoXPos, sym, t)
}
// bmap makes the map bucket type given the type of the map.
return t.MapType().Bucket
}
- bucket := types.New(TSTRUCT)
keytype := t.Key()
elemtype := t.Elem()
dowidth(keytype)
field := make([]*types.Field, 0, 5)
// The first field is: uint8 topbits[BUCKETSIZE].
- arr := types.NewArray(types.Types[TUINT8], BUCKETSIZE)
+ arr := types.NewArray(types.Types[types.TUINT8], BUCKETSIZE)
field = append(field, makefield("topbits", arr))
arr = types.NewArray(keytype, BUCKETSIZE)
// Arrange for the bucket to have no pointers by changing
// the type of the overflow field to uintptr in this case.
// See comment on hmap.overflow in runtime/map.go.
- otyp := types.NewPtr(bucket)
+ otyp := types.Types[types.TUNSAFEPTR]
if !elemtype.HasPointers() && !keytype.HasPointers() {
- otyp = types.Types[TUINTPTR]
+ otyp = types.Types[types.TUINTPTR]
}
overflow := makefield("overflow", otyp)
field = append(field, overflow)
// link up fields
+ bucket := types.NewStruct(types.NoPkg, field[:])
bucket.SetNoalg(true)
- bucket.SetFields(field[:])
dowidth(bucket)
// Check invariants that map code depends on.
if !IsComparable(t.Key()) {
- Fatalf("unsupported map key type for %v", t)
+ base.Fatalf("unsupported map key type for %v", t)
}
if BUCKETSIZE < 8 {
- Fatalf("bucket size too small for proper alignment")
+ base.Fatalf("bucket size too small for proper alignment")
}
if keytype.Align > BUCKETSIZE {
- Fatalf("key align too big for %v", t)
+ base.Fatalf("key align too big for %v", t)
}
if elemtype.Align > BUCKETSIZE {
- Fatalf("elem align too big for %v", t)
+ base.Fatalf("elem align too big for %v", t)
}
if keytype.Width > MAXKEYSIZE {
- Fatalf("key size to large for %v", t)
+ base.Fatalf("key size too large for %v", t)
}
if elemtype.Width > MAXELEMSIZE {
- Fatalf("elem size to large for %v", t)
+ base.Fatalf("elem size too large for %v", t)
}
if t.Key().Width > MAXKEYSIZE && !keytype.IsPtr() {
- Fatalf("key indirect incorrect for %v", t)
+ base.Fatalf("key indirect incorrect for %v", t)
}
if t.Elem().Width > MAXELEMSIZE && !elemtype.IsPtr() {
- Fatalf("elem indirect incorrect for %v", t)
+ base.Fatalf("elem indirect incorrect for %v", t)
}
if keytype.Width%int64(keytype.Align) != 0 {
- Fatalf("key size not a multiple of key align for %v", t)
+ base.Fatalf("key size not a multiple of key align for %v", t)
}
if elemtype.Width%int64(elemtype.Align) != 0 {
- Fatalf("elem size not a multiple of elem align for %v", t)
+ base.Fatalf("elem size not a multiple of elem align for %v", t)
}
if bucket.Align%keytype.Align != 0 {
- Fatalf("bucket align not multiple of key align %v", t)
+ base.Fatalf("bucket align not multiple of key align %v", t)
}
if bucket.Align%elemtype.Align != 0 {
- Fatalf("bucket align not multiple of elem align %v", t)
+ base.Fatalf("bucket align not multiple of elem align %v", t)
}
if keys.Offset%int64(keytype.Align) != 0 {
- Fatalf("bad alignment of keys in bmap for %v", t)
+ base.Fatalf("bad alignment of keys in bmap for %v", t)
}
if elems.Offset%int64(elemtype.Align) != 0 {
- Fatalf("bad alignment of elems in bmap for %v", t)
+ base.Fatalf("bad alignment of elems in bmap for %v", t)
}
// Double-check that overflow field is final memory in struct,
// with no padding at end.
if overflow.Offset != bucket.Width-int64(Widthptr) {
- Fatalf("bad offset of overflow in bmap for %v", t)
+ base.Fatalf("bad offset of overflow in bmap for %v", t)
}
t.MapType().Bucket = bucket
// }
// must match runtime/map.go:hmap.
fields := []*types.Field{
- makefield("count", types.Types[TINT]),
- makefield("flags", types.Types[TUINT8]),
- makefield("B", types.Types[TUINT8]),
- makefield("noverflow", types.Types[TUINT16]),
- makefield("hash0", types.Types[TUINT32]), // Used in walk.go for OMAKEMAP.
- makefield("buckets", types.NewPtr(bmap)), // Used in walk.go for OMAKEMAP.
+ makefield("count", types.Types[types.TINT]),
+ makefield("flags", types.Types[types.TUINT8]),
+ makefield("B", types.Types[types.TUINT8]),
+ makefield("noverflow", types.Types[types.TUINT16]),
+ makefield("hash0", types.Types[types.TUINT32]), // Used in walk.go for OMAKEMAP.
+ makefield("buckets", types.NewPtr(bmap)), // Used in walk.go for OMAKEMAP.
makefield("oldbuckets", types.NewPtr(bmap)),
- makefield("nevacuate", types.Types[TUINTPTR]),
- makefield("extra", types.Types[TUNSAFEPTR]),
+ makefield("nevacuate", types.Types[types.TUINTPTR]),
+ makefield("extra", types.Types[types.TUNSAFEPTR]),
}
- hmap := types.New(TSTRUCT)
+ hmap := types.NewStruct(types.NoPkg, fields)
hmap.SetNoalg(true)
- hmap.SetFields(fields)
dowidth(hmap)
// The size of hmap should be 48 bytes on 64 bit
// and 28 bytes on 32 bit platforms.
if size := int64(8 + 5*Widthptr); hmap.Width != size {
- Fatalf("hmap size not correct: got %d, want %d", hmap.Width, size)
+ base.Fatalf("hmap size not correct: got %d, want %d", hmap.Width, size)
}
t.MapType().Hmap = hmap
fields := []*types.Field{
makefield("key", types.NewPtr(t.Key())), // Used in range.go for TMAP.
makefield("elem", types.NewPtr(t.Elem())), // Used in range.go for TMAP.
- makefield("t", types.Types[TUNSAFEPTR]),
+ makefield("t", types.Types[types.TUNSAFEPTR]),
makefield("h", types.NewPtr(hmap)),
makefield("buckets", types.NewPtr(bmap)),
makefield("bptr", types.NewPtr(bmap)),
- makefield("overflow", types.Types[TUNSAFEPTR]),
- makefield("oldoverflow", types.Types[TUNSAFEPTR]),
- makefield("startBucket", types.Types[TUINTPTR]),
- makefield("offset", types.Types[TUINT8]),
- makefield("wrapped", types.Types[TBOOL]),
- makefield("B", types.Types[TUINT8]),
- makefield("i", types.Types[TUINT8]),
- makefield("bucket", types.Types[TUINTPTR]),
- makefield("checkBucket", types.Types[TUINTPTR]),
+ makefield("overflow", types.Types[types.TUNSAFEPTR]),
+ makefield("oldoverflow", types.Types[types.TUNSAFEPTR]),
+ makefield("startBucket", types.Types[types.TUINTPTR]),
+ makefield("offset", types.Types[types.TUINT8]),
+ makefield("wrapped", types.Types[types.TBOOL]),
+ makefield("B", types.Types[types.TUINT8]),
+ makefield("i", types.Types[types.TUINT8]),
+ makefield("bucket", types.Types[types.TUINTPTR]),
+ makefield("checkBucket", types.Types[types.TUINTPTR]),
}
// build iterator struct holding the above fields
- hiter := types.New(TSTRUCT)
+ hiter := types.NewStruct(types.NoPkg, fields)
hiter.SetNoalg(true)
- hiter.SetFields(fields)
dowidth(hiter)
if hiter.Width != int64(12*Widthptr) {
- Fatalf("hash_iter size not correct %d %d", hiter.Width, 12*Widthptr)
+ base.Fatalf("hash_iter size not correct %d %d", hiter.Width, 12*Widthptr)
}
t.MapType().Hiter = hiter
hiter.StructType().Map = t
// stksize bytes of args.
func deferstruct(stksize int64) *types.Type {
makefield := func(name string, typ *types.Type) *types.Field {
- f := types.NewField()
- f.Type = typ
// Unlike the global makefield function, this one needs to set Pkg
// because these types might be compared (in SSA CSE sorting).
// TODO: unify this makefield and the global one above.
- f.Sym = &types.Sym{Name: name, Pkg: localpkg}
- return f
+ sym := &types.Sym{Name: name, Pkg: types.LocalPkg}
+ return types.NewField(src.NoXPos, sym, typ)
}
- argtype := types.NewArray(types.Types[TUINT8], stksize)
+ argtype := types.NewArray(types.Types[types.TUINT8], stksize)
argtype.Width = stksize
argtype.Align = 1
// These fields must match the ones in runtime/runtime2.go:_defer and
// cmd/compile/internal/gc/ssa.go:(*state).call.
fields := []*types.Field{
- makefield("siz", types.Types[TUINT32]),
- makefield("started", types.Types[TBOOL]),
- makefield("heap", types.Types[TBOOL]),
- makefield("openDefer", types.Types[TBOOL]),
- makefield("sp", types.Types[TUINTPTR]),
- makefield("pc", types.Types[TUINTPTR]),
+ makefield("siz", types.Types[types.TUINT32]),
+ makefield("started", types.Types[types.TBOOL]),
+ makefield("heap", types.Types[types.TBOOL]),
+ makefield("openDefer", types.Types[types.TBOOL]),
+ makefield("sp", types.Types[types.TUINTPTR]),
+ makefield("pc", types.Types[types.TUINTPTR]),
// Note: the types here don't really matter. Defer structures
// are always scanned explicitly during stack copying and GC,
// so we make them uintptr type even though they are real pointers.
- makefield("fn", types.Types[TUINTPTR]),
- makefield("_panic", types.Types[TUINTPTR]),
- makefield("link", types.Types[TUINTPTR]),
- makefield("framepc", types.Types[TUINTPTR]),
- makefield("varp", types.Types[TUINTPTR]),
- makefield("fd", types.Types[TUINTPTR]),
+ makefield("fn", types.Types[types.TUINTPTR]),
+ makefield("_panic", types.Types[types.TUINTPTR]),
+ makefield("link", types.Types[types.TUINTPTR]),
+ makefield("framepc", types.Types[types.TUINTPTR]),
+ makefield("varp", types.Types[types.TUINTPTR]),
+ makefield("fd", types.Types[types.TUINTPTR]),
makefield("args", argtype),
}
// build struct holding the above fields
- s := types.New(TSTRUCT)
+ s := types.NewStruct(types.NoPkg, fields)
s.SetNoalg(true)
- s.SetFields(fields)
- s.Width = widstruct(s, s, 0, 1)
- s.Align = uint8(Widthptr)
+ CalcStructSize(s)
return s
}
if receiver != nil {
inLen++
}
- in := make([]*Node, 0, inLen)
+ in := make([]*ir.Field, 0, inLen)
if receiver != nil {
d := anonfield(receiver)
for _, t := range f.Params().Fields().Slice() {
d := anonfield(t.Type)
- d.SetIsDDD(t.IsDDD())
+ d.IsDDD = t.IsDDD()
in = append(in, d)
}
outLen := f.Results().Fields().Len()
- out := make([]*Node, 0, outLen)
+ out := make([]*ir.Field, 0, outLen)
for _, t := range f.Results().Fields().Slice() {
d := anonfield(t.Type)
out = append(out, d)
}
- t := functype(nil, in, out)
- if f.Nname() != nil {
- // Link to name of original method function.
- t.SetNname(f.Nname())
- }
-
- return t
+ return functype(nil, in, out)
}
// methods returns the methods of the non-interface type t, sorted by name.
var ms []*Sig
for _, f := range mt.AllMethods().Slice() {
if !f.IsMethod() {
- Fatalf("non-method on %v method %v %v\n", mt, f.Sym, f)
+ base.Fatalf("non-method on %v method %v %v\n", mt, f.Sym, f)
}
if f.Type.Recv() == nil {
- Fatalf("receiver with no type on %v method %v %v\n", mt, f.Sym, f)
+ base.Fatalf("receiver with no type on %v method %v %v\n", mt, f.Sym, f)
}
if f.Nointerface() {
continue
func imethods(t *types.Type) []*Sig {
var methods []*Sig
for _, f := range t.Fields().Slice() {
- if f.Type.Etype != TFUNC || f.Sym == nil {
+ if f.Type.Kind() != types.TFUNC || f.Sym == nil {
continue
}
if f.Sym.IsBlank() {
- Fatalf("unexpected blank symbol in interface method set")
+ base.Fatalf("unexpected blank symbol in interface method set")
}
if n := len(methods); n > 0 {
last := methods[n-1]
if !last.name.Less(f.Sym) {
- Fatalf("sigcmp vs sortinter %v %v", last.name, f.Sym)
+ base.Fatalf("sigcmp vs sortinter %v %v", last.name, f.Sym)
}
}
// If we are compiling the runtime package, there are two runtime packages around
// -- localpkg and Runtimepkg. We don't want to produce import path symbols for
// both of them, so just produce one for localpkg.
- if myimportpath == "runtime" && p == Runtimepkg {
+ if base.Ctxt.Pkgpath == "runtime" && p == Runtimepkg {
return
}
str := p.Path
- if p == localpkg {
+ if p == types.LocalPkg {
// Note: myimportpath != "", or else dgopkgpath won't call dimportpath.
- str = myimportpath
+ str = base.Ctxt.Pkgpath
}
- s := Ctxt.Lookup("type..importpath." + p.Prefix + ".")
+ s := base.Ctxt.Lookup("type..importpath." + p.Prefix + ".")
ot := dnameData(s, 0, str, "", nil, false)
ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA)
s.Set(obj.AttrContentAddressable, true)
return duintptr(s, ot, 0)
}
- if pkg == localpkg && myimportpath == "" {
+ if pkg == types.LocalPkg && base.Ctxt.Pkgpath == "" {
// If we don't know the full import path of the package being compiled
// (i.e. -p was not passed on the compiler command line), emit a reference to
// type..importpath.""., which the linker will rewrite using the correct import path.
// Every package that imports this one directly defines the symbol.
// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
- ns := Ctxt.Lookup(`type..importpath."".`)
+ ns := base.Ctxt.Lookup(`type..importpath."".`)
return dsymptr(s, ot, ns, 0)
}
if pkg == nil {
return duint32(s, ot, 0)
}
- if pkg == localpkg && myimportpath == "" {
+ if pkg == types.LocalPkg && base.Ctxt.Pkgpath == "" {
// If we don't know the full import path of the package being compiled
// (i.e. -p was not passed on the compiler command line), emit a reference to
// type..importpath.""., which the linker will rewrite using the correct import path.
// Every package that imports this one directly defines the symbol.
// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
- ns := Ctxt.Lookup(`type..importpath."".`)
+ ns := base.Ctxt.Lookup(`type..importpath."".`)
return dsymptrOff(s, ot, ns)
}
// dnameField dumps a reflect.name for a struct field.
func dnameField(lsym *obj.LSym, ot int, spkg *types.Pkg, ft *types.Field) int {
if !types.IsExported(ft.Sym.Name) && ft.Sym.Pkg != spkg {
- Fatalf("package mismatch for %v", ft.Sym)
+ base.Fatalf("package mismatch for %v", ft.Sym)
}
nsym := dname(ft.Sym.Name, ft.Note, nil, types.IsExported(ft.Sym.Name))
return dsymptr(lsym, ot, nsym, 0)
// dnameData writes the contents of a reflect.name into s at offset ot.
func dnameData(s *obj.LSym, ot int, name, tag string, pkg *types.Pkg, exported bool) int {
if len(name) > 1<<16-1 {
- Fatalf("name too long: %s", name)
+ base.Fatalf("name too long: %s", name)
}
if len(tag) > 1<<16-1 {
- Fatalf("tag too long: %s", tag)
+ base.Fatalf("tag too long: %s", tag)
}
// Encode name and tag. See reflect/type.go for details.
copy(tb[2:], tag)
}
- ot = int(s.WriteBytes(Ctxt, int64(ot), b))
+ ot = int(s.WriteBytes(base.Ctxt, int64(ot), b))
if pkg != nil {
ot = dgopkgpathOff(s, ot, pkg)
sname = fmt.Sprintf(`%s"".%d`, sname, dnameCount)
dnameCount++
}
- s := Ctxt.Lookup(sname)
+ s := base.Ctxt.Lookup(sname)
if len(s.P) > 0 {
return s
}
// backing array of the []method field is written (by dextratypeData).
func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int {
m := methods(t)
- if t.Sym == nil && len(m) == 0 {
+ if t.Sym() == nil && len(m) == 0 {
return ot
}
noff := int(Rnd(int64(ot), int64(Widthptr)))
if noff != ot {
- Fatalf("unexpected alignment in dextratype for %v", t)
+ base.Fatalf("unexpected alignment in dextratype for %v", t)
}
for _, a := range m {
dataAdd += uncommonSize(t)
mcount := len(m)
if mcount != int(uint16(mcount)) {
- Fatalf("too many methods on %v: %d", t, mcount)
+ base.Fatalf("too many methods on %v: %d", t, mcount)
}
xcount := sort.Search(mcount, func(i int) bool { return !types.IsExported(m[i].name.Name) })
if dataAdd != int(uint32(dataAdd)) {
- Fatalf("methods are too far away on %v: %d", t, dataAdd)
+ base.Fatalf("methods are too far away on %v: %d", t, dataAdd)
}
ot = duint16(lsym, ot, uint16(mcount))
}
func typePkg(t *types.Type) *types.Pkg {
- tsym := t.Sym
+ tsym := t.Sym()
if tsym == nil {
- switch t.Etype {
- case TARRAY, TSLICE, TPTR, TCHAN:
+ switch t.Kind() {
+ case types.TARRAY, types.TSLICE, types.TPTR, types.TCHAN:
if t.Elem() != nil {
- tsym = t.Elem().Sym
+ tsym = t.Elem().Sym()
}
}
}
- if tsym != nil && t != types.Types[t.Etype] && t != types.Errortype {
+ if tsym != nil && t != types.Types[t.Kind()] && t != types.ErrorType {
return tsym.Pkg
}
return nil
}
var kinds = []int{
- TINT: objabi.KindInt,
- TUINT: objabi.KindUint,
- TINT8: objabi.KindInt8,
- TUINT8: objabi.KindUint8,
- TINT16: objabi.KindInt16,
- TUINT16: objabi.KindUint16,
- TINT32: objabi.KindInt32,
- TUINT32: objabi.KindUint32,
- TINT64: objabi.KindInt64,
- TUINT64: objabi.KindUint64,
- TUINTPTR: objabi.KindUintptr,
- TFLOAT32: objabi.KindFloat32,
- TFLOAT64: objabi.KindFloat64,
- TBOOL: objabi.KindBool,
- TSTRING: objabi.KindString,
- TPTR: objabi.KindPtr,
- TSTRUCT: objabi.KindStruct,
- TINTER: objabi.KindInterface,
- TCHAN: objabi.KindChan,
- TMAP: objabi.KindMap,
- TARRAY: objabi.KindArray,
- TSLICE: objabi.KindSlice,
- TFUNC: objabi.KindFunc,
- TCOMPLEX64: objabi.KindComplex64,
- TCOMPLEX128: objabi.KindComplex128,
- TUNSAFEPTR: objabi.KindUnsafePointer,
+ types.TINT: objabi.KindInt,
+ types.TUINT: objabi.KindUint,
+ types.TINT8: objabi.KindInt8,
+ types.TUINT8: objabi.KindUint8,
+ types.TINT16: objabi.KindInt16,
+ types.TUINT16: objabi.KindUint16,
+ types.TINT32: objabi.KindInt32,
+ types.TUINT32: objabi.KindUint32,
+ types.TINT64: objabi.KindInt64,
+ types.TUINT64: objabi.KindUint64,
+ types.TUINTPTR: objabi.KindUintptr,
+ types.TFLOAT32: objabi.KindFloat32,
+ types.TFLOAT64: objabi.KindFloat64,
+ types.TBOOL: objabi.KindBool,
+ types.TSTRING: objabi.KindString,
+ types.TPTR: objabi.KindPtr,
+ types.TSTRUCT: objabi.KindStruct,
+ types.TINTER: objabi.KindInterface,
+ types.TCHAN: objabi.KindChan,
+ types.TMAP: objabi.KindMap,
+ types.TARRAY: objabi.KindArray,
+ types.TSLICE: objabi.KindSlice,
+ types.TFUNC: objabi.KindFunc,
+ types.TCOMPLEX64: objabi.KindComplex64,
+ types.TCOMPLEX128: objabi.KindComplex128,
+ types.TUNSAFEPTR: objabi.KindUnsafePointer,
}
// typeptrdata returns the length in bytes of the prefix of t
return 0
}
- switch t.Etype {
- case TPTR,
- TUNSAFEPTR,
- TFUNC,
- TCHAN,
- TMAP:
+ switch t.Kind() {
+ case types.TPTR,
+ types.TUNSAFEPTR,
+ types.TFUNC,
+ types.TCHAN,
+ types.TMAP:
return int64(Widthptr)
- case TSTRING:
+ case types.TSTRING:
// struct { byte *str; intgo len; }
return int64(Widthptr)
- case TINTER:
+ case types.TINTER:
// struct { Itab *tab; void *data; } or
// struct { Type *type; void *data; }
// Note: see comment in plive.go:onebitwalktype1.
return 2 * int64(Widthptr)
- case TSLICE:
+ case types.TSLICE:
// struct { byte *array; uintgo len; uintgo cap; }
return int64(Widthptr)
- case TARRAY:
+ case types.TARRAY:
// haspointers already eliminated t.NumElem() == 0.
return (t.NumElem()-1)*t.Elem().Width + typeptrdata(t.Elem())
- case TSTRUCT:
+ case types.TSTRUCT:
// Find the last field that has pointers.
var lastPtrField *types.Field
for _, t1 := range t.Fields().Slice() {
return lastPtrField.Offset + typeptrdata(lastPtrField.Type)
default:
- Fatalf("typeptrdata: unexpected type, %v", t)
+ base.Fatalf("typeptrdata: unexpected type, %v", t)
return 0
}
}
var sptr *obj.LSym
if !t.IsPtr() || t.IsPtrElem() {
tptr := types.NewPtr(t)
- if t.Sym != nil || methods(tptr) != nil {
+ if t.Sym() != nil || methods(tptr) != nil {
sptrWeak = false
}
sptr = dtypesym(tptr)
if uncommonSize(t) != 0 {
tflag |= tflagUncommon
}
- if t.Sym != nil && t.Sym.Name != "" {
+ if t.Sym() != nil && t.Sym().Name != "" {
tflag |= tflagNamed
}
if IsRegularMemory(t) {
if !strings.HasPrefix(p, "*") {
p = "*" + p
tflag |= tflagExtraStar
- if t.Sym != nil {
- exported = types.IsExported(t.Sym.Name)
+ if t.Sym() != nil {
+ exported = types.IsExported(t.Sym().Name)
}
} else {
- if t.Elem() != nil && t.Elem().Sym != nil {
- exported = types.IsExported(t.Elem().Sym.Name)
+ if t.Elem() != nil && t.Elem().Sym() != nil {
+ exported = types.IsExported(t.Elem().Sym().Name)
}
}
i = 1
}
if i&(i-1) != 0 {
- Fatalf("invalid alignment %d for %v", t.Align, t)
+ base.Fatalf("invalid alignment %d for %v", t.Align, t)
}
ot = duint8(lsym, ot, t.Align) // align
ot = duint8(lsym, ot, t.Align) // fieldAlign
- i = kinds[t.Etype]
+ i = kinds[t.Kind()]
if isdirectiface(t) {
i |= objabi.KindDirectIface
}
func typenamesym(t *types.Type) *types.Sym {
if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() {
- Fatalf("typenamesym %v", t)
+ base.Fatalf("typenamesym %v", t)
}
s := typesym(t)
signatmu.Lock()
return s
}
-func typename(t *types.Type) *Node {
+func typename(t *types.Type) *ir.AddrExpr {
s := typenamesym(t)
if s.Def == nil {
- n := newnamel(src.NoXPos, s)
- n.Type = types.Types[TUINT8]
- n.SetClass(PEXTERN)
+ n := ir.NewNameAt(src.NoXPos, s)
+ n.SetType(types.Types[types.TUINT8])
+ n.SetClass(ir.PEXTERN)
n.SetTypecheck(1)
- s.Def = asTypesNode(n)
+ s.Def = n
}
- n := nod(OADDR, asNode(s.Def), nil)
- n.Type = types.NewPtr(asNode(s.Def).Type)
+ n := nodAddr(ir.AsNode(s.Def))
+ n.SetType(types.NewPtr(s.Def.Type()))
n.SetTypecheck(1)
return n
}
-func itabname(t, itype *types.Type) *Node {
+func itabname(t, itype *types.Type) *ir.AddrExpr {
if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() {
- Fatalf("itabname(%v, %v)", t, itype)
+ base.Fatalf("itabname(%v, %v)", t, itype)
}
s := itabpkg.Lookup(t.ShortString() + "," + itype.ShortString())
if s.Def == nil {
- n := newname(s)
- n.Type = types.Types[TUINT8]
- n.SetClass(PEXTERN)
+ n := NewName(s)
+ n.SetType(types.Types[types.TUINT8])
+ n.SetClass(ir.PEXTERN)
n.SetTypecheck(1)
- s.Def = asTypesNode(n)
+ s.Def = n
itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: s.Linksym()})
}
- n := nod(OADDR, asNode(s.Def), nil)
- n.Type = types.NewPtr(asNode(s.Def).Type)
+ n := nodAddr(ir.AsNode(s.Def))
+ n.SetType(types.NewPtr(s.Def.Type()))
n.SetTypecheck(1)
return n
}
// isreflexive reports whether t has a reflexive equality operator.
// That is, if x==x for all x of type t.
func isreflexive(t *types.Type) bool {
- switch t.Etype {
- case TBOOL,
- TINT,
- TUINT,
- TINT8,
- TUINT8,
- TINT16,
- TUINT16,
- TINT32,
- TUINT32,
- TINT64,
- TUINT64,
- TUINTPTR,
- TPTR,
- TUNSAFEPTR,
- TSTRING,
- TCHAN:
+ switch t.Kind() {
+ case types.TBOOL,
+ types.TINT,
+ types.TUINT,
+ types.TINT8,
+ types.TUINT8,
+ types.TINT16,
+ types.TUINT16,
+ types.TINT32,
+ types.TUINT32,
+ types.TINT64,
+ types.TUINT64,
+ types.TUINTPTR,
+ types.TPTR,
+ types.TUNSAFEPTR,
+ types.TSTRING,
+ types.TCHAN:
return true
- case TFLOAT32,
- TFLOAT64,
- TCOMPLEX64,
- TCOMPLEX128,
- TINTER:
+ case types.TFLOAT32,
+ types.TFLOAT64,
+ types.TCOMPLEX64,
+ types.TCOMPLEX128,
+ types.TINTER:
return false
- case TARRAY:
+ case types.TARRAY:
return isreflexive(t.Elem())
- case TSTRUCT:
+ case types.TSTRUCT:
for _, t1 := range t.Fields().Slice() {
if !isreflexive(t1.Type) {
return false
return true
default:
- Fatalf("bad type for map key: %v", t)
+ base.Fatalf("bad type for map key: %v", t)
return false
}
}
// needkeyupdate reports whether map updates with t as a key
// need the key to be updated.
func needkeyupdate(t *types.Type) bool {
- switch t.Etype {
- case TBOOL, TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32,
- TINT64, TUINT64, TUINTPTR, TPTR, TUNSAFEPTR, TCHAN:
+ switch t.Kind() {
+ case types.TBOOL, types.TINT, types.TUINT, types.TINT8, types.TUINT8, types.TINT16, types.TUINT16, types.TINT32, types.TUINT32,
+ types.TINT64, types.TUINT64, types.TUINTPTR, types.TPTR, types.TUNSAFEPTR, types.TCHAN:
return false
- case TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, // floats and complex can be +0/-0
- TINTER,
- TSTRING: // strings might have smaller backing stores
+ case types.TFLOAT32, types.TFLOAT64, types.TCOMPLEX64, types.TCOMPLEX128, // floats and complex can be +0/-0
+ types.TINTER,
+ types.TSTRING: // strings might have smaller backing stores
return true
- case TARRAY:
+ case types.TARRAY:
return needkeyupdate(t.Elem())
- case TSTRUCT:
+ case types.TSTRUCT:
for _, t1 := range t.Fields().Slice() {
if needkeyupdate(t1.Type) {
return true
return false
default:
- Fatalf("bad type for map key: %v", t)
+ base.Fatalf("bad type for map key: %v", t)
return true
}
}
// hashMightPanic reports whether the hash of a map key of type t might panic.
func hashMightPanic(t *types.Type) bool {
- switch t.Etype {
- case TINTER:
+ switch t.Kind() {
+ case types.TINTER:
return true
- case TARRAY:
+ case types.TARRAY:
return hashMightPanic(t.Elem())
- case TSTRUCT:
+ case types.TSTRUCT:
for _, t1 := range t.Fields().Slice() {
if hashMightPanic(t1.Type) {
return true
// They've been separate internally to make error messages
// better, but we have to merge them in the reflect tables.
func formalType(t *types.Type) *types.Type {
- if t == types.Bytetype || t == types.Runetype {
- return types.Types[t.Etype]
+ if t == types.ByteType || t == types.RuneType {
+ return types.Types[t.Kind()]
}
return t
}
func dtypesym(t *types.Type) *obj.LSym {
t = formalType(t)
if t.IsUntyped() {
- Fatalf("dtypesym %v", t)
+ base.Fatalf("dtypesym %v", t)
}
s := typesym(t)
// emit the type structures for int, float, etc.
tbase := t
- if t.IsPtr() && t.Sym == nil && t.Elem().Sym != nil {
+ if t.IsPtr() && t.Sym() == nil && t.Elem().Sym() != nil {
tbase = t.Elem()
}
dupok := 0
- if tbase.Sym == nil {
+ if tbase.Sym() == nil {
dupok = obj.DUPOK
}
- if myimportpath != "runtime" || (tbase != types.Types[tbase.Etype] && tbase != types.Bytetype && tbase != types.Runetype && tbase != types.Errortype) { // int, float, etc
+ if base.Ctxt.Pkgpath != "runtime" || (tbase != types.Types[tbase.Kind()] && tbase != types.ByteType && tbase != types.RuneType && tbase != types.ErrorType) { // int, float, etc
// named types from other files are defined only by those files
- if tbase.Sym != nil && tbase.Sym.Pkg != localpkg {
- if i, ok := typeSymIdx[tbase]; ok {
- lsym.Pkg = tbase.Sym.Pkg.Prefix
- if t != tbase {
- lsym.SymIdx = int32(i[1])
- } else {
- lsym.SymIdx = int32(i[0])
- }
+ if tbase.Sym() != nil && tbase.Sym().Pkg != types.LocalPkg {
+ if i := BaseTypeIndex(t); i >= 0 {
+ lsym.Pkg = tbase.Sym().Pkg.Prefix
+ lsym.SymIdx = int32(i)
lsym.Set(obj.AttrIndexed, true)
}
return lsym
}
// TODO(mdempsky): Investigate whether this can happen.
- if tbase.Etype == TFORW {
+ if tbase.Kind() == types.TFORW {
return lsym
}
}
ot := 0
- switch t.Etype {
+ switch t.Kind() {
default:
ot = dcommontype(lsym, t)
ot = dextratype(lsym, ot, t, 0)
- case TARRAY:
+ case types.TARRAY:
// ../../../../runtime/type.go:/arrayType
s1 := dtypesym(t.Elem())
t2 := types.NewSlice(t.Elem())
ot = duintptr(lsym, ot, uint64(t.NumElem()))
ot = dextratype(lsym, ot, t, 0)
- case TSLICE:
+ case types.TSLICE:
// ../../../../runtime/type.go:/sliceType
s1 := dtypesym(t.Elem())
ot = dcommontype(lsym, t)
ot = dsymptr(lsym, ot, s1, 0)
ot = dextratype(lsym, ot, t, 0)
- case TCHAN:
+ case types.TCHAN:
// ../../../../runtime/type.go:/chanType
s1 := dtypesym(t.Elem())
ot = dcommontype(lsym, t)
ot = duintptr(lsym, ot, uint64(t.ChanDir()))
ot = dextratype(lsym, ot, t, 0)
- case TFUNC:
+ case types.TFUNC:
for _, t1 := range t.Recvs().Fields().Slice() {
dtypesym(t1.Type)
}
ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)
}
- case TINTER:
+ case types.TINTER:
m := imethods(t)
n := len(m)
for _, a := range m {
ot = dcommontype(lsym, t)
var tpkg *types.Pkg
- if t.Sym != nil && t != types.Types[t.Etype] && t != types.Errortype {
- tpkg = t.Sym.Pkg
+ if t.Sym() != nil && t != types.Types[t.Kind()] && t != types.ErrorType {
+ tpkg = t.Sym().Pkg
}
ot = dgopkgpath(lsym, ot, tpkg)
}
// ../../../../runtime/type.go:/mapType
- case TMAP:
+ case types.TMAP:
s1 := dtypesym(t.Key())
s2 := dtypesym(t.Elem())
s3 := dtypesym(bmap(t))
ot = duint32(lsym, ot, flags)
ot = dextratype(lsym, ot, t, 0)
- case TPTR:
- if t.Elem().Etype == TANY {
+ case types.TPTR:
+ if t.Elem().Kind() == types.TANY {
// ../../../../runtime/type.go:/UnsafePointerType
ot = dcommontype(lsym, t)
ot = dextratype(lsym, ot, t, 0)
// ../../../../runtime/type.go:/structType
// for security, only the exported fields.
- case TSTRUCT:
+ case types.TSTRUCT:
fields := t.Fields().Slice()
for _, t1 := range fields {
dtypesym(t1.Type)
ot = dsymptr(lsym, ot, dtypesym(f.Type), 0)
offsetAnon := uint64(f.Offset) << 1
if offsetAnon>>1 != uint64(f.Offset) {
- Fatalf("%v: bad field offset for %s", t, f.Sym.Name)
+ base.Fatalf("%v: bad field offset for %s", t, f.Sym.Name)
}
if f.Embedded != 0 {
offsetAnon |= 1
//
// When buildmode=shared, all types are in typelinks so the
// runtime can deduplicate type pointers.
- keep := Ctxt.Flag_dynlink
- if !keep && t.Sym == nil {
+ keep := base.Ctxt.Flag_dynlink
+ if !keep && t.Sym() == nil {
// For an unnamed type, we only need the link if the type can
// be created at run time by reflect.PtrTo and similar
// functions. If the type exists in the program, those
// functions must return the existing type structure rather
// than creating a new one.
- switch t.Etype {
- case TPTR, TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRUCT:
+ switch t.Kind() {
+ case types.TPTR, types.TARRAY, types.TCHAN, types.TFUNC, types.TMAP, types.TSLICE, types.TSTRUCT:
keep = true
}
}
}
if len(sigs) != 0 {
- Fatalf("incomplete itab")
+ base.Fatalf("incomplete itab")
}
return out
}
}
-func addsignats(dcls []*Node) {
+func addsignats(dcls []ir.Node) {
// copy types from dcl list to signatset
for _, n := range dcls {
- if n.Op == OTYPE {
- addsignat(n.Type)
+ if n.Op() == ir.OTYPE {
+ addsignat(n.Type())
}
}
}
for _, ts := range signats {
t := ts.t
dtypesym(t)
- if t.Sym != nil {
+ if t.Sym() != nil {
dtypesym(types.NewPtr(t))
}
}
}
// process ptabs
- if localpkg.Name == "main" && len(ptabs) > 0 {
+ if types.LocalPkg.Name == "main" && len(ptabs) > 0 {
ot := 0
- s := Ctxt.Lookup("go.plugin.tabs")
+ s := base.Ctxt.Lookup("go.plugin.tabs")
for _, p := range ptabs {
// Dump ptab symbol into go.pluginsym package.
//
ggloblsym(s, int32(ot), int16(obj.RODATA))
ot = 0
- s = Ctxt.Lookup("go.plugin.exports")
+ s = base.Ctxt.Lookup("go.plugin.exports")
for _, p := range ptabs {
ot = dsymptr(s, ot, p.s.Linksym(), 0)
}
// so this is as good as any.
// another possible choice would be package main,
// but using runtime means fewer copies in object files.
- if myimportpath == "runtime" {
- for i := types.EType(1); i <= TBOOL; i++ {
+ if base.Ctxt.Pkgpath == "runtime" {
+ for i := types.Kind(1); i <= types.TBOOL; i++ {
dtypesym(types.NewPtr(types.Types[i]))
}
- dtypesym(types.NewPtr(types.Types[TSTRING]))
- dtypesym(types.NewPtr(types.Types[TUNSAFEPTR]))
+ dtypesym(types.NewPtr(types.Types[types.TSTRING]))
+ dtypesym(types.NewPtr(types.Types[types.TUNSAFEPTR]))
// emit type structs for error and func(error) string.
// The latter is the type of an auto-generated wrapper.
- dtypesym(types.NewPtr(types.Errortype))
+ dtypesym(types.NewPtr(types.ErrorType))
- dtypesym(functype(nil, []*Node{anonfield(types.Errortype)}, []*Node{anonfield(types.Types[TSTRING])}))
+ dtypesym(functype(nil, []*ir.Field{anonfield(types.ErrorType)}, []*ir.Field{anonfield(types.Types[types.TSTRING])}))
// add paths for runtime and main, which 6l imports implicitly.
dimportpath(Runtimepkg)
- if flag_race {
+ if base.Flag.Race {
dimportpath(racepkg)
}
- if flag_msan {
+ if base.Flag.MSan {
dimportpath(msanpkg)
}
dimportpath(types.NewPkg("main", ""))
// will be equal for the above checks, but different in DWARF output.
// Sort by source position to ensure deterministic order.
// See issues 27013 and 30202.
- if a[i].t.Etype == types.TINTER && a[i].t.Methods().Len() > 0 {
+ if a[i].t.Kind() == types.TINTER && a[i].t.Methods().Len() > 0 {
return a[i].t.Methods().Index(0).Pos.Before(a[j].t.Methods().Index(0).Pos)
}
return false
// For non-trivial arrays, the program describes the full t.Width size.
func dgcprog(t *types.Type) (*obj.LSym, int64) {
dowidth(t)
- if t.Width == BADWIDTH {
- Fatalf("dgcprog: %v badwidth", t)
+ if t.Width == types.BADWIDTH {
+ base.Fatalf("dgcprog: %v badwidth", t)
}
lsym := typesymprefix(".gcprog", t).Linksym()
var p GCProg
offset := p.w.BitIndex() * int64(Widthptr)
p.end()
if ptrdata := typeptrdata(t); offset < ptrdata || offset > t.Width {
- Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Width)
+ base.Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Width)
}
return lsym, offset
}
w gcprog.Writer
}
-var Debug_gcprog int // set by -d gcprog
-
func (p *GCProg) init(lsym *obj.LSym) {
p.lsym = lsym
p.symoff = 4 // first 4 bytes hold program length
p.w.Init(p.writeByte)
- if Debug_gcprog > 0 {
+ if base.Debug.GCProg > 0 {
fmt.Fprintf(os.Stderr, "compile: start GCProg for %v\n", lsym)
p.w.Debug(os.Stderr)
}
p.w.End()
duint32(p.lsym, 0, uint32(p.symoff-4))
ggloblsym(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL)
- if Debug_gcprog > 0 {
+ if base.Debug.GCProg > 0 {
fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.lsym)
}
}
p.w.Ptr(offset / int64(Widthptr))
return
}
- switch t.Etype {
+ switch t.Kind() {
default:
- Fatalf("GCProg.emit: unexpected type %v", t)
+ base.Fatalf("GCProg.emit: unexpected type %v", t)
- case TSTRING:
+ case types.TSTRING:
p.w.Ptr(offset / int64(Widthptr))
- case TINTER:
+ case types.TINTER:
// Note: the first word isn't a pointer. See comment in plive.go:onebitwalktype1.
p.w.Ptr(offset/int64(Widthptr) + 1)
- case TSLICE:
+ case types.TSLICE:
p.w.Ptr(offset / int64(Widthptr))
- case TARRAY:
+ case types.TARRAY:
if t.NumElem() == 0 {
// should have been handled by haspointers check above
- Fatalf("GCProg.emit: empty array")
+ base.Fatalf("GCProg.emit: empty array")
}
// Flatten array-of-array-of-array to just a big array by multiplying counts.
p.w.ZeroUntil((offset + elem.Width) / int64(Widthptr))
p.w.Repeat(elem.Width/int64(Widthptr), count-1)
- case TSTRUCT:
+ case types.TSTRUCT:
for _, t1 := range t.Fields().Slice() {
p.emit(t1.Type, offset+t1.Offset)
}
// zeroaddr returns the address of a symbol with at least
// size bytes of zeros.
-func zeroaddr(size int64) *Node {
+func zeroaddr(size int64) ir.Node {
if size >= 1<<31 {
- Fatalf("map elem too big %d", size)
+ base.Fatalf("map elem too big %d", size)
}
if zerosize < size {
zerosize = size
}
s := mappkg.Lookup("zero")
if s.Def == nil {
- x := newname(s)
- x.Type = types.Types[TUINT8]
- x.SetClass(PEXTERN)
+ x := NewName(s)
+ x.SetType(types.Types[types.TUINT8])
+ x.SetClass(ir.PEXTERN)
x.SetTypecheck(1)
- s.Def = asTypesNode(x)
+ s.Def = x
}
- z := nod(OADDR, asNode(s.Def), nil)
- z.Type = types.NewPtr(types.Types[TUINT8])
+ z := nodAddr(ir.AsNode(s.Def))
+ z.SetType(types.NewPtr(types.Types[types.TUINT8]))
z.SetTypecheck(1)
return z
}
package gc
+import "cmd/compile/internal/ir"
+
// Strongly connected components.
//
// Run analysis on minimal sets of mutually recursive functions
// when analyzing a set of mutually recursive functions.
type bottomUpVisitor struct {
- analyze func([]*Node, bool)
+ analyze func([]*ir.Func, bool)
visitgen uint32
- nodeID map[*Node]uint32
- stack []*Node
+ nodeID map[*ir.Func]uint32
+ stack []*ir.Func
}
// visitBottomUp invokes analyze on the ODCLFUNC nodes listed in list.
// If recursive is false, the list consists of only a single function and its closures.
// If recursive is true, the list may still contain only a single function,
// if that function is itself recursive.
-func visitBottomUp(list []*Node, analyze func(list []*Node, recursive bool)) {
+func visitBottomUp(list []ir.Node, analyze func(list []*ir.Func, recursive bool)) {
var v bottomUpVisitor
v.analyze = analyze
- v.nodeID = make(map[*Node]uint32)
+ v.nodeID = make(map[*ir.Func]uint32)
for _, n := range list {
- if n.Op == ODCLFUNC && !n.Func.IsHiddenClosure() {
- v.visit(n)
+ if n.Op() == ir.ODCLFUNC {
+ n := n.(*ir.Func)
+ if !n.Func().IsHiddenClosure() {
+ v.visit(n)
+ }
}
}
}
-func (v *bottomUpVisitor) visit(n *Node) uint32 {
+func (v *bottomUpVisitor) visit(n *ir.Func) uint32 {
if id := v.nodeID[n]; id > 0 {
// already visited
return id
min := v.visitgen
v.stack = append(v.stack, n)
- inspectList(n.Nbody, func(n *Node) bool {
- switch n.Op {
- case ONAME:
- if n.Class() == PFUNC {
- if n.isMethodExpression() {
- n = asNode(n.Type.Nname())
- }
- if n != nil && n.Name.Defn != nil {
- if m := v.visit(n.Name.Defn); m < min {
+ ir.Visit(n, func(n ir.Node) {
+ switch n.Op() {
+ case ir.ONAME:
+ if n.Class() == ir.PFUNC {
+ if n != nil && n.Name().Defn != nil {
+ if m := v.visit(n.Name().Defn.(*ir.Func)); m < min {
min = m
}
}
}
- case ODOTMETH:
- fn := asNode(n.Type.Nname())
- if fn != nil && fn.Op == ONAME && fn.Class() == PFUNC && fn.Name.Defn != nil {
- if m := v.visit(fn.Name.Defn); m < min {
+ case ir.OMETHEXPR:
+ fn := methodExprName(n)
+ if fn != nil && fn.Defn != nil {
+ if m := v.visit(fn.Defn.(*ir.Func)); m < min {
min = m
}
}
- case OCALLPART:
- fn := asNode(callpartMethod(n).Type.Nname())
- if fn != nil && fn.Op == ONAME && fn.Class() == PFUNC && fn.Name.Defn != nil {
- if m := v.visit(fn.Name.Defn); m < min {
+ case ir.ODOTMETH:
+ fn := methodExprName(n)
+ if fn != nil && fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC && fn.Defn != nil {
+ if m := v.visit(fn.Defn.(*ir.Func)); m < min {
min = m
}
}
- case OCLOSURE:
- if m := v.visit(n.Func.Closure); m < min {
+ case ir.OCALLPART:
+ fn := ir.AsNode(callpartMethod(n).Nname)
+ if fn != nil && fn.Op() == ir.ONAME {
+ if fn := fn.(*ir.Name); fn.Class() == ir.PFUNC && fn.Name().Defn != nil {
+ if m := v.visit(fn.Name().Defn.(*ir.Func)); m < min {
+ min = m
+ }
+ }
+ }
+ case ir.OCLOSURE:
+ n := n.(*ir.ClosureExpr)
+ if m := v.visit(n.Func()); m < min {
min = m
}
}
- return true
})
- if (min == id || min == id+1) && !n.Func.IsHiddenClosure() {
+ if (min == id || min == id+1) && !n.IsHiddenClosure() {
// This node is the root of a strongly connected component.
// The original min passed to visitcodelist was v.nodeID[n]+1.
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/internal/dwarf"
"cmd/internal/obj"
"cmd/internal/src"
// See golang.org/issue/20390.
func xposBefore(p, q src.XPos) bool {
- return Ctxt.PosTable.Pos(p).Before(Ctxt.PosTable.Pos(q))
+ return base.Ctxt.PosTable.Pos(p).Before(base.Ctxt.PosTable.Pos(q))
}
-func findScope(marks []Mark, pos src.XPos) ScopeID {
+func findScope(marks []ir.Mark, pos src.XPos) ir.ScopeID {
i := sort.Search(len(marks), func(i int) bool {
return xposBefore(pos, marks[i].Pos)
})
return marks[i-1].Scope
}
-func assembleScopes(fnsym *obj.LSym, fn *Node, dwarfVars []*dwarf.Var, varScopes []ScopeID) []dwarf.Scope {
+func assembleScopes(fnsym *obj.LSym, fn *ir.Func, dwarfVars []*dwarf.Var, varScopes []ir.ScopeID) []dwarf.Scope {
// Initialize the DWARF scope tree based on lexical scopes.
- dwarfScopes := make([]dwarf.Scope, 1+len(fn.Func.Parents))
- for i, parent := range fn.Func.Parents {
+ dwarfScopes := make([]dwarf.Scope, 1+len(fn.Func().Parents))
+ for i, parent := range fn.Func().Parents {
dwarfScopes[i+1].Parent = int32(parent)
}
scopeVariables(dwarfVars, varScopes, dwarfScopes)
- scopePCs(fnsym, fn.Func.Marks, dwarfScopes)
+ scopePCs(fnsym, fn.Func().Marks, dwarfScopes)
return compactScopes(dwarfScopes)
}
// scopeVariables assigns DWARF variable records to their scopes.
-func scopeVariables(dwarfVars []*dwarf.Var, varScopes []ScopeID, dwarfScopes []dwarf.Scope) {
+func scopeVariables(dwarfVars []*dwarf.Var, varScopes []ir.ScopeID, dwarfScopes []dwarf.Scope) {
sort.Stable(varsByScopeAndOffset{dwarfVars, varScopes})
i0 := 0
}
// scopePCs assigns PC ranges to their scopes.
-func scopePCs(fnsym *obj.LSym, marks []Mark, dwarfScopes []dwarf.Scope) {
+func scopePCs(fnsym *obj.LSym, marks []ir.Mark, dwarfScopes []dwarf.Scope) {
// If there aren't any child scopes (in particular, when scope
// tracking is disabled), we can skip a whole lot of work.
if len(marks) == 0 {
type varsByScopeAndOffset struct {
vars []*dwarf.Var
- scopes []ScopeID
+ scopes []ir.ScopeID
}
func (v varsByScopeAndOffset) Len() int {
package gc
-import "cmd/compile/internal/types"
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+)
// select
-func typecheckselect(sel *Node) {
- var def *Node
+func typecheckselect(sel *ir.SelectStmt) {
+ var def ir.Node
lno := setlineno(sel)
- typecheckslice(sel.Ninit.Slice(), ctxStmt)
- for _, ncase := range sel.List.Slice() {
- if ncase.Op != OCASE {
- setlineno(ncase)
- Fatalf("typecheckselect %v", ncase.Op)
- }
+ typecheckslice(sel.Init().Slice(), ctxStmt)
+ for _, ncase := range sel.List().Slice() {
+ ncase := ncase.(*ir.CaseStmt)
- if ncase.List.Len() == 0 {
+ if ncase.List().Len() == 0 {
// default
if def != nil {
- yyerrorl(ncase.Pos, "multiple defaults in select (first at %v)", def.Line())
+ base.ErrorfAt(ncase.Pos(), "multiple defaults in select (first at %v)", ir.Line(def))
} else {
def = ncase
}
- } else if ncase.List.Len() > 1 {
- yyerrorl(ncase.Pos, "select cases cannot be lists")
+ } else if ncase.List().Len() > 1 {
+ base.ErrorfAt(ncase.Pos(), "select cases cannot be lists")
} else {
- ncase.List.SetFirst(typecheck(ncase.List.First(), ctxStmt))
- n := ncase.List.First()
- ncase.Left = n
- ncase.List.Set(nil)
- switch n.Op {
+ ncase.List().SetFirst(typecheck(ncase.List().First(), ctxStmt))
+ n := ncase.List().First()
+ ncase.SetLeft(n)
+ ncase.PtrList().Set(nil)
+ oselrecv2 := func(dst, recv ir.Node, colas bool) {
+ n := ir.NodAt(n.Pos(), ir.OSELRECV2, nil, nil)
+ n.PtrList().Set2(dst, ir.BlankNode)
+ n.PtrRlist().Set1(recv)
+ n.SetColas(colas)
+ n.SetTypecheck(1)
+ ncase.SetLeft(n)
+ }
+ switch n.Op() {
default:
- pos := n.Pos
- if n.Op == ONAME {
+ pos := n.Pos()
+ if n.Op() == ir.ONAME {
// We don't have the right position for ONAME nodes (see #15459 and
// others). Using ncase.Pos for now as it will provide the correct
// line number (assuming the expression follows the "case" keyword
// on the same line). This matches the approach before 1.10.
- pos = ncase.Pos
+ pos = ncase.Pos()
}
- yyerrorl(pos, "select case must be receive, send or assign recv")
-
- // convert x = <-c into OSELRECV(x, <-c).
- // remove implicit conversions; the eventual assignment
- // will reintroduce them.
- case OAS:
- if (n.Right.Op == OCONVNOP || n.Right.Op == OCONVIFACE) && n.Right.Implicit() {
- n.Right = n.Right.Left
+ base.ErrorfAt(pos, "select case must be receive, send or assign recv")
+
+ case ir.OAS:
+ // convert x = <-c into x, _ = <-c
+ // remove implicit conversions; the eventual assignment
+ // will reintroduce them.
+ if r := n.Right(); r.Op() == ir.OCONVNOP || r.Op() == ir.OCONVIFACE {
+ if r.Implicit() {
+ n.SetRight(r.Left())
+ }
}
-
- if n.Right.Op != ORECV {
- yyerrorl(n.Pos, "select assignment must have receive on right hand side")
+ if n.Right().Op() != ir.ORECV {
+ base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side")
break
}
+ oselrecv2(n.Left(), n.Right(), n.Colas())
- n.Op = OSELRECV
-
- // convert x, ok = <-c into OSELRECV2(x, <-c) with ntest=ok
- case OAS2RECV:
- if n.Right.Op != ORECV {
- yyerrorl(n.Pos, "select assignment must have receive on right hand side")
+ case ir.OAS2RECV:
+ if n.Rlist().First().Op() != ir.ORECV {
+ base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side")
break
}
+ n.SetOp(ir.OSELRECV2)
- n.Op = OSELRECV2
- n.Left = n.List.First()
- n.List.Set1(n.List.Second())
-
- // convert <-c into OSELRECV(N, <-c)
- case ORECV:
- n = nodl(n.Pos, OSELRECV, nil, n)
+ case ir.ORECV:
+ // convert <-c into _, _ = <-c
+ oselrecv2(ir.BlankNode, n, false)
- n.SetTypecheck(1)
- ncase.Left = n
-
- case OSEND:
+ case ir.OSEND:
break
}
}
- typecheckslice(ncase.Nbody.Slice(), ctxStmt)
+ typecheckslice(ncase.Body().Slice(), ctxStmt)
}
- lineno = lno
+ base.Pos = lno
}
-func walkselect(sel *Node) {
+func walkselect(sel *ir.SelectStmt) {
lno := setlineno(sel)
- if sel.Nbody.Len() != 0 {
- Fatalf("double walkselect")
+ if sel.Body().Len() != 0 {
+ base.Fatalf("double walkselect")
}
- init := sel.Ninit.Slice()
- sel.Ninit.Set(nil)
+ init := sel.Init().Slice()
+ sel.PtrInit().Set(nil)
- init = append(init, walkselectcases(&sel.List)...)
- sel.List.Set(nil)
+ init = append(init, walkselectcases(sel.List())...)
+ sel.SetList(ir.Nodes{})
- sel.Nbody.Set(init)
- walkstmtlist(sel.Nbody.Slice())
+ sel.PtrBody().Set(init)
+ walkstmtlist(sel.Body().Slice())
- lineno = lno
+ base.Pos = lno
}
-func walkselectcases(cases *Nodes) []*Node {
+func walkselectcases(cases ir.Nodes) []ir.Node {
ncas := cases.Len()
- sellineno := lineno
+ sellineno := base.Pos
// optimization: zero-case select
if ncas == 0 {
- return []*Node{mkcall("block", nil, nil)}
+ return []ir.Node{mkcall("block", nil, nil)}
}
// optimization: one-case select: single op.
if ncas == 1 {
- cas := cases.First()
+ cas := cases.First().(*ir.CaseStmt)
setlineno(cas)
- l := cas.Ninit.Slice()
- if cas.Left != nil { // not default:
- n := cas.Left
- l = append(l, n.Ninit.Slice()...)
- n.Ninit.Set(nil)
- switch n.Op {
+ l := cas.Init().Slice()
+ if cas.Left() != nil { // not default:
+ n := cas.Left()
+ l = append(l, n.Init().Slice()...)
+ n.PtrInit().Set(nil)
+ switch n.Op() {
default:
- Fatalf("select %v", n.Op)
+ base.Fatalf("select %v", n.Op())
- case OSEND:
+ case ir.OSEND:
// already ok
- case OSELRECV, OSELRECV2:
- if n.Op == OSELRECV || n.List.Len() == 0 {
- if n.Left == nil {
- n = n.Right
- } else {
- n.Op = OAS
- }
+ case ir.OSELRECV2:
+ r := n.(*ir.AssignListStmt)
+ if ir.IsBlank(r.List().First()) && ir.IsBlank(r.List().Second()) {
+ n = r.Rlist().First()
break
}
-
- if n.Left == nil {
- nblank = typecheck(nblank, ctxExpr|ctxAssign)
- n.Left = nblank
- }
-
- n.Op = OAS2
- n.List.Prepend(n.Left)
- n.Rlist.Set1(n.Right)
- n.Right = nil
- n.Left = nil
- n.SetTypecheck(0)
- n = typecheck(n, ctxStmt)
+ r.SetOp(ir.OAS2RECV)
}
l = append(l, n)
}
- l = append(l, cas.Nbody.Slice()...)
- l = append(l, nod(OBREAK, nil, nil))
+ l = append(l, cas.Body().Slice()...)
+ l = append(l, ir.Nod(ir.OBREAK, nil, nil))
return l
}
// convert case value arguments to addresses.
// this rewrite is used by both the general code and the next optimization.
- var dflt *Node
+ var dflt *ir.CaseStmt
for _, cas := range cases.Slice() {
+ cas := cas.(*ir.CaseStmt)
setlineno(cas)
- n := cas.Left
+ n := cas.Left()
if n == nil {
dflt = cas
continue
}
- switch n.Op {
- case OSEND:
- n.Right = nod(OADDR, n.Right, nil)
- n.Right = typecheck(n.Right, ctxExpr)
-
- case OSELRECV, OSELRECV2:
- if n.Op == OSELRECV2 && n.List.Len() == 0 {
- n.Op = OSELRECV
- }
-
- if n.Left != nil {
- n.Left = nod(OADDR, n.Left, nil)
- n.Left = typecheck(n.Left, ctxExpr)
+ switch n.Op() {
+ case ir.OSEND:
+ n.SetRight(nodAddr(n.Right()))
+ n.SetRight(typecheck(n.Right(), ctxExpr))
+
+ case ir.OSELRECV2:
+ if !ir.IsBlank(n.List().First()) {
+ n.List().SetIndex(0, nodAddr(n.List().First()))
+ n.List().SetIndex(0, typecheck(n.List().First(), ctxExpr))
}
}
}
// optimization: two-case select but one is default: single non-blocking op.
if ncas == 2 && dflt != nil {
- cas := cases.First()
+ cas := cases.First().(*ir.CaseStmt)
if cas == dflt {
- cas = cases.Second()
+ cas = cases.Second().(*ir.CaseStmt)
}
- n := cas.Left
+ n := cas.Left()
setlineno(n)
- r := nod(OIF, nil, nil)
- r.Ninit.Set(cas.Ninit.Slice())
- switch n.Op {
+ r := ir.Nod(ir.OIF, nil, nil)
+ r.PtrInit().Set(cas.Init().Slice())
+ var call ir.Node
+ switch n.Op() {
default:
- Fatalf("select %v", n.Op)
+ base.Fatalf("select %v", n.Op())
- case OSEND:
+ case ir.OSEND:
// if selectnbsend(c, v) { body } else { default body }
- ch := n.Left
- r.Left = mkcall1(chanfn("selectnbsend", 2, ch.Type), types.Types[TBOOL], &r.Ninit, ch, n.Right)
-
- case OSELRECV:
- // if selectnbrecv(&v, c) { body } else { default body }
- ch := n.Right.Left
- elem := n.Left
- if elem == nil {
+ ch := n.Left()
+ call = mkcall1(chanfn("selectnbsend", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), ch, n.Right())
+
+ case ir.OSELRECV2:
+ recv := n.Rlist().First().(*ir.UnaryExpr)
+ ch := recv.Left()
+ elem := n.List().First()
+ if ir.IsBlank(elem) {
elem = nodnil()
}
- r.Left = mkcall1(chanfn("selectnbrecv", 2, ch.Type), types.Types[TBOOL], &r.Ninit, elem, ch)
-
- case OSELRECV2:
- // if selectnbrecv2(&v, &received, c) { body } else { default body }
- ch := n.Right.Left
- elem := n.Left
- if elem == nil {
- elem = nodnil()
+ if ir.IsBlank(n.List().Second()) {
+ // if selectnbrecv(&v, c) { body } else { default body }
+ call = mkcall1(chanfn("selectnbrecv", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, ch)
+ } else {
+ // TODO(cuonglm): make this use selectnbrecv()
+ // if selectnbrecv2(&v, &received, c) { body } else { default body }
+ receivedp := typecheck(nodAddr(n.List().Second()), ctxExpr)
+ call = mkcall1(chanfn("selectnbrecv2", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, receivedp, ch)
}
- receivedp := nod(OADDR, n.List.First(), nil)
- receivedp = typecheck(receivedp, ctxExpr)
- r.Left = mkcall1(chanfn("selectnbrecv2", 2, ch.Type), types.Types[TBOOL], &r.Ninit, elem, receivedp, ch)
}
- r.Left = typecheck(r.Left, ctxExpr)
- r.Nbody.Set(cas.Nbody.Slice())
- r.Rlist.Set(append(dflt.Ninit.Slice(), dflt.Nbody.Slice()...))
- return []*Node{r, nod(OBREAK, nil, nil)}
+ r.SetLeft(typecheck(call, ctxExpr))
+ r.PtrBody().Set(cas.Body().Slice())
+ r.PtrRlist().Set(append(dflt.Init().Slice(), dflt.Body().Slice()...))
+ return []ir.Node{r, ir.Nod(ir.OBREAK, nil, nil)}
}
if dflt != nil {
ncas--
}
- casorder := make([]*Node, ncas)
+ casorder := make([]*ir.CaseStmt, ncas)
nsends, nrecvs := 0, 0
- var init []*Node
+ var init []ir.Node
// generate sel-struct
- lineno = sellineno
+ base.Pos = sellineno
selv := temp(types.NewArray(scasetype(), int64(ncas)))
- r := nod(OAS, selv, nil)
- r = typecheck(r, ctxStmt)
- init = append(init, r)
+ init = append(init, typecheck(ir.Nod(ir.OAS, selv, nil), ctxStmt))
// No initialization for order; runtime.selectgo is responsible for that.
- order := temp(types.NewArray(types.Types[TUINT16], 2*int64(ncas)))
+ order := temp(types.NewArray(types.Types[types.TUINT16], 2*int64(ncas)))
- var pc0, pcs *Node
- if flag_race {
- pcs = temp(types.NewArray(types.Types[TUINTPTR], int64(ncas)))
- pc0 = typecheck(nod(OADDR, nod(OINDEX, pcs, nodintconst(0)), nil), ctxExpr)
+ var pc0, pcs ir.Node
+ if base.Flag.Race {
+ pcs = temp(types.NewArray(types.Types[types.TUINTPTR], int64(ncas)))
+ pc0 = typecheck(nodAddr(ir.Nod(ir.OINDEX, pcs, nodintconst(0))), ctxExpr)
} else {
pc0 = nodnil()
}
// register cases
for _, cas := range cases.Slice() {
+ cas := cas.(*ir.CaseStmt)
setlineno(cas)
- init = append(init, cas.Ninit.Slice()...)
- cas.Ninit.Set(nil)
+ init = append(init, cas.Init().Slice()...)
+ cas.PtrInit().Set(nil)
- n := cas.Left
+ n := cas.Left()
if n == nil { // default:
continue
}
var i int
- var c, elem *Node
- switch n.Op {
+ var c, elem ir.Node
+ switch n.Op() {
default:
- Fatalf("select %v", n.Op)
- case OSEND:
+ base.Fatalf("select %v", n.Op())
+ case ir.OSEND:
i = nsends
nsends++
- c = n.Left
- elem = n.Right
- case OSELRECV, OSELRECV2:
+ c = n.Left()
+ elem = n.Right()
+ case ir.OSELRECV2:
nrecvs++
i = ncas - nrecvs
- c = n.Right.Left
- elem = n.Left
+ recv := n.Rlist().First().(*ir.UnaryExpr)
+ c = recv.Left()
+ elem = n.List().First()
}
casorder[i] = cas
- setField := func(f string, val *Node) {
- r := nod(OAS, nodSym(ODOT, nod(OINDEX, selv, nodintconst(int64(i))), lookup(f)), val)
- r = typecheck(r, ctxStmt)
- init = append(init, r)
+ setField := func(f string, val ir.Node) {
+ r := ir.Nod(ir.OAS, nodSym(ir.ODOT, ir.Nod(ir.OINDEX, selv, nodintconst(int64(i))), lookup(f)), val)
+ init = append(init, typecheck(r, ctxStmt))
}
- c = convnop(c, types.Types[TUNSAFEPTR])
+ c = convnop(c, types.Types[types.TUNSAFEPTR])
setField("c", c)
- if elem != nil {
- elem = convnop(elem, types.Types[TUNSAFEPTR])
+ if !ir.IsBlank(elem) {
+ elem = convnop(elem, types.Types[types.TUNSAFEPTR])
setField("elem", elem)
}
// TODO(mdempsky): There should be a cleaner way to
// handle this.
- if flag_race {
- r = mkcall("selectsetpc", nil, nil, nod(OADDR, nod(OINDEX, pcs, nodintconst(int64(i))), nil))
+ if base.Flag.Race {
+ r := mkcall("selectsetpc", nil, nil, nodAddr(ir.Nod(ir.OINDEX, pcs, nodintconst(int64(i)))))
init = append(init, r)
}
}
if nsends+nrecvs != ncas {
- Fatalf("walkselectcases: miscount: %v + %v != %v", nsends, nrecvs, ncas)
+ base.Fatalf("walkselectcases: miscount: %v + %v != %v", nsends, nrecvs, ncas)
}
// run the select
- lineno = sellineno
- chosen := temp(types.Types[TINT])
- recvOK := temp(types.Types[TBOOL])
- r = nod(OAS2, nil, nil)
- r.List.Set2(chosen, recvOK)
+ base.Pos = sellineno
+ chosen := temp(types.Types[types.TINT])
+ recvOK := temp(types.Types[types.TBOOL])
+ r := ir.Nod(ir.OAS2, nil, nil)
+ r.PtrList().Set2(chosen, recvOK)
fn := syslook("selectgo")
- r.Rlist.Set1(mkcall1(fn, fn.Type.Results(), nil, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, nodintconst(int64(nsends)), nodintconst(int64(nrecvs)), nodbool(dflt == nil)))
- r = typecheck(r, ctxStmt)
- init = append(init, r)
+ r.PtrRlist().Set1(mkcall1(fn, fn.Type().Results(), nil, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, nodintconst(int64(nsends)), nodintconst(int64(nrecvs)), nodbool(dflt == nil)))
+ init = append(init, typecheck(r, ctxStmt))
// selv and order are no longer alive after selectgo.
- init = append(init, nod(OVARKILL, selv, nil))
- init = append(init, nod(OVARKILL, order, nil))
- if flag_race {
- init = append(init, nod(OVARKILL, pcs, nil))
+ init = append(init, ir.Nod(ir.OVARKILL, selv, nil))
+ init = append(init, ir.Nod(ir.OVARKILL, order, nil))
+ if base.Flag.Race {
+ init = append(init, ir.Nod(ir.OVARKILL, pcs, nil))
}
// dispatch cases
- dispatch := func(cond, cas *Node) {
+ dispatch := func(cond ir.Node, cas *ir.CaseStmt) {
cond = typecheck(cond, ctxExpr)
cond = defaultlit(cond, nil)
- r := nod(OIF, cond, nil)
+ r := ir.Nod(ir.OIF, cond, nil)
- if n := cas.Left; n != nil && n.Op == OSELRECV2 {
- x := nod(OAS, n.List.First(), recvOK)
- x = typecheck(x, ctxStmt)
- r.Nbody.Append(x)
+ if n := cas.Left(); n != nil && n.Op() == ir.OSELRECV2 {
+ if !ir.IsBlank(n.List().Second()) {
+ x := ir.Nod(ir.OAS, n.List().Second(), recvOK)
+ r.PtrBody().Append(typecheck(x, ctxStmt))
+ }
}
- r.Nbody.AppendNodes(&cas.Nbody)
- r.Nbody.Append(nod(OBREAK, nil, nil))
+ r.PtrBody().AppendNodes(cas.PtrBody())
+ r.PtrBody().Append(ir.Nod(ir.OBREAK, nil, nil))
init = append(init, r)
}
if dflt != nil {
setlineno(dflt)
- dispatch(nod(OLT, chosen, nodintconst(0)), dflt)
+ dispatch(ir.Nod(ir.OLT, chosen, nodintconst(0)), dflt)
}
for i, cas := range casorder {
setlineno(cas)
- dispatch(nod(OEQ, chosen, nodintconst(int64(i))), cas)
+ dispatch(ir.Nod(ir.OEQ, chosen, nodintconst(int64(i))), cas)
}
return init
}
// bytePtrToIndex returns a Node representing "(*byte)(&n[i])".
-func bytePtrToIndex(n *Node, i int64) *Node {
- s := nod(OADDR, nod(OINDEX, n, nodintconst(i)), nil)
- t := types.NewPtr(types.Types[TUINT8])
+func bytePtrToIndex(n ir.Node, i int64) ir.Node {
+ s := nodAddr(ir.Nod(ir.OINDEX, n, nodintconst(i)))
+ t := types.NewPtr(types.Types[types.TUINT8])
return convnop(s, t)
}
// Keep in sync with src/runtime/select.go.
func scasetype() *types.Type {
if scase == nil {
- scase = tostruct([]*Node{
- namedfield("c", types.Types[TUNSAFEPTR]),
- namedfield("elem", types.Types[TUNSAFEPTR]),
+ scase = tostruct([]*ir.Field{
+ namedfield("c", types.Types[types.TUNSAFEPTR]),
+ namedfield("elem", types.Types[types.TUNSAFEPTR]),
})
scase.SetNoalg(true)
}
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/obj"
"fmt"
+ "go/constant"
)
type InitEntry struct {
- Xoffset int64 // struct, array only
- Expr *Node // bytes of run-time computed expressions
+ Xoffset int64 // struct, array only
+ Expr ir.Node // bytes of run-time computed expressions
}
type InitPlan struct {
type InitSchedule struct {
// out is the ordered list of dynamic initialization
// statements.
- out []*Node
+ out []ir.Node
- initplans map[*Node]*InitPlan
- inittemps map[*Node]*Node
+ initplans map[ir.Node]*InitPlan
+ inittemps map[ir.Node]*ir.Name
}
-func (s *InitSchedule) append(n *Node) {
+func (s *InitSchedule) append(n ir.Node) {
s.out = append(s.out, n)
}
// staticInit adds an initialization statement n to the schedule.
-func (s *InitSchedule) staticInit(n *Node) {
+func (s *InitSchedule) staticInit(n ir.Node) {
if !s.tryStaticInit(n) {
- if Debug.P != 0 {
- Dump("nonstatic", n)
+ if base.Flag.Percent != 0 {
+ ir.Dump("nonstatic", n)
}
s.append(n)
}
// tryStaticInit attempts to statically execute an initialization
// statement and reports whether it succeeded.
-func (s *InitSchedule) tryStaticInit(n *Node) bool {
+func (s *InitSchedule) tryStaticInit(nn ir.Node) bool {
// Only worry about simple "l = r" assignments. Multiple
// variable/expression OAS2 assignments have already been
// replaced by multiple simple OAS assignments, and the other
// OAS2* assignments mostly necessitate dynamic execution
// anyway.
- if n.Op != OAS {
+ if nn.Op() != ir.OAS {
return false
}
- if n.Left.isBlank() && candiscard(n.Right) {
+ n := nn.(*ir.AssignStmt)
+ if ir.IsBlank(n.Left()) && !anySideEffects(n.Right()) {
+ // Discard.
return true
}
lno := setlineno(n)
- defer func() { lineno = lno }()
- return s.staticassign(n.Left, n.Right)
+ defer func() { base.Pos = lno }()
+ nam := n.Left().(*ir.Name)
+ return s.staticassign(nam, 0, n.Right(), nam.Type())
}
// like staticassign but we are copying an already
// initialized value r.
-func (s *InitSchedule) staticcopy(l *Node, r *Node) bool {
- if r.Op != ONAME {
- return false
- }
- if r.Class() == PFUNC {
- pfuncsym(l, r)
+func (s *InitSchedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *types.Type) bool {
+ if rn.Class() == ir.PFUNC {
+ // TODO if roff != 0 { panic }
+ pfuncsym(l, loff, rn)
return true
}
- if r.Class() != PEXTERN || r.Sym.Pkg != localpkg {
+ if rn.Class() != ir.PEXTERN || rn.Sym().Pkg != types.LocalPkg {
return false
}
- if r.Name.Defn == nil { // probably zeroed but perhaps supplied externally and of unknown value
+ if rn.Defn == nil { // probably zeroed but perhaps supplied externally and of unknown value
return false
}
- if r.Name.Defn.Op != OAS {
+ if rn.Defn.Op() != ir.OAS {
return false
}
- if r.Type.IsString() { // perhaps overwritten by cmd/link -X (#34675)
+ if rn.Type().IsString() { // perhaps overwritten by cmd/link -X (#34675)
return false
}
- orig := r
- r = r.Name.Defn.Right
+ orig := rn
+ r := rn.Defn.(*ir.AssignStmt).Right()
- for r.Op == OCONVNOP && !types.Identical(r.Type, l.Type) {
- r = r.Left
+ for r.Op() == ir.OCONVNOP && !types.Identical(r.Type(), typ) {
+ r = r.(*ir.ConvExpr).Left()
}
- switch r.Op {
- case ONAME:
- if s.staticcopy(l, r) {
+ switch r.Op() {
+ case ir.OMETHEXPR:
+ r = r.(*ir.MethodExpr).FuncName()
+ fallthrough
+ case ir.ONAME:
+ r := r.(*ir.Name)
+ if s.staticcopy(l, loff, r, typ) {
return true
}
// We may have skipped past one or more OCONVNOPs, so
// use conv to ensure r is assignable to l (#13263).
- s.append(nod(OAS, l, conv(r, l.Type)))
+ dst := ir.Node(l)
+ if loff != 0 || !types.Identical(typ, l.Type()) {
+ dst = ir.NewNameOffsetExpr(base.Pos, l, loff, typ)
+ }
+ s.append(ir.Nod(ir.OAS, dst, conv(r, typ)))
return true
- case OLITERAL:
+ case ir.ONIL:
+ return true
+
+ case ir.OLITERAL:
if isZero(r) {
return true
}
- litsym(l, r, int(l.Type.Width))
+ litsym(l, loff, r, int(typ.Width))
return true
- case OADDR:
- if a := r.Left; a.Op == ONAME {
- addrsym(l, a)
+ case ir.OADDR:
+ if a := r.Left(); a.Op() == ir.ONAME {
+ a := a.(*ir.Name)
+ addrsym(l, loff, a, 0)
return true
}
- case OPTRLIT:
- switch r.Left.Op {
- case OARRAYLIT, OSLICELIT, OSTRUCTLIT, OMAPLIT:
+ case ir.OPTRLIT:
+ switch r.Left().Op() {
+ case ir.OARRAYLIT, ir.OSLICELIT, ir.OSTRUCTLIT, ir.OMAPLIT:
// copy pointer
- addrsym(l, s.inittemps[r])
+ addrsym(l, loff, s.inittemps[r], 0)
return true
}
- case OSLICELIT:
+ case ir.OSLICELIT:
+ r := r.(*ir.CompLitExpr)
// copy slice
- a := s.inittemps[r]
- slicesym(l, a, r.Right.Int64Val())
+ slicesym(l, loff, s.inittemps[r], r.Len)
return true
- case OARRAYLIT, OSTRUCTLIT:
+ case ir.OARRAYLIT, ir.OSTRUCTLIT:
p := s.initplans[r]
-
- n := l.copy()
for i := range p.E {
e := &p.E[i]
- n.Xoffset = l.Xoffset + e.Xoffset
- n.Type = e.Expr.Type
- if e.Expr.Op == OLITERAL {
- litsym(n, e.Expr, int(n.Type.Width))
+ typ := e.Expr.Type()
+ if e.Expr.Op() == ir.OLITERAL || e.Expr.Op() == ir.ONIL {
+ litsym(l, loff+e.Xoffset, e.Expr, int(typ.Width))
continue
}
- ll := n.sepcopy()
- if s.staticcopy(ll, e.Expr) {
+ x := e.Expr
+ if x.Op() == ir.OMETHEXPR {
+ x = x.(*ir.MethodExpr).FuncName()
+ }
+ if x.Op() == ir.ONAME && s.staticcopy(l, loff+e.Xoffset, x.(*ir.Name), typ) {
continue
}
// Requires computation, but we're
// copying someone else's computation.
- rr := orig.sepcopy()
- rr.Type = ll.Type
- rr.Xoffset += e.Xoffset
+ ll := ir.NewNameOffsetExpr(base.Pos, l, loff+e.Xoffset, typ)
+ rr := ir.NewNameOffsetExpr(base.Pos, orig, e.Xoffset, typ)
setlineno(rr)
- s.append(nod(OAS, ll, rr))
+ s.append(ir.Nod(ir.OAS, ll, rr))
}
return true
return false
}
-func (s *InitSchedule) staticassign(l *Node, r *Node) bool {
- for r.Op == OCONVNOP {
- r = r.Left
+func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *types.Type) bool {
+ for r.Op() == ir.OCONVNOP {
+ r = r.(*ir.ConvExpr).Left()
}
- switch r.Op {
- case ONAME:
- return s.staticcopy(l, r)
+ switch r.Op() {
+ case ir.ONAME:
+ r := r.(*ir.Name)
+ return s.staticcopy(l, loff, r, typ)
+
+ case ir.OMETHEXPR:
+ r := r.(*ir.MethodExpr)
+ return s.staticcopy(l, loff, r.FuncName(), typ)
+
+ case ir.ONIL:
+ return true
- case OLITERAL:
+ case ir.OLITERAL:
if isZero(r) {
return true
}
- litsym(l, r, int(l.Type.Width))
+ litsym(l, loff, r, int(typ.Width))
return true
- case OADDR:
- var nam Node
- if stataddr(&nam, r.Left) {
- addrsym(l, &nam)
+ case ir.OADDR:
+ if name, offset, ok := stataddr(r.Left()); ok {
+ addrsym(l, loff, name, offset)
return true
}
fallthrough
- case OPTRLIT:
- switch r.Left.Op {
- case OARRAYLIT, OSLICELIT, OMAPLIT, OSTRUCTLIT:
+ case ir.OPTRLIT:
+ switch r.Left().Op() {
+ case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT:
// Init pointer.
- a := staticname(r.Left.Type)
+ a := staticname(r.Left().Type())
s.inittemps[r] = a
- addrsym(l, a)
+ addrsym(l, loff, a, 0)
// Init underlying literal.
- if !s.staticassign(a, r.Left) {
- s.append(nod(OAS, a, r.Left))
+ if !s.staticassign(a, 0, r.Left(), a.Type()) {
+ s.append(ir.Nod(ir.OAS, a, r.Left()))
}
return true
}
//dump("not static ptrlit", r);
- case OSTR2BYTES:
- if l.Class() == PEXTERN && r.Left.Op == OLITERAL {
- sval := r.Left.StringVal()
- slicebytes(l, sval)
+ case ir.OSTR2BYTES:
+ if l.Class() == ir.PEXTERN && r.Left().Op() == ir.OLITERAL {
+ sval := ir.StringVal(r.Left())
+ slicebytes(l, loff, sval)
return true
}
- case OSLICELIT:
+ case ir.OSLICELIT:
+ r := r.(*ir.CompLitExpr)
s.initplan(r)
// Init slice.
- bound := r.Right.Int64Val()
- ta := types.NewArray(r.Type.Elem(), bound)
+ ta := types.NewArray(r.Type().Elem(), r.Len)
ta.SetNoalg(true)
a := staticname(ta)
s.inittemps[r] = a
- slicesym(l, a, bound)
+ slicesym(l, loff, a, r.Len)
// Fall through to init underlying array.
l = a
+ loff = 0
fallthrough
- case OARRAYLIT, OSTRUCTLIT:
+ case ir.OARRAYLIT, ir.OSTRUCTLIT:
s.initplan(r)
p := s.initplans[r]
- n := l.copy()
for i := range p.E {
e := &p.E[i]
- n.Xoffset = l.Xoffset + e.Xoffset
- n.Type = e.Expr.Type
- if e.Expr.Op == OLITERAL {
- litsym(n, e.Expr, int(n.Type.Width))
+ if e.Expr.Op() == ir.OLITERAL || e.Expr.Op() == ir.ONIL {
+ litsym(l, loff+e.Xoffset, e.Expr, int(e.Expr.Type().Width))
continue
}
setlineno(e.Expr)
- a := n.sepcopy()
- if !s.staticassign(a, e.Expr) {
- s.append(nod(OAS, a, e.Expr))
+ if !s.staticassign(l, loff+e.Xoffset, e.Expr, e.Expr.Type()) {
+ a := ir.NewNameOffsetExpr(base.Pos, l, loff+e.Xoffset, e.Expr.Type())
+ s.append(ir.Nod(ir.OAS, a, e.Expr))
}
}
return true
- case OMAPLIT:
+ case ir.OMAPLIT:
break
- case OCLOSURE:
+ case ir.OCLOSURE:
+ r := r.(*ir.ClosureExpr)
if hasemptycvars(r) {
- if Debug_closure > 0 {
- Warnl(r.Pos, "closure converted to global")
+ if base.Debug.Closure > 0 {
+ base.WarnfAt(r.Pos(), "closure converted to global")
}
// Closures with no captured variables are globals,
// so the assignment can be done at link time.
- pfuncsym(l, r.Func.Closure.Func.Nname)
+ // TODO if roff != 0 { panic }
+ pfuncsym(l, loff, r.Func().Nname)
return true
}
closuredebugruntimecheck(r)
- case OCONVIFACE:
+ case ir.OCONVIFACE:
// This logic is mirrored in isStaticCompositeLiteral.
// If you change something here, change it there, and vice versa.
// Determine the underlying concrete type and value we are converting from.
- val := r
- for val.Op == OCONVIFACE {
- val = val.Left
+ val := ir.Node(r)
+ for val.Op() == ir.OCONVIFACE {
+ val = val.(*ir.ConvExpr).Left()
}
- if val.Type.IsInterface() {
+
+ if val.Type().IsInterface() {
// val is an interface type.
// If val is nil, we can statically initialize l;
// both words are zero and so there no work to do, so report success.
// If val is non-nil, we have no concrete type to record,
// and we won't be able to statically initialize its value, so report failure.
- return Isconst(val, CTNIL)
+ return val.Op() == ir.ONIL
}
- markTypeUsedInInterface(val.Type, l.Sym.Linksym())
+ markTypeUsedInInterface(val.Type(), l.Sym().Linksym())
- var itab *Node
- if l.Type.IsEmptyInterface() {
- itab = typename(val.Type)
+ var itab *ir.AddrExpr
+ if typ.IsEmptyInterface() {
+ itab = typename(val.Type())
} else {
- itab = itabname(val.Type, l.Type)
+ itab = itabname(val.Type(), typ)
}
// Create a copy of l to modify while we emit data.
- n := l.copy()
// Emit itab, advance offset.
- addrsym(n, itab.Left) // itab is an OADDR node
- n.Xoffset += int64(Widthptr)
+ addrsym(l, loff, itab.Left().(*ir.Name), 0)
// Emit data.
- if isdirectiface(val.Type) {
- if Isconst(val, CTNIL) {
+ if isdirectiface(val.Type()) {
+ if val.Op() == ir.ONIL {
// Nil is zero, nothing to do.
return true
}
// Copy val directly into n.
- n.Type = val.Type
setlineno(val)
- a := n.sepcopy()
- if !s.staticassign(a, val) {
- s.append(nod(OAS, a, val))
+ if !s.staticassign(l, loff+int64(Widthptr), val, val.Type()) {
+ a := ir.NewNameOffsetExpr(base.Pos, l, loff+int64(Widthptr), val.Type())
+ s.append(ir.Nod(ir.OAS, a, val))
}
} else {
// Construct temp to hold val, write pointer to temp into n.
- a := staticname(val.Type)
+ a := staticname(val.Type())
s.inittemps[val] = a
- if !s.staticassign(a, val) {
- s.append(nod(OAS, a, val))
+ if !s.staticassign(a, 0, val, val.Type()) {
+ s.append(ir.Nod(ir.OAS, a, val))
}
- addrsym(n, a)
+ addrsym(l, loff+int64(Widthptr), a, 0)
}
return true
// staticname returns a name backed by a (writable) static data symbol.
// Use readonlystaticname for read-only node.
-func staticname(t *types.Type) *Node {
+func staticname(t *types.Type) *ir.Name {
// Don't use lookupN; it interns the resulting string, but these are all unique.
- n := newname(lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen)))
+ n := NewName(lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen)))
statuniqgen++
- addvar(n, t, PEXTERN)
- n.Sym.Linksym().Set(obj.AttrLocal, true)
+ declare(n, ir.PEXTERN)
+ n.SetType(t)
+ n.Sym().Linksym().Set(obj.AttrLocal, true)
return n
}
// readonlystaticname returns a name backed by a read-only static data symbol.
-func readonlystaticname(t *types.Type) *Node {
+func readonlystaticname(t *types.Type) *ir.Name {
n := staticname(t)
n.MarkReadonly()
- n.Sym.Linksym().Set(obj.AttrContentAddressable, true)
+ n.Sym().Linksym().Set(obj.AttrContentAddressable, true)
return n
}
-func (n *Node) isSimpleName() bool {
- return n.Op == ONAME && n.Class() != PAUTOHEAP && n.Class() != PEXTERN
+func isSimpleName(nn ir.Node) bool {
+ if nn.Op() != ir.ONAME {
+ return false
+ }
+ n := nn.(*ir.Name)
+ return n.Class() != ir.PAUTOHEAP && n.Class() != ir.PEXTERN
}
-func litas(l *Node, r *Node, init *Nodes) {
- a := nod(OAS, l, r)
- a = typecheck(a, ctxStmt)
- a = walkexpr(a, init)
- init.Append(a)
+func litas(l ir.Node, r ir.Node, init *ir.Nodes) {
+ appendWalkStmt(init, ir.Nod(ir.OAS, l, r))
}
// initGenType is a bitmap indicating the types of generation that will occur for a static value.
// getdyn calculates the initGenType for n.
// If top is false, getdyn is recursing.
-func getdyn(n *Node, top bool) initGenType {
- switch n.Op {
+func getdyn(n ir.Node, top bool) initGenType {
+ switch n.Op() {
default:
- if n.isGoConst() {
+ if isGoConst(n) {
return initConst
}
return initDynamic
- case OSLICELIT:
+ case ir.OSLICELIT:
+ n := n.(*ir.CompLitExpr)
if !top {
return initDynamic
}
- if n.Right.Int64Val()/4 > int64(n.List.Len()) {
+ if n.Len/4 > int64(n.List().Len()) {
// <25% of entries have explicit values.
// Very rough estimation, it takes 4 bytes of instructions
// to initialize 1 byte of result. So don't use a static
return initDynamic
}
- case OARRAYLIT, OSTRUCTLIT:
+ case ir.OARRAYLIT, ir.OSTRUCTLIT:
}
+ lit := n.(*ir.CompLitExpr)
var mode initGenType
- for _, n1 := range n.List.Slice() {
- switch n1.Op {
- case OKEY:
- n1 = n1.Right
- case OSTRUCTKEY:
- n1 = n1.Left
+ for _, n1 := range lit.List().Slice() {
+ switch n1.Op() {
+ case ir.OKEY:
+ n1 = n1.(*ir.KeyExpr).Right()
+ case ir.OSTRUCTKEY:
+ n1 = n1.(*ir.StructKeyExpr).Left()
}
mode |= getdyn(n1, false)
if mode == initDynamic|initConst {
}
// isStaticCompositeLiteral reports whether n is a compile-time constant.
-func isStaticCompositeLiteral(n *Node) bool {
- switch n.Op {
- case OSLICELIT:
+func isStaticCompositeLiteral(n ir.Node) bool {
+ switch n.Op() {
+ case ir.OSLICELIT:
return false
- case OARRAYLIT:
- for _, r := range n.List.Slice() {
- if r.Op == OKEY {
- r = r.Right
+ case ir.OARRAYLIT:
+ for _, r := range n.List().Slice() {
+ if r.Op() == ir.OKEY {
+ r = r.(*ir.KeyExpr).Right()
}
if !isStaticCompositeLiteral(r) {
return false
}
}
return true
- case OSTRUCTLIT:
- for _, r := range n.List.Slice() {
- if r.Op != OSTRUCTKEY {
- Fatalf("isStaticCompositeLiteral: rhs not OSTRUCTKEY: %v", r)
- }
- if !isStaticCompositeLiteral(r.Left) {
+ case ir.OSTRUCTLIT:
+ for _, r := range n.List().Slice() {
+ r := r.(*ir.StructKeyExpr)
+ if !isStaticCompositeLiteral(r.Left()) {
return false
}
}
return true
- case OLITERAL:
+ case ir.OLITERAL, ir.ONIL:
return true
- case OCONVIFACE:
+ case ir.OCONVIFACE:
// See staticassign's OCONVIFACE case for comments.
- val := n
- for val.Op == OCONVIFACE {
- val = val.Left
+ val := ir.Node(n)
+ for val.Op() == ir.OCONVIFACE {
+ val = val.(*ir.ConvExpr).Left()
}
- if val.Type.IsInterface() {
- return Isconst(val, CTNIL)
+ if val.Type().IsInterface() {
+ return val.Op() == ir.ONIL
}
- if isdirectiface(val.Type) && Isconst(val, CTNIL) {
+ if isdirectiface(val.Type()) && val.Op() == ir.ONIL {
return true
}
return isStaticCompositeLiteral(val)
// fixedlit handles struct, array, and slice literals.
// TODO: expand documentation.
-func fixedlit(ctxt initContext, kind initKind, n *Node, var_ *Node, init *Nodes) {
- isBlank := var_ == nblank
- var splitnode func(*Node) (a *Node, value *Node)
- switch n.Op {
- case OARRAYLIT, OSLICELIT:
+func fixedlit(ctxt initContext, kind initKind, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) {
+ isBlank := var_ == ir.BlankNode
+ var splitnode func(ir.Node) (a ir.Node, value ir.Node)
+ switch n.Op() {
+ case ir.OARRAYLIT, ir.OSLICELIT:
var k int64
- splitnode = func(r *Node) (*Node, *Node) {
- if r.Op == OKEY {
- k = indexconst(r.Left)
+ splitnode = func(r ir.Node) (ir.Node, ir.Node) {
+ if r.Op() == ir.OKEY {
+ kv := r.(*ir.KeyExpr)
+ k = indexconst(kv.Left())
if k < 0 {
- Fatalf("fixedlit: invalid index %v", r.Left)
+ base.Fatalf("fixedlit: invalid index %v", kv.Left())
}
- r = r.Right
+ r = kv.Right()
}
- a := nod(OINDEX, var_, nodintconst(k))
+ a := ir.Nod(ir.OINDEX, var_, nodintconst(k))
k++
if isBlank {
- a = nblank
+ return ir.BlankNode, r
}
return a, r
}
- case OSTRUCTLIT:
- splitnode = func(r *Node) (*Node, *Node) {
- if r.Op != OSTRUCTKEY {
- Fatalf("fixedlit: rhs not OSTRUCTKEY: %v", r)
- }
- if r.Sym.IsBlank() || isBlank {
- return nblank, r.Left
+ case ir.OSTRUCTLIT:
+ splitnode = func(rn ir.Node) (ir.Node, ir.Node) {
+ r := rn.(*ir.StructKeyExpr)
+ if r.Sym().IsBlank() || isBlank {
+ return ir.BlankNode, r.Left()
}
setlineno(r)
- return nodSym(ODOT, var_, r.Sym), r.Left
+ return nodSym(ir.ODOT, var_, r.Sym()), r.Left()
}
default:
- Fatalf("fixedlit bad op: %v", n.Op)
+ base.Fatalf("fixedlit bad op: %v", n.Op())
}
- for _, r := range n.List.Slice() {
+ for _, r := range n.List().Slice() {
a, value := splitnode(r)
- if a == nblank && candiscard(value) {
+ if a == ir.BlankNode && !anySideEffects(value) {
+ // Discard.
continue
}
- switch value.Op {
- case OSLICELIT:
+ switch value.Op() {
+ case ir.OSLICELIT:
+ value := value.(*ir.CompLitExpr)
if (kind == initKindStatic && ctxt == inNonInitFunction) || (kind == initKindDynamic && ctxt == inInitFunction) {
slicelit(ctxt, value, a, init)
continue
}
- case OARRAYLIT, OSTRUCTLIT:
+ case ir.OARRAYLIT, ir.OSTRUCTLIT:
+ value := value.(*ir.CompLitExpr)
fixedlit(ctxt, kind, value, a, init)
continue
}
- islit := value.isGoConst()
+ islit := isGoConst(value)
if (kind == initKindStatic && !islit) || (kind == initKindDynamic && islit) {
continue
}
// build list of assignments: var[index] = expr
setlineno(a)
- a = nod(OAS, a, value)
- a = typecheck(a, ctxStmt)
+ as := ir.NewAssignStmt(base.Pos, a, value)
+ as = typecheck(as, ctxStmt).(*ir.AssignStmt)
switch kind {
case initKindStatic:
- genAsStatic(a)
+ genAsStatic(as)
case initKindDynamic, initKindLocalCode:
- a = orderStmtInPlace(a, map[string][]*Node{})
+ a = orderStmtInPlace(as, map[string][]*ir.Name{})
a = walkstmt(a)
init.Append(a)
default:
- Fatalf("fixedlit: bad kind %d", kind)
+ base.Fatalf("fixedlit: bad kind %d", kind)
}
}
}
-func isSmallSliceLit(n *Node) bool {
- if n.Op != OSLICELIT {
+func isSmallSliceLit(n *ir.CompLitExpr) bool {
+ if n.Op() != ir.OSLICELIT {
return false
}
- r := n.Right
-
- return smallintconst(r) && (n.Type.Elem().Width == 0 || r.Int64Val() <= smallArrayBytes/n.Type.Elem().Width)
+ return n.Type().Elem().Width == 0 || n.Len <= smallArrayBytes/n.Type().Elem().Width
}
-func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) {
+func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) {
// make an array type corresponding the number of elements we have
- t := types.NewArray(n.Type.Elem(), n.Right.Int64Val())
+ t := types.NewArray(n.Type().Elem(), n.Len)
dowidth(t)
if ctxt == inNonInitFunction {
// copy static to slice
var_ = typecheck(var_, ctxExpr|ctxAssign)
- var nam Node
- if !stataddr(&nam, var_) || nam.Class() != PEXTERN {
- Fatalf("slicelit: %v", var_)
+ name, offset, ok := stataddr(var_)
+ if !ok || name.Class() != ir.PEXTERN {
+ base.Fatalf("slicelit: %v", var_)
}
- slicesym(&nam, vstat, t.NumElem())
+ slicesym(name, offset, vstat, t.NumElem())
return
}
// if the literal contains constants,
// make static initialized array (1),(2)
- var vstat *Node
+ var vstat ir.Node
mode := getdyn(n, true)
if mode&initConst != 0 && !isSmallSliceLit(n) {
vauto := temp(types.NewPtr(t))
// set auto to point at new temp or heap (3 assign)
- var a *Node
- if x := prealloc[n]; x != nil {
+ var a ir.Node
+ if x := n.Prealloc; x != nil {
// temp allocated during order.go for dddarg
- if !types.Identical(t, x.Type) {
+ if !types.Identical(t, x.Type()) {
panic("dotdotdot base type does not match order's assigned type")
}
if vstat == nil {
- a = nod(OAS, x, nil)
+ a = ir.Nod(ir.OAS, x, nil)
a = typecheck(a, ctxStmt)
init.Append(a) // zero new temp
} else {
// Declare that we're about to initialize all of x.
// (Which happens at the *vauto = vstat below.)
- init.Append(nod(OVARDEF, x, nil))
+ init.Append(ir.Nod(ir.OVARDEF, x, nil))
}
- a = nod(OADDR, x, nil)
- } else if n.Esc == EscNone {
+ a = nodAddr(x)
+ } else if n.Esc() == EscNone {
a = temp(t)
if vstat == nil {
- a = nod(OAS, temp(t), nil)
+ a = ir.Nod(ir.OAS, temp(t), nil)
a = typecheck(a, ctxStmt)
init.Append(a) // zero new temp
- a = a.Left
+ a = a.(*ir.AssignStmt).Left()
} else {
- init.Append(nod(OVARDEF, a, nil))
+ init.Append(ir.Nod(ir.OVARDEF, a, nil))
}
- a = nod(OADDR, a, nil)
+ a = nodAddr(a)
} else {
- a = nod(ONEW, nil, nil)
- a.List.Set1(typenod(t))
+ a = ir.Nod(ir.ONEW, ir.TypeNode(t), nil)
}
-
- a = nod(OAS, vauto, a)
- a = typecheck(a, ctxStmt)
- a = walkexpr(a, init)
- init.Append(a)
+ appendWalkStmt(init, ir.Nod(ir.OAS, vauto, a))
if vstat != nil {
// copy static to heap (4)
- a = nod(ODEREF, vauto, nil)
-
- a = nod(OAS, a, vstat)
- a = typecheck(a, ctxStmt)
- a = walkexpr(a, init)
- init.Append(a)
+ a = ir.Nod(ir.ODEREF, vauto, nil)
+ appendWalkStmt(init, ir.Nod(ir.OAS, a, vstat))
}
// put dynamics into array (5)
var index int64
- for _, value := range n.List.Slice() {
- if value.Op == OKEY {
- index = indexconst(value.Left)
+ for _, value := range n.List().Slice() {
+ if value.Op() == ir.OKEY {
+ kv := value.(*ir.KeyExpr)
+ index = indexconst(kv.Left())
if index < 0 {
- Fatalf("slicelit: invalid index %v", value.Left)
+ base.Fatalf("slicelit: invalid index %v", kv.Left())
}
- value = value.Right
+ value = kv.Right()
}
- a := nod(OINDEX, vauto, nodintconst(index))
+ a := ir.Nod(ir.OINDEX, vauto, nodintconst(index))
a.SetBounded(true)
index++
// TODO need to check bounds?
- switch value.Op {
- case OSLICELIT:
+ switch value.Op() {
+ case ir.OSLICELIT:
break
- case OARRAYLIT, OSTRUCTLIT:
+ case ir.OARRAYLIT, ir.OSTRUCTLIT:
+ value := value.(*ir.CompLitExpr)
k := initKindDynamic
if vstat == nil {
// Generate both static and dynamic initializations.
continue
}
- if vstat != nil && value.isGoConst() { // already set by copy from static value
+ if vstat != nil && isGoConst(value) { // already set by copy from static value
continue
}
// build list of vauto[c] = expr
setlineno(value)
- a = nod(OAS, a, value)
-
- a = typecheck(a, ctxStmt)
- a = orderStmtInPlace(a, map[string][]*Node{})
- a = walkstmt(a)
- init.Append(a)
+ as := typecheck(ir.Nod(ir.OAS, a, value), ctxStmt)
+ as = orderStmtInPlace(as, map[string][]*ir.Name{})
+ as = walkstmt(as)
+ init.Append(as)
}
// make slice out of heap (6)
- a = nod(OAS, var_, nod(OSLICE, vauto, nil))
+ a = ir.Nod(ir.OAS, var_, ir.Nod(ir.OSLICE, vauto, nil))
a = typecheck(a, ctxStmt)
- a = orderStmtInPlace(a, map[string][]*Node{})
+ a = orderStmtInPlace(a, map[string][]*ir.Name{})
a = walkstmt(a)
init.Append(a)
}
-func maplit(n *Node, m *Node, init *Nodes) {
+func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) {
// make the map var
- a := nod(OMAKE, nil, nil)
- a.Esc = n.Esc
- a.List.Set2(typenod(n.Type), nodintconst(int64(n.List.Len())))
+ a := ir.Nod(ir.OMAKE, nil, nil)
+ a.SetEsc(n.Esc())
+ a.PtrList().Set2(ir.TypeNode(n.Type()), nodintconst(int64(n.List().Len())))
litas(m, a, init)
- entries := n.List.Slice()
+ entries := n.List().Slice()
// The order pass already removed any dynamic (runtime-computed) entries.
// All remaining entries are static. Double-check that.
for _, r := range entries {
- if !isStaticCompositeLiteral(r.Left) || !isStaticCompositeLiteral(r.Right) {
- Fatalf("maplit: entry is not a literal: %v", r)
+ r := r.(*ir.KeyExpr)
+ if !isStaticCompositeLiteral(r.Left()) || !isStaticCompositeLiteral(r.Right()) {
+ base.Fatalf("maplit: entry is not a literal: %v", r)
}
}
// For a large number of entries, put them in an array and loop.
// build types [count]Tindex and [count]Tvalue
- tk := types.NewArray(n.Type.Key(), int64(len(entries)))
- te := types.NewArray(n.Type.Elem(), int64(len(entries)))
+ tk := types.NewArray(n.Type().Key(), int64(len(entries)))
+ te := types.NewArray(n.Type().Elem(), int64(len(entries)))
tk.SetNoalg(true)
te.SetNoalg(true)
vstatk := readonlystaticname(tk)
vstate := readonlystaticname(te)
- datak := nod(OARRAYLIT, nil, nil)
- datae := nod(OARRAYLIT, nil, nil)
+ datak := ir.NewCompLitExpr(base.Pos, ir.OARRAYLIT, nil, nil)
+ datae := ir.NewCompLitExpr(base.Pos, ir.OARRAYLIT, nil, nil)
for _, r := range entries {
- datak.List.Append(r.Left)
- datae.List.Append(r.Right)
+ r := r.(*ir.KeyExpr)
+ datak.PtrList().Append(r.Left())
+ datae.PtrList().Append(r.Right())
}
fixedlit(inInitFunction, initKindStatic, datak, vstatk, init)
fixedlit(inInitFunction, initKindStatic, datae, vstate, init)
// for i = 0; i < len(vstatk); i++ {
// map[vstatk[i]] = vstate[i]
// }
- i := temp(types.Types[TINT])
- rhs := nod(OINDEX, vstate, i)
+ i := temp(types.Types[types.TINT])
+ rhs := ir.Nod(ir.OINDEX, vstate, i)
rhs.SetBounded(true)
- kidx := nod(OINDEX, vstatk, i)
+ kidx := ir.Nod(ir.OINDEX, vstatk, i)
kidx.SetBounded(true)
- lhs := nod(OINDEX, m, kidx)
+ lhs := ir.Nod(ir.OINDEX, m, kidx)
- zero := nod(OAS, i, nodintconst(0))
- cond := nod(OLT, i, nodintconst(tk.NumElem()))
- incr := nod(OAS, i, nod(OADD, i, nodintconst(1)))
- body := nod(OAS, lhs, rhs)
+ zero := ir.Nod(ir.OAS, i, nodintconst(0))
+ cond := ir.Nod(ir.OLT, i, nodintconst(tk.NumElem()))
+ incr := ir.Nod(ir.OAS, i, ir.Nod(ir.OADD, i, nodintconst(1)))
+ body := ir.Nod(ir.OAS, lhs, rhs)
- loop := nod(OFOR, cond, incr)
- loop.Nbody.Set1(body)
- loop.Ninit.Set1(zero)
+ loop := ir.Nod(ir.OFOR, cond, incr)
+ loop.PtrBody().Set1(body)
+ loop.PtrInit().Set1(zero)
- loop = typecheck(loop, ctxStmt)
- loop = walkstmt(loop)
- init.Append(loop)
+ appendWalkStmt(init, loop)
return
}
// For a small number of entries, just add them directly.
// Build list of var[c] = expr.
// Use temporaries so that mapassign1 can have addressable key, elem.
// TODO(josharian): avoid map key temporaries for mapfast_* assignments with literal keys.
- tmpkey := temp(m.Type.Key())
- tmpelem := temp(m.Type.Elem())
+ tmpkey := temp(m.Type().Key())
+ tmpelem := temp(m.Type().Elem())
for _, r := range entries {
- index, elem := r.Left, r.Right
+ r := r.(*ir.KeyExpr)
+ index, elem := r.Left(), r.Right()
setlineno(index)
- a := nod(OAS, tmpkey, index)
- a = typecheck(a, ctxStmt)
- a = walkstmt(a)
- init.Append(a)
+ appendWalkStmt(init, ir.Nod(ir.OAS, tmpkey, index))
setlineno(elem)
- a = nod(OAS, tmpelem, elem)
- a = typecheck(a, ctxStmt)
- a = walkstmt(a)
- init.Append(a)
+ appendWalkStmt(init, ir.Nod(ir.OAS, tmpelem, elem))
setlineno(tmpelem)
- a = nod(OAS, nod(OINDEX, m, tmpkey), tmpelem)
- a = typecheck(a, ctxStmt)
- a = walkstmt(a)
- init.Append(a)
+ appendWalkStmt(init, ir.Nod(ir.OAS, ir.Nod(ir.OINDEX, m, tmpkey), tmpelem))
}
- a = nod(OVARKILL, tmpkey, nil)
- a = typecheck(a, ctxStmt)
- init.Append(a)
- a = nod(OVARKILL, tmpelem, nil)
- a = typecheck(a, ctxStmt)
- init.Append(a)
+ appendWalkStmt(init, ir.Nod(ir.OVARKILL, tmpkey, nil))
+ appendWalkStmt(init, ir.Nod(ir.OVARKILL, tmpelem, nil))
}
-func anylit(n *Node, var_ *Node, init *Nodes) {
- t := n.Type
- switch n.Op {
+func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) {
+ t := n.Type()
+ switch n.Op() {
default:
- Fatalf("anylit: not lit, op=%v node=%v", n.Op, n)
+ base.Fatalf("anylit: not lit, op=%v node=%v", n.Op(), n)
- case ONAME:
- a := nod(OAS, var_, n)
- a = typecheck(a, ctxStmt)
- init.Append(a)
+ case ir.ONAME:
+ appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, n))
- case OPTRLIT:
+ case ir.OMETHEXPR:
+ n := n.(*ir.MethodExpr)
+ anylit(n.FuncName(), var_, init)
+
+ case ir.OPTRLIT:
if !t.IsPtr() {
- Fatalf("anylit: not ptr")
+ base.Fatalf("anylit: not ptr")
}
- var r *Node
- if n.Right != nil {
+ var r ir.Node
+ if n.Right() != nil {
// n.Right is stack temporary used as backing store.
- init.Append(nod(OAS, n.Right, nil)) // zero backing store, just in case (#18410)
- r = nod(OADDR, n.Right, nil)
- r = typecheck(r, ctxExpr)
+ appendWalkStmt(init, ir.Nod(ir.OAS, n.Right(), nil)) // zero backing store, just in case (#18410)
+ r = nodAddr(n.Right())
} else {
- r = nod(ONEW, nil, nil)
- r.SetTypecheck(1)
- r.Type = t
- r.Esc = n.Esc
+ r = ir.Nod(ir.ONEW, ir.TypeNode(n.Left().Type()), nil)
+ r.SetEsc(n.Esc())
}
+ appendWalkStmt(init, ir.Nod(ir.OAS, var_, r))
- r = walkexpr(r, init)
- a := nod(OAS, var_, r)
-
- a = typecheck(a, ctxStmt)
- init.Append(a)
-
- var_ = nod(ODEREF, var_, nil)
+ var_ = ir.Nod(ir.ODEREF, var_, nil)
var_ = typecheck(var_, ctxExpr|ctxAssign)
- anylit(n.Left, var_, init)
+ anylit(n.Left(), var_, init)
- case OSTRUCTLIT, OARRAYLIT:
+ case ir.OSTRUCTLIT, ir.OARRAYLIT:
+ n := n.(*ir.CompLitExpr)
if !t.IsStruct() && !t.IsArray() {
- Fatalf("anylit: not struct/array")
+ base.Fatalf("anylit: not struct/array")
}
- if var_.isSimpleName() && n.List.Len() > 4 {
+ if isSimpleName(var_) && n.List().Len() > 4 {
// lay out static data
vstat := readonlystaticname(t)
ctxt := inInitFunction
- if n.Op == OARRAYLIT {
+ if n.Op() == ir.OARRAYLIT {
ctxt = inNonInitFunction
}
fixedlit(ctxt, initKindStatic, n, vstat, init)
// copy static to var
- a := nod(OAS, var_, vstat)
-
- a = typecheck(a, ctxStmt)
- a = walkexpr(a, init)
- init.Append(a)
+ appendWalkStmt(init, ir.Nod(ir.OAS, var_, vstat))
// add expressions to automatic
fixedlit(inInitFunction, initKindDynamic, n, var_, init)
}
var components int64
- if n.Op == OARRAYLIT {
+ if n.Op() == ir.OARRAYLIT {
components = t.NumElem()
} else {
components = int64(t.NumFields())
}
// initialization of an array or struct with unspecified components (missing fields or arrays)
- if var_.isSimpleName() || int64(n.List.Len()) < components {
- a := nod(OAS, var_, nil)
- a = typecheck(a, ctxStmt)
- a = walkexpr(a, init)
- init.Append(a)
+ if isSimpleName(var_) || int64(n.List().Len()) < components {
+ appendWalkStmt(init, ir.Nod(ir.OAS, var_, nil))
}
fixedlit(inInitFunction, initKindLocalCode, n, var_, init)
- case OSLICELIT:
+ case ir.OSLICELIT:
+ n := n.(*ir.CompLitExpr)
slicelit(inInitFunction, n, var_, init)
- case OMAPLIT:
+ case ir.OMAPLIT:
+ n := n.(*ir.CompLitExpr)
if !t.IsMap() {
- Fatalf("anylit: not map")
+ base.Fatalf("anylit: not map")
}
maplit(n, var_, init)
}
}
-func oaslit(n *Node, init *Nodes) bool {
- if n.Left == nil || n.Right == nil {
+// oaslit handles special composite literal assignments.
+// It returns true if n's effects have been added to init,
+// in which case n should be dropped from the program by the caller.
+func oaslit(n *ir.AssignStmt, init *ir.Nodes) bool {
+ if n.Left() == nil || n.Right() == nil {
// not a special composite literal assignment
return false
}
- if n.Left.Type == nil || n.Right.Type == nil {
+ if n.Left().Type() == nil || n.Right().Type() == nil {
// not a special composite literal assignment
return false
}
- if !n.Left.isSimpleName() {
+ if !isSimpleName(n.Left()) {
// not a special composite literal assignment
return false
}
- if !types.Identical(n.Left.Type, n.Right.Type) {
+ if !types.Identical(n.Left().Type(), n.Right().Type()) {
// not a special composite literal assignment
return false
}
- switch n.Right.Op {
+ switch n.Right().Op() {
default:
// not a special composite literal assignment
return false
- case OSTRUCTLIT, OARRAYLIT, OSLICELIT, OMAPLIT:
- if vmatch1(n.Left, n.Right) {
+ case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT:
+ if refersToCommonName(n.Left(), n.Right()) {
// not a special composite literal assignment
return false
}
- anylit(n.Right, n.Left, init)
+ anylit(n.Right(), n.Left(), init)
}
- n.Op = OEMPTY
- n.Right = nil
return true
}
-func getlit(lit *Node) int {
+func getlit(lit ir.Node) int {
if smallintconst(lit) {
- return int(lit.Int64Val())
+ return int(ir.Int64Val(lit))
}
return -1
}
-// stataddr sets nam to the static address of n and reports whether it succeeded.
-func stataddr(nam *Node, n *Node) bool {
+// stataddr returns the static address of n, if n has one, or else nil.
+func stataddr(n ir.Node) (name *ir.Name, offset int64, ok bool) {
if n == nil {
- return false
+ return nil, 0, false
}
- switch n.Op {
- case ONAME:
- *nam = *n
- return true
+ switch n.Op() {
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ return n, 0, true
+
+ case ir.OMETHEXPR:
+ n := n.(*ir.MethodExpr)
+ return stataddr(n.FuncName())
- case ODOT:
- if !stataddr(nam, n.Left) {
+ case ir.ODOT:
+ if name, offset, ok = stataddr(n.Left()); !ok {
break
}
- nam.Xoffset += n.Xoffset
- nam.Type = n.Type
- return true
+ offset += n.Offset()
+ return name, offset, true
- case OINDEX:
- if n.Left.Type.IsSlice() {
+ case ir.OINDEX:
+ if n.Left().Type().IsSlice() {
break
}
- if !stataddr(nam, n.Left) {
+ if name, offset, ok = stataddr(n.Left()); !ok {
break
}
- l := getlit(n.Right)
+ l := getlit(n.Right())
if l < 0 {
break
}
// Check for overflow.
- if n.Type.Width != 0 && thearch.MAXWIDTH/n.Type.Width <= int64(l) {
+ if n.Type().Width != 0 && MaxWidth/n.Type().Width <= int64(l) {
break
}
- nam.Xoffset += int64(l) * n.Type.Width
- nam.Type = n.Type
- return true
+ offset += int64(l) * n.Type().Width
+ return name, offset, true
}
- return false
+ return nil, 0, false
}
-func (s *InitSchedule) initplan(n *Node) {
+func (s *InitSchedule) initplan(n ir.Node) {
if s.initplans[n] != nil {
return
}
p := new(InitPlan)
s.initplans[n] = p
- switch n.Op {
+ switch n.Op() {
default:
- Fatalf("initplan")
+ base.Fatalf("initplan")
- case OARRAYLIT, OSLICELIT:
+ case ir.OARRAYLIT, ir.OSLICELIT:
var k int64
- for _, a := range n.List.Slice() {
- if a.Op == OKEY {
- k = indexconst(a.Left)
+ for _, a := range n.List().Slice() {
+ if a.Op() == ir.OKEY {
+ kv := a.(*ir.KeyExpr)
+ k = indexconst(kv.Left())
if k < 0 {
- Fatalf("initplan arraylit: invalid index %v", a.Left)
+ base.Fatalf("initplan arraylit: invalid index %v", kv.Left())
}
- a = a.Right
+ a = kv.Right()
}
- s.addvalue(p, k*n.Type.Elem().Width, a)
+ s.addvalue(p, k*n.Type().Elem().Width, a)
k++
}
- case OSTRUCTLIT:
- for _, a := range n.List.Slice() {
- if a.Op != OSTRUCTKEY {
- Fatalf("initplan structlit")
+ case ir.OSTRUCTLIT:
+ for _, a := range n.List().Slice() {
+ if a.Op() != ir.OSTRUCTKEY {
+ base.Fatalf("initplan structlit")
}
- if a.Sym.IsBlank() {
+ a := a.(*ir.StructKeyExpr)
+ if a.Sym().IsBlank() {
continue
}
- s.addvalue(p, a.Xoffset, a.Left)
+ s.addvalue(p, a.Offset(), a.Left())
}
- case OMAPLIT:
- for _, a := range n.List.Slice() {
- if a.Op != OKEY {
- Fatalf("initplan maplit")
+ case ir.OMAPLIT:
+ for _, a := range n.List().Slice() {
+ if a.Op() != ir.OKEY {
+ base.Fatalf("initplan maplit")
}
- s.addvalue(p, -1, a.Right)
+ a := a.(*ir.KeyExpr)
+ s.addvalue(p, -1, a.Right())
}
}
}
-func (s *InitSchedule) addvalue(p *InitPlan, xoffset int64, n *Node) {
+func (s *InitSchedule) addvalue(p *InitPlan, xoffset int64, n ir.Node) {
// special case: zero can be dropped entirely
if isZero(n) {
return
p.E = append(p.E, InitEntry{Xoffset: xoffset, Expr: n})
}
-func isZero(n *Node) bool {
- switch n.Op {
- case OLITERAL:
- switch u := n.Val().U.(type) {
+func isZero(n ir.Node) bool {
+ switch n.Op() {
+ case ir.ONIL:
+ return true
+
+ case ir.OLITERAL:
+ switch u := n.Val(); u.Kind() {
+ case constant.String:
+ return constant.StringVal(u) == ""
+ case constant.Bool:
+ return !constant.BoolVal(u)
default:
- Dump("unexpected literal", n)
- Fatalf("isZero")
- case *NilVal:
- return true
- case string:
- return u == ""
- case bool:
- return !u
- case *Mpint:
- return u.CmpInt64(0) == 0
- case *Mpflt:
- return u.CmpFloat64(0) == 0
- case *Mpcplx:
- return u.Real.CmpFloat64(0) == 0 && u.Imag.CmpFloat64(0) == 0
- }
-
- case OARRAYLIT:
- for _, n1 := range n.List.Slice() {
- if n1.Op == OKEY {
- n1 = n1.Right
+ return constant.Sign(u) == 0
+ }
+
+ case ir.OARRAYLIT:
+ for _, n1 := range n.List().Slice() {
+ if n1.Op() == ir.OKEY {
+ n1 = n1.(*ir.KeyExpr).Right()
}
if !isZero(n1) {
return false
}
return true
- case OSTRUCTLIT:
- for _, n1 := range n.List.Slice() {
- if !isZero(n1.Left) {
+ case ir.OSTRUCTLIT:
+ for _, n1 := range n.List().Slice() {
+ n1 := n1.(*ir.StructKeyExpr)
+ if !isZero(n1.Left()) {
return false
}
}
return false
}
-func isvaluelit(n *Node) bool {
- return n.Op == OARRAYLIT || n.Op == OSTRUCTLIT
+func isvaluelit(n ir.Node) bool {
+ return n.Op() == ir.OARRAYLIT || n.Op() == ir.OSTRUCTLIT
}
-func genAsStatic(as *Node) {
- if as.Left.Type == nil {
- Fatalf("genAsStatic as.Left not typechecked")
+func genAsStatic(as *ir.AssignStmt) {
+ if as.Left().Type() == nil {
+ base.Fatalf("genAsStatic as.Left not typechecked")
}
- var nam Node
- if !stataddr(&nam, as.Left) || (nam.Class() != PEXTERN && as.Left != nblank) {
- Fatalf("genAsStatic: lhs %v", as.Left)
+ name, offset, ok := stataddr(as.Left())
+ if !ok || (name.Class() != ir.PEXTERN && as.Left() != ir.BlankNode) {
+ base.Fatalf("genAsStatic: lhs %v", as.Left())
}
- switch {
- case as.Right.Op == OLITERAL:
- litsym(&nam, as.Right, int(as.Right.Type.Width))
- case as.Right.Op == ONAME && as.Right.Class() == PFUNC:
- pfuncsym(&nam, as.Right)
- default:
- Fatalf("genAsStatic: rhs %v", as.Right)
+ switch r := as.Right(); r.Op() {
+ case ir.OLITERAL:
+ litsym(name, offset, r, int(r.Type().Width))
+ return
+ case ir.OMETHEXPR:
+ r := r.(*ir.MethodExpr)
+ pfuncsym(name, offset, r.FuncName())
+ return
+ case ir.ONAME:
+ r := r.(*ir.Name)
+ if r.Offset() != 0 {
+ base.Fatalf("genAsStatic %+v", as)
+ }
+ if r.Class() == ir.PFUNC {
+ pfuncsym(name, offset, r)
+ return
+ }
}
+ base.Fatalf("genAsStatic: rhs %v", as.Right())
}
import (
"encoding/binary"
"fmt"
+ "go/constant"
"html"
"os"
"path/filepath"
"sort"
+ "strings"
"bufio"
"bytes"
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/obj"
const maxOpenDefers = 8
// ssaDumpInlined holds all inlined functions when ssaDump contains a function name.
-var ssaDumpInlined []*Node
+var ssaDumpInlined []*ir.Func
+
+func ssaDumpInline(fn *ir.Func) {
+ if ssaDump != "" && ssaDump == ir.FuncName(fn) {
+ ssaDumpInlined = append(ssaDumpInlined, fn)
+ }
+}
+
+func initSSAEnv() {
+ ssaDump = os.Getenv("GOSSAFUNC")
+ ssaDir = os.Getenv("GOSSADIR")
+ if ssaDump != "" {
+ if strings.HasSuffix(ssaDump, "+") {
+ ssaDump = ssaDump[:len(ssaDump)-1]
+ ssaDumpStdout = true
+ }
+ spl := strings.Split(ssaDump, ":")
+ if len(spl) > 1 {
+ ssaDump = spl[0]
+ ssaDumpCFG = spl[1]
+ }
+ }
+}
func initssaconfig() {
types_ := ssa.NewTypes()
// Generate a few pointer types that are uncommon in the frontend but common in the backend.
// Caching is disabled in the backend, so generating these here avoids allocations.
- _ = types.NewPtr(types.Types[TINTER]) // *interface{}
- _ = types.NewPtr(types.NewPtr(types.Types[TSTRING])) // **string
- _ = types.NewPtr(types.NewSlice(types.Types[TINTER])) // *[]interface{}
- _ = types.NewPtr(types.NewPtr(types.Bytetype)) // **byte
- _ = types.NewPtr(types.NewSlice(types.Bytetype)) // *[]byte
- _ = types.NewPtr(types.NewSlice(types.Types[TSTRING])) // *[]string
- _ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[TUINT8]))) // ***uint8
- _ = types.NewPtr(types.Types[TINT16]) // *int16
- _ = types.NewPtr(types.Types[TINT64]) // *int64
- _ = types.NewPtr(types.Errortype) // *error
+ _ = types.NewPtr(types.Types[types.TINTER]) // *interface{}
+ _ = types.NewPtr(types.NewPtr(types.Types[types.TSTRING])) // **string
+ _ = types.NewPtr(types.NewSlice(types.Types[types.TINTER])) // *[]interface{}
+ _ = types.NewPtr(types.NewPtr(types.ByteType)) // **byte
+ _ = types.NewPtr(types.NewSlice(types.ByteType)) // *[]byte
+ _ = types.NewPtr(types.NewSlice(types.Types[types.TSTRING])) // *[]string
+ _ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[types.TUINT8]))) // ***uint8
+ _ = types.NewPtr(types.Types[types.TINT16]) // *int16
+ _ = types.NewPtr(types.Types[types.TINT64]) // *int64
+ _ = types.NewPtr(types.ErrorType) // *error
types.NewPtrCacheEnabled = false
- ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, *types_, Ctxt, Debug.N == 0)
+ ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, *types_, base.Ctxt, base.Flag.N == 0)
ssaConfig.SoftFloat = thearch.SoftFloat
- ssaConfig.Race = flag_race
- ssaCaches = make([]ssa.Cache, nBackendWorkers)
+ ssaConfig.Race = base.Flag.Race
+ ssaCaches = make([]ssa.Cache, base.Flag.LowerC)
// Set up some runtime functions we'll need to call.
assertE2I = sysfunc("assertE2I")
// function/method/interface call), where the receiver of a method call is
// considered as the 0th parameter. This does not include the receiver of an
// interface call.
-func getParam(n *Node, i int) *types.Field {
- t := n.Left.Type
- if n.Op == OCALLMETH {
+func getParam(n *ir.CallExpr, i int) *types.Field {
+ t := n.Left().Type()
+ if n.Op() == ir.OCALLMETH {
if i == 0 {
return t.Recv()
}
// - Size of the argument
// - Offset of where argument should be placed in the args frame when making call
func (s *state) emitOpenDeferInfo() {
- x := Ctxt.Lookup(s.curfn.Func.lsym.Name + ".opendefer")
- s.curfn.Func.lsym.Func().OpenCodedDeferInfo = x
+ x := base.Ctxt.Lookup(s.curfn.LSym.Name + ".opendefer")
+ s.curfn.LSym.Func().OpenCodedDeferInfo = x
off := 0
// Compute maxargsize (max size of arguments for all defers)
var maxargsize int64
for i := len(s.openDefers) - 1; i >= 0; i-- {
r := s.openDefers[i]
- argsize := r.n.Left.Type.ArgWidth()
+ argsize := r.n.Left().Type().ArgWidth()
if argsize > maxargsize {
maxargsize = argsize
}
}
off = dvarint(x, off, maxargsize)
- off = dvarint(x, off, -s.deferBitsTemp.Xoffset)
+ off = dvarint(x, off, -s.deferBitsTemp.FrameOffset())
off = dvarint(x, off, int64(len(s.openDefers)))
// Write in reverse-order, for ease of running in that order at runtime
for i := len(s.openDefers) - 1; i >= 0; i-- {
r := s.openDefers[i]
- off = dvarint(x, off, r.n.Left.Type.ArgWidth())
- off = dvarint(x, off, -r.closureNode.Xoffset)
+ off = dvarint(x, off, r.n.Left().Type().ArgWidth())
+ off = dvarint(x, off, -r.closureNode.FrameOffset())
numArgs := len(r.argNodes)
if r.rcvrNode != nil {
// If there's an interface receiver, treat/place it as the first
}
off = dvarint(x, off, int64(numArgs))
if r.rcvrNode != nil {
- off = dvarint(x, off, -r.rcvrNode.Xoffset)
+ off = dvarint(x, off, -r.rcvrNode.FrameOffset())
off = dvarint(x, off, s.config.PtrSize)
off = dvarint(x, off, 0)
}
for j, arg := range r.argNodes {
f := getParam(r.n, j)
- off = dvarint(x, off, -arg.Xoffset)
+ off = dvarint(x, off, -arg.FrameOffset())
off = dvarint(x, off, f.Type.Size())
off = dvarint(x, off, f.Offset)
}
// buildssa builds an SSA function for fn.
// worker indicates which of the backend workers is doing the processing.
-func buildssa(fn *Node, worker int) *ssa.Func {
- name := fn.funcname()
+func buildssa(fn *ir.Func, worker int) *ssa.Func {
+ name := ir.FuncName(fn)
printssa := false
if ssaDump != "" { // match either a simple name e.g. "(*Reader).Reset", or a package.name e.g. "compress/gzip.(*Reader).Reset"
- printssa = name == ssaDump || myimportpath+"."+name == ssaDump
+ printssa = name == ssaDump || base.Ctxt.Pkgpath+"."+name == ssaDump
}
var astBuf *bytes.Buffer
if printssa {
astBuf = &bytes.Buffer{}
- fdumplist(astBuf, "buildssa-enter", fn.Func.Enter)
- fdumplist(astBuf, "buildssa-body", fn.Nbody)
- fdumplist(astBuf, "buildssa-exit", fn.Func.Exit)
+ ir.FDumpList(astBuf, "buildssa-enter", fn.Enter)
+ ir.FDumpList(astBuf, "buildssa-body", fn.Body())
+ ir.FDumpList(astBuf, "buildssa-exit", fn.Exit)
if ssaDumpStdout {
fmt.Println("generating SSA for", name)
fmt.Print(astBuf.String())
}
var s state
- s.pushLine(fn.Pos)
+ s.pushLine(fn.Pos())
defer s.popLine()
- s.hasdefer = fn.Func.HasDefer()
- if fn.Func.Pragma&CgoUnsafeArgs != 0 {
+ s.hasdefer = fn.HasDefer()
+ if fn.Pragma&ir.CgoUnsafeArgs != 0 {
s.cgoUnsafeArgs = true
}
s.f = ssa.NewFunc(&fe)
s.config = ssaConfig
- s.f.Type = fn.Type
+ s.f.Type = fn.Type()
s.f.Config = ssaConfig
s.f.Cache = &ssaCaches[worker]
s.f.Cache.Reset()
s.f.Name = name
s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH")
s.f.PrintOrHtmlSSA = printssa
- if fn.Func.Pragma&Nosplit != 0 {
+ if fn.Pragma&ir.Nosplit != 0 {
s.f.NoSplit = true
}
s.panics = map[funcLine]*ssa.Block{}
// Allocate starting block
s.f.Entry = s.f.NewBlock(ssa.BlockPlain)
- s.f.Entry.Pos = fn.Pos
+ s.f.Entry.Pos = fn.Pos()
if printssa {
ssaDF := ssaDumpFile
if ssaDir != "" {
- ssaDF = filepath.Join(ssaDir, myimportpath+"."+name+".html")
+ ssaDF = filepath.Join(ssaDir, base.Ctxt.Pkgpath+"."+name+".html")
ssaD := filepath.Dir(ssaDF)
os.MkdirAll(ssaD, 0755)
}
// Allocate starting values
s.labels = map[string]*ssaLabel{}
- s.labeledNodes = map[*Node]*ssaLabel{}
- s.fwdVars = map[*Node]*ssa.Value{}
+ s.fwdVars = map[ir.Node]*ssa.Value{}
s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)
- s.hasOpenDefers = Debug.N == 0 && s.hasdefer && !s.curfn.Func.OpenCodedDeferDisallowed()
+ s.hasOpenDefers = base.Flag.N == 0 && s.hasdefer && !s.curfn.OpenCodedDeferDisallowed()
switch {
- case s.hasOpenDefers && (Ctxt.Flag_shared || Ctxt.Flag_dynlink) && thearch.LinkArch.Name == "386":
+ case s.hasOpenDefers && (base.Ctxt.Flag_shared || base.Ctxt.Flag_dynlink) && thearch.LinkArch.Name == "386":
// Don't support open-coded defers for 386 ONLY when using shared
// libraries, because there is extra code (added by rewriteToUseGot())
// preceding the deferreturn/ret code that is generated by gencallret()
// that we don't track correctly.
s.hasOpenDefers = false
}
- if s.hasOpenDefers && s.curfn.Func.Exit.Len() > 0 {
+ if s.hasOpenDefers && s.curfn.Exit.Len() > 0 {
// Skip doing open defers if there is any extra exit code (likely
// copying heap-allocated return values or race detection), since
// we will not generate that code in the case of the extra
s.hasOpenDefers = false
}
if s.hasOpenDefers &&
- s.curfn.Func.numReturns*s.curfn.Func.numDefers > 15 {
+ s.curfn.NumReturns*s.curfn.NumDefers > 15 {
// Since we are generating defer calls at every exit for
// open-coded defers, skip doing open-coded defers if there are
// too many returns (especially if there are multiple defers).
s.hasOpenDefers = false
}
- s.sp = s.entryNewValue0(ssa.OpSP, types.Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
- s.sb = s.entryNewValue0(ssa.OpSB, types.Types[TUINTPTR])
+ s.sp = s.entryNewValue0(ssa.OpSP, types.Types[types.TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
+ s.sb = s.entryNewValue0(ssa.OpSB, types.Types[types.TUINTPTR])
s.startBlock(s.f.Entry)
- s.vars[&memVar] = s.startmem
+ s.vars[memVar] = s.startmem
if s.hasOpenDefers {
// Create the deferBits variable and stack slot. deferBits is a
// bitmask showing which of the open-coded defers in this function
// have been activated.
- deferBitsTemp := tempAt(src.NoXPos, s.curfn, types.Types[TUINT8])
+ deferBitsTemp := tempAt(src.NoXPos, s.curfn, types.Types[types.TUINT8])
s.deferBitsTemp = deferBitsTemp
// For this value, AuxInt is initialized to zero by default
- startDeferBits := s.entryNewValue0(ssa.OpConst8, types.Types[TUINT8])
- s.vars[&deferBitsVar] = startDeferBits
+ startDeferBits := s.entryNewValue0(ssa.OpConst8, types.Types[types.TUINT8])
+ s.vars[deferBitsVar] = startDeferBits
s.deferBitsAddr = s.addr(deferBitsTemp)
- s.store(types.Types[TUINT8], s.deferBitsAddr, startDeferBits)
+ s.store(types.Types[types.TUINT8], s.deferBitsAddr, startDeferBits)
// Make sure that the deferBits stack slot is kept alive (for use
// by panics) and stores to deferBits are not eliminated, even if
// all checking code on deferBits in the function exit can be
// eliminated, because the defer statements were all
// unconditional.
- s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, deferBitsTemp, s.mem(), false)
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, deferBitsTemp, s.mem(), false)
}
// Generate addresses of local declarations
- s.decladdrs = map[*Node]*ssa.Value{}
+ s.decladdrs = map[*ir.Name]*ssa.Value{}
var args []ssa.Param
var results []ssa.Param
- for _, n := range fn.Func.Dcl {
+ for _, n := range fn.Dcl {
switch n.Class() {
- case PPARAM:
- s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type), n, s.sp, s.startmem)
- args = append(args, ssa.Param{Type: n.Type, Offset: int32(n.Xoffset)})
- case PPARAMOUT:
- s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type), n, s.sp, s.startmem)
- results = append(results, ssa.Param{Type: n.Type, Offset: int32(n.Xoffset)})
+ case ir.PPARAM:
+ s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem)
+ args = append(args, ssa.Param{Type: n.Type(), Offset: int32(n.FrameOffset())})
+ case ir.PPARAMOUT:
+ s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem)
+ results = append(results, ssa.Param{Type: n.Type(), Offset: int32(n.FrameOffset())})
if s.canSSA(n) {
// Save ssa-able PPARAMOUT variables so we can
// store them back to the stack at the end of
// the function.
s.returns = append(s.returns, n)
}
- case PAUTO:
+ case ir.PAUTO:
// processed at each use, to prevent Addr coming
// before the decl.
- case PAUTOHEAP:
+ case ir.PAUTOHEAP:
// moved to heap - already handled by frontend
- case PFUNC:
+ case ir.PFUNC:
// local function - already handled by frontend
default:
s.Fatalf("local variable with class %v unimplemented", n.Class())
}
// Populate SSAable arguments.
- for _, n := range fn.Func.Dcl {
- if n.Class() == PPARAM && s.canSSA(n) {
- v := s.newValue0A(ssa.OpArg, n.Type, n)
+ for _, n := range fn.Dcl {
+ if n.Class() == ir.PPARAM && s.canSSA(n) {
+ v := s.newValue0A(ssa.OpArg, n.Type(), n)
s.vars[n] = v
s.addNamedValue(n, v) // This helps with debugging information, not needed for compilation itself.
}
}
// Convert the AST-based IR to the SSA-based IR
- s.stmtList(fn.Func.Enter)
- s.stmtList(fn.Nbody)
+ s.stmtList(fn.Enter)
+ s.stmtList(fn.Body())
// fallthrough to exit
if s.curBlock != nil {
- s.pushLine(fn.Func.Endlineno)
+ s.pushLine(fn.Endlineno)
s.exit()
s.popLine()
}
return s.f
}
-func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *Node) {
+func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *ir.Func) {
// Read sources of target function fn.
- fname := Ctxt.PosTable.Pos(fn.Pos).Filename()
- targetFn, err := readFuncLines(fname, fn.Pos.Line(), fn.Func.Endlineno.Line())
+ fname := base.Ctxt.PosTable.Pos(fn.Pos()).Filename()
+ targetFn, err := readFuncLines(fname, fn.Pos().Line(), fn.Endlineno.Line())
if err != nil {
writer.Logf("cannot read sources for function %v: %v", fn, err)
}
// Read sources of inlined functions.
var inlFns []*ssa.FuncLines
for _, fi := range ssaDumpInlined {
- var elno src.XPos
- if fi.Name.Defn == nil {
- // Endlineno is filled from exported data.
- elno = fi.Func.Endlineno
- } else {
- elno = fi.Name.Defn.Func.Endlineno
- }
- fname := Ctxt.PosTable.Pos(fi.Pos).Filename()
- fnLines, err := readFuncLines(fname, fi.Pos.Line(), elno.Line())
+ elno := fi.Endlineno
+ fname := base.Ctxt.PosTable.Pos(fi.Pos()).Filename()
+ fnLines, err := readFuncLines(fname, fi.Pos().Line(), elno.Line())
if err != nil {
writer.Logf("cannot read sources for inlined function %v: %v", fi, err)
continue
// Information about each open-coded defer.
type openDeferInfo struct {
- // The ODEFER node representing the function call of the defer
- n *Node
+ // The node representing the call of the defer
+ n *ir.CallExpr
// If defer call is closure call, the address of the argtmp where the
// closure is stored.
closure *ssa.Value
// The node representing the argtmp where the closure is stored - used for
// function, method, or interface call, to store a closure that panic
// processing can use for this defer.
- closureNode *Node
+ closureNode *ir.Name
// If defer call is interface call, the address of the argtmp where the
// receiver is stored
rcvr *ssa.Value
// The node representing the argtmp where the receiver is stored
- rcvrNode *Node
+ rcvrNode *ir.Name
// The addresses of the argtmps where the evaluated arguments of the defer
// function call are stored.
argVals []*ssa.Value
// The nodes representing the argtmps where the args of the defer are stored
- argNodes []*Node
+ argNodes []*ir.Name
}
type state struct {
f *ssa.Func
// Node for function
- curfn *Node
+ curfn *ir.Func
- // labels and labeled control flow nodes (OFOR, OFORUNTIL, OSWITCH, OSELECT) in f
- labels map[string]*ssaLabel
- labeledNodes map[*Node]*ssaLabel
+ // labels in f
+ labels map[string]*ssaLabel
// unlabeled break and continue statement tracking
breakTo *ssa.Block // current target for plain break statement
// variable assignments in the current block (map from variable symbol to ssa value)
// *Node is the unique identifier (an ONAME Node) for the variable.
// TODO: keep a single varnum map, then make all of these maps slices instead?
- vars map[*Node]*ssa.Value
+ vars map[ir.Node]*ssa.Value
// fwdVars are variables that are used before they are defined in the current block.
// This map exists just to coalesce multiple references into a single FwdRef op.
// *Node is the unique identifier (an ONAME Node) for the variable.
- fwdVars map[*Node]*ssa.Value
+ fwdVars map[ir.Node]*ssa.Value
// all defined variables at the end of each block. Indexed by block ID.
- defvars []map[*Node]*ssa.Value
+ defvars []map[ir.Node]*ssa.Value
// addresses of PPARAM and PPARAMOUT variables.
- decladdrs map[*Node]*ssa.Value
+ decladdrs map[*ir.Name]*ssa.Value
// starting values. Memory, stack pointer, and globals pointer
startmem *ssa.Value
sb *ssa.Value
// value representing address of where deferBits autotmp is stored
deferBitsAddr *ssa.Value
- deferBitsTemp *Node
+ deferBitsTemp *ir.Name
// line number stack. The current line number is top of stack
line []src.XPos
panics map[funcLine]*ssa.Block
// list of PPARAMOUT (return) variables.
- returns []*Node
+ returns []*ir.Name
cgoUnsafeArgs bool
hasdefer bool // whether the function contains a defer statement
func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) }
func (s *state) Debug_checknil() bool { return s.f.Frontend().Debug_checknil() }
+func ssaMarker(name string) *ir.Name {
+ return NewName(&types.Sym{Name: name})
+}
+
var (
- // dummy node for the memory variable
- memVar = Node{Op: ONAME, Sym: &types.Sym{Name: "mem"}}
-
- // dummy nodes for temporary variables
- ptrVar = Node{Op: ONAME, Sym: &types.Sym{Name: "ptr"}}
- lenVar = Node{Op: ONAME, Sym: &types.Sym{Name: "len"}}
- newlenVar = Node{Op: ONAME, Sym: &types.Sym{Name: "newlen"}}
- capVar = Node{Op: ONAME, Sym: &types.Sym{Name: "cap"}}
- typVar = Node{Op: ONAME, Sym: &types.Sym{Name: "typ"}}
- okVar = Node{Op: ONAME, Sym: &types.Sym{Name: "ok"}}
- deferBitsVar = Node{Op: ONAME, Sym: &types.Sym{Name: "deferBits"}}
+ // marker node for the memory variable
+ memVar = ssaMarker("mem")
+
+ // marker nodes for temporary variables
+ ptrVar = ssaMarker("ptr")
+ lenVar = ssaMarker("len")
+ newlenVar = ssaMarker("newlen")
+ capVar = ssaMarker("cap")
+ typVar = ssaMarker("typ")
+ okVar = ssaMarker("ok")
+ deferBitsVar = ssaMarker("deferBits")
)
// startBlock sets the current block we're generating code in to b.
s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
}
s.curBlock = b
- s.vars = map[*Node]*ssa.Value{}
+ s.vars = map[ir.Node]*ssa.Value{}
for n := range s.fwdVars {
delete(s.fwdVars, n)
}
// the frontend may emit node with line number missing,
// use the parent line number in this case.
line = s.peekPos()
- if Debug.K != 0 {
- Warn("buildssa: unknown position (line 0)")
+ if base.Flag.K != 0 {
+ base.Warn("buildssa: unknown position (line 0)")
}
} else {
s.lastPos = line
}
// newValue0A adds a new value with no arguments and an aux value to the current block.
-func (s *state) newValue0A(op ssa.Op, t *types.Type, aux interface{}) *ssa.Value {
+func (s *state) newValue0A(op ssa.Op, t *types.Type, aux ssa.Aux) *ssa.Value {
return s.curBlock.NewValue0A(s.peekPos(), op, t, aux)
}
}
// newValue1A adds a new value with one argument and an aux value to the current block.
-func (s *state) newValue1A(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
+func (s *state) newValue1A(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value) *ssa.Value {
return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
}
// newValue1Apos adds a new value with one argument and an aux value to the current block.
// isStmt determines whether the created values may be a statement or not
// (i.e., false means never, yes means maybe).
-func (s *state) newValue1Apos(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value, isStmt bool) *ssa.Value {
+func (s *state) newValue1Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value, isStmt bool) *ssa.Value {
if isStmt {
return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
}
}
// newValue2A adds a new value with two arguments and an aux value to the current block.
-func (s *state) newValue2A(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1 *ssa.Value) *ssa.Value {
+func (s *state) newValue2A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1)
}
// newValue2Apos adds a new value with two arguments and an aux value to the current block.
// isStmt determines whether the created values may be a statement or not
// (i.e., false means never, yes means maybe).
-func (s *state) newValue2Apos(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1 *ssa.Value, isStmt bool) *ssa.Value {
+func (s *state) newValue2Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value, isStmt bool) *ssa.Value {
if isStmt {
return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1)
}
}
// newValue3A adds a new value with three arguments and an aux value to the current block.
-func (s *state) newValue3A(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
+func (s *state) newValue3A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}
// newValue3Apos adds a new value with three arguments and an aux value to the current block.
// isStmt determines whether the created values may be a statement or not
// (i.e., false means never, yes means maybe).
-func (s *state) newValue3Apos(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1, arg2 *ssa.Value, isStmt bool) *ssa.Value {
+func (s *state) newValue3Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1, arg2 *ssa.Value, isStmt bool) *ssa.Value {
if isStmt {
return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}
}
// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
-func (s *state) entryNewValue0A(op ssa.Op, t *types.Type, aux interface{}) *ssa.Value {
+func (s *state) entryNewValue0A(op ssa.Op, t *types.Type, aux ssa.Aux) *ssa.Value {
return s.f.Entry.NewValue0A(src.NoXPos, op, t, aux)
}
}
// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
-func (s *state) entryNewValue1A(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
+func (s *state) entryNewValue1A(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value) *ssa.Value {
return s.f.Entry.NewValue1A(src.NoXPos, op, t, aux, arg)
}
}
// entryNewValue2A adds a new value with two arguments and an aux value to the entry block.
-func (s *state) entryNewValue2A(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1 *ssa.Value) *ssa.Value {
+func (s *state) entryNewValue2A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value) *ssa.Value {
return s.f.Entry.NewValue2A(src.NoXPos, op, t, aux, arg0, arg1)
}
return s.f.ConstEmptyString(t)
}
func (s *state) constBool(c bool) *ssa.Value {
- return s.f.ConstBool(types.Types[TBOOL], c)
+ return s.f.ConstBool(types.Types[types.TBOOL], c)
}
func (s *state) constInt8(t *types.Type, c int8) *ssa.Value {
return s.f.ConstInt8(t, c)
// If it is instrumenting for MSAN and t is a struct type, it instruments
// operation for each field, instead of for the whole struct.
func (s *state) instrumentFields(t *types.Type, addr *ssa.Value, kind instrumentKind) {
- if !flag_msan || !t.IsStruct() {
+ if !base.Flag.MSan || !t.IsStruct() {
s.instrument(t, addr, kind)
return
}
}
func (s *state) instrumentMove(t *types.Type, dst, src *ssa.Value) {
- if flag_msan {
+ if base.Flag.MSan {
s.instrument2(t, dst, src, instrumentMove)
} else {
s.instrument(t, src, instrumentRead)
}
func (s *state) instrument2(t *types.Type, addr, addr2 *ssa.Value, kind instrumentKind) {
- if !s.curfn.Func.InstrumentBody() {
+ if !s.curfn.Func().InstrumentBody() {
return
}
panic("instrument2: non-nil addr2 for non-move instrumentation")
}
- if flag_msan {
+ if base.Flag.MSan {
switch kind {
case instrumentRead:
fn = msanread
panic("unreachable")
}
needWidth = true
- } else if flag_race && t.NumComponents(types.CountBlankFields) > 1 {
+ } else if base.Flag.Race && t.NumComponents(types.CountBlankFields) > 1 {
// for composite objects we have to write every address
// because a write might happen to any subobject.
// composites with only one element don't have subobjects, though.
panic("unreachable")
}
needWidth = true
- } else if flag_race {
+ } else if base.Flag.Race {
// for non-composite objects we can write just the start
// address, as any write must write the first byte.
switch kind {
args = append(args, addr2)
}
if needWidth {
- args = append(args, s.constInt(types.Types[TUINTPTR], w))
+ args = append(args, s.constInt(types.Types[types.TUINTPTR], w))
}
s.rtcall(fn, true, nil, args...)
}
}
func (s *state) store(t *types.Type, dst, val *ssa.Value) {
- s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, dst, val, s.mem())
+ s.vars[memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, dst, val, s.mem())
}
func (s *state) zero(t *types.Type, dst *ssa.Value) {
s.instrument(t, dst, instrumentWrite)
store := s.newValue2I(ssa.OpZero, types.TypeMem, t.Size(), dst, s.mem())
store.Aux = t
- s.vars[&memVar] = store
+ s.vars[memVar] = store
}
func (s *state) move(t *types.Type, dst, src *ssa.Value) {
s.instrumentMove(t, dst, src)
store := s.newValue3I(ssa.OpMove, types.TypeMem, t.Size(), dst, src, s.mem())
store.Aux = t
- s.vars[&memVar] = store
+ s.vars[memVar] = store
}
// stmtList converts the statement list n to SSA and adds it to s.
-func (s *state) stmtList(l Nodes) {
+func (s *state) stmtList(l ir.Nodes) {
for _, n := range l.Slice() {
s.stmt(n)
}
}
// stmt converts the statement n to SSA and adds it to s.
-func (s *state) stmt(n *Node) {
- if !(n.Op == OVARKILL || n.Op == OVARLIVE || n.Op == OVARDEF) {
+func (s *state) stmt(n ir.Node) {
+ if !(n.Op() == ir.OVARKILL || n.Op() == ir.OVARLIVE || n.Op() == ir.OVARDEF) {
// OVARKILL, OVARLIVE, and OVARDEF are invisible to the programmer, so we don't use their line numbers to avoid confusion in debugging.
- s.pushLine(n.Pos)
+ s.pushLine(n.Pos())
defer s.popLine()
}
// If s.curBlock is nil, and n isn't a label (which might have an associated goto somewhere),
// then this code is dead. Stop here.
- if s.curBlock == nil && n.Op != OLABEL {
+ if s.curBlock == nil && n.Op() != ir.OLABEL {
return
}
- s.stmtList(n.Ninit)
- switch n.Op {
+ s.stmtList(n.Init())
+ switch n.Op() {
- case OBLOCK:
- s.stmtList(n.List)
+ case ir.OBLOCK:
+ s.stmtList(n.List())
// No-ops
- case OEMPTY, ODCLCONST, ODCLTYPE, OFALL:
+ case ir.ODCLCONST, ir.ODCLTYPE, ir.OFALL:
// Expression statements
- case OCALLFUNC:
- if isIntrinsicCall(n) {
+ case ir.OCALLFUNC:
+ n := n.(*ir.CallExpr)
+ if IsIntrinsicCall(n) {
s.intrinsicCall(n)
return
}
fallthrough
- case OCALLMETH, OCALLINTER:
+ case ir.OCALLMETH, ir.OCALLINTER:
+ n := n.(*ir.CallExpr)
s.callResult(n, callNormal)
- if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class() == PFUNC {
- if fn := n.Left.Sym.Name; compiling_runtime && fn == "throw" ||
- n.Left.Sym.Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap") {
+ if n.Op() == ir.OCALLFUNC && n.Left().Op() == ir.ONAME && n.Left().(*ir.Name).Class() == ir.PFUNC {
+ if fn := n.Left().Sym().Name; base.Flag.CompilingRuntime && fn == "throw" ||
+ n.Left().Sym().Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap") {
m := s.mem()
b := s.endBlock()
b.Kind = ssa.BlockExit
// go through SSA.
}
}
- case ODEFER:
- if Debug_defer > 0 {
+ case ir.ODEFER:
+ if base.Debug.Defer > 0 {
var defertype string
if s.hasOpenDefers {
defertype = "open-coded"
- } else if n.Esc == EscNever {
+ } else if n.Esc() == EscNever {
defertype = "stack-allocated"
} else {
defertype = "heap-allocated"
}
- Warnl(n.Pos, "%s defer", defertype)
+ base.WarnfAt(n.Pos(), "%s defer", defertype)
}
if s.hasOpenDefers {
- s.openDeferRecord(n.Left)
+ s.openDeferRecord(n.Left().(*ir.CallExpr))
} else {
d := callDefer
- if n.Esc == EscNever {
+ if n.Esc() == EscNever {
d = callDeferStack
}
- s.callResult(n.Left, d)
+ s.callResult(n.Left().(*ir.CallExpr), d)
}
- case OGO:
- s.callResult(n.Left, callGo)
+ case ir.OGO:
+ s.callResult(n.Left().(*ir.CallExpr), callGo)
- case OAS2DOTTYPE:
- res, resok := s.dottype(n.Right, true)
+ case ir.OAS2DOTTYPE:
+ res, resok := s.dottype(n.Rlist().First().(*ir.TypeAssertExpr), true)
deref := false
- if !canSSAType(n.Right.Type) {
+ if !canSSAType(n.Rlist().First().Type()) {
if res.Op != ssa.OpLoad {
s.Fatalf("dottype of non-load")
}
deref = true
res = res.Args[0]
}
- s.assign(n.List.First(), res, deref, 0)
- s.assign(n.List.Second(), resok, false, 0)
+ s.assign(n.List().First(), res, deref, 0)
+ s.assign(n.List().Second(), resok, false, 0)
return
- case OAS2FUNC:
+ case ir.OAS2FUNC:
// We come here only when it is an intrinsic call returning two values.
- if !isIntrinsicCall(n.Right) {
- s.Fatalf("non-intrinsic AS2FUNC not expanded %v", n.Right)
- }
- v := s.intrinsicCall(n.Right)
- v1 := s.newValue1(ssa.OpSelect0, n.List.First().Type, v)
- v2 := s.newValue1(ssa.OpSelect1, n.List.Second().Type, v)
- s.assign(n.List.First(), v1, false, 0)
- s.assign(n.List.Second(), v2, false, 0)
+ call := n.Rlist().First().(*ir.CallExpr)
+ if !IsIntrinsicCall(call) {
+ s.Fatalf("non-intrinsic AS2FUNC not expanded %v", call)
+ }
+ v := s.intrinsicCall(call)
+ v1 := s.newValue1(ssa.OpSelect0, n.List().First().Type(), v)
+ v2 := s.newValue1(ssa.OpSelect1, n.List().Second().Type(), v)
+ s.assign(n.List().First(), v1, false, 0)
+ s.assign(n.List().Second(), v2, false, 0)
return
- case ODCL:
- if n.Left.Class() == PAUTOHEAP {
+ case ir.ODCL:
+ if n.Left().(*ir.Name).Class() == ir.PAUTOHEAP {
s.Fatalf("DCL %v", n)
}
- case OLABEL:
- sym := n.Sym
+ case ir.OLABEL:
+ sym := n.Sym()
lab := s.label(sym)
- // Associate label with its control flow node, if any
- if ctl := n.labeledControl(); ctl != nil {
- s.labeledNodes[ctl] = lab
- }
-
// The label might already have a target block via a goto.
if lab.target == nil {
lab.target = s.f.NewBlock(ssa.BlockPlain)
}
s.startBlock(lab.target)
- case OGOTO:
- sym := n.Sym
+ case ir.OGOTO:
+ sym := n.Sym()
lab := s.label(sym)
if lab.target == nil {
b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
b.AddEdgeTo(lab.target)
- case OAS:
- if n.Left == n.Right && n.Left.Op == ONAME {
+ case ir.OAS:
+ if n.Left() == n.Right() && n.Left().Op() == ir.ONAME {
// An x=x assignment. No point in doing anything
// here. In addition, skipping this assignment
// prevents generating:
}
// Evaluate RHS.
- rhs := n.Right
+ rhs := n.Right()
if rhs != nil {
- switch rhs.Op {
- case OSTRUCTLIT, OARRAYLIT, OSLICELIT:
+ switch rhs.Op() {
+ case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT:
// All literals with nonzero fields have already been
// rewritten during walk. Any that remain are just T{}
// or equivalents. Use the zero value.
s.Fatalf("literal with nonzero value in SSA: %v", rhs)
}
rhs = nil
- case OAPPEND:
+ case ir.OAPPEND:
+ rhs := rhs.(*ir.CallExpr)
// Check whether we're writing the result of an append back to the same slice.
// If so, we handle it specially to avoid write barriers on the fast
// (non-growth) path.
- if !samesafeexpr(n.Left, rhs.List.First()) || Debug.N != 0 {
+ if !samesafeexpr(n.Left(), rhs.List().First()) || base.Flag.N != 0 {
break
}
// If the slice can be SSA'd, it'll be on the stack,
// so there will be no write barriers,
// so there's no need to attempt to prevent them.
- if s.canSSA(n.Left) {
- if Debug_append > 0 { // replicating old diagnostic message
- Warnl(n.Pos, "append: len-only update (in local slice)")
+ if s.canSSA(n.Left()) {
+ if base.Debug.Append > 0 { // replicating old diagnostic message
+ base.WarnfAt(n.Pos(), "append: len-only update (in local slice)")
}
break
}
- if Debug_append > 0 {
- Warnl(n.Pos, "append: len-only update")
+ if base.Debug.Append > 0 {
+ base.WarnfAt(n.Pos(), "append: len-only update")
}
s.append(rhs, true)
return
}
}
- if n.Left.isBlank() {
+ if ir.IsBlank(n.Left()) {
// _ = rhs
// Just evaluate rhs for side-effects.
if rhs != nil {
}
var t *types.Type
- if n.Right != nil {
- t = n.Right.Type
+ if n.Right() != nil {
+ t = n.Right().Type()
} else {
- t = n.Left.Type
+ t = n.Left().Type()
}
var r *ssa.Value
}
var skip skipMask
- if rhs != nil && (rhs.Op == OSLICE || rhs.Op == OSLICE3 || rhs.Op == OSLICESTR) && samesafeexpr(rhs.Left, n.Left) {
+ if rhs != nil && (rhs.Op() == ir.OSLICE || rhs.Op() == ir.OSLICE3 || rhs.Op() == ir.OSLICESTR) && samesafeexpr(rhs.(*ir.SliceExpr).Left(), n.Left()) {
// We're assigning a slicing operation back to its source.
// Don't write back fields we aren't changing. See issue #14855.
i, j, k := rhs.SliceBounds()
- if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64Val() == 0) {
+ if i != nil && (i.Op() == ir.OLITERAL && i.Val().Kind() == constant.Int && ir.Int64Val(i) == 0) {
// [0:...] is the same as [:...]
i = nil
}
}
}
- s.assign(n.Left, r, deref, skip)
+ s.assign(n.Left(), r, deref, skip)
- case OIF:
- if Isconst(n.Left, CTBOOL) {
- s.stmtList(n.Left.Ninit)
- if n.Left.BoolVal() {
- s.stmtList(n.Nbody)
+ case ir.OIF:
+ if ir.IsConst(n.Left(), constant.Bool) {
+ s.stmtList(n.Left().Init())
+ if ir.BoolVal(n.Left()) {
+ s.stmtList(n.Body())
} else {
- s.stmtList(n.Rlist)
+ s.stmtList(n.Rlist())
}
break
}
likely = 1
}
var bThen *ssa.Block
- if n.Nbody.Len() != 0 {
+ if n.Body().Len() != 0 {
bThen = s.f.NewBlock(ssa.BlockPlain)
} else {
bThen = bEnd
}
var bElse *ssa.Block
- if n.Rlist.Len() != 0 {
+ if n.Rlist().Len() != 0 {
bElse = s.f.NewBlock(ssa.BlockPlain)
} else {
bElse = bEnd
}
- s.condBranch(n.Left, bThen, bElse, likely)
+ s.condBranch(n.Left(), bThen, bElse, likely)
- if n.Nbody.Len() != 0 {
+ if n.Body().Len() != 0 {
s.startBlock(bThen)
- s.stmtList(n.Nbody)
+ s.stmtList(n.Body())
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bEnd)
}
}
- if n.Rlist.Len() != 0 {
+ if n.Rlist().Len() != 0 {
s.startBlock(bElse)
- s.stmtList(n.Rlist)
+ s.stmtList(n.Rlist())
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bEnd)
}
}
s.startBlock(bEnd)
- case ORETURN:
- s.stmtList(n.List)
+ case ir.ORETURN:
+ s.stmtList(n.List())
b := s.exit()
b.Pos = s.lastPos.WithIsStmt()
- case ORETJMP:
- s.stmtList(n.List)
+ case ir.ORETJMP:
b := s.exit()
b.Kind = ssa.BlockRetJmp // override BlockRet
- b.Aux = n.Sym.Linksym()
+ b.Aux = callTargetLSym(n.Sym(), s.curfn.LSym)
- case OCONTINUE, OBREAK:
+ case ir.OCONTINUE, ir.OBREAK:
var to *ssa.Block
- if n.Sym == nil {
+ if n.Sym() == nil {
// plain break/continue
- switch n.Op {
- case OCONTINUE:
+ switch n.Op() {
+ case ir.OCONTINUE:
to = s.continueTo
- case OBREAK:
+ case ir.OBREAK:
to = s.breakTo
}
} else {
// labeled break/continue; look up the target
- sym := n.Sym
+ sym := n.Sym()
lab := s.label(sym)
- switch n.Op {
- case OCONTINUE:
+ switch n.Op() {
+ case ir.OCONTINUE:
to = lab.continueTarget
- case OBREAK:
+ case ir.OBREAK:
to = lab.breakTarget
}
}
b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
b.AddEdgeTo(to)
- case OFOR, OFORUNTIL:
+ case ir.OFOR, ir.OFORUNTIL:
// OFOR: for Ninit; Left; Right { Nbody }
// cond (Left); body (Nbody); incr (Right)
//
bEnd := s.f.NewBlock(ssa.BlockPlain)
// ensure empty for loops have correct position; issue #30167
- bBody.Pos = n.Pos
+ bBody.Pos = n.Pos()
// first, jump to condition test (OFOR) or body (OFORUNTIL)
b := s.endBlock()
- if n.Op == OFOR {
+ if n.Op() == ir.OFOR {
b.AddEdgeTo(bCond)
// generate code to test condition
s.startBlock(bCond)
- if n.Left != nil {
- s.condBranch(n.Left, bBody, bEnd, 1)
+ if n.Left() != nil {
+ s.condBranch(n.Left(), bBody, bEnd, 1)
} else {
b := s.endBlock()
b.Kind = ssa.BlockPlain
prevBreak := s.breakTo
s.continueTo = bIncr
s.breakTo = bEnd
- lab := s.labeledNodes[n]
- if lab != nil {
+ var lab *ssaLabel
+ if sym := n.Sym(); sym != nil {
// labeled for loop
+ lab = s.label(sym)
lab.continueTarget = bIncr
lab.breakTarget = bEnd
}
// generate body
s.startBlock(bBody)
- s.stmtList(n.Nbody)
+ s.stmtList(n.Body())
// tear down continue/break
s.continueTo = prevContinue
// generate incr (and, for OFORUNTIL, condition)
s.startBlock(bIncr)
- if n.Right != nil {
- s.stmt(n.Right)
+ if n.Right() != nil {
+ s.stmt(n.Right())
}
- if n.Op == OFOR {
+ if n.Op() == ir.OFOR {
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bCond)
// It can happen that bIncr ends in a block containing only VARKILL,
// and that muddles the debugging experience.
- if n.Op != OFORUNTIL && b.Pos == src.NoXPos {
+ if n.Op() != ir.OFORUNTIL && b.Pos == src.NoXPos {
b.Pos = bCond.Pos
}
}
// bCond is unused in OFORUNTIL, so repurpose it.
bLateIncr := bCond
// test condition
- s.condBranch(n.Left, bLateIncr, bEnd, 1)
+ s.condBranch(n.Left(), bLateIncr, bEnd, 1)
// generate late increment
s.startBlock(bLateIncr)
- s.stmtList(n.List)
+ s.stmtList(n.List())
s.endBlock().AddEdgeTo(bBody)
}
s.startBlock(bEnd)
- case OSWITCH, OSELECT:
+ case ir.OSWITCH, ir.OSELECT:
// These have been mostly rewritten by the front end into their Nbody fields.
// Our main task is to correctly hook up any break statements.
bEnd := s.f.NewBlock(ssa.BlockPlain)
prevBreak := s.breakTo
s.breakTo = bEnd
- lab := s.labeledNodes[n]
- if lab != nil {
+ var sym *types.Sym
+ var body ir.Nodes
+ if n.Op() == ir.OSWITCH {
+ n := n.(*ir.SwitchStmt)
+ sym = n.Sym()
+ body = n.Body()
+ } else {
+ n := n.(*ir.SelectStmt)
+ sym = n.Sym()
+ body = n.Body()
+ }
+
+ var lab *ssaLabel
+ if sym != nil {
// labeled
+ lab = s.label(sym)
lab.breakTarget = bEnd
}
// generate body code
- s.stmtList(n.Nbody)
+ s.stmtList(body)
s.breakTo = prevBreak
if lab != nil {
}
s.startBlock(bEnd)
- case OVARDEF:
- if !s.canSSA(n.Left) {
- s.vars[&memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.Left, s.mem(), false)
+ case ir.OVARDEF:
+ if !s.canSSA(n.Left()) {
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.Left().(*ir.Name), s.mem(), false)
}
- case OVARKILL:
+ case ir.OVARKILL:
// Insert a varkill op to record that a variable is no longer live.
// We only care about liveness info at call sites, so putting the
// varkill in the store chain is enough to keep it correctly ordered
// with respect to call ops.
- if !s.canSSA(n.Left) {
- s.vars[&memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.Left, s.mem(), false)
+ if !s.canSSA(n.Left()) {
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.Left().(*ir.Name), s.mem(), false)
}
- case OVARLIVE:
+ case ir.OVARLIVE:
// Insert a varlive op to record that a variable is still live.
- if !n.Left.Name.Addrtaken() {
- s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left)
+ v := n.Left().(*ir.Name)
+ if !v.Addrtaken() {
+ s.Fatalf("VARLIVE variable %v must have Addrtaken set", v)
}
- switch n.Left.Class() {
- case PAUTO, PPARAM, PPARAMOUT:
+ switch v.Class() {
+ case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT:
default:
- s.Fatalf("VARLIVE variable %v must be Auto or Arg", n.Left)
+ s.Fatalf("VARLIVE variable %v must be Auto or Arg", v)
}
- s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, n.Left, s.mem())
+ s.vars[memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, v, s.mem())
- case OCHECKNIL:
- p := s.expr(n.Left)
+ case ir.OCHECKNIL:
+ p := s.expr(n.Left())
s.nilCheck(p)
- case OINLMARK:
- s.newValue1I(ssa.OpInlMark, types.TypeVoid, n.Xoffset, s.mem())
+ case ir.OINLMARK:
+ s.newValue1I(ssa.OpInlMark, types.TypeVoid, n.Offset(), s.mem())
default:
- s.Fatalf("unhandled stmt %v", n.Op)
+ s.Fatalf("unhandled stmt %v", n.Op())
}
}
// Run exit code. Typically, this code copies heap-allocated PPARAMOUT
// variables back to the stack.
- s.stmtList(s.curfn.Func.Exit)
+ s.stmtList(s.curfn.Exit)
// Store SSAable PPARAMOUT variables back to stack locations.
for _, n := range s.returns {
addr := s.decladdrs[n]
- val := s.variable(n, n.Type)
- s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
- s.store(n.Type, addr, val)
+ val := s.variable(n, n.Type())
+ s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
+ s.store(n.Type(), addr, val)
// TODO: if val is ever spilled, we'd like to use the
// PPARAMOUT slot for spilling it. That won't happen
// currently.
}
type opAndType struct {
- op Op
- etype types.EType
+ op ir.Op
+ etype types.Kind
}
var opToSSA = map[opAndType]ssa.Op{
- opAndType{OADD, TINT8}: ssa.OpAdd8,
- opAndType{OADD, TUINT8}: ssa.OpAdd8,
- opAndType{OADD, TINT16}: ssa.OpAdd16,
- opAndType{OADD, TUINT16}: ssa.OpAdd16,
- opAndType{OADD, TINT32}: ssa.OpAdd32,
- opAndType{OADD, TUINT32}: ssa.OpAdd32,
- opAndType{OADD, TINT64}: ssa.OpAdd64,
- opAndType{OADD, TUINT64}: ssa.OpAdd64,
- opAndType{OADD, TFLOAT32}: ssa.OpAdd32F,
- opAndType{OADD, TFLOAT64}: ssa.OpAdd64F,
-
- opAndType{OSUB, TINT8}: ssa.OpSub8,
- opAndType{OSUB, TUINT8}: ssa.OpSub8,
- opAndType{OSUB, TINT16}: ssa.OpSub16,
- opAndType{OSUB, TUINT16}: ssa.OpSub16,
- opAndType{OSUB, TINT32}: ssa.OpSub32,
- opAndType{OSUB, TUINT32}: ssa.OpSub32,
- opAndType{OSUB, TINT64}: ssa.OpSub64,
- opAndType{OSUB, TUINT64}: ssa.OpSub64,
- opAndType{OSUB, TFLOAT32}: ssa.OpSub32F,
- opAndType{OSUB, TFLOAT64}: ssa.OpSub64F,
-
- opAndType{ONOT, TBOOL}: ssa.OpNot,
-
- opAndType{ONEG, TINT8}: ssa.OpNeg8,
- opAndType{ONEG, TUINT8}: ssa.OpNeg8,
- opAndType{ONEG, TINT16}: ssa.OpNeg16,
- opAndType{ONEG, TUINT16}: ssa.OpNeg16,
- opAndType{ONEG, TINT32}: ssa.OpNeg32,
- opAndType{ONEG, TUINT32}: ssa.OpNeg32,
- opAndType{ONEG, TINT64}: ssa.OpNeg64,
- opAndType{ONEG, TUINT64}: ssa.OpNeg64,
- opAndType{ONEG, TFLOAT32}: ssa.OpNeg32F,
- opAndType{ONEG, TFLOAT64}: ssa.OpNeg64F,
-
- opAndType{OBITNOT, TINT8}: ssa.OpCom8,
- opAndType{OBITNOT, TUINT8}: ssa.OpCom8,
- opAndType{OBITNOT, TINT16}: ssa.OpCom16,
- opAndType{OBITNOT, TUINT16}: ssa.OpCom16,
- opAndType{OBITNOT, TINT32}: ssa.OpCom32,
- opAndType{OBITNOT, TUINT32}: ssa.OpCom32,
- opAndType{OBITNOT, TINT64}: ssa.OpCom64,
- opAndType{OBITNOT, TUINT64}: ssa.OpCom64,
-
- opAndType{OIMAG, TCOMPLEX64}: ssa.OpComplexImag,
- opAndType{OIMAG, TCOMPLEX128}: ssa.OpComplexImag,
- opAndType{OREAL, TCOMPLEX64}: ssa.OpComplexReal,
- opAndType{OREAL, TCOMPLEX128}: ssa.OpComplexReal,
-
- opAndType{OMUL, TINT8}: ssa.OpMul8,
- opAndType{OMUL, TUINT8}: ssa.OpMul8,
- opAndType{OMUL, TINT16}: ssa.OpMul16,
- opAndType{OMUL, TUINT16}: ssa.OpMul16,
- opAndType{OMUL, TINT32}: ssa.OpMul32,
- opAndType{OMUL, TUINT32}: ssa.OpMul32,
- opAndType{OMUL, TINT64}: ssa.OpMul64,
- opAndType{OMUL, TUINT64}: ssa.OpMul64,
- opAndType{OMUL, TFLOAT32}: ssa.OpMul32F,
- opAndType{OMUL, TFLOAT64}: ssa.OpMul64F,
-
- opAndType{ODIV, TFLOAT32}: ssa.OpDiv32F,
- opAndType{ODIV, TFLOAT64}: ssa.OpDiv64F,
-
- opAndType{ODIV, TINT8}: ssa.OpDiv8,
- opAndType{ODIV, TUINT8}: ssa.OpDiv8u,
- opAndType{ODIV, TINT16}: ssa.OpDiv16,
- opAndType{ODIV, TUINT16}: ssa.OpDiv16u,
- opAndType{ODIV, TINT32}: ssa.OpDiv32,
- opAndType{ODIV, TUINT32}: ssa.OpDiv32u,
- opAndType{ODIV, TINT64}: ssa.OpDiv64,
- opAndType{ODIV, TUINT64}: ssa.OpDiv64u,
-
- opAndType{OMOD, TINT8}: ssa.OpMod8,
- opAndType{OMOD, TUINT8}: ssa.OpMod8u,
- opAndType{OMOD, TINT16}: ssa.OpMod16,
- opAndType{OMOD, TUINT16}: ssa.OpMod16u,
- opAndType{OMOD, TINT32}: ssa.OpMod32,
- opAndType{OMOD, TUINT32}: ssa.OpMod32u,
- opAndType{OMOD, TINT64}: ssa.OpMod64,
- opAndType{OMOD, TUINT64}: ssa.OpMod64u,
-
- opAndType{OAND, TINT8}: ssa.OpAnd8,
- opAndType{OAND, TUINT8}: ssa.OpAnd8,
- opAndType{OAND, TINT16}: ssa.OpAnd16,
- opAndType{OAND, TUINT16}: ssa.OpAnd16,
- opAndType{OAND, TINT32}: ssa.OpAnd32,
- opAndType{OAND, TUINT32}: ssa.OpAnd32,
- opAndType{OAND, TINT64}: ssa.OpAnd64,
- opAndType{OAND, TUINT64}: ssa.OpAnd64,
-
- opAndType{OOR, TINT8}: ssa.OpOr8,
- opAndType{OOR, TUINT8}: ssa.OpOr8,
- opAndType{OOR, TINT16}: ssa.OpOr16,
- opAndType{OOR, TUINT16}: ssa.OpOr16,
- opAndType{OOR, TINT32}: ssa.OpOr32,
- opAndType{OOR, TUINT32}: ssa.OpOr32,
- opAndType{OOR, TINT64}: ssa.OpOr64,
- opAndType{OOR, TUINT64}: ssa.OpOr64,
-
- opAndType{OXOR, TINT8}: ssa.OpXor8,
- opAndType{OXOR, TUINT8}: ssa.OpXor8,
- opAndType{OXOR, TINT16}: ssa.OpXor16,
- opAndType{OXOR, TUINT16}: ssa.OpXor16,
- opAndType{OXOR, TINT32}: ssa.OpXor32,
- opAndType{OXOR, TUINT32}: ssa.OpXor32,
- opAndType{OXOR, TINT64}: ssa.OpXor64,
- opAndType{OXOR, TUINT64}: ssa.OpXor64,
-
- opAndType{OEQ, TBOOL}: ssa.OpEqB,
- opAndType{OEQ, TINT8}: ssa.OpEq8,
- opAndType{OEQ, TUINT8}: ssa.OpEq8,
- opAndType{OEQ, TINT16}: ssa.OpEq16,
- opAndType{OEQ, TUINT16}: ssa.OpEq16,
- opAndType{OEQ, TINT32}: ssa.OpEq32,
- opAndType{OEQ, TUINT32}: ssa.OpEq32,
- opAndType{OEQ, TINT64}: ssa.OpEq64,
- opAndType{OEQ, TUINT64}: ssa.OpEq64,
- opAndType{OEQ, TINTER}: ssa.OpEqInter,
- opAndType{OEQ, TSLICE}: ssa.OpEqSlice,
- opAndType{OEQ, TFUNC}: ssa.OpEqPtr,
- opAndType{OEQ, TMAP}: ssa.OpEqPtr,
- opAndType{OEQ, TCHAN}: ssa.OpEqPtr,
- opAndType{OEQ, TPTR}: ssa.OpEqPtr,
- opAndType{OEQ, TUINTPTR}: ssa.OpEqPtr,
- opAndType{OEQ, TUNSAFEPTR}: ssa.OpEqPtr,
- opAndType{OEQ, TFLOAT64}: ssa.OpEq64F,
- opAndType{OEQ, TFLOAT32}: ssa.OpEq32F,
-
- opAndType{ONE, TBOOL}: ssa.OpNeqB,
- opAndType{ONE, TINT8}: ssa.OpNeq8,
- opAndType{ONE, TUINT8}: ssa.OpNeq8,
- opAndType{ONE, TINT16}: ssa.OpNeq16,
- opAndType{ONE, TUINT16}: ssa.OpNeq16,
- opAndType{ONE, TINT32}: ssa.OpNeq32,
- opAndType{ONE, TUINT32}: ssa.OpNeq32,
- opAndType{ONE, TINT64}: ssa.OpNeq64,
- opAndType{ONE, TUINT64}: ssa.OpNeq64,
- opAndType{ONE, TINTER}: ssa.OpNeqInter,
- opAndType{ONE, TSLICE}: ssa.OpNeqSlice,
- opAndType{ONE, TFUNC}: ssa.OpNeqPtr,
- opAndType{ONE, TMAP}: ssa.OpNeqPtr,
- opAndType{ONE, TCHAN}: ssa.OpNeqPtr,
- opAndType{ONE, TPTR}: ssa.OpNeqPtr,
- opAndType{ONE, TUINTPTR}: ssa.OpNeqPtr,
- opAndType{ONE, TUNSAFEPTR}: ssa.OpNeqPtr,
- opAndType{ONE, TFLOAT64}: ssa.OpNeq64F,
- opAndType{ONE, TFLOAT32}: ssa.OpNeq32F,
-
- opAndType{OLT, TINT8}: ssa.OpLess8,
- opAndType{OLT, TUINT8}: ssa.OpLess8U,
- opAndType{OLT, TINT16}: ssa.OpLess16,
- opAndType{OLT, TUINT16}: ssa.OpLess16U,
- opAndType{OLT, TINT32}: ssa.OpLess32,
- opAndType{OLT, TUINT32}: ssa.OpLess32U,
- opAndType{OLT, TINT64}: ssa.OpLess64,
- opAndType{OLT, TUINT64}: ssa.OpLess64U,
- opAndType{OLT, TFLOAT64}: ssa.OpLess64F,
- opAndType{OLT, TFLOAT32}: ssa.OpLess32F,
-
- opAndType{OLE, TINT8}: ssa.OpLeq8,
- opAndType{OLE, TUINT8}: ssa.OpLeq8U,
- opAndType{OLE, TINT16}: ssa.OpLeq16,
- opAndType{OLE, TUINT16}: ssa.OpLeq16U,
- opAndType{OLE, TINT32}: ssa.OpLeq32,
- opAndType{OLE, TUINT32}: ssa.OpLeq32U,
- opAndType{OLE, TINT64}: ssa.OpLeq64,
- opAndType{OLE, TUINT64}: ssa.OpLeq64U,
- opAndType{OLE, TFLOAT64}: ssa.OpLeq64F,
- opAndType{OLE, TFLOAT32}: ssa.OpLeq32F,
-}
-
-func (s *state) concreteEtype(t *types.Type) types.EType {
- e := t.Etype
+ opAndType{ir.OADD, types.TINT8}: ssa.OpAdd8,
+ opAndType{ir.OADD, types.TUINT8}: ssa.OpAdd8,
+ opAndType{ir.OADD, types.TINT16}: ssa.OpAdd16,
+ opAndType{ir.OADD, types.TUINT16}: ssa.OpAdd16,
+ opAndType{ir.OADD, types.TINT32}: ssa.OpAdd32,
+ opAndType{ir.OADD, types.TUINT32}: ssa.OpAdd32,
+ opAndType{ir.OADD, types.TINT64}: ssa.OpAdd64,
+ opAndType{ir.OADD, types.TUINT64}: ssa.OpAdd64,
+ opAndType{ir.OADD, types.TFLOAT32}: ssa.OpAdd32F,
+ opAndType{ir.OADD, types.TFLOAT64}: ssa.OpAdd64F,
+
+ opAndType{ir.OSUB, types.TINT8}: ssa.OpSub8,
+ opAndType{ir.OSUB, types.TUINT8}: ssa.OpSub8,
+ opAndType{ir.OSUB, types.TINT16}: ssa.OpSub16,
+ opAndType{ir.OSUB, types.TUINT16}: ssa.OpSub16,
+ opAndType{ir.OSUB, types.TINT32}: ssa.OpSub32,
+ opAndType{ir.OSUB, types.TUINT32}: ssa.OpSub32,
+ opAndType{ir.OSUB, types.TINT64}: ssa.OpSub64,
+ opAndType{ir.OSUB, types.TUINT64}: ssa.OpSub64,
+ opAndType{ir.OSUB, types.TFLOAT32}: ssa.OpSub32F,
+ opAndType{ir.OSUB, types.TFLOAT64}: ssa.OpSub64F,
+
+ opAndType{ir.ONOT, types.TBOOL}: ssa.OpNot,
+
+ opAndType{ir.ONEG, types.TINT8}: ssa.OpNeg8,
+ opAndType{ir.ONEG, types.TUINT8}: ssa.OpNeg8,
+ opAndType{ir.ONEG, types.TINT16}: ssa.OpNeg16,
+ opAndType{ir.ONEG, types.TUINT16}: ssa.OpNeg16,
+ opAndType{ir.ONEG, types.TINT32}: ssa.OpNeg32,
+ opAndType{ir.ONEG, types.TUINT32}: ssa.OpNeg32,
+ opAndType{ir.ONEG, types.TINT64}: ssa.OpNeg64,
+ opAndType{ir.ONEG, types.TUINT64}: ssa.OpNeg64,
+ opAndType{ir.ONEG, types.TFLOAT32}: ssa.OpNeg32F,
+ opAndType{ir.ONEG, types.TFLOAT64}: ssa.OpNeg64F,
+
+ opAndType{ir.OBITNOT, types.TINT8}: ssa.OpCom8,
+ opAndType{ir.OBITNOT, types.TUINT8}: ssa.OpCom8,
+ opAndType{ir.OBITNOT, types.TINT16}: ssa.OpCom16,
+ opAndType{ir.OBITNOT, types.TUINT16}: ssa.OpCom16,
+ opAndType{ir.OBITNOT, types.TINT32}: ssa.OpCom32,
+ opAndType{ir.OBITNOT, types.TUINT32}: ssa.OpCom32,
+ opAndType{ir.OBITNOT, types.TINT64}: ssa.OpCom64,
+ opAndType{ir.OBITNOT, types.TUINT64}: ssa.OpCom64,
+
+ opAndType{ir.OIMAG, types.TCOMPLEX64}: ssa.OpComplexImag,
+ opAndType{ir.OIMAG, types.TCOMPLEX128}: ssa.OpComplexImag,
+ opAndType{ir.OREAL, types.TCOMPLEX64}: ssa.OpComplexReal,
+ opAndType{ir.OREAL, types.TCOMPLEX128}: ssa.OpComplexReal,
+
+ opAndType{ir.OMUL, types.TINT8}: ssa.OpMul8,
+ opAndType{ir.OMUL, types.TUINT8}: ssa.OpMul8,
+ opAndType{ir.OMUL, types.TINT16}: ssa.OpMul16,
+ opAndType{ir.OMUL, types.TUINT16}: ssa.OpMul16,
+ opAndType{ir.OMUL, types.TINT32}: ssa.OpMul32,
+ opAndType{ir.OMUL, types.TUINT32}: ssa.OpMul32,
+ opAndType{ir.OMUL, types.TINT64}: ssa.OpMul64,
+ opAndType{ir.OMUL, types.TUINT64}: ssa.OpMul64,
+ opAndType{ir.OMUL, types.TFLOAT32}: ssa.OpMul32F,
+ opAndType{ir.OMUL, types.TFLOAT64}: ssa.OpMul64F,
+
+ opAndType{ir.ODIV, types.TFLOAT32}: ssa.OpDiv32F,
+ opAndType{ir.ODIV, types.TFLOAT64}: ssa.OpDiv64F,
+
+ opAndType{ir.ODIV, types.TINT8}: ssa.OpDiv8,
+ opAndType{ir.ODIV, types.TUINT8}: ssa.OpDiv8u,
+ opAndType{ir.ODIV, types.TINT16}: ssa.OpDiv16,
+ opAndType{ir.ODIV, types.TUINT16}: ssa.OpDiv16u,
+ opAndType{ir.ODIV, types.TINT32}: ssa.OpDiv32,
+ opAndType{ir.ODIV, types.TUINT32}: ssa.OpDiv32u,
+ opAndType{ir.ODIV, types.TINT64}: ssa.OpDiv64,
+ opAndType{ir.ODIV, types.TUINT64}: ssa.OpDiv64u,
+
+ opAndType{ir.OMOD, types.TINT8}: ssa.OpMod8,
+ opAndType{ir.OMOD, types.TUINT8}: ssa.OpMod8u,
+ opAndType{ir.OMOD, types.TINT16}: ssa.OpMod16,
+ opAndType{ir.OMOD, types.TUINT16}: ssa.OpMod16u,
+ opAndType{ir.OMOD, types.TINT32}: ssa.OpMod32,
+ opAndType{ir.OMOD, types.TUINT32}: ssa.OpMod32u,
+ opAndType{ir.OMOD, types.TINT64}: ssa.OpMod64,
+ opAndType{ir.OMOD, types.TUINT64}: ssa.OpMod64u,
+
+ opAndType{ir.OAND, types.TINT8}: ssa.OpAnd8,
+ opAndType{ir.OAND, types.TUINT8}: ssa.OpAnd8,
+ opAndType{ir.OAND, types.TINT16}: ssa.OpAnd16,
+ opAndType{ir.OAND, types.TUINT16}: ssa.OpAnd16,
+ opAndType{ir.OAND, types.TINT32}: ssa.OpAnd32,
+ opAndType{ir.OAND, types.TUINT32}: ssa.OpAnd32,
+ opAndType{ir.OAND, types.TINT64}: ssa.OpAnd64,
+ opAndType{ir.OAND, types.TUINT64}: ssa.OpAnd64,
+
+ opAndType{ir.OOR, types.TINT8}: ssa.OpOr8,
+ opAndType{ir.OOR, types.TUINT8}: ssa.OpOr8,
+ opAndType{ir.OOR, types.TINT16}: ssa.OpOr16,
+ opAndType{ir.OOR, types.TUINT16}: ssa.OpOr16,
+ opAndType{ir.OOR, types.TINT32}: ssa.OpOr32,
+ opAndType{ir.OOR, types.TUINT32}: ssa.OpOr32,
+ opAndType{ir.OOR, types.TINT64}: ssa.OpOr64,
+ opAndType{ir.OOR, types.TUINT64}: ssa.OpOr64,
+
+ opAndType{ir.OXOR, types.TINT8}: ssa.OpXor8,
+ opAndType{ir.OXOR, types.TUINT8}: ssa.OpXor8,
+ opAndType{ir.OXOR, types.TINT16}: ssa.OpXor16,
+ opAndType{ir.OXOR, types.TUINT16}: ssa.OpXor16,
+ opAndType{ir.OXOR, types.TINT32}: ssa.OpXor32,
+ opAndType{ir.OXOR, types.TUINT32}: ssa.OpXor32,
+ opAndType{ir.OXOR, types.TINT64}: ssa.OpXor64,
+ opAndType{ir.OXOR, types.TUINT64}: ssa.OpXor64,
+
+ opAndType{ir.OEQ, types.TBOOL}: ssa.OpEqB,
+ opAndType{ir.OEQ, types.TINT8}: ssa.OpEq8,
+ opAndType{ir.OEQ, types.TUINT8}: ssa.OpEq8,
+ opAndType{ir.OEQ, types.TINT16}: ssa.OpEq16,
+ opAndType{ir.OEQ, types.TUINT16}: ssa.OpEq16,
+ opAndType{ir.OEQ, types.TINT32}: ssa.OpEq32,
+ opAndType{ir.OEQ, types.TUINT32}: ssa.OpEq32,
+ opAndType{ir.OEQ, types.TINT64}: ssa.OpEq64,
+ opAndType{ir.OEQ, types.TUINT64}: ssa.OpEq64,
+ opAndType{ir.OEQ, types.TINTER}: ssa.OpEqInter,
+ opAndType{ir.OEQ, types.TSLICE}: ssa.OpEqSlice,
+ opAndType{ir.OEQ, types.TFUNC}: ssa.OpEqPtr,
+ opAndType{ir.OEQ, types.TMAP}: ssa.OpEqPtr,
+ opAndType{ir.OEQ, types.TCHAN}: ssa.OpEqPtr,
+ opAndType{ir.OEQ, types.TPTR}: ssa.OpEqPtr,
+ opAndType{ir.OEQ, types.TUINTPTR}: ssa.OpEqPtr,
+ opAndType{ir.OEQ, types.TUNSAFEPTR}: ssa.OpEqPtr,
+ opAndType{ir.OEQ, types.TFLOAT64}: ssa.OpEq64F,
+ opAndType{ir.OEQ, types.TFLOAT32}: ssa.OpEq32F,
+
+ opAndType{ir.ONE, types.TBOOL}: ssa.OpNeqB,
+ opAndType{ir.ONE, types.TINT8}: ssa.OpNeq8,
+ opAndType{ir.ONE, types.TUINT8}: ssa.OpNeq8,
+ opAndType{ir.ONE, types.TINT16}: ssa.OpNeq16,
+ opAndType{ir.ONE, types.TUINT16}: ssa.OpNeq16,
+ opAndType{ir.ONE, types.TINT32}: ssa.OpNeq32,
+ opAndType{ir.ONE, types.TUINT32}: ssa.OpNeq32,
+ opAndType{ir.ONE, types.TINT64}: ssa.OpNeq64,
+ opAndType{ir.ONE, types.TUINT64}: ssa.OpNeq64,
+ opAndType{ir.ONE, types.TINTER}: ssa.OpNeqInter,
+ opAndType{ir.ONE, types.TSLICE}: ssa.OpNeqSlice,
+ opAndType{ir.ONE, types.TFUNC}: ssa.OpNeqPtr,
+ opAndType{ir.ONE, types.TMAP}: ssa.OpNeqPtr,
+ opAndType{ir.ONE, types.TCHAN}: ssa.OpNeqPtr,
+ opAndType{ir.ONE, types.TPTR}: ssa.OpNeqPtr,
+ opAndType{ir.ONE, types.TUINTPTR}: ssa.OpNeqPtr,
+ opAndType{ir.ONE, types.TUNSAFEPTR}: ssa.OpNeqPtr,
+ opAndType{ir.ONE, types.TFLOAT64}: ssa.OpNeq64F,
+ opAndType{ir.ONE, types.TFLOAT32}: ssa.OpNeq32F,
+
+ opAndType{ir.OLT, types.TINT8}: ssa.OpLess8,
+ opAndType{ir.OLT, types.TUINT8}: ssa.OpLess8U,
+ opAndType{ir.OLT, types.TINT16}: ssa.OpLess16,
+ opAndType{ir.OLT, types.TUINT16}: ssa.OpLess16U,
+ opAndType{ir.OLT, types.TINT32}: ssa.OpLess32,
+ opAndType{ir.OLT, types.TUINT32}: ssa.OpLess32U,
+ opAndType{ir.OLT, types.TINT64}: ssa.OpLess64,
+ opAndType{ir.OLT, types.TUINT64}: ssa.OpLess64U,
+ opAndType{ir.OLT, types.TFLOAT64}: ssa.OpLess64F,
+ opAndType{ir.OLT, types.TFLOAT32}: ssa.OpLess32F,
+
+ opAndType{ir.OLE, types.TINT8}: ssa.OpLeq8,
+ opAndType{ir.OLE, types.TUINT8}: ssa.OpLeq8U,
+ opAndType{ir.OLE, types.TINT16}: ssa.OpLeq16,
+ opAndType{ir.OLE, types.TUINT16}: ssa.OpLeq16U,
+ opAndType{ir.OLE, types.TINT32}: ssa.OpLeq32,
+ opAndType{ir.OLE, types.TUINT32}: ssa.OpLeq32U,
+ opAndType{ir.OLE, types.TINT64}: ssa.OpLeq64,
+ opAndType{ir.OLE, types.TUINT64}: ssa.OpLeq64U,
+ opAndType{ir.OLE, types.TFLOAT64}: ssa.OpLeq64F,
+ opAndType{ir.OLE, types.TFLOAT32}: ssa.OpLeq32F,
+}
+
+func (s *state) concreteEtype(t *types.Type) types.Kind {
+ e := t.Kind()
switch e {
default:
return e
- case TINT:
+ case types.TINT:
if s.config.PtrSize == 8 {
- return TINT64
+ return types.TINT64
}
- return TINT32
- case TUINT:
+ return types.TINT32
+ case types.TUINT:
if s.config.PtrSize == 8 {
- return TUINT64
+ return types.TUINT64
}
- return TUINT32
- case TUINTPTR:
+ return types.TUINT32
+ case types.TUINTPTR:
if s.config.PtrSize == 8 {
- return TUINT64
+ return types.TUINT64
}
- return TUINT32
+ return types.TUINT32
}
}
-func (s *state) ssaOp(op Op, t *types.Type) ssa.Op {
+func (s *state) ssaOp(op ir.Op, t *types.Type) ssa.Op {
etype := s.concreteEtype(t)
x, ok := opToSSA[opAndType{op, etype}]
if !ok {
}
func floatForComplex(t *types.Type) *types.Type {
- switch t.Etype {
- case TCOMPLEX64:
- return types.Types[TFLOAT32]
- case TCOMPLEX128:
- return types.Types[TFLOAT64]
+ switch t.Kind() {
+ case types.TCOMPLEX64:
+ return types.Types[types.TFLOAT32]
+ case types.TCOMPLEX128:
+ return types.Types[types.TFLOAT64]
}
- Fatalf("unexpected type: %v", t)
+ base.Fatalf("unexpected type: %v", t)
return nil
}
func complexForFloat(t *types.Type) *types.Type {
- switch t.Etype {
- case TFLOAT32:
- return types.Types[TCOMPLEX64]
- case TFLOAT64:
- return types.Types[TCOMPLEX128]
+ switch t.Kind() {
+ case types.TFLOAT32:
+ return types.Types[types.TCOMPLEX64]
+ case types.TFLOAT64:
+ return types.Types[types.TCOMPLEX128]
}
- Fatalf("unexpected type: %v", t)
+ base.Fatalf("unexpected type: %v", t)
return nil
}
type opAndTwoTypes struct {
- op Op
- etype1 types.EType
- etype2 types.EType
+ op ir.Op
+ etype1 types.Kind
+ etype2 types.Kind
}
type twoTypes struct {
- etype1 types.EType
- etype2 types.EType
+ etype1 types.Kind
+ etype2 types.Kind
}
type twoOpsAndType struct {
op1 ssa.Op
op2 ssa.Op
- intermediateType types.EType
+ intermediateType types.Kind
}
// fpConvOpToSSA maps a (from, to) kind pair to the two SSA ops that
// implement the conversion, applied in sequence, together with the
// kind of the intermediate value between them. An ssa.OpInvalid entry
// marks a half that is instead expanded into branchy code.
var fpConvOpToSSA = map[twoTypes]twoOpsAndType{
	// signed integer <-> float
	{types.TINT8, types.TFLOAT32}:  {ssa.OpSignExt8to32, ssa.OpCvt32to32F, types.TINT32},
	{types.TINT16, types.TFLOAT32}: {ssa.OpSignExt16to32, ssa.OpCvt32to32F, types.TINT32},
	{types.TINT32, types.TFLOAT32}: {ssa.OpCopy, ssa.OpCvt32to32F, types.TINT32},
	{types.TINT64, types.TFLOAT32}: {ssa.OpCopy, ssa.OpCvt64to32F, types.TINT64},

	{types.TINT8, types.TFLOAT64}:  {ssa.OpSignExt8to32, ssa.OpCvt32to64F, types.TINT32},
	{types.TINT16, types.TFLOAT64}: {ssa.OpSignExt16to32, ssa.OpCvt32to64F, types.TINT32},
	{types.TINT32, types.TFLOAT64}: {ssa.OpCopy, ssa.OpCvt32to64F, types.TINT32},
	{types.TINT64, types.TFLOAT64}: {ssa.OpCopy, ssa.OpCvt64to64F, types.TINT64},

	{types.TFLOAT32, types.TINT8}:  {ssa.OpCvt32Fto32, ssa.OpTrunc32to8, types.TINT32},
	{types.TFLOAT32, types.TINT16}: {ssa.OpCvt32Fto32, ssa.OpTrunc32to16, types.TINT32},
	{types.TFLOAT32, types.TINT32}: {ssa.OpCvt32Fto32, ssa.OpCopy, types.TINT32},
	{types.TFLOAT32, types.TINT64}: {ssa.OpCvt32Fto64, ssa.OpCopy, types.TINT64},

	{types.TFLOAT64, types.TINT8}:  {ssa.OpCvt64Fto32, ssa.OpTrunc32to8, types.TINT32},
	{types.TFLOAT64, types.TINT16}: {ssa.OpCvt64Fto32, ssa.OpTrunc32to16, types.TINT32},
	{types.TFLOAT64, types.TINT32}: {ssa.OpCvt64Fto32, ssa.OpCopy, types.TINT32},
	{types.TFLOAT64, types.TINT64}: {ssa.OpCvt64Fto64, ssa.OpCopy, types.TINT64},
	// unsigned
	{types.TUINT8, types.TFLOAT32}:  {ssa.OpZeroExt8to32, ssa.OpCvt32to32F, types.TINT32},
	{types.TUINT16, types.TFLOAT32}: {ssa.OpZeroExt16to32, ssa.OpCvt32to32F, types.TINT32},
	{types.TUINT32, types.TFLOAT32}: {ssa.OpZeroExt32to64, ssa.OpCvt64to32F, types.TINT64}, // go wide to dodge unsigned
	{types.TUINT64, types.TFLOAT32}: {ssa.OpCopy, ssa.OpInvalid, types.TUINT64},            // Cvt64Uto32F, branchy code expansion instead

	{types.TUINT8, types.TFLOAT64}:  {ssa.OpZeroExt8to32, ssa.OpCvt32to64F, types.TINT32},
	{types.TUINT16, types.TFLOAT64}: {ssa.OpZeroExt16to32, ssa.OpCvt32to64F, types.TINT32},
	{types.TUINT32, types.TFLOAT64}: {ssa.OpZeroExt32to64, ssa.OpCvt64to64F, types.TINT64}, // go wide to dodge unsigned
	{types.TUINT64, types.TFLOAT64}: {ssa.OpCopy, ssa.OpInvalid, types.TUINT64},            // Cvt64Uto64F, branchy code expansion instead

	{types.TFLOAT32, types.TUINT8}:  {ssa.OpCvt32Fto32, ssa.OpTrunc32to8, types.TINT32},
	{types.TFLOAT32, types.TUINT16}: {ssa.OpCvt32Fto32, ssa.OpTrunc32to16, types.TINT32},
	{types.TFLOAT32, types.TUINT32}: {ssa.OpCvt32Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned
	{types.TFLOAT32, types.TUINT64}: {ssa.OpInvalid, ssa.OpCopy, types.TUINT64},          // Cvt32Fto64U, branchy code expansion instead

	{types.TFLOAT64, types.TUINT8}:  {ssa.OpCvt64Fto32, ssa.OpTrunc32to8, types.TINT32},
	{types.TFLOAT64, types.TUINT16}: {ssa.OpCvt64Fto32, ssa.OpTrunc32to16, types.TINT32},
	{types.TFLOAT64, types.TUINT32}: {ssa.OpCvt64Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned
	{types.TFLOAT64, types.TUINT64}: {ssa.OpInvalid, ssa.OpCopy, types.TUINT64},          // Cvt64Fto64U, branchy code expansion instead
	// float <-> float
	{types.TFLOAT64, types.TFLOAT32}: {ssa.OpCvt64Fto32F, ssa.OpCopy, types.TFLOAT32},
	{types.TFLOAT64, types.TFLOAT64}: {ssa.OpRound64F, ssa.OpCopy, types.TFLOAT64},
	{types.TFLOAT32, types.TFLOAT32}: {ssa.OpRound32F, ssa.OpCopy, types.TFLOAT32},
	{types.TFLOAT32, types.TFLOAT64}: {ssa.OpCvt32Fto64F, ssa.OpCopy, types.TFLOAT64},
}
// fpConvOpToSSA32 is used only on 32-bit architectures and holds only
// the entries that differ from fpConvOpToSSA there: on 32-bit targets,
// uint32 conversions must not go through int64<->float.
var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{
	{types.TUINT32, types.TFLOAT32}: {ssa.OpCopy, ssa.OpCvt32Uto32F, types.TUINT32},
	{types.TUINT32, types.TFLOAT64}: {ssa.OpCopy, ssa.OpCvt32Uto64F, types.TUINT32},
	{types.TFLOAT32, types.TUINT32}: {ssa.OpCvt32Fto32U, ssa.OpCopy, types.TUINT32},
	{types.TFLOAT64, types.TUINT32}: {ssa.OpCvt64Fto32U, ssa.OpCopy, types.TUINT32},
}
// uint64fpConvOpToSSA holds the uint64<->float conversions, used only
// on machines with native instructions for them.
var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{
	{types.TUINT64, types.TFLOAT32}: {ssa.OpCopy, ssa.OpCvt64Uto32F, types.TUINT64},
	{types.TUINT64, types.TFLOAT64}: {ssa.OpCopy, ssa.OpCvt64Uto64F, types.TUINT64},
	{types.TFLOAT32, types.TUINT64}: {ssa.OpCvt32Fto64U, ssa.OpCopy, types.TUINT64},
	{types.TFLOAT64, types.TUINT64}: {ssa.OpCvt64Fto64U, ssa.OpCopy, types.TUINT64},
}
var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
- opAndTwoTypes{OLSH, TINT8, TUINT8}: ssa.OpLsh8x8,
- opAndTwoTypes{OLSH, TUINT8, TUINT8}: ssa.OpLsh8x8,
- opAndTwoTypes{OLSH, TINT8, TUINT16}: ssa.OpLsh8x16,
- opAndTwoTypes{OLSH, TUINT8, TUINT16}: ssa.OpLsh8x16,
- opAndTwoTypes{OLSH, TINT8, TUINT32}: ssa.OpLsh8x32,
- opAndTwoTypes{OLSH, TUINT8, TUINT32}: ssa.OpLsh8x32,
- opAndTwoTypes{OLSH, TINT8, TUINT64}: ssa.OpLsh8x64,
- opAndTwoTypes{OLSH, TUINT8, TUINT64}: ssa.OpLsh8x64,
-
- opAndTwoTypes{OLSH, TINT16, TUINT8}: ssa.OpLsh16x8,
- opAndTwoTypes{OLSH, TUINT16, TUINT8}: ssa.OpLsh16x8,
- opAndTwoTypes{OLSH, TINT16, TUINT16}: ssa.OpLsh16x16,
- opAndTwoTypes{OLSH, TUINT16, TUINT16}: ssa.OpLsh16x16,
- opAndTwoTypes{OLSH, TINT16, TUINT32}: ssa.OpLsh16x32,
- opAndTwoTypes{OLSH, TUINT16, TUINT32}: ssa.OpLsh16x32,
- opAndTwoTypes{OLSH, TINT16, TUINT64}: ssa.OpLsh16x64,
- opAndTwoTypes{OLSH, TUINT16, TUINT64}: ssa.OpLsh16x64,
-
- opAndTwoTypes{OLSH, TINT32, TUINT8}: ssa.OpLsh32x8,
- opAndTwoTypes{OLSH, TUINT32, TUINT8}: ssa.OpLsh32x8,
- opAndTwoTypes{OLSH, TINT32, TUINT16}: ssa.OpLsh32x16,
- opAndTwoTypes{OLSH, TUINT32, TUINT16}: ssa.OpLsh32x16,
- opAndTwoTypes{OLSH, TINT32, TUINT32}: ssa.OpLsh32x32,
- opAndTwoTypes{OLSH, TUINT32, TUINT32}: ssa.OpLsh32x32,
- opAndTwoTypes{OLSH, TINT32, TUINT64}: ssa.OpLsh32x64,
- opAndTwoTypes{OLSH, TUINT32, TUINT64}: ssa.OpLsh32x64,
-
- opAndTwoTypes{OLSH, TINT64, TUINT8}: ssa.OpLsh64x8,
- opAndTwoTypes{OLSH, TUINT64, TUINT8}: ssa.OpLsh64x8,
- opAndTwoTypes{OLSH, TINT64, TUINT16}: ssa.OpLsh64x16,
- opAndTwoTypes{OLSH, TUINT64, TUINT16}: ssa.OpLsh64x16,
- opAndTwoTypes{OLSH, TINT64, TUINT32}: ssa.OpLsh64x32,
- opAndTwoTypes{OLSH, TUINT64, TUINT32}: ssa.OpLsh64x32,
- opAndTwoTypes{OLSH, TINT64, TUINT64}: ssa.OpLsh64x64,
- opAndTwoTypes{OLSH, TUINT64, TUINT64}: ssa.OpLsh64x64,
-
- opAndTwoTypes{ORSH, TINT8, TUINT8}: ssa.OpRsh8x8,
- opAndTwoTypes{ORSH, TUINT8, TUINT8}: ssa.OpRsh8Ux8,
- opAndTwoTypes{ORSH, TINT8, TUINT16}: ssa.OpRsh8x16,
- opAndTwoTypes{ORSH, TUINT8, TUINT16}: ssa.OpRsh8Ux16,
- opAndTwoTypes{ORSH, TINT8, TUINT32}: ssa.OpRsh8x32,
- opAndTwoTypes{ORSH, TUINT8, TUINT32}: ssa.OpRsh8Ux32,
- opAndTwoTypes{ORSH, TINT8, TUINT64}: ssa.OpRsh8x64,
- opAndTwoTypes{ORSH, TUINT8, TUINT64}: ssa.OpRsh8Ux64,
-
- opAndTwoTypes{ORSH, TINT16, TUINT8}: ssa.OpRsh16x8,
- opAndTwoTypes{ORSH, TUINT16, TUINT8}: ssa.OpRsh16Ux8,
- opAndTwoTypes{ORSH, TINT16, TUINT16}: ssa.OpRsh16x16,
- opAndTwoTypes{ORSH, TUINT16, TUINT16}: ssa.OpRsh16Ux16,
- opAndTwoTypes{ORSH, TINT16, TUINT32}: ssa.OpRsh16x32,
- opAndTwoTypes{ORSH, TUINT16, TUINT32}: ssa.OpRsh16Ux32,
- opAndTwoTypes{ORSH, TINT16, TUINT64}: ssa.OpRsh16x64,
- opAndTwoTypes{ORSH, TUINT16, TUINT64}: ssa.OpRsh16Ux64,
-
- opAndTwoTypes{ORSH, TINT32, TUINT8}: ssa.OpRsh32x8,
- opAndTwoTypes{ORSH, TUINT32, TUINT8}: ssa.OpRsh32Ux8,
- opAndTwoTypes{ORSH, TINT32, TUINT16}: ssa.OpRsh32x16,
- opAndTwoTypes{ORSH, TUINT32, TUINT16}: ssa.OpRsh32Ux16,
- opAndTwoTypes{ORSH, TINT32, TUINT32}: ssa.OpRsh32x32,
- opAndTwoTypes{ORSH, TUINT32, TUINT32}: ssa.OpRsh32Ux32,
- opAndTwoTypes{ORSH, TINT32, TUINT64}: ssa.OpRsh32x64,
- opAndTwoTypes{ORSH, TUINT32, TUINT64}: ssa.OpRsh32Ux64,
-
- opAndTwoTypes{ORSH, TINT64, TUINT8}: ssa.OpRsh64x8,
- opAndTwoTypes{ORSH, TUINT64, TUINT8}: ssa.OpRsh64Ux8,
- opAndTwoTypes{ORSH, TINT64, TUINT16}: ssa.OpRsh64x16,
- opAndTwoTypes{ORSH, TUINT64, TUINT16}: ssa.OpRsh64Ux16,
- opAndTwoTypes{ORSH, TINT64, TUINT32}: ssa.OpRsh64x32,
- opAndTwoTypes{ORSH, TUINT64, TUINT32}: ssa.OpRsh64Ux32,
- opAndTwoTypes{ORSH, TINT64, TUINT64}: ssa.OpRsh64x64,
- opAndTwoTypes{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64,
-}
-
-func (s *state) ssaShiftOp(op Op, t *types.Type, u *types.Type) ssa.Op {
+ opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT8}: ssa.OpLsh8x8,
+ opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT8}: ssa.OpLsh8x8,
+ opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT16}: ssa.OpLsh8x16,
+ opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT16}: ssa.OpLsh8x16,
+ opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT32}: ssa.OpLsh8x32,
+ opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT32}: ssa.OpLsh8x32,
+ opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT64}: ssa.OpLsh8x64,
+ opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT64}: ssa.OpLsh8x64,
+
+ opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT8}: ssa.OpLsh16x8,
+ opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT8}: ssa.OpLsh16x8,
+ opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT16}: ssa.OpLsh16x16,
+ opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT16}: ssa.OpLsh16x16,
+ opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT32}: ssa.OpLsh16x32,
+ opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT32}: ssa.OpLsh16x32,
+ opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT64}: ssa.OpLsh16x64,
+ opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT64}: ssa.OpLsh16x64,
+
+ opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT8}: ssa.OpLsh32x8,
+ opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT8}: ssa.OpLsh32x8,
+ opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT16}: ssa.OpLsh32x16,
+ opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT16}: ssa.OpLsh32x16,
+ opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT32}: ssa.OpLsh32x32,
+ opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT32}: ssa.OpLsh32x32,
+ opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT64}: ssa.OpLsh32x64,
+ opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT64}: ssa.OpLsh32x64,
+
+ opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT8}: ssa.OpLsh64x8,
+ opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT8}: ssa.OpLsh64x8,
+ opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT16}: ssa.OpLsh64x16,
+ opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT16}: ssa.OpLsh64x16,
+ opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT32}: ssa.OpLsh64x32,
+ opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT32}: ssa.OpLsh64x32,
+ opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT64}: ssa.OpLsh64x64,
+ opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT64}: ssa.OpLsh64x64,
+
+ opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT8}: ssa.OpRsh8x8,
+ opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT8}: ssa.OpRsh8Ux8,
+ opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT16}: ssa.OpRsh8x16,
+ opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT16}: ssa.OpRsh8Ux16,
+ opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT32}: ssa.OpRsh8x32,
+ opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT32}: ssa.OpRsh8Ux32,
+ opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT64}: ssa.OpRsh8x64,
+ opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT64}: ssa.OpRsh8Ux64,
+
+ opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT8}: ssa.OpRsh16x8,
+ opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT8}: ssa.OpRsh16Ux8,
+ opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT16}: ssa.OpRsh16x16,
+ opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT16}: ssa.OpRsh16Ux16,
+ opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT32}: ssa.OpRsh16x32,
+ opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT32}: ssa.OpRsh16Ux32,
+ opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT64}: ssa.OpRsh16x64,
+ opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT64}: ssa.OpRsh16Ux64,
+
+ opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT8}: ssa.OpRsh32x8,
+ opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT8}: ssa.OpRsh32Ux8,
+ opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT16}: ssa.OpRsh32x16,
+ opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT16}: ssa.OpRsh32Ux16,
+ opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT32}: ssa.OpRsh32x32,
+ opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT32}: ssa.OpRsh32Ux32,
+ opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT64}: ssa.OpRsh32x64,
+ opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT64}: ssa.OpRsh32Ux64,
+
+ opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT8}: ssa.OpRsh64x8,
+ opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT8}: ssa.OpRsh64Ux8,
+ opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT16}: ssa.OpRsh64x16,
+ opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT16}: ssa.OpRsh64Ux16,
+ opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT32}: ssa.OpRsh64x32,
+ opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT32}: ssa.OpRsh64Ux32,
+ opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT64}: ssa.OpRsh64x64,
+ opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT64}: ssa.OpRsh64Ux64,
+}
+
+func (s *state) ssaShiftOp(op ir.Op, t *types.Type, u *types.Type) ssa.Op {
etype1 := s.concreteEtype(t)
etype2 := s.concreteEtype(u)
x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
}
// expr converts the expression n to ssa, adds it to s and returns the ssa result.
-func (s *state) expr(n *Node) *ssa.Value {
- if !(n.Op == ONAME || n.Op == OLITERAL && n.Sym != nil) {
+func (s *state) expr(n ir.Node) *ssa.Value {
+ if hasUniquePos(n) {
// ONAMEs and named OLITERALs have the line number
// of the decl, not the use. See issue 14742.
- s.pushLine(n.Pos)
+ s.pushLine(n.Pos())
defer s.popLine()
}
- s.stmtList(n.Ninit)
- switch n.Op {
- case OBYTES2STRTMP:
- slice := s.expr(n.Left)
+ s.stmtList(n.Init())
+ switch n.Op() {
+ case ir.OBYTES2STRTMP:
+ slice := s.expr(n.Left())
ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
- len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
- return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
- case OSTR2BYTESTMP:
- str := s.expr(n.Left)
+ len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice)
+ return s.newValue2(ssa.OpStringMake, n.Type(), ptr, len)
+ case ir.OSTR2BYTESTMP:
+ str := s.expr(n.Left())
ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str)
- len := s.newValue1(ssa.OpStringLen, types.Types[TINT], str)
- return s.newValue3(ssa.OpSliceMake, n.Type, ptr, len, len)
- case OCFUNC:
- aux := n.Left.Sym.Linksym()
- return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb)
- case ONAME:
- if n.Class() == PFUNC {
+ len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], str)
+ return s.newValue3(ssa.OpSliceMake, n.Type(), ptr, len, len)
+ case ir.OCFUNC:
+ aux := n.Left().Sym().Linksym()
+ return s.entryNewValue1A(ssa.OpAddr, n.Type(), aux, s.sb)
+ case ir.OMETHEXPR:
+ n := n.(*ir.MethodExpr)
+ sym := funcsym(n.FuncName().Sym()).Linksym()
+ return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type()), sym, s.sb)
+ case ir.ONAME:
+ if n.Class() == ir.PFUNC {
// "value" of a function is the address of the function's closure
- sym := funcsym(n.Sym).Linksym()
- return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), sym, s.sb)
+ sym := funcsym(n.Sym()).Linksym()
+ return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type()), sym, s.sb)
}
if s.canSSA(n) {
- return s.variable(n, n.Type)
+ return s.variable(n, n.Type())
+ }
+ addr := s.addr(n)
+ return s.load(n.Type(), addr)
+ case ir.ONAMEOFFSET:
+ n := n.(*ir.NameOffsetExpr)
+ if s.canSSAName(n.Name_) && canSSAType(n.Type()) {
+ return s.variable(n, n.Type())
}
addr := s.addr(n)
- return s.load(n.Type, addr)
- case OCLOSUREVAR:
+ return s.load(n.Type(), addr)
+ case ir.OCLOSUREREAD:
addr := s.addr(n)
- return s.load(n.Type, addr)
- case OLITERAL:
- switch u := n.Val().U.(type) {
- case *Mpint:
- i := u.Int64()
- switch n.Type.Size() {
+ return s.load(n.Type(), addr)
+ case ir.ONIL:
+ t := n.Type()
+ switch {
+ case t.IsSlice():
+ return s.constSlice(t)
+ case t.IsInterface():
+ return s.constInterface(t)
+ default:
+ return s.constNil(t)
+ }
+ case ir.OLITERAL:
+ switch u := n.Val(); u.Kind() {
+ case constant.Int:
+ i := ir.IntVal(n.Type(), u)
+ switch n.Type().Size() {
case 1:
- return s.constInt8(n.Type, int8(i))
+ return s.constInt8(n.Type(), int8(i))
case 2:
- return s.constInt16(n.Type, int16(i))
+ return s.constInt16(n.Type(), int16(i))
case 4:
- return s.constInt32(n.Type, int32(i))
+ return s.constInt32(n.Type(), int32(i))
case 8:
- return s.constInt64(n.Type, i)
+ return s.constInt64(n.Type(), i)
default:
- s.Fatalf("bad integer size %d", n.Type.Size())
+ s.Fatalf("bad integer size %d", n.Type().Size())
return nil
}
- case string:
- if u == "" {
- return s.constEmptyString(n.Type)
+ case constant.String:
+ i := constant.StringVal(u)
+ if i == "" {
+ return s.constEmptyString(n.Type())
}
- return s.entryNewValue0A(ssa.OpConstString, n.Type, u)
- case bool:
- return s.constBool(u)
- case *NilVal:
- t := n.Type
- switch {
- case t.IsSlice():
- return s.constSlice(t)
- case t.IsInterface():
- return s.constInterface(t)
- default:
- return s.constNil(t)
- }
- case *Mpflt:
- switch n.Type.Size() {
+ return s.entryNewValue0A(ssa.OpConstString, n.Type(), ssa.StringToAux(i))
+ case constant.Bool:
+ return s.constBool(constant.BoolVal(u))
+ case constant.Float:
+ f, _ := constant.Float64Val(u)
+ switch n.Type().Size() {
case 4:
- return s.constFloat32(n.Type, u.Float32())
+ return s.constFloat32(n.Type(), f)
case 8:
- return s.constFloat64(n.Type, u.Float64())
+ return s.constFloat64(n.Type(), f)
default:
- s.Fatalf("bad float size %d", n.Type.Size())
+ s.Fatalf("bad float size %d", n.Type().Size())
return nil
}
- case *Mpcplx:
- r := &u.Real
- i := &u.Imag
- switch n.Type.Size() {
+ case constant.Complex:
+ re, _ := constant.Float64Val(constant.Real(u))
+ im, _ := constant.Float64Val(constant.Imag(u))
+ switch n.Type().Size() {
case 8:
- pt := types.Types[TFLOAT32]
- return s.newValue2(ssa.OpComplexMake, n.Type,
- s.constFloat32(pt, r.Float32()),
- s.constFloat32(pt, i.Float32()))
+ pt := types.Types[types.TFLOAT32]
+ return s.newValue2(ssa.OpComplexMake, n.Type(),
+ s.constFloat32(pt, re),
+ s.constFloat32(pt, im))
case 16:
- pt := types.Types[TFLOAT64]
- return s.newValue2(ssa.OpComplexMake, n.Type,
- s.constFloat64(pt, r.Float64()),
- s.constFloat64(pt, i.Float64()))
+ pt := types.Types[types.TFLOAT64]
+ return s.newValue2(ssa.OpComplexMake, n.Type(),
+ s.constFloat64(pt, re),
+ s.constFloat64(pt, im))
default:
- s.Fatalf("bad float size %d", n.Type.Size())
+ s.Fatalf("bad complex size %d", n.Type().Size())
return nil
}
-
default:
- s.Fatalf("unhandled OLITERAL %v", n.Val().Ctype())
+ s.Fatalf("unhandled OLITERAL %v", u.Kind())
return nil
}
- case OCONVNOP:
- to := n.Type
- from := n.Left.Type
+ case ir.OCONVNOP:
+ to := n.Type()
+ from := n.Left().Type()
// Assume everything will work out, so set up our return value.
// Anything interesting that happens from here is a fatal.
- x := s.expr(n.Left)
+ x := s.expr(n.Left())
+ if to == from {
+ return x
+ }
// Special case for not confusing GC and liveness.
// We don't want pointers accidentally classified
v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type
// CONVNOP closure
- if to.Etype == TFUNC && from.IsPtrShaped() {
+ if to.Kind() == types.TFUNC && from.IsPtrShaped() {
return v
}
// named <--> unnamed type or typed <--> untyped const
- if from.Etype == to.Etype {
+ if from.Kind() == to.Kind() {
return v
}
}
// map <--> *hmap
- if to.Etype == TMAP && from.IsPtr() &&
+ if to.Kind() == types.TMAP && from.IsPtr() &&
to.MapType().Hmap == from.Elem() {
return v
}
s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width)
return nil
}
- if etypesign(from.Etype) != etypesign(to.Etype) {
- s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Etype, to, to.Etype)
+ if etypesign(from.Kind()) != etypesign(to.Kind()) {
+ s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Kind(), to, to.Kind())
return nil
}
return v
}
- if etypesign(from.Etype) == 0 {
+ if etypesign(from.Kind()) == 0 {
s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
return nil
}
// integer, same width, same sign
return v
- case OCONV:
- x := s.expr(n.Left)
- ft := n.Left.Type // from type
- tt := n.Type // to type
- if ft.IsBoolean() && tt.IsKind(TUINT8) {
+ case ir.OCONV:
+ x := s.expr(n.Left())
+ ft := n.Left().Type() // from type
+ tt := n.Type() // to type
+ if ft.IsBoolean() && tt.IsKind(types.TUINT8) {
// Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
- return s.newValue1(ssa.OpCopy, n.Type, x)
+ return s.newValue1(ssa.OpCopy, n.Type(), x)
}
if ft.IsInteger() && tt.IsInteger() {
var op ssa.Op
s.Fatalf("weird integer sign extension %v -> %v", ft, tt)
}
}
- return s.newValue1(op, n.Type, x)
+ return s.newValue1(op, n.Type(), x)
}
if ft.IsFloat() || tt.IsFloat() {
if op2 == ssa.OpCopy {
return x
}
- return s.newValueOrSfCall1(op2, n.Type, x)
+ return s.newValueOrSfCall1(op2, n.Type(), x)
}
if op2 == ssa.OpCopy {
- return s.newValueOrSfCall1(op1, n.Type, x)
+ return s.newValueOrSfCall1(op1, n.Type(), x)
}
- return s.newValueOrSfCall1(op2, n.Type, s.newValueOrSfCall1(op1, types.Types[it], x))
+ return s.newValueOrSfCall1(op2, n.Type(), s.newValueOrSfCall1(op1, types.Types[it], x))
}
// Tricky 64-bit unsigned cases.
if ft.IsInteger() {
s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x)))
}
- s.Fatalf("unhandled OCONV %s -> %s", n.Left.Type.Etype, n.Type.Etype)
+ s.Fatalf("unhandled OCONV %s -> %s", n.Left().Type().Kind(), n.Type().Kind())
return nil
- case ODOTTYPE:
+ case ir.ODOTTYPE:
+ n := n.(*ir.TypeAssertExpr)
res, _ := s.dottype(n, false)
return res
// binary ops
- case OLT, OEQ, ONE, OLE, OGE, OGT:
- a := s.expr(n.Left)
- b := s.expr(n.Right)
- if n.Left.Type.IsComplex() {
- pt := floatForComplex(n.Left.Type)
- op := s.ssaOp(OEQ, pt)
- r := s.newValueOrSfCall2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
- i := s.newValueOrSfCall2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
- c := s.newValue2(ssa.OpAndB, types.Types[TBOOL], r, i)
- switch n.Op {
- case OEQ:
+ case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
+ a := s.expr(n.Left())
+ b := s.expr(n.Right())
+ if n.Left().Type().IsComplex() {
+ pt := floatForComplex(n.Left().Type())
+ op := s.ssaOp(ir.OEQ, pt)
+ r := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
+ i := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
+ c := s.newValue2(ssa.OpAndB, types.Types[types.TBOOL], r, i)
+ switch n.Op() {
+ case ir.OEQ:
return c
- case ONE:
- return s.newValue1(ssa.OpNot, types.Types[TBOOL], c)
+ case ir.ONE:
+ return s.newValue1(ssa.OpNot, types.Types[types.TBOOL], c)
default:
- s.Fatalf("ordered complex compare %v", n.Op)
+ s.Fatalf("ordered complex compare %v", n.Op())
}
}
// Convert OGE and OGT into OLE and OLT.
- op := n.Op
+ op := n.Op()
switch op {
- case OGE:
- op, a, b = OLE, b, a
- case OGT:
- op, a, b = OLT, b, a
+ case ir.OGE:
+ op, a, b = ir.OLE, b, a
+ case ir.OGT:
+ op, a, b = ir.OLT, b, a
}
- if n.Left.Type.IsFloat() {
+ if n.Left().Type().IsFloat() {
// float comparison
- return s.newValueOrSfCall2(s.ssaOp(op, n.Left.Type), types.Types[TBOOL], a, b)
+ return s.newValueOrSfCall2(s.ssaOp(op, n.Left().Type()), types.Types[types.TBOOL], a, b)
}
// integer comparison
- return s.newValue2(s.ssaOp(op, n.Left.Type), types.Types[TBOOL], a, b)
- case OMUL:
- a := s.expr(n.Left)
- b := s.expr(n.Right)
- if n.Type.IsComplex() {
+ return s.newValue2(s.ssaOp(op, n.Left().Type()), types.Types[types.TBOOL], a, b)
+ case ir.OMUL:
+ a := s.expr(n.Left())
+ b := s.expr(n.Right())
+ if n.Type().IsComplex() {
mulop := ssa.OpMul64F
addop := ssa.OpAdd64F
subop := ssa.OpSub64F
- pt := floatForComplex(n.Type) // Could be Float32 or Float64
- wt := types.Types[TFLOAT64] // Compute in Float64 to minimize cancellation error
+ pt := floatForComplex(n.Type()) // Could be Float32 or Float64
+ wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error
areal := s.newValue1(ssa.OpComplexReal, pt, a)
breal := s.newValue1(ssa.OpComplexReal, pt, b)
ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
}
- return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
+ return s.newValue2(ssa.OpComplexMake, n.Type(), xreal, ximag)
}
- if n.Type.IsFloat() {
- return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
+ if n.Type().IsFloat() {
+ return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
}
- return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
+ return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
- case ODIV:
- a := s.expr(n.Left)
- b := s.expr(n.Right)
- if n.Type.IsComplex() {
+ case ir.ODIV:
+ a := s.expr(n.Left())
+ b := s.expr(n.Right())
+ if n.Type().IsComplex() {
// TODO this is not executed because the front-end substitutes a runtime call.
// That probably ought to change; with modest optimization the widen/narrow
// conversions could all be elided in larger expression trees.
addop := ssa.OpAdd64F
subop := ssa.OpSub64F
divop := ssa.OpDiv64F
- pt := floatForComplex(n.Type) // Could be Float32 or Float64
- wt := types.Types[TFLOAT64] // Compute in Float64 to minimize cancellation error
+ pt := floatForComplex(n.Type()) // Could be Float32 or Float64
+ wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error
areal := s.newValue1(ssa.OpComplexReal, pt, a)
breal := s.newValue1(ssa.OpComplexReal, pt, b)
xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
}
- return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
+ return s.newValue2(ssa.OpComplexMake, n.Type(), xreal, ximag)
}
- if n.Type.IsFloat() {
- return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
+ if n.Type().IsFloat() {
+ return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
}
return s.intDivide(n, a, b)
- case OMOD:
- a := s.expr(n.Left)
- b := s.expr(n.Right)
+ case ir.OMOD:
+ a := s.expr(n.Left())
+ b := s.expr(n.Right())
return s.intDivide(n, a, b)
- case OADD, OSUB:
- a := s.expr(n.Left)
- b := s.expr(n.Right)
- if n.Type.IsComplex() {
- pt := floatForComplex(n.Type)
- op := s.ssaOp(n.Op, pt)
- return s.newValue2(ssa.OpComplexMake, n.Type,
+ case ir.OADD, ir.OSUB:
+ a := s.expr(n.Left())
+ b := s.expr(n.Right())
+ if n.Type().IsComplex() {
+ pt := floatForComplex(n.Type())
+ op := s.ssaOp(n.Op(), pt)
+ return s.newValue2(ssa.OpComplexMake, n.Type(),
s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)),
s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)))
}
- if n.Type.IsFloat() {
- return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
- }
- return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
- case OAND, OOR, OXOR:
- a := s.expr(n.Left)
- b := s.expr(n.Right)
- return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
- case OANDNOT:
- a := s.expr(n.Left)
- b := s.expr(n.Right)
- b = s.newValue1(s.ssaOp(OBITNOT, b.Type), b.Type, b)
- return s.newValue2(s.ssaOp(OAND, n.Type), a.Type, a, b)
- case OLSH, ORSH:
- a := s.expr(n.Left)
- b := s.expr(n.Right)
+ if n.Type().IsFloat() {
+ return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
+ }
+ return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
+ case ir.OAND, ir.OOR, ir.OXOR:
+ a := s.expr(n.Left())
+ b := s.expr(n.Right())
+ return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
+ case ir.OANDNOT:
+ a := s.expr(n.Left())
+ b := s.expr(n.Right())
+ b = s.newValue1(s.ssaOp(ir.OBITNOT, b.Type), b.Type, b)
+ return s.newValue2(s.ssaOp(ir.OAND, n.Type()), a.Type, a, b)
+ case ir.OLSH, ir.ORSH:
+ a := s.expr(n.Left())
+ b := s.expr(n.Right())
bt := b.Type
if bt.IsSigned() {
- cmp := s.newValue2(s.ssaOp(OLE, bt), types.Types[TBOOL], s.zeroVal(bt), b)
+ cmp := s.newValue2(s.ssaOp(ir.OLE, bt), types.Types[types.TBOOL], s.zeroVal(bt), b)
s.check(cmp, panicshift)
bt = bt.ToUnsigned()
}
- return s.newValue2(s.ssaShiftOp(n.Op, n.Type, bt), a.Type, a, b)
- case OANDAND, OOROR:
+ return s.newValue2(s.ssaShiftOp(n.Op(), n.Type(), bt), a.Type, a, b)
+ case ir.OANDAND, ir.OOROR:
// To implement OANDAND (and OOROR), we introduce a
// new temporary variable to hold the result. The
// variable is associated with the OANDAND node in the
// }
// Using var in the subsequent block introduces the
// necessary phi variable.
- el := s.expr(n.Left)
+ el := s.expr(n.Left())
s.vars[n] = el
b := s.endBlock()
bRight := s.f.NewBlock(ssa.BlockPlain)
bResult := s.f.NewBlock(ssa.BlockPlain)
- if n.Op == OANDAND {
+ if n.Op() == ir.OANDAND {
b.AddEdgeTo(bRight)
b.AddEdgeTo(bResult)
- } else if n.Op == OOROR {
+ } else if n.Op() == ir.OOROR {
b.AddEdgeTo(bResult)
b.AddEdgeTo(bRight)
}
s.startBlock(bRight)
- er := s.expr(n.Right)
+ er := s.expr(n.Right())
s.vars[n] = er
b = s.endBlock()
b.AddEdgeTo(bResult)
s.startBlock(bResult)
- return s.variable(n, types.Types[TBOOL])
- case OCOMPLEX:
- r := s.expr(n.Left)
- i := s.expr(n.Right)
- return s.newValue2(ssa.OpComplexMake, n.Type, r, i)
+ return s.variable(n, types.Types[types.TBOOL])
+ case ir.OCOMPLEX:
+ r := s.expr(n.Left())
+ i := s.expr(n.Right())
+ return s.newValue2(ssa.OpComplexMake, n.Type(), r, i)
// unary ops
- case ONEG:
- a := s.expr(n.Left)
- if n.Type.IsComplex() {
- tp := floatForComplex(n.Type)
- negop := s.ssaOp(n.Op, tp)
- return s.newValue2(ssa.OpComplexMake, n.Type,
+ case ir.ONEG:
+ a := s.expr(n.Left())
+ if n.Type().IsComplex() {
+ tp := floatForComplex(n.Type())
+ negop := s.ssaOp(n.Op(), tp)
+ return s.newValue2(ssa.OpComplexMake, n.Type(),
s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)),
s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a)))
}
- return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
- case ONOT, OBITNOT:
- a := s.expr(n.Left)
- return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
- case OIMAG, OREAL:
- a := s.expr(n.Left)
- return s.newValue1(s.ssaOp(n.Op, n.Left.Type), n.Type, a)
- case OPLUS:
- return s.expr(n.Left)
+ return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a)
+ case ir.ONOT, ir.OBITNOT:
+ a := s.expr(n.Left())
+ return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a)
+ case ir.OIMAG, ir.OREAL:
+ a := s.expr(n.Left())
+ return s.newValue1(s.ssaOp(n.Op(), n.Left().Type()), n.Type(), a)
+ case ir.OPLUS:
+ return s.expr(n.Left())
- case OADDR:
- return s.addr(n.Left)
+ case ir.OADDR:
+ return s.addr(n.Left())
- case ORESULT:
+ case ir.ORESULT:
if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall {
// Do the old thing
- addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset)
- return s.rawLoad(n.Type, addr)
+ addr := s.constOffPtrSP(types.NewPtr(n.Type()), n.Offset())
+ return s.rawLoad(n.Type(), addr)
}
- which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Xoffset)
+ which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Offset())
if which == -1 {
// Do the old thing // TODO: Panic instead.
- addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset)
- return s.rawLoad(n.Type, addr)
+ addr := s.constOffPtrSP(types.NewPtr(n.Type()), n.Offset())
+ return s.rawLoad(n.Type(), addr)
}
- if canSSAType(n.Type) {
- return s.newValue1I(ssa.OpSelectN, n.Type, which, s.prevCall)
+ if canSSAType(n.Type()) {
+ return s.newValue1I(ssa.OpSelectN, n.Type(), which, s.prevCall)
} else {
- addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(n.Type), which, s.prevCall)
- return s.rawLoad(n.Type, addr)
+ addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(n.Type()), which, s.prevCall)
+ return s.rawLoad(n.Type(), addr)
}
- case ODEREF:
- p := s.exprPtr(n.Left, n.Bounded(), n.Pos)
- return s.load(n.Type, p)
+ case ir.ODEREF:
+ p := s.exprPtr(n.Left(), n.Bounded(), n.Pos())
+ return s.load(n.Type(), p)
- case ODOT:
- if n.Left.Op == OSTRUCTLIT {
+ case ir.ODOT:
+ n := n.(*ir.SelectorExpr)
+ if n.Left().Op() == ir.OSTRUCTLIT {
// All literals with nonzero fields have already been
// rewritten during walk. Any that remain are just T{}
// or equivalents. Use the zero value.
- if !isZero(n.Left) {
- s.Fatalf("literal with nonzero value in SSA: %v", n.Left)
+ if !isZero(n.Left()) {
+ s.Fatalf("literal with nonzero value in SSA: %v", n.Left())
}
- return s.zeroVal(n.Type)
+ return s.zeroVal(n.Type())
}
// If n is addressable and can't be represented in
// SSA, then load just the selected field. This
// instrumentation.
if islvalue(n) && !s.canSSA(n) {
p := s.addr(n)
- return s.load(n.Type, p)
+ return s.load(n.Type(), p)
}
- v := s.expr(n.Left)
- return s.newValue1I(ssa.OpStructSelect, n.Type, int64(fieldIdx(n)), v)
+ v := s.expr(n.Left())
+ return s.newValue1I(ssa.OpStructSelect, n.Type(), int64(fieldIdx(n)), v)
- case ODOTPTR:
- p := s.exprPtr(n.Left, n.Bounded(), n.Pos)
- p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type), n.Xoffset, p)
- return s.load(n.Type, p)
+ case ir.ODOTPTR:
+ p := s.exprPtr(n.Left(), n.Bounded(), n.Pos())
+ p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type()), n.Offset(), p)
+ return s.load(n.Type(), p)
- case OINDEX:
+ case ir.OINDEX:
switch {
- case n.Left.Type.IsString():
- if n.Bounded() && Isconst(n.Left, CTSTR) && Isconst(n.Right, CTINT) {
+ case n.Left().Type().IsString():
+ if n.Bounded() && ir.IsConst(n.Left(), constant.String) && ir.IsConst(n.Right(), constant.Int) {
// Replace "abc"[1] with 'b'.
// Delayed until now because "abc"[1] is not an ideal constant.
// See test/fixedbugs/issue11370.go.
- return s.newValue0I(ssa.OpConst8, types.Types[TUINT8], int64(int8(n.Left.StringVal()[n.Right.Int64Val()])))
+ return s.newValue0I(ssa.OpConst8, types.Types[types.TUINT8], int64(int8(ir.StringVal(n.Left())[ir.Int64Val(n.Right())])))
}
- a := s.expr(n.Left)
- i := s.expr(n.Right)
- len := s.newValue1(ssa.OpStringLen, types.Types[TINT], a)
+ a := s.expr(n.Left())
+ i := s.expr(n.Right())
+ len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], a)
i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
ptrtyp := s.f.Config.Types.BytePtr
ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
- if Isconst(n.Right, CTINT) {
- ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64Val(), ptr)
+ if ir.IsConst(n.Right(), constant.Int) {
+ ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, ir.Int64Val(n.Right()), ptr)
} else {
ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
}
- return s.load(types.Types[TUINT8], ptr)
- case n.Left.Type.IsSlice():
+ return s.load(types.Types[types.TUINT8], ptr)
+ case n.Left().Type().IsSlice():
p := s.addr(n)
- return s.load(n.Left.Type.Elem(), p)
- case n.Left.Type.IsArray():
- if canSSAType(n.Left.Type) {
+ return s.load(n.Left().Type().Elem(), p)
+ case n.Left().Type().IsArray():
+ if canSSAType(n.Left().Type()) {
// SSA can handle arrays of length at most 1.
- bound := n.Left.Type.NumElem()
- a := s.expr(n.Left)
- i := s.expr(n.Right)
+ bound := n.Left().Type().NumElem()
+ a := s.expr(n.Left())
+ i := s.expr(n.Right())
if bound == 0 {
// Bounds check will never succeed. Might as well
// use constants for the bounds check.
- z := s.constInt(types.Types[TINT], 0)
+ z := s.constInt(types.Types[types.TINT], 0)
s.boundsCheck(z, z, ssa.BoundsIndex, false)
// The return value won't be live, return junk.
- return s.newValue0(ssa.OpUnknown, n.Type)
+ return s.newValue0(ssa.OpUnknown, n.Type())
}
- len := s.constInt(types.Types[TINT], bound)
+ len := s.constInt(types.Types[types.TINT], bound)
s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded()) // checks i == 0
- return s.newValue1I(ssa.OpArraySelect, n.Type, 0, a)
+ return s.newValue1I(ssa.OpArraySelect, n.Type(), 0, a)
}
p := s.addr(n)
- return s.load(n.Left.Type.Elem(), p)
+ return s.load(n.Left().Type().Elem(), p)
default:
- s.Fatalf("bad type for index %v", n.Left.Type)
+ s.Fatalf("bad type for index %v", n.Left().Type())
return nil
}
- case OLEN, OCAP:
+ case ir.OLEN, ir.OCAP:
+ n := n.(*ir.UnaryExpr)
switch {
- case n.Left.Type.IsSlice():
+ case n.Left().Type().IsSlice():
op := ssa.OpSliceLen
- if n.Op == OCAP {
+ if n.Op() == ir.OCAP {
op = ssa.OpSliceCap
}
- return s.newValue1(op, types.Types[TINT], s.expr(n.Left))
- case n.Left.Type.IsString(): // string; not reachable for OCAP
- return s.newValue1(ssa.OpStringLen, types.Types[TINT], s.expr(n.Left))
- case n.Left.Type.IsMap(), n.Left.Type.IsChan():
- return s.referenceTypeBuiltin(n, s.expr(n.Left))
+ return s.newValue1(op, types.Types[types.TINT], s.expr(n.Left()))
+ case n.Left().Type().IsString(): // string; not reachable for OCAP
+ return s.newValue1(ssa.OpStringLen, types.Types[types.TINT], s.expr(n.Left()))
+ case n.Left().Type().IsMap(), n.Left().Type().IsChan():
+ return s.referenceTypeBuiltin(n, s.expr(n.Left()))
default: // array
- return s.constInt(types.Types[TINT], n.Left.Type.NumElem())
+ return s.constInt(types.Types[types.TINT], n.Left().Type().NumElem())
}
- case OSPTR:
- a := s.expr(n.Left)
- if n.Left.Type.IsSlice() {
- return s.newValue1(ssa.OpSlicePtr, n.Type, a)
+ case ir.OSPTR:
+ a := s.expr(n.Left())
+ if n.Left().Type().IsSlice() {
+ return s.newValue1(ssa.OpSlicePtr, n.Type(), a)
} else {
- return s.newValue1(ssa.OpStringPtr, n.Type, a)
+ return s.newValue1(ssa.OpStringPtr, n.Type(), a)
}
- case OITAB:
- a := s.expr(n.Left)
- return s.newValue1(ssa.OpITab, n.Type, a)
+ case ir.OITAB:
+ a := s.expr(n.Left())
+ return s.newValue1(ssa.OpITab, n.Type(), a)
- case OIDATA:
- a := s.expr(n.Left)
- return s.newValue1(ssa.OpIData, n.Type, a)
+ case ir.OIDATA:
+ a := s.expr(n.Left())
+ return s.newValue1(ssa.OpIData, n.Type(), a)
- case OEFACE:
- tab := s.expr(n.Left)
- data := s.expr(n.Right)
- return s.newValue2(ssa.OpIMake, n.Type, tab, data)
+ case ir.OEFACE:
+ tab := s.expr(n.Left())
+ data := s.expr(n.Right())
+ return s.newValue2(ssa.OpIMake, n.Type(), tab, data)
- case OSLICEHEADER:
- p := s.expr(n.Left)
- l := s.expr(n.List.First())
- c := s.expr(n.List.Second())
- return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c)
+ case ir.OSLICEHEADER:
+ p := s.expr(n.Left())
+ l := s.expr(n.List().First())
+ c := s.expr(n.List().Second())
+ return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c)
- case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR:
- v := s.expr(n.Left)
+ case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR:
+ v := s.expr(n.Left())
var i, j, k *ssa.Value
low, high, max := n.SliceBounds()
if low != nil {
k = s.expr(max)
}
p, l, c := s.slice(v, i, j, k, n.Bounded())
- return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c)
+ return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c)
- case OSLICESTR:
- v := s.expr(n.Left)
+ case ir.OSLICESTR:
+ v := s.expr(n.Left())
var i, j *ssa.Value
low, high, _ := n.SliceBounds()
if low != nil {
j = s.expr(high)
}
p, l, _ := s.slice(v, i, j, nil, n.Bounded())
- return s.newValue2(ssa.OpStringMake, n.Type, p, l)
+ return s.newValue2(ssa.OpStringMake, n.Type(), p, l)
- case OCALLFUNC:
- if isIntrinsicCall(n) {
+ case ir.OCALLFUNC:
+ n := n.(*ir.CallExpr)
+ if IsIntrinsicCall(n) {
return s.intrinsicCall(n)
}
fallthrough
- case OCALLINTER, OCALLMETH:
+ case ir.OCALLINTER, ir.OCALLMETH:
+ n := n.(*ir.CallExpr)
return s.callResult(n, callNormal)
- case OGETG:
- return s.newValue1(ssa.OpGetG, n.Type, s.mem())
+ case ir.OGETG:
+ return s.newValue1(ssa.OpGetG, n.Type(), s.mem())
- case OAPPEND:
- return s.append(n, false)
+ case ir.OAPPEND:
+ return s.append(n.(*ir.CallExpr), false)
- case OSTRUCTLIT, OARRAYLIT:
+ case ir.OSTRUCTLIT, ir.OARRAYLIT:
// All literals with nonzero fields have already been
// rewritten during walk. Any that remain are just T{}
// or equivalents. Use the zero value.
if !isZero(n) {
s.Fatalf("literal with nonzero value in SSA: %v", n)
}
- return s.zeroVal(n.Type)
+ return s.zeroVal(n.Type())
- case ONEWOBJ:
- if n.Type.Elem().Size() == 0 {
- return s.newValue1A(ssa.OpAddr, n.Type, zerobaseSym, s.sb)
+ case ir.ONEWOBJ:
+ if n.Type().Elem().Size() == 0 {
+ return s.newValue1A(ssa.OpAddr, n.Type(), zerobaseSym, s.sb)
}
- typ := s.expr(n.Left)
- vv := s.rtcall(newobject, true, []*types.Type{n.Type}, typ)
+ typ := s.expr(n.Left())
+ vv := s.rtcall(newobject, true, []*types.Type{n.Type()}, typ)
return vv[0]
default:
- s.Fatalf("unhandled expr %v", n.Op)
+ s.Fatalf("unhandled expr %v", n.Op())
return nil
}
}
// If inplace is true, it writes the result of the OAPPEND expression n
// back to the slice being appended to, and returns nil.
// inplace MUST be set to false if the slice can be SSA'd.
-func (s *state) append(n *Node, inplace bool) *ssa.Value {
+func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value {
// If inplace is false, process as expression "append(s, e1, e2, e3)":
//
// ptr, len, cap := s
// *(ptr+len+1) = e2
// *(ptr+len+2) = e3
- et := n.Type.Elem()
+ et := n.Type().Elem()
pt := types.NewPtr(et)
// Evaluate slice
- sn := n.List.First() // the slice node is the first in the list
+ sn := n.List().First() // the slice node is the first in the list
var slice, addr *ssa.Value
if inplace {
addr = s.addr(sn)
- slice = s.load(n.Type, addr)
+ slice = s.load(n.Type(), addr)
} else {
slice = s.expr(sn)
}
assign := s.f.NewBlock(ssa.BlockPlain)
// Decide if we need to grow
- nargs := int64(n.List.Len() - 1)
+ nargs := int64(n.List().Len() - 1)
p := s.newValue1(ssa.OpSlicePtr, pt, slice)
- l := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
- c := s.newValue1(ssa.OpSliceCap, types.Types[TINT], slice)
- nl := s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs))
+ l := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice)
+ c := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], slice)
+ nl := s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], l, s.constInt(types.Types[types.TINT], nargs))
- cmp := s.newValue2(s.ssaOp(OLT, types.Types[TUINT]), types.Types[TBOOL], c, nl)
- s.vars[&ptrVar] = p
+ cmp := s.newValue2(s.ssaOp(ir.OLT, types.Types[types.TUINT]), types.Types[types.TBOOL], c, nl)
+ s.vars[ptrVar] = p
if !inplace {
- s.vars[&newlenVar] = nl
- s.vars[&capVar] = c
+ s.vars[newlenVar] = nl
+ s.vars[capVar] = c
} else {
- s.vars[&lenVar] = l
+ s.vars[lenVar] = l
}
b := s.endBlock()
// Call growslice
s.startBlock(grow)
- taddr := s.expr(n.Left)
- r := s.rtcall(growslice, true, []*types.Type{pt, types.Types[TINT], types.Types[TINT]}, taddr, p, l, c, nl)
+ taddr := s.expr(n.Left())
+ r := s.rtcall(growslice, true, []*types.Type{pt, types.Types[types.TINT], types.Types[types.TINT]}, taddr, p, l, c, nl)
if inplace {
- if sn.Op == ONAME && sn.Class() != PEXTERN {
- // Tell liveness we're about to build a new slice
- s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem())
+ if sn.Op() == ir.ONAME {
+ sn := sn.(*ir.Name)
+ if sn.Class() != ir.PEXTERN {
+ // Tell liveness we're about to build a new slice
+ s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem())
+ }
}
capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, sliceCapOffset, addr)
- s.store(types.Types[TINT], capaddr, r[2])
+ s.store(types.Types[types.TINT], capaddr, r[2])
s.store(pt, addr, r[0])
// load the value we just stored to avoid having to spill it
- s.vars[&ptrVar] = s.load(pt, addr)
- s.vars[&lenVar] = r[1] // avoid a spill in the fast path
+ s.vars[ptrVar] = s.load(pt, addr)
+ s.vars[lenVar] = r[1] // avoid a spill in the fast path
} else {
- s.vars[&ptrVar] = r[0]
- s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], r[1], s.constInt(types.Types[TINT], nargs))
- s.vars[&capVar] = r[2]
+ s.vars[ptrVar] = r[0]
+ s.vars[newlenVar] = s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], r[1], s.constInt(types.Types[types.TINT], nargs))
+ s.vars[capVar] = r[2]
}
b = s.endBlock()
s.startBlock(assign)
if inplace {
- l = s.variable(&lenVar, types.Types[TINT]) // generates phi for len
- nl = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs))
+ l = s.variable(lenVar, types.Types[types.TINT]) // generates phi for len
+ nl = s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], l, s.constInt(types.Types[types.TINT], nargs))
lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, sliceLenOffset, addr)
- s.store(types.Types[TINT], lenaddr, nl)
+ s.store(types.Types[types.TINT], lenaddr, nl)
}
// Evaluate args
store bool
}
args := make([]argRec, 0, nargs)
- for _, n := range n.List.Slice()[1:] {
- if canSSAType(n.Type) {
+ for _, n := range n.List().Slice()[1:] {
+ if canSSAType(n.Type()) {
args = append(args, argRec{v: s.expr(n), store: true})
} else {
v := s.addr(n)
}
}
- p = s.variable(&ptrVar, pt) // generates phi for ptr
+ p = s.variable(ptrVar, pt) // generates phi for ptr
if !inplace {
- nl = s.variable(&newlenVar, types.Types[TINT]) // generates phi for nl
- c = s.variable(&capVar, types.Types[TINT]) // generates phi for cap
+ nl = s.variable(newlenVar, types.Types[types.TINT]) // generates phi for nl
+ c = s.variable(capVar, types.Types[types.TINT]) // generates phi for cap
}
p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l)
for i, arg := range args {
- addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(types.Types[TINT], int64(i)))
+ addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(types.Types[types.TINT], int64(i)))
if arg.store {
s.storeType(et, addr, arg.v, 0, true)
} else {
}
}
- delete(s.vars, &ptrVar)
+ delete(s.vars, ptrVar)
if inplace {
- delete(s.vars, &lenVar)
+ delete(s.vars, lenVar)
return nil
}
- delete(s.vars, &newlenVar)
- delete(s.vars, &capVar)
+ delete(s.vars, newlenVar)
+ delete(s.vars, capVar)
// make result
- return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c)
+ return s.newValue3(ssa.OpSliceMake, n.Type(), p, nl, c)
}
// condBranch evaluates the boolean expression cond and branches to yes
// if cond is true and no if cond is false.
// This function is intended to handle && and || better than just calling
// s.expr(cond) and branching on the result.
-func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) {
- switch cond.Op {
- case OANDAND:
+func (s *state) condBranch(cond ir.Node, yes, no *ssa.Block, likely int8) {
+ switch cond.Op() {
+ case ir.OANDAND:
mid := s.f.NewBlock(ssa.BlockPlain)
- s.stmtList(cond.Ninit)
- s.condBranch(cond.Left, mid, no, max8(likely, 0))
+ s.stmtList(cond.Init())
+ s.condBranch(cond.Left(), mid, no, max8(likely, 0))
s.startBlock(mid)
- s.condBranch(cond.Right, yes, no, likely)
+ s.condBranch(cond.Right(), yes, no, likely)
return
// Note: if likely==1, then both recursive calls pass 1.
// If likely==-1, then we don't have enough information to decide
// the likeliness of the first branch.
// TODO: have the frontend give us branch prediction hints for
// OANDAND and OOROR nodes (if it ever has such info).
- case OOROR:
+ case ir.OOROR:
mid := s.f.NewBlock(ssa.BlockPlain)
- s.stmtList(cond.Ninit)
- s.condBranch(cond.Left, yes, mid, min8(likely, 0))
+ s.stmtList(cond.Init())
+ s.condBranch(cond.Left(), yes, mid, min8(likely, 0))
s.startBlock(mid)
- s.condBranch(cond.Right, yes, no, likely)
+ s.condBranch(cond.Right(), yes, no, likely)
return
// Note: if likely==-1, then both recursive calls pass -1.
// If likely==1, then we don't have enough info to decide
// the likelihood of the first branch.
- case ONOT:
- s.stmtList(cond.Ninit)
- s.condBranch(cond.Left, no, yes, -likely)
+ case ir.ONOT:
+ s.stmtList(cond.Init())
+ s.condBranch(cond.Left(), no, yes, -likely)
+ return
+ case ir.OCONVNOP:
+ s.stmtList(cond.Init())
+ s.condBranch(cond.Left(), yes, no, likely)
return
}
c := s.expr(cond)
// If deref is true, then we do left = *right instead (and right has already been nil-checked).
// If deref is true and right == nil, just do left = 0.
// skip indicates assignments (at the top level) that can be avoided.
-func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask) {
- if left.Op == ONAME && left.isBlank() {
+func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask) {
+ if left.Op() == ir.ONAME && ir.IsBlank(left) {
return
}
- t := left.Type
+ t := left.Type()
dowidth(t)
if s.canSSA(left) {
if deref {
s.Fatalf("can SSA LHS %v but not RHS %s", left, right)
}
- if left.Op == ODOT {
+ if left.Op() == ir.ODOT {
// We're assigning to a field of an ssa-able value.
// We need to build a new structure with the new value for the
// field we're assigning and the old values for the other fields.
// For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c}
// Grab information about the structure type.
- t := left.Left.Type
+ left := left.(*ir.SelectorExpr)
+ t := left.Left().Type()
nf := t.NumFields()
idx := fieldIdx(left)
// Grab old value of structure.
- old := s.expr(left.Left)
+ old := s.expr(left.Left())
// Make new structure.
new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t)
}
// Recursively assign the new value we've made to the base of the dot op.
- s.assign(left.Left, new, false, 0)
+ s.assign(left.Left(), new, false, 0)
// TODO: do we need to update named values here?
return
}
- if left.Op == OINDEX && left.Left.Type.IsArray() {
- s.pushLine(left.Pos)
+ if left.Op() == ir.OINDEX && left.(*ir.IndexExpr).Left().Type().IsArray() {
+ s.pushLine(left.Pos())
defer s.popLine()
// We're assigning to an element of an ssa-able array.
// a[i] = v
- t := left.Left.Type
+ t := left.Left().Type()
n := t.NumElem()
- i := s.expr(left.Right) // index
+ i := s.expr(left.Right()) // index
if n == 0 {
// The bounds check must fail. Might as well
// ignore the actual index and just use zeros.
- z := s.constInt(types.Types[TINT], 0)
+ z := s.constInt(types.Types[types.TINT], 0)
s.boundsCheck(z, z, ssa.BoundsIndex, false)
return
}
s.Fatalf("assigning to non-1-length array")
}
// Rewrite to a = [1]{v}
- len := s.constInt(types.Types[TINT], 1)
+ len := s.constInt(types.Types[types.TINT], 1)
s.boundsCheck(i, len, ssa.BoundsIndex, false) // checks i == 0
v := s.newValue1(ssa.OpArrayMake1, t, right)
- s.assign(left.Left, v, false, 0)
+ s.assign(left.Left(), v, false, 0)
return
}
+ left := left.(*ir.Name)
// Update variable assignment.
s.vars[left] = right
s.addNamedValue(left, right)
// If this assignment clobbers an entire local variable, then emit
// OpVarDef so liveness analysis knows the variable is redefined.
- if base := clobberBase(left); base.Op == ONAME && base.Class() != PEXTERN && skip == 0 {
- s.vars[&memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base, s.mem(), !base.IsAutoTmp())
+ if base := clobberBase(left); base.Op() == ir.ONAME && base.(*ir.Name).Class() != ir.PEXTERN && skip == 0 {
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base.(*ir.Name), s.mem(), !ir.IsAutoTmp(base))
}
// Left is not ssa-able. Compute its address.
// is valid, even though they have type uintptr (#19168).
// Mark it pointer type to signal the writebarrier pass to
// insert a write barrier.
- t = types.Types[TUNSAFEPTR]
+ t = types.Types[types.TUNSAFEPTR]
}
if deref {
// Treat as a mem->mem move.
return
}
// Treat as a store.
- s.storeType(t, addr, right, skip, !left.IsAutoTmp())
+ s.storeType(t, addr, right, skip, !ir.IsAutoTmp(left))
}
// zeroVal returns the zero value for type t.
case t.IsComplex():
switch t.Size() {
case 8:
- z := s.constFloat32(types.Types[TFLOAT32], 0)
+ z := s.constFloat32(types.Types[types.TFLOAT32], 0)
return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
case 16:
- z := s.constFloat64(types.Types[TFLOAT64], 0)
+ z := s.constFloat64(types.Types[types.TFLOAT64], 0)
return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
default:
s.Fatalf("bad sized complex type %v", t)
type sfRtCallDef struct {
rtfn *obj.LSym
- rtype types.EType
+ rtype types.Kind
}
var softFloatOps map[ssa.Op]sfRtCallDef
func softfloatInit() {
// Some of these operations get transformed by sfcall.
softFloatOps = map[ssa.Op]sfRtCallDef{
- ssa.OpAdd32F: sfRtCallDef{sysfunc("fadd32"), TFLOAT32},
- ssa.OpAdd64F: sfRtCallDef{sysfunc("fadd64"), TFLOAT64},
- ssa.OpSub32F: sfRtCallDef{sysfunc("fadd32"), TFLOAT32},
- ssa.OpSub64F: sfRtCallDef{sysfunc("fadd64"), TFLOAT64},
- ssa.OpMul32F: sfRtCallDef{sysfunc("fmul32"), TFLOAT32},
- ssa.OpMul64F: sfRtCallDef{sysfunc("fmul64"), TFLOAT64},
- ssa.OpDiv32F: sfRtCallDef{sysfunc("fdiv32"), TFLOAT32},
- ssa.OpDiv64F: sfRtCallDef{sysfunc("fdiv64"), TFLOAT64},
-
- ssa.OpEq64F: sfRtCallDef{sysfunc("feq64"), TBOOL},
- ssa.OpEq32F: sfRtCallDef{sysfunc("feq32"), TBOOL},
- ssa.OpNeq64F: sfRtCallDef{sysfunc("feq64"), TBOOL},
- ssa.OpNeq32F: sfRtCallDef{sysfunc("feq32"), TBOOL},
- ssa.OpLess64F: sfRtCallDef{sysfunc("fgt64"), TBOOL},
- ssa.OpLess32F: sfRtCallDef{sysfunc("fgt32"), TBOOL},
- ssa.OpLeq64F: sfRtCallDef{sysfunc("fge64"), TBOOL},
- ssa.OpLeq32F: sfRtCallDef{sysfunc("fge32"), TBOOL},
-
- ssa.OpCvt32to32F: sfRtCallDef{sysfunc("fint32to32"), TFLOAT32},
- ssa.OpCvt32Fto32: sfRtCallDef{sysfunc("f32toint32"), TINT32},
- ssa.OpCvt64to32F: sfRtCallDef{sysfunc("fint64to32"), TFLOAT32},
- ssa.OpCvt32Fto64: sfRtCallDef{sysfunc("f32toint64"), TINT64},
- ssa.OpCvt64Uto32F: sfRtCallDef{sysfunc("fuint64to32"), TFLOAT32},
- ssa.OpCvt32Fto64U: sfRtCallDef{sysfunc("f32touint64"), TUINT64},
- ssa.OpCvt32to64F: sfRtCallDef{sysfunc("fint32to64"), TFLOAT64},
- ssa.OpCvt64Fto32: sfRtCallDef{sysfunc("f64toint32"), TINT32},
- ssa.OpCvt64to64F: sfRtCallDef{sysfunc("fint64to64"), TFLOAT64},
- ssa.OpCvt64Fto64: sfRtCallDef{sysfunc("f64toint64"), TINT64},
- ssa.OpCvt64Uto64F: sfRtCallDef{sysfunc("fuint64to64"), TFLOAT64},
- ssa.OpCvt64Fto64U: sfRtCallDef{sysfunc("f64touint64"), TUINT64},
- ssa.OpCvt32Fto64F: sfRtCallDef{sysfunc("f32to64"), TFLOAT64},
- ssa.OpCvt64Fto32F: sfRtCallDef{sysfunc("f64to32"), TFLOAT32},
+ ssa.OpAdd32F: sfRtCallDef{sysfunc("fadd32"), types.TFLOAT32},
+ ssa.OpAdd64F: sfRtCallDef{sysfunc("fadd64"), types.TFLOAT64},
+ ssa.OpSub32F: sfRtCallDef{sysfunc("fadd32"), types.TFLOAT32},
+ ssa.OpSub64F: sfRtCallDef{sysfunc("fadd64"), types.TFLOAT64},
+ ssa.OpMul32F: sfRtCallDef{sysfunc("fmul32"), types.TFLOAT32},
+ ssa.OpMul64F: sfRtCallDef{sysfunc("fmul64"), types.TFLOAT64},
+ ssa.OpDiv32F: sfRtCallDef{sysfunc("fdiv32"), types.TFLOAT32},
+ ssa.OpDiv64F: sfRtCallDef{sysfunc("fdiv64"), types.TFLOAT64},
+
+ ssa.OpEq64F: sfRtCallDef{sysfunc("feq64"), types.TBOOL},
+ ssa.OpEq32F: sfRtCallDef{sysfunc("feq32"), types.TBOOL},
+ ssa.OpNeq64F: sfRtCallDef{sysfunc("feq64"), types.TBOOL},
+ ssa.OpNeq32F: sfRtCallDef{sysfunc("feq32"), types.TBOOL},
+ ssa.OpLess64F: sfRtCallDef{sysfunc("fgt64"), types.TBOOL},
+ ssa.OpLess32F: sfRtCallDef{sysfunc("fgt32"), types.TBOOL},
+ ssa.OpLeq64F: sfRtCallDef{sysfunc("fge64"), types.TBOOL},
+ ssa.OpLeq32F: sfRtCallDef{sysfunc("fge32"), types.TBOOL},
+
+ ssa.OpCvt32to32F: sfRtCallDef{sysfunc("fint32to32"), types.TFLOAT32},
+ ssa.OpCvt32Fto32: sfRtCallDef{sysfunc("f32toint32"), types.TINT32},
+ ssa.OpCvt64to32F: sfRtCallDef{sysfunc("fint64to32"), types.TFLOAT32},
+ ssa.OpCvt32Fto64: sfRtCallDef{sysfunc("f32toint64"), types.TINT64},
+ ssa.OpCvt64Uto32F: sfRtCallDef{sysfunc("fuint64to32"), types.TFLOAT32},
+ ssa.OpCvt32Fto64U: sfRtCallDef{sysfunc("f32touint64"), types.TUINT64},
+ ssa.OpCvt32to64F: sfRtCallDef{sysfunc("fint32to64"), types.TFLOAT64},
+ ssa.OpCvt64Fto32: sfRtCallDef{sysfunc("f64toint32"), types.TINT32},
+ ssa.OpCvt64to64F: sfRtCallDef{sysfunc("fint64to64"), types.TFLOAT64},
+ ssa.OpCvt64Fto64: sfRtCallDef{sysfunc("f64toint64"), types.TINT64},
+ ssa.OpCvt64Uto64F: sfRtCallDef{sysfunc("fuint64to64"), types.TFLOAT64},
+ ssa.OpCvt64Fto64U: sfRtCallDef{sysfunc("f64touint64"), types.TUINT64},
+ ssa.OpCvt32Fto64F: sfRtCallDef{sysfunc("f32to64"), types.TFLOAT64},
+ ssa.OpCvt64Fto32F: sfRtCallDef{sysfunc("f64to32"), types.TFLOAT32},
}
}
args[0], args[1] = args[1], args[0]
case ssa.OpSub32F,
ssa.OpSub64F:
- args[1] = s.newValue1(s.ssaOp(ONEG, types.Types[callDef.rtype]), args[1].Type, args[1])
+ args[1] = s.newValue1(s.ssaOp(ir.ONEG, types.Types[callDef.rtype]), args[1].Type, args[1])
}
result := s.rtcall(callDef.rtfn, true, []*types.Type{types.Types[callDef.rtype]}, args...)[0]
// An intrinsicBuilder converts a call node n into an ssa value that
// implements that call as an intrinsic. args is a list of arguments to the func.
-type intrinsicBuilder func(s *state, n *Node, args []*ssa.Value) *ssa.Value
+type intrinsicBuilder func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value
type intrinsicKey struct {
arch *sys.Arch
fn string
}
-func init() {
+func initSSATables() {
intrinsics = map[intrinsicKey]intrinsicBuilder{}
var all []*sys.Arch
/******** runtime ********/
if !instrumenting {
add("runtime", "slicebytetostringtmp",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
// Compiler frontend optimizations emit OBYTES2STRTMP nodes
// for the backend instead of slicebytetostringtmp calls
// when not instrumenting.
- return s.newValue2(ssa.OpStringMake, n.Type, args[0], args[1])
+ return s.newValue2(ssa.OpStringMake, n.Type(), args[0], args[1])
},
all...)
}
addF("runtime/internal/math", "MulUintptr",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
- return s.newValue2(ssa.OpMul32uover, types.NewTuple(types.Types[TUINT], types.Types[TUINT]), args[0], args[1])
+ return s.newValue2(ssa.OpMul32uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1])
}
- return s.newValue2(ssa.OpMul64uover, types.NewTuple(types.Types[TUINT], types.Types[TUINT]), args[0], args[1])
+ return s.newValue2(ssa.OpMul64uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1])
},
sys.AMD64, sys.I386, sys.MIPS64)
add("runtime", "KeepAlive",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0])
- s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem())
+ s.vars[memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem())
return nil
},
all...)
add("runtime", "getclosureptr",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue0(ssa.OpGetClosurePtr, s.f.Config.Types.Uintptr)
},
all...)
add("runtime", "getcallerpc",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue0(ssa.OpGetCallerPC, s.f.Config.Types.Uintptr)
},
all...)
add("runtime", "getcallersp",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue0(ssa.OpGetCallerSP, s.f.Config.Types.Uintptr)
},
all...)
/******** runtime/internal/sys ********/
addF("runtime/internal/sys", "Ctz32",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/sys", "Ctz64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/sys", "Bswap32",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBswap32, types.Types[TUINT32], args[0])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBswap32, types.Types[types.TUINT32], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
addF("runtime/internal/sys", "Bswap64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBswap64, types.Types[TUINT64], args[0])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBswap64, types.Types[types.TUINT64], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
/******** runtime/internal/atomic ********/
addF("runtime/internal/atomic", "Load",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Load8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue2(ssa.OpAtomicLoad8, types.NewTuple(types.Types[TUINT8], types.TypeMem), args[0], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TUINT8], v)
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoad8, types.NewTuple(types.Types[types.TUINT8], types.TypeMem), args[0], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT8], v)
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Load64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
},
sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "LoadAcq",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
},
sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "LoadAcq64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue2(ssa.OpAtomicLoadAcq64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoadAcq64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
},
sys.PPC64)
addF("runtime/internal/atomic", "Loadp",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v)
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Store",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem())
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Store8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore8, types.TypeMem, args[0], args[1], s.mem())
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicStore8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Store64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem())
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "StorepNoWB",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem())
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "StoreRel",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicStoreRel32, types.TypeMem, args[0], args[1], s.mem())
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "StoreRel64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicStoreRel64, types.TypeMem, args[0], args[1], s.mem())
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel64, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.PPC64)
addF("runtime/internal/atomic", "Xchg",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
},
sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Xchg64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
},
sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
- type atomicOpEmitter func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType)
+ type atomicOpEmitter func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind)
- makeAtomicGuardedIntrinsicARM64 := func(op0, op1 ssa.Op, typ, rtyp types.EType, emit atomicOpEmitter) intrinsicBuilder {
+ makeAtomicGuardedIntrinsicARM64 := func(op0, op1 ssa.Op, typ, rtyp types.Kind, emit atomicOpEmitter) intrinsicBuilder {
- return func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
// Target Atomic feature is identified by dynamic detection
- addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), arm64HasATOMICS, s.sb)
- v := s.load(types.Types[TBOOL], addr)
+ addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), arm64HasATOMICS, s.sb)
+ v := s.load(types.Types[types.TBOOL], addr)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
// Merge results.
s.startBlock(bEnd)
- if rtyp == TNIL {
+ if rtyp == types.TNIL {
return nil
} else {
return s.variable(n, types.Types[rtyp])
}
}
- atomicXchgXaddEmitterARM64 := func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
+ atomicXchgXaddEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
v := s.newValue3(op, types.NewTuple(types.Types[typ], types.TypeMem), args[0], args[1], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
}
addF("runtime/internal/atomic", "Xchg",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange32, ssa.OpAtomicExchange32Variant, TUINT32, TUINT32, atomicXchgXaddEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange32, ssa.OpAtomicExchange32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Xchg64",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange64, ssa.OpAtomicExchange64Variant, TUINT64, TUINT64, atomicXchgXaddEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange64, ssa.OpAtomicExchange64Variant, types.TUINT64, types.TUINT64, atomicXchgXaddEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Xadd",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
},
sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Xadd64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
},
sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Xadd",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd32, ssa.OpAtomicAdd32Variant, TUINT32, TUINT32, atomicXchgXaddEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd32, ssa.OpAtomicAdd32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Xadd64",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd64, ssa.OpAtomicAdd64Variant, TUINT64, TUINT64, atomicXchgXaddEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd64, ssa.OpAtomicAdd64Variant, types.TUINT64, types.TUINT64, atomicXchgXaddEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Cas",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
},
sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Cas64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
},
sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "CasRel",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
},
sys.PPC64)
- atomicCasEmitterARM64 := func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
- v := s.newValue4(op, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ atomicCasEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
+ v := s.newValue4(op, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
}
addF("runtime/internal/atomic", "Cas",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap32, ssa.OpAtomicCompareAndSwap32Variant, TUINT32, TBOOL, atomicCasEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap32, ssa.OpAtomicCompareAndSwap32Variant, types.TUINT32, types.TBOOL, atomicCasEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Cas64",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap64, ssa.OpAtomicCompareAndSwap64Variant, TUINT64, TBOOL, atomicCasEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap64, ssa.OpAtomicCompareAndSwap64Variant, types.TUINT64, types.TBOOL, atomicCasEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "And8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem())
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "And",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd32, types.TypeMem, args[0], args[1], s.mem())
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "Or8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem())
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "Or",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr32, types.TypeMem, args[0], args[1], s.mem())
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicOr32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X)
- atomicAndOrEmitterARM64 := func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
- s.vars[&memVar] = s.newValue3(op, types.TypeMem, args[0], args[1], s.mem())
+ atomicAndOrEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
+ s.vars[memVar] = s.newValue3(op, types.TypeMem, args[0], args[1], s.mem())
}
addF("runtime/internal/atomic", "And8",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd8, ssa.OpAtomicAnd8Variant, TNIL, TNIL, atomicAndOrEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd8, ssa.OpAtomicAnd8Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "And",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd32, ssa.OpAtomicAnd32Variant, TNIL, TNIL, atomicAndOrEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd32, ssa.OpAtomicAnd32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Or8",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr8, ssa.OpAtomicOr8Variant, TNIL, TNIL, atomicAndOrEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr8, ssa.OpAtomicOr8Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Or",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr32, ssa.OpAtomicOr32Variant, TNIL, TNIL, atomicAndOrEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr32, ssa.OpAtomicOr32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
sys.ARM64)
alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...)
/******** math ********/
addF("math", "Sqrt",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpSqrt, types.Types[TFLOAT64], args[0])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpSqrt, types.Types[types.TFLOAT64], args[0])
},
sys.I386, sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm)
addF("math", "Trunc",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpTrunc, types.Types[TFLOAT64], args[0])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpTrunc, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
addF("math", "Ceil",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpCeil, types.Types[TFLOAT64], args[0])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCeil, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
addF("math", "Floor",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpFloor, types.Types[TFLOAT64], args[0])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpFloor, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
addF("math", "Round",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpRound, types.Types[TFLOAT64], args[0])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpRound, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.PPC64, sys.S390X)
addF("math", "RoundToEven",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpRoundToEven, types.Types[TFLOAT64], args[0])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpRoundToEven, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.S390X, sys.Wasm)
addF("math", "Abs",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpAbs, types.Types[TFLOAT64], args[0])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpAbs, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.ARM, sys.PPC64, sys.Wasm)
addF("math", "Copysign",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue2(ssa.OpCopysign, types.Types[TFLOAT64], args[0], args[1])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpCopysign, types.Types[types.TFLOAT64], args[0], args[1])
},
sys.PPC64, sys.Wasm)
addF("math", "FMA",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue3(ssa.OpFMA, types.Types[TFLOAT64], args[0], args[1], args[2])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
},
sys.ARM64, sys.PPC64, sys.S390X)
addF("math", "FMA",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if !s.config.UseFMA {
s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
- return s.variable(n, types.Types[TFLOAT64])
+ return s.variable(n, types.Types[types.TFLOAT64])
}
- v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[TBOOL], x86HasFMA)
+ v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], x86HasFMA)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
// We have the intrinsic - use it directly.
s.startBlock(bTrue)
- s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[TFLOAT64], args[0], args[1], args[2])
+ s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
s.endBlock().AddEdgeTo(bEnd)
// Call the pure Go version.
// Merge results.
s.startBlock(bEnd)
- return s.variable(n, types.Types[TFLOAT64])
+ return s.variable(n, types.Types[types.TFLOAT64])
},
sys.AMD64)
addF("math", "FMA",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if !s.config.UseFMA {
s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
- return s.variable(n, types.Types[TFLOAT64])
+ return s.variable(n, types.Types[types.TFLOAT64])
}
- addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), armHasVFPv4, s.sb)
- v := s.load(types.Types[TBOOL], addr)
+ addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), armHasVFPv4, s.sb)
+ v := s.load(types.Types[types.TBOOL], addr)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
// We have the intrinsic - use it directly.
s.startBlock(bTrue)
- s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[TFLOAT64], args[0], args[1], args[2])
+ s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
s.endBlock().AddEdgeTo(bEnd)
// Call the pure Go version.
// Merge results.
s.startBlock(bEnd)
- return s.variable(n, types.Types[TFLOAT64])
+ return s.variable(n, types.Types[types.TFLOAT64])
},
sys.ARM)
- makeRoundAMD64 := func(op ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[TBOOL], x86HasSSE41)
+ makeRoundAMD64 := func(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], x86HasSSE41)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
// We have the intrinsic - use it directly.
s.startBlock(bTrue)
- s.vars[n] = s.newValue1(op, types.Types[TFLOAT64], args[0])
+ s.vars[n] = s.newValue1(op, types.Types[types.TFLOAT64], args[0])
s.endBlock().AddEdgeTo(bEnd)
// Call the pure Go version.
// Merge results.
s.startBlock(bEnd)
- return s.variable(n, types.Types[TFLOAT64])
+ return s.variable(n, types.Types[types.TFLOAT64])
}
}
addF("math", "RoundToEven",
/******** math/bits ********/
addF("math/bits", "TrailingZeros64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "TrailingZeros32",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "TrailingZeros16",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0])
- c := s.constInt32(types.Types[TUINT32], 1<<16)
- y := s.newValue2(ssa.OpOr32, types.Types[TUINT32], x, c)
- return s.newValue1(ssa.OpCtz32, types.Types[TINT], y)
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0])
+ c := s.constInt32(types.Types[types.TUINT32], 1<<16)
+ y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c)
+ return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], y)
},
sys.MIPS)
addF("math/bits", "TrailingZeros16",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpCtz16, types.Types[TINT], args[0])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCtz16, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.I386, sys.ARM, sys.ARM64, sys.Wasm)
addF("math/bits", "TrailingZeros16",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0])
- c := s.constInt64(types.Types[TUINT64], 1<<16)
- y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c)
- return s.newValue1(ssa.OpCtz64, types.Types[TINT], y)
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0])
+ c := s.constInt64(types.Types[types.TUINT64], 1<<16)
+ y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c)
+ return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], y)
},
sys.S390X, sys.PPC64)
addF("math/bits", "TrailingZeros8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0])
- c := s.constInt32(types.Types[TUINT32], 1<<8)
- y := s.newValue2(ssa.OpOr32, types.Types[TUINT32], x, c)
- return s.newValue1(ssa.OpCtz32, types.Types[TINT], y)
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0])
+ c := s.constInt32(types.Types[types.TUINT32], 1<<8)
+ y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c)
+ return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], y)
},
sys.MIPS)
addF("math/bits", "TrailingZeros8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpCtz8, types.Types[TINT], args[0])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCtz8, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM, sys.ARM64, sys.Wasm)
addF("math/bits", "TrailingZeros8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0])
- c := s.constInt64(types.Types[TUINT64], 1<<8)
- y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c)
- return s.newValue1(ssa.OpCtz64, types.Types[TINT], y)
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0])
+ c := s.constInt64(types.Types[types.TUINT64], 1<<8)
+ y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c)
+ return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], y)
},
sys.S390X)
alias("math/bits", "ReverseBytes64", "runtime/internal/sys", "Bswap64", all...)
// ReverseBytes inlines correctly, no need to intrinsify it.
// ReverseBytes16 lowers to a rotate, no need for anything special here.
addF("math/bits", "Len64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "Len32",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64)
addF("math/bits", "Len32",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
- return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0])
+ return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
}
- x := s.newValue1(ssa.OpZeroExt32to64, types.Types[TUINT64], args[0])
- return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
+ x := s.newValue1(ssa.OpZeroExt32to64, types.Types[types.TUINT64], args[0])
+ return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
},
sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "Len16",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
- x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0])
- return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x)
+ x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0])
+ return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x)
}
- x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0])
- return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
+ x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0])
+ return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
},
sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "Len16",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBitLen16, types.Types[TINT], args[0])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitLen16, types.Types[types.TINT], args[0])
},
sys.AMD64)
addF("math/bits", "Len8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
- x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0])
- return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x)
+ x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0])
+ return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x)
}
- x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0])
- return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
+ x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0])
+ return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
},
sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "Len8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBitLen8, types.Types[TINT], args[0])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitLen8, types.Types[types.TINT], args[0])
},
sys.AMD64)
addF("math/bits", "Len",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
- return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0])
+ return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
}
- return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0])
+ return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
// LeadingZeros is handled because it trivially calls Len.
addF("math/bits", "Reverse64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse32",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse16",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBitRev16, types.Types[TINT], args[0])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitRev16, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBitRev8, types.Types[TINT], args[0])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitRev8, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
- return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0])
+ return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0])
}
- return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0])
+ return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "RotateLeft8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue2(ssa.OpRotateLeft8, types.Types[TUINT8], args[0], args[1])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpRotateLeft8, types.Types[types.TUINT8], args[0], args[1])
},
sys.AMD64)
addF("math/bits", "RotateLeft16",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue2(ssa.OpRotateLeft16, types.Types[TUINT16], args[0], args[1])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpRotateLeft16, types.Types[types.TUINT16], args[0], args[1])
},
sys.AMD64)
addF("math/bits", "RotateLeft32",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue2(ssa.OpRotateLeft32, types.Types[TUINT32], args[0], args[1])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpRotateLeft32, types.Types[types.TUINT32], args[0], args[1])
},
sys.AMD64, sys.ARM, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
addF("math/bits", "RotateLeft64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue2(ssa.OpRotateLeft64, types.Types[TUINT64], args[0], args[1])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpRotateLeft64, types.Types[types.TUINT64], args[0], args[1])
},
sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
alias("math/bits", "RotateLeft", "math/bits", "RotateLeft64", p8...)
- makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[TBOOL], x86HasPOPCNT)
+ makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], x86HasPOPCNT)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
if s.config.PtrSize == 4 {
op = op32
}
- s.vars[n] = s.newValue1(op, types.Types[TINT], args[0])
+ s.vars[n] = s.newValue1(op, types.Types[types.TINT], args[0])
s.endBlock().AddEdgeTo(bEnd)
// Call the pure Go version.
// Merge results.
s.startBlock(bEnd)
- return s.variable(n, types.Types[TINT])
+ return s.variable(n, types.Types[types.TINT])
}
}
addF("math/bits", "OnesCount64",
makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount64),
sys.AMD64)
addF("math/bits", "OnesCount64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpPopCount64, types.Types[TINT], args[0])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpPopCount64, types.Types[types.TINT], args[0])
},
sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
addF("math/bits", "OnesCount32",
makeOnesCountAMD64(ssa.OpPopCount32, ssa.OpPopCount32),
sys.AMD64)
addF("math/bits", "OnesCount32",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpPopCount32, types.Types[TINT], args[0])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpPopCount32, types.Types[types.TINT], args[0])
},
sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
addF("math/bits", "OnesCount16",
makeOnesCountAMD64(ssa.OpPopCount16, ssa.OpPopCount16),
sys.AMD64)
addF("math/bits", "OnesCount16",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpPopCount16, types.Types[TINT], args[0])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpPopCount16, types.Types[types.TINT], args[0])
},
sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
addF("math/bits", "OnesCount8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpPopCount8, types.Types[TINT], args[0])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpPopCount8, types.Types[types.TINT], args[0])
},
sys.S390X, sys.PPC64, sys.Wasm)
addF("math/bits", "OnesCount",
makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount32),
sys.AMD64)
addF("math/bits", "Mul64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1])
},
sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.MIPS64)
alias("math/bits", "Mul", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE)
addF("math/bits", "Add64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue3(ssa.OpAdd64carry, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue3(ssa.OpAdd64carry, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
},
sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X)
alias("math/bits", "Add", "math/bits", "Add64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchS390X)
addF("math/bits", "Sub64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue3(ssa.OpSub64borrow, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue3(ssa.OpSub64borrow, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
},
sys.AMD64, sys.ARM64, sys.S390X)
alias("math/bits", "Sub", "math/bits", "Sub64", sys.ArchAMD64, sys.ArchARM64, sys.ArchS390X)
addF("math/bits", "Div64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
// check for divide-by-zero/overflow and panic with appropriate message
- cmpZero := s.newValue2(s.ssaOp(ONE, types.Types[TUINT64]), types.Types[TBOOL], args[2], s.zeroVal(types.Types[TUINT64]))
+ cmpZero := s.newValue2(s.ssaOp(ir.ONE, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[2], s.zeroVal(types.Types[types.TUINT64]))
s.check(cmpZero, panicdivide)
- cmpOverflow := s.newValue2(s.ssaOp(OLT, types.Types[TUINT64]), types.Types[TBOOL], args[0], args[2])
+ cmpOverflow := s.newValue2(s.ssaOp(ir.OLT, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[0], args[2])
s.check(cmpOverflow, panicoverflow)
- return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2])
+ return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
},
sys.AMD64)
alias("math/bits", "Div", "math/bits", "Div64", sys.ArchAMD64)
/******** math/big ********/
add("math/big", "mulWW",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1])
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1])
},
sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64LE, sys.ArchPPC64, sys.ArchS390X)
}
return nil
}
pkg := sym.Pkg.Path
- if sym.Pkg == localpkg {
- pkg = myimportpath
+ if sym.Pkg == types.LocalPkg {
+ pkg = base.Ctxt.Pkgpath
}
- if flag_race && pkg == "sync/atomic" {
+ if base.Flag.Race && pkg == "sync/atomic" {
// The race detector needs to be able to intercept these calls.
// We can't intrinsify them.
return nil
return intrinsics[intrinsicKey{thearch.LinkArch.Arch, pkg, fn}]
}
-func isIntrinsicCall(n *Node) bool {
- if n == nil || n.Left == nil {
+func isIntrinsicCall(n *ir.CallExpr) bool {
+ if n == nil {
+ return false
+ }
+ name, ok := n.Left().(*ir.Name)
+ if !ok {
return false
}
- return findIntrinsic(n.Left.Sym) != nil
+ return findIntrinsic(name.Sym()) != nil
}
// intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation.
-func (s *state) intrinsicCall(n *Node) *ssa.Value {
- v := findIntrinsic(n.Left.Sym)(s, n, s.intrinsicArgs(n))
+func (s *state) intrinsicCall(n *ir.CallExpr) *ssa.Value {
+ v := findIntrinsic(n.Left().Sym())(s, n, s.intrinsicArgs(n))
if ssa.IntrinsicsDebug > 0 {
x := v
if x == nil {
if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 {
x = x.Args[0]
}
- Warnl(n.Pos, "intrinsic substitution for %v with %s", n.Left.Sym.Name, x.LongString())
+ base.WarnfAt(n.Pos(), "intrinsic substitution for %v with %s", n.Left().Sym().Name, x.LongString())
}
return v
}
// intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them.
-func (s *state) intrinsicArgs(n *Node) []*ssa.Value {
+func (s *state) intrinsicArgs(n *ir.CallExpr) []*ssa.Value {
// Construct map of temps; see comments in s.call about the structure of n.
- temps := map[*Node]*ssa.Value{}
- for _, a := range n.List.Slice() {
- if a.Op != OAS {
- s.Fatalf("non-assignment as a temp function argument %v", a.Op)
+ temps := map[ir.Node]*ssa.Value{}
+ for _, a := range n.List().Slice() {
+ if a.Op() != ir.OAS {
+ s.Fatalf("non-assignment as a temp function argument %v", a.Op())
}
- l, r := a.Left, a.Right
- if l.Op != ONAME {
- s.Fatalf("non-ONAME temp function argument %v", a.Op)
+ a := a.(*ir.AssignStmt)
+ l, r := a.Left(), a.Right()
+ if l.Op() != ir.ONAME {
+ s.Fatalf("non-ONAME temp function argument %v", a.Op())
}
// Evaluate and store to "temporary".
// Walk ensures these temporaries are dead outside of n.
temps[l] = s.expr(r)
}
- args := make([]*ssa.Value, n.Rlist.Len())
- for i, n := range n.Rlist.Slice() {
+ args := make([]*ssa.Value, n.Rlist().Len())
+ for i, n := range n.Rlist().Slice() {
// Store a value to an argument slot.
if x, ok := temps[n]; ok {
// This is a previously computed temporary.
// call. We will also record funcdata information on where the args are stored
// (as well as the deferBits variable), and this will enable us to run the proper
// defer calls during panics.
-func (s *state) openDeferRecord(n *Node) {
+func (s *state) openDeferRecord(n *ir.CallExpr) {
// Do any needed expression evaluation for the args (including the
// receiver, if any). This may be evaluating something like 'autotmp_3 =
// once.mutex'. Such a statement will create a mapping in s.vars[] from
// the autotmp name to the evaluated SSA arg value, but won't do any
// stores to the stack.
- s.stmtList(n.List)
+ s.stmtList(n.List())
var args []*ssa.Value
- var argNodes []*Node
+ var argNodes []*ir.Name
opendefer := &openDeferInfo{
n: n,
}
- fn := n.Left
- if n.Op == OCALLFUNC {
+ fn := n.Left()
+ if n.Op() == ir.OCALLFUNC {
// We must always store the function value in a stack slot for the
// runtime panic code to use. But in the defer exit code, we will
// call the function directly if it is a static function.
closureVal := s.expr(fn)
- closure := s.openDeferSave(nil, fn.Type, closureVal)
- opendefer.closureNode = closure.Aux.(*Node)
- if !(fn.Op == ONAME && fn.Class() == PFUNC) {
+ closure := s.openDeferSave(nil, fn.Type(), closureVal)
+ opendefer.closureNode = closure.Aux.(*ir.Name)
+ if !(fn.Op() == ir.ONAME && fn.(*ir.Name).Class() == ir.PFUNC) {
opendefer.closure = closure
}
- } else if n.Op == OCALLMETH {
- if fn.Op != ODOTMETH {
- Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn)
+ } else if n.Op() == ir.OCALLMETH {
+ if fn.Op() != ir.ODOTMETH {
+ base.Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn)
}
+ fn := fn.(*ir.SelectorExpr)
closureVal := s.getMethodClosure(fn)
// We must always store the function value in a stack slot for the
// runtime panic code to use. But in the defer exit code, we will
// call the method directly.
- closure := s.openDeferSave(nil, fn.Type, closureVal)
- opendefer.closureNode = closure.Aux.(*Node)
+ closure := s.openDeferSave(nil, fn.Type(), closureVal)
+ opendefer.closureNode = closure.Aux.(*ir.Name)
} else {
- if fn.Op != ODOTINTER {
- Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op)
+ if fn.Op() != ir.ODOTINTER {
+ base.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op())
}
+ fn := fn.(*ir.SelectorExpr)
closure, rcvr := s.getClosureAndRcvr(fn)
opendefer.closure = s.openDeferSave(nil, closure.Type, closure)
// Important to get the receiver type correct, so it is recognized
// as a pointer for GC purposes.
- opendefer.rcvr = s.openDeferSave(nil, fn.Type.Recv().Type, rcvr)
- opendefer.closureNode = opendefer.closure.Aux.(*Node)
- opendefer.rcvrNode = opendefer.rcvr.Aux.(*Node)
+ opendefer.rcvr = s.openDeferSave(nil, fn.Type().Recv().Type, rcvr)
+ opendefer.closureNode = opendefer.closure.Aux.(*ir.Name)
+ opendefer.rcvrNode = opendefer.rcvr.Aux.(*ir.Name)
}
- for _, argn := range n.Rlist.Slice() {
+ for _, argn := range n.Rlist().Slice() {
var v *ssa.Value
- if canSSAType(argn.Type) {
- v = s.openDeferSave(nil, argn.Type, s.expr(argn))
+ if canSSAType(argn.Type()) {
+ v = s.openDeferSave(nil, argn.Type(), s.expr(argn))
} else {
- v = s.openDeferSave(argn, argn.Type, nil)
+ v = s.openDeferSave(argn, argn.Type(), nil)
}
args = append(args, v)
- argNodes = append(argNodes, v.Aux.(*Node))
+ argNodes = append(argNodes, v.Aux.(*ir.Name))
}
opendefer.argVals = args
opendefer.argNodes = argNodes
// Update deferBits only after evaluation and storage to stack of
// args/receiver/interface is successful.
- bitvalue := s.constInt8(types.Types[TUINT8], 1<<uint(index))
- newDeferBits := s.newValue2(ssa.OpOr8, types.Types[TUINT8], s.variable(&deferBitsVar, types.Types[TUINT8]), bitvalue)
- s.vars[&deferBitsVar] = newDeferBits
- s.store(types.Types[TUINT8], s.deferBitsAddr, newDeferBits)
+ bitvalue := s.constInt8(types.Types[types.TUINT8], 1<<uint(index))
+ newDeferBits := s.newValue2(ssa.OpOr8, types.Types[types.TUINT8], s.variable(deferBitsVar, types.Types[types.TUINT8]), bitvalue)
+ s.vars[deferBitsVar] = newDeferBits
+ s.store(types.Types[types.TUINT8], s.deferBitsAddr, newDeferBits)
}
// openDeferSave generates SSA nodes to store a value (with type t) for an
// type t is non-SSAable, then n must be non-nil (and val should be nil) and n is
// evaluated (via s.addr() below) to get the value that is to be stored. The
// function returns an SSA value representing a pointer to the autotmp location.
-func (s *state) openDeferSave(n *Node, t *types.Type, val *ssa.Value) *ssa.Value {
+func (s *state) openDeferSave(n ir.Node, t *types.Type, val *ssa.Value) *ssa.Value {
canSSA := canSSAType(t)
var pos src.XPos
if canSSA {
pos = val.Pos
} else {
- pos = n.Pos
+ pos = n.Pos()
}
argTemp := tempAt(pos.WithNotStmt(), s.curfn, t)
- argTemp.Name.SetOpenDeferSlot(true)
+ argTemp.SetOpenDeferSlot(true)
var addrArgTemp *ssa.Value
// Use OpVarLive to make sure stack slots for the args, etc. are not
// removed by dead-store elimination
// declared in the entry block, so that it will be live for the
// defer exit code (which will actually access it only if the
// associated defer call has been activated).
- s.defvars[s.f.Entry.ID][&memVar] = s.entryNewValue1A(ssa.OpVarDef, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][&memVar])
- s.defvars[s.f.Entry.ID][&memVar] = s.entryNewValue1A(ssa.OpVarLive, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][&memVar])
- addrArgTemp = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(argTemp.Type), argTemp, s.sp, s.defvars[s.f.Entry.ID][&memVar])
+ s.defvars[s.f.Entry.ID][memVar] = s.entryNewValue1A(ssa.OpVarDef, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][memVar])
+ s.defvars[s.f.Entry.ID][memVar] = s.entryNewValue1A(ssa.OpVarLive, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][memVar])
+ addrArgTemp = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(argTemp.Type()), argTemp, s.sp, s.defvars[s.f.Entry.ID][memVar])
} else {
// Special case if we're still in the entry block. We can't use
// the above code, since s.defvars[s.f.Entry.ID] isn't defined
// until we end the entry block with s.endBlock().
- s.vars[&memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, argTemp, s.mem(), false)
- s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argTemp, s.mem(), false)
- addrArgTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(argTemp.Type), argTemp, s.sp, s.mem(), false)
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, argTemp, s.mem(), false)
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argTemp, s.mem(), false)
+ addrArgTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(argTemp.Type()), argTemp, s.sp, s.mem(), false)
}
if t.HasPointers() {
// Since we may use this argTemp during exit depending on the
// Therefore, we must make sure it is zeroed out in the entry
// block if it contains pointers, else GC may wrongly follow an
// uninitialized pointer value.
- argTemp.Name.SetNeedzero(true)
+ argTemp.SetNeedzero(true)
}
if !canSSA {
a := s.addr(n)
s.startBlock(deferExit)
s.lastDeferExit = deferExit
s.lastDeferCount = len(s.openDefers)
- zeroval := s.constInt8(types.Types[TUINT8], 0)
+ zeroval := s.constInt8(types.Types[types.TUINT8], 0)
testLateExpansion := ssa.LateCallExpansionEnabledWithin(s.f)
// Test for and run defers in reverse order
for i := len(s.openDefers) - 1; i >= 0; i-- {
bCond := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
- deferBits := s.variable(&deferBitsVar, types.Types[TUINT8])
+ deferBits := s.variable(deferBitsVar, types.Types[types.TUINT8])
// Generate code to check if the bit associated with the current
// defer is set.
- bitval := s.constInt8(types.Types[TUINT8], 1<<uint(i))
- andval := s.newValue2(ssa.OpAnd8, types.Types[TUINT8], deferBits, bitval)
- eqVal := s.newValue2(ssa.OpEq8, types.Types[TBOOL], andval, zeroval)
+ bitval := s.constInt8(types.Types[types.TUINT8], 1<<uint(i))
+ andval := s.newValue2(ssa.OpAnd8, types.Types[types.TUINT8], deferBits, bitval)
+ eqVal := s.newValue2(ssa.OpEq8, types.Types[types.TBOOL], andval, zeroval)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(eqVal)
// Clear this bit in deferBits and force store back to stack, so
// we will not try to re-run this defer call if this defer call panics.
- nbitval := s.newValue1(ssa.OpCom8, types.Types[TUINT8], bitval)
- maskedval := s.newValue2(ssa.OpAnd8, types.Types[TUINT8], deferBits, nbitval)
- s.store(types.Types[TUINT8], s.deferBitsAddr, maskedval)
+ nbitval := s.newValue1(ssa.OpCom8, types.Types[types.TUINT8], bitval)
+ maskedval := s.newValue2(ssa.OpAnd8, types.Types[types.TUINT8], deferBits, nbitval)
+ s.store(types.Types[types.TUINT8], s.deferBitsAddr, maskedval)
// Use this value for following tests, so we keep previous
// bits cleared.
- s.vars[&deferBitsVar] = maskedval
+ s.vars[deferBitsVar] = maskedval
// Generate code to call the function call of the defer, using the
// closure/receiver/args that were stored in argtmps at the point
// of the defer statement.
- argStart := Ctxt.FixedFrameSize()
- fn := r.n.Left
- stksize := fn.Type.ArgWidth()
+ argStart := base.Ctxt.FixedFrameSize()
+ fn := r.n.Left()
+ stksize := fn.Type().ArgWidth()
var ACArgs []ssa.Param
var ACResults []ssa.Param
var callArgs []*ssa.Value
// rcvr in case of OCALLINTER
v := s.load(r.rcvr.Type.Elem(), r.rcvr)
addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart)
- ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(argStart)})
+ ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(argStart)})
if testLateExpansion {
callArgs = append(callArgs, v)
} else {
- s.store(types.Types[TUINTPTR], addr, v)
+ s.store(types.Types[types.TUINTPTR], addr, v)
}
}
for j, argAddrVal := range r.argVals {
if r.closure != nil {
v := s.load(r.closure.Type.Elem(), r.closure)
s.maybeNilCheckClosure(v, callDefer)
- codeptr := s.rawLoad(types.Types[TUINTPTR], v)
+ codeptr := s.rawLoad(types.Types[types.TUINTPTR], v)
aux := ssa.ClosureAuxCall(ACArgs, ACResults)
if testLateExpansion {
callArgs = append(callArgs, s.mem())
call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, aux, codeptr, v, s.mem())
}
} else {
- aux := ssa.StaticAuxCall(fn.Sym.Linksym(), ACArgs, ACResults)
+ aux := ssa.StaticAuxCall(fn.Sym().Linksym(), ACArgs, ACResults)
if testLateExpansion {
callArgs = append(callArgs, s.mem())
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
}
call.AuxInt = stksize
if testLateExpansion {
- s.vars[&memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
+ s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
} else {
- s.vars[&memVar] = call
+ s.vars[memVar] = call
}
// Make sure that the stack slots with pointers are kept live
// through the call (which is a pre-emption point). Also, we will
// use the first call of the last defer exit to compute liveness
// for the deferreturn, so we want all stack slots to be live.
if r.closureNode != nil {
- s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode, s.mem(), false)
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode, s.mem(), false)
}
if r.rcvrNode != nil {
- if r.rcvrNode.Type.HasPointers() {
- s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.rcvrNode, s.mem(), false)
+ if r.rcvrNode.Type().HasPointers() {
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.rcvrNode, s.mem(), false)
}
}
for _, argNode := range r.argNodes {
- if argNode.Type.HasPointers() {
- s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argNode, s.mem(), false)
+ if argNode.Type().HasPointers() {
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argNode, s.mem(), false)
}
}
}
}
-func (s *state) callResult(n *Node, k callKind) *ssa.Value {
+func (s *state) callResult(n *ir.CallExpr, k callKind) *ssa.Value {
return s.call(n, k, false)
}
-func (s *state) callAddr(n *Node, k callKind) *ssa.Value {
+func (s *state) callAddr(n *ir.CallExpr, k callKind) *ssa.Value {
return s.call(n, k, true)
}
// Calls the function n using the specified call type.
// Returns the address of the return value (or nil if none).
-func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
+func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Value {
s.prevCall = nil
var sym *types.Sym // target symbol (if static)
var closure *ssa.Value // ptr to closure to run (if dynamic)
var codeptr *ssa.Value // ptr to target code (if dynamic)
var rcvr *ssa.Value // receiver to set
- fn := n.Left
+ fn := n.Left()
var ACArgs []ssa.Param
var ACResults []ssa.Param
var callArgs []*ssa.Value
- res := n.Left.Type.Results()
+ res := n.Left().Type().Results()
if k == callNormal {
nf := res.NumFields()
for i := 0; i < nf; i++ {
fp := res.Field(i)
- ACResults = append(ACResults, ssa.Param{Type: fp.Type, Offset: int32(fp.Offset + Ctxt.FixedFrameSize())})
+ ACResults = append(ACResults, ssa.Param{Type: fp.Type, Offset: int32(fp.Offset + base.Ctxt.FixedFrameSize())})
}
}
testLateExpansion := false
- switch n.Op {
- case OCALLFUNC:
+ switch n.Op() {
+ case ir.OCALLFUNC:
testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f)
- if k == callNormal && fn.Op == ONAME && fn.Class() == PFUNC {
- sym = fn.Sym
+ if k == callNormal && fn.Op() == ir.ONAME && fn.(*ir.Name).Class() == ir.PFUNC {
+ sym = fn.Sym()
break
}
closure = s.expr(fn)
// not the point of defer statement.
s.maybeNilCheckClosure(closure, k)
}
- case OCALLMETH:
- if fn.Op != ODOTMETH {
+ case ir.OCALLMETH:
+ if fn.Op() != ir.ODOTMETH {
s.Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn)
}
+ fn := fn.(*ir.SelectorExpr)
testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f)
if k == callNormal {
- sym = fn.Sym
+ sym = fn.Sym()
break
}
closure = s.getMethodClosure(fn)
// Note: receiver is already present in n.Rlist, so we don't
// want to set it here.
- case OCALLINTER:
- if fn.Op != ODOTINTER {
- s.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op)
+ case ir.OCALLINTER:
+ if fn.Op() != ir.ODOTINTER {
+ s.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op())
}
+ fn := fn.(*ir.SelectorExpr)
testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f)
var iclosure *ssa.Value
iclosure, rcvr = s.getClosureAndRcvr(fn)
if k == callNormal {
- codeptr = s.load(types.Types[TUINTPTR], iclosure)
+ codeptr = s.load(types.Types[types.TUINTPTR], iclosure)
} else {
closure = iclosure
}
}
- dowidth(fn.Type)
- stksize := fn.Type.ArgWidth() // includes receiver, args, and results
+ dowidth(fn.Type())
+ stksize := fn.Type().ArgWidth() // includes receiver, args, and results
// Run all assignments of temps.
// The temps are introduced to avoid overwriting argument
// slots when arguments themselves require function calls.
- s.stmtList(n.List)
+ s.stmtList(n.List())
var call *ssa.Value
if k == callDeferStack {
testLateExpansion = ssa.LateCallExpansionEnabledWithin(s.f)
// Make a defer struct d on the stack.
t := deferstruct(stksize)
- d := tempAt(n.Pos, s.curfn, t)
+ d := tempAt(n.Pos(), s.curfn, t)
- s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, d, s.mem())
+ s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, d, s.mem())
addr := s.addr(d)
// Must match reflect.go:deferstruct and src/runtime/runtime2.go:_defer.
// 0: siz
- s.store(types.Types[TUINT32],
- s.newValue1I(ssa.OpOffPtr, types.Types[TUINT32].PtrTo(), t.FieldOff(0), addr),
- s.constInt32(types.Types[TUINT32], int32(stksize)))
+ s.store(types.Types[types.TUINT32],
+ s.newValue1I(ssa.OpOffPtr, types.Types[types.TUINT32].PtrTo(), t.FieldOff(0), addr),
+ s.constInt32(types.Types[types.TUINT32], int32(stksize)))
// 1: started, set in deferprocStack
// 2: heap, set in deferprocStack
// 3: openDefer
// 11: fd
// Then, store all the arguments of the defer call.
- ft := fn.Type
+ ft := fn.Type()
off := t.FieldOff(12)
- args := n.Rlist.Slice()
+ args := n.Rlist().Slice()
// Set receiver (for interface calls). Always a pointer.
if rcvr != nil {
p := s.newValue1I(ssa.OpOffPtr, ft.Recv().Type.PtrTo(), off, addr)
- s.store(types.Types[TUINTPTR], p, rcvr)
+ s.store(types.Types[types.TUINTPTR], p, rcvr)
}
// Set receiver (for method calls).
- if n.Op == OCALLMETH {
+ if n.Op() == ir.OCALLMETH {
f := ft.Recv()
s.storeArgWithBase(args[0], f.Type, addr, off+f.Offset)
args = args[1:]
}
// Call runtime.deferprocStack with pointer to _defer record.
- ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(Ctxt.FixedFrameSize())})
+ ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(base.Ctxt.FixedFrameSize())})
aux := ssa.StaticAuxCall(deferprocStack, ACArgs, ACResults)
if testLateExpansion {
callArgs = append(callArgs, addr, s.mem())
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
call.AddArgs(callArgs...)
} else {
- arg0 := s.constOffPtrSP(types.Types[TUINTPTR], Ctxt.FixedFrameSize())
- s.store(types.Types[TUINTPTR], arg0, addr)
+ arg0 := s.constOffPtrSP(types.Types[types.TUINTPTR], base.Ctxt.FixedFrameSize())
+ s.store(types.Types[types.TUINTPTR], arg0, addr)
call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem())
}
if stksize < int64(Widthptr) {
} else {
// Store arguments to stack, including defer/go arguments and receiver for method calls.
// These are written in SP-offset order.
- argStart := Ctxt.FixedFrameSize()
+ argStart := base.Ctxt.FixedFrameSize()
// Defer/go args.
if k != callNormal {
// Write argsize and closure (args to newproc/deferproc).
- argsize := s.constInt32(types.Types[TUINT32], int32(stksize))
- ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINT32], Offset: int32(argStart)})
+ argsize := s.constInt32(types.Types[types.TUINT32], int32(stksize))
+ ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINT32], Offset: int32(argStart)})
if testLateExpansion {
callArgs = append(callArgs, argsize)
} else {
addr := s.constOffPtrSP(s.f.Config.Types.UInt32Ptr, argStart)
- s.store(types.Types[TUINT32], addr, argsize)
+ s.store(types.Types[types.TUINT32], addr, argsize)
}
- ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(argStart) + int32(Widthptr)})
+ ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(argStart) + int32(Widthptr)})
if testLateExpansion {
callArgs = append(callArgs, closure)
} else {
addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart+int64(Widthptr))
- s.store(types.Types[TUINTPTR], addr, closure)
+ s.store(types.Types[types.TUINTPTR], addr, closure)
}
stksize += 2 * int64(Widthptr)
argStart += 2 * int64(Widthptr)
// Set receiver (for interface calls).
if rcvr != nil {
addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart)
- ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(argStart)})
+ ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(argStart)})
if testLateExpansion {
callArgs = append(callArgs, rcvr)
} else {
- s.store(types.Types[TUINTPTR], addr, rcvr)
+ s.store(types.Types[types.TUINTPTR], addr, rcvr)
}
}
// Write args.
- t := n.Left.Type
- args := n.Rlist.Slice()
- if n.Op == OCALLMETH {
+ t := n.Left().Type()
+ args := n.Rlist().Slice()
+ if n.Op() == ir.OCALLMETH {
f := t.Recv()
ACArg, arg := s.putArg(args[0], f.Type, argStart+f.Offset, testLateExpansion)
ACArgs = append(ACArgs, ACArg)
// can't always figure that out currently, and it's
// critical that we not clobber any arguments already
// stored onto the stack.
- codeptr = s.rawLoad(types.Types[TUINTPTR], closure)
+ codeptr = s.rawLoad(types.Types[types.TUINTPTR], closure)
if testLateExpansion {
aux := ssa.ClosureAuxCall(ACArgs, ACResults)
call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, closure)
}
case sym != nil:
if testLateExpansion {
- aux := ssa.StaticAuxCall(sym.Linksym(), ACArgs, ACResults)
+ aux := ssa.StaticAuxCall(callTargetLSym(sym, s.curfn.LSym), ACArgs, ACResults)
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
call.AddArgs(callArgs...)
} else {
- call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(sym.Linksym(), ACArgs, ACResults), s.mem())
+ call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(callTargetLSym(sym, s.curfn.LSym), ACArgs, ACResults), s.mem())
}
default:
- s.Fatalf("bad call type %v %v", n.Op, n)
+ s.Fatalf("bad call type %v %v", n.Op(), n)
}
call.AuxInt = stksize // Call operations carry the argsize of the callee along with them
}
if testLateExpansion {
s.prevCall = call
- s.vars[&memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
+ s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
} else {
- s.vars[&memVar] = call
+ s.vars[memVar] = call
}
// Insert OVARLIVE nodes
- s.stmtList(n.Nbody)
+ s.stmtList(n.Body())
// Finish block for defers
if k == callDefer || k == callDeferStack {
if testLateExpansion {
return s.newValue1I(ssa.OpSelectNAddr, pt, 0, call)
}
- return s.constOffPtrSP(pt, fp.Offset+Ctxt.FixedFrameSize())
+ return s.constOffPtrSP(pt, fp.Offset+base.Ctxt.FixedFrameSize())
}
if testLateExpansion {
return s.newValue1I(ssa.OpSelectN, fp.Type, 0, call)
}
- return s.load(n.Type, s.constOffPtrSP(types.NewPtr(fp.Type), fp.Offset+Ctxt.FixedFrameSize()))
+ return s.load(n.Type(), s.constOffPtrSP(types.NewPtr(fp.Type), fp.Offset+base.Ctxt.FixedFrameSize()))
}
// maybeNilCheckClosure checks if a nil check of a closure is needed in some
}
// getMethodClosure returns a value representing the closure for a method call
-func (s *state) getMethodClosure(fn *Node) *ssa.Value {
+func (s *state) getMethodClosure(fn *ir.SelectorExpr) *ssa.Value {
// Make a name n2 for the function.
// fn.Sym might be sync.(*Mutex).Unlock.
// Make a PFUNC node out of that, then evaluate it.
// We get back an SSA value representing &sync.(*Mutex).Unlock·f.
// We can then pass that to defer or go.
- n2 := newnamel(fn.Pos, fn.Sym)
- n2.Name.Curfn = s.curfn
- n2.SetClass(PFUNC)
+ n2 := ir.NewNameAt(fn.Pos(), fn.Sym())
+ n2.Curfn = s.curfn
+ n2.SetClass(ir.PFUNC)
// n2.Sym already existed, so it's already marked as a function.
- n2.Pos = fn.Pos
- n2.Type = types.Types[TUINT8] // dummy type for a static closure. Could use runtime.funcval if we had it.
+ n2.SetPos(fn.Pos())
+ n2.SetType(types.Types[types.TUINT8]) // fake type for a static closure. Could use runtime.funcval if we had it.
return s.expr(n2)
}
// getClosureAndRcvr returns values for the appropriate closure and receiver of an
// interface call
-func (s *state) getClosureAndRcvr(fn *Node) (*ssa.Value, *ssa.Value) {
- i := s.expr(fn.Left)
- itab := s.newValue1(ssa.OpITab, types.Types[TUINTPTR], i)
+func (s *state) getClosureAndRcvr(fn *ir.SelectorExpr) (*ssa.Value, *ssa.Value) {
+ i := s.expr(fn.Left())
+ itab := s.newValue1(ssa.OpITab, types.Types[types.TUINTPTR], i)
s.nilCheck(itab)
- itabidx := fn.Xoffset + 2*int64(Widthptr) + 8 // offset of fun field in runtime.itab
+ itabidx := fn.Offset() + 2*int64(Widthptr) + 8 // offset of fun field in runtime.itab
closure := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab)
rcvr := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, i)
return closure, rcvr
// etypesign returns the signed-ness of e, for integer/pointer etypes.
// -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer.
-func etypesign(e types.EType) int8 {
+func etypesign(e types.Kind) int8 {
switch e {
- case TINT8, TINT16, TINT32, TINT64, TINT:
+ case types.TINT8, types.TINT16, types.TINT32, types.TINT64, types.TINT:
return -1
- case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR, TUNSAFEPTR:
+ case types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINT, types.TUINTPTR, types.TUNSAFEPTR:
return +1
}
return 0
// addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
// The value that the returned Value represents is guaranteed to be non-nil.
-func (s *state) addr(n *Node) *ssa.Value {
- if n.Op != ONAME {
- s.pushLine(n.Pos)
+func (s *state) addr(n ir.Node) *ssa.Value {
+ if n.Op() != ir.ONAME {
+ s.pushLine(n.Pos())
defer s.popLine()
}
- t := types.NewPtr(n.Type)
- switch n.Op {
- case ONAME:
+ t := types.NewPtr(n.Type())
+ var offset int64
+ switch n.Op() {
+ case ir.ONAMEOFFSET:
+ no := n.(*ir.NameOffsetExpr)
+ offset = no.Offset_
+ n = no.Name_
+ fallthrough
+ case ir.ONAME:
+ n := n.(*ir.Name)
switch n.Class() {
- case PEXTERN:
+ case ir.PEXTERN:
// global variable
- v := s.entryNewValue1A(ssa.OpAddr, t, n.Sym.Linksym(), s.sb)
+ v := s.entryNewValue1A(ssa.OpAddr, t, n.Sym().Linksym(), s.sb)
// TODO: Make OpAddr use AuxInt as well as Aux.
- if n.Xoffset != 0 {
- v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Xoffset, v)
+ if offset != 0 {
+ v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, offset, v)
}
return v
- case PPARAM:
+ case ir.PPARAM:
// parameter slot
v := s.decladdrs[n]
if v != nil {
}
s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
return nil
- case PAUTO:
- return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), !n.IsAutoTmp())
+ case ir.PAUTO:
+ return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), !ir.IsAutoTmp(n))
- case PPARAMOUT: // Same as PAUTO -- cannot generate LEA early.
+ case ir.PPARAMOUT: // Same as PAUTO -- cannot generate LEA early.
// ensure that we reuse symbols for out parameters so
// that cse works on their addresses
return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), true)
s.Fatalf("variable address class %v not implemented", n.Class())
return nil
}
- case ORESULT:
+ case ir.ORESULT:
// load return from callee
if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall {
- return s.constOffPtrSP(t, n.Xoffset)
+ return s.constOffPtrSP(t, n.Offset())
}
- which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Xoffset)
+ which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Offset())
if which == -1 {
// Do the old thing // TODO: Panic instead.
- return s.constOffPtrSP(t, n.Xoffset)
+ return s.constOffPtrSP(t, n.Offset())
}
x := s.newValue1I(ssa.OpSelectNAddr, t, which, s.prevCall)
return x
- case OINDEX:
- if n.Left.Type.IsSlice() {
- a := s.expr(n.Left)
- i := s.expr(n.Right)
- len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], a)
+ case ir.OINDEX:
+ if n.Left().Type().IsSlice() {
+ a := s.expr(n.Left())
+ i := s.expr(n.Right())
+ len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], a)
i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
p := s.newValue1(ssa.OpSlicePtr, t, a)
return s.newValue2(ssa.OpPtrIndex, t, p, i)
} else { // array
- a := s.addr(n.Left)
- i := s.expr(n.Right)
- len := s.constInt(types.Types[TINT], n.Left.Type.NumElem())
+ a := s.addr(n.Left())
+ i := s.expr(n.Right())
+ len := s.constInt(types.Types[types.TINT], n.Left().Type().NumElem())
i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
- return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.Left.Type.Elem()), a, i)
- }
- case ODEREF:
- return s.exprPtr(n.Left, n.Bounded(), n.Pos)
- case ODOT:
- p := s.addr(n.Left)
- return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p)
- case ODOTPTR:
- p := s.exprPtr(n.Left, n.Bounded(), n.Pos)
- return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p)
- case OCLOSUREVAR:
- return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset,
+ return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.Left().Type().Elem()), a, i)
+ }
+ case ir.ODEREF:
+ return s.exprPtr(n.Left(), n.Bounded(), n.Pos())
+ case ir.ODOT:
+ p := s.addr(n.Left())
+ return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p)
+ case ir.ODOTPTR:
+ p := s.exprPtr(n.Left(), n.Bounded(), n.Pos())
+ return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p)
+ case ir.OCLOSUREREAD:
+ n := n.(*ir.ClosureReadExpr)
+ return s.newValue1I(ssa.OpOffPtr, t, n.Offset(),
s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr))
- case OCONVNOP:
- addr := s.addr(n.Left)
+ case ir.OCONVNOP:
+ if n.Type() == n.Left().Type() {
+ return s.addr(n.Left())
+ }
+ addr := s.addr(n.Left())
return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type
- case OCALLFUNC, OCALLINTER, OCALLMETH:
+ case ir.OCALLFUNC, ir.OCALLINTER, ir.OCALLMETH:
+ n := n.(*ir.CallExpr)
return s.callAddr(n, callNormal)
- case ODOTTYPE:
+ case ir.ODOTTYPE:
+ n := n.(*ir.TypeAssertExpr)
v, _ := s.dottype(n, false)
if v.Op != ssa.OpLoad {
s.Fatalf("dottype of non-load")
}
return v.Args[0]
default:
- s.Fatalf("unhandled addr %v", n.Op)
+ s.Fatalf("unhandled addr %v", n.Op())
return nil
}
}
// canSSA reports whether n is SSA-able.
// n must be an ONAME (or an ODOT sequence with an ONAME base).
-func (s *state) canSSA(n *Node) bool {
- if Debug.N != 0 {
+func (s *state) canSSA(n ir.Node) bool {
+ if base.Flag.N != 0 {
return false
}
- for n.Op == ODOT || (n.Op == OINDEX && n.Left.Type.IsArray()) {
- n = n.Left
+ for {
+ nn := n
+ if nn.Op() == ir.ODOT {
+ n = nn.Left()
+ continue
+ }
+ if nn.Op() == ir.OINDEX {
+ if nn.Left().Type().IsArray() {
+ n = nn.Left()
+ continue
+ }
+ }
+ break
}
- if n.Op != ONAME {
+ if n.Op() != ir.ONAME {
return false
}
- if n.Name.Addrtaken() {
+ return s.canSSAName(n.(*ir.Name)) && canSSAType(n.Type())
+}
+
+func (s *state) canSSAName(name *ir.Name) bool {
+ if name.Addrtaken() {
return false
}
- if n.isParamHeapCopy() {
+ if isParamHeapCopy(name) {
return false
}
- if n.Class() == PAUTOHEAP {
- s.Fatalf("canSSA of PAUTOHEAP %v", n)
+ if name.Class() == ir.PAUTOHEAP {
+ s.Fatalf("canSSA of PAUTOHEAP %v", name)
}
- switch n.Class() {
- case PEXTERN:
+ switch name.Class() {
+ case ir.PEXTERN:
return false
- case PPARAMOUT:
+ case ir.PPARAMOUT:
if s.hasdefer {
// TODO: handle this case? Named return values must be
// in memory so that the deferred function can see them.
return false
}
}
- if n.Class() == PPARAM && n.Sym != nil && n.Sym.Name == ".this" {
+ if name.Class() == ir.PPARAM && name.Sym() != nil && name.Sym().Name == ".this" {
// wrappers generated by genwrapper need to update
// the .this pointer in place.
// TODO: treat as a PPARAMOUT?
return false
}
- return canSSAType(n.Type)
+ return true
// TODO: try to make more variables SSAable?
}
// Too big and we'll introduce too much register pressure.
return false
}
- switch t.Etype {
- case TARRAY:
+ switch t.Kind() {
+ case types.TARRAY:
// We can't do larger arrays because dynamic indexing is
// not supported on SSA variables.
// TODO: allow if all indexes are constant.
return canSSAType(t.Elem())
}
return false
- case TSTRUCT:
+ case types.TSTRUCT:
if t.NumFields() > ssa.MaxStruct {
return false
}
}
// exprPtr evaluates n to a pointer and nil-checks it.
-func (s *state) exprPtr(n *Node, bounded bool, lineno src.XPos) *ssa.Value {
+func (s *state) exprPtr(n ir.Node, bounded bool, lineno src.XPos) *ssa.Value {
p := s.expr(n)
if bounded || n.NonNil() {
if s.f.Frontend().Debug_checknil() && lineno.Line() > 1 {
// Used only for automatically inserted nil checks,
// not for user code like 'x != nil'.
func (s *state) nilCheck(ptr *ssa.Value) {
- if disable_checknil != 0 || s.curfn.Func.NilCheckDisabled() {
+ if base.Debug.DisableNil != 0 || s.curfn.NilCheckDisabled() {
return
}
s.newValue2(ssa.OpNilCheck, types.TypeVoid, ptr, s.mem())
func (s *state) boundsCheck(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value {
idx = s.extendIndex(idx, len, kind, bounded)
- if bounded || Debug.B != 0 {
+ if bounded || base.Flag.B != 0 {
// If bounded or bounds checking is flag-disabled, then no check necessary,
// just return the extended index.
//
var cmp *ssa.Value
if kind == ssa.BoundsIndex || kind == ssa.BoundsIndexU {
- cmp = s.newValue2(ssa.OpIsInBounds, types.Types[TBOOL], idx, len)
+ cmp = s.newValue2(ssa.OpIsInBounds, types.Types[types.TBOOL], idx, len)
} else {
- cmp = s.newValue2(ssa.OpIsSliceInBounds, types.Types[TBOOL], idx, len)
+ cmp = s.newValue2(ssa.OpIsSliceInBounds, types.Types[types.TBOOL], idx, len)
}
b := s.endBlock()
b.Kind = ssa.BlockIf
s.startBlock(bNext)
// In Spectre index mode, apply an appropriate mask to avoid speculative out-of-bounds accesses.
- if spectreIndex {
+ if base.Flag.Cfg.SpectreIndex {
op := ssa.OpSpectreIndex
if kind != ssa.BoundsIndex && kind != ssa.BoundsIndexU {
op = ssa.OpSpectreSliceIndex
}
- idx = s.newValue2(op, types.Types[TINT], idx, len)
+ idx = s.newValue2(op, types.Types[types.TINT], idx, len)
}
return idx
b.Likely = ssa.BranchLikely
bNext := s.f.NewBlock(ssa.BlockPlain)
line := s.peekPos()
- pos := Ctxt.PosTable.Pos(line)
+ pos := base.Ctxt.PosTable.Pos(line)
fl := funcLine{f: fn, base: pos.Base(), line: pos.Line()}
bPanic := s.panics[fl]
if bPanic == nil {
s.startBlock(bNext)
}
-func (s *state) intDivide(n *Node, a, b *ssa.Value) *ssa.Value {
+func (s *state) intDivide(n ir.Node, a, b *ssa.Value) *ssa.Value {
needcheck := true
switch b.Op {
case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64:
}
if needcheck {
// do a size-appropriate check for zero
- cmp := s.newValue2(s.ssaOp(ONE, n.Type), types.Types[TBOOL], b, s.zeroVal(n.Type))
+ cmp := s.newValue2(s.ssaOp(ir.ONE, n.Type()), types.Types[types.TBOOL], b, s.zeroVal(n.Type()))
s.check(cmp, panicdivide)
}
- return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
+ return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
}
// rtcall issues a call to the given runtime function fn with the listed args.
func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args ...*ssa.Value) []*ssa.Value {
s.prevCall = nil
// Write args to the stack
- off := Ctxt.FixedFrameSize()
+ off := base.Ctxt.FixedFrameSize()
testLateExpansion := ssa.LateCallExpansionEnabledWithin(s.f)
var ACArgs []ssa.Param
var ACResults []ssa.Param
callArgs = append(callArgs, s.mem())
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
call.AddArgs(callArgs...)
- s.vars[&memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
+ s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
} else {
call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem())
- s.vars[&memVar] = call
+ s.vars[memVar] = call
}
if !returns {
b := s.endBlock()
b.Kind = ssa.BlockExit
b.SetControl(call)
- call.AuxInt = off - Ctxt.FixedFrameSize()
+ call.AuxInt = off - base.Ctxt.FixedFrameSize()
if len(results) > 0 {
s.Fatalf("panic call can't have results")
}
if skip == 0 && (!t.HasPointers() || ssa.IsStackAddr(left)) {
// Known to not have write barrier. Store the whole type.
- s.vars[&memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, left, right, s.mem(), leftIsStmt)
+ s.vars[memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, left, right, s.mem(), leftIsStmt)
return
}
if skip&skipLen != 0 {
return
}
- len := s.newValue1(ssa.OpStringLen, types.Types[TINT], right)
+ len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], right)
lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
- s.store(types.Types[TINT], lenAddr, len)
+ s.store(types.Types[types.TINT], lenAddr, len)
case t.IsSlice():
if skip&skipLen == 0 {
- len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], right)
+ len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], right)
lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
- s.store(types.Types[TINT], lenAddr, len)
+ s.store(types.Types[types.TINT], lenAddr, len)
}
if skip&skipCap == 0 {
- cap := s.newValue1(ssa.OpSliceCap, types.Types[TINT], right)
+ cap := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], right)
capAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, 2*s.config.PtrSize, left)
- s.store(types.Types[TINT], capAddr, cap)
+ s.store(types.Types[types.TINT], capAddr, cap)
}
case t.IsInterface():
// itab field doesn't need a write barrier (even though it is a pointer).
itab := s.newValue1(ssa.OpITab, s.f.Config.Types.BytePtr, right)
- s.store(types.Types[TUINTPTR], left, itab)
+ s.store(types.Types[types.TUINTPTR], left, itab)
case t.IsStruct():
n := t.NumFields()
for i := 0; i < n; i++ {
// putArg evaluates n for the purpose of passing it as an argument to a function and returns the corresponding Param for the call.
// If forLateExpandedCall is true, it returns the argument value to pass to the call operation.
// If forLateExpandedCall is false, then the value is stored at the specified stack offset, and the returned value is nil.
-func (s *state) putArg(n *Node, t *types.Type, off int64, forLateExpandedCall bool) (ssa.Param, *ssa.Value) {
+func (s *state) putArg(n ir.Node, t *types.Type, off int64, forLateExpandedCall bool) (ssa.Param, *ssa.Value) {
var a *ssa.Value
if forLateExpandedCall {
if !canSSAType(t) {
return ssa.Param{Type: t, Offset: int32(off)}, a
}
-func (s *state) storeArgWithBase(n *Node, t *types.Type, base *ssa.Value, off int64) {
+func (s *state) storeArgWithBase(n ir.Node, t *types.Type, base *ssa.Value, off int64) {
pt := types.NewPtr(t)
var addr *ssa.Value
if base == s.sp {
switch {
case t.IsSlice():
ptr = s.newValue1(ssa.OpSlicePtr, types.NewPtr(t.Elem()), v)
- len = s.newValue1(ssa.OpSliceLen, types.Types[TINT], v)
- cap = s.newValue1(ssa.OpSliceCap, types.Types[TINT], v)
+ len = s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], v)
+ cap = s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], v)
case t.IsString():
- ptr = s.newValue1(ssa.OpStringPtr, types.NewPtr(types.Types[TUINT8]), v)
- len = s.newValue1(ssa.OpStringLen, types.Types[TINT], v)
+ ptr = s.newValue1(ssa.OpStringPtr, types.NewPtr(types.Types[types.TUINT8]), v)
+ len = s.newValue1(ssa.OpStringLen, types.Types[types.TINT], v)
cap = len
case t.IsPtr():
if !t.Elem().IsArray() {
}
s.nilCheck(v)
ptr = s.newValue1(ssa.OpCopy, types.NewPtr(t.Elem().Elem()), v)
- len = s.constInt(types.Types[TINT], t.Elem().NumElem())
+ len = s.constInt(types.Types[types.TINT], t.Elem().NumElem())
cap = len
default:
s.Fatalf("bad type in slice %v\n", t)
// Set default values
if i == nil {
- i = s.constInt(types.Types[TINT], 0)
+ i = s.constInt(types.Types[types.TINT], 0)
}
if j == nil {
j = len
}
// Word-sized integer operations.
- subOp := s.ssaOp(OSUB, types.Types[TINT])
- mulOp := s.ssaOp(OMUL, types.Types[TINT])
- andOp := s.ssaOp(OAND, types.Types[TINT])
+ subOp := s.ssaOp(ir.OSUB, types.Types[types.TINT])
+ mulOp := s.ssaOp(ir.OMUL, types.Types[types.TINT])
+ andOp := s.ssaOp(ir.OAND, types.Types[types.TINT])
// Calculate the length (rlen) and capacity (rcap) of the new slice.
// For strings the capacity of the result is unimportant. However,
// we use rcap to test if we've generated a zero-length slice.
// Use length of strings for that.
- rlen := s.newValue2(subOp, types.Types[TINT], j, i)
+ rlen := s.newValue2(subOp, types.Types[types.TINT], j, i)
rcap := rlen
if j != k && !t.IsString() {
- rcap = s.newValue2(subOp, types.Types[TINT], k, i)
+ rcap = s.newValue2(subOp, types.Types[types.TINT], k, i)
}
if (i.Op == ssa.OpConst64 || i.Op == ssa.OpConst32) && i.AuxInt == 0 {
//
// Where mask(x) is 0 if x==0 and -1 if x>0 and stride is the width
// of the element type.
- stride := s.constInt(types.Types[TINT], ptr.Type.Elem().Width)
+ stride := s.constInt(types.Types[types.TINT], ptr.Type.Elem().Width)
// The delta is the number of bytes to offset ptr by.
- delta := s.newValue2(mulOp, types.Types[TINT], i, stride)
+ delta := s.newValue2(mulOp, types.Types[types.TINT], i, stride)
// If we're slicing to the point where the capacity is zero,
// zero out the delta.
- mask := s.newValue1(ssa.OpSlicemask, types.Types[TINT], rcap)
- delta = s.newValue2(andOp, types.Types[TINT], delta, mask)
+ mask := s.newValue1(ssa.OpSlicemask, types.Types[types.TINT], rcap)
+ delta = s.newValue2(andOp, types.Types[types.TINT], delta, mask)
// Compute rptr = ptr + delta.
rptr := s.newValue2(ssa.OpAddPtr, ptr.Type, ptr, delta)
one: (*state).constInt64,
}
-func (s *state) uint64Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) uint64Tofloat64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint64Tofloat(&u64_f64, n, x, ft, tt)
}
-func (s *state) uint64Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) uint64Tofloat32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint64Tofloat(&u64_f32, n, x, ft, tt)
}
-func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
// if x >= 0 {
// result = (floatY) x
// } else {
// equal to 10000000001; that rounds up, and the 1 cannot
// be lost else it would round down if the LSB of the
// candidate mantissa is 0.
- cmp := s.newValue2(cvttab.leq, types.Types[TBOOL], s.zeroVal(ft), x)
+ cmp := s.newValue2(cvttab.leq, types.Types[types.TBOOL], s.zeroVal(ft), x)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
- return s.variable(n, n.Type)
+ return s.variable(n, n.Type())
}
type u322fcvtTab struct {
cvtF2F: ssa.OpCvt64Fto32F,
}
-func (s *state) uint32Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) uint32Tofloat64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint32Tofloat(&u32_f64, n, x, ft, tt)
}
-func (s *state) uint32Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) uint32Tofloat32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint32Tofloat(&u32_f32, n, x, ft, tt)
}
-func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
// if x >= 0 {
// result = floatY(x)
// } else {
// result = floatY(float64(x) + (1<<32))
// }
- cmp := s.newValue2(ssa.OpLeq32, types.Types[TBOOL], s.zeroVal(ft), x)
+ cmp := s.newValue2(ssa.OpLeq32, types.Types[types.TBOOL], s.zeroVal(ft), x)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.AddEdgeTo(bElse)
s.startBlock(bElse)
- a1 := s.newValue1(ssa.OpCvt32to64F, types.Types[TFLOAT64], x)
- twoToThe32 := s.constFloat64(types.Types[TFLOAT64], float64(1<<32))
- a2 := s.newValue2(ssa.OpAdd64F, types.Types[TFLOAT64], a1, twoToThe32)
+ a1 := s.newValue1(ssa.OpCvt32to64F, types.Types[types.TFLOAT64], x)
+ twoToThe32 := s.constFloat64(types.Types[types.TFLOAT64], float64(1<<32))
+ a2 := s.newValue2(ssa.OpAdd64F, types.Types[types.TFLOAT64], a1, twoToThe32)
a3 := s.newValue1(cvttab.cvtF2F, tt, a2)
s.vars[n] = a3
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
- return s.variable(n, n.Type)
+ return s.variable(n, n.Type())
}
// referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
-func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value {
- if !n.Left.Type.IsMap() && !n.Left.Type.IsChan() {
+func (s *state) referenceTypeBuiltin(n *ir.UnaryExpr, x *ssa.Value) *ssa.Value {
+ if !n.Left().Type().IsMap() && !n.Left().Type().IsChan() {
s.Fatalf("node must be a map or a channel")
}
// if n == nil {
// // cap
// return *(((*int)n)+1)
// }
- lenType := n.Type
- nilValue := s.constNil(types.Types[TUINTPTR])
- cmp := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], x, nilValue)
+ lenType := n.Type()
+ nilValue := s.constNil(types.Types[types.TUINTPTR])
+ cmp := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], x, nilValue)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.AddEdgeTo(bElse)
s.startBlock(bElse)
- switch n.Op {
- case OLEN:
+ switch n.Op() {
+ case ir.OLEN:
// length is stored in the first word for map/chan
s.vars[n] = s.load(lenType, x)
- case OCAP:
+ case ir.OCAP:
// capacity is stored in the second word for chan
sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x)
s.vars[n] = s.load(lenType, sw)
cutoff: 1 << 31,
}
-func (s *state) float32ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) float32ToUint64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f32_u64, n, x, ft, tt)
}
-func (s *state) float64ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) float64ToUint64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f64_u64, n, x, ft, tt)
}
-func (s *state) float32ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) float32ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f32_u32, n, x, ft, tt)
}
-func (s *state) float64ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) float64ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f64_u32, n, x, ft, tt)
}
-func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) floatToUint(cvttab *f2uCvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
// cutoff:=1<<(intY_Size-1)
// if x < floatX(cutoff) {
// result = uintY(x)
// result = z | -(cutoff)
// }
cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff))
- cmp := s.newValue2(cvttab.ltf, types.Types[TBOOL], x, cutoff)
+ cmp := s.newValue2(cvttab.ltf, types.Types[types.TBOOL], x, cutoff)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
- return s.variable(n, n.Type)
+ return s.variable(n, n.Type())
}
// dottype generates SSA for a type assertion node.
// commaok indicates whether to panic or return a bool.
// If commaok is false, resok will be nil.
-func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
- iface := s.expr(n.Left) // input interface
- target := s.expr(n.Right) // target type
+func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Value) {
+ iface := s.expr(n.Left()) // input interface
+ target := s.expr(n.Right()) // target type
byteptr := s.f.Config.Types.BytePtr
- if n.Type.IsInterface() {
- if n.Type.IsEmptyInterface() {
+ if n.Type().IsInterface() {
+ if n.Type().IsEmptyInterface() {
// Converting to an empty interface.
// Input could be an empty or nonempty interface.
- if Debug_typeassert > 0 {
- Warnl(n.Pos, "type assertion inlined")
+ if base.Debug.TypeAssert > 0 {
+ base.WarnfAt(n.Pos(), "type assertion inlined")
}
// Get itab/type field from input.
itab := s.newValue1(ssa.OpITab, byteptr, iface)
// Conversion succeeds iff that field is not nil.
- cond := s.newValue2(ssa.OpNeqPtr, types.Types[TBOOL], itab, s.constNil(byteptr))
+ cond := s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], itab, s.constNil(byteptr))
- if n.Left.Type.IsEmptyInterface() && commaok {
+ if n.Left().Type().IsEmptyInterface() && commaok {
// Converting empty interface to empty interface with ,ok is just a nil check.
return iface, cond
}
// On success, return (perhaps modified) input interface.
s.startBlock(bOk)
- if n.Left.Type.IsEmptyInterface() {
+ if n.Left().Type().IsEmptyInterface() {
res = iface // Use input interface unchanged.
return
}
off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab)
typ := s.load(byteptr, off)
idata := s.newValue1(ssa.OpIData, byteptr, iface)
- res = s.newValue2(ssa.OpIMake, n.Type, typ, idata)
+ res = s.newValue2(ssa.OpIMake, n.Type(), typ, idata)
return
}
// nonempty -> empty
// Need to load type from itab
off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab)
- s.vars[&typVar] = s.load(byteptr, off)
+ s.vars[typVar] = s.load(byteptr, off)
s.endBlock()
// itab is nil, might as well use that as the nil result.
s.startBlock(bFail)
- s.vars[&typVar] = itab
+ s.vars[typVar] = itab
s.endBlock()
// Merge point.
bFail.AddEdgeTo(bEnd)
s.startBlock(bEnd)
idata := s.newValue1(ssa.OpIData, byteptr, iface)
- res = s.newValue2(ssa.OpIMake, n.Type, s.variable(&typVar, byteptr), idata)
+ res = s.newValue2(ssa.OpIMake, n.Type(), s.variable(typVar, byteptr), idata)
resok = cond
- delete(s.vars, &typVar)
+ delete(s.vars, typVar)
return
}
// converting to a nonempty interface needs a runtime call.
- if Debug_typeassert > 0 {
- Warnl(n.Pos, "type assertion not inlined")
+ if base.Debug.TypeAssert > 0 {
+ base.WarnfAt(n.Pos(), "type assertion not inlined")
}
- if n.Left.Type.IsEmptyInterface() {
+ if n.Left().Type().IsEmptyInterface() {
if commaok {
- call := s.rtcall(assertE2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface)
+ call := s.rtcall(assertE2I2, true, []*types.Type{n.Type(), types.Types[types.TBOOL]}, target, iface)
return call[0], call[1]
}
- return s.rtcall(assertE2I, true, []*types.Type{n.Type}, target, iface)[0], nil
+ return s.rtcall(assertE2I, true, []*types.Type{n.Type()}, target, iface)[0], nil
}
if commaok {
- call := s.rtcall(assertI2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface)
+ call := s.rtcall(assertI2I2, true, []*types.Type{n.Type(), types.Types[types.TBOOL]}, target, iface)
return call[0], call[1]
}
- return s.rtcall(assertI2I, true, []*types.Type{n.Type}, target, iface)[0], nil
+ return s.rtcall(assertI2I, true, []*types.Type{n.Type()}, target, iface)[0], nil
}
- if Debug_typeassert > 0 {
- Warnl(n.Pos, "type assertion inlined")
+ if base.Debug.TypeAssert > 0 {
+ base.WarnfAt(n.Pos(), "type assertion inlined")
}
// Converting to a concrete type.
- direct := isdirectiface(n.Type)
+ direct := isdirectiface(n.Type())
itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface
- if Debug_typeassert > 0 {
- Warnl(n.Pos, "type assertion inlined")
+ if base.Debug.TypeAssert > 0 {
+ base.WarnfAt(n.Pos(), "type assertion inlined")
}
var targetITab *ssa.Value
- if n.Left.Type.IsEmptyInterface() {
+ if n.Left().Type().IsEmptyInterface() {
// Looking for pointer to target type.
targetITab = target
} else {
// Looking for pointer to itab for target type and source interface.
- targetITab = s.expr(n.List.First())
+ targetITab = s.expr(n.List().First())
}
- var tmp *Node // temporary for use with large types
+ var tmp ir.Node // temporary for use with large types
var addr *ssa.Value // address of tmp
- if commaok && !canSSAType(n.Type) {
+ if commaok && !canSSAType(n.Type()) {
// unSSAable type, use temporary.
// TODO: get rid of some of these temporaries.
- tmp = tempAt(n.Pos, s.curfn, n.Type)
- s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem())
+ tmp = tempAt(n.Pos(), s.curfn, n.Type())
+ s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp.(*ir.Name), s.mem())
addr = s.addr(tmp)
}
- cond := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], itab, targetITab)
+ cond := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], itab, targetITab)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cond)
if !commaok {
// on failure, panic by calling panicdottype
s.startBlock(bFail)
- taddr := s.expr(n.Right.Right)
- if n.Left.Type.IsEmptyInterface() {
+ taddr := s.expr(n.Right().(*ir.AddrExpr).Right())
+ if n.Left().Type().IsEmptyInterface() {
s.rtcall(panicdottypeE, false, nil, itab, target, taddr)
} else {
s.rtcall(panicdottypeI, false, nil, itab, target, taddr)
// on success, return data from interface
s.startBlock(bOk)
if direct {
- return s.newValue1(ssa.OpIData, n.Type, iface), nil
+ return s.newValue1(ssa.OpIData, n.Type(), iface), nil
}
- p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
- return s.load(n.Type, p), nil
+ p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type()), iface)
+ return s.load(n.Type(), p), nil
}
// commaok is the more complicated case because we have
bEnd := s.f.NewBlock(ssa.BlockPlain)
// Note that we need a new valVar each time (unlike okVar where we can
// reuse the variable) because it might have a different type every time.
- valVar := &Node{Op: ONAME, Sym: &types.Sym{Name: "val"}}
+ valVar := ssaMarker("val")
// type assertion succeeded
s.startBlock(bOk)
if tmp == nil {
if direct {
- s.vars[valVar] = s.newValue1(ssa.OpIData, n.Type, iface)
+ s.vars[valVar] = s.newValue1(ssa.OpIData, n.Type(), iface)
} else {
- p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
- s.vars[valVar] = s.load(n.Type, p)
+ p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type()), iface)
+ s.vars[valVar] = s.load(n.Type(), p)
}
} else {
- p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
- s.move(n.Type, addr, p)
+ p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type()), iface)
+ s.move(n.Type(), addr, p)
}
- s.vars[&okVar] = s.constBool(true)
+ s.vars[okVar] = s.constBool(true)
s.endBlock()
bOk.AddEdgeTo(bEnd)
// type assertion failed
s.startBlock(bFail)
if tmp == nil {
- s.vars[valVar] = s.zeroVal(n.Type)
+ s.vars[valVar] = s.zeroVal(n.Type())
} else {
- s.zero(n.Type, addr)
+ s.zero(n.Type(), addr)
}
- s.vars[&okVar] = s.constBool(false)
+ s.vars[okVar] = s.constBool(false)
s.endBlock()
bFail.AddEdgeTo(bEnd)
// merge point
s.startBlock(bEnd)
if tmp == nil {
- res = s.variable(valVar, n.Type)
+ res = s.variable(valVar, n.Type())
delete(s.vars, valVar)
} else {
- res = s.load(n.Type, addr)
- s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp, s.mem())
+ res = s.load(n.Type(), addr)
+ s.vars[memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp.(*ir.Name), s.mem())
}
- resok = s.variable(&okVar, types.Types[TBOOL])
- delete(s.vars, &okVar)
+ resok = s.variable(okVar, types.Types[types.TBOOL])
+ delete(s.vars, okVar)
return res, resok
}
// variable returns the value of a variable at the current location.
-func (s *state) variable(name *Node, t *types.Type) *ssa.Value {
- v := s.vars[name]
+func (s *state) variable(n ir.Node, t *types.Type) *ssa.Value {
+ v := s.vars[n]
if v != nil {
return v
}
- v = s.fwdVars[name]
+ v = s.fwdVars[n]
if v != nil {
return v
}
if s.curBlock == s.f.Entry {
// No variable should be live at entry.
- s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, name, v)
+ s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, n, v)
}
// Make a FwdRef, which records a value that's live on block input.
// We'll find the matching definition as part of insertPhis.
- v = s.newValue0A(ssa.OpFwdRef, t, name)
- s.fwdVars[name] = v
- s.addNamedValue(name, v)
+ v = s.newValue0A(ssa.OpFwdRef, t, FwdRefAux{N: n})
+ s.fwdVars[n] = v
+ if n.Op() == ir.ONAME {
+ s.addNamedValue(n.(*ir.Name), v)
+ }
return v
}
func (s *state) mem() *ssa.Value {
- return s.variable(&memVar, types.TypeMem)
+ return s.variable(memVar, types.TypeMem)
}
-func (s *state) addNamedValue(n *Node, v *ssa.Value) {
- if n.Class() == Pxxx {
- // Don't track our dummy nodes (&memVar etc.).
+func (s *state) addNamedValue(n *ir.Name, v *ssa.Value) {
+ if n.Class() == ir.Pxxx {
+ // Don't track our marker nodes (memVar etc.).
return
}
- if n.IsAutoTmp() {
+ if ir.IsAutoTmp(n) {
// Don't track temporary variables.
return
}
- if n.Class() == PPARAMOUT {
+ if n.Class() == ir.PPARAMOUT {
// Don't track named output values. This prevents return values
// from being assigned too early. See #14591 and #14762. TODO: allow this.
return
}
- if n.Class() == PAUTO && n.Xoffset != 0 {
- s.Fatalf("AUTO var with offset %v %d", n, n.Xoffset)
- }
- loc := ssa.LocalSlot{N: n, Type: n.Type, Off: 0}
+ loc := ssa.LocalSlot{N: n.Name(), Type: n.Type(), Off: 0}
values, ok := s.f.NamedValues[loc]
if !ok {
s.f.Names = append(s.f.Names, loc)
bstart []*obj.Prog
// Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include PPC and Sparc V8.
- ScratchFpMem *Node
+ ScratchFpMem *ir.Name
maxarg int64 // largest frame size for arguments to calls made by the function
}
}
-// byXoffset implements sort.Interface for []*Node using Xoffset as the ordering.
-type byXoffset []*Node
+// byXoffset implements sort.Interface for []*ir.Name using Xoffset as the ordering.
+type byXoffset []*ir.Name
func (s byXoffset) Len() int { return len(s) }
-func (s byXoffset) Less(i, j int) bool { return s[i].Xoffset < s[j].Xoffset }
+func (s byXoffset) Less(i, j int) bool { return s[i].FrameOffset() < s[j].FrameOffset() }
func (s byXoffset) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func emitStackObjects(e *ssafn, pp *Progs) {
- var vars []*Node
- for _, n := range e.curfn.Func.Dcl {
- if livenessShouldTrack(n) && n.Name.Addrtaken() {
+ var vars []*ir.Name
+ for _, n := range e.curfn.Dcl {
+ if livenessShouldTrack(n) && n.Addrtaken() {
vars = append(vars, n)
}
}
// Populate the stack object data.
// Format must match runtime/stack.go:stackObjectRecord.
- x := e.curfn.Func.lsym.Func().StackObjects
+ x := e.curfn.LSym.Func().StackObjects
off := 0
off = duintptr(x, off, uint64(len(vars)))
for _, v := range vars {
// Note: arguments and return values have non-negative Xoffset,
// in which case the offset is relative to argp.
// Locals have a negative Xoffset, in which case the offset is relative to varp.
- off = duintptr(x, off, uint64(v.Xoffset))
- if !typesym(v.Type).Siggen() {
- e.Fatalf(v.Pos, "stack object's type symbol not generated for type %s", v.Type)
+ off = duintptr(x, off, uint64(v.FrameOffset()))
+ if !typesym(v.Type()).Siggen() {
+ e.Fatalf(v.Pos(), "stack object's type symbol not generated for type %s", v.Type())
}
- off = dsymptr(x, off, dtypesym(v.Type), 0)
+ off = dsymptr(x, off, dtypesym(v.Type()), 0)
}
// Emit a funcdata pointing at the stack object data.
p.To.Name = obj.NAME_EXTERN
p.To.Sym = x
- if debuglive != 0 {
+ if base.Flag.Live != 0 {
for _, v := range vars {
- Warnl(v.Pos, "stack object %v %s", v, v.Type.String())
+ base.WarnfAt(v.Pos(), "stack object %v %s", v, v.Type().String())
}
}
}
e := f.Frontend().(*ssafn)
- s.livenessMap = liveness(e, f, pp)
+ s.livenessMap = liveness(e.curfn, f, e.stkptrsize, pp)
emitStackObjects(e, pp)
- openDeferInfo := e.curfn.Func.lsym.Func().OpenCodedDeferInfo
+ openDeferInfo := e.curfn.LSym.Func().OpenCodedDeferInfo
if openDeferInfo != nil {
// This function uses open-coded defers -- write out the funcdata
// info that we computed at the end of genssa.
s.ScratchFpMem = e.scratchFpMem
- if Ctxt.Flag_locationlists {
+ if base.Ctxt.Flag_locationlists {
if cap(f.Cache.ValueToProgAfter) < f.NumValues() {
f.Cache.ValueToProgAfter = make([]*obj.Prog, f.NumValues())
}
thearch.SSAGenValue(&s, v)
}
- if Ctxt.Flag_locationlists {
+ if base.Ctxt.Flag_locationlists {
valueToProgAfter[v.ID] = s.pp.next
}
}
// Emit control flow instructions for block
var next *ssa.Block
- if i < len(f.Blocks)-1 && Debug.N == 0 {
+ if i < len(f.Blocks)-1 && base.Flag.N == 0 {
// If -N, leave next==nil so every block with successors
// ends in a JMP (except call blocks - plive doesn't like
// select{send,recv} followed by a JMP call). Helps keep
// some of the inline marks.
// Use this instruction instead.
p.Pos = p.Pos.WithIsStmt() // promote position to a statement
- pp.curfn.Func.lsym.Func().AddInlMark(p, inlMarks[m])
+ pp.curfn.LSym.Func().AddInlMark(p, inlMarks[m])
// Make the inline mark a real nop, so it doesn't generate any code.
m.As = obj.ANOP
m.Pos = src.NoXPos
// Any unmatched inline marks now need to be added to the inlining tree (and will generate a nop instruction).
for _, p := range inlMarkList {
if p.As != obj.ANOP {
- pp.curfn.Func.lsym.Func().AddInlMark(p, inlMarks[p])
+ pp.curfn.LSym.Func().AddInlMark(p, inlMarks[p])
}
}
}
- if Ctxt.Flag_locationlists {
- e.curfn.Func.DebugInfo = ssa.BuildFuncDebug(Ctxt, f, Debug_locationlist > 1, stackOffset)
+ if base.Ctxt.Flag_locationlists {
+ debugInfo := ssa.BuildFuncDebug(base.Ctxt, f, base.Debug.LocationLists > 1, stackOffset)
+ e.curfn.DebugInfo = debugInfo
bstart := s.bstart
// Note that at this moment, Prog.Pc is a sequence number; it's
// not a real PC until after assembly, so this mapping has to
// be done later.
- e.curfn.Func.DebugInfo.GetPC = func(b, v ssa.ID) int64 {
+ debugInfo.GetPC = func(b, v ssa.ID) int64 {
switch v {
case ssa.BlockStart.ID:
if b == f.Entry.ID {
}
return bstart[b].Pc
case ssa.BlockEnd.ID:
- return e.curfn.Func.lsym.Size
+ return e.curfn.LSym.Size
default:
return valueToProgAfter[v].Pc
}
// Fill in argument and frame size.
pp.Text.To.Type = obj.TYPE_TEXTSIZE
- pp.Text.To.Val = int32(Rnd(e.curfn.Type.ArgWidth(), int64(Widthreg)))
+ pp.Text.To.Val = int32(Rnd(e.curfn.Type().ArgWidth(), int64(Widthreg)))
pp.Text.To.Offset = frame
// Insert code to zero ambiguously live variables so that the
var state uint32
// Iterate through declarations. They are sorted in decreasing Xoffset order.
- for _, n := range e.curfn.Func.Dcl {
- if !n.Name.Needzero() {
+ for _, n := range e.curfn.Dcl {
+ if !n.Needzero() {
continue
}
- if n.Class() != PAUTO {
- e.Fatalf(n.Pos, "needzero class %d", n.Class())
+ if n.Class() != ir.PAUTO {
+ e.Fatalf(n.Pos(), "needzero class %d", n.Class())
}
- if n.Type.Size()%int64(Widthptr) != 0 || n.Xoffset%int64(Widthptr) != 0 || n.Type.Size() == 0 {
- e.Fatalf(n.Pos, "var %L has size %d offset %d", n, n.Type.Size(), n.Xoffset)
+ if n.Type().Size()%int64(Widthptr) != 0 || n.FrameOffset()%int64(Widthptr) != 0 || n.Type().Size() == 0 {
+ e.Fatalf(n.Pos(), "var %L has size %d offset %d", n, n.Type().Size(), n.Offset())
}
- if lo != hi && n.Xoffset+n.Type.Size() >= lo-int64(2*Widthreg) {
+ if lo != hi && n.FrameOffset()+n.Type().Size() >= lo-int64(2*Widthreg) {
// Merge with range we already have.
- lo = n.Xoffset
+ lo = n.FrameOffset()
continue
}
p = thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
// Set new range.
- lo = n.Xoffset
- hi = lo + n.Type.Size()
+ lo = n.FrameOffset()
+ hi = lo + n.Type().Size()
}
// Zero final range.
case *obj.LSym:
a.Name = obj.NAME_EXTERN
a.Sym = n
- case *Node:
- if n.Class() == PPARAM || n.Class() == PPARAMOUT {
+ case *ir.Name:
+ if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT {
a.Name = obj.NAME_PARAM
- a.Sym = n.Orig.Sym.Linksym()
- a.Offset += n.Xoffset
+ a.Sym = ir.Orig(n).Sym().Linksym()
+ a.Offset += n.FrameOffset()
break
}
a.Name = obj.NAME_AUTO
- a.Sym = n.Sym.Linksym()
- a.Offset += n.Xoffset
+ a.Sym = n.Sym().Linksym()
+ a.Offset += n.FrameOffset()
default:
v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
}
// high word and branch to out-of-bounds failure if it is not 0.
var lo *ssa.Value
if idx.Type.IsSigned() {
- lo = s.newValue1(ssa.OpInt64Lo, types.Types[TINT], idx)
+ lo = s.newValue1(ssa.OpInt64Lo, types.Types[types.TINT], idx)
} else {
- lo = s.newValue1(ssa.OpInt64Lo, types.Types[TUINT], idx)
+ lo = s.newValue1(ssa.OpInt64Lo, types.Types[types.TUINT], idx)
}
- if bounded || Debug.B != 0 {
+ if bounded || base.Flag.B != 0 {
return lo
}
bNext := s.f.NewBlock(ssa.BlockPlain)
bPanic := s.f.NewBlock(ssa.BlockExit)
- hi := s.newValue1(ssa.OpInt64Hi, types.Types[TUINT32], idx)
- cmp := s.newValue2(ssa.OpEq32, types.Types[TBOOL], hi, s.constInt32(types.Types[TUINT32], 0))
+ hi := s.newValue1(ssa.OpInt64Hi, types.Types[types.TUINT32], idx)
+ cmp := s.newValue2(ssa.OpEq32, types.Types[types.TBOOL], hi, s.constInt32(types.Types[types.TUINT32], 0))
if !idx.Type.IsSigned() {
switch kind {
case ssa.BoundsIndex:
s.Fatalf("bad unsigned index extension %s", idx.Type)
}
}
- return s.newValue1(op, types.Types[TINT], idx)
+ return s.newValue1(op, types.Types[types.TINT], idx)
}
// CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values.
func CheckLoweredGetClosurePtr(v *ssa.Value) {
entry := v.Block.Func.Entry
if entry != v.Block || entry.Values[0] != v {
- Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
+ base.Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
}
}
-// AutoVar returns a *Node and int64 representing the auto variable and offset within it
+// AutoVar returns a *Name and int64 representing the auto variable and offset within it
// where v should be spilled.
-func AutoVar(v *ssa.Value) (*Node, int64) {
+func AutoVar(v *ssa.Value) (*ir.Name, int64) {
loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot)
if v.Type.Size() > loc.Type.Size() {
v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type)
}
- return loc.N.(*Node), loc.Off
+ return loc.N, loc.Off
}
func AddrAuto(a *obj.Addr, v *ssa.Value) {
n, off := AutoVar(v)
a.Type = obj.TYPE_MEM
- a.Sym = n.Sym.Linksym()
+ a.Sym = n.Sym().Linksym()
a.Reg = int16(thearch.REGSP)
- a.Offset = n.Xoffset + off
- if n.Class() == PPARAM || n.Class() == PPARAMOUT {
+ a.Offset = n.FrameOffset() + off
+ if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT {
a.Name = obj.NAME_PARAM
} else {
a.Name = obj.NAME_AUTO
}
a.Type = obj.TYPE_MEM
a.Name = obj.NAME_AUTO
- a.Sym = s.ScratchFpMem.Sym.Linksym()
+ a.Sym = s.ScratchFpMem.Sym().Linksym()
a.Reg = int16(thearch.REGSP)
- a.Offset = s.ScratchFpMem.Xoffset
+ a.Offset = s.ScratchFpMem.Offset()
}
// Call returns a new CALL instruction for the SSA value v.
case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64:
p.To.Type = obj.TYPE_MEM
default:
- Fatalf("unknown indirect call family")
+ base.Fatalf("unknown indirect call family")
}
p.To.Reg = v.Args[0].Reg()
}
if !idx.StackMapValid() {
// See Liveness.hasStackMap.
if sym, ok := v.Aux.(*ssa.AuxCall); !ok || !(sym.Fn == typedmemclr || sym.Fn == typedmemmove) {
- Fatalf("missing stack map index for %v", v.LongString())
+ base.Fatalf("missing stack map index for %v", v.LongString())
}
}
}
// fieldIdx finds the index of the field referred to by the ODOT node n.
-func fieldIdx(n *Node) int {
- t := n.Left.Type
- f := n.Sym
+func fieldIdx(n *ir.SelectorExpr) int {
+ t := n.Left().Type()
+ f := n.Sym()
if !t.IsStruct() {
panic("ODOT's LHS is not a struct")
}
i++
continue
}
- if t1.Offset != n.Xoffset {
+ if t1.Offset != n.Offset() {
panic("field offset doesn't match")
}
return i
// ssafn holds frontend information about a function that the backend is processing.
// It also exports a bunch of compiler services for the ssa backend.
type ssafn struct {
- curfn *Node
+ curfn *ir.Func
strings map[string]*obj.LSym // map from constant string to data symbols
- scratchFpMem *Node // temp for floating point register / memory moves on some architectures
+ scratchFpMem *ir.Name // temp for floating point register / memory moves on some architectures
stksize int64 // stack size for current frame
stkptrsize int64 // prefix of stack containing pointers
log bool // print ssa debug to the stdout
if e.strings == nil {
e.strings = make(map[string]*obj.LSym)
}
- data := stringsym(e.curfn.Pos, s)
+ data := stringsym(e.curfn.Pos(), s)
e.strings[s] = data
return data
}
-func (e *ssafn) Auto(pos src.XPos, t *types.Type) ssa.GCNode {
- n := tempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list
- return n
+func (e *ssafn) Auto(pos src.XPos, t *types.Type) *ir.Name {
+ return tempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list
}
func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
- ptrType := types.NewPtr(types.Types[TUINT8])
- lenType := types.Types[TINT]
+ ptrType := types.NewPtr(types.Types[types.TUINT8])
+ lenType := types.Types[types.TINT]
// Split this string up into two separate variables.
p := e.SplitSlot(&name, ".ptr", 0, ptrType)
l := e.SplitSlot(&name, ".len", ptrType.Size(), lenType)
}
func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
- n := name.N.(*Node)
- u := types.Types[TUINTPTR]
- t := types.NewPtr(types.Types[TUINT8])
+ n := name.N
+ u := types.Types[types.TUINTPTR]
+ t := types.NewPtr(types.Types[types.TUINT8])
// Split this interface up into two separate variables.
f := ".itab"
- if n.Type.IsEmptyInterface() {
+ if n.Type().IsEmptyInterface() {
f = ".type"
}
c := e.SplitSlot(&name, f, 0, u) // see comment in plive.go:onebitwalktype1.
func (e *ssafn) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) {
ptrType := types.NewPtr(name.Type.Elem())
- lenType := types.Types[TINT]
+ lenType := types.Types[types.TINT]
p := e.SplitSlot(&name, ".ptr", 0, ptrType)
l := e.SplitSlot(&name, ".len", ptrType.Size(), lenType)
c := e.SplitSlot(&name, ".cap", ptrType.Size()+lenType.Size(), lenType)
s := name.Type.Size() / 2
var t *types.Type
if s == 8 {
- t = types.Types[TFLOAT64]
+ t = types.Types[types.TFLOAT64]
} else {
- t = types.Types[TFLOAT32]
+ t = types.Types[types.TFLOAT32]
}
r := e.SplitSlot(&name, ".real", 0, t)
i := e.SplitSlot(&name, ".imag", t.Size(), t)
func (e *ssafn) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
var t *types.Type
if name.Type.IsSigned() {
- t = types.Types[TINT32]
+ t = types.Types[types.TINT32]
} else {
- t = types.Types[TUINT32]
+ t = types.Types[types.TUINT32]
}
if thearch.LinkArch.ByteOrder == binary.BigEndian {
- return e.SplitSlot(&name, ".hi", 0, t), e.SplitSlot(&name, ".lo", t.Size(), types.Types[TUINT32])
+ return e.SplitSlot(&name, ".hi", 0, t), e.SplitSlot(&name, ".lo", t.Size(), types.Types[types.TUINT32])
}
- return e.SplitSlot(&name, ".hi", t.Size(), t), e.SplitSlot(&name, ".lo", 0, types.Types[TUINT32])
+ return e.SplitSlot(&name, ".hi", t.Size(), t), e.SplitSlot(&name, ".lo", 0, types.Types[types.TUINT32])
}
func (e *ssafn) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
}
func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot {
- n := name.N.(*Node)
+ n := name.N
at := name.Type
if at.NumElem() != 1 {
- e.Fatalf(n.Pos, "bad array size")
+ e.Fatalf(n.Pos(), "bad array size")
}
et := at.Elem()
return e.SplitSlot(&name, "[0]", 0, et)
// SplitSlot returns a slot representing the data of parent starting at offset.
func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
- node := parent.N.(*Node)
+ node := parent.N
- if node.Class() != PAUTO || node.Name.Addrtaken() {
+ if node.Class() != ir.PAUTO || node.Name().Addrtaken() {
// addressed things and non-autos retain their parents (i.e., cannot truly be split)
return ssa.LocalSlot{N: node, Type: t, Off: parent.Off + offset}
}
- s := &types.Sym{Name: node.Sym.Name + suffix, Pkg: localpkg}
-
- n := &Node{
- Name: new(Name),
- Op: ONAME,
- Pos: parent.N.(*Node).Pos,
- }
- n.Orig = n
-
- s.Def = asTypesNode(n)
- asNode(s.Def).Name.SetUsed(true)
- n.Sym = s
- n.Type = t
- n.SetClass(PAUTO)
- n.Esc = EscNever
- n.Name.Curfn = e.curfn
- e.curfn.Func.Dcl = append(e.curfn.Func.Dcl, n)
+ s := &types.Sym{Name: node.Sym().Name + suffix, Pkg: types.LocalPkg}
+ n := ir.NewNameAt(parent.N.Pos(), s)
+ s.Def = n
+ ir.AsNode(s.Def).Name().SetUsed(true)
+ n.SetType(t)
+ n.SetClass(ir.PAUTO)
+ n.SetEsc(EscNever)
+ n.Curfn = e.curfn
+ e.curfn.Dcl = append(e.curfn.Dcl, n)
dowidth(t)
return ssa.LocalSlot{N: n, Type: t, Off: 0, SplitOf: parent, SplitOffset: offset}
}
}
func (e *ssafn) Line(pos src.XPos) string {
- return linestr(pos)
+ return base.FmtPos(pos)
}
// Log logs a message from the compiler.
// Fatal reports a compiler error and exits.
func (e *ssafn) Fatalf(pos src.XPos, msg string, args ...interface{}) {
- lineno = pos
- nargs := append([]interface{}{e.curfn.funcname()}, args...)
- Fatalf("'%s': "+msg, nargs...)
+ base.Pos = pos
+ nargs := append([]interface{}{ir.FuncName(e.curfn)}, args...)
+ base.Fatalf("'%s': "+msg, nargs...)
}
// Warnl reports a "warning", which is usually flag-triggered
// logging output for the benefit of tests.
func (e *ssafn) Warnl(pos src.XPos, fmt_ string, args ...interface{}) {
- Warnl(pos, fmt_, args...)
+ base.WarnfAt(pos, fmt_, args...)
}
func (e *ssafn) Debug_checknil() bool {
- return Debug_checknil != 0
+ return base.Debug.Nil != 0
}
func (e *ssafn) UseWriteBarrier() bool {
- return use_writebarrier
+ return base.Flag.WB
}
func (e *ssafn) Syslook(name string) *obj.LSym {
}
func (e *ssafn) SetWBPos(pos src.XPos) {
- e.curfn.Func.setWBPos(pos)
+ e.curfn.SetWBPos(pos)
}
func (e *ssafn) MyImportPath() string {
- return myimportpath
+ return base.Ctxt.Pkgpath
}
-func (n *Node) Typ() *types.Type {
- return n.Type
-}
-func (n *Node) StorageClass() ssa.StorageClass {
- switch n.Class() {
- case PPARAM:
- return ssa.ClassParam
- case PPARAMOUT:
- return ssa.ClassParamOut
- case PAUTO:
- return ssa.ClassAuto
- default:
- Fatalf("untranslatable storage class for %v: %s", n, n.Class())
- return 0
+func clobberBase(n ir.Node) ir.Node {
+ if n.Op() == ir.ODOT {
+ if n.Left().Type().NumFields() == 1 {
+ return clobberBase(n.Left())
+ }
+ }
+ if n.Op() == ir.OINDEX {
+ if n.Left().Type().IsArray() && n.Left().Type().NumElem() == 1 {
+ return clobberBase(n.Left())
+ }
}
+ return n
}
-func clobberBase(n *Node) *Node {
- if n.Op == ODOT && n.Left.Type.NumFields() == 1 {
- return clobberBase(n.Left)
+// callTargetLSym determines the correct LSym for 'callee' when called
+// from function 'caller'. There are a couple of different scenarios
+// to contend with here:
+//
+// 1. if 'caller' is an ABI wrapper, then we always want to use the
+// LSym from the Func for the callee.
+//
+// 2. if 'caller' is not an ABI wrapper, then we looked at the callee
+// to see if it corresponds to a "known" ABI0 symbol (e.g. assembly
+// routine defined in the current package); if so, we want the call to
+// directly target the ABI0 symbol (effectively bypassing the
+// ABIInternal->ABI0 wrapper for 'callee').
+//
+// 3. in all other cases, want the regular ABIInternal linksym
+//
+func callTargetLSym(callee *types.Sym, callerLSym *obj.LSym) *obj.LSym {
+ lsym := callee.Linksym()
+ if !base.Flag.ABIWrap {
+ return lsym
}
- if n.Op == OINDEX && n.Left.Type.IsArray() && n.Left.Type.NumElem() == 1 {
- return clobberBase(n.Left)
+ if ir.AsNode(callee.Def) == nil {
+ return lsym
}
- return n
+ ndclfunc := ir.AsNode(callee.Def).Name().Defn
+ if ndclfunc == nil {
+ return lsym
+ }
+ // check for case 1 above
+ if callerLSym.ABIWrapper() {
+ if nlsym := ndclfunc.Func().LSym; nlsym != nil {
+ lsym = nlsym
+ }
+ } else {
+ // check for case 2 above
+ nam := ndclfunc.Func().Nname
+ defABI, hasDefABI := symabiDefs[nam.Sym().LinksymName()]
+ if hasDefABI && defABI == obj.ABI0 {
+ lsym = nam.Sym().LinksymABI0()
+ }
+ }
+ return lsym
}
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
- "cmd/internal/objabi"
"cmd/internal/src"
"crypto/md5"
"encoding/binary"
"fmt"
- "os"
- "runtime/debug"
+ "go/constant"
"sort"
"strconv"
"strings"
"unicode/utf8"
)
-type Error struct {
- pos src.XPos
- msg string
-}
-
-var errors []Error
-
// largeStack is info about a function whose stack frame is too large (rare).
type largeStack struct {
locals int64
largeStackFrames []largeStack
)
-func errorexit() {
- flusherrors()
- if outfile != "" {
- os.Remove(outfile)
- }
- os.Exit(2)
-}
-
-func adderrorname(n *Node) {
- if n.Op != ODOT {
- return
- }
- old := fmt.Sprintf("%v: undefined: %v\n", n.Line(), n.Left)
- if len(errors) > 0 && errors[len(errors)-1].pos.Line() == n.Pos.Line() && errors[len(errors)-1].msg == old {
- errors[len(errors)-1].msg = fmt.Sprintf("%v: undefined: %v in %v\n", n.Line(), n.Left, n)
- }
-}
-
-func adderr(pos src.XPos, format string, args ...interface{}) {
- msg := fmt.Sprintf(format, args...)
- // Only add the position if know the position.
- // See issue golang.org/issue/11361.
- if pos.IsKnown() {
- msg = fmt.Sprintf("%v: %s", linestr(pos), msg)
- }
- errors = append(errors, Error{
- pos: pos,
- msg: msg + "\n",
- })
-}
-
-// byPos sorts errors by source position.
-type byPos []Error
-
-func (x byPos) Len() int { return len(x) }
-func (x byPos) Less(i, j int) bool { return x[i].pos.Before(x[j].pos) }
-func (x byPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
-// flusherrors sorts errors seen so far by line number, prints them to stdout,
-// and empties the errors array.
-func flusherrors() {
- Ctxt.Bso.Flush()
- if len(errors) == 0 {
- return
- }
- sort.Stable(byPos(errors))
- for i, err := range errors {
- if i == 0 || err.msg != errors[i-1].msg {
- fmt.Printf("%s", err.msg)
- }
- }
- errors = errors[:0]
-}
-
-func hcrash() {
- if Debug.h != 0 {
- flusherrors()
- if outfile != "" {
- os.Remove(outfile)
- }
- var x *int
- *x = 0
- }
-}
-
-func linestr(pos src.XPos) string {
- return Ctxt.OutermostPos(pos).Format(Debug.C == 0, Debug.L == 1)
-}
-
-// lasterror keeps track of the most recently issued error.
-// It is used to avoid multiple error messages on the same
-// line.
-var lasterror struct {
- syntax src.XPos // source position of last syntax error
- other src.XPos // source position of last non-syntax error
- msg string // error message of last non-syntax error
-}
-
-// sameline reports whether two positions a, b are on the same line.
-func sameline(a, b src.XPos) bool {
- p := Ctxt.PosTable.Pos(a)
- q := Ctxt.PosTable.Pos(b)
- return p.Base() == q.Base() && p.Line() == q.Line()
-}
-
-func yyerrorl(pos src.XPos, format string, args ...interface{}) {
- msg := fmt.Sprintf(format, args...)
-
- if strings.HasPrefix(msg, "syntax error") {
- nsyntaxerrors++
- // only one syntax error per line, no matter what error
- if sameline(lasterror.syntax, pos) {
- return
- }
- lasterror.syntax = pos
- } else {
- // only one of multiple equal non-syntax errors per line
- // (flusherrors shows only one of them, so we filter them
- // here as best as we can (they may not appear in order)
- // so that we don't count them here and exit early, and
- // then have nothing to show for.)
- if sameline(lasterror.other, pos) && lasterror.msg == msg {
- return
- }
- lasterror.other = pos
- lasterror.msg = msg
- }
-
- adderr(pos, "%s", msg)
-
- hcrash()
- nerrors++
- if nsavederrors+nerrors >= 10 && Debug.e == 0 {
- flusherrors()
- fmt.Printf("%v: too many errors\n", linestr(pos))
- errorexit()
- }
-}
-
-func yyerrorv(lang string, format string, args ...interface{}) {
- what := fmt.Sprintf(format, args...)
- yyerrorl(lineno, "%s requires %s or later (-lang was set to %s; check go.mod)", what, lang, flag_lang)
-}
-
-func yyerror(format string, args ...interface{}) {
- yyerrorl(lineno, format, args...)
-}
-
-func Warn(fmt_ string, args ...interface{}) {
- Warnl(lineno, fmt_, args...)
-}
-
-func Warnl(line src.XPos, fmt_ string, args ...interface{}) {
- adderr(line, fmt_, args...)
- if Debug.m != 0 {
- flusherrors()
- }
-}
-
-func Fatalf(fmt_ string, args ...interface{}) {
- flusherrors()
-
- if Debug_panic != 0 || nsavederrors+nerrors == 0 {
- fmt.Printf("%v: internal compiler error: ", linestr(lineno))
- fmt.Printf(fmt_, args...)
- fmt.Printf("\n")
-
- // If this is a released compiler version, ask for a bug report.
- if strings.HasPrefix(objabi.Version, "go") {
- fmt.Printf("\n")
- fmt.Printf("Please file a bug report including a short program that triggers the error.\n")
- fmt.Printf("https://golang.org/issue/new\n")
- } else {
- // Not a release; dump a stack trace, too.
- fmt.Println()
- os.Stdout.Write(debug.Stack())
- fmt.Println()
- }
- }
-
- hcrash()
- errorexit()
-}
-
// hasUniquePos reports whether n has a unique position that can be
// used for reporting error messages.
//
// It's primarily used to distinguish references to named objects,
// whose Pos will point back to their declaration position rather than
// their usage position.
-func hasUniquePos(n *Node) bool {
- switch n.Op {
- case ONAME, OPACK:
+func hasUniquePos(n ir.Node) bool {
+ switch n.Op() {
+ case ir.ONAME, ir.OPACK:
return false
- case OLITERAL, OTYPE:
- if n.Sym != nil {
+ case ir.OLITERAL, ir.ONIL, ir.OTYPE:
+ if n.Sym() != nil {
return false
}
}
- if !n.Pos.IsKnown() {
- if Debug.K != 0 {
- Warn("setlineno: unknown position (line 0)")
+ if !n.Pos().IsKnown() {
+ if base.Flag.K != 0 {
+ base.Warn("setlineno: unknown position (line 0)")
}
return false
}
return true
}
-func setlineno(n *Node) src.XPos {
- lno := lineno
+func setlineno(n ir.Node) src.XPos {
+ lno := base.Pos
if n != nil && hasUniquePos(n) {
- lineno = n.Pos
+ base.Pos = n.Pos()
}
return lno
}
func lookup(name string) *types.Sym {
- return localpkg.Lookup(name)
+ return types.LocalPkg.Lookup(name)
}
// lookupN looks up the symbol starting with prefix and ending with
var buf [20]byte // plenty long enough for all current users
copy(buf[:], prefix)
b := strconv.AppendInt(buf[:len(prefix)], int64(n), 10)
- return localpkg.LookupBytes(b)
+ return types.LocalPkg.LookupBytes(b)
}
// autolabel generates a new Name node for use with
// user labels.
func autolabel(prefix string) *types.Sym {
if prefix[0] != '.' {
- Fatalf("autolabel prefix must start with '.', have %q", prefix)
+ base.Fatalf("autolabel prefix must start with '.', have %q", prefix)
}
fn := Curfn
if Curfn == nil {
- Fatalf("autolabel outside function")
+ base.Fatalf("autolabel outside function")
}
- n := fn.Func.Label
- fn.Func.Label++
+ n := fn.Label
+ fn.Label++
return lookupN(prefix, int(n))
}
-// find all the exported symbols in package opkg
+// dotImports tracks all PkgNames that have been dot-imported.
+var dotImports []*ir.PkgName
+
+// dotImportRefs maps idents introduced by importDot back to the
+// ir.PkgName they were dot-imported through.
+var dotImportRefs map[*ir.Ident]*ir.PkgName
+
+// find all the exported symbols in package referenced by PkgName,
// and make them available in the current package
-func importdot(opkg *types.Pkg, pack *Node) {
- n := 0
+func importDot(pack *ir.PkgName) {
+ if dotImportRefs == nil {
+ dotImportRefs = make(map[*ir.Ident]*ir.PkgName)
+ }
+
+ opkg := pack.Pkg
for _, s := range opkg.Syms {
if s.Def == nil {
- continue
+ if _, ok := declImporter[s]; !ok {
+ continue
+ }
}
if !types.IsExported(s.Name) || strings.ContainsRune(s.Name, 0xb7) { // 0xb7 = center dot
continue
s1 := lookup(s.Name)
if s1.Def != nil {
pkgerror := fmt.Sprintf("during import %q", opkg.Path)
- redeclare(lineno, s1, pkgerror)
+ redeclare(base.Pos, s1, pkgerror)
continue
}
- s1.Def = s.Def
- s1.Block = s.Block
- if asNode(s1.Def).Name == nil {
- Dump("s1def", asNode(s1.Def))
- Fatalf("missing Name")
- }
- asNode(s1.Def).Name.Pack = pack
- s1.Origpkg = opkg
- n++
+ id := ir.NewIdent(src.NoXPos, s)
+ dotImportRefs[id] = pack
+ s1.Def = id
+ s1.Block = 1
}
- if n == 0 {
- // can't possibly be used - there were no symbols
- yyerrorl(pack.Pos, "imported and not used: %q", opkg.Path)
- }
+ dotImports = append(dotImports, pack)
}
-func nod(op Op, nleft, nright *Node) *Node {
- return nodl(lineno, op, nleft, nright)
-}
+// checkDotImports reports errors for any unused dot imports.
+func checkDotImports() {
+ for _, pack := range dotImports {
+ if !pack.Used {
+ base.ErrorfAt(pack.Pos(), "imported and not used: %q", pack.Pkg.Path)
+ }
+ }
-func nodl(pos src.XPos, op Op, nleft, nright *Node) *Node {
- var n *Node
- switch op {
- case OCLOSURE, ODCLFUNC:
- var x struct {
- n Node
- f Func
- }
- n = &x.n
- n.Func = &x.f
- case ONAME:
- Fatalf("use newname instead")
- case OLABEL, OPACK:
- var x struct {
- n Node
- m Name
- }
- n = &x.n
- n.Name = &x.m
- default:
- n = new(Node)
- }
- n.Op = op
- n.Left = nleft
- n.Right = nright
- n.Pos = pos
- n.Xoffset = BADWIDTH
- n.Orig = n
- return n
+ // No longer needed; release memory.
+ dotImports = nil
+ dotImportRefs = nil
}
-// newname returns a new ONAME Node associated with symbol s.
-func newname(s *types.Sym) *Node {
- n := newnamel(lineno, s)
- n.Name.Curfn = Curfn
- return n
+// nodAddr returns a node representing &n at base.Pos.
+func nodAddr(n ir.Node) *ir.AddrExpr {
+ return nodAddrAt(base.Pos, n)
}
-// newnamel returns a new ONAME Node associated with symbol s at position pos.
-// The caller is responsible for setting n.Name.Curfn.
-func newnamel(pos src.XPos, s *types.Sym) *Node {
- if s == nil {
- Fatalf("newnamel nil")
- }
-
- var x struct {
- n Node
- m Name
- p Param
- }
- n := &x.n
- n.Name = &x.m
- n.Name.Param = &x.p
-
- n.Op = ONAME
- n.Pos = pos
- n.Orig = n
+// nodAddrPos returns a node representing &n at position pos.
+func nodAddrAt(pos src.XPos, n ir.Node) *ir.AddrExpr {
+ return ir.NewAddrExpr(pos, n)
+}
- n.Sym = s
+// newname returns a new ONAME Node associated with symbol s.
+func NewName(s *types.Sym) *ir.Name {
+ n := ir.NewNameAt(base.Pos, s)
+ n.Curfn = Curfn
return n
}
// nodSym makes a Node with Op op and with the Left field set to left
// and the Sym field set to sym. This is for ODOT and friends.
-func nodSym(op Op, left *Node, sym *types.Sym) *Node {
- return nodlSym(lineno, op, left, sym)
+func nodSym(op ir.Op, left ir.Node, sym *types.Sym) ir.Node {
+ return nodlSym(base.Pos, op, left, sym)
}
// nodlSym makes a Node with position Pos, with Op op, and with the Left field set to left
// and the Sym field set to sym. This is for ODOT and friends.
-func nodlSym(pos src.XPos, op Op, left *Node, sym *types.Sym) *Node {
- n := nodl(pos, op, left, nil)
- n.Sym = sym
+func nodlSym(pos src.XPos, op ir.Op, left ir.Node, sym *types.Sym) ir.Node {
+ n := ir.NodAt(pos, op, left, nil)
+ n.SetSym(sym)
return n
}
-// rawcopy returns a shallow copy of n.
-// Note: copy or sepcopy (rather than rawcopy) is usually the
-// correct choice (see comment with Node.copy, below).
-func (n *Node) rawcopy() *Node {
- copy := *n
- return ©
-}
-
-// sepcopy returns a separate shallow copy of n, with the copy's
-// Orig pointing to itself.
-func (n *Node) sepcopy() *Node {
- copy := *n
- copy.Orig = ©
- return ©
-}
-
-// copy returns shallow copy of n and adjusts the copy's Orig if
-// necessary: In general, if n.Orig points to itself, the copy's
-// Orig should point to itself as well. Otherwise, if n is modified,
-// the copy's Orig node appears modified, too, and then doesn't
-// represent the original node anymore.
-// (This caused the wrong complit Op to be used when printing error
-// messages; see issues #26855, #27765).
-func (n *Node) copy() *Node {
- copy := *n
- if n.Orig == n {
- copy.Orig = ©
- }
- return ©
-}
-
// methcmp sorts methods by symbol.
type methcmp []*types.Field
func (x methcmp) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x methcmp) Less(i, j int) bool { return x[i].Sym.Less(x[j].Sym) }
-func nodintconst(v int64) *Node {
- u := new(Mpint)
- u.SetInt64(v)
- return nodlit(Val{u})
+func nodintconst(v int64) ir.Node {
+ return ir.NewLiteral(constant.MakeInt64(v))
}
-func nodnil() *Node {
- return nodlit(Val{new(NilVal)})
+func nodnil() ir.Node {
+ n := ir.Nod(ir.ONIL, nil, nil)
+ n.SetType(types.Types[types.TNIL])
+ return n
}
-func nodbool(b bool) *Node {
- return nodlit(Val{b})
+func nodbool(b bool) ir.Node {
+ return ir.NewLiteral(constant.MakeBool(b))
}
-func nodstr(s string) *Node {
- return nodlit(Val{s})
+func nodstr(s string) ir.Node {
+ return ir.NewLiteral(constant.MakeString(s))
}
-// treecopy recursively copies n, with the exception of
-// ONAME, OLITERAL, OTYPE, and ONONAME leaves.
-// If pos.IsKnown(), it sets the source position of newly
-// allocated nodes to pos.
-func treecopy(n *Node, pos src.XPos) *Node {
- if n == nil {
- return nil
- }
-
- switch n.Op {
- default:
- m := n.sepcopy()
- m.Left = treecopy(n.Left, pos)
- m.Right = treecopy(n.Right, pos)
- m.List.Set(listtreecopy(n.List.Slice(), pos))
- if pos.IsKnown() {
- m.Pos = pos
- }
- if m.Name != nil && n.Op != ODCLFIELD {
- Dump("treecopy", n)
- Fatalf("treecopy Name")
- }
- return m
-
- case OPACK:
- // OPACK nodes are never valid in const value declarations,
- // but allow them like any other declared symbol to avoid
- // crashing (golang.org/issue/11361).
- fallthrough
-
- case ONAME, ONONAME, OLITERAL, OTYPE:
- return n
-
- }
-}
-
-// isNil reports whether n represents the universal untyped zero value "nil".
-func (n *Node) isNil() bool {
- // Check n.Orig because constant propagation may produce typed nil constants,
- // which don't exist in the Go spec.
- return Isconst(n.Orig, CTNIL)
-}
-
-func isptrto(t *types.Type, et types.EType) bool {
+func isptrto(t *types.Type, et types.Kind) bool {
if t == nil {
return false
}
if t == nil {
return false
}
- if t.Etype != et {
+ if t.Kind() != et {
return false
}
return true
}
-func (n *Node) isBlank() bool {
- if n == nil {
- return false
- }
- return n.Sym.IsBlank()
-}
-
// methtype returns the underlying type, if any,
// that owns methods with receiver parameter t.
// The result is either a named type or an anonymous struct.
// Strip away pointer if it's there.
if t.IsPtr() {
- if t.Sym != nil {
+ if t.Sym() != nil {
return nil
}
t = t.Elem()
}
// Must be a named type or anonymous struct.
- if t.Sym == nil && !t.IsStruct() {
+ if t.Sym() == nil && !t.IsStruct() {
return nil
}
// Check types.
- if issimple[t.Etype] {
+ if issimple[t.Kind()] {
return t
}
- switch t.Etype {
- case TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRING, TSTRUCT:
+ switch t.Kind() {
+ case types.TARRAY, types.TCHAN, types.TFUNC, types.TMAP, types.TSLICE, types.TSTRING, types.TSTRUCT:
return t
}
return nil
// If so, return op code to use in conversion.
// If not, return OXXX. In this case, the string return parameter may
// hold a reason why. In all other cases, it'll be the empty string.
-func assignop(src, dst *types.Type) (Op, string) {
+func assignop(src, dst *types.Type) (ir.Op, string) {
if src == dst {
- return OCONVNOP, ""
+ return ir.OCONVNOP, ""
}
- if src == nil || dst == nil || src.Etype == TFORW || dst.Etype == TFORW || src.Orig == nil || dst.Orig == nil {
- return OXXX, ""
+ if src == nil || dst == nil || src.Kind() == types.TFORW || dst.Kind() == types.TFORW || src.Underlying() == nil || dst.Underlying() == nil {
+ return ir.OXXX, ""
}
// 1. src type is identical to dst.
if types.Identical(src, dst) {
- return OCONVNOP, ""
+ return ir.OCONVNOP, ""
}
// 2. src and dst have identical underlying types
// we want to recompute the itab. Recomputing the itab ensures
// that itabs are unique (thus an interface with a compile-time
// type I has an itab with interface type I).
- if types.Identical(src.Orig, dst.Orig) {
+ if types.Identical(src.Underlying(), dst.Underlying()) {
if src.IsEmptyInterface() {
// Conversion between two empty interfaces
// requires no code.
- return OCONVNOP, ""
+ return ir.OCONVNOP, ""
}
- if (src.Sym == nil || dst.Sym == nil) && !src.IsInterface() {
+ if (src.Sym() == nil || dst.Sym() == nil) && !src.IsInterface() {
// Conversion between two types, at least one unnamed,
// needs no conversion. The exception is nonempty interfaces
// which need to have their itab updated.
- return OCONVNOP, ""
+ return ir.OCONVNOP, ""
}
}
// 3. dst is an interface type and src implements dst.
- if dst.IsInterface() && src.Etype != TNIL {
+ if dst.IsInterface() && src.Kind() != types.TNIL {
var missing, have *types.Field
var ptr int
if implements(src, dst, &missing, &have, &ptr) {
- return OCONVIFACE, ""
+ // Call itabname so that (src, dst)
+ // gets added to itabs early, which allows
+ // us to de-virtualize calls through this
+ // type/interface pair later. See peekitabs in reflect.go
+ if isdirectiface(src) && !dst.IsEmptyInterface() {
+ NeedITab(src, dst)
+ }
+
+ return ir.OCONVIFACE, ""
}
// we'll have complained about this method anyway, suppress spurious messages.
if have != nil && have.Sym == missing.Sym && (have.Type.Broke() || missing.Type.Broke()) {
- return OCONVIFACE, ""
+ return ir.OCONVIFACE, ""
}
var why string
- if isptrto(src, TINTER) {
+ if isptrto(src, types.TINTER) {
why = fmt.Sprintf(":\n\t%v is pointer to interface, not interface", src)
} else if have != nil && have.Sym == missing.Sym && have.Nointerface() {
why = fmt.Sprintf(":\n\t%v does not implement %v (%v method is marked 'nointerface')", src, dst, missing.Sym)
} else if have != nil && have.Sym == missing.Sym {
why = fmt.Sprintf(":\n\t%v does not implement %v (wrong type for %v method)\n"+
- "\t\thave %v%0S\n\t\twant %v%0S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+ "\t\thave %v%S\n\t\twant %v%S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
} else if ptr != 0 {
why = fmt.Sprintf(":\n\t%v does not implement %v (%v method has pointer receiver)", src, dst, missing.Sym)
} else if have != nil {
why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)\n"+
- "\t\thave %v%0S\n\t\twant %v%0S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+ "\t\thave %v%S\n\t\twant %v%S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
} else {
why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)", src, dst, missing.Sym)
}
- return OXXX, why
+ return ir.OXXX, why
}
- if isptrto(dst, TINTER) {
+ if isptrto(dst, types.TINTER) {
why := fmt.Sprintf(":\n\t%v is pointer to interface, not interface", dst)
- return OXXX, why
+ return ir.OXXX, why
}
- if src.IsInterface() && dst.Etype != TBLANK {
+ if src.IsInterface() && dst.Kind() != types.TBLANK {
var missing, have *types.Field
var ptr int
var why string
if implements(dst, src, &missing, &have, &ptr) {
why = ": need type assertion"
}
- return OXXX, why
+ return ir.OXXX, why
}
// 4. src is a bidirectional channel value, dst is a channel type,
// src and dst have identical element types, and
// either src or dst is not a named type.
if src.IsChan() && src.ChanDir() == types.Cboth && dst.IsChan() {
- if types.Identical(src.Elem(), dst.Elem()) && (src.Sym == nil || dst.Sym == nil) {
- return OCONVNOP, ""
+ if types.Identical(src.Elem(), dst.Elem()) && (src.Sym() == nil || dst.Sym() == nil) {
+ return ir.OCONVNOP, ""
}
}
// 5. src is the predeclared identifier nil and dst is a nillable type.
- if src.Etype == TNIL {
- switch dst.Etype {
- case TPTR,
- TFUNC,
- TMAP,
- TCHAN,
- TINTER,
- TSLICE:
- return OCONVNOP, ""
+ if src.Kind() == types.TNIL {
+ switch dst.Kind() {
+ case types.TPTR,
+ types.TFUNC,
+ types.TMAP,
+ types.TCHAN,
+ types.TINTER,
+ types.TSLICE:
+ return ir.OCONVNOP, ""
}
}
// 6. rule about untyped constants - already converted by defaultlit.
// 7. Any typed value can be assigned to the blank identifier.
- if dst.Etype == TBLANK {
- return OCONVNOP, ""
+ if dst.Kind() == types.TBLANK {
+ return ir.OCONVNOP, ""
}
- return OXXX, ""
+ return ir.OXXX, ""
}
// Can we convert a value of type src to a value of type dst?
// If not, return OXXX. In this case, the string return parameter may
// hold a reason why. In all other cases, it'll be the empty string.
// srcConstant indicates whether the value of type src is a constant.
-func convertop(srcConstant bool, src, dst *types.Type) (Op, string) {
+func convertop(srcConstant bool, src, dst *types.Type) (ir.Op, string) {
if src == dst {
- return OCONVNOP, ""
+ return ir.OCONVNOP, ""
}
if src == nil || dst == nil {
- return OXXX, ""
+ return ir.OXXX, ""
}
// Conversions from regular to go:notinheap are not allowed
// (a) Disallow (*T) to (*U) where T is go:notinheap but U isn't.
if src.IsPtr() && dst.IsPtr() && dst.Elem().NotInHeap() && !src.Elem().NotInHeap() {
why := fmt.Sprintf(":\n\t%v is incomplete (or unallocatable), but %v is not", dst.Elem(), src.Elem())
- return OXXX, why
+ return ir.OXXX, why
}
// (b) Disallow string to []T where T is go:notinheap.
- if src.IsString() && dst.IsSlice() && dst.Elem().NotInHeap() && (dst.Elem().Etype == types.Bytetype.Etype || dst.Elem().Etype == types.Runetype.Etype) {
+ if src.IsString() && dst.IsSlice() && dst.Elem().NotInHeap() && (dst.Elem().Kind() == types.ByteType.Kind() || dst.Elem().Kind() == types.RuneType.Kind()) {
why := fmt.Sprintf(":\n\t%v is incomplete (or unallocatable)", dst.Elem())
- return OXXX, why
+ return ir.OXXX, why
}
// 1. src can be assigned to dst.
op, why := assignop(src, dst)
- if op != OXXX {
+ if op != ir.OXXX {
return op, why
}
// with the good message from assignop.
// Otherwise clear the error.
if src.IsInterface() || dst.IsInterface() {
- return OXXX, why
+ return ir.OXXX, why
}
// 2. Ignoring struct tags, src and dst have identical underlying types.
- if types.IdenticalIgnoreTags(src.Orig, dst.Orig) {
- return OCONVNOP, ""
+ if types.IdenticalIgnoreTags(src.Underlying(), dst.Underlying()) {
+ return ir.OCONVNOP, ""
}
// 3. src and dst are unnamed pointer types and, ignoring struct tags,
// their base types have identical underlying types.
- if src.IsPtr() && dst.IsPtr() && src.Sym == nil && dst.Sym == nil {
- if types.IdenticalIgnoreTags(src.Elem().Orig, dst.Elem().Orig) {
- return OCONVNOP, ""
+ if src.IsPtr() && dst.IsPtr() && src.Sym() == nil && dst.Sym() == nil {
+ if types.IdenticalIgnoreTags(src.Elem().Underlying(), dst.Elem().Underlying()) {
+ return ir.OCONVNOP, ""
}
}
// 4. src and dst are both integer or floating point types.
if (src.IsInteger() || src.IsFloat()) && (dst.IsInteger() || dst.IsFloat()) {
- if simtype[src.Etype] == simtype[dst.Etype] {
- return OCONVNOP, ""
+ if simtype[src.Kind()] == simtype[dst.Kind()] {
+ return ir.OCONVNOP, ""
}
- return OCONV, ""
+ return ir.OCONV, ""
}
// 5. src and dst are both complex types.
if src.IsComplex() && dst.IsComplex() {
- if simtype[src.Etype] == simtype[dst.Etype] {
- return OCONVNOP, ""
+ if simtype[src.Kind()] == simtype[dst.Kind()] {
+ return ir.OCONVNOP, ""
}
- return OCONV, ""
+ return ir.OCONV, ""
}
// Special case for constant conversions: any numeric
// conversion is potentially okay. We'll validate further
// within evconst. See #38117.
if srcConstant && (src.IsInteger() || src.IsFloat() || src.IsComplex()) && (dst.IsInteger() || dst.IsFloat() || dst.IsComplex()) {
- return OCONV, ""
+ return ir.OCONV, ""
}
// 6. src is an integer or has type []byte or []rune
// and dst is a string type.
if src.IsInteger() && dst.IsString() {
- return ORUNESTR, ""
+ return ir.ORUNESTR, ""
}
if src.IsSlice() && dst.IsString() {
- if src.Elem().Etype == types.Bytetype.Etype {
- return OBYTES2STR, ""
+ if src.Elem().Kind() == types.ByteType.Kind() {
+ return ir.OBYTES2STR, ""
}
- if src.Elem().Etype == types.Runetype.Etype {
- return ORUNES2STR, ""
+ if src.Elem().Kind() == types.RuneType.Kind() {
+ return ir.ORUNES2STR, ""
}
}
// 7. src is a string and dst is []byte or []rune.
// String to slice.
if src.IsString() && dst.IsSlice() {
- if dst.Elem().Etype == types.Bytetype.Etype {
- return OSTR2BYTES, ""
+ if dst.Elem().Kind() == types.ByteType.Kind() {
+ return ir.OSTR2BYTES, ""
}
- if dst.Elem().Etype == types.Runetype.Etype {
- return OSTR2RUNES, ""
+ if dst.Elem().Kind() == types.RuneType.Kind() {
+ return ir.OSTR2RUNES, ""
}
}
// 8. src is a pointer or uintptr and dst is unsafe.Pointer.
if (src.IsPtr() || src.IsUintptr()) && dst.IsUnsafePtr() {
- return OCONVNOP, ""
+ return ir.OCONVNOP, ""
}
// 9. src is unsafe.Pointer and dst is a pointer or uintptr.
if src.IsUnsafePtr() && (dst.IsPtr() || dst.IsUintptr()) {
- return OCONVNOP, ""
+ return ir.OCONVNOP, ""
}
// src is map and dst is a pointer to corresponding hmap.
// This rule is needed for the implementation detail that
// go gc maps are implemented as a pointer to a hmap struct.
- if src.Etype == TMAP && dst.IsPtr() &&
+ if src.Kind() == types.TMAP && dst.IsPtr() &&
src.MapType().Hmap == dst.Elem() {
- return OCONVNOP, ""
+ return ir.OCONVNOP, ""
}
- return OXXX, ""
+ return ir.OXXX, ""
}
-func assignconv(n *Node, t *types.Type, context string) *Node {
+func assignconv(n ir.Node, t *types.Type, context string) ir.Node {
return assignconvfn(n, t, func() string { return context })
}
// Convert node n for assignment to type t.
-func assignconvfn(n *Node, t *types.Type, context func() string) *Node {
- if n == nil || n.Type == nil || n.Type.Broke() {
+func assignconvfn(n ir.Node, t *types.Type, context func() string) ir.Node {
+ if n == nil || n.Type() == nil || n.Type().Broke() {
return n
}
- if t.Etype == TBLANK && n.Type.Etype == TNIL {
- yyerror("use of untyped nil")
+ if t.Kind() == types.TBLANK && n.Type().Kind() == types.TNIL {
+ base.Errorf("use of untyped nil")
}
n = convlit1(n, t, false, context)
- if n.Type == nil {
+ if n.Type() == nil {
return n
}
- if t.Etype == TBLANK {
+ if t.Kind() == types.TBLANK {
return n
}
// Convert ideal bool from comparison to plain bool
// if the next step is non-bool (like interface{}).
- if n.Type == types.UntypedBool && !t.IsBoolean() {
- if n.Op == ONAME || n.Op == OLITERAL {
- r := nod(OCONVNOP, n, nil)
- r.Type = types.Types[TBOOL]
+ if n.Type() == types.UntypedBool && !t.IsBoolean() {
+ if n.Op() == ir.ONAME || n.Op() == ir.OLITERAL {
+ r := ir.Nod(ir.OCONVNOP, n, nil)
+ r.SetType(types.Types[types.TBOOL])
r.SetTypecheck(1)
r.SetImplicit(true)
n = r
}
}
- if types.Identical(n.Type, t) {
+ if types.Identical(n.Type(), t) {
return n
}
- op, why := assignop(n.Type, t)
- if op == OXXX {
- yyerror("cannot use %L as type %v in %s%s", n, t, context(), why)
- op = OCONV
+ op, why := assignop(n.Type(), t)
+ if op == ir.OXXX {
+ base.Errorf("cannot use %L as type %v in %s%s", n, t, context(), why)
+ op = ir.OCONV
}
- r := nod(op, n, nil)
- r.Type = t
+ r := ir.NewConvExpr(base.Pos, op, t, n)
r.SetTypecheck(1)
r.SetImplicit(true)
- r.Orig = n.Orig
return r
}
-// IsMethod reports whether n is a method.
-// n must be a function or a method.
-func (n *Node) IsMethod() bool {
- return n.Type.Recv() != nil
-}
-
-// SliceBounds returns n's slice bounds: low, high, and max in expr[low:high:max].
-// n must be a slice expression. max is nil if n is a simple slice expression.
-func (n *Node) SliceBounds() (low, high, max *Node) {
- if n.List.Len() == 0 {
- return nil, nil, nil
- }
-
- switch n.Op {
- case OSLICE, OSLICEARR, OSLICESTR:
- s := n.List.Slice()
- return s[0], s[1], nil
- case OSLICE3, OSLICE3ARR:
- s := n.List.Slice()
- return s[0], s[1], s[2]
- }
- Fatalf("SliceBounds op %v: %v", n.Op, n)
- return nil, nil, nil
-}
-
-// SetSliceBounds sets n's slice bounds, where n is a slice expression.
-// n must be a slice expression. If max is non-nil, n must be a full slice expression.
-func (n *Node) SetSliceBounds(low, high, max *Node) {
- switch n.Op {
- case OSLICE, OSLICEARR, OSLICESTR:
- if max != nil {
- Fatalf("SetSliceBounds %v given three bounds", n.Op)
- }
- s := n.List.Slice()
- if s == nil {
- if low == nil && high == nil {
- return
- }
- n.List.Set2(low, high)
- return
- }
- s[0] = low
- s[1] = high
- return
- case OSLICE3, OSLICE3ARR:
- s := n.List.Slice()
- if s == nil {
- if low == nil && high == nil && max == nil {
- return
- }
- n.List.Set3(low, high, max)
- return
- }
- s[0] = low
- s[1] = high
- s[2] = max
- return
- }
- Fatalf("SetSliceBounds op %v: %v", n.Op, n)
-}
-
-// IsSlice3 reports whether o is a slice3 op (OSLICE3, OSLICE3ARR).
-// o must be a slicing op.
-func (o Op) IsSlice3() bool {
- switch o {
- case OSLICE, OSLICEARR, OSLICESTR:
- return false
- case OSLICE3, OSLICE3ARR:
- return true
- }
- Fatalf("IsSlice3 op %v", o)
- return false
-}
-
// backingArrayPtrLen extracts the pointer and length from a slice or string.
// This constructs two nodes referring to n, so n must be a cheapexpr.
-func (n *Node) backingArrayPtrLen() (ptr, len *Node) {
- var init Nodes
+func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) {
+ var init ir.Nodes
c := cheapexpr(n, &init)
if c != n || init.Len() != 0 {
- Fatalf("backingArrayPtrLen not cheap: %v", n)
+ base.Fatalf("backingArrayPtrLen not cheap: %v", n)
}
- ptr = nod(OSPTR, n, nil)
- if n.Type.IsString() {
- ptr.Type = types.Types[TUINT8].PtrTo()
+ ptr = ir.Nod(ir.OSPTR, n, nil)
+ if n.Type().IsString() {
+ ptr.SetType(types.Types[types.TUINT8].PtrTo())
} else {
- ptr.Type = n.Type.Elem().PtrTo()
+ ptr.SetType(n.Type().Elem().PtrTo())
}
- len = nod(OLEN, n, nil)
- len.Type = types.Types[TINT]
- return ptr, len
-}
-
-// labeledControl returns the control flow Node (for, switch, select)
-// associated with the label n, if any.
-func (n *Node) labeledControl() *Node {
- if n.Op != OLABEL {
- Fatalf("labeledControl %v", n.Op)
- }
- ctl := n.Name.Defn
- if ctl == nil {
- return nil
- }
- switch ctl.Op {
- case OFOR, OFORUNTIL, OSWITCH, OSELECT:
- return ctl
- }
- return nil
+ length = ir.Nod(ir.OLEN, n, nil)
+ length.SetType(types.Types[types.TINT])
+ return ptr, length
}
-func syslook(name string) *Node {
+func syslook(name string) *ir.Name {
s := Runtimepkg.Lookup(name)
if s == nil || s.Def == nil {
- Fatalf("syslook: can't find runtime.%s", name)
+ base.Fatalf("syslook: can't find runtime.%s", name)
}
- return asNode(s.Def)
+ return ir.AsNode(s.Def).(*ir.Name)
}
// typehash computes a hash value for type t to use in type switch statements.
// updateHasCall checks whether expression n contains any function
// calls and sets the n.HasCall flag if so.
-func updateHasCall(n *Node) {
+func updateHasCall(n ir.Node) {
if n == nil {
return
}
n.SetHasCall(calcHasCall(n))
}
-func calcHasCall(n *Node) bool {
- if n.Ninit.Len() != 0 {
+func calcHasCall(n ir.Node) bool {
+ if n.Init().Len() != 0 {
// TODO(mdempsky): This seems overly conservative.
return true
}
- switch n.Op {
- case OLITERAL, ONAME, OTYPE:
+ switch n.Op() {
+ default:
+ base.Fatalf("calcHasCall %+v", n)
+ panic("unreachable")
+
+ case ir.OLITERAL, ir.ONIL, ir.ONAME, ir.OTYPE, ir.ONAMEOFFSET:
if n.HasCall() {
- Fatalf("OLITERAL/ONAME/OTYPE should never have calls: %+v", n)
+ base.Fatalf("OLITERAL/ONAME/OTYPE should never have calls: %+v", n)
}
return false
- case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER:
+ case ir.OCALL, ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
return true
- case OANDAND, OOROR:
+ case ir.OANDAND, ir.OOROR:
// hard with instrumented code
if instrumenting {
return true
}
- case OINDEX, OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR, OSLICESTR,
- ODEREF, ODOTPTR, ODOTTYPE, ODIV, OMOD:
+ return n.Left().HasCall() || n.Right().HasCall()
+ case ir.OINDEX, ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR,
+ ir.ODEREF, ir.ODOTPTR, ir.ODOTTYPE, ir.ODIV, ir.OMOD:
// These ops might panic, make sure they are done
// before we start marshaling args for a call. See issue 16760.
return true
// When using soft-float, these ops might be rewritten to function calls
// so we ensure they are evaluated first.
- case OADD, OSUB, ONEG, OMUL:
- if thearch.SoftFloat && (isFloat[n.Type.Etype] || isComplex[n.Type.Etype]) {
+ case ir.OADD, ir.OSUB, ir.OMUL:
+ if thearch.SoftFloat && (isFloat[n.Type().Kind()] || isComplex[n.Type().Kind()]) {
return true
}
- case OLT, OEQ, ONE, OLE, OGE, OGT:
- if thearch.SoftFloat && (isFloat[n.Left.Type.Etype] || isComplex[n.Left.Type.Etype]) {
+ return n.Left().HasCall() || n.Right().HasCall()
+ case ir.ONEG:
+ if thearch.SoftFloat && (isFloat[n.Type().Kind()] || isComplex[n.Type().Kind()]) {
return true
}
- case OCONV:
- if thearch.SoftFloat && ((isFloat[n.Type.Etype] || isComplex[n.Type.Etype]) || (isFloat[n.Left.Type.Etype] || isComplex[n.Left.Type.Etype])) {
+ return n.Left().HasCall()
+ case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
+ if thearch.SoftFloat && (isFloat[n.Left().Type().Kind()] || isComplex[n.Left().Type().Kind()]) {
return true
}
- }
+ return n.Left().HasCall() || n.Right().HasCall()
+ case ir.OCONV:
+ if thearch.SoftFloat && ((isFloat[n.Type().Kind()] || isComplex[n.Type().Kind()]) || (isFloat[n.Left().Type().Kind()] || isComplex[n.Left().Type().Kind()])) {
+ return true
+ }
+ return n.Left().HasCall()
- if n.Left != nil && n.Left.HasCall() {
- return true
- }
- if n.Right != nil && n.Right.HasCall() {
- return true
+ case ir.OAND, ir.OANDNOT, ir.OLSH, ir.OOR, ir.ORSH, ir.OXOR, ir.OCOPY, ir.OCOMPLEX, ir.OEFACE:
+ return n.Left().HasCall() || n.Right().HasCall()
+
+ case ir.OAS:
+ return n.Left().HasCall() || n.Right() != nil && n.Right().HasCall()
+
+ case ir.OADDR:
+ return n.Left().HasCall()
+ case ir.OPAREN:
+ return n.Left().HasCall()
+ case ir.OBITNOT, ir.ONOT, ir.OPLUS, ir.ORECV,
+ ir.OALIGNOF, ir.OCAP, ir.OCLOSE, ir.OIMAG, ir.OLEN, ir.ONEW,
+ ir.OOFFSETOF, ir.OPANIC, ir.OREAL, ir.OSIZEOF,
+ ir.OCHECKNIL, ir.OCFUNC, ir.OIDATA, ir.OITAB, ir.ONEWOBJ, ir.OSPTR, ir.OVARDEF, ir.OVARKILL, ir.OVARLIVE:
+ return n.Left().HasCall()
+ case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER:
+ return n.Left().HasCall()
+
+ case ir.OGETG, ir.OCLOSUREREAD, ir.OMETHEXPR:
+ return false
+
+ // TODO(rsc): These look wrong in various ways but are what calcHasCall has always done.
+ case ir.OADDSTR:
+ // TODO(rsc): This used to check left and right, which are not part of OADDSTR.
+ return false
+ case ir.OBLOCK:
+ // TODO(rsc): Surely the block's statements matter.
+ return false
+ case ir.OCONVIFACE, ir.OCONVNOP, ir.OBYTES2STR, ir.OBYTES2STRTMP, ir.ORUNES2STR, ir.OSTR2BYTES, ir.OSTR2BYTESTMP, ir.OSTR2RUNES, ir.ORUNESTR:
+ // TODO(rsc): Some conversions are themselves calls, no?
+ return n.Left().HasCall()
+ case ir.ODOTTYPE2:
+ // TODO(rsc): Shouldn't this be up with ODOTTYPE above?
+ return n.Left().HasCall()
+ case ir.OSLICEHEADER:
+ // TODO(rsc): What about len and cap?
+ return n.Left().HasCall()
+ case ir.OAS2DOTTYPE, ir.OAS2FUNC:
+ // TODO(rsc): Surely we need to check List and Rlist.
+ return false
}
- return false
}
-func badtype(op Op, tl, tr *types.Type) {
+func badtype(op ir.Op, tl, tr *types.Type) {
var s string
if tl != nil {
s += fmt.Sprintf("\n\t%v", tl)
}
}
- yyerror("illegal types for operand: %v%s", op, s)
+ base.Errorf("illegal types for operand: %v%s", op, s)
}
// brcom returns !(op).
// For example, brcom(==) is !=.
-func brcom(op Op) Op {
+func brcom(op ir.Op) ir.Op {
switch op {
- case OEQ:
- return ONE
- case ONE:
- return OEQ
- case OLT:
- return OGE
- case OGT:
- return OLE
- case OLE:
- return OGT
- case OGE:
- return OLT
- }
- Fatalf("brcom: no com for %v\n", op)
+ case ir.OEQ:
+ return ir.ONE
+ case ir.ONE:
+ return ir.OEQ
+ case ir.OLT:
+ return ir.OGE
+ case ir.OGT:
+ return ir.OLE
+ case ir.OLE:
+ return ir.OGT
+ case ir.OGE:
+ return ir.OLT
+ }
+ base.Fatalf("brcom: no com for %v\n", op)
return op
}
// brrev returns reverse(op).
// For example, Brrev(<) is >.
-func brrev(op Op) Op {
+func brrev(op ir.Op) ir.Op {
switch op {
- case OEQ:
- return OEQ
- case ONE:
- return ONE
- case OLT:
- return OGT
- case OGT:
- return OLT
- case OLE:
- return OGE
- case OGE:
- return OLE
- }
- Fatalf("brrev: no rev for %v\n", op)
+ case ir.OEQ:
+ return ir.OEQ
+ case ir.ONE:
+ return ir.ONE
+ case ir.OLT:
+ return ir.OGT
+ case ir.OGT:
+ return ir.OLT
+ case ir.OLE:
+ return ir.OGE
+ case ir.OGE:
+ return ir.OLE
+ }
+ base.Fatalf("brrev: no rev for %v\n", op)
return op
}
// return side effect-free n, appending side effects to init.
// result is assignable if n is.
-func safeexpr(n *Node, init *Nodes) *Node {
+func safeexpr(n ir.Node, init *ir.Nodes) ir.Node {
if n == nil {
return nil
}
- if n.Ninit.Len() != 0 {
- walkstmtlist(n.Ninit.Slice())
- init.AppendNodes(&n.Ninit)
+ if n.Init().Len() != 0 {
+ walkstmtlist(n.Init().Slice())
+ init.AppendNodes(n.PtrInit())
}
- switch n.Op {
- case ONAME, OLITERAL:
+ switch n.Op() {
+ case ir.ONAME, ir.OLITERAL, ir.ONIL, ir.ONAMEOFFSET:
return n
- case ODOT, OLEN, OCAP:
- l := safeexpr(n.Left, init)
- if l == n.Left {
+ case ir.OLEN, ir.OCAP:
+ l := safeexpr(n.Left(), init)
+ if l == n.Left() {
return n
}
- r := n.copy()
- r.Left = l
- r = typecheck(r, ctxExpr)
- r = walkexpr(r, init)
- return r
+ a := ir.Copy(n).(*ir.UnaryExpr)
+ a.SetLeft(l)
+ return walkexpr(typecheck(a, ctxExpr), init)
- case ODOTPTR, ODEREF:
- l := safeexpr(n.Left, init)
- if l == n.Left {
+ case ir.ODOT, ir.ODOTPTR:
+ l := safeexpr(n.Left(), init)
+ if l == n.Left() {
return n
}
- a := n.copy()
- a.Left = l
- a = walkexpr(a, init)
- return a
+ a := ir.Copy(n).(*ir.SelectorExpr)
+ a.SetLeft(l)
+ return walkexpr(typecheck(a, ctxExpr), init)
- case OINDEX, OINDEXMAP:
- l := safeexpr(n.Left, init)
- r := safeexpr(n.Right, init)
- if l == n.Left && r == n.Right {
+ case ir.ODEREF:
+ l := safeexpr(n.Left(), init)
+ if l == n.Left() {
return n
}
- a := n.copy()
- a.Left = l
- a.Right = r
- a = walkexpr(a, init)
- return a
+ a := ir.Copy(n).(*ir.StarExpr)
+ a.SetLeft(l)
+ return walkexpr(typecheck(a, ctxExpr), init)
- case OSTRUCTLIT, OARRAYLIT, OSLICELIT:
+ case ir.OINDEX, ir.OINDEXMAP:
+ l := safeexpr(n.Left(), init)
+ r := safeexpr(n.Right(), init)
+ if l == n.Left() && r == n.Right() {
+ return n
+ }
+ a := ir.Copy(n).(*ir.IndexExpr)
+ a.SetLeft(l)
+ a.SetRight(r)
+ return walkexpr(typecheck(a, ctxExpr), init)
+
+ case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT:
if isStaticCompositeLiteral(n) {
return n
}
// make a copy; must not be used as an lvalue
if islvalue(n) {
- Fatalf("missing lvalue case in safeexpr: %v", n)
+ base.Fatalf("missing lvalue case in safeexpr: %v", n)
}
return cheapexpr(n, init)
}
-func copyexpr(n *Node, t *types.Type, init *Nodes) *Node {
+func copyexpr(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node {
l := temp(t)
- a := nod(OAS, l, n)
- a = typecheck(a, ctxStmt)
- a = walkexpr(a, init)
- init.Append(a)
+ appendWalkStmt(init, ir.Nod(ir.OAS, l, n))
return l
}
// return side-effect free and cheap n, appending side effects to init.
// result may not be assignable.
-func cheapexpr(n *Node, init *Nodes) *Node {
- switch n.Op {
- case ONAME, OLITERAL:
+func cheapexpr(n ir.Node, init *ir.Nodes) ir.Node {
+ switch n.Op() {
+ case ir.ONAME, ir.OLITERAL, ir.ONIL:
return n
}
- return copyexpr(n, n.Type, init)
+ return copyexpr(n, n.Type(), init)
}
// Code to resolve elided DOTs in embedded types.
}
u = t
- if t.Sym != nil && t.IsPtr() && !t.Elem().IsPtr() {
+ if t.Sym() != nil && t.IsPtr() && !t.Elem().IsPtr() {
// If t is a defined pointer type, then x.m is shorthand for (*x).m.
u = t.Elem()
}
// find missing fields that
// will give shortest unique addressing.
// modify the tree with missing type names.
-func adddot(n *Node) *Node {
- n.Left = typecheck(n.Left, ctxType|ctxExpr)
- if n.Left.Diag() {
+func adddot(n *ir.SelectorExpr) *ir.SelectorExpr {
+ n.SetLeft(typecheck(n.Left(), ctxType|ctxExpr))
+ if n.Left().Diag() {
n.SetDiag(true)
}
- t := n.Left.Type
+ t := n.Left().Type()
if t == nil {
return n
}
- if n.Left.Op == OTYPE {
+ if n.Left().Op() == ir.OTYPE {
return n
}
- s := n.Sym
+ s := n.Sym()
if s == nil {
return n
}
case path != nil:
// rebuild elided dots
for c := len(path) - 1; c >= 0; c-- {
- n.Left = nodSym(ODOT, n.Left, path[c].field.Sym)
- n.Left.SetImplicit(true)
+ dot := nodSym(ir.ODOT, n.Left(), path[c].field.Sym)
+ dot.SetImplicit(true)
+ dot.SetType(path[c].field.Type)
+ n.SetLeft(dot)
}
case ambig:
- yyerror("ambiguous selector %v", n)
- n.Left = nil
+ base.Errorf("ambiguous selector %v", n)
+ n.SetLeft(nil)
}
return n
t.AllMethods().Set(ms)
}
-// Given funarg struct list, return list of ODCLFIELD Node fn args.
-func structargs(tl *types.Type, mustname bool) []*Node {
- var args []*Node
+// Given funarg struct list, return list of fn args.
+func structargs(tl *types.Type, mustname bool) []*ir.Field {
+ var args []*ir.Field
gen := 0
for _, t := range tl.Fields().Slice() {
s := t.Sym
}
a := symfield(s, t.Type)
a.Pos = t.Pos
- a.SetIsDDD(t.IsDDD())
+ a.IsDDD = t.IsDDD()
args = append(args, a)
}
// method - M func (t T)(), a TFIELD type struct
// newnam - the eventual mangled name of this function
func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) {
- if false && Debug.r != 0 {
+ if false && base.Flag.LowerR != 0 {
fmt.Printf("genwrapper rcvrtype=%v method=%v newnam=%v\n", rcvr, method, newnam)
}
// Only generate (*T).M wrappers for T.M in T's own package.
if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type &&
- rcvr.Elem().Sym != nil && rcvr.Elem().Sym.Pkg != localpkg {
+ rcvr.Elem().Sym() != nil && rcvr.Elem().Sym().Pkg != types.LocalPkg {
return
}
// Only generate I.M wrappers for I in I's own package
// but keep doing it for error.Error (was issue #29304).
- if rcvr.IsInterface() && rcvr.Sym != nil && rcvr.Sym.Pkg != localpkg && rcvr != types.Errortype {
+ if rcvr.IsInterface() && rcvr.Sym() != nil && rcvr.Sym().Pkg != types.LocalPkg && rcvr != types.ErrorType {
return
}
- lineno = autogeneratedPos
- dclcontext = PEXTERN
+ base.Pos = autogeneratedPos
+ dclcontext = ir.PEXTERN
- tfn := nod(OTFUNC, nil, nil)
- tfn.Left = namedfield(".this", rcvr)
- tfn.List.Set(structargs(method.Type.Params(), true))
- tfn.Rlist.Set(structargs(method.Type.Results(), false))
+ tfn := ir.NewFuncType(base.Pos,
+ namedfield(".this", rcvr),
+ structargs(method.Type.Params(), true),
+ structargs(method.Type.Results(), false))
fn := dclfunc(newnam, tfn)
- fn.Func.SetDupok(true)
+ fn.SetDupok(true)
- nthis := asNode(tfn.Type.Recv().Nname)
+ nthis := ir.AsNode(tfn.Type().Recv().Nname)
methodrcvr := method.Type.Recv().Type
// generate nil pointer check for better error
if rcvr.IsPtr() && rcvr.Elem() == methodrcvr {
// generating wrapper from *T to T.
- n := nod(OIF, nil, nil)
- n.Left = nod(OEQ, nthis, nodnil())
- call := nod(OCALL, syslook("panicwrap"), nil)
- n.Nbody.Set1(call)
- fn.Nbody.Append(n)
+ n := ir.Nod(ir.OIF, nil, nil)
+ n.SetLeft(ir.Nod(ir.OEQ, nthis, nodnil()))
+ call := ir.Nod(ir.OCALL, syslook("panicwrap"), nil)
+ n.PtrBody().Set1(call)
+ fn.PtrBody().Append(n)
}
- dot := adddot(nodSym(OXDOT, nthis, method.Sym))
+ dot := adddot(ir.NewSelectorExpr(base.Pos, ir.OXDOT, nthis, method.Sym))
// generate call
// It's not possible to use a tail call when dynamic linking on ppc64le. The
// the TOC to the appropriate value for that module. But if it returns
// directly to the wrapper's caller, nothing will reset it to the correct
// value for that function.
- if !instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !isifacemethod(method.Type) && !(thearch.LinkArch.Name == "ppc64le" && Ctxt.Flag_dynlink) {
+ if !instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !isifacemethod(method.Type) && !(thearch.LinkArch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) {
// generate tail call: adjust pointer receiver and jump to embedded method.
- dot = dot.Left // skip final .M
- // TODO(mdempsky): Remove dependency on dotlist.
- if !dotlist[0].field.Type.IsPtr() {
- dot = nod(OADDR, dot, nil)
- }
- as := nod(OAS, nthis, convnop(dot, rcvr))
- fn.Nbody.Append(as)
- fn.Nbody.Append(nodSym(ORETJMP, nil, methodSym(methodrcvr, method.Sym)))
+ left := dot.Left() // skip final .M
+ if !left.Type().IsPtr() {
+ left = nodAddr(left)
+ }
+ as := ir.Nod(ir.OAS, nthis, convnop(left, rcvr))
+ fn.PtrBody().Append(as)
+ fn.PtrBody().Append(nodSym(ir.ORETJMP, nil, methodSym(methodrcvr, method.Sym)))
} else {
- fn.Func.SetWrapper(true) // ignore frame for panic+recover matching
- call := nod(OCALL, dot, nil)
- call.List.Set(paramNnames(tfn.Type))
- call.SetIsDDD(tfn.Type.IsVariadic())
+ fn.SetWrapper(true) // ignore frame for panic+recover matching
+ call := ir.Nod(ir.OCALL, dot, nil)
+ call.PtrList().Set(paramNnames(tfn.Type()))
+ call.SetIsDDD(tfn.Type().IsVariadic())
if method.Type.NumResults() > 0 {
- n := nod(ORETURN, nil, nil)
- n.List.Set1(call)
- call = n
+ ret := ir.Nod(ir.ORETURN, nil, nil)
+ ret.PtrList().Set1(call)
+ fn.PtrBody().Append(ret)
+ } else {
+ fn.PtrBody().Append(call)
}
- fn.Nbody.Append(call)
}
- if false && Debug.r != 0 {
- dumplist("genwrapper body", fn.Nbody)
+ if false && base.Flag.LowerR != 0 {
+ ir.DumpList("genwrapper body", fn.Body())
}
funcbody()
- if debug_dclstack != 0 {
+ if base.Debug.DclStack != 0 {
testdclstack()
}
- fn = typecheck(fn, ctxStmt)
-
+ typecheckFunc(fn)
Curfn = fn
- typecheckslice(fn.Nbody.Slice(), ctxStmt)
+ typecheckslice(fn.Body().Slice(), ctxStmt)
// Inline calls within (*T).M wrappers. This is safe because we only
// generate those wrappers within the same compilation unit as (T).M.
// TODO(mdempsky): Investigate why we can't enable this more generally.
- if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && rcvr.Elem().Sym != nil {
+ if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && rcvr.Elem().Sym() != nil {
inlcalls(fn)
}
- escapeFuncs([]*Node{fn}, false)
+ escapeFuncs([]*ir.Func{fn}, false)
Curfn = nil
- xtop = append(xtop, fn)
+ Target.Decls = append(Target.Decls, fn)
}
-func paramNnames(ft *types.Type) []*Node {
- args := make([]*Node, ft.NumParams())
+func paramNnames(ft *types.Type) []ir.Node {
+ args := make([]ir.Node, ft.NumParams())
for i, f := range ft.Params().FieldSlice() {
- args[i] = asNode(f.Nname)
+ args[i] = ir.AsNode(f.Nname)
}
return args
}
-func hashmem(t *types.Type) *Node {
+func hashmem(t *types.Type) ir.Node {
sym := Runtimepkg.Lookup("memhash")
- n := newname(sym)
+ n := NewName(sym)
setNodeNameFunc(n)
- n.Type = functype(nil, []*Node{
+ n.SetType(functype(nil, []*ir.Field{
anonfield(types.NewPtr(t)),
- anonfield(types.Types[TUINTPTR]),
- anonfield(types.Types[TUINTPTR]),
- }, []*Node{
- anonfield(types.Types[TUINTPTR]),
- })
+ anonfield(types.Types[types.TUINTPTR]),
+ anonfield(types.Types[types.TUINTPTR]),
+ }, []*ir.Field{
+ anonfield(types.Types[types.TUINTPTR]),
+ }))
return n
}
path, ambig := dotpath(s, t, &m, ignorecase)
if path == nil {
if ambig {
- yyerror("%v.%v is ambiguous", t, s)
+ base.Errorf("%v.%v is ambiguous", t, s)
}
return nil, false
}
}
if !m.IsMethod() {
- yyerror("%v.%v is a field, not a method", t, s)
+ base.Errorf("%v.%v is a field, not a method", t, s)
return nil, followptr
}
// the method does not exist for value types.
rcvr := tm.Type.Recv().Type
if rcvr.IsPtr() && !t0.IsPtr() && !followptr && !isifacemethod(tm.Type) {
- if false && Debug.r != 0 {
- yyerror("interface pointer mismatch")
+ if false && base.Flag.LowerR != 0 {
+ base.Errorf("interface pointer mismatch")
}
*m = im
}
}
- // We're going to emit an OCONVIFACE.
- // Call itabname so that (t, iface)
- // gets added to itabs early, which allows
- // us to de-virtualize calls through this
- // type/interface pair later. See peekitabs in reflect.go
- if isdirectiface(t0) && !iface.IsEmptyInterface() {
- itabname(t0, iface)
- }
return true
}
-func listtreecopy(l []*Node, pos src.XPos) []*Node {
- var out []*Node
- for _, n := range l {
- out = append(out, treecopy(n, pos))
- }
- return out
-}
-
-func liststmt(l []*Node) *Node {
- n := nod(OBLOCK, nil, nil)
- n.List.Set(l)
+func liststmt(l []ir.Node) ir.Node {
+ n := ir.Nod(ir.OBLOCK, nil, nil)
+ n.PtrList().Set(l)
if len(l) != 0 {
- n.Pos = l[0].Pos
- }
- return n
-}
-
-func (l Nodes) asblock() *Node {
- n := nod(OBLOCK, nil, nil)
- n.List = l
- if l.Len() != 0 {
- n.Pos = l.First().Pos
+ n.SetPos(l[0].Pos())
}
return n
}
-func ngotype(n *Node) *types.Sym {
- if n.Type != nil {
- return typenamesym(n.Type)
+func ngotype(n ir.Node) *types.Sym {
+ if n.Type() != nil {
+ return typenamesym(n.Type())
}
return nil
}
-// The result of addinit MUST be assigned back to n, e.g.
-// n.Left = addinit(n.Left, init)
-func addinit(n *Node, init []*Node) *Node {
+// The result of initExpr MUST be assigned back to n, e.g.
+// n.Left = initExpr(init, n.Left)
+func initExpr(init []ir.Node, n ir.Node) ir.Node {
if len(init) == 0 {
return n
}
- if n.mayBeShared() {
+ if ir.MayBeShared(n) {
// Introduce OCONVNOP to hold init list.
- n = nod(OCONVNOP, n, nil)
- n.Type = n.Left.Type
+ old := n
+ n = ir.Nod(ir.OCONVNOP, old, nil)
+ n.SetType(old.Type())
n.SetTypecheck(1)
}
- n.Ninit.Prepend(init...)
+ n.PtrInit().Prepend(init...)
n.SetHasCall(true)
return n
}
func isbadimport(path string, allowSpace bool) bool {
if strings.Contains(path, "\x00") {
- yyerror("import path contains NUL")
+ base.Errorf("import path contains NUL")
return true
}
for _, ri := range reservedimports {
if path == ri {
- yyerror("import path %q is reserved and cannot be used", path)
+ base.Errorf("import path %q is reserved and cannot be used", path)
return true
}
}
for _, r := range path {
if r == utf8.RuneError {
- yyerror("import path contains invalid UTF-8 sequence: %q", path)
+ base.Errorf("import path contains invalid UTF-8 sequence: %q", path)
return true
}
if r < 0x20 || r == 0x7f {
- yyerror("import path contains control character: %q", path)
+ base.Errorf("import path contains control character: %q", path)
return true
}
if r == '\\' {
- yyerror("import path contains backslash; use slash: %q", path)
+ base.Errorf("import path contains backslash; use slash: %q", path)
return true
}
if !allowSpace && unicode.IsSpace(r) {
- yyerror("import path contains space character: %q", path)
+ base.Errorf("import path contains space character: %q", path)
return true
}
if strings.ContainsRune("!\"#$%&'()*,:;<=>?[]^`{|}", r) {
- yyerror("import path contains invalid character '%c': %q", r, path)
+ base.Errorf("import path contains invalid character '%c': %q", r, path)
return true
}
}
return false
}
- switch t.Etype {
- case TPTR:
+ switch t.Kind() {
+ case types.TPTR:
// Pointers to notinheap types must be stored indirectly. See issue 42076.
return !t.Elem().NotInHeap()
- case TCHAN,
- TMAP,
- TFUNC,
- TUNSAFEPTR:
+ case types.TCHAN,
+ types.TMAP,
+ types.TFUNC,
+ types.TUNSAFEPTR:
return true
- case TARRAY:
+ case types.TARRAY:
// Array of 1 direct iface type can be direct.
return t.NumElem() == 1 && isdirectiface(t.Elem())
- case TSTRUCT:
+ case types.TSTRUCT:
// Struct with 1 field of direct iface type can be direct.
return t.NumFields() == 1 && isdirectiface(t.Field(0).Type)
}
}
// itabType loads the _type field from a runtime.itab struct.
-func itabType(itab *Node) *Node {
- typ := nodSym(ODOTPTR, itab, nil)
- typ.Type = types.NewPtr(types.Types[TUINT8])
+func itabType(itab ir.Node) ir.Node {
+ typ := nodSym(ir.ODOTPTR, itab, nil)
+ typ.SetType(types.NewPtr(types.Types[types.TUINT8]))
typ.SetTypecheck(1)
- typ.Xoffset = int64(Widthptr) // offset of _type in runtime.itab
- typ.SetBounded(true) // guaranteed not to fault
+ typ.SetOffset(int64(Widthptr)) // offset of _type in runtime.itab
+ typ.SetBounded(true) // guaranteed not to fault
return typ
}
// ifaceData loads the data field from an interface.
// The concrete type must be known to have type t.
// It follows the pointer if !isdirectiface(t).
-func ifaceData(pos src.XPos, n *Node, t *types.Type) *Node {
+func ifaceData(pos src.XPos, n ir.Node, t *types.Type) ir.Node {
if t.IsInterface() {
- Fatalf("ifaceData interface: %v", t)
+ base.Fatalf("ifaceData interface: %v", t)
}
- ptr := nodlSym(pos, OIDATA, n, nil)
+ ptr := ir.NodAt(pos, ir.OIDATA, n, nil)
if isdirectiface(t) {
- ptr.Type = t
+ ptr.SetType(t)
ptr.SetTypecheck(1)
return ptr
}
- ptr.Type = types.NewPtr(t)
+ ptr.SetType(types.NewPtr(t))
ptr.SetTypecheck(1)
- ind := nodl(pos, ODEREF, ptr, nil)
- ind.Type = t
+ ind := ir.NodAt(pos, ir.ODEREF, ptr, nil)
+ ind.SetType(t)
ind.SetTypecheck(1)
ind.SetBounded(true)
return ind
// typePos returns the position associated with t.
// This is where t was declared or where it appeared as a type expression.
func typePos(t *types.Type) src.XPos {
- n := asNode(t.Nod)
- if n == nil || !n.Pos.IsKnown() {
- Fatalf("bad type: %v", t)
+ if pos := t.Pos(); pos.IsKnown() {
+ return pos
}
- return n.Pos
+ base.Fatalf("bad type: %v", t)
+ panic("unreachable")
}
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/src"
+ "go/constant"
+ "go/token"
"sort"
)
// typecheckswitch typechecks a switch statement.
-func typecheckswitch(n *Node) {
- typecheckslice(n.Ninit.Slice(), ctxStmt)
- if n.Left != nil && n.Left.Op == OTYPESW {
+func typecheckswitch(n *ir.SwitchStmt) {
+ typecheckslice(n.Init().Slice(), ctxStmt)
+ if n.Left() != nil && n.Left().Op() == ir.OTYPESW {
typecheckTypeSwitch(n)
} else {
typecheckExprSwitch(n)
}
}
-func typecheckTypeSwitch(n *Node) {
- n.Left.Right = typecheck(n.Left.Right, ctxExpr)
- t := n.Left.Right.Type
+func typecheckTypeSwitch(n *ir.SwitchStmt) {
+ guard := n.Left().(*ir.TypeSwitchGuard)
+ guard.SetRight(typecheck(guard.Right(), ctxExpr))
+ t := guard.Right().Type()
if t != nil && !t.IsInterface() {
- yyerrorl(n.Pos, "cannot type switch on non-interface value %L", n.Left.Right)
+ base.ErrorfAt(n.Pos(), "cannot type switch on non-interface value %L", guard.Right())
t = nil
}
// We don't actually declare the type switch's guarded
// declaration itself. So if there are no cases, we won't
// notice that it went unused.
- if v := n.Left.Left; v != nil && !v.isBlank() && n.List.Len() == 0 {
- yyerrorl(v.Pos, "%v declared but not used", v.Sym)
+ if v := guard.Left(); v != nil && !ir.IsBlank(v) && n.List().Len() == 0 {
+ base.ErrorfAt(v.Pos(), "%v declared but not used", v.Sym())
}
- var defCase, nilCase *Node
+ var defCase, nilCase ir.Node
var ts typeSet
- for _, ncase := range n.List.Slice() {
- ls := ncase.List.Slice()
+ for _, ncase := range n.List().Slice() {
+ ncase := ncase.(*ir.CaseStmt)
+ ls := ncase.List().Slice()
if len(ls) == 0 { // default:
if defCase != nil {
- yyerrorl(ncase.Pos, "multiple defaults in switch (first at %v)", defCase.Line())
+ base.ErrorfAt(ncase.Pos(), "multiple defaults in switch (first at %v)", ir.Line(defCase))
} else {
defCase = ncase
}
for i := range ls {
ls[i] = typecheck(ls[i], ctxExpr|ctxType)
n1 := ls[i]
- if t == nil || n1.Type == nil {
+ if t == nil || n1.Type() == nil {
continue
}
var missing, have *types.Field
var ptr int
- switch {
- case n1.isNil(): // case nil:
+ if ir.IsNil(n1) { // case nil:
if nilCase != nil {
- yyerrorl(ncase.Pos, "multiple nil cases in type switch (first at %v)", nilCase.Line())
+ base.ErrorfAt(ncase.Pos(), "multiple nil cases in type switch (first at %v)", ir.Line(nilCase))
} else {
nilCase = ncase
}
- case n1.Op != OTYPE:
- yyerrorl(ncase.Pos, "%L is not a type", n1)
- case !n1.Type.IsInterface() && !implements(n1.Type, t, &missing, &have, &ptr) && !missing.Broke():
+ continue
+ }
+ if n1.Op() != ir.OTYPE {
+ base.ErrorfAt(ncase.Pos(), "%L is not a type", n1)
+ continue
+ }
+ if !n1.Type().IsInterface() && !implements(n1.Type(), t, &missing, &have, &ptr) && !missing.Broke() {
if have != nil && !have.Broke() {
- yyerrorl(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+
- " (wrong type for %v method)\n\thave %v%S\n\twant %v%S", n.Left.Right, n1.Type, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+ base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+
+ " (wrong type for %v method)\n\thave %v%S\n\twant %v%S", guard.Right(), n1.Type(), missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
} else if ptr != 0 {
- yyerrorl(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+
- " (%v method has pointer receiver)", n.Left.Right, n1.Type, missing.Sym)
+ base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+
+ " (%v method has pointer receiver)", guard.Right(), n1.Type(), missing.Sym)
} else {
- yyerrorl(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+
- " (missing %v method)", n.Left.Right, n1.Type, missing.Sym)
+ base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+
+ " (missing %v method)", guard.Right(), n1.Type(), missing.Sym)
}
+ continue
}
- if n1.Op == OTYPE {
- ts.add(ncase.Pos, n1.Type)
- }
+ ts.add(ncase.Pos(), n1.Type())
}
- if ncase.Rlist.Len() != 0 {
+ if ncase.Rlist().Len() != 0 {
// Assign the clause variable's type.
vt := t
if len(ls) == 1 {
- if ls[0].Op == OTYPE {
- vt = ls[0].Type
- } else if ls[0].Op != OLITERAL { // TODO(mdempsky): Should be !ls[0].isNil()
+ if ls[0].Op() == ir.OTYPE {
+ vt = ls[0].Type()
+ } else if !ir.IsNil(ls[0]) {
// Invalid single-type case;
// mark variable as broken.
vt = nil
}
}
- // TODO(mdempsky): It should be possible to
- // still typecheck the case body.
- if vt == nil {
- continue
+ nvar := ncase.Rlist().First()
+ nvar.SetType(vt)
+ if vt != nil {
+ nvar = typecheck(nvar, ctxExpr|ctxAssign)
+ } else {
+ // Clause variable is broken; prevent typechecking.
+ nvar.SetTypecheck(1)
+ nvar.SetWalkdef(1)
}
-
- nvar := ncase.Rlist.First()
- nvar.Type = vt
- nvar = typecheck(nvar, ctxExpr|ctxAssign)
- ncase.Rlist.SetFirst(nvar)
+ ncase.Rlist().SetFirst(nvar)
}
- typecheckslice(ncase.Nbody.Slice(), ctxStmt)
+ typecheckslice(ncase.Body().Slice(), ctxStmt)
}
}
prevs := s.m[ls]
for _, prev := range prevs {
if types.Identical(typ, prev.typ) {
- yyerrorl(pos, "duplicate case %v in type switch\n\tprevious case at %s", typ, linestr(prev.pos))
+ base.ErrorfAt(pos, "duplicate case %v in type switch\n\tprevious case at %s", typ, base.FmtPos(prev.pos))
return
}
}
s.m[ls] = append(prevs, typeSetEntry{pos, typ})
}
-func typecheckExprSwitch(n *Node) {
- t := types.Types[TBOOL]
- if n.Left != nil {
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- t = n.Left.Type
+func typecheckExprSwitch(n *ir.SwitchStmt) {
+ t := types.Types[types.TBOOL]
+ if n.Left() != nil {
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ t = n.Left().Type()
}
var nilonly string
switch {
case t.IsMap():
nilonly = "map"
- case t.Etype == TFUNC:
+ case t.Kind() == types.TFUNC:
nilonly = "func"
case t.IsSlice():
nilonly = "slice"
case !IsComparable(t):
if t.IsStruct() {
- yyerrorl(n.Pos, "cannot switch on %L (struct containing %v cannot be compared)", n.Left, IncomparableField(t).Type)
+ base.ErrorfAt(n.Pos(), "cannot switch on %L (struct containing %v cannot be compared)", n.Left(), IncomparableField(t).Type)
} else {
- yyerrorl(n.Pos, "cannot switch on %L", n.Left)
+ base.ErrorfAt(n.Pos(), "cannot switch on %L", n.Left())
}
t = nil
}
}
- var defCase *Node
+ var defCase ir.Node
var cs constSet
- for _, ncase := range n.List.Slice() {
- ls := ncase.List.Slice()
+ for _, ncase := range n.List().Slice() {
+ ncase := ncase.(*ir.CaseStmt)
+ ls := ncase.List().Slice()
if len(ls) == 0 { // default:
if defCase != nil {
- yyerrorl(ncase.Pos, "multiple defaults in switch (first at %v)", defCase.Line())
+ base.ErrorfAt(ncase.Pos(), "multiple defaults in switch (first at %v)", ir.Line(defCase))
} else {
defCase = ncase
}
ls[i] = typecheck(ls[i], ctxExpr)
ls[i] = defaultlit(ls[i], t)
n1 := ls[i]
- if t == nil || n1.Type == nil {
+ if t == nil || n1.Type() == nil {
continue
}
- if nilonly != "" && !n1.isNil() {
- yyerrorl(ncase.Pos, "invalid case %v in switch (can only compare %s %v to nil)", n1, nilonly, n.Left)
- } else if t.IsInterface() && !n1.Type.IsInterface() && !IsComparable(n1.Type) {
- yyerrorl(ncase.Pos, "invalid case %L in switch (incomparable type)", n1)
+ if nilonly != "" && !ir.IsNil(n1) {
+ base.ErrorfAt(ncase.Pos(), "invalid case %v in switch (can only compare %s %v to nil)", n1, nilonly, n.Left())
+ } else if t.IsInterface() && !n1.Type().IsInterface() && !IsComparable(n1.Type()) {
+ base.ErrorfAt(ncase.Pos(), "invalid case %L in switch (incomparable type)", n1)
} else {
- op1, _ := assignop(n1.Type, t)
- op2, _ := assignop(t, n1.Type)
- if op1 == OXXX && op2 == OXXX {
- if n.Left != nil {
- yyerrorl(ncase.Pos, "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Left, n1.Type, t)
+ op1, _ := assignop(n1.Type(), t)
+ op2, _ := assignop(t, n1.Type())
+ if op1 == ir.OXXX && op2 == ir.OXXX {
+ if n.Left() != nil {
+ base.ErrorfAt(ncase.Pos(), "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Left(), n1.Type(), t)
} else {
- yyerrorl(ncase.Pos, "invalid case %v in switch (mismatched types %v and bool)", n1, n1.Type)
+ base.ErrorfAt(ncase.Pos(), "invalid case %v in switch (mismatched types %v and bool)", n1, n1.Type())
}
}
}
// case GOARCH == "arm" && GOARM == "5":
// case GOARCH == "arm":
// which would both evaluate to false for non-ARM compiles.
- if !n1.Type.IsBoolean() {
- cs.add(ncase.Pos, n1, "case", "switch")
+ if !n1.Type().IsBoolean() {
+ cs.add(ncase.Pos(), n1, "case", "switch")
}
}
- typecheckslice(ncase.Nbody.Slice(), ctxStmt)
+ typecheckslice(ncase.Body().Slice(), ctxStmt)
}
}
// walkswitch walks a switch statement.
-func walkswitch(sw *Node) {
+func walkswitch(sw *ir.SwitchStmt) {
// Guard against double walk, see #25776.
- if sw.List.Len() == 0 && sw.Nbody.Len() > 0 {
+ if sw.List().Len() == 0 && sw.Body().Len() > 0 {
return // Was fatal, but eliminating every possible source of double-walking is hard
}
- if sw.Left != nil && sw.Left.Op == OTYPESW {
+ if sw.Left() != nil && sw.Left().Op() == ir.OTYPESW {
walkTypeSwitch(sw)
} else {
walkExprSwitch(sw)
// walkExprSwitch generates an AST implementing sw. sw is an
// expression switch.
-func walkExprSwitch(sw *Node) {
+func walkExprSwitch(sw *ir.SwitchStmt) {
lno := setlineno(sw)
- cond := sw.Left
- sw.Left = nil
+ cond := sw.Left()
+ sw.SetLeft(nil)
// convert switch {...} to switch true {...}
if cond == nil {
// because walkexpr will lower the string
// conversion into a runtime call.
// See issue 24937 for more discussion.
- if cond.Op == OBYTES2STR && allCaseExprsAreSideEffectFree(sw) {
- cond.Op = OBYTES2STRTMP
+ if cond.Op() == ir.OBYTES2STR && allCaseExprsAreSideEffectFree(sw) {
+ cond.SetOp(ir.OBYTES2STRTMP)
}
- cond = walkexpr(cond, &sw.Ninit)
- if cond.Op != OLITERAL {
- cond = copyexpr(cond, cond.Type, &sw.Nbody)
+ cond = walkexpr(cond, sw.PtrInit())
+ if cond.Op() != ir.OLITERAL && cond.Op() != ir.ONIL {
+ cond = copyexpr(cond, cond.Type(), sw.PtrBody())
}
- lineno = lno
+ base.Pos = lno
s := exprSwitch{
exprname: cond,
}
- var defaultGoto *Node
- var body Nodes
- for _, ncase := range sw.List.Slice() {
+ var defaultGoto ir.Node
+ var body ir.Nodes
+ for _, ncase := range sw.List().Slice() {
+ ncase := ncase.(*ir.CaseStmt)
label := autolabel(".s")
- jmp := npos(ncase.Pos, nodSym(OGOTO, nil, label))
+ jmp := npos(ncase.Pos(), nodSym(ir.OGOTO, nil, label))
// Process case dispatch.
- if ncase.List.Len() == 0 {
+ if ncase.List().Len() == 0 {
if defaultGoto != nil {
- Fatalf("duplicate default case not detected during typechecking")
+ base.Fatalf("duplicate default case not detected during typechecking")
}
defaultGoto = jmp
}
- for _, n1 := range ncase.List.Slice() {
- s.Add(ncase.Pos, n1, jmp)
+ for _, n1 := range ncase.List().Slice() {
+ s.Add(ncase.Pos(), n1, jmp)
}
// Process body.
- body.Append(npos(ncase.Pos, nodSym(OLABEL, nil, label)))
- body.Append(ncase.Nbody.Slice()...)
- if fall, pos := hasFall(ncase.Nbody.Slice()); !fall {
- br := nod(OBREAK, nil, nil)
- br.Pos = pos
+ body.Append(npos(ncase.Pos(), nodSym(ir.OLABEL, nil, label)))
+ body.Append(ncase.Body().Slice()...)
+ if fall, pos := endsInFallthrough(ncase.Body().Slice()); !fall {
+ br := ir.Nod(ir.OBREAK, nil, nil)
+ br.SetPos(pos)
body.Append(br)
}
}
- sw.List.Set(nil)
+ sw.PtrList().Set(nil)
if defaultGoto == nil {
- br := nod(OBREAK, nil, nil)
- br.Pos = br.Pos.WithNotStmt()
+ br := ir.Nod(ir.OBREAK, nil, nil)
+ br.SetPos(br.Pos().WithNotStmt())
defaultGoto = br
}
- s.Emit(&sw.Nbody)
- sw.Nbody.Append(defaultGoto)
- sw.Nbody.AppendNodes(&body)
- walkstmtlist(sw.Nbody.Slice())
+ s.Emit(sw.PtrBody())
+ sw.PtrBody().Append(defaultGoto)
+ sw.PtrBody().AppendNodes(&body)
+ walkstmtlist(sw.Body().Slice())
}
// An exprSwitch walks an expression switch.
type exprSwitch struct {
- exprname *Node // value being switched on
+ exprname ir.Node // value being switched on
- done Nodes
+ done ir.Nodes
clauses []exprClause
}
type exprClause struct {
pos src.XPos
- lo, hi *Node
- jmp *Node
+ lo, hi ir.Node
+ jmp ir.Node
}
-func (s *exprSwitch) Add(pos src.XPos, expr, jmp *Node) {
+func (s *exprSwitch) Add(pos src.XPos, expr, jmp ir.Node) {
c := exprClause{pos: pos, lo: expr, hi: expr, jmp: jmp}
- if okforcmp[s.exprname.Type.Etype] && expr.Op == OLITERAL {
+ if okforcmp[s.exprname.Type().Kind()] && expr.Op() == ir.OLITERAL {
s.clauses = append(s.clauses, c)
return
}
s.flush()
}
-func (s *exprSwitch) Emit(out *Nodes) {
+func (s *exprSwitch) Emit(out *ir.Nodes) {
s.flush()
out.AppendNodes(&s.done)
}
// (e.g., sort.Slice doesn't need to invoke the less function
// when there's only a single slice element).
- if s.exprname.Type.IsString() && len(cc) >= 2 {
+ if s.exprname.Type().IsString() && len(cc) >= 2 {
// Sort strings by length and then by value. It is
// much cheaper to compare lengths than values, and
// all we need here is consistency. We respect this
// sorting below.
sort.Slice(cc, func(i, j int) bool {
- si := cc[i].lo.StringVal()
- sj := cc[j].lo.StringVal()
+ si := ir.StringVal(cc[i].lo)
+ sj := ir.StringVal(cc[j].lo)
if len(si) != len(sj) {
return len(si) < len(sj)
}
// runLen returns the string length associated with a
// particular run of exprClauses.
- runLen := func(run []exprClause) int64 { return int64(len(run[0].lo.StringVal())) }
+ runLen := func(run []exprClause) int64 { return int64(len(ir.StringVal(run[0].lo))) }
// Collapse runs of consecutive strings with the same length.
var runs [][]exprClause
runs = append(runs, cc[start:])
// Perform two-level binary search.
- nlen := nod(OLEN, s.exprname, nil)
binarySearch(len(runs), &s.done,
- func(i int) *Node {
- return nod(OLE, nlen, nodintconst(runLen(runs[i-1])))
+ func(i int) ir.Node {
+ return ir.Nod(ir.OLE, ir.Nod(ir.OLEN, s.exprname, nil), nodintconst(runLen(runs[i-1])))
},
- func(i int, nif *Node) {
+ func(i int, nif *ir.IfStmt) {
run := runs[i]
- nif.Left = nod(OEQ, nlen, nodintconst(runLen(run)))
- s.search(run, &nif.Nbody)
+ nif.SetLeft(ir.Nod(ir.OEQ, ir.Nod(ir.OLEN, s.exprname, nil), nodintconst(runLen(run))))
+ s.search(run, nif.PtrBody())
},
)
return
}
sort.Slice(cc, func(i, j int) bool {
- return compareOp(cc[i].lo.Val(), OLT, cc[j].lo.Val())
+ return constant.Compare(cc[i].lo.Val(), token.LSS, cc[j].lo.Val())
})
// Merge consecutive integer cases.
- if s.exprname.Type.IsInteger() {
+ if s.exprname.Type().IsInteger() {
merged := cc[:1]
for _, c := range cc[1:] {
last := &merged[len(merged)-1]
- if last.jmp == c.jmp && last.hi.Int64Val()+1 == c.lo.Int64Val() {
+ if last.jmp == c.jmp && ir.Int64Val(last.hi)+1 == ir.Int64Val(c.lo) {
last.hi = c.lo
} else {
merged = append(merged, c)
s.search(cc, &s.done)
}
-func (s *exprSwitch) search(cc []exprClause, out *Nodes) {
+func (s *exprSwitch) search(cc []exprClause, out *ir.Nodes) {
binarySearch(len(cc), out,
- func(i int) *Node {
- return nod(OLE, s.exprname, cc[i-1].hi)
+ func(i int) ir.Node {
+ return ir.Nod(ir.OLE, s.exprname, cc[i-1].hi)
},
- func(i int, nif *Node) {
+ func(i int, nif *ir.IfStmt) {
c := &cc[i]
- nif.Left = c.test(s.exprname)
- nif.Nbody.Set1(c.jmp)
+ nif.SetLeft(c.test(s.exprname))
+ nif.PtrBody().Set1(c.jmp)
},
)
}
-func (c *exprClause) test(exprname *Node) *Node {
+func (c *exprClause) test(exprname ir.Node) ir.Node {
// Integer range.
if c.hi != c.lo {
- low := nodl(c.pos, OGE, exprname, c.lo)
- high := nodl(c.pos, OLE, exprname, c.hi)
- return nodl(c.pos, OANDAND, low, high)
+ low := ir.NodAt(c.pos, ir.OGE, exprname, c.lo)
+ high := ir.NodAt(c.pos, ir.OLE, exprname, c.hi)
+ return ir.NodAt(c.pos, ir.OANDAND, low, high)
}
// Optimize "switch true { ...}" and "switch false { ... }".
- if Isconst(exprname, CTBOOL) && !c.lo.Type.IsInterface() {
- if exprname.BoolVal() {
+ if ir.IsConst(exprname, constant.Bool) && !c.lo.Type().IsInterface() {
+ if ir.BoolVal(exprname) {
return c.lo
} else {
- return nodl(c.pos, ONOT, c.lo, nil)
+ return ir.NodAt(c.pos, ir.ONOT, c.lo, nil)
}
}
- return nodl(c.pos, OEQ, exprname, c.lo)
+ return ir.NodAt(c.pos, ir.OEQ, exprname, c.lo)
}
-func allCaseExprsAreSideEffectFree(sw *Node) bool {
+func allCaseExprsAreSideEffectFree(sw *ir.SwitchStmt) bool {
// In theory, we could be more aggressive, allowing any
// side-effect-free expressions in cases, but it's a bit
// tricky because some of that information is unavailable due
// Restricting to constants is simple and probably powerful
// enough.
- for _, ncase := range sw.List.Slice() {
- if ncase.Op != OCASE {
- Fatalf("switch string(byteslice) bad op: %v", ncase.Op)
- }
- for _, v := range ncase.List.Slice() {
- if v.Op != OLITERAL {
+ for _, ncase := range sw.List().Slice() {
+ ncase := ncase.(*ir.CaseStmt)
+ for _, v := range ncase.List().Slice() {
+ if v.Op() != ir.OLITERAL {
return false
}
}
return true
}
-// hasFall reports whether stmts ends with a "fallthrough" statement.
-func hasFall(stmts []*Node) (bool, src.XPos) {
+// endsInFallthrough reports whether stmts ends with a "fallthrough" statement.
+func endsInFallthrough(stmts []ir.Node) (bool, src.XPos) {
// Search backwards for the index of the fallthrough
// statement. Do not assume it'll be in the last
// position, since in some cases (e.g. when the statement
// nodes will be at the end of the list.
i := len(stmts) - 1
- for i >= 0 && stmts[i].Op == OVARKILL {
+ for i >= 0 && stmts[i].Op() == ir.OVARKILL {
i--
}
if i < 0 {
return false, src.NoXPos
}
- return stmts[i].Op == OFALL, stmts[i].Pos
+ return stmts[i].Op() == ir.OFALL, stmts[i].Pos()
}
// walkTypeSwitch generates an AST that implements sw, where sw is a
// type switch.
-func walkTypeSwitch(sw *Node) {
+func walkTypeSwitch(sw *ir.SwitchStmt) {
var s typeSwitch
- s.facename = sw.Left.Right
- sw.Left = nil
+ s.facename = sw.Left().(*ir.TypeSwitchGuard).Right()
+ sw.SetLeft(nil)
- s.facename = walkexpr(s.facename, &sw.Ninit)
- s.facename = copyexpr(s.facename, s.facename.Type, &sw.Nbody)
- s.okname = temp(types.Types[TBOOL])
+ s.facename = walkexpr(s.facename, sw.PtrInit())
+ s.facename = copyexpr(s.facename, s.facename.Type(), sw.PtrBody())
+ s.okname = temp(types.Types[types.TBOOL])
// Get interface descriptor word.
// For empty interfaces this will be the type.
// For non-empty interfaces this will be the itab.
- itab := nod(OITAB, s.facename, nil)
+ itab := ir.Nod(ir.OITAB, s.facename, nil)
// For empty interfaces, do:
// if e._type == nil {
// }
// h := e._type.hash
// Use a similar strategy for non-empty interfaces.
- ifNil := nod(OIF, nil, nil)
- ifNil.Left = nod(OEQ, itab, nodnil())
- lineno = lineno.WithNotStmt() // disable statement marks after the first check.
- ifNil.Left = typecheck(ifNil.Left, ctxExpr)
- ifNil.Left = defaultlit(ifNil.Left, nil)
+ ifNil := ir.Nod(ir.OIF, nil, nil)
+ ifNil.SetLeft(ir.Nod(ir.OEQ, itab, nodnil()))
+ base.Pos = base.Pos.WithNotStmt() // disable statement marks after the first check.
+ ifNil.SetLeft(typecheck(ifNil.Left(), ctxExpr))
+ ifNil.SetLeft(defaultlit(ifNil.Left(), nil))
// ifNil.Nbody assigned at end.
- sw.Nbody.Append(ifNil)
+ sw.PtrBody().Append(ifNil)
// Load hash from type or itab.
- dotHash := nodSym(ODOTPTR, itab, nil)
- dotHash.Type = types.Types[TUINT32]
+ dotHash := nodSym(ir.ODOTPTR, itab, nil)
+ dotHash.SetType(types.Types[types.TUINT32])
dotHash.SetTypecheck(1)
- if s.facename.Type.IsEmptyInterface() {
- dotHash.Xoffset = int64(2 * Widthptr) // offset of hash in runtime._type
+ if s.facename.Type().IsEmptyInterface() {
+ dotHash.SetOffset(int64(2 * Widthptr)) // offset of hash in runtime._type
} else {
- dotHash.Xoffset = int64(2 * Widthptr) // offset of hash in runtime.itab
+ dotHash.SetOffset(int64(2 * Widthptr)) // offset of hash in runtime.itab
}
dotHash.SetBounded(true) // guaranteed not to fault
- s.hashname = copyexpr(dotHash, dotHash.Type, &sw.Nbody)
-
- br := nod(OBREAK, nil, nil)
- var defaultGoto, nilGoto *Node
- var body Nodes
- for _, ncase := range sw.List.Slice() {
- var caseVar *Node
- if ncase.Rlist.Len() != 0 {
- caseVar = ncase.Rlist.First()
+ s.hashname = copyexpr(dotHash, dotHash.Type(), sw.PtrBody())
+
+ br := ir.Nod(ir.OBREAK, nil, nil)
+ var defaultGoto, nilGoto ir.Node
+ var body ir.Nodes
+ for _, ncase := range sw.List().Slice() {
+ ncase := ncase.(*ir.CaseStmt)
+ var caseVar ir.Node
+ if ncase.Rlist().Len() != 0 {
+ caseVar = ncase.Rlist().First()
}
// For single-type cases with an interface type,
// we initialize the case variable as part of the type assertion.
// In other cases, we initialize it in the body.
var singleType *types.Type
- if ncase.List.Len() == 1 && ncase.List.First().Op == OTYPE {
- singleType = ncase.List.First().Type
+ if ncase.List().Len() == 1 && ncase.List().First().Op() == ir.OTYPE {
+ singleType = ncase.List().First().Type()
}
caseVarInitialized := false
label := autolabel(".s")
- jmp := npos(ncase.Pos, nodSym(OGOTO, nil, label))
+ jmp := npos(ncase.Pos(), nodSym(ir.OGOTO, nil, label))
- if ncase.List.Len() == 0 { // default:
+ if ncase.List().Len() == 0 { // default:
if defaultGoto != nil {
- Fatalf("duplicate default case not detected during typechecking")
+ base.Fatalf("duplicate default case not detected during typechecking")
}
defaultGoto = jmp
}
- for _, n1 := range ncase.List.Slice() {
- if n1.isNil() { // case nil:
+ for _, n1 := range ncase.List().Slice() {
+ if ir.IsNil(n1) { // case nil:
if nilGoto != nil {
- Fatalf("duplicate nil case not detected during typechecking")
+ base.Fatalf("duplicate nil case not detected during typechecking")
}
nilGoto = jmp
continue
}
if singleType != nil && singleType.IsInterface() {
- s.Add(ncase.Pos, n1.Type, caseVar, jmp)
+ s.Add(ncase.Pos(), n1.Type(), caseVar, jmp)
caseVarInitialized = true
} else {
- s.Add(ncase.Pos, n1.Type, nil, jmp)
+ s.Add(ncase.Pos(), n1.Type(), nil, jmp)
}
}
- body.Append(npos(ncase.Pos, nodSym(OLABEL, nil, label)))
+ body.Append(npos(ncase.Pos(), nodSym(ir.OLABEL, nil, label)))
if caseVar != nil && !caseVarInitialized {
val := s.facename
if singleType != nil {
// We have a single concrete type. Extract the data.
if singleType.IsInterface() {
- Fatalf("singleType interface should have been handled in Add")
+ base.Fatalf("singleType interface should have been handled in Add")
}
- val = ifaceData(ncase.Pos, s.facename, singleType)
+ val = ifaceData(ncase.Pos(), s.facename, singleType)
}
- l := []*Node{
- nodl(ncase.Pos, ODCL, caseVar, nil),
- nodl(ncase.Pos, OAS, caseVar, val),
+ l := []ir.Node{
+ ir.NodAt(ncase.Pos(), ir.ODCL, caseVar, nil),
+ ir.NodAt(ncase.Pos(), ir.OAS, caseVar, val),
}
typecheckslice(l, ctxStmt)
body.Append(l...)
}
- body.Append(ncase.Nbody.Slice()...)
+ body.Append(ncase.Body().Slice()...)
body.Append(br)
}
- sw.List.Set(nil)
+ sw.PtrList().Set(nil)
if defaultGoto == nil {
defaultGoto = br
if nilGoto == nil {
nilGoto = defaultGoto
}
- ifNil.Nbody.Set1(nilGoto)
+ ifNil.PtrBody().Set1(nilGoto)
- s.Emit(&sw.Nbody)
- sw.Nbody.Append(defaultGoto)
- sw.Nbody.AppendNodes(&body)
+ s.Emit(sw.PtrBody())
+ sw.PtrBody().Append(defaultGoto)
+ sw.PtrBody().AppendNodes(&body)
- walkstmtlist(sw.Nbody.Slice())
+ walkstmtlist(sw.Body().Slice())
}
// A typeSwitch walks a type switch.
type typeSwitch struct {
// Temporary variables (i.e., ONAMEs) used by type switch dispatch logic:
- facename *Node // value being type-switched on
- hashname *Node // type hash of the value being type-switched on
- okname *Node // boolean used for comma-ok type assertions
+ facename ir.Node // value being type-switched on
+ hashname ir.Node // type hash of the value being type-switched on
+ okname ir.Node // boolean used for comma-ok type assertions
- done Nodes
+ done ir.Nodes
clauses []typeClause
}
type typeClause struct {
hash uint32
- body Nodes
+ body ir.Nodes
}
-func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp *Node) {
- var body Nodes
+func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp ir.Node) {
+ var body ir.Nodes
if caseVar != nil {
- l := []*Node{
- nodl(pos, ODCL, caseVar, nil),
- nodl(pos, OAS, caseVar, nil),
+ l := []ir.Node{
+ ir.NodAt(pos, ir.ODCL, caseVar, nil),
+ ir.NodAt(pos, ir.OAS, caseVar, nil),
}
typecheckslice(l, ctxStmt)
body.Append(l...)
} else {
- caseVar = nblank
+ caseVar = ir.BlankNode
}
// cv, ok = iface.(type)
- as := nodl(pos, OAS2, nil, nil)
- as.List.Set2(caseVar, s.okname) // cv, ok =
- dot := nodl(pos, ODOTTYPE, s.facename, nil)
- dot.Type = typ // iface.(type)
- as.Rlist.Set1(dot)
- as = typecheck(as, ctxStmt)
- as = walkexpr(as, &body)
- body.Append(as)
+ as := ir.NodAt(pos, ir.OAS2, nil, nil)
+ as.PtrList().Set2(caseVar, s.okname) // cv, ok =
+ dot := ir.NodAt(pos, ir.ODOTTYPE, s.facename, nil)
+ dot.SetType(typ) // iface.(type)
+ as.PtrRlist().Set1(dot)
+ appendWalkStmt(&body, as)
// if ok { goto label }
- nif := nodl(pos, OIF, nil, nil)
- nif.Left = s.okname
- nif.Nbody.Set1(jmp)
+ nif := ir.NodAt(pos, ir.OIF, nil, nil)
+ nif.SetLeft(s.okname)
+ nif.PtrBody().Set1(jmp)
body.Append(nif)
if !typ.IsInterface() {
s.done.AppendNodes(&body)
}
-func (s *typeSwitch) Emit(out *Nodes) {
+func (s *typeSwitch) Emit(out *ir.Nodes) {
s.flush()
out.AppendNodes(&s.done)
}
cc = merged
binarySearch(len(cc), &s.done,
- func(i int) *Node {
- return nod(OLE, s.hashname, nodintconst(int64(cc[i-1].hash)))
+ func(i int) ir.Node {
+ return ir.Nod(ir.OLE, s.hashname, nodintconst(int64(cc[i-1].hash)))
},
- func(i int, nif *Node) {
+ func(i int, nif *ir.IfStmt) {
// TODO(mdempsky): Omit hash equality check if
// there's only one type.
c := cc[i]
- nif.Left = nod(OEQ, s.hashname, nodintconst(int64(c.hash)))
- nif.Nbody.AppendNodes(&c.body)
+ nif.SetLeft(ir.Nod(ir.OEQ, s.hashname, nodintconst(int64(c.hash))))
+ nif.PtrBody().AppendNodes(&c.body)
},
)
}
// less(i) should return a boolean expression. If it evaluates true,
// then cases before i will be tested; otherwise, cases i and later.
//
-// base(i, nif) should setup nif (an OIF node) to test case i. In
+// leaf(i, nif) should setup nif (an OIF node) to test case i. In
// particular, it should set nif.Left and nif.Nbody.
-func binarySearch(n int, out *Nodes, less func(i int) *Node, base func(i int, nif *Node)) {
+func binarySearch(n int, out *ir.Nodes, less func(i int) ir.Node, leaf func(i int, nif *ir.IfStmt)) {
const binarySearchMin = 4 // minimum number of cases for binary search
- var do func(lo, hi int, out *Nodes)
- do = func(lo, hi int, out *Nodes) {
+ var do func(lo, hi int, out *ir.Nodes)
+ do = func(lo, hi int, out *ir.Nodes) {
n := hi - lo
if n < binarySearchMin {
for i := lo; i < hi; i++ {
- nif := nod(OIF, nil, nil)
- base(i, nif)
- lineno = lineno.WithNotStmt()
- nif.Left = typecheck(nif.Left, ctxExpr)
- nif.Left = defaultlit(nif.Left, nil)
+ nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
+ leaf(i, nif)
+ base.Pos = base.Pos.WithNotStmt()
+ nif.SetLeft(typecheck(nif.Left(), ctxExpr))
+ nif.SetLeft(defaultlit(nif.Left(), nil))
out.Append(nif)
- out = &nif.Rlist
+ out = nif.PtrRlist()
}
return
}
half := lo + n/2
- nif := nod(OIF, nil, nil)
- nif.Left = less(half)
- lineno = lineno.WithNotStmt()
- nif.Left = typecheck(nif.Left, ctxExpr)
- nif.Left = defaultlit(nif.Left, nil)
- do(lo, half, &nif.Nbody)
- do(half, hi, &nif.Rlist)
+ nif := ir.Nod(ir.OIF, nil, nil)
+ nif.SetLeft(less(half))
+ base.Pos = base.Pos.WithNotStmt()
+ nif.SetLeft(typecheck(nif.Left(), ctxExpr))
+ nif.SetLeft(defaultlit(nif.Left(), nil))
+ do(lo, half, nif.PtrBody())
+ do(half, hi, nif.PtrRlist())
out.Append(nif)
}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// “Abstract” syntax representation.
-
-package gc
-
-import (
- "cmd/compile/internal/ssa"
- "cmd/compile/internal/types"
- "cmd/internal/obj"
- "cmd/internal/objabi"
- "cmd/internal/src"
- "sort"
-)
-
-// A Node is a single node in the syntax tree.
-// Actually the syntax tree is a syntax DAG, because there is only one
-// node with Op=ONAME for a given instance of a variable x.
-// The same is true for Op=OTYPE and Op=OLITERAL. See Node.mayBeShared.
-type Node struct {
- // Tree structure.
- // Generic recursive walks should follow these fields.
- Left *Node
- Right *Node
- Ninit Nodes
- Nbody Nodes
- List Nodes
- Rlist Nodes
-
- // most nodes
- Type *types.Type
- Orig *Node // original form, for printing, and tracking copies of ONAMEs
-
- // func
- Func *Func
-
- // ONAME, OTYPE, OPACK, OLABEL, some OLITERAL
- Name *Name
-
- Sym *types.Sym // various
- E interface{} // Opt or Val, see methods below
-
- // Various. Usually an offset into a struct. For example:
- // - ONAME nodes that refer to local variables use it to identify their stack frame position.
- // - ODOT, ODOTPTR, and ORESULT use it to indicate offset relative to their base address.
- // - OSTRUCTKEY uses it to store the named field's offset.
- // - Named OLITERALs use it to store their ambient iota value.
- // - OINLMARK stores an index into the inlTree data structure.
- // - OCLOSURE uses it to store ambient iota value, if any.
- // Possibly still more uses. If you find any, document them.
- Xoffset int64
-
- Pos src.XPos
-
- flags bitset32
-
- Esc uint16 // EscXXX
-
- Op Op
- aux uint8
-}
-
-func (n *Node) ResetAux() {
- n.aux = 0
-}
-
-func (n *Node) SubOp() Op {
- switch n.Op {
- case OASOP, ONAME:
- default:
- Fatalf("unexpected op: %v", n.Op)
- }
- return Op(n.aux)
-}
-
-func (n *Node) SetSubOp(op Op) {
- switch n.Op {
- case OASOP, ONAME:
- default:
- Fatalf("unexpected op: %v", n.Op)
- }
- n.aux = uint8(op)
-}
-
-func (n *Node) IndexMapLValue() bool {
- if n.Op != OINDEXMAP {
- Fatalf("unexpected op: %v", n.Op)
- }
- return n.aux != 0
-}
-
-func (n *Node) SetIndexMapLValue(b bool) {
- if n.Op != OINDEXMAP {
- Fatalf("unexpected op: %v", n.Op)
- }
- if b {
- n.aux = 1
- } else {
- n.aux = 0
- }
-}
-
-func (n *Node) TChanDir() types.ChanDir {
- if n.Op != OTCHAN {
- Fatalf("unexpected op: %v", n.Op)
- }
- return types.ChanDir(n.aux)
-}
-
-func (n *Node) SetTChanDir(dir types.ChanDir) {
- if n.Op != OTCHAN {
- Fatalf("unexpected op: %v", n.Op)
- }
- n.aux = uint8(dir)
-}
-
-func (n *Node) IsSynthetic() bool {
- name := n.Sym.Name
- return name[0] == '.' || name[0] == '~'
-}
-
-// IsAutoTmp indicates if n was created by the compiler as a temporary,
-// based on the setting of the .AutoTemp flag in n's Name.
-func (n *Node) IsAutoTmp() bool {
- if n == nil || n.Op != ONAME {
- return false
- }
- return n.Name.AutoTemp()
-}
-
-const (
- nodeClass, _ = iota, 1 << iota // PPARAM, PAUTO, PEXTERN, etc; three bits; first in the list because frequently accessed
- _, _ // second nodeClass bit
- _, _ // third nodeClass bit
- nodeWalkdef, _ // tracks state during typecheckdef; 2 == loop detected; two bits
- _, _ // second nodeWalkdef bit
- nodeTypecheck, _ // tracks state during typechecking; 2 == loop detected; two bits
- _, _ // second nodeTypecheck bit
- nodeInitorder, _ // tracks state during init1; two bits
- _, _ // second nodeInitorder bit
- _, nodeHasBreak
- _, nodeNoInline // used internally by inliner to indicate that a function call should not be inlined; set for OCALLFUNC and OCALLMETH only
- _, nodeImplicit // implicit OADDR or ODEREF; ++/-- statement represented as OASOP
- _, nodeIsDDD // is the argument variadic
- _, nodeDiag // already printed error about this
- _, nodeColas // OAS resulting from :=
- _, nodeNonNil // guaranteed to be non-nil
- _, nodeTransient // storage can be reused immediately after this statement
- _, nodeBounded // bounds check unnecessary
- _, nodeHasCall // expression contains a function call
- _, nodeLikely // if statement condition likely
- _, nodeHasVal // node.E contains a Val
- _, nodeHasOpt // node.E contains an Opt
- _, nodeEmbedded // ODCLFIELD embedded type
-)
-
-func (n *Node) Class() Class { return Class(n.flags.get3(nodeClass)) }
-func (n *Node) Walkdef() uint8 { return n.flags.get2(nodeWalkdef) }
-func (n *Node) Typecheck() uint8 { return n.flags.get2(nodeTypecheck) }
-func (n *Node) Initorder() uint8 { return n.flags.get2(nodeInitorder) }
-
-func (n *Node) HasBreak() bool { return n.flags&nodeHasBreak != 0 }
-func (n *Node) NoInline() bool { return n.flags&nodeNoInline != 0 }
-func (n *Node) Implicit() bool { return n.flags&nodeImplicit != 0 }
-func (n *Node) IsDDD() bool { return n.flags&nodeIsDDD != 0 }
-func (n *Node) Diag() bool { return n.flags&nodeDiag != 0 }
-func (n *Node) Colas() bool { return n.flags&nodeColas != 0 }
-func (n *Node) NonNil() bool { return n.flags&nodeNonNil != 0 }
-func (n *Node) Transient() bool { return n.flags&nodeTransient != 0 }
-func (n *Node) Bounded() bool { return n.flags&nodeBounded != 0 }
-func (n *Node) HasCall() bool { return n.flags&nodeHasCall != 0 }
-func (n *Node) Likely() bool { return n.flags&nodeLikely != 0 }
-func (n *Node) HasVal() bool { return n.flags&nodeHasVal != 0 }
-func (n *Node) HasOpt() bool { return n.flags&nodeHasOpt != 0 }
-func (n *Node) Embedded() bool { return n.flags&nodeEmbedded != 0 }
-
-func (n *Node) SetClass(b Class) { n.flags.set3(nodeClass, uint8(b)) }
-func (n *Node) SetWalkdef(b uint8) { n.flags.set2(nodeWalkdef, b) }
-func (n *Node) SetTypecheck(b uint8) { n.flags.set2(nodeTypecheck, b) }
-func (n *Node) SetInitorder(b uint8) { n.flags.set2(nodeInitorder, b) }
-
-func (n *Node) SetHasBreak(b bool) { n.flags.set(nodeHasBreak, b) }
-func (n *Node) SetNoInline(b bool) { n.flags.set(nodeNoInline, b) }
-func (n *Node) SetImplicit(b bool) { n.flags.set(nodeImplicit, b) }
-func (n *Node) SetIsDDD(b bool) { n.flags.set(nodeIsDDD, b) }
-func (n *Node) SetDiag(b bool) { n.flags.set(nodeDiag, b) }
-func (n *Node) SetColas(b bool) { n.flags.set(nodeColas, b) }
-func (n *Node) SetTransient(b bool) { n.flags.set(nodeTransient, b) }
-func (n *Node) SetHasCall(b bool) { n.flags.set(nodeHasCall, b) }
-func (n *Node) SetLikely(b bool) { n.flags.set(nodeLikely, b) }
-func (n *Node) SetHasVal(b bool) { n.flags.set(nodeHasVal, b) }
-func (n *Node) SetHasOpt(b bool) { n.flags.set(nodeHasOpt, b) }
-func (n *Node) SetEmbedded(b bool) { n.flags.set(nodeEmbedded, b) }
-
-// MarkNonNil marks a pointer n as being guaranteed non-nil,
-// on all code paths, at all times.
-// During conversion to SSA, non-nil pointers won't have nil checks
-// inserted before dereferencing. See state.exprPtr.
-func (n *Node) MarkNonNil() {
- if !n.Type.IsPtr() && !n.Type.IsUnsafePtr() {
- Fatalf("MarkNonNil(%v), type %v", n, n.Type)
- }
- n.flags.set(nodeNonNil, true)
-}
-
-// SetBounded indicates whether operation n does not need safety checks.
-// When n is an index or slice operation, n does not need bounds checks.
-// When n is a dereferencing operation, n does not need nil checks.
-// When n is a makeslice+copy operation, n does not need length and cap checks.
-func (n *Node) SetBounded(b bool) {
- switch n.Op {
- case OINDEX, OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR, OSLICESTR:
- // No bounds checks needed.
- case ODOTPTR, ODEREF:
- // No nil check needed.
- case OMAKESLICECOPY:
- // No length and cap checks needed
- // since new slice and copied over slice data have same length.
- default:
- Fatalf("SetBounded(%v)", n)
- }
- n.flags.set(nodeBounded, b)
-}
-
-// MarkReadonly indicates that n is an ONAME with readonly contents.
-func (n *Node) MarkReadonly() {
- if n.Op != ONAME {
- Fatalf("Node.MarkReadonly %v", n.Op)
- }
- n.Name.SetReadonly(true)
- // Mark the linksym as readonly immediately
- // so that the SSA backend can use this information.
- // It will be overridden later during dumpglobls.
- n.Sym.Linksym().Type = objabi.SRODATA
-}
-
-// Val returns the Val for the node.
-func (n *Node) Val() Val {
- if !n.HasVal() {
- return Val{}
- }
- return Val{n.E}
-}
-
-// SetVal sets the Val for the node, which must not have been used with SetOpt.
-func (n *Node) SetVal(v Val) {
- if n.HasOpt() {
- Debug.h = 1
- Dump("have Opt", n)
- Fatalf("have Opt")
- }
- n.SetHasVal(true)
- n.E = v.U
-}
-
-// Opt returns the optimizer data for the node.
-func (n *Node) Opt() interface{} {
- if !n.HasOpt() {
- return nil
- }
- return n.E
-}
-
-// SetOpt sets the optimizer data for the node, which must not have been used with SetVal.
-// SetOpt(nil) is ignored for Vals to simplify call sites that are clearing Opts.
-func (n *Node) SetOpt(x interface{}) {
- if x == nil && n.HasVal() {
- return
- }
- if n.HasVal() {
- Debug.h = 1
- Dump("have Val", n)
- Fatalf("have Val")
- }
- n.SetHasOpt(true)
- n.E = x
-}
-
-func (n *Node) Iota() int64 {
- return n.Xoffset
-}
-
-func (n *Node) SetIota(x int64) {
- n.Xoffset = x
-}
-
-// mayBeShared reports whether n may occur in multiple places in the AST.
-// Extra care must be taken when mutating such a node.
-func (n *Node) mayBeShared() bool {
- switch n.Op {
- case ONAME, OLITERAL, OTYPE:
- return true
- }
- return false
-}
-
-// isMethodExpression reports whether n represents a method expression T.M.
-func (n *Node) isMethodExpression() bool {
- return n.Op == ONAME && n.Left != nil && n.Left.Op == OTYPE && n.Right != nil && n.Right.Op == ONAME
-}
-
-// funcname returns the name (without the package) of the function n.
-func (n *Node) funcname() string {
- if n == nil || n.Func == nil || n.Func.Nname == nil {
- return "<nil>"
- }
- return n.Func.Nname.Sym.Name
-}
-
-// pkgFuncName returns the name of the function referenced by n, with package prepended.
-// This differs from the compiler's internal convention where local functions lack a package
-// because the ultimate consumer of this is a human looking at an IDE; package is only empty
-// if the compilation package is actually the empty string.
-func (n *Node) pkgFuncName() string {
- var s *types.Sym
- if n == nil {
- return "<nil>"
- }
- if n.Op == ONAME {
- s = n.Sym
- } else {
- if n.Func == nil || n.Func.Nname == nil {
- return "<nil>"
- }
- s = n.Func.Nname.Sym
- }
- pkg := s.Pkg
-
- p := myimportpath
- if pkg != nil && pkg.Path != "" {
- p = pkg.Path
- }
- if p == "" {
- return s.Name
- }
- return p + "." + s.Name
-}
-
-// The compiler needs *Node to be assignable to cmd/compile/internal/ssa.Sym.
-func (n *Node) CanBeAnSSASym() {
-}
-
-// Name holds Node fields used only by named nodes (ONAME, OTYPE, OPACK, OLABEL, some OLITERAL).
-type Name struct {
- Pack *Node // real package for import . names
- Pkg *types.Pkg // pkg for OPACK nodes
- // For a local variable (not param) or extern, the initializing assignment (OAS or OAS2).
- // For a closure var, the ONAME node of the outer captured variable
- Defn *Node
- // The ODCLFUNC node (for a static function/method or a closure) in which
- // local variable or param is declared.
- Curfn *Node
- Param *Param // additional fields for ONAME, OTYPE
- Decldepth int32 // declaration loop depth, increased for every loop or label
- // Unique number for ONAME nodes within a function. Function outputs
- // (results) are numbered starting at one, followed by function inputs
- // (parameters), and then local variables. Vargen is used to distinguish
- // local variables/params with the same name.
- Vargen int32
- flags bitset16
-}
-
-const (
- nameCaptured = 1 << iota // is the variable captured by a closure
- nameReadonly
- nameByval // is the variable captured by value or by reference
- nameNeedzero // if it contains pointers, needs to be zeroed on function entry
- nameAutoTemp // is the variable a temporary (implies no dwarf info. reset if escapes to heap)
- nameUsed // for variable declared and not used error
- nameIsClosureVar // PAUTOHEAP closure pseudo-variable; original at n.Name.Defn
- nameIsOutputParamHeapAddr // pointer to a result parameter's heap copy
- nameAssigned // is the variable ever assigned to
- nameAddrtaken // address taken, even if not moved to heap
- nameInlFormal // PAUTO created by inliner, derived from callee formal
- nameInlLocal // PAUTO created by inliner, derived from callee local
- nameOpenDeferSlot // if temporary var storing info for open-coded defers
- nameLibfuzzerExtraCounter // if PEXTERN should be assigned to __libfuzzer_extra_counters section
-)
-
-func (n *Name) Captured() bool { return n.flags&nameCaptured != 0 }
-func (n *Name) Readonly() bool { return n.flags&nameReadonly != 0 }
-func (n *Name) Byval() bool { return n.flags&nameByval != 0 }
-func (n *Name) Needzero() bool { return n.flags&nameNeedzero != 0 }
-func (n *Name) AutoTemp() bool { return n.flags&nameAutoTemp != 0 }
-func (n *Name) Used() bool { return n.flags&nameUsed != 0 }
-func (n *Name) IsClosureVar() bool { return n.flags&nameIsClosureVar != 0 }
-func (n *Name) IsOutputParamHeapAddr() bool { return n.flags&nameIsOutputParamHeapAddr != 0 }
-func (n *Name) Assigned() bool { return n.flags&nameAssigned != 0 }
-func (n *Name) Addrtaken() bool { return n.flags&nameAddrtaken != 0 }
-func (n *Name) InlFormal() bool { return n.flags&nameInlFormal != 0 }
-func (n *Name) InlLocal() bool { return n.flags&nameInlLocal != 0 }
-func (n *Name) OpenDeferSlot() bool { return n.flags&nameOpenDeferSlot != 0 }
-func (n *Name) LibfuzzerExtraCounter() bool { return n.flags&nameLibfuzzerExtraCounter != 0 }
-
-func (n *Name) SetCaptured(b bool) { n.flags.set(nameCaptured, b) }
-func (n *Name) SetReadonly(b bool) { n.flags.set(nameReadonly, b) }
-func (n *Name) SetByval(b bool) { n.flags.set(nameByval, b) }
-func (n *Name) SetNeedzero(b bool) { n.flags.set(nameNeedzero, b) }
-func (n *Name) SetAutoTemp(b bool) { n.flags.set(nameAutoTemp, b) }
-func (n *Name) SetUsed(b bool) { n.flags.set(nameUsed, b) }
-func (n *Name) SetIsClosureVar(b bool) { n.flags.set(nameIsClosureVar, b) }
-func (n *Name) SetIsOutputParamHeapAddr(b bool) { n.flags.set(nameIsOutputParamHeapAddr, b) }
-func (n *Name) SetAssigned(b bool) { n.flags.set(nameAssigned, b) }
-func (n *Name) SetAddrtaken(b bool) { n.flags.set(nameAddrtaken, b) }
-func (n *Name) SetInlFormal(b bool) { n.flags.set(nameInlFormal, b) }
-func (n *Name) SetInlLocal(b bool) { n.flags.set(nameInlLocal, b) }
-func (n *Name) SetOpenDeferSlot(b bool) { n.flags.set(nameOpenDeferSlot, b) }
-func (n *Name) SetLibfuzzerExtraCounter(b bool) { n.flags.set(nameLibfuzzerExtraCounter, b) }
-
-type Param struct {
- Ntype *Node
- Heapaddr *Node // temp holding heap address of param
-
- // ONAME PAUTOHEAP
- Stackcopy *Node // the PPARAM/PPARAMOUT on-stack slot (moved func params only)
-
- // ONAME closure linkage
- // Consider:
- //
- // func f() {
- // x := 1 // x1
- // func() {
- // use(x) // x2
- // func() {
- // use(x) // x3
- // --- parser is here ---
- // }()
- // }()
- // }
- //
- // There is an original declaration of x and then a chain of mentions of x
- // leading into the current function. Each time x is mentioned in a new closure,
- // we create a variable representing x for use in that specific closure,
- // since the way you get to x is different in each closure.
- //
- // Let's number the specific variables as shown in the code:
- // x1 is the original x, x2 is when mentioned in the closure,
- // and x3 is when mentioned in the closure in the closure.
- //
- // We keep these linked (assume N > 1):
- //
- // - x1.Defn = original declaration statement for x (like most variables)
- // - x1.Innermost = current innermost closure x (in this case x3), or nil for none
- // - x1.IsClosureVar() = false
- //
- // - xN.Defn = x1, N > 1
- // - xN.IsClosureVar() = true, N > 1
- // - x2.Outer = nil
- // - xN.Outer = x(N-1), N > 2
- //
- //
- // When we look up x in the symbol table, we always get x1.
- // Then we can use x1.Innermost (if not nil) to get the x
- // for the innermost known closure function,
- // but the first reference in a closure will find either no x1.Innermost
- // or an x1.Innermost with .Funcdepth < Funcdepth.
- // In that case, a new xN must be created, linked in with:
- //
- // xN.Defn = x1
- // xN.Outer = x1.Innermost
- // x1.Innermost = xN
- //
- // When we finish the function, we'll process its closure variables
- // and find xN and pop it off the list using:
- //
- // x1 := xN.Defn
- // x1.Innermost = xN.Outer
- //
- // We leave x1.Innermost set so that we can still get to the original
- // variable quickly. Not shown here, but once we're
- // done parsing a function and no longer need xN.Outer for the
- // lexical x reference links as described above, funcLit
- // recomputes xN.Outer as the semantic x reference link tree,
- // even filling in x in intermediate closures that might not
- // have mentioned it along the way to inner closures that did.
- // See funcLit for details.
- //
- // During the eventual compilation, then, for closure variables we have:
- //
- // xN.Defn = original variable
- // xN.Outer = variable captured in next outward scope
- // to make closure where xN appears
- //
- // Because of the sharding of pieces of the node, x.Defn means x.Name.Defn
- // and x.Innermost/Outer means x.Name.Param.Innermost/Outer.
- Innermost *Node
- Outer *Node
-
- // OTYPE & ONAME //go:embed info,
- // sharing storage to reduce gc.Param size.
- // Extra is nil, or else *Extra is a *paramType or an *embedFileList.
- Extra *interface{}
-}
-
-type paramType struct {
- flag PragmaFlag
- alias bool
-}
-
-type embedFileList []string
-
-// Pragma returns the PragmaFlag for p, which must be for an OTYPE.
-func (p *Param) Pragma() PragmaFlag {
- if p.Extra == nil {
- return 0
- }
- return (*p.Extra).(*paramType).flag
-}
-
-// SetPragma sets the PragmaFlag for p, which must be for an OTYPE.
-func (p *Param) SetPragma(flag PragmaFlag) {
- if p.Extra == nil {
- if flag == 0 {
- return
- }
- p.Extra = new(interface{})
- *p.Extra = &paramType{flag: flag}
- return
- }
- (*p.Extra).(*paramType).flag = flag
-}
-
-// Alias reports whether p, which must be for an OTYPE, is a type alias.
-func (p *Param) Alias() bool {
- if p.Extra == nil {
- return false
- }
- t, ok := (*p.Extra).(*paramType)
- if !ok {
- return false
- }
- return t.alias
-}
-
-// SetAlias sets whether p, which must be for an OTYPE, is a type alias.
-func (p *Param) SetAlias(alias bool) {
- if p.Extra == nil {
- if !alias {
- return
- }
- p.Extra = new(interface{})
- *p.Extra = &paramType{alias: alias}
- return
- }
- (*p.Extra).(*paramType).alias = alias
-}
-
-// EmbedFiles returns the list of embedded files for p,
-// which must be for an ONAME var.
-func (p *Param) EmbedFiles() []string {
- if p.Extra == nil {
- return nil
- }
- return *(*p.Extra).(*embedFileList)
-}
-
-// SetEmbedFiles sets the list of embedded files for p,
-// which must be for an ONAME var.
-func (p *Param) SetEmbedFiles(list []string) {
- if p.Extra == nil {
- if len(list) == 0 {
- return
- }
- f := embedFileList(list)
- p.Extra = new(interface{})
- *p.Extra = &f
- return
- }
- *(*p.Extra).(*embedFileList) = list
-}
-
-// Functions
-//
-// A simple function declaration is represented as an ODCLFUNC node f
-// and an ONAME node n. They're linked to one another through
-// f.Func.Nname == n and n.Name.Defn == f. When functions are
-// referenced by name in an expression, the function's ONAME node is
-// used directly.
-//
-// Function names have n.Class() == PFUNC. This distinguishes them
-// from variables of function type.
-//
-// Confusingly, n.Func and f.Func both exist, but commonly point to
-// different Funcs. (Exception: an OCALLPART's Func does point to its
-// ODCLFUNC's Func.)
-//
-// A method declaration is represented like functions, except n.Sym
-// will be the qualified method name (e.g., "T.m") and
-// f.Func.Shortname is the bare method name (e.g., "m").
-//
-// Method expressions are represented as ONAME/PFUNC nodes like
-// function names, but their Left and Right fields still point to the
-// type and method, respectively. They can be distinguished from
-// normal functions with isMethodExpression. Also, unlike function
-// name nodes, method expression nodes exist for each method
-// expression. The declaration ONAME can be accessed with
-// x.Type.Nname(), where x is the method expression ONAME node.
-//
-// Method values are represented by ODOTMETH/ODOTINTER when called
-// immediately, and OCALLPART otherwise. They are like method
-// expressions, except that for ODOTMETH/ODOTINTER the method name is
-// stored in Sym instead of Right.
-//
-// Closures are represented by OCLOSURE node c. They link back and
-// forth with the ODCLFUNC via Func.Closure; that is, c.Func.Closure
-// == f and f.Func.Closure == c.
-//
-// Function bodies are stored in f.Nbody, and inline function bodies
-// are stored in n.Func.Inl. Pragmas are stored in f.Func.Pragma.
-//
-// Imported functions skip the ODCLFUNC, so n.Name.Defn is nil. They
-// also use Dcl instead of Inldcl.
-
-// Func holds Node fields used only with function-like nodes.
-type Func struct {
- Shortname *types.Sym
- // Extra entry code for the function. For example, allocate and initialize
- // memory for escaping parameters. However, just for OCLOSURE, Enter is a
- // list of ONAME nodes of captured variables
- Enter Nodes
- Exit Nodes
- // ONAME nodes for closure params, each should have closurevar set
- Cvars Nodes
- // ONAME nodes for all params/locals for this func/closure, does NOT
- // include closurevars until transformclosure runs.
- Dcl []*Node
-
- // Parents records the parent scope of each scope within a
- // function. The root scope (0) has no parent, so the i'th
- // scope's parent is stored at Parents[i-1].
- Parents []ScopeID
-
- // Marks records scope boundary changes.
- Marks []Mark
-
- // Closgen tracks how many closures have been generated within
- // this function. Used by closurename for creating unique
- // function names.
- Closgen int
-
- FieldTrack map[*types.Sym]struct{}
- DebugInfo *ssa.FuncDebug
- Ntype *Node // signature
- Top int // top context (ctxCallee, etc)
- Closure *Node // OCLOSURE <-> ODCLFUNC (see header comment above)
- Nname *Node // The ONAME node associated with an ODCLFUNC (both have same Type)
- lsym *obj.LSym
-
- Inl *Inline
-
- Label int32 // largest auto-generated label in this function
-
- Endlineno src.XPos
- WBPos src.XPos // position of first write barrier; see SetWBPos
-
- Pragma PragmaFlag // go:xxx function annotations
-
- flags bitset16
- numDefers int // number of defer calls in the function
- numReturns int // number of explicit returns in the function
-
- // nwbrCalls records the LSyms of functions called by this
- // function for go:nowritebarrierrec analysis. Only filled in
- // if nowritebarrierrecCheck != nil.
- nwbrCalls *[]nowritebarrierrecCallSym
-}
-
-// An Inline holds fields used for function bodies that can be inlined.
-type Inline struct {
- Cost int32 // heuristic cost of inlining this function
-
- // Copies of Func.Dcl and Nbody for use during inlining.
- Dcl []*Node
- Body []*Node
-}
-
-// A Mark represents a scope boundary.
-type Mark struct {
- // Pos is the position of the token that marks the scope
- // change.
- Pos src.XPos
-
- // Scope identifies the innermost scope to the right of Pos.
- Scope ScopeID
-}
-
-// A ScopeID represents a lexical scope within a function.
-type ScopeID int32
-
-const (
- funcDupok = 1 << iota // duplicate definitions ok
- funcWrapper // is method wrapper
- funcNeedctxt // function uses context register (has closure variables)
- funcReflectMethod // function calls reflect.Type.Method or MethodByName
- // true if closure inside a function; false if a simple function or a
- // closure in a global variable initialization
- funcIsHiddenClosure
- funcHasDefer // contains a defer statement
- funcNilCheckDisabled // disable nil checks when compiling this function
- funcInlinabilityChecked // inliner has already determined whether the function is inlinable
- funcExportInline // include inline body in export data
- funcInstrumentBody // add race/msan instrumentation during SSA construction
- funcOpenCodedDeferDisallowed // can't do open-coded defers
-)
-
-func (f *Func) Dupok() bool { return f.flags&funcDupok != 0 }
-func (f *Func) Wrapper() bool { return f.flags&funcWrapper != 0 }
-func (f *Func) Needctxt() bool { return f.flags&funcNeedctxt != 0 }
-func (f *Func) ReflectMethod() bool { return f.flags&funcReflectMethod != 0 }
-func (f *Func) IsHiddenClosure() bool { return f.flags&funcIsHiddenClosure != 0 }
-func (f *Func) HasDefer() bool { return f.flags&funcHasDefer != 0 }
-func (f *Func) NilCheckDisabled() bool { return f.flags&funcNilCheckDisabled != 0 }
-func (f *Func) InlinabilityChecked() bool { return f.flags&funcInlinabilityChecked != 0 }
-func (f *Func) ExportInline() bool { return f.flags&funcExportInline != 0 }
-func (f *Func) InstrumentBody() bool { return f.flags&funcInstrumentBody != 0 }
-func (f *Func) OpenCodedDeferDisallowed() bool { return f.flags&funcOpenCodedDeferDisallowed != 0 }
-
-func (f *Func) SetDupok(b bool) { f.flags.set(funcDupok, b) }
-func (f *Func) SetWrapper(b bool) { f.flags.set(funcWrapper, b) }
-func (f *Func) SetNeedctxt(b bool) { f.flags.set(funcNeedctxt, b) }
-func (f *Func) SetReflectMethod(b bool) { f.flags.set(funcReflectMethod, b) }
-func (f *Func) SetIsHiddenClosure(b bool) { f.flags.set(funcIsHiddenClosure, b) }
-func (f *Func) SetHasDefer(b bool) { f.flags.set(funcHasDefer, b) }
-func (f *Func) SetNilCheckDisabled(b bool) { f.flags.set(funcNilCheckDisabled, b) }
-func (f *Func) SetInlinabilityChecked(b bool) { f.flags.set(funcInlinabilityChecked, b) }
-func (f *Func) SetExportInline(b bool) { f.flags.set(funcExportInline, b) }
-func (f *Func) SetInstrumentBody(b bool) { f.flags.set(funcInstrumentBody, b) }
-func (f *Func) SetOpenCodedDeferDisallowed(b bool) { f.flags.set(funcOpenCodedDeferDisallowed, b) }
-
-func (f *Func) setWBPos(pos src.XPos) {
- if Debug_wb != 0 {
- Warnl(pos, "write barrier")
- }
- if !f.WBPos.IsKnown() {
- f.WBPos = pos
- }
-}
-
-//go:generate stringer -type=Op -trimprefix=O
-
-type Op uint8
-
-// Node ops.
-const (
- OXXX Op = iota
-
- // names
- ONAME // var or func name
- // Unnamed arg or return value: f(int, string) (int, error) { etc }
- // Also used for a qualified package identifier that hasn't been resolved yet.
- ONONAME
- OTYPE // type name
- OPACK // import
- OLITERAL // literal
-
- // expressions
- OADD // Left + Right
- OSUB // Left - Right
- OOR // Left | Right
- OXOR // Left ^ Right
- OADDSTR // +{List} (string addition, list elements are strings)
- OADDR // &Left
- OANDAND // Left && Right
- OAPPEND // append(List); after walk, Left may contain elem type descriptor
- OBYTES2STR // Type(Left) (Type is string, Left is a []byte)
- OBYTES2STRTMP // Type(Left) (Type is string, Left is a []byte, ephemeral)
- ORUNES2STR // Type(Left) (Type is string, Left is a []rune)
- OSTR2BYTES // Type(Left) (Type is []byte, Left is a string)
- OSTR2BYTESTMP // Type(Left) (Type is []byte, Left is a string, ephemeral)
- OSTR2RUNES // Type(Left) (Type is []rune, Left is a string)
- // Left = Right or (if Colas=true) Left := Right
- // If Colas, then Ninit includes a DCL node for Left.
- OAS
- // List = Rlist (x, y, z = a, b, c) or (if Colas=true) List := Rlist
- // If Colas, then Ninit includes DCL nodes for List
- OAS2
- OAS2DOTTYPE // List = Right (x, ok = I.(int))
- OAS2FUNC // List = Right (x, y = f())
- OAS2MAPR // List = Right (x, ok = m["foo"])
- OAS2RECV // List = Right (x, ok = <-c)
- OASOP // Left Etype= Right (x += y)
- OCALL // Left(List) (function call, method call or type conversion)
-
- // OCALLFUNC, OCALLMETH, and OCALLINTER have the same structure.
- // Prior to walk, they are: Left(List), where List is all regular arguments.
- // After walk, List is a series of assignments to temporaries,
- // and Rlist is an updated set of arguments.
- // Nbody is all OVARLIVE nodes that are attached to OCALLxxx.
- // TODO(josharian/khr): Use Ninit instead of List for the assignments to temporaries. See CL 114797.
- OCALLFUNC // Left(List/Rlist) (function call f(args))
- OCALLMETH // Left(List/Rlist) (direct method call x.Method(args))
- OCALLINTER // Left(List/Rlist) (interface method call x.Method(args))
- OCALLPART // Left.Right (method expression x.Method, not called)
- OCAP // cap(Left)
- OCLOSE // close(Left)
- OCLOSURE // func Type { Func.Closure.Nbody } (func literal)
- OCOMPLIT // Right{List} (composite literal, not yet lowered to specific form)
- OMAPLIT // Type{List} (composite literal, Type is map)
- OSTRUCTLIT // Type{List} (composite literal, Type is struct)
- OARRAYLIT // Type{List} (composite literal, Type is array)
- OSLICELIT // Type{List} (composite literal, Type is slice) Right.Int64() = slice length.
- OPTRLIT // &Left (left is composite literal)
- OCONV // Type(Left) (type conversion)
- OCONVIFACE // Type(Left) (type conversion, to interface)
- OCONVNOP // Type(Left) (type conversion, no effect)
- OCOPY // copy(Left, Right)
- ODCL // var Left (declares Left of type Left.Type)
-
- // Used during parsing but don't last.
- ODCLFUNC // func f() or func (r) f()
- ODCLFIELD // struct field, interface field, or func/method argument/return value.
- ODCLCONST // const pi = 3.14
- ODCLTYPE // type Int int or type Int = int
-
- ODELETE // delete(List)
- ODOT // Left.Sym (Left is of struct type)
- ODOTPTR // Left.Sym (Left is of pointer to struct type)
- ODOTMETH // Left.Sym (Left is non-interface, Right is method name)
- ODOTINTER // Left.Sym (Left is interface, Right is method name)
- OXDOT // Left.Sym (before rewrite to one of the preceding)
- ODOTTYPE // Left.Right or Left.Type (.Right during parsing, .Type once resolved); after walk, .Right contains address of interface type descriptor and .Right.Right contains address of concrete type descriptor
- ODOTTYPE2 // Left.Right or Left.Type (.Right during parsing, .Type once resolved; on rhs of OAS2DOTTYPE); after walk, .Right contains address of interface type descriptor
- OEQ // Left == Right
- ONE // Left != Right
- OLT // Left < Right
- OLE // Left <= Right
- OGE // Left >= Right
- OGT // Left > Right
- ODEREF // *Left
- OINDEX // Left[Right] (index of array or slice)
- OINDEXMAP // Left[Right] (index of map)
- OKEY // Left:Right (key:value in struct/array/map literal)
- OSTRUCTKEY // Sym:Left (key:value in struct literal, after type checking)
- OLEN // len(Left)
- OMAKE // make(List) (before type checking converts to one of the following)
- OMAKECHAN // make(Type, Left) (type is chan)
- OMAKEMAP // make(Type, Left) (type is map)
- OMAKESLICE // make(Type, Left, Right) (type is slice)
- OMAKESLICECOPY // makeslicecopy(Type, Left, Right) (type is slice; Left is length and Right is the copied from slice)
- // OMAKESLICECOPY is created by the order pass and corresponds to:
- // s = make(Type, Left); copy(s, Right)
- //
- // Bounded can be set on the node when Left == len(Right) is known at compile time.
- //
- // This node is created so the walk pass can optimize this pattern which would
- // otherwise be hard to detect after the order pass.
- OMUL // Left * Right
- ODIV // Left / Right
- OMOD // Left % Right
- OLSH // Left << Right
- ORSH // Left >> Right
- OAND // Left & Right
- OANDNOT // Left &^ Right
- ONEW // new(Left); corresponds to calls to new in source code
- ONEWOBJ // runtime.newobject(n.Type); introduced by walk; Left is type descriptor
- ONOT // !Left
- OBITNOT // ^Left
- OPLUS // +Left
- ONEG // -Left
- OOROR // Left || Right
- OPANIC // panic(Left)
- OPRINT // print(List)
- OPRINTN // println(List)
- OPAREN // (Left)
- OSEND // Left <- Right
- OSLICE // Left[List[0] : List[1]] (Left is untypechecked or slice)
- OSLICEARR // Left[List[0] : List[1]] (Left is array)
- OSLICESTR // Left[List[0] : List[1]] (Left is string)
- OSLICE3 // Left[List[0] : List[1] : List[2]] (Left is untypedchecked or slice)
- OSLICE3ARR // Left[List[0] : List[1] : List[2]] (Left is array)
- OSLICEHEADER // sliceheader{Left, List[0], List[1]} (Left is unsafe.Pointer, List[0] is length, List[1] is capacity)
- ORECOVER // recover()
- ORECV // <-Left
- ORUNESTR // Type(Left) (Type is string, Left is rune)
- OSELRECV // Left = <-Right.Left: (appears as .Left of OCASE; Right.Op == ORECV)
- OSELRECV2 // List = <-Right.Left: (appears as .Left of OCASE; count(List) == 2, Right.Op == ORECV)
- OIOTA // iota
- OREAL // real(Left)
- OIMAG // imag(Left)
- OCOMPLEX // complex(Left, Right) or complex(List[0]) where List[0] is a 2-result function call
- OALIGNOF // unsafe.Alignof(Left)
- OOFFSETOF // unsafe.Offsetof(Left)
- OSIZEOF // unsafe.Sizeof(Left)
-
- // statements
- OBLOCK // { List } (block of code)
- OBREAK // break [Sym]
- // OCASE: case List: Nbody (List==nil means default)
- // For OTYPESW, List is a OTYPE node for the specified type (or OLITERAL
- // for nil), and, if a type-switch variable is specified, Rlist is an
- // ONAME for the version of the type-switch variable with the specified
- // type.
- OCASE
- OCONTINUE // continue [Sym]
- ODEFER // defer Left (Left must be call)
- OEMPTY // no-op (empty statement)
- OFALL // fallthrough
- OFOR // for Ninit; Left; Right { Nbody }
- // OFORUNTIL is like OFOR, but the test (Left) is applied after the body:
- // Ninit
- // top: { Nbody } // Execute the body at least once
- // cont: Right
- // if Left { // And then test the loop condition
- // List // Before looping to top, execute List
- // goto top
- // }
- // OFORUNTIL is created by walk. There's no way to write this in Go code.
- OFORUNTIL
- OGOTO // goto Sym
- OIF // if Ninit; Left { Nbody } else { Rlist }
- OLABEL // Sym:
- OGO // go Left (Left must be call)
- ORANGE // for List = range Right { Nbody }
- ORETURN // return List
- OSELECT // select { List } (List is list of OCASE)
- OSWITCH // switch Ninit; Left { List } (List is a list of OCASE)
- // OTYPESW: Left := Right.(type) (appears as .Left of OSWITCH)
- // Left is nil if there is no type-switch variable
- OTYPESW
-
- // types
- OTCHAN // chan int
- OTMAP // map[string]int
- OTSTRUCT // struct{}
- OTINTER // interface{}
- // OTFUNC: func() - Left is receiver field, List is list of param fields, Rlist is
- // list of result fields.
- OTFUNC
- OTARRAY // []int, [8]int, [N]int or [...]int
-
- // misc
- ODDD // func f(args ...int) or f(l...) or var a = [...]int{0, 1, 2}.
- OINLCALL // intermediary representation of an inlined call.
- OEFACE // itable and data words of an empty-interface value.
- OITAB // itable word of an interface value.
- OIDATA // data word of an interface value in Left
- OSPTR // base pointer of a slice or string.
- OCLOSUREVAR // variable reference at beginning of closure function
- OCFUNC // reference to c function pointer (not go func value)
- OCHECKNIL // emit code to ensure pointer/interface not nil
- OVARDEF // variable is about to be fully initialized
- OVARKILL // variable is dead
- OVARLIVE // variable is alive
- ORESULT // result of a function call; Xoffset is stack offset
- OINLMARK // start of an inlined body, with file/line of caller. Xoffset is an index into the inline tree.
-
- // arch-specific opcodes
- ORETJMP // return to other function
- OGETG // runtime.getg() (read g pointer)
-
- OEND
-)
-
-// Nodes is a pointer to a slice of *Node.
-// For fields that are not used in most nodes, this is used instead of
-// a slice to save space.
-type Nodes struct{ slice *[]*Node }
-
-// asNodes returns a slice of *Node as a Nodes value.
-func asNodes(s []*Node) Nodes {
- return Nodes{&s}
-}
-
-// Slice returns the entries in Nodes as a slice.
-// Changes to the slice entries (as in s[i] = n) will be reflected in
-// the Nodes.
-func (n Nodes) Slice() []*Node {
- if n.slice == nil {
- return nil
- }
- return *n.slice
-}
-
-// Len returns the number of entries in Nodes.
-func (n Nodes) Len() int {
- if n.slice == nil {
- return 0
- }
- return len(*n.slice)
-}
-
-// Index returns the i'th element of Nodes.
-// It panics if n does not have at least i+1 elements.
-func (n Nodes) Index(i int) *Node {
- return (*n.slice)[i]
-}
-
-// First returns the first element of Nodes (same as n.Index(0)).
-// It panics if n has no elements.
-func (n Nodes) First() *Node {
- return (*n.slice)[0]
-}
-
-// Second returns the second element of Nodes (same as n.Index(1)).
-// It panics if n has fewer than two elements.
-func (n Nodes) Second() *Node {
- return (*n.slice)[1]
-}
-
-// Set sets n to a slice.
-// This takes ownership of the slice.
-func (n *Nodes) Set(s []*Node) {
- if len(s) == 0 {
- n.slice = nil
- } else {
- // Copy s and take address of t rather than s to avoid
- // allocation in the case where len(s) == 0 (which is
- // over 3x more common, dynamically, for make.bash).
- t := s
- n.slice = &t
- }
-}
-
-// Set1 sets n to a slice containing a single node.
-func (n *Nodes) Set1(n1 *Node) {
- n.slice = &[]*Node{n1}
-}
-
-// Set2 sets n to a slice containing two nodes.
-func (n *Nodes) Set2(n1, n2 *Node) {
- n.slice = &[]*Node{n1, n2}
-}
-
-// Set3 sets n to a slice containing three nodes.
-func (n *Nodes) Set3(n1, n2, n3 *Node) {
- n.slice = &[]*Node{n1, n2, n3}
-}
-
-// MoveNodes sets n to the contents of n2, then clears n2.
-func (n *Nodes) MoveNodes(n2 *Nodes) {
- n.slice = n2.slice
- n2.slice = nil
-}
-
-// SetIndex sets the i'th element of Nodes to node.
-// It panics if n does not have at least i+1 elements.
-func (n Nodes) SetIndex(i int, node *Node) {
- (*n.slice)[i] = node
-}
-
-// SetFirst sets the first element of Nodes to node.
-// It panics if n does not have at least one elements.
-func (n Nodes) SetFirst(node *Node) {
- (*n.slice)[0] = node
-}
-
-// SetSecond sets the second element of Nodes to node.
-// It panics if n does not have at least two elements.
-func (n Nodes) SetSecond(node *Node) {
- (*n.slice)[1] = node
-}
-
-// Addr returns the address of the i'th element of Nodes.
-// It panics if n does not have at least i+1 elements.
-func (n Nodes) Addr(i int) **Node {
- return &(*n.slice)[i]
-}
-
-// Append appends entries to Nodes.
-func (n *Nodes) Append(a ...*Node) {
- if len(a) == 0 {
- return
- }
- if n.slice == nil {
- s := make([]*Node, len(a))
- copy(s, a)
- n.slice = &s
- return
- }
- *n.slice = append(*n.slice, a...)
-}
-
-// Prepend prepends entries to Nodes.
-// If a slice is passed in, this will take ownership of it.
-func (n *Nodes) Prepend(a ...*Node) {
- if len(a) == 0 {
- return
- }
- if n.slice == nil {
- n.slice = &a
- } else {
- *n.slice = append(a, *n.slice...)
- }
-}
-
-// AppendNodes appends the contents of *n2 to n, then clears n2.
-func (n *Nodes) AppendNodes(n2 *Nodes) {
- switch {
- case n2.slice == nil:
- case n.slice == nil:
- n.slice = n2.slice
- default:
- *n.slice = append(*n.slice, *n2.slice...)
- }
- n2.slice = nil
-}
-
-// inspect invokes f on each node in an AST in depth-first order.
-// If f(n) returns false, inspect skips visiting n's children.
-func inspect(n *Node, f func(*Node) bool) {
- if n == nil || !f(n) {
- return
- }
- inspectList(n.Ninit, f)
- inspect(n.Left, f)
- inspect(n.Right, f)
- inspectList(n.List, f)
- inspectList(n.Nbody, f)
- inspectList(n.Rlist, f)
-}
-
-func inspectList(l Nodes, f func(*Node) bool) {
- for _, n := range l.Slice() {
- inspect(n, f)
- }
-}
-
-// nodeQueue is a FIFO queue of *Node. The zero value of nodeQueue is
-// a ready-to-use empty queue.
-type nodeQueue struct {
- ring []*Node
- head, tail int
-}
-
-// empty reports whether q contains no Nodes.
-func (q *nodeQueue) empty() bool {
- return q.head == q.tail
-}
-
-// pushRight appends n to the right of the queue.
-func (q *nodeQueue) pushRight(n *Node) {
- if len(q.ring) == 0 {
- q.ring = make([]*Node, 16)
- } else if q.head+len(q.ring) == q.tail {
- // Grow the ring.
- nring := make([]*Node, len(q.ring)*2)
- // Copy the old elements.
- part := q.ring[q.head%len(q.ring):]
- if q.tail-q.head <= len(part) {
- part = part[:q.tail-q.head]
- copy(nring, part)
- } else {
- pos := copy(nring, part)
- copy(nring[pos:], q.ring[:q.tail%len(q.ring)])
- }
- q.ring, q.head, q.tail = nring, 0, q.tail-q.head
- }
-
- q.ring[q.tail%len(q.ring)] = n
- q.tail++
-}
-
-// popLeft pops a node from the left of the queue. It panics if q is
-// empty.
-func (q *nodeQueue) popLeft() *Node {
- if q.empty() {
- panic("dequeue empty")
- }
- n := q.ring[q.head%len(q.ring)]
- q.head++
- return n
-}
-
-// NodeSet is a set of Nodes.
-type NodeSet map[*Node]struct{}
-
-// Has reports whether s contains n.
-func (s NodeSet) Has(n *Node) bool {
- _, isPresent := s[n]
- return isPresent
-}
-
-// Add adds n to s.
-func (s *NodeSet) Add(n *Node) {
- if *s == nil {
- *s = make(map[*Node]struct{})
- }
- (*s)[n] = struct{}{}
-}
-
-// Sorted returns s sorted according to less.
-func (s NodeSet) Sorted(less func(*Node, *Node) bool) []*Node {
- var res []*Node
- for n := range s {
- res = append(res, n)
- }
- sort.Slice(res, func(i, j int) bool { return less(res[i], res[j]) })
- return res
-}
"time"
)
+var timings Timings
+
// Timings collects the execution times of labeled phases
// which are added trough a sequence of Start/Stop calls.
// Events may be associated with each phase via AddEvent.
import (
"os"
tracepkg "runtime/trace"
+
+ "cmd/compile/internal/base"
)
func init() {
func traceHandlerGo17(traceprofile string) {
f, err := os.Create(traceprofile)
if err != nil {
- Fatalf("%v", err)
+ base.Fatalf("%v", err)
}
if err := tracepkg.Start(f); err != nil {
- Fatalf("%v", err)
+ base.Fatalf("%v", err)
}
- atExit(tracepkg.Stop)
+ base.AtExit(tracepkg.Stop)
}
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"fmt"
+ "go/constant"
+ "go/token"
"strings"
)
+var (
+ NeedFuncSym = func(*types.Sym) {}
+ NeedITab = func(t, itype *types.Type) {}
+ NeedRuntimeType = func(*types.Type) {}
+)
+
+func TypecheckInit() {
+ types.Widthptr = Widthptr
+ types.Dowidth = dowidth
+ initUniverse()
+ dclcontext = ir.PEXTERN
+ timings.Start("fe", "loadsys")
+ loadsys()
+}
+
+func TypecheckPackage() {
+ finishUniverse()
+
+ typecheckok = true
+
+ // Process top-level declarations in phases.
+
+ // Phase 1: const, type, and names and types of funcs.
+ // This will gather all the information about types
+ // and methods but doesn't depend on any of it.
+ //
+ // We also defer type alias declarations until phase 2
+ // to avoid cycles like #18640.
+ // TODO(gri) Remove this again once we have a fix for #25838.
+
+ // Don't use range--typecheck can add closures to Target.Decls.
+ timings.Start("fe", "typecheck", "top1")
+ for i := 0; i < len(Target.Decls); i++ {
+ n := Target.Decls[i]
+ if op := n.Op(); op != ir.ODCL && op != ir.OAS && op != ir.OAS2 && (op != ir.ODCLTYPE || !n.(*ir.Decl).Left().Name().Alias()) {
+ Target.Decls[i] = typecheck(n, ctxStmt)
+ }
+ }
+
+ // Phase 2: Variable assignments.
+ // To check interface assignments, depends on phase 1.
+
+ // Don't use range--typecheck can add closures to Target.Decls.
+ timings.Start("fe", "typecheck", "top2")
+ for i := 0; i < len(Target.Decls); i++ {
+ n := Target.Decls[i]
+ if op := n.Op(); op == ir.ODCL || op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.(*ir.Decl).Left().Name().Alias() {
+ Target.Decls[i] = typecheck(n, ctxStmt)
+ }
+ }
+
+ // Phase 3: Type check function bodies.
+ // Don't use range--typecheck can add closures to Target.Decls.
+ timings.Start("fe", "typecheck", "func")
+ var fcount int64
+ for i := 0; i < len(Target.Decls); i++ {
+ n := Target.Decls[i]
+ if n.Op() == ir.ODCLFUNC {
+ TypecheckFuncBody(n.(*ir.Func))
+ fcount++
+ }
+ }
+
+ // Phase 4: Check external declarations.
+ // TODO(mdempsky): This should be handled when type checking their
+ // corresponding ODCL nodes.
+ timings.Start("fe", "typecheck", "externdcls")
+ for i, n := range Target.Externs {
+ if n.Op() == ir.ONAME {
+ Target.Externs[i] = typecheck(Target.Externs[i], ctxExpr)
+ }
+ }
+
+ // Phase 5: With all user code type-checked, it's now safe to verify map keys.
+ checkMapKeys()
+
+ // Phase 6: Decide how to capture closed variables.
+ // This needs to run before escape analysis,
+ // because variables captured by value do not escape.
+ timings.Start("fe", "capturevars")
+ for _, n := range Target.Decls {
+ if n.Op() == ir.ODCLFUNC {
+ n := n.(*ir.Func)
+ if n.Func().OClosure != nil {
+ Curfn = n
+ capturevars(n)
+ }
+ }
+ }
+ capturevarscomplete = true
+ Curfn = nil
+
+ if base.Debug.TypecheckInl != 0 {
+ // Typecheck imported function bodies if Debug.l > 1,
+ // otherwise lazily when used or re-exported.
+ TypecheckImports()
+ }
+}
+
+func TypecheckAssignExpr(n ir.Node) ir.Node { return typecheck(n, ctxExpr|ctxAssign) }
+func TypecheckExpr(n ir.Node) ir.Node { return typecheck(n, ctxExpr) }
+func TypecheckStmt(n ir.Node) ir.Node { return typecheck(n, ctxStmt) }
+
+func TypecheckExprs(exprs []ir.Node) { typecheckslice(exprs, ctxExpr) }
+func TypecheckStmts(stmts []ir.Node) { typecheckslice(stmts, ctxStmt) }
+
+func TypecheckCall(call *ir.CallExpr) {
+ t := call.X.Type()
+ if t == nil {
+ panic("misuse of Call")
+ }
+ ctx := ctxStmt
+ if t.NumResults() > 0 {
+ ctx = ctxExpr | ctxMultiOK
+ }
+ if typecheck(call, ctx) != call {
+ panic("bad typecheck")
+ }
+}
+
+func TypecheckCallee(n ir.Node) ir.Node {
+ return typecheck(n, ctxExpr|ctxCallee)
+}
+
+func TypecheckFuncBody(n *ir.Func) {
+ Curfn = n
+ decldepth = 1
+ errorsBefore := base.Errors()
+ typecheckslice(n.Body(), ctxStmt)
+ checkreturn(n)
+ if base.Errors() > errorsBefore {
+ n.PtrBody().Set(nil) // type errors; do not compile
+ }
+ // Now that we've checked whether n terminates,
+ // we can eliminate some obviously dead code.
+ deadcode(n)
+}
+
+var importlist []*ir.Func
+
+func TypecheckImports() {
+ for _, n := range importlist {
+ if n.Inl != nil {
+ typecheckinl(n)
+ }
+ }
+}
+
// To enable tracing support (-t flag), set enableTrace to true.
const enableTrace = false
-var trace bool
var traceIndent []byte
var skipDowidthForTracing bool
-func tracePrint(title string, n *Node) func(np **Node) {
+func tracePrint(title string, n ir.Node) func(np *ir.Node) {
indent := traceIndent
// guard against nil
var pos, op string
var tc uint8
if n != nil {
- pos = linestr(n.Pos)
- op = n.Op.String()
+ pos = base.FmtPos(n.Pos())
+ op = n.Op().String()
tc = n.Typecheck()
}
fmt.Printf("%s: %s%s %p %s %v tc=%d\n", pos, indent, title, n, op, n, tc)
traceIndent = append(traceIndent, ". "...)
- return func(np **Node) {
+ return func(np *ir.Node) {
traceIndent = traceIndent[:len(traceIndent)-2]
// if we have a result, use that
var tc uint8
var typ *types.Type
if n != nil {
- pos = linestr(n.Pos)
- op = n.Op.String()
+ pos = base.FmtPos(n.Pos())
+ op = n.Op().String()
tc = n.Typecheck()
- typ = n.Type
+ typ = n.Type()
}
skipDowidthForTracing = true
defer func() { skipDowidthForTracing = false }()
- fmt.Printf("%s: %s=> %p %s %v tc=%d type=%#L\n", pos, indent, n, op, n, tc, typ)
+ fmt.Printf("%s: %s=> %p %s %v tc=%d type=%L\n", pos, indent, n, op, n, tc, typ)
}
}
// marks variables that escape the local frame.
// rewrites n.Op to be more specific in some cases.
-var typecheckdefstack []*Node
+var typecheckdefstack []ir.Node
// resolve ONONAME to definition, if any.
-func resolve(n *Node) (res *Node) {
- if n == nil || n.Op != ONONAME {
+func resolve(n ir.Node) (res ir.Node) {
+ if n == nil || n.Op() != ir.ONONAME {
return n
}
// only trace if there's work to do
- if enableTrace && trace {
+ if enableTrace && base.Flag.LowerT {
defer tracePrint("resolve", n)(&res)
}
- if n.Sym.Pkg != localpkg {
+ if sym := n.Sym(); sym.Pkg != types.LocalPkg {
+ // We might have an ir.Ident from oldname or importDot.
+ if id, ok := n.(*ir.Ident); ok {
+ if pkgName := dotImportRefs[id]; pkgName != nil {
+ pkgName.Used = true
+ }
+ }
+
if inimport {
- Fatalf("recursive inimport")
+ base.Fatalf("recursive inimport")
}
inimport = true
- expandDecl(n)
+ n = expandDecl(n)
inimport = false
return n
}
- r := asNode(n.Sym.Def)
+ r := ir.AsNode(n.Sym().Def)
if r == nil {
return n
}
- if r.Op == OIOTA {
+ if r.Op() == ir.OIOTA {
if x := getIotaValue(); x >= 0 {
return nodintconst(x)
}
return r
}
-func typecheckslice(l []*Node, top int) {
+func typecheckslice(l []ir.Node, top int) {
for i := range l {
l[i] = typecheck(l[i], top)
}
}
var _typekind = []string{
- TINT: "int",
- TUINT: "uint",
- TINT8: "int8",
- TUINT8: "uint8",
- TINT16: "int16",
- TUINT16: "uint16",
- TINT32: "int32",
- TUINT32: "uint32",
- TINT64: "int64",
- TUINT64: "uint64",
- TUINTPTR: "uintptr",
- TCOMPLEX64: "complex64",
- TCOMPLEX128: "complex128",
- TFLOAT32: "float32",
- TFLOAT64: "float64",
- TBOOL: "bool",
- TSTRING: "string",
- TPTR: "pointer",
- TUNSAFEPTR: "unsafe.Pointer",
- TSTRUCT: "struct",
- TINTER: "interface",
- TCHAN: "chan",
- TMAP: "map",
- TARRAY: "array",
- TSLICE: "slice",
- TFUNC: "func",
- TNIL: "nil",
- TIDEAL: "untyped number",
+ types.TINT: "int",
+ types.TUINT: "uint",
+ types.TINT8: "int8",
+ types.TUINT8: "uint8",
+ types.TINT16: "int16",
+ types.TUINT16: "uint16",
+ types.TINT32: "int32",
+ types.TUINT32: "uint32",
+ types.TINT64: "int64",
+ types.TUINT64: "uint64",
+ types.TUINTPTR: "uintptr",
+ types.TCOMPLEX64: "complex64",
+ types.TCOMPLEX128: "complex128",
+ types.TFLOAT32: "float32",
+ types.TFLOAT64: "float64",
+ types.TBOOL: "bool",
+ types.TSTRING: "string",
+ types.TPTR: "pointer",
+ types.TUNSAFEPTR: "unsafe.Pointer",
+ types.TSTRUCT: "struct",
+ types.TINTER: "interface",
+ types.TCHAN: "chan",
+ types.TMAP: "map",
+ types.TARRAY: "array",
+ types.TSLICE: "slice",
+ types.TFUNC: "func",
+ types.TNIL: "nil",
+ types.TIDEAL: "untyped number",
}
func typekind(t *types.Type) string {
if t.IsUntyped() {
return fmt.Sprintf("%v", t)
}
- et := t.Etype
+ et := t.Kind()
if int(et) < len(_typekind) {
s := _typekind[et]
if s != "" {
return fmt.Sprintf("etype=%d", et)
}
-func cycleFor(start *Node) []*Node {
+func cycleFor(start ir.Node) []ir.Node {
// Find the start node in typecheck_tcstack.
// We know that it must exist because each time we mark
// a node with n.SetTypecheck(2) we push it on the stack,
}
// collect all nodes with same Op
- var cycle []*Node
+ var cycle []ir.Node
for _, n := range typecheck_tcstack[i:] {
- if n.Op == start.Op {
+ if n.Op() == start.Op() {
cycle = append(cycle, n)
}
}
return cycle
}
-func cycleTrace(cycle []*Node) string {
+func cycleTrace(cycle []ir.Node) string {
var s string
for i, n := range cycle {
- s += fmt.Sprintf("\n\t%v: %v uses %v", n.Line(), n, cycle[(i+1)%len(cycle)])
+ s += fmt.Sprintf("\n\t%v: %v uses %v", ir.Line(n), n, cycle[(i+1)%len(cycle)])
}
return s
}
-var typecheck_tcstack []*Node
+var typecheck_tcstack []ir.Node
+
+func typecheckFunc(fn *ir.Func) {
+ new := typecheck(fn, ctxStmt)
+ if new != fn {
+ base.Fatalf("typecheck changed func")
+ }
+}
+
+func typecheckNtype(n ir.Ntype) ir.Ntype {
+ return typecheck(n, ctxType).(ir.Ntype)
+}
// typecheck type checks node n.
// The result of typecheck MUST be assigned back to n, e.g.
// n.Left = typecheck(n.Left, top)
-func typecheck(n *Node, top int) (res *Node) {
+func typecheck(n ir.Node, top int) (res ir.Node) {
// cannot type check until all the source has been parsed
if !typecheckok {
- Fatalf("early typecheck")
+ base.Fatalf("early typecheck")
}
if n == nil {
}
// only trace if there's work to do
- if enableTrace && trace {
+ if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheck", n)(&res)
}
lno := setlineno(n)
// Skip over parens.
- for n.Op == OPAREN {
- n = n.Left
+ for n.Op() == ir.OPAREN {
+ n = n.(*ir.ParenExpr).Left()
}
// Resolve definition of name and value of iota lazily.
// Skip typecheck if already done.
// But re-typecheck ONAME/OTYPE/OLITERAL/OPACK node in case context has changed.
if n.Typecheck() == 1 {
- switch n.Op {
- case ONAME, OTYPE, OLITERAL, OPACK:
+ switch n.Op() {
+ case ir.ONAME, ir.OTYPE, ir.OLITERAL, ir.OPACK:
break
default:
- lineno = lno
+ base.Pos = lno
return n
}
}
if n.Typecheck() == 2 {
// Typechecking loop. Trying printing a meaningful message,
// otherwise a stack trace of typechecking.
- switch n.Op {
+ switch n.Op() {
// We can already diagnose variables used as types.
- case ONAME:
+ case ir.ONAME:
if top&(ctxExpr|ctxType) == ctxType {
- yyerror("%v is not a type", n)
+ base.Errorf("%v is not a type", n)
}
- case OTYPE:
+ case ir.OTYPE:
// Only report a type cycle if we are expecting a type.
// Otherwise let other code report an error.
if top&ctxType == ctxType {
// are substituted.
cycle := cycleFor(n)
for _, n1 := range cycle {
- if n1.Name != nil && !n1.Name.Param.Alias() {
+ if n1.Name() != nil && !n1.Name().Alias() {
// Cycle is ok. But if n is an alias type and doesn't
// have a type yet, we have a recursive type declaration
// with aliases that we can't handle properly yet.
// Report an error rather than crashing later.
- if n.Name != nil && n.Name.Param.Alias() && n.Type == nil {
- lineno = n.Pos
- Fatalf("cannot handle alias type declaration (issue #25838): %v", n)
+ if n.Name() != nil && n.Name().Alias() && n.Type() == nil {
+ base.Pos = n.Pos()
+ base.Fatalf("cannot handle alias type declaration (issue #25838): %v", n)
}
- lineno = lno
+ base.Pos = lno
return n
}
}
- yyerrorl(n.Pos, "invalid recursive type alias %v%s", n, cycleTrace(cycle))
+ base.ErrorfAt(n.Pos(), "invalid recursive type alias %v%s", n, cycleTrace(cycle))
}
- case OLITERAL:
+ case ir.OLITERAL:
if top&(ctxExpr|ctxType) == ctxType {
- yyerror("%v is not a type", n)
+ base.Errorf("%v is not a type", n)
break
}
- yyerrorl(n.Pos, "constant definition loop%s", cycleTrace(cycleFor(n)))
+ base.ErrorfAt(n.Pos(), "constant definition loop%s", cycleTrace(cycleFor(n)))
}
- if nsavederrors+nerrors == 0 {
+ if base.Errors() == 0 {
var trace string
for i := len(typecheck_tcstack) - 1; i >= 0; i-- {
x := typecheck_tcstack[i]
- trace += fmt.Sprintf("\n\t%v %v", x.Line(), x)
+ trace += fmt.Sprintf("\n\t%v %v", ir.Line(x), x)
}
- yyerror("typechecking loop involving %v%s", n, trace)
+ base.Errorf("typechecking loop involving %v%s", n, trace)
}
- lineno = lno
+ base.Pos = lno
return n
}
- n.SetTypecheck(2)
-
typecheck_tcstack = append(typecheck_tcstack, n)
- n = typecheck1(n, top)
+ n.SetTypecheck(2)
+ n = typecheck1(n, top)
n.SetTypecheck(1)
last := len(typecheck_tcstack) - 1
typecheck_tcstack[last] = nil
typecheck_tcstack = typecheck_tcstack[:last]
- lineno = lno
+ _, isExpr := n.(ir.Expr)
+ _, isStmt := n.(ir.Stmt)
+ isMulti := false
+ switch n.Op() {
+ case ir.OCALLFUNC, ir.OCALLINTER, ir.OCALLMETH:
+ if t := n.Left().Type(); t != nil && t.Kind() == types.TFUNC {
+ nr := t.NumResults()
+ isMulti = nr > 1
+ if nr == 0 {
+ isExpr = false
+ }
+ }
+ case ir.OAPPEND:
+ // Must be used (and not BinaryExpr/UnaryExpr).
+ isStmt = false
+ case ir.OCLOSE, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTN, ir.OVARKILL, ir.OVARLIVE:
+ // Must not be used.
+ isExpr = false
+ isStmt = true
+ case ir.OCOPY, ir.ORECOVER, ir.ORECV:
+ // Can be used or not.
+ isStmt = true
+ }
+
+ t := n.Type()
+ if t != nil && !t.IsFuncArgStruct() && n.Op() != ir.OTYPE {
+ switch t.Kind() {
+ case types.TFUNC, // might have TANY; wait until it's called
+ types.TANY, types.TFORW, types.TIDEAL, types.TNIL, types.TBLANK:
+ break
+
+ default:
+ checkwidth(t)
+ }
+ }
+ if t != nil {
+ n = evalConst(n)
+ t = n.Type()
+ }
+
+ // TODO(rsc): Lots of the complexity here is because typecheck can
+ // see OTYPE, ONAME, and OLITERAL nodes multiple times.
+ // Once we make the IR a proper tree, we should be able to simplify
+ // this code a bit, especially the final case.
+ switch {
+ case top&(ctxStmt|ctxExpr) == ctxExpr && !isExpr && n.Op() != ir.OTYPE && !isMulti:
+ if !n.Diag() {
+ base.Errorf("%v used as value", n)
+ n.SetDiag(true)
+ }
+ if t != nil {
+ n.SetType(nil)
+ }
+
+ case top&ctxType == 0 && n.Op() == ir.OTYPE && t != nil:
+ if !n.Type().Broke() {
+ base.Errorf("type %v is not an expression", n.Type())
+ }
+ n.SetType(nil)
+
+ case top&(ctxStmt|ctxExpr) == ctxStmt && !isStmt && t != nil:
+ if !n.Diag() {
+ base.Errorf("%v evaluated but not used", n)
+ n.SetDiag(true)
+ }
+ n.SetType(nil)
+
+ case top&(ctxType|ctxExpr) == ctxType && n.Op() != ir.OTYPE && n.Op() != ir.ONONAME && (t != nil || n.Op() == ir.ONAME):
+ base.Errorf("%v is not a type", n)
+ if t != nil {
+ n.SetType(nil)
+ }
+
+ }
+
+ base.Pos = lno
return n
}
// value of type int (see also checkmake for comparison).
// The result of indexlit MUST be assigned back to n, e.g.
// n.Left = indexlit(n.Left)
-func indexlit(n *Node) *Node {
- if n != nil && n.Type != nil && n.Type.Etype == TIDEAL {
- return defaultlit(n, types.Types[TINT])
+func indexlit(n ir.Node) ir.Node {
+ if n != nil && n.Type() != nil && n.Type().Kind() == types.TIDEAL {
+ return defaultlit(n, types.Types[types.TINT])
}
return n
}
-// The result of typecheck1 MUST be assigned back to n, e.g.
-// n.Left = typecheck1(n.Left, top)
-func typecheck1(n *Node, top int) (res *Node) {
- if enableTrace && trace {
+// typecheck1 should ONLY be called from typecheck.
+func typecheck1(n ir.Node, top int) (res ir.Node) {
+ if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheck1", n)(&res)
}
- switch n.Op {
- case OLITERAL, ONAME, ONONAME, OTYPE:
- if n.Sym == nil {
- break
+ switch n.Op() {
+ case ir.OLITERAL, ir.ONAME, ir.ONONAME, ir.OTYPE:
+ if n.Sym() == nil {
+ return n
}
- if n.Op == ONAME && n.SubOp() != 0 && top&ctxCallee == 0 {
- yyerror("use of builtin %v not in function call", n.Sym)
- n.Type = nil
- return n
+ if n.Op() == ir.ONAME {
+ if n.SubOp() != 0 && top&ctxCallee == 0 {
+ base.Errorf("use of builtin %v not in function call", n.Sym())
+ n.SetType(nil)
+ return n
+ }
}
typecheckdef(n)
- if n.Op == ONONAME {
- n.Type = nil
+ if n.Op() == ir.ONONAME {
+ n.SetType(nil)
return n
}
}
- ok := 0
- switch n.Op {
- // until typecheck is complete, do nothing.
+ switch n.Op() {
default:
- Dump("typecheck", n)
-
- Fatalf("typecheck %v", n.Op)
+ ir.Dump("typecheck", n)
+ base.Fatalf("typecheck %v", n.Op())
+ panic("unreachable")
// names
- case OLITERAL:
- ok |= ctxExpr
-
- if n.Type == nil && n.Val().Ctype() == CTSTR {
- n.Type = types.UntypedString
+ case ir.OLITERAL:
+ if n.Type() == nil && n.Val().Kind() == constant.String {
+ base.Fatalf("string literal missing type")
}
+ return n
- case ONONAME:
- ok |= ctxExpr
+ case ir.ONIL, ir.ONONAME:
+ return n
- case ONAME:
- if n.Name.Decldepth == 0 {
- n.Name.Decldepth = decldepth
+ case ir.ONAME:
+ if n.Name().Decldepth == 0 {
+ n.Name().Decldepth = decldepth
}
if n.SubOp() != 0 {
- ok |= ctxCallee
- break
+ return n
}
-
if top&ctxAssign == 0 {
// not a write to the variable
- if n.isBlank() {
- yyerror("cannot use _ as value")
- n.Type = nil
+ if ir.IsBlank(n) {
+ base.Errorf("cannot use _ as value")
+ n.SetType(nil)
return n
}
-
- n.Name.SetUsed(true)
+ n.Name().SetUsed(true)
}
+ return n
- ok |= ctxExpr
-
- case OPACK:
- yyerror("use of package %v without selector", n.Sym)
- n.Type = nil
+ case ir.ONAMEOFFSET:
+ // type already set
return n
- case ODDD:
- break
+ case ir.OPACK:
+ base.Errorf("use of package %v without selector", n.Sym())
+ n.SetType(nil)
+ return n
// types (ODEREF is with exprs)
- case OTYPE:
- ok |= ctxType
-
- if n.Type == nil {
+ case ir.OTYPE:
+ if n.Type() == nil {
return n
}
+ return n
- case OTARRAY:
- ok |= ctxType
- r := typecheck(n.Right, ctxType)
- if r.Type == nil {
- n.Type = nil
+ case ir.OTSLICE:
+ n := n.(*ir.SliceType)
+ n.Elem = typecheck(n.Elem, ctxType)
+ if n.Elem.Type() == nil {
return n
}
+ t := types.NewSlice(n.Elem.Type())
+ n.SetOTYPE(t)
+ checkwidth(t)
+ return n
- var t *types.Type
- if n.Left == nil {
- t = types.NewSlice(r.Type)
- } else if n.Left.Op == ODDD {
+ case ir.OTARRAY:
+ n := n.(*ir.ArrayType)
+ n.Elem = typecheck(n.Elem, ctxType)
+ if n.Elem.Type() == nil {
+ return n
+ }
+ if n.Len == nil { // [...]T
if !n.Diag() {
n.SetDiag(true)
- yyerror("use of [...] array outside of array literal")
+ base.Errorf("use of [...] array outside of array literal")
}
- n.Type = nil
return n
- } else {
- n.Left = indexlit(typecheck(n.Left, ctxExpr))
- l := n.Left
- if consttype(l) != CTINT {
- switch {
- case l.Type == nil:
- // Error already reported elsewhere.
- case l.Type.IsInteger() && l.Op != OLITERAL:
- yyerror("non-constant array bound %v", l)
- default:
- yyerror("invalid array bound %v", l)
- }
- n.Type = nil
- return n
+ }
+ n.Len = indexlit(typecheck(n.Len, ctxExpr))
+ size := n.Len
+ if ir.ConstType(size) != constant.Int {
+ switch {
+ case size.Type() == nil:
+ // Error already reported elsewhere.
+ case size.Type().IsInteger() && size.Op() != ir.OLITERAL:
+ base.Errorf("non-constant array bound %v", size)
+ default:
+ base.Errorf("invalid array bound %v", size)
}
+ return n
+ }
- v := l.Val()
- if doesoverflow(v, types.Types[TINT]) {
- yyerror("array bound is too large")
- n.Type = nil
- return n
- }
+ v := size.Val()
+ if doesoverflow(v, types.Types[types.TINT]) {
+ base.Errorf("array bound is too large")
+ return n
+ }
- bound := v.U.(*Mpint).Int64()
- if bound < 0 {
- yyerror("array bound must be non-negative")
- n.Type = nil
- return n
- }
- t = types.NewArray(r.Type, bound)
+ if constant.Sign(v) < 0 {
+ base.Errorf("array bound must be non-negative")
+ return n
}
- setTypeNode(n, t)
- n.Left = nil
- n.Right = nil
+ bound, _ := constant.Int64Val(v)
+ t := types.NewArray(n.Elem.Type(), bound)
+ n.SetOTYPE(t)
checkwidth(t)
+ return n
- case OTMAP:
- ok |= ctxType
- n.Left = typecheck(n.Left, ctxType)
- n.Right = typecheck(n.Right, ctxType)
- l := n.Left
- r := n.Right
- if l.Type == nil || r.Type == nil {
- n.Type = nil
+ case ir.OTMAP:
+ n := n.(*ir.MapType)
+ n.Key = typecheck(n.Key, ctxType)
+ n.Elem = typecheck(n.Elem, ctxType)
+ l := n.Key
+ r := n.Elem
+ if l.Type() == nil || r.Type() == nil {
return n
}
- if l.Type.NotInHeap() {
- yyerror("incomplete (or unallocatable) map key not allowed")
+ if l.Type().NotInHeap() {
+ base.Errorf("incomplete (or unallocatable) map key not allowed")
}
- if r.Type.NotInHeap() {
- yyerror("incomplete (or unallocatable) map value not allowed")
+ if r.Type().NotInHeap() {
+ base.Errorf("incomplete (or unallocatable) map value not allowed")
}
-
- setTypeNode(n, types.NewMap(l.Type, r.Type))
+ n.SetOTYPE(types.NewMap(l.Type(), r.Type()))
mapqueue = append(mapqueue, n) // check map keys when all types are settled
- n.Left = nil
- n.Right = nil
+ return n
- case OTCHAN:
- ok |= ctxType
- n.Left = typecheck(n.Left, ctxType)
- l := n.Left
- if l.Type == nil {
- n.Type = nil
+ case ir.OTCHAN:
+ n := n.(*ir.ChanType)
+ n.Elem = typecheck(n.Elem, ctxType)
+ l := n.Elem
+ if l.Type() == nil {
return n
}
- if l.Type.NotInHeap() {
- yyerror("chan of incomplete (or unallocatable) type not allowed")
+ if l.Type().NotInHeap() {
+ base.Errorf("chan of incomplete (or unallocatable) type not allowed")
}
+ n.SetOTYPE(types.NewChan(l.Type(), n.Dir))
+ return n
- setTypeNode(n, types.NewChan(l.Type, n.TChanDir()))
- n.Left = nil
- n.ResetAux()
-
- case OTSTRUCT:
- ok |= ctxType
- setTypeNode(n, tostruct(n.List.Slice()))
- n.List.Set(nil)
+ case ir.OTSTRUCT:
+ n := n.(*ir.StructType)
+ n.SetOTYPE(tostruct(n.Fields))
+ return n
- case OTINTER:
- ok |= ctxType
- setTypeNode(n, tointerface(n.List.Slice()))
+ case ir.OTINTER:
+ n := n.(*ir.InterfaceType)
+ n.SetOTYPE(tointerface(n.Methods))
+ return n
- case OTFUNC:
- ok |= ctxType
- setTypeNode(n, functype(n.Left, n.List.Slice(), n.Rlist.Slice()))
- n.Left = nil
- n.List.Set(nil)
- n.Rlist.Set(nil)
+ case ir.OTFUNC:
+ n := n.(*ir.FuncType)
+ n.SetOTYPE(functype(n.Recv, n.Params, n.Results))
+ return n
// type or expr
- case ODEREF:
- n.Left = typecheck(n.Left, ctxExpr|ctxType)
- l := n.Left
- t := l.Type
+ case ir.ODEREF:
+ n := n.(*ir.StarExpr)
+ n.X = typecheck(n.X, ctxExpr|ctxType)
+ l := n.X
+ t := l.Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
- if l.Op == OTYPE {
- ok |= ctxType
- setTypeNode(n, types.NewPtr(l.Type))
- n.Left = nil
+ if l.Op() == ir.OTYPE {
+ n.SetOTYPE(types.NewPtr(l.Type()))
// Ensure l.Type gets dowidth'd for the backend. Issue 20174.
- checkwidth(l.Type)
- break
+ checkwidth(l.Type())
+ return n
}
if !t.IsPtr() {
if top&(ctxExpr|ctxStmt) != 0 {
- yyerror("invalid indirect of %L", n.Left)
- n.Type = nil
+ base.Errorf("invalid indirect of %L", n.Left())
+ n.SetType(nil)
return n
}
-
- break
+ base.Errorf("%v is not a type", l)
+ return n
}
- ok |= ctxExpr
- n.Type = t.Elem()
+ n.SetType(t.Elem())
+ return n
// arithmetic exprs
- case OASOP,
- OADD,
- OAND,
- OANDAND,
- OANDNOT,
- ODIV,
- OEQ,
- OGE,
- OGT,
- OLE,
- OLT,
- OLSH,
- ORSH,
- OMOD,
- OMUL,
- ONE,
- OOR,
- OOROR,
- OSUB,
- OXOR:
- var l *Node
- var op Op
- var r *Node
- if n.Op == OASOP {
- ok |= ctxStmt
- n.Left = typecheck(n.Left, ctxExpr)
- n.Right = typecheck(n.Right, ctxExpr)
- l = n.Left
- r = n.Right
- checkassign(n, n.Left)
- if l.Type == nil || r.Type == nil {
- n.Type = nil
- return n
- }
- if n.Implicit() && !okforarith[l.Type.Etype] {
- yyerror("invalid operation: %v (non-numeric type %v)", n, l.Type)
- n.Type = nil
+ case ir.OASOP,
+ ir.OADD,
+ ir.OAND,
+ ir.OANDAND,
+ ir.OANDNOT,
+ ir.ODIV,
+ ir.OEQ,
+ ir.OGE,
+ ir.OGT,
+ ir.OLE,
+ ir.OLT,
+ ir.OLSH,
+ ir.ORSH,
+ ir.OMOD,
+ ir.OMUL,
+ ir.ONE,
+ ir.OOR,
+ ir.OOROR,
+ ir.OSUB,
+ ir.OXOR:
+ var l, r ir.Node
+ var setLR func()
+ switch n := n.(type) {
+ case *ir.AssignOpStmt:
+ l, r = n.Left(), n.Right()
+ setLR = func() { n.SetLeft(l); n.SetRight(r) }
+ case *ir.BinaryExpr:
+ l, r = n.Left(), n.Right()
+ setLR = func() { n.SetLeft(l); n.SetRight(r) }
+ case *ir.LogicalExpr:
+ l, r = n.Left(), n.Right()
+ setLR = func() { n.SetLeft(l); n.SetRight(r) }
+ }
+ l = typecheck(l, ctxExpr)
+ r = typecheck(r, ctxExpr)
+ setLR()
+ if l.Type() == nil || r.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ op := n.Op()
+ if n.Op() == ir.OASOP {
+ checkassign(n, l)
+ if n.Implicit() && !okforarith[l.Type().Kind()] {
+ base.Errorf("invalid operation: %v (non-numeric type %v)", n, l.Type())
+ n.SetType(nil)
return n
}
// TODO(marvin): Fix Node.EType type union.
op = n.SubOp()
- } else {
- ok |= ctxExpr
- n.Left = typecheck(n.Left, ctxExpr)
- n.Right = typecheck(n.Right, ctxExpr)
- l = n.Left
- r = n.Right
- if l.Type == nil || r.Type == nil {
- n.Type = nil
- return n
- }
- op = n.Op
}
- if op == OLSH || op == ORSH {
- r = defaultlit(r, types.Types[TUINT])
- n.Right = r
- t := r.Type
+ if op == ir.OLSH || op == ir.ORSH {
+ r = defaultlit(r, types.Types[types.TUINT])
+ setLR()
+ t := r.Type()
if !t.IsInteger() {
- yyerror("invalid operation: %v (shift count type %v, must be integer)", n, r.Type)
- n.Type = nil
+ base.Errorf("invalid operation: %v (shift count type %v, must be integer)", n, r.Type())
+ n.SetType(nil)
return n
}
if t.IsSigned() && !langSupported(1, 13, curpkg()) {
- yyerrorv("go1.13", "invalid operation: %v (signed shift count type %v)", n, r.Type)
- n.Type = nil
+ base.ErrorfVers("go1.13", "invalid operation: %v (signed shift count type %v)", n, r.Type())
+ n.SetType(nil)
return n
}
- t = l.Type
- if t != nil && t.Etype != TIDEAL && !t.IsInteger() {
- yyerror("invalid operation: %v (shift of type %v)", n, t)
- n.Type = nil
+ t = l.Type()
+ if t != nil && t.Kind() != types.TIDEAL && !t.IsInteger() {
+ base.Errorf("invalid operation: %v (shift of type %v)", n, t)
+ n.SetType(nil)
return n
}
// no defaultlit for left
// the outer context gives the type
- n.Type = l.Type
- if (l.Type == types.UntypedFloat || l.Type == types.UntypedComplex) && r.Op == OLITERAL {
- n.Type = types.UntypedInt
+ n.SetType(l.Type())
+ if (l.Type() == types.UntypedFloat || l.Type() == types.UntypedComplex) && r.Op() == ir.OLITERAL {
+ n.SetType(types.UntypedInt)
}
-
- break
+ return n
}
// For "x == x && len(s)", it's better to report that "len(s)" (type int)
// can't be used with "&&" than to report that "x == x" (type untyped bool)
// can't be converted to int (see issue #41500).
- if n.Op == OANDAND || n.Op == OOROR {
- if !n.Left.Type.IsBoolean() {
- yyerror("invalid operation: %v (operator %v not defined on %s)", n, n.Op, typekind(n.Left.Type))
- n.Type = nil
+ if n.Op() == ir.OANDAND || n.Op() == ir.OOROR {
+ if !n.Left().Type().IsBoolean() {
+ base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(n.Left().Type()))
+ n.SetType(nil)
return n
}
- if !n.Right.Type.IsBoolean() {
- yyerror("invalid operation: %v (operator %v not defined on %s)", n, n.Op, typekind(n.Right.Type))
- n.Type = nil
+ if !n.Right().Type().IsBoolean() {
+ base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(n.Right().Type()))
+ n.SetType(nil)
return n
}
}
// ideal mixed with non-ideal
l, r = defaultlit2(l, r, false)
+ setLR()
- n.Left = l
- n.Right = r
- if l.Type == nil || r.Type == nil {
- n.Type = nil
+ if l.Type() == nil || r.Type() == nil {
+ n.SetType(nil)
return n
}
- t := l.Type
- if t.Etype == TIDEAL {
- t = r.Type
+ t := l.Type()
+ if t.Kind() == types.TIDEAL {
+ t = r.Type()
}
- et := t.Etype
- if et == TIDEAL {
- et = TINT
+ et := t.Kind()
+ if et == types.TIDEAL {
+ et = types.TINT
}
- aop := OXXX
- if iscmp[n.Op] && t.Etype != TIDEAL && !types.Identical(l.Type, r.Type) {
+ aop := ir.OXXX
+ if iscmp[n.Op()] && t.Kind() != types.TIDEAL && !types.Identical(l.Type(), r.Type()) {
// comparison is okay as long as one side is
// assignable to the other. convert so they have
// the same type.
// in that case, check comparability of the concrete type.
// The conversion allocates, so only do it if the concrete type is huge.
converted := false
- if r.Type.Etype != TBLANK {
- aop, _ = assignop(l.Type, r.Type)
- if aop != OXXX {
- if r.Type.IsInterface() && !l.Type.IsInterface() && !IsComparable(l.Type) {
- yyerror("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(l.Type))
- n.Type = nil
+ if r.Type().Kind() != types.TBLANK {
+ aop, _ = assignop(l.Type(), r.Type())
+ if aop != ir.OXXX {
+ if r.Type().IsInterface() && !l.Type().IsInterface() && !IsComparable(l.Type()) {
+ base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(l.Type()))
+ n.SetType(nil)
return n
}
- dowidth(l.Type)
- if r.Type.IsInterface() == l.Type.IsInterface() || l.Type.Width >= 1<<16 {
- l = nod(aop, l, nil)
- l.Type = r.Type
+ dowidth(l.Type())
+ if r.Type().IsInterface() == l.Type().IsInterface() || l.Type().Width >= 1<<16 {
+ l = ir.NewConvExpr(base.Pos, aop, r.Type(), l)
l.SetTypecheck(1)
- n.Left = l
+ setLR()
}
- t = r.Type
+ t = r.Type()
converted = true
}
}
- if !converted && l.Type.Etype != TBLANK {
- aop, _ = assignop(r.Type, l.Type)
- if aop != OXXX {
- if l.Type.IsInterface() && !r.Type.IsInterface() && !IsComparable(r.Type) {
- yyerror("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(r.Type))
- n.Type = nil
+ if !converted && l.Type().Kind() != types.TBLANK {
+ aop, _ = assignop(r.Type(), l.Type())
+ if aop != ir.OXXX {
+ if l.Type().IsInterface() && !r.Type().IsInterface() && !IsComparable(r.Type()) {
+ base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(r.Type()))
+ n.SetType(nil)
return n
}
- dowidth(r.Type)
- if r.Type.IsInterface() == l.Type.IsInterface() || r.Type.Width >= 1<<16 {
- r = nod(aop, r, nil)
- r.Type = l.Type
+ dowidth(r.Type())
+ if r.Type().IsInterface() == l.Type().IsInterface() || r.Type().Width >= 1<<16 {
+ r = ir.NewConvExpr(base.Pos, aop, l.Type(), r)
r.SetTypecheck(1)
- n.Right = r
+ setLR()
}
- t = l.Type
+ t = l.Type()
}
}
- et = t.Etype
+ et = t.Kind()
}
- if t.Etype != TIDEAL && !types.Identical(l.Type, r.Type) {
+ if t.Kind() != types.TIDEAL && !types.Identical(l.Type(), r.Type()) {
l, r = defaultlit2(l, r, true)
- if l.Type == nil || r.Type == nil {
- n.Type = nil
+ if l.Type() == nil || r.Type() == nil {
+ n.SetType(nil)
return n
}
- if l.Type.IsInterface() == r.Type.IsInterface() || aop == 0 {
- yyerror("invalid operation: %v (mismatched types %v and %v)", n, l.Type, r.Type)
- n.Type = nil
+ if l.Type().IsInterface() == r.Type().IsInterface() || aop == 0 {
+ base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type(), r.Type())
+ n.SetType(nil)
return n
}
}
- if t.Etype == TIDEAL {
- t = mixUntyped(l.Type, r.Type)
+ if t.Kind() == types.TIDEAL {
+ t = mixUntyped(l.Type(), r.Type())
}
- if dt := defaultType(t); !okfor[op][dt.Etype] {
- yyerror("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(t))
- n.Type = nil
+ if dt := defaultType(t); !okfor[op][dt.Kind()] {
+ base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(t))
+ n.SetType(nil)
return n
}
// okfor allows any array == array, map == map, func == func.
// restrict to slice/map/func == nil and nil == slice/map/func.
- if l.Type.IsArray() && !IsComparable(l.Type) {
- yyerror("invalid operation: %v (%v cannot be compared)", n, l.Type)
- n.Type = nil
+ if l.Type().IsArray() && !IsComparable(l.Type()) {
+ base.Errorf("invalid operation: %v (%v cannot be compared)", n, l.Type())
+ n.SetType(nil)
return n
}
- if l.Type.IsSlice() && !l.isNil() && !r.isNil() {
- yyerror("invalid operation: %v (slice can only be compared to nil)", n)
- n.Type = nil
+ if l.Type().IsSlice() && !ir.IsNil(l) && !ir.IsNil(r) {
+ base.Errorf("invalid operation: %v (slice can only be compared to nil)", n)
+ n.SetType(nil)
return n
}
- if l.Type.IsMap() && !l.isNil() && !r.isNil() {
- yyerror("invalid operation: %v (map can only be compared to nil)", n)
- n.Type = nil
+ if l.Type().IsMap() && !ir.IsNil(l) && !ir.IsNil(r) {
+ base.Errorf("invalid operation: %v (map can only be compared to nil)", n)
+ n.SetType(nil)
return n
}
- if l.Type.Etype == TFUNC && !l.isNil() && !r.isNil() {
- yyerror("invalid operation: %v (func can only be compared to nil)", n)
- n.Type = nil
+ if l.Type().Kind() == types.TFUNC && !ir.IsNil(l) && !ir.IsNil(r) {
+ base.Errorf("invalid operation: %v (func can only be compared to nil)", n)
+ n.SetType(nil)
return n
}
- if l.Type.IsStruct() {
- if f := IncomparableField(l.Type); f != nil {
- yyerror("invalid operation: %v (struct containing %v cannot be compared)", n, f.Type)
- n.Type = nil
+ if l.Type().IsStruct() {
+ if f := IncomparableField(l.Type()); f != nil {
+ base.Errorf("invalid operation: %v (struct containing %v cannot be compared)", n, f.Type)
+ n.SetType(nil)
return n
}
}
- if iscmp[n.Op] {
- evconst(n)
+ if iscmp[n.Op()] {
t = types.UntypedBool
- if n.Op != OLITERAL {
- l, r = defaultlit2(l, r, true)
- n.Left = l
- n.Right = r
+ n.SetType(t)
+ if con := evalConst(n); con.Op() == ir.OLITERAL {
+ return con
}
+ l, r = defaultlit2(l, r, true)
+ setLR()
+ return n
}
- if et == TSTRING && n.Op == OADD {
- // create OADDSTR node with list of strings in x + y + z + (w + v) + ...
- n.Op = OADDSTR
-
- if l.Op == OADDSTR {
- n.List.Set(l.List.Slice())
+ if et == types.TSTRING && n.Op() == ir.OADD {
+ // create or update OADDSTR node with list of strings in x + y + z + (w + v) + ...
+ var add *ir.AddStringExpr
+ if l.Op() == ir.OADDSTR {
+ add = l.(*ir.AddStringExpr)
+ add.SetPos(n.Pos())
} else {
- n.List.Set1(l)
+ add = ir.NewAddStringExpr(n.Pos(), []ir.Node{l})
}
- if r.Op == OADDSTR {
- n.List.AppendNodes(&r.List)
+ if r.Op() == ir.OADDSTR {
+ add.PtrList().AppendNodes(r.PtrList())
} else {
- n.List.Append(r)
+ add.PtrList().Append(r)
}
- n.Left = nil
- n.Right = nil
+ add.SetType(t)
+ return add
}
- if (op == ODIV || op == OMOD) && Isconst(r, CTINT) {
- if r.Val().U.(*Mpint).CmpInt64(0) == 0 {
- yyerror("division by zero")
- n.Type = nil
+ if (op == ir.ODIV || op == ir.OMOD) && ir.IsConst(r, constant.Int) {
+ if constant.Sign(r.Val()) == 0 {
+ base.Errorf("division by zero")
+ n.SetType(nil)
return n
}
}
- n.Type = t
+ n.SetType(t)
+ return n
- case OBITNOT, ONEG, ONOT, OPLUS:
- ok |= ctxExpr
- n.Left = typecheck(n.Left, ctxExpr)
- l := n.Left
- t := l.Type
+ case ir.OBITNOT, ir.ONEG, ir.ONOT, ir.OPLUS:
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ l := n.Left()
+ t := l.Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
- if !okfor[n.Op][defaultType(t).Etype] {
- yyerror("invalid operation: %v (operator %v not defined on %s)", n, n.Op, typekind(t))
- n.Type = nil
+ if !okfor[n.Op()][defaultType(t).Kind()] {
+ base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(t))
+ n.SetType(nil)
return n
}
- n.Type = t
+ n.SetType(t)
+ return n
// exprs
- case OADDR:
- ok |= ctxExpr
-
- n.Left = typecheck(n.Left, ctxExpr)
- if n.Left.Type == nil {
- n.Type = nil
+ case ir.OADDR:
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ if n.Left().Type() == nil {
+ n.SetType(nil)
return n
}
- switch n.Left.Op {
- case OARRAYLIT, OMAPLIT, OSLICELIT, OSTRUCTLIT:
- n.Op = OPTRLIT
+ switch n.Left().Op() {
+ case ir.OARRAYLIT, ir.OMAPLIT, ir.OSLICELIT, ir.OSTRUCTLIT:
+ n.SetOp(ir.OPTRLIT)
default:
- checklvalue(n.Left, "take the address of")
- r := outervalue(n.Left)
- if r.Op == ONAME {
- if r.Orig != r {
- Fatalf("found non-orig name node %v", r) // TODO(mdempsky): What does this mean?
+ checklvalue(n.Left(), "take the address of")
+ r := outervalue(n.Left())
+ if r.Op() == ir.ONAME {
+ if ir.Orig(r) != r {
+ base.Fatalf("found non-orig name node %v", r) // TODO(mdempsky): What does this mean?
}
- r.Name.SetAddrtaken(true)
- if r.Name.IsClosureVar() && !capturevarscomplete {
+ r.Name().SetAddrtaken(true)
+ if r.Name().IsClosureVar() && !capturevarscomplete {
// Mark the original variable as Addrtaken so that capturevars
// knows not to pass it by value.
// But if the capturevars phase is complete, don't touch it,
// in case l.Name's containing function has not yet been compiled.
- r.Name.Defn.Name.SetAddrtaken(true)
+ r.Name().Defn.Name().SetAddrtaken(true)
}
}
- n.Left = defaultlit(n.Left, nil)
- if n.Left.Type == nil {
- n.Type = nil
+ n.SetLeft(defaultlit(n.Left(), nil))
+ if n.Left().Type() == nil {
+ n.SetType(nil)
return n
}
}
- n.Type = types.NewPtr(n.Left.Type)
+ n.SetType(types.NewPtr(n.Left().Type()))
+ return n
- case OCOMPLIT:
- ok |= ctxExpr
- n = typecheckcomplit(n)
- if n.Type == nil {
- return n
- }
+ case ir.OCOMPLIT:
+ return typecheckcomplit(n.(*ir.CompLitExpr))
- case OXDOT, ODOT:
- if n.Op == OXDOT {
+ case ir.OXDOT, ir.ODOT:
+ n := n.(*ir.SelectorExpr)
+ if n.Op() == ir.OXDOT {
n = adddot(n)
- n.Op = ODOT
- if n.Left == nil {
- n.Type = nil
+ n.SetOp(ir.ODOT)
+ if n.Left() == nil {
+ n.SetType(nil)
return n
}
}
- n.Left = typecheck(n.Left, ctxExpr|ctxType)
+ n.SetLeft(typecheck(n.Left(), ctxExpr|ctxType))
- n.Left = defaultlit(n.Left, nil)
+ n.SetLeft(defaultlit(n.Left(), nil))
- t := n.Left.Type
+ t := n.Left().Type()
if t == nil {
- adderrorname(n)
- n.Type = nil
+ base.UpdateErrorDot(ir.Line(n), fmt.Sprint(n.Left()), fmt.Sprint(n))
+ n.SetType(nil)
return n
}
- s := n.Sym
+ s := n.Sym()
- if n.Left.Op == OTYPE {
- n = typecheckMethodExpr(n)
- if n.Type == nil {
- return n
- }
- ok = ctxExpr
- break
+ if n.Left().Op() == ir.OTYPE {
+ return typecheckMethodExpr(n)
}
if t.IsPtr() && !t.Elem().IsInterface() {
t = t.Elem()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
- n.Op = ODOTPTR
+ n.SetOp(ir.ODOTPTR)
checkwidth(t)
}
- if n.Sym.IsBlank() {
- yyerror("cannot refer to blank field or method")
- n.Type = nil
+ if n.Sym().IsBlank() {
+ base.Errorf("cannot refer to blank field or method")
+ n.SetType(nil)
return n
}
// Legitimate field or method lookup failed, try to explain the error
switch {
case t.IsEmptyInterface():
- yyerror("%v undefined (type %v is interface with no methods)", n, n.Left.Type)
+ base.Errorf("%v undefined (type %v is interface with no methods)", n, n.Left().Type())
case t.IsPtr() && t.Elem().IsInterface():
// Pointer to interface is almost always a mistake.
- yyerror("%v undefined (type %v is pointer to interface, not interface)", n, n.Left.Type)
+ base.Errorf("%v undefined (type %v is pointer to interface, not interface)", n, n.Left().Type())
case lookdot(n, t, 1) != nil:
// Field or method matches by name, but it is not exported.
- yyerror("%v undefined (cannot refer to unexported field or method %v)", n, n.Sym)
+ base.Errorf("%v undefined (cannot refer to unexported field or method %v)", n, n.Sym())
default:
if mt := lookdot(n, t, 2); mt != nil && visible(mt.Sym) { // Case-insensitive lookup.
- yyerror("%v undefined (type %v has no field or method %v, but does have %v)", n, n.Left.Type, n.Sym, mt.Sym)
+ base.Errorf("%v undefined (type %v has no field or method %v, but does have %v)", n, n.Left().Type(), n.Sym(), mt.Sym)
} else {
- yyerror("%v undefined (type %v has no field or method %v)", n, n.Left.Type, n.Sym)
+ base.Errorf("%v undefined (type %v has no field or method %v)", n, n.Left().Type(), n.Sym())
}
}
- n.Type = nil
+ n.SetType(nil)
return n
}
- switch n.Op {
- case ODOTINTER, ODOTMETH:
- if top&ctxCallee != 0 {
- ok |= ctxCallee
- } else {
- typecheckpartialcall(n, s)
- ok |= ctxExpr
- }
-
- default:
- ok |= ctxExpr
+ if (n.Op() == ir.ODOTINTER || n.Op() == ir.ODOTMETH) && top&ctxCallee == 0 {
+ return typecheckpartialcall(n, s)
}
+ return n
- case ODOTTYPE:
- ok |= ctxExpr
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- l := n.Left
- t := l.Type
+ case ir.ODOTTYPE:
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ l := n.Left()
+ t := l.Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
if !t.IsInterface() {
- yyerror("invalid type assertion: %v (non-interface type %v on left)", n, t)
- n.Type = nil
+ base.Errorf("invalid type assertion: %v (non-interface type %v on left)", n, t)
+ n.SetType(nil)
return n
}
- if n.Right != nil {
- n.Right = typecheck(n.Right, ctxType)
- n.Type = n.Right.Type
- n.Right = nil
- if n.Type == nil {
+ if n.Right() != nil {
+ n.SetRight(typecheck(n.Right(), ctxType))
+ n.SetType(n.Right().Type())
+ n.SetRight(nil)
+ if n.Type() == nil {
return n
}
}
- if n.Type != nil && !n.Type.IsInterface() {
+ if n.Type() != nil && !n.Type().IsInterface() {
var missing, have *types.Field
var ptr int
- if !implements(n.Type, t, &missing, &have, &ptr) {
+ if !implements(n.Type(), t, &missing, &have, &ptr) {
if have != nil && have.Sym == missing.Sym {
- yyerror("impossible type assertion:\n\t%v does not implement %v (wrong type for %v method)\n"+
- "\t\thave %v%0S\n\t\twant %v%0S", n.Type, t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+ base.Errorf("impossible type assertion:\n\t%v does not implement %v (wrong type for %v method)\n"+
+ "\t\thave %v%S\n\t\twant %v%S", n.Type(), t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
} else if ptr != 0 {
- yyerror("impossible type assertion:\n\t%v does not implement %v (%v method has pointer receiver)", n.Type, t, missing.Sym)
+ base.Errorf("impossible type assertion:\n\t%v does not implement %v (%v method has pointer receiver)", n.Type(), t, missing.Sym)
} else if have != nil {
- yyerror("impossible type assertion:\n\t%v does not implement %v (missing %v method)\n"+
- "\t\thave %v%0S\n\t\twant %v%0S", n.Type, t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+ base.Errorf("impossible type assertion:\n\t%v does not implement %v (missing %v method)\n"+
+ "\t\thave %v%S\n\t\twant %v%S", n.Type(), t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
} else {
- yyerror("impossible type assertion:\n\t%v does not implement %v (missing %v method)", n.Type, t, missing.Sym)
+ base.Errorf("impossible type assertion:\n\t%v does not implement %v (missing %v method)", n.Type(), t, missing.Sym)
}
- n.Type = nil
+ n.SetType(nil)
return n
}
}
+ return n
- case OINDEX:
- ok |= ctxExpr
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- n.Left = implicitstar(n.Left)
- l := n.Left
- n.Right = typecheck(n.Right, ctxExpr)
- r := n.Right
- t := l.Type
- if t == nil || r.Type == nil {
- n.Type = nil
+ case ir.OINDEX:
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ n.SetLeft(implicitstar(n.Left()))
+ l := n.Left()
+ n.SetRight(typecheck(n.Right(), ctxExpr))
+ r := n.Right()
+ t := l.Type()
+ if t == nil || r.Type() == nil {
+ n.SetType(nil)
return n
}
- switch t.Etype {
+ switch t.Kind() {
default:
- yyerror("invalid operation: %v (type %v does not support indexing)", n, t)
- n.Type = nil
+ base.Errorf("invalid operation: %v (type %v does not support indexing)", n, t)
+ n.SetType(nil)
return n
- case TSTRING, TARRAY, TSLICE:
- n.Right = indexlit(n.Right)
+ case types.TSTRING, types.TARRAY, types.TSLICE:
+ n.SetRight(indexlit(n.Right()))
if t.IsString() {
- n.Type = types.Bytetype
+ n.SetType(types.ByteType)
} else {
- n.Type = t.Elem()
+ n.SetType(t.Elem())
}
why := "string"
if t.IsArray() {
why = "slice"
}
- if n.Right.Type != nil && !n.Right.Type.IsInteger() {
- yyerror("non-integer %s index %v", why, n.Right)
- break
+ if n.Right().Type() != nil && !n.Right().Type().IsInteger() {
+ base.Errorf("non-integer %s index %v", why, n.Right())
+ return n
}
- if !n.Bounded() && Isconst(n.Right, CTINT) {
- x := n.Right.Int64Val()
- if x < 0 {
- yyerror("invalid %s index %v (index must be non-negative)", why, n.Right)
- } else if t.IsArray() && x >= t.NumElem() {
- yyerror("invalid array index %v (out of bounds for %d-element array)", n.Right, t.NumElem())
- } else if Isconst(n.Left, CTSTR) && x >= int64(len(n.Left.StringVal())) {
- yyerror("invalid string index %v (out of bounds for %d-byte string)", n.Right, len(n.Left.StringVal()))
- } else if n.Right.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 {
- yyerror("invalid %s index %v (index too large)", why, n.Right)
+ if !n.Bounded() && ir.IsConst(n.Right(), constant.Int) {
+ x := n.Right().Val()
+ if constant.Sign(x) < 0 {
+ base.Errorf("invalid %s index %v (index must be non-negative)", why, n.Right())
+ } else if t.IsArray() && constant.Compare(x, token.GEQ, constant.MakeInt64(t.NumElem())) {
+ base.Errorf("invalid array index %v (out of bounds for %d-element array)", n.Right(), t.NumElem())
+ } else if ir.IsConst(n.Left(), constant.String) && constant.Compare(x, token.GEQ, constant.MakeInt64(int64(len(ir.StringVal(n.Left()))))) {
+ base.Errorf("invalid string index %v (out of bounds for %d-byte string)", n.Right(), len(ir.StringVal(n.Left())))
+ } else if doesoverflow(x, types.Types[types.TINT]) {
+ base.Errorf("invalid %s index %v (index too large)", why, n.Right())
}
}
- case TMAP:
- n.Right = assignconv(n.Right, t.Key(), "map index")
- n.Type = t.Elem()
- n.Op = OINDEXMAP
- n.ResetAux()
+ case types.TMAP:
+ n.SetRight(assignconv(n.Right(), t.Key(), "map index"))
+ n.SetType(t.Elem())
+ n.SetOp(ir.OINDEXMAP)
+ n.SetIndexMapLValue(false)
}
+ return n
- case ORECV:
- ok |= ctxStmt | ctxExpr
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- l := n.Left
- t := l.Type
+ case ir.ORECV:
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ l := n.Left()
+ t := l.Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
if !t.IsChan() {
- yyerror("invalid operation: %v (receive from non-chan type %v)", n, t)
- n.Type = nil
+ base.Errorf("invalid operation: %v (receive from non-chan type %v)", n, t)
+ n.SetType(nil)
return n
}
if !t.ChanDir().CanRecv() {
- yyerror("invalid operation: %v (receive from send-only type %v)", n, t)
- n.Type = nil
+ base.Errorf("invalid operation: %v (receive from send-only type %v)", n, t)
+ n.SetType(nil)
return n
}
- n.Type = t.Elem()
+ n.SetType(t.Elem())
+ return n
- case OSEND:
- ok |= ctxStmt
- n.Left = typecheck(n.Left, ctxExpr)
- n.Right = typecheck(n.Right, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- t := n.Left.Type
+ case ir.OSEND:
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetRight(typecheck(n.Right(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ t := n.Left().Type()
if t == nil {
- n.Type = nil
return n
}
if !t.IsChan() {
- yyerror("invalid operation: %v (send to non-chan type %v)", n, t)
- n.Type = nil
+ base.Errorf("invalid operation: %v (send to non-chan type %v)", n, t)
return n
}
if !t.ChanDir().CanSend() {
- yyerror("invalid operation: %v (send to receive-only type %v)", n, t)
- n.Type = nil
+ base.Errorf("invalid operation: %v (send to receive-only type %v)", n, t)
return n
}
- n.Right = assignconv(n.Right, t.Elem(), "send")
- if n.Right.Type == nil {
- n.Type = nil
+ n.SetRight(assignconv(n.Right(), t.Elem(), "send"))
+ if n.Right().Type() == nil {
return n
}
- n.Type = nil
+ return n
- case OSLICEHEADER:
- // Errors here are Fatalf instead of yyerror because only the compiler
+ case ir.OSLICEHEADER:
+ // Errors here are Fatalf instead of Errorf because only the compiler
// can construct an OSLICEHEADER node.
// Components used in OSLICEHEADER that are supplied by parsed source code
// have already been typechecked in e.g. OMAKESLICE earlier.
- ok |= ctxExpr
-
- t := n.Type
+ t := n.Type()
if t == nil {
- Fatalf("no type specified for OSLICEHEADER")
+ base.Fatalf("no type specified for OSLICEHEADER")
}
if !t.IsSlice() {
- Fatalf("invalid type %v for OSLICEHEADER", n.Type)
+ base.Fatalf("invalid type %v for OSLICEHEADER", n.Type())
}
- if n.Left == nil || n.Left.Type == nil || !n.Left.Type.IsUnsafePtr() {
- Fatalf("need unsafe.Pointer for OSLICEHEADER")
+ if n.Left() == nil || n.Left().Type() == nil || !n.Left().Type().IsUnsafePtr() {
+ base.Fatalf("need unsafe.Pointer for OSLICEHEADER")
}
- if x := n.List.Len(); x != 2 {
- Fatalf("expected 2 params (len, cap) for OSLICEHEADER, got %d", x)
+ if x := n.List().Len(); x != 2 {
+ base.Fatalf("expected 2 params (len, cap) for OSLICEHEADER, got %d", x)
}
- n.Left = typecheck(n.Left, ctxExpr)
- l := typecheck(n.List.First(), ctxExpr)
- c := typecheck(n.List.Second(), ctxExpr)
- l = defaultlit(l, types.Types[TINT])
- c = defaultlit(c, types.Types[TINT])
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ l := typecheck(n.List().First(), ctxExpr)
+ c := typecheck(n.List().Second(), ctxExpr)
+ l = defaultlit(l, types.Types[types.TINT])
+ c = defaultlit(c, types.Types[types.TINT])
- if Isconst(l, CTINT) && l.Int64Val() < 0 {
- Fatalf("len for OSLICEHEADER must be non-negative")
+ if ir.IsConst(l, constant.Int) && ir.Int64Val(l) < 0 {
+ base.Fatalf("len for OSLICEHEADER must be non-negative")
}
- if Isconst(c, CTINT) && c.Int64Val() < 0 {
- Fatalf("cap for OSLICEHEADER must be non-negative")
+ if ir.IsConst(c, constant.Int) && ir.Int64Val(c) < 0 {
+ base.Fatalf("cap for OSLICEHEADER must be non-negative")
}
- if Isconst(l, CTINT) && Isconst(c, CTINT) && l.Val().U.(*Mpint).Cmp(c.Val().U.(*Mpint)) > 0 {
- Fatalf("len larger than cap for OSLICEHEADER")
+ if ir.IsConst(l, constant.Int) && ir.IsConst(c, constant.Int) && constant.Compare(l.Val(), token.GTR, c.Val()) {
+ base.Fatalf("len larger than cap for OSLICEHEADER")
}
- n.List.SetFirst(l)
- n.List.SetSecond(c)
+ n.List().SetFirst(l)
+ n.List().SetSecond(c)
+ return n
- case OMAKESLICECOPY:
- // Errors here are Fatalf instead of yyerror because only the compiler
+ case ir.OMAKESLICECOPY:
+ // Errors here are Fatalf instead of Errorf because only the compiler
// can construct an OMAKESLICECOPY node.
		// Components used in OMAKESLICECOPY that are supplied by parsed source code
// have already been typechecked in OMAKE and OCOPY earlier.
- ok |= ctxExpr
-
- t := n.Type
+ t := n.Type()
if t == nil {
- Fatalf("no type specified for OMAKESLICECOPY")
+ base.Fatalf("no type specified for OMAKESLICECOPY")
}
if !t.IsSlice() {
- Fatalf("invalid type %v for OMAKESLICECOPY", n.Type)
+ base.Fatalf("invalid type %v for OMAKESLICECOPY", n.Type())
}
- if n.Left == nil {
- Fatalf("missing len argument for OMAKESLICECOPY")
+ if n.Left() == nil {
+ base.Fatalf("missing len argument for OMAKESLICECOPY")
}
- if n.Right == nil {
- Fatalf("missing slice argument to copy for OMAKESLICECOPY")
+ if n.Right() == nil {
+ base.Fatalf("missing slice argument to copy for OMAKESLICECOPY")
}
- n.Left = typecheck(n.Left, ctxExpr)
- n.Right = typecheck(n.Right, ctxExpr)
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetRight(typecheck(n.Right(), ctxExpr))
- n.Left = defaultlit(n.Left, types.Types[TINT])
+ n.SetLeft(defaultlit(n.Left(), types.Types[types.TINT]))
- if !n.Left.Type.IsInteger() && n.Type.Etype != TIDEAL {
- yyerror("non-integer len argument in OMAKESLICECOPY")
+ if !n.Left().Type().IsInteger() && n.Type().Kind() != types.TIDEAL {
+ base.Errorf("non-integer len argument in OMAKESLICECOPY")
}
- if Isconst(n.Left, CTINT) {
- if n.Left.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 {
- Fatalf("len for OMAKESLICECOPY too large")
+ if ir.IsConst(n.Left(), constant.Int) {
+ if doesoverflow(n.Left().Val(), types.Types[types.TINT]) {
+ base.Fatalf("len for OMAKESLICECOPY too large")
}
- if n.Left.Int64Val() < 0 {
- Fatalf("len for OMAKESLICECOPY must be non-negative")
+ if constant.Sign(n.Left().Val()) < 0 {
+ base.Fatalf("len for OMAKESLICECOPY must be non-negative")
}
}
+ return n
- case OSLICE, OSLICE3:
- ok |= ctxExpr
- n.Left = typecheck(n.Left, ctxExpr)
+ case ir.OSLICE, ir.OSLICE3:
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
low, high, max := n.SliceBounds()
- hasmax := n.Op.IsSlice3()
+ hasmax := n.Op().IsSlice3()
low = typecheck(low, ctxExpr)
high = typecheck(high, ctxExpr)
max = typecheck(max, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
+ n.SetLeft(defaultlit(n.Left(), nil))
low = indexlit(low)
high = indexlit(high)
max = indexlit(max)
n.SetSliceBounds(low, high, max)
- l := n.Left
- if l.Type == nil {
- n.Type = nil
+ l := n.Left()
+ if l.Type() == nil {
+ n.SetType(nil)
return n
}
- if l.Type.IsArray() {
- if !islvalue(n.Left) {
- yyerror("invalid operation %v (slice of unaddressable value)", n)
- n.Type = nil
+ if l.Type().IsArray() {
+ if !islvalue(n.Left()) {
+ base.Errorf("invalid operation %v (slice of unaddressable value)", n)
+ n.SetType(nil)
return n
}
- n.Left = nod(OADDR, n.Left, nil)
- n.Left.SetImplicit(true)
- n.Left = typecheck(n.Left, ctxExpr)
- l = n.Left
+ addr := nodAddr(n.Left())
+ addr.SetImplicit(true)
+ n.SetLeft(typecheck(addr, ctxExpr))
+ l = n.Left()
}
- t := l.Type
+ t := l.Type()
var tp *types.Type
if t.IsString() {
if hasmax {
- yyerror("invalid operation %v (3-index slice of string)", n)
- n.Type = nil
+ base.Errorf("invalid operation %v (3-index slice of string)", n)
+ n.SetType(nil)
return n
}
- n.Type = t
- n.Op = OSLICESTR
+ n.SetType(t)
+ n.SetOp(ir.OSLICESTR)
} else if t.IsPtr() && t.Elem().IsArray() {
tp = t.Elem()
- n.Type = types.NewSlice(tp.Elem())
- dowidth(n.Type)
+ n.SetType(types.NewSlice(tp.Elem()))
+ dowidth(n.Type())
if hasmax {
- n.Op = OSLICE3ARR
+ n.SetOp(ir.OSLICE3ARR)
} else {
- n.Op = OSLICEARR
+ n.SetOp(ir.OSLICEARR)
}
} else if t.IsSlice() {
- n.Type = t
+ n.SetType(t)
} else {
- yyerror("cannot slice %v (type %v)", l, t)
- n.Type = nil
+ base.Errorf("cannot slice %v (type %v)", l, t)
+ n.SetType(nil)
return n
}
if low != nil && !checksliceindex(l, low, tp) {
- n.Type = nil
+ n.SetType(nil)
return n
}
if high != nil && !checksliceindex(l, high, tp) {
- n.Type = nil
+ n.SetType(nil)
return n
}
if max != nil && !checksliceindex(l, max, tp) {
- n.Type = nil
+ n.SetType(nil)
return n
}
if !checksliceconst(low, high) || !checksliceconst(low, max) || !checksliceconst(high, max) {
- n.Type = nil
+ n.SetType(nil)
return n
}
+ return n
// call and call like
- case OCALL:
- typecheckslice(n.Ninit.Slice(), ctxStmt) // imported rewritten f(g()) calls (#30907)
- n.Left = typecheck(n.Left, ctxExpr|ctxType|ctxCallee)
- if n.Left.Diag() {
+ case ir.OCALL:
+ n := n.(*ir.CallExpr)
+ n.Use = ir.CallUseExpr
+ if top == ctxStmt {
+ n.Use = ir.CallUseStmt
+ }
+ typecheckslice(n.Init().Slice(), ctxStmt) // imported rewritten f(g()) calls (#30907)
+ n.SetLeft(typecheck(n.Left(), ctxExpr|ctxType|ctxCallee))
+ if n.Left().Diag() {
n.SetDiag(true)
}
- l := n.Left
+ l := n.Left()
- if l.Op == ONAME && l.SubOp() != 0 {
- if n.IsDDD() && l.SubOp() != OAPPEND {
- yyerror("invalid use of ... with builtin %v", l)
+ if l.Op() == ir.ONAME && l.(*ir.Name).SubOp() != 0 {
+ if n.IsDDD() && l.SubOp() != ir.OAPPEND {
+ base.Errorf("invalid use of ... with builtin %v", l)
}
// builtin: OLEN, OCAP, etc.
- n.Op = l.SubOp()
- n.Left = n.Right
- n.Right = nil
- n = typecheck1(n, top)
- return n
+ switch l.SubOp() {
+ default:
+ base.Fatalf("unknown builtin %v", l)
+
+ case ir.OAPPEND, ir.ODELETE, ir.OMAKE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
+ n.SetOp(l.SubOp())
+ n.SetLeft(nil)
+ n.SetTypecheck(0) // re-typechecking new op is OK, not a loop
+ return typecheck(n, top)
+
+ case ir.OCAP, ir.OCLOSE, ir.OIMAG, ir.OLEN, ir.OPANIC, ir.OREAL:
+ typecheckargs(n)
+ fallthrough
+ case ir.ONEW, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
+ arg, ok := needOneArg(n, "%v", n.Op())
+ if !ok {
+ n.SetType(nil)
+ return n
+ }
+ u := ir.NewUnaryExpr(n.Pos(), l.SubOp(), arg)
+ return typecheck(initExpr(n.Init().Slice(), u), top) // typecheckargs can add to old.Init
+
+ case ir.OCOMPLEX, ir.OCOPY:
+ typecheckargs(n)
+ arg1, arg2, ok := needTwoArgs(n)
+ if !ok {
+ n.SetType(nil)
+ return n
+ }
+ b := ir.NewBinaryExpr(n.Pos(), l.SubOp(), arg1, arg2)
+ return typecheck(initExpr(n.Init().Slice(), b), top) // typecheckargs can add to old.Init
+ }
+ panic("unreachable")
}
- n.Left = defaultlit(n.Left, nil)
- l = n.Left
- if l.Op == OTYPE {
+ n.SetLeft(defaultlit(n.Left(), nil))
+ l = n.Left()
+ if l.Op() == ir.OTYPE {
if n.IsDDD() {
- if !l.Type.Broke() {
- yyerror("invalid use of ... in type conversion to %v", l.Type)
+ if !l.Type().Broke() {
+ base.Errorf("invalid use of ... in type conversion to %v", l.Type())
}
n.SetDiag(true)
}
// pick off before type-checking arguments
- ok |= ctxExpr
-
- // turn CALL(type, arg) into CONV(arg) w/ type
- n.Left = nil
-
- n.Op = OCONV
- n.Type = l.Type
- if !onearg(n, "conversion to %v", l.Type) {
- n.Type = nil
+ arg, ok := needOneArg(n, "conversion to %v", l.Type())
+ if !ok {
+ n.SetType(nil)
return n
}
- n = typecheck1(n, top)
- return n
+
+ n := ir.NodAt(n.Pos(), ir.OCONV, arg, nil)
+ n.SetType(l.Type())
+ return typecheck1(n, top)
}
typecheckargs(n)
- t := l.Type
+ t := l.Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
checkwidth(t)
- switch l.Op {
- case ODOTINTER:
- n.Op = OCALLINTER
+ switch l.Op() {
+ case ir.ODOTINTER:
+ n.SetOp(ir.OCALLINTER)
- case ODOTMETH:
- n.Op = OCALLMETH
+ case ir.ODOTMETH:
+ n.SetOp(ir.OCALLMETH)
// typecheckaste was used here but there wasn't enough
// information further down the call chain to know if we
// It isn't necessary, so just do a sanity check.
tp := t.Recv().Type
- if l.Left == nil || !types.Identical(l.Left.Type, tp) {
- Fatalf("method receiver")
+ if l.Left() == nil || !types.Identical(l.Left().Type(), tp) {
+ base.Fatalf("method receiver")
}
default:
- n.Op = OCALLFUNC
- if t.Etype != TFUNC {
- name := l.String()
- if isBuiltinFuncName(name) && l.Name.Defn != nil {
- // be more specific when the function
+ n.SetOp(ir.OCALLFUNC)
+ if t.Kind() != types.TFUNC {
+ // TODO(mdempsky): Remove "o.Sym() != nil" once we stop
+ // using ir.Name for numeric literals.
+ if o := ir.Orig(l); o.Name() != nil && o.Sym() != nil && types.BuiltinPkg.Lookup(o.Sym().Name).Def != nil {
+ // be more specific when the non-function
// name matches a predeclared function
- yyerror("cannot call non-function %s (type %v), declared at %s",
- name, t, linestr(l.Name.Defn.Pos))
+ base.Errorf("cannot call non-function %L, declared at %s",
+ l, base.FmtPos(o.Name().Pos()))
} else {
- yyerror("cannot call non-function %s (type %v)", name, t)
+ base.Errorf("cannot call non-function %L", l)
}
- n.Type = nil
+ n.SetType(nil)
return n
}
}
- typecheckaste(OCALL, n.Left, n.IsDDD(), t.Params(), n.List, func() string { return fmt.Sprintf("argument to %v", n.Left) })
- ok |= ctxStmt
+ typecheckaste(ir.OCALL, n.Left(), n.IsDDD(), t.Params(), n.List(), func() string { return fmt.Sprintf("argument to %v", n.Left()) })
if t.NumResults() == 0 {
- break
+ return n
}
- ok |= ctxExpr
if t.NumResults() == 1 {
- n.Type = l.Type.Results().Field(0).Type
-
- if n.Op == OCALLFUNC && n.Left.Op == ONAME && isRuntimePkg(n.Left.Sym.Pkg) && n.Left.Sym.Name == "getg" {
- // Emit code for runtime.getg() directly instead of calling function.
- // Most such rewrites (for example the similar one for math.Sqrt) should be done in walk,
- // so that the ordering pass can make sure to preserve the semantics of the original code
- // (in particular, the exact time of the function call) by introducing temporaries.
- // In this case, we know getg() always returns the same result within a given function
- // and we want to avoid the temporaries, so we do the rewrite earlier than is typical.
- n.Op = OGETG
+ n.SetType(l.Type().Results().Field(0).Type)
+
+ if n.Op() == ir.OCALLFUNC && n.Left().Op() == ir.ONAME {
+ if sym := n.Left().(*ir.Name).Sym(); isRuntimePkg(sym.Pkg) && sym.Name == "getg" {
+ // Emit code for runtime.getg() directly instead of calling function.
+ // Most such rewrites (for example the similar one for math.Sqrt) should be done in walk,
+ // so that the ordering pass can make sure to preserve the semantics of the original code
+ // (in particular, the exact time of the function call) by introducing temporaries.
+ // In this case, we know getg() always returns the same result within a given function
+ // and we want to avoid the temporaries, so we do the rewrite earlier than is typical.
+ n.SetOp(ir.OGETG)
+ }
}
-
- break
+ return n
}
// multiple return
if top&(ctxMultiOK|ctxStmt) == 0 {
- yyerror("multiple-value %v() in single-value context", l)
- break
- }
-
- n.Type = l.Type.Results()
-
- case OALIGNOF, OOFFSETOF, OSIZEOF:
- ok |= ctxExpr
- if !onearg(n, "%v", n.Op) {
- n.Type = nil
+ base.Errorf("multiple-value %v() in single-value context", l)
return n
}
- n.Type = types.Types[TUINTPTR]
- case OCAP, OLEN:
- ok |= ctxExpr
- if !onearg(n, "%v", n.Op) {
- n.Type = nil
- return n
- }
+ n.SetType(l.Type().Results())
+ return n
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- n.Left = implicitstar(n.Left)
- l := n.Left
- t := l.Type
+ case ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
+ n.SetType(types.Types[types.TUINTPTR])
+ return n
+
+ case ir.OCAP, ir.OLEN:
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ n.SetLeft(implicitstar(n.Left()))
+ l := n.Left()
+ t := l.Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
var ok bool
- if n.Op == OLEN {
- ok = okforlen[t.Etype]
+ if n.Op() == ir.OLEN {
+ ok = okforlen[t.Kind()]
} else {
- ok = okforcap[t.Etype]
+ ok = okforcap[t.Kind()]
}
if !ok {
- yyerror("invalid argument %L for %v", l, n.Op)
- n.Type = nil
+ base.Errorf("invalid argument %L for %v", l, n.Op())
+ n.SetType(nil)
return n
}
- n.Type = types.Types[TINT]
-
- case OREAL, OIMAG:
- ok |= ctxExpr
- if !onearg(n, "%v", n.Op) {
- n.Type = nil
- return n
- }
+ n.SetType(types.Types[types.TINT])
+ return n
- n.Left = typecheck(n.Left, ctxExpr)
- l := n.Left
- t := l.Type
+ case ir.OREAL, ir.OIMAG:
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ l := n.Left()
+ t := l.Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
// Determine result type.
- switch t.Etype {
- case TIDEAL:
- n.Type = types.UntypedFloat
- case TCOMPLEX64:
- n.Type = types.Types[TFLOAT32]
- case TCOMPLEX128:
- n.Type = types.Types[TFLOAT64]
+ switch t.Kind() {
+ case types.TIDEAL:
+ n.SetType(types.UntypedFloat)
+ case types.TCOMPLEX64:
+ n.SetType(types.Types[types.TFLOAT32])
+ case types.TCOMPLEX128:
+ n.SetType(types.Types[types.TFLOAT64])
default:
- yyerror("invalid argument %L for %v", l, n.Op)
- n.Type = nil
+ base.Errorf("invalid argument %L for %v", l, n.Op())
+ n.SetType(nil)
return n
}
+ return n
- case OCOMPLEX:
- ok |= ctxExpr
- typecheckargs(n)
- if !twoarg(n) {
- n.Type = nil
- return n
- }
- l := n.Left
- r := n.Right
- if l.Type == nil || r.Type == nil {
- n.Type = nil
+ case ir.OCOMPLEX:
+ l := typecheck(n.Left(), ctxExpr)
+ r := typecheck(n.Right(), ctxExpr)
+ if l.Type() == nil || r.Type() == nil {
+ n.SetType(nil)
return n
}
l, r = defaultlit2(l, r, false)
- if l.Type == nil || r.Type == nil {
- n.Type = nil
+ if l.Type() == nil || r.Type() == nil {
+ n.SetType(nil)
return n
}
- n.Left = l
- n.Right = r
+ n.SetLeft(l)
+ n.SetRight(r)
- if !types.Identical(l.Type, r.Type) {
- yyerror("invalid operation: %v (mismatched types %v and %v)", n, l.Type, r.Type)
- n.Type = nil
+ if !types.Identical(l.Type(), r.Type()) {
+ base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type(), r.Type())
+ n.SetType(nil)
return n
}
var t *types.Type
- switch l.Type.Etype {
+ switch l.Type().Kind() {
default:
- yyerror("invalid operation: %v (arguments have type %v, expected floating-point)", n, l.Type)
- n.Type = nil
+ base.Errorf("invalid operation: %v (arguments have type %v, expected floating-point)", n, l.Type())
+ n.SetType(nil)
return n
- case TIDEAL:
+ case types.TIDEAL:
t = types.UntypedComplex
- case TFLOAT32:
- t = types.Types[TCOMPLEX64]
+ case types.TFLOAT32:
+ t = types.Types[types.TCOMPLEX64]
- case TFLOAT64:
- t = types.Types[TCOMPLEX128]
+ case types.TFLOAT64:
+ t = types.Types[types.TCOMPLEX128]
}
- n.Type = t
+ n.SetType(t)
+ return n
- case OCLOSE:
- if !onearg(n, "%v", n.Op) {
- n.Type = nil
- return n
- }
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- l := n.Left
- t := l.Type
+ case ir.OCLOSE:
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ l := n.Left()
+ t := l.Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
if !t.IsChan() {
- yyerror("invalid operation: %v (non-chan type %v)", n, t)
- n.Type = nil
+ base.Errorf("invalid operation: %v (non-chan type %v)", n, t)
+ n.SetType(nil)
return n
}
if !t.ChanDir().CanSend() {
- yyerror("invalid operation: %v (cannot close receive-only channel)", n)
- n.Type = nil
+ base.Errorf("invalid operation: %v (cannot close receive-only channel)", n)
+ n.SetType(nil)
return n
}
+ return n
- ok |= ctxStmt
-
- case ODELETE:
- ok |= ctxStmt
+ case ir.ODELETE:
typecheckargs(n)
- args := n.List
+ args := n.List()
if args.Len() == 0 {
- yyerror("missing arguments to delete")
- n.Type = nil
+ base.Errorf("missing arguments to delete")
+ n.SetType(nil)
return n
}
if args.Len() == 1 {
- yyerror("missing second (key) argument to delete")
- n.Type = nil
+ base.Errorf("missing second (key) argument to delete")
+ n.SetType(nil)
return n
}
if args.Len() != 2 {
- yyerror("too many arguments to delete")
- n.Type = nil
+ base.Errorf("too many arguments to delete")
+ n.SetType(nil)
return n
}
l := args.First()
r := args.Second()
- if l.Type != nil && !l.Type.IsMap() {
- yyerror("first argument to delete must be map; have %L", l.Type)
- n.Type = nil
+ if l.Type() != nil && !l.Type().IsMap() {
+ base.Errorf("first argument to delete must be map; have %L", l.Type())
+ n.SetType(nil)
return n
}
- args.SetSecond(assignconv(r, l.Type.Key(), "delete"))
+ args.SetSecond(assignconv(r, l.Type().Key(), "delete"))
+ return n
- case OAPPEND:
- ok |= ctxExpr
+ case ir.OAPPEND:
typecheckargs(n)
- args := n.List
+ args := n.List()
if args.Len() == 0 {
- yyerror("missing arguments to append")
- n.Type = nil
+ base.Errorf("missing arguments to append")
+ n.SetType(nil)
return n
}
- t := args.First().Type
+ t := args.First().Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
- n.Type = t
+ n.SetType(t)
if !t.IsSlice() {
- if Isconst(args.First(), CTNIL) {
- yyerror("first argument to append must be typed slice; have untyped nil")
- n.Type = nil
+ if ir.IsNil(args.First()) {
+ base.Errorf("first argument to append must be typed slice; have untyped nil")
+ n.SetType(nil)
return n
}
- yyerror("first argument to append must be slice; have %L", t)
- n.Type = nil
+ base.Errorf("first argument to append must be slice; have %L", t)
+ n.SetType(nil)
return n
}
if n.IsDDD() {
if args.Len() == 1 {
- yyerror("cannot use ... on first argument to append")
- n.Type = nil
+ base.Errorf("cannot use ... on first argument to append")
+ n.SetType(nil)
return n
}
if args.Len() != 2 {
- yyerror("too many arguments to append")
- n.Type = nil
+ base.Errorf("too many arguments to append")
+ n.SetType(nil)
return n
}
- if t.Elem().IsKind(TUINT8) && args.Second().Type.IsString() {
- args.SetSecond(defaultlit(args.Second(), types.Types[TSTRING]))
- break
+ if t.Elem().IsKind(types.TUINT8) && args.Second().Type().IsString() {
+ args.SetSecond(defaultlit(args.Second(), types.Types[types.TSTRING]))
+ return n
}
- args.SetSecond(assignconv(args.Second(), t.Orig, "append"))
- break
+ args.SetSecond(assignconv(args.Second(), t.Underlying(), "append"))
+ return n
}
as := args.Slice()[1:]
for i, n := range as {
- if n.Type == nil {
+ if n.Type() == nil {
continue
}
as[i] = assignconv(n, t.Elem(), "append")
- checkwidth(as[i].Type) // ensure width is calculated for backend
+ checkwidth(as[i].Type()) // ensure width is calculated for backend
}
+ return n
- case OCOPY:
- ok |= ctxStmt | ctxExpr
- typecheckargs(n)
- if !twoarg(n) {
- n.Type = nil
- return n
- }
- n.Type = types.Types[TINT]
- if n.Left.Type == nil || n.Right.Type == nil {
- n.Type = nil
- return n
- }
- n.Left = defaultlit(n.Left, nil)
- n.Right = defaultlit(n.Right, nil)
- if n.Left.Type == nil || n.Right.Type == nil {
- n.Type = nil
+ case ir.OCOPY:
+ n.SetType(types.Types[types.TINT])
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ n.SetRight(typecheck(n.Right(), ctxExpr))
+ n.SetRight(defaultlit(n.Right(), nil))
+ if n.Left().Type() == nil || n.Right().Type() == nil {
+ n.SetType(nil)
return n
}
// copy([]byte, string)
- if n.Left.Type.IsSlice() && n.Right.Type.IsString() {
- if types.Identical(n.Left.Type.Elem(), types.Bytetype) {
- break
+ if n.Left().Type().IsSlice() && n.Right().Type().IsString() {
+ if types.Identical(n.Left().Type().Elem(), types.ByteType) {
+ return n
}
- yyerror("arguments to copy have different element types: %L and string", n.Left.Type)
- n.Type = nil
+ base.Errorf("arguments to copy have different element types: %L and string", n.Left().Type())
+ n.SetType(nil)
return n
}
- if !n.Left.Type.IsSlice() || !n.Right.Type.IsSlice() {
- if !n.Left.Type.IsSlice() && !n.Right.Type.IsSlice() {
- yyerror("arguments to copy must be slices; have %L, %L", n.Left.Type, n.Right.Type)
- } else if !n.Left.Type.IsSlice() {
- yyerror("first argument to copy should be slice; have %L", n.Left.Type)
+ if !n.Left().Type().IsSlice() || !n.Right().Type().IsSlice() {
+ if !n.Left().Type().IsSlice() && !n.Right().Type().IsSlice() {
+ base.Errorf("arguments to copy must be slices; have %L, %L", n.Left().Type(), n.Right().Type())
+ } else if !n.Left().Type().IsSlice() {
+ base.Errorf("first argument to copy should be slice; have %L", n.Left().Type())
} else {
- yyerror("second argument to copy should be slice or string; have %L", n.Right.Type)
+ base.Errorf("second argument to copy should be slice or string; have %L", n.Right().Type())
}
- n.Type = nil
+ n.SetType(nil)
return n
}
- if !types.Identical(n.Left.Type.Elem(), n.Right.Type.Elem()) {
- yyerror("arguments to copy have different element types: %L and %L", n.Left.Type, n.Right.Type)
- n.Type = nil
+ if !types.Identical(n.Left().Type().Elem(), n.Right().Type().Elem()) {
+ base.Errorf("arguments to copy have different element types: %L and %L", n.Left().Type(), n.Right().Type())
+ n.SetType(nil)
return n
}
+ return n
- case OCONV:
- ok |= ctxExpr
- checkwidth(n.Type) // ensure width is calculated for backend
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = convlit1(n.Left, n.Type, true, nil)
- t := n.Left.Type
- if t == nil || n.Type == nil {
- n.Type = nil
+ case ir.OCONV:
+ n := n.(*ir.ConvExpr)
+ checkwidth(n.Type()) // ensure width is calculated for backend
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(convlit1(n.Left(), n.Type(), true, nil))
+ t := n.Left().Type()
+ if t == nil || n.Type() == nil {
+ n.SetType(nil)
return n
}
- var why string
- n.Op, why = convertop(n.Left.Op == OLITERAL, t, n.Type)
- if n.Op == OXXX {
- if !n.Diag() && !n.Type.Broke() && !n.Left.Diag() {
- yyerror("cannot convert %L to type %v%s", n.Left, n.Type, why)
+ op, why := convertop(n.Left().Op() == ir.OLITERAL, t, n.Type())
+ if op == ir.OXXX {
+ if !n.Diag() && !n.Type().Broke() && !n.Left().Diag() {
+ base.Errorf("cannot convert %L to type %v%s", n.Left(), n.Type(), why)
n.SetDiag(true)
}
- n.Op = OCONV
- n.Type = nil
+ n.SetOp(ir.OCONV)
+ n.SetType(nil)
return n
}
- switch n.Op {
- case OCONVNOP:
- if t.Etype == n.Type.Etype {
- switch t.Etype {
- case TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128:
+ n.SetOp(op)
+ switch n.Op() {
+ case ir.OCONVNOP:
+ if t.Kind() == n.Type().Kind() {
+ switch t.Kind() {
+ case types.TFLOAT32, types.TFLOAT64, types.TCOMPLEX64, types.TCOMPLEX128:
// Floating point casts imply rounding and
// so the conversion must be kept.
- n.Op = OCONV
+ n.SetOp(ir.OCONV)
}
}
// do not convert to []byte literal. See CL 125796.
// generated code and compiler memory footprint is better without it.
- case OSTR2BYTES:
- break
+ case ir.OSTR2BYTES:
+ // ok
- case OSTR2RUNES:
- if n.Left.Op == OLITERAL {
- n = stringtoruneslit(n)
+ case ir.OSTR2RUNES:
+ if n.Left().Op() == ir.OLITERAL {
+ return stringtoruneslit(n)
}
}
+ return n
- case OMAKE:
- ok |= ctxExpr
- args := n.List.Slice()
+ case ir.OMAKE:
+ args := n.List().Slice()
if len(args) == 0 {
- yyerror("missing argument to make")
- n.Type = nil
+ base.Errorf("missing argument to make")
+ n.SetType(nil)
return n
}
- n.List.Set(nil)
+ n.PtrList().Set(nil)
l := args[0]
l = typecheck(l, ctxType)
- t := l.Type
+ t := l.Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
i := 1
- switch t.Etype {
+ var nn ir.Node
+ switch t.Kind() {
default:
- yyerror("cannot make type %v", t)
- n.Type = nil
+ base.Errorf("cannot make type %v", t)
+ n.SetType(nil)
return n
- case TSLICE:
+ case types.TSLICE:
if i >= len(args) {
- yyerror("missing len argument to make(%v)", t)
- n.Type = nil
+ base.Errorf("missing len argument to make(%v)", t)
+ n.SetType(nil)
return n
}
l = args[i]
i++
l = typecheck(l, ctxExpr)
- var r *Node
+ var r ir.Node
if i < len(args) {
r = args[i]
i++
r = typecheck(r, ctxExpr)
}
- if l.Type == nil || (r != nil && r.Type == nil) {
- n.Type = nil
+ if l.Type() == nil || (r != nil && r.Type() == nil) {
+ n.SetType(nil)
return n
}
if !checkmake(t, "len", &l) || r != nil && !checkmake(t, "cap", &r) {
- n.Type = nil
+ n.SetType(nil)
return n
}
- if Isconst(l, CTINT) && r != nil && Isconst(r, CTINT) && l.Val().U.(*Mpint).Cmp(r.Val().U.(*Mpint)) > 0 {
- yyerror("len larger than cap in make(%v)", t)
- n.Type = nil
+ if ir.IsConst(l, constant.Int) && r != nil && ir.IsConst(r, constant.Int) && constant.Compare(l.Val(), token.GTR, r.Val()) {
+ base.Errorf("len larger than cap in make(%v)", t)
+ n.SetType(nil)
return n
}
+ nn = ir.NodAt(n.Pos(), ir.OMAKESLICE, l, r)
- n.Left = l
- n.Right = r
- n.Op = OMAKESLICE
-
- case TMAP:
+ case types.TMAP:
if i < len(args) {
l = args[i]
i++
l = typecheck(l, ctxExpr)
- l = defaultlit(l, types.Types[TINT])
- if l.Type == nil {
- n.Type = nil
+ l = defaultlit(l, types.Types[types.TINT])
+ if l.Type() == nil {
+ n.SetType(nil)
return n
}
if !checkmake(t, "size", &l) {
- n.Type = nil
+ n.SetType(nil)
return n
}
- n.Left = l
} else {
- n.Left = nodintconst(0)
+ l = nodintconst(0)
}
- n.Op = OMAKEMAP
+ nn = ir.NodAt(n.Pos(), ir.OMAKEMAP, l, nil)
+ nn.SetEsc(n.Esc())
- case TCHAN:
+ case types.TCHAN:
l = nil
if i < len(args) {
l = args[i]
i++
l = typecheck(l, ctxExpr)
- l = defaultlit(l, types.Types[TINT])
- if l.Type == nil {
- n.Type = nil
+ l = defaultlit(l, types.Types[types.TINT])
+ if l.Type() == nil {
+ n.SetType(nil)
return n
}
if !checkmake(t, "buffer", &l) {
- n.Type = nil
+ n.SetType(nil)
return n
}
- n.Left = l
} else {
- n.Left = nodintconst(0)
+ l = nodintconst(0)
}
- n.Op = OMAKECHAN
+ nn = ir.NodAt(n.Pos(), ir.OMAKECHAN, l, nil)
}
if i < len(args) {
- yyerror("too many arguments to make(%v)", t)
- n.Op = OMAKE
- n.Type = nil
+ base.Errorf("too many arguments to make(%v)", t)
+ n.SetType(nil)
return n
}
- n.Type = t
+ nn.SetType(t)
+ return nn
- case ONEW:
- ok |= ctxExpr
- args := n.List
- if args.Len() == 0 {
- yyerror("missing argument to new")
- n.Type = nil
- return n
+ case ir.ONEW:
+ if n.Left() == nil {
+ // Fatalf because the OCALL above checked for us,
+ // so this must be an internally-generated mistake.
+ base.Fatalf("missing argument to new")
}
-
- l := args.First()
+ l := n.Left()
l = typecheck(l, ctxType)
- t := l.Type
+ t := l.Type()
if t == nil {
- n.Type = nil
- return n
- }
- if args.Len() > 1 {
- yyerror("too many arguments to new(%v)", t)
- n.Type = nil
+ n.SetType(nil)
return n
}
+ n.SetLeft(l)
+ n.SetType(types.NewPtr(t))
+ return n
- n.Left = l
- n.Type = types.NewPtr(t)
-
- case OPRINT, OPRINTN:
- ok |= ctxStmt
+ case ir.OPRINT, ir.OPRINTN:
typecheckargs(n)
- ls := n.List.Slice()
+ ls := n.List().Slice()
for i1, n1 := range ls {
// Special case for print: int constant is int64, not int.
- if Isconst(n1, CTINT) {
- ls[i1] = defaultlit(ls[i1], types.Types[TINT64])
+ if ir.IsConst(n1, constant.Int) {
+ ls[i1] = defaultlit(ls[i1], types.Types[types.TINT64])
} else {
ls[i1] = defaultlit(ls[i1], nil)
}
}
+ return n
- case OPANIC:
- ok |= ctxStmt
- if !onearg(n, "panic") {
- n.Type = nil
- return n
- }
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, types.Types[TINTER])
- if n.Left.Type == nil {
- n.Type = nil
+ case ir.OPANIC:
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), types.Types[types.TINTER]))
+ if n.Left().Type() == nil {
+ n.SetType(nil)
return n
}
+ return n
- case ORECOVER:
- ok |= ctxExpr | ctxStmt
- if n.List.Len() != 0 {
- yyerror("too many arguments to recover")
- n.Type = nil
+ case ir.ORECOVER:
+ if n.List().Len() != 0 {
+ base.Errorf("too many arguments to recover")
+ n.SetType(nil)
return n
}
- n.Type = types.Types[TINTER]
+ n.SetType(types.Types[types.TINTER])
+ return n
- case OCLOSURE:
- ok |= ctxExpr
+ case ir.OCLOSURE:
+ n := n.(*ir.ClosureExpr)
typecheckclosure(n, top)
- if n.Type == nil {
+ if n.Type() == nil {
return n
}
+ return n
- case OITAB:
- ok |= ctxExpr
- n.Left = typecheck(n.Left, ctxExpr)
- t := n.Left.Type
+ case ir.OITAB:
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ t := n.Left().Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
if !t.IsInterface() {
- Fatalf("OITAB of %v", t)
+ base.Fatalf("OITAB of %v", t)
}
- n.Type = types.NewPtr(types.Types[TUINTPTR])
+ n.SetType(types.NewPtr(types.Types[types.TUINTPTR]))
+ return n
- case OIDATA:
+ case ir.OIDATA:
// Whoever creates the OIDATA node must know a priori the concrete type at that moment,
// usually by just having checked the OITAB.
- Fatalf("cannot typecheck interface data %v", n)
+ base.Fatalf("cannot typecheck interface data %v", n)
+ panic("unreachable")
- case OSPTR:
- ok |= ctxExpr
- n.Left = typecheck(n.Left, ctxExpr)
- t := n.Left.Type
+ case ir.OSPTR:
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ t := n.Left().Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
if !t.IsSlice() && !t.IsString() {
- Fatalf("OSPTR of %v", t)
+ base.Fatalf("OSPTR of %v", t)
}
if t.IsString() {
- n.Type = types.NewPtr(types.Types[TUINT8])
+ n.SetType(types.NewPtr(types.Types[types.TUINT8]))
} else {
- n.Type = types.NewPtr(t.Elem())
+ n.SetType(types.NewPtr(t.Elem()))
}
+ return n
- case OCLOSUREVAR:
- ok |= ctxExpr
+ case ir.OCLOSUREREAD:
+ return n
- case OCFUNC:
- ok |= ctxExpr
- n.Left = typecheck(n.Left, ctxExpr)
- n.Type = types.Types[TUINTPTR]
+ case ir.OCFUNC:
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetType(types.Types[types.TUINTPTR])
+ return n
- case OCONVNOP:
- ok |= ctxExpr
- n.Left = typecheck(n.Left, ctxExpr)
+ case ir.OCONVNOP:
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ return n
// statements
- case OAS:
- ok |= ctxStmt
-
+ case ir.OAS:
+ n := n.(*ir.AssignStmt)
typecheckas(n)
// Code that creates temps does not bother to set defn, so do it here.
- if n.Left.Op == ONAME && n.Left.IsAutoTmp() {
- n.Left.Name.Defn = n
- }
-
- case OAS2:
- ok |= ctxStmt
- typecheckas2(n)
-
- case OBREAK,
- OCONTINUE,
- ODCL,
- OEMPTY,
- OGOTO,
- OFALL,
- OVARKILL,
- OVARLIVE:
- ok |= ctxStmt
-
- case OLABEL:
- ok |= ctxStmt
+ if n.Left().Op() == ir.ONAME && ir.IsAutoTmp(n.Left()) {
+ n.Left().Name().Defn = n
+ }
+ return n
+
+ case ir.OAS2:
+ typecheckas2(n.(*ir.AssignListStmt))
+ return n
+
+ case ir.OBREAK,
+ ir.OCONTINUE,
+ ir.ODCL,
+ ir.OGOTO,
+ ir.OFALL,
+ ir.OVARKILL,
+ ir.OVARLIVE:
+ return n
+
+ case ir.OBLOCK:
+ typecheckslice(n.List().Slice(), ctxStmt)
+ return n
+
+ case ir.OLABEL:
decldepth++
- if n.Sym.IsBlank() {
+ if n.Sym().IsBlank() {
// Empty identifier is valid but useless.
// Eliminate now to simplify life later.
// See issues 7538, 11589, 11593.
- n.Op = OEMPTY
- n.Left = nil
+ n = ir.NodAt(n.Pos(), ir.OBLOCK, nil, nil)
}
+ return n
- case ODEFER:
- ok |= ctxStmt
- n.Left = typecheck(n.Left, ctxStmt|ctxExpr)
- if !n.Left.Diag() {
+ case ir.ODEFER, ir.OGO:
+ n := n.(*ir.GoDeferStmt)
+ n.SetLeft(typecheck(n.Left(), ctxStmt|ctxExpr))
+ if !n.Left().Diag() {
checkdefergo(n)
}
+ return n
- case OGO:
- ok |= ctxStmt
- n.Left = typecheck(n.Left, ctxStmt|ctxExpr)
- checkdefergo(n)
-
- case OFOR, OFORUNTIL:
- ok |= ctxStmt
- typecheckslice(n.Ninit.Slice(), ctxStmt)
+ case ir.OFOR, ir.OFORUNTIL:
+ typecheckslice(n.Init().Slice(), ctxStmt)
decldepth++
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- if n.Left != nil {
- t := n.Left.Type
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ if n.Left() != nil {
+ t := n.Left().Type()
if t != nil && !t.IsBoolean() {
- yyerror("non-bool %L used as for condition", n.Left)
+ base.Errorf("non-bool %L used as for condition", n.Left())
}
}
- n.Right = typecheck(n.Right, ctxStmt)
- if n.Op == OFORUNTIL {
- typecheckslice(n.List.Slice(), ctxStmt)
+ n.SetRight(typecheck(n.Right(), ctxStmt))
+ if n.Op() == ir.OFORUNTIL {
+ typecheckslice(n.List().Slice(), ctxStmt)
}
- typecheckslice(n.Nbody.Slice(), ctxStmt)
+ typecheckslice(n.Body().Slice(), ctxStmt)
decldepth--
+ return n
- case OIF:
- ok |= ctxStmt
- typecheckslice(n.Ninit.Slice(), ctxStmt)
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- if n.Left != nil {
- t := n.Left.Type
+ case ir.OIF:
+ typecheckslice(n.Init().Slice(), ctxStmt)
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ if n.Left() != nil {
+ t := n.Left().Type()
if t != nil && !t.IsBoolean() {
- yyerror("non-bool %L used as if condition", n.Left)
+ base.Errorf("non-bool %L used as if condition", n.Left())
}
}
- typecheckslice(n.Nbody.Slice(), ctxStmt)
- typecheckslice(n.Rlist.Slice(), ctxStmt)
+ typecheckslice(n.Body().Slice(), ctxStmt)
+ typecheckslice(n.Rlist().Slice(), ctxStmt)
+ return n
- case ORETURN:
- ok |= ctxStmt
+ case ir.ORETURN:
typecheckargs(n)
if Curfn == nil {
- yyerror("return outside function")
- n.Type = nil
+ base.Errorf("return outside function")
+ n.SetType(nil)
return n
}
- if Curfn.Type.FuncType().Outnamed && n.List.Len() == 0 {
- break
+ if hasNamedResults(Curfn) && n.List().Len() == 0 {
+ return n
}
- typecheckaste(ORETURN, nil, false, Curfn.Type.Results(), n.List, func() string { return "return argument" })
-
- case ORETJMP:
- ok |= ctxStmt
-
- case OSELECT:
- ok |= ctxStmt
- typecheckselect(n)
-
- case OSWITCH:
- ok |= ctxStmt
- typecheckswitch(n)
-
- case ORANGE:
- ok |= ctxStmt
- typecheckrange(n)
-
- case OTYPESW:
- yyerror("use of .(type) outside type switch")
- n.Type = nil
+ typecheckaste(ir.ORETURN, nil, false, Curfn.Type().Results(), n.List(), func() string { return "return argument" })
return n
- case ODCLFUNC:
- ok |= ctxStmt
- typecheckfunc(n)
-
- case ODCLCONST:
- ok |= ctxStmt
- n.Left = typecheck(n.Left, ctxExpr)
-
- case ODCLTYPE:
- ok |= ctxStmt
- n.Left = typecheck(n.Left, ctxType)
- checkwidth(n.Left.Type)
- }
+ case ir.ORETJMP:
+ return n
- t := n.Type
- if t != nil && !t.IsFuncArgStruct() && n.Op != OTYPE {
- switch t.Etype {
- case TFUNC, // might have TANY; wait until it's called
- TANY, TFORW, TIDEAL, TNIL, TBLANK:
- break
+ case ir.OSELECT:
+ typecheckselect(n.(*ir.SelectStmt))
+ return n
- default:
- checkwidth(t)
- }
- }
+ case ir.OSWITCH:
+ typecheckswitch(n.(*ir.SwitchStmt))
+ return n
- evconst(n)
- if n.Op == OTYPE && top&ctxType == 0 {
- if !n.Type.Broke() {
- yyerror("type %v is not an expression", n.Type)
- }
- n.Type = nil
+ case ir.ORANGE:
+ typecheckrange(n.(*ir.RangeStmt))
return n
- }
- if top&(ctxExpr|ctxType) == ctxType && n.Op != OTYPE {
- yyerror("%v is not a type", n)
- n.Type = nil
+ case ir.OTYPESW:
+ base.Errorf("use of .(type) outside type switch")
+ n.SetType(nil)
return n
- }
- // TODO(rsc): simplify
- if (top&(ctxCallee|ctxExpr|ctxType) != 0) && top&ctxStmt == 0 && ok&(ctxExpr|ctxType|ctxCallee) == 0 {
- yyerror("%v used as value", n)
- n.Type = nil
+ case ir.ODCLFUNC:
+ typecheckfunc(n.(*ir.Func))
return n
- }
- if (top&ctxStmt != 0) && top&(ctxCallee|ctxExpr|ctxType) == 0 && ok&ctxStmt == 0 {
- if !n.Diag() {
- yyerror("%v evaluated but not used", n)
- n.SetDiag(true)
- }
+ case ir.ODCLCONST:
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ return n
- n.Type = nil
+ case ir.ODCLTYPE:
+ n.SetLeft(typecheck(n.Left(), ctxType))
+ checkwidth(n.Left().Type())
return n
}
- return n
+ // No return n here!
+ // Individual cases can type-assert n, introducing a new one.
+ // Each must execute its own return n.
}
-func typecheckargs(n *Node) {
- if n.List.Len() != 1 || n.IsDDD() {
- typecheckslice(n.List.Slice(), ctxExpr)
+func typecheckargs(n ir.Node) {
+ var list []ir.Node
+ switch n := n.(type) {
+ default:
+ base.Fatalf("typecheckargs %+v", n.Op())
+ case *ir.CallExpr:
+ list = n.List().Slice()
+ if n.IsDDD() {
+ typecheckslice(list, ctxExpr)
+ return
+ }
+ case *ir.ReturnStmt:
+ list = n.List().Slice()
+ }
+ if len(list) != 1 {
+ typecheckslice(list, ctxExpr)
return
}
- typecheckslice(n.List.Slice(), ctxExpr|ctxMultiOK)
- t := n.List.First().Type
+ typecheckslice(list, ctxExpr|ctxMultiOK)
+ t := list[0].Type()
if t == nil || !t.IsFuncArgStruct() {
return
}
// Rewrite f(g()) into t1, t2, ... = g(); f(t1, t2, ...).
// Save n as n.Orig for fmt.go.
- if n.Orig == n {
- n.Orig = n.sepcopy()
+ if ir.Orig(n) == n {
+ n.(ir.OrigNode).SetOrig(ir.SepCopy(n))
}
- as := nod(OAS2, nil, nil)
- as.Rlist.AppendNodes(&n.List)
+ as := ir.Nod(ir.OAS2, nil, nil)
+ as.PtrRlist().Append(list...)
// If we're outside of function context, then this call will
// be executed during the generated init function. However,
// init.go hasn't yet created it. Instead, associate the
- // temporary variables with dummyInitFn for now, and init.go
+ // temporary variables with initTodo for now, and init.go
// will reassociate them later when it's appropriate.
static := Curfn == nil
if static {
- Curfn = dummyInitFn
+ Curfn = initTodo
}
+ list = nil
for _, f := range t.FieldSlice() {
t := temp(f.Type)
- as.Ninit.Append(nod(ODCL, t, nil))
- as.List.Append(t)
- n.List.Append(t)
+ as.PtrInit().Append(ir.Nod(ir.ODCL, t, nil))
+ as.PtrList().Append(t)
+ list = append(list, t)
}
if static {
Curfn = nil
}
- as = typecheck(as, ctxStmt)
- n.Ninit.Append(as)
+ switch n := n.(type) {
+ case *ir.CallExpr:
+ n.PtrList().Set(list)
+ case *ir.ReturnStmt:
+ n.PtrList().Set(list)
+ }
+
+ n.PtrInit().Append(typecheck(as, ctxStmt))
}
-func checksliceindex(l *Node, r *Node, tp *types.Type) bool {
- t := r.Type
+func checksliceindex(l ir.Node, r ir.Node, tp *types.Type) bool {
+ t := r.Type()
if t == nil {
return false
}
if !t.IsInteger() {
- yyerror("invalid slice index %v (type %v)", r, t)
+ base.Errorf("invalid slice index %v (type %v)", r, t)
return false
}
- if r.Op == OLITERAL {
- if r.Int64Val() < 0 {
- yyerror("invalid slice index %v (index must be non-negative)", r)
+ if r.Op() == ir.OLITERAL {
+ x := r.Val()
+ if constant.Sign(x) < 0 {
+ base.Errorf("invalid slice index %v (index must be non-negative)", r)
return false
- } else if tp != nil && tp.NumElem() >= 0 && r.Int64Val() > tp.NumElem() {
- yyerror("invalid slice index %v (out of bounds for %d-element array)", r, tp.NumElem())
+ } else if tp != nil && tp.NumElem() >= 0 && constant.Compare(x, token.GTR, constant.MakeInt64(tp.NumElem())) {
+ base.Errorf("invalid slice index %v (out of bounds for %d-element array)", r, tp.NumElem())
return false
- } else if Isconst(l, CTSTR) && r.Int64Val() > int64(len(l.StringVal())) {
- yyerror("invalid slice index %v (out of bounds for %d-byte string)", r, len(l.StringVal()))
+ } else if ir.IsConst(l, constant.String) && constant.Compare(x, token.GTR, constant.MakeInt64(int64(len(ir.StringVal(l))))) {
+ base.Errorf("invalid slice index %v (out of bounds for %d-byte string)", r, len(ir.StringVal(l)))
return false
- } else if r.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 {
- yyerror("invalid slice index %v (index too large)", r)
+ } else if doesoverflow(x, types.Types[types.TINT]) {
+ base.Errorf("invalid slice index %v (index too large)", r)
return false
}
}
return true
}
-func checksliceconst(lo *Node, hi *Node) bool {
- if lo != nil && hi != nil && lo.Op == OLITERAL && hi.Op == OLITERAL && lo.Val().U.(*Mpint).Cmp(hi.Val().U.(*Mpint)) > 0 {
- yyerror("invalid slice index: %v > %v", lo, hi)
+func checksliceconst(lo ir.Node, hi ir.Node) bool {
+ if lo != nil && hi != nil && lo.Op() == ir.OLITERAL && hi.Op() == ir.OLITERAL && constant.Compare(lo.Val(), token.GTR, hi.Val()) {
+ base.Errorf("invalid slice index: %v > %v", lo, hi)
return false
}
return true
}
-func checkdefergo(n *Node) {
+func checkdefergo(n *ir.GoDeferStmt) {
what := "defer"
- if n.Op == OGO {
+ if n.Op() == ir.OGO {
what = "go"
}
- switch n.Left.Op {
+ switch n.Left().Op() {
// ok
- case OCALLINTER,
- OCALLMETH,
- OCALLFUNC,
- OCLOSE,
- OCOPY,
- ODELETE,
- OPANIC,
- OPRINT,
- OPRINTN,
- ORECOVER:
+ case ir.OCALLINTER,
+ ir.OCALLMETH,
+ ir.OCALLFUNC,
+ ir.OCLOSE,
+ ir.OCOPY,
+ ir.ODELETE,
+ ir.OPANIC,
+ ir.OPRINT,
+ ir.OPRINTN,
+ ir.ORECOVER:
return
- case OAPPEND,
- OCAP,
- OCOMPLEX,
- OIMAG,
- OLEN,
- OMAKE,
- OMAKESLICE,
- OMAKECHAN,
- OMAKEMAP,
- ONEW,
- OREAL,
- OLITERAL: // conversion or unsafe.Alignof, Offsetof, Sizeof
- if n.Left.Orig != nil && n.Left.Orig.Op == OCONV {
+ case ir.OAPPEND,
+ ir.OCAP,
+ ir.OCOMPLEX,
+ ir.OIMAG,
+ ir.OLEN,
+ ir.OMAKE,
+ ir.OMAKESLICE,
+ ir.OMAKECHAN,
+ ir.OMAKEMAP,
+ ir.ONEW,
+ ir.OREAL,
+ ir.OLITERAL: // conversion or unsafe.Alignof, Offsetof, Sizeof
+ if orig := ir.Orig(n.Left()); orig.Op() == ir.OCONV {
break
}
- yyerrorl(n.Pos, "%s discards result of %v", what, n.Left)
+ base.ErrorfAt(n.Pos(), "%s discards result of %v", what, n.Left())
return
}
// type is broken or missing, most likely a method call on a broken type
// we will warn about the broken type elsewhere. no need to emit a potentially confusing error
- if n.Left.Type == nil || n.Left.Type.Broke() {
+ if n.Left().Type() == nil || n.Left().Type().Broke() {
return
}
// The syntax made sure it was a call, so this must be
// a conversion.
n.SetDiag(true)
- yyerrorl(n.Pos, "%s requires function call, not conversion", what)
+ base.ErrorfAt(n.Pos(), "%s requires function call, not conversion", what)
}
}
// The result of implicitstar MUST be assigned back to n, e.g.
// n.Left = implicitstar(n.Left)
-func implicitstar(n *Node) *Node {
+func implicitstar(n ir.Node) ir.Node {
// insert implicit * if needed for fixed array
- t := n.Type
+ t := n.Type()
if t == nil || !t.IsPtr() {
return n
}
if !t.IsArray() {
return n
}
- n = nod(ODEREF, n, nil)
- n.SetImplicit(true)
- n = typecheck(n, ctxExpr)
- return n
+ star := ir.Nod(ir.ODEREF, n, nil)
+ star.SetImplicit(true)
+ return typecheck(star, ctxExpr)
}
-func onearg(n *Node, f string, args ...interface{}) bool {
- if n.Left != nil {
- return true
- }
- if n.List.Len() == 0 {
+func needOneArg(n *ir.CallExpr, f string, args ...interface{}) (ir.Node, bool) {
+ if n.List().Len() == 0 {
p := fmt.Sprintf(f, args...)
- yyerror("missing argument to %s: %v", p, n)
- return false
+ base.Errorf("missing argument to %s: %v", p, n)
+ return nil, false
}
- if n.List.Len() > 1 {
+ if n.List().Len() > 1 {
p := fmt.Sprintf(f, args...)
- yyerror("too many arguments to %s: %v", p, n)
- n.Left = n.List.First()
- n.List.Set(nil)
- return false
+ base.Errorf("too many arguments to %s: %v", p, n)
+ return n.List().First(), false
}
- n.Left = n.List.First()
- n.List.Set(nil)
- return true
+ return n.List().First(), true
}
-func twoarg(n *Node) bool {
- if n.Left != nil {
- return true
- }
- if n.List.Len() != 2 {
- if n.List.Len() < 2 {
- yyerror("not enough arguments in call to %v", n)
+func needTwoArgs(n *ir.CallExpr) (ir.Node, ir.Node, bool) {
+ if n.List().Len() != 2 {
+ if n.List().Len() < 2 {
+ base.Errorf("not enough arguments in call to %v", n)
} else {
- yyerror("too many arguments in call to %v", n)
+ base.Errorf("too many arguments in call to %v", n)
}
- return false
+ return nil, nil, false
}
- n.Left = n.List.First()
- n.Right = n.List.Second()
- n.List.Set(nil)
- return true
+ return n.List().First(), n.List().Second(), true
}
-func lookdot1(errnode *Node, s *types.Sym, t *types.Type, fs *types.Fields, dostrcmp int) *types.Field {
+func lookdot1(errnode ir.Node, s *types.Sym, t *types.Type, fs *types.Fields, dostrcmp int) *types.Field {
var r *types.Field
for _, f := range fs.Slice() {
if dostrcmp != 0 && f.Sym.Name == s.Name {
}
if r != nil {
if errnode != nil {
- yyerror("ambiguous selector %v", errnode)
+ base.Errorf("ambiguous selector %v", errnode)
} else if t.IsPtr() {
- yyerror("ambiguous selector (%v).%v", t, s)
+ base.Errorf("ambiguous selector (%v).%v", t, s)
} else {
- yyerror("ambiguous selector %v.%v", t, s)
+ base.Errorf("ambiguous selector %v.%v", t, s)
}
break
}
// typecheckMethodExpr checks selector expressions (ODOT) where the
// base expression is a type expression (OTYPE).
-func typecheckMethodExpr(n *Node) (res *Node) {
- if enableTrace && trace {
+func typecheckMethodExpr(n *ir.SelectorExpr) (res ir.Node) {
+ if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheckMethodExpr", n)(&res)
}
- t := n.Left.Type
+ t := n.Left().Type()
// Compute the method set for t.
var ms *types.Fields
} else {
mt := methtype(t)
if mt == nil {
- yyerror("%v undefined (type %v has no method %v)", n, t, n.Sym)
- n.Type = nil
+ base.Errorf("%v undefined (type %v has no method %v)", n, t, n.Sym())
+ n.SetType(nil)
return n
}
expandmeth(mt)
// types declared at package scope. However, we need
// to make sure to generate wrappers for anonymous
// receiver types too.
- if mt.Sym == nil {
- addsignat(t)
+ if mt.Sym() == nil {
+ NeedRuntimeType(t)
}
}
- s := n.Sym
+ s := n.Sym()
m := lookdot1(n, s, t, ms, 0)
if m == nil {
if lookdot1(n, s, t, ms, 1) != nil {
- yyerror("%v undefined (cannot refer to unexported method %v)", n, s)
+ base.Errorf("%v undefined (cannot refer to unexported method %v)", n, s)
} else if _, ambig := dotpath(s, t, nil, false); ambig {
- yyerror("%v undefined (ambiguous selector)", n) // method or field
+ base.Errorf("%v undefined (ambiguous selector)", n) // method or field
} else {
- yyerror("%v undefined (type %v has no method %v)", n, t, s)
+ base.Errorf("%v undefined (type %v has no method %v)", n, t, s)
}
- n.Type = nil
+ n.SetType(nil)
return n
}
if !isMethodApplicable(t, m) {
- yyerror("invalid method expression %v (needs pointer receiver: (*%v).%S)", n, t, s)
- n.Type = nil
+ base.Errorf("invalid method expression %v (needs pointer receiver: (*%v).%S)", n, t, s)
+ n.SetType(nil)
return n
}
- n.Op = ONAME
- if n.Name == nil {
- n.Name = new(Name)
- }
- n.Right = newname(n.Sym)
- n.Sym = methodSym(t, n.Sym)
- n.Type = methodfunc(m.Type, n.Left.Type)
- n.Xoffset = 0
- n.SetClass(PFUNC)
- // methodSym already marked n.Sym as a function.
+ me := ir.NewMethodExpr(n.Pos(), n.Left().Type(), m)
+ me.SetType(methodfunc(m.Type, n.Left().Type()))
+ f := NewName(methodSym(t, m.Sym))
+ f.SetClass(ir.PFUNC)
+ f.SetType(me.Type())
+ me.FuncName_ = f
// Issue 25065. Make sure that we emit the symbol for a local method.
- if Ctxt.Flag_dynlink && !inimport && (t.Sym == nil || t.Sym.Pkg == localpkg) {
- makefuncsym(n.Sym)
+ if base.Ctxt.Flag_dynlink && !inimport && (t.Sym() == nil || t.Sym().Pkg == types.LocalPkg) {
+ NeedFuncSym(me.FuncName_.Sym())
}
- return n
+ return me
}
// isMethodApplicable reports whether method m can be called on a
return t
}
-func lookdot(n *Node, t *types.Type, dostrcmp int) *types.Field {
- s := n.Sym
+func lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field {
+ s := n.Sym()
dowidth(t)
var f1 *types.Field
}
var f2 *types.Field
- if n.Left.Type == t || n.Left.Type.Sym == nil {
+ if n.Left().Type() == t || n.Left().Type().Sym() == nil {
mt := methtype(t)
if mt != nil {
f2 = lookdot1(n, s, mt, mt.Methods(), dostrcmp)
return f1
}
if f2 != nil {
- yyerror("%v is both field and method", n.Sym)
+ base.Errorf("%v is both field and method", n.Sym())
}
- if f1.Offset == BADWIDTH {
- Fatalf("lookdot badwidth %v %p", f1, f1)
+ if f1.Offset == types.BADWIDTH {
+ base.Fatalf("lookdot badwidth %v %p", f1, f1)
}
- n.Xoffset = f1.Offset
- n.Type = f1.Type
+ n.SetOffset(f1.Offset)
+ n.SetType(f1.Type)
if t.IsInterface() {
- if n.Left.Type.IsPtr() {
- n.Left = nod(ODEREF, n.Left, nil) // implicitstar
- n.Left.SetImplicit(true)
- n.Left = typecheck(n.Left, ctxExpr)
+ if n.Left().Type().IsPtr() {
+ star := ir.Nod(ir.ODEREF, n.Left(), nil)
+ star.SetImplicit(true)
+ n.SetLeft(typecheck(star, ctxExpr))
}
- n.Op = ODOTINTER
- } else {
- n.SetOpt(f1)
+ n.SetOp(ir.ODOTINTER)
}
-
+ n.Selection = f1
return f1
}
// Already in the process of diagnosing an error.
return f2
}
- tt := n.Left.Type
+ tt := n.Left().Type()
dowidth(tt)
rcvr := f2.Type.Recv().Type
if !types.Identical(rcvr, tt) {
if rcvr.IsPtr() && types.Identical(rcvr.Elem(), tt) {
- checklvalue(n.Left, "call pointer method on")
- n.Left = nod(OADDR, n.Left, nil)
- n.Left.SetImplicit(true)
- n.Left = typecheck(n.Left, ctxType|ctxExpr)
+ checklvalue(n.Left(), "call pointer method on")
+ addr := nodAddr(n.Left())
+ addr.SetImplicit(true)
+ n.SetLeft(typecheck(addr, ctxType|ctxExpr))
} else if tt.IsPtr() && (!rcvr.IsPtr() || rcvr.IsPtr() && rcvr.Elem().NotInHeap()) && types.Identical(tt.Elem(), rcvr) {
- n.Left = nod(ODEREF, n.Left, nil)
- n.Left.SetImplicit(true)
- n.Left = typecheck(n.Left, ctxType|ctxExpr)
+ star := ir.Nod(ir.ODEREF, n.Left(), nil)
+ star.SetImplicit(true)
+ n.SetLeft(typecheck(star, ctxType|ctxExpr))
} else if tt.IsPtr() && tt.Elem().IsPtr() && types.Identical(derefall(tt), derefall(rcvr)) {
- yyerror("calling method %v with receiver %L requires explicit dereference", n.Sym, n.Left)
+ base.Errorf("calling method %v with receiver %L requires explicit dereference", n.Sym(), n.Left())
for tt.IsPtr() {
// Stop one level early for method with pointer receiver.
if rcvr.IsPtr() && !tt.Elem().IsPtr() {
break
}
- n.Left = nod(ODEREF, n.Left, nil)
- n.Left.SetImplicit(true)
- n.Left = typecheck(n.Left, ctxType|ctxExpr)
+ star := ir.Nod(ir.ODEREF, n.Left(), nil)
+ star.SetImplicit(true)
+ n.SetLeft(typecheck(star, ctxType|ctxExpr))
tt = tt.Elem()
}
} else {
- Fatalf("method mismatch: %v for %v", rcvr, tt)
+ base.Fatalf("method mismatch: %v for %v", rcvr, tt)
}
}
- pll := n
- ll := n.Left
- for ll.Left != nil && (ll.Op == ODOT || ll.Op == ODOTPTR || ll.Op == ODEREF) {
- pll = ll
- ll = ll.Left
+ implicit, ll := n.Implicit(), n.Left()
+ for ll != nil && (ll.Op() == ir.ODOT || ll.Op() == ir.ODOTPTR || ll.Op() == ir.ODEREF) {
+ switch l := ll.(type) {
+ case *ir.SelectorExpr:
+ implicit, ll = l.Implicit(), l.Left()
+ case *ir.StarExpr:
+ implicit, ll = l.Implicit(), l.Left()
+ }
}
- if pll.Implicit() && ll.Type.IsPtr() && ll.Type.Sym != nil && asNode(ll.Type.Sym.Def) != nil && asNode(ll.Type.Sym.Def).Op == OTYPE {
+ if implicit && ll.Type().IsPtr() && ll.Type().Sym() != nil && ll.Type().Sym().Def != nil && ir.AsNode(ll.Type().Sym().Def).Op() == ir.OTYPE {
// It is invalid to automatically dereference a named pointer type when selecting a method.
// Make n.Left == ll to clarify error message.
- n.Left = ll
+ n.SetLeft(ll)
return nil
}
- n.Sym = methodSym(n.Left.Type, f2.Sym)
- n.Xoffset = f2.Offset
- n.Type = f2.Type
- n.Op = ODOTMETH
+ n.SetSym(methodSym(n.Left().Type(), f2.Sym))
+ n.SetOffset(f2.Offset)
+ n.SetType(f2.Type)
+ n.SetOp(ir.ODOTMETH)
+ n.Selection = f2
return f2
}
return nil
}
-func nokeys(l Nodes) bool {
+func nokeys(l ir.Nodes) bool {
for _, n := range l.Slice() {
- if n.Op == OKEY || n.Op == OSTRUCTKEY {
+ if n.Op() == ir.OKEY || n.Op() == ir.OSTRUCTKEY {
return false
}
}
}
// typecheck assignment: type list = expression list
-func typecheckaste(op Op, call *Node, isddd bool, tstruct *types.Type, nl Nodes, desc func() string) {
+func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl ir.Nodes, desc func() string) {
var t *types.Type
var i int
- lno := lineno
- defer func() { lineno = lno }()
+ lno := base.Pos
+ defer func() { base.Pos = lno }()
if tstruct.Broke() {
return
}
- var n *Node
+ var n ir.Node
if nl.Len() == 1 {
n = nl.First()
}
}
n = nl.Index(i)
setlineno(n)
- if n.Type != nil {
+ if n.Type() != nil {
nl.SetIndex(i, assignconvfn(n, t, desc))
}
return
for ; i < nl.Len(); i++ {
n = nl.Index(i)
setlineno(n)
- if n.Type != nil {
+ if n.Type() != nil {
nl.SetIndex(i, assignconvfn(n, t.Elem(), desc))
}
}
}
n = nl.Index(i)
setlineno(n)
- if n.Type != nil {
+ if n.Type() != nil {
nl.SetIndex(i, assignconvfn(n, t, desc))
}
i++
}
if isddd {
if call != nil {
- yyerror("invalid use of ... in call to %v", call)
+ base.Errorf("invalid use of ... in call to %v", call)
} else {
- yyerror("invalid use of ... in %v", op)
+ base.Errorf("invalid use of ... in %v", op)
}
}
return
notenough:
- if n == nil || (!n.Diag() && n.Type != nil) {
+ if n == nil || (!n.Diag() && n.Type() != nil) {
details := errorDetails(nl, tstruct, isddd)
if call != nil {
// call is the expression being called, not the overall call.
// Method expressions have the form T.M, and the compiler has
// rewritten those to ONAME nodes but left T in Left.
- if call.isMethodExpression() {
- yyerror("not enough arguments in call to method expression %v%s", call, details)
+ if call.Op() == ir.OMETHEXPR {
+ base.Errorf("not enough arguments in call to method expression %v%s", call, details)
} else {
- yyerror("not enough arguments in call to %v%s", call, details)
+ base.Errorf("not enough arguments in call to %v%s", call, details)
}
} else {
- yyerror("not enough arguments to %v%s", op, details)
+ base.Errorf("not enough arguments to %v%s", op, details)
}
if n != nil {
n.SetDiag(true)
toomany:
details := errorDetails(nl, tstruct, isddd)
if call != nil {
- yyerror("too many arguments in call to %v%s", call, details)
+ base.Errorf("too many arguments in call to %v%s", call, details)
} else {
- yyerror("too many arguments to %v%s", op, details)
+ base.Errorf("too many arguments to %v%s", op, details)
}
}
-func errorDetails(nl Nodes, tstruct *types.Type, isddd bool) string {
+func errorDetails(nl ir.Nodes, tstruct *types.Type, isddd bool) string {
// If we don't know any type at a call site, let's suppress any return
// message signatures. See Issue https://golang.org/issues/19012.
if tstruct == nil {
}
// If any node has an unknown type, suppress it as well
for _, n := range nl.Slice() {
- if n.Type == nil {
+ if n.Type() == nil {
return ""
}
}
- return fmt.Sprintf("\n\thave %s\n\twant %v", nl.sigerr(isddd), tstruct)
+ return fmt.Sprintf("\n\thave %s\n\twant %v", fmtSignature(nl, isddd), tstruct)
}
// sigrepr is a type's representation to the outside world,
return "bool"
}
- if t.Etype == TIDEAL {
+ if t.Kind() == types.TIDEAL {
// "untyped number" is not commonly used
// outside of the compiler, so let's use "number".
// TODO(mdempsky): Revisit this.
// Turn []T... argument to ...T for clearer error message.
if isddd {
if !t.IsSlice() {
- Fatalf("bad type for ... argument: %v", t)
+ base.Fatalf("bad type for ... argument: %v", t)
}
return "..." + t.Elem().String()
}
}
// sigerr returns the signature of the types at the call or return.
-func (nl Nodes) sigerr(isddd bool) string {
+func fmtSignature(nl ir.Nodes, isddd bool) string {
if nl.Len() < 1 {
return "()"
}
var typeStrings []string
for i, n := range nl.Slice() {
isdddArg := isddd && i == nl.Len()-1
- typeStrings = append(typeStrings, sigrepr(n.Type, isdddArg))
+ typeStrings = append(typeStrings, sigrepr(n.Type(), isdddArg))
}
return fmt.Sprintf("(%s)", strings.Join(typeStrings, ", "))
// type check composite
func fielddup(name string, hash map[string]bool) {
if hash[name] {
- yyerror("duplicate field name in struct literal: %s", name)
+ base.Errorf("duplicate field name in struct literal: %s", name)
return
}
hash[name] = true
// iscomptype reports whether type t is a composite literal type.
func iscomptype(t *types.Type) bool {
- switch t.Etype {
- case TARRAY, TSLICE, TSTRUCT, TMAP:
+ switch t.Kind() {
+ case types.TARRAY, types.TSLICE, types.TSTRUCT, types.TMAP:
return true
default:
return false
// pushtype adds elided type information for composite literals if
// appropriate, and returns the resulting expression.
-func pushtype(n *Node, t *types.Type) *Node {
- if n == nil || n.Op != OCOMPLIT || n.Right != nil {
+func pushtype(nn ir.Node, t *types.Type) ir.Node {
+ if nn == nil || nn.Op() != ir.OCOMPLIT {
+ return nn
+ }
+ n := nn.(*ir.CompLitExpr)
+ if n.Right() != nil {
return n
}
switch {
case iscomptype(t):
// For T, return T{...}.
- n.Right = typenod(t)
+ n.SetRight(ir.TypeNode(t))
case t.IsPtr() && iscomptype(t.Elem()):
// For *T, return &T{...}.
- n.Right = typenod(t.Elem())
+ n.SetRight(ir.TypeNode(t.Elem()))
- n = nodl(n.Pos, OADDR, n, nil)
- n.SetImplicit(true)
+ addr := nodAddrAt(n.Pos(), n)
+ addr.SetImplicit(true)
+ return addr
}
-
return n
}
// The result of typecheckcomplit MUST be assigned back to n, e.g.
// n.Left = typecheckcomplit(n.Left)
-func typecheckcomplit(n *Node) (res *Node) {
- if enableTrace && trace {
+func typecheckcomplit(n *ir.CompLitExpr) (res ir.Node) {
+ if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheckcomplit", n)(&res)
}
- lno := lineno
+ lno := base.Pos
defer func() {
- lineno = lno
+ base.Pos = lno
}()
- if n.Right == nil {
- yyerrorl(n.Pos, "missing type in composite literal")
- n.Type = nil
+ if n.Right() == nil {
+ base.ErrorfAt(n.Pos(), "missing type in composite literal")
+ n.SetType(nil)
return n
}
// Save original node (including n.Right)
- n.Orig = n.copy()
+ n.SetOrig(ir.Copy(n))
- setlineno(n.Right)
+ setlineno(n.Right())
// Need to handle [...]T arrays specially.
- if n.Right.Op == OTARRAY && n.Right.Left != nil && n.Right.Left.Op == ODDD {
- n.Right.Right = typecheck(n.Right.Right, ctxType)
- if n.Right.Right.Type == nil {
- n.Type = nil
+ if array, ok := n.Right().(*ir.ArrayType); ok && array.Elem != nil && array.Len == nil {
+ array.Elem = typecheck(array.Elem, ctxType)
+ elemType := array.Elem.Type()
+ if elemType == nil {
+ n.SetType(nil)
return n
}
- elemType := n.Right.Right.Type
-
- length := typecheckarraylit(elemType, -1, n.List.Slice(), "array literal")
-
- n.Op = OARRAYLIT
- n.Type = types.NewArray(elemType, length)
- n.Right = nil
+ length := typecheckarraylit(elemType, -1, n.List().Slice(), "array literal")
+ n.SetOp(ir.OARRAYLIT)
+ n.SetType(types.NewArray(elemType, length))
+ n.SetRight(nil)
return n
}
- n.Right = typecheck(n.Right, ctxType)
- t := n.Right.Type
+ n.SetRight(typecheck(n.Right(), ctxType))
+ t := n.Right().Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
- n.Type = t
+ n.SetType(t)
- switch t.Etype {
+ switch t.Kind() {
default:
- yyerror("invalid composite literal type %v", t)
- n.Type = nil
+ base.Errorf("invalid composite literal type %v", t)
+ n.SetType(nil)
- case TARRAY:
- typecheckarraylit(t.Elem(), t.NumElem(), n.List.Slice(), "array literal")
- n.Op = OARRAYLIT
- n.Right = nil
+ case types.TARRAY:
+ typecheckarraylit(t.Elem(), t.NumElem(), n.List().Slice(), "array literal")
+ n.SetOp(ir.OARRAYLIT)
+ n.SetRight(nil)
- case TSLICE:
- length := typecheckarraylit(t.Elem(), -1, n.List.Slice(), "slice literal")
- n.Op = OSLICELIT
- n.Right = nodintconst(length)
+ case types.TSLICE:
+ length := typecheckarraylit(t.Elem(), -1, n.List().Slice(), "slice literal")
+ n.SetOp(ir.OSLICELIT)
+ n.SetRight(nil)
+ n.Len = length
- case TMAP:
+ case types.TMAP:
var cs constSet
- for i3, l := range n.List.Slice() {
+ for i3, l := range n.List().Slice() {
setlineno(l)
- if l.Op != OKEY {
- n.List.SetIndex(i3, typecheck(l, ctxExpr))
- yyerror("missing key in map literal")
+ if l.Op() != ir.OKEY {
+ n.List().SetIndex(i3, typecheck(l, ctxExpr))
+ base.Errorf("missing key in map literal")
continue
}
+ l := l.(*ir.KeyExpr)
- r := l.Left
+ r := l.Left()
r = pushtype(r, t.Key())
r = typecheck(r, ctxExpr)
- l.Left = assignconv(r, t.Key(), "map key")
- cs.add(lineno, l.Left, "key", "map literal")
+ l.SetLeft(assignconv(r, t.Key(), "map key"))
+ cs.add(base.Pos, l.Left(), "key", "map literal")
- r = l.Right
+ r = l.Right()
r = pushtype(r, t.Elem())
r = typecheck(r, ctxExpr)
- l.Right = assignconv(r, t.Elem(), "map value")
+ l.SetRight(assignconv(r, t.Elem(), "map value"))
}
- n.Op = OMAPLIT
- n.Right = nil
+ n.SetOp(ir.OMAPLIT)
+ n.SetRight(nil)
- case TSTRUCT:
+ case types.TSTRUCT:
// Need valid field offsets for Xoffset below.
dowidth(t)
errored := false
- if n.List.Len() != 0 && nokeys(n.List) {
+ if n.List().Len() != 0 && nokeys(n.List()) {
// simple list of variables
- ls := n.List.Slice()
+ ls := n.List().Slice()
for i, n1 := range ls {
setlineno(n1)
n1 = typecheck(n1, ctxExpr)
ls[i] = n1
if i >= t.NumFields() {
if !errored {
- yyerror("too many values in %v", n)
+ base.Errorf("too many values in %v", n)
errored = true
}
continue
f := t.Field(i)
s := f.Sym
- if s != nil && !types.IsExported(s.Name) && s.Pkg != localpkg {
- yyerror("implicit assignment of unexported field '%s' in %v literal", s.Name, t)
+ if s != nil && !types.IsExported(s.Name) && s.Pkg != types.LocalPkg {
+ base.Errorf("implicit assignment of unexported field '%s' in %v literal", s.Name, t)
}
// No pushtype allowed here. Must name fields for that.
n1 = assignconv(n1, f.Type, "field value")
- n1 = nodSym(OSTRUCTKEY, n1, f.Sym)
- n1.Xoffset = f.Offset
- ls[i] = n1
+ sk := nodSym(ir.OSTRUCTKEY, n1, f.Sym)
+ sk.SetOffset(f.Offset)
+ ls[i] = sk
}
if len(ls) < t.NumFields() {
- yyerror("too few values in %v", n)
+ base.Errorf("too few values in %v", n)
}
} else {
hash := make(map[string]bool)
// keyed list
- ls := n.List.Slice()
+ ls := n.List().Slice()
for i, l := range ls {
setlineno(l)
- if l.Op == OKEY {
- key := l.Left
+ if l.Op() == ir.OKEY {
+ kv := l.(*ir.KeyExpr)
+ key := kv.Left()
- l.Op = OSTRUCTKEY
- l.Left = l.Right
- l.Right = nil
+ // Sym might have resolved to name in other top-level
+ // package, because of import dot. Redirect to correct sym
+ // before we do the lookup.
+ s := key.Sym()
+ if id, ok := key.(*ir.Ident); ok && dotImportRefs[id] != nil {
+ s = lookup(s.Name)
+ }
// An OXDOT uses the Sym field to hold
// the field to the right of the dot,
// so s will be non-nil, but an OXDOT
// is never a valid struct literal key.
- if key.Sym == nil || key.Op == OXDOT || key.Sym.IsBlank() {
- yyerror("invalid field name %v in struct initializer", key)
- l.Left = typecheck(l.Left, ctxExpr)
+ if s == nil || s.Pkg != types.LocalPkg || key.Op() == ir.OXDOT || s.IsBlank() {
+ base.Errorf("invalid field name %v in struct initializer", key)
continue
}
- // Sym might have resolved to name in other top-level
- // package, because of import dot. Redirect to correct sym
- // before we do the lookup.
- s := key.Sym
- if s.Pkg != localpkg && types.IsExported(s.Name) {
- s1 := lookup(s.Name)
- if s1.Origpkg == s.Pkg {
- s = s1
- }
- }
- l.Sym = s
+ l = ir.NewStructKeyExpr(l.Pos(), s, kv.Right())
+ ls[i] = l
}
- if l.Op != OSTRUCTKEY {
+ if l.Op() != ir.OSTRUCTKEY {
if !errored {
- yyerror("mixture of field:value and value initializers")
+ base.Errorf("mixture of field:value and value initializers")
errored = true
}
ls[i] = typecheck(ls[i], ctxExpr)
continue
}
+ l := l.(*ir.StructKeyExpr)
- f := lookdot1(nil, l.Sym, t, t.Fields(), 0)
+ f := lookdot1(nil, l.Sym(), t, t.Fields(), 0)
if f == nil {
- if ci := lookdot1(nil, l.Sym, t, t.Fields(), 2); ci != nil { // Case-insensitive lookup.
+ if ci := lookdot1(nil, l.Sym(), t, t.Fields(), 2); ci != nil { // Case-insensitive lookup.
if visible(ci.Sym) {
- yyerror("unknown field '%v' in struct literal of type %v (but does have %v)", l.Sym, t, ci.Sym)
- } else if nonexported(l.Sym) && l.Sym.Name == ci.Sym.Name { // Ensure exactness before the suggestion.
- yyerror("cannot refer to unexported field '%v' in struct literal of type %v", l.Sym, t)
+ base.Errorf("unknown field '%v' in struct literal of type %v (but does have %v)", l.Sym(), t, ci.Sym)
+ } else if nonexported(l.Sym()) && l.Sym().Name == ci.Sym.Name { // Ensure exactness before the suggestion.
+ base.Errorf("cannot refer to unexported field '%v' in struct literal of type %v", l.Sym(), t)
} else {
- yyerror("unknown field '%v' in struct literal of type %v", l.Sym, t)
+ base.Errorf("unknown field '%v' in struct literal of type %v", l.Sym(), t)
}
continue
}
var f *types.Field
- p, _ := dotpath(l.Sym, t, &f, true)
+ p, _ := dotpath(l.Sym(), t, &f, true)
if p == nil || f.IsMethod() {
- yyerror("unknown field '%v' in struct literal of type %v", l.Sym, t)
+ base.Errorf("unknown field '%v' in struct literal of type %v", l.Sym(), t)
continue
}
// dotpath returns the parent embedded types in reverse order.
for ei := len(p) - 1; ei >= 0; ei-- {
ep = append(ep, p[ei].field.Sym.Name)
}
- ep = append(ep, l.Sym.Name)
- yyerror("cannot use promoted field %v in struct literal of type %v", strings.Join(ep, "."), t)
+ ep = append(ep, l.Sym().Name)
+ base.Errorf("cannot use promoted field %v in struct literal of type %v", strings.Join(ep, "."), t)
continue
}
fielddup(f.Sym.Name, hash)
- l.Xoffset = f.Offset
+ l.SetOffset(f.Offset)
// No pushtype allowed here. Tried and rejected.
- l.Left = typecheck(l.Left, ctxExpr)
- l.Left = assignconv(l.Left, f.Type, "field value")
+ l.SetLeft(typecheck(l.Left(), ctxExpr))
+ l.SetLeft(assignconv(l.Left(), f.Type, "field value"))
}
}
- n.Op = OSTRUCTLIT
- n.Right = nil
+ n.SetOp(ir.OSTRUCTLIT)
+ n.SetRight(nil)
}
return n
}
// typecheckarraylit type-checks a sequence of slice/array literal elements.
-func typecheckarraylit(elemType *types.Type, bound int64, elts []*Node, ctx string) int64 {
+func typecheckarraylit(elemType *types.Type, bound int64, elts []ir.Node, ctx string) int64 {
// If there are key/value pairs, create a map to keep seen
// keys so we can check for duplicate indices.
var indices map[int64]bool
for _, elt := range elts {
- if elt.Op == OKEY {
+ if elt.Op() == ir.OKEY {
indices = make(map[int64]bool)
break
}
var key, length int64
for i, elt := range elts {
setlineno(elt)
- vp := &elts[i]
- if elt.Op == OKEY {
- elt.Left = typecheck(elt.Left, ctxExpr)
- key = indexconst(elt.Left)
+ r := elts[i]
+ var kv *ir.KeyExpr
+ if elt.Op() == ir.OKEY {
+ elt := elt.(*ir.KeyExpr)
+ elt.SetLeft(typecheck(elt.Left(), ctxExpr))
+ key = indexconst(elt.Left())
if key < 0 {
- if !elt.Left.Diag() {
+ if !elt.Left().Diag() {
if key == -2 {
- yyerror("index too large")
+ base.Errorf("index too large")
} else {
- yyerror("index must be non-negative integer constant")
+ base.Errorf("index must be non-negative integer constant")
}
- elt.Left.SetDiag(true)
+ elt.Left().SetDiag(true)
}
key = -(1 << 30) // stay negative for a while
}
- vp = &elt.Right
+ kv = elt
+ r = elt.Right()
}
- r := *vp
r = pushtype(r, elemType)
r = typecheck(r, ctxExpr)
- *vp = assignconv(r, elemType, ctx)
+ r = assignconv(r, elemType, ctx)
+ if kv != nil {
+ kv.SetRight(r)
+ } else {
+ elts[i] = r
+ }
if key >= 0 {
if indices != nil {
if indices[key] {
- yyerror("duplicate index in %s: %d", ctx, key)
+ base.Errorf("duplicate index in %s: %d", ctx, key)
} else {
indices[key] = true
}
}
if bound >= 0 && key >= bound {
- yyerror("array index %d out of bounds [0:%d]", key, bound)
+ base.Errorf("array index %d out of bounds [0:%d]", key, bound)
bound = -1
}
}
// visible reports whether sym is exported or locally defined.
func visible(sym *types.Sym) bool {
- return sym != nil && (types.IsExported(sym.Name) || sym.Pkg == localpkg)
+ return sym != nil && (types.IsExported(sym.Name) || sym.Pkg == types.LocalPkg)
}
// nonexported reports whether sym is an unexported field.
}
// lvalue etc
-func islvalue(n *Node) bool {
- switch n.Op {
- case OINDEX:
- if n.Left.Type != nil && n.Left.Type.IsArray() {
- return islvalue(n.Left)
+func islvalue(n ir.Node) bool {
+ switch n.Op() {
+ case ir.OINDEX:
+ if n.Left().Type() != nil && n.Left().Type().IsArray() {
+ return islvalue(n.Left())
}
- if n.Left.Type != nil && n.Left.Type.IsString() {
+ if n.Left().Type() != nil && n.Left().Type().IsString() {
return false
}
fallthrough
- case ODEREF, ODOTPTR, OCLOSUREVAR:
+ case ir.ODEREF, ir.ODOTPTR, ir.OCLOSUREREAD:
return true
- case ODOT:
- return islvalue(n.Left)
+ case ir.ODOT:
+ return islvalue(n.Left())
- case ONAME:
- if n.Class() == PFUNC {
+ case ir.ONAME:
+ if n.Class() == ir.PFUNC {
return false
}
return true
+
+ case ir.ONAMEOFFSET:
+ return true
}
return false
}
-func checklvalue(n *Node, verb string) {
+func checklvalue(n ir.Node, verb string) {
if !islvalue(n) {
- yyerror("cannot %s %v", verb, n)
+ base.Errorf("cannot %s %v", verb, n)
}
}
-func checkassign(stmt *Node, n *Node) {
+func checkassign(stmt ir.Node, n ir.Node) {
// Variables declared in ORANGE are assigned on every iteration.
- if n.Name == nil || n.Name.Defn != stmt || stmt.Op == ORANGE {
+ if !ir.DeclaredBy(n, stmt) || stmt.Op() == ir.ORANGE {
r := outervalue(n)
- if r.Op == ONAME {
- r.Name.SetAssigned(true)
- if r.Name.IsClosureVar() {
- r.Name.Defn.Name.SetAssigned(true)
+ if r.Op() == ir.ONAME {
+ r.Name().SetAssigned(true)
+ if r.Name().IsClosureVar() {
+ r.Name().Defn.Name().SetAssigned(true)
}
}
}
if islvalue(n) {
return
}
- if n.Op == OINDEXMAP {
+ if n.Op() == ir.OINDEXMAP {
n.SetIndexMapLValue(true)
return
}
// have already complained about n being invalid
- if n.Type == nil {
+ if n.Type() == nil {
return
}
switch {
- case n.Op == ODOT && n.Left.Op == OINDEXMAP:
- yyerror("cannot assign to struct field %v in map", n)
- case (n.Op == OINDEX && n.Left.Type.IsString()) || n.Op == OSLICESTR:
- yyerror("cannot assign to %v (strings are immutable)", n)
- case n.Op == OLITERAL && n.Sym != nil && n.isGoConst():
- yyerror("cannot assign to %v (declared const)", n)
+ case n.Op() == ir.ODOT && n.(*ir.SelectorExpr).Left().Op() == ir.OINDEXMAP:
+ base.Errorf("cannot assign to struct field %v in map", n)
+ case (n.Op() == ir.OINDEX && n.(*ir.IndexExpr).Left().Type().IsString()) || n.Op() == ir.OSLICESTR:
+ base.Errorf("cannot assign to %v (strings are immutable)", n)
+ case n.Op() == ir.OLITERAL && n.Sym() != nil && isGoConst(n):
+ base.Errorf("cannot assign to %v (declared const)", n)
default:
- yyerror("cannot assign to %v", n)
+ base.Errorf("cannot assign to %v", n)
}
- n.Type = nil
+ n.SetType(nil)
}
-func checkassignlist(stmt *Node, l Nodes) {
+func checkassignlist(stmt ir.Node, l ir.Nodes) {
for _, n := range l.Slice() {
checkassign(stmt, n)
}
// currently OK, since the only place samesafeexpr gets used on an
// lvalue expression is for OSLICE and OAPPEND optimizations, and it
// is correct in those settings.
-func samesafeexpr(l *Node, r *Node) bool {
- if l.Op != r.Op || !types.Identical(l.Type, r.Type) {
+func samesafeexpr(l ir.Node, r ir.Node) bool {
+ if l.Op() != r.Op() || !types.Identical(l.Type(), r.Type()) {
return false
}
- switch l.Op {
- case ONAME, OCLOSUREVAR:
+ switch l.Op() {
+ case ir.ONAME, ir.OCLOSUREREAD:
return l == r
- case ODOT, ODOTPTR:
- return l.Sym != nil && r.Sym != nil && l.Sym == r.Sym && samesafeexpr(l.Left, r.Left)
-
- case ODEREF, OCONVNOP,
- ONOT, OBITNOT, OPLUS, ONEG:
- return samesafeexpr(l.Left, r.Left)
-
- case OCONV:
+ case ir.ODOT, ir.ODOTPTR:
+ l := l.(*ir.SelectorExpr)
+ r := r.(*ir.SelectorExpr)
+ return l.Sym() != nil && r.Sym() != nil && l.Sym() == r.Sym() && samesafeexpr(l.Left(), r.Left())
+
+ case ir.ODEREF:
+ l := l.(*ir.StarExpr)
+ r := r.(*ir.StarExpr)
+ return samesafeexpr(l.Left(), r.Left())
+
+ case ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG:
+ l := l.(*ir.UnaryExpr)
+ r := r.(*ir.UnaryExpr)
+ return samesafeexpr(l.Left(), r.Left())
+
+ case ir.OCONVNOP:
+ l := l.(*ir.ConvExpr)
+ r := r.(*ir.ConvExpr)
+ return samesafeexpr(l.Left(), r.Left())
+
+ case ir.OCONV:
+ l := l.(*ir.ConvExpr)
+ r := r.(*ir.ConvExpr)
// Some conversions can't be reused, such as []byte(str).
// Allow only numeric-ish types. This is a bit conservative.
- return issimple[l.Type.Etype] && samesafeexpr(l.Left, r.Left)
+ return issimple[l.Type().Kind()] && samesafeexpr(l.Left(), r.Left())
+
+ case ir.OINDEX, ir.OINDEXMAP:
+ l := l.(*ir.IndexExpr)
+ r := r.(*ir.IndexExpr)
+ return samesafeexpr(l.Left(), r.Left()) && samesafeexpr(l.Right(), r.Right())
- case OINDEX, OINDEXMAP,
- OADD, OSUB, OOR, OXOR, OMUL, OLSH, ORSH, OAND, OANDNOT, ODIV, OMOD:
- return samesafeexpr(l.Left, r.Left) && samesafeexpr(l.Right, r.Right)
+ case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD:
+ l := l.(*ir.BinaryExpr)
+ r := r.(*ir.BinaryExpr)
+ return samesafeexpr(l.Left(), r.Left()) && samesafeexpr(l.Right(), r.Right())
- case OLITERAL:
- return eqval(l.Val(), r.Val())
+ case ir.OLITERAL:
+ return constant.Compare(l.Val(), token.EQL, r.Val())
+
+ case ir.ONIL:
+ return true
}
return false
// type check assignment.
// if this assignment is the definition of a var on the left side,
// fill in the var's type.
-func typecheckas(n *Node) {
- if enableTrace && trace {
+func typecheckas(n *ir.AssignStmt) {
+ if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheckas", n)(nil)
}
// if the variable has a type (ntype) then typechecking
// will not look at defn, so it is okay (and desirable,
// so that the conversion below happens).
- n.Left = resolve(n.Left)
+ n.SetLeft(resolve(n.Left()))
- if n.Left.Name == nil || n.Left.Name.Defn != n || n.Left.Name.Param.Ntype != nil {
- n.Left = typecheck(n.Left, ctxExpr|ctxAssign)
+ if !ir.DeclaredBy(n.Left(), n) || n.Left().Name().Ntype != nil {
+ n.SetLeft(typecheck(n.Left(), ctxExpr|ctxAssign))
}
// Use ctxMultiOK so we can emit an "N variables but M values" error
// to be consistent with typecheckas2 (#26616).
- n.Right = typecheck(n.Right, ctxExpr|ctxMultiOK)
- checkassign(n, n.Left)
- if n.Right != nil && n.Right.Type != nil {
- if n.Right.Type.IsFuncArgStruct() {
- yyerror("assignment mismatch: 1 variable but %v returns %d values", n.Right.Left, n.Right.Type.NumFields())
+ n.SetRight(typecheck(n.Right(), ctxExpr|ctxMultiOK))
+ checkassign(n, n.Left())
+ if n.Right() != nil && n.Right().Type() != nil {
+ if n.Right().Type().IsFuncArgStruct() {
+ base.Errorf("assignment mismatch: 1 variable but %v returns %d values", n.Right().(*ir.CallExpr).Left(), n.Right().Type().NumFields())
// Multi-value RHS isn't actually valid for OAS; nil out
// to indicate failed typechecking.
- n.Right.Type = nil
- } else if n.Left.Type != nil {
- n.Right = assignconv(n.Right, n.Left.Type, "assignment")
+ n.Right().SetType(nil)
+ } else if n.Left().Type() != nil {
+ n.SetRight(assignconv(n.Right(), n.Left().Type(), "assignment"))
}
}
- if n.Left.Name != nil && n.Left.Name.Defn == n && n.Left.Name.Param.Ntype == nil {
- n.Right = defaultlit(n.Right, nil)
- n.Left.Type = n.Right.Type
+ if ir.DeclaredBy(n.Left(), n) && n.Left().Name().Ntype == nil {
+ n.SetRight(defaultlit(n.Right(), nil))
+ n.Left().SetType(n.Right().Type())
}
// second half of dance.
// just to get it over with. see dance above.
n.SetTypecheck(1)
- if n.Left.Typecheck() == 0 {
- n.Left = typecheck(n.Left, ctxExpr|ctxAssign)
+ if n.Left().Typecheck() == 0 {
+ n.SetLeft(typecheck(n.Left(), ctxExpr|ctxAssign))
}
- if !n.Left.isBlank() {
- checkwidth(n.Left.Type) // ensure width is calculated for backend
+ if !ir.IsBlank(n.Left()) {
+ checkwidth(n.Left().Type()) // ensure width is calculated for backend
}
}
-func checkassignto(src *types.Type, dst *Node) {
- if op, why := assignop(src, dst.Type); op == OXXX {
- yyerror("cannot assign %v to %L in multiple assignment%s", src, dst, why)
+func checkassignto(src *types.Type, dst ir.Node) {
+ if op, why := assignop(src, dst.Type()); op == ir.OXXX {
+ base.Errorf("cannot assign %v to %L in multiple assignment%s", src, dst, why)
return
}
}
-func typecheckas2(n *Node) {
- if enableTrace && trace {
+func typecheckas2(n *ir.AssignListStmt) {
+ if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheckas2", n)(nil)
}
- ls := n.List.Slice()
+ ls := n.List().Slice()
for i1, n1 := range ls {
// delicate little dance.
n1 = resolve(n1)
ls[i1] = n1
- if n1.Name == nil || n1.Name.Defn != n || n1.Name.Param.Ntype != nil {
+ if !ir.DeclaredBy(n1, n) || n1.Name().Ntype != nil {
ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign)
}
}
- cl := n.List.Len()
- cr := n.Rlist.Len()
+ cl := n.List().Len()
+ cr := n.Rlist().Len()
if cl > 1 && cr == 1 {
- n.Rlist.SetFirst(typecheck(n.Rlist.First(), ctxExpr|ctxMultiOK))
+ n.Rlist().SetFirst(typecheck(n.Rlist().First(), ctxExpr|ctxMultiOK))
} else {
- typecheckslice(n.Rlist.Slice(), ctxExpr)
+ typecheckslice(n.Rlist().Slice(), ctxExpr)
}
- checkassignlist(n, n.List)
+ checkassignlist(n, n.List())
- var l *Node
- var r *Node
+ var l ir.Node
+ var r ir.Node
if cl == cr {
// easy
- ls := n.List.Slice()
- rs := n.Rlist.Slice()
+ ls := n.List().Slice()
+ rs := n.Rlist().Slice()
for il, nl := range ls {
nr := rs[il]
- if nl.Type != nil && nr.Type != nil {
- rs[il] = assignconv(nr, nl.Type, "assignment")
+ if nl.Type() != nil && nr.Type() != nil {
+ rs[il] = assignconv(nr, nl.Type(), "assignment")
}
- if nl.Name != nil && nl.Name.Defn == n && nl.Name.Param.Ntype == nil {
+ if ir.DeclaredBy(nl, n) && nl.Name().Ntype == nil {
rs[il] = defaultlit(rs[il], nil)
- nl.Type = rs[il].Type
+ nl.SetType(rs[il].Type())
}
}
goto out
}
- l = n.List.First()
- r = n.Rlist.First()
+ l = n.List().First()
+ r = n.Rlist().First()
// x,y,z = f()
if cr == 1 {
- if r.Type == nil {
+ if r.Type() == nil {
goto out
}
- switch r.Op {
- case OCALLMETH, OCALLINTER, OCALLFUNC:
- if !r.Type.IsFuncArgStruct() {
+ switch r.Op() {
+ case ir.OCALLMETH, ir.OCALLINTER, ir.OCALLFUNC:
+ if !r.Type().IsFuncArgStruct() {
break
}
- cr = r.Type.NumFields()
+ cr = r.Type().NumFields()
if cr != cl {
goto mismatch
}
- n.Op = OAS2FUNC
- n.Right = r
- n.Rlist.Set(nil)
- for i, l := range n.List.Slice() {
- f := r.Type.Field(i)
- if f.Type != nil && l.Type != nil {
+ r.(*ir.CallExpr).Use = ir.CallUseList
+ n.SetOp(ir.OAS2FUNC)
+ for i, l := range n.List().Slice() {
+ f := r.Type().Field(i)
+ if f.Type != nil && l.Type() != nil {
checkassignto(f.Type, l)
}
- if l.Name != nil && l.Name.Defn == n && l.Name.Param.Ntype == nil {
- l.Type = f.Type
+ if ir.DeclaredBy(l, n) && l.Name().Ntype == nil {
+ l.SetType(f.Type)
}
}
goto out
// x, ok = y
if cl == 2 && cr == 1 {
- if r.Type == nil {
+ if r.Type() == nil {
goto out
}
- switch r.Op {
- case OINDEXMAP, ORECV, ODOTTYPE:
- switch r.Op {
- case OINDEXMAP:
- n.Op = OAS2MAPR
- case ORECV:
- n.Op = OAS2RECV
- case ODOTTYPE:
- n.Op = OAS2DOTTYPE
- r.Op = ODOTTYPE2
+ switch r.Op() {
+ case ir.OINDEXMAP, ir.ORECV, ir.ODOTTYPE:
+ switch r.Op() {
+ case ir.OINDEXMAP:
+ n.SetOp(ir.OAS2MAPR)
+ case ir.ORECV:
+ n.SetOp(ir.OAS2RECV)
+ case ir.ODOTTYPE:
+ n.SetOp(ir.OAS2DOTTYPE)
+ r.SetOp(ir.ODOTTYPE2)
}
- n.Right = r
- n.Rlist.Set(nil)
- if l.Type != nil {
- checkassignto(r.Type, l)
+ if l.Type() != nil {
+ checkassignto(r.Type(), l)
}
- if l.Name != nil && l.Name.Defn == n {
- l.Type = r.Type
+ if ir.DeclaredBy(l, n) {
+ l.SetType(r.Type())
}
- l := n.List.Second()
- if l.Type != nil && !l.Type.IsBoolean() {
- checkassignto(types.Types[TBOOL], l)
+ l := n.List().Second()
+ if l.Type() != nil && !l.Type().IsBoolean() {
+ checkassignto(types.Types[types.TBOOL], l)
}
- if l.Name != nil && l.Name.Defn == n && l.Name.Param.Ntype == nil {
- l.Type = types.Types[TBOOL]
+ if ir.DeclaredBy(l, n) && l.Name().Ntype == nil {
+ l.SetType(types.Types[types.TBOOL])
}
goto out
}
}
mismatch:
- switch r.Op {
+ switch r.Op() {
default:
- yyerror("assignment mismatch: %d variables but %d values", cl, cr)
- case OCALLFUNC, OCALLMETH, OCALLINTER:
- yyerror("assignment mismatch: %d variables but %v returns %d values", cl, r.Left, cr)
+ base.Errorf("assignment mismatch: %d variables but %d values", cl, cr)
+ case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
+ base.Errorf("assignment mismatch: %d variables but %v returns %d values", cl, r.Left(), cr)
}
// second half of dance
out:
n.SetTypecheck(1)
- ls = n.List.Slice()
+ ls = n.List().Slice()
for i1, n1 := range ls {
if n1.Typecheck() == 0 {
ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign)
}
// type check function definition
-func typecheckfunc(n *Node) {
- if enableTrace && trace {
+// To be called by typecheck, not directly.
+// (Call typecheckFunc instead.)
+func typecheckfunc(n *ir.Func) {
+ if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheckfunc", n)(nil)
}
- for _, ln := range n.Func.Dcl {
- if ln.Op == ONAME && (ln.Class() == PPARAM || ln.Class() == PPARAMOUT) {
- ln.Name.Decldepth = 1
+ for _, ln := range n.Dcl {
+ if ln.Op() == ir.ONAME && (ln.Class() == ir.PPARAM || ln.Class() == ir.PPARAMOUT) {
+ ln.Decldepth = 1
}
}
- n.Func.Nname = typecheck(n.Func.Nname, ctxExpr|ctxAssign)
- t := n.Func.Nname.Type
+ n.Nname = typecheck(n.Nname, ctxExpr|ctxAssign).(*ir.Name)
+ t := n.Nname.Type()
if t == nil {
return
}
- n.Type = t
- t.FuncType().Nname = asTypesNode(n.Func.Nname)
+ n.SetType(t)
rcvr := t.Recv()
- if rcvr != nil && n.Func.Shortname != nil {
- m := addmethod(n.Func.Shortname, t, true, n.Func.Pragma&Nointerface != 0)
+ if rcvr != nil && n.Shortname != nil {
+ m := addmethod(n, n.Shortname, t, true, n.Pragma&ir.Nointerface != 0)
if m == nil {
return
}
- n.Func.Nname.Sym = methodSym(rcvr.Type, n.Func.Shortname)
- declare(n.Func.Nname, PFUNC)
+ n.Nname.SetSym(methodSym(rcvr.Type, n.Shortname))
+ declare(n.Nname, ir.PFUNC)
}
- if Ctxt.Flag_dynlink && !inimport && n.Func.Nname != nil {
- makefuncsym(n.Func.Nname.Sym)
+ if base.Ctxt.Flag_dynlink && !inimport && n.Nname != nil {
+ NeedFuncSym(n.Sym())
}
}
// The result of stringtoruneslit MUST be assigned back to n, e.g.
// n.Left = stringtoruneslit(n.Left)
-func stringtoruneslit(n *Node) *Node {
- if n.Left.Op != OLITERAL || n.Left.Val().Ctype() != CTSTR {
- Fatalf("stringtoarraylit %v", n)
+func stringtoruneslit(n *ir.ConvExpr) ir.Node {
+ if n.Left().Op() != ir.OLITERAL || n.Left().Val().Kind() != constant.String {
+ base.Fatalf("stringtoarraylit %v", n)
}
- var l []*Node
+ var l []ir.Node
i := 0
- for _, r := range n.Left.StringVal() {
- l = append(l, nod(OKEY, nodintconst(int64(i)), nodintconst(int64(r))))
+ for _, r := range ir.StringVal(n.Left()) {
+ l = append(l, ir.Nod(ir.OKEY, nodintconst(int64(i)), nodintconst(int64(r))))
i++
}
- nn := nod(OCOMPLIT, nil, typenod(n.Type))
- nn.List.Set(l)
- nn = typecheck(nn, ctxExpr)
- return nn
+ nn := ir.Nod(ir.OCOMPLIT, nil, ir.TypeNode(n.Type()))
+ nn.PtrList().Set(l)
+ return typecheck(nn, ctxExpr)
}
-var mapqueue []*Node
+var mapqueue []*ir.MapType
func checkMapKeys() {
for _, n := range mapqueue {
- k := n.Type.MapType().Key
+ k := n.Type().MapType().Key
if !k.Broke() && !IsComparable(k) {
- yyerrorl(n.Pos, "invalid map key type %v", k)
+ base.ErrorfAt(n.Pos(), "invalid map key type %v", k)
}
}
mapqueue = nil
}
-func setUnderlying(t, underlying *types.Type) {
- if underlying.Etype == TFORW {
- // This type isn't computed yet; when it is, update n.
- underlying.ForwardType().Copyto = append(underlying.ForwardType().Copyto, t)
- return
- }
-
- n := asNode(t.Nod)
- ft := t.ForwardType()
- cache := t.Cache
-
- // TODO(mdempsky): Fix Type rekinding.
- *t = *underlying
-
- // Restore unnecessarily clobbered attributes.
- t.Nod = asTypesNode(n)
- t.Sym = n.Sym
- if n.Name != nil {
- t.Vargen = n.Name.Vargen
- }
- t.Cache = cache
- t.SetDeferwidth(false)
-
- // spec: "The declared type does not inherit any methods bound
- // to the existing type, but the method set of an interface
- // type [...] remains unchanged."
- if !t.IsInterface() {
- *t.Methods() = types.Fields{}
- *t.AllMethods() = types.Fields{}
+func typecheckdeftype(n *ir.Name) {
+ if enableTrace && base.Flag.LowerT {
+ defer tracePrint("typecheckdeftype", n)(nil)
}
- // Propagate go:notinheap pragma from the Name to the Type.
- if n.Name != nil && n.Name.Param != nil && n.Name.Param.Pragma()&NotInHeap != 0 {
+ t := types.NewNamed(n)
+ t.Vargen = n.Vargen
+ if n.Pragma()&ir.NotInHeap != 0 {
t.SetNotInHeap(true)
}
- // Update types waiting on this type.
- for _, w := range ft.Copyto {
- setUnderlying(w, t)
- }
-
- // Double-check use of type as embedded type.
- if ft.Embedlineno.IsKnown() {
- if t.IsPtr() || t.IsUnsafePtr() {
- yyerrorl(ft.Embedlineno, "embedded type cannot be a pointer")
- }
- }
-}
-
-func typecheckdeftype(n *Node) {
- if enableTrace && trace {
- defer tracePrint("typecheckdeftype", n)(nil)
- }
-
+ n.SetType(t)
n.SetTypecheck(1)
- n.Name.Param.Ntype = typecheck(n.Name.Param.Ntype, ctxType)
- t := n.Name.Param.Ntype.Type
- if t == nil {
- n.SetDiag(true)
- n.Type = nil
- } else if n.Type == nil {
- n.SetDiag(true)
+ n.SetWalkdef(1)
+
+ defercheckwidth()
+ errorsBefore := base.Errors()
+ n.Ntype = typecheckNtype(n.Ntype)
+ if underlying := n.Ntype.Type(); underlying != nil {
+ t.SetUnderlying(underlying)
} else {
- // copy new type and clear fields
- // that don't come along.
- setUnderlying(n.Type, t)
+ n.SetDiag(true)
+ n.SetType(nil)
}
+ if t.Kind() == types.TFORW && base.Errors() > errorsBefore {
+ // Something went wrong during type-checking,
+ // but it was reported. Silence future errors.
+ t.SetBroke(true)
+ }
+ resumecheckwidth()
}
-func typecheckdef(n *Node) {
- if enableTrace && trace {
+func typecheckdef(n ir.Node) {
+ if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheckdef", n)(nil)
}
lno := setlineno(n)
- if n.Op == ONONAME {
+ if n.Op() == ir.ONONAME {
if !n.Diag() {
n.SetDiag(true)
// Note: adderrorname looks for this string and
// adds context about the outer expression
- yyerrorl(lineno, "undefined: %v", n.Sym)
+ base.ErrorfAt(base.Pos, "undefined: %v", n.Sym())
}
- lineno = lno
+ base.Pos = lno
return
}
if n.Walkdef() == 1 {
- lineno = lno
+ base.Pos = lno
return
}
typecheckdefstack = append(typecheckdefstack, n)
if n.Walkdef() == 2 {
- flusherrors()
+ base.FlushErrors()
fmt.Printf("typecheckdef loop:")
for i := len(typecheckdefstack) - 1; i >= 0; i-- {
n := typecheckdefstack[i]
- fmt.Printf(" %v", n.Sym)
+ fmt.Printf(" %v", n.Sym())
}
fmt.Printf("\n")
- Fatalf("typecheckdef loop")
+ base.Fatalf("typecheckdef loop")
}
n.SetWalkdef(2)
- if n.Type != nil || n.Sym == nil { // builtin or no name
+ if n.Type() != nil || n.Sym() == nil { // builtin or no name
goto ret
}
- switch n.Op {
+ switch n.Op() {
default:
- Fatalf("typecheckdef %v", n.Op)
-
- case OLITERAL:
- if n.Name.Param.Ntype != nil {
- n.Name.Param.Ntype = typecheck(n.Name.Param.Ntype, ctxType)
- n.Type = n.Name.Param.Ntype.Type
- n.Name.Param.Ntype = nil
- if n.Type == nil {
+ base.Fatalf("typecheckdef %v", n.Op())
+
+ case ir.OLITERAL:
+ if n.Name().Ntype != nil {
+ n.Name().Ntype = typecheckNtype(n.Name().Ntype)
+ n.SetType(n.Name().Ntype.Type())
+ n.Name().Ntype = nil
+ if n.Type() == nil {
n.SetDiag(true)
goto ret
}
}
- e := n.Name.Defn
- n.Name.Defn = nil
+ e := n.Name().Defn
+ n.Name().Defn = nil
if e == nil {
- Dump("typecheckdef nil defn", n)
- yyerrorl(n.Pos, "xxx")
+ ir.Dump("typecheckdef nil defn", n)
+ base.ErrorfAt(n.Pos(), "xxx")
}
e = typecheck(e, ctxExpr)
- if e.Type == nil {
+ if e.Type() == nil {
goto ret
}
- if !e.isGoConst() {
+ if !isGoConst(e) {
if !e.Diag() {
- if Isconst(e, CTNIL) {
- yyerrorl(n.Pos, "const initializer cannot be nil")
+ if e.Op() == ir.ONIL {
+ base.ErrorfAt(n.Pos(), "const initializer cannot be nil")
} else {
- yyerrorl(n.Pos, "const initializer %v is not a constant", e)
+ base.ErrorfAt(n.Pos(), "const initializer %v is not a constant", e)
}
e.SetDiag(true)
}
goto ret
}
- t := n.Type
+ t := n.Type()
if t != nil {
- if !okforconst[t.Etype] {
- yyerrorl(n.Pos, "invalid constant type %v", t)
+ if !ir.OKForConst[t.Kind()] {
+ base.ErrorfAt(n.Pos(), "invalid constant type %v", t)
goto ret
}
- if !e.Type.IsUntyped() && !types.Identical(t, e.Type) {
- yyerrorl(n.Pos, "cannot use %L as type %v in const initializer", e, t)
+ if !e.Type().IsUntyped() && !types.Identical(t, e.Type()) {
+ base.ErrorfAt(n.Pos(), "cannot use %L as type %v in const initializer", e, t)
goto ret
}
e = convlit(e, t)
}
- n.SetVal(e.Val())
- n.Type = e.Type
+ n.SetType(e.Type())
+ if n.Type() != nil {
+ n.SetVal(e.Val())
+ }
- case ONAME:
- if n.Name.Param.Ntype != nil {
- n.Name.Param.Ntype = typecheck(n.Name.Param.Ntype, ctxType)
- n.Type = n.Name.Param.Ntype.Type
- if n.Type == nil {
+ case ir.ONAME:
+ if n.Name().Ntype != nil {
+ n.Name().Ntype = typecheckNtype(n.Name().Ntype)
+ n.SetType(n.Name().Ntype.Type())
+ if n.Type() == nil {
n.SetDiag(true)
goto ret
}
}
- if n.Type != nil {
+ if n.Type() != nil {
break
}
- if n.Name.Defn == nil {
+ if n.Name().Defn == nil {
if n.SubOp() != 0 { // like OPRINTN
break
}
- if nsavederrors+nerrors > 0 {
+ if base.Errors() > 0 {
// Can have undefined variables in x := foo
// that make x have an n.name.Defn == nil.
// If there are other errors anyway, don't
break
}
- Fatalf("var without type, init: %v", n.Sym)
+ base.Fatalf("var without type, init: %v", n.Sym())
}
- if n.Name.Defn.Op == ONAME {
- n.Name.Defn = typecheck(n.Name.Defn, ctxExpr)
- n.Type = n.Name.Defn.Type
+ if n.Name().Defn.Op() == ir.ONAME {
+ n.Name().Defn = typecheck(n.Name().Defn, ctxExpr)
+ n.SetType(n.Name().Defn.Type())
break
}
- n.Name.Defn = typecheck(n.Name.Defn, ctxStmt) // fills in n.Type
+ n.Name().Defn = typecheck(n.Name().Defn, ctxStmt) // fills in n.Type
- case OTYPE:
- if p := n.Name.Param; p.Alias() {
+ case ir.OTYPE:
+ n := n.(*ir.Name)
+ if n.Alias() {
// Type alias declaration: Simply use the rhs type - no need
// to create a new type.
- // If we have a syntax error, p.Ntype may be nil.
- if p.Ntype != nil {
- p.Ntype = typecheck(p.Ntype, ctxType)
- n.Type = p.Ntype.Type
- if n.Type == nil {
+ // If we have a syntax error, name.Ntype may be nil.
+ if n.Ntype != nil {
+ n.Ntype = typecheckNtype(n.Ntype)
+ n.SetType(n.Ntype.Type())
+ if n.Type() == nil {
n.SetDiag(true)
goto ret
}
// For package-level type aliases, set n.Sym.Def so we can identify
// it as a type alias during export. See also #31959.
- if n.Name.Curfn == nil {
- n.Sym.Def = asTypesNode(p.Ntype)
+ if n.Curfn == nil {
+ n.Sym().Def = n.Ntype
}
}
break
}
// regular type declaration
- defercheckwidth()
- n.SetWalkdef(1)
- setTypeNode(n, types.New(TFORW))
- n.Type.Sym = n.Sym
- nerrors0 := nerrors
typecheckdeftype(n)
- if n.Type.Etype == TFORW && nerrors > nerrors0 {
- // Something went wrong during type-checking,
- // but it was reported. Silence future errors.
- n.Type.SetBroke(true)
- }
- resumecheckwidth()
}
ret:
- if n.Op != OLITERAL && n.Type != nil && n.Type.IsUntyped() {
- Fatalf("got %v for %v", n.Type, n)
+ if n.Op() != ir.OLITERAL && n.Type() != nil && n.Type().IsUntyped() {
+ base.Fatalf("got %v for %v", n.Type(), n)
}
last := len(typecheckdefstack) - 1
if typecheckdefstack[last] != n {
- Fatalf("typecheckdefstack mismatch")
+ base.Fatalf("typecheckdefstack mismatch")
}
typecheckdefstack[last] = nil
typecheckdefstack = typecheckdefstack[:last]
- lineno = lno
+ base.Pos = lno
n.SetWalkdef(1)
}
-func checkmake(t *types.Type, arg string, np **Node) bool {
+func checkmake(t *types.Type, arg string, np *ir.Node) bool {
n := *np
- if !n.Type.IsInteger() && n.Type.Etype != TIDEAL {
- yyerror("non-integer %s argument in make(%v) - %v", arg, t, n.Type)
+ if !n.Type().IsInteger() && n.Type().Kind() != types.TIDEAL {
+ base.Errorf("non-integer %s argument in make(%v) - %v", arg, t, n.Type())
return false
}
// Do range checks for constants before defaultlit
// to avoid redundant "constant NNN overflows int" errors.
- switch consttype(n) {
- case CTINT, CTRUNE, CTFLT, CTCPLX:
- v := toint(n.Val()).U.(*Mpint)
- if v.CmpInt64(0) < 0 {
- yyerror("negative %s argument in make(%v)", arg, t)
+ if n.Op() == ir.OLITERAL {
+ v := toint(n.Val())
+ if constant.Sign(v) < 0 {
+ base.Errorf("negative %s argument in make(%v)", arg, t)
return false
}
- if v.Cmp(maxintval[TINT]) > 0 {
- yyerror("%s argument too large in make(%v)", arg, t)
+ if doesoverflow(v, types.Types[types.TINT]) {
+ base.Errorf("%s argument too large in make(%v)", arg, t)
return false
}
}
// are the same as for index expressions. Factor the code better;
// for instance, indexlit might be called here and incorporate some
// of the bounds checks done for make.
- n = defaultlit(n, types.Types[TINT])
+ n = defaultlit(n, types.Types[types.TINT])
*np = n
return true
}
-func markbreak(n *Node, implicit *Node) {
- if n == nil {
- return
- }
+// markBreak marks control statements containing break statements with SetHasBreak(true).
+func markBreak(fn *ir.Func) {
+ var labels map[*types.Sym]ir.Node
+ var implicit ir.Node
- switch n.Op {
- case OBREAK:
- if n.Sym == nil {
- if implicit != nil {
- implicit.SetHasBreak(true)
+ var mark func(ir.Node) error
+ mark = func(n ir.Node) error {
+ switch n.Op() {
+ default:
+ ir.DoChildren(n, mark)
+
+ case ir.OBREAK:
+ if n.Sym() == nil {
+ setHasBreak(implicit)
+ } else {
+ setHasBreak(labels[n.Sym()])
}
- } else {
- lab := asNode(n.Sym.Label)
- if lab != nil {
- lab.SetHasBreak(true)
+
+ case ir.OFOR, ir.OFORUNTIL, ir.OSWITCH, ir.OSELECT, ir.ORANGE:
+ old := implicit
+ implicit = n
+ var sym *types.Sym
+ switch n := n.(type) {
+ case *ir.ForStmt:
+ sym = n.Sym()
+ case *ir.RangeStmt:
+ sym = n.Sym()
+ case *ir.SelectStmt:
+ sym = n.Sym()
+ case *ir.SwitchStmt:
+ sym = n.Sym()
+ }
+ if sym != nil {
+ if labels == nil {
+ // Map creation delayed until we need it - most functions don't.
+ labels = make(map[*types.Sym]ir.Node)
+ }
+ labels[sym] = n
+ }
+ ir.DoChildren(n, mark)
+ if sym != nil {
+ delete(labels, sym)
}
+ implicit = old
}
- case OFOR, OFORUNTIL, OSWITCH, OTYPESW, OSELECT, ORANGE:
- implicit = n
- fallthrough
- default:
- markbreak(n.Left, implicit)
- markbreak(n.Right, implicit)
- markbreaklist(n.Ninit, implicit)
- markbreaklist(n.Nbody, implicit)
- markbreaklist(n.List, implicit)
- markbreaklist(n.Rlist, implicit)
+ return nil
}
+
+ mark(fn)
}
-func markbreaklist(l Nodes, implicit *Node) {
- s := l.Slice()
- for i := 0; i < len(s); i++ {
- n := s[i]
- if n == nil {
- continue
- }
- if n.Op == OLABEL && i+1 < len(s) && n.Name.Defn == s[i+1] {
- switch n.Name.Defn.Op {
- case OFOR, OFORUNTIL, OSWITCH, OTYPESW, OSELECT, ORANGE:
- n.Sym.Label = asTypesNode(n.Name.Defn)
- markbreak(n.Name.Defn, n.Name.Defn)
- n.Sym.Label = nil
- i++
- continue
- }
- }
+func controlLabel(n ir.Node) *types.Sym {
+ switch n := n.(type) {
+ default:
+ base.Fatalf("controlLabel %+v", n.Op())
+ return nil
+ case *ir.ForStmt:
+ return n.Sym()
+ case *ir.RangeStmt:
+ return n.Sym()
+ case *ir.SelectStmt:
+ return n.Sym()
+ case *ir.SwitchStmt:
+ return n.Sym()
+ }
+}
- markbreak(n, implicit)
+func setHasBreak(n ir.Node) {
+ switch n := n.(type) {
+ default:
+ base.Fatalf("setHasBreak %+v", n.Op())
+ case nil:
+ // ignore
+ case *ir.ForStmt:
+ n.SetHasBreak(true)
+ case *ir.RangeStmt:
+ n.SetHasBreak(true)
+ case *ir.SelectStmt:
+ n.SetHasBreak(true)
+ case *ir.SwitchStmt:
+ n.SetHasBreak(true)
}
}
-// isterminating reports whether the Nodes list ends with a terminating statement.
-func (l Nodes) isterminating() bool {
+// isTermNodes reports whether the Nodes list ends with a terminating statement.
+func isTermNodes(l ir.Nodes) bool {
s := l.Slice()
c := len(s)
if c == 0 {
return false
}
- return s[c-1].isterminating()
+ return isTermNode(s[c-1])
}
-// Isterminating reports whether the node n, the last one in a
+// isTermNode reports whether the node n, the last one in a
// statement list, is a terminating statement.
-func (n *Node) isterminating() bool {
- switch n.Op {
+func isTermNode(n ir.Node) bool {
+ switch n.Op() {
// NOTE: OLABEL is treated as a separate statement,
// not a separate prefix, so skipping to the last statement
// in the block handles the labeled statement case by
// skipping over the label. No case OLABEL here.
- case OBLOCK:
- return n.List.isterminating()
+ case ir.OBLOCK:
+ return isTermNodes(n.List())
- case OGOTO, ORETURN, ORETJMP, OPANIC, OFALL:
+ case ir.OGOTO, ir.ORETURN, ir.ORETJMP, ir.OPANIC, ir.OFALL:
return true
- case OFOR, OFORUNTIL:
- if n.Left != nil {
+ case ir.OFOR, ir.OFORUNTIL:
+ if n.Left() != nil {
return false
}
if n.HasBreak() {
}
return true
- case OIF:
- return n.Nbody.isterminating() && n.Rlist.isterminating()
+ case ir.OIF:
+ return isTermNodes(n.Body()) && isTermNodes(n.Rlist())
- case OSWITCH, OTYPESW, OSELECT:
+ case ir.OSWITCH:
if n.HasBreak() {
return false
}
def := false
- for _, n1 := range n.List.Slice() {
- if !n1.Nbody.isterminating() {
+ for _, cas := range n.List().Slice() {
+ cas := cas.(*ir.CaseStmt)
+ if !isTermNodes(cas.Body()) {
return false
}
- if n1.List.Len() == 0 { // default
+ if cas.List().Len() == 0 { // default
def = true
}
}
+ return def
- if n.Op != OSELECT && !def {
+ case ir.OSELECT:
+ if n.HasBreak() {
return false
}
+ for _, cas := range n.List().Slice() {
+ cas := cas.(*ir.CaseStmt)
+ if !isTermNodes(cas.Body()) {
+ return false
+ }
+ }
return true
}
}
// checkreturn makes sure that fn terminates appropriately.
-func checkreturn(fn *Node) {
- if fn.Type.NumResults() != 0 && fn.Nbody.Len() != 0 {
- markbreaklist(fn.Nbody, nil)
- if !fn.Nbody.isterminating() {
- yyerrorl(fn.Func.Endlineno, "missing return at end of function")
+func checkreturn(fn *ir.Func) {
+ if fn.Type().NumResults() != 0 && fn.Body().Len() != 0 {
+ markBreak(fn)
+ if !isTermNodes(fn.Body()) {
+ base.ErrorfAt(fn.Endlineno, "missing return at end of function")
}
}
}
-func deadcode(fn *Node) {
- deadcodeslice(fn.Nbody)
- deadcodefn(fn)
-}
+func deadcode(fn *ir.Func) {
+ deadcodeslice(fn.PtrBody())
-func deadcodefn(fn *Node) {
- if fn.Nbody.Len() == 0 {
+ if fn.Body().Len() == 0 {
return
}
- for _, n := range fn.Nbody.Slice() {
- if n.Ninit.Len() > 0 {
+ for _, n := range fn.Body().Slice() {
+ if n.Init().Len() > 0 {
return
}
- switch n.Op {
- case OIF:
- if !Isconst(n.Left, CTBOOL) || n.Nbody.Len() > 0 || n.Rlist.Len() > 0 {
+ switch n.Op() {
+ case ir.OIF:
+ if !ir.IsConst(n.Left(), constant.Bool) || n.Body().Len() > 0 || n.Rlist().Len() > 0 {
return
}
- case OFOR:
- if !Isconst(n.Left, CTBOOL) || n.Left.BoolVal() {
+ case ir.OFOR:
+ if !ir.IsConst(n.Left(), constant.Bool) || ir.BoolVal(n.Left()) {
return
}
default:
}
}
- fn.Nbody.Set([]*Node{nod(OEMPTY, nil, nil)})
+ fn.PtrBody().Set([]ir.Node{ir.Nod(ir.OBLOCK, nil, nil)})
}
-func deadcodeslice(nn Nodes) {
+func deadcodeslice(nn *ir.Nodes) {
var lastLabel = -1
for i, n := range nn.Slice() {
- if n != nil && n.Op == OLABEL {
+ if n != nil && n.Op() == ir.OLABEL {
lastLabel = i
}
}
if n == nil {
continue
}
- if n.Op == OIF {
- n.Left = deadcodeexpr(n.Left)
- if Isconst(n.Left, CTBOOL) {
- var body Nodes
- if n.Left.BoolVal() {
- n.Rlist = Nodes{}
- body = n.Nbody
+ if n.Op() == ir.OIF {
+ n.SetLeft(deadcodeexpr(n.Left()))
+ if ir.IsConst(n.Left(), constant.Bool) {
+ var body ir.Nodes
+ if ir.BoolVal(n.Left()) {
+ n.SetRlist(ir.Nodes{})
+ body = n.Body()
} else {
- n.Nbody = Nodes{}
- body = n.Rlist
+ n.SetBody(ir.Nodes{})
+ body = n.Rlist()
}
// If "then" or "else" branch ends with panic or return statement,
// it is safe to remove all statements after this node.
// We must be careful not to deadcode-remove labels, as they
// might be the target of a goto. See issue 28616.
if body := body.Slice(); len(body) != 0 {
- switch body[(len(body) - 1)].Op {
- case ORETURN, ORETJMP, OPANIC:
+ switch body[(len(body) - 1)].Op() {
+ case ir.ORETURN, ir.ORETJMP, ir.OPANIC:
if i > lastLabel {
cut = true
}
}
}
- deadcodeslice(n.Ninit)
- deadcodeslice(n.Nbody)
- deadcodeslice(n.List)
- deadcodeslice(n.Rlist)
+ deadcodeslice(n.PtrInit())
+ switch n.Op() {
+ case ir.OBLOCK:
+ deadcodeslice(n.PtrList())
+ case ir.OCASE:
+ deadcodeslice(n.PtrBody())
+ case ir.OFOR:
+ deadcodeslice(n.PtrBody())
+ case ir.OIF:
+ deadcodeslice(n.PtrBody())
+ deadcodeslice(n.PtrRlist())
+ case ir.ORANGE:
+ deadcodeslice(n.PtrBody())
+ case ir.OSELECT:
+ deadcodeslice(n.PtrList())
+ case ir.OSWITCH:
+ deadcodeslice(n.PtrList())
+ }
+
if cut {
- *nn.slice = nn.Slice()[:i+1]
+ nn.Set(nn.Slice()[:i+1])
break
}
}
}
-func deadcodeexpr(n *Node) *Node {
+func deadcodeexpr(n ir.Node) ir.Node {
// Perform dead-code elimination on short-circuited boolean
// expressions involving constants with the intent of
// producing a constant 'if' condition.
- switch n.Op {
- case OANDAND:
- n.Left = deadcodeexpr(n.Left)
- n.Right = deadcodeexpr(n.Right)
- if Isconst(n.Left, CTBOOL) {
- if n.Left.BoolVal() {
- return n.Right // true && x => x
+ switch n.Op() {
+ case ir.OANDAND:
+ n.SetLeft(deadcodeexpr(n.Left()))
+ n.SetRight(deadcodeexpr(n.Right()))
+ if ir.IsConst(n.Left(), constant.Bool) {
+ if ir.BoolVal(n.Left()) {
+ return n.Right() // true && x => x
} else {
- return n.Left // false && x => false
+ return n.Left() // false && x => false
}
}
- case OOROR:
- n.Left = deadcodeexpr(n.Left)
- n.Right = deadcodeexpr(n.Right)
- if Isconst(n.Left, CTBOOL) {
- if n.Left.BoolVal() {
- return n.Left // true || x => true
+ case ir.OOROR:
+ n.SetLeft(deadcodeexpr(n.Left()))
+ n.SetRight(deadcodeexpr(n.Right()))
+ if ir.IsConst(n.Left(), constant.Bool) {
+ if ir.BoolVal(n.Left()) {
+ return n.Left() // true || x => true
} else {
- return n.Right // false || x => x
+ return n.Right() // false || x => x
}
}
}
return n
}
-// setTypeNode sets n to an OTYPE node representing t.
-func setTypeNode(n *Node, t *types.Type) {
- n.Op = OTYPE
- n.Type = t
- n.Type.Nod = asTypesNode(n)
-}
-
// getIotaValue returns the current value for "iota",
// or -1 if not within a ConstSpec.
func getIotaValue() int64 {
if i := len(typecheckdefstack); i > 0 {
- if x := typecheckdefstack[i-1]; x.Op == OLITERAL {
- return x.Iota()
+ if x := typecheckdefstack[i-1]; x.Op() == ir.OLITERAL {
+ return x.(*ir.Name).Iota()
}
}
fn := Curfn
if fn == nil {
// Initialization expressions for package-scope variables.
- return localpkg
+ return types.LocalPkg
}
+ return fnpkg(fn.Nname)
+}
- // TODO(mdempsky): Standardize on either ODCLFUNC or ONAME for
- // Curfn, rather than mixing them.
- if fn.Op == ODCLFUNC {
- fn = fn.Func.Nname
- }
+// MethodName returns the ONAME representing the method
+// referenced by expression n, which must be a method selector,
+// method expression, or method value.
+func methodExprName(n ir.Node) *ir.Name {
+ name, _ := methodExprFunc(n).Nname.(*ir.Name)
+ return name
+}
- return fnpkg(fn)
+// MethodFunc is like MethodName, but returns the types.Field instead.
+func methodExprFunc(n ir.Node) *types.Field {
+ switch n.Op() {
+ case ir.ODOTMETH:
+ return n.(*ir.SelectorExpr).Selection
+ case ir.OMETHEXPR:
+ return n.(*ir.MethodExpr).Method
+ case ir.OCALLPART:
+ return callpartMethod(n)
+ }
+ base.Fatalf("unexpected node: %v (%v)", n, n.Op())
+ panic("unreachable")
}
// license that can be found in the LICENSE file.
package gc
-
-import (
- "cmd/compile/internal/types"
-)
-
-// convenience constants
-const (
- Txxx = types.Txxx
-
- TINT8 = types.TINT8
- TUINT8 = types.TUINT8
- TINT16 = types.TINT16
- TUINT16 = types.TUINT16
- TINT32 = types.TINT32
- TUINT32 = types.TUINT32
- TINT64 = types.TINT64
- TUINT64 = types.TUINT64
- TINT = types.TINT
- TUINT = types.TUINT
- TUINTPTR = types.TUINTPTR
-
- TCOMPLEX64 = types.TCOMPLEX64
- TCOMPLEX128 = types.TCOMPLEX128
-
- TFLOAT32 = types.TFLOAT32
- TFLOAT64 = types.TFLOAT64
-
- TBOOL = types.TBOOL
-
- TPTR = types.TPTR
- TFUNC = types.TFUNC
- TSLICE = types.TSLICE
- TARRAY = types.TARRAY
- TSTRUCT = types.TSTRUCT
- TCHAN = types.TCHAN
- TMAP = types.TMAP
- TINTER = types.TINTER
- TFORW = types.TFORW
- TANY = types.TANY
- TSTRING = types.TSTRING
- TUNSAFEPTR = types.TUNSAFEPTR
-
- // pseudo-types for literals
- TIDEAL = types.TIDEAL
- TNIL = types.TNIL
- TBLANK = types.TBLANK
-
- // pseudo-types for frame layout
- TFUNCARGS = types.TFUNCARGS
- TCHANARGS = types.TCHANARGS
-
- NTYPE = types.NTYPE
-)
// TODO(gri) try to eliminate these soon
package gc
-
-import (
- "cmd/compile/internal/types"
- "unsafe"
-)
-
-func asNode(n *types.Node) *Node { return (*Node)(unsafe.Pointer(n)) }
-func asTypesNode(n *Node) *types.Node { return (*types.Node)(unsafe.Pointer(n)) }
package gc
-import "cmd/compile/internal/types"
-
-// builtinpkg is a fake package that declares the universe block.
-var builtinpkg *types.Pkg
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "go/constant"
+)
var basicTypes = [...]struct {
name string
- etype types.EType
+ etype types.Kind
}{
- {"int8", TINT8},
- {"int16", TINT16},
- {"int32", TINT32},
- {"int64", TINT64},
- {"uint8", TUINT8},
- {"uint16", TUINT16},
- {"uint32", TUINT32},
- {"uint64", TUINT64},
- {"float32", TFLOAT32},
- {"float64", TFLOAT64},
- {"complex64", TCOMPLEX64},
- {"complex128", TCOMPLEX128},
- {"bool", TBOOL},
- {"string", TSTRING},
+ {"int8", types.TINT8},
+ {"int16", types.TINT16},
+ {"int32", types.TINT32},
+ {"int64", types.TINT64},
+ {"uint8", types.TUINT8},
+ {"uint16", types.TUINT16},
+ {"uint32", types.TUINT32},
+ {"uint64", types.TUINT64},
+ {"float32", types.TFLOAT32},
+ {"float64", types.TFLOAT64},
+ {"complex64", types.TCOMPLEX64},
+ {"complex128", types.TCOMPLEX128},
+ {"bool", types.TBOOL},
+ {"string", types.TSTRING},
}
var typedefs = [...]struct {
name string
- etype types.EType
- sameas32 types.EType
- sameas64 types.EType
+ etype types.Kind
+ sameas32 types.Kind
+ sameas64 types.Kind
}{
- {"int", TINT, TINT32, TINT64},
- {"uint", TUINT, TUINT32, TUINT64},
- {"uintptr", TUINTPTR, TUINT32, TUINT64},
+ {"int", types.TINT, types.TINT32, types.TINT64},
+ {"uint", types.TUINT, types.TUINT32, types.TUINT64},
+ {"uintptr", types.TUINTPTR, types.TUINT32, types.TUINT64},
}
var builtinFuncs = [...]struct {
name string
- op Op
+ op ir.Op
}{
- {"append", OAPPEND},
- {"cap", OCAP},
- {"close", OCLOSE},
- {"complex", OCOMPLEX},
- {"copy", OCOPY},
- {"delete", ODELETE},
- {"imag", OIMAG},
- {"len", OLEN},
- {"make", OMAKE},
- {"new", ONEW},
- {"panic", OPANIC},
- {"print", OPRINT},
- {"println", OPRINTN},
- {"real", OREAL},
- {"recover", ORECOVER},
-}
-
-// isBuiltinFuncName reports whether name matches a builtin function
-// name.
-func isBuiltinFuncName(name string) bool {
- for _, fn := range &builtinFuncs {
- if fn.name == name {
- return true
- }
- }
- return false
+ {"append", ir.OAPPEND},
+ {"cap", ir.OCAP},
+ {"close", ir.OCLOSE},
+ {"complex", ir.OCOMPLEX},
+ {"copy", ir.OCOPY},
+ {"delete", ir.ODELETE},
+ {"imag", ir.OIMAG},
+ {"len", ir.OLEN},
+ {"make", ir.OMAKE},
+ {"new", ir.ONEW},
+ {"panic", ir.OPANIC},
+ {"print", ir.OPRINT},
+ {"println", ir.OPRINTN},
+ {"real", ir.OREAL},
+ {"recover", ir.ORECOVER},
}
var unsafeFuncs = [...]struct {
name string
- op Op
+ op ir.Op
}{
- {"Alignof", OALIGNOF},
- {"Offsetof", OOFFSETOF},
- {"Sizeof", OSIZEOF},
+ {"Alignof", ir.OALIGNOF},
+ {"Offsetof", ir.OOFFSETOF},
+ {"Sizeof", ir.OSIZEOF},
}
// initUniverse initializes the universe block.
func initUniverse() {
- lexinit()
- typeinit()
- lexinit1()
-}
+ if Widthptr == 0 {
+ base.Fatalf("typeinit before betypeinit")
+ }
-// lexinit initializes known symbols and the basic types.
-func lexinit() {
- for _, s := range &basicTypes {
- etype := s.etype
- if int(etype) >= len(types.Types) {
- Fatalf("lexinit: %s bad etype", s.name)
+ slicePtrOffset = 0
+ sliceLenOffset = Rnd(slicePtrOffset+int64(Widthptr), int64(Widthptr))
+ sliceCapOffset = Rnd(sliceLenOffset+int64(Widthptr), int64(Widthptr))
+ sizeofSlice = Rnd(sliceCapOffset+int64(Widthptr), int64(Widthptr))
+
+ // string is same as slice wo the cap
+ sizeofString = Rnd(sliceLenOffset+int64(Widthptr), int64(Widthptr))
+
+ for et := types.Kind(0); et < types.NTYPE; et++ {
+ simtype[et] = et
+ }
+
+ types.Types[types.TANY] = types.New(types.TANY)
+ types.Types[types.TINTER] = types.NewInterface(types.LocalPkg, nil)
+
+ defBasic := func(kind types.Kind, pkg *types.Pkg, name string) *types.Type {
+ sym := pkg.Lookup(name)
+ n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, sym)
+ t := types.NewBasic(kind, n)
+ n.SetType(t)
+ sym.Def = n
+ if kind != types.TANY {
+ dowidth(t)
}
- s2 := builtinpkg.Lookup(s.name)
- t := types.Types[etype]
- if t == nil {
- t = types.New(etype)
- t.Sym = s2
- if etype != TANY && etype != TSTRING {
- dowidth(t)
- }
- types.Types[etype] = t
+ return t
+ }
+
+ for _, s := range &basicTypes {
+ types.Types[s.etype] = defBasic(s.etype, types.BuiltinPkg, s.name)
+ }
+
+ for _, s := range &typedefs {
+ sameas := s.sameas32
+ if Widthptr == 8 {
+ sameas = s.sameas64
}
- s2.Def = asTypesNode(typenod(t))
- asNode(s2.Def).Name = new(Name)
+ simtype[s.etype] = sameas
+
+ types.Types[s.etype] = defBasic(s.etype, types.BuiltinPkg, s.name)
}
+ // We create separate byte and rune types for better error messages
+ // rather than just creating type alias *types.Sym's for the uint8 and
+ // int32 types. Hence, (bytetype|runtype).Sym.isAlias() is false.
+ // TODO(gri) Should we get rid of this special case (at the cost
+ // of less informative error messages involving bytes and runes)?
+ // (Alternatively, we could introduce an OTALIAS node representing
+ // type aliases, albeit at the cost of having to deal with it everywhere).
+ types.ByteType = defBasic(types.TUINT8, types.BuiltinPkg, "byte")
+ types.RuneType = defBasic(types.TINT32, types.BuiltinPkg, "rune")
+
+ // error type
+ s := types.BuiltinPkg.Lookup("error")
+ n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, s)
+ types.ErrorType = types.NewNamed(n)
+ types.ErrorType.SetUnderlying(makeErrorInterface())
+ n.SetType(types.ErrorType)
+ s.Def = n
+ dowidth(types.ErrorType)
+
+ types.Types[types.TUNSAFEPTR] = defBasic(types.TUNSAFEPTR, unsafepkg, "Pointer")
+
+ // simple aliases
+ simtype[types.TMAP] = types.TPTR
+ simtype[types.TCHAN] = types.TPTR
+ simtype[types.TFUNC] = types.TPTR
+ simtype[types.TUNSAFEPTR] = types.TPTR
+
for _, s := range &builtinFuncs {
- s2 := builtinpkg.Lookup(s.name)
- s2.Def = asTypesNode(newname(s2))
- asNode(s2.Def).SetSubOp(s.op)
+ s2 := types.BuiltinPkg.Lookup(s.name)
+ def := NewName(s2)
+ def.SetSubOp(s.op)
+ s2.Def = def
}
for _, s := range &unsafeFuncs {
s2 := unsafepkg.Lookup(s.name)
- s2.Def = asTypesNode(newname(s2))
- asNode(s2.Def).SetSubOp(s.op)
+ def := NewName(s2)
+ def.SetSubOp(s.op)
+ s2.Def = def
}
- types.UntypedString = types.New(TSTRING)
- types.UntypedBool = types.New(TBOOL)
- types.Types[TANY] = types.New(TANY)
-
- s := builtinpkg.Lookup("true")
- s.Def = asTypesNode(nodbool(true))
- asNode(s.Def).Sym = lookup("true")
- asNode(s.Def).Name = new(Name)
- asNode(s.Def).Type = types.UntypedBool
+ s = types.BuiltinPkg.Lookup("true")
+ s.Def = ir.NewConstAt(src.NoXPos, s, types.UntypedBool, constant.MakeBool(true))
- s = builtinpkg.Lookup("false")
- s.Def = asTypesNode(nodbool(false))
- asNode(s.Def).Sym = lookup("false")
- asNode(s.Def).Name = new(Name)
- asNode(s.Def).Type = types.UntypedBool
+ s = types.BuiltinPkg.Lookup("false")
+ s.Def = ir.NewConstAt(src.NoXPos, s, types.UntypedBool, constant.MakeBool(false))
s = lookup("_")
+ types.BlankSym = s
s.Block = -100
- s.Def = asTypesNode(newname(s))
- types.Types[TBLANK] = types.New(TBLANK)
- asNode(s.Def).Type = types.Types[TBLANK]
- nblank = asNode(s.Def)
+ s.Def = NewName(s)
+ types.Types[types.TBLANK] = types.New(types.TBLANK)
+ ir.AsNode(s.Def).SetType(types.Types[types.TBLANK])
+ ir.BlankNode = ir.AsNode(s.Def)
+ ir.BlankNode.SetTypecheck(1)
- s = builtinpkg.Lookup("_")
+ s = types.BuiltinPkg.Lookup("_")
s.Block = -100
- s.Def = asTypesNode(newname(s))
- types.Types[TBLANK] = types.New(TBLANK)
- asNode(s.Def).Type = types.Types[TBLANK]
-
- types.Types[TNIL] = types.New(TNIL)
- s = builtinpkg.Lookup("nil")
- var v Val
- v.U = new(NilVal)
- s.Def = asTypesNode(nodlit(v))
- asNode(s.Def).Sym = s
- asNode(s.Def).Name = new(Name)
-
- s = builtinpkg.Lookup("iota")
- s.Def = asTypesNode(nod(OIOTA, nil, nil))
- asNode(s.Def).Sym = s
- asNode(s.Def).Name = new(Name)
-}
+ s.Def = NewName(s)
+ types.Types[types.TBLANK] = types.New(types.TBLANK)
+ ir.AsNode(s.Def).SetType(types.Types[types.TBLANK])
-func typeinit() {
- if Widthptr == 0 {
- Fatalf("typeinit before betypeinit")
- }
+ types.Types[types.TNIL] = types.New(types.TNIL)
+ s = types.BuiltinPkg.Lookup("nil")
+ nnil := nodnil()
+ nnil.(*ir.NilExpr).SetSym(s)
+ s.Def = nnil
- for et := types.EType(0); et < NTYPE; et++ {
- simtype[et] = et
- }
-
- types.Types[TPTR] = types.New(TPTR)
- dowidth(types.Types[TPTR])
-
- t := types.New(TUNSAFEPTR)
- types.Types[TUNSAFEPTR] = t
- t.Sym = unsafepkg.Lookup("Pointer")
- t.Sym.Def = asTypesNode(typenod(t))
- asNode(t.Sym.Def).Name = new(Name)
- dowidth(types.Types[TUNSAFEPTR])
+ s = types.BuiltinPkg.Lookup("iota")
+ s.Def = ir.NewIota(base.Pos, s)
- for et := TINT8; et <= TUINT64; et++ {
+ for et := types.TINT8; et <= types.TUINT64; et++ {
isInt[et] = true
}
- isInt[TINT] = true
- isInt[TUINT] = true
- isInt[TUINTPTR] = true
+ isInt[types.TINT] = true
+ isInt[types.TUINT] = true
+ isInt[types.TUINTPTR] = true
- isFloat[TFLOAT32] = true
- isFloat[TFLOAT64] = true
+ isFloat[types.TFLOAT32] = true
+ isFloat[types.TFLOAT64] = true
- isComplex[TCOMPLEX64] = true
- isComplex[TCOMPLEX128] = true
+ isComplex[types.TCOMPLEX64] = true
+ isComplex[types.TCOMPLEX128] = true
// initialize okfor
- for et := types.EType(0); et < NTYPE; et++ {
- if isInt[et] || et == TIDEAL {
+ for et := types.Kind(0); et < types.NTYPE; et++ {
+ if isInt[et] || et == types.TIDEAL {
okforeq[et] = true
okforcmp[et] = true
okforarith[et] = true
okforadd[et] = true
okforand[et] = true
- okforconst[et] = true
+ ir.OKForConst[et] = true
issimple[et] = true
- minintval[et] = new(Mpint)
- maxintval[et] = new(Mpint)
}
if isFloat[et] {
okforcmp[et] = true
okforadd[et] = true
okforarith[et] = true
- okforconst[et] = true
+ ir.OKForConst[et] = true
issimple[et] = true
- minfltval[et] = newMpflt()
- maxfltval[et] = newMpflt()
}
if isComplex[et] {
okforeq[et] = true
okforadd[et] = true
okforarith[et] = true
- okforconst[et] = true
+ ir.OKForConst[et] = true
issimple[et] = true
}
}
- issimple[TBOOL] = true
+ issimple[types.TBOOL] = true
- okforadd[TSTRING] = true
+ okforadd[types.TSTRING] = true
- okforbool[TBOOL] = true
+ okforbool[types.TBOOL] = true
- okforcap[TARRAY] = true
- okforcap[TCHAN] = true
- okforcap[TSLICE] = true
+ okforcap[types.TARRAY] = true
+ okforcap[types.TCHAN] = true
+ okforcap[types.TSLICE] = true
- okforconst[TBOOL] = true
- okforconst[TSTRING] = true
+ ir.OKForConst[types.TBOOL] = true
+ ir.OKForConst[types.TSTRING] = true
- okforlen[TARRAY] = true
- okforlen[TCHAN] = true
- okforlen[TMAP] = true
- okforlen[TSLICE] = true
- okforlen[TSTRING] = true
+ okforlen[types.TARRAY] = true
+ okforlen[types.TCHAN] = true
+ okforlen[types.TMAP] = true
+ okforlen[types.TSLICE] = true
+ okforlen[types.TSTRING] = true
- okforeq[TPTR] = true
- okforeq[TUNSAFEPTR] = true
- okforeq[TINTER] = true
- okforeq[TCHAN] = true
- okforeq[TSTRING] = true
- okforeq[TBOOL] = true
- okforeq[TMAP] = true // nil only; refined in typecheck
- okforeq[TFUNC] = true // nil only; refined in typecheck
- okforeq[TSLICE] = true // nil only; refined in typecheck
- okforeq[TARRAY] = true // only if element type is comparable; refined in typecheck
- okforeq[TSTRUCT] = true // only if all struct fields are comparable; refined in typecheck
+ okforeq[types.TPTR] = true
+ okforeq[types.TUNSAFEPTR] = true
+ okforeq[types.TINTER] = true
+ okforeq[types.TCHAN] = true
+ okforeq[types.TSTRING] = true
+ okforeq[types.TBOOL] = true
+ okforeq[types.TMAP] = true // nil only; refined in typecheck
+ okforeq[types.TFUNC] = true // nil only; refined in typecheck
+ okforeq[types.TSLICE] = true // nil only; refined in typecheck
+ okforeq[types.TARRAY] = true // only if element type is comparable; refined in typecheck
+ okforeq[types.TSTRUCT] = true // only if all struct fields are comparable; refined in typecheck
- okforcmp[TSTRING] = true
+ okforcmp[types.TSTRING] = true
- var i int
- for i = 0; i < len(okfor); i++ {
+ for i := range okfor {
okfor[i] = okfornone[:]
}
// binary
- okfor[OADD] = okforadd[:]
- okfor[OAND] = okforand[:]
- okfor[OANDAND] = okforbool[:]
- okfor[OANDNOT] = okforand[:]
- okfor[ODIV] = okforarith[:]
- okfor[OEQ] = okforeq[:]
- okfor[OGE] = okforcmp[:]
- okfor[OGT] = okforcmp[:]
- okfor[OLE] = okforcmp[:]
- okfor[OLT] = okforcmp[:]
- okfor[OMOD] = okforand[:]
- okfor[OMUL] = okforarith[:]
- okfor[ONE] = okforeq[:]
- okfor[OOR] = okforand[:]
- okfor[OOROR] = okforbool[:]
- okfor[OSUB] = okforarith[:]
- okfor[OXOR] = okforand[:]
- okfor[OLSH] = okforand[:]
- okfor[ORSH] = okforand[:]
+ okfor[ir.OADD] = okforadd[:]
+ okfor[ir.OAND] = okforand[:]
+ okfor[ir.OANDAND] = okforbool[:]
+ okfor[ir.OANDNOT] = okforand[:]
+ okfor[ir.ODIV] = okforarith[:]
+ okfor[ir.OEQ] = okforeq[:]
+ okfor[ir.OGE] = okforcmp[:]
+ okfor[ir.OGT] = okforcmp[:]
+ okfor[ir.OLE] = okforcmp[:]
+ okfor[ir.OLT] = okforcmp[:]
+ okfor[ir.OMOD] = okforand[:]
+ okfor[ir.OMUL] = okforarith[:]
+ okfor[ir.ONE] = okforeq[:]
+ okfor[ir.OOR] = okforand[:]
+ okfor[ir.OOROR] = okforbool[:]
+ okfor[ir.OSUB] = okforarith[:]
+ okfor[ir.OXOR] = okforand[:]
+ okfor[ir.OLSH] = okforand[:]
+ okfor[ir.ORSH] = okforand[:]
// unary
- okfor[OBITNOT] = okforand[:]
- okfor[ONEG] = okforarith[:]
- okfor[ONOT] = okforbool[:]
- okfor[OPLUS] = okforarith[:]
+ okfor[ir.OBITNOT] = okforand[:]
+ okfor[ir.ONEG] = okforarith[:]
+ okfor[ir.ONOT] = okforbool[:]
+ okfor[ir.OPLUS] = okforarith[:]
// special
- okfor[OCAP] = okforcap[:]
- okfor[OLEN] = okforlen[:]
+ okfor[ir.OCAP] = okforcap[:]
+ okfor[ir.OLEN] = okforlen[:]
// comparison
- iscmp[OLT] = true
- iscmp[OGT] = true
- iscmp[OGE] = true
- iscmp[OLE] = true
- iscmp[OEQ] = true
- iscmp[ONE] = true
-
- maxintval[TINT8].SetString("0x7f")
- minintval[TINT8].SetString("-0x80")
- maxintval[TINT16].SetString("0x7fff")
- minintval[TINT16].SetString("-0x8000")
- maxintval[TINT32].SetString("0x7fffffff")
- minintval[TINT32].SetString("-0x80000000")
- maxintval[TINT64].SetString("0x7fffffffffffffff")
- minintval[TINT64].SetString("-0x8000000000000000")
-
- maxintval[TUINT8].SetString("0xff")
- maxintval[TUINT16].SetString("0xffff")
- maxintval[TUINT32].SetString("0xffffffff")
- maxintval[TUINT64].SetString("0xffffffffffffffff")
-
- // f is valid float if min < f < max. (min and max are not themselves valid.)
- maxfltval[TFLOAT32].SetString("33554431p103") // 2^24-1 p (127-23) + 1/2 ulp
- minfltval[TFLOAT32].SetString("-33554431p103")
- maxfltval[TFLOAT64].SetString("18014398509481983p970") // 2^53-1 p (1023-52) + 1/2 ulp
- minfltval[TFLOAT64].SetString("-18014398509481983p970")
-
- maxfltval[TCOMPLEX64] = maxfltval[TFLOAT32]
- minfltval[TCOMPLEX64] = minfltval[TFLOAT32]
- maxfltval[TCOMPLEX128] = maxfltval[TFLOAT64]
- minfltval[TCOMPLEX128] = minfltval[TFLOAT64]
-
- types.Types[TINTER] = types.New(TINTER) // empty interface
-
- // simple aliases
- simtype[TMAP] = TPTR
- simtype[TCHAN] = TPTR
- simtype[TFUNC] = TPTR
- simtype[TUNSAFEPTR] = TPTR
-
- slicePtrOffset = 0
- sliceLenOffset = Rnd(slicePtrOffset+int64(Widthptr), int64(Widthptr))
- sliceCapOffset = Rnd(sliceLenOffset+int64(Widthptr), int64(Widthptr))
- sizeofSlice = Rnd(sliceCapOffset+int64(Widthptr), int64(Widthptr))
-
- // string is same as slice wo the cap
- sizeofString = Rnd(sliceLenOffset+int64(Widthptr), int64(Widthptr))
-
- dowidth(types.Types[TSTRING])
- dowidth(types.UntypedString)
+ iscmp[ir.OLT] = true
+ iscmp[ir.OGT] = true
+ iscmp[ir.OGE] = true
+ iscmp[ir.OLE] = true
+ iscmp[ir.OEQ] = true
+ iscmp[ir.ONE] = true
}
func makeErrorInterface() *types.Type {
- field := types.NewField()
- field.Type = types.Types[TSTRING]
- f := functypefield(fakeRecvField(), nil, []*types.Field{field})
-
- field = types.NewField()
- field.Sym = lookup("Error")
- field.Type = f
-
- t := types.New(TINTER)
- t.SetInterface([]*types.Field{field})
- return t
-}
-
-func lexinit1() {
- // error type
- s := builtinpkg.Lookup("error")
- types.Errortype = makeErrorInterface()
- types.Errortype.Sym = s
- types.Errortype.Orig = makeErrorInterface()
- s.Def = asTypesNode(typenod(types.Errortype))
- dowidth(types.Errortype)
-
- // We create separate byte and rune types for better error messages
- // rather than just creating type alias *types.Sym's for the uint8 and
- // int32 types. Hence, (bytetype|runtype).Sym.isAlias() is false.
- // TODO(gri) Should we get rid of this special case (at the cost
- // of less informative error messages involving bytes and runes)?
- // (Alternatively, we could introduce an OTALIAS node representing
- // type aliases, albeit at the cost of having to deal with it everywhere).
-
- // byte alias
- s = builtinpkg.Lookup("byte")
- types.Bytetype = types.New(TUINT8)
- types.Bytetype.Sym = s
- s.Def = asTypesNode(typenod(types.Bytetype))
- asNode(s.Def).Name = new(Name)
- dowidth(types.Bytetype)
-
- // rune alias
- s = builtinpkg.Lookup("rune")
- types.Runetype = types.New(TINT32)
- types.Runetype.Sym = s
- s.Def = asTypesNode(typenod(types.Runetype))
- asNode(s.Def).Name = new(Name)
- dowidth(types.Runetype)
-
- // backend-dependent builtin types (e.g. int).
- for _, s := range &typedefs {
- s1 := builtinpkg.Lookup(s.name)
-
- sameas := s.sameas32
- if Widthptr == 8 {
- sameas = s.sameas64
- }
-
- simtype[s.etype] = sameas
- minfltval[s.etype] = minfltval[sameas]
- maxfltval[s.etype] = maxfltval[sameas]
- minintval[s.etype] = minintval[sameas]
- maxintval[s.etype] = maxintval[sameas]
-
- t := types.New(s.etype)
- t.Sym = s1
- types.Types[s.etype] = t
- s1.Def = asTypesNode(typenod(t))
- asNode(s1.Def).Name = new(Name)
- s1.Origpkg = builtinpkg
-
- dowidth(t)
- }
+ sig := types.NewSignature(types.NoPkg, fakeRecvField(), nil, []*types.Field{
+ types.NewField(src.NoXPos, nil, types.Types[types.TSTRING]),
+ })
+ method := types.NewField(src.NoXPos, lookup("Error"), sig)
+ return types.NewInterface(types.NoPkg, []*types.Field{method})
}
// finishUniverse makes the universe block visible within the current package.
// that we silently skip symbols that are already declared in the
// package block rather than emitting a redeclared symbol error.
- for _, s := range builtinpkg.Syms {
+ for _, s := range types.BuiltinPkg.Syms {
if s.Def == nil {
continue
}
s1.Block = s.Block
}
- nodfp = newname(lookup(".fp"))
- nodfp.Type = types.Types[TINT32]
- nodfp.SetClass(PPARAM)
- nodfp.Name.SetUsed(true)
+ nodfp = NewName(lookup(".fp"))
+ nodfp.SetType(types.Types[types.TINT32])
+ nodfp.SetClass(ir.PPARAM)
+ nodfp.SetUsed(true)
}
package gc
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+)
+
// evalunsafe evaluates a package unsafe operation and returns the result.
-func evalunsafe(n *Node) int64 {
- switch n.Op {
- case OALIGNOF, OSIZEOF:
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- tr := n.Left.Type
+func evalunsafe(n ir.Node) int64 {
+ switch n.Op() {
+ case ir.OALIGNOF, ir.OSIZEOF:
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ tr := n.Left().Type()
if tr == nil {
return 0
}
dowidth(tr)
- if n.Op == OALIGNOF {
+ if n.Op() == ir.OALIGNOF {
return int64(tr.Align)
}
return tr.Width
- case OOFFSETOF:
+ case ir.OOFFSETOF:
// must be a selector.
- if n.Left.Op != OXDOT {
- yyerror("invalid expression %v", n)
+ if n.Left().Op() != ir.OXDOT {
+ base.Errorf("invalid expression %v", n)
return 0
}
+ sel := n.Left().(*ir.SelectorExpr)
// Remember base of selector to find it back after dot insertion.
// Since r->left may be mutated by typechecking, check it explicitly
// first to track it correctly.
- n.Left.Left = typecheck(n.Left.Left, ctxExpr)
- base := n.Left.Left
+ sel.SetLeft(typecheck(sel.Left(), ctxExpr))
+ sbase := sel.Left()
- n.Left = typecheck(n.Left, ctxExpr)
- if n.Left.Type == nil {
+ tsel := typecheck(sel, ctxExpr)
+ n.SetLeft(tsel)
+ if tsel.Type() == nil {
return 0
}
- switch n.Left.Op {
- case ODOT, ODOTPTR:
+ switch tsel.Op() {
+ case ir.ODOT, ir.ODOTPTR:
break
- case OCALLPART:
- yyerror("invalid expression %v: argument is a method value", n)
+ case ir.OCALLPART:
+ base.Errorf("invalid expression %v: argument is a method value", n)
return 0
default:
- yyerror("invalid expression %v", n)
+ base.Errorf("invalid expression %v", n)
return 0
}
- // Sum offsets for dots until we reach base.
+ // Sum offsets for dots until we reach sbase.
var v int64
- for r := n.Left; r != base; r = r.Left {
- switch r.Op {
- case ODOTPTR:
+ var next ir.Node
+ for r := tsel; r != sbase; r = next {
+ switch r.Op() {
+ case ir.ODOTPTR:
// For Offsetof(s.f), s may itself be a pointer,
// but accessing f must not otherwise involve
// indirection via embedded pointer types.
- if r.Left != base {
- yyerror("invalid expression %v: selector implies indirection of embedded %v", n, r.Left)
+ if r.Left() != sbase {
+ base.Errorf("invalid expression %v: selector implies indirection of embedded %v", n, r.Left())
return 0
}
fallthrough
- case ODOT:
- v += r.Xoffset
+ case ir.ODOT:
+ v += r.Offset()
+ next = r.Left()
default:
- Dump("unsafenmagic", n.Left)
- Fatalf("impossible %#v node after dot insertion", r.Op)
+ ir.Dump("unsafenmagic", tsel)
+ base.Fatalf("impossible %v node after dot insertion", r.Op())
}
}
return v
}
- Fatalf("unexpected op %v", n.Op)
+ base.Fatalf("unexpected op %v", n.Op())
return 0
}
"os"
"runtime"
"runtime/pprof"
-)
-
-// Line returns n's position as a string. If n has been inlined,
-// it uses the outermost position where n has been inlined.
-func (n *Node) Line() string {
- return linestr(n.Pos)
-}
-var atExitFuncs []func()
-
-func atExit(f func()) {
- atExitFuncs = append(atExitFuncs, f)
-}
-
-func Exit(code int) {
- for i := len(atExitFuncs) - 1; i >= 0; i-- {
- f := atExitFuncs[i]
- atExitFuncs = atExitFuncs[:i]
- f()
- }
- os.Exit(code)
-}
+ "cmd/compile/internal/base"
+)
var (
- blockprofile string
- cpuprofile string
- memprofile string
memprofilerate int64
- traceprofile string
traceHandler func(string)
- mutexprofile string
)
func startProfile() {
- if cpuprofile != "" {
- f, err := os.Create(cpuprofile)
+ if base.Flag.CPUProfile != "" {
+ f, err := os.Create(base.Flag.CPUProfile)
if err != nil {
- Fatalf("%v", err)
+ base.Fatalf("%v", err)
}
if err := pprof.StartCPUProfile(f); err != nil {
- Fatalf("%v", err)
+ base.Fatalf("%v", err)
}
- atExit(pprof.StopCPUProfile)
+ base.AtExit(pprof.StopCPUProfile)
}
- if memprofile != "" {
+ if base.Flag.MemProfile != "" {
if memprofilerate != 0 {
runtime.MemProfileRate = int(memprofilerate)
}
- f, err := os.Create(memprofile)
+ f, err := os.Create(base.Flag.MemProfile)
if err != nil {
- Fatalf("%v", err)
+ base.Fatalf("%v", err)
}
- atExit(func() {
+ base.AtExit(func() {
// Profile all outstanding allocations.
runtime.GC()
// compilebench parses the memory profile to extract memstats,
// See golang.org/issue/18641 and runtime/pprof/pprof.go:writeHeap.
const writeLegacyFormat = 1
if err := pprof.Lookup("heap").WriteTo(f, writeLegacyFormat); err != nil {
- Fatalf("%v", err)
+ base.Fatalf("%v", err)
}
})
} else {
// Not doing memory profiling; disable it entirely.
runtime.MemProfileRate = 0
}
- if blockprofile != "" {
- f, err := os.Create(blockprofile)
+ if base.Flag.BlockProfile != "" {
+ f, err := os.Create(base.Flag.BlockProfile)
if err != nil {
- Fatalf("%v", err)
+ base.Fatalf("%v", err)
}
runtime.SetBlockProfileRate(1)
- atExit(func() {
+ base.AtExit(func() {
pprof.Lookup("block").WriteTo(f, 0)
f.Close()
})
}
- if mutexprofile != "" {
- f, err := os.Create(mutexprofile)
+ if base.Flag.MutexProfile != "" {
+ f, err := os.Create(base.Flag.MutexProfile)
if err != nil {
- Fatalf("%v", err)
+ base.Fatalf("%v", err)
}
startMutexProfiling()
- atExit(func() {
+ base.AtExit(func() {
pprof.Lookup("mutex").WriteTo(f, 0)
f.Close()
})
}
- if traceprofile != "" && traceHandler != nil {
- traceHandler(traceprofile)
+ if base.Flag.TraceProfile != "" && traceHandler != nil {
+ traceHandler(base.Flag.TraceProfile)
}
}
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/sys"
"encoding/binary"
+ "errors"
"fmt"
+ "go/constant"
+ "go/token"
"strings"
)
const tmpstringbufsize = 32
const zeroValSize = 1024 // must match value of runtime/map.go:maxZero
-func walk(fn *Node) {
+func walk(fn *ir.Func) {
Curfn = fn
+ errorsBefore := base.Errors()
+ order(fn)
+ if base.Errors() > errorsBefore {
+ return
+ }
- if Debug.W != 0 {
- s := fmt.Sprintf("\nbefore walk %v", Curfn.Func.Nname.Sym)
- dumplist(s, Curfn.Nbody)
+ if base.Flag.W != 0 {
+ s := fmt.Sprintf("\nbefore walk %v", Curfn.Sym())
+ ir.DumpList(s, Curfn.Body())
}
- lno := lineno
+ lno := base.Pos
// Final typecheck for any unused variables.
- for i, ln := range fn.Func.Dcl {
- if ln.Op == ONAME && (ln.Class() == PAUTO || ln.Class() == PAUTOHEAP) {
- ln = typecheck(ln, ctxExpr|ctxAssign)
- fn.Func.Dcl[i] = ln
+ for i, ln := range fn.Dcl {
+ if ln.Op() == ir.ONAME && (ln.Class() == ir.PAUTO || ln.Class() == ir.PAUTOHEAP) {
+ ln = typecheck(ln, ctxExpr|ctxAssign).(*ir.Name)
+ fn.Dcl[i] = ln
}
}
// Propagate the used flag for typeswitch variables up to the NONAME in its definition.
- for _, ln := range fn.Func.Dcl {
- if ln.Op == ONAME && (ln.Class() == PAUTO || ln.Class() == PAUTOHEAP) && ln.Name.Defn != nil && ln.Name.Defn.Op == OTYPESW && ln.Name.Used() {
- ln.Name.Defn.Left.Name.SetUsed(true)
+ for _, ln := range fn.Dcl {
+ if ln.Op() == ir.ONAME && (ln.Class() == ir.PAUTO || ln.Class() == ir.PAUTOHEAP) && ln.Defn != nil && ln.Defn.Op() == ir.OTYPESW && ln.Used() {
+ ln.Defn.(*ir.TypeSwitchGuard).Used = true
}
}
- for _, ln := range fn.Func.Dcl {
- if ln.Op != ONAME || (ln.Class() != PAUTO && ln.Class() != PAUTOHEAP) || ln.Sym.Name[0] == '&' || ln.Name.Used() {
+ for _, ln := range fn.Dcl {
+ if ln.Op() != ir.ONAME || (ln.Class() != ir.PAUTO && ln.Class() != ir.PAUTOHEAP) || ln.Sym().Name[0] == '&' || ln.Used() {
continue
}
- if defn := ln.Name.Defn; defn != nil && defn.Op == OTYPESW {
- if defn.Left.Name.Used() {
+ if defn, ok := ln.Defn.(*ir.TypeSwitchGuard); ok {
+ if defn.Used {
continue
}
- yyerrorl(defn.Left.Pos, "%v declared but not used", ln.Sym)
- defn.Left.Name.SetUsed(true) // suppress repeats
+ base.ErrorfAt(defn.Tag.Pos(), "%v declared but not used", ln.Sym())
+ defn.Used = true // suppress repeats
} else {
- yyerrorl(ln.Pos, "%v declared but not used", ln.Sym)
+ base.ErrorfAt(ln.Pos(), "%v declared but not used", ln.Sym())
}
}
- lineno = lno
- if nerrors != 0 {
+ base.Pos = lno
+ if base.Errors() > errorsBefore {
return
}
- walkstmtlist(Curfn.Nbody.Slice())
- if Debug.W != 0 {
- s := fmt.Sprintf("after walk %v", Curfn.Func.Nname.Sym)
- dumplist(s, Curfn.Nbody)
+ walkstmtlist(Curfn.Body().Slice())
+ if base.Flag.W != 0 {
+ s := fmt.Sprintf("after walk %v", Curfn.Sym())
+ ir.DumpList(s, Curfn.Body())
}
zeroResults()
heapmoves()
- if Debug.W != 0 && Curfn.Func.Enter.Len() > 0 {
- s := fmt.Sprintf("enter %v", Curfn.Func.Nname.Sym)
- dumplist(s, Curfn.Func.Enter)
+ if base.Flag.W != 0 && Curfn.Enter.Len() > 0 {
+ s := fmt.Sprintf("enter %v", Curfn.Sym())
+ ir.DumpList(s, Curfn.Enter)
+ }
+
+ if instrumenting {
+ instrument(fn)
}
}
-func walkstmtlist(s []*Node) {
+func walkstmtlist(s []ir.Node) {
for i := range s {
s[i] = walkstmt(s[i])
}
}
-func paramoutheap(fn *Node) bool {
- for _, ln := range fn.Func.Dcl {
+func paramoutheap(fn *ir.Func) bool {
+ for _, ln := range fn.Dcl {
switch ln.Class() {
- case PPARAMOUT:
- if ln.isParamStackCopy() || ln.Name.Addrtaken() {
+ case ir.PPARAMOUT:
+ if isParamStackCopy(ln) || ln.Addrtaken() {
return true
}
- case PAUTO:
+ case ir.PAUTO:
// stop early - parameters are over
return false
}
// The result of walkstmt MUST be assigned back to n, e.g.
// n.Left = walkstmt(n.Left)
-func walkstmt(n *Node) *Node {
+func walkstmt(n ir.Node) ir.Node {
if n == nil {
return n
}
setlineno(n)
- walkstmtlist(n.Ninit.Slice())
+ walkstmtlist(n.Init().Slice())
- switch n.Op {
+ switch n.Op() {
default:
- if n.Op == ONAME {
- yyerror("%v is not a top level statement", n.Sym)
+ if n.Op() == ir.ONAME {
+ base.Errorf("%v is not a top level statement", n.Sym())
} else {
- yyerror("%v is not a top level statement", n.Op)
- }
- Dump("nottop", n)
-
- case OAS,
- OASOP,
- OAS2,
- OAS2DOTTYPE,
- OAS2RECV,
- OAS2FUNC,
- OAS2MAPR,
- OCLOSE,
- OCOPY,
- OCALLMETH,
- OCALLINTER,
- OCALL,
- OCALLFUNC,
- ODELETE,
- OSEND,
- OPRINT,
- OPRINTN,
- OPANIC,
- OEMPTY,
- ORECOVER,
- OGETG:
+ base.Errorf("%v is not a top level statement", n.Op())
+ }
+ ir.Dump("nottop", n)
+ return n
+
+ case ir.OAS,
+ ir.OASOP,
+ ir.OAS2,
+ ir.OAS2DOTTYPE,
+ ir.OAS2RECV,
+ ir.OAS2FUNC,
+ ir.OAS2MAPR,
+ ir.OCLOSE,
+ ir.OCOPY,
+ ir.OCALLMETH,
+ ir.OCALLINTER,
+ ir.OCALL,
+ ir.OCALLFUNC,
+ ir.ODELETE,
+ ir.OSEND,
+ ir.OPRINT,
+ ir.OPRINTN,
+ ir.OPANIC,
+ ir.ORECOVER,
+ ir.OGETG:
if n.Typecheck() == 0 {
- Fatalf("missing typecheck: %+v", n)
+ base.Fatalf("missing typecheck: %+v", n)
}
- wascopy := n.Op == OCOPY
- init := n.Ninit
- n.Ninit.Set(nil)
+ init := n.Init()
+ n.PtrInit().Set(nil)
n = walkexpr(n, &init)
- n = addinit(n, init.Slice())
- if wascopy && n.Op == OCONVNOP {
- n.Op = OEMPTY // don't leave plain values as statements.
+ if n.Op() == ir.ONAME {
+ // copy rewrote to a statement list and a temp for the length.
+ // Throw away the temp to avoid plain values as statements.
+ n = ir.NewBlockStmt(n.Pos(), init.Slice())
+ init.Set(nil)
+ }
+ if init.Len() > 0 {
+ switch n.Op() {
+ case ir.OAS, ir.OAS2, ir.OBLOCK:
+ n.PtrInit().Prepend(init.Slice()...)
+
+ default:
+ init.Append(n)
+ n = ir.NewBlockStmt(n.Pos(), init.Slice())
+ }
}
+ return n
// special case for a receive where we throw away
// the value received.
- case ORECV:
+ case ir.ORECV:
if n.Typecheck() == 0 {
- Fatalf("missing typecheck: %+v", n)
- }
- init := n.Ninit
- n.Ninit.Set(nil)
-
- n.Left = walkexpr(n.Left, &init)
- n = mkcall1(chanfn("chanrecv1", 2, n.Left.Type), nil, &init, n.Left, nodnil())
- n = walkexpr(n, &init)
-
- n = addinit(n, init.Slice())
-
- case OBREAK,
- OCONTINUE,
- OFALL,
- OGOTO,
- OLABEL,
- ODCLCONST,
- ODCLTYPE,
- OCHECKNIL,
- OVARDEF,
- OVARKILL,
- OVARLIVE:
- break
+ base.Fatalf("missing typecheck: %+v", n)
+ }
+ init := n.Init()
+ n.PtrInit().Set(nil)
+
+ n.SetLeft(walkexpr(n.Left(), &init))
+ call := walkexpr(mkcall1(chanfn("chanrecv1", 2, n.Left().Type()), nil, &init, n.Left(), nodnil()), &init)
+ return initExpr(init.Slice(), call)
+
+ case ir.OBREAK,
+ ir.OCONTINUE,
+ ir.OFALL,
+ ir.OGOTO,
+ ir.OLABEL,
+ ir.ODCLCONST,
+ ir.ODCLTYPE,
+ ir.OCHECKNIL,
+ ir.OVARDEF,
+ ir.OVARKILL,
+ ir.OVARLIVE:
+ return n
- case ODCL:
- v := n.Left
- if v.Class() == PAUTOHEAP {
- if compiling_runtime {
- yyerror("%v escapes to heap, not allowed in runtime", v)
+ case ir.ODCL:
+ v := n.Left().(*ir.Name)
+ if v.Class() == ir.PAUTOHEAP {
+ if base.Flag.CompilingRuntime {
+ base.Errorf("%v escapes to heap, not allowed in runtime", v)
}
- if prealloc[v] == nil {
- prealloc[v] = callnew(v.Type)
- }
- nn := nod(OAS, v.Name.Param.Heapaddr, prealloc[v])
+ nn := ir.Nod(ir.OAS, v.Name().Heapaddr, callnew(v.Type()))
nn.SetColas(true)
- nn = typecheck(nn, ctxStmt)
- return walkstmt(nn)
+ return walkstmt(typecheck(nn, ctxStmt))
}
+ return n
- case OBLOCK:
- walkstmtlist(n.List.Slice())
+ case ir.OBLOCK:
+ walkstmtlist(n.List().Slice())
+ return n
- case OCASE:
- yyerror("case statement out of place")
+ case ir.OCASE:
+ base.Errorf("case statement out of place")
+ panic("unreachable")
- case ODEFER:
- Curfn.Func.SetHasDefer(true)
- Curfn.Func.numDefers++
- if Curfn.Func.numDefers > maxOpenDefers {
+ case ir.ODEFER:
+ Curfn.SetHasDefer(true)
+ Curfn.NumDefers++
+ if Curfn.NumDefers > maxOpenDefers {
// Don't allow open-coded defers if there are more than
// 8 defers in the function, since we use a single
// byte to record active defers.
- Curfn.Func.SetOpenCodedDeferDisallowed(true)
+ Curfn.SetOpenCodedDeferDisallowed(true)
}
- if n.Esc != EscNever {
+ if n.Esc() != EscNever {
// If n.Esc is not EscNever, then this defer occurs in a loop,
// so open-coded defers cannot be used in this function.
- Curfn.Func.SetOpenCodedDeferDisallowed(true)
+ Curfn.SetOpenCodedDeferDisallowed(true)
}
fallthrough
- case OGO:
- switch n.Left.Op {
- case OPRINT, OPRINTN:
- n.Left = wrapCall(n.Left, &n.Ninit)
-
- case ODELETE:
- if mapfast(n.Left.List.First().Type) == mapslow {
- n.Left = wrapCall(n.Left, &n.Ninit)
+ case ir.OGO:
+ var init ir.Nodes
+ switch call := n.Left(); call.Op() {
+ case ir.OPRINT, ir.OPRINTN:
+ call := call.(*ir.CallExpr)
+ n.SetLeft(wrapCall(call, &init))
+
+ case ir.ODELETE:
+ call := call.(*ir.CallExpr)
+ if mapfast(call.List().First().Type()) == mapslow {
+ n.SetLeft(wrapCall(call, &init))
} else {
- n.Left = walkexpr(n.Left, &n.Ninit)
+ n.SetLeft(walkexpr(call, &init))
}
- case OCOPY:
- n.Left = copyany(n.Left, &n.Ninit, true)
+ case ir.OCOPY:
+ call := call.(*ir.BinaryExpr)
+ n.SetLeft(copyany(call, &init, true))
- case OCALLFUNC, OCALLMETH, OCALLINTER:
- if n.Left.Nbody.Len() > 0 {
- n.Left = wrapCall(n.Left, &n.Ninit)
+ case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
+ call := call.(*ir.CallExpr)
+ if call.Body().Len() > 0 {
+ n.SetLeft(wrapCall(call, &init))
} else {
- n.Left = walkexpr(n.Left, &n.Ninit)
+ n.SetLeft(walkexpr(call, &init))
}
default:
- n.Left = walkexpr(n.Left, &n.Ninit)
+ n.SetLeft(walkexpr(call, &init))
+ }
+ if init.Len() > 0 {
+ init.Append(n)
+ return ir.NewBlockStmt(n.Pos(), init.Slice())
}
+ return n
- case OFOR, OFORUNTIL:
- if n.Left != nil {
- walkstmtlist(n.Left.Ninit.Slice())
- init := n.Left.Ninit
- n.Left.Ninit.Set(nil)
- n.Left = walkexpr(n.Left, &init)
- n.Left = addinit(n.Left, init.Slice())
+ case ir.OFOR, ir.OFORUNTIL:
+ if n.Left() != nil {
+ walkstmtlist(n.Left().Init().Slice())
+ init := n.Left().Init()
+ n.Left().PtrInit().Set(nil)
+ n.SetLeft(walkexpr(n.Left(), &init))
+ n.SetLeft(initExpr(init.Slice(), n.Left()))
}
- n.Right = walkstmt(n.Right)
- if n.Op == OFORUNTIL {
- walkstmtlist(n.List.Slice())
+ n.SetRight(walkstmt(n.Right()))
+ if n.Op() == ir.OFORUNTIL {
+ walkstmtlist(n.List().Slice())
}
- walkstmtlist(n.Nbody.Slice())
+ walkstmtlist(n.Body().Slice())
+ return n
- case OIF:
- n.Left = walkexpr(n.Left, &n.Ninit)
- walkstmtlist(n.Nbody.Slice())
- walkstmtlist(n.Rlist.Slice())
+ case ir.OIF:
+ n.SetLeft(walkexpr(n.Left(), n.PtrInit()))
+ walkstmtlist(n.Body().Slice())
+ walkstmtlist(n.Rlist().Slice())
+ return n
- case ORETURN:
- Curfn.Func.numReturns++
- if n.List.Len() == 0 {
- break
+ case ir.ORETURN:
+ Curfn.NumReturns++
+ if n.List().Len() == 0 {
+ return n
}
- if (Curfn.Type.FuncType().Outnamed && n.List.Len() > 1) || paramoutheap(Curfn) {
+ if (hasNamedResults(Curfn) && n.List().Len() > 1) || paramoutheap(Curfn) {
// assign to the function out parameters,
- // so that reorder3 can fix up conflicts
- var rl []*Node
+ // so that ascompatee can fix up conflicts
+ var rl []ir.Node
- for _, ln := range Curfn.Func.Dcl {
+ for _, ln := range Curfn.Dcl {
cl := ln.Class()
- if cl == PAUTO || cl == PAUTOHEAP {
+ if cl == ir.PAUTO || cl == ir.PAUTOHEAP {
break
}
- if cl == PPARAMOUT {
- if ln.isParamStackCopy() {
- ln = walkexpr(typecheck(nod(ODEREF, ln.Name.Param.Heapaddr, nil), ctxExpr), nil)
+ if cl == ir.PPARAMOUT {
+ var ln ir.Node = ln
+ if isParamStackCopy(ln) {
+ ln = walkexpr(typecheck(ir.Nod(ir.ODEREF, ln.Name().Heapaddr, nil), ctxExpr), nil)
}
rl = append(rl, ln)
}
}
- if got, want := n.List.Len(), len(rl); got != want {
+ if got, want := n.List().Len(), len(rl); got != want {
// order should have rewritten multi-value function calls
// with explicit OAS2FUNC nodes.
- Fatalf("expected %v return arguments, have %v", want, got)
+ base.Fatalf("expected %v return arguments, have %v", want, got)
}
- // move function calls out, to make reorder3's job easier.
- walkexprlistsafe(n.List.Slice(), &n.Ninit)
+ // move function calls out, to make ascompatee's job easier.
+ walkexprlistsafe(n.List().Slice(), n.PtrInit())
- ll := ascompatee(n.Op, rl, n.List.Slice(), &n.Ninit)
- n.List.Set(reorder3(ll))
- break
+ n.PtrList().Set(ascompatee(n.Op(), rl, n.List().Slice(), n.PtrInit()))
+ return n
}
- walkexprlist(n.List.Slice(), &n.Ninit)
+ walkexprlist(n.List().Slice(), n.PtrInit())
// For each return parameter (lhs), assign the corresponding result (rhs).
- lhs := Curfn.Type.Results()
- rhs := n.List.Slice()
- res := make([]*Node, lhs.NumFields())
+ lhs := Curfn.Type().Results()
+ rhs := n.List().Slice()
+ res := make([]ir.Node, lhs.NumFields())
for i, nl := range lhs.FieldSlice() {
- nname := asNode(nl.Nname)
- if nname.isParamHeapCopy() {
- nname = nname.Name.Param.Stackcopy
+ nname := ir.AsNode(nl.Nname)
+ if isParamHeapCopy(nname) {
+ nname = nname.Name().Stackcopy
}
- a := nod(OAS, nname, rhs[i])
- res[i] = convas(a, &n.Ninit)
+ a := ir.NewAssignStmt(base.Pos, nname, rhs[i])
+ res[i] = convas(a, n.PtrInit())
}
- n.List.Set(res)
+ n.PtrList().Set(res)
+ return n
- case ORETJMP:
- break
+ case ir.ORETJMP:
+ return n
- case OINLMARK:
- break
+ case ir.OINLMARK:
+ return n
- case OSELECT:
+ case ir.OSELECT:
+ n := n.(*ir.SelectStmt)
walkselect(n)
+ return n
- case OSWITCH:
+ case ir.OSWITCH:
+ n := n.(*ir.SwitchStmt)
walkswitch(n)
+ return n
- case ORANGE:
- n = walkrange(n)
+ case ir.ORANGE:
+ n := n.(*ir.RangeStmt)
+ return walkrange(n)
}
- if n.Op == ONAME {
- Fatalf("walkstmt ended up with name: %+v", n)
- }
- return n
+ // No return! Each case must return (or panic),
+ // to avoid confusion about what gets returned
+ // in the presence of type assertions.
}
// walk the whole tree of the body of an
// expression or simple statement.
// the types expressions are calculated.
// compile-time constants are evaluated.
// complex side effects like statements are appended to init
-func walkexprlist(s []*Node, init *Nodes) {
+func walkexprlist(s []ir.Node, init *ir.Nodes) {
for i := range s {
s[i] = walkexpr(s[i], init)
}
}
-func walkexprlistsafe(s []*Node, init *Nodes) {
+func walkexprlistsafe(s []ir.Node, init *ir.Nodes) {
for i, n := range s {
s[i] = safeexpr(n, init)
s[i] = walkexpr(s[i], init)
}
}
-func walkexprlistcheap(s []*Node, init *Nodes) {
+func walkexprlistcheap(s []ir.Node, init *ir.Nodes) {
for i, n := range s {
s[i] = cheapexpr(n, init)
s[i] = walkexpr(s[i], init)
return "convT16", false
case from.Size() == 4 && from.Align == 4 && !from.HasPointers():
return "convT32", false
- case from.Size() == 8 && from.Align == types.Types[TUINT64].Align && !from.HasPointers():
+ case from.Size() == 8 && from.Align == types.Types[types.TUINT64].Align && !from.HasPointers():
return "convT64", false
}
if sc := from.SoleComponent(); sc != nil {
return "convT2I", true
}
}
- Fatalf("unknown conv func %c2%c", from.Tie(), to.Tie())
+ base.Fatalf("unknown conv func %c2%c", from.Tie(), to.Tie())
panic("unreachable")
}
// The result of walkexpr MUST be assigned back to n, e.g.
// n.Left = walkexpr(n.Left, init)
-func walkexpr(n *Node, init *Nodes) *Node {
+func walkexpr(n ir.Node, init *ir.Nodes) ir.Node {
if n == nil {
return n
}
// Eagerly checkwidth all expressions for the back end.
- if n.Type != nil && !n.Type.WidthCalculated() {
- switch n.Type.Etype {
- case TBLANK, TNIL, TIDEAL:
+ if n.Type() != nil && !n.Type().WidthCalculated() {
+ switch n.Type().Kind() {
+ case types.TBLANK, types.TNIL, types.TIDEAL:
default:
- checkwidth(n.Type)
+ checkwidth(n.Type())
}
}
- if init == &n.Ninit {
+ if init == n.PtrInit() {
// not okay to use n->ninit when walking n,
// because we might replace n with some other node
// and would lose the init list.
- Fatalf("walkexpr init == &n->ninit")
+ base.Fatalf("walkexpr init == &n->ninit")
}
- if n.Ninit.Len() != 0 {
- walkstmtlist(n.Ninit.Slice())
- init.AppendNodes(&n.Ninit)
+ if n.Init().Len() != 0 {
+ walkstmtlist(n.Init().Slice())
+ init.AppendNodes(n.PtrInit())
}
lno := setlineno(n)
- if Debug.w > 1 {
- Dump("before walk expr", n)
+ if base.Flag.LowerW > 1 {
+ ir.Dump("before walk expr", n)
}
if n.Typecheck() != 1 {
- Fatalf("missed typecheck: %+v", n)
+ base.Fatalf("missed typecheck: %+v", n)
+ }
+
+ if n.Type().IsUntyped() {
+ base.Fatalf("expression has untyped type: %+v", n)
+ }
+
+ if n.Op() == ir.ONAME && n.(*ir.Name).Class() == ir.PAUTOHEAP {
+ nn := ir.Nod(ir.ODEREF, n.Name().Heapaddr, nil)
+ nn.Left().MarkNonNil()
+ return walkexpr(typecheck(nn, ctxExpr), init)
}
- if n.Type.IsUntyped() {
- Fatalf("expression has untyped type: %+v", n)
+ n = walkexpr1(n, init)
+
+ // Expressions that are constant at run time but not
+ // considered const by the language spec are not turned into
+ // constants until walk. For example, if n is y%1 == 0, the
+ // walk of y%1 may have replaced it by 0.
+ // Check whether n with its updated args is itself now a constant.
+ t := n.Type()
+ n = evalConst(n)
+ if n.Type() != t {
+ base.Fatalf("evconst changed Type: %v had type %v, now %v", n, t, n.Type())
}
+ if n.Op() == ir.OLITERAL {
+ n = typecheck(n, ctxExpr)
+ // Emit string symbol now to avoid emitting
+ // any concurrently during the backend.
+ if v := n.Val(); v.Kind() == constant.String {
+ _ = stringsym(n.Pos(), constant.StringVal(v))
+ }
+ }
+
+ updateHasCall(n)
- if n.Op == ONAME && n.Class() == PAUTOHEAP {
- nn := nod(ODEREF, n.Name.Param.Heapaddr, nil)
- nn = typecheck(nn, ctxExpr)
- nn = walkexpr(nn, init)
- nn.Left.MarkNonNil()
- return nn
+ if base.Flag.LowerW != 0 && n != nil {
+ ir.Dump("after walk expr", n)
}
-opswitch:
- switch n.Op {
+ base.Pos = lno
+ return n
+}
+
+func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node {
+ switch n.Op() {
default:
- Dump("walk", n)
- Fatalf("walkexpr: switch 1 unknown op %+S", n)
+ ir.Dump("walk", n)
+ base.Fatalf("walkexpr: switch 1 unknown op %+v", n.Op())
+ panic("unreachable")
- case ONONAME, OEMPTY, OGETG, ONEWOBJ:
+ case ir.ONONAME, ir.OGETG, ir.ONEWOBJ, ir.OMETHEXPR:
+ return n
- case OTYPE, ONAME, OLITERAL:
+ case ir.OTYPE, ir.ONAME, ir.OLITERAL, ir.ONIL, ir.ONAMEOFFSET:
// TODO(mdempsky): Just return n; see discussion on CL 38655.
// Perhaps refactor to use Node.mayBeShared for these instead.
// If these return early, make sure to still call
// stringsym for constant strings.
+ return n
- case ONOT, ONEG, OPLUS, OBITNOT, OREAL, OIMAG, ODOTMETH, ODOTINTER,
- ODEREF, OSPTR, OITAB, OIDATA, OADDR:
- n.Left = walkexpr(n.Left, init)
+ case ir.ONOT, ir.ONEG, ir.OPLUS, ir.OBITNOT, ir.OREAL, ir.OIMAG, ir.OSPTR, ir.OITAB, ir.OIDATA:
+ n.SetLeft(walkexpr(n.Left(), init))
+ return n
+
+ case ir.ODOTMETH, ir.ODOTINTER:
+ n.SetLeft(walkexpr(n.Left(), init))
+ return n
- case OEFACE, OAND, OANDNOT, OSUB, OMUL, OADD, OOR, OXOR, OLSH, ORSH:
- n.Left = walkexpr(n.Left, init)
- n.Right = walkexpr(n.Right, init)
+ case ir.OADDR:
+ n.SetLeft(walkexpr(n.Left(), init))
+ return n
- case ODOT, ODOTPTR:
+ case ir.ODEREF:
+ n.SetLeft(walkexpr(n.Left(), init))
+ return n
+
+ case ir.OEFACE, ir.OAND, ir.OANDNOT, ir.OSUB, ir.OMUL, ir.OADD, ir.OOR, ir.OXOR, ir.OLSH, ir.ORSH:
+ n.SetLeft(walkexpr(n.Left(), init))
+ n.SetRight(walkexpr(n.Right(), init))
+ return n
+
+ case ir.ODOT, ir.ODOTPTR:
+ n := n.(*ir.SelectorExpr)
usefield(n)
- n.Left = walkexpr(n.Left, init)
+ n.SetLeft(walkexpr(n.Left(), init))
+ return n
- case ODOTTYPE, ODOTTYPE2:
- n.Left = walkexpr(n.Left, init)
+ case ir.ODOTTYPE, ir.ODOTTYPE2:
+ n.SetLeft(walkexpr(n.Left(), init))
// Set up interface type addresses for back end.
- n.Right = typename(n.Type)
- if n.Op == ODOTTYPE {
- n.Right.Right = typename(n.Left.Type)
+ n.SetRight(typename(n.Type()))
+ if n.Op() == ir.ODOTTYPE {
+ n.Right().(*ir.AddrExpr).SetRight(typename(n.Left().Type()))
}
- if !n.Type.IsInterface() && !n.Left.Type.IsEmptyInterface() {
- n.List.Set1(itabname(n.Type, n.Left.Type))
+ if !n.Type().IsInterface() && !n.Left().Type().IsEmptyInterface() {
+ n.PtrList().Set1(itabname(n.Type(), n.Left().Type()))
}
+ return n
- case OLEN, OCAP:
+ case ir.OLEN, ir.OCAP:
if isRuneCount(n) {
// Replace len([]rune(string)) with runtime.countrunes(string).
- n = mkcall("countrunes", n.Type, init, conv(n.Left.Left, types.Types[TSTRING]))
- break
+ return mkcall("countrunes", n.Type(), init, conv(n.Left().(*ir.ConvExpr).Left(), types.Types[types.TSTRING]))
}
- n.Left = walkexpr(n.Left, init)
+ n.SetLeft(walkexpr(n.Left(), init))
// replace len(*[10]int) with 10.
// delayed until now to preserve side effects.
- t := n.Left.Type
+ t := n.Left().Type()
if t.IsPtr() {
t = t.Elem()
}
if t.IsArray() {
- safeexpr(n.Left, init)
- setintconst(n, t.NumElem())
- n.SetTypecheck(1)
+ safeexpr(n.Left(), init)
+ con := origIntConst(n, t.NumElem())
+ con.SetTypecheck(1)
+ return con
}
+ return n
- case OCOMPLEX:
- // Use results from call expression as arguments for complex.
- if n.Left == nil && n.Right == nil {
- n.Left = n.List.First()
- n.Right = n.List.Second()
- }
- n.Left = walkexpr(n.Left, init)
- n.Right = walkexpr(n.Right, init)
+ case ir.OCOMPLEX:
+ n.SetLeft(walkexpr(n.Left(), init))
+ n.SetRight(walkexpr(n.Right(), init))
+ return n
- case OEQ, ONE, OLT, OLE, OGT, OGE:
- n = walkcompare(n, init)
+ case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
+ n := n.(*ir.BinaryExpr)
+ return walkcompare(n, init)
- case OANDAND, OOROR:
- n.Left = walkexpr(n.Left, init)
+ case ir.OANDAND, ir.OOROR:
+ n.SetLeft(walkexpr(n.Left(), init))
// cannot put side effects from n.Right on init,
// because they cannot run before n.Left is checked.
// save elsewhere and store on the eventual n.Right.
- var ll Nodes
+ var ll ir.Nodes
- n.Right = walkexpr(n.Right, &ll)
- n.Right = addinit(n.Right, ll.Slice())
+ n.SetRight(walkexpr(n.Right(), &ll))
+ n.SetRight(initExpr(ll.Slice(), n.Right()))
+ return n
- case OPRINT, OPRINTN:
- n = walkprint(n, init)
+ case ir.OPRINT, ir.OPRINTN:
+ return walkprint(n.(*ir.CallExpr), init)
- case OPANIC:
- n = mkcall("gopanic", nil, init, n.Left)
+ case ir.OPANIC:
+ return mkcall("gopanic", nil, init, n.Left())
- case ORECOVER:
- n = mkcall("gorecover", n.Type, init, nod(OADDR, nodfp, nil))
+ case ir.ORECOVER:
+ return mkcall("gorecover", n.Type(), init, nodAddr(nodfp))
- case OCLOSUREVAR, OCFUNC:
+ case ir.OCLOSUREREAD, ir.OCFUNC:
+ return n
- case OCALLINTER, OCALLFUNC, OCALLMETH:
- if n.Op == OCALLINTER {
+ case ir.OCALLINTER, ir.OCALLFUNC, ir.OCALLMETH:
+ n := n.(*ir.CallExpr)
+ if n.Op() == ir.OCALLINTER {
usemethod(n)
markUsedIfaceMethod(n)
}
- if n.Op == OCALLFUNC && n.Left.Op == OCLOSURE {
+ if n.Op() == ir.OCALLFUNC && n.Left().Op() == ir.OCLOSURE {
// Transform direct call of a closure to call of a normal function.
// transformclosure already did all preparation work.
// Prepend captured variables to argument list.
- n.List.Prepend(n.Left.Func.Enter.Slice()...)
-
- n.Left.Func.Enter.Set(nil)
+ clo := n.Left().(*ir.ClosureExpr)
+ n.PtrList().Prepend(clo.Func().ClosureEnter.Slice()...)
+ clo.Func().ClosureEnter.Set(nil)
// Replace OCLOSURE with ONAME/PFUNC.
- n.Left = n.Left.Func.Closure.Func.Nname
+ n.SetLeft(clo.Func().Nname)
// Update type of OCALLFUNC node.
// Output arguments had not changed, but their offsets could.
- if n.Left.Type.NumResults() == 1 {
- n.Type = n.Left.Type.Results().Field(0).Type
+ if n.Left().Type().NumResults() == 1 {
+ n.SetType(n.Left().Type().Results().Field(0).Type)
} else {
- n.Type = n.Left.Type.Results()
+ n.SetType(n.Left().Type().Results())
}
}
walkCall(n, init)
+ return n
+
+ case ir.OAS, ir.OASOP:
+ init.AppendNodes(n.PtrInit())
- case OAS, OASOP:
- init.AppendNodes(&n.Ninit)
+ var left, right ir.Node
+ switch n.Op() {
+ case ir.OAS:
+ left, right = n.Left(), n.Right()
+ case ir.OASOP:
+ left, right = n.Left(), n.Right()
+ }
// Recognize m[k] = append(m[k], ...) so we can reuse
// the mapassign call.
- mapAppend := n.Left.Op == OINDEXMAP && n.Right.Op == OAPPEND
- if mapAppend && !samesafeexpr(n.Left, n.Right.List.First()) {
- Fatalf("not same expressions: %v != %v", n.Left, n.Right.List.First())
+ var mapAppend *ir.CallExpr
+ if left.Op() == ir.OINDEXMAP && right.Op() == ir.OAPPEND {
+ mapAppend = right.(*ir.CallExpr)
+ if !samesafeexpr(left, mapAppend.List().First()) {
+ base.Fatalf("not same expressions: %v != %v", left, mapAppend.List().First())
+ }
}
- n.Left = walkexpr(n.Left, init)
- n.Left = safeexpr(n.Left, init)
-
- if mapAppend {
- n.Right.List.SetFirst(n.Left)
+ left = walkexpr(left, init)
+ left = safeexpr(left, init)
+ if mapAppend != nil {
+ mapAppend.List().SetFirst(left)
}
- if n.Op == OASOP {
+ if n.Op() == ir.OASOP {
// Rewrite x op= y into x = x op y.
- n.Right = nod(n.SubOp(), n.Left, n.Right)
- n.Right = typecheck(n.Right, ctxExpr)
-
- n.Op = OAS
- n.ResetAux()
+ n = ir.Nod(ir.OAS, left,
+ typecheck(ir.NewBinaryExpr(base.Pos, n.(*ir.AssignOpStmt).SubOp(), left, right), ctxExpr))
+ } else {
+ n.(*ir.AssignStmt).SetLeft(left)
}
+ as := n.(*ir.AssignStmt)
- if oaslit(n, init) {
- break
+ if oaslit(as, init) {
+ return ir.NodAt(as.Pos(), ir.OBLOCK, nil, nil)
}
- if n.Right == nil {
+ if as.Right() == nil {
// TODO(austin): Check all "implicit zeroing"
- break
+ return as
}
- if !instrumenting && isZero(n.Right) {
- break
+ if !instrumenting && isZero(as.Right()) {
+ return as
}
- switch n.Right.Op {
+ switch as.Right().Op() {
default:
- n.Right = walkexpr(n.Right, init)
+ as.SetRight(walkexpr(as.Right(), init))
- case ORECV:
- // x = <-c; n.Left is x, n.Right.Left is c.
+ case ir.ORECV:
+ // x = <-c; as.Left is x, as.Right.Left is c.
// order.stmt made sure x is addressable.
- n.Right.Left = walkexpr(n.Right.Left, init)
+ recv := as.Right().(*ir.UnaryExpr)
+ recv.SetLeft(walkexpr(recv.Left(), init))
- n1 := nod(OADDR, n.Left, nil)
- r := n.Right.Left // the channel
- n = mkcall1(chanfn("chanrecv1", 2, r.Type), nil, init, r, n1)
- n = walkexpr(n, init)
- break opswitch
+ n1 := nodAddr(as.Left())
+ r := recv.Left() // the channel
+ return mkcall1(chanfn("chanrecv1", 2, r.Type()), nil, init, r, n1)
- case OAPPEND:
+ case ir.OAPPEND:
// x = append(...)
- r := n.Right
- if r.Type.Elem().NotInHeap() {
- yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", r.Type.Elem())
+ call := as.Right().(*ir.CallExpr)
+ if call.Type().Elem().NotInHeap() {
+ base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", call.Type().Elem())
}
+ var r ir.Node
switch {
- case isAppendOfMake(r):
+ case isAppendOfMake(call):
// x = append(y, make([]T, y)...)
- r = extendslice(r, init)
- case r.IsDDD():
- r = appendslice(r, init) // also works for append(slice, string).
+ r = extendslice(call, init)
+ case call.IsDDD():
+ r = appendslice(call, init) // also works for append(slice, string).
default:
- r = walkappend(r, init, n)
+ r = walkappend(call, init, as)
}
- n.Right = r
- if r.Op == OAPPEND {
+ as.SetRight(r)
+ if r.Op() == ir.OAPPEND {
// Left in place for back end.
// Do not add a new write barrier.
// Set up address of type for back end.
- r.Left = typename(r.Type.Elem())
- break opswitch
+ r.(*ir.CallExpr).SetLeft(typename(r.Type().Elem()))
+ return as
}
// Otherwise, lowered for race detector.
// Treat as ordinary assignment.
}
- if n.Left != nil && n.Right != nil {
- n = convas(n, init)
+ if as.Left() != nil && as.Right() != nil {
+ return convas(as, init)
}
+ return as
- case OAS2:
- init.AppendNodes(&n.Ninit)
- walkexprlistsafe(n.List.Slice(), init)
- walkexprlistsafe(n.Rlist.Slice(), init)
- ll := ascompatee(OAS, n.List.Slice(), n.Rlist.Slice(), init)
- ll = reorder3(ll)
- n = liststmt(ll)
+ case ir.OAS2:
+ init.AppendNodes(n.PtrInit())
+ walkexprlistsafe(n.List().Slice(), init)
+ walkexprlistsafe(n.Rlist().Slice(), init)
+ return liststmt(ascompatee(ir.OAS, n.List().Slice(), n.Rlist().Slice(), init))
// a,b,... = fn()
- case OAS2FUNC:
- init.AppendNodes(&n.Ninit)
+ case ir.OAS2FUNC:
+ init.AppendNodes(n.PtrInit())
- r := n.Right
- walkexprlistsafe(n.List.Slice(), init)
+ r := n.Rlist().First()
+ walkexprlistsafe(n.List().Slice(), init)
r = walkexpr(r, init)
- if isIntrinsicCall(r) {
- n.Right = r
- break
+ if IsIntrinsicCall(r.(*ir.CallExpr)) {
+ n.PtrRlist().Set1(r)
+ return n
}
init.Append(r)
- ll := ascompatet(n.List, r.Type)
- n = liststmt(ll)
+ ll := ascompatet(n.List(), r.Type())
+ return liststmt(ll)
// x, y = <-c
// order.stmt made sure x is addressable or blank.
- case OAS2RECV:
- init.AppendNodes(&n.Ninit)
-
- r := n.Right
- walkexprlistsafe(n.List.Slice(), init)
- r.Left = walkexpr(r.Left, init)
- var n1 *Node
- if n.List.First().isBlank() {
+ case ir.OAS2RECV:
+ init.AppendNodes(n.PtrInit())
+
+ r := n.Rlist().First().(*ir.UnaryExpr) // recv
+ walkexprlistsafe(n.List().Slice(), init)
+ r.SetLeft(walkexpr(r.Left(), init))
+ var n1 ir.Node
+ if ir.IsBlank(n.List().First()) {
n1 = nodnil()
} else {
- n1 = nod(OADDR, n.List.First(), nil)
+ n1 = nodAddr(n.List().First())
}
- fn := chanfn("chanrecv2", 2, r.Left.Type)
- ok := n.List.Second()
- call := mkcall1(fn, types.Types[TBOOL], init, r.Left, n1)
- n = nod(OAS, ok, call)
- n = typecheck(n, ctxStmt)
+ fn := chanfn("chanrecv2", 2, r.Left().Type())
+ ok := n.List().Second()
+ call := mkcall1(fn, types.Types[types.TBOOL], init, r.Left(), n1)
+ return typecheck(ir.Nod(ir.OAS, ok, call), ctxStmt)
// a,b = m[i]
- case OAS2MAPR:
- init.AppendNodes(&n.Ninit)
+ case ir.OAS2MAPR:
+ init.AppendNodes(n.PtrInit())
- r := n.Right
- walkexprlistsafe(n.List.Slice(), init)
- r.Left = walkexpr(r.Left, init)
- r.Right = walkexpr(r.Right, init)
- t := r.Left.Type
+ r := n.Rlist().First().(*ir.IndexExpr)
+ walkexprlistsafe(n.List().Slice(), init)
+ r.SetLeft(walkexpr(r.Left(), init))
+ r.SetRight(walkexpr(r.Right(), init))
+ t := r.Left().Type()
fast := mapfast(t)
- var key *Node
+ var key ir.Node
if fast != mapslow {
// fast versions take key by value
- key = r.Right
+ key = r.Right()
} else {
// standard version takes key by reference
// order.expr made sure key is addressable.
- key = nod(OADDR, r.Right, nil)
+ key = nodAddr(r.Right())
}
// from:
// to:
// var,b = mapaccess2*(t, m, i)
// a = *var
- a := n.List.First()
+ a := n.List().First()
+ var call *ir.CallExpr
if w := t.Elem().Width; w <= zeroValSize {
fn := mapfn(mapaccess2[fast], t)
- r = mkcall1(fn, fn.Type.Results(), init, typename(t), r.Left, key)
+ call = mkcall1(fn, fn.Type().Results(), init, typename(t), r.Left(), key)
} else {
fn := mapfn("mapaccess2_fat", t)
z := zeroaddr(w)
- r = mkcall1(fn, fn.Type.Results(), init, typename(t), r.Left, key, z)
+ call = mkcall1(fn, fn.Type().Results(), init, typename(t), r.Left(), key, z)
}
// mapaccess2* returns a typed bool, but due to spec changes,
// the boolean result of i.(T) is now untyped so we make it the
// same type as the variable on the lhs.
- if ok := n.List.Second(); !ok.isBlank() && ok.Type.IsBoolean() {
- r.Type.Field(1).Type = ok.Type
+ if ok := n.List().Second(); !ir.IsBlank(ok) && ok.Type().IsBoolean() {
+ call.Type().Field(1).Type = ok.Type()
}
- n.Right = r
- n.Op = OAS2FUNC
+ n.PtrRlist().Set1(call)
+ n.SetOp(ir.OAS2FUNC)
// don't generate a = *var if a is _
- if !a.isBlank() {
- var_ := temp(types.NewPtr(t.Elem()))
- var_.SetTypecheck(1)
- var_.MarkNonNil() // mapaccess always returns a non-nil pointer
- n.List.SetFirst(var_)
- n = walkexpr(n, init)
- init.Append(n)
- n = nod(OAS, a, nod(ODEREF, var_, nil))
+ if ir.IsBlank(a) {
+ return walkexpr(typecheck(n, ctxStmt), init)
}
- n = typecheck(n, ctxStmt)
- n = walkexpr(n, init)
+ var_ := temp(types.NewPtr(t.Elem()))
+ var_.SetTypecheck(1)
+ var_.MarkNonNil() // mapaccess always returns a non-nil pointer
+
+ n.List().SetFirst(var_)
+ init.Append(walkexpr(n, init))
- case ODELETE:
- init.AppendNodes(&n.Ninit)
- map_ := n.List.First()
- key := n.List.Second()
+ as := ir.Nod(ir.OAS, a, ir.Nod(ir.ODEREF, var_, nil))
+ return walkexpr(typecheck(as, ctxStmt), init)
+
+ case ir.ODELETE:
+ init.AppendNodes(n.PtrInit())
+ map_ := n.List().First()
+ key := n.List().Second()
map_ = walkexpr(map_, init)
key = walkexpr(key, init)
- t := map_.Type
+ t := map_.Type()
fast := mapfast(t)
if fast == mapslow {
// order.stmt made sure key is addressable.
- key = nod(OADDR, key, nil)
+ key = nodAddr(key)
}
- n = mkcall1(mapfndel(mapdelete[fast], t), nil, init, typename(t), map_, key)
+ return mkcall1(mapfndel(mapdelete[fast], t), nil, init, typename(t), map_, key)
- case OAS2DOTTYPE:
- walkexprlistsafe(n.List.Slice(), init)
- n.Right = walkexpr(n.Right, init)
+ case ir.OAS2DOTTYPE:
+ walkexprlistsafe(n.List().Slice(), init)
+ n.PtrRlist().SetIndex(0, walkexpr(n.Rlist().First(), init))
+ return n
- case OCONVIFACE:
- n.Left = walkexpr(n.Left, init)
+ case ir.OCONVIFACE:
+ n.SetLeft(walkexpr(n.Left(), init))
- fromType := n.Left.Type
- toType := n.Type
+ fromType := n.Left().Type()
+ toType := n.Type()
- if !fromType.IsInterface() && !Curfn.Func.Nname.isBlank() { // skip unnamed functions (func _())
- markTypeUsedInInterface(fromType, Curfn.Func.lsym)
+ if !fromType.IsInterface() && !ir.IsBlank(Curfn.Nname) { // skip unnamed functions (func _())
+ markTypeUsedInInterface(fromType, Curfn.LSym)
}
// typeword generates the type word of the interface value.
- typeword := func() *Node {
+ typeword := func() ir.Node {
if toType.IsEmptyInterface() {
return typename(fromType)
}
// Optimize convT2E or convT2I as a two-word copy when T is pointer-shaped.
if isdirectiface(fromType) {
- l := nod(OEFACE, typeword(), n.Left)
- l.Type = toType
+ l := ir.Nod(ir.OEFACE, typeword(), n.Left())
+ l.SetType(toType)
l.SetTypecheck(n.Typecheck())
- n = l
- break
+ return l
}
if staticuint64s == nil {
- staticuint64s = newname(Runtimepkg.Lookup("staticuint64s"))
- staticuint64s.SetClass(PEXTERN)
+ staticuint64s = NewName(Runtimepkg.Lookup("staticuint64s"))
+ staticuint64s.SetClass(ir.PEXTERN)
// The actual type is [256]uint64, but we use [256*8]uint8 so we can address
// individual bytes.
- staticuint64s.Type = types.NewArray(types.Types[TUINT8], 256*8)
- zerobase = newname(Runtimepkg.Lookup("zerobase"))
- zerobase.SetClass(PEXTERN)
- zerobase.Type = types.Types[TUINTPTR]
+ staticuint64s.SetType(types.NewArray(types.Types[types.TUINT8], 256*8))
+ zerobase = NewName(Runtimepkg.Lookup("zerobase"))
+ zerobase.SetClass(ir.PEXTERN)
+ zerobase.SetType(types.Types[types.TUINTPTR])
}
// Optimize convT2{E,I} for many cases in which T is not pointer-shaped,
// by using an existing addressable value identical to n.Left
// or creating one on the stack.
- var value *Node
+ var value ir.Node
switch {
case fromType.Size() == 0:
// n.Left is zero-sized. Use zerobase.
- cheapexpr(n.Left, init) // Evaluate n.Left for side-effects. See issue 19246.
+ cheapexpr(n.Left(), init) // Evaluate n.Left for side-effects. See issue 19246.
value = zerobase
case fromType.IsBoolean() || (fromType.Size() == 1 && fromType.IsInteger()):
// n.Left is a bool/byte. Use staticuint64s[n.Left * 8] on little-endian
// and staticuint64s[n.Left * 8 + 7] on big-endian.
- n.Left = cheapexpr(n.Left, init)
+ n.SetLeft(cheapexpr(n.Left(), init))
// byteindex widens n.Left so that the multiplication doesn't overflow.
- index := nod(OLSH, byteindex(n.Left), nodintconst(3))
+ index := ir.Nod(ir.OLSH, byteindex(n.Left()), nodintconst(3))
if thearch.LinkArch.ByteOrder == binary.BigEndian {
- index = nod(OADD, index, nodintconst(7))
+ index = ir.Nod(ir.OADD, index, nodintconst(7))
}
- value = nod(OINDEX, staticuint64s, index)
- value.SetBounded(true)
- case n.Left.Class() == PEXTERN && n.Left.Name != nil && n.Left.Name.Readonly():
+ xe := ir.Nod(ir.OINDEX, staticuint64s, index)
+ xe.SetBounded(true)
+ value = xe
+ case n.Left().Op() == ir.ONAME && n.Left().(*ir.Name).Class() == ir.PEXTERN && n.Left().(*ir.Name).Readonly():
// n.Left is a readonly global; use it directly.
- value = n.Left
- case !fromType.IsInterface() && n.Esc == EscNone && fromType.Width <= 1024:
+ value = n.Left()
+ case !fromType.IsInterface() && n.Esc() == EscNone && fromType.Width <= 1024:
// n.Left does not escape. Use a stack temporary initialized to n.Left.
value = temp(fromType)
- init.Append(typecheck(nod(OAS, value, n.Left), ctxStmt))
+ init.Append(typecheck(ir.Nod(ir.OAS, value, n.Left()), ctxStmt))
}
if value != nil {
// Value is identical to n.Left.
// Construct the interface directly: {type/itab, &value}.
- l := nod(OEFACE, typeword(), typecheck(nod(OADDR, value, nil), ctxExpr))
- l.Type = toType
+ l := ir.Nod(ir.OEFACE, typeword(), typecheck(nodAddr(value), ctxExpr))
+ l.SetType(toType)
l.SetTypecheck(n.Typecheck())
- n = l
- break
+ return l
}
// Implement interface to empty interface conversion.
if toType.IsEmptyInterface() && fromType.IsInterface() && !fromType.IsEmptyInterface() {
// Evaluate the input interface.
c := temp(fromType)
- init.Append(nod(OAS, c, n.Left))
+ init.Append(ir.Nod(ir.OAS, c, n.Left()))
// Get the itab out of the interface.
- tmp := temp(types.NewPtr(types.Types[TUINT8]))
- init.Append(nod(OAS, tmp, typecheck(nod(OITAB, c, nil), ctxExpr)))
+ tmp := temp(types.NewPtr(types.Types[types.TUINT8]))
+ init.Append(ir.Nod(ir.OAS, tmp, typecheck(ir.Nod(ir.OITAB, c, nil), ctxExpr)))
// Get the type out of the itab.
- nif := nod(OIF, typecheck(nod(ONE, tmp, nodnil()), ctxExpr), nil)
- nif.Nbody.Set1(nod(OAS, tmp, itabType(tmp)))
+ nif := ir.Nod(ir.OIF, typecheck(ir.Nod(ir.ONE, tmp, nodnil()), ctxExpr), nil)
+ nif.PtrBody().Set1(ir.Nod(ir.OAS, tmp, itabType(tmp)))
init.Append(nif)
// Build the result.
- e := nod(OEFACE, tmp, ifaceData(n.Pos, c, types.NewPtr(types.Types[TUINT8])))
- e.Type = toType // assign type manually, typecheck doesn't understand OEFACE.
+ e := ir.Nod(ir.OEFACE, tmp, ifaceData(n.Pos(), c, types.NewPtr(types.Types[types.TUINT8])))
+ e.SetType(toType) // assign type manually, typecheck doesn't understand OEFACE.
e.SetTypecheck(1)
- n = e
- break
+ return e
}
fnname, needsaddr := convFuncName(fromType, toType)
fn := syslook(fnname)
dowidth(fromType)
fn = substArgTypes(fn, fromType)
- dowidth(fn.Type)
- call := nod(OCALL, fn, nil)
- call.List.Set1(n.Left)
- call = typecheck(call, ctxExpr)
- call = walkexpr(call, init)
- call = safeexpr(call, init)
- e := nod(OEFACE, typeword(), call)
- e.Type = toType
+ dowidth(fn.Type())
+ call := ir.Nod(ir.OCALL, fn, nil)
+ call.PtrList().Set1(n.Left())
+ e := ir.Nod(ir.OEFACE, typeword(), safeexpr(walkexpr(typecheck(call, ctxExpr), init), init))
+ e.SetType(toType)
e.SetTypecheck(1)
- n = e
- break
+ return e
}
- var tab *Node
+ var tab ir.Node
if fromType.IsInterface() {
// convI2I
tab = typename(toType)
tab = typeword()
}
- v := n.Left
+ v := n.Left()
if needsaddr {
// Types of large or unknown size are passed by reference.
// Orderexpr arranged for n.Left to be a temporary for all
// with non-interface cases, is not visible to order.stmt, so we
// have to fall back on allocating a temp here.
if !islvalue(v) {
- v = copyexpr(v, v.Type, init)
+ v = copyexpr(v, v.Type(), init)
}
- v = nod(OADDR, v, nil)
+ v = nodAddr(v)
}
dowidth(fromType)
fn := syslook(fnname)
fn = substArgTypes(fn, fromType, toType)
- dowidth(fn.Type)
- n = nod(OCALL, fn, nil)
- n.List.Set2(tab, v)
- n = typecheck(n, ctxExpr)
- n = walkexpr(n, init)
-
- case OCONV, OCONVNOP:
- n.Left = walkexpr(n.Left, init)
- if n.Op == OCONVNOP && checkPtr(Curfn, 1) {
- if n.Type.IsPtr() && n.Left.Type.IsUnsafePtr() { // unsafe.Pointer to *T
- n = walkCheckPtrAlignment(n, init, nil)
- break
+ dowidth(fn.Type())
+ call := ir.Nod(ir.OCALL, fn, nil)
+ call.PtrList().Set2(tab, v)
+ return walkexpr(typecheck(call, ctxExpr), init)
+
+ case ir.OCONV, ir.OCONVNOP:
+ n := n.(*ir.ConvExpr)
+ n.SetLeft(walkexpr(n.Left(), init))
+ if n.Op() == ir.OCONVNOP && n.Type() == n.Left().Type() {
+ return n.Left()
+ }
+ if n.Op() == ir.OCONVNOP && checkPtr(Curfn, 1) {
+ if n.Type().IsPtr() && n.Left().Type().IsUnsafePtr() { // unsafe.Pointer to *T
+ return walkCheckPtrAlignment(n, init, nil)
}
- if n.Type.IsUnsafePtr() && n.Left.Type.IsUintptr() { // uintptr to unsafe.Pointer
- n = walkCheckPtrArithmetic(n, init)
- break
+ if n.Type().IsUnsafePtr() && n.Left().Type().IsUintptr() { // uintptr to unsafe.Pointer
+ return walkCheckPtrArithmetic(n, init)
}
}
- param, result := rtconvfn(n.Left.Type, n.Type)
- if param == Txxx {
- break
+ param, result := rtconvfn(n.Left().Type(), n.Type())
+ if param == types.Txxx {
+ return n
}
- fn := basicnames[param] + "to" + basicnames[result]
- n = conv(mkcall(fn, types.Types[result], init, conv(n.Left, types.Types[param])), n.Type)
+ fn := types.BasicTypeNames[param] + "to" + types.BasicTypeNames[result]
+ return conv(mkcall(fn, types.Types[result], init, conv(n.Left(), types.Types[param])), n.Type())
- case ODIV, OMOD:
- n.Left = walkexpr(n.Left, init)
- n.Right = walkexpr(n.Right, init)
+ case ir.ODIV, ir.OMOD:
+ n.SetLeft(walkexpr(n.Left(), init))
+ n.SetRight(walkexpr(n.Right(), init))
// rewrite complex div into function call.
- et := n.Left.Type.Etype
+ et := n.Left().Type().Kind()
- if isComplex[et] && n.Op == ODIV {
- t := n.Type
- n = mkcall("complex128div", types.Types[TCOMPLEX128], init, conv(n.Left, types.Types[TCOMPLEX128]), conv(n.Right, types.Types[TCOMPLEX128]))
- n = conv(n, t)
- break
+ if isComplex[et] && n.Op() == ir.ODIV {
+ t := n.Type()
+ call := mkcall("complex128div", types.Types[types.TCOMPLEX128], init, conv(n.Left(), types.Types[types.TCOMPLEX128]), conv(n.Right(), types.Types[types.TCOMPLEX128]))
+ return conv(call, t)
}
// Nothing to do for float divisions.
if isFloat[et] {
- break
+ return n
}
// rewrite 64-bit div and mod on 32-bit architectures.
// TODO: Remove this code once we can introduce
// runtime calls late in SSA processing.
- if Widthreg < 8 && (et == TINT64 || et == TUINT64) {
- if n.Right.Op == OLITERAL {
+ if Widthreg < 8 && (et == types.TINT64 || et == types.TUINT64) {
+ if n.Right().Op() == ir.OLITERAL {
// Leave div/mod by constant powers of 2 or small 16-bit constants.
// The SSA backend will handle those.
switch et {
- case TINT64:
- c := n.Right.Int64Val()
+ case types.TINT64:
+ c := ir.Int64Val(n.Right())
if c < 0 {
c = -c
}
if c != 0 && c&(c-1) == 0 {
- break opswitch
+ return n
}
- case TUINT64:
- c := uint64(n.Right.Int64Val())
+ case types.TUINT64:
+ c := ir.Uint64Val(n.Right())
if c < 1<<16 {
- break opswitch
+ return n
}
if c != 0 && c&(c-1) == 0 {
- break opswitch
+ return n
}
}
}
var fn string
- if et == TINT64 {
+ if et == types.TINT64 {
fn = "int64"
} else {
fn = "uint64"
}
- if n.Op == ODIV {
+ if n.Op() == ir.ODIV {
fn += "div"
} else {
fn += "mod"
}
- n = mkcall(fn, n.Type, init, conv(n.Left, types.Types[et]), conv(n.Right, types.Types[et]))
+ return mkcall(fn, n.Type(), init, conv(n.Left(), types.Types[et]), conv(n.Right(), types.Types[et]))
}
+ return n
- case OINDEX:
- n.Left = walkexpr(n.Left, init)
+ case ir.OINDEX:
+ n.SetLeft(walkexpr(n.Left(), init))
// save the original node for bounds checking elision.
// If it was a ODIV/OMOD walk might rewrite it.
- r := n.Right
+ r := n.Right()
- n.Right = walkexpr(n.Right, init)
+ n.SetRight(walkexpr(n.Right(), init))
// if range of type cannot exceed static array bound,
// disable bounds check.
if n.Bounded() {
- break
+ return n
}
- t := n.Left.Type
+ t := n.Left().Type()
if t != nil && t.IsPtr() {
t = t.Elem()
}
if t.IsArray() {
n.SetBounded(bounded(r, t.NumElem()))
- if Debug.m != 0 && n.Bounded() && !Isconst(n.Right, CTINT) {
- Warn("index bounds check elided")
+ if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Right(), constant.Int) {
+ base.Warn("index bounds check elided")
}
- if smallintconst(n.Right) && !n.Bounded() {
- yyerror("index out of bounds")
+ if smallintconst(n.Right()) && !n.Bounded() {
+ base.Errorf("index out of bounds")
}
- } else if Isconst(n.Left, CTSTR) {
- n.SetBounded(bounded(r, int64(len(n.Left.StringVal()))))
- if Debug.m != 0 && n.Bounded() && !Isconst(n.Right, CTINT) {
- Warn("index bounds check elided")
+ } else if ir.IsConst(n.Left(), constant.String) {
+ n.SetBounded(bounded(r, int64(len(ir.StringVal(n.Left())))))
+ if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Right(), constant.Int) {
+ base.Warn("index bounds check elided")
}
- if smallintconst(n.Right) && !n.Bounded() {
- yyerror("index out of bounds")
+ if smallintconst(n.Right()) && !n.Bounded() {
+ base.Errorf("index out of bounds")
}
}
- if Isconst(n.Right, CTINT) {
- if n.Right.Val().U.(*Mpint).CmpInt64(0) < 0 || n.Right.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 {
- yyerror("index out of bounds")
+ if ir.IsConst(n.Right(), constant.Int) {
+ if v := n.Right().Val(); constant.Sign(v) < 0 || doesoverflow(v, types.Types[types.TINT]) {
+ base.Errorf("index out of bounds")
}
}
+ return n
- case OINDEXMAP:
+ case ir.OINDEXMAP:
// Replace m[k] with *map{access1,assign}(maptype, m, &k)
- n.Left = walkexpr(n.Left, init)
- n.Right = walkexpr(n.Right, init)
- map_ := n.Left
- key := n.Right
- t := map_.Type
+ n.SetLeft(walkexpr(n.Left(), init))
+ n.SetRight(walkexpr(n.Right(), init))
+ map_ := n.Left()
+ key := n.Right()
+ t := map_.Type()
+ var call *ir.CallExpr
if n.IndexMapLValue() {
// This m[k] expression is on the left-hand side of an assignment.
fast := mapfast(t)
if fast == mapslow {
// standard version takes key by reference.
// order.expr made sure key is addressable.
- key = nod(OADDR, key, nil)
+ key = nodAddr(key)
}
- n = mkcall1(mapfn(mapassign[fast], t), nil, init, typename(t), map_, key)
+ call = mkcall1(mapfn(mapassign[fast], t), nil, init, typename(t), map_, key)
} else {
// m[k] is not the target of an assignment.
fast := mapfast(t)
if fast == mapslow {
// standard version takes key by reference.
// order.expr made sure key is addressable.
- key = nod(OADDR, key, nil)
+ key = nodAddr(key)
}
if w := t.Elem().Width; w <= zeroValSize {
- n = mkcall1(mapfn(mapaccess1[fast], t), types.NewPtr(t.Elem()), init, typename(t), map_, key)
+ call = mkcall1(mapfn(mapaccess1[fast], t), types.NewPtr(t.Elem()), init, typename(t), map_, key)
} else {
z := zeroaddr(w)
- n = mkcall1(mapfn("mapaccess1_fat", t), types.NewPtr(t.Elem()), init, typename(t), map_, key, z)
+ call = mkcall1(mapfn("mapaccess1_fat", t), types.NewPtr(t.Elem()), init, typename(t), map_, key, z)
}
}
- n.Type = types.NewPtr(t.Elem())
- n.MarkNonNil() // mapaccess1* and mapassign always return non-nil pointers.
- n = nod(ODEREF, n, nil)
- n.Type = t.Elem()
- n.SetTypecheck(1)
-
- case ORECV:
- Fatalf("walkexpr ORECV") // should see inside OAS only
+ call.SetType(types.NewPtr(t.Elem()))
+ call.MarkNonNil() // mapaccess1* and mapassign always return non-nil pointers.
+ star := ir.Nod(ir.ODEREF, call, nil)
+ star.SetType(t.Elem())
+ star.SetTypecheck(1)
+ return star
+
+ case ir.ORECV:
+ base.Fatalf("walkexpr ORECV") // should see inside OAS only
+ panic("unreachable")
+
+ case ir.OSLICEHEADER:
+ n.SetLeft(walkexpr(n.Left(), init))
+ n.List().SetFirst(walkexpr(n.List().First(), init))
+ n.List().SetSecond(walkexpr(n.List().Second(), init))
+ return n
- case OSLICEHEADER:
- n.Left = walkexpr(n.Left, init)
- n.List.SetFirst(walkexpr(n.List.First(), init))
- n.List.SetSecond(walkexpr(n.List.Second(), init))
+ case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR:
+ n := n.(*ir.SliceExpr)
- case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR:
- checkSlice := checkPtr(Curfn, 1) && n.Op == OSLICE3ARR && n.Left.Op == OCONVNOP && n.Left.Left.Type.IsUnsafePtr()
+ checkSlice := checkPtr(Curfn, 1) && n.Op() == ir.OSLICE3ARR && n.Left().Op() == ir.OCONVNOP && n.Left().(*ir.ConvExpr).Left().Type().IsUnsafePtr()
if checkSlice {
- n.Left.Left = walkexpr(n.Left.Left, init)
+ conv := n.Left().(*ir.ConvExpr)
+ conv.SetLeft(walkexpr(conv.Left(), init))
} else {
- n.Left = walkexpr(n.Left, init)
+ n.SetLeft(walkexpr(n.Left(), init))
}
+
low, high, max := n.SliceBounds()
low = walkexpr(low, init)
if low != nil && isZero(low) {
max = walkexpr(max, init)
n.SetSliceBounds(low, high, max)
if checkSlice {
- n.Left = walkCheckPtrAlignment(n.Left, init, max)
+ n.SetLeft(walkCheckPtrAlignment(n.Left().(*ir.ConvExpr), init, max))
}
- if n.Op.IsSlice3() {
- if max != nil && max.Op == OCAP && samesafeexpr(n.Left, max.Left) {
+
+ if n.Op().IsSlice3() {
+ if max != nil && max.Op() == ir.OCAP && samesafeexpr(n.Left(), max.(*ir.UnaryExpr).Left()) {
// Reduce x[i:j:cap(x)] to x[i:j].
- if n.Op == OSLICE3 {
- n.Op = OSLICE
+ if n.Op() == ir.OSLICE3 {
+ n.SetOp(ir.OSLICE)
} else {
- n.Op = OSLICEARR
+ n.SetOp(ir.OSLICEARR)
}
- n = reduceSlice(n)
+ return reduceSlice(n)
}
- } else {
- n = reduceSlice(n)
+ return n
}
+ return reduceSlice(n)
- case ONEW:
- if n.Type.Elem().NotInHeap() {
- yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type.Elem())
+ case ir.ONEW:
+ if n.Type().Elem().NotInHeap() {
+ base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type().Elem())
}
- if n.Esc == EscNone {
- if n.Type.Elem().Width >= maxImplicitStackVarSize {
- Fatalf("large ONEW with EscNone: %v", n)
+ if n.Esc() == EscNone {
+ if n.Type().Elem().Width >= maxImplicitStackVarSize {
+ base.Fatalf("large ONEW with EscNone: %v", n)
}
- r := temp(n.Type.Elem())
- r = nod(OAS, r, nil) // zero temp
- r = typecheck(r, ctxStmt)
- init.Append(r)
- r = nod(OADDR, r.Left, nil)
- r = typecheck(r, ctxExpr)
- n = r
- } else {
- n = callnew(n.Type.Elem())
+ r := temp(n.Type().Elem())
+ init.Append(typecheck(ir.Nod(ir.OAS, r, nil), ctxStmt)) // zero temp
+ return typecheck(nodAddr(r), ctxExpr)
}
+ return callnew(n.Type().Elem())
- case OADDSTR:
- n = addstr(n, init)
+ case ir.OADDSTR:
+ return addstr(n.(*ir.AddStringExpr), init)
- case OAPPEND:
+ case ir.OAPPEND:
// order should make sure we only see OAS(node, OAPPEND), which we handle above.
- Fatalf("append outside assignment")
+ base.Fatalf("append outside assignment")
+ panic("unreachable")
- case OCOPY:
- n = copyany(n, init, instrumenting && !compiling_runtime)
+ case ir.OCOPY:
+ return copyany(n.(*ir.BinaryExpr), init, instrumenting && !base.Flag.CompilingRuntime)
+ case ir.OCLOSE:
// cannot use chanfn - closechan takes any, not chan any
- case OCLOSE:
fn := syslook("closechan")
+ fn = substArgTypes(fn, n.Left().Type())
+ return mkcall1(fn, nil, init, n.Left())
- fn = substArgTypes(fn, n.Left.Type)
- n = mkcall1(fn, nil, init, n.Left)
-
- case OMAKECHAN:
+ case ir.OMAKECHAN:
// When size fits into int, use makechan instead of
// makechan64, which is faster and shorter on 32 bit platforms.
- size := n.Left
+ size := n.Left()
fnname := "makechan64"
- argtype := types.Types[TINT64]
+ argtype := types.Types[types.TINT64]
// Type checking guarantees that TIDEAL size is positive and fits in an int.
// The case of size overflow when converting TUINT or TUINTPTR to TINT
// will be handled by the negative range checks in makechan during runtime.
- if size.Type.IsKind(TIDEAL) || maxintval[size.Type.Etype].Cmp(maxintval[TUINT]) <= 0 {
+ if size.Type().IsKind(types.TIDEAL) || size.Type().Size() <= types.Types[types.TUINT].Size() {
fnname = "makechan"
- argtype = types.Types[TINT]
+ argtype = types.Types[types.TINT]
}
- n = mkcall1(chanfn(fnname, 1, n.Type), n.Type, init, typename(n.Type), conv(size, argtype))
+ return mkcall1(chanfn(fnname, 1, n.Type()), n.Type(), init, typename(n.Type()), conv(size, argtype))
- case OMAKEMAP:
- t := n.Type
+ case ir.OMAKEMAP:
+ t := n.Type()
hmapType := hmap(t)
- hint := n.Left
+ hint := n.Left()
// var h *hmap
- var h *Node
- if n.Esc == EscNone {
+ var h ir.Node
+ if n.Esc() == EscNone {
// Allocate hmap on stack.
// var hv hmap
hv := temp(hmapType)
- zero := nod(OAS, hv, nil)
- zero = typecheck(zero, ctxStmt)
- init.Append(zero)
+ init.Append(typecheck(ir.Nod(ir.OAS, hv, nil), ctxStmt))
// h = &hv
- h = nod(OADDR, hv, nil)
+ h = nodAddr(hv)
// Allocate one bucket pointed to by hmap.buckets on stack if hint
// is not larger than BUCKETSIZE. In case hint is larger than
// BUCKETSIZE runtime.makemap will allocate the buckets on the heap.
// Maximum key and elem size is 128 bytes, larger objects
// are stored with an indirection. So max bucket size is 2048+eps.
- if !Isconst(hint, CTINT) ||
- hint.Val().U.(*Mpint).CmpInt64(BUCKETSIZE) <= 0 {
+ if !ir.IsConst(hint, constant.Int) ||
+ constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(BUCKETSIZE)) {
// In case hint is larger than BUCKETSIZE runtime.makemap
// will allocate the buckets on the heap, see #20184
// h.buckets = b
// }
- nif := nod(OIF, nod(OLE, hint, nodintconst(BUCKETSIZE)), nil)
+ nif := ir.Nod(ir.OIF, ir.Nod(ir.OLE, hint, nodintconst(BUCKETSIZE)), nil)
nif.SetLikely(true)
// var bv bmap
bv := temp(bmap(t))
- zero = nod(OAS, bv, nil)
- nif.Nbody.Append(zero)
+ nif.PtrBody().Append(ir.Nod(ir.OAS, bv, nil))
// b = &bv
- b := nod(OADDR, bv, nil)
+ b := nodAddr(bv)
// h.buckets = b
bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap
- na := nod(OAS, nodSym(ODOT, h, bsym), b)
- nif.Nbody.Append(na)
-
- nif = typecheck(nif, ctxStmt)
- nif = walkstmt(nif)
- init.Append(nif)
+ na := ir.Nod(ir.OAS, nodSym(ir.ODOT, h, bsym), b)
+ nif.PtrBody().Append(na)
+ appendWalkStmt(init, nif)
}
}
- if Isconst(hint, CTINT) && hint.Val().U.(*Mpint).CmpInt64(BUCKETSIZE) <= 0 {
+ if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(BUCKETSIZE)) {
// Handling make(map[any]any) and
// make(map[any]any, hint) where hint <= BUCKETSIZE
// special allows for faster map initialization and
// For hint <= BUCKETSIZE overLoadFactor(hint, 0) is false
// and no buckets will be allocated by makemap. Therefore,
// no buckets need to be allocated in this code path.
- if n.Esc == EscNone {
+ if n.Esc() == EscNone {
// Only need to initialize h.hash0 since
// hmap h has been allocated on the stack already.
// h.hash0 = fastrand()
- rand := mkcall("fastrand", types.Types[TUINT32], init)
+ rand := mkcall("fastrand", types.Types[types.TUINT32], init)
hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap
- a := nod(OAS, nodSym(ODOT, h, hashsym), rand)
- a = typecheck(a, ctxStmt)
- a = walkexpr(a, init)
- init.Append(a)
- n = convnop(h, t)
- } else {
- // Call runtime.makehmap to allocate an
- // hmap on the heap and initialize hmap's hash0 field.
- fn := syslook("makemap_small")
- fn = substArgTypes(fn, t.Key(), t.Elem())
- n = mkcall1(fn, n.Type, init)
+ appendWalkStmt(init, ir.Nod(ir.OAS, nodSym(ir.ODOT, h, hashsym), rand))
+ return convnop(h, t)
}
- } else {
- if n.Esc != EscNone {
- h = nodnil()
- }
- // Map initialization with a variable or large hint is
- // more complicated. We therefore generate a call to
- // runtime.makemap to initialize hmap and allocate the
- // map buckets.
+ // Call runtime.makehmap to allocate an
+ // hmap on the heap and initialize hmap's hash0 field.
+ fn := syslook("makemap_small")
+ fn = substArgTypes(fn, t.Key(), t.Elem())
+ return mkcall1(fn, n.Type(), init)
+ }
- // When hint fits into int, use makemap instead of
- // makemap64, which is faster and shorter on 32 bit platforms.
- fnname := "makemap64"
- argtype := types.Types[TINT64]
+ if n.Esc() != EscNone {
+ h = nodnil()
+ }
+ // Map initialization with a variable or large hint is
+ // more complicated. We therefore generate a call to
+ // runtime.makemap to initialize hmap and allocate the
+ // map buckets.
- // Type checking guarantees that TIDEAL hint is positive and fits in an int.
- // See checkmake call in TMAP case of OMAKE case in OpSwitch in typecheck1 function.
- // The case of hint overflow when converting TUINT or TUINTPTR to TINT
- // will be handled by the negative range checks in makemap during runtime.
- if hint.Type.IsKind(TIDEAL) || maxintval[hint.Type.Etype].Cmp(maxintval[TUINT]) <= 0 {
- fnname = "makemap"
- argtype = types.Types[TINT]
- }
+ // When hint fits into int, use makemap instead of
+ // makemap64, which is faster and shorter on 32 bit platforms.
+ fnname := "makemap64"
+ argtype := types.Types[types.TINT64]
- fn := syslook(fnname)
- fn = substArgTypes(fn, hmapType, t.Key(), t.Elem())
- n = mkcall1(fn, n.Type, init, typename(n.Type), conv(hint, argtype), h)
+ // Type checking guarantees that TIDEAL hint is positive and fits in an int.
+ // See checkmake call in TMAP case of OMAKE case in OpSwitch in typecheck1 function.
+ // The case of hint overflow when converting TUINT or TUINTPTR to TINT
+ // will be handled by the negative range checks in makemap during runtime.
+ if hint.Type().IsKind(types.TIDEAL) || hint.Type().Size() <= types.Types[types.TUINT].Size() {
+ fnname = "makemap"
+ argtype = types.Types[types.TINT]
}
- case OMAKESLICE:
- l := n.Left
- r := n.Right
+ fn := syslook(fnname)
+ fn = substArgTypes(fn, hmapType, t.Key(), t.Elem())
+ return mkcall1(fn, n.Type(), init, typename(n.Type()), conv(hint, argtype), h)
+
+ case ir.OMAKESLICE:
+ l := n.Left()
+ r := n.Right()
if r == nil {
r = safeexpr(l, init)
l = r
}
- t := n.Type
+ t := n.Type()
if t.Elem().NotInHeap() {
- yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
+ base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
}
- if n.Esc == EscNone {
+ if n.Esc() == EscNone {
if why := heapAllocReason(n); why != "" {
- Fatalf("%v has EscNone, but %v", n, why)
+ base.Fatalf("%v has EscNone, but %v", n, why)
}
// var arr [r]T
// n = arr[:l]
i := indexconst(r)
if i < 0 {
- Fatalf("walkexpr: invalid index %v", r)
+ base.Fatalf("walkexpr: invalid index %v", r)
}
// cap is constrained to [0,2^31) or [0,2^63) depending on whether
// if len < 0 { panicmakeslicelen() }
// panicmakeslicecap()
// }
- nif := nod(OIF, nod(OGT, conv(l, types.Types[TUINT64]), nodintconst(i)), nil)
- niflen := nod(OIF, nod(OLT, l, nodintconst(0)), nil)
- niflen.Nbody.Set1(mkcall("panicmakeslicelen", nil, init))
- nif.Nbody.Append(niflen, mkcall("panicmakeslicecap", nil, init))
- nif = typecheck(nif, ctxStmt)
- init.Append(nif)
+ nif := ir.Nod(ir.OIF, ir.Nod(ir.OGT, conv(l, types.Types[types.TUINT64]), nodintconst(i)), nil)
+ niflen := ir.Nod(ir.OIF, ir.Nod(ir.OLT, l, nodintconst(0)), nil)
+ niflen.PtrBody().Set1(mkcall("panicmakeslicelen", nil, init))
+ nif.PtrBody().Append(niflen, mkcall("panicmakeslicecap", nil, init))
+ init.Append(typecheck(nif, ctxStmt))
t = types.NewArray(t.Elem(), i) // [r]T
var_ := temp(t)
- a := nod(OAS, var_, nil) // zero temp
- a = typecheck(a, ctxStmt)
- init.Append(a)
- r := nod(OSLICE, var_, nil) // arr[:l]
+ appendWalkStmt(init, ir.Nod(ir.OAS, var_, nil)) // zero temp
+ r := ir.Nod(ir.OSLICE, var_, nil) // arr[:l]
r.SetSliceBounds(nil, l, nil)
- r = conv(r, n.Type) // in case n.Type is named.
- r = typecheck(r, ctxExpr)
- r = walkexpr(r, init)
- n = r
- } else {
- // n escapes; set up a call to makeslice.
- // When len and cap can fit into int, use makeslice instead of
- // makeslice64, which is faster and shorter on 32 bit platforms.
+ // The conv is necessary in case n.Type is named.
+ return walkexpr(typecheck(conv(r, n.Type()), ctxExpr), init)
+ }
- len, cap := l, r
+ // n escapes; set up a call to makeslice.
+ // When len and cap can fit into int, use makeslice instead of
+ // makeslice64, which is faster and shorter on 32 bit platforms.
- fnname := "makeslice64"
- argtype := types.Types[TINT64]
+ len, cap := l, r
- // Type checking guarantees that TIDEAL len/cap are positive and fit in an int.
- // The case of len or cap overflow when converting TUINT or TUINTPTR to TINT
- // will be handled by the negative range checks in makeslice during runtime.
- if (len.Type.IsKind(TIDEAL) || maxintval[len.Type.Etype].Cmp(maxintval[TUINT]) <= 0) &&
- (cap.Type.IsKind(TIDEAL) || maxintval[cap.Type.Etype].Cmp(maxintval[TUINT]) <= 0) {
- fnname = "makeslice"
- argtype = types.Types[TINT]
- }
+ fnname := "makeslice64"
+ argtype := types.Types[types.TINT64]
- m := nod(OSLICEHEADER, nil, nil)
- m.Type = t
+ // Type checking guarantees that TIDEAL len/cap are positive and fit in an int.
+ // The case of len or cap overflow when converting TUINT or TUINTPTR to TINT
+ // will be handled by the negative range checks in makeslice during runtime.
+ if (len.Type().IsKind(types.TIDEAL) || len.Type().Size() <= types.Types[types.TUINT].Size()) &&
+ (cap.Type().IsKind(types.TIDEAL) || cap.Type().Size() <= types.Types[types.TUINT].Size()) {
+ fnname = "makeslice"
+ argtype = types.Types[types.TINT]
+ }
- fn := syslook(fnname)
- m.Left = mkcall1(fn, types.Types[TUNSAFEPTR], init, typename(t.Elem()), conv(len, argtype), conv(cap, argtype))
- m.Left.MarkNonNil()
- m.List.Set2(conv(len, types.Types[TINT]), conv(cap, types.Types[TINT]))
+ m := ir.Nod(ir.OSLICEHEADER, nil, nil)
+ m.SetType(t)
- m = typecheck(m, ctxExpr)
- m = walkexpr(m, init)
- n = m
- }
+ fn := syslook(fnname)
+ m.SetLeft(mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), conv(len, argtype), conv(cap, argtype)))
+ m.Left().MarkNonNil()
+ m.PtrList().Set2(conv(len, types.Types[types.TINT]), conv(cap, types.Types[types.TINT]))
+ return walkexpr(typecheck(m, ctxExpr), init)
- case OMAKESLICECOPY:
- if n.Esc == EscNone {
- Fatalf("OMAKESLICECOPY with EscNone: %v", n)
+ case ir.OMAKESLICECOPY:
+ if n.Esc() == EscNone {
+ base.Fatalf("OMAKESLICECOPY with EscNone: %v", n)
}
- t := n.Type
+ t := n.Type()
if t.Elem().NotInHeap() {
- yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
+ base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
}
- length := conv(n.Left, types.Types[TINT])
- copylen := nod(OLEN, n.Right, nil)
- copyptr := nod(OSPTR, n.Right, nil)
+ length := conv(n.Left(), types.Types[types.TINT])
+ copylen := ir.Nod(ir.OLEN, n.Right(), nil)
+ copyptr := ir.Nod(ir.OSPTR, n.Right(), nil)
if !t.Elem().HasPointers() && n.Bounded() {
// When len(to)==len(from) and elements have no pointers:
// We do not check for overflow of len(to)*elem.Width here
// since len(from) is an existing checked slice capacity
// with same elem.Width for the from slice.
- size := nod(OMUL, conv(length, types.Types[TUINTPTR]), conv(nodintconst(t.Elem().Width), types.Types[TUINTPTR]))
+ size := ir.Nod(ir.OMUL, conv(length, types.Types[types.TUINTPTR]), conv(nodintconst(t.Elem().Width), types.Types[types.TUINTPTR]))
// instantiate mallocgc(size uintptr, typ *byte, needszero bool) unsafe.Pointer
fn := syslook("mallocgc")
- sh := nod(OSLICEHEADER, nil, nil)
- sh.Left = mkcall1(fn, types.Types[TUNSAFEPTR], init, size, nodnil(), nodbool(false))
- sh.Left.MarkNonNil()
- sh.List.Set2(length, length)
- sh.Type = t
+ sh := ir.Nod(ir.OSLICEHEADER, nil, nil)
+ sh.SetLeft(mkcall1(fn, types.Types[types.TUNSAFEPTR], init, size, nodnil(), nodbool(false)))
+ sh.Left().MarkNonNil()
+ sh.PtrList().Set2(length, length)
+ sh.SetType(t)
s := temp(t)
- r := typecheck(nod(OAS, s, sh), ctxStmt)
+ r := typecheck(ir.Nod(ir.OAS, s, sh), ctxStmt)
r = walkexpr(r, init)
init.Append(r)
// instantiate memmove(to *any, frm *any, size uintptr)
fn = syslook("memmove")
fn = substArgTypes(fn, t.Elem(), t.Elem())
- ncopy := mkcall1(fn, nil, init, nod(OSPTR, s, nil), copyptr, size)
- ncopy = typecheck(ncopy, ctxStmt)
- ncopy = walkexpr(ncopy, init)
- init.Append(ncopy)
-
- n = s
- } else { // Replace make+copy with runtime.makeslicecopy.
- // instantiate makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer
- fn := syslook("makeslicecopy")
- s := nod(OSLICEHEADER, nil, nil)
- s.Left = mkcall1(fn, types.Types[TUNSAFEPTR], init, typename(t.Elem()), length, copylen, conv(copyptr, types.Types[TUNSAFEPTR]))
- s.Left.MarkNonNil()
- s.List.Set2(length, length)
- s.Type = t
- n = typecheck(s, ctxExpr)
- n = walkexpr(n, init)
- }
-
- case ORUNESTR:
+ ncopy := mkcall1(fn, nil, init, ir.Nod(ir.OSPTR, s, nil), copyptr, size)
+ init.Append(walkexpr(typecheck(ncopy, ctxStmt), init))
+
+ return s
+ }
+ // Replace make+copy with runtime.makeslicecopy.
+ // instantiate makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer
+ fn := syslook("makeslicecopy")
+ s := ir.Nod(ir.OSLICEHEADER, nil, nil)
+ s.SetLeft(mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), length, copylen, conv(copyptr, types.Types[types.TUNSAFEPTR])))
+ s.Left().MarkNonNil()
+ s.PtrList().Set2(length, length)
+ s.SetType(t)
+ return walkexpr(typecheck(s, ctxExpr), init)
+
+ case ir.ORUNESTR:
a := nodnil()
- if n.Esc == EscNone {
- t := types.NewArray(types.Types[TUINT8], 4)
- a = nod(OADDR, temp(t), nil)
+ if n.Esc() == EscNone {
+ t := types.NewArray(types.Types[types.TUINT8], 4)
+ a = nodAddr(temp(t))
}
// intstring(*[4]byte, rune)
- n = mkcall("intstring", n.Type, init, a, conv(n.Left, types.Types[TINT64]))
+ return mkcall("intstring", n.Type(), init, a, conv(n.Left(), types.Types[types.TINT64]))
- case OBYTES2STR, ORUNES2STR:
+ case ir.OBYTES2STR, ir.ORUNES2STR:
a := nodnil()
- if n.Esc == EscNone {
+ if n.Esc() == EscNone {
// Create temporary buffer for string on stack.
- t := types.NewArray(types.Types[TUINT8], tmpstringbufsize)
- a = nod(OADDR, temp(t), nil)
+ t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize)
+ a = nodAddr(temp(t))
}
- if n.Op == ORUNES2STR {
+ if n.Op() == ir.ORUNES2STR {
// slicerunetostring(*[32]byte, []rune) string
- n = mkcall("slicerunetostring", n.Type, init, a, n.Left)
- } else {
- // slicebytetostring(*[32]byte, ptr *byte, n int) string
- n.Left = cheapexpr(n.Left, init)
- ptr, len := n.Left.backingArrayPtrLen()
- n = mkcall("slicebytetostring", n.Type, init, a, ptr, len)
+ return mkcall("slicerunetostring", n.Type(), init, a, n.Left())
}
+ // slicebytetostring(*[32]byte, ptr *byte, n int) string
+ n.SetLeft(cheapexpr(n.Left(), init))
+ ptr, len := backingArrayPtrLen(n.Left())
+ return mkcall("slicebytetostring", n.Type(), init, a, ptr, len)
- case OBYTES2STRTMP:
- n.Left = walkexpr(n.Left, init)
+ case ir.OBYTES2STRTMP:
+ n.SetLeft(walkexpr(n.Left(), init))
if !instrumenting {
// Let the backend handle OBYTES2STRTMP directly
// to avoid a function call to slicebytetostringtmp.
- break
+ return n
}
// slicebytetostringtmp(ptr *byte, n int) string
- n.Left = cheapexpr(n.Left, init)
- ptr, len := n.Left.backingArrayPtrLen()
- n = mkcall("slicebytetostringtmp", n.Type, init, ptr, len)
+ n.SetLeft(cheapexpr(n.Left(), init))
+ ptr, len := backingArrayPtrLen(n.Left())
+ return mkcall("slicebytetostringtmp", n.Type(), init, ptr, len)
- case OSTR2BYTES:
- s := n.Left
- if Isconst(s, CTSTR) {
- sc := s.StringVal()
+ case ir.OSTR2BYTES:
+ s := n.Left()
+ if ir.IsConst(s, constant.String) {
+ sc := ir.StringVal(s)
// Allocate a [n]byte of the right size.
- t := types.NewArray(types.Types[TUINT8], int64(len(sc)))
- var a *Node
- if n.Esc == EscNone && len(sc) <= int(maxImplicitStackVarSize) {
- a = nod(OADDR, temp(t), nil)
+ t := types.NewArray(types.Types[types.TUINT8], int64(len(sc)))
+ var a ir.Node
+ if n.Esc() == EscNone && len(sc) <= int(maxImplicitStackVarSize) {
+ a = nodAddr(temp(t))
} else {
a = callnew(t)
}
p := temp(t.PtrTo()) // *[n]byte
- init.Append(typecheck(nod(OAS, p, a), ctxStmt))
+ init.Append(typecheck(ir.Nod(ir.OAS, p, a), ctxStmt))
// Copy from the static string data to the [n]byte.
if len(sc) > 0 {
- as := nod(OAS,
- nod(ODEREF, p, nil),
- nod(ODEREF, convnop(nod(OSPTR, s, nil), t.PtrTo()), nil))
- as = typecheck(as, ctxStmt)
- as = walkstmt(as)
- init.Append(as)
+ as := ir.Nod(ir.OAS,
+ ir.Nod(ir.ODEREF, p, nil),
+ ir.Nod(ir.ODEREF, convnop(ir.Nod(ir.OSPTR, s, nil), t.PtrTo()), nil))
+ appendWalkStmt(init, as)
}
// Slice the [n]byte to a []byte.
- n.Op = OSLICEARR
- n.Left = p
- n = walkexpr(n, init)
- break
+ slice := ir.NodAt(n.Pos(), ir.OSLICEARR, p, nil)
+ slice.SetType(n.Type())
+ slice.SetTypecheck(1)
+ return walkexpr(slice, init)
}
a := nodnil()
- if n.Esc == EscNone {
+ if n.Esc() == EscNone {
// Create temporary buffer for slice on stack.
- t := types.NewArray(types.Types[TUINT8], tmpstringbufsize)
- a = nod(OADDR, temp(t), nil)
+ t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize)
+ a = nodAddr(temp(t))
}
// stringtoslicebyte(*32[byte], string) []byte
- n = mkcall("stringtoslicebyte", n.Type, init, a, conv(s, types.Types[TSTRING]))
+ return mkcall("stringtoslicebyte", n.Type(), init, a, conv(s, types.Types[types.TSTRING]))
- case OSTR2BYTESTMP:
+ case ir.OSTR2BYTESTMP:
// []byte(string) conversion that creates a slice
// referring to the actual string bytes.
// This conversion is handled later by the backend and
// is only for use by internal compiler optimizations
// that know that the slice won't be mutated.
// The only such case today is:
// for i, c := range []byte(string)
- n.Left = walkexpr(n.Left, init)
+ n.SetLeft(walkexpr(n.Left(), init))
+ return n
- case OSTR2RUNES:
+ case ir.OSTR2RUNES:
a := nodnil()
- if n.Esc == EscNone {
+ if n.Esc() == EscNone {
// Create temporary buffer for slice on stack.
- t := types.NewArray(types.Types[TINT32], tmpstringbufsize)
- a = nod(OADDR, temp(t), nil)
+ t := types.NewArray(types.Types[types.TINT32], tmpstringbufsize)
+ a = nodAddr(temp(t))
}
// stringtoslicerune(*[32]rune, string) []rune
- n = mkcall("stringtoslicerune", n.Type, init, a, conv(n.Left, types.Types[TSTRING]))
+ return mkcall("stringtoslicerune", n.Type(), init, a, conv(n.Left(), types.Types[types.TSTRING]))
- case OARRAYLIT, OSLICELIT, OMAPLIT, OSTRUCTLIT, OPTRLIT:
- if isStaticCompositeLiteral(n) && !canSSAType(n.Type) {
+ case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT, ir.OPTRLIT:
+ if isStaticCompositeLiteral(n) && !canSSAType(n.Type()) {
+ n := n.(*ir.CompLitExpr) // not OPTRLIT
// n can be directly represented in the read-only data section.
// Make direct reference to the static data. See issue 12841.
- vstat := readonlystaticname(n.Type)
+ vstat := readonlystaticname(n.Type())
fixedlit(inInitFunction, initKindStatic, n, vstat, init)
- n = vstat
- n = typecheck(n, ctxExpr)
- break
+ return typecheck(vstat, ctxExpr)
}
- var_ := temp(n.Type)
+ var_ := temp(n.Type())
anylit(n, var_, init)
- n = var_
+ return var_
- case OSEND:
- n1 := n.Right
- n1 = assignconv(n1, n.Left.Type.Elem(), "chan send")
+ case ir.OSEND:
+ n1 := n.Right()
+ n1 = assignconv(n1, n.Left().Type().Elem(), "chan send")
n1 = walkexpr(n1, init)
- n1 = nod(OADDR, n1, nil)
- n = mkcall1(chanfn("chansend1", 2, n.Left.Type), nil, init, n.Left, n1)
+ n1 = nodAddr(n1)
+ return mkcall1(chanfn("chansend1", 2, n.Left().Type()), nil, init, n.Left(), n1)
- case OCLOSURE:
- n = walkclosure(n, init)
+ case ir.OCLOSURE:
+ return walkclosure(n.(*ir.ClosureExpr), init)
- case OCALLPART:
- n = walkpartialcall(n, init)
- }
-
- // Expressions that are constant at run time but not
- // considered const by the language spec are not turned into
- // constants until walk. For example, if n is y%1 == 0, the
- // walk of y%1 may have replaced it by 0.
- // Check whether n with its updated args is itself now a constant.
- t := n.Type
- evconst(n)
- if n.Type != t {
- Fatalf("evconst changed Type: %v had type %v, now %v", n, t, n.Type)
- }
- if n.Op == OLITERAL {
- n = typecheck(n, ctxExpr)
- // Emit string symbol now to avoid emitting
- // any concurrently during the backend.
- if s, ok := n.Val().U.(string); ok {
- _ = stringsym(n.Pos, s)
- }
+ case ir.OCALLPART:
+ return walkpartialcall(n.(*ir.CallPartExpr), init)
}
- updateHasCall(n)
-
- if Debug.w != 0 && n != nil {
- Dump("after walk expr", n)
- }
-
- lineno = lno
- return n
+ // No return! Each case must return (or panic),
+ // to avoid confusion about what gets returned
+ // in the presence of type assertions.
}
// markTypeUsedInInterface marks that type t is converted to an interface.
// markUsedIfaceMethod marks that an interface method is used in the current
// function. n is OCALLINTER node.
-func markUsedIfaceMethod(n *Node) {
- ityp := n.Left.Left.Type
+func markUsedIfaceMethod(n *ir.CallExpr) {
+ dot := n.Left().(*ir.SelectorExpr)
+ ityp := dot.Left().Type()
tsym := typenamesym(ityp).Linksym()
- r := obj.Addrel(Curfn.Func.lsym)
+ r := obj.Addrel(Curfn.LSym)
r.Sym = tsym
- // n.Left.Xoffset is the method index * Widthptr (the offset of code pointer
+ // dot.Xoffset is the method index * Widthptr (the offset of code pointer
// in itab).
- midx := n.Left.Xoffset / int64(Widthptr)
+ midx := dot.Offset() / int64(Widthptr)
r.Add = ifaceMethodOffset(ityp, midx)
r.Type = objabi.R_USEIFACEMETHOD
}
// name can be derived from the names of the returned types.
//
// If no such function is necessary, it returns (Txxx, Txxx).
-func rtconvfn(src, dst *types.Type) (param, result types.EType) {
+func rtconvfn(src, dst *types.Type) (param, result types.Kind) {
if thearch.SoftFloat {
- return Txxx, Txxx
+ return types.Txxx, types.Txxx
}
switch thearch.LinkArch.Family {
case sys.ARM, sys.MIPS:
if src.IsFloat() {
- switch dst.Etype {
- case TINT64, TUINT64:
- return TFLOAT64, dst.Etype
+ switch dst.Kind() {
+ case types.TINT64, types.TUINT64:
+ return types.TFLOAT64, dst.Kind()
}
}
if dst.IsFloat() {
- switch src.Etype {
- case TINT64, TUINT64:
- return src.Etype, TFLOAT64
+ switch src.Kind() {
+ case types.TINT64, types.TUINT64:
+ return src.Kind(), types.TFLOAT64
}
}
case sys.I386:
if src.IsFloat() {
- switch dst.Etype {
- case TINT64, TUINT64:
- return TFLOAT64, dst.Etype
- case TUINT32, TUINT, TUINTPTR:
- return TFLOAT64, TUINT32
+ switch dst.Kind() {
+ case types.TINT64, types.TUINT64:
+ return types.TFLOAT64, dst.Kind()
+ case types.TUINT32, types.TUINT, types.TUINTPTR:
+ return types.TFLOAT64, types.TUINT32
}
}
if dst.IsFloat() {
- switch src.Etype {
- case TINT64, TUINT64:
- return src.Etype, TFLOAT64
- case TUINT32, TUINT, TUINTPTR:
- return TUINT32, TFLOAT64
+ switch src.Kind() {
+ case types.TINT64, types.TUINT64:
+ return src.Kind(), types.TFLOAT64
+ case types.TUINT32, types.TUINT, types.TUINTPTR:
+ return types.TUINT32, types.TFLOAT64
}
}
}
- return Txxx, Txxx
+ return types.Txxx, types.Txxx
}
// TODO(josharian): combine this with its caller and simplify
-func reduceSlice(n *Node) *Node {
+func reduceSlice(n *ir.SliceExpr) ir.Node {
low, high, max := n.SliceBounds()
- if high != nil && high.Op == OLEN && samesafeexpr(n.Left, high.Left) {
+ if high != nil && high.Op() == ir.OLEN && samesafeexpr(n.Left(), high.(*ir.UnaryExpr).Left()) {
// Reduce x[i:len(x)] to x[i:].
high = nil
}
n.SetSliceBounds(low, high, max)
- if (n.Op == OSLICE || n.Op == OSLICESTR) && low == nil && high == nil {
+ if (n.Op() == ir.OSLICE || n.Op() == ir.OSLICESTR) && low == nil && high == nil {
// Reduce x[:] to x.
- if Debug_slice > 0 {
- Warn("slice: omit slice operation")
+ if base.Debug.Slice > 0 {
+ base.Warn("slice: omit slice operation")
}
- return n.Left
+ return n.Left()
}
return n
}
-func ascompatee1(l *Node, r *Node, init *Nodes) *Node {
+func ascompatee1(l ir.Node, r ir.Node, init *ir.Nodes) *ir.AssignStmt {
// convas will turn map assigns into function calls,
// making it impossible for reorder3 to work.
- n := nod(OAS, l, r)
+ n := ir.NewAssignStmt(base.Pos, l, r)
- if l.Op == OINDEXMAP {
+ if l.Op() == ir.OINDEXMAP {
return n
}
return convas(n, init)
}
-func ascompatee(op Op, nl, nr []*Node, init *Nodes) []*Node {
+func ascompatee(op ir.Op, nl, nr []ir.Node, init *ir.Nodes) []ir.Node {
// check assign expression list to
// an expression list. called in
// expr-list = expr-list
nr[i1] = safeexpr(nr[i1], init)
}
- var nn []*Node
+ var nn []*ir.AssignStmt
i := 0
for ; i < len(nl); i++ {
if i >= len(nr) {
break
}
// Do not generate 'x = x' during return. See issue 4014.
- if op == ORETURN && samesafeexpr(nl[i], nr[i]) {
+ if op == ir.ORETURN && samesafeexpr(nl[i], nr[i]) {
continue
}
nn = append(nn, ascompatee1(nl[i], nr[i], init))
// cannot happen: caller checked that lists had same length
if i < len(nl) || i < len(nr) {
- var nln, nrn Nodes
+ var nln, nrn ir.Nodes
nln.Set(nl)
nrn.Set(nr)
- Fatalf("error in shape across %+v %v %+v / %d %d [%s]", nln, op, nrn, len(nl), len(nr), Curfn.funcname())
+ base.Fatalf("error in shape across %+v %v %+v / %d %d [%s]", nln, op, nrn, len(nl), len(nr), ir.FuncName(Curfn))
}
- return nn
+ return reorder3(nn)
}
// fncall reports whether assigning an rvalue of type rt to an lvalue l might involve a function call.
-func fncall(l *Node, rt *types.Type) bool {
- if l.HasCall() || l.Op == OINDEXMAP {
+func fncall(l ir.Node, rt *types.Type) bool {
+ if l.HasCall() || l.Op() == ir.OINDEXMAP {
return true
}
- if types.Identical(l.Type, rt) {
+ if types.Identical(l.Type(), rt) {
return false
}
// There might be a conversion required, which might involve a runtime call.
// check assign type list to
// an expression list. called in
// expr-list = func()
-func ascompatet(nl Nodes, nr *types.Type) []*Node {
+func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node {
if nl.Len() != nr.NumFields() {
- Fatalf("ascompatet: assignment count mismatch: %d = %d", nl.Len(), nr.NumFields())
+ base.Fatalf("ascompatet: assignment count mismatch: %d = %d", nl.Len(), nr.NumFields())
}
- var nn, mm Nodes
+ var nn, mm ir.Nodes
for i, l := range nl.Slice() {
- if l.isBlank() {
+ if ir.IsBlank(l) {
continue
}
r := nr.Field(i)
// Any assignment to an lvalue that might cause a function call must be
// deferred until all the returned values have been read.
if fncall(l, r.Type) {
- tmp := temp(r.Type)
+ tmp := ir.Node(temp(r.Type))
tmp = typecheck(tmp, ctxExpr)
- a := nod(OAS, l, tmp)
- a = convas(a, &mm)
+ a := convas(ir.NewAssignStmt(base.Pos, l, tmp), &mm)
mm.Append(a)
l = tmp
}
- res := nod(ORESULT, nil, nil)
- res.Xoffset = Ctxt.FixedFrameSize() + r.Offset
- res.Type = r.Type
+ res := ir.Nod(ir.ORESULT, nil, nil)
+ res.SetOffset(base.Ctxt.FixedFrameSize() + r.Offset)
+ res.SetType(r.Type)
res.SetTypecheck(1)
- a := nod(OAS, l, res)
- a = convas(a, &nn)
+ a := convas(ir.NewAssignStmt(base.Pos, l, res), &nn)
updateHasCall(a)
if a.HasCall() {
- Dump("ascompatet ucount", a)
- Fatalf("ascompatet: too many function calls evaluating parameters")
+ ir.Dump("ascompatet ucount", a)
+ base.Fatalf("ascompatet: too many function calls evaluating parameters")
}
nn.Append(a)
}
// package all the arguments that match a ... T parameter into a []T.
-func mkdotargslice(typ *types.Type, args []*Node) *Node {
- var n *Node
+func mkdotargslice(typ *types.Type, args []ir.Node) ir.Node {
+ var n ir.Node
if len(args) == 0 {
n = nodnil()
- n.Type = typ
+ n.SetType(typ)
} else {
- n = nod(OCOMPLIT, nil, typenod(typ))
- n.List.Append(args...)
- n.SetImplicit(true)
+ lit := ir.Nod(ir.OCOMPLIT, nil, ir.TypeNode(typ))
+ lit.PtrList().Append(args...)
+ lit.SetImplicit(true)
+ n = lit
}
n = typecheck(n, ctxExpr)
- if n.Type == nil {
- Fatalf("mkdotargslice: typecheck failed")
+ if n.Type() == nil {
+ base.Fatalf("mkdotargslice: typecheck failed")
}
return n
}
// fixVariadicCall rewrites calls to variadic functions to use an
// explicit ... argument if one is not already present.
-func fixVariadicCall(call *Node) {
- fntype := call.Left.Type
+func fixVariadicCall(call *ir.CallExpr) {
+ fntype := call.Left().Type()
if !fntype.IsVariadic() || call.IsDDD() {
return
}
vi := fntype.NumParams() - 1
vt := fntype.Params().Field(vi).Type
- args := call.List.Slice()
+ args := call.List().Slice()
extra := args[vi:]
slice := mkdotargslice(vt, extra)
for i := range extra {
extra[i] = nil // allow GC
}
- call.List.Set(append(args[:vi], slice))
+ call.PtrList().Set(append(args[:vi], slice))
call.SetIsDDD(true)
}
-func walkCall(n *Node, init *Nodes) {
- if n.Rlist.Len() != 0 {
+func walkCall(n *ir.CallExpr, init *ir.Nodes) {
+ if n.Rlist().Len() != 0 {
return // already walked
}
- params := n.Left.Type.Params()
- args := n.List.Slice()
+ params := n.Left().Type().Params()
+ args := n.List().Slice()
- n.Left = walkexpr(n.Left, init)
+ n.SetLeft(walkexpr(n.Left(), init))
walkexprlist(args, init)
// If this is a method call, add the receiver at the beginning of the args.
- if n.Op == OCALLMETH {
- withRecv := make([]*Node, len(args)+1)
- withRecv[0] = n.Left.Left
- n.Left.Left = nil
+ if n.Op() == ir.OCALLMETH {
+ withRecv := make([]ir.Node, len(args)+1)
+ dot := n.Left().(*ir.SelectorExpr)
+ withRecv[0] = dot.Left()
+ dot.SetLeft(nil)
copy(withRecv[1:], args)
args = withRecv
}
// store that argument into a temporary variable,
// to prevent that calls from clobbering arguments already on the stack.
// When instrumenting, all arguments might require function calls.
- var tempAssigns []*Node
+ var tempAssigns []ir.Node
for i, arg := range args {
updateHasCall(arg)
// Determine param type.
var t *types.Type
- if n.Op == OCALLMETH {
+ if n.Op() == ir.OCALLMETH {
if i == 0 {
- t = n.Left.Type.Recv().Type
+ t = n.Left().Type().Recv().Type
} else {
t = params.Field(i - 1).Type
}
if instrumenting || fncall(arg, t) {
// make assignment of fncall to tempAt
tmp := temp(t)
- a := nod(OAS, tmp, arg)
- a = convas(a, init)
+ a := convas(ir.NewAssignStmt(base.Pos, tmp, arg), init)
tempAssigns = append(tempAssigns, a)
// replace arg with temp
args[i] = tmp
}
}
- n.List.Set(tempAssigns)
- n.Rlist.Set(args)
+ n.PtrList().Set(tempAssigns)
+ n.PtrRlist().Set(args)
}
// generate code for print
-func walkprint(nn *Node, init *Nodes) *Node {
+func walkprint(nn *ir.CallExpr, init *ir.Nodes) ir.Node {
// Hoist all the argument evaluation up before the lock.
- walkexprlistcheap(nn.List.Slice(), init)
+ walkexprlistcheap(nn.List().Slice(), init)
// For println, add " " between elements and "\n" at the end.
- if nn.Op == OPRINTN {
- s := nn.List.Slice()
- t := make([]*Node, 0, len(s)*2)
+ if nn.Op() == ir.OPRINTN {
+ s := nn.List().Slice()
+ t := make([]ir.Node, 0, len(s)*2)
for i, n := range s {
if i != 0 {
t = append(t, nodstr(" "))
t = append(t, n)
}
t = append(t, nodstr("\n"))
- nn.List.Set(t)
+ nn.PtrList().Set(t)
}
// Collapse runs of constant strings.
- s := nn.List.Slice()
- t := make([]*Node, 0, len(s))
+ s := nn.List().Slice()
+ t := make([]ir.Node, 0, len(s))
for i := 0; i < len(s); {
var strs []string
- for i < len(s) && Isconst(s[i], CTSTR) {
- strs = append(strs, s[i].StringVal())
+ for i < len(s) && ir.IsConst(s[i], constant.String) {
+ strs = append(strs, ir.StringVal(s[i]))
i++
}
if len(strs) > 0 {
i++
}
}
- nn.List.Set(t)
+ nn.PtrList().Set(t)
- calls := []*Node{mkcall("printlock", nil, init)}
- for i, n := range nn.List.Slice() {
- if n.Op == OLITERAL {
- switch n.Val().Ctype() {
- case CTRUNE:
- n = defaultlit(n, types.Runetype)
+ calls := []ir.Node{mkcall("printlock", nil, init)}
+ for i, n := range nn.List().Slice() {
+ if n.Op() == ir.OLITERAL {
+ if n.Type() == types.UntypedRune {
+ n = defaultlit(n, types.RuneType)
+ }
- case CTINT:
- n = defaultlit(n, types.Types[TINT64])
+ switch n.Val().Kind() {
+ case constant.Int:
+ n = defaultlit(n, types.Types[types.TINT64])
- case CTFLT:
- n = defaultlit(n, types.Types[TFLOAT64])
+ case constant.Float:
+ n = defaultlit(n, types.Types[types.TFLOAT64])
}
}
- if n.Op != OLITERAL && n.Type != nil && n.Type.Etype == TIDEAL {
- n = defaultlit(n, types.Types[TINT64])
+ if n.Op() != ir.OLITERAL && n.Type() != nil && n.Type().Kind() == types.TIDEAL {
+ n = defaultlit(n, types.Types[types.TINT64])
}
n = defaultlit(n, nil)
- nn.List.SetIndex(i, n)
- if n.Type == nil || n.Type.Etype == TFORW {
+ nn.List().SetIndex(i, n)
+ if n.Type() == nil || n.Type().Kind() == types.TFORW {
continue
}
- var on *Node
- switch n.Type.Etype {
- case TINTER:
- if n.Type.IsEmptyInterface() {
+ var on *ir.Name
+ switch n.Type().Kind() {
+ case types.TINTER:
+ if n.Type().IsEmptyInterface() {
on = syslook("printeface")
} else {
on = syslook("printiface")
}
- on = substArgTypes(on, n.Type) // any-1
- case TPTR:
- if n.Type.Elem().NotInHeap() {
+ on = substArgTypes(on, n.Type()) // any-1
+ case types.TPTR:
+ if n.Type().Elem().NotInHeap() {
on = syslook("printuintptr")
- n = nod(OCONV, n, nil)
- n.Type = types.Types[TUNSAFEPTR]
- n = nod(OCONV, n, nil)
- n.Type = types.Types[TUINTPTR]
+ n = ir.Nod(ir.OCONV, n, nil)
+ n.SetType(types.Types[types.TUNSAFEPTR])
+ n = ir.Nod(ir.OCONV, n, nil)
+ n.SetType(types.Types[types.TUINTPTR])
break
}
fallthrough
- case TCHAN, TMAP, TFUNC, TUNSAFEPTR:
+ case types.TCHAN, types.TMAP, types.TFUNC, types.TUNSAFEPTR:
on = syslook("printpointer")
- on = substArgTypes(on, n.Type) // any-1
- case TSLICE:
+ on = substArgTypes(on, n.Type()) // any-1
+ case types.TSLICE:
on = syslook("printslice")
- on = substArgTypes(on, n.Type) // any-1
- case TUINT, TUINT8, TUINT16, TUINT32, TUINT64, TUINTPTR:
- if isRuntimePkg(n.Type.Sym.Pkg) && n.Type.Sym.Name == "hex" {
+ on = substArgTypes(on, n.Type()) // any-1
+ case types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINTPTR:
+ if isRuntimePkg(n.Type().Sym().Pkg) && n.Type().Sym().Name == "hex" {
on = syslook("printhex")
} else {
on = syslook("printuint")
}
- case TINT, TINT8, TINT16, TINT32, TINT64:
+ case types.TINT, types.TINT8, types.TINT16, types.TINT32, types.TINT64:
on = syslook("printint")
- case TFLOAT32, TFLOAT64:
+ case types.TFLOAT32, types.TFLOAT64:
on = syslook("printfloat")
- case TCOMPLEX64, TCOMPLEX128:
+ case types.TCOMPLEX64, types.TCOMPLEX128:
on = syslook("printcomplex")
- case TBOOL:
+ case types.TBOOL:
on = syslook("printbool")
- case TSTRING:
+ case types.TSTRING:
cs := ""
- if Isconst(n, CTSTR) {
- cs = n.StringVal()
+ if ir.IsConst(n, constant.String) {
+ cs = ir.StringVal(n)
}
switch cs {
case " ":
on = syslook("printstring")
}
default:
- badtype(OPRINT, n.Type, nil)
+ badtype(ir.OPRINT, n.Type(), nil)
continue
}
- r := nod(OCALL, on, nil)
- if params := on.Type.Params().FieldSlice(); len(params) > 0 {
+ r := ir.Nod(ir.OCALL, on, nil)
+ if params := on.Type().Params().FieldSlice(); len(params) > 0 {
t := params[0].Type
- if !types.Identical(t, n.Type) {
- n = nod(OCONV, n, nil)
- n.Type = t
+ if !types.Identical(t, n.Type()) {
+ n = ir.Nod(ir.OCONV, n, nil)
+ n.SetType(t)
}
- r.List.Append(n)
+ r.PtrList().Append(n)
}
calls = append(calls, r)
}
typecheckslice(calls, ctxStmt)
walkexprlist(calls, init)
- r := nod(OEMPTY, nil, nil)
- r = typecheck(r, ctxStmt)
- r = walkexpr(r, init)
- r.Ninit.Set(calls)
- return r
+ r := ir.Nod(ir.OBLOCK, nil, nil)
+ r.PtrList().Set(calls)
+ return walkstmt(typecheck(r, ctxStmt))
}
-func callnew(t *types.Type) *Node {
+func callnew(t *types.Type) ir.Node {
dowidth(t)
- n := nod(ONEWOBJ, typename(t), nil)
- n.Type = types.NewPtr(t)
+ n := ir.Nod(ir.ONEWOBJ, typename(t), nil)
+ n.SetType(types.NewPtr(t))
n.SetTypecheck(1)
n.MarkNonNil()
return n
// isReflectHeaderDataField reports whether l is an expression p.Data
// where p has type reflect.SliceHeader or reflect.StringHeader.
-func isReflectHeaderDataField(l *Node) bool {
- if l.Type != types.Types[TUINTPTR] {
+func isReflectHeaderDataField(l ir.Node) bool {
+ if l.Type() != types.Types[types.TUINTPTR] {
return false
}
var tsym *types.Sym
- switch l.Op {
- case ODOT:
- tsym = l.Left.Type.Sym
- case ODOTPTR:
- tsym = l.Left.Type.Elem().Sym
+ switch l.Op() {
+ case ir.ODOT:
+ tsym = l.Left().Type().Sym()
+ case ir.ODOTPTR:
+ tsym = l.Left().Type().Elem().Sym()
default:
return false
}
- if tsym == nil || l.Sym.Name != "Data" || tsym.Pkg.Path != "reflect" {
+ if tsym == nil || l.Sym().Name != "Data" || tsym.Pkg.Path != "reflect" {
return false
}
return tsym.Name == "SliceHeader" || tsym.Name == "StringHeader"
}
-func convas(n *Node, init *Nodes) *Node {
- if n.Op != OAS {
- Fatalf("convas: not OAS %v", n.Op)
+func convas(n *ir.AssignStmt, init *ir.Nodes) *ir.AssignStmt {
+ if n.Op() != ir.OAS {
+ base.Fatalf("convas: not OAS %v", n.Op())
}
defer updateHasCall(n)
n.SetTypecheck(1)
- if n.Left == nil || n.Right == nil {
+ if n.Left() == nil || n.Right() == nil {
return n
}
- lt := n.Left.Type
- rt := n.Right.Type
+ lt := n.Left().Type()
+ rt := n.Right().Type()
if lt == nil || rt == nil {
return n
}
- if n.Left.isBlank() {
- n.Right = defaultlit(n.Right, nil)
+ if ir.IsBlank(n.Left()) {
+ n.SetRight(defaultlit(n.Right(), nil))
return n
}
if !types.Identical(lt, rt) {
- n.Right = assignconv(n.Right, lt, "assignment")
- n.Right = walkexpr(n.Right, init)
+ n.SetRight(assignconv(n.Right(), lt, "assignment"))
+ n.SetRight(walkexpr(n.Right(), init))
}
- dowidth(n.Right.Type)
+ dowidth(n.Right().Type())
return n
}
-// from ascompat[ee]
+// reorder3
+// from ascompatee
// a,b = c,d
// simultaneous assignment. there cannot
// be later use of an earlier lvalue.
//
// function calls have been removed.
-func reorder3(all []*Node) []*Node {
+func reorder3(all []*ir.AssignStmt) []ir.Node {
// If a needed expression may be affected by an
// earlier assignment, make an early copy of that
// expression and use the copy instead.
- var early []*Node
+ var early []ir.Node
- var mapinit Nodes
+ var mapinit ir.Nodes
for i, n := range all {
- l := n.Left
+ l := n.Left()
// Save subexpressions needed on left side.
// Drill through non-dereferences.
for {
- if l.Op == ODOT || l.Op == OPAREN {
- l = l.Left
+ switch ll := l; ll.Op() {
+ case ir.ODOT:
+ l = ll.Left()
continue
- }
-
- if l.Op == OINDEX && l.Left.Type.IsArray() {
- l.Right = reorder3save(l.Right, all, i, &early)
- l = l.Left
+ case ir.OPAREN:
+ l = ll.Left()
continue
+ case ir.OINDEX:
+ if ll.Left().Type().IsArray() {
+ ll.SetRight(reorder3save(ll.Right(), all, i, &early))
+ l = ll.Left()
+ continue
+ }
}
-
break
}
- switch l.Op {
+ switch l.Op() {
default:
- Fatalf("reorder3 unexpected lvalue %#v", l.Op)
+ base.Fatalf("reorder3 unexpected lvalue %v", l.Op())
- case ONAME:
+ case ir.ONAME:
break
- case OINDEX, OINDEXMAP:
- l.Left = reorder3save(l.Left, all, i, &early)
- l.Right = reorder3save(l.Right, all, i, &early)
- if l.Op == OINDEXMAP {
+ case ir.OINDEX, ir.OINDEXMAP:
+ l.SetLeft(reorder3save(l.Left(), all, i, &early))
+ l.SetRight(reorder3save(l.Right(), all, i, &early))
+ if l.Op() == ir.OINDEXMAP {
all[i] = convas(all[i], &mapinit)
}
- case ODEREF, ODOTPTR:
- l.Left = reorder3save(l.Left, all, i, &early)
+ case ir.ODEREF:
+ l.SetLeft(reorder3save(l.Left(), all, i, &early))
+ case ir.ODOTPTR:
+ l.SetLeft(reorder3save(l.Left(), all, i, &early))
}
// Save expression on right side.
- all[i].Right = reorder3save(all[i].Right, all, i, &early)
+ all[i].SetRight(reorder3save(all[i].Right(), all, i, &early))
}
early = append(mapinit.Slice(), early...)
- return append(early, all...)
+ for _, as := range all {
+ early = append(early, as)
+ }
+ return early
}
// if the evaluation of *np would be affected by the
// replace *np with that temp.
// The result of reorder3save MUST be assigned back to n, e.g.
// n.Left = reorder3save(n.Left, all, i, early)
-func reorder3save(n *Node, all []*Node, i int, early *[]*Node) *Node {
+func reorder3save(n ir.Node, all []*ir.AssignStmt, i int, early *[]ir.Node) ir.Node {
if !aliased(n, all[:i]) {
return n
}
- q := temp(n.Type)
- q = nod(OAS, q, n)
- q = typecheck(q, ctxStmt)
- *early = append(*early, q)
- return q.Left
+ q := ir.Node(temp(n.Type()))
+ as := typecheck(ir.Nod(ir.OAS, q, n), ctxStmt)
+ *early = append(*early, as)
+ return q
}
// what's the outer value that a write to n affects?
// outer value means containing struct or array.
-func outervalue(n *Node) *Node {
+func outervalue(n ir.Node) ir.Node {
for {
- switch n.Op {
- case OXDOT:
- Fatalf("OXDOT in walk")
- case ODOT, OPAREN, OCONVNOP:
- n = n.Left
+ switch nn := n; nn.Op() {
+ case ir.OXDOT:
+ base.Fatalf("OXDOT in walk")
+ case ir.ODOT:
+ n = nn.Left()
+ continue
+ case ir.OPAREN:
+ n = nn.Left()
+ continue
+ case ir.OCONVNOP:
+ n = nn.Left()
continue
- case OINDEX:
- if n.Left.Type != nil && n.Left.Type.IsArray() {
- n = n.Left
+ case ir.OINDEX:
+ if nn.Left().Type() != nil && nn.Left().Type().IsArray() {
+ n = nn.Left()
continue
}
}
// Is it possible that the computation of r might be
// affected by assignments in all?
-func aliased(r *Node, all []*Node) bool {
+func aliased(r ir.Node, all []*ir.AssignStmt) bool {
if r == nil {
return false
}
// Treat all fields of a struct as referring to the whole struct.
// We could do better but we would have to keep track of the fields.
- for r.Op == ODOT {
- r = r.Left
+ for r.Op() == ir.ODOT {
+ r = r.(*ir.SelectorExpr).Left()
}
// Look for obvious aliasing: a variable being assigned
memwrite := false
for _, as := range all {
// We can ignore assignments to blank.
- if as.Left.isBlank() {
+ if ir.IsBlank(as.Left()) {
continue
}
- l := outervalue(as.Left)
- if l.Op != ONAME {
+ lv := outervalue(as.Left())
+ if lv.Op() != ir.ONAME {
memwrite = true
continue
}
+ l := lv.(*ir.Name)
switch l.Class() {
default:
- Fatalf("unexpected class: %v, %v", l, l.Class())
+ base.Fatalf("unexpected class: %v, %v", l, l.Class())
- case PAUTOHEAP, PEXTERN:
+ case ir.PAUTOHEAP, ir.PEXTERN:
memwrite = true
continue
- case PAUTO, PPARAM, PPARAMOUT:
- if l.Name.Addrtaken() {
+ case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT:
+ if l.Name().Addrtaken() {
memwrite = true
continue
}
- if vmatch2(l, r) {
+ if refersToName(l, r) {
// Direct hit: l appears in r.
return true
}
return false
}
- // If r does not refer to computed addresses
- // (that is, if r only refers to variables whose addresses
- // have not been taken), no aliasing.
- if varexpr(r) {
+ // If r does not refer to any variables whose addresses have been taken,
+ // then the only possible writes to r would be directly to the variables,
+ // and we checked those above, so no aliasing problems.
+ if !anyAddrTaken(r) {
return false
}
return true
}
-// does the evaluation of n only refer to variables
-// whose addresses have not been taken?
-// (and no other memory)
-func varexpr(n *Node) bool {
- if n == nil {
- return true
- }
-
- switch n.Op {
- case OLITERAL:
- return true
-
- case ONAME:
- switch n.Class() {
- case PAUTO, PPARAM, PPARAMOUT:
- if !n.Name.Addrtaken() {
- return true
- }
+// anyAddrTaken reports whether the evaluation n,
+// which appears on the left side of an assignment,
+// may refer to variables whose addresses have been taken.
+func anyAddrTaken(n ir.Node) bool {
+ return ir.Any(n, func(n ir.Node) bool {
+ switch n.Op() {
+ case ir.ONAME:
+ return n.Class() == ir.PEXTERN || n.Class() == ir.PAUTOHEAP || n.Name().Addrtaken()
+
+ case ir.ODOT: // but not ODOTPTR - should have been handled in aliased.
+ base.Fatalf("anyAddrTaken unexpected ODOT")
+
+ case ir.OADD,
+ ir.OAND,
+ ir.OANDAND,
+ ir.OANDNOT,
+ ir.OBITNOT,
+ ir.OCONV,
+ ir.OCONVIFACE,
+ ir.OCONVNOP,
+ ir.ODIV,
+ ir.ODOTTYPE,
+ ir.OLITERAL,
+ ir.OLSH,
+ ir.OMOD,
+ ir.OMUL,
+ ir.ONEG,
+ ir.ONIL,
+ ir.OOR,
+ ir.OOROR,
+ ir.OPAREN,
+ ir.OPLUS,
+ ir.ORSH,
+ ir.OSUB,
+ ir.OXOR:
+ return false
}
-
- return false
-
- case OADD,
- OSUB,
- OOR,
- OXOR,
- OMUL,
- ODIV,
- OMOD,
- OLSH,
- ORSH,
- OAND,
- OANDNOT,
- OPLUS,
- ONEG,
- OBITNOT,
- OPAREN,
- OANDAND,
- OOROR,
- OCONV,
- OCONVNOP,
- OCONVIFACE,
- ODOTTYPE:
- return varexpr(n.Left) && varexpr(n.Right)
-
- case ODOT: // but not ODOTPTR
- // Should have been handled in aliased.
- Fatalf("varexpr unexpected ODOT")
- }
-
- // Be conservative.
- return false
+ // Be conservative.
+ return true
+ })
}
-// is the name l mentioned in r?
-func vmatch2(l *Node, r *Node) bool {
- if r == nil {
- return false
- }
- switch r.Op {
- // match each right given left
- case ONAME:
- return l == r
-
- case OLITERAL:
- return false
- }
-
- if vmatch2(l, r.Left) {
- return true
- }
- if vmatch2(l, r.Right) {
- return true
- }
- for _, n := range r.List.Slice() {
- if vmatch2(l, n) {
- return true
- }
- }
- return false
+// refersToName reports whether r refers to name.
+func refersToName(name *ir.Name, r ir.Node) bool {
+ return ir.Any(r, func(r ir.Node) bool {
+ return r.Op() == ir.ONAME && r == name
+ })
}
-// is any name mentioned in l also mentioned in r?
-// called by sinit.go
-func vmatch1(l *Node, r *Node) bool {
- // isolate all left sides
+var stop = errors.New("stop")
+
+// refersToCommonName reports whether any name
+// appears in common between l and r.
+// This is called from sinit.go.
+func refersToCommonName(l ir.Node, r ir.Node) bool {
if l == nil || r == nil {
return false
}
- switch l.Op {
- case ONAME:
- switch l.Class() {
- case PPARAM, PAUTO:
- break
- default:
- // assignment to non-stack variable must be
- // delayed if right has function calls.
- if r.HasCall() {
- return true
+ // This could be written elegantly as a Find nested inside a Find:
+ //
+ // found := ir.Find(l, func(l ir.Node) interface{} {
+ // if l.Op() == ir.ONAME {
+ // return ir.Find(r, func(r ir.Node) interface{} {
+ // if r.Op() == ir.ONAME && l.Name() == r.Name() {
+ // return r
+ // }
+ // return nil
+ // })
+ // }
+ // return nil
+ // })
+ // return found != nil
+ //
+ // But that would allocate a new closure for the inner Find
+ // for each name found on the left side.
+ // It may not matter at all, but the below way of writing it
+ // only allocates two closures, not O(|L|) closures.
+
+ var doL, doR func(ir.Node) error
+ var targetL *ir.Name
+ doR = func(r ir.Node) error {
+ if r.Op() == ir.ONAME && r.Name() == targetL {
+ return stop
+ }
+ return ir.DoChildren(r, doR)
+ }
+ doL = func(l ir.Node) error {
+ if l.Op() == ir.ONAME {
+ targetL = l.Name()
+ if doR(r) == stop {
+ return stop
}
}
-
- return vmatch2(l, r)
-
- case OLITERAL:
- return false
- }
-
- if vmatch1(l.Left, r) {
- return true
- }
- if vmatch1(l.Right, r) {
- return true
- }
- for _, n := range l.List.Slice() {
- if vmatch1(n, r) {
- return true
- }
+ return ir.DoChildren(l, doL)
}
- return false
+ return doL(l) == stop
}
// paramstoheap returns code to allocate memory for heap-escaped parameters
// and to copy non-result parameters' values from the stack.
-func paramstoheap(params *types.Type) []*Node {
- var nn []*Node
+func paramstoheap(params *types.Type) []ir.Node {
+ var nn []ir.Node
for _, t := range params.Fields().Slice() {
- v := asNode(t.Nname)
- if v != nil && v.Sym != nil && strings.HasPrefix(v.Sym.Name, "~r") { // unnamed result
+ v := ir.AsNode(t.Nname)
+ if v != nil && v.Sym() != nil && strings.HasPrefix(v.Sym().Name, "~r") { // unnamed result
v = nil
}
if v == nil {
continue
}
- if stackcopy := v.Name.Param.Stackcopy; stackcopy != nil {
- nn = append(nn, walkstmt(nod(ODCL, v, nil)))
- if stackcopy.Class() == PPARAM {
- nn = append(nn, walkstmt(typecheck(nod(OAS, v, stackcopy), ctxStmt)))
+ if stackcopy := v.Name().Stackcopy; stackcopy != nil {
+ nn = append(nn, walkstmt(ir.Nod(ir.ODCL, v, nil)))
+ if stackcopy.Class() == ir.PPARAM {
+ nn = append(nn, walkstmt(typecheck(ir.Nod(ir.OAS, v, stackcopy), ctxStmt)))
}
}
}
// even allocations to move params/results to the heap.
// The generated code is added to Curfn's Enter list.
func zeroResults() {
- for _, f := range Curfn.Type.Results().Fields().Slice() {
- v := asNode(f.Nname)
- if v != nil && v.Name.Param.Heapaddr != nil {
+ for _, f := range Curfn.Type().Results().Fields().Slice() {
+ v := ir.AsNode(f.Nname)
+ if v != nil && v.Name().Heapaddr != nil {
// The local which points to the return value is the
// thing that needs zeroing. This is already handled
// by a Needzero annotation in plive.go:livenessepilogue.
continue
}
- if v.isParamHeapCopy() {
+ if isParamHeapCopy(v) {
// TODO(josharian/khr): Investigate whether we can switch to "continue" here,
// and document more in either case.
// In the review of CL 114797, Keith wrote (roughly):
// I don't think the zeroing below matters.
// The stack return value will never be marked as live anywhere in the function.
// It is not written to until deferreturn returns.
- v = v.Name.Param.Stackcopy
+ v = v.Name().Stackcopy
}
// Zero the stack location containing f.
- Curfn.Func.Enter.Append(nodl(Curfn.Pos, OAS, v, nil))
+ Curfn.Enter.Append(ir.NodAt(Curfn.Pos(), ir.OAS, v, nil))
}
}
// returnsfromheap returns code to copy values for heap-escaped parameters
// back to the stack.
-func returnsfromheap(params *types.Type) []*Node {
- var nn []*Node
+func returnsfromheap(params *types.Type) []ir.Node {
+ var nn []ir.Node
for _, t := range params.Fields().Slice() {
- v := asNode(t.Nname)
+ v := ir.AsNode(t.Nname)
if v == nil {
continue
}
- if stackcopy := v.Name.Param.Stackcopy; stackcopy != nil && stackcopy.Class() == PPARAMOUT {
- nn = append(nn, walkstmt(typecheck(nod(OAS, stackcopy, v), ctxStmt)))
+ if stackcopy := v.Name().Stackcopy; stackcopy != nil && stackcopy.Class() == ir.PPARAMOUT {
+ nn = append(nn, walkstmt(typecheck(ir.Nod(ir.OAS, stackcopy, v), ctxStmt)))
}
}
// between the stack and the heap. The generated code is added to Curfn's
// Enter and Exit lists.
func heapmoves() {
- lno := lineno
- lineno = Curfn.Pos
- nn := paramstoheap(Curfn.Type.Recvs())
- nn = append(nn, paramstoheap(Curfn.Type.Params())...)
- nn = append(nn, paramstoheap(Curfn.Type.Results())...)
- Curfn.Func.Enter.Append(nn...)
- lineno = Curfn.Func.Endlineno
- Curfn.Func.Exit.Append(returnsfromheap(Curfn.Type.Results())...)
- lineno = lno
+ lno := base.Pos
+ base.Pos = Curfn.Pos()
+ nn := paramstoheap(Curfn.Type().Recvs())
+ nn = append(nn, paramstoheap(Curfn.Type().Params())...)
+ nn = append(nn, paramstoheap(Curfn.Type().Results())...)
+ Curfn.Enter.Append(nn...)
+ base.Pos = Curfn.Endlineno
+ Curfn.Exit.Append(returnsfromheap(Curfn.Type().Results())...)
+ base.Pos = lno
}
-func vmkcall(fn *Node, t *types.Type, init *Nodes, va []*Node) *Node {
- if fn.Type == nil || fn.Type.Etype != TFUNC {
- Fatalf("mkcall %v %v", fn, fn.Type)
+func vmkcall(fn ir.Node, t *types.Type, init *ir.Nodes, va []ir.Node) *ir.CallExpr {
+ if fn.Type() == nil || fn.Type().Kind() != types.TFUNC {
+ base.Fatalf("mkcall %v %v", fn, fn.Type())
}
- n := fn.Type.NumParams()
+ n := fn.Type().NumParams()
if n != len(va) {
- Fatalf("vmkcall %v needs %v args got %v", fn, n, len(va))
+ base.Fatalf("vmkcall %v needs %v args got %v", fn, n, len(va))
}
- r := nod(OCALL, fn, nil)
- r.List.Set(va)
- if fn.Type.NumResults() > 0 {
- r = typecheck(r, ctxExpr|ctxMultiOK)
- } else {
- r = typecheck(r, ctxStmt)
- }
- r = walkexpr(r, init)
- r.Type = t
- return r
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, va)
+ TypecheckCall(call)
+ call.SetType(t)
+ return walkexpr(call, init).(*ir.CallExpr)
}
-func mkcall(name string, t *types.Type, init *Nodes, args ...*Node) *Node {
+func mkcall(name string, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.CallExpr {
return vmkcall(syslook(name), t, init, args)
}
-func mkcall1(fn *Node, t *types.Type, init *Nodes, args ...*Node) *Node {
+func mkcall1(fn ir.Node, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.CallExpr {
return vmkcall(fn, t, init, args)
}
-func conv(n *Node, t *types.Type) *Node {
- if types.Identical(n.Type, t) {
+func conv(n ir.Node, t *types.Type) ir.Node {
+ if types.Identical(n.Type(), t) {
return n
}
- n = nod(OCONV, n, nil)
- n.Type = t
+ n = ir.Nod(ir.OCONV, n, nil)
+ n.SetType(t)
n = typecheck(n, ctxExpr)
return n
}
// convnop converts node n to type t using the OCONVNOP op
// and typechecks the result with ctxExpr.
-func convnop(n *Node, t *types.Type) *Node {
- if types.Identical(n.Type, t) {
+func convnop(n ir.Node, t *types.Type) ir.Node {
+ if types.Identical(n.Type(), t) {
return n
}
- n = nod(OCONVNOP, n, nil)
- n.Type = t
+ n = ir.Nod(ir.OCONVNOP, n, nil)
+ n.SetType(t)
n = typecheck(n, ctxExpr)
return n
}
// byteindex converts n, which is byte-sized, to an int used to index into an array.
// We cannot use conv, because we allow converting bool to int here,
// which is forbidden in user code.
-func byteindex(n *Node) *Node {
+func byteindex(n ir.Node) ir.Node {
// We cannot convert from bool to int directly.
// While converting from int8 to int is possible, it would yield
// the wrong result for negative values.
// Reinterpreting the value as an unsigned byte solves both cases.
- if !types.Identical(n.Type, types.Types[TUINT8]) {
- n = nod(OCONV, n, nil)
- n.Type = types.Types[TUINT8]
+ if !types.Identical(n.Type(), types.Types[types.TUINT8]) {
+ n = ir.Nod(ir.OCONV, n, nil)
+ n.SetType(types.Types[types.TUINT8])
n.SetTypecheck(1)
}
- n = nod(OCONV, n, nil)
- n.Type = types.Types[TINT]
+ n = ir.Nod(ir.OCONV, n, nil)
+ n.SetType(types.Types[types.TINT])
n.SetTypecheck(1)
return n
}
-func chanfn(name string, n int, t *types.Type) *Node {
+func chanfn(name string, n int, t *types.Type) ir.Node {
if !t.IsChan() {
- Fatalf("chanfn %v", t)
+ base.Fatalf("chanfn %v", t)
}
fn := syslook(name)
switch n {
default:
- Fatalf("chanfn %d", n)
+ base.Fatalf("chanfn %d", n)
case 1:
fn = substArgTypes(fn, t.Elem())
case 2:
return fn
}
-func mapfn(name string, t *types.Type) *Node {
+func mapfn(name string, t *types.Type) ir.Node {
if !t.IsMap() {
- Fatalf("mapfn %v", t)
+ base.Fatalf("mapfn %v", t)
}
fn := syslook(name)
fn = substArgTypes(fn, t.Key(), t.Elem(), t.Key(), t.Elem())
return fn
}
-func mapfndel(name string, t *types.Type) *Node {
+func mapfndel(name string, t *types.Type) ir.Node {
if !t.IsMap() {
- Fatalf("mapfn %v", t)
+ base.Fatalf("mapfn %v", t)
}
fn := syslook(name)
fn = substArgTypes(fn, t.Key(), t.Elem(), t.Key())
if Widthptr == 4 {
return mapfast32ptr
}
- Fatalf("small pointer %v", t.Key())
+ base.Fatalf("small pointer %v", t.Key())
case AMEM64:
if !t.Key().HasPointers() {
return mapfast64
return mapslow
}
-func writebarrierfn(name string, l *types.Type, r *types.Type) *Node {
+func writebarrierfn(name string, l *types.Type, r *types.Type) ir.Node {
fn := syslook(name)
fn = substArgTypes(fn, l, r)
return fn
}
-func addstr(n *Node, init *Nodes) *Node {
- // order.expr rewrote OADDSTR to have a list of strings.
- c := n.List.Len()
+func addstr(n *ir.AddStringExpr, init *ir.Nodes) ir.Node {
+ c := n.List().Len()
if c < 2 {
- Fatalf("addstr count %d too small", c)
+ base.Fatalf("addstr count %d too small", c)
}
buf := nodnil()
- if n.Esc == EscNone {
+ if n.Esc() == EscNone {
sz := int64(0)
- for _, n1 := range n.List.Slice() {
- if n1.Op == OLITERAL {
- sz += int64(len(n1.StringVal()))
+ for _, n1 := range n.List().Slice() {
+ if n1.Op() == ir.OLITERAL {
+ sz += int64(len(ir.StringVal(n1)))
}
}
// Don't allocate the buffer if the result won't fit.
if sz < tmpstringbufsize {
// Create temporary buffer for result string on stack.
- t := types.NewArray(types.Types[TUINT8], tmpstringbufsize)
- buf = nod(OADDR, temp(t), nil)
+ t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize)
+ buf = nodAddr(temp(t))
}
}
// build list of string arguments
- args := []*Node{buf}
- for _, n2 := range n.List.Slice() {
- args = append(args, conv(n2, types.Types[TSTRING]))
+ args := []ir.Node{buf}
+ for _, n2 := range n.List().Slice() {
+ args = append(args, conv(n2, types.Types[types.TSTRING]))
}
var fn string
// large numbers of strings are passed to the runtime as a slice.
fn = "concatstrings"
- t := types.NewSlice(types.Types[TSTRING])
- slice := nod(OCOMPLIT, nil, typenod(t))
- if prealloc[n] != nil {
- prealloc[slice] = prealloc[n]
- }
- slice.List.Set(args[1:]) // skip buf arg
- args = []*Node{buf, slice}
- slice.Esc = EscNone
+ t := types.NewSlice(types.Types[types.TSTRING])
+ // args[1:] to skip buf arg
+ slice := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(t), args[1:])
+ slice.Prealloc = n.Prealloc
+ args = []ir.Node{buf, slice}
+ slice.SetEsc(EscNone)
}
cat := syslook(fn)
- r := nod(OCALL, cat, nil)
- r.List.Set(args)
- r = typecheck(r, ctxExpr)
- r = walkexpr(r, init)
- r.Type = n.Type
+ r := ir.Nod(ir.OCALL, cat, nil)
+ r.PtrList().Set(args)
+ r1 := typecheck(r, ctxExpr)
+ r1 = walkexpr(r1, init)
+ r1.SetType(n.Type())
- return r
+ return r1
}
-func walkAppendArgs(n *Node, init *Nodes) {
- walkexprlistsafe(n.List.Slice(), init)
+func walkAppendArgs(n *ir.CallExpr, init *ir.Nodes) {
+ walkexprlistsafe(n.List().Slice(), init)
// walkexprlistsafe will leave OINDEX (s[n]) alone if both s
// and n are name or literal, but those may index the slice we're
// modifying here. Fix explicitly.
- ls := n.List.Slice()
+ ls := n.List().Slice()
for i1, n1 := range ls {
ls[i1] = cheapexpr(n1, init)
}
// s
//
// l2 is allowed to be a string.
-func appendslice(n *Node, init *Nodes) *Node {
+func appendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
walkAppendArgs(n, init)
- l1 := n.List.First()
- l2 := n.List.Second()
+ l1 := n.List().First()
+ l2 := n.List().Second()
l2 = cheapexpr(l2, init)
- n.List.SetSecond(l2)
+ n.List().SetSecond(l2)
- var nodes Nodes
+ var nodes ir.Nodes
// var s []T
- s := temp(l1.Type)
- nodes.Append(nod(OAS, s, l1)) // s = l1
+ s := temp(l1.Type())
+ nodes.Append(ir.Nod(ir.OAS, s, l1)) // s = l1
- elemtype := s.Type.Elem()
+ elemtype := s.Type().Elem()
// n := len(s) + len(l2)
- nn := temp(types.Types[TINT])
- nodes.Append(nod(OAS, nn, nod(OADD, nod(OLEN, s, nil), nod(OLEN, l2, nil))))
+ nn := temp(types.Types[types.TINT])
+ nodes.Append(ir.Nod(ir.OAS, nn, ir.Nod(ir.OADD, ir.Nod(ir.OLEN, s, nil), ir.Nod(ir.OLEN, l2, nil))))
// if uint(n) > uint(cap(s))
- nif := nod(OIF, nil, nil)
- nuint := conv(nn, types.Types[TUINT])
- scapuint := conv(nod(OCAP, s, nil), types.Types[TUINT])
- nif.Left = nod(OGT, nuint, scapuint)
+ nif := ir.Nod(ir.OIF, nil, nil)
+ nuint := conv(nn, types.Types[types.TUINT])
+ scapuint := conv(ir.Nod(ir.OCAP, s, nil), types.Types[types.TUINT])
+ nif.SetLeft(ir.Nod(ir.OGT, nuint, scapuint))
// instantiate growslice(typ *type, []any, int) []any
fn := syslook("growslice")
fn = substArgTypes(fn, elemtype, elemtype)
// s = growslice(T, s, n)
- nif.Nbody.Set1(nod(OAS, s, mkcall1(fn, s.Type, &nif.Ninit, typename(elemtype), s, nn)))
+ nif.PtrBody().Set1(ir.Nod(ir.OAS, s, mkcall1(fn, s.Type(), nif.PtrInit(), typename(elemtype), s, nn)))
nodes.Append(nif)
// s = s[:n]
- nt := nod(OSLICE, s, nil)
+ nt := ir.Nod(ir.OSLICE, s, nil)
nt.SetSliceBounds(nil, nn, nil)
nt.SetBounded(true)
- nodes.Append(nod(OAS, s, nt))
+ nodes.Append(ir.Nod(ir.OAS, s, nt))
- var ncopy *Node
+ var ncopy ir.Node
if elemtype.HasPointers() {
// copy(s[len(l1):], l2)
- nptr1 := nod(OSLICE, s, nil)
- nptr1.Type = s.Type
- nptr1.SetSliceBounds(nod(OLEN, l1, nil), nil, nil)
- nptr1 = cheapexpr(nptr1, &nodes)
+ slice := ir.Nod(ir.OSLICE, s, nil)
+ slice.SetType(s.Type())
+ slice.SetSliceBounds(ir.Nod(ir.OLEN, l1, nil), nil, nil)
- nptr2 := l2
-
- Curfn.Func.setWBPos(n.Pos)
+ Curfn.SetWBPos(n.Pos())
// instantiate typedslicecopy(typ *type, dstPtr *any, dstLen int, srcPtr *any, srcLen int) int
fn := syslook("typedslicecopy")
- fn = substArgTypes(fn, l1.Type.Elem(), l2.Type.Elem())
- ptr1, len1 := nptr1.backingArrayPtrLen()
- ptr2, len2 := nptr2.backingArrayPtrLen()
- ncopy = mkcall1(fn, types.Types[TINT], &nodes, typename(elemtype), ptr1, len1, ptr2, len2)
- } else if instrumenting && !compiling_runtime {
+ fn = substArgTypes(fn, l1.Type().Elem(), l2.Type().Elem())
+ ptr1, len1 := backingArrayPtrLen(cheapexpr(slice, &nodes))
+ ptr2, len2 := backingArrayPtrLen(l2)
+ ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, typename(elemtype), ptr1, len1, ptr2, len2)
+ } else if instrumenting && !base.Flag.CompilingRuntime {
// rely on runtime to instrument:
// copy(s[len(l1):], l2)
// l2 can be a slice or string.
- nptr1 := nod(OSLICE, s, nil)
- nptr1.Type = s.Type
- nptr1.SetSliceBounds(nod(OLEN, l1, nil), nil, nil)
- nptr1 = cheapexpr(nptr1, &nodes)
- nptr2 := l2
+ slice := ir.Nod(ir.OSLICE, s, nil)
+ slice.SetType(s.Type())
+ slice.SetSliceBounds(ir.Nod(ir.OLEN, l1, nil), nil, nil)
- ptr1, len1 := nptr1.backingArrayPtrLen()
- ptr2, len2 := nptr2.backingArrayPtrLen()
+ ptr1, len1 := backingArrayPtrLen(cheapexpr(slice, &nodes))
+ ptr2, len2 := backingArrayPtrLen(l2)
fn := syslook("slicecopy")
- fn = substArgTypes(fn, ptr1.Type.Elem(), ptr2.Type.Elem())
- ncopy = mkcall1(fn, types.Types[TINT], &nodes, ptr1, len1, ptr2, len2, nodintconst(elemtype.Width))
+ fn = substArgTypes(fn, ptr1.Type().Elem(), ptr2.Type().Elem())
+ ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, ptr1, len1, ptr2, len2, nodintconst(elemtype.Width))
} else {
// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
- nptr1 := nod(OINDEX, s, nod(OLEN, l1, nil))
- nptr1.SetBounded(true)
- nptr1 = nod(OADDR, nptr1, nil)
+ ix := ir.Nod(ir.OINDEX, s, ir.Nod(ir.OLEN, l1, nil))
+ ix.SetBounded(true)
+ addr := nodAddr(ix)
- nptr2 := nod(OSPTR, l2, nil)
+ sptr := ir.Nod(ir.OSPTR, l2, nil)
- nwid := cheapexpr(conv(nod(OLEN, l2, nil), types.Types[TUINTPTR]), &nodes)
- nwid = nod(OMUL, nwid, nodintconst(elemtype.Width))
+ nwid := cheapexpr(conv(ir.Nod(ir.OLEN, l2, nil), types.Types[types.TUINTPTR]), &nodes)
+ nwid = ir.Nod(ir.OMUL, nwid, nodintconst(elemtype.Width))
// instantiate func memmove(to *any, frm *any, length uintptr)
fn := syslook("memmove")
fn = substArgTypes(fn, elemtype, elemtype)
- ncopy = mkcall1(fn, nil, &nodes, nptr1, nptr2, nwid)
+ ncopy = mkcall1(fn, nil, &nodes, addr, sptr, nwid)
}
ln := append(nodes.Slice(), ncopy)
// isAppendOfMake reports whether n is of the form append(x , make([]T, y)...).
// isAppendOfMake assumes n has already been typechecked.
-func isAppendOfMake(n *Node) bool {
- if Debug.N != 0 || instrumenting {
+func isAppendOfMake(n ir.Node) bool {
+ if base.Flag.N != 0 || instrumenting {
return false
}
if n.Typecheck() == 0 {
- Fatalf("missing typecheck: %+v", n)
+ base.Fatalf("missing typecheck: %+v", n)
}
- if n.Op != OAPPEND || !n.IsDDD() || n.List.Len() != 2 {
+ if n.Op() != ir.OAPPEND {
+ return false
+ }
+ call := n.(*ir.CallExpr)
+ if !call.IsDDD() || call.List().Len() != 2 || call.List().Second().Op() != ir.OMAKESLICE {
return false
}
- second := n.List.Second()
- if second.Op != OMAKESLICE || second.Right != nil {
+ mk := call.List().Second().(*ir.MakeExpr)
+ if mk.Right() != nil {
return false
}
// typecheck made sure that constant arguments to make are not negative and fit into an int.
// The care of overflow of the len argument to make will be handled by an explicit check of int(len) < 0 during runtime.
- y := second.Left
- if !Isconst(y, CTINT) && maxintval[y.Type.Etype].Cmp(maxintval[TUINT]) > 0 {
+ y := mk.Left()
+ if !ir.IsConst(y, constant.Int) && y.Type().Size() > types.Types[types.TUINT].Size() {
return false
}
// }
// }
// s
-func extendslice(n *Node, init *Nodes) *Node {
+func extendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
// isAppendOfMake made sure all possible positive values of l2 fit into an uint.
// The case of l2 overflow when converting from e.g. uint to int is handled by an explicit
// check of l2 < 0 at runtime which is generated below.
- l2 := conv(n.List.Second().Left, types.Types[TINT])
+ l2 := conv(n.List().Second().(*ir.MakeExpr).Left(), types.Types[types.TINT])
l2 = typecheck(l2, ctxExpr)
- n.List.SetSecond(l2) // walkAppendArgs expects l2 in n.List.Second().
+ n.List().SetSecond(l2) // walkAppendArgs expects l2 in n.List.Second().
walkAppendArgs(n, init)
- l1 := n.List.First()
- l2 = n.List.Second() // re-read l2, as it may have been updated by walkAppendArgs
+ l1 := n.List().First()
+ l2 = n.List().Second() // re-read l2, as it may have been updated by walkAppendArgs
- var nodes []*Node
+ var nodes []ir.Node
// if l2 >= 0 (likely happens), do nothing
- nifneg := nod(OIF, nod(OGE, l2, nodintconst(0)), nil)
+ nifneg := ir.Nod(ir.OIF, ir.Nod(ir.OGE, l2, nodintconst(0)), nil)
nifneg.SetLikely(true)
// else panicmakeslicelen()
- nifneg.Rlist.Set1(mkcall("panicmakeslicelen", nil, init))
+ nifneg.PtrRlist().Set1(mkcall("panicmakeslicelen", nil, init))
nodes = append(nodes, nifneg)
// s := l1
- s := temp(l1.Type)
- nodes = append(nodes, nod(OAS, s, l1))
+ s := temp(l1.Type())
+ nodes = append(nodes, ir.Nod(ir.OAS, s, l1))
- elemtype := s.Type.Elem()
+ elemtype := s.Type().Elem()
// n := len(s) + l2
- nn := temp(types.Types[TINT])
- nodes = append(nodes, nod(OAS, nn, nod(OADD, nod(OLEN, s, nil), l2)))
+ nn := temp(types.Types[types.TINT])
+ nodes = append(nodes, ir.Nod(ir.OAS, nn, ir.Nod(ir.OADD, ir.Nod(ir.OLEN, s, nil), l2)))
// if uint(n) > uint(cap(s))
- nuint := conv(nn, types.Types[TUINT])
- capuint := conv(nod(OCAP, s, nil), types.Types[TUINT])
- nif := nod(OIF, nod(OGT, nuint, capuint), nil)
+ nuint := conv(nn, types.Types[types.TUINT])
+ capuint := conv(ir.Nod(ir.OCAP, s, nil), types.Types[types.TUINT])
+ nif := ir.Nod(ir.OIF, ir.Nod(ir.OGT, nuint, capuint), nil)
// instantiate growslice(typ *type, old []any, newcap int) []any
fn := syslook("growslice")
fn = substArgTypes(fn, elemtype, elemtype)
// s = growslice(T, s, n)
- nif.Nbody.Set1(nod(OAS, s, mkcall1(fn, s.Type, &nif.Ninit, typename(elemtype), s, nn)))
+ nif.PtrBody().Set1(ir.Nod(ir.OAS, s, mkcall1(fn, s.Type(), nif.PtrInit(), typename(elemtype), s, nn)))
nodes = append(nodes, nif)
// s = s[:n]
- nt := nod(OSLICE, s, nil)
+ nt := ir.Nod(ir.OSLICE, s, nil)
nt.SetSliceBounds(nil, nn, nil)
nt.SetBounded(true)
- nodes = append(nodes, nod(OAS, s, nt))
+ nodes = append(nodes, ir.Nod(ir.OAS, s, nt))
// lptr := &l1[0]
- l1ptr := temp(l1.Type.Elem().PtrTo())
- tmp := nod(OSPTR, l1, nil)
- nodes = append(nodes, nod(OAS, l1ptr, tmp))
+ l1ptr := temp(l1.Type().Elem().PtrTo())
+ tmp := ir.Nod(ir.OSPTR, l1, nil)
+ nodes = append(nodes, ir.Nod(ir.OAS, l1ptr, tmp))
// sptr := &s[0]
sptr := temp(elemtype.PtrTo())
- tmp = nod(OSPTR, s, nil)
- nodes = append(nodes, nod(OAS, sptr, tmp))
+ tmp = ir.Nod(ir.OSPTR, s, nil)
+ nodes = append(nodes, ir.Nod(ir.OAS, sptr, tmp))
// hp := &s[len(l1)]
- hp := nod(OINDEX, s, nod(OLEN, l1, nil))
- hp.SetBounded(true)
- hp = nod(OADDR, hp, nil)
- hp = convnop(hp, types.Types[TUNSAFEPTR])
+ ix := ir.Nod(ir.OINDEX, s, ir.Nod(ir.OLEN, l1, nil))
+ ix.SetBounded(true)
+ hp := convnop(nodAddr(ix), types.Types[types.TUNSAFEPTR])
// hn := l2 * sizeof(elem(s))
- hn := nod(OMUL, l2, nodintconst(elemtype.Width))
- hn = conv(hn, types.Types[TUINTPTR])
+ hn := conv(ir.Nod(ir.OMUL, l2, nodintconst(elemtype.Width)), types.Types[types.TUINTPTR])
clrname := "memclrNoHeapPointers"
hasPointers := elemtype.HasPointers()
if hasPointers {
clrname = "memclrHasPointers"
- Curfn.Func.setWBPos(n.Pos)
+ Curfn.SetWBPos(n.Pos())
}
- var clr Nodes
+ var clr ir.Nodes
clrfn := mkcall(clrname, nil, &clr, hp, hn)
clr.Append(clrfn)
if hasPointers {
// if l1ptr == sptr
- nifclr := nod(OIF, nod(OEQ, l1ptr, sptr), nil)
- nifclr.Nbody = clr
+ nifclr := ir.Nod(ir.OIF, ir.Nod(ir.OEQ, l1ptr, sptr), nil)
+ nifclr.SetBody(clr)
nodes = append(nodes, nifclr)
} else {
nodes = append(nodes, clr.Slice()...)
// ...
// }
// s
-func walkappend(n *Node, init *Nodes, dst *Node) *Node {
- if !samesafeexpr(dst, n.List.First()) {
- n.List.SetFirst(safeexpr(n.List.First(), init))
- n.List.SetFirst(walkexpr(n.List.First(), init))
+func walkappend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node {
+ if !samesafeexpr(dst, n.List().First()) {
+ n.List().SetFirst(safeexpr(n.List().First(), init))
+ n.List().SetFirst(walkexpr(n.List().First(), init))
}
- walkexprlistsafe(n.List.Slice()[1:], init)
+ walkexprlistsafe(n.List().Slice()[1:], init)
- nsrc := n.List.First()
+ nsrc := n.List().First()
// walkexprlistsafe will leave OINDEX (s[n]) alone if both s
// and n are name or literal, but those may index the slice we're
// Using cheapexpr also makes sure that the evaluation
// of all arguments (and especially any panics) happen
// before we begin to modify the slice in a visible way.
- ls := n.List.Slice()[1:]
+ ls := n.List().Slice()[1:]
for i, n := range ls {
n = cheapexpr(n, init)
- if !types.Identical(n.Type, nsrc.Type.Elem()) {
- n = assignconv(n, nsrc.Type.Elem(), "append")
+ if !types.Identical(n.Type(), nsrc.Type().Elem()) {
+ n = assignconv(n, nsrc.Type().Elem(), "append")
n = walkexpr(n, init)
}
ls[i] = n
}
- argc := n.List.Len() - 1
+ argc := n.List().Len() - 1
if argc < 1 {
return nsrc
}
// General case, with no function calls left as arguments.
// Leave for gen, except that instrumentation requires old form.
- if !instrumenting || compiling_runtime {
+ if !instrumenting || base.Flag.CompilingRuntime {
return n
}
- var l []*Node
+ var l []ir.Node
- ns := temp(nsrc.Type)
- l = append(l, nod(OAS, ns, nsrc)) // s = src
+ ns := temp(nsrc.Type())
+ l = append(l, ir.Nod(ir.OAS, ns, nsrc)) // s = src
- na := nodintconst(int64(argc)) // const argc
- nx := nod(OIF, nil, nil) // if cap(s) - len(s) < argc
- nx.Left = nod(OLT, nod(OSUB, nod(OCAP, ns, nil), nod(OLEN, ns, nil)), na)
+ na := nodintconst(int64(argc)) // const argc
+ nif := ir.Nod(ir.OIF, nil, nil) // if cap(s) - len(s) < argc
+ nif.SetLeft(ir.Nod(ir.OLT, ir.Nod(ir.OSUB, ir.Nod(ir.OCAP, ns, nil), ir.Nod(ir.OLEN, ns, nil)), na))
fn := syslook("growslice") // growslice(<type>, old []T, mincap int) (ret []T)
- fn = substArgTypes(fn, ns.Type.Elem(), ns.Type.Elem())
+ fn = substArgTypes(fn, ns.Type().Elem(), ns.Type().Elem())
- nx.Nbody.Set1(nod(OAS, ns,
- mkcall1(fn, ns.Type, &nx.Ninit, typename(ns.Type.Elem()), ns,
- nod(OADD, nod(OLEN, ns, nil), na))))
+ nif.PtrBody().Set1(ir.Nod(ir.OAS, ns,
+ mkcall1(fn, ns.Type(), nif.PtrInit(), typename(ns.Type().Elem()), ns,
+ ir.Nod(ir.OADD, ir.Nod(ir.OLEN, ns, nil), na))))
- l = append(l, nx)
+ l = append(l, nif)
- nn := temp(types.Types[TINT])
- l = append(l, nod(OAS, nn, nod(OLEN, ns, nil))) // n = len(s)
+ nn := temp(types.Types[types.TINT])
+ l = append(l, ir.Nod(ir.OAS, nn, ir.Nod(ir.OLEN, ns, nil))) // n = len(s)
- nx = nod(OSLICE, ns, nil) // ...s[:n+argc]
- nx.SetSliceBounds(nil, nod(OADD, nn, na), nil)
- nx.SetBounded(true)
- l = append(l, nod(OAS, ns, nx)) // s = s[:n+argc]
+ slice := ir.Nod(ir.OSLICE, ns, nil) // ...s[:n+argc]
+ slice.SetSliceBounds(nil, ir.Nod(ir.OADD, nn, na), nil)
+ slice.SetBounded(true)
+ l = append(l, ir.Nod(ir.OAS, ns, slice)) // s = s[:n+argc]
- ls = n.List.Slice()[1:]
+ ls = n.List().Slice()[1:]
for i, n := range ls {
- nx = nod(OINDEX, ns, nn) // s[n] ...
- nx.SetBounded(true)
- l = append(l, nod(OAS, nx, n)) // s[n] = arg
+ ix := ir.Nod(ir.OINDEX, ns, nn) // s[n] ...
+ ix.SetBounded(true)
+ l = append(l, ir.Nod(ir.OAS, ix, n)) // s[n] = arg
if i+1 < len(ls) {
- l = append(l, nod(OAS, nn, nod(OADD, nn, nodintconst(1)))) // n = n + 1
+ l = append(l, ir.Nod(ir.OAS, nn, ir.Nod(ir.OADD, nn, nodintconst(1)))) // n = n + 1
}
}
//
// Also works if b is a string.
//
-func copyany(n *Node, init *Nodes, runtimecall bool) *Node {
- if n.Left.Type.Elem().HasPointers() {
- Curfn.Func.setWBPos(n.Pos)
- fn := writebarrierfn("typedslicecopy", n.Left.Type.Elem(), n.Right.Type.Elem())
- n.Left = cheapexpr(n.Left, init)
- ptrL, lenL := n.Left.backingArrayPtrLen()
- n.Right = cheapexpr(n.Right, init)
- ptrR, lenR := n.Right.backingArrayPtrLen()
- return mkcall1(fn, n.Type, init, typename(n.Left.Type.Elem()), ptrL, lenL, ptrR, lenR)
+func copyany(n *ir.BinaryExpr, init *ir.Nodes, runtimecall bool) ir.Node {
+ if n.Left().Type().Elem().HasPointers() {
+ Curfn.SetWBPos(n.Pos())
+ fn := writebarrierfn("typedslicecopy", n.Left().Type().Elem(), n.Right().Type().Elem())
+ n.SetLeft(cheapexpr(n.Left(), init))
+ ptrL, lenL := backingArrayPtrLen(n.Left())
+ n.SetRight(cheapexpr(n.Right(), init))
+ ptrR, lenR := backingArrayPtrLen(n.Right())
+ return mkcall1(fn, n.Type(), init, typename(n.Left().Type().Elem()), ptrL, lenL, ptrR, lenR)
}
if runtimecall {
// copy(n.Left, n.Right)
// n.Right can be a slice or string.
- n.Left = cheapexpr(n.Left, init)
- ptrL, lenL := n.Left.backingArrayPtrLen()
- n.Right = cheapexpr(n.Right, init)
- ptrR, lenR := n.Right.backingArrayPtrLen()
+ n.SetLeft(cheapexpr(n.Left(), init))
+ ptrL, lenL := backingArrayPtrLen(n.Left())
+ n.SetRight(cheapexpr(n.Right(), init))
+ ptrR, lenR := backingArrayPtrLen(n.Right())
fn := syslook("slicecopy")
- fn = substArgTypes(fn, ptrL.Type.Elem(), ptrR.Type.Elem())
+ fn = substArgTypes(fn, ptrL.Type().Elem(), ptrR.Type().Elem())
- return mkcall1(fn, n.Type, init, ptrL, lenL, ptrR, lenR, nodintconst(n.Left.Type.Elem().Width))
+ return mkcall1(fn, n.Type(), init, ptrL, lenL, ptrR, lenR, nodintconst(n.Left().Type().Elem().Width))
}
- n.Left = walkexpr(n.Left, init)
- n.Right = walkexpr(n.Right, init)
- nl := temp(n.Left.Type)
- nr := temp(n.Right.Type)
- var l []*Node
- l = append(l, nod(OAS, nl, n.Left))
- l = append(l, nod(OAS, nr, n.Right))
+ n.SetLeft(walkexpr(n.Left(), init))
+ n.SetRight(walkexpr(n.Right(), init))
+ nl := temp(n.Left().Type())
+ nr := temp(n.Right().Type())
+ var l []ir.Node
+ l = append(l, ir.Nod(ir.OAS, nl, n.Left()))
+ l = append(l, ir.Nod(ir.OAS, nr, n.Right()))
- nfrm := nod(OSPTR, nr, nil)
- nto := nod(OSPTR, nl, nil)
+ nfrm := ir.Nod(ir.OSPTR, nr, nil)
+ nto := ir.Nod(ir.OSPTR, nl, nil)
- nlen := temp(types.Types[TINT])
+ nlen := temp(types.Types[types.TINT])
// n = len(to)
- l = append(l, nod(OAS, nlen, nod(OLEN, nl, nil)))
+ l = append(l, ir.Nod(ir.OAS, nlen, ir.Nod(ir.OLEN, nl, nil)))
// if n > len(frm) { n = len(frm) }
- nif := nod(OIF, nil, nil)
+ nif := ir.Nod(ir.OIF, nil, nil)
- nif.Left = nod(OGT, nlen, nod(OLEN, nr, nil))
- nif.Nbody.Append(nod(OAS, nlen, nod(OLEN, nr, nil)))
+ nif.SetLeft(ir.Nod(ir.OGT, nlen, ir.Nod(ir.OLEN, nr, nil)))
+ nif.PtrBody().Append(ir.Nod(ir.OAS, nlen, ir.Nod(ir.OLEN, nr, nil)))
l = append(l, nif)
// if to.ptr != frm.ptr { memmove( ... ) }
- ne := nod(OIF, nod(ONE, nto, nfrm), nil)
+ ne := ir.Nod(ir.OIF, ir.Nod(ir.ONE, nto, nfrm), nil)
ne.SetLikely(true)
l = append(l, ne)
fn := syslook("memmove")
- fn = substArgTypes(fn, nl.Type.Elem(), nl.Type.Elem())
- nwid := temp(types.Types[TUINTPTR])
- setwid := nod(OAS, nwid, conv(nlen, types.Types[TUINTPTR]))
- ne.Nbody.Append(setwid)
- nwid = nod(OMUL, nwid, nodintconst(nl.Type.Elem().Width))
+ fn = substArgTypes(fn, nl.Type().Elem(), nl.Type().Elem())
+ nwid := ir.Node(temp(types.Types[types.TUINTPTR]))
+ setwid := ir.Nod(ir.OAS, nwid, conv(nlen, types.Types[types.TUINTPTR]))
+ ne.PtrBody().Append(setwid)
+ nwid = ir.Nod(ir.OMUL, nwid, nodintconst(nl.Type().Elem().Width))
call := mkcall1(fn, nil, init, nto, nfrm, nwid)
- ne.Nbody.Append(call)
+ ne.PtrBody().Append(call)
typecheckslice(l, ctxStmt)
walkstmtlist(l)
return nlen
}
-func eqfor(t *types.Type) (n *Node, needsize bool) {
+func eqfor(t *types.Type) (n ir.Node, needsize bool) {
// Should only arrive here with large memory or
// a struct/array containing a non-memory field/element.
// Small memory is handled inline, and single non-memory
return n, true
case ASPECIAL:
sym := typesymprefix(".eq", t)
- n := newname(sym)
+ n := NewName(sym)
setNodeNameFunc(n)
- n.Type = functype(nil, []*Node{
+ n.SetType(functype(nil, []*ir.Field{
anonfield(types.NewPtr(t)),
anonfield(types.NewPtr(t)),
- }, []*Node{
- anonfield(types.Types[TBOOL]),
- })
+ }, []*ir.Field{
+ anonfield(types.Types[types.TBOOL]),
+ }))
return n, false
}
- Fatalf("eqfor %v", t)
+ base.Fatalf("eqfor %v", t)
return nil, false
}
// The result of walkcompare MUST be assigned back to n, e.g.
// n.Left = walkcompare(n.Left, init)
-func walkcompare(n *Node, init *Nodes) *Node {
- if n.Left.Type.IsInterface() && n.Right.Type.IsInterface() && n.Left.Op != OLITERAL && n.Right.Op != OLITERAL {
+func walkcompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
+ if n.Left().Type().IsInterface() && n.Right().Type().IsInterface() && n.Left().Op() != ir.ONIL && n.Right().Op() != ir.ONIL {
return walkcompareInterface(n, init)
}
- if n.Left.Type.IsString() && n.Right.Type.IsString() {
+ if n.Left().Type().IsString() && n.Right().Type().IsString() {
return walkcompareString(n, init)
}
- n.Left = walkexpr(n.Left, init)
- n.Right = walkexpr(n.Right, init)
+ n.SetLeft(walkexpr(n.Left(), init))
+ n.SetRight(walkexpr(n.Right(), init))
// Given mixed interface/concrete comparison,
// rewrite into types-equal && data-equal.
// This is efficient, avoids allocations, and avoids runtime calls.
- if n.Left.Type.IsInterface() != n.Right.Type.IsInterface() {
+ if n.Left().Type().IsInterface() != n.Right().Type().IsInterface() {
// Preserve side-effects in case of short-circuiting; see #32187.
- l := cheapexpr(n.Left, init)
- r := cheapexpr(n.Right, init)
+ l := cheapexpr(n.Left(), init)
+ r := cheapexpr(n.Right(), init)
// Swap so that l is the interface value and r is the concrete value.
- if n.Right.Type.IsInterface() {
+ if n.Right().Type().IsInterface() {
l, r = r, l
}
// Handle both == and !=.
- eq := n.Op
- andor := OOROR
- if eq == OEQ {
- andor = OANDAND
+ eq := n.Op()
+ andor := ir.OOROR
+ if eq == ir.OEQ {
+ andor = ir.OANDAND
}
// Check for types equal.
// For empty interface, this is:
// l.tab == type(r)
// For non-empty interface, this is:
// l.tab != nil && l.tab._type == type(r)
- var eqtype *Node
- tab := nod(OITAB, l, nil)
- rtyp := typename(r.Type)
- if l.Type.IsEmptyInterface() {
- tab.Type = types.NewPtr(types.Types[TUINT8])
+ var eqtype ir.Node
+ tab := ir.Nod(ir.OITAB, l, nil)
+ rtyp := typename(r.Type())
+ if l.Type().IsEmptyInterface() {
+ tab.SetType(types.NewPtr(types.Types[types.TUINT8]))
tab.SetTypecheck(1)
- eqtype = nod(eq, tab, rtyp)
+ eqtype = ir.NewBinaryExpr(base.Pos, eq, tab, rtyp)
} else {
- nonnil := nod(brcom(eq), nodnil(), tab)
- match := nod(eq, itabType(tab), rtyp)
- eqtype = nod(andor, nonnil, match)
+ nonnil := ir.NewBinaryExpr(base.Pos, brcom(eq), nodnil(), tab)
+ match := ir.NewBinaryExpr(base.Pos, eq, itabType(tab), rtyp)
+ eqtype = ir.NewLogicalExpr(base.Pos, andor, nonnil, match)
}
// Check for data equal.
- eqdata := nod(eq, ifaceData(n.Pos, l, r.Type), r)
+ eqdata := ir.NewBinaryExpr(base.Pos, eq, ifaceData(n.Pos(), l, r.Type()), r)
// Put it all together.
- expr := nod(andor, eqtype, eqdata)
- n = finishcompare(n, expr, init)
- return n
+ expr := ir.NewLogicalExpr(base.Pos, andor, eqtype, eqdata)
+ return finishcompare(n, expr, init)
}
// Must be comparison of array or struct.
// Otherwise back end handles it.
// While we're here, decide whether to
// inline or call an eq alg.
- t := n.Left.Type
+ t := n.Left().Type()
var inline bool
maxcmpsize := int64(4)
maxcmpsize = 2 * int64(thearch.LinkArch.RegSize)
}
- switch t.Etype {
+ switch t.Kind() {
default:
- if Debug_libfuzzer != 0 && t.IsInteger() {
- n.Left = cheapexpr(n.Left, init)
- n.Right = cheapexpr(n.Right, init)
+ if base.Debug.Libfuzzer != 0 && t.IsInteger() {
+ n.SetLeft(cheapexpr(n.Left(), init))
+ n.SetRight(cheapexpr(n.Right(), init))
// If exactly one comparison operand is
// constant, invoke the constcmp functions
// instead, and arrange for the constant
// operand to be the first argument.
- l, r := n.Left, n.Right
- if r.Op == OLITERAL {
+ l, r := n.Left(), n.Right()
+ if r.Op() == ir.OLITERAL {
l, r = r, l
}
- constcmp := l.Op == OLITERAL && r.Op != OLITERAL
+ constcmp := l.Op() == ir.OLITERAL && r.Op() != ir.OLITERAL
var fn string
var paramType *types.Type
if constcmp {
fn = "libfuzzerTraceConstCmp1"
}
- paramType = types.Types[TUINT8]
+ paramType = types.Types[types.TUINT8]
case 2:
fn = "libfuzzerTraceCmp2"
if constcmp {
fn = "libfuzzerTraceConstCmp2"
}
- paramType = types.Types[TUINT16]
+ paramType = types.Types[types.TUINT16]
case 4:
fn = "libfuzzerTraceCmp4"
if constcmp {
fn = "libfuzzerTraceConstCmp4"
}
- paramType = types.Types[TUINT32]
+ paramType = types.Types[types.TUINT32]
case 8:
fn = "libfuzzerTraceCmp8"
if constcmp {
fn = "libfuzzerTraceConstCmp8"
}
- paramType = types.Types[TUINT64]
+ paramType = types.Types[types.TUINT64]
default:
- Fatalf("unexpected integer size %d for %v", t.Size(), t)
+ base.Fatalf("unexpected integer size %d for %v", t.Size(), t)
}
init.Append(mkcall(fn, nil, init, tracecmpArg(l, paramType, init), tracecmpArg(r, paramType, init)))
}
return n
- case TARRAY:
+ case types.TARRAY:
// We can compare several elements at once with 2/4/8 byte integer compares
- inline = t.NumElem() <= 1 || (issimple[t.Elem().Etype] && (t.NumElem() <= 4 || t.Elem().Width*t.NumElem() <= maxcmpsize))
- case TSTRUCT:
+ inline = t.NumElem() <= 1 || (issimple[t.Elem().Kind()] && (t.NumElem() <= 4 || t.Elem().Width*t.NumElem() <= maxcmpsize))
+ case types.TSTRUCT:
inline = t.NumComponents(types.IgnoreBlankFields) <= 4
}
- cmpl := n.Left
- for cmpl != nil && cmpl.Op == OCONVNOP {
- cmpl = cmpl.Left
+ cmpl := n.Left()
+ for cmpl != nil && cmpl.Op() == ir.OCONVNOP {
+ cmpl = cmpl.(*ir.ConvExpr).Left()
}
- cmpr := n.Right
- for cmpr != nil && cmpr.Op == OCONVNOP {
- cmpr = cmpr.Left
+ cmpr := n.Right()
+ for cmpr != nil && cmpr.Op() == ir.OCONVNOP {
+ cmpr = cmpr.(*ir.ConvExpr).Left()
}
// Chose not to inline. Call equality function directly.
if !inline {
// eq algs take pointers; cmpl and cmpr must be addressable
if !islvalue(cmpl) || !islvalue(cmpr) {
- Fatalf("arguments of comparison must be lvalues - %v %v", cmpl, cmpr)
+ base.Fatalf("arguments of comparison must be lvalues - %v %v", cmpl, cmpr)
}
fn, needsize := eqfor(t)
- call := nod(OCALL, fn, nil)
- call.List.Append(nod(OADDR, cmpl, nil))
- call.List.Append(nod(OADDR, cmpr, nil))
+ call := ir.Nod(ir.OCALL, fn, nil)
+ call.PtrList().Append(nodAddr(cmpl))
+ call.PtrList().Append(nodAddr(cmpr))
if needsize {
- call.List.Append(nodintconst(t.Width))
+ call.PtrList().Append(nodintconst(t.Width))
}
- res := call
- if n.Op != OEQ {
- res = nod(ONOT, res, nil)
+ res := ir.Node(call)
+ if n.Op() != ir.OEQ {
+ res = ir.Nod(ir.ONOT, res, nil)
}
- n = finishcompare(n, res, init)
- return n
+ return finishcompare(n, res, init)
}
// inline: build boolean expression comparing element by element
- andor := OANDAND
- if n.Op == ONE {
- andor = OOROR
+ andor := ir.OANDAND
+ if n.Op() == ir.ONE {
+ andor = ir.OOROR
}
- var expr *Node
- compare := func(el, er *Node) {
- a := nod(n.Op, el, er)
+ var expr ir.Node
+ compare := func(el, er ir.Node) {
+ a := ir.NewBinaryExpr(base.Pos, n.Op(), el, er)
if expr == nil {
expr = a
} else {
- expr = nod(andor, expr, a)
+ expr = ir.NewLogicalExpr(base.Pos, andor, expr, a)
}
}
cmpl = safeexpr(cmpl, init)
continue
}
compare(
- nodSym(OXDOT, cmpl, sym),
- nodSym(OXDOT, cmpr, sym),
+ nodSym(ir.OXDOT, cmpl, sym),
+ nodSym(ir.OXDOT, cmpr, sym),
)
}
} else {
var convType *types.Type
switch {
case remains >= 8 && combine64bit:
- convType = types.Types[TINT64]
+ convType = types.Types[types.TINT64]
step = 8 / t.Elem().Width
case remains >= 4 && combine32bit:
- convType = types.Types[TUINT32]
+ convType = types.Types[types.TUINT32]
step = 4 / t.Elem().Width
case remains >= 2 && combine16bit:
- convType = types.Types[TUINT16]
+ convType = types.Types[types.TUINT16]
step = 2 / t.Elem().Width
default:
step = 1
}
if step == 1 {
compare(
- nod(OINDEX, cmpl, nodintconst(i)),
- nod(OINDEX, cmpr, nodintconst(i)),
+ ir.Nod(ir.OINDEX, cmpl, nodintconst(i)),
+ ir.Nod(ir.OINDEX, cmpr, nodintconst(i)),
)
i++
remains -= t.Elem().Width
} else {
elemType := t.Elem().ToUnsigned()
- cmplw := nod(OINDEX, cmpl, nodintconst(i))
+ cmplw := ir.Node(ir.Nod(ir.OINDEX, cmpl, nodintconst(i)))
cmplw = conv(cmplw, elemType) // convert to unsigned
cmplw = conv(cmplw, convType) // widen
- cmprw := nod(OINDEX, cmpr, nodintconst(i))
+ cmprw := ir.Node(ir.Nod(ir.OINDEX, cmpr, nodintconst(i)))
cmprw = conv(cmprw, elemType)
cmprw = conv(cmprw, convType)
// For code like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ...
// ssa will generate a single large load.
for offset := int64(1); offset < step; offset++ {
- lb := nod(OINDEX, cmpl, nodintconst(i+offset))
+ lb := ir.Node(ir.Nod(ir.OINDEX, cmpl, nodintconst(i+offset)))
lb = conv(lb, elemType)
lb = conv(lb, convType)
- lb = nod(OLSH, lb, nodintconst(8*t.Elem().Width*offset))
- cmplw = nod(OOR, cmplw, lb)
- rb := nod(OINDEX, cmpr, nodintconst(i+offset))
+ lb = ir.Nod(ir.OLSH, lb, nodintconst(8*t.Elem().Width*offset))
+ cmplw = ir.Nod(ir.OOR, cmplw, lb)
+ rb := ir.Node(ir.Nod(ir.OINDEX, cmpr, nodintconst(i+offset)))
rb = conv(rb, elemType)
rb = conv(rb, convType)
- rb = nod(OLSH, rb, nodintconst(8*t.Elem().Width*offset))
- cmprw = nod(OOR, cmprw, rb)
+ rb = ir.Nod(ir.OLSH, rb, nodintconst(8*t.Elem().Width*offset))
+ cmprw = ir.Nod(ir.OOR, cmprw, rb)
}
compare(cmplw, cmprw)
i += step
}
}
if expr == nil {
- expr = nodbool(n.Op == OEQ)
+ expr = nodbool(n.Op() == ir.OEQ)
// We still need to use cmpl and cmpr, in case they contain
// an expression which might panic. See issue 23837.
- t := temp(cmpl.Type)
- a1 := nod(OAS, t, cmpl)
- a1 = typecheck(a1, ctxStmt)
- a2 := nod(OAS, t, cmpr)
- a2 = typecheck(a2, ctxStmt)
+ t := temp(cmpl.Type())
+ a1 := typecheck(ir.Nod(ir.OAS, t, cmpl), ctxStmt)
+ a2 := typecheck(ir.Nod(ir.OAS, t, cmpr), ctxStmt)
init.Append(a1, a2)
}
- n = finishcompare(n, expr, init)
- return n
+ return finishcompare(n, expr, init)
}
-func tracecmpArg(n *Node, t *types.Type, init *Nodes) *Node {
+func tracecmpArg(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node {
// Ugly hack to avoid "constant -1 overflows uintptr" errors, etc.
- if n.Op == OLITERAL && n.Type.IsSigned() && n.Int64Val() < 0 {
- n = copyexpr(n, n.Type, init)
+ if n.Op() == ir.OLITERAL && n.Type().IsSigned() && ir.Int64Val(n) < 0 {
+ n = copyexpr(n, n.Type(), init)
}
return conv(n, t)
}
-func walkcompareInterface(n *Node, init *Nodes) *Node {
- n.Right = cheapexpr(n.Right, init)
- n.Left = cheapexpr(n.Left, init)
- eqtab, eqdata := eqinterface(n.Left, n.Right)
- var cmp *Node
- if n.Op == OEQ {
- cmp = nod(OANDAND, eqtab, eqdata)
+func walkcompareInterface(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
+ n.SetRight(cheapexpr(n.Right(), init))
+ n.SetLeft(cheapexpr(n.Left(), init))
+ eqtab, eqdata := eqinterface(n.Left(), n.Right())
+ var cmp ir.Node
+ if n.Op() == ir.OEQ {
+ cmp = ir.Nod(ir.OANDAND, eqtab, eqdata)
} else {
- eqtab.Op = ONE
- cmp = nod(OOROR, eqtab, nod(ONOT, eqdata, nil))
+ eqtab.SetOp(ir.ONE)
+ cmp = ir.Nod(ir.OOROR, eqtab, ir.Nod(ir.ONOT, eqdata, nil))
}
return finishcompare(n, cmp, init)
}
-func walkcompareString(n *Node, init *Nodes) *Node {
+func walkcompareString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
// Rewrite comparisons to short constant strings as length+byte-wise comparisons.
- var cs, ncs *Node // const string, non-const string
+ var cs, ncs ir.Node // const string, non-const string
switch {
- case Isconst(n.Left, CTSTR) && Isconst(n.Right, CTSTR):
+ case ir.IsConst(n.Left(), constant.String) && ir.IsConst(n.Right(), constant.String):
// ignore; will be constant evaluated
- case Isconst(n.Left, CTSTR):
- cs = n.Left
- ncs = n.Right
- case Isconst(n.Right, CTSTR):
- cs = n.Right
- ncs = n.Left
+ case ir.IsConst(n.Left(), constant.String):
+ cs = n.Left()
+ ncs = n.Right()
+ case ir.IsConst(n.Right(), constant.String):
+ cs = n.Right()
+ ncs = n.Left()
}
if cs != nil {
- cmp := n.Op
+ cmp := n.Op()
// Our comparison below assumes that the non-constant string
// is on the left hand side, so rewrite "" cmp x to x cmp "".
// See issue 24817.
- if Isconst(n.Left, CTSTR) {
+ if ir.IsConst(n.Left(), constant.String) {
cmp = brrev(cmp)
}
combine64bit = thearch.LinkArch.RegSize >= 8
}
- var and Op
+ var and ir.Op
switch cmp {
- case OEQ:
- and = OANDAND
- case ONE:
- and = OOROR
+ case ir.OEQ:
+ and = ir.OANDAND
+ case ir.ONE:
+ and = ir.OOROR
default:
// Don't do byte-wise comparisons for <, <=, etc.
// They're fairly complicated.
// Length-only checks are ok, though.
maxRewriteLen = 0
}
- if s := cs.StringVal(); len(s) <= maxRewriteLen {
+ if s := ir.StringVal(cs); len(s) <= maxRewriteLen {
if len(s) > 0 {
ncs = safeexpr(ncs, init)
}
- r := nod(cmp, nod(OLEN, ncs, nil), nodintconst(int64(len(s))))
+ r := ir.Node(ir.NewBinaryExpr(base.Pos, cmp, ir.Nod(ir.OLEN, ncs, nil), nodintconst(int64(len(s)))))
remains := len(s)
for i := 0; remains > 0; {
if remains == 1 || !canCombineLoads {
cb := nodintconst(int64(s[i]))
- ncb := nod(OINDEX, ncs, nodintconst(int64(i)))
- r = nod(and, r, nod(cmp, ncb, cb))
+ ncb := ir.Nod(ir.OINDEX, ncs, nodintconst(int64(i)))
+ r = ir.NewLogicalExpr(base.Pos, and, r, ir.NewBinaryExpr(base.Pos, cmp, ncb, cb))
remains--
i++
continue
var convType *types.Type
switch {
case remains >= 8 && combine64bit:
- convType = types.Types[TINT64]
+ convType = types.Types[types.TINT64]
step = 8
case remains >= 4:
- convType = types.Types[TUINT32]
+ convType = types.Types[types.TUINT32]
step = 4
case remains >= 2:
- convType = types.Types[TUINT16]
+ convType = types.Types[types.TUINT16]
step = 2
}
- ncsubstr := nod(OINDEX, ncs, nodintconst(int64(i)))
- ncsubstr = conv(ncsubstr, convType)
+ ncsubstr := conv(ir.Nod(ir.OINDEX, ncs, nodintconst(int64(i))), convType)
csubstr := int64(s[i])
// Calculate large constant from bytes as sequence of shifts and ors.
// Like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ...
// ssa will combine this into a single large load.
for offset := 1; offset < step; offset++ {
- b := nod(OINDEX, ncs, nodintconst(int64(i+offset)))
- b = conv(b, convType)
- b = nod(OLSH, b, nodintconst(int64(8*offset)))
- ncsubstr = nod(OOR, ncsubstr, b)
+ b := conv(ir.Nod(ir.OINDEX, ncs, nodintconst(int64(i+offset))), convType)
+ b = ir.Nod(ir.OLSH, b, nodintconst(int64(8*offset)))
+ ncsubstr = ir.Nod(ir.OOR, ncsubstr, b)
csubstr |= int64(s[i+offset]) << uint8(8*offset)
}
csubstrPart := nodintconst(csubstr)
// Compare "step" bytes as once
- r = nod(and, r, nod(cmp, csubstrPart, ncsubstr))
+ r = ir.NewLogicalExpr(base.Pos, and, r, ir.NewBinaryExpr(base.Pos, cmp, csubstrPart, ncsubstr))
remains -= step
i += step
}
}
}
- var r *Node
- if n.Op == OEQ || n.Op == ONE {
+ var r ir.Node
+ if n.Op() == ir.OEQ || n.Op() == ir.ONE {
// prepare for rewrite below
- n.Left = cheapexpr(n.Left, init)
- n.Right = cheapexpr(n.Right, init)
- eqlen, eqmem := eqstring(n.Left, n.Right)
+ n.SetLeft(cheapexpr(n.Left(), init))
+ n.SetRight(cheapexpr(n.Right(), init))
+ eqlen, eqmem := eqstring(n.Left(), n.Right())
// quick check of len before full compare for == or !=.
// memequal then tests equality up to length len.
- if n.Op == OEQ {
+ if n.Op() == ir.OEQ {
// len(left) == len(right) && memequal(left, right, len)
- r = nod(OANDAND, eqlen, eqmem)
+ r = ir.Nod(ir.OANDAND, eqlen, eqmem)
} else {
// len(left) != len(right) || !memequal(left, right, len)
- eqlen.Op = ONE
- r = nod(OOROR, eqlen, nod(ONOT, eqmem, nil))
+ eqlen.SetOp(ir.ONE)
+ r = ir.Nod(ir.OOROR, eqlen, ir.Nod(ir.ONOT, eqmem, nil))
}
} else {
// sys_cmpstring(s1, s2) :: 0
- r = mkcall("cmpstring", types.Types[TINT], init, conv(n.Left, types.Types[TSTRING]), conv(n.Right, types.Types[TSTRING]))
- r = nod(n.Op, r, nodintconst(0))
+ r = mkcall("cmpstring", types.Types[types.TINT], init, conv(n.Left(), types.Types[types.TSTRING]), conv(n.Right(), types.Types[types.TSTRING]))
+ r = ir.NewBinaryExpr(base.Pos, n.Op(), r, nodintconst(0))
}
return finishcompare(n, r, init)
// The result of finishcompare MUST be assigned back to n, e.g.
// n.Left = finishcompare(n.Left, x, r, init)
-func finishcompare(n, r *Node, init *Nodes) *Node {
+func finishcompare(n *ir.BinaryExpr, r ir.Node, init *ir.Nodes) ir.Node {
r = typecheck(r, ctxExpr)
- r = conv(r, n.Type)
+ r = conv(r, n.Type())
r = walkexpr(r, init)
return r
}
// return 1 if integer n must be in range [0, max), 0 otherwise
-func bounded(n *Node, max int64) bool {
- if n.Type == nil || !n.Type.IsInteger() {
+func bounded(n ir.Node, max int64) bool {
+ if n.Type() == nil || !n.Type().IsInteger() {
return false
}
- sign := n.Type.IsSigned()
- bits := int32(8 * n.Type.Width)
+ sign := n.Type().IsSigned()
+ bits := int32(8 * n.Type().Width)
if smallintconst(n) {
- v := n.Int64Val()
+ v := ir.Int64Val(n)
return 0 <= v && v < max
}
- switch n.Op {
- case OAND, OANDNOT:
+ switch n.Op() {
+ case ir.OAND, ir.OANDNOT:
v := int64(-1)
switch {
- case smallintconst(n.Left):
- v = n.Left.Int64Val()
- case smallintconst(n.Right):
- v = n.Right.Int64Val()
- if n.Op == OANDNOT {
+ case smallintconst(n.Left()):
+ v = ir.Int64Val(n.Left())
+ case smallintconst(n.Right()):
+ v = ir.Int64Val(n.Right())
+ if n.Op() == ir.OANDNOT {
v = ^v
if !sign {
v &= 1<<uint(bits) - 1
return true
}
- case OMOD:
- if !sign && smallintconst(n.Right) {
- v := n.Right.Int64Val()
+ case ir.OMOD:
+ if !sign && smallintconst(n.Right()) {
+ v := ir.Int64Val(n.Right())
if 0 <= v && v <= max {
return true
}
}
- case ODIV:
- if !sign && smallintconst(n.Right) {
- v := n.Right.Int64Val()
+ case ir.ODIV:
+ if !sign && smallintconst(n.Right()) {
+ v := ir.Int64Val(n.Right())
for bits > 0 && v >= 2 {
bits--
v >>= 1
}
}
- case ORSH:
- if !sign && smallintconst(n.Right) {
- v := n.Right.Int64Val()
+ case ir.ORSH:
+ if !sign && smallintconst(n.Right()) {
+ v := ir.Int64Val(n.Right())
if v > int64(bits) {
return true
}
}
// usemethod checks interface method calls for uses of reflect.Type.Method.
-func usemethod(n *Node) {
- t := n.Left.Type
+func usemethod(n *ir.CallExpr) {
+ t := n.Left().Type()
// Looking for either of:
// Method(int) reflect.Method
}
if res1 == nil {
- if p0.Type.Etype != TINT {
+ if p0.Type.Kind() != types.TINT {
return
}
} else {
// Note: Don't rely on res0.Type.String() since its formatting depends on multiple factors
// (including global variables such as numImports - was issue #19028).
// Also need to check for reflect package itself (see Issue #38515).
- if s := res0.Type.Sym; s != nil && s.Name == "Method" && isReflectPkg(s.Pkg) {
- Curfn.Func.SetReflectMethod(true)
+ if s := res0.Type.Sym(); s != nil && s.Name == "Method" && isReflectPkg(s.Pkg) {
+ Curfn.SetReflectMethod(true)
// The LSym is initialized at this point. We need to set the attribute on the LSym.
- Curfn.Func.lsym.Set(obj.AttrReflectMethod, true)
+ Curfn.LSym.Set(obj.AttrReflectMethod, true)
}
}
-func usefield(n *Node) {
+func usefield(n *ir.SelectorExpr) {
if objabi.Fieldtrack_enabled == 0 {
return
}
- switch n.Op {
+ switch n.Op() {
default:
- Fatalf("usefield %v", n.Op)
+ base.Fatalf("usefield %v", n.Op())
- case ODOT, ODOTPTR:
+ case ir.ODOT, ir.ODOTPTR:
break
}
- if n.Sym == nil {
+ if n.Sym() == nil {
// No field name. This DOTPTR was built by the compiler for access
// to runtime data structures. Ignore.
return
}
- t := n.Left.Type
+ t := n.Left().Type()
if t.IsPtr() {
t = t.Elem()
}
- field := n.Opt().(*types.Field)
+ field := n.Selection
if field == nil {
- Fatalf("usefield %v %v without paramfld", n.Left.Type, n.Sym)
+ base.Fatalf("usefield %v %v without paramfld", n.Left().Type(), n.Sym())
}
- if field.Sym != n.Sym || field.Offset != n.Xoffset {
- Fatalf("field inconsistency: %v,%v != %v,%v", field.Sym, field.Offset, n.Sym, n.Xoffset)
+ if field.Sym != n.Sym() || field.Offset != n.Offset() {
+ base.Fatalf("field inconsistency: %v,%v != %v,%v", field.Sym, field.Offset, n.Sym(), n.Offset())
}
if !strings.Contains(field.Note, "go:\"track\"") {
return
}
- outer := n.Left.Type
+ outer := n.Left().Type()
if outer.IsPtr() {
outer = outer.Elem()
}
- if outer.Sym == nil {
- yyerror("tracked field must be in named struct type")
+ if outer.Sym() == nil {
+ base.Errorf("tracked field must be in named struct type")
}
if !types.IsExported(field.Sym.Name) {
- yyerror("tracked field must be exported (upper case)")
+ base.Errorf("tracked field must be exported (upper case)")
}
sym := tracksym(outer, field)
- if Curfn.Func.FieldTrack == nil {
- Curfn.Func.FieldTrack = make(map[*types.Sym]struct{})
- }
- Curfn.Func.FieldTrack[sym] = struct{}{}
-}
-
-func candiscardlist(l Nodes) bool {
- for _, n := range l.Slice() {
- if !candiscard(n) {
- return false
- }
+ if Curfn.FieldTrack == nil {
+ Curfn.FieldTrack = make(map[*types.Sym]struct{})
}
- return true
+ Curfn.FieldTrack[sym] = struct{}{}
}
-func candiscard(n *Node) bool {
- if n == nil {
- return true
- }
+// anySideEffects reports whether n contains any operations that could have observable side effects.
+func anySideEffects(n ir.Node) bool {
+ return ir.Any(n, func(n ir.Node) bool {
+ switch n.Op() {
+ // Assume side effects unless we know otherwise.
+ default:
+ return true
- switch n.Op {
- default:
- return false
+ // No side effects here (arguments are checked separately).
+ case ir.ONAME,
+ ir.ONONAME,
+ ir.OTYPE,
+ ir.OPACK,
+ ir.OLITERAL,
+ ir.ONIL,
+ ir.OADD,
+ ir.OSUB,
+ ir.OOR,
+ ir.OXOR,
+ ir.OADDSTR,
+ ir.OADDR,
+ ir.OANDAND,
+ ir.OBYTES2STR,
+ ir.ORUNES2STR,
+ ir.OSTR2BYTES,
+ ir.OSTR2RUNES,
+ ir.OCAP,
+ ir.OCOMPLIT,
+ ir.OMAPLIT,
+ ir.OSTRUCTLIT,
+ ir.OARRAYLIT,
+ ir.OSLICELIT,
+ ir.OPTRLIT,
+ ir.OCONV,
+ ir.OCONVIFACE,
+ ir.OCONVNOP,
+ ir.ODOT,
+ ir.OEQ,
+ ir.ONE,
+ ir.OLT,
+ ir.OLE,
+ ir.OGT,
+ ir.OGE,
+ ir.OKEY,
+ ir.OSTRUCTKEY,
+ ir.OLEN,
+ ir.OMUL,
+ ir.OLSH,
+ ir.ORSH,
+ ir.OAND,
+ ir.OANDNOT,
+ ir.ONEW,
+ ir.ONOT,
+ ir.OBITNOT,
+ ir.OPLUS,
+ ir.ONEG,
+ ir.OOROR,
+ ir.OPAREN,
+ ir.ORUNESTR,
+ ir.OREAL,
+ ir.OIMAG,
+ ir.OCOMPLEX:
+ return false
- // Discardable as long as the subpieces are.
- case ONAME,
- ONONAME,
- OTYPE,
- OPACK,
- OLITERAL,
- OADD,
- OSUB,
- OOR,
- OXOR,
- OADDSTR,
- OADDR,
- OANDAND,
- OBYTES2STR,
- ORUNES2STR,
- OSTR2BYTES,
- OSTR2RUNES,
- OCAP,
- OCOMPLIT,
- OMAPLIT,
- OSTRUCTLIT,
- OARRAYLIT,
- OSLICELIT,
- OPTRLIT,
- OCONV,
- OCONVIFACE,
- OCONVNOP,
- ODOT,
- OEQ,
- ONE,
- OLT,
- OLE,
- OGT,
- OGE,
- OKEY,
- OSTRUCTKEY,
- OLEN,
- OMUL,
- OLSH,
- ORSH,
- OAND,
- OANDNOT,
- ONEW,
- ONOT,
- OBITNOT,
- OPLUS,
- ONEG,
- OOROR,
- OPAREN,
- ORUNESTR,
- OREAL,
- OIMAG,
- OCOMPLEX:
- break
+ // Only possible side effect is division by zero.
+ case ir.ODIV, ir.OMOD:
+ if n.Right().Op() != ir.OLITERAL || constant.Sign(n.Right().Val()) == 0 {
+ return true
+ }
- // Discardable as long as we know it's not division by zero.
- case ODIV, OMOD:
- if Isconst(n.Right, CTINT) && n.Right.Val().U.(*Mpint).CmpInt64(0) != 0 {
- break
- }
- if Isconst(n.Right, CTFLT) && n.Right.Val().U.(*Mpflt).CmpFloat64(0) != 0 {
- break
- }
- return false
+ // Only possible side effect is panic on invalid size,
+ // but many makechan and makemap use size zero, which is definitely OK.
+ case ir.OMAKECHAN, ir.OMAKEMAP:
+ if !ir.IsConst(n.Left(), constant.Int) || constant.Sign(n.Left().Val()) != 0 {
+ return true
+ }
- // Discardable as long as we know it won't fail because of a bad size.
- case OMAKECHAN, OMAKEMAP:
- if Isconst(n.Left, CTINT) && n.Left.Val().U.(*Mpint).CmpInt64(0) == 0 {
- break
+ // Only possible side effect is panic on invalid size.
+ // TODO(rsc): Merge with previous case (probably breaks toolstash -cmp).
+ case ir.OMAKESLICE, ir.OMAKESLICECOPY:
+ return true
}
return false
-
- // Difficult to tell what sizes are okay.
- case OMAKESLICE:
- return false
-
- case OMAKESLICECOPY:
- return false
- }
-
- if !candiscard(n.Left) || !candiscard(n.Right) || !candiscardlist(n.Ninit) || !candiscardlist(n.Nbody) || !candiscardlist(n.List) || !candiscardlist(n.Rlist) {
- return false
- }
-
- return true
+ })
}
// Rewrite
// The result of wrapCall MUST be assigned back to n, e.g.
// n.Left = wrapCall(n.Left, init)
-func wrapCall(n *Node, init *Nodes) *Node {
- if n.Ninit.Len() != 0 {
- walkstmtlist(n.Ninit.Slice())
- init.AppendNodes(&n.Ninit)
+func wrapCall(n *ir.CallExpr, init *ir.Nodes) ir.Node {
+ if n.Init().Len() != 0 {
+ walkstmtlist(n.Init().Slice())
+ init.AppendNodes(n.PtrInit())
}
- isBuiltinCall := n.Op != OCALLFUNC && n.Op != OCALLMETH && n.Op != OCALLINTER
+ isBuiltinCall := n.Op() != ir.OCALLFUNC && n.Op() != ir.OCALLMETH && n.Op() != ir.OCALLINTER
// Turn f(a, b, []T{c, d, e}...) back into f(a, b, c, d, e).
if !isBuiltinCall && n.IsDDD() {
- last := n.List.Len() - 1
- if va := n.List.Index(last); va.Op == OSLICELIT {
- n.List.Set(append(n.List.Slice()[:last], va.List.Slice()...))
+ last := n.List().Len() - 1
+ if va := n.List().Index(last); va.Op() == ir.OSLICELIT {
+ n.PtrList().Set(append(n.List().Slice()[:last], va.List().Slice()...))
n.SetIsDDD(false)
}
}
// origArgs keeps track of what argument is uintptr-unsafe/unsafe-uintptr conversion.
- origArgs := make([]*Node, n.List.Len())
- t := nod(OTFUNC, nil, nil)
- for i, arg := range n.List.Slice() {
+ origArgs := make([]ir.Node, n.List().Len())
+ var funcArgs []*ir.Field
+ for i, arg := range n.List().Slice() {
s := lookupN("a", i)
- if !isBuiltinCall && arg.Op == OCONVNOP && arg.Type.IsUintptr() && arg.Left.Type.IsUnsafePtr() {
+ if !isBuiltinCall && arg.Op() == ir.OCONVNOP && arg.Type().IsUintptr() && arg.(*ir.ConvExpr).Left().Type().IsUnsafePtr() {
origArgs[i] = arg
- arg = arg.Left
- n.List.SetIndex(i, arg)
+ arg = arg.(*ir.ConvExpr).Left()
+ n.List().SetIndex(i, arg)
}
- t.List.Append(symfield(s, arg.Type))
+ funcArgs = append(funcArgs, symfield(s, arg.Type()))
}
+ t := ir.NewFuncType(base.Pos, nil, funcArgs, nil)
wrapCall_prgen++
sym := lookupN("wrap·", wrapCall_prgen)
fn := dclfunc(sym, t)
- args := paramNnames(t.Type)
+ args := paramNnames(t.Type())
for i, origArg := range origArgs {
if origArg == nil {
continue
}
- arg := nod(origArg.Op, args[i], nil)
- arg.Type = origArg.Type
- args[i] = arg
+ args[i] = ir.NewConvExpr(base.Pos, origArg.Op(), origArg.Type(), args[i])
}
- call := nod(n.Op, nil, nil)
+ call := ir.NewCallExpr(base.Pos, n.Op(), n.Left(), args)
if !isBuiltinCall {
- call.Op = OCALL
- call.Left = n.Left
+ call.SetOp(ir.OCALL)
call.SetIsDDD(n.IsDDD())
}
- call.List.Set(args)
- fn.Nbody.Set1(call)
+ fn.PtrBody().Set1(call)
funcbody()
- fn = typecheck(fn, ctxStmt)
- typecheckslice(fn.Nbody.Slice(), ctxStmt)
- xtop = append(xtop, fn)
+ typecheckFunc(fn)
+ typecheckslice(fn.Body().Slice(), ctxStmt)
+ Target.Decls = append(Target.Decls, fn)
- call = nod(OCALL, nil, nil)
- call.Left = fn.Func.Nname
- call.List.Set(n.List.Slice())
- call = typecheck(call, ctxStmt)
- call = walkexpr(call, init)
- return call
+ call = ir.NewCallExpr(base.Pos, ir.OCALL, fn.Nname, n.List().Slice())
+ return walkexpr(typecheck(call, ctxStmt), init)
}
// substArgTypes substitutes the given list of types for
// type syntax expression n.Type.
// The result of substArgTypes MUST be assigned back to old, e.g.
// n.Left = substArgTypes(n.Left, t1, t2)
-func substArgTypes(old *Node, types_ ...*types.Type) *Node {
- n := old.copy()
+func substArgTypes(old *ir.Name, types_ ...*types.Type) *ir.Name {
+ n := old.CloneName()
for _, t := range types_ {
dowidth(t)
}
- n.Type = types.SubstAny(n.Type, &types_)
+ n.SetType(types.SubstAny(n.Type(), &types_))
if len(types_) > 0 {
- Fatalf("substArgTypes: too many argument types")
+ base.Fatalf("substArgTypes: too many argument types")
}
return n
}
// isRuneCount reports whether n is of the form len([]rune(string)).
// These are optimized into a call to runtime.countrunes.
-func isRuneCount(n *Node) bool {
- return Debug.N == 0 && !instrumenting && n.Op == OLEN && n.Left.Op == OSTR2RUNES
+func isRuneCount(n ir.Node) bool {
+ return base.Flag.N == 0 && !instrumenting && n.Op() == ir.OLEN && n.(*ir.UnaryExpr).Left().Op() == ir.OSTR2RUNES
}
-func walkCheckPtrAlignment(n *Node, init *Nodes, count *Node) *Node {
- if !n.Type.IsPtr() {
- Fatalf("expected pointer type: %v", n.Type)
+func walkCheckPtrAlignment(n *ir.ConvExpr, init *ir.Nodes, count ir.Node) ir.Node {
+ if !n.Type().IsPtr() {
+ base.Fatalf("expected pointer type: %v", n.Type())
}
- elem := n.Type.Elem()
+ elem := n.Type().Elem()
if count != nil {
if !elem.IsArray() {
- Fatalf("expected array type: %v", elem)
+ base.Fatalf("expected array type: %v", elem)
}
elem = elem.Elem()
}
count = nodintconst(1)
}
- n.Left = cheapexpr(n.Left, init)
- init.Append(mkcall("checkptrAlignment", nil, init, convnop(n.Left, types.Types[TUNSAFEPTR]), typename(elem), conv(count, types.Types[TUINTPTR])))
+ n.SetLeft(cheapexpr(n.Left(), init))
+ init.Append(mkcall("checkptrAlignment", nil, init, convnop(n.Left(), types.Types[types.TUNSAFEPTR]), typename(elem), conv(count, types.Types[types.TUINTPTR])))
return n
}
var walkCheckPtrArithmeticMarker byte
-func walkCheckPtrArithmetic(n *Node, init *Nodes) *Node {
+func walkCheckPtrArithmetic(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
// Calling cheapexpr(n, init) below leads to a recursive call
// to walkexpr, which leads us back here again. Use n.Opt to
// prevent infinite loops.
} else if opt != nil {
// We use n.Opt() here because today it's not used for OCONVNOP. If that changes,
// there's no guarantee that temporarily replacing it is safe, so just hard fail here.
- Fatalf("unexpected Opt: %v", opt)
+ base.Fatalf("unexpected Opt: %v", opt)
}
n.SetOpt(&walkCheckPtrArithmeticMarker)
defer n.SetOpt(nil)
// TODO(mdempsky): Make stricter. We only need to exempt
// reflect.Value.Pointer and reflect.Value.UnsafeAddr.
- switch n.Left.Op {
- case OCALLFUNC, OCALLMETH, OCALLINTER:
+ switch n.Left().Op() {
+ case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
return n
}
- if n.Left.Op == ODOTPTR && isReflectHeaderDataField(n.Left) {
+ if n.Left().Op() == ir.ODOTPTR && isReflectHeaderDataField(n.Left()) {
return n
}
// "It is valid both to add and to subtract offsets from a
// pointer in this way. It is also valid to use &^ to round
// pointers, usually for alignment."
- var originals []*Node
- var walk func(n *Node)
- walk = func(n *Node) {
- switch n.Op {
- case OADD:
- walk(n.Left)
- walk(n.Right)
- case OSUB, OANDNOT:
- walk(n.Left)
- case OCONVNOP:
- if n.Left.Type.IsUnsafePtr() {
- n.Left = cheapexpr(n.Left, init)
- originals = append(originals, convnop(n.Left, types.Types[TUNSAFEPTR]))
+ var originals []ir.Node
+ var walk func(n ir.Node)
+ walk = func(n ir.Node) {
+ switch n.Op() {
+ case ir.OADD:
+ walk(n.Left())
+ walk(n.Right())
+ case ir.OSUB, ir.OANDNOT:
+ walk(n.Left())
+ case ir.OCONVNOP:
+ if n.Left().Type().IsUnsafePtr() {
+ n.SetLeft(cheapexpr(n.Left(), init))
+ originals = append(originals, convnop(n.Left(), types.Types[types.TUNSAFEPTR]))
}
}
}
- walk(n.Left)
+ walk(n.Left())
- n = cheapexpr(n, init)
+ cheap := cheapexpr(n, init)
- slice := mkdotargslice(types.NewSlice(types.Types[TUNSAFEPTR]), originals)
- slice.Esc = EscNone
+ slice := mkdotargslice(types.NewSlice(types.Types[types.TUNSAFEPTR]), originals)
+ slice.SetEsc(EscNone)
- init.Append(mkcall("checkptrArithmetic", nil, init, convnop(n, types.Types[TUNSAFEPTR]), slice))
+ init.Append(mkcall("checkptrArithmetic", nil, init, convnop(cheap, types.Types[types.TUNSAFEPTR]), slice))
// TODO(khr): Mark backing store of slice as dead. This will allow us to reuse
// the backing store for multiple calls to checkptrArithmetic.
- return n
+ return cheap
}
// checkPtr reports whether pointer checking should be enabled for
// function fn at a given level. See debugHelpFooter for defined
// levels.
-func checkPtr(fn *Node, level int) bool {
- return Debug_checkptr >= level && fn.Func.Pragma&NoCheckPtr == 0
+func checkPtr(fn *ir.Func, level int) bool {
+ return base.Debug.Checkptr >= level && fn.Pragma&ir.NoCheckPtr == 0
+}
+
+// appendWalkStmt typechecks and walks stmt and then appends it to init.
+func appendWalkStmt(init *ir.Nodes, stmt ir.Node) {
+ op := stmt.Op()
+ n := typecheck(stmt, ctxStmt)
+ if op == ir.OAS || op == ir.OAS2 {
+ // If the assignment has side effects, walkexpr will append them
+ // directly to init for us, while walkstmt will wrap it in an OBLOCK.
+ // We need to append them directly.
+ // TODO(rsc): Clean this up.
+ n = walkexpr(n, init)
+ } else {
+ n = walkstmt(n)
+ }
+ init.Append(n)
}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package ir
type bitset8 uint8
}
}
+func (f bitset8) get2(shift uint8) uint8 {
+ return uint8(f>>shift) & 3
+}
+
+// set2 sets two bits in f using the bottom two bits of b.
+func (f *bitset8) set2(shift uint8, b uint8) {
+ // Clear old bits.
+ *(*uint8)(f) &^= 3 << shift
+ // Set new bits.
+ *(*uint8)(f) |= uint8(b&3) << shift
+}
+
type bitset16 uint16
func (f *bitset16) set(mask uint16, b bool) {
// Code generated by "stringer -type=Class"; DO NOT EDIT.
-package gc
+package ir
import "strconv"
--- /dev/null
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/internal/src"
+)
+
+// A Node may implement the Orig and SetOrig methods to
+// maintain a pointer to the "unrewritten" form of a Node.
+// If a Node does not implement OrigNode, it is its own Orig.
+//
+// Note that both SepCopy and Copy have definitions compatible
+// with a Node that does not implement OrigNode: such a Node
+// is its own Orig, and in that case, that's what both want to return
+// anyway (SepCopy unconditionally, and Copy only when the input
+// is its own Orig as well, but if the output does not implement
+// OrigNode, then neither does the input, making the condition true).
+type OrigNode interface {
+ Node
+ Orig() Node
+ SetOrig(Node)
+}
+
+// Orig returns the “original” node for n.
+// If n implements OrigNode, Orig returns n.Orig().
+// Otherwise Orig returns n itself.
+func Orig(n Node) Node {
+ if n, ok := n.(OrigNode); ok {
+ o := n.Orig()
+ if o == nil {
+ Dump("Orig nil", n)
+ base.Fatalf("Orig returned nil")
+ }
+ return o
+ }
+ return n
+}
+
+// SepCopy returns a separate shallow copy of n,
+// breaking any Orig link to any other nodes.
+func SepCopy(n Node) Node {
+ n = n.copy()
+ if n, ok := n.(OrigNode); ok {
+ n.SetOrig(n)
+ }
+ return n
+}
+
+// Copy returns a shallow copy of n.
+// If Orig(n) == n, then Orig(Copy(n)) == the copy.
+// Otherwise the Orig link is preserved as well.
+//
+// The specific semantics surrounding Orig are subtle but right for most uses.
+// See issues #26855 and #27765 for pitfalls.
+func Copy(n Node) Node {
+ c := n.copy()
+ if n, ok := n.(OrigNode); ok && n.Orig() == n {
+ c.(OrigNode).SetOrig(c)
+ }
+ return c
+}
+
+// DeepCopy returns a “deep” copy of n, with its entire structure copied
+// (except for shared nodes like ONAME, ONONAME, OLITERAL, and OTYPE).
+// If pos.IsKnown(), it sets the source position of newly allocated Nodes to pos.
+func DeepCopy(pos src.XPos, n Node) Node {
+ var edit func(Node) Node
+ edit = func(x Node) Node {
+ switch x.Op() {
+ case OPACK, ONAME, ONONAME, OLITERAL, ONIL, OTYPE:
+ return x
+ }
+ x = Copy(x)
+ if pos.IsKnown() {
+ x.SetPos(pos)
+ }
+ EditChildren(x, edit)
+ return x
+ }
+ return edit(n)
+}
+
+// DeepCopyList returns a list of deep copies (using DeepCopy) of the nodes in list.
+func DeepCopyList(pos src.XPos, list []Node) []Node {
+ var out []Node
+ for _, n := range list {
+ out = append(out, DeepCopy(pos, n))
+ }
+ return out
+}
// for debugging purposes. The code is customized for Node graphs
// and may be used for an alternative view of the node structure.
-package gc
+package ir
import (
- "cmd/compile/internal/types"
- "cmd/internal/src"
"fmt"
"io"
"os"
"reflect"
"regexp"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
)
// dump is like fdump but prints to stderr.
-func dump(root interface{}, filter string, depth int) {
- fdump(os.Stderr, root, filter, depth)
+func DumpAny(root interface{}, filter string, depth int) {
+ FDumpAny(os.Stderr, root, filter, depth)
}
// fdump prints the structure of a rooted data structure
// rather than their type; struct fields with zero values or
// non-matching field names are omitted, and "…" means recursion
// depth has been reached or struct fields have been omitted.
-func fdump(w io.Writer, root interface{}, filter string, depth int) {
+func FDumpAny(w io.Writer, root interface{}, filter string, depth int) {
if root == nil {
fmt.Fprintln(w, "nil")
return
return
}
- // special cases
- switch v := x.Interface().(type) {
- case Nodes:
- // unpack Nodes since reflect cannot look inside
- // due to the unexported field in its struct
- x = reflect.ValueOf(v.Slice())
-
- case src.XPos:
- p.printf("%s", linestr(v))
+ if pos, ok := x.Interface().(src.XPos); ok {
+ p.printf("%s", base.FmtPos(pos))
return
-
- case *types.Node:
- x = reflect.ValueOf(asNode(v))
}
switch x.Kind() {
isNode := false
if n, ok := x.Interface().(Node); ok {
isNode = true
- p.printf("%s %s {", n.Op.String(), p.addr(x))
+ p.printf("%s %s {", n.Op().String(), p.addr(x))
} else {
p.printf("%s {", typ)
}
--- /dev/null
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "go/constant"
+)
+
+// maybeDo calls do(x) if x is non-nil and no earlier error occurred;
+// otherwise it returns err unchanged. It lets visitor code chain calls
+// while short-circuiting after the first error.
+func maybeDo(x Node, err error, do func(Node) error) error {
+ if x != nil && err == nil {
+ err = do(x)
+ }
+ return err
+}
+
+// maybeDoList is like maybeDo but applies do to every node in x
+// (via DoList), again short-circuiting if err is already non-nil.
+func maybeDoList(x Nodes, err error, do func(Node) error) error {
+ if err == nil {
+ err = DoList(x, do)
+ }
+ return err
+}
+
+// maybeEdit applies edit to x, passing a nil x through unchanged.
+func maybeEdit(x Node, edit func(Node) Node) Node {
+ if x == nil {
+ return x
+ }
+ return edit(x)
+}
+
+// An Expr is a Node that can appear as an expression.
+type Expr interface {
+ Node
+ isExpr()
+}
+
+// A miniExpr is a miniNode with extra fields common to expressions.
+// TODO(rsc): Once we are sure about the contents, compact the bools
+// into a bit field and leave extra bits available for implementations
+// embedding miniExpr. Right now there are ~60 unused bits sitting here.
+type miniExpr struct {
+ miniNode
+ typ *types.Type
+ init Nodes // TODO(rsc): Don't require every Node to have an init
+ opt interface{} // TODO(rsc): Don't require every Node to have an opt?
+ flags bitset8
+}
+
+// Flag bits stored in miniExpr.flags.
+const (
+ miniExprHasCall = 1 << iota
+ miniExprNonNil
+ miniExprTransient
+ miniExprBounded
+ miniExprImplicit // for use by implementations; not supported by every Expr
+)
+
+// isExpr marks miniExpr (and every type embedding it) as satisfying Expr.
+func (*miniExpr) isExpr() {}
+
+func (n *miniExpr) Type() *types.Type { return n.typ }
+func (n *miniExpr) SetType(x *types.Type) { n.typ = x }
+func (n *miniExpr) Opt() interface{} { return n.opt }
+func (n *miniExpr) SetOpt(x interface{}) { n.opt = x }
+func (n *miniExpr) HasCall() bool { return n.flags&miniExprHasCall != 0 }
+func (n *miniExpr) SetHasCall(b bool) { n.flags.set(miniExprHasCall, b) }
+func (n *miniExpr) NonNil() bool { return n.flags&miniExprNonNil != 0 }
+func (n *miniExpr) MarkNonNil() { n.flags |= miniExprNonNil }
+func (n *miniExpr) Transient() bool { return n.flags&miniExprTransient != 0 }
+func (n *miniExpr) SetTransient(b bool) { n.flags.set(miniExprTransient, b) }
+func (n *miniExpr) Bounded() bool { return n.flags&miniExprBounded != 0 }
+func (n *miniExpr) SetBounded(b bool) { n.flags.set(miniExprBounded, b) }
+func (n *miniExpr) Init() Nodes { return n.init }
+func (n *miniExpr) PtrInit() *Nodes { return &n.init }
+func (n *miniExpr) SetInit(x Nodes) { n.init = x }
+
+// toNtype asserts x to Ntype. A nil x passes through as a nil Ntype.
+// If the assertion is about to fail, the node is dumped first so the
+// offending structure is visible before the panic from the final assert.
+func toNtype(x Node) Ntype {
+ if x == nil {
+ return nil
+ }
+ if _, ok := x.(Ntype); !ok {
+ Dump("not Ntype", x)
+ }
+ return x.(Ntype)
+}
+
+// An AddStringExpr is a string concatenation Expr[0] + Exprs[1] + ... + Expr[len(Expr)-1].
+type AddStringExpr struct {
+ miniExpr
+ List_ Nodes // operands; underscore suffix avoids clashing with the List method
+ Prealloc *Name
+}
+
+// NewAddStringExpr returns a new OADDSTR node concatenating list.
+func NewAddStringExpr(pos src.XPos, list []Node) *AddStringExpr {
+ n := &AddStringExpr{}
+ n.pos = pos
+ n.op = OADDSTR
+ n.List_.Set(list)
+ return n
+}
+
+func (n *AddStringExpr) List() Nodes { return n.List_ }
+func (n *AddStringExpr) PtrList() *Nodes { return &n.List_ }
+func (n *AddStringExpr) SetList(x Nodes) { n.List_ = x }
+
+// An AddrExpr is an address-of expression &X.
+// It may end up being a normal address-of or an allocation of a composite literal.
+type AddrExpr struct {
+ miniExpr
+ X Node
+ Alloc Node // preallocated storage if any
+}
+
+// NewAddrExpr returns a new OADDR node taking the address of x.
+func NewAddrExpr(pos src.XPos, x Node) *AddrExpr {
+ n := &AddrExpr{X: x}
+ n.op = OADDR
+ n.pos = pos
+ return n
+}
+
+func (n *AddrExpr) Left() Node { return n.X }
+func (n *AddrExpr) SetLeft(x Node) { n.X = x }
+func (n *AddrExpr) Right() Node { return n.Alloc }
+func (n *AddrExpr) SetRight(x Node) { n.Alloc = x }
+func (n *AddrExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
+func (n *AddrExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
+
+// SetOp changes n's op, panicking unless op is one of the
+// address-of variants this node type can legally hold.
+func (n *AddrExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OADDR, OPTRLIT:
+ n.op = op
+ }
+}
+
+// A BasicLit is a literal of basic type.
+type BasicLit struct {
+ miniExpr
+ val constant.Value
+}
+
+// NewBasicLit returns a new OLITERAL node holding val.
+// For a known constant kind, the node's type is set to the
+// corresponding ideal (untyped) type.
+func NewBasicLit(pos src.XPos, val constant.Value) Node {
+ n := &BasicLit{val: val}
+ n.op = OLITERAL
+ n.pos = pos
+ if k := val.Kind(); k != constant.Unknown {
+ n.SetType(idealType(k))
+ }
+ return n
+}
+
+func (n *BasicLit) Val() constant.Value { return n.val }
+func (n *BasicLit) SetVal(val constant.Value) { n.val = val }
+
+// A BinaryExpr is a binary expression X Op Y,
+// or Op(X, Y) for builtin functions that do not become calls.
+type BinaryExpr struct {
+ miniExpr
+ X Node
+ Y Node
+}
+
+// NewBinaryExpr returns a new binary expression x op y.
+// op must be valid for BinaryExpr (see SetOp) or the call panics.
+func NewBinaryExpr(pos src.XPos, op Op, x, y Node) *BinaryExpr {
+ n := &BinaryExpr{X: x, Y: y}
+ n.pos = pos
+ n.SetOp(op)
+ return n
+}
+
+func (n *BinaryExpr) Left() Node { return n.X }
+func (n *BinaryExpr) SetLeft(x Node) { n.X = x }
+func (n *BinaryExpr) Right() Node { return n.Y }
+func (n *BinaryExpr) SetRight(y Node) { n.Y = y }
+
+// SetOp changes n's op, panicking unless op is one of the
+// binary operators (or two-operand builtins) listed below.
+func (n *BinaryExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OADD, OADDSTR, OAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE,
+ OLSH, OLT, OMOD, OMUL, ONE, OOR, ORSH, OSUB, OXOR,
+ OCOPY, OCOMPLEX,
+ OEFACE:
+ n.op = op
+ }
+}
+
+// A CallUse records how the result of the call is used:
+type CallUse int
+
+const (
+ _ CallUse = iota
+
+ CallUseExpr // single expression result is used
+ CallUseList // list of results are used
+ CallUseStmt // results not used - call is a statement
+)
+
+// A CallExpr is a function call X(Args).
+type CallExpr struct {
+ miniExpr
+ orig Node
+ X Node
+ Args Nodes
+ Rargs Nodes // TODO(rsc): Delete.
+ Body_ Nodes // TODO(rsc): Delete.
+ DDD bool
+ Use CallUse
+ NoInline_ bool
+}
+
+// NewCallExpr returns a new call expression fun(args) with the given op.
+// The new node is its own orig.
+func NewCallExpr(pos src.XPos, op Op, fun Node, args []Node) *CallExpr {
+ n := &CallExpr{X: fun}
+ n.pos = pos
+ n.orig = n
+ n.SetOp(op)
+ n.Args.Set(args)
+ return n
+}
+
+// isStmt marks CallExpr as also usable where a statement is expected
+// (a call may appear as an expression or as a statement).
+func (*CallExpr) isStmt() {}
+
+func (n *CallExpr) Orig() Node { return n.orig }
+func (n *CallExpr) SetOrig(x Node) { n.orig = x }
+func (n *CallExpr) Left() Node { return n.X }
+func (n *CallExpr) SetLeft(x Node) { n.X = x }
+func (n *CallExpr) List() Nodes { return n.Args }
+func (n *CallExpr) PtrList() *Nodes { return &n.Args }
+func (n *CallExpr) SetList(x Nodes) { n.Args = x }
+func (n *CallExpr) Rlist() Nodes { return n.Rargs }
+func (n *CallExpr) PtrRlist() *Nodes { return &n.Rargs }
+func (n *CallExpr) SetRlist(x Nodes) { n.Rargs = x }
+func (n *CallExpr) IsDDD() bool { return n.DDD }
+func (n *CallExpr) SetIsDDD(x bool) { n.DDD = x }
+func (n *CallExpr) NoInline() bool { return n.NoInline_ }
+func (n *CallExpr) SetNoInline(x bool) { n.NoInline_ = x }
+func (n *CallExpr) Body() Nodes { return n.Body_ }
+func (n *CallExpr) PtrBody() *Nodes { return &n.Body_ }
+func (n *CallExpr) SetBody(x Nodes) { n.Body_ = x }
+
+// SetOp changes n's op, panicking unless op is a call variant
+// or a builtin that is represented as a call.
+func (n *CallExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH,
+ OAPPEND, ODELETE, OGETG, OMAKE, OPRINT, OPRINTN, ORECOVER:
+ n.op = op
+ }
+}
+
+// A CallPartExpr is a method expression X.Method (uncalled).
+type CallPartExpr struct {
+ miniExpr
+ Func_ *Func
+ X Node
+ Method *types.Field
+ Prealloc *Name
+}
+
+// NewCallPartExpr returns a new OCALLPART node for x.method,
+// backed by the wrapper function fn; the node's type is fn's type.
+func NewCallPartExpr(pos src.XPos, x Node, method *types.Field, fn *Func) *CallPartExpr {
+ n := &CallPartExpr{Func_: fn, X: x, Method: method}
+ n.op = OCALLPART
+ n.pos = pos
+ n.typ = fn.Type()
+ n.Func_ = fn
+ return n
+}
+
+func (n *CallPartExpr) Func() *Func { return n.Func_ }
+func (n *CallPartExpr) Left() Node { return n.X }
+func (n *CallPartExpr) Sym() *types.Sym { return n.Method.Sym }
+func (n *CallPartExpr) SetLeft(x Node) { n.X = x }
+
+// A ClosureExpr is a function literal expression.
+type ClosureExpr struct {
+ miniExpr
+ Func_ *Func
+ Prealloc *Name
+}
+
+// NewClosureExpr returns a new OCLOSURE node for the function literal fn.
+func NewClosureExpr(pos src.XPos, fn *Func) *ClosureExpr {
+ n := &ClosureExpr{Func_: fn}
+ n.op = OCLOSURE
+ n.pos = pos
+ return n
+}
+
+func (n *ClosureExpr) Func() *Func { return n.Func_ }
+
+// A ClosureRead denotes reading a variable stored within a closure struct.
+type ClosureReadExpr struct {
+ miniExpr
+ Offset_ int64
+}
+
+// NewClosureRead returns a new OCLOSUREREAD node of the given type
+// reading the closure variable at the given byte offset.
+// Note: no pos parameter; the node keeps the zero position.
+func NewClosureRead(typ *types.Type, offset int64) *ClosureReadExpr {
+ n := &ClosureReadExpr{Offset_: offset}
+ n.typ = typ
+ n.op = OCLOSUREREAD
+ return n
+}
+
+func (n *ClosureReadExpr) Type() *types.Type { return n.typ }
+func (n *ClosureReadExpr) Offset() int64 { return n.Offset_ }
+
+// A CompLitExpr is a composite literal Type{Vals}.
+// Before type-checking, the type is Ntype.
+type CompLitExpr struct {
+ miniExpr
+ orig Node
+ Ntype Ntype
+ List_ Nodes // initialized values
+ Prealloc *Name
+ Len int64 // backing array length for OSLICELIT
+}
+
+// NewCompLitExpr returns a new composite-literal node typ{list...}
+// with the given op. The new node is its own orig.
+func NewCompLitExpr(pos src.XPos, op Op, typ Ntype, list []Node) *CompLitExpr {
+ n := &CompLitExpr{Ntype: typ}
+ n.pos = pos
+ n.SetOp(op)
+ n.List_.Set(list)
+ n.orig = n
+ return n
+}
+
+func (n *CompLitExpr) Orig() Node { return n.orig }
+func (n *CompLitExpr) SetOrig(x Node) { n.orig = x }
+func (n *CompLitExpr) Right() Node { return n.Ntype }
+func (n *CompLitExpr) SetRight(x Node) { n.Ntype = toNtype(x) }
+func (n *CompLitExpr) List() Nodes { return n.List_ }
+func (n *CompLitExpr) PtrList() *Nodes { return &n.List_ }
+func (n *CompLitExpr) SetList(x Nodes) { n.List_ = x }
+func (n *CompLitExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
+func (n *CompLitExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
+
+// SetOp changes n's op, panicking unless op is a composite-literal variant.
+func (n *CompLitExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OARRAYLIT, OCOMPLIT, OMAPLIT, OSTRUCTLIT, OSLICELIT:
+ n.op = op
+ }
+}
+
+// A ConstExpr is an OLITERAL that remembers the expression (orig)
+// it was folded from; position, type, typecheck state, and diag state
+// are copied from orig.
+type ConstExpr struct {
+ miniExpr
+ val constant.Value
+ orig Node
+}
+
+// NewConstExpr returns a new OLITERAL node with value val,
+// inheriting pos/type/typecheck/diag from orig.
+func NewConstExpr(val constant.Value, orig Node) Node {
+ n := &ConstExpr{orig: orig, val: val}
+ n.op = OLITERAL
+ n.pos = orig.Pos()
+ n.SetType(orig.Type())
+ n.SetTypecheck(orig.Typecheck())
+ n.SetDiag(orig.Diag())
+ return n
+}
+
+func (n *ConstExpr) Sym() *types.Sym { return n.orig.Sym() }
+func (n *ConstExpr) Orig() Node { return n.orig }
+func (n *ConstExpr) SetOrig(orig Node) { panic(n.no("SetOrig")) }
+func (n *ConstExpr) Val() constant.Value { return n.val }
+
+// A ConvExpr is a conversion Type(X).
+// It may end up being a value or a type.
+type ConvExpr struct {
+ miniExpr
+ X Node
+}
+
+// NewConvExpr returns a new conversion node typ(x) with the given op.
+func NewConvExpr(pos src.XPos, op Op, typ *types.Type, x Node) *ConvExpr {
+ n := &ConvExpr{X: x}
+ n.pos = pos
+ n.typ = typ
+ n.SetOp(op)
+ return n
+}
+
+func (n *ConvExpr) Left() Node { return n.X }
+func (n *ConvExpr) SetLeft(x Node) { n.X = x }
+func (n *ConvExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
+func (n *ConvExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
+
+// SetOp changes n's op, panicking unless op is a conversion variant.
+func (n *ConvExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OCONV, OCONVIFACE, OCONVNOP, OBYTES2STR, OBYTES2STRTMP, ORUNES2STR, OSTR2BYTES, OSTR2BYTESTMP, OSTR2RUNES, ORUNESTR:
+ n.op = op
+ }
+}
+
+// An IndexExpr is an index expression X[Y].
+type IndexExpr struct {
+ miniExpr
+ X Node
+ Index Node
+ Assigned bool
+}
+
+// NewIndexExpr returns a new OINDEX node x[index].
+func NewIndexExpr(pos src.XPos, x, index Node) *IndexExpr {
+ n := &IndexExpr{X: x, Index: index}
+ n.pos = pos
+ n.op = OINDEX
+ return n
+}
+
+func (n *IndexExpr) Left() Node { return n.X }
+func (n *IndexExpr) SetLeft(x Node) { n.X = x }
+func (n *IndexExpr) Right() Node { return n.Index }
+func (n *IndexExpr) SetRight(y Node) { n.Index = y }
+func (n *IndexExpr) IndexMapLValue() bool { return n.Assigned }
+func (n *IndexExpr) SetIndexMapLValue(x bool) { n.Assigned = x }
+
+// SetOp changes n's op, panicking unless op is OINDEX or OINDEXMAP.
+func (n *IndexExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OINDEX, OINDEXMAP:
+ n.op = op
+ }
+}
+
+// A KeyExpr is a Key: Value composite literal key.
+type KeyExpr struct {
+ miniExpr
+ Key Node
+ Value Node
+}
+
+// NewKeyExpr returns a new OKEY node key: value.
+func NewKeyExpr(pos src.XPos, key, value Node) *KeyExpr {
+ n := &KeyExpr{Key: key, Value: value}
+ n.pos = pos
+ n.op = OKEY
+ return n
+}
+
+func (n *KeyExpr) Left() Node { return n.Key }
+func (n *KeyExpr) SetLeft(x Node) { n.Key = x }
+func (n *KeyExpr) Right() Node { return n.Value }
+func (n *KeyExpr) SetRight(y Node) { n.Value = y }
+
+// A StructKeyExpr is an Field: Value composite literal key.
+type StructKeyExpr struct {
+ miniExpr
+ Field *types.Sym
+ Value Node
+ Offset_ int64
+}
+
+// NewStructKeyExpr returns a new OSTRUCTKEY node field: value.
+// The field offset starts as BADWIDTH until layout assigns it.
+func NewStructKeyExpr(pos src.XPos, field *types.Sym, value Node) *StructKeyExpr {
+ n := &StructKeyExpr{Field: field, Value: value}
+ n.pos = pos
+ n.op = OSTRUCTKEY
+ n.Offset_ = types.BADWIDTH
+ return n
+}
+
+func (n *StructKeyExpr) Sym() *types.Sym { return n.Field }
+func (n *StructKeyExpr) SetSym(x *types.Sym) { n.Field = x }
+func (n *StructKeyExpr) Left() Node { return n.Value }
+func (n *StructKeyExpr) SetLeft(x Node) { n.Value = x }
+func (n *StructKeyExpr) Offset() int64 { return n.Offset_ }
+func (n *StructKeyExpr) SetOffset(x int64) { n.Offset_ = x }
+
+// An InlinedCallExpr is an inlined function call.
+type InlinedCallExpr struct {
+ miniExpr
+ Body_ Nodes
+ ReturnVars Nodes
+}
+
+// NewInlinedCallExpr returns a new OINLCALL node with the given
+// inlined body and result variables.
+func NewInlinedCallExpr(pos src.XPos, body, retvars []Node) *InlinedCallExpr {
+ n := &InlinedCallExpr{}
+ n.pos = pos
+ n.op = OINLCALL
+ n.Body_.Set(body)
+ n.ReturnVars.Set(retvars)
+ return n
+}
+
+func (n *InlinedCallExpr) Body() Nodes { return n.Body_ }
+func (n *InlinedCallExpr) PtrBody() *Nodes { return &n.Body_ }
+func (n *InlinedCallExpr) SetBody(x Nodes) { n.Body_ = x }
+func (n *InlinedCallExpr) Rlist() Nodes { return n.ReturnVars }
+func (n *InlinedCallExpr) PtrRlist() *Nodes { return &n.ReturnVars }
+func (n *InlinedCallExpr) SetRlist(x Nodes) { n.ReturnVars = x }
+
+// A LogicalExpr is a expression X Op Y where Op is && or ||.
+// It is separate from BinaryExpr to make room for statements
+// that must be executed before Y but after X.
+type LogicalExpr struct {
+ miniExpr
+ X Node
+ Y Node
+}
+
+// NewLogicalExpr returns a new short-circuit expression x op y,
+// where op must be OANDAND or OOROR.
+func NewLogicalExpr(pos src.XPos, op Op, x, y Node) *LogicalExpr {
+ n := &LogicalExpr{X: x, Y: y}
+ n.pos = pos
+ n.SetOp(op)
+ return n
+}
+
+func (n *LogicalExpr) Left() Node { return n.X }
+func (n *LogicalExpr) SetLeft(x Node) { n.X = x }
+func (n *LogicalExpr) Right() Node { return n.Y }
+func (n *LogicalExpr) SetRight(y Node) { n.Y = y }
+
+// SetOp changes n's op, panicking unless op is OANDAND or OOROR.
+func (n *LogicalExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OANDAND, OOROR:
+ n.op = op
+ }
+}
+
+// A MakeExpr is a make expression: make(Type[, Len[, Cap]]).
+// Op is OMAKECHAN, OMAKEMAP, OMAKESLICE, or OMAKESLICECOPY,
+// but *not* OMAKE (that's a pre-typechecking CallExpr).
+type MakeExpr struct {
+ miniExpr
+ Len Node
+ Cap Node
+}
+
+// NewMakeExpr returns a new make expression with the given op,
+// length, and capacity operands.
+func NewMakeExpr(pos src.XPos, op Op, len, cap Node) *MakeExpr {
+ n := &MakeExpr{Len: len, Cap: cap}
+ n.pos = pos
+ n.SetOp(op)
+ return n
+}
+
+func (n *MakeExpr) Left() Node { return n.Len }
+func (n *MakeExpr) SetLeft(x Node) { n.Len = x }
+func (n *MakeExpr) Right() Node { return n.Cap }
+func (n *MakeExpr) SetRight(x Node) { n.Cap = x }
+
+// SetOp changes n's op, panicking unless op is a typed make variant.
+func (n *MakeExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OMAKECHAN, OMAKEMAP, OMAKESLICE, OMAKESLICECOPY:
+ n.op = op
+ }
+}
+
+// A MethodExpr is a method expression T.M (where T is a type).
+type MethodExpr struct {
+ miniExpr
+ T *types.Type
+ Method *types.Field
+ FuncName_ *Name
+}
+
+// NewMethodExpr returns a new OMETHEXPR node for t.method.
+func NewMethodExpr(pos src.XPos, t *types.Type, method *types.Field) *MethodExpr {
+ n := &MethodExpr{T: t, Method: method}
+ n.pos = pos
+ n.op = OMETHEXPR
+ return n
+}
+
+func (n *MethodExpr) FuncName() *Name { return n.FuncName_ }
+
+// The generic accessors below do not apply to method expressions
+// and panic if called.
+func (n *MethodExpr) Left() Node { panic("MethodExpr.Left") }
+func (n *MethodExpr) SetLeft(x Node) { panic("MethodExpr.SetLeft") }
+func (n *MethodExpr) Right() Node { panic("MethodExpr.Right") }
+func (n *MethodExpr) SetRight(x Node) { panic("MethodExpr.SetRight") }
+func (n *MethodExpr) Sym() *types.Sym { panic("MethodExpr.Sym") }
+func (n *MethodExpr) Offset() int64 { panic("MethodExpr.Offset") }
+func (n *MethodExpr) SetOffset(x int64) { panic("MethodExpr.SetOffset") }
+func (n *MethodExpr) Class() Class { panic("MethodExpr.Class") }
+func (n *MethodExpr) SetClass(x Class) { panic("MethodExpr.SetClass") }
+
+// A NilExpr represents the predefined untyped constant nil.
+// (It may be copied and assigned a type, though.)
+type NilExpr struct {
+ miniExpr
+ Sym_ *types.Sym // TODO: Remove
+}
+
+// NewNilExpr returns a new ONIL node.
+func NewNilExpr(pos src.XPos) *NilExpr {
+ n := &NilExpr{}
+ n.pos = pos
+ n.op = ONIL
+ return n
+}
+
+func (n *NilExpr) Sym() *types.Sym { return n.Sym_ }
+func (n *NilExpr) SetSym(x *types.Sym) { n.Sym_ = x }
+
+// A ParenExpr is a parenthesized expression (X).
+// It may end up being a value or a type.
+type ParenExpr struct {
+ miniExpr
+ X Node
+}
+
+// NewParenExpr returns a new OPAREN node (x).
+func NewParenExpr(pos src.XPos, x Node) *ParenExpr {
+ n := &ParenExpr{X: x}
+ n.op = OPAREN
+ n.pos = pos
+ return n
+}
+
+func (n *ParenExpr) Left() Node { return n.X }
+func (n *ParenExpr) SetLeft(x Node) { n.X = x }
+func (n *ParenExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
+func (n *ParenExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
+
+// CanBeNtype marks ParenExpr as usable where a type syntax node is expected.
+func (*ParenExpr) CanBeNtype() {}
+
+// SetOTYPE changes n to be an OTYPE node returning t,
+// like all the type nodes in type.go.
+func (n *ParenExpr) SetOTYPE(t *types.Type) {
+ n.op = OTYPE
+ n.typ = t
+ t.SetNod(n)
+}
+
+// A ResultExpr represents a direct access to a result slot on the stack frame.
+type ResultExpr struct {
+ miniExpr
+ Offset_ int64
+}
+
+// NewResultExpr returns a new ORESULT node of the given type
+// reading the result slot at the given frame offset.
+func NewResultExpr(pos src.XPos, typ *types.Type, offset int64) *ResultExpr {
+ n := &ResultExpr{Offset_: offset}
+ n.pos = pos
+ n.op = ORESULT
+ n.typ = typ
+ return n
+}
+
+func (n *ResultExpr) Offset() int64 { return n.Offset_ }
+func (n *ResultExpr) SetOffset(x int64) { n.Offset_ = x }
+
+// A NameOffsetExpr refers to an offset within a variable.
+// It is like a SelectorExpr but without the field name.
+type NameOffsetExpr struct {
+ miniExpr
+ Name_ *Name
+ Offset_ int64
+}
+
+// NewNameOffsetExpr returns a new ONAMEOFFSET node referring to
+// the given byte offset within name, with the given type.
+// Note: no pos parameter is stored; the node keeps the zero position.
+func NewNameOffsetExpr(pos src.XPos, name *Name, offset int64, typ *types.Type) *NameOffsetExpr {
+ n := &NameOffsetExpr{Name_: name, Offset_: offset}
+ n.typ = typ
+ n.op = ONAMEOFFSET
+ return n
+}
+
+// A SelectorExpr is a selector expression X.Sym.
+type SelectorExpr struct {
+ miniExpr
+ X Node
+ Sel *types.Sym
+ Offset_ int64
+ Selection *types.Field
+}
+
+// NewSelectorExpr returns a new selector node x.sel with the given op.
+// The field offset starts as BADWIDTH until layout assigns it.
+func NewSelectorExpr(pos src.XPos, op Op, x Node, sel *types.Sym) *SelectorExpr {
+ n := &SelectorExpr{X: x, Sel: sel}
+ n.pos = pos
+ n.Offset_ = types.BADWIDTH
+ n.SetOp(op)
+ return n
+}
+
+// SetOp changes n's op, panicking unless op is a selector variant.
+func (n *SelectorExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case ODOT, ODOTPTR, ODOTMETH, ODOTINTER, OXDOT:
+ n.op = op
+ }
+}
+
+func (n *SelectorExpr) Left() Node { return n.X }
+func (n *SelectorExpr) SetLeft(x Node) { n.X = x }
+func (n *SelectorExpr) Sym() *types.Sym { return n.Sel }
+func (n *SelectorExpr) SetSym(x *types.Sym) { n.Sel = x }
+func (n *SelectorExpr) Offset() int64 { return n.Offset_ }
+func (n *SelectorExpr) SetOffset(x int64) { n.Offset_ = x }
+func (n *SelectorExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
+func (n *SelectorExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
+
+// Before type-checking, bytes.Buffer is a SelectorExpr.
+// After type-checking it becomes a Name.
+func (*SelectorExpr) CanBeNtype() {}
+
+// A SliceExpr is a slice expression X[Low:High] or X[Low:High:Max].
+type SliceExpr struct {
+ miniExpr
+ X Node
+ List_ Nodes // TODO(rsc): Use separate Nodes
+}
+
+// NewSliceExpr returns a new slice expression over x with the given op.
+// Bounds are set separately via SetSliceBounds.
+func NewSliceExpr(pos src.XPos, op Op, x Node) *SliceExpr {
+ n := &SliceExpr{X: x}
+ n.pos = pos
+ n.op = op
+ return n
+}
+
+func (n *SliceExpr) Left() Node { return n.X }
+func (n *SliceExpr) SetLeft(x Node) { n.X = x }
+func (n *SliceExpr) List() Nodes { return n.List_ }
+func (n *SliceExpr) PtrList() *Nodes { return &n.List_ }
+func (n *SliceExpr) SetList(x Nodes) { n.List_ = x }
+
+// SetOp changes n's op, panicking unless op is a slicing variant.
+func (n *SliceExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR:
+ n.op = op
+ }
+}
+
+// SliceBounds returns n's slice bounds: low, high, and max in expr[low:high:max].
+// n must be a slice expression. max is nil if n is a simple slice expression.
+func (n *SliceExpr) SliceBounds() (low, high, max Node) {
+ if n.List_.Len() == 0 {
+ // No bounds stored at all: all three are absent.
+ return nil, nil, nil
+ }
+
+ switch n.Op() {
+ case OSLICE, OSLICEARR, OSLICESTR:
+ s := n.List_.Slice()
+ return s[0], s[1], nil
+ case OSLICE3, OSLICE3ARR:
+ s := n.List_.Slice()
+ return s[0], s[1], s[2]
+ }
+ base.Fatalf("SliceBounds op %v: %v", n.Op(), n)
+ return nil, nil, nil
+}
+
+// SetSliceBounds sets n's slice bounds, where n is a slice expression.
+// n must be a slice expression. If max is non-nil, n must be a full slice expression.
+func (n *SliceExpr) SetSliceBounds(low, high, max Node) {
+ switch n.Op() {
+ case OSLICE, OSLICEARR, OSLICESTR:
+ if max != nil {
+ base.Fatalf("SetSliceBounds %v given three bounds", n.Op())
+ }
+ s := n.List_.Slice()
+ if s == nil {
+ // Avoid allocating the list when all bounds are absent.
+ if low == nil && high == nil {
+ return
+ }
+ n.List_.Set2(low, high)
+ return
+ }
+ s[0] = low
+ s[1] = high
+ return
+ case OSLICE3, OSLICE3ARR:
+ s := n.List_.Slice()
+ if s == nil {
+ // Avoid allocating the list when all bounds are absent.
+ if low == nil && high == nil && max == nil {
+ return
+ }
+ n.List_.Set3(low, high, max)
+ return
+ }
+ s[0] = low
+ s[1] = high
+ s[2] = max
+ return
+ }
+ base.Fatalf("SetSliceBounds op %v: %v", n.Op(), n)
+}
+
+// IsSlice3 reports whether o is a slice3 op (OSLICE3, OSLICE3ARR).
+// o must be a slicing op.
+func (o Op) IsSlice3() bool {
+ switch o {
+ case OSLICE, OSLICEARR, OSLICESTR:
+ return false
+ case OSLICE3, OSLICE3ARR:
+ return true
+ }
+ base.Fatalf("IsSlice3 op %v", o)
+ return false
+}
+
+// A SliceHeader expression constructs a slice header from its parts.
+type SliceHeaderExpr struct {
+ miniExpr
+ Ptr Node
+ LenCap_ Nodes // TODO(rsc): Split into two Node fields
+}
+
+// NewSliceHeaderExpr returns a new OSLICEHEADER node of the given
+// slice type assembled from ptr, len, and cap.
+func NewSliceHeaderExpr(pos src.XPos, typ *types.Type, ptr, len, cap Node) *SliceHeaderExpr {
+ n := &SliceHeaderExpr{Ptr: ptr}
+ n.pos = pos
+ n.op = OSLICEHEADER
+ n.typ = typ
+ n.LenCap_.Set2(len, cap)
+ return n
+}
+
+func (n *SliceHeaderExpr) Left() Node { return n.Ptr }
+func (n *SliceHeaderExpr) SetLeft(x Node) { n.Ptr = x }
+func (n *SliceHeaderExpr) List() Nodes { return n.LenCap_ }
+func (n *SliceHeaderExpr) PtrList() *Nodes { return &n.LenCap_ }
+func (n *SliceHeaderExpr) SetList(x Nodes) { n.LenCap_ = x }
+
+// A StarExpr is a dereference expression *X.
+// It may end up being a value or a type.
+type StarExpr struct {
+ miniExpr
+ X Node
+}
+
+// NewStarExpr returns a new ODEREF node *x.
+func NewStarExpr(pos src.XPos, x Node) *StarExpr {
+ n := &StarExpr{X: x}
+ n.op = ODEREF
+ n.pos = pos
+ return n
+}
+
+func (n *StarExpr) Left() Node { return n.X }
+func (n *StarExpr) SetLeft(x Node) { n.X = x }
+func (n *StarExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
+func (n *StarExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
+
+// CanBeNtype marks StarExpr as usable where a type syntax node is expected.
+func (*StarExpr) CanBeNtype() {}
+
+// SetOTYPE changes n to be an OTYPE node returning t,
+// like all the type nodes in type.go.
+func (n *StarExpr) SetOTYPE(t *types.Type) {
+ n.op = OTYPE
+ n.X = nil
+ n.typ = t
+ t.SetNod(n)
+}
+
+// A TypeAssertionExpr is a selector expression X.(Type).
+// Before type-checking, the type is Ntype.
+type TypeAssertExpr struct {
+ miniExpr
+ X Node
+ Ntype Node // TODO: Should be Ntype, but reused as address of type structure
+ Itab Nodes // Itab[0] is itab
+}
+
+// NewTypeAssertExpr returns a new ODOTTYPE node x.(typ).
+func NewTypeAssertExpr(pos src.XPos, x Node, typ Ntype) *TypeAssertExpr {
+ n := &TypeAssertExpr{X: x, Ntype: typ}
+ n.pos = pos
+ n.op = ODOTTYPE
+ return n
+}
+
+func (n *TypeAssertExpr) Left() Node { return n.X }
+func (n *TypeAssertExpr) SetLeft(x Node) { n.X = x }
+func (n *TypeAssertExpr) Right() Node { return n.Ntype }
+func (n *TypeAssertExpr) SetRight(x Node) { n.Ntype = x } // TODO: toNtype(x)
+func (n *TypeAssertExpr) List() Nodes { return n.Itab }
+func (n *TypeAssertExpr) PtrList() *Nodes { return &n.Itab }
+func (n *TypeAssertExpr) SetList(x Nodes) { n.Itab = x }
+
+// SetOp changes n's op, panicking unless op is ODOTTYPE or ODOTTYPE2.
+func (n *TypeAssertExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case ODOTTYPE, ODOTTYPE2:
+ n.op = op
+ }
+}
+
+// A UnaryExpr is a unary expression Op X,
+// or Op(X) for a builtin function that does not end up being a call.
+type UnaryExpr struct {
+ miniExpr
+ X Node
+}
+
+// NewUnaryExpr returns a new unary expression op x.
+// op must be valid for UnaryExpr (see SetOp) or the call panics.
+func NewUnaryExpr(pos src.XPos, op Op, x Node) *UnaryExpr {
+ n := &UnaryExpr{X: x}
+ n.pos = pos
+ n.SetOp(op)
+ return n
+}
+
+func (n *UnaryExpr) Left() Node { return n.X }
+func (n *UnaryExpr) SetLeft(x Node) { n.X = x }
+
+// SetOp changes n's op, panicking unless op is a unary operator,
+// a one-operand builtin, or a compiler-internal one-operand op.
+func (n *UnaryExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OBITNOT, ONEG, ONOT, OPLUS, ORECV,
+ OALIGNOF, OCAP, OCLOSE, OIMAG, OLEN, ONEW,
+ OOFFSETOF, OPANIC, OREAL, OSIZEOF,
+ OCHECKNIL, OCFUNC, OIDATA, OITAB, ONEWOBJ, OSPTR, OVARDEF, OVARKILL, OVARLIVE:
+ n.op = op
+ }
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "bytes"
+ "fmt"
+ "go/constant"
+ "io"
+ "math"
+ "os"
+ "path/filepath"
+ "reflect"
+ "strings"
+
+ "unicode/utf8"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// Op
+
+// OpNames maps an Op to its Go surface syntax (or a descriptive word
+// for ops with no single-token syntax). Ops missing from the table
+// print via Op.String instead (see GoString).
+var OpNames = []string{
+ OADDR: "&",
+ OADD: "+",
+ OADDSTR: "+",
+ OALIGNOF: "unsafe.Alignof",
+ OANDAND: "&&",
+ OANDNOT: "&^",
+ OAND: "&",
+ OAPPEND: "append",
+ OAS: "=",
+ OAS2: "=",
+ OBREAK: "break",
+ OCALL: "function call", // not actual syntax
+ OCAP: "cap",
+ OCASE: "case",
+ OCLOSE: "close",
+ OCOMPLEX: "complex",
+ OBITNOT: "^",
+ OCONTINUE: "continue",
+ OCOPY: "copy",
+ ODELETE: "delete",
+ ODEFER: "defer",
+ ODIV: "/",
+ OEQ: "==",
+ OFALL: "fallthrough",
+ OFOR: "for",
+ OFORUNTIL: "foruntil", // not actual syntax; used to avoid off-end pointer live on backedge.892
+ OGE: ">=",
+ OGOTO: "goto",
+ OGT: ">",
+ OIF: "if",
+ OIMAG: "imag",
+ OINLMARK: "inlmark",
+ ODEREF: "*",
+ OLEN: "len",
+ OLE: "<=",
+ OLSH: "<<",
+ OLT: "<",
+ OMAKE: "make",
+ ONEG: "-",
+ OMOD: "%",
+ OMUL: "*",
+ ONEW: "new",
+ ONE: "!=",
+ ONOT: "!",
+ OOFFSETOF: "unsafe.Offsetof",
+ OOROR: "||",
+ OOR: "|",
+ OPANIC: "panic",
+ OPLUS: "+",
+ OPRINTN: "println",
+ OPRINT: "print",
+ ORANGE: "range",
+ OREAL: "real",
+ ORECV: "<-",
+ ORECOVER: "recover",
+ ORETURN: "return",
+ ORSH: ">>",
+ OSELECT: "select",
+ OSEND: "<-",
+ OSIZEOF: "unsafe.Sizeof",
+ OSUB: "-",
+ OSWITCH: "switch",
+ OXOR: "^",
+}
+
+// GoString returns the Go syntax for the Op, or else its name.
+func (o Op) GoString() string {
+ if int(o) < len(OpNames) && OpNames[o] != "" {
+ return OpNames[o]
+ }
+ return o.String()
+}
+
+// Format implements formatting for an Op.
+// The valid formats are:
+//
+// %v Go syntax ("+", "<-", "print")
+// %+v Debug syntax ("ADD", "RECV", "PRINT")
+//
+func (o Op) Format(s fmt.State, verb rune) {
+ switch verb {
+ default:
+ // Unknown verb: emit a fmt-style error marker rather than panicking.
+ fmt.Fprintf(s, "%%!%c(Op=%d)", verb, int(o))
+ case 'v':
+ if s.Flag('+') {
+ // %+v is OMUL instead of "*"
+ io.WriteString(s, o.String())
+ return
+ }
+ io.WriteString(s, o.GoString())
+ }
+}
+
+// Node
+
+// FmtNode implements formatting for a Node n.
+// Every Node implementation must define a Format method that calls FmtNode.
+// The valid formats are:
+//
+// %v Go syntax
+// %L Go syntax followed by " (type T)" if type is known.
+// %+v Debug syntax, as in Dump.
+//
+func FmtNode(n Node, s fmt.State, verb rune) {
+ // %+v prints Dump.
+ // Otherwise we print Go syntax.
+ if s.Flag('+') && verb == 'v' {
+ dumpNode(s, n, 1)
+ return
+ }
+
+ if verb != 'v' && verb != 'S' && verb != 'L' {
+ // Unknown verb: emit a fmt-style error marker.
+ fmt.Fprintf(s, "%%!%c(*Node=%p)", verb, n)
+ return
+ }
+
+ if n == nil {
+ fmt.Fprint(s, "<nil>")
+ return
+ }
+
+ t := n.Type()
+ if verb == 'L' && t != nil {
+ if t.Kind() == types.TNIL {
+ fmt.Fprint(s, "nil")
+ } else if n.Op() == ONAME && n.Name().AutoTemp() {
+ // Compiler-generated temporaries print as "T value" rather
+ // than exposing the autotmp name.
+ fmt.Fprintf(s, "%v value", t)
+ } else {
+ fmt.Fprintf(s, "%v (type %v)", n, t)
+ }
+ return
+ }
+
+ // TODO inlining produces expressions with ninits. we can't print these yet.
+
+ // Negative precedence marks statement ops (see OpPrec).
+ if OpPrec[n.Op()] < 0 {
+ stmtFmt(n, s)
+ return
+ }
+
+ exprFmt(n, s, 0)
+}
+
+// OpPrec maps an Op to its printing precedence: 8 binds tightest
+// (primary expressions), down through unary (7), binary operators
+// (6..1), and -1 for statement ops, which are printed by stmtFmt
+// rather than the expression printer.
+var OpPrec = []int{
+ OALIGNOF: 8,
+ OAPPEND: 8,
+ OBYTES2STR: 8,
+ OARRAYLIT: 8,
+ OSLICELIT: 8,
+ ORUNES2STR: 8,
+ OCALLFUNC: 8,
+ OCALLINTER: 8,
+ OCALLMETH: 8,
+ OCALL: 8,
+ OCAP: 8,
+ OCLOSE: 8,
+ OCOMPLIT: 8,
+ OCONVIFACE: 8,
+ OCONVNOP: 8,
+ OCONV: 8,
+ OCOPY: 8,
+ ODELETE: 8,
+ OGETG: 8,
+ OLEN: 8,
+ OLITERAL: 8,
+ OMAKESLICE: 8,
+ OMAKESLICECOPY: 8,
+ OMAKE: 8,
+ OMAPLIT: 8,
+ ONAME: 8,
+ ONEW: 8,
+ ONIL: 8,
+ ONONAME: 8,
+ OOFFSETOF: 8,
+ OPACK: 8,
+ OPANIC: 8,
+ OPAREN: 8,
+ OPRINTN: 8,
+ OPRINT: 8,
+ ORUNESTR: 8,
+ OSIZEOF: 8,
+ OSTR2BYTES: 8,
+ OSTR2RUNES: 8,
+ OSTRUCTLIT: 8,
+ OTARRAY: 8,
+ OTSLICE: 8,
+ OTCHAN: 8,
+ OTFUNC: 8,
+ OTINTER: 8,
+ OTMAP: 8,
+ OTSTRUCT: 8,
+ OINDEXMAP: 8,
+ OINDEX: 8,
+ OSLICE: 8,
+ OSLICESTR: 8,
+ OSLICEARR: 8,
+ OSLICE3: 8,
+ OSLICE3ARR: 8,
+ OSLICEHEADER: 8,
+ ODOTINTER: 8,
+ ODOTMETH: 8,
+ ODOTPTR: 8,
+ ODOTTYPE2: 8,
+ ODOTTYPE: 8,
+ ODOT: 8,
+ OXDOT: 8,
+ OCALLPART: 8,
+ OPLUS: 7,
+ ONOT: 7,
+ OBITNOT: 7,
+ ONEG: 7,
+ OADDR: 7,
+ ODEREF: 7,
+ ORECV: 7,
+ OMUL: 6,
+ ODIV: 6,
+ OMOD: 6,
+ OLSH: 6,
+ ORSH: 6,
+ OAND: 6,
+ OANDNOT: 6,
+ OADD: 5,
+ OSUB: 5,
+ OOR: 5,
+ OXOR: 5,
+ OEQ: 4,
+ OLT: 4,
+ OLE: 4,
+ OGE: 4,
+ OGT: 4,
+ ONE: 4,
+ OSEND: 3,
+ OANDAND: 2,
+ OOROR: 1,
+
+ // Statements handled by stmtfmt
+ OAS: -1,
+ OAS2: -1,
+ OAS2DOTTYPE: -1,
+ OAS2FUNC: -1,
+ OAS2MAPR: -1,
+ OAS2RECV: -1,
+ OASOP: -1,
+ OBLOCK: -1,
+ OBREAK: -1,
+ OCASE: -1,
+ OCONTINUE: -1,
+ ODCL: -1,
+ ODEFER: -1,
+ OFALL: -1,
+ OFOR: -1,
+ OFORUNTIL: -1,
+ OGOTO: -1,
+ OIF: -1,
+ OLABEL: -1,
+ OGO: -1,
+ ORANGE: -1,
+ ORETURN: -1,
+ OSELECT: -1,
+ OSWITCH: -1,
+
+ OEND: 0,
+}
+
+// StmtWithInit reports whether op is a statement with an explicit init list.
+func StmtWithInit(op Op) bool {
+ switch op {
+ case OIF, OFOR, OFORUNTIL, OSWITCH:
+ return true
+ }
+ return false
+}
+
// stmtFmt prints statement node n to s in a compact, mostly single-line
// Go-like syntax. With exportFormat hard-wired to false, several
// constructs (for/range/select/switch) are abbreviated to placeholders
// such as "for loop" instead of being printed in full.
func stmtFmt(n Node, s fmt.State) {
	// NOTE(rsc): This code used to support the text-based
	// which was more aggressive about printing full Go syntax
	// (for example, an actual loop instead of "for loop").
	// The code is preserved for now in case we want to expand
	// any of those shortenings later. Or maybe we will delete
	// the code. But for now, keep it.
	const exportFormat = false

	// some statements allow for an init, but at most one,
	// but we may have an arbitrary number added, eg by typecheck
	// and inlining. If it doesn't fit the syntax, emit an enclosing
	// block starting with the init statements.

	// if we can just say "for" n->ninit; ... then do so
	simpleinit := n.Init().Len() == 1 && n.Init().First().Init().Len() == 0 && StmtWithInit(n.Op())

	// otherwise, print the inits as separate statements
	// (never taken while exportFormat is false)
	complexinit := n.Init().Len() != 0 && !simpleinit && exportFormat

	// but if it was for if/for/switch, put in an extra surrounding block to limit the scope
	extrablock := complexinit && StmtWithInit(n.Op())

	if extrablock {
		fmt.Fprint(s, "{")
	}

	if complexinit {
		fmt.Fprintf(s, " %v; ", n.Init())
	}

	switch n.Op() {
	case ODCL:
		n := n.(*Decl)
		fmt.Fprintf(s, "var %v %v", n.Left().Sym(), n.Left().Type())

	// Don't export "v = <N>" initializing statements, hope they're always
	// preceded by the DCL which will be re-parsed and typechecked to reproduce
	// the "v = <N>" again.
	case OAS:
		n := n.(*AssignStmt)
		if n.Colas() && !complexinit {
			fmt.Fprintf(s, "%v := %v", n.Left(), n.Right())
		} else {
			fmt.Fprintf(s, "%v = %v", n.Left(), n.Right())
		}

	case OASOP:
		n := n.(*AssignOpStmt)
		// Implicit OASOP is the ++/-- form.
		if n.Implicit() {
			if n.SubOp() == OADD {
				fmt.Fprintf(s, "%v++", n.Left())
			} else {
				fmt.Fprintf(s, "%v--", n.Left())
			}
			break
		}

		fmt.Fprintf(s, "%v %v= %v", n.Left(), n.SubOp(), n.Right())

	case OAS2, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
		n := n.(*AssignListStmt)
		if n.Colas() && !complexinit {
			fmt.Fprintf(s, "%.v := %.v", n.List(), n.Rlist())
		} else {
			fmt.Fprintf(s, "%.v = %.v", n.List(), n.Rlist())
		}

	case OBLOCK:
		n := n.(*BlockStmt)
		if n.List().Len() != 0 {
			fmt.Fprintf(s, "%v", n.List())
		}

	case ORETURN:
		n := n.(*ReturnStmt)
		fmt.Fprintf(s, "return %.v", n.List())

	case ORETJMP:
		n := n.(*BranchStmt)
		fmt.Fprintf(s, "retjmp %v", n.Sym())

	case OINLMARK:
		n := n.(*InlineMarkStmt)
		fmt.Fprintf(s, "inlmark %d", n.Offset())

	case OGO:
		n := n.(*GoDeferStmt)
		fmt.Fprintf(s, "go %v", n.Left())

	case ODEFER:
		n := n.(*GoDeferStmt)
		fmt.Fprintf(s, "defer %v", n.Left())

	case OIF:
		n := n.(*IfStmt)
		if simpleinit {
			fmt.Fprintf(s, "if %v; %v { %v }", n.Init().First(), n.Left(), n.Body())
		} else {
			fmt.Fprintf(s, "if %v { %v }", n.Left(), n.Body())
		}
		if n.Rlist().Len() != 0 {
			fmt.Fprintf(s, " else { %v }", n.Rlist())
		}

	case OFOR, OFORUNTIL:
		n := n.(*ForStmt)
		opname := "for"
		if n.Op() == OFORUNTIL {
			opname = "foruntil"
		}
		if !exportFormat { // TODO maybe only if FmtShort, same below
			fmt.Fprintf(s, "%s loop", opname)
			break
		}

		// Full form (unreachable while exportFormat is false).
		fmt.Fprint(s, opname)
		if simpleinit {
			fmt.Fprintf(s, " %v;", n.Init().First())
		} else if n.Right() != nil {
			fmt.Fprint(s, " ;")
		}

		if n.Left() != nil {
			fmt.Fprintf(s, " %v", n.Left())
		}

		if n.Right() != nil {
			fmt.Fprintf(s, "; %v", n.Right())
		} else if simpleinit {
			fmt.Fprint(s, ";")
		}

		if n.Op() == OFORUNTIL && n.List().Len() != 0 {
			fmt.Fprintf(s, "; %v", n.List())
		}

		fmt.Fprintf(s, " { %v }", n.Body())

	case ORANGE:
		n := n.(*RangeStmt)
		if !exportFormat {
			fmt.Fprint(s, "for loop")
			break
		}

		if n.List().Len() == 0 {
			fmt.Fprintf(s, "for range %v { %v }", n.Right(), n.Body())
			break
		}

		fmt.Fprintf(s, "for %.v = range %v { %v }", n.List(), n.Right(), n.Body())

	case OSELECT:
		n := n.(*SelectStmt)
		if !exportFormat {
			fmt.Fprintf(s, "%v statement", n.Op())
			break
		}
		fmt.Fprintf(s, "select { %v }", n.List())

	case OSWITCH:
		n := n.(*SwitchStmt)
		if !exportFormat {
			fmt.Fprintf(s, "%v statement", n.Op())
			break
		}
		fmt.Fprintf(s, "switch")
		if simpleinit {
			fmt.Fprintf(s, " %v;", n.Init().First())
		}
		if n.Left() != nil {
			fmt.Fprintf(s, " %v ", n.Left())
		}
		fmt.Fprintf(s, " { %v }", n.List())

	case OCASE:
		n := n.(*CaseStmt)
		if n.List().Len() != 0 {
			fmt.Fprintf(s, "case %.v", n.List())
		} else {
			fmt.Fprint(s, "default")
		}
		fmt.Fprintf(s, ": %v", n.Body())

	case OBREAK, OCONTINUE, OGOTO, OFALL:
		n := n.(*BranchStmt)
		if n.Sym() != nil {
			fmt.Fprintf(s, "%v %v", n.Op(), n.Sym())
		} else {
			fmt.Fprintf(s, "%v", n.Op())
		}

	case OLABEL:
		n := n.(*LabelStmt)
		fmt.Fprintf(s, "%v: ", n.Sym())
	}

	if extrablock {
		fmt.Fprint(s, "}")
	}
}
+
// exprFmt prints expression node n to s, parenthesizing as needed for a
// surrounding operator context of precedence prec (see OpPrec).
// Implicit conversions/addresses/dereferences and Orig substitutions are
// skipped first, so that the original source form is printed.
func exprFmt(n Node, s fmt.State, prec int) {
	// NOTE(rsc): This code used to support the text-based
	// which was more aggressive about printing full Go syntax
	// (for example, an actual loop instead of "for loop").
	// The code is preserved for now in case we want to expand
	// any of those shortenings later. Or maybe we will delete
	// the code. But for now, keep it.
	const exportFormat = false

	for {
		if n == nil {
			fmt.Fprint(s, "<nil>")
			return
		}

		// We always want the original, if any.
		if o := Orig(n); o != n {
			n = o
			continue
		}

		// Skip implicit operations introduced during typechecking.
		switch nn := n; nn.Op() {
		case OADDR:
			nn := nn.(*AddrExpr)
			if nn.Implicit() {
				n = nn.Left()
				continue
			}
		case ODEREF:
			nn := nn.(*StarExpr)
			if nn.Implicit() {
				n = nn.Left()
				continue
			}
		case OCONV, OCONVNOP, OCONVIFACE:
			nn := nn.(*ConvExpr)
			if nn.Implicit() {
				n = nn.Left()
				continue
			}
		}

		break
	}

	nprec := OpPrec[n.Op()]
	if n.Op() == OTYPE && n.Sym() != nil {
		nprec = 8
	}

	// A tighter-binding context than n's own precedence: parenthesize.
	if prec > nprec {
		fmt.Fprintf(s, "(%v)", n)
		return
	}

	switch n.Op() {
	case OPAREN:
		n := n.(*ParenExpr)
		fmt.Fprintf(s, "(%v)", n.Left())

	case ONIL:
		fmt.Fprint(s, "nil")

	case OLITERAL: // this is a bit of a mess
		if !exportFormat && n.Sym() != nil {
			fmt.Fprint(s, n.Sym())
			return
		}

		needUnparen := false
		if n.Type() != nil && !n.Type().IsUntyped() {
			// Need parens when type begins with what might
			// be misinterpreted as a unary operator: * or <-.
			if n.Type().IsPtr() || (n.Type().IsChan() && n.Type().ChanDir() == types.Crecv) {
				fmt.Fprintf(s, "(%v)(", n.Type())
			} else {
				fmt.Fprintf(s, "%v(", n.Type())
			}
			needUnparen = true
		}

		if n.Type() == types.UntypedRune {
			switch x, ok := constant.Int64Val(n.Val()); {
			case !ok:
				fallthrough
			default:
				fmt.Fprintf(s, "('\\x00' + %v)", n.Val())

			case ' ' <= x && x < utf8.RuneSelf && x != '\\' && x != '\'':
				fmt.Fprintf(s, "'%c'", int(x))

			case 0 <= x && x < 1<<16:
				fmt.Fprintf(s, "'\\u%04x'", uint(int(x)))

			case 0 <= x && x <= utf8.MaxRune:
				fmt.Fprintf(s, "'\\U%08x'", uint64(x))
			}
		} else {
			fmt.Fprint(s, types.FmtConst(n.Val(), s.Flag('#')))
		}

		if needUnparen {
			fmt.Fprintf(s, ")")
		}

	case ODCLFUNC:
		n := n.(*Func)
		if sym := n.Sym(); sym != nil {
			fmt.Fprint(s, sym)
			return
		}
		fmt.Fprintf(s, "<unnamed Func>")

	case ONAME:
		n := n.(*Name)
		// Special case: name used as local variable in export.
		// _ becomes ~b%d internally; print as _ for export
		if !exportFormat && n.Sym() != nil && n.Sym().Name[0] == '~' && n.Sym().Name[1] == 'b' {
			fmt.Fprint(s, "_")
			return
		}
		fallthrough
	case OPACK, ONONAME:
		fmt.Fprint(s, n.Sym())

	case OMETHEXPR:
		n := n.(*MethodExpr)
		fmt.Fprint(s, n.FuncName().Sym())

	case ONAMEOFFSET:
		n := n.(*NameOffsetExpr)
		fmt.Fprintf(s, "(%v)(%v@%d)", n.Type(), n.Name_, n.Offset_)

	case OTYPE:
		if n.Type() == nil && n.Sym() != nil {
			fmt.Fprint(s, n.Sym())
			return
		}
		fmt.Fprintf(s, "%v", n.Type())

	case OTSLICE:
		n := n.(*SliceType)
		if n.DDD {
			fmt.Fprintf(s, "...%v", n.Elem)
		} else {
			fmt.Fprintf(s, "[]%v", n.Elem) // happens before typecheck
		}

	case OTARRAY:
		n := n.(*ArrayType)
		if n.Len == nil {
			fmt.Fprintf(s, "[...]%v", n.Elem)
		} else {
			fmt.Fprintf(s, "[%v]%v", n.Len, n.Elem)
		}

	case OTMAP:
		n := n.(*MapType)
		fmt.Fprintf(s, "map[%v]%v", n.Key, n.Elem)

	case OTCHAN:
		n := n.(*ChanType)
		switch n.Dir {
		case types.Crecv:
			fmt.Fprintf(s, "<-chan %v", n.Elem)

		case types.Csend:
			fmt.Fprintf(s, "chan<- %v", n.Elem)

		default:
			// Parenthesize "chan (<-chan T)" so the element's arrow
			// is not read as belonging to the outer chan.
			if n.Elem != nil && n.Elem.Op() == OTCHAN && n.Elem.(*ChanType).Dir == types.Crecv {
				fmt.Fprintf(s, "chan (%v)", n.Elem)
			} else {
				fmt.Fprintf(s, "chan %v", n.Elem)
			}
		}

	case OTSTRUCT:
		fmt.Fprint(s, "<struct>")

	case OTINTER:
		fmt.Fprint(s, "<inter>")

	case OTFUNC:
		fmt.Fprint(s, "<func>")

	case OCLOSURE:
		n := n.(*ClosureExpr)
		if !exportFormat {
			fmt.Fprint(s, "func literal")
			return
		}
		fmt.Fprintf(s, "%v { %v }", n.Type(), n.Func().Body())

	case OCOMPLIT:
		n := n.(*CompLitExpr)
		if !exportFormat {
			if n.Implicit() {
				fmt.Fprintf(s, "... argument")
				return
			}
			if n.Right() != nil {
				fmt.Fprintf(s, "%v{%s}", n.Right(), ellipsisIf(n.List().Len() != 0))
				return
			}

			fmt.Fprint(s, "composite literal")
			return
		}
		fmt.Fprintf(s, "(%v{ %.v })", n.Right(), n.List())

	case OPTRLIT:
		n := n.(*AddrExpr)
		fmt.Fprintf(s, "&%v", n.Left())

	case OSTRUCTLIT, OARRAYLIT, OSLICELIT, OMAPLIT:
		n := n.(*CompLitExpr)
		if !exportFormat {
			fmt.Fprintf(s, "%v{%s}", n.Type(), ellipsisIf(n.List().Len() != 0))
			return
		}
		fmt.Fprintf(s, "(%v{ %.v })", n.Type(), n.List())

	case OKEY:
		n := n.(*KeyExpr)
		if n.Left() != nil && n.Right() != nil {
			fmt.Fprintf(s, "%v:%v", n.Left(), n.Right())
			return
		}

		if n.Left() == nil && n.Right() != nil {
			fmt.Fprintf(s, ":%v", n.Right())
			return
		}
		if n.Left() != nil && n.Right() == nil {
			fmt.Fprintf(s, "%v:", n.Left())
			return
		}
		fmt.Fprint(s, ":")

	case OSTRUCTKEY:
		n := n.(*StructKeyExpr)
		fmt.Fprintf(s, "%v:%v", n.Sym(), n.Left())

	case OCALLPART:
		n := n.(*CallPartExpr)
		exprFmt(n.Left(), s, nprec)
		if n.Sym() == nil {
			fmt.Fprint(s, ".<nil>")
			return
		}
		fmt.Fprintf(s, ".%s", types.SymMethodName(n.Sym()))

	case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH:
		n := n.(*SelectorExpr)
		exprFmt(n.Left(), s, nprec)
		if n.Sym() == nil {
			fmt.Fprint(s, ".<nil>")
			return
		}
		fmt.Fprintf(s, ".%s", types.SymMethodName(n.Sym()))

	case ODOTTYPE, ODOTTYPE2:
		n := n.(*TypeAssertExpr)
		exprFmt(n.Left(), s, nprec)
		if n.Right() != nil {
			fmt.Fprintf(s, ".(%v)", n.Right())
			return
		}
		fmt.Fprintf(s, ".(%v)", n.Type())

	case OINDEX, OINDEXMAP:
		n := n.(*IndexExpr)
		exprFmt(n.Left(), s, nprec)
		fmt.Fprintf(s, "[%v]", n.Right())

	case OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR:
		n := n.(*SliceExpr)
		exprFmt(n.Left(), s, nprec)
		fmt.Fprint(s, "[")
		low, high, max := n.SliceBounds()
		if low != nil {
			fmt.Fprint(s, low)
		}
		fmt.Fprint(s, ":")
		if high != nil {
			fmt.Fprint(s, high)
		}
		if n.Op().IsSlice3() {
			fmt.Fprint(s, ":")
			if max != nil {
				fmt.Fprint(s, max)
			}
		}
		fmt.Fprint(s, "]")

	case OSLICEHEADER:
		n := n.(*SliceHeaderExpr)
		if n.List().Len() != 2 {
			base.Fatalf("bad OSLICEHEADER list length %d", n.List().Len())
		}
		fmt.Fprintf(s, "sliceheader{%v,%v,%v}", n.Left(), n.List().First(), n.List().Second())

	case OCOMPLEX, OCOPY:
		n := n.(*BinaryExpr)
		fmt.Fprintf(s, "%v(%v, %v)", n.Op(), n.Left(), n.Right())

	case OCONV,
		OCONVIFACE,
		OCONVNOP,
		OBYTES2STR,
		ORUNES2STR,
		OSTR2BYTES,
		OSTR2RUNES,
		ORUNESTR:
		n := n.(*ConvExpr)
		if n.Type() == nil || n.Type().Sym() == nil {
			fmt.Fprintf(s, "(%v)", n.Type())
		} else {
			fmt.Fprintf(s, "%v", n.Type())
		}
		fmt.Fprintf(s, "(%v)", n.Left())

	case OREAL,
		OIMAG,
		OCAP,
		OCLOSE,
		OLEN,
		ONEW,
		OPANIC,
		OALIGNOF,
		OOFFSETOF,
		OSIZEOF:
		n := n.(*UnaryExpr)
		fmt.Fprintf(s, "%v(%v)", n.Op(), n.Left())

	case OAPPEND,
		ODELETE,
		OMAKE,
		ORECOVER,
		OPRINT,
		OPRINTN:
		n := n.(*CallExpr)
		if n.IsDDD() {
			fmt.Fprintf(s, "%v(%.v...)", n.Op(), n.List())
			return
		}
		fmt.Fprintf(s, "%v(%.v)", n.Op(), n.List())

	case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH, OGETG:
		n := n.(*CallExpr)
		exprFmt(n.Left(), s, nprec)
		if n.IsDDD() {
			fmt.Fprintf(s, "(%.v...)", n.List())
			return
		}
		fmt.Fprintf(s, "(%.v)", n.List())

	case OMAKEMAP, OMAKECHAN, OMAKESLICE:
		n := n.(*MakeExpr)
		if n.Right() != nil {
			fmt.Fprintf(s, "make(%v, %v, %v)", n.Type(), n.Left(), n.Right())
			return
		}
		if n.Left() != nil && (n.Op() == OMAKESLICE || !n.Left().Type().IsUntyped()) {
			fmt.Fprintf(s, "make(%v, %v)", n.Type(), n.Left())
			return
		}
		fmt.Fprintf(s, "make(%v)", n.Type())

	case OMAKESLICECOPY:
		n := n.(*MakeExpr)
		fmt.Fprintf(s, "makeslicecopy(%v, %v, %v)", n.Type(), n.Left(), n.Right())

	case OPLUS, ONEG, OBITNOT, ONOT, ORECV:
		// Unary
		n := n.(*UnaryExpr)
		fmt.Fprintf(s, "%v", n.Op())
		// Insert a space between adjacent identical unary operators
		// so e.g. two negations do not merge into "--".
		if n.Left() != nil && n.Left().Op() == n.Op() {
			fmt.Fprint(s, " ")
		}
		exprFmt(n.Left(), s, nprec+1)

	case OADDR:
		n := n.(*AddrExpr)
		fmt.Fprintf(s, "%v", n.Op())
		if n.Left() != nil && n.Left().Op() == n.Op() {
			fmt.Fprint(s, " ")
		}
		exprFmt(n.Left(), s, nprec+1)

	case ODEREF:
		n := n.(*StarExpr)
		fmt.Fprintf(s, "%v", n.Op())
		exprFmt(n.Left(), s, nprec+1)

	// Binary
	case OADD,
		OAND,
		OANDNOT,
		ODIV,
		OEQ,
		OGE,
		OGT,
		OLE,
		OLT,
		OLSH,
		OMOD,
		OMUL,
		ONE,
		OOR,
		ORSH,
		OSUB,
		OXOR:
		n := n.(*BinaryExpr)
		exprFmt(n.Left(), s, nprec)
		fmt.Fprintf(s, " %v ", n.Op())
		exprFmt(n.Right(), s, nprec+1)

	case OANDAND,
		OOROR:
		n := n.(*LogicalExpr)
		exprFmt(n.Left(), s, nprec)
		fmt.Fprintf(s, " %v ", n.Op())
		exprFmt(n.Right(), s, nprec+1)

	case OSEND:
		n := n.(*SendStmt)
		exprFmt(n.Left(), s, nprec)
		fmt.Fprintf(s, " <- ")
		exprFmt(n.Right(), s, nprec+1)

	case OADDSTR:
		n := n.(*AddStringExpr)
		for i, n1 := range n.List().Slice() {
			if i != 0 {
				fmt.Fprint(s, " + ")
			}
			exprFmt(n1, s, nprec)
		}
	default:
		fmt.Fprintf(s, "<node %v>", n.Op())
	}
}
+
// ellipsisIf returns "..." when b is true and the empty string otherwise.
func ellipsisIf(b bool) string {
	if !b {
		return ""
	}
	return "..."
}
+
+// Nodes
+
+// Format implements formatting for a Nodes.
+// The valid formats are:
+//
+// %v Go syntax, semicolon-separated
+// %.v Go syntax, comma-separated
+// %+v Debug syntax, as in DumpList.
+//
+func (l Nodes) Format(s fmt.State, verb rune) {
+ if s.Flag('+') && verb == 'v' {
+ // %+v is DumpList output
+ dumpNodes(s, l, 1)
+ return
+ }
+
+ if verb != 'v' {
+ fmt.Fprintf(s, "%%!%c(Nodes)", verb)
+ return
+ }
+
+ sep := "; "
+ if _, ok := s.Precision(); ok { // %.v is expr list
+ sep = ", "
+ }
+
+ for i, n := range l.Slice() {
+ fmt.Fprint(s, n)
+ if i+1 < l.Len() {
+ fmt.Fprint(s, sep)
+ }
+ }
+}
+
// Dump

// Dump prints the message s followed by a debug dump of n.
// The node's address is included (via %p) so identical nodes can be
// spotted across dumps.
func Dump(s string, n Node) {
	fmt.Printf("%s [%p]%+v", s, n, n)
}
+
// DumpList prints the message s followed by a debug dump of each node in the list.
// The dump is accumulated in a buffer first and written to stdout with a
// single Write call.
func DumpList(s string, list Nodes) {
	var buf bytes.Buffer
	FDumpList(&buf, s, list)
	os.Stdout.Write(buf.Bytes())
}
+
+// FDumpList prints to w the message s followed by a debug dump of each node in the list.
+func FDumpList(w io.Writer, s string, list Nodes) {
+ io.WriteString(w, s)
+ dumpNodes(w, list, 1)
+ io.WriteString(w, "\n")
+}
+
+// indent prints indentation to w.
+func indent(w io.Writer, depth int) {
+ fmt.Fprint(w, "\n")
+ for i := 0; i < depth; i++ {
+ fmt.Fprint(w, ". ")
+ }
+}
+
// EscFmt is set by the escape analysis code to add escape analysis details to the node print.
var EscFmt func(n Node) string

// dumpNodeHeader prints the debug-format node header line to w:
// optional pointers (under -d dumpptrs), escape details, typecheck state,
// basic-typed exported fields, boolean flag methods, type, and position.
func dumpNodeHeader(w io.Writer, n Node) {
	// Useful to see which nodes in an AST printout are actually identical
	if base.Debug.DumpPtrs != 0 {
		fmt.Fprintf(w, " p(%p)", n)
	}

	if base.Debug.DumpPtrs != 0 && n.Name() != nil && n.Name().Defn != nil {
		// Useful to see where Defn is set and what node it points to
		fmt.Fprintf(w, " defn(%p)", n.Name().Defn)
	}

	if EscFmt != nil {
		if esc := EscFmt(n); esc != "" {
			fmt.Fprintf(w, " %s", esc)
		}
	}

	if n.Typecheck() != 0 {
		fmt.Fprintf(w, " tc(%d)", n.Typecheck())
	}

	// Print Node-specific fields of basic type in header line.
	v := reflect.ValueOf(n).Elem()
	t := v.Type()
	nf := t.NumField()
	for i := 0; i < nf; i++ {
		tf := t.Field(i)
		if tf.PkgPath != "" {
			// skip unexported field - Interface will fail
			continue
		}
		k := tf.Type.Kind()
		if reflect.Bool <= k && k <= reflect.Complex128 {
			// Trailing underscores (e.g. Body_) are stripped for display.
			name := strings.TrimSuffix(tf.Name, "_")
			vf := v.Field(i)
			vfi := vf.Interface()
			// Suppress zero values, and BADWIDTH for Offset fields.
			if name == "Offset" && vfi == types.BADWIDTH || name != "Offset" && isZero(vf) {
				continue
			}
			if vfi == true {
				fmt.Fprintf(w, " %s", name)
			} else {
				fmt.Fprintf(w, " %s:%+v", name, vf.Interface())
			}
		}
	}

	// Print Node-specific booleans by looking for methods.
	// Different v, t from above - want *Struct not Struct, for methods.
	v = reflect.ValueOf(n)
	t = v.Type()
	nm := t.NumMethod()
	for i := 0; i < nm; i++ {
		tm := t.Method(i)
		if tm.PkgPath != "" {
			// skip unexported method - call will fail
			continue
		}
		m := v.Method(i)
		mt := m.Type()
		if mt.NumIn() == 0 && mt.NumOut() == 1 && mt.Out(0).Kind() == reflect.Bool {
			// TODO(rsc): Remove the func/defer/recover wrapping,
			// which is guarding against panics in miniExpr,
			// once we get down to the simpler state in which
			// nodes have no getter methods that aren't allowed to be called.
			func() {
				defer func() { recover() }()
				if m.Call(nil)[0].Bool() {
					name := strings.TrimSuffix(tm.Name, "_")
					fmt.Fprintf(w, " %s", name)
				}
			}()
		}
	}

	if n.Op() == OCLOSURE {
		n := n.(*ClosureExpr)
		if fn := n.Func(); fn != nil && fn.Nname.Sym() != nil {
			fmt.Fprintf(w, " fnName(%+v)", fn.Nname.Sym())
		}
	}

	if n.Type() != nil {
		if n.Op() == OTYPE {
			fmt.Fprintf(w, " type")
		}
		fmt.Fprintf(w, " %+v", n.Type())
	}

	// Position, prefixed with the is-statement marker: "+" for PosIsStmt,
	// "_" for PosNotStmt, nothing otherwise.
	if n.Pos().IsKnown() {
		pfx := ""
		switch n.Pos().IsStmt() {
		case src.PosNotStmt:
			pfx = "_" // "-" would be confusing
		case src.PosIsStmt:
			pfx = "+"
		}
		pos := base.Ctxt.PosTable.Pos(n.Pos())
		file := filepath.Base(pos.Filename())
		fmt.Fprintf(w, " # %s%s:%d", pfx, file, pos.Line())
	}
}
+
// dumpNode prints a debug dump of the single node n (and, recursively,
// its children) to w at the given indentation depth. Recursion is cut
// off past depth 40.
func dumpNode(w io.Writer, n Node, depth int) {
	indent(w, depth)
	if depth > 40 {
		fmt.Fprint(w, "...")
		return
	}

	if n.Init().Len() != 0 {
		fmt.Fprintf(w, "%+v-init", n.Op())
		dumpNodes(w, n.Init(), depth+1)
		indent(w, depth)
	}

	switch n.Op() {
	default:
		fmt.Fprintf(w, "%+v", n.Op())
		dumpNodeHeader(w, n)

	case OLITERAL:
		fmt.Fprintf(w, "%+v-%v", n.Op(), n.Val())
		dumpNodeHeader(w, n)
		return

	case ONAME, ONONAME:
		if n.Sym() != nil {
			fmt.Fprintf(w, "%+v-%+v", n.Op(), n.Sym())
		} else {
			fmt.Fprintf(w, "%+v", n.Op())
		}
		dumpNodeHeader(w, n)
		// If the name has no type yet, show its declared type expression.
		if n.Type() == nil && n.Name() != nil && n.Name().Ntype != nil {
			indent(w, depth)
			fmt.Fprintf(w, "%+v-ntype", n.Op())
			dumpNode(w, n.Name().Ntype, depth+1)
		}
		return

	case OMETHEXPR:
		n := n.(*MethodExpr)
		fmt.Fprintf(w, "%+v-%+v", n.Op(), n.FuncName().Sym())
		dumpNodeHeader(w, n)
		return

	case OASOP:
		n := n.(*AssignOpStmt)
		fmt.Fprintf(w, "%+v-%+v", n.Op(), n.SubOp())
		dumpNodeHeader(w, n)

	case OTYPE:
		fmt.Fprintf(w, "%+v %+v", n.Op(), n.Sym())
		dumpNodeHeader(w, n)
		if n.Type() == nil && n.Name() != nil && n.Name().Ntype != nil {
			indent(w, depth)
			fmt.Fprintf(w, "%+v-ntype", n.Op())
			dumpNode(w, n.Name().Ntype, depth+1)
		}
		return

	case OCLOSURE:
		fmt.Fprintf(w, "%+v", n.Op())
		dumpNodeHeader(w, n)

	case ODCLFUNC:
		// Func has many fields we don't want to print.
		// Bypass reflection and just print what we want.
		n := n.(*Func)
		fmt.Fprintf(w, "%+v", n.Op())
		dumpNodeHeader(w, n)
		fn := n.Func()
		if len(fn.Dcl) > 0 {
			indent(w, depth)
			fmt.Fprintf(w, "%+v-Dcl", n.Op())
			for _, dcl := range n.Func().Dcl {
				dumpNode(w, dcl, depth+1)
			}
		}
		if fn.Body().Len() > 0 {
			indent(w, depth)
			fmt.Fprintf(w, "%+v-body", n.Op())
			dumpNodes(w, fn.Body(), depth+1)
		}
		return
	}

	if n.Sym() != nil {
		fmt.Fprintf(w, " %+v", n.Sym())
	}
	if n.Type() != nil {
		fmt.Fprintf(w, " %+v", n.Type())
	}

	// Walk the node's exported Node/Nodes fields via reflection and dump
	// each as a child.
	v := reflect.ValueOf(n).Elem()
	t := reflect.TypeOf(n).Elem()
	nf := t.NumField()
	for i := 0; i < nf; i++ {
		tf := t.Field(i)
		vf := v.Field(i)
		if tf.PkgPath != "" {
			// skip unexported field - Interface will fail
			continue
		}
		switch tf.Type.Kind() {
		case reflect.Interface, reflect.Ptr, reflect.Slice:
			if vf.IsNil() {
				continue
			}
		}
		name := strings.TrimSuffix(tf.Name, "_")
		// Do not bother with field name header lines for the
		// most common positional arguments: unary, binary expr,
		// index expr, send stmt, go and defer call expression.
		switch name {
		case "X", "Y", "Index", "Chan", "Value", "Call":
			name = ""
		}
		switch val := vf.Interface().(type) {
		case Node:
			if name != "" {
				indent(w, depth)
				fmt.Fprintf(w, "%+v-%s", n.Op(), name)
			}
			dumpNode(w, val, depth+1)
		case Nodes:
			if val.Len() == 0 {
				continue
			}
			if name != "" {
				indent(w, depth)
				fmt.Fprintf(w, "%+v-%s", n.Op(), name)
			}
			dumpNodes(w, val, depth+1)
		}
	}
}
+
+func dumpNodes(w io.Writer, list Nodes, depth int) {
+ if list.Len() == 0 {
+ fmt.Fprintf(w, " <nil>")
+ return
+ }
+
+ for _, n := range list.Slice() {
+ dumpNode(w, n, depth)
+ }
+}
+
+// reflect.IsZero is not available in Go 1.4 (added in Go 1.13), so we use this copy instead.
+func isZero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return math.Float64bits(v.Float()) == 0
+ case reflect.Complex64, reflect.Complex128:
+ c := v.Complex()
+ return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0
+ case reflect.Array:
+ for i := 0; i < v.Len(); i++ {
+ if !isZero(v.Index(i)) {
+ return false
+ }
+ }
+ return true
+ case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
+ return v.IsNil()
+ case reflect.String:
+ return v.Len() == 0
+ case reflect.Struct:
+ for i := 0; i < v.NumField(); i++ {
+ if !isZero(v.Field(i)) {
+ return false
+ }
+ }
+ return true
+ default:
+ return false
+ }
+}
--- /dev/null
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+)
+
// A Func corresponds to a single function in a Go program
// (and vice versa: each function is denoted by exactly one *Func).
//
// There are multiple nodes that represent a Func in the IR.
//
// The ONAME node (Func.Nname) is used for plain references to it.
// The ODCLFUNC node (the Func itself) is used for its declaration code.
// The OCLOSURE node (Func.OClosure) is used for a reference to a
// function literal.
//
// An imported function will have an ONAME node which points to a Func
// with an empty body.
// A declared function or method has an ODCLFUNC (the Func itself) and an ONAME.
// A function literal is represented directly by an OCLOSURE, but it also
// has an ODCLFUNC (and a matching ONAME) representing the compiled
// underlying form of the closure, which accesses the captured variables
// using a special data structure passed in a register.
//
// A method declaration is represented like functions, except f.Sym
// will be the qualified method name (e.g., "T.m") and
// f.Func.Shortname is the bare method name (e.g., "m").
//
// A method expression (T.M) is represented as an OMETHEXPR node,
// in which n.Left and n.Right point to the type and method, respectively.
// Each distinct mention of a method expression in the source code
// constructs a fresh node.
//
// A method value (t.M) is represented by ODOTMETH/ODOTINTER
// when it is called directly and by OCALLPART otherwise.
// These are like method expressions, except that for ODOTMETH/ODOTINTER,
// the method name is stored in Sym instead of Right.
// Each OCALLPART ends up being implemented as a new
// function, a bit like a closure, with its own ODCLFUNC.
// The OCALLPART uses n.Func to record the linkage to
// the generated ODCLFUNC, but there is no
// pointer from the Func back to the OCALLPART.
type Func struct {
	miniNode
	typ   *types.Type // function type; accessed via Type/SetType
	Body_ Nodes       // function body; accessed via Body/PtrBody/SetBody
	iota  int64       // accessed via Iota/SetIota; NewFunc initializes it to -1

	Nname    *Name        // ONAME node
	OClosure *ClosureExpr // OCLOSURE node

	Shortname *types.Sym

	// Extra entry code for the function. For example, allocate and initialize
	// memory for escaping parameters.
	Enter Nodes
	Exit  Nodes
	// ONAME nodes for all params/locals for this func/closure, does NOT
	// include closurevars until transformclosure runs.
	Dcl []*Name

	ClosureEnter Nodes   // list of ONAME nodes (or OADDR-of-ONAME nodes, for output parameters) of captured variables
	ClosureType  Node    // closure representation type
	ClosureVars  []*Name // closure params; each has closurevar set

	// Parents records the parent scope of each scope within a
	// function. The root scope (0) has no parent, so the i'th
	// scope's parent is stored at Parents[i-1].
	Parents []ScopeID

	// Marks records scope boundary changes.
	Marks []Mark

	FieldTrack map[*types.Sym]struct{}
	DebugInfo  interface{}
	LSym       *obj.LSym

	Inl *Inline

	// Closgen tracks how many closures have been generated within
	// this function. Used by closurename for creating unique
	// function names.
	Closgen int32

	Label int32 // largest auto-generated label in this function

	Endlineno src.XPos
	WBPos     src.XPos // position of first write barrier; see SetWBPos

	Pragma PragmaFlag // go:xxx function annotations

	flags      bitset16 // funcXxx flag bits; see the constants below
	NumDefers  int32    // number of defer calls in the function
	NumReturns int32    // number of explicit returns in the function

	// nwbrCalls records the LSyms of functions called by this
	// function for go:nowritebarrierrec analysis. Only filled in
	// if nowritebarrierrecCheck != nil.
	NWBRCalls *[]SymAndPos
}
+
+func NewFunc(pos src.XPos) *Func {
+ f := new(Func)
+ f.pos = pos
+ f.op = ODCLFUNC
+ f.iota = -1
+ return f
+}
+
// isStmt is a marker method identifying *Func as a statement node
// (its declaration form, ODCLFUNC).
func (f *Func) isStmt() {}

// A Func is its own Func.
func (f *Func) Func() *Func { return f }

// Body, PtrBody, and SetBody access the function body statement list.
func (f *Func) Body() Nodes     { return f.Body_ }
func (f *Func) PtrBody() *Nodes { return &f.Body_ }
func (f *Func) SetBody(x Nodes) { f.Body_ = x }

// Type and SetType access the function's type.
func (f *Func) Type() *types.Type     { return f.typ }
func (f *Func) SetType(x *types.Type) { f.typ = x }

// Iota and SetIota access the iota value (-1 in a freshly constructed Func).
func (f *Func) Iota() int64     { return f.iota }
func (f *Func) SetIota(x int64) { f.iota = x }
+
+func (f *Func) Sym() *types.Sym {
+ if f.Nname != nil {
+ return f.Nname.Sym()
+ }
+ return nil
+}
+
// An Inline holds fields used for function bodies that can be inlined.
// It is stored in Func.Inl.
type Inline struct {
	Cost int32 // heuristic cost of inlining this function

	// Copies of Func.Dcl and Nbody for use during inlining.
	Dcl  []*Name // copy of the declaration list
	Body []Node  // copy of the function body
}
+
// A Mark represents a scope boundary. Marks are recorded in Func.Marks.
type Mark struct {
	// Pos is the position of the token that marks the scope
	// change.
	Pos src.XPos

	// Scope identifies the innermost scope to the right of Pos.
	Scope ScopeID
}

// A ScopeID represents a lexical scope within a function.
// Scope parent links are recorded in Func.Parents.
type ScopeID int32
+
// Flag bits stored in Func.flags (a bitset16); read with the
// corresponding accessor methods (Dupok, Wrapper, ...) and written with
// the SetXxx methods below.
const (
	funcDupok         = 1 << iota // duplicate definitions ok
	funcWrapper                   // is method wrapper
	funcNeedctxt                  // function uses context register (has closure variables)
	funcReflectMethod             // function calls reflect.Type.Method or MethodByName
	// true if closure inside a function; false if a simple function or a
	// closure in a global variable initialization
	funcIsHiddenClosure
	funcHasDefer                 // contains a defer statement
	funcNilCheckDisabled         // disable nil checks when compiling this function
	funcInlinabilityChecked      // inliner has already determined whether the function is inlinable
	funcExportInline             // include inline body in export data
	funcInstrumentBody           // add race/msan instrumentation during SSA construction
	funcOpenCodedDeferDisallowed // can't do open-coded defers
	funcClosureCalled            // closure is only immediately called
)
+
// A SymAndPos pairs a callee's LSym with the position of the call.
// It is recorded in Func.NWBRCalls for go:nowritebarrierrec analysis.
type SymAndPos struct {
	Sym *obj.LSym // LSym of callee
	Pos src.XPos  // line of call
}
+
// Accessors for the funcXxx flag bits stored in f.flags.
func (f *Func) Dupok() bool                    { return f.flags&funcDupok != 0 }
func (f *Func) Wrapper() bool                  { return f.flags&funcWrapper != 0 }
func (f *Func) Needctxt() bool                 { return f.flags&funcNeedctxt != 0 }
func (f *Func) ReflectMethod() bool            { return f.flags&funcReflectMethod != 0 }
func (f *Func) IsHiddenClosure() bool          { return f.flags&funcIsHiddenClosure != 0 }
func (f *Func) HasDefer() bool                 { return f.flags&funcHasDefer != 0 }
func (f *Func) NilCheckDisabled() bool         { return f.flags&funcNilCheckDisabled != 0 }
func (f *Func) InlinabilityChecked() bool      { return f.flags&funcInlinabilityChecked != 0 }
func (f *Func) ExportInline() bool             { return f.flags&funcExportInline != 0 }
func (f *Func) InstrumentBody() bool           { return f.flags&funcInstrumentBody != 0 }
func (f *Func) OpenCodedDeferDisallowed() bool { return f.flags&funcOpenCodedDeferDisallowed != 0 }
func (f *Func) ClosureCalled() bool            { return f.flags&funcClosureCalled != 0 }
+
// Setters for the funcXxx flag bits stored in f.flags.
func (f *Func) SetDupok(b bool)                    { f.flags.set(funcDupok, b) }
func (f *Func) SetWrapper(b bool)                  { f.flags.set(funcWrapper, b) }
func (f *Func) SetNeedctxt(b bool)                 { f.flags.set(funcNeedctxt, b) }
func (f *Func) SetReflectMethod(b bool)            { f.flags.set(funcReflectMethod, b) }
func (f *Func) SetIsHiddenClosure(b bool)          { f.flags.set(funcIsHiddenClosure, b) }
func (f *Func) SetHasDefer(b bool)                 { f.flags.set(funcHasDefer, b) }
func (f *Func) SetNilCheckDisabled(b bool)         { f.flags.set(funcNilCheckDisabled, b) }
func (f *Func) SetInlinabilityChecked(b bool)      { f.flags.set(funcInlinabilityChecked, b) }
func (f *Func) SetExportInline(b bool)             { f.flags.set(funcExportInline, b) }
func (f *Func) SetInstrumentBody(b bool)           { f.flags.set(funcInstrumentBody, b) }
func (f *Func) SetOpenCodedDeferDisallowed(b bool) { f.flags.set(funcOpenCodedDeferDisallowed, b) }
func (f *Func) SetClosureCalled(b bool)            { f.flags.set(funcClosureCalled, b) }
+
// SetWBPos records pos as the position of the function's first write
// barrier; only the first recorded position is kept. If write-barrier
// debugging (-d wb) is enabled, it also emits a warning at pos.
func (f *Func) SetWBPos(pos src.XPos) {
	if base.Debug.WB != 0 {
		base.WarnfAt(pos, "write barrier")
	}
	if !f.WBPos.IsKnown() {
		f.WBPos = pos
	}
}
+
// FuncName returns the name (without the package) of the function n.
// It accepts a *Func or a *Name, *CallPartExpr, or *ClosureExpr that
// refers to one; for any other node, or when no name is available,
// it returns "<nil>".
func FuncName(n Node) string {
	var f *Func
	switch n := n.(type) {
	case *Func:
		f = n
	case *Name:
		f = n.Func()
	case *CallPartExpr:
		f = n.Func()
	case *ClosureExpr:
		f = n.Func()
	}
	if f == nil || f.Nname == nil {
		return "<nil>"
	}
	return f.Nname.Sym().Name
}
+
+// pkgFuncName returns the name of the function referenced by n, with package prepended.
+// This differs from the compiler's internal convention where local functions lack a package
+// because the ultimate consumer of this is a human looking at an IDE; package is only empty
+// if the compilation package is actually the empty string.
+func PkgFuncName(n Node) string {
+ var s *types.Sym
+ if n == nil {
+ return "<nil>"
+ }
+ if n.Op() == ONAME {
+ s = n.Sym()
+ } else {
+ var f *Func
+ switch n := n.(type) {
+ case *CallPartExpr:
+ f = n.Func()
+ case *ClosureExpr:
+ f = n.Func()
+ case *Func:
+ f = n
+ }
+ if f == nil || f.Nname == nil {
+ return "<nil>"
+ }
+ s = f.Nname.Sym()
+ }
+ pkg := s.Pkg
+
+ p := base.Ctxt.Pkgpath
+ if pkg != nil && pkg.Path != "" {
+ p = pkg.Path
+ }
+ if p == "" {
+ return s.Name
+ }
+ return p + "." + s.Name
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
--- /dev/null
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run -mod=mod mknode.go
+
+package ir
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "fmt"
+ "go/constant"
+)
+
// A miniNode is a minimal node implementation,
// meant to be embedded as the first field in a larger node implementation,
// at a cost of 8 bytes.
//
// A miniNode is NOT a valid Node by itself: the embedding struct
// must at the least provide:
//
//	func (n *MyNode) String() string { return fmt.Sprint(n) }
//	func (n *MyNode) rawCopy() Node { c := *n; return &c }
//	func (n *MyNode) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
//
// The embedding struct should also fill in n.op in its constructor,
// for more useful panic messages when invalid methods are called,
// instead of implementing Op itself.
//
type miniNode struct {
	pos  src.XPos // uint32
	op   Op       // uint8
	bits bitset8  // walkdef/typecheck/diag/hasCall bits; see the mini* constants below
	esc  uint16   // escape analysis tag; see Esc/SetEsc
}
+
+func (n *miniNode) Format(s fmt.State, verb rune) { panic(1) }
+func (n *miniNode) copy() Node { panic(1) }
+func (n *miniNode) doChildren(do func(Node) error) error { panic(1) }
+func (n *miniNode) editChildren(edit func(Node) Node) { panic(1) }
+
+// posOr returns pos if known, or else n.pos.
+// For use in DeepCopy.
+func (n *miniNode) posOr(pos src.XPos) src.XPos {
+	if !pos.IsKnown() {
+		return n.pos
+	}
+	return pos
+}
+
+// op can be read, but not written.
+// An embedding implementation can provide a SetOp if desired.
+// (The panicking SetOp is with the other panics below.)
+func (n *miniNode) Op() Op            { return n.op }
+func (n *miniNode) Pos() src.XPos     { return n.pos }
+func (n *miniNode) SetPos(x src.XPos) { n.pos = x }
+func (n *miniNode) Esc() uint16       { return n.esc }
+func (n *miniNode) SetEsc(x uint16)   { n.esc = x }
+
+// Layout of the bits field: two 2-bit fields at the bottom
+// (walkdef at bits 0-1, typecheck at bits 2-3), then single-bit flags.
+const (
+	miniWalkdefShift   = 0
+	miniTypecheckShift = 2
+	miniDiag           = 1 << 4
+	miniHasCall        = 1 << 5 // for miniStmt
+)
+
+// Walkdef and Typecheck are 2-bit fields, so their setters reject
+// values above 3.
+func (n *miniNode) Walkdef() uint8   { return n.bits.get2(miniWalkdefShift) }
+func (n *miniNode) Typecheck() uint8 { return n.bits.get2(miniTypecheckShift) }
+func (n *miniNode) SetWalkdef(x uint8) {
+	if x > 3 {
+		panic(fmt.Sprintf("cannot SetWalkdef %d", x))
+	}
+	n.bits.set2(miniWalkdefShift, x)
+}
+func (n *miniNode) SetTypecheck(x uint8) {
+	if x > 3 {
+		panic(fmt.Sprintf("cannot SetTypecheck %d", x))
+	}
+	n.bits.set2(miniTypecheckShift, x)
+}
+
+func (n *miniNode) Diag() bool     { return n.bits&miniDiag != 0 }
+func (n *miniNode) SetDiag(x bool) { n.bits.set(miniDiag, x) }
+
+// Empty, immutable graph structure.
+// The getters report an empty graph; the setters accept only values
+// that keep it empty (nil) and panic otherwise.
+
+func (n *miniNode) Left() Node       { return nil }
+func (n *miniNode) Right() Node      { return nil }
+func (n *miniNode) Init() Nodes      { return Nodes{} }
+func (n *miniNode) PtrInit() *Nodes  { return &immutableEmptyNodes }
+func (n *miniNode) Body() Nodes      { return Nodes{} }
+func (n *miniNode) PtrBody() *Nodes  { return &immutableEmptyNodes }
+func (n *miniNode) List() Nodes      { return Nodes{} }
+func (n *miniNode) PtrList() *Nodes  { return &immutableEmptyNodes }
+func (n *miniNode) Rlist() Nodes     { return Nodes{} }
+func (n *miniNode) PtrRlist() *Nodes { return &immutableEmptyNodes }
+func (n *miniNode) SetLeft(x Node) {
+	if x != nil {
+		panic(n.no("SetLeft"))
+	}
+}
+func (n *miniNode) SetRight(x Node) {
+	if x != nil {
+		panic(n.no("SetRight"))
+	}
+}
+func (n *miniNode) SetInit(x Nodes) {
+	if x != nil {
+		panic(n.no("SetInit"))
+	}
+}
+func (n *miniNode) SetBody(x Nodes) {
+	if x != nil {
+		panic(n.no("SetBody"))
+	}
+}
+func (n *miniNode) SetList(x Nodes) {
+	if x != nil {
+		panic(n.no("SetList"))
+	}
+}
+func (n *miniNode) SetRlist(x Nodes) {
+	if x != nil {
+		panic(n.no("SetRlist"))
+	}
+}
+
+// Additional functionality unavailable.
+// Getters that are safe to call on any node return a zero value;
+// everything else panics via no.
+
+// no returns the panic message for calling unsupported method name
+// on a node with this op.
+func (n *miniNode) no(name string) string { return "cannot " + name + " on " + n.op.String() }
+
+func (n *miniNode) SetOp(Op)            { panic(n.no("SetOp")) }
+func (n *miniNode) SubOp() Op           { panic(n.no("SubOp")) }
+func (n *miniNode) SetSubOp(Op)         { panic(n.no("SetSubOp")) }
+func (n *miniNode) Type() *types.Type   { return nil }
+func (n *miniNode) SetType(*types.Type) { panic(n.no("SetType")) }
+func (n *miniNode) Func() *Func         { return nil }
+func (n *miniNode) Name() *Name         { return nil }
+func (n *miniNode) Sym() *types.Sym     { return nil }
+func (n *miniNode) SetSym(*types.Sym)   { panic(n.no("SetSym")) }
+func (n *miniNode) Offset() int64       { return types.BADWIDTH }
+func (n *miniNode) SetOffset(x int64)   { panic(n.no("SetOffset")) }
+func (n *miniNode) Class() Class        { return Pxxx }
+func (n *miniNode) SetClass(Class)      { panic(n.no("SetClass")) }
+func (n *miniNode) Likely() bool        { panic(n.no("Likely")) }
+func (n *miniNode) SetLikely(bool)      { panic(n.no("SetLikely")) }
+func (n *miniNode) SliceBounds() (low, high, max Node) {
+	panic(n.no("SliceBounds"))
+}
+func (n *miniNode) SetSliceBounds(low, high, max Node) {
+	panic(n.no("SetSliceBounds"))
+}
+func (n *miniNode) Iota() int64                   { panic(n.no("Iota")) }
+func (n *miniNode) SetIota(int64)                 { panic(n.no("SetIota")) }
+func (n *miniNode) Colas() bool                   { return false }
+func (n *miniNode) SetColas(bool)                 { panic(n.no("SetColas")) }
+func (n *miniNode) NoInline() bool                { panic(n.no("NoInline")) }
+func (n *miniNode) SetNoInline(bool)              { panic(n.no("SetNoInline")) }
+func (n *miniNode) Transient() bool               { panic(n.no("Transient")) }
+func (n *miniNode) SetTransient(bool)             { panic(n.no("SetTransient")) }
+func (n *miniNode) Implicit() bool                { return false }
+func (n *miniNode) SetImplicit(bool)              { panic(n.no("SetImplicit")) }
+func (n *miniNode) IsDDD() bool                   { return false }
+func (n *miniNode) SetIsDDD(bool)                 { panic(n.no("SetIsDDD")) }
+func (n *miniNode) Embedded() bool                { return false }
+func (n *miniNode) SetEmbedded(bool)              { panic(n.no("SetEmbedded")) }
+func (n *miniNode) IndexMapLValue() bool          { panic(n.no("IndexMapLValue")) }
+func (n *miniNode) SetIndexMapLValue(bool)        { panic(n.no("SetIndexMapLValue")) }
+func (n *miniNode) ResetAux()                     { panic(n.no("ResetAux")) }
+func (n *miniNode) HasBreak() bool                { panic(n.no("HasBreak")) }
+func (n *miniNode) SetHasBreak(bool)              { panic(n.no("SetHasBreak")) }
+func (n *miniNode) Val() constant.Value           { panic(n.no("Val")) }
+func (n *miniNode) SetVal(v constant.Value)       { panic(n.no("SetVal")) }
+func (n *miniNode) Int64Val() int64               { panic(n.no("Int64Val")) }
+func (n *miniNode) Uint64Val() uint64             { panic(n.no("Uint64Val")) }
+func (n *miniNode) CanInt64() bool                { panic(n.no("CanInt64")) }
+func (n *miniNode) BoolVal() bool                 { panic(n.no("BoolVal")) }
+func (n *miniNode) StringVal() string             { panic(n.no("StringVal")) }
+func (n *miniNode) HasCall() bool                 { return false }
+func (n *miniNode) SetHasCall(bool)               { panic(n.no("SetHasCall")) }
+func (n *miniNode) NonNil() bool                  { return false }
+func (n *miniNode) MarkNonNil()                   { panic(n.no("MarkNonNil")) }
+func (n *miniNode) Bounded() bool                 { return false }
+func (n *miniNode) SetBounded(bool)               { panic(n.no("SetBounded")) }
+func (n *miniNode) Opt() interface{}              { return nil }
+func (n *miniNode) SetOpt(interface{})            { panic(n.no("SetOpt")) }
+func (n *miniNode) MarkReadonly()                 { panic(n.no("MarkReadonly")) }
+func (n *miniNode) TChanDir() types.ChanDir       { panic(n.no("TChanDir")) }
+func (n *miniNode) SetTChanDir(types.ChanDir)     { panic(n.no("SetTChanDir")) }
--- /dev/null
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/format"
+ "go/types"
+ "io/ioutil"
+ "log"
+ "strings"
+
+ "golang.org/x/tools/go/packages"
+)
+
+// main loads package cmd/compile/internal/ir and, for every struct
+// type that embeds miniNode (directly or transitively), generates
+// Format, copy, doChildren, and editChildren methods, writing the
+// result to node_gen.go.
+func main() {
+	cfg := &packages.Config{
+		Mode: packages.NeedSyntax | packages.NeedTypes,
+	}
+	pkgs, err := packages.Load(cfg, "cmd/compile/internal/ir")
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	pkg := pkgs[0].Types
+	scope := pkg.Scope()
+
+	// lookup resolves a named type declared at package scope.
+	// It panics (type assertion) if the name is missing or not a type.
+	lookup := func(name string) *types.Named {
+		return scope.Lookup(name).(*types.TypeName).Type().(*types.Named)
+	}
+
+	// Types that get special treatment in the generated helpers.
+	nodeType := lookup("Node")
+	ntypeType := lookup("Ntype")
+	nodesType := lookup("Nodes")
+	ptrFieldType := types.NewPointer(lookup("Field"))
+	slicePtrFieldType := types.NewSlice(ptrFieldType)
+	ptrIdentType := types.NewPointer(lookup("Ident"))
+
+	var buf bytes.Buffer
+	fmt.Fprintln(&buf, "// Code generated by mknode.go. DO NOT EDIT.")
+	fmt.Fprintln(&buf)
+	fmt.Fprintln(&buf, "package ir")
+	fmt.Fprintln(&buf)
+	fmt.Fprintln(&buf, `import "fmt"`)
+
+	// scope.Names() is sorted, so output order is deterministic.
+	for _, name := range scope.Names() {
+		obj, ok := scope.Lookup(name).(*types.TypeName)
+		if !ok {
+			continue
+		}
+
+		typName := obj.Name()
+		typ, ok := obj.Type().(*types.Named).Underlying().(*types.Struct)
+		if !ok {
+			continue
+		}
+
+		// Skip the mini* building blocks and any struct that is not a node.
+		if strings.HasPrefix(typName, "mini") || !hasMiniNode(typ) {
+			continue
+		}
+
+		fmt.Fprintf(&buf, "\n")
+		fmt.Fprintf(&buf, "func (n *%s) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }\n", name)
+
+		switch name {
+		case "Name":
+			// Names must not be copied; generate a panicking stub.
+			fmt.Fprintf(&buf, "func (n *%s) copy() Node {panic(\"%s.copy\")}\n", name, name)
+		default:
+			fmt.Fprintf(&buf, "func (n *%s) copy() Node { c := *n\n", name)
+			forNodeFields(typName, typ, func(name string, is func(types.Type) bool) {
+				switch {
+				case is(nodesType):
+					fmt.Fprintf(&buf, "c.%s = c.%s.Copy()\n", name, name)
+				case is(ptrFieldType):
+					fmt.Fprintf(&buf, "if c.%s != nil { c.%s = c.%s.copy() }\n", name, name, name)
+				case is(slicePtrFieldType):
+					fmt.Fprintf(&buf, "c.%s = copyFields(c.%s)\n", name, name)
+				}
+			})
+			fmt.Fprintf(&buf, "return &c }\n")
+		}
+
+		fmt.Fprintf(&buf, "func (n *%s) doChildren(do func(Node) error) error { var err error\n", name)
+		forNodeFields(typName, typ, func(name string, is func(types.Type) bool) {
+			switch {
+			case is(ptrIdentType):
+				fmt.Fprintf(&buf, "if n.%s != nil { err = maybeDo(n.%s, err, do) }\n", name, name)
+			case is(nodeType), is(ntypeType):
+				fmt.Fprintf(&buf, "err = maybeDo(n.%s, err, do)\n", name)
+			case is(nodesType):
+				fmt.Fprintf(&buf, "err = maybeDoList(n.%s, err, do)\n", name)
+			case is(ptrFieldType):
+				fmt.Fprintf(&buf, "err = maybeDoField(n.%s, err, do)\n", name)
+			case is(slicePtrFieldType):
+				fmt.Fprintf(&buf, "err = maybeDoFields(n.%s, err, do)\n", name)
+			}
+		})
+		fmt.Fprintf(&buf, "return err }\n")
+
+		fmt.Fprintf(&buf, "func (n *%s) editChildren(edit func(Node) Node) {\n", name)
+		forNodeFields(typName, typ, func(name string, is func(types.Type) bool) {
+			switch {
+			case is(ptrIdentType):
+				fmt.Fprintf(&buf, "if n.%s != nil { n.%s = edit(n.%s).(*Ident) }\n", name, name, name)
+			case is(nodeType):
+				fmt.Fprintf(&buf, "n.%s = maybeEdit(n.%s, edit)\n", name, name)
+			case is(ntypeType):
+				fmt.Fprintf(&buf, "n.%s = toNtype(maybeEdit(n.%s, edit))\n", name, name)
+			case is(nodesType):
+				fmt.Fprintf(&buf, "editList(n.%s, edit)\n", name)
+			case is(ptrFieldType):
+				fmt.Fprintf(&buf, "editField(n.%s, edit)\n", name)
+			case is(slicePtrFieldType):
+				fmt.Fprintf(&buf, "editFields(n.%s, edit)\n", name)
+			}
+		})
+		fmt.Fprintf(&buf, "}\n")
+	}
+
+	out, err := format.Source(buf.Bytes())
+	if err != nil {
+		// write out mangled source so we can see the bug.
+		out = buf.Bytes()
+	}
+
+	err = ioutil.WriteFile("node_gen.go", out, 0666)
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+
+// forNodeFields calls f for each field of typ that should participate
+// in the generated copy/doChildren/editChildren methods, flattening
+// embedded structs and skipping special-cased fields. The is callback
+// passed to f reports whether the field's type is identical to a
+// given type.
+func forNodeFields(typName string, typ *types.Struct, f func(name string, is func(types.Type) bool)) {
+	for i, n := 0, typ.NumFields(); i < n; i++ {
+		v := typ.Field(i)
+		if v.Embedded() {
+			// Recurse into embedded structs (e.g. miniExpr, miniStmt).
+			if typ, ok := v.Type().Underlying().(*types.Struct); ok {
+				forNodeFields(typName, typ, f)
+				continue
+			}
+		}
+		switch typName {
+		case "Func":
+			// For Func, only the body field participates.
+			if strings.ToLower(strings.TrimSuffix(v.Name(), "_")) != "body" {
+				continue
+			}
+		case "Name":
+			// Name fields never participate (Name.copy panics; see main).
+			continue
+		}
+		switch v.Name() {
+		case "orig":
+			continue
+		}
+		switch typName + "." + v.Name() {
+		case "AddStringExpr.Alloc":
+			continue
+		}
+		f(v.Name(), func(t types.Type) bool { return types.Identical(t, v.Type()) })
+	}
+}
+
+// hasMiniNode reports whether typ contains a field named miniNode,
+// either directly or inside an embedded struct.
+func hasMiniNode(typ *types.Struct) bool {
+	for i, n := 0, typ.NumFields(); i < n; i++ {
+		field := typ.Field(i)
+		switch {
+		case field.Name() == "miniNode":
+			return true
+		case field.Embedded():
+			if inner, ok := field.Type().Underlying().(*types.Struct); ok && hasMiniNode(inner) {
+				return true
+			}
+		}
+	}
+	return false
+}
--- /dev/null
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+
+ "go/constant"
+)
+
+// An Ident is an identifier, possibly qualified.
+type Ident struct {
+	miniExpr
+	sym *types.Sym
+}
+
+// NewIdent returns a new, as-yet-unresolved identifier (ONONAME)
+// for sym at position pos.
+func NewIdent(pos src.XPos, sym *types.Sym) *Ident {
+	n := new(Ident)
+	n.op = ONONAME
+	n.pos = pos
+	n.sym = sym
+	return n
+}
+
+func (n *Ident) Sym() *types.Sym { return n.sym }
+
+// CanBeNtype is a marker method: an Ident may appear where a type
+// expression (Ntype) is expected.
+func (*Ident) CanBeNtype() {}
+
+// Name holds Node fields used only by named nodes (ONAME, OTYPE, some OLITERAL).
+type Name struct {
+	miniExpr
+	BuiltinOp Op             // uint8
+	Class_    Class          // uint8
+	flags     bitset16
+	pragma    PragmaFlag     // int16
+	sym       *types.Sym
+	fn        *Func
+	Offset_   int64          // frame offset or iota value, by op (see FrameOffset, Iota)
+	val       constant.Value // constant value, for OLITERAL (see Val, SetVal)
+	orig      Node
+	Embed     *[]Embed // list of embedded files, for ONAME var
+
+	PkgName *PkgName // real package for import . names
+	// For a local variable (not param) or extern, the initializing assignment (OAS or OAS2).
+	// For a closure var, the ONAME node of the outer captured variable
+	Defn Node
+
+	// The function, method, or closure in which local variable or param is declared.
+	Curfn *Func
+
+	// Unique number for ONAME nodes within a function. Function outputs
+	// (results) are numbered starting at one, followed by function inputs
+	// (parameters), and then local variables. Vargen is used to distinguish
+	// local variables/params with the same name.
+	Vargen    int32
+	Decldepth int32 // declaration loop depth, increased for every loop or label
+
+	Ntype    Ntype
+	Heapaddr *Name // temp holding heap address of param
+
+	// ONAME PAUTOHEAP
+	Stackcopy *Name // the PPARAM/PPARAMOUT on-stack slot (moved func params only)
+
+	// ONAME closure linkage
+	// Consider:
+	//
+	//	func f() {
+	//	  x := 1 // x1
+	//	  func() {
+	//	    use(x) // x2
+	//	    func() {
+	//	      use(x) // x3
+	//	      --- parser is here ---
+	//	    }()
+	//	  }()
+	//	}
+	//
+	// There is an original declaration of x and then a chain of mentions of x
+	// leading into the current function. Each time x is mentioned in a new closure,
+	// we create a variable representing x for use in that specific closure,
+	// since the way you get to x is different in each closure.
+	//
+	// Let's number the specific variables as shown in the code:
+	// x1 is the original x, x2 is when mentioned in the closure,
+	// and x3 is when mentioned in the closure in the closure.
+	//
+	// We keep these linked (assume N > 1):
+	//
+	//   - x1.Defn = original declaration statement for x (like most variables)
+	//   - x1.Innermost = current innermost closure x (in this case x3), or nil for none
+	//   - x1.IsClosureVar() = false
+	//
+	//   - xN.Defn = x1, N > 1
+	//   - xN.IsClosureVar() = true, N > 1
+	//   - x2.Outer = nil
+	//   - xN.Outer = x(N-1), N > 2
+	//
+	//
+	// When we look up x in the symbol table, we always get x1.
+	// Then we can use x1.Innermost (if not nil) to get the x
+	// for the innermost known closure function,
+	// but the first reference in a closure will find either no x1.Innermost
+	// or an x1.Innermost with .Funcdepth < Funcdepth.
+	// In that case, a new xN must be created, linked in with:
+	//
+	//     xN.Defn = x1
+	//     xN.Outer = x1.Innermost
+	//     x1.Innermost = xN
+	//
+	// When we finish the function, we'll process its closure variables
+	// and find xN and pop it off the list using:
+	//
+	//     x1 := xN.Defn
+	//     x1.Innermost = xN.Outer
+	//
+	// We leave x1.Innermost set so that we can still get to the original
+	// variable quickly. Not shown here, but once we're
+	// done parsing a function and no longer need xN.Outer for the
+	// lexical x reference links as described above, funcLit
+	// recomputes xN.Outer as the semantic x reference link tree,
+	// even filling in x in intermediate closures that might not
+	// have mentioned it along the way to inner closures that did.
+	// See funcLit for details.
+	//
+	// During the eventual compilation, then, for closure variables we have:
+	//
+	//     xN.Defn = original variable
+	//     xN.Outer = variable captured in next outward scope
+	//                to make closure where xN appears
+	//
+	// Because of the sharding of pieces of the node, x.Defn means x.Name.Defn
+	// and x.Innermost/Outer means x.Name.Param.Innermost/Outer.
+	Innermost *Name
+	Outer     *Name
+}
+
+// isExpr is a marker method identifying Name as an expression node.
+func (n *Name) isExpr() {}
+
+// CloneName makes a cloned copy of the name.
+// It's not ir.Copy(n) because in general that operation is a mistake on names,
+// which uniquely identify variables.
+// Callers must use n.CloneName to make clear they intend to create a separate name.
+func (n *Name) CloneName() *Name { c := *n; return &c }
+
+// TypeDefn returns the type definition for a named OTYPE.
+// That is, given "type T Defn", it returns Defn.
+// It is used by package types.
+func (n *Name) TypeDefn() *types.Type {
+	return n.Ntype.Type()
+}
+
+// RecordFrameOffset records the frame offset for the name.
+// It is used by package types when laying out function arguments.
+// When the name has an on-stack copy (moved func params), the offset
+// is recorded on the copy and the heap name's own offset is zeroed.
+func (n *Name) RecordFrameOffset(offset int64) {
+	if n.Stackcopy == nil {
+		n.SetFrameOffset(offset)
+		return
+	}
+	n.Stackcopy.SetFrameOffset(offset)
+	n.SetFrameOffset(0)
+}
+
+// NewNameAt returns a new ONAME Node associated with symbol s at position pos.
+// The caller is responsible for setting Curfn. It panics (Fatalf) on a nil sym.
+func NewNameAt(pos src.XPos, sym *types.Sym) *Name {
+	if sym == nil {
+		base.Fatalf("NewNameAt nil")
+	}
+	return newNameAt(pos, ONAME, sym)
+}
+
+// NewIota returns a new OIOTA Node. It panics (Fatalf) on a nil sym.
+func NewIota(pos src.XPos, sym *types.Sym) *Name {
+	if sym == nil {
+		base.Fatalf("NewIota nil")
+	}
+	return newNameAt(pos, OIOTA, sym)
+}
+
+// NewDeclNameAt returns a new Name associated with symbol s at position pos.
+// The caller is responsible for setting Curfn.
+// op must be one of ONAME, OTYPE, OLITERAL.
+func NewDeclNameAt(pos src.XPos, op Op, sym *types.Sym) *Name {
+	if sym == nil {
+		base.Fatalf("NewDeclNameAt nil")
+	}
+	switch op {
+	case ONAME, OTYPE, OLITERAL:
+		// ok
+	default:
+		base.Fatalf("NewDeclNameAt op %v", op)
+	}
+	return newNameAt(pos, op, sym)
+}
+
+// NewConstAt returns a new OLITERAL Node associated with symbol s at position pos,
+// with the given type and constant value.
+func NewConstAt(pos src.XPos, sym *types.Sym, typ *types.Type, val constant.Value) *Name {
+	if sym == nil {
+		base.Fatalf("NewConstAt nil")
+	}
+	n := newNameAt(pos, OLITERAL, sym)
+	n.SetType(typ)
+	n.SetVal(val)
+	return n
+}
+
+// newNameAt is like NewNameAt but allows sym == nil.
+func newNameAt(pos src.XPos, op Op, sym *types.Sym) *Name {
+	n := new(Name)
+	n.op = op
+	n.pos = pos
+	n.orig = n // a fresh Name is its own original
+	n.sym = sym
+	return n
+}
+
+func (n *Name) Name() *Name         { return n }
+func (n *Name) Sym() *types.Sym     { return n.sym }
+func (n *Name) SetSym(x *types.Sym) { n.sym = x }
+func (n *Name) SubOp() Op           { return n.BuiltinOp }
+func (n *Name) SetSubOp(x Op)       { n.BuiltinOp = x }
+func (n *Name) Class() Class        { return n.Class_ }
+func (n *Name) SetClass(x Class)    { n.Class_ = x }
+func (n *Name) Func() *Func         { return n.fn }
+func (n *Name) SetFunc(x *Func)     { n.fn = x }
+
+// Offset is unsupported on Name; use FrameOffset or Iota instead.
+// SetOffset panics on any nonzero value.
+func (n *Name) Offset() int64 { panic("Name.Offset") }
+func (n *Name) SetOffset(x int64) {
+	if x != 0 {
+		panic("Name.SetOffset")
+	}
+}
+
+// FrameOffset and Iota share the Offset_ field; which is meaningful
+// depends on the node's op.
+func (n *Name) FrameOffset() int64     { return n.Offset_ }
+func (n *Name) SetFrameOffset(x int64) { n.Offset_ = x }
+func (n *Name) Iota() int64            { return n.Offset_ }
+func (n *Name) SetIota(x int64)        { n.Offset_ = x }
+
+// Marker methods: a Name may be used as a type expression,
+// an SSA symbol, and an SSA aux value.
+func (*Name) CanBeNtype()    {}
+func (*Name) CanBeAnSSASym() {}
+func (*Name) CanBeAnSSAAux() {}
+
+// Pragma returns the PragmaFlag for n, which must be for an OTYPE.
+func (n *Name) Pragma() PragmaFlag { return n.pragma }
+
+// SetPragma sets the PragmaFlag for n, which must be for an OTYPE.
+func (n *Name) SetPragma(flag PragmaFlag) { n.pragma = flag }
+
+// Alias reports whether n, which must be for an OTYPE, is a type alias.
+func (n *Name) Alias() bool { return n.flags&nameAlias != 0 }
+
+// SetAlias sets whether n, which must be for an OTYPE, is a type alias.
+func (n *Name) SetAlias(alias bool) { n.flags.set(nameAlias, alias) }
+
+// Single-bit flags packed into Name.flags (bitset16).
+const (
+	nameCaptured              = 1 << iota // is the variable captured by a closure
+	nameReadonly
+	nameByval                 // is the variable captured by value or by reference
+	nameNeedzero              // if it contains pointers, needs to be zeroed on function entry
+	nameAutoTemp              // is the variable a temporary (implies no dwarf info. reset if escapes to heap)
+	nameUsed                  // for variable declared and not used error
+	nameIsClosureVar          // PAUTOHEAP closure pseudo-variable; original at n.Name.Defn
+	nameIsOutputParamHeapAddr // pointer to a result parameter's heap copy
+	nameAssigned              // is the variable ever assigned to
+	nameAddrtaken             // address taken, even if not moved to heap
+	nameInlFormal             // PAUTO created by inliner, derived from callee formal
+	nameInlLocal              // PAUTO created by inliner, derived from callee local
+	nameOpenDeferSlot         // if temporary var storing info for open-coded defers
+	nameLibfuzzerExtraCounter // if PEXTERN should be assigned to __libfuzzer_extra_counters section
+	nameIsDDD                 // is function argument a ...
+	nameAlias                 // is type name an alias
+)
+
+// Flag getters; see the flag constants above for meanings.
+func (n *Name) Captured() bool              { return n.flags&nameCaptured != 0 }
+func (n *Name) Readonly() bool              { return n.flags&nameReadonly != 0 }
+func (n *Name) Byval() bool                 { return n.flags&nameByval != 0 }
+func (n *Name) Needzero() bool              { return n.flags&nameNeedzero != 0 }
+func (n *Name) AutoTemp() bool              { return n.flags&nameAutoTemp != 0 }
+func (n *Name) Used() bool                  { return n.flags&nameUsed != 0 }
+func (n *Name) IsClosureVar() bool          { return n.flags&nameIsClosureVar != 0 }
+func (n *Name) IsOutputParamHeapAddr() bool { return n.flags&nameIsOutputParamHeapAddr != 0 }
+func (n *Name) Assigned() bool              { return n.flags&nameAssigned != 0 }
+func (n *Name) Addrtaken() bool             { return n.flags&nameAddrtaken != 0 }
+func (n *Name) InlFormal() bool             { return n.flags&nameInlFormal != 0 }
+func (n *Name) InlLocal() bool              { return n.flags&nameInlLocal != 0 }
+func (n *Name) OpenDeferSlot() bool         { return n.flags&nameOpenDeferSlot != 0 }
+func (n *Name) LibfuzzerExtraCounter() bool { return n.flags&nameLibfuzzerExtraCounter != 0 }
+func (n *Name) IsDDD() bool                 { return n.flags&nameIsDDD != 0 }
+
+// Flag setters. setReadonly is unexported: use MarkReadonly, which
+// also updates the linker symbol.
+func (n *Name) SetCaptured(b bool)              { n.flags.set(nameCaptured, b) }
+func (n *Name) setReadonly(b bool)              { n.flags.set(nameReadonly, b) }
+func (n *Name) SetByval(b bool)                 { n.flags.set(nameByval, b) }
+func (n *Name) SetNeedzero(b bool)              { n.flags.set(nameNeedzero, b) }
+func (n *Name) SetAutoTemp(b bool)              { n.flags.set(nameAutoTemp, b) }
+func (n *Name) SetUsed(b bool)                  { n.flags.set(nameUsed, b) }
+func (n *Name) SetIsClosureVar(b bool)          { n.flags.set(nameIsClosureVar, b) }
+func (n *Name) SetIsOutputParamHeapAddr(b bool) { n.flags.set(nameIsOutputParamHeapAddr, b) }
+func (n *Name) SetAssigned(b bool)              { n.flags.set(nameAssigned, b) }
+func (n *Name) SetAddrtaken(b bool)             { n.flags.set(nameAddrtaken, b) }
+func (n *Name) SetInlFormal(b bool)             { n.flags.set(nameInlFormal, b) }
+func (n *Name) SetInlLocal(b bool)              { n.flags.set(nameInlLocal, b) }
+func (n *Name) SetOpenDeferSlot(b bool)         { n.flags.set(nameOpenDeferSlot, b) }
+func (n *Name) SetLibfuzzerExtraCounter(b bool) { n.flags.set(nameLibfuzzerExtraCounter, b) }
+func (n *Name) SetIsDDD(b bool)                 { n.flags.set(nameIsDDD, b) }
+
+// MarkReadonly indicates that n is an ONAME with readonly contents.
+// It sets the readonly flag and marks the linker symbol as SRODATA.
+func (n *Name) MarkReadonly() {
+	if n.Op() != ONAME {
+		base.Fatalf("Node.MarkReadonly %v", n.Op())
+	}
+	n.Name().setReadonly(true)
+	// Mark the linksym as readonly immediately
+	// so that the SSA backend can use this information.
+	// It will be overridden later during dumpglobls.
+	n.Sym().Linksym().Type = objabi.SRODATA
+}
+
+// Val returns the constant.Value for the node.
+// If no value has been set, it returns constant.MakeUnknown()
+// rather than a nil Value.
+func (n *Name) Val() constant.Value {
+	if n.val == nil {
+		return constant.MakeUnknown()
+	}
+	return n.val
+}
+
+// SetVal sets the constant.Value for the node,
+// which must not have been used with SetOpt.
+// It panics unless the node is an OLITERAL, and asserts that
+// v is valid for the node's type.
+func (n *Name) SetVal(v constant.Value) {
+	if n.op != OLITERAL {
+		panic(n.no("SetVal"))
+	}
+	AssertValidTypeForConst(n.Type(), v)
+	n.val = v
+}
+
+// SameSource reports whether two nodes refer to the same source
+// element.
+//
+// It exists to help incrementally migrate the compiler towards
+// allowing the introduction of IdentExpr (#42990). Once we have
+// IdentExpr, it will no longer be safe to directly compare Node
+// values to tell if they refer to the same Name. Instead, code will
+// need to explicitly get references to the underlying Name object(s),
+// and compare those instead.
+//
+// It will still be safe to compare Nodes directly for checking if two
+// nodes are syntactically the same. The SameSource function exists to
+// indicate code that intentionally compares Nodes for syntactic
+// equality as opposed to code that has yet to be updated in
+// preparation for IdentExpr.
+//
+// Today this is simply interface identity of the two Node values.
+func SameSource(n1, n2 Node) bool {
+	return n1 == n2
+}
+
+// Uses reports whether expression x is a (direct) use of the given
+// variable. v must be a non-nil ONAME; anything else is a fatal
+// internal error.
+func Uses(x Node, v *Name) bool {
+	if v == nil || v.Op() != ONAME {
+		// Fixed: the message previously said "RefersTo", a stale name
+		// for this function, which made the diagnostic misleading.
+		base.Fatalf("Uses bad Name: %v", v)
+	}
+	return x.Op() == ONAME && x.Name() == v
+}
+
+// DeclaredBy reports whether expression x refers (directly) to a
+// variable that was declared by the given statement.
+// The comparison uses SameSource on the variable's Defn link.
+func DeclaredBy(x, stmt Node) bool {
+	if stmt == nil {
+		base.Fatalf("DeclaredBy nil")
+	}
+	return x.Op() == ONAME && SameSource(x.Name().Defn, stmt)
+}
+
+// The Class of a variable/function describes the "storage class"
+// of a variable or function. During parsing, storage classes are
+// called declaration contexts.
+type Class uint8
+
+//go:generate stringer -type=Class
+const (
+	Pxxx      Class = iota // no class; used during ssa conversion to indicate pseudo-variables
+	PEXTERN                // global variables
+	PAUTO                  // local variables
+	PAUTOHEAP              // local variables or parameters moved to heap
+	PPARAM                 // input arguments
+	PPARAMOUT              // output results
+	PFUNC                  // global functions
+
+	// Careful: Class is stored in three bits in Node.flags.
+	_ = uint((1 << 3) - iota) // static assert for iota <= (1 << 3)
+)
+
+// An Embed holds a position and the patterns of embedded files
+// for an ONAME var (see Name.Embed).
+type Embed struct {
+	Pos      src.XPos
+	Patterns []string
+}
+
+// A PkgName is an identifier referring to an imported package (OPACK).
+type PkgName struct {
+	miniNode
+	sym  *types.Sym
+	Pkg  *types.Pkg
+	Used bool
+}
+
+func (p *PkgName) Sym() *types.Sym { return p.sym }
+
+// CanBeNtype is a marker method: a PkgName may appear where a type
+// expression (Ntype) is expected.
+func (*PkgName) CanBeNtype() {}
+
+// NewPkgName returns a new OPACK node referring to pkg under the
+// local name sym at position pos.
+func NewPkgName(pos src.XPos, sym *types.Sym, pkg *types.Pkg) *PkgName {
+	p := &PkgName{sym: sym, Pkg: pkg}
+	p.op = OPACK
+	p.pos = pos
+	return p
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// “Abstract” syntax representation.
+
+package ir
+
+import (
+ "fmt"
+ "go/constant"
+ "sort"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// A Node is the abstract interface to an IR node.
+// Concrete node types typically embed miniNode, which supplies
+// default (often panicking) implementations of most of these methods.
+type Node interface {
+	// Formatting
+	Format(s fmt.State, verb rune)
+
+	// Source position.
+	Pos() src.XPos
+	SetPos(x src.XPos)
+
+	// For making copies. For Copy and SepCopy.
+	copy() Node
+
+	// Generic traversal/edit hooks; implementations are generated
+	// by mknode.go.
+	doChildren(func(Node) error) error
+	editChildren(func(Node) Node)
+
+	// Abstract graph structure, for generic traversals.
+	Op() Op
+	SetOp(x Op)
+	SubOp() Op
+	SetSubOp(x Op)
+	Left() Node
+	SetLeft(x Node)
+	Right() Node
+	SetRight(x Node)
+	Init() Nodes
+	PtrInit() *Nodes
+	SetInit(x Nodes)
+	Body() Nodes
+	PtrBody() *Nodes
+	SetBody(x Nodes)
+	List() Nodes
+	SetList(x Nodes)
+	PtrList() *Nodes
+	Rlist() Nodes
+	SetRlist(x Nodes)
+	PtrRlist() *Nodes
+
+	// Fields specific to certain Ops only.
+	Type() *types.Type
+	SetType(t *types.Type)
+	Func() *Func
+	Name() *Name
+	Sym() *types.Sym
+	SetSym(x *types.Sym)
+	Offset() int64
+	SetOffset(x int64)
+	Class() Class
+	SetClass(x Class)
+	Likely() bool
+	SetLikely(x bool)
+	SliceBounds() (low, high, max Node)
+	SetSliceBounds(low, high, max Node)
+	Iota() int64
+	SetIota(x int64)
+	Colas() bool
+	SetColas(x bool)
+	NoInline() bool
+	SetNoInline(x bool)
+	Transient() bool
+	SetTransient(x bool)
+	Implicit() bool
+	SetImplicit(x bool)
+	IsDDD() bool
+	SetIsDDD(x bool)
+	IndexMapLValue() bool
+	SetIndexMapLValue(x bool)
+	ResetAux()
+	HasBreak() bool
+	SetHasBreak(x bool)
+	MarkReadonly()
+	Val() constant.Value
+	SetVal(v constant.Value)
+
+	// Storage for analysis passes.
+	Esc() uint16
+	SetEsc(x uint16)
+	Walkdef() uint8
+	SetWalkdef(x uint8)
+	Opt() interface{}
+	SetOpt(x interface{})
+	Diag() bool
+	SetDiag(x bool)
+	Bounded() bool
+	SetBounded(x bool)
+	Typecheck() uint8
+	SetTypecheck(x uint8)
+	NonNil() bool
+	MarkNonNil()
+	HasCall() bool
+	SetHasCall(x bool)
+}
+
+// Line returns n's position as a string. If n has been inlined,
+// it uses the outermost position where n has been inlined.
+func Line(n Node) string {
+	return base.FmtPos(n.Pos())
+}
+
+// IsSynthetic reports whether n's name was generated by the compiler,
+// i.e. starts with '.' or '~'.
+// NOTE(review): assumes n.Sym() is non-nil and its Name non-empty —
+// name[0] would panic otherwise.
+func IsSynthetic(n Node) bool {
+	name := n.Sym().Name
+	return name[0] == '.' || name[0] == '~'
+}
+
+// IsAutoTmp indicates if n was created by the compiler as a temporary,
+// based on the setting of the .AutoTemp flag in n's Name.
+func IsAutoTmp(n Node) bool {
+	if n == nil || n.Op() != ONAME {
+		return false
+	}
+	return n.Name().AutoTemp()
+}
+
+// MayBeShared reports whether n may occur in multiple places in the AST.
+// Extra care must be taken when mutating such a node.
+func MayBeShared(n Node) bool {
+	switch n.Op() {
+	case ONAME, OLITERAL, ONIL, OTYPE:
+		return true
+	}
+	return false
+}
+
+//go:generate stringer -type=Op -trimprefix=O
+
+// An Op is an IR node operation, one of the O* constants below.
+type Op uint8
+// Node ops.
+const (
+ OXXX Op = iota
+
+ // names
+ ONAME // var or func name
+ // Unnamed arg or return value: f(int, string) (int, error) { etc }
+ // Also used for a qualified package identifier that hasn't been resolved yet.
+ ONONAME
+ OTYPE // type name
+ OPACK // import
+ OLITERAL // literal
+ ONIL // nil
+
+ // expressions
+ OADD // Left + Right
+ OSUB // Left - Right
+ OOR // Left | Right
+ OXOR // Left ^ Right
+ OADDSTR // +{List} (string addition, list elements are strings)
+ OADDR // &Left
+ OANDAND // Left && Right
+ OAPPEND // append(List); after walk, Left may contain elem type descriptor
+ OBYTES2STR // Type(Left) (Type is string, Left is a []byte)
+ OBYTES2STRTMP // Type(Left) (Type is string, Left is a []byte, ephemeral)
+ ORUNES2STR // Type(Left) (Type is string, Left is a []rune)
+ OSTR2BYTES // Type(Left) (Type is []byte, Left is a string)
+ OSTR2BYTESTMP // Type(Left) (Type is []byte, Left is a string, ephemeral)
+ OSTR2RUNES // Type(Left) (Type is []rune, Left is a string)
+ // Left = Right or (if Colas=true) Left := Right
+ // If Colas, then Ninit includes a DCL node for Left.
+ OAS
+ // List = Rlist (x, y, z = a, b, c) or (if Colas=true) List := Rlist
+ // If Colas, then Ninit includes DCL nodes for List
+ OAS2
+ OAS2DOTTYPE // List = Right (x, ok = I.(int))
+ OAS2FUNC // List = Right (x, y = f())
+ OAS2MAPR // List = Right (x, ok = m["foo"])
+ OAS2RECV // List = Right (x, ok = <-c)
+ OASOP // Left Etype= Right (x += y)
+ OCALL // Left(List) (function call, method call or type conversion)
+
+ // OCALLFUNC, OCALLMETH, and OCALLINTER have the same structure.
+ // Prior to walk, they are: Left(List), where List is all regular arguments.
+ // After walk, List is a series of assignments to temporaries,
+ // and Rlist is an updated set of arguments.
+ // Nbody is all OVARLIVE nodes that are attached to OCALLxxx.
+ // TODO(josharian/khr): Use Ninit instead of List for the assignments to temporaries. See CL 114797.
+ OCALLFUNC // Left(List/Rlist) (function call f(args))
+ OCALLMETH // Left(List/Rlist) (direct method call x.Method(args))
+ OCALLINTER // Left(List/Rlist) (interface method call x.Method(args))
+ OCALLPART // Left.Right (method expression x.Method, not called)
+ OCAP // cap(Left)
+ OCLOSE // close(Left)
+ OCLOSURE // func Type { Func.Closure.Nbody } (func literal)
+ OCOMPLIT // Right{List} (composite literal, not yet lowered to specific form)
+ OMAPLIT // Type{List} (composite literal, Type is map)
+ OSTRUCTLIT // Type{List} (composite literal, Type is struct)
+ OARRAYLIT // Type{List} (composite literal, Type is array)
+ OSLICELIT // Type{List} (composite literal, Type is slice) Right.Int64() = slice length.
+ OPTRLIT // &Left (left is composite literal)
+ OCONV // Type(Left) (type conversion)
+ OCONVIFACE // Type(Left) (type conversion, to interface)
+ OCONVNOP // Type(Left) (type conversion, no effect)
+ OCOPY // copy(Left, Right)
+ ODCL // var Left (declares Left of type Left.Type)
+
+ // Used during parsing but don't last.
+ ODCLFUNC // func f() or func (r) f()
+ ODCLCONST // const pi = 3.14
+ ODCLTYPE // type Int int or type Int = int
+
+ ODELETE // delete(List)
+ ODOT // Left.Sym (Left is of struct type)
+ ODOTPTR // Left.Sym (Left is of pointer to struct type)
+ ODOTMETH // Left.Sym (Left is non-interface, Right is method name)
+ ODOTINTER // Left.Sym (Left is interface, Right is method name)
+ OXDOT // Left.Sym (before rewrite to one of the preceding)
+ ODOTTYPE // Left.Right or Left.Type (.Right during parsing, .Type once resolved); after walk, .Right contains address of interface type descriptor and .Right.Right contains address of concrete type descriptor
+ ODOTTYPE2 // Left.Right or Left.Type (.Right during parsing, .Type once resolved; on rhs of OAS2DOTTYPE); after walk, .Right contains address of interface type descriptor
+ OEQ // Left == Right
+ ONE // Left != Right
+ OLT // Left < Right
+ OLE // Left <= Right
+ OGE // Left >= Right
+ OGT // Left > Right
+ ODEREF // *Left
+ OINDEX // Left[Right] (index of array or slice)
+ OINDEXMAP // Left[Right] (index of map)
+ OKEY // Left:Right (key:value in struct/array/map literal)
+ OSTRUCTKEY // Sym:Left (key:value in struct literal, after type checking)
+ OLEN // len(Left)
+ OMAKE // make(List) (before type checking converts to one of the following)
+ OMAKECHAN // make(Type, Left) (type is chan)
+ OMAKEMAP // make(Type, Left) (type is map)
+ OMAKESLICE // make(Type, Left, Right) (type is slice)
+ OMAKESLICECOPY // makeslicecopy(Type, Left, Right) (type is slice; Left is length and Right is the copied from slice)
+ // OMAKESLICECOPY is created by the order pass and corresponds to:
+ // s = make(Type, Left); copy(s, Right)
+ //
+ // Bounded can be set on the node when Left == len(Right) is known at compile time.
+ //
+ // This node is created so the walk pass can optimize this pattern which would
+ // otherwise be hard to detect after the order pass.
+ OMUL // Left * Right
+ ODIV // Left / Right
+ OMOD // Left % Right
+ OLSH // Left << Right
+ ORSH // Left >> Right
+ OAND // Left & Right
+ OANDNOT // Left &^ Right
+ ONEW // new(Left); corresponds to calls to new in source code
+ ONEWOBJ // runtime.newobject(n.Type); introduced by walk; Left is type descriptor
+ ONOT // !Left
+ OBITNOT // ^Left
+ OPLUS // +Left
+ ONEG // -Left
+ OOROR // Left || Right
+ OPANIC // panic(Left)
+ OPRINT // print(List)
+ OPRINTN // println(List)
+ OPAREN // (Left)
+ OSEND // Left <- Right
+ OSLICE // Left[List[0] : List[1]] (Left is untypechecked or slice)
+ OSLICEARR // Left[List[0] : List[1]] (Left is array)
+ OSLICESTR // Left[List[0] : List[1]] (Left is string)
+ OSLICE3 // Left[List[0] : List[1] : List[2]] (Left is untypedchecked or slice)
+ OSLICE3ARR // Left[List[0] : List[1] : List[2]] (Left is array)
+ OSLICEHEADER // sliceheader{Left, List[0], List[1]} (Left is unsafe.Pointer, List[0] is length, List[1] is capacity)
+ ORECOVER // recover()
+ ORECV // <-Left
+ ORUNESTR // Type(Left) (Type is string, Left is rune)
+ OSELRECV2 // like OAS2: List = Rlist where len(List)=2, len(Rlist)=1, Rlist[0].Op = ORECV (appears as .Left of OCASE)
+ OIOTA // iota
+ OREAL // real(Left)
+ OIMAG // imag(Left)
+ OCOMPLEX // complex(Left, Right) or complex(List[0]) where List[0] is a 2-result function call
+ OALIGNOF // unsafe.Alignof(Left)
+ OOFFSETOF // unsafe.Offsetof(Left)
+ OSIZEOF // unsafe.Sizeof(Left)
+ OMETHEXPR // method expression
+ OSTMTEXPR // statement expression (Init; Left)
+
+ // statements
+ OBLOCK // { List } (block of code)
+ OBREAK // break [Sym]
+ // OCASE: case List: Nbody (List==nil means default)
+ // For OTYPESW, List is a OTYPE node for the specified type (or OLITERAL
+ // for nil), and, if a type-switch variable is specified, Rlist is an
+ // ONAME for the version of the type-switch variable with the specified
+ // type.
+ OCASE
+ OCONTINUE // continue [Sym]
+ ODEFER // defer Left (Left must be call)
+ OFALL // fallthrough
+ OFOR // for Ninit; Left; Right { Nbody }
+ // OFORUNTIL is like OFOR, but the test (Left) is applied after the body:
+ // Ninit
+ // top: { Nbody } // Execute the body at least once
+ // cont: Right
+ // if Left { // And then test the loop condition
+ // List // Before looping to top, execute List
+ // goto top
+ // }
+ // OFORUNTIL is created by walk. There's no way to write this in Go code.
+ OFORUNTIL
+ OGOTO // goto Sym
+ OIF // if Ninit; Left { Nbody } else { Rlist }
+ OLABEL // Sym:
+ OGO // go Left (Left must be call)
+ ORANGE // for List = range Right { Nbody }
+ ORETURN // return List
+ OSELECT // select { List } (List is list of OCASE)
+ OSWITCH // switch Ninit; Left { List } (List is a list of OCASE)
+ // OTYPESW: Left := Right.(type) (appears as .Left of OSWITCH)
+ // Left is nil if there is no type-switch variable
+ OTYPESW
+
+ // types
+ OTCHAN // chan int
+ OTMAP // map[string]int
+ OTSTRUCT // struct{}
+ OTINTER // interface{}
+ // OTFUNC: func() - Left is receiver field, List is list of param fields, Rlist is
+ // list of result fields.
+ OTFUNC
+ OTARRAY // [8]int or [...]int
+ OTSLICE // []int
+
+ // misc
+ OINLCALL // intermediary representation of an inlined call.
+ OEFACE // itable and data words of an empty-interface value.
+ OITAB // itable word of an interface value.
+ OIDATA // data word of an interface value in Left
+ OSPTR // base pointer of a slice or string.
+ OCLOSUREREAD // read from inside closure struct at beginning of closure function
+ OCFUNC // reference to c function pointer (not go func value)
+ OCHECKNIL // emit code to ensure pointer/interface not nil
+ OVARDEF // variable is about to be fully initialized
+ OVARKILL // variable is dead
+ OVARLIVE // variable is alive
+ ORESULT // result of a function call; Xoffset is stack offset
+ OINLMARK // start of an inlined body, with file/line of caller. Xoffset is an index into the inline tree.
+ ONAMEOFFSET // offset within a name
+
+ // arch-specific opcodes
+ ORETJMP // return to other function
+ OGETG // runtime.getg() (read g pointer)
+
+ OEND
+)
+
+// Nodes is a slice of Node.
+// For fields that are not used in most nodes, a Nodes is used to save
+// space: its zero value (nil) is an empty, ready-to-use list.
+type Nodes []Node
+
+// immutableEmptyNodes is an immutable, empty Nodes list.
+// The methods that would modify it panic instead (see mutate);
+// as a special case, calling Set with an empty slice is a no-op.
+var immutableEmptyNodes = Nodes{}
+
+// AsNodes returns a slice of Node as a Nodes value.
+func AsNodes(s []Node) Nodes {
+ return s
+}
+
+// Slice returns the entries in Nodes as a slice.
+// Changes to the slice entries (as in s[i] = n) will be reflected in
+// the Nodes, since both share the same backing array.
+func (n Nodes) Slice() []Node {
+ return n
+}
+
+// Len returns the number of entries in Nodes.
+// It is safe to call on a nil Nodes (returns 0).
+func (n Nodes) Len() int {
+ return len(n)
+}
+
+// Index returns the i'th (zero-based) element of Nodes.
+// It panics if n does not have at least i+1 elements.
+func (n Nodes) Index(i int) Node {
+ return n[i]
+}
+
+// First returns the first element of Nodes (same as n.Index(0)).
+// It panics if n has no elements.
+func (n Nodes) First() Node {
+ return n[0]
+}
+
+// Second returns the second element of Nodes (same as n.Index(1)).
+// It panics if n has fewer than two elements.
+func (n Nodes) Second() Node {
+ return n[1]
+}
+
+// mutate guards the mutating Nodes methods: it panics if n is the
+// shared immutableEmptyNodes list, which must never be modified.
+func (n *Nodes) mutate() {
+ if n == &immutableEmptyNodes {
+ panic("immutable Nodes.Set")
+ }
+}
+
+// Set sets n to a slice.
+// This takes ownership of the slice.
+// As a special case, setting immutableEmptyNodes to an empty slice is
+// a no-op; any other mutation of it panics (via mutate).
+func (n *Nodes) Set(s []Node) {
+ if n == &immutableEmptyNodes {
+ if len(s) == 0 {
+ // Allow immutableEmptyNodes.Set(nil) (a no-op).
+ return
+ }
+ n.mutate()
+ }
+ *n = s
+}
+
+// Set1 sets n to a slice containing a single node.
+// It panics if n is the immutableEmptyNodes list.
+func (n *Nodes) Set1(n1 Node) {
+ n.mutate()
+ *n = []Node{n1}
+}
+
+// Set2 sets n to a slice containing two nodes.
+// It panics if n is the immutableEmptyNodes list.
+func (n *Nodes) Set2(n1, n2 Node) {
+ n.mutate()
+ *n = []Node{n1, n2}
+}
+
+// Set3 sets n to a slice containing three nodes.
+// It panics if n is the immutableEmptyNodes list.
+func (n *Nodes) Set3(n1, n2, n3 Node) {
+ n.mutate()
+ *n = []Node{n1, n2, n3}
+}
+
+// MoveNodes sets n to the contents of n2, then clears n2.
+// NOTE(review): n2 is cleared without an immutability check, so
+// callers must not pass &immutableEmptyNodes as n2 — confirm.
+func (n *Nodes) MoveNodes(n2 *Nodes) {
+ n.mutate()
+ *n = *n2
+ *n2 = nil
+}
+
+// SetIndex sets the i'th (zero-based) element of Nodes to node.
+// It panics if n does not have at least i+1 elements.
+func (n Nodes) SetIndex(i int, node Node) {
+ n[i] = node
+}
+
+// SetFirst sets the first element of Nodes to node.
+// It panics if n does not have at least one element.
+func (n Nodes) SetFirst(node Node) {
+ n[0] = node
+}
+
+// SetSecond sets the second element of Nodes to node.
+// It panics if n does not have at least two elements.
+func (n Nodes) SetSecond(node Node) {
+ n[1] = node
+}
+
+// Addr returns the address of the i'th element of Nodes.
+// It panics if n does not have at least i+1 elements.
+// The returned pointer aliases the slice entry, so writes through it
+// are visible via n.
+func (n Nodes) Addr(i int) *Node {
+ return &n[i]
+}
+
+// Append appends entries to Nodes.
+// Appending zero entries is a no-op, allowed even on
+// immutableEmptyNodes; otherwise mutate guards against it.
+func (n *Nodes) Append(a ...Node) {
+ if len(a) == 0 {
+ return
+ }
+ n.mutate()
+ *n = append(*n, a...)
+}
+
+// Prepend prepends entries to Nodes.
+// If a slice is passed in, this will take ownership of it:
+// the result may share a's backing array.
+func (n *Nodes) Prepend(a ...Node) {
+ if len(a) == 0 {
+ return
+ }
+ n.mutate()
+ *n = append(a, *n...)
+}
+
+// Take clears n, returning its former contents.
+// NOTE(review): Take does not call mutate, so calling it on
+// immutableEmptyNodes would clear the shared empty list — confirm
+// callers never do this.
+func (n *Nodes) Take() []Node {
+ ret := *n
+ *n = nil
+ return ret
+}
+
+// AppendNodes appends the contents of *n2 to n, then clears n2.
+// n is guarded by mutate; n2 is cleared via Take, which performs no
+// immutability check.
+func (n *Nodes) AppendNodes(n2 *Nodes) {
+ n.mutate()
+ *n = append(*n, n2.Take()...)
+}
+
+// Copy returns a copy of the content of the slice.
+// A nil receiver yields nil (not an empty non-nil slice).
+func (n Nodes) Copy() Nodes {
+ if n == nil {
+ return nil
+ }
+ c := make(Nodes, n.Len())
+ copy(c, n)
+ return c
+}
+
+// NameQueue is a FIFO queue of *Name. The zero value of NameQueue is
+// a ready-to-use empty queue.
+// It is implemented as a growable ring buffer: head and tail only
+// increase and are reduced modulo len(ring) on each access.
+type NameQueue struct {
+ ring []*Name
+ head, tail int
+}
+
+// Empty reports whether q contains no Names.
+func (q *NameQueue) Empty() bool {
+ return q.head == q.tail
+}
+
+// PushRight appends n to the right of the queue, growing the ring
+// buffer (doubling its size) when it is full.
+func (q *NameQueue) PushRight(n *Name) {
+ if len(q.ring) == 0 {
+ q.ring = make([]*Name, 16)
+ } else if q.head+len(q.ring) == q.tail {
+ // Grow the ring.
+ nring := make([]*Name, len(q.ring)*2)
+ // Copy the old elements.
+ part := q.ring[q.head%len(q.ring):]
+ if q.tail-q.head <= len(part) {
+ // All live elements are contiguous from head onward.
+ part = part[:q.tail-q.head]
+ copy(nring, part)
+ } else {
+ // Live elements wrap past the end of the ring; copy in two parts.
+ pos := copy(nring, part)
+ copy(nring[pos:], q.ring[:q.tail%len(q.ring)])
+ }
+ q.ring, q.head, q.tail = nring, 0, q.tail-q.head
+ }
+
+ q.ring[q.tail%len(q.ring)] = n
+ q.tail++
+}
+
+// PopLeft pops a Name from the left of the queue (FIFO order).
+// It panics if q is empty.
+func (q *NameQueue) PopLeft() *Name {
+ if q.Empty() {
+ panic("dequeue empty")
+ }
+ n := q.ring[q.head%len(q.ring)]
+ q.head++
+ return n
+}
+
+// NameSet is a set of Names.
+// A nil NameSet may be queried with Has; Add allocates the map lazily.
+type NameSet map[*Name]struct{}
+
+// Has reports whether s contains n.
+// It is safe to call on a nil NameSet.
+func (s NameSet) Has(n *Name) bool {
+ _, isPresent := s[n]
+ return isPresent
+}
+
+// Add adds n to s, allocating the underlying map on first use.
+func (s *NameSet) Add(n *Name) {
+ if *s == nil {
+ *s = make(map[*Name]struct{})
+ }
+ (*s)[n] = struct{}{}
+}
+
+// Sorted returns the elements of s as a slice, sorted according to
+// less. s itself is not modified.
+func (s NameSet) Sorted(less func(*Name, *Name) bool) []*Name {
+ var res []*Name
+ for n := range s {
+ res = append(res, n)
+ }
+ sort.Slice(res, func(i, j int) bool { return less(res[i], res[j]) })
+ return res
+}
+
+// PragmaFlag is a bit set recording compiler pragma directives
+// (//go:... comments) that apply to a function or type.
+type PragmaFlag int16
+
+const (
+ // Func pragmas.
+ Nointerface PragmaFlag = 1 << iota
+ Noescape // func parameters don't escape
+ Norace // func must not have race detector annotations
+ Nosplit // func should not execute on separate stack
+ Noinline // func should not be inlined
+ NoCheckPtr // func should not be instrumented by checkptr
+ CgoUnsafeArgs // treat a pointer to one arg as a pointer to them all
+ UintptrEscapes // pointers converted to uintptr escape
+
+ // Runtime-only func pragmas.
+ // See ../../../../runtime/README.md for detailed descriptions.
+ Systemstack // func must run on system stack
+ Nowritebarrier // emit compiler error instead of write barrier
+ Nowritebarrierrec // error on write barrier in this or recursive callees
+ Yeswritebarrierrec // cancels Nowritebarrierrec in this function and callees
+
+ // Runtime and cgo type pragmas.
+ NotInHeap // values of this type must not be heap allocated
+
+ // Go command pragmas.
+ GoBuildPragma
+)
+
+// AsNode converts a types.Object to a Node, returning nil for a nil
+// Object. It panics if a non-nil Object does not implement Node.
+func AsNode(n types.Object) Node {
+ if n == nil {
+ return nil
+ }
+ return n.(Node)
+}
+
+// BlankNode is the Node for the blank identifier (_).
+// NOTE(review): it is not initialized in this file — presumably set
+// during frontend setup; confirm before relying on it being non-nil.
+var BlankNode Node
+
+// IsConst reports whether n is a constant of kind ct
+// (as classified by ConstType).
+func IsConst(n Node, ct constant.Kind) bool {
+ return ConstType(n) == ct
+}
+
+// IsNil reports whether n represents the universal untyped zero value "nil".
+// Check Orig(n) because constant propagation may produce typed nil constants,
+// which don't exist in the Go spec.
+func IsNil(n Node) bool {
+ return n != nil && Orig(n).Op() == ONIL
+}
+
+// IsBlank reports whether n is the blank identifier (_).
+// A nil Node is not blank.
+func IsBlank(n Node) bool {
+ if n == nil {
+ return false
+ }
+ return n.Sym().IsBlank()
+}
+
+// IsMethod reports whether n is a method (its type has a receiver).
+// n must be a function or a method.
+func IsMethod(n Node) bool {
+ return n.Type().Recv() != nil
+}
+
+// Nod is like NodAt, using the current source position (base.Pos).
+func Nod(op Op, nleft, nright Node) Node {
+ return NodAt(base.Pos, op, nleft, nright)
+}
+
+// NodAt returns a new node with kind op at position pos, dispatching
+// to the appropriate concrete constructor. Depending on op, nleft and
+// nright are passed through, ignored, or type-asserted (to Ntype for
+// composite-literal and type-assertion ops). It panics for ops that
+// have no constructor here, and for unary ops given a non-nil nright.
+func NodAt(pos src.XPos, op Op, nleft, nright Node) Node {
+ switch op {
+ default:
+ panic("NodAt " + op.String())
+ case OADD, OAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE,
+ OLSH, OLT, OMOD, OMUL, ONE, OOR, ORSH, OSUB, OXOR,
+ OCOPY, OCOMPLEX,
+ OEFACE:
+ return NewBinaryExpr(pos, op, nleft, nright)
+ case OADDR:
+ return NewAddrExpr(pos, nleft)
+ case OADDSTR:
+ return NewAddStringExpr(pos, nil)
+ case OANDAND, OOROR:
+ return NewLogicalExpr(pos, op, nleft, nright)
+ case OARRAYLIT, OCOMPLIT, OMAPLIT, OSTRUCTLIT, OSLICELIT:
+ var typ Ntype
+ if nright != nil {
+ typ = nright.(Ntype)
+ }
+ return NewCompLitExpr(pos, op, typ, nil)
+ case OAS:
+ return NewAssignStmt(pos, nleft, nright)
+ case OAS2, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV, OSELRECV2:
+ n := NewAssignListStmt(pos, op, nil, nil)
+ return n
+ case OASOP:
+ return NewAssignOpStmt(pos, OXXX, nleft, nright)
+ case OBITNOT, ONEG, ONOT, OPLUS, ORECV,
+ OALIGNOF, OCAP, OCLOSE, OIMAG, OLEN, ONEW, ONEWOBJ,
+ OOFFSETOF, OPANIC, OREAL, OSIZEOF,
+ OCHECKNIL, OCFUNC, OIDATA, OITAB, OSPTR, OVARDEF, OVARKILL, OVARLIVE:
+ if nright != nil {
+ panic("unary nright")
+ }
+ return NewUnaryExpr(pos, op, nleft)
+ case OBLOCK:
+ return NewBlockStmt(pos, nil)
+ case OBREAK, OCONTINUE, OFALL, OGOTO, ORETJMP:
+ return NewBranchStmt(pos, op, nil)
+ case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH,
+ OAPPEND, ODELETE, OGETG, OMAKE, OPRINT, OPRINTN, ORECOVER:
+ return NewCallExpr(pos, op, nleft, nil)
+ case OCASE:
+ return NewCaseStmt(pos, nil, nil)
+ case OCONV, OCONVIFACE, OCONVNOP, ORUNESTR:
+ return NewConvExpr(pos, op, nil, nleft)
+ case ODCL, ODCLCONST, ODCLTYPE:
+ return NewDecl(pos, op, nleft)
+ case ODCLFUNC:
+ return NewFunc(pos)
+ case ODEFER, OGO:
+ return NewGoDeferStmt(pos, op, nleft)
+ case ODEREF:
+ return NewStarExpr(pos, nleft)
+ case ODOT, ODOTPTR, ODOTMETH, ODOTINTER, OXDOT:
+ return NewSelectorExpr(pos, op, nleft, nil)
+ case ODOTTYPE, ODOTTYPE2:
+ var typ Ntype
+ if nright != nil {
+ typ = nright.(Ntype)
+ }
+ n := NewTypeAssertExpr(pos, nleft, typ)
+ if op != ODOTTYPE {
+ n.SetOp(op)
+ }
+ return n
+ case OFOR:
+ return NewForStmt(pos, nil, nleft, nright, nil)
+ case OIF:
+ return NewIfStmt(pos, nleft, nil, nil)
+ case OINDEX, OINDEXMAP:
+ n := NewIndexExpr(pos, nleft, nright)
+ if op != OINDEX {
+ n.SetOp(op)
+ }
+ return n
+ case OINLMARK:
+ return NewInlineMarkStmt(pos, types.BADWIDTH)
+ case OKEY:
+ return NewKeyExpr(pos, nleft, nright)
+ case OSTRUCTKEY:
+ return NewStructKeyExpr(pos, nil, nleft)
+ case OLABEL:
+ return NewLabelStmt(pos, nil)
+ case OLITERAL, OTYPE, OIOTA:
+ return newNameAt(pos, op, nil)
+ case OMAKECHAN, OMAKEMAP, OMAKESLICE, OMAKESLICECOPY:
+ return NewMakeExpr(pos, op, nleft, nright)
+ case ONIL:
+ return NewNilExpr(pos)
+ case OPACK:
+ return NewPkgName(pos, nil, nil)
+ case OPAREN:
+ return NewParenExpr(pos, nleft)
+ case ORANGE:
+ return NewRangeStmt(pos, nil, nright, nil)
+ case ORESULT:
+ return NewResultExpr(pos, nil, types.BADWIDTH)
+ case ORETURN:
+ return NewReturnStmt(pos, nil)
+ case OSELECT:
+ return NewSelectStmt(pos, nil)
+ case OSEND:
+ return NewSendStmt(pos, nleft, nright)
+ case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR:
+ return NewSliceExpr(pos, op, nleft)
+ case OSLICEHEADER:
+ return NewSliceHeaderExpr(pos, nil, nleft, nil, nil)
+ case OSWITCH:
+ return NewSwitchStmt(pos, nleft, nil)
+ case OINLCALL:
+ return NewInlinedCallExpr(pos, nil, nil)
+ }
+}
--- /dev/null
+// Code generated by mknode.go. DO NOT EDIT.
+
+package ir
+
+import "fmt"
+
+func (n *AddStringExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *AddStringExpr) copy() Node {
+ c := *n
+ c.init = c.init.Copy()
+ c.List_ = c.List_.Copy()
+ return &c
+}
+func (n *AddStringExpr) doChildren(do func(Node) error) error {
+ var err error
+ err = maybeDoList(n.init, err, do)
+ err = maybeDoList(n.List_, err, do)
+ return err
+}
+func (n *AddStringExpr) editChildren(edit func(Node) Node) {
+ editList(n.init, edit)
+ editList(n.List_, edit)
+}
+
+func (n *AddrExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *AddrExpr) copy() Node {
+ c := *n
+ c.init = c.init.Copy()
+ return &c
+}
+func (n *AddrExpr) doChildren(do func(Node) error) error {
+ var err error
+ err = maybeDoList(n.init, err, do)
+ err = maybeDo(n.X, err, do)
+ err = maybeDo(n.Alloc, err, do)
+ return err
+}
+func (n *AddrExpr) editChildren(edit func(Node) Node) {
+ editList(n.init, edit)
+ n.X = maybeEdit(n.X, edit)
+ n.Alloc = maybeEdit(n.Alloc, edit)
+}
+
+func (n *ArrayType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *ArrayType) copy() Node {
+ c := *n
+ return &c
+}
+func (n *ArrayType) doChildren(do func(Node) error) error {
+ var err error
+ err = maybeDo(n.Len, err, do)
+ err = maybeDo(n.Elem, err, do)
+ return err
+}
+func (n *ArrayType) editChildren(edit func(Node) Node) {
+ n.Len = maybeEdit(n.Len, edit)
+ n.Elem = maybeEdit(n.Elem, edit)
+}
+
+func (n *AssignListStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *AssignListStmt) copy() Node {
+ c := *n
+ c.init = c.init.Copy()
+ c.Lhs = c.Lhs.Copy()
+ c.Rhs = c.Rhs.Copy()
+ return &c
+}
+func (n *AssignListStmt) doChildren(do func(Node) error) error {
+ var err error
+ err = maybeDoList(n.init, err, do)
+ err = maybeDoList(n.Lhs, err, do)
+ err = maybeDoList(n.Rhs, err, do)
+ return err
+}
+func (n *AssignListStmt) editChildren(edit func(Node) Node) {
+ editList(n.init, edit)
+ editList(n.Lhs, edit)
+ editList(n.Rhs, edit)
+}
+
+func (n *AssignOpStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *AssignOpStmt) copy() Node {
+ c := *n
+ c.init = c.init.Copy()
+ return &c
+}
+func (n *AssignOpStmt) doChildren(do func(Node) error) error {
+ var err error
+ err = maybeDoList(n.init, err, do)
+ err = maybeDo(n.X, err, do)
+ err = maybeDo(n.Y, err, do)
+ return err
+}
+func (n *AssignOpStmt) editChildren(edit func(Node) Node) {
+ editList(n.init, edit)
+ n.X = maybeEdit(n.X, edit)
+ n.Y = maybeEdit(n.Y, edit)
+}
+
+func (n *AssignStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *AssignStmt) copy() Node {
+ c := *n
+ c.init = c.init.Copy()
+ return &c
+}
+func (n *AssignStmt) doChildren(do func(Node) error) error {
+ var err error
+ err = maybeDoList(n.init, err, do)
+ err = maybeDo(n.X, err, do)
+ err = maybeDo(n.Y, err, do)
+ return err
+}
+func (n *AssignStmt) editChildren(edit func(Node) Node) {
+ editList(n.init, edit)
+ n.X = maybeEdit(n.X, edit)
+ n.Y = maybeEdit(n.Y, edit)
+}
+
+func (n *BasicLit) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *BasicLit) copy() Node {
+ c := *n
+ c.init = c.init.Copy()
+ return &c
+}
+func (n *BasicLit) doChildren(do func(Node) error) error {
+ var err error
+ err = maybeDoList(n.init, err, do)
+ return err
+}
+func (n *BasicLit) editChildren(edit func(Node) Node) {
+ editList(n.init, edit)
+}
+
+func (n *BinaryExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *BinaryExpr) copy() Node {
+ c := *n
+ c.init = c.init.Copy()
+ return &c
+}
+func (n *BinaryExpr) doChildren(do func(Node) error) error {
+ var err error
+ err = maybeDoList(n.init, err, do)
+ err = maybeDo(n.X, err, do)
+ err = maybeDo(n.Y, err, do)
+ return err
+}
+func (n *BinaryExpr) editChildren(edit func(Node) Node) {
+ editList(n.init, edit)
+ n.X = maybeEdit(n.X, edit)
+ n.Y = maybeEdit(n.Y, edit)
+}
+
+func (n *BlockStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *BlockStmt) copy() Node {
+ c := *n
+ c.init = c.init.Copy()
+ c.List_ = c.List_.Copy()
+ return &c
+}
+func (n *BlockStmt) doChildren(do func(Node) error) error {
+ var err error
+ err = maybeDoList(n.init, err, do)
+ err = maybeDoList(n.List_, err, do)
+ return err
+}
+func (n *BlockStmt) editChildren(edit func(Node) Node) {
+ editList(n.init, edit)
+ editList(n.List_, edit)
+}
+
+func (n *BranchStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *BranchStmt) copy() Node {
+ c := *n
+ c.init = c.init.Copy()
+ return &c
+}
+func (n *BranchStmt) doChildren(do func(Node) error) error {
+ var err error
+ err = maybeDoList(n.init, err, do)
+ return err
+}
+func (n *BranchStmt) editChildren(edit func(Node) Node) {
+ editList(n.init, edit)
+}
+
+func (n *CallExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *CallExpr) copy() Node {
+ c := *n
+ c.init = c.init.Copy()
+ c.Args = c.Args.Copy()
+ c.Rargs = c.Rargs.Copy()
+ c.Body_ = c.Body_.Copy()
+ return &c
+}
+func (n *CallExpr) doChildren(do func(Node) error) error {
+ var err error
+ err = maybeDoList(n.init, err, do)
+ err = maybeDo(n.X, err, do)
+ err = maybeDoList(n.Args, err, do)
+ err = maybeDoList(n.Rargs, err, do)
+ err = maybeDoList(n.Body_, err, do)
+ return err
+}
+func (n *CallExpr) editChildren(edit func(Node) Node) {
+ editList(n.init, edit)
+ n.X = maybeEdit(n.X, edit)
+ editList(n.Args, edit)
+ editList(n.Rargs, edit)
+ editList(n.Body_, edit)
+}
+
+func (n *CallPartExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *CallPartExpr) copy() Node {
+ c := *n
+ c.init = c.init.Copy()
+ return &c
+}
+func (n *CallPartExpr) doChildren(do func(Node) error) error {
+ var err error
+ err = maybeDoList(n.init, err, do)
+ err = maybeDo(n.X, err, do)
+ return err
+}
+func (n *CallPartExpr) editChildren(edit func(Node) Node) {
+ editList(n.init, edit)
+ n.X = maybeEdit(n.X, edit)
+}
+
+func (n *CaseStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *CaseStmt) copy() Node {
+ c := *n
+ c.init = c.init.Copy()
+ c.Vars = c.Vars.Copy()
+ c.List_ = c.List_.Copy()
+ c.Body_ = c.Body_.Copy()
+ return &c
+}
+func (n *CaseStmt) doChildren(do func(Node) error) error {
+ var err error
+ err = maybeDoList(n.init, err, do)
+ err = maybeDoList(n.Vars, err, do)
+ err = maybeDoList(n.List_, err, do)
+ err = maybeDo(n.Comm, err, do)
+ err = maybeDoList(n.Body_, err, do)
+ return err
+}
+func (n *CaseStmt) editChildren(edit func(Node) Node) {
+ editList(n.init, edit)
+ editList(n.Vars, edit)
+ editList(n.List_, edit)
+ n.Comm = maybeEdit(n.Comm, edit)
+ editList(n.Body_, edit)
+}
+
+func (n *ChanType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *ChanType) copy() Node {
+ c := *n
+ return &c
+}
+func (n *ChanType) doChildren(do func(Node) error) error {
+ var err error
+ err = maybeDo(n.Elem, err, do)
+ return err
+}
+func (n *ChanType) editChildren(edit func(Node) Node) {
+ n.Elem = maybeEdit(n.Elem, edit)
+}
+
+func (n *ClosureExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *ClosureExpr) copy() Node {
+ c := *n
+ c.init = c.init.Copy()
+ return &c
+}
+func (n *ClosureExpr) doChildren(do func(Node) error) error {
+ var err error
+ err = maybeDoList(n.init, err, do)
+ return err
+}
+func (n *ClosureExpr) editChildren(edit func(Node) Node) {
+ editList(n.init, edit)
+}
+
+func (n *ClosureReadExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *ClosureReadExpr) copy() Node {
+ c := *n
+ c.init = c.init.Copy()
+ return &c
+}
+func (n *ClosureReadExpr) doChildren(do func(Node) error) error {
+ var err error
+ err = maybeDoList(n.init, err, do)
+ return err
+}
+func (n *ClosureReadExpr) editChildren(edit func(Node) Node) {
+ editList(n.init, edit)
+}
+
+func (n *CompLitExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *CompLitExpr) copy() Node {
+ c := *n
+ c.init = c.init.Copy()
+ c.List_ = c.List_.Copy()
+ return &c
+}
+func (n *CompLitExpr) doChildren(do func(Node) error) error {
+ var err error
+ err = maybeDoList(n.init, err, do)
+ err = maybeDo(n.Ntype, err, do)
+ err = maybeDoList(n.List_, err, do)
+ return err
+}
+func (n *CompLitExpr) editChildren(edit func(Node) Node) {
+ editList(n.init, edit)
+ n.Ntype = toNtype(maybeEdit(n.Ntype, edit))
+ editList(n.List_, edit)
+}
+
+func (n *ConstExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *ConstExpr) copy() Node {
+ c := *n
+ c.init = c.init.Copy()
+ return &c
+}
+func (n *ConstExpr) doChildren(do func(Node) error) error {
+ var err error
+ err = maybeDoList(n.init, err, do)
+ return err
+}
+func (n *ConstExpr) editChildren(edit func(Node) Node) {
+ editList(n.init, edit)
+}
+
+func (n *ConvExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *ConvExpr) copy() Node {
+ c := *n
+ c.init = c.init.Copy()
+ return &c
+}
+func (n *ConvExpr) doChildren(do func(Node) error) error {
+ var err error
+ err = maybeDoList(n.init, err, do)
+ err = maybeDo(n.X, err, do)
+ return err
+}
+func (n *ConvExpr) editChildren(edit func(Node) Node) {
+ editList(n.init, edit)
+ n.X = maybeEdit(n.X, edit)
+}
+
+func (n *Decl) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *Decl) copy() Node {
+ c := *n
+ return &c
+}
+func (n *Decl) doChildren(do func(Node) error) error {
+ var err error
+ err = maybeDo(n.X, err, do)
+ return err
+}
+func (n *Decl) editChildren(edit func(Node) Node) {
+ n.X = maybeEdit(n.X, edit)
+}
+
+func (n *ForStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *ForStmt) copy() Node {
+ c := *n
+ c.init = c.init.Copy()
+ c.Late = c.Late.Copy()
+ c.Body_ = c.Body_.Copy()
+ return &c
+}
+func (n *ForStmt) doChildren(do func(Node) error) error {
+ var err error
+ err = maybeDoList(n.init, err, do)
+ err = maybeDo(n.Cond, err, do)
+ err = maybeDoList(n.Late, err, do)
+ err = maybeDo(n.Post, err, do)
+ err = maybeDoList(n.Body_, err, do)
+ return err
+}
+func (n *ForStmt) editChildren(edit func(Node) Node) {
+ editList(n.init, edit)
+ n.Cond = maybeEdit(n.Cond, edit)
+ editList(n.Late, edit)
+ n.Post = maybeEdit(n.Post, edit)
+ editList(n.Body_, edit)
+}
+
+func (n *Func) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *Func) copy() Node {
+ c := *n
+ c.Body_ = c.Body_.Copy()
+ return &c
+}
+func (n *Func) doChildren(do func(Node) error) error {
+ var err error
+ err = maybeDoList(n.Body_, err, do)
+ return err
+}
+func (n *Func) editChildren(edit func(Node) Node) {
+ editList(n.Body_, edit)
+}
+
+func (n *FuncType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *FuncType) copy() Node {
+ c := *n
+ if c.Recv != nil {
+ c.Recv = c.Recv.copy()
+ }
+ c.Params = copyFields(c.Params)
+ c.Results = copyFields(c.Results)
+ return &c
+}
+func (n *FuncType) doChildren(do func(Node) error) error {
+ var err error
+ err = maybeDoField(n.Recv, err, do)
+ err = maybeDoFields(n.Params, err, do)
+ err = maybeDoFields(n.Results, err, do)
+ return err
+}
+func (n *FuncType) editChildren(edit func(Node) Node) {
+ editField(n.Recv, edit)
+ editFields(n.Params, edit)
+ editFields(n.Results, edit)
+}
+
+func (n *GoDeferStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *GoDeferStmt) copy() Node {
+ c := *n
+ c.init = c.init.Copy()
+ return &c
+}
+func (n *GoDeferStmt) doChildren(do func(Node) error) error {
+ var err error
+ err = maybeDoList(n.init, err, do)
+ err = maybeDo(n.Call, err, do)
+ return err
+}
+func (n *GoDeferStmt) editChildren(edit func(Node) Node) {
+ editList(n.init, edit)
+ n.Call = maybeEdit(n.Call, edit)
+}
+
+func (n *Ident) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *Ident) copy() Node {
+ c := *n
+ c.init = c.init.Copy()
+ return &c
+}
+func (n *Ident) doChildren(do func(Node) error) error {
+ var err error
+ err = maybeDoList(n.init, err, do)
+ return err
+}
+func (n *Ident) editChildren(edit func(Node) Node) {
+ editList(n.init, edit)
+}
+
+func (n *IfStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *IfStmt) copy() Node {
+ c := *n
+ c.init = c.init.Copy()
+ c.Body_ = c.Body_.Copy()
+ c.Else = c.Else.Copy()
+ return &c
+}
+func (n *IfStmt) doChildren(do func(Node) error) error {
+ var err error
+ err = maybeDoList(n.init, err, do)
+ err = maybeDo(n.Cond, err, do)
+ err = maybeDoList(n.Body_, err, do)
+ err = maybeDoList(n.Else, err, do)
+ return err
+}
+func (n *IfStmt) editChildren(edit func(Node) Node) {
+ editList(n.init, edit)
+ n.Cond = maybeEdit(n.Cond, edit)
+ editList(n.Body_, edit)
+ editList(n.Else, edit)
+}
+
+func (n *IndexExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *IndexExpr) copy() Node {
+ c := *n
+ c.init = c.init.Copy()
+ return &c
+}
+func (n *IndexExpr) doChildren(do func(Node) error) error {
+ var err error
+ err = maybeDoList(n.init, err, do)
+ err = maybeDo(n.X, err, do)
+ err = maybeDo(n.Index, err, do)
+ return err
+}
+func (n *IndexExpr) editChildren(edit func(Node) Node) {
+ editList(n.init, edit)
+ n.X = maybeEdit(n.X, edit)
+ n.Index = maybeEdit(n.Index, edit)
+}
+
+// NOTE(review): the Format/copy/doChildren/editChildren quartets below are
+// purely mechanical and look machine-generated (mknode-style — confirm
+// before editing by hand). The pattern is:
+//   - Format delegates every verb to FmtNode.
+//   - copy returns a shallow clone, duplicating any Nodes lists so the
+//     clone does not share list backing storage with the original.
+//   - doChildren applies do to each child, threading the first error
+//     through via the maybeDo/maybeDoList/maybeDoFields helpers.
+//   - editChildren rewrites each child (including the init list) in place.
+func (n *InlineMarkStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *InlineMarkStmt) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *InlineMarkStmt) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	return err
+}
+func (n *InlineMarkStmt) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+}
+
+func (n *InlinedCallExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *InlinedCallExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	c.Body_ = c.Body_.Copy()
+	c.ReturnVars = c.ReturnVars.Copy()
+	return &c
+}
+func (n *InlinedCallExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDoList(n.Body_, err, do)
+	err = maybeDoList(n.ReturnVars, err, do)
+	return err
+}
+func (n *InlinedCallExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	editList(n.Body_, edit)
+	editList(n.ReturnVars, edit)
+}
+
+func (n *InterfaceType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *InterfaceType) copy() Node {
+	c := *n
+	c.Methods = copyFields(c.Methods)
+	return &c
+}
+func (n *InterfaceType) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoFields(n.Methods, err, do)
+	return err
+}
+func (n *InterfaceType) editChildren(edit func(Node) Node) {
+	editFields(n.Methods, edit)
+}
+
+func (n *KeyExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *KeyExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *KeyExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.Key, err, do)
+	err = maybeDo(n.Value, err, do)
+	return err
+}
+func (n *KeyExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.Key = maybeEdit(n.Key, edit)
+	n.Value = maybeEdit(n.Value, edit)
+}
+
+func (n *LabelStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *LabelStmt) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *LabelStmt) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	return err
+}
+func (n *LabelStmt) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+}
+
+func (n *LogicalExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *LogicalExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *LogicalExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.X, err, do)
+	err = maybeDo(n.Y, err, do)
+	return err
+}
+func (n *LogicalExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.X = maybeEdit(n.X, edit)
+	n.Y = maybeEdit(n.Y, edit)
+}
+
+func (n *MakeExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *MakeExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *MakeExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.Len, err, do)
+	err = maybeDo(n.Cap, err, do)
+	return err
+}
+func (n *MakeExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.Len = maybeEdit(n.Len, edit)
+	n.Cap = maybeEdit(n.Cap, edit)
+}
+
+func (n *MapType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *MapType) copy() Node {
+	c := *n
+	return &c
+}
+func (n *MapType) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDo(n.Key, err, do)
+	err = maybeDo(n.Elem, err, do)
+	return err
+}
+func (n *MapType) editChildren(edit func(Node) Node) {
+	n.Key = maybeEdit(n.Key, edit)
+	n.Elem = maybeEdit(n.Elem, edit)
+}
+
+func (n *MethodExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *MethodExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *MethodExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	return err
+}
+func (n *MethodExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+}
+
+func (n *Name) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+
+// Name deliberately cannot be copied: copy panics rather than silently
+// duplicating a declared name (presumably because Names are canonical,
+// identity-carrying nodes — confirm with the Name type's docs).
+func (n *Name) copy() Node { panic("Name.copy") }
+func (n *Name) doChildren(do func(Node) error) error {
+	var err error
+	return err
+}
+func (n *Name) editChildren(edit func(Node) Node) {
+}
+
+func (n *NameOffsetExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *NameOffsetExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *NameOffsetExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	return err
+}
+func (n *NameOffsetExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+}
+
+func (n *NilExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *NilExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *NilExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	return err
+}
+func (n *NilExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+}
+
+func (n *ParenExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *ParenExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *ParenExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.X, err, do)
+	return err
+}
+func (n *ParenExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.X = maybeEdit(n.X, edit)
+}
+
+func (n *PkgName) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *PkgName) copy() Node {
+	c := *n
+	return &c
+}
+func (n *PkgName) doChildren(do func(Node) error) error {
+	var err error
+	return err
+}
+func (n *PkgName) editChildren(edit func(Node) Node) {
+}
+
+func (n *RangeStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *RangeStmt) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	c.Vars = c.Vars.Copy()
+	c.Body_ = c.Body_.Copy()
+	return &c
+}
+func (n *RangeStmt) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDoList(n.Vars, err, do)
+	err = maybeDo(n.X, err, do)
+	err = maybeDoList(n.Body_, err, do)
+	return err
+}
+func (n *RangeStmt) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	editList(n.Vars, edit)
+	n.X = maybeEdit(n.X, edit)
+	editList(n.Body_, edit)
+}
+
+func (n *ResultExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *ResultExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *ResultExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	return err
+}
+func (n *ResultExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+}
+
+func (n *ReturnStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *ReturnStmt) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	c.Results = c.Results.Copy()
+	return &c
+}
+func (n *ReturnStmt) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDoList(n.Results, err, do)
+	return err
+}
+func (n *ReturnStmt) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	editList(n.Results, edit)
+}
+
+func (n *SelectStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *SelectStmt) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	c.Cases = c.Cases.Copy()
+	c.Compiled = c.Compiled.Copy()
+	return &c
+}
+func (n *SelectStmt) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDoList(n.Cases, err, do)
+	err = maybeDoList(n.Compiled, err, do)
+	return err
+}
+func (n *SelectStmt) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	editList(n.Cases, edit)
+	editList(n.Compiled, edit)
+}
+
+func (n *SelectorExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *SelectorExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *SelectorExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.X, err, do)
+	return err
+}
+func (n *SelectorExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.X = maybeEdit(n.X, edit)
+}
+
+func (n *SendStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *SendStmt) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *SendStmt) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.Chan, err, do)
+	err = maybeDo(n.Value, err, do)
+	return err
+}
+func (n *SendStmt) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.Chan = maybeEdit(n.Chan, edit)
+	n.Value = maybeEdit(n.Value, edit)
+}
+
+func (n *SliceExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *SliceExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	c.List_ = c.List_.Copy()
+	return &c
+}
+func (n *SliceExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.X, err, do)
+	err = maybeDoList(n.List_, err, do)
+	return err
+}
+func (n *SliceExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.X = maybeEdit(n.X, edit)
+	editList(n.List_, edit)
+}
+
+func (n *SliceHeaderExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *SliceHeaderExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	c.LenCap_ = c.LenCap_.Copy()
+	return &c
+}
+func (n *SliceHeaderExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.Ptr, err, do)
+	err = maybeDoList(n.LenCap_, err, do)
+	return err
+}
+func (n *SliceHeaderExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.Ptr = maybeEdit(n.Ptr, edit)
+	editList(n.LenCap_, edit)
+}
+
+func (n *SliceType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *SliceType) copy() Node {
+	c := *n
+	return &c
+}
+func (n *SliceType) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDo(n.Elem, err, do)
+	return err
+}
+func (n *SliceType) editChildren(edit func(Node) Node) {
+	n.Elem = maybeEdit(n.Elem, edit)
+}
+
+func (n *StarExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *StarExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *StarExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.X, err, do)
+	return err
+}
+func (n *StarExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.X = maybeEdit(n.X, edit)
+}
+
+func (n *StructKeyExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *StructKeyExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *StructKeyExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.Value, err, do)
+	return err
+}
+func (n *StructKeyExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.Value = maybeEdit(n.Value, edit)
+}
+
+func (n *StructType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *StructType) copy() Node {
+	c := *n
+	c.Fields = copyFields(c.Fields)
+	return &c
+}
+func (n *StructType) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoFields(n.Fields, err, do)
+	return err
+}
+func (n *StructType) editChildren(edit func(Node) Node) {
+	editFields(n.Fields, edit)
+}
+
+func (n *SwitchStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *SwitchStmt) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	c.Cases = c.Cases.Copy()
+	c.Compiled = c.Compiled.Copy()
+	return &c
+}
+func (n *SwitchStmt) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.Tag, err, do)
+	err = maybeDoList(n.Cases, err, do)
+	err = maybeDoList(n.Compiled, err, do)
+	return err
+}
+func (n *SwitchStmt) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.Tag = maybeEdit(n.Tag, edit)
+	editList(n.Cases, edit)
+	editList(n.Compiled, edit)
+}
+
+func (n *TypeAssertExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *TypeAssertExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	c.Itab = c.Itab.Copy()
+	return &c
+}
+func (n *TypeAssertExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.X, err, do)
+	err = maybeDo(n.Ntype, err, do)
+	err = maybeDoList(n.Itab, err, do)
+	return err
+}
+func (n *TypeAssertExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.X = maybeEdit(n.X, edit)
+	n.Ntype = maybeEdit(n.Ntype, edit)
+	editList(n.Itab, edit)
+}
+
+func (n *TypeSwitchGuard) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *TypeSwitchGuard) copy() Node {
+	c := *n
+	return &c
+}
+func (n *TypeSwitchGuard) doChildren(do func(Node) error) error {
+	var err error
+	// Tag is a concrete *Ident: the explicit nil check avoids handing a
+	// non-nil interface wrapping a nil pointer to the callback.
+	if n.Tag != nil {
+		err = maybeDo(n.Tag, err, do)
+	}
+	err = maybeDo(n.X, err, do)
+	return err
+}
+func (n *TypeSwitchGuard) editChildren(edit func(Node) Node) {
+	if n.Tag != nil {
+		// The edited node must still be an *Ident; edit returning any
+		// other type panics here by design.
+		n.Tag = edit(n.Tag).(*Ident)
+	}
+	n.X = maybeEdit(n.X, edit)
+}
+
+func (n *UnaryExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *UnaryExpr) copy() Node {
+	c := *n
+	c.init = c.init.Copy()
+	return &c
+}
+func (n *UnaryExpr) doChildren(do func(Node) error) error {
+	var err error
+	err = maybeDoList(n.init, err, do)
+	err = maybeDo(n.X, err, do)
+	return err
+}
+func (n *UnaryExpr) editChildren(edit func(Node) Node) {
+	editList(n.init, edit)
+	n.X = maybeEdit(n.X, edit)
+}
+
+func (n *typeNode) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+func (n *typeNode) copy() Node {
+	c := *n
+	return &c
+}
+func (n *typeNode) doChildren(do func(Node) error) error {
+	var err error
+	return err
+}
+func (n *typeNode) editChildren(edit func(Node) Node) {
+}
--- /dev/null
+// Code generated by "stringer -type=Op -trimprefix=O"; DO NOT EDIT.
+
+package ir
+
+import "strconv"
+
+// _ is stringer's standard compile-time guard: each index expression is
+// only legal if the named Op constant still has the listed value, so any
+// reordering of the Op enum breaks the build until stringer is re-run.
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[OXXX-0]
+	_ = x[ONAME-1]
+	_ = x[ONONAME-2]
+	_ = x[OTYPE-3]
+	_ = x[OPACK-4]
+	_ = x[OLITERAL-5]
+	_ = x[ONIL-6]
+	_ = x[OADD-7]
+	_ = x[OSUB-8]
+	_ = x[OOR-9]
+	_ = x[OXOR-10]
+	_ = x[OADDSTR-11]
+	_ = x[OADDR-12]
+	_ = x[OANDAND-13]
+	_ = x[OAPPEND-14]
+	_ = x[OBYTES2STR-15]
+	_ = x[OBYTES2STRTMP-16]
+	_ = x[ORUNES2STR-17]
+	_ = x[OSTR2BYTES-18]
+	_ = x[OSTR2BYTESTMP-19]
+	_ = x[OSTR2RUNES-20]
+	_ = x[OAS-21]
+	_ = x[OAS2-22]
+	_ = x[OAS2DOTTYPE-23]
+	_ = x[OAS2FUNC-24]
+	_ = x[OAS2MAPR-25]
+	_ = x[OAS2RECV-26]
+	_ = x[OASOP-27]
+	_ = x[OCALL-28]
+	_ = x[OCALLFUNC-29]
+	_ = x[OCALLMETH-30]
+	_ = x[OCALLINTER-31]
+	_ = x[OCALLPART-32]
+	_ = x[OCAP-33]
+	_ = x[OCLOSE-34]
+	_ = x[OCLOSURE-35]
+	_ = x[OCOMPLIT-36]
+	_ = x[OMAPLIT-37]
+	_ = x[OSTRUCTLIT-38]
+	_ = x[OARRAYLIT-39]
+	_ = x[OSLICELIT-40]
+	_ = x[OPTRLIT-41]
+	_ = x[OCONV-42]
+	_ = x[OCONVIFACE-43]
+	_ = x[OCONVNOP-44]
+	_ = x[OCOPY-45]
+	_ = x[ODCL-46]
+	_ = x[ODCLFUNC-47]
+	_ = x[ODCLCONST-48]
+	_ = x[ODCLTYPE-49]
+	_ = x[ODELETE-50]
+	_ = x[ODOT-51]
+	_ = x[ODOTPTR-52]
+	_ = x[ODOTMETH-53]
+	_ = x[ODOTINTER-54]
+	_ = x[OXDOT-55]
+	_ = x[ODOTTYPE-56]
+	_ = x[ODOTTYPE2-57]
+	_ = x[OEQ-58]
+	_ = x[ONE-59]
+	_ = x[OLT-60]
+	_ = x[OLE-61]
+	_ = x[OGE-62]
+	_ = x[OGT-63]
+	_ = x[ODEREF-64]
+	_ = x[OINDEX-65]
+	_ = x[OINDEXMAP-66]
+	_ = x[OKEY-67]
+	_ = x[OSTRUCTKEY-68]
+	_ = x[OLEN-69]
+	_ = x[OMAKE-70]
+	_ = x[OMAKECHAN-71]
+	_ = x[OMAKEMAP-72]
+	_ = x[OMAKESLICE-73]
+	_ = x[OMAKESLICECOPY-74]
+	_ = x[OMUL-75]
+	_ = x[ODIV-76]
+	_ = x[OMOD-77]
+	_ = x[OLSH-78]
+	_ = x[ORSH-79]
+	_ = x[OAND-80]
+	_ = x[OANDNOT-81]
+	_ = x[ONEW-82]
+	_ = x[ONEWOBJ-83]
+	_ = x[ONOT-84]
+	_ = x[OBITNOT-85]
+	_ = x[OPLUS-86]
+	_ = x[ONEG-87]
+	_ = x[OOROR-88]
+	_ = x[OPANIC-89]
+	_ = x[OPRINT-90]
+	_ = x[OPRINTN-91]
+	_ = x[OPAREN-92]
+	_ = x[OSEND-93]
+	_ = x[OSLICE-94]
+	_ = x[OSLICEARR-95]
+	_ = x[OSLICESTR-96]
+	_ = x[OSLICE3-97]
+	_ = x[OSLICE3ARR-98]
+	_ = x[OSLICEHEADER-99]
+	_ = x[ORECOVER-100]
+	_ = x[ORECV-101]
+	_ = x[ORUNESTR-102]
+	_ = x[OSELRECV2-103]
+	_ = x[OIOTA-104]
+	_ = x[OREAL-105]
+	_ = x[OIMAG-106]
+	_ = x[OCOMPLEX-107]
+	_ = x[OALIGNOF-108]
+	_ = x[OOFFSETOF-109]
+	_ = x[OSIZEOF-110]
+	_ = x[OMETHEXPR-111]
+	_ = x[OSTMTEXPR-112]
+	_ = x[OBLOCK-113]
+	_ = x[OBREAK-114]
+	_ = x[OCASE-115]
+	_ = x[OCONTINUE-116]
+	_ = x[ODEFER-117]
+	_ = x[OFALL-118]
+	_ = x[OFOR-119]
+	_ = x[OFORUNTIL-120]
+	_ = x[OGOTO-121]
+	_ = x[OIF-122]
+	_ = x[OLABEL-123]
+	_ = x[OGO-124]
+	_ = x[ORANGE-125]
+	_ = x[ORETURN-126]
+	_ = x[OSELECT-127]
+	_ = x[OSWITCH-128]
+	_ = x[OTYPESW-129]
+	_ = x[OTCHAN-130]
+	_ = x[OTMAP-131]
+	_ = x[OTSTRUCT-132]
+	_ = x[OTINTER-133]
+	_ = x[OTFUNC-134]
+	_ = x[OTARRAY-135]
+	_ = x[OTSLICE-136]
+	_ = x[OINLCALL-137]
+	_ = x[OEFACE-138]
+	_ = x[OITAB-139]
+	_ = x[OIDATA-140]
+	_ = x[OSPTR-141]
+	_ = x[OCLOSUREREAD-142]
+	_ = x[OCFUNC-143]
+	_ = x[OCHECKNIL-144]
+	_ = x[OVARDEF-145]
+	_ = x[OVARKILL-146]
+	_ = x[OVARLIVE-147]
+	_ = x[ORESULT-148]
+	_ = x[OINLMARK-149]
+	_ = x[ONAMEOFFSET-150]
+	_ = x[ORETJMP-151]
+	_ = x[OGETG-152]
+	_ = x[OEND-153]
+}
+
+// _Op_name is every Op name (with the O prefix trimmed) concatenated;
+// _Op_index[i] is the byte offset where the i'th name begins, so name i
+// is _Op_name[_Op_index[i]:_Op_index[i+1]]. Both are stringer output.
+const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRSTMTEXPRBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCLOSUREREADCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKNAMEOFFSETRETJMPGETGEND"
+
+var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 309, 315, 318, 324, 331, 339, 343, 350, 358, 360, 362, 364, 366, 368, 370, 375, 380, 388, 391, 400, 403, 407, 415, 422, 431, 444, 447, 450, 453, 456, 459, 462, 468, 471, 477, 480, 486, 490, 493, 497, 502, 507, 513, 518, 522, 527, 535, 543, 549, 558, 569, 576, 580, 587, 595, 599, 603, 607, 614, 621, 629, 635, 643, 651, 656, 661, 665, 673, 678, 682, 685, 693, 697, 699, 704, 706, 711, 717, 723, 729, 735, 740, 744, 751, 757, 762, 768, 774, 781, 786, 790, 795, 799, 810, 815, 823, 829, 836, 843, 849, 856, 866, 872, 876, 879}
+
+// String returns the trimmed name of the Op (e.g. "ADD" for OADD),
+// or "Op(n)" for a value outside the known range.
+func (i Op) String() string {
+	if i >= Op(len(_Op_index)-1) {
+		return "Op(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _Op_name[_Op_index[i]:_Op_index[i+1]]
+}
--- /dev/null
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import "cmd/compile/internal/types"
+
+// A Package holds information about the package being compiled.
+// It is the front end's top-level container: later phases consume
+// these slices rather than walking global state.
+type Package struct {
+	// Imports, listed in source order.
+	// See golang.org/issue/31636.
+	Imports []*types.Pkg
+
+	// Init functions, listed in source order.
+	Inits []*Func
+
+	// Top-level declarations.
+	Decls []Node
+
+	// Extern (package global) declarations.
+	Externs []Node
+
+	// Assembly function declarations.
+	Asms []*Name
+
+	// Cgo directives.
+	CgoPragmas [][]string
+
+	// Variables with //go:embed lines.
+	Embeds []*Name
+
+	// Exported (or re-exported) symbols.
+	Exports []*Name
+}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package ir
import (
"reflect"
_32bit uintptr // size on 32bit platforms
_64bit uintptr // size on 64bit platforms
}{
- {Func{}, 124, 224},
- {Name{}, 32, 56},
- {Param{}, 24, 48},
- {Node{}, 76, 128},
+ {Func{}, 200, 352},
+ {Name{}, 132, 232},
}
for _, tt := range tests {
--- /dev/null
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// A Decl is a declaration of a const, type, or var. (A declared func is a Func.)
+type Decl struct {
+	miniNode
+	X Node // the thing being declared
+}
+
+// NewDecl returns a new Decl for op, which must be
+// ODCL, ODCLCONST, or ODCLTYPE; any other op panics.
+func NewDecl(pos src.XPos, op Op, x Node) *Decl {
+	n := &Decl{X: x}
+	n.pos = pos
+	switch op {
+	default:
+		panic("invalid Decl op " + op.String())
+	case ODCL, ODCLCONST, ODCLTYPE:
+		n.op = op
+	}
+	return n
+}
+
+func (*Decl) isStmt() {}
+
+// Left/SetLeft expose X through the generic Node accessor interface.
+func (n *Decl) Left() Node     { return n.X }
+func (n *Decl) SetLeft(x Node) { n.X = x }
+
+// A Stmt is a Node that can appear as a statement.
+// This includes statement-like expressions such as f().
+//
+// (It's possible it should include <-c, but that would require
+// splitting ORECV out of UnaryExpr, which hasn't yet been
+// necessary. Maybe instead we will introduce ExprStmt at
+// some point.)
+type Stmt interface {
+	Node
+	isStmt()
+}
+
+// A miniStmt is a miniNode with extra fields common to statements.
+// Concrete statement types embed it to pick up the init list and
+// the Stmt marker method.
+type miniStmt struct {
+	miniNode
+	init Nodes
+}
+
+func (*miniStmt) isStmt() {}
+
+// Init-list and has-call accessors shared by all statements.
+func (n *miniStmt) Init() Nodes       { return n.init }
+func (n *miniStmt) SetInit(x Nodes)   { n.init = x }
+func (n *miniStmt) PtrInit() *Nodes   { return &n.init }
+func (n *miniStmt) HasCall() bool     { return n.bits&miniHasCall != 0 }
+func (n *miniStmt) SetHasCall(b bool) { n.bits.set(miniHasCall, b) }
+
+// An AssignListStmt is an assignment statement with
+// more than one item on at least one side: Lhs = Rhs.
+// If Def is true, the assignment is a :=.
+type AssignListStmt struct {
+	miniStmt
+	Lhs Nodes
+	Def bool
+	Rhs Nodes
+}
+
+// NewAssignListStmt returns a new multi-assignment for op
+// (one of the OAS2* family or OSELRECV2; see SetOp).
+func NewAssignListStmt(pos src.XPos, op Op, lhs, rhs []Node) *AssignListStmt {
+	n := &AssignListStmt{}
+	n.pos = pos
+	n.SetOp(op)
+	n.Lhs.Set(lhs)
+	n.Rhs.Set(rhs)
+	return n
+}
+
+// Generic Node accessors: List maps to Lhs, Rlist to Rhs, Colas to Def.
+func (n *AssignListStmt) List() Nodes      { return n.Lhs }
+func (n *AssignListStmt) PtrList() *Nodes  { return &n.Lhs }
+func (n *AssignListStmt) SetList(x Nodes)  { n.Lhs = x }
+func (n *AssignListStmt) Rlist() Nodes     { return n.Rhs }
+func (n *AssignListStmt) PtrRlist() *Nodes { return &n.Rhs }
+func (n *AssignListStmt) SetRlist(x Nodes) { n.Rhs = x }
+func (n *AssignListStmt) Colas() bool      { return n.Def }
+func (n *AssignListStmt) SetColas(x bool)  { n.Def = x }
+
+// SetOp restricts the node to the multi-assign opcodes; any other op panics.
+func (n *AssignListStmt) SetOp(op Op) {
+	switch op {
+	default:
+		panic(n.no("SetOp " + op.String()))
+	case OAS2, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV, OSELRECV2:
+		n.op = op
+	}
+}
+
+// An AssignStmt is a simple assignment statement: X = Y.
+// If Def is true, the assignment is a :=.
+type AssignStmt struct {
+	miniStmt
+	X   Node
+	Def bool
+	Y   Node
+}
+
+// NewAssignStmt returns a new OAS statement assigning y to x.
+func NewAssignStmt(pos src.XPos, x, y Node) *AssignStmt {
+	n := &AssignStmt{X: x, Y: y}
+	n.pos = pos
+	n.op = OAS
+	return n
+}
+
+// Generic Node accessors: Left maps to X, Right to Y, Colas to Def.
+func (n *AssignStmt) Left() Node      { return n.X }
+func (n *AssignStmt) SetLeft(x Node)  { n.X = x }
+func (n *AssignStmt) Right() Node     { return n.Y }
+func (n *AssignStmt) SetRight(y Node) { n.Y = y }
+func (n *AssignStmt) Colas() bool     { return n.Def }
+func (n *AssignStmt) SetColas(x bool) { n.Def = x }
+
+// SetOp accepts only OAS; it exists so generic rewrites can call SetOp
+// uniformly, panicking on anything else.
+func (n *AssignStmt) SetOp(op Op) {
+	switch op {
+	default:
+		panic(n.no("SetOp " + op.String()))
+	case OAS:
+		n.op = op
+	}
+}
+
+// An AssignOpStmt is an AsOp= assignment statement: X AsOp= Y.
+type AssignOpStmt struct {
+	miniStmt
+	typ    *types.Type
+	X      Node
+	AsOp   Op // OADD etc
+	Y      Node
+	IncDec bool // actually ++ or --
+}
+
+// NewAssignOpStmt returns a new OASOP statement computing X asOp= Y.
+// asOp is the arithmetic opcode (OADD, OSUB, ...), not validated here.
+func NewAssignOpStmt(pos src.XPos, asOp Op, x, y Node) *AssignOpStmt {
+	n := &AssignOpStmt{AsOp: asOp, X: x, Y: y}
+	n.pos = pos
+	n.op = OASOP
+	return n
+}
+
+// Generic Node accessors; note Implicit maps to IncDec (x++ / x--).
+func (n *AssignOpStmt) Left() Node            { return n.X }
+func (n *AssignOpStmt) SetLeft(x Node)        { n.X = x }
+func (n *AssignOpStmt) Right() Node           { return n.Y }
+func (n *AssignOpStmt) SetRight(y Node)       { n.Y = y }
+func (n *AssignOpStmt) SubOp() Op             { return n.AsOp }
+func (n *AssignOpStmt) SetSubOp(x Op)         { n.AsOp = x }
+func (n *AssignOpStmt) Implicit() bool        { return n.IncDec }
+func (n *AssignOpStmt) SetImplicit(b bool)    { n.IncDec = b }
+func (n *AssignOpStmt) Type() *types.Type     { return n.typ }
+func (n *AssignOpStmt) SetType(x *types.Type) { n.typ = x }
+
+// A BlockStmt is a block: { List }.
+type BlockStmt struct {
+	miniStmt
+	List_ Nodes
+}
+
+// NewBlockStmt returns a new OBLOCK containing list. If pos is unknown,
+// the block's position falls back to the first statement's position,
+// or to base.Pos when list is empty.
+func NewBlockStmt(pos src.XPos, list []Node) *BlockStmt {
+	n := &BlockStmt{}
+	n.pos = pos
+	if !pos.IsKnown() {
+		n.pos = base.Pos
+		if len(list) > 0 {
+			n.pos = list[0].Pos()
+		}
+	}
+	n.op = OBLOCK
+	n.List_.Set(list)
+	return n
+}
+
+// Generic Node accessors: List maps to List_.
+func (n *BlockStmt) List() Nodes     { return n.List_ }
+func (n *BlockStmt) PtrList() *Nodes { return &n.List_ }
+func (n *BlockStmt) SetList(x Nodes) { n.List_ = x }
+
+// A BranchStmt is a break, continue, fallthrough, or goto statement.
+//
+// For back-end code generation, Op may also be ORETJMP (return+jump),
+// in which case the label names another function entirely.
+type BranchStmt struct {
+	miniStmt
+	Label *types.Sym // label if present
+}
+
+// NewBranchStmt returns a new branch statement for op
+// (OBREAK, OCONTINUE, OFALL, OGOTO, or ORETJMP); any other op panics.
+func NewBranchStmt(pos src.XPos, op Op, label *types.Sym) *BranchStmt {
+	switch op {
+	case OBREAK, OCONTINUE, OFALL, OGOTO, ORETJMP:
+		// ok
+	default:
+		panic("NewBranch " + op.String())
+	}
+	n := &BranchStmt{Label: label}
+	n.pos = pos
+	n.op = op
+	return n
+}
+
+// Sym/SetSym expose the target label through the generic Node interface.
+func (n *BranchStmt) Sym() *types.Sym       { return n.Label }
+func (n *BranchStmt) SetSym(sym *types.Sym) { n.Label = sym }
+
+// A CaseStmt is a case statement in a switch or select: case List: Body.
+type CaseStmt struct {
+	miniStmt
+	Vars  Nodes // declared variable for this case in type switch
+	List_ Nodes // list of expressions for switch, early select
+	Comm  Node  // communication case (Exprs[0]) after select is type-checked
+	Body_ Nodes
+}
+
+// NewCaseStmt returns a new OCASE with the given guard expressions and body.
+func NewCaseStmt(pos src.XPos, list, body []Node) *CaseStmt {
+	n := &CaseStmt{}
+	n.pos = pos
+	n.op = OCASE
+	n.List_.Set(list)
+	n.Body_.Set(body)
+	return n
+}
+
+// Generic Node accessors: List maps to List_, Body to Body_,
+// Rlist to Vars, and Left to Comm.
+func (n *CaseStmt) List() Nodes      { return n.List_ }
+func (n *CaseStmt) PtrList() *Nodes  { return &n.List_ }
+func (n *CaseStmt) SetList(x Nodes)  { n.List_ = x }
+func (n *CaseStmt) Body() Nodes      { return n.Body_ }
+func (n *CaseStmt) PtrBody() *Nodes  { return &n.Body_ }
+func (n *CaseStmt) SetBody(x Nodes)  { n.Body_ = x }
+func (n *CaseStmt) Rlist() Nodes     { return n.Vars }
+func (n *CaseStmt) PtrRlist() *Nodes { return &n.Vars }
+func (n *CaseStmt) SetRlist(x Nodes) { n.Vars = x }
+func (n *CaseStmt) Left() Node       { return n.Comm }
+func (n *CaseStmt) SetLeft(x Node)   { n.Comm = x }
+
+// A ForStmt is a non-range for loop: for Init; Cond; Post { Body }
+// Op can be OFOR or OFORUNTIL (!Cond).
+type ForStmt struct {
+	miniStmt
+	Label     *types.Sym
+	Cond      Node
+	Late      Nodes
+	Post      Node
+	Body_     Nodes
+	HasBreak_ bool
+}
+
+// NewForStmt returns a new OFOR loop; init becomes the statement's
+// init list. Use SetOp to convert to OFORUNTIL.
+func NewForStmt(pos src.XPos, init []Node, cond, post Node, body []Node) *ForStmt {
+	n := &ForStmt{Cond: cond, Post: post}
+	n.pos = pos
+	n.op = OFOR
+	n.init.Set(init)
+	n.Body_.Set(body)
+	return n
+}
+
+// Generic Node accessors: Left is Cond, Right is Post, List is Late.
+func (n *ForStmt) Sym() *types.Sym     { return n.Label }
+func (n *ForStmt) SetSym(x *types.Sym) { n.Label = x }
+func (n *ForStmt) Left() Node          { return n.Cond }
+func (n *ForStmt) SetLeft(x Node)      { n.Cond = x }
+func (n *ForStmt) Right() Node         { return n.Post }
+func (n *ForStmt) SetRight(x Node)     { n.Post = x }
+func (n *ForStmt) Body() Nodes         { return n.Body_ }
+func (n *ForStmt) PtrBody() *Nodes     { return &n.Body_ }
+func (n *ForStmt) SetBody(x Nodes)     { n.Body_ = x }
+func (n *ForStmt) List() Nodes         { return n.Late }
+func (n *ForStmt) PtrList() *Nodes     { return &n.Late }
+func (n *ForStmt) SetList(x Nodes)     { n.Late = x }
+func (n *ForStmt) HasBreak() bool      { return n.HasBreak_ }
+func (n *ForStmt) SetHasBreak(b bool)  { n.HasBreak_ = b }
+
+// SetOp accepts only OFOR and OFORUNTIL; any other op panics.
+func (n *ForStmt) SetOp(op Op) {
+	if op != OFOR && op != OFORUNTIL {
+		panic(n.no("SetOp " + op.String()))
+	}
+	n.op = op
+}
+
+// A GoDeferStmt is a go or defer statement: go Call / defer Call.
+//
+// The two opcodes use a single syntax because the implementations
+// are very similar: both are concerned with saving Call and running it
+// in a different context (a separate goroutine or a later time).
+type GoDeferStmt struct {
+	miniStmt
+	Call Node
+}
+
+// NewGoDeferStmt returns a new go/defer statement for op
+// (ODEFER or OGO); any other op panics.
+func NewGoDeferStmt(pos src.XPos, op Op, call Node) *GoDeferStmt {
+	n := &GoDeferStmt{Call: call}
+	n.pos = pos
+	switch op {
+	case ODEFER, OGO:
+		n.op = op
+	default:
+		panic("NewGoDeferStmt " + op.String())
+	}
+	return n
+}
+
+// Left/SetLeft expose Call through the generic Node interface.
+func (n *GoDeferStmt) Left() Node     { return n.Call }
+func (n *GoDeferStmt) SetLeft(x Node) { n.Call = x }
+
+// An IfStmt is an if statement: if Init; Cond { Body } else { Else }.
+type IfStmt struct {
+	miniStmt
+	Cond    Node
+	Body_   Nodes
+	Else    Nodes
+	Likely_ bool // code layout hint
+}
+
+// NewIfStmt returns a new OIF with condition cond, then-body body,
+// and else-body els.
+func NewIfStmt(pos src.XPos, cond Node, body, els []Node) *IfStmt {
+	n := &IfStmt{Cond: cond}
+	n.pos = pos
+	n.op = OIF
+	n.Body_.Set(body)
+	n.Else.Set(els)
+	return n
+}
+
+// Generic Node accessors: Left is Cond, Body is the then-branch,
+// Rlist is the else-branch.
+func (n *IfStmt) Left() Node       { return n.Cond }
+func (n *IfStmt) SetLeft(x Node)   { n.Cond = x }
+func (n *IfStmt) Body() Nodes      { return n.Body_ }
+func (n *IfStmt) PtrBody() *Nodes  { return &n.Body_ }
+func (n *IfStmt) SetBody(x Nodes)  { n.Body_ = x }
+func (n *IfStmt) Rlist() Nodes     { return n.Else }
+func (n *IfStmt) PtrRlist() *Nodes { return &n.Else }
+func (n *IfStmt) SetRlist(x Nodes) { n.Else = x }
+func (n *IfStmt) Likely() bool     { return n.Likely_ }
+func (n *IfStmt) SetLikely(x bool) { n.Likely_ = x }
+
+// An InlineMarkStmt is a marker placed just before an inlined body.
+type InlineMarkStmt struct {
+	miniStmt
+	Index int64
+}
+
+// NewInlineMarkStmt returns a new OINLMARK recording the given inline
+// tree index.
+func NewInlineMarkStmt(pos src.XPos, index int64) *InlineMarkStmt {
+	n := &InlineMarkStmt{Index: index}
+	n.pos = pos
+	n.op = OINLMARK
+	return n
+}
+
+// Offset/SetOffset expose Index through the generic Node interface.
+func (n *InlineMarkStmt) Offset() int64     { return n.Index }
+func (n *InlineMarkStmt) SetOffset(x int64) { n.Index = x }
+
+// A LabelStmt is a label statement (just the label, not including the statement it labels).
+type LabelStmt struct {
+	miniStmt
+	Label *types.Sym // "Label:"
+}
+
+// NewLabelStmt returns a new OLABEL for label.
+func NewLabelStmt(pos src.XPos, label *types.Sym) *LabelStmt {
+	n := &LabelStmt{Label: label}
+	n.pos = pos
+	n.op = OLABEL
+	return n
+}
+
+// Sym/SetSym expose Label through the generic Node interface.
+func (n *LabelStmt) Sym() *types.Sym     { return n.Label }
+func (n *LabelStmt) SetSym(x *types.Sym) { n.Label = x }
+
+// A RangeStmt is a range loop: for Vars = range X { Stmts }.
+// Its Op is ORANGE (NewRangeStmt sets it; there is no SetOp).
+type RangeStmt struct {
+	miniStmt
+	Label     *types.Sym
+	Vars      Nodes // TODO(rsc): Replace with Key, Value Node
+	Def       bool
+	X         Node
+	Body_     Nodes
+	HasBreak_ bool
+	typ       *types.Type // TODO(rsc): Remove - use X.Type() instead
+	Prealloc  *Name
+}
+
+// NewRangeStmt returns a new ORANGE loop ranging over x,
+// assigning to vars, with body body.
+func NewRangeStmt(pos src.XPos, vars []Node, x Node, body []Node) *RangeStmt {
+	n := &RangeStmt{X: x}
+	n.pos = pos
+	n.op = ORANGE
+	n.Vars.Set(vars)
+	n.Body_.Set(body)
+	return n
+}
+
+// Generic Node accessors: Right is X (the ranged expression),
+// List is Vars, Colas is Def.
+func (n *RangeStmt) Sym() *types.Sym       { return n.Label }
+func (n *RangeStmt) SetSym(x *types.Sym)   { n.Label = x }
+func (n *RangeStmt) Right() Node           { return n.X }
+func (n *RangeStmt) SetRight(x Node)       { n.X = x }
+func (n *RangeStmt) Body() Nodes           { return n.Body_ }
+func (n *RangeStmt) PtrBody() *Nodes       { return &n.Body_ }
+func (n *RangeStmt) SetBody(x Nodes)       { n.Body_ = x }
+func (n *RangeStmt) List() Nodes           { return n.Vars }
+func (n *RangeStmt) PtrList() *Nodes       { return &n.Vars }
+func (n *RangeStmt) SetList(x Nodes)       { n.Vars = x }
+func (n *RangeStmt) HasBreak() bool        { return n.HasBreak_ }
+func (n *RangeStmt) SetHasBreak(b bool)    { n.HasBreak_ = b }
+func (n *RangeStmt) Colas() bool           { return n.Def }
+func (n *RangeStmt) SetColas(b bool)       { n.Def = b }
+func (n *RangeStmt) Type() *types.Type     { return n.typ }
+func (n *RangeStmt) SetType(x *types.Type) { n.typ = x }
+
+// A ReturnStmt is a return statement.
+type ReturnStmt struct {
+	miniStmt
+	orig    Node  // for typecheckargs rewrite
+	Results Nodes // return list
+}
+
+// NewReturnStmt returns a new ORETURN returning results.
+// orig initially points at the node itself.
+func NewReturnStmt(pos src.XPos, results []Node) *ReturnStmt {
+	n := &ReturnStmt{}
+	n.pos = pos
+	n.op = ORETURN
+	n.orig = n
+	n.Results.Set(results)
+	return n
+}
+
+// Generic Node accessors: List maps to Results.
+func (n *ReturnStmt) Orig() Node      { return n.orig }
+func (n *ReturnStmt) SetOrig(x Node)  { n.orig = x }
+func (n *ReturnStmt) List() Nodes     { return n.Results }
+func (n *ReturnStmt) PtrList() *Nodes { return &n.Results }
+func (n *ReturnStmt) SetList(x Nodes) { n.Results = x }
+func (n *ReturnStmt) IsDDD() bool     { return false } // typecheckargs asks
+
+// A SelectStmt is a select statement: select { Cases }.
+type SelectStmt struct {
+	miniStmt
+	Label     *types.Sym
+	Cases     Nodes
+	HasBreak_ bool
+
+	// TODO(rsc): Instead of recording here, replace with a block?
+	Compiled Nodes // compiled form, after walkswitch
+}
+
+// NewSelectStmt returns a new OSELECT with the given cases.
+func NewSelectStmt(pos src.XPos, cases []Node) *SelectStmt {
+	n := &SelectStmt{}
+	n.pos = pos
+	n.op = OSELECT
+	n.Cases.Set(cases)
+	return n
+}
+
+// Generic Node accessors: List maps to Cases; Body maps to the
+// compiled (post-walk) form, not the source cases.
+func (n *SelectStmt) List() Nodes         { return n.Cases }
+func (n *SelectStmt) PtrList() *Nodes     { return &n.Cases }
+func (n *SelectStmt) SetList(x Nodes)     { n.Cases = x }
+func (n *SelectStmt) Sym() *types.Sym     { return n.Label }
+func (n *SelectStmt) SetSym(x *types.Sym) { n.Label = x }
+func (n *SelectStmt) HasBreak() bool      { return n.HasBreak_ }
+func (n *SelectStmt) SetHasBreak(x bool)  { n.HasBreak_ = x }
+func (n *SelectStmt) Body() Nodes         { return n.Compiled }
+func (n *SelectStmt) PtrBody() *Nodes     { return &n.Compiled }
+func (n *SelectStmt) SetBody(x Nodes)     { n.Compiled = x }
+
+// A SendStmt is a send statement: X <- Y.
+type SendStmt struct {
+	miniStmt
+	Chan  Node
+	Value Node
+}
+
+// NewSendStmt returns a new OSEND sending value on ch.
+func NewSendStmt(pos src.XPos, ch, value Node) *SendStmt {
+	n := &SendStmt{Chan: ch, Value: value}
+	n.pos = pos
+	n.op = OSEND
+	return n
+}
+
+// Generic Node accessors: Left is Chan, Right is Value.
+func (n *SendStmt) Left() Node      { return n.Chan }
+func (n *SendStmt) SetLeft(x Node)  { n.Chan = x }
+func (n *SendStmt) Right() Node     { return n.Value }
+func (n *SendStmt) SetRight(y Node) { n.Value = y }
+
+// A SwitchStmt is a switch statement: switch Init; Expr { Cases }.
+type SwitchStmt struct {
+ miniStmt
+ Tag Node
+ Cases Nodes // list of *CaseStmt
+ Label *types.Sym
+ HasBreak_ bool
+
+ // TODO(rsc): Instead of recording here, replace with a block?
+ Compiled Nodes // compiled form, after walkswitch
+}
+
+// NewSwitchStmt returns a new OSWITCH node with the given tag and cases.
+func NewSwitchStmt(pos src.XPos, tag Node, cases []Node) *SwitchStmt {
+ n := &SwitchStmt{Tag: tag}
+ n.pos = pos
+ n.op = OSWITCH
+ n.Cases.Set(cases)
+ return n
+}
+
+func (n *SwitchStmt) Left() Node { return n.Tag }
+func (n *SwitchStmt) SetLeft(x Node) { n.Tag = x }
+func (n *SwitchStmt) List() Nodes { return n.Cases }
+func (n *SwitchStmt) PtrList() *Nodes { return &n.Cases }
+func (n *SwitchStmt) SetList(x Nodes) { n.Cases = x }
+// The Body accessors expose Compiled, the lowered form, not Cases.
+func (n *SwitchStmt) Body() Nodes { return n.Compiled }
+func (n *SwitchStmt) PtrBody() *Nodes { return &n.Compiled }
+func (n *SwitchStmt) SetBody(x Nodes) { n.Compiled = x }
+func (n *SwitchStmt) Sym() *types.Sym { return n.Label }
+func (n *SwitchStmt) SetSym(x *types.Sym) { n.Label = x }
+func (n *SwitchStmt) HasBreak() bool { return n.HasBreak_ }
+func (n *SwitchStmt) SetHasBreak(x bool) { n.HasBreak_ = x }
+
+// A TypeSwitchGuard is the [Name :=] X.(type) in a type switch.
+type TypeSwitchGuard struct {
+ miniNode
+ Tag *Ident
+ X Node
+ Used bool
+}
+
+// NewTypeSwitchGuard returns a new OTYPESW node; tag may be nil
+// (no := assignment), which Left/SetLeft handle explicitly.
+func NewTypeSwitchGuard(pos src.XPos, tag *Ident, x Node) *TypeSwitchGuard {
+ n := &TypeSwitchGuard{Tag: tag, X: x}
+ n.pos = pos
+ n.op = OTYPESW
+ return n
+}
+
+// Left returns the tag as a Node. The explicit nil check matters:
+// returning a nil *Ident directly would yield a non-nil Node
+// interface holding a typed nil.
+func (n *TypeSwitchGuard) Left() Node {
+ if n.Tag == nil {
+ return nil
+ }
+ return n.Tag
+}
+func (n *TypeSwitchGuard) SetLeft(x Node) {
+ if x == nil {
+ n.Tag = nil
+ return
+ }
+ n.Tag = x.(*Ident)
+}
+func (n *TypeSwitchGuard) Right() Node { return n.X }
+func (n *TypeSwitchGuard) SetRight(x Node) { n.X = x }
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "fmt"
+)
+
+// Nodes that represent the syntax of a type before type-checking.
+// After type-checking, they serve only as shells around a *types.Type.
+// Calling TypeNode converts a *types.Type to a Node shell.
+
+// An Ntype is a Node that syntactically looks like a type.
+// It can be the raw syntax for a type before typechecking,
+// or it can be an OTYPE with Type() set to a *types.Type.
+// Note that syntax doesn't guarantee it's a type: an expression
+// like *fmt is an Ntype (we don't know whether names are types yet),
+// but at least 1+1 is not an Ntype.
+type Ntype interface {
+ Node
+ CanBeNtype()
+}
+
+// A miniType is a minimal type syntax Node implementation,
+// to be embedded as the first field in a larger node implementation.
+type miniType struct {
+ miniNode
+ typ *types.Type
+}
+
+// CanBeNtype marks miniType (and every node embedding it) as an Ntype.
+func (*miniType) CanBeNtype() {}
+
+func (n *miniType) Type() *types.Type { return n.typ }
+
+// setOTYPE changes n to be an OTYPE node returning t.
+// Rewriting the node in place this way should not be strictly
+// necessary (we should be able to update the uses with
+// proper OTYPE nodes), but it's mostly harmless and easy
+// to keep doing for now.
+//
+// setOTYPE also records t.Nod = self if t.Nod is not already set.
+// (Some types are shared by multiple OTYPE nodes, so only
+// the first such node is used as t.Nod.)
+//
+// setOTYPE panics if n already has a type set.
+func (n *miniType) setOTYPE(t *types.Type, self Node) {
+ if n.typ != nil {
+ panic(n.op.String() + " SetType: type already set")
+ }
+ n.op = OTYPE
+ n.typ = t
+ t.SetNod(self)
+}
+
+func (n *miniType) Sym() *types.Sym { return nil } // for Format OTYPE
+func (n *miniType) Implicit() bool { return false } // for Format OTYPE
+
+// A ChanType represents a chan Elem syntax with the direction Dir.
+type ChanType struct {
+ miniType
+ Elem Node
+ Dir types.ChanDir
+}
+
+func NewChanType(pos src.XPos, elem Node, dir types.ChanDir) *ChanType {
+ n := &ChanType{Elem: elem, Dir: dir}
+ n.op = OTCHAN
+ n.pos = pos
+ return n
+}
+
+// SetOTYPE turns n into an OTYPE shell for t and clears the element
+// syntax node, which is no longer needed.
+func (n *ChanType) SetOTYPE(t *types.Type) {
+ n.setOTYPE(t, n)
+ n.Elem = nil
+}
+
+// A MapType represents a map[Key]Value type syntax.
+type MapType struct {
+ miniType
+ Key Node
+ Elem Node
+}
+
+func NewMapType(pos src.XPos, key, elem Node) *MapType {
+ n := &MapType{Key: key, Elem: elem}
+ n.op = OTMAP
+ n.pos = pos
+ return n
+}
+
+// SetOTYPE turns n into an OTYPE shell for t and clears the key and
+// element syntax nodes.
+func (n *MapType) SetOTYPE(t *types.Type) {
+ n.setOTYPE(t, n)
+ n.Key = nil
+ n.Elem = nil
+}
+
+// A StructType represents a struct { ... } type syntax.
+type StructType struct {
+ miniType
+ Fields []*Field
+}
+
+func NewStructType(pos src.XPos, fields []*Field) *StructType {
+ n := &StructType{Fields: fields}
+ n.op = OTSTRUCT
+ n.pos = pos
+ return n
+}
+
+// SetOTYPE turns n into an OTYPE shell for t and clears the field
+// syntax list.
+func (n *StructType) SetOTYPE(t *types.Type) {
+ n.setOTYPE(t, n)
+ n.Fields = nil
+}
+
+func deepCopyFields(pos src.XPos, fields []*Field) []*Field {
+ var out []*Field
+ for _, f := range fields {
+ out = append(out, f.deepCopy(pos))
+ }
+ return out
+}
+
+// An InterfaceType represents an interface { ... } type syntax.
+type InterfaceType struct {
+ miniType
+ Methods []*Field
+}
+
+func NewInterfaceType(pos src.XPos, methods []*Field) *InterfaceType {
+ n := &InterfaceType{Methods: methods}
+ n.op = OTINTER
+ n.pos = pos
+ return n
+}
+
+// SetOTYPE turns n into an OTYPE shell for t and clears the method
+// syntax list.
+func (n *InterfaceType) SetOTYPE(t *types.Type) {
+ n.setOTYPE(t, n)
+ n.Methods = nil
+}
+
+// A FuncType represents a func(Args) Results type syntax.
+type FuncType struct {
+ miniType
+ Recv *Field
+ Params []*Field
+ Results []*Field
+}
+
+func NewFuncType(pos src.XPos, rcvr *Field, args, results []*Field) *FuncType {
+ n := &FuncType{Recv: rcvr, Params: args, Results: results}
+ n.op = OTFUNC
+ n.pos = pos
+ return n
+}
+
+// SetOTYPE turns n into an OTYPE shell for t and clears the receiver,
+// parameter, and result syntax.
+func (n *FuncType) SetOTYPE(t *types.Type) {
+ n.setOTYPE(t, n)
+ n.Recv = nil
+ n.Params = nil
+ n.Results = nil
+}
+
+// A Field is a declared struct field, interface method, or function argument.
+// It is not a Node.
+type Field struct {
+ Pos src.XPos
+ Sym *types.Sym
+ Ntype Ntype
+ Type *types.Type
+ Embedded bool
+ IsDDD bool
+ Note string
+ Decl *Name
+}
+
+// NewField returns a new Field with the given symbol, type syntax,
+// and (possibly nil) resolved type.
+func NewField(pos src.XPos, sym *types.Sym, ntyp Ntype, typ *types.Type) *Field {
+ return &Field{Pos: pos, Sym: sym, Ntype: ntyp, Type: typ}
+}
+
+// String returns "Sym Type" for a named field, or just the type for
+// an unnamed one. It prefers the resolved Type over the Ntype syntax.
+func (f *Field) String() string {
+ var typ string
+ if f.Type != nil {
+ typ = fmt.Sprint(f.Type)
+ } else {
+ typ = fmt.Sprint(f.Ntype)
+ }
+ if f.Sym != nil {
+ return fmt.Sprintf("%v %v", f.Sym, typ)
+ }
+ return typ
+}
+
+// copy returns a shallow copy of f.
+func (f *Field) copy() *Field {
+ c := *f
+ return &c
+}
+
+// copyFields returns a new slice containing shallow copies of every
+// field in list.
+func copyFields(list []*Field) []*Field {
+ // The loop assigns every element, so there is no need to copy the
+ // original pointers into out first.
+ out := make([]*Field, len(list))
+ for i, f := range list {
+ out[i] = f.copy()
+ }
+ return out
+}
+
+// maybeDoField calls do on f's declaration and type syntax nodes,
+// threading an incoming error: once err is non-nil, no further calls
+// are made and err is returned unchanged.
+func maybeDoField(f *Field, err error, do func(Node) error) error {
+ if f != nil {
+ if err == nil && f.Decl != nil {
+ err = do(f.Decl)
+ }
+ if err == nil && f.Ntype != nil {
+ err = do(f.Ntype)
+ }
+ }
+ return err
+}
+
+// maybeDoFields applies maybeDoField to each field in list,
+// stopping at the first non-nil error.
+func maybeDoFields(list []*Field, err error, do func(Node) error) error {
+ if err != nil {
+ return err
+ }
+ for _, f := range list {
+ err = maybeDoField(f, err, do)
+ if err != nil {
+ return err
+ }
+ }
+ return err
+}
+
+// editField replaces f's declaration and type syntax nodes with the
+// result of applying edit to them.
+func editField(f *Field, edit func(Node) Node) {
+ if f == nil {
+ return
+ }
+ if f.Decl != nil {
+ f.Decl = edit(f.Decl).(*Name)
+ }
+ if f.Ntype != nil {
+ f.Ntype = toNtype(edit(f.Ntype))
+ }
+}
+
+// editFields applies editField to every field in list.
+func editFields(list []*Field, edit func(Node) Node) {
+ for _, f := range list {
+ editField(f, edit)
+ }
+}
+
+// deepCopy returns a deep copy of f: Decl and Ntype are copied with
+// DeepCopy, everything else by value. If pos is a known position it
+// replaces the field's own position in the copy.
+func (f *Field) deepCopy(pos src.XPos) *Field {
+ if f == nil {
+ return nil
+ }
+ fpos := pos
+ if !pos.IsKnown() {
+ fpos = f.Pos
+ }
+ decl := f.Decl
+ if decl != nil {
+ // NOTE(review): passes pos (possibly unknown), not fpos —
+ // presumably DeepCopy treats an unknown pos as "keep original"; confirm.
+ decl = DeepCopy(pos, decl).(*Name)
+ }
+ ntype := f.Ntype
+ if ntype != nil {
+ ntype = DeepCopy(pos, ntype).(Ntype)
+ }
+ // No keyed literal here: if a new struct field is added, we want this to stop compiling.
+ return &Field{fpos, f.Sym, ntype, f.Type, f.Embedded, f.IsDDD, f.Note, decl}
+}
+
+// A SliceType represents a []Elem type syntax.
+// If DDD is true, it's the ...Elem at the end of a function list.
+type SliceType struct {
+ miniType
+ Elem Node
+ DDD bool
+}
+
+func NewSliceType(pos src.XPos, elem Node) *SliceType {
+ n := &SliceType{Elem: elem}
+ n.op = OTSLICE
+ n.pos = pos
+ return n
+}
+
+// SetOTYPE turns n into an OTYPE shell for t and clears the element
+// syntax node.
+func (n *SliceType) SetOTYPE(t *types.Type) {
+ n.setOTYPE(t, n)
+ n.Elem = nil
+}
+
+// An ArrayType represents a [Len]Elem type syntax.
+// If Len is nil, the type is a [...]Elem in an array literal.
+type ArrayType struct {
+ miniType
+ Len Node
+ Elem Node
+}
+
+func NewArrayType(pos src.XPos, size Node, elem Node) *ArrayType {
+ n := &ArrayType{Len: size, Elem: elem}
+ n.op = OTARRAY
+ n.pos = pos
+ return n
+}
+
+// SetOTYPE turns n into an OTYPE shell for t and clears the length
+// and element syntax nodes.
+func (n *ArrayType) SetOTYPE(t *types.Type) {
+ n.setOTYPE(t, n)
+ n.Len = nil
+ n.Elem = nil
+}
+
+// A typeNode is a Node wrapper for type t.
+type typeNode struct {
+ miniNode
+ typ *types.Type
+}
+
+func newTypeNode(pos src.XPos, typ *types.Type) *typeNode {
+ n := &typeNode{typ: typ}
+ n.pos = pos
+ n.op = OTYPE
+ return n
+}
+
+func (n *typeNode) Type() *types.Type { return n.typ }
+func (n *typeNode) Sym() *types.Sym { return n.typ.Sym() }
+func (n *typeNode) CanBeNtype() {}
+
+// TypeNode returns the Node representing the type t.
+// If t already has an associated object node, that node is reused
+// (after a consistency check); otherwise a fresh typeNode with no
+// position is returned.
+func TypeNode(t *types.Type) Ntype {
+ if n := t.Obj(); n != nil {
+ if n.Type() != t {
+ base.Fatalf("type skew: %v has type %v, but expected %v", n, n.Type(), t)
+ }
+ return n.(Ntype)
+ }
+ return newTypeNode(src.NoXPos, t)
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "go/constant"
+ "math"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+)
+
+// ConstType returns the constant kind of n, or constant.Unknown if n
+// is nil or not an OLITERAL node.
+func ConstType(n Node) constant.Kind {
+ if n != nil && n.Op() == OLITERAL {
+ return n.Val().Kind()
+ }
+ return constant.Unknown
+}
+
+// ConstValue returns the constant value stored in n as an interface{}.
+// It returns int64s for ints and runes, float64s for floats,
+// and complex128s for complex values.
+func ConstValue(n Node) interface{} {
+ switch v := n.Val(); v.Kind() {
+ default:
+ base.Fatalf("unexpected constant: %v", v)
+ panic("unreachable")
+ case constant.Bool:
+ return constant.BoolVal(v)
+ case constant.String:
+ return constant.StringVal(v)
+ case constant.Int:
+ return IntVal(n.Type(), v)
+ case constant.Float:
+ return Float64Val(v)
+ case constant.Complex:
+ return complex(Float64Val(constant.Real(v)), Float64Val(constant.Imag(v)))
+ }
+}
+
+// IntVal returns v converted to int64.
+// Note: if t is unsigned, very large values will be converted to negative int64.
+func IntVal(t *types.Type, v constant.Value) int64 {
+ if t.IsUnsigned() {
+ if x, ok := constant.Uint64Val(v); ok {
+ return int64(x)
+ }
+ } else {
+ if x, ok := constant.Int64Val(v); ok {
+ return x
+ }
+ }
+ base.Fatalf("%v out of range for %v", v, t)
+ panic("unreachable")
+}
+
+// Float64Val returns v converted to float64, failing (Fatalf) if the
+// result would be infinite.
+func Float64Val(v constant.Value) float64 {
+ if x, _ := constant.Float64Val(v); !math.IsInf(x, 0) {
+ return x + 0 // avoid -0 (should not be needed, but be conservative)
+ }
+ base.Fatalf("bad float64 value: %v", v)
+ panic("unreachable")
+}
+
+// AssertValidTypeForConst fails (Fatalf) if t cannot represent the
+// constant value v.
+func AssertValidTypeForConst(t *types.Type, v constant.Value) {
+ if !ValidTypeForConst(t, v) {
+ base.Fatalf("%v does not represent %v", t, v)
+ }
+}
+
+// ValidTypeForConst reports whether t is a valid type for a constant
+// of v's kind.
+func ValidTypeForConst(t *types.Type, v constant.Value) bool {
+ switch v.Kind() {
+ case constant.Unknown:
+ return OKForConst[t.Kind()]
+ case constant.Bool:
+ return t.IsBoolean()
+ case constant.String:
+ return t.IsString()
+ case constant.Int:
+ return t.IsInteger()
+ case constant.Float:
+ return t.IsFloat()
+ case constant.Complex:
+ return t.IsComplex()
+ }
+
+ base.Fatalf("unexpected constant kind: %v", v)
+ panic("unreachable")
+}
+
+// NewLiteral returns a new untyped constant with value v.
+func NewLiteral(v constant.Value) Node {
+ return NewBasicLit(base.Pos, v)
+}
+
+// idealType returns the untyped ("ideal") type for a constant kind.
+func idealType(ct constant.Kind) *types.Type {
+ switch ct {
+ case constant.String:
+ return types.UntypedString
+ case constant.Bool:
+ return types.UntypedBool
+ case constant.Int:
+ return types.UntypedInt
+ case constant.Float:
+ return types.UntypedFloat
+ case constant.Complex:
+ return types.UntypedComplex
+ }
+ base.Fatalf("unexpected Ctype: %v", ct)
+ return nil
+}
+
+// OKForConst records, indexed by type kind, whether a constant of
+// Unknown kind is valid for that type (see ValidTypeForConst).
+var OKForConst [types.NTYPE]bool
+
+// CanInt64 reports whether it is safe to call Int64Val() on n.
+func CanInt64(n Node) bool {
+ if IsConst(n, constant.Int) {
+ // If the value inside n cannot be represented as an int64,
+ // the return value of Int64 is undefined.
+ _, ok := constant.Int64Val(n.Val())
+ return ok
+ }
+ return false
+}
+
+// Int64Val returns n as an int64.
+// n must be an integer or rune constant that fits in an int64.
+func Int64Val(n Node) int64 {
+ if IsConst(n, constant.Int) {
+ if x, ok := constant.Int64Val(n.Val()); ok {
+ return x
+ }
+ }
+ base.Fatalf("Int64Val(%v)", n)
+ panic("unreachable")
+}
+
+// Uint64Val returns n as an uint64.
+// n must be an integer or rune constant that fits in a uint64.
+func Uint64Val(n Node) uint64 {
+ if IsConst(n, constant.Int) {
+ if x, ok := constant.Uint64Val(n.Val()); ok {
+ return x
+ }
+ }
+ base.Fatalf("Uint64Val(%v)", n)
+ panic("unreachable")
+}
+
+// BoolVal returns n as a bool.
+// n must be a boolean constant.
+func BoolVal(n Node) bool {
+ if IsConst(n, constant.Bool) {
+ return constant.BoolVal(n.Val())
+ }
+ base.Fatalf("BoolVal(%v)", n)
+ panic("unreachable")
+}
+
+// StringVal returns the value of a literal string Node as a string.
+// n must be a string constant.
+func StringVal(n Node) string {
+ if IsConst(n, constant.String) {
+ return constant.StringVal(n.Val())
+ }
+ base.Fatalf("StringVal(%v)", n)
+ panic("unreachable")
+}
--- /dev/null
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// IR visitors for walking the IR tree.
+//
+// The lowest level helpers are DoChildren and EditChildren,
+// which nodes help implement (TODO(rsc): eventually) and
+// provide control over whether and when recursion happens
+// during the walk of the IR.
+//
+// Although these are both useful directly, two simpler patterns
+// are fairly common and also provided: Inspect and Scan.
+
+package ir
+
+import (
+ "errors"
+)
+
+// DoChildren calls do(x) on each of n's non-nil child nodes x.
+// If any call returns a non-nil error, DoChildren stops and returns that error.
+// Otherwise, DoChildren returns nil.
+//
+// Note that DoChildren(n, do) only calls do(x) for n's immediate children.
+// If x's children should be processed, then do(x) must call DoChildren(x, do).
+//
+// DoChildren allows constructing general traversals of the IR graph
+// that can stop early if needed. The most general usage is:
+//
+// var do func(ir.Node) error
+// do = func(x ir.Node) error {
+// ... processing BEFORE visiting children ...
+// if ... should visit children ... {
+// ir.DoChildren(x, do)
+// ... processing AFTER visiting children ...
+// }
+// if ... should stop parent DoChildren call from visiting siblings ... {
+// return non-nil error
+// }
+// return nil
+// }
+// do(root)
+//
+// Since DoChildren does not generate any errors itself, if the do function
+// never wants to stop the traversal, it can assume that DoChildren itself
+// will always return nil, simplifying to:
+//
+// var do func(ir.Node) error
+// do = func(x ir.Node) error {
+// ... processing BEFORE visiting children ...
+// if ... should visit children ... {
+// ir.DoChildren(x, do)
+// }
+// ... processing AFTER visiting children ...
+// return nil
+// }
+// do(root)
+//
+// The Visit function illustrates a further simplification of the pattern,
+// only processing before visiting children and never stopping:
+//
+// func Visit(n ir.Node, visit func(ir.Node)) {
+// var do func(ir.Node) error
+// do = func(x ir.Node) error {
+// visit(x)
+// return ir.DoChildren(x, do)
+// }
+// if n != nil {
+// do(n)
+// }
+// }
+//
+// The Any function illustrates a different simplification of the pattern,
+// visiting each node and then its children, recursively, until finding
+// a node x for which cond(x) returns true, at which point the entire
+// traversal stops and returns true.
+//
+// func Any(n ir.Node, cond func(ir.Node) bool) bool {
+// stop := errors.New("stop")
+// var do func(ir.Node) error
+// do = func(x ir.Node) error {
+// if cond(x) {
+// return stop
+// }
+// return ir.DoChildren(x, do)
+// }
+// return do(n) == stop
+// }
+//
+// Visit and Any are presented above as examples of how to use
+// DoChildren effectively, but of course, usage that fits within the
+// simplifications captured by Visit or Any will be best served
+// by directly calling the ones provided by this package.
+func DoChildren(n Node, do func(Node) error) error {
+ if n == nil {
+ return nil
+ }
+ return n.doChildren(do)
+}
+
+// DoList calls do on each non-nil node x in the list, in list order.
+// If any call returns a non-nil error, DoList stops and returns that error.
+// Otherwise DoList returns nil.
+//
+// Note that DoList only calls do on the nodes in the list, not their children.
+// If x's children should be processed, do(x) must call DoChildren(x, do) itself.
+func DoList(list Nodes, do func(Node) error) error {
+ for _, x := range list.Slice() {
+ if x == nil {
+ continue
+ }
+ if err := do(x); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Visit visits each non-nil node x in the IR tree rooted at n
+// in a depth-first preorder traversal, calling visit on each node visited.
+func Visit(n Node, visit func(Node)) {
+ if n == nil {
+ return
+ }
+ var do func(Node) error
+ do = func(x Node) error {
+ visit(x)
+ return DoChildren(x, do)
+ }
+ do(n)
+}
+
+// VisitList calls Visit(x, visit) for each node x in the list.
+func VisitList(list Nodes, visit func(Node)) {
+ s := list.Slice()
+ for i := range s {
+ Visit(s[i], visit)
+ }
+}
+
+// stop is the sentinel error used by Any to abort the DoChildren
+// traversal as soon as a matching node is found.
+var stop = errors.New("stop")
+
+// Any looks for a non-nil node x in the IR tree rooted at n
+// for which cond(x) returns true.
+// Any considers nodes in a depth-first, preorder traversal.
+// When Any finds a node x such that cond(x) is true,
+// Any ends the traversal and returns true immediately.
+// Otherwise Any returns false after completing the entire traversal.
+func Any(n Node, cond func(Node) bool) bool {
+ if n == nil {
+ return false
+ }
+ var do func(Node) error
+ do = func(x Node) error {
+ if cond(x) {
+ return stop
+ }
+ return DoChildren(x, do)
+ }
+ return do(n) == stop
+}
+
+// AnyList calls Any(x, cond) for each node x in the list, in order.
+// If any call returns true, AnyList stops and returns true.
+// Otherwise, AnyList returns false after calling Any(x, cond)
+// for every x in the list.
+func AnyList(list Nodes, cond func(Node) bool) bool {
+ for _, x := range list.Slice() {
+ if Any(x, cond) {
+ return true
+ }
+ }
+ return false
+}
+
+// EditChildren edits the child nodes of n, replacing each child x with edit(x).
+//
+// Note that EditChildren(n, edit) only calls edit(x) for n's immediate children.
+// If x's children should be processed, then edit(x) must call EditChildren(x, edit).
+//
+// EditChildren allows constructing general editing passes of the IR graph.
+// The most general usage is:
+//
+// var edit func(ir.Node) ir.Node
+// edit = func(x ir.Node) ir.Node {
+// ... processing BEFORE editing children ...
+// if ... should edit children ... {
+// EditChildren(x, edit)
+// ... processing AFTER editing children ...
+// }
+// ... return x ...
+// }
+// n = edit(n)
+//
+// EditChildren edits the node in place. To edit a copy, call Copy first.
+// As an example, a simple deep copy implementation would be:
+//
+// func deepCopy(n ir.Node) ir.Node {
+// var edit func(ir.Node) ir.Node
+// edit = func(x ir.Node) ir.Node {
+// x = ir.Copy(x)
+// ir.EditChildren(x, edit)
+// return x
+// }
+// return edit(n)
+// }
+//
+// Of course, in this case it is better to call ir.DeepCopy than to build one anew.
+func EditChildren(n Node, edit func(Node) Node) {
+ if n == nil {
+ return // editing no node is a no-op
+ }
+ n.editChildren(edit)
+}
+
+// editList applies edit to each non-nil node in the list, storing the
+// result back into the same slot.
+//
+// Note that editList only calls edit on the nodes in the list, not their children.
+// If x's children should be processed, edit(x) must call EditChildren(x, edit) itself.
+func editList(list Nodes, edit func(Node) Node) {
+ s := list.Slice()
+ for i := range s {
+ if x := s[i]; x != nil {
+ s[i] = edit(x)
+ }
+ }
+}
// Check at both 1 and 8-byte alignments.
t.Run("Copy", func(t *testing.T) {
const copyCode = `package x
-func s128a1(x *[128]int8) [128]int8 {
+func s128a1(x *[128]int8) [128]int8 {
return *x
}
func s127a1(x *[127]int8) [127]int8 {
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":11},"end":{"line":4,"character":11}}},"message":"inlineLoc"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from \u0026y.b (address-of)"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":9},"end":{"line":4,"character":9}}},"message":"inlineLoc"},`+
- `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from ~R0 = \u003cN\u003e (assign-pair)"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from ~R0 = \u0026y.b (assign-pair)"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: flow: ~r2 = ~R0:"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: from return (*int)(~R0) (return)"}]}`)
})
package mips
import (
+ "cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/internal/obj"
"cmd/internal/obj/mips"
}
if cnt < int64(4*gc.Widthptr) {
for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
- p = pp.Appendpp(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, gc.Ctxt.FixedFrameSize()+off+i)
+ p = pp.Appendpp(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, base.Ctxt.FixedFrameSize()+off+i)
}
} else {
//fmt.Printf("zerorange frame:%v, lo: %v, hi:%v \n", frame ,lo, hi)
// MOVW R0, (Widthptr)r1
// ADD $Widthptr, r1
// BNE r1, r2, loop
- p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, gc.Ctxt.FixedFrameSize()+off-4, obj.TYPE_REG, mips.REGRT1, 0)
+ p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-4, obj.TYPE_REG, mips.REGRT1, 0)
p.Reg = mips.REGSP
p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
p.Reg = mips.REGRT1
import (
"math"
+ "cmd/compile/internal/base"
"cmd/compile/internal/gc"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
- case *gc.Node:
+ case *ir.Name:
wantreg = "SP"
gc.AddAux(&p.From, v)
case nil:
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
- if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
- gc.Warnl(v.Pos, "generated nil check")
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
}
case ssa.OpMIPSFPFlagTrue,
ssa.OpMIPSFPFlagFalse:
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_ADDR
- p.From.Offset = -gc.Ctxt.FixedFrameSize()
+ p.From.Offset = -base.Ctxt.FixedFrameSize()
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
import (
"math"
+ "cmd/compile/internal/base"
"cmd/compile/internal/gc"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
- case *gc.Node:
+ case *ir.Name:
wantreg = "SP"
gc.AddAux(&p.From, v)
case nil:
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
- if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
- gc.Warnl(v.Pos, "generated nil check")
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
}
case ssa.OpMIPS64FPFlagTrue,
ssa.OpMIPS64FPFlagFalse:
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_ADDR
- p.From.Offset = -gc.Ctxt.FixedFrameSize()
+ p.From.Offset = -base.Ctxt.FixedFrameSize()
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
package ppc64
import (
+ "cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/internal/obj"
"cmd/internal/obj/ppc64"
}
if cnt < int64(4*gc.Widthptr) {
for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
- p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, gc.Ctxt.FixedFrameSize()+off+i)
+ p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, base.Ctxt.FixedFrameSize()+off+i)
}
} else if cnt <= int64(128*gc.Widthptr) {
- p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, gc.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGRT1, 0)
+ p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGRT1, 0)
p.Reg = ppc64.REGSP
p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffzero
p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
} else {
- p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, gc.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGTMP, 0)
+ p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGTMP, 0)
p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
p.Reg = ppc64.REGSP
p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
// on ppc64 in both shared and non-shared modes.
ginsnop(pp)
- if gc.Ctxt.Flag_shared {
+ if base.Ctxt.Flag_shared {
p := pp.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_MEM
p.From.Offset = 24
package ppc64
import (
+ "cmd/compile/internal/base"
"cmd/compile/internal/gc"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_ADDR
- p.From.Offset = -gc.Ctxt.FixedFrameSize()
+ p.From.Offset = -base.Ctxt.FixedFrameSize()
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
p.To.Reg = v.Reg()
}
- case *obj.LSym, *gc.Node:
+ case *obj.LSym, ir.Node:
p := s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_ADDR
p.From.Reg = v.Args[0].Reg()
// Insert a hint this is not a subroutine return.
pp.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: 1})
- if gc.Ctxt.Flag_shared {
+ if base.Ctxt.Flag_shared {
// When compiling Go into PIC, the function we just
// called via pointer might have been implemented in
// a separate module and so overwritten the TOC
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
- if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
- gc.Warnl(v.Pos, "generated nil check")
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
}
// These should be resolved by rules and not make it here.
package riscv64
import (
+ "cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/internal/obj"
"cmd/internal/obj/riscv"
}
// Adjust the frame to account for LR.
- off += gc.Ctxt.FixedFrameSize()
+ off += base.Ctxt.FixedFrameSize()
if cnt < int64(4*gc.Widthptr) {
for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
package riscv64
import (
+ "cmd/compile/internal/base"
"cmd/compile/internal/gc"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/obj"
case 8:
return riscv.AMOVD
default:
- gc.Fatalf("unknown float width for load %d in type %v", width, t)
+ base.Fatalf("unknown float width for load %d in type %v", width, t)
return 0
}
}
case 8:
return riscv.AMOV
default:
- gc.Fatalf("unknown width for load %d in type %v", width, t)
+ base.Fatalf("unknown width for load %d in type %v", width, t)
return 0
}
}
case 8:
return riscv.AMOVD
default:
- gc.Fatalf("unknown float width for store %d in type %v", width, t)
+ base.Fatalf("unknown float width for store %d in type %v", width, t)
return 0
}
}
case 8:
return riscv.AMOV
default:
- gc.Fatalf("unknown width for store %d in type %v", width, t)
+ base.Fatalf("unknown width for store %d in type %v", width, t)
return 0
}
}
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
- case *gc.Node:
+ case *ir.Name:
wantreg = "SP"
gc.AddAux(&p.From, v)
case nil:
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = riscv.REG_ZERO
- if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos == 1 in generated wrappers
- gc.Warnl(v.Pos, "generated nil check")
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos == 1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
}
case ssa.OpRISCV64LoweredGetClosurePtr:
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(riscv.AMOV)
p.From.Type = obj.TYPE_ADDR
- p.From.Offset = -gc.Ctxt.FixedFrameSize()
+ p.From.Offset = -base.Ctxt.FixedFrameSize()
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
package s390x
import (
+ "cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/internal/obj"
"cmd/internal/obj/s390x"
}
// Adjust the frame to account for LR.
- off += gc.Ctxt.FixedFrameSize()
+ off += base.Ctxt.FixedFrameSize()
reg := int16(s390x.REGSP)
// If the off cannot fit in a 12-bit unsigned displacement then we
import (
"math"
+ "cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(s390x.AMOVD)
p.From.Type = obj.TYPE_ADDR
- p.From.Offset = -gc.Ctxt.FixedFrameSize()
+ p.From.Offset = -base.Ctxt.FixedFrameSize()
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
- if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
- gc.Warnl(v.Pos, "generated nil check")
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
}
case ssa.OpS390XMVC:
vo := v.AuxValAndOff()
Controls [2]*Value
// Auxiliary info for the block. Its value depends on the Kind.
- Aux interface{}
+ Aux Aux
AuxInt int64
// The unordered set of Values that define the operation of this block.
f.Fatalf("value %v has an AuxInt that encodes a NaN", v)
}
case auxString:
- if _, ok := v.Aux.(string); !ok {
+ if _, ok := v.Aux.(stringAux); !ok {
f.Fatalf("value %v has Aux type %T, want string", v, v.Aux)
}
canHaveAux = true
package ssa
import (
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/objabi"
// Auto returns a Node for an auto variable of the given type.
// The SSA compiler uses this function to allocate space for spills.
- Auto(src.XPos, *types.Type) GCNode
+ Auto(src.XPos, *types.Type) *ir.Name
// Given the name for a compound type, returns the name we should use
// for the parts of that compound type.
MyImportPath() string
}
-// interface used to hold a *gc.Node (a stack variable).
-// We'd use *gc.Node directly but that would lead to an import cycle.
-type GCNode interface {
- Typ() *types.Type
- String() string
- IsSynthetic() bool
- IsAutoTmp() bool
- StorageClass() StorageClass
-}
-
-type StorageClass uint8
-
-const (
- ClassAuto StorageClass = iota // local stack variable
- ClassParam // argument
- ClassParamOut // return value
-)
-
const go116lateCallExpansion = true
// LateCallExpansionEnabledWithin returns true if late call expansion should be tested
return types.CMPgt
}
-type auxmap map[interface{}]int32
+type auxmap map[Aux]int32
func cmpVal(v, w *Value, auxIDs auxmap) types.Cmp {
// Try to order these comparison by cost (cheaper first)
s string
}
+func (*tstAux) CanBeAnSSAAux() {}
+
// This tests for a bug found when partitioning, but not sorting by the Aux value.
func TestCSEAuxPartitionBug(t *testing.T) {
c := testConfig(t)
package ssa
import (
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/src"
)
// reaches stores then we delete all the stores. The other operations will then
// be eliminated by the dead code elimination pass.
func elimDeadAutosGeneric(f *Func) {
- addr := make(map[*Value]GCNode) // values that the address of the auto reaches
- elim := make(map[*Value]GCNode) // values that could be eliminated if the auto is
- used := make(map[GCNode]bool) // used autos that must be kept
+ addr := make(map[*Value]*ir.Name) // values that the address of the auto reaches
+ elim := make(map[*Value]*ir.Name) // values that could be eliminated if the auto is
+ used := make(map[*ir.Name]bool) // used autos that must be kept
// visit the value and report whether any of the maps are updated
visit := func(v *Value) (changed bool) {
switch v.Op {
case OpAddr, OpLocalAddr:
// Propagate the address if it points to an auto.
- n, ok := v.Aux.(GCNode)
- if !ok || n.StorageClass() != ClassAuto {
+ n, ok := v.Aux.(*ir.Name)
+ if !ok || n.Class() != ir.PAUTO {
return
}
if addr[v] == nil {
return
case OpVarDef, OpVarKill:
// v should be eliminated if we eliminate the auto.
- n, ok := v.Aux.(GCNode)
- if !ok || n.StorageClass() != ClassAuto {
+ n, ok := v.Aux.(*ir.Name)
+ if !ok || n.Class() != ir.PAUTO {
return
}
if elim[v] == nil {
// for open-coded defers from being removed (since they
// may not be used by the inline code, but will be used by
// panic processing).
- n, ok := v.Aux.(GCNode)
- if !ok || n.StorageClass() != ClassAuto {
+ n, ok := v.Aux.(*ir.Name)
+ if !ok || n.Class() != ir.PAUTO {
return
}
if !used[n] {
}
// Propagate any auto addresses through v.
- node := GCNode(nil)
+ var node *ir.Name
for _, a := range args {
if n, ok := addr[a]; ok && !used[n] {
if node == nil {
// Loop over all ops that affect autos taking note of which
// autos we need and also stores that we might be able to
// eliminate.
- seen := make(map[GCNode]bool)
+ seen := make(map[*ir.Name]bool)
var stores []*Value
for _, b := range f.Blocks {
for _, v := range b.Values {
- n, ok := v.Aux.(GCNode)
+ n, ok := v.Aux.(*ir.Name)
if !ok {
continue
}
- if n.StorageClass() != ClassAuto {
+ if n.Class() != ir.PAUTO {
continue
}
// Eliminate stores to unread autos.
for _, store := range stores {
- n, _ := store.Aux.(GCNode)
+ n, _ := store.Aux.(*ir.Name)
if seen[n] {
continue
}
package ssa
import (
+ "cmd/compile/internal/ir"
"cmd/internal/dwarf"
"cmd/internal/obj"
"encoding/hex"
// Slots is all the slots used in the debug info, indexed by their SlotID.
Slots []LocalSlot
// The user variables, indexed by VarID.
- Vars []GCNode
+ Vars []*ir.Name
// The slots that make up each variable, indexed by VarID.
VarSlots [][]SlotID
// The location list data, indexed by VarID. Must be processed by PutLocationList.
var BlockStart = &Value{
ID: -10000,
Op: OpInvalid,
- Aux: "BlockStart",
+ Aux: StringToAux("BlockStart"),
}
var BlockEnd = &Value{
ID: -20000,
Op: OpInvalid,
- Aux: "BlockEnd",
+ Aux: StringToAux("BlockEnd"),
}
// RegisterSet is a bitmap of registers, indexed by Register.num.
type debugState struct {
// See FuncDebug.
slots []LocalSlot
- vars []GCNode
+ vars []*ir.Name
varSlots [][]SlotID
lists [][]byte
// The pending location list entry for each user variable, indexed by VarID.
pendingEntries []pendingEntry
- varParts map[GCNode][]SlotID
+ varParts map[*ir.Name][]SlotID
blockDebug []BlockDebug
pendingSlotLocs []VarLoc
liveSlots []liveSlot
}
if state.varParts == nil {
- state.varParts = make(map[GCNode][]SlotID)
+ state.varParts = make(map[*ir.Name][]SlotID)
} else {
for n := range state.varParts {
delete(state.varParts, n)
state.vars = state.vars[:0]
for i, slot := range f.Names {
state.slots = append(state.slots, slot)
- if slot.N.IsSynthetic() {
+ if ir.IsSynthetic(slot.N) {
continue
}
for _, b := range f.Blocks {
for _, v := range b.Values {
if v.Op == OpVarDef || v.Op == OpVarKill {
- n := v.Aux.(GCNode)
- if n.IsSynthetic() {
+ n := v.Aux.(*ir.Name)
+ if ir.IsSynthetic(n) {
continue
}
state.initializeCache(f, len(state.varParts), len(state.slots))
for i, slot := range f.Names {
- if slot.N.IsSynthetic() {
+ if ir.IsSynthetic(slot.N) {
continue
}
for _, value := range f.NamedValues[slot] {
switch {
case v.Op == OpVarDef, v.Op == OpVarKill:
- n := v.Aux.(GCNode)
- if n.IsSynthetic() {
+ n := v.Aux.(*ir.Name)
+ if ir.IsSynthetic(n) {
break
}
// intPairTypes returns the pair of 32-bit int types needed to encode a 64-bit integer type on a target
// that has no 64-bit integer registers.
- intPairTypes := func(et types.EType) (tHi, tLo *types.Type) {
+ intPairTypes := func(et types.Kind) (tHi, tLo *types.Type) {
tHi = typ.UInt32
if et == types.TINT64 {
tHi = typ.Int32
case OpStructSelect:
w := selector.Args[0]
var ls []LocalSlot
- if w.Type.Etype != types.TSTRUCT { // IData artifact
+ if w.Type.Kind() != types.TSTRUCT { // IData artifact
ls = rewriteSelect(leaf, w, offset)
} else {
ls = rewriteSelect(leaf, w, offset+w.Type.FieldOff(int(selector.AuxInt)))
decomposeOne func(pos src.XPos, b *Block, base, source, mem *Value, t1 *types.Type, offArg, offStore int64) *Value,
decomposeTwo func(pos src.XPos, b *Block, base, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64) *Value) *Value {
u := source.Type
- switch u.Etype {
+ switch u.Kind() {
case types.TARRAY:
elem := u.Elem()
for i := int64(0); i < u.NumElem(); i++ {
if t.Width == regSize {
break
}
- tHi, tLo := intPairTypes(t.Etype)
+ tHi, tLo := intPairTypes(t.Kind())
mem = decomposeOne(pos, b, base, source, mem, tHi, source.AuxInt+hiOffset, offset+hiOffset)
pos = pos.WithNotStmt()
return decomposeOne(pos, b, base, source, mem, tLo, source.AuxInt+lowOffset, offset+lowOffset)
return storeArgOrLoad(pos, b, base, source.Args[0], mem, t.Elem(), offset)
case OpInt64Make:
- tHi, tLo := intPairTypes(t.Etype)
+ tHi, tLo := intPairTypes(t.Kind())
mem = storeArgOrLoad(pos, b, base, source.Args[0], mem, tHi, offset+hiOffset)
pos = pos.WithNotStmt()
return storeArgOrLoad(pos, b, base, source.Args[1], mem, tLo, offset+lowOffset)
}
// For nodes that cannot be taken apart -- OpSelectN, other structure selectors.
- switch t.Etype {
+ switch t.Kind() {
case types.TARRAY:
elt := t.Elem()
if source.Type != t && t.NumElem() == 1 && elt.Width == t.Width && t.Width == regSize {
if t.Width == regSize {
break
}
- tHi, tLo := intPairTypes(t.Etype)
+ tHi, tLo := intPairTypes(t.Kind())
sel := source.Block.NewValue1(pos, OpInt64Hi, tHi, source)
mem = storeArgOrLoad(pos, b, base, sel, mem, tHi, offset+hiOffset)
pos = pos.WithNotStmt()
offset := int64(0)
switch v.Op {
case OpStructSelect:
- if w.Type.Etype == types.TSTRUCT {
+ if w.Type.Kind() == types.TSTRUCT {
offset = w.Type.FieldOff(int(v.AuxInt))
} else { // Immediate interface data artifact, offset is zero.
f.Fatalf("Expand calls interface data problem, func %s, v=%s, w=%s\n", f.Name, v.LongString(), w.LongString())
package ssa
import (
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/arm64"
"cmd/internal/obj/s390x"
"cmd/internal/obj/x86"
"cmd/internal/src"
- "fmt"
"testing"
)
tb.Fatalf("unknown arch %s", arch)
}
if ctxt.Arch.PtrSize != 8 {
- tb.Fatal("dummyTypes is 64-bit only")
+ tb.Fatal("testTypes is 64-bit only")
}
c := &Conf{
- config: NewConfig(arch, dummyTypes, ctxt, true),
+ config: NewConfig(arch, testTypes, ctxt, true),
tb: tb,
}
return c
func (c *Conf) Frontend() Frontend {
if c.fe == nil {
- c.fe = DummyFrontend{t: c.tb, ctxt: c.config.ctxt}
+ c.fe = TestFrontend{t: c.tb, ctxt: c.config.ctxt}
}
return c.fe
}
-// DummyFrontend is a test-only frontend.
+// TestFrontend is a test-only frontend.
// It assumes 64 bit integers and pointers.
-type DummyFrontend struct {
+type TestFrontend struct {
t testing.TB
ctxt *obj.Link
}
-type DummyAuto struct {
- t *types.Type
- s string
-}
-
-func (d *DummyAuto) Typ() *types.Type {
- return d.t
-}
-
-func (d *DummyAuto) String() string {
- return d.s
-}
-
-func (d *DummyAuto) StorageClass() StorageClass {
- return ClassAuto
-}
-
-func (d *DummyAuto) IsSynthetic() bool {
- return false
-}
-
-func (d *DummyAuto) IsAutoTmp() bool {
- return true
-}
-
-func (DummyFrontend) StringData(s string) *obj.LSym {
+func (TestFrontend) StringData(s string) *obj.LSym {
return nil
}
-func (DummyFrontend) Auto(pos src.XPos, t *types.Type) GCNode {
- return &DummyAuto{t: t, s: "aDummyAuto"}
+func (TestFrontend) Auto(pos src.XPos, t *types.Type) *ir.Name {
+ n := ir.NewNameAt(pos, &types.Sym{Name: "aFakeAuto"})
+ n.SetClass(ir.PAUTO)
+ return n
}
-func (d DummyFrontend) SplitString(s LocalSlot) (LocalSlot, LocalSlot) {
- return LocalSlot{N: s.N, Type: dummyTypes.BytePtr, Off: s.Off}, LocalSlot{N: s.N, Type: dummyTypes.Int, Off: s.Off + 8}
+func (d TestFrontend) SplitString(s LocalSlot) (LocalSlot, LocalSlot) {
+ return LocalSlot{N: s.N, Type: testTypes.BytePtr, Off: s.Off}, LocalSlot{N: s.N, Type: testTypes.Int, Off: s.Off + 8}
}
-func (d DummyFrontend) SplitInterface(s LocalSlot) (LocalSlot, LocalSlot) {
- return LocalSlot{N: s.N, Type: dummyTypes.BytePtr, Off: s.Off}, LocalSlot{N: s.N, Type: dummyTypes.BytePtr, Off: s.Off + 8}
+func (d TestFrontend) SplitInterface(s LocalSlot) (LocalSlot, LocalSlot) {
+ return LocalSlot{N: s.N, Type: testTypes.BytePtr, Off: s.Off}, LocalSlot{N: s.N, Type: testTypes.BytePtr, Off: s.Off + 8}
}
-func (d DummyFrontend) SplitSlice(s LocalSlot) (LocalSlot, LocalSlot, LocalSlot) {
+func (d TestFrontend) SplitSlice(s LocalSlot) (LocalSlot, LocalSlot, LocalSlot) {
return LocalSlot{N: s.N, Type: s.Type.Elem().PtrTo(), Off: s.Off},
- LocalSlot{N: s.N, Type: dummyTypes.Int, Off: s.Off + 8},
- LocalSlot{N: s.N, Type: dummyTypes.Int, Off: s.Off + 16}
+ LocalSlot{N: s.N, Type: testTypes.Int, Off: s.Off + 8},
+ LocalSlot{N: s.N, Type: testTypes.Int, Off: s.Off + 16}
}
-func (d DummyFrontend) SplitComplex(s LocalSlot) (LocalSlot, LocalSlot) {
+func (d TestFrontend) SplitComplex(s LocalSlot) (LocalSlot, LocalSlot) {
if s.Type.Size() == 16 {
- return LocalSlot{N: s.N, Type: dummyTypes.Float64, Off: s.Off}, LocalSlot{N: s.N, Type: dummyTypes.Float64, Off: s.Off + 8}
+ return LocalSlot{N: s.N, Type: testTypes.Float64, Off: s.Off}, LocalSlot{N: s.N, Type: testTypes.Float64, Off: s.Off + 8}
}
- return LocalSlot{N: s.N, Type: dummyTypes.Float32, Off: s.Off}, LocalSlot{N: s.N, Type: dummyTypes.Float32, Off: s.Off + 4}
+ return LocalSlot{N: s.N, Type: testTypes.Float32, Off: s.Off}, LocalSlot{N: s.N, Type: testTypes.Float32, Off: s.Off + 4}
}
-func (d DummyFrontend) SplitInt64(s LocalSlot) (LocalSlot, LocalSlot) {
+func (d TestFrontend) SplitInt64(s LocalSlot) (LocalSlot, LocalSlot) {
if s.Type.IsSigned() {
- return LocalSlot{N: s.N, Type: dummyTypes.Int32, Off: s.Off + 4}, LocalSlot{N: s.N, Type: dummyTypes.UInt32, Off: s.Off}
+ return LocalSlot{N: s.N, Type: testTypes.Int32, Off: s.Off + 4}, LocalSlot{N: s.N, Type: testTypes.UInt32, Off: s.Off}
}
- return LocalSlot{N: s.N, Type: dummyTypes.UInt32, Off: s.Off + 4}, LocalSlot{N: s.N, Type: dummyTypes.UInt32, Off: s.Off}
+ return LocalSlot{N: s.N, Type: testTypes.UInt32, Off: s.Off + 4}, LocalSlot{N: s.N, Type: testTypes.UInt32, Off: s.Off}
}
-func (d DummyFrontend) SplitStruct(s LocalSlot, i int) LocalSlot {
+func (d TestFrontend) SplitStruct(s LocalSlot, i int) LocalSlot {
return LocalSlot{N: s.N, Type: s.Type.FieldType(i), Off: s.Off + s.Type.FieldOff(i)}
}
-func (d DummyFrontend) SplitArray(s LocalSlot) LocalSlot {
+func (d TestFrontend) SplitArray(s LocalSlot) LocalSlot {
return LocalSlot{N: s.N, Type: s.Type.Elem(), Off: s.Off}
}
-func (d DummyFrontend) SplitSlot(parent *LocalSlot, suffix string, offset int64, t *types.Type) LocalSlot {
+func (d TestFrontend) SplitSlot(parent *LocalSlot, suffix string, offset int64, t *types.Type) LocalSlot {
return LocalSlot{N: parent.N, Type: t, Off: offset}
}
-func (DummyFrontend) Line(_ src.XPos) string {
+func (TestFrontend) Line(_ src.XPos) string {
return "unknown.go:0"
}
-func (DummyFrontend) AllocFrame(f *Func) {
+func (TestFrontend) AllocFrame(f *Func) {
}
-func (d DummyFrontend) Syslook(s string) *obj.LSym {
+func (d TestFrontend) Syslook(s string) *obj.LSym {
return d.ctxt.Lookup(s)
}
-func (DummyFrontend) UseWriteBarrier() bool {
+func (TestFrontend) UseWriteBarrier() bool {
return true // only writebarrier_test cares
}
-func (DummyFrontend) SetWBPos(pos src.XPos) {
+func (TestFrontend) SetWBPos(pos src.XPos) {
}
-func (d DummyFrontend) Logf(msg string, args ...interface{}) { d.t.Logf(msg, args...) }
-func (d DummyFrontend) Log() bool { return true }
+func (d TestFrontend) Logf(msg string, args ...interface{}) { d.t.Logf(msg, args...) }
+func (d TestFrontend) Log() bool { return true }
-func (d DummyFrontend) Fatalf(_ src.XPos, msg string, args ...interface{}) { d.t.Fatalf(msg, args...) }
-func (d DummyFrontend) Warnl(_ src.XPos, msg string, args ...interface{}) { d.t.Logf(msg, args...) }
-func (d DummyFrontend) Debug_checknil() bool { return false }
+func (d TestFrontend) Fatalf(_ src.XPos, msg string, args ...interface{}) { d.t.Fatalf(msg, args...) }
+func (d TestFrontend) Warnl(_ src.XPos, msg string, args ...interface{}) { d.t.Logf(msg, args...) }
+func (d TestFrontend) Debug_checknil() bool { return false }
-func (d DummyFrontend) MyImportPath() string {
+func (d TestFrontend) MyImportPath() string {
return "my/import/path"
}
-var dummyTypes Types
+var testTypes Types
func init() {
// Initialize just enough of the universe and the types package to make our tests function.
// TODO(josharian): move universe initialization to the types package,
// so this test setup can share it.
-
- types.Tconv = func(t *types.Type, flag, mode int) string {
- return t.Etype.String()
- }
- types.Sconv = func(s *types.Sym, flag, mode int) string {
- return "sym"
- }
- types.FormatSym = func(sym *types.Sym, s fmt.State, verb rune, mode int) {
- fmt.Fprintf(s, "sym")
- }
- types.FormatType = func(t *types.Type, s fmt.State, verb rune, mode int) {
- fmt.Fprintf(s, "%v", t.Etype)
- }
types.Dowidth = func(t *types.Type) {}
for _, typ := range [...]struct {
width int64
- et types.EType
+ et types.Kind
}{
{1, types.TINT8},
{1, types.TUINT8},
t.Align = uint8(typ.width)
types.Types[typ.et] = t
}
- dummyTypes.SetTypPtrs()
+ testTypes.SetTypPtrs()
}
-func (d DummyFrontend) DerefItab(sym *obj.LSym, off int64) *obj.LSym { return nil }
+func (d TestFrontend) DerefItab(sym *obj.LSym, off int64) *obj.LSym { return nil }
-func (d DummyFrontend) CanSSA(t *types.Type) bool {
- // There are no un-SSAable types in dummy land.
+func (d TestFrontend) CanSSA(t *types.Type) bool {
+ // There are no un-SSAable types in test land.
return true
}
}
// NewValue returns a new value in the block with no arguments and an aux value.
-func (b *Block) NewValue0A(pos src.XPos, op Op, t *types.Type, aux interface{}) *Value {
- if _, ok := aux.(int64); ok {
- // Disallow int64 aux values. They should be in the auxint field instead.
- // Maybe we want to allow this at some point, but for now we disallow it
- // to prevent errors like using NewValue1A instead of NewValue1I.
- b.Fatalf("aux field has int64 type op=%s type=%s aux=%v", op, t, aux)
- }
+func (b *Block) NewValue0A(pos src.XPos, op Op, t *types.Type, aux Aux) *Value {
v := b.Func.newValue(op, t, b, pos)
v.AuxInt = 0
v.Aux = aux
}
// NewValue returns a new value in the block with no arguments and both an auxint and aux values.
-func (b *Block) NewValue0IA(pos src.XPos, op Op, t *types.Type, auxint int64, aux interface{}) *Value {
+func (b *Block) NewValue0IA(pos src.XPos, op Op, t *types.Type, auxint int64, aux Aux) *Value {
v := b.Func.newValue(op, t, b, pos)
v.AuxInt = auxint
v.Aux = aux
}
// NewValue1A returns a new value in the block with one argument and an aux value.
-func (b *Block) NewValue1A(pos src.XPos, op Op, t *types.Type, aux interface{}, arg *Value) *Value {
+func (b *Block) NewValue1A(pos src.XPos, op Op, t *types.Type, aux Aux, arg *Value) *Value {
v := b.Func.newValue(op, t, b, pos)
v.AuxInt = 0
v.Aux = aux
}
// NewValue1IA returns a new value in the block with one argument and both an auxint and aux values.
-func (b *Block) NewValue1IA(pos src.XPos, op Op, t *types.Type, auxint int64, aux interface{}, arg *Value) *Value {
+func (b *Block) NewValue1IA(pos src.XPos, op Op, t *types.Type, auxint int64, aux Aux, arg *Value) *Value {
v := b.Func.newValue(op, t, b, pos)
v.AuxInt = auxint
v.Aux = aux
}
// NewValue2A returns a new value in the block with two arguments and one aux values.
-func (b *Block) NewValue2A(pos src.XPos, op Op, t *types.Type, aux interface{}, arg0, arg1 *Value) *Value {
+func (b *Block) NewValue2A(pos src.XPos, op Op, t *types.Type, aux Aux, arg0, arg1 *Value) *Value {
v := b.Func.newValue(op, t, b, pos)
v.AuxInt = 0
v.Aux = aux
}
// NewValue2IA returns a new value in the block with two arguments and both an auxint and aux values.
-func (b *Block) NewValue2IA(pos src.XPos, op Op, t *types.Type, auxint int64, aux interface{}, arg0, arg1 *Value) *Value {
+func (b *Block) NewValue2IA(pos src.XPos, op Op, t *types.Type, auxint int64, aux Aux, arg0, arg1 *Value) *Value {
v := b.Func.newValue(op, t, b, pos)
v.AuxInt = auxint
v.Aux = aux
}
// NewValue3A returns a new value in the block with three argument and an aux value.
-func (b *Block) NewValue3A(pos src.XPos, op Op, t *types.Type, aux interface{}, arg0, arg1, arg2 *Value) *Value {
+func (b *Block) NewValue3A(pos src.XPos, op Op, t *types.Type, aux Aux, arg0, arg1, arg2 *Value) *Value {
v := b.Func.newValue(op, t, b, pos)
v.AuxInt = 0
v.Aux = aux
}
func (f *Func) ConstEmptyString(t *types.Type) *Value {
v := f.constVal(OpConstString, t, constEmptyStringMagic, false)
- v.Aux = ""
+ v.Aux = StringToAux("")
return v
}
func (f *Func) ConstOffPtrSP(t *types.Type, c int64, sp *Value) *Value {
}
// Valu defines a value in a block.
-func Valu(name string, op Op, t *types.Type, auxint int64, aux interface{}, args ...string) valu {
+func Valu(name string, op Op, t *types.Type, auxint int64, aux Aux, args ...string) valu {
return valu{name, op, t, auxint, aux, args}
}
op Op
t *types.Type
auxint int64
- aux interface{}
+ aux Aux
args []string
}
cfg.Fun("entry",
Bloc("entry",
Valu("mem", OpInitMem, types.TypeMem, 0, nil),
- Valu("a", OpConst64, cfg.config.Types.Int64, 0, 14),
+ Valu("a", OpConstString, cfg.config.Types.String, 0, StringToAux("foo")),
Exit("mem"))),
cfg.Fun("entry",
Bloc("entry",
Valu("mem", OpInitMem, types.TypeMem, 0, nil),
- Valu("a", OpConst64, cfg.config.Types.Int64, 0, 26),
+ Valu("a", OpConstString, cfg.config.Types.String, 0, StringToAux("bar")),
Exit("mem"))),
},
// value args different
package ssa
import (
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"fmt"
)
// { N: len, Type: int, Off: 0, SplitOf: parent, SplitOffset: 8}
// parent = &{N: s, Type: string}
type LocalSlot struct {
- N GCNode // an ONAME *gc.Node representing a stack location.
+ N *ir.Name // an ONAME *ir.Name representing a stack location.
Type *types.Type // type of slot
Off int64 // offset of slot in N
package ssa
import (
+ "cmd/compile/internal/ir"
"cmd/internal/objabi"
"cmd/internal/src"
)
continue
}
if v.Type.IsMemory() || v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() {
- if v.Op == OpVarKill || v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(GCNode).Typ().HasPointers()) {
+ if v.Op == OpVarKill || v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(*ir.Name).Type().HasPointers()) {
// These ops don't really change memory.
continue
// Note: OpVarDef requires that the defined variable not have pointers.
Valu("mem", OpInitMem, types.TypeMem, 0, nil),
Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
Valu("sp", OpSP, c.config.Types.Uintptr, 0, nil),
- Valu("baddr", OpLocalAddr, c.config.Types.Bool, 0, "b", "sp", "mem"),
+ Valu("baddr", OpLocalAddr, c.config.Types.Bool, 0, StringToAux("b"), "sp", "mem"),
Valu("bool1", OpLoad, c.config.Types.Bool, 0, nil, "baddr", "mem"),
If("bool1", "b1", "b2")),
Bloc("b1",
return &AuxCall{Fn: nil, args: args, results: results}
}
+func (*AuxCall) CanBeAnSSAAux() {}
+
const (
auxNone auxType = iota
auxBool // auxInt is 0/1 for false/true
// - a *obj.LSym, for an offset from SB (the global pointer)
// - nil, for no offset
type Sym interface {
- String() string
CanBeAnSSASym()
+ CanBeAnSSAAux()
}
// A ValAndOff is used by the several opcodes. It holds
// Most internal data structures are pre-allocated and flat, so for instance adding a
// new relation does not cause any allocation. For performance reasons,
// each node has only up to two outgoing edges (like a binary tree), so intermediate
-// "dummy" nodes are required to represent more than two relations. For instance,
+// "extra" nodes are required to represent more than two relations. For instance,
// to record that A<I, A<J, A<K (with no known relation between I,J,K), we create the
// following DAG:
//
// A
// / \
-// I dummy
+// I extra
// / \
// J K
//
po.setchr(i1, e2)
po.upush(undoSetChr, i1, 0)
} else {
- // If n1 already has two children, add an intermediate dummy
+ // If n1 already has two children, add an intermediate extra
// node to record the relation correctly (without relating
// n2 to other existing nodes). Use a non-deterministic value
// to decide whether to append on the left or the right, to avoid
//
// n1
// / \
- // i1l dummy
+ // i1l extra
// / \
// i1r n2
//
- dummy := po.newnode(nil)
+ extra := po.newnode(nil)
if (i1^i2)&1 != 0 { // non-deterministic
- po.setchl(dummy, i1r)
- po.setchr(dummy, e2)
- po.setchr(i1, newedge(dummy, false))
+ po.setchl(extra, i1r)
+ po.setchr(extra, e2)
+ po.setchr(i1, newedge(extra, false))
po.upush(undoSetChr, i1, i1r)
} else {
- po.setchl(dummy, i1l)
- po.setchr(dummy, e2)
- po.setchl(i1, newedge(dummy, false))
+ po.setchl(extra, i1l)
+ po.setchr(extra, e2)
+ po.setchl(i1, newedge(extra, false))
po.upush(undoSetChl, i1, i1l)
}
}
}
// newnode allocates a new node bound to SSA value n.
-// If n is nil, this is a dummy node (= only used internally).
+// If n is nil, this is an extra node (= only used internally).
func (po *poset) newnode(n *Value) uint32 {
i := po.lastidx + 1
po.lastidx++
case higherptr != 0:
// Higher bound only. To record n < higher, we need
- // a dummy root:
+ // an extra root:
//
- // dummy
+ // extra
// / \
// root \
// / n
if r2 != po.roots[0] { // all constants should be in root #0
panic("constant not in root #0")
}
- dummy := po.newnode(nil)
- po.changeroot(r2, dummy)
- po.upush(undoChangeRoot, dummy, newedge(r2, false))
- po.addchild(dummy, r2, false)
- po.addchild(dummy, i, false)
+ extra := po.newnode(nil)
+ po.changeroot(r2, extra)
+ po.upush(undoChangeRoot, extra, newedge(r2, false))
+ po.addchild(extra, r2, false)
+ po.addchild(extra, i, false)
po.addchild(i, i2, true)
}
panic("findroot didn't find any root")
}
-// mergeroot merges two DAGs into one DAG by creating a new dummy root
+// mergeroot merges two DAGs into one DAG by creating a new extra root
func (po *poset) mergeroot(r1, r2 uint32) uint32 {
// Root #0 is special as it contains all constants. Since mergeroot
// discards r2 as root and keeps r1, make sure that r2 is not root #0,
case !f1 && f2:
// n1 is not in any DAG but n2 is. If n2 is a root, we can put
// n1 in its place as a root; otherwise, we need to create a new
- // dummy root to record the relation.
+ // extra root to record the relation.
i1 = po.newnode(n1)
if po.isroot(i2) {
// Re-parent as follows:
//
- // dummy
+ // extra
// r / \
// \ ===> r i1
// i2 \ /
// i2
//
- dummy := po.newnode(nil)
- po.changeroot(r, dummy)
- po.upush(undoChangeRoot, dummy, newedge(r, false))
- po.addchild(dummy, r, false)
- po.addchild(dummy, i1, false)
+ extra := po.newnode(nil)
+ po.changeroot(r, extra)
+ po.upush(undoChangeRoot, extra, newedge(r, false))
+ po.addchild(extra, r, false)
+ po.addchild(extra, i1, false)
po.addchild(i1, i2, strict)
case f1 && f2:
// If b3 is the primary predecessor of b2, then we use x3 in b2 and
// add a x4:CX->BX copy at the end of b4.
// But the definition of x3 doesn't dominate b2. We should really
-// insert a dummy phi at the start of b2 (x5=phi(x3,x4):BX) to keep
+// insert an extra phi at the start of b2 (x5=phi(x3,x4):BX) to keep
// SSA form. For now, we ignore this problem as remaining in strict
// SSA form isn't needed after regalloc. We'll just leave the use
// of x3 not dominated by the definition of x3, and the CX->BX copy
package ssa
import (
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/objabi"
"cmd/internal/src"
return 0
}
if t.IsFloat() || t == types.TypeInt128 {
- if t.Etype == types.TFLOAT32 && s.f.Config.fp32RegMask != 0 {
+ if t.Kind() == types.TFLOAT32 && s.f.Config.fp32RegMask != 0 {
m = s.f.Config.fp32RegMask
- } else if t.Etype == types.TFLOAT64 && s.f.Config.fp64RegMask != 0 {
+ } else if t.Kind() == types.TFLOAT64 && s.f.Config.fp64RegMask != 0 {
m = s.f.Config.fp64RegMask
} else {
m = s.f.Config.fpRegMask
// This forces later liveness analysis to make the
// value live at this point.
v.SetArg(0, s.makeSpill(a, b))
- } else if _, ok := a.Aux.(GCNode); ok && vi.rematerializeable {
+ } else if _, ok := a.Aux.(*ir.Name); ok && vi.rematerializeable {
// Rematerializeable value with a gc.Node. This is the address of
// a stack object (e.g. an LEAQ). Keep the object live.
// Change it to VarLive, which is what plive expects for locals.
return int64(o)
}
-func auxToString(i interface{}) string {
- return i.(string)
+// Aux is an interface to hold miscellaneous data in Blocks and Values.
+type Aux interface {
+ CanBeAnSSAAux()
}
-func auxToSym(i interface{}) Sym {
+
+// stringAux wraps string values for use in Aux.
+type stringAux string
+
+func (stringAux) CanBeAnSSAAux() {}
+
+func auxToString(i Aux) string {
+ return string(i.(stringAux))
+}
+func auxToSym(i Aux) Sym {
// TODO: kind of a hack - allows nil interface through
s, _ := i.(Sym)
return s
}
-func auxToType(i interface{}) *types.Type {
+func auxToType(i Aux) *types.Type {
return i.(*types.Type)
}
-func auxToCall(i interface{}) *AuxCall {
+func auxToCall(i Aux) *AuxCall {
return i.(*AuxCall)
}
-func auxToS390xCCMask(i interface{}) s390x.CCMask {
+func auxToS390xCCMask(i Aux) s390x.CCMask {
return i.(s390x.CCMask)
}
-func auxToS390xRotateParams(i interface{}) s390x.RotateParams {
+func auxToS390xRotateParams(i Aux) s390x.RotateParams {
return i.(s390x.RotateParams)
}
-func stringToAux(s string) interface{} {
- return s
+func StringToAux(s string) Aux {
+ return stringAux(s)
}
-func symToAux(s Sym) interface{} {
+func symToAux(s Sym) Aux {
return s
}
-func callToAux(s *AuxCall) interface{} {
+func callToAux(s *AuxCall) Aux {
return s
}
-func typeToAux(t *types.Type) interface{} {
+func typeToAux(t *types.Type) Aux {
return t
}
-func s390xCCMaskToAux(c s390x.CCMask) interface{} {
+func s390xCCMaskToAux(c s390x.CCMask) Aux {
return c
}
-func s390xRotateParamsToAux(r s390x.RotateParams) interface{} {
+func s390xRotateParamsToAux(r s390x.RotateParams) Aux {
return r
}
// de-virtualize an InterCall
// 'sym' is the symbol for the itab
-func devirt(v *Value, aux interface{}, sym Sym, offset int64) *AuxCall {
+func devirt(v *Value, aux Aux, sym Sym, offset int64) *AuxCall {
f := v.Block.Func
n, ok := sym.(*obj.LSym)
if !ok {
// de-virtualize an InterLECall
// 'sym' is the symbol for the itab
-func devirtLESym(v *Value, aux interface{}, sym Sym, offset int64) *obj.LSym {
+func devirtLESym(v *Value, aux Aux, sym Sym, offset int64) *obj.LSym {
n, ok := sym.(*obj.LSym)
if !ok {
return nil
}{
{Value{}, 72, 112},
{Block{}, 164, 304},
- {LocalSlot{}, 32, 48},
+ {LocalSlot{}, 28, 40},
{valState{}, 28, 40},
}
package ssa
import (
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
if v.Aux == nil {
f.Fatalf("%s has nil Aux\n", v.LongString())
}
- loc := LocalSlot{N: v.Aux.(GCNode), Type: v.Type, Off: v.AuxInt}
+ loc := LocalSlot{N: v.Aux.(*ir.Name), Type: v.Type, Off: v.AuxInt}
if f.pass.debug > stackDebug {
fmt.Printf("stackalloc %s to %s\n", v, loc)
}
// Users of AuxInt which interpret AuxInt as unsigned (e.g. shifts) must be careful.
// Use Value.AuxUnsigned to get the zero-extended value of AuxInt.
AuxInt int64
- Aux interface{}
+ Aux Aux
// Arguments of this value
Args []*Value
}
return true
}
+
+// TODO(mdempsky): Shouldn't be necessary; see discussion at golang.org/cl/275756
+func (*Value) CanBeAnSSAAux() {}
type vkey struct {
op Op
ai int64 // aux int
- ax interface{} // aux
+ ax Aux // aux
t *types.Type // type
}
t.Skip("skipping test in short mode")
}
- // provide a dummy error handler so parsing doesn't stop after first error
+ // provide a no-op error handler so parsing doesn't stop after first error
ast, err := ParseFile(*src_, func(error) {}, nil, CheckBranches)
if err != nil {
t.Error(err)
// All declarations belonging to the same group point to the same Group node.
type Group struct {
- dummy int // not empty so we are guaranteed different Group instances
+ _ int // not empty so we are guaranteed different Group instances
}
// ----------------------------------------------------------------------------
t.Skip("skipping test in short mode")
}
- // provide a dummy error handler so parsing doesn't stop after first error
+ // provide a no-op error handler so parsing doesn't stop after first error
ast, err := ParseFile(*src_, func(error) {}, nil, 0)
if err != nil {
t.Error(err)
var _EType_index = [...]uint8{0, 3, 7, 12, 17, 23, 28, 34, 39, 45, 48, 52, 59, 68, 78, 85, 92, 96, 99, 103, 108, 113, 119, 123, 126, 131, 135, 138, 144, 153, 158, 161, 166, 174, 182, 185, 190, 197, 202}
-func (i EType) String() string {
- if i >= EType(len(_EType_index)-1) {
+func (i Kind) String() string {
+ if i >= Kind(len(_EType_index)-1) {
return "EType(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _EType_name[_EType_index[i]:_EType_index[i+1]]
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "bytes"
+ "fmt"
+ "go/constant"
+ "strconv"
+ "strings"
+ "sync"
+
+ "cmd/compile/internal/base"
+)
+
+// BuiltinPkg is a fake package that declares the universe block.
+var BuiltinPkg *Pkg
+
+// LocalPkg is the package being compiled.
+var LocalPkg *Pkg
+
+// BlankSym is the blank (_) symbol.
+var BlankSym *Sym
+
+// OrigSym returns the original symbol written by the user,
+// or nil if the name has no user-visible original (unnamed
+// results and anonymous struct fields).
+func OrigSym(s *Sym) *Sym {
+	if s == nil {
+		return nil
+	}
+
+	// Names starting with '~' encode what kind of name was
+	// rewritten in their second byte.
+	if len(s.Name) > 1 && s.Name[0] == '~' {
+		switch s.Name[1] {
+		case 'r': // originally an unnamed result
+			return nil
+		case 'b': // originally the blank identifier _
+			// TODO(mdempsky): Does s.Pkg matter here?
+			return BlankSym
+		}
+		// Other '~' names are returned unchanged.
+		return s
+	}
+
+	if strings.HasPrefix(s.Name, ".anon") {
+		// originally an unnamed or _ name (see subr.go: structargs)
+		return nil
+	}
+
+	return s
+}
+
+// NumImport tracks how often a package with a given name is imported.
+// It is used to provide a better error message (by using the package
+// path to disambiguate) if a package that appears multiple times with
+// the same name appears in an error message.
+var NumImport = make(map[string]int)
+
+// fmtMode represents the kind of printing being done.
+// The default is regular Go syntax (fmtGo).
+// fmtDebug is like fmtGo but for debugging dumps and prints the type kind too.
+// fmtTypeID and fmtTypeIDName are for generating various unique representations
+// of types used in hashes and the linker.
+type fmtMode int
+
+const (
+ fmtGo fmtMode = iota
+ fmtDebug
+ fmtTypeID
+ fmtTypeIDName
+)
+
+// Sym
+
+// Format implements formatting for a Sym.
+// The valid formats are:
+//
+//	%v	Go syntax: Name for symbols in the local package, PkgName.Name for imported symbols.
+//	%+v	Debug syntax: always include PkgName. prefix even for local names.
+//	%S	Short syntax: Name only, no matter what.
+//
+func (s *Sym) Format(f fmt.State, verb rune) {
+	mode := fmtGo
+	switch verb {
+	case 'v', 'S':
+		if verb == 'v' && f.Flag('+') {
+			mode = fmtDebug
+		}
+		fmt.Fprint(f, sconv(s, verb, mode))
+
+	default:
+		// Unknown verb: emit fmt's conventional bad-verb notation.
+		fmt.Fprintf(f, "%%!%c(*types.Sym=%p)", verb, s)
+	}
+}
+
+// String returns the Go syntax for s, equivalent to %v formatting.
+func (s *Sym) String() string {
+	return sconv(s, 0, fmtGo)
+}
+
+// sconv returns the string form of s under the given verb and mode,
+// using a pooled buffer and interning the result.
+// See #16897 for details about performance implications
+// before changing the implementation of sconv.
+func sconv(s *Sym, verb rune, mode fmtMode) string {
+	if verb == 'L' {
+		// 'L' is not a symbol verb; catch misuse loudly.
+		panic("linksymfmt")
+	}
+
+	if s == nil {
+		return "<S>"
+	}
+
+	if s.Name == "_" {
+		return "_"
+	}
+	buf := fmtBufferPool.Get().(*bytes.Buffer)
+	buf.Reset()
+	defer fmtBufferPool.Put(buf)
+
+	symfmt(buf, s, verb, mode)
+	return InternString(buf.Bytes())
+}
+
+// sconv2 is like sconv, but it writes directly to b rather than
+// returning an (interned) string, for callers already accumulating
+// output in a buffer.
+func sconv2(b *bytes.Buffer, s *Sym, verb rune, mode fmtMode) {
+	if verb == 'L' {
+		panic("linksymfmt")
+	}
+	if s == nil {
+		b.WriteString("<S>")
+		return
+	}
+	if s.Name == "_" {
+		b.WriteString("_")
+		return
+	}
+
+	symfmt(b, s, verb, mode)
+}
+
+// symfmt writes the string form of s to b.
+// Verb 'S' writes the bare name; otherwise mode decides how
+// (and whether) the package qualifier is printed.
+func symfmt(b *bytes.Buffer, s *Sym, verb rune, mode fmtMode) {
+	if verb != 'S' {
+		switch mode {
+		case fmtGo: // This is for the user
+			if s.Pkg == BuiltinPkg || s.Pkg == LocalPkg {
+				b.WriteString(s.Name)
+				return
+			}
+
+			// If the name was used by multiple packages, display the full path to disambiguate.
+			if s.Pkg.Name != "" && NumImport[s.Pkg.Name] > 1 {
+				fmt.Fprintf(b, "%q.%s", s.Pkg.Path, s.Name)
+				return
+			}
+			b.WriteString(s.Pkg.Name)
+			b.WriteByte('.')
+			b.WriteString(s.Name)
+			return
+
+		case fmtDebug:
+			b.WriteString(s.Pkg.Name)
+			b.WriteByte('.')
+			b.WriteString(s.Name)
+			return
+
+		case fmtTypeIDName:
+			// dcommontype, typehash
+			b.WriteString(s.Pkg.Name)
+			b.WriteByte('.')
+			b.WriteString(s.Name)
+			return
+
+		case fmtTypeID:
+			// (methodsym), typesym, weaksym
+			b.WriteString(s.Pkg.Prefix)
+			b.WriteByte('.')
+			b.WriteString(s.Name)
+			return
+		}
+	}
+
+	// Verb 'S' (or an unrecognized mode): bare name only.
+	b.WriteString(s.Name)
+}
+
+// SymMethodName returns the method name part of s.Name,
+// dropping everything up to and including the last dot.
+func SymMethodName(s *Sym) string {
+	// Skip leading "type." in method name
+	name := s.Name
+	if i := strings.LastIndex(name, "."); i >= 0 {
+		name = name[i+1:]
+	}
+	return name
+}
+
+// Type
+
+var BasicTypeNames = []string{
+ TINT: "int",
+ TUINT: "uint",
+ TINT8: "int8",
+ TUINT8: "uint8",
+ TINT16: "int16",
+ TUINT16: "uint16",
+ TINT32: "int32",
+ TUINT32: "uint32",
+ TINT64: "int64",
+ TUINT64: "uint64",
+ TUINTPTR: "uintptr",
+ TFLOAT32: "float32",
+ TFLOAT64: "float64",
+ TCOMPLEX64: "complex64",
+ TCOMPLEX128: "complex128",
+ TBOOL: "bool",
+ TANY: "any",
+ TSTRING: "string",
+ TNIL: "nil",
+ TIDEAL: "untyped number",
+ TBLANK: "blank",
+}
+
+var fmtBufferPool = sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Buffer)
+ },
+}
+
+// Format implements formatting for a Type.
+// The valid formats are:
+//
+//	%v	Go syntax
+//	%+v	Debug syntax: Go syntax with a KIND- prefix for all but builtins.
+//	%L	Go syntax for underlying type if t is named
+//	%S	short Go syntax: drop leading "func" in function type
+//	%-S	special case for method receiver symbol
+//
+func (t *Type) Format(s fmt.State, verb rune) {
+	mode := fmtGo
+	switch verb {
+	case 'v', 'S', 'L':
+		if verb == 'v' && s.Flag('+') { // %+v is debug format
+			mode = fmtDebug
+		}
+		if verb == 'S' && s.Flag('-') { // %-S is special case for receiver - short typeid format
+			mode = fmtTypeID
+		}
+		fmt.Fprint(s, tconv(t, verb, mode))
+	default:
+		// Unknown verb: emit fmt's conventional bad-verb notation.
+		fmt.Fprintf(s, "%%!%c(*Type=%p)", verb, t)
+	}
+}
+
+// String returns the Go syntax for the type t,
+// equivalent to %v formatting.
+func (t *Type) String() string {
+	return tconv(t, 0, fmtGo)
+}
+
+// ShortString generates a short description of t.
+// It is used in autogenerated method names, reflection,
+// and itab names. It formats in fmtTypeID mode, which
+// qualifies names with the package path prefix.
+func (t *Type) ShortString() string {
+	return tconv(t, 0, fmtTypeID)
+}
+
+// LongString generates a complete description of t.
+// It is useful for reflection,
+// or when a unique fingerprint or hash of a type is required.
+// It formats in fmtTypeIDName mode, which qualifies names
+// with the package name.
+func (t *Type) LongString() string {
+	return tconv(t, 0, fmtTypeIDName)
+}
+
+// tconv returns the string form of t under verb and mode, using a
+// pooled buffer and interning the result. The actual formatting
+// logic lives in tconv2.
+func tconv(t *Type, verb rune, mode fmtMode) string {
+	buf := fmtBufferPool.Get().(*bytes.Buffer)
+	buf.Reset()
+	defer fmtBufferPool.Put(buf)
+
+	tconv2(buf, t, verb, mode, nil)
+	return InternString(buf.Bytes())
+}
+
+// tconv2 writes a string representation of t to b.
+// verb and mode control exactly what is printed.
+// Any types x that are already in the visited map get printed as @%d where %d=visited[x].
+// See #16897 before changing the implementation of tconv.
+func tconv2(b *bytes.Buffer, t *Type, verb rune, mode fmtMode, visited map[*Type]int) {
+	if off, ok := visited[t]; ok {
+		// We've seen this type before, so we're trying to print it recursively.
+		// Print a reference to it instead.
+		fmt.Fprintf(b, "@%d", off)
+		return
+	}
+	if t == nil {
+		b.WriteString("<T>")
+		return
+	}
+	// Compiler-internal pseudo-types print their stashed Extra data directly.
+	if t.Kind() == TSSA {
+		b.WriteString(t.Extra.(string))
+		return
+	}
+	if t.Kind() == TTUPLE {
+		// Tuples always have exactly two components: "T1,T2".
+		b.WriteString(t.FieldType(0).String())
+		b.WriteByte(',')
+		b.WriteString(t.FieldType(1).String())
+		return
+	}
+
+	if t.Kind() == TRESULTS {
+		// Comma-joined list of the result types.
+		tys := t.Extra.(*Results).Types
+		for i, et := range tys {
+			if i > 0 {
+				b.WriteByte(',')
+			}
+			b.WriteString(et.String())
+		}
+		return
+	}
+
+	if t == ByteType || t == RuneType {
+		// in %-T mode collapse rune and byte with their originals.
+		switch mode {
+		case fmtTypeIDName, fmtTypeID:
+			t = Types[t.Kind()]
+		default:
+			sconv2(b, t.Sym(), 'S', mode)
+			return
+		}
+	}
+	if t == ErrorType {
+		b.WriteString("error")
+		return
+	}
+
+	// Unless the 'L' flag was specified, if the type has a name, just print that name.
+	if verb != 'L' && t.Sym() != nil && t != Types[t.Kind()] {
+		switch mode {
+		case fmtTypeID, fmtTypeIDName:
+			if verb == 'S' {
+				if t.Vargen != 0 {
+					// Disambiguate local redeclarations with ·N suffix.
+					sconv2(b, t.Sym(), 'S', mode)
+					fmt.Fprintf(b, "·%d", t.Vargen)
+					return
+				}
+				sconv2(b, t.Sym(), 'S', mode)
+				return
+			}
+
+			if mode == fmtTypeIDName {
+				sconv2(b, t.Sym(), 'v', fmtTypeIDName)
+				return
+			}
+
+			if t.Sym().Pkg == LocalPkg && t.Vargen != 0 {
+				sconv2(b, t.Sym(), 'v', mode)
+				fmt.Fprintf(b, "·%d", t.Vargen)
+				return
+			}
+		}
+
+		sconv2(b, t.Sym(), 'v', mode)
+		return
+	}
+
+	if int(t.Kind()) < len(BasicTypeNames) && BasicTypeNames[t.Kind()] != "" {
+		// Basic types: distinguish the untyped constant types
+		// from their defaulted counterparts.
+		var name string
+		switch t {
+		case UntypedBool:
+			name = "untyped bool"
+		case UntypedString:
+			name = "untyped string"
+		case UntypedInt:
+			name = "untyped int"
+		case UntypedRune:
+			name = "untyped rune"
+		case UntypedFloat:
+			name = "untyped float"
+		case UntypedComplex:
+			name = "untyped complex"
+		default:
+			name = BasicTypeNames[t.Kind()]
+		}
+		b.WriteString(name)
+		return
+	}
+
+	if mode == fmtDebug {
+		// Debug mode: KIND-prefix, then the normal Go syntax.
+		b.WriteString(t.Kind().String())
+		b.WriteByte('-')
+		tconv2(b, t, 'v', fmtGo, visited)
+		return
+	}
+
+	// At this point, we might call tconv2 recursively. Add the current type to the visited list so we don't
+	// try to print it recursively.
+	// We record the offset in the result buffer where the type's text starts. This offset serves as a reference
+	// point for any later references to the same type.
+	// Note that we remove the type from the visited map as soon as the recursive call is done.
+	// This prevents encoding types like map[*int]*int as map[*int]@4. (That encoding would work,
+	// but I'd like to use the @ notation only when strictly necessary.)
+	if visited == nil {
+		visited = map[*Type]int{}
+	}
+	visited[t] = b.Len()
+	defer delete(visited, t)
+
+	switch t.Kind() {
+	case TPTR:
+		b.WriteByte('*')
+		switch mode {
+		case fmtTypeID, fmtTypeIDName:
+			if verb == 'S' {
+				tconv2(b, t.Elem(), 'S', mode, visited)
+				return
+			}
+		}
+		tconv2(b, t.Elem(), 'v', mode, visited)
+
+	case TARRAY:
+		b.WriteByte('[')
+		b.WriteString(strconv.FormatInt(t.NumElem(), 10))
+		b.WriteByte(']')
+		tconv2(b, t.Elem(), 0, mode, visited)
+
+	case TSLICE:
+		b.WriteString("[]")
+		tconv2(b, t.Elem(), 0, mode, visited)
+
+	case TCHAN:
+		switch t.ChanDir() {
+		case Crecv:
+			b.WriteString("<-chan ")
+			tconv2(b, t.Elem(), 0, mode, visited)
+		case Csend:
+			b.WriteString("chan<- ")
+			tconv2(b, t.Elem(), 0, mode, visited)
+		default:
+			b.WriteString("chan ")
+			// "chan (<-chan T)" needs parens so it doesn't read as "chan<- chan T".
+			if t.Elem() != nil && t.Elem().IsChan() && t.Elem().Sym() == nil && t.Elem().ChanDir() == Crecv {
+				b.WriteByte('(')
+				tconv2(b, t.Elem(), 0, mode, visited)
+				b.WriteByte(')')
+			} else {
+				tconv2(b, t.Elem(), 0, mode, visited)
+			}
+		}
+
+	case TMAP:
+		b.WriteString("map[")
+		tconv2(b, t.Key(), 0, mode, visited)
+		b.WriteByte(']')
+		tconv2(b, t.Elem(), 0, mode, visited)
+
+	case TINTER:
+		if t.IsEmptyInterface() {
+			b.WriteString("interface {}")
+			break
+		}
+		b.WriteString("interface {")
+		for i, f := range t.Fields().Slice() {
+			if i != 0 {
+				b.WriteByte(';')
+			}
+			b.WriteByte(' ')
+			switch {
+			case f.Sym == nil:
+				// Check first that a symbol is defined for this type.
+				// Wrong interface definitions may have types lacking a symbol.
+				break
+			case IsExported(f.Sym.Name):
+				sconv2(b, f.Sym, 'S', mode)
+			default:
+				// Non-exported method: qualify with the package.
+				if mode != fmtTypeIDName {
+					mode = fmtTypeID
+				}
+				sconv2(b, f.Sym, 'v', mode)
+			}
+			tconv2(b, f.Type, 'S', mode, visited)
+		}
+		if t.NumFields() != 0 {
+			b.WriteByte(' ')
+		}
+		b.WriteByte('}')
+
+	case TFUNC:
+		if verb == 'S' {
+			// no leading func
+		} else {
+			if t.Recv() != nil {
+				b.WriteString("method")
+				tconv2(b, t.Recvs(), 0, mode, visited)
+				b.WriteByte(' ')
+			}
+			b.WriteString("func")
+		}
+		tconv2(b, t.Params(), 0, mode, visited)
+
+		switch t.NumResults() {
+		case 0:
+			// nothing to do
+
+		case 1:
+			b.WriteByte(' ')
+			tconv2(b, t.Results().Field(0).Type, 0, mode, visited) // struct->field->field's type
+
+		default:
+			b.WriteByte(' ')
+			tconv2(b, t.Results(), 0, mode, visited)
+		}
+
+	case TSTRUCT:
+		if m := t.StructType().Map; m != nil {
+			mt := m.MapType()
+			// Format the bucket struct for map[x]y as map.bucket[x]y.
+			// This avoids a recursive print that generates very long names.
+			switch t {
+			case mt.Bucket:
+				b.WriteString("map.bucket[")
+			case mt.Hmap:
+				b.WriteString("map.hdr[")
+			case mt.Hiter:
+				b.WriteString("map.iter[")
+			default:
+				base.Fatalf("unknown internal map type")
+			}
+			tconv2(b, m.Key(), 0, mode, visited)
+			b.WriteByte(']')
+			tconv2(b, m.Elem(), 0, mode, visited)
+			break
+		}
+
+		if funarg := t.StructType().Funarg; funarg != FunargNone {
+			// Function parameter/result lists render as "(a, b, c)".
+			b.WriteByte('(')
+			fieldVerb := 'v'
+			switch mode {
+			case fmtTypeID, fmtTypeIDName, fmtGo:
+				// no argument names on function signature, and no "noescape"/"nosplit" tags
+				fieldVerb = 'S'
+			}
+			for i, f := range t.Fields().Slice() {
+				if i != 0 {
+					b.WriteString(", ")
+				}
+				fldconv(b, f, fieldVerb, mode, visited, funarg)
+			}
+			b.WriteByte(')')
+		} else {
+			b.WriteString("struct {")
+			for i, f := range t.Fields().Slice() {
+				if i != 0 {
+					b.WriteByte(';')
+				}
+				b.WriteByte(' ')
+				fldconv(b, f, 'L', mode, visited, funarg)
+			}
+			if t.NumFields() != 0 {
+				b.WriteByte(' ')
+			}
+			b.WriteByte('}')
+		}
+
+	case TFORW:
+		b.WriteString("undefined")
+		if t.Sym() != nil {
+			b.WriteByte(' ')
+			sconv2(b, t.Sym(), 'v', mode)
+		}
+
+	case TUNSAFEPTR:
+		b.WriteString("unsafe.Pointer")
+
+	case Txxx:
+		b.WriteString("Txxx")
+
+	default:
+		// Don't know how to handle - fall back to detailed prints
+		b.WriteString(t.Kind().String())
+		b.WriteString(" <")
+		sconv2(b, t.Sym(), 'v', mode)
+		b.WriteString(">")
+
+	}
+}
+
+// fldconv writes the string form of struct field or function
+// parameter f to b. verb and mode are as for tconv2; funarg
+// distinguishes function parameter lists (which may print the
+// parameter's Nname) from ordinary struct fields.
+func fldconv(b *bytes.Buffer, f *Field, verb rune, mode fmtMode, visited map[*Type]int, funarg Funarg) {
+	if f == nil {
+		b.WriteString("<T>")
+		return
+	}
+
+	var name string
+	if verb != 'S' {
+		s := f.Sym
+
+		// Take the name from the original.
+		if mode == fmtGo {
+			s = OrigSym(s)
+		}
+
+		if s != nil && f.Embedded == 0 {
+			if funarg != FunargNone {
+				name = fmt.Sprint(f.Nname)
+			} else if verb == 'L' {
+				name = SymMethodName(s)
+				if !IsExported(name) && mode != fmtTypeIDName {
+					name = sconv(s, 0, mode) // qualify non-exported names (used on structs, not on funarg)
+				}
+			} else {
+				name = sconv(s, 0, mode)
+			}
+		}
+	}
+
+	if name != "" {
+		b.WriteString(name)
+		b.WriteString(" ")
+	}
+
+	if f.IsDDD() {
+		// Variadic parameter: print "...E" where E is the element type.
+		var et *Type
+		if f.Type != nil {
+			et = f.Type.Elem()
+		}
+		b.WriteString("...")
+		tconv2(b, et, 0, mode, visited)
+	} else {
+		tconv2(b, f.Type, 0, mode, visited)
+	}
+
+	if verb != 'S' && funarg == FunargNone && f.Note != "" {
+		// Struct field tag, quoted as in Go source.
+		b.WriteString(" ")
+		b.WriteString(strconv.Quote(f.Note))
+	}
+}
+
+// Val
+
+// FmtConst returns the string form of constant value v.
+// When sharp is false, complex values are printed compactly,
+// dropping zero real or imaginary parts; otherwise (and for all
+// other kinds) v's default string form is returned.
+func FmtConst(v constant.Value, sharp bool) string {
+	if !sharp && v.Kind() == constant.Complex {
+		real, imag := constant.Real(v), constant.Imag(v)
+
+		var re string
+		sre := constant.Sign(real)
+		if sre != 0 {
+			re = real.String()
+		}
+
+		var im string
+		sim := constant.Sign(imag)
+		if sim != 0 {
+			im = imag.String()
+		}
+
+		switch {
+		case sre == 0 && sim == 0:
+			return "0"
+		case sre == 0:
+			return im + "i"
+		case sim == 0:
+			return re
+		case sim < 0:
+			// im already carries its own minus sign.
+			return fmt.Sprintf("(%s%si)", re, im)
+		default:
+			return fmt.Sprintf("(%s+%si)", re, im)
+		}
+	}
+
+	return v.String()
+}
if t1 == t2 {
return true
}
- if t1 == nil || t2 == nil || t1.Etype != t2.Etype || t1.Broke() || t2.Broke() {
+ if t1 == nil || t2 == nil || t1.kind != t2.kind || t1.Broke() || t2.Broke() {
return false
}
- if t1.Sym != nil || t2.Sym != nil {
+ if t1.sym != nil || t2.sym != nil {
// Special case: we keep byte/uint8 and rune/int32
// separate for error messages. Treat them as equal.
- switch t1.Etype {
+ switch t1.kind {
case TUINT8:
- return (t1 == Types[TUINT8] || t1 == Bytetype) && (t2 == Types[TUINT8] || t2 == Bytetype)
+ return (t1 == Types[TUINT8] || t1 == ByteType) && (t2 == Types[TUINT8] || t2 == ByteType)
case TINT32:
- return (t1 == Types[TINT32] || t1 == Runetype) && (t2 == Types[TINT32] || t2 == Runetype)
+ return (t1 == Types[TINT32] || t1 == RuneType) && (t2 == Types[TINT32] || t2 == RuneType)
default:
return false
}
}
assumedEqual[typePair{t1, t2}] = struct{}{}
- switch t1.Etype {
+ switch t1.kind {
case TIDEAL:
// Historically, cmd/compile used a single "untyped
// number" type, so all untyped number types were
return s
}
-// List of .inittask entries in imported packages, in source code order.
-var InitSyms []*Sym
-
// LookupOK looks up name in pkg and reports whether it previously existed.
func (pkg *Pkg) LookupOK(name string) (s *Sym, existed bool) {
// TODO(gri) remove this check in favor of specialized lookup
Name: name,
Pkg: pkg,
}
- if name == ".inittask" {
- InitSyms = append(InitSyms, s)
- }
pkg.Syms[name] = s
return s, false
}
package types
-import "cmd/internal/src"
+import (
+ "cmd/compile/internal/base"
+ "cmd/internal/src"
+)
// Declaration stack & operations
// restored once the block scope ends.
type dsym struct {
sym *Sym // sym == nil indicates stack mark
- def *Node
+ def Object
block int32
lastlineno src.XPos // last declaration for diagnostic
}
d.sym = nil
d.def = nil
}
- Fatalf("popdcl: no stack mark")
+ base.Fatalf("popdcl: no stack mark")
}
// Markdcl records the start of a new block scope for declarations.
}
// PkgDef returns the definition associated with s at package scope.
-func (s *Sym) PkgDef() *Node {
+func (s *Sym) PkgDef() Object {
return *s.pkgDefPtr()
}
// SetPkgDef sets the definition associated with s at package scope.
-func (s *Sym) SetPkgDef(n *Node) {
+func (s *Sym) SetPkgDef(n Object) {
*s.pkgDefPtr() = n
}
-func (s *Sym) pkgDefPtr() **Node {
+func (s *Sym) pkgDefPtr() *Object {
// Look for outermost saved declaration, which must be the
// package scope definition, if present.
- for _, d := range dclstack {
+ for i := range dclstack {
+ d := &dclstack[i]
if s == d.sym {
return &d.def
}
_32bit uintptr // size on 32bit platforms
_64bit uintptr // size on 64bit platforms
}{
- {Sym{}, 52, 88},
- {Type{}, 52, 88},
+ {Sym{}, 48, 80},
+ {Type{}, 56, 96},
{Map{}, 20, 40},
{Forward{}, 20, 32},
- {Func{}, 32, 56},
+ {Func{}, 24, 40},
{Struct{}, 16, 32},
{Interface{}, 8, 16},
{Chan{}, 8, 16},
package types
import (
+ "cmd/compile/internal/base"
"cmd/internal/obj"
"cmd/internal/src"
"unicode"
Name string // object name
// saved and restored by dcopy
- Def *Node // definition: ONAME OTYPE OPACK or OLITERAL
+ Def Object // definition: ONAME OTYPE OPACK or OLITERAL
Block int32 // blocknumber to catch redeclaration
Lastlineno src.XPos // last declaration for diagnostic
- flags bitset8
- Label *Node // corresponding label (ephemeral)
- Origpkg *Pkg // original package for . import
+ flags bitset8
}
const (
}
if sym.Func() {
// This is a function symbol. Mark it as "internal ABI".
- return Ctxt.LookupABIInit(sym.LinksymName(), obj.ABIInternal, initPkg)
+ return base.Ctxt.LookupABIInit(sym.LinksymName(), obj.ABIInternal, initPkg)
}
- return Ctxt.LookupInit(sym.LinksymName(), initPkg)
+ return base.Ctxt.LookupInit(sym.LinksymName(), initPkg)
+}
+
+// LinksymABI0 looks up or creates an ABI0 linker symbol for "sym",
+// in cases where we want to specifically select the ABI0 version of
+// a symbol (typically used only for ABI wrappers).
+func (sym *Sym) LinksymABI0() *obj.LSym {
+	if sym == nil {
+		return nil
+	}
+	// initPkg runs on first creation of the linker symbol:
+	// linkname'd symbols get the placeholder "_" package; all
+	// others use the symbol's own package prefix.
+	initPkg := func(r *obj.LSym) {
+		if sym.Linkname != "" {
+			r.Pkg = "_"
+		} else {
+			r.Pkg = sym.Pkg.Prefix
+		}
+	}
+	return base.Ctxt.LookupABIInit(sym.LinksymName(), obj.ABI0, initPkg)
+}
// Less reports whether symbol a is ordered before symbol b.
package types
import (
+ "cmd/compile/internal/base"
"cmd/internal/obj"
"cmd/internal/src"
"fmt"
)
-// Dummy Node so we can refer to *Node without actually
-// having a gc.Node. Necessary to break import cycles.
-// TODO(gri) try to eliminate soon
-type Node struct{ _ int }
+// Object represents an ir.Node, but without needing to import cmd/compile/internal/ir,
+// which would cause an import cycle. The uses in other packages must type assert
+// values of type Object to ir.Node or a more specific type.
+type Object interface {
+ Pos() src.XPos
+ Sym() *Sym
+ Type() *Type
+}
+
+// A TypeObject is an Object representing a named type.
+type TypeObject interface {
+ Object
+ TypeDefn() *Type // for "type T Defn", returns Defn
+}
+
+// A VarObject is an Object representing a function argument, variable, or struct field.
+type VarObject interface {
+ Object
+ RecordFrameOffset(int64) // save frame offset
+}
//go:generate stringer -type EType -trimprefix T
// EType describes a kind of type.
-type EType uint8
+type Kind uint8
const (
- Txxx EType = iota
+ Txxx Kind = iota
TINT8
TUINT8
var (
// Predeclared alias types. Kept separate for better error messages.
- Bytetype *Type
- Runetype *Type
+ ByteType *Type
+ RuneType *Type
// Predeclared error interface type.
- Errortype *Type
+ ErrorType *Type
// Types to represent untyped string and boolean constants.
- UntypedString *Type
- UntypedBool *Type
+ UntypedString = New(TSTRING)
+ UntypedBool = New(TBOOL)
// Types to represent untyped numeric constants.
UntypedInt = New(TIDEAL)
methods Fields
allMethods Fields
- Nod *Node // canonical OTYPE node
- Orig *Type // original type (type literal or predefined type)
+ nod Object // canonical OTYPE node
+ underlying *Type // original type (type literal or predefined type)
// Cache of composite types, with this type being the element type.
- Cache struct {
+ cache struct {
ptr *Type // *T, or nil
slice *Type // []T, or nil
}
- Sym *Sym // symbol containing name, for named types
+ sym *Sym // symbol containing name, for named types
Vargen int32 // unique name for OTYPE/ONAME
- Etype EType // kind of type
+ kind Kind // kind of type
Align uint8 // the required alignment of this type, in bytes (0 means Width and Align have not yet been computed)
flags bitset8
}
+func (*Type) CanBeAnSSAAux() {}
+
const (
typeNotInHeap = 1 << iota // type cannot be heap allocated
typeBroke // broken type definition
func (t *Type) SetDeferwidth(b bool) { t.flags.set(typeDeferwidth, b) }
func (t *Type) SetRecur(b bool) { t.flags.set(typeRecur, b) }
+// Kind returns the kind of type t.
+func (t *Type) Kind() Kind { return t.kind }
+
+// Sym returns the name of type t.
+func (t *Type) Sym() *Sym { return t.sym }
+
+// Underlying returns the underlying type of type t.
+func (t *Type) Underlying() *Type { return t.underlying }
+
+// SetNod associates t with syntax node n.
+// Only the first association is kept; later calls are no-ops.
+func (t *Type) SetNod(n Object) {
+	// t.nod can be non-nil already
+	// in the case of shared *Types, like []byte or interface{}.
+	if t.nod == nil {
+		t.nod = n
+	}
+}
+
+// Pos returns a position associated with t, if any.
+// This should only be used for diagnostics.
+func (t *Type) Pos() src.XPos {
+	if t.nod != nil {
+		return t.nod.Pos()
+	}
+	// No associated syntax node (e.g. internal or shared types).
+	return src.NoXPos
+}
+
+// NoPkg is a nil *Pkg value for clarity.
+// It's intended for use when constructing types that aren't exported
+// and thus don't need to be associated with any package.
+var NoPkg *Pkg = nil
+
// Pkg returns the package that t appeared in.
//
// Pkg is only defined for function, struct, and interface types
// cmd/compile itself, but we need to track it because it's exposed by
// the go/types API.
func (t *Type) Pkg() *Pkg {
- switch t.Etype {
+ switch t.kind {
case TFUNC:
return t.Extra.(*Func).pkg
case TSTRUCT:
case TINTER:
return t.Extra.(*Interface).pkg
default:
- Fatalf("Pkg: unexpected kind: %v", t)
+ base.Fatalf("Pkg: unexpected kind: %v", t)
return nil
}
}
-// SetPkg sets the package that t appeared in.
-func (t *Type) SetPkg(pkg *Pkg) {
- switch t.Etype {
- case TFUNC:
- t.Extra.(*Func).pkg = pkg
- case TSTRUCT:
- t.Extra.(*Struct).pkg = pkg
- case TINTER:
- t.Extra.(*Interface).pkg = pkg
- default:
- Fatalf("Pkg: unexpected kind: %v", t)
- }
-}
-
// Map contains Type fields specific to maps.
type Map struct {
Key *Type // Key type
Results *Type // function results
Params *Type // function params
- Nname *Node
- pkg *Pkg
+ pkg *Pkg
// Argwid is the total width of the function receiver, params, and results.
// It gets calculated via a temporary TFUNCARGS type.
// Note that TFUNC's Width is Widthptr.
Argwid int64
-
- Outnamed bool
}
// FuncType returns t's extra func-specific fields.
// For fields that represent function parameters, Nname points
// to the associated ONAME Node.
- Nname *Node
+ Nname Object
// Offset in bytes of this field or method within its enclosing struct
// or interface Type.
// IsMethod reports whether f represents a method rather than a struct field.
func (f *Field) IsMethod() bool {
- return f.Type.Etype == TFUNC && f.Type.Recv() != nil
+ return f.Type.kind == TFUNC && f.Type.Recv() != nil
}
// Fields is a pointer to a slice of *Field.
}
// New returns a new Type of the specified kind.
-func New(et EType) *Type {
+func New(et Kind) *Type {
t := &Type{
- Etype: et,
+ kind: et,
Width: BADWIDTH,
}
- t.Orig = t
+ t.underlying = t
// TODO(josharian): lazily initialize some of these?
- switch t.Etype {
+ switch t.kind {
case TMAP:
t.Extra = new(Map)
case TFORW:
// NewArray returns a new fixed-length array Type.
func NewArray(elem *Type, bound int64) *Type {
if bound < 0 {
- Fatalf("NewArray: invalid bound %v", bound)
+ base.Fatalf("NewArray: invalid bound %v", bound)
}
t := New(TARRAY)
t.Extra = &Array{Elem: elem, Bound: bound}
// NewSlice returns the slice Type with element type elem.
func NewSlice(elem *Type) *Type {
- if t := elem.Cache.slice; t != nil {
+ if t := elem.cache.slice; t != nil {
if t.Elem() != elem {
- Fatalf("elem mismatch")
+ base.Fatalf("elem mismatch")
}
return t
}
t := New(TSLICE)
t.Extra = Slice{Elem: elem}
- elem.Cache.slice = t
+ elem.cache.slice = t
return t
}
// NewPtr returns the pointer type pointing to t.
func NewPtr(elem *Type) *Type {
if elem == nil {
- Fatalf("NewPtr: pointer to elem Type is nil")
+ base.Fatalf("NewPtr: pointer to elem Type is nil")
}
- if t := elem.Cache.ptr; t != nil {
+ if t := elem.cache.ptr; t != nil {
if t.Elem() != elem {
- Fatalf("NewPtr: elem mismatch")
+ base.Fatalf("NewPtr: elem mismatch")
}
return t
}
t.Width = int64(Widthptr)
t.Align = uint8(Widthptr)
if NewPtrCacheEnabled {
- elem.Cache.ptr = t
+ elem.cache.ptr = t
}
return t
}
return t
}
-func NewField() *Field {
- return &Field{
+func NewField(pos src.XPos, sym *Sym, typ *Type) *Field {
+ f := &Field{
+ Pos: pos,
+ Sym: sym,
+ Type: typ,
Offset: BADWIDTH,
}
+ if typ == nil {
+ f.SetBroke(true)
+ }
+ return f
}
// SubstAny walks t, replacing instances of "any" with successive
return nil
}
- switch t.Etype {
+ switch t.kind {
default:
// Leave the type unchanged.
case TANY:
if len(*types) == 0 {
- Fatalf("substArgTypes: not enough argument types")
+ base.Fatalf("substArgTypes: not enough argument types")
}
t = (*types)[0]
*types = (*types)[1:]
}
nt := *t
// copy any *T Extra fields, to avoid aliasing
- switch t.Etype {
+ switch t.kind {
case TMAP:
x := *t.Extra.(*Map)
nt.Extra = &x
x := *t.Extra.(*Array)
nt.Extra = &x
case TTUPLE, TSSA, TRESULTS:
- Fatalf("ssa types cannot be copied")
+ base.Fatalf("ssa types cannot be copied")
}
// TODO(mdempsky): Find out why this is necessary and explain.
- if t.Orig == t {
- nt.Orig = &nt
+ if t.underlying == t {
+ nt.underlying = &nt
}
return &nt
}
return &nf
}
-func (t *Type) wantEtype(et EType) {
- if t.Etype != et {
- Fatalf("want %v, but have %v", et, t)
+func (t *Type) wantEtype(et Kind) {
+ if t.kind != et {
+ base.Fatalf("want %v, but have %v", et, t)
}
}
// Elem returns the type of elements of t.
// Usable with pointers, channels, arrays, slices, and maps.
func (t *Type) Elem() *Type {
- switch t.Etype {
+ switch t.kind {
case TPTR:
return t.Extra.(Ptr).Elem
case TARRAY:
case TMAP:
return t.Extra.(*Map).Elem
}
- Fatalf("Type.Elem %s", t.Etype)
+ base.Fatalf("Type.Elem %s", t.kind)
return nil
}
return t.Extra.(FuncArgs).T
}
-// Nname returns the associated function's nname.
-func (t *Type) Nname() *Node {
- switch t.Etype {
- case TFUNC:
- return t.Extra.(*Func).Nname
- }
- Fatalf("Type.Nname %v %v", t.Etype, t)
- return nil
-}
-
-// Nname sets the associated function's nname.
-func (t *Type) SetNname(n *Node) {
- switch t.Etype {
- case TFUNC:
- t.Extra.(*Func).Nname = n
- default:
- Fatalf("Type.SetNname %v %v", t.Etype, t)
- }
-}
-
// IsFuncArgStruct reports whether t is a struct representing function parameters.
func (t *Type) IsFuncArgStruct() bool {
- return t.Etype == TSTRUCT && t.Extra.(*Struct).Funarg != FunargNone
+ return t.kind == TSTRUCT && t.Extra.(*Struct).Funarg != FunargNone
}
func (t *Type) Methods() *Fields {
}
func (t *Type) Fields() *Fields {
- switch t.Etype {
+ switch t.kind {
case TSTRUCT:
return &t.Extra.(*Struct).fields
case TINTER:
Dowidth(t)
return &t.Extra.(*Interface).Fields
}
- Fatalf("Fields: type %v does not have fields", t)
+ base.Fatalf("Fields: type %v does not have fields", t)
return nil
}
// enforce that SetFields cannot be called once
// t's width has been calculated.
if t.WidthCalculated() {
- Fatalf("SetFields of %v: width previously calculated", t)
+ base.Fatalf("SetFields of %v: width previously calculated", t)
}
t.wantEtype(TSTRUCT)
for _, f := range fields {
}
func (t *Type) Size() int64 {
- if t.Etype == TSSA {
+ if t.kind == TSSA {
if t == TypeInt128 {
return 16
}
}
func (t *Type) SimpleString() string {
- return t.Etype.String()
+ return t.kind.String()
}
// Cmp is a comparison between values a and b.
return CMPgt
}
- if t.Etype != x.Etype {
- return cmpForNe(t.Etype < x.Etype)
+ if t.kind != x.kind {
+ return cmpForNe(t.kind < x.kind)
}
- if t.Sym != nil || x.Sym != nil {
+ if t.sym != nil || x.sym != nil {
// Special case: we keep byte and uint8 separate
// for error messages. Treat them as equal.
- switch t.Etype {
+ switch t.kind {
case TUINT8:
- if (t == Types[TUINT8] || t == Bytetype) && (x == Types[TUINT8] || x == Bytetype) {
+ if (t == Types[TUINT8] || t == ByteType) && (x == Types[TUINT8] || x == ByteType) {
return CMPeq
}
case TINT32:
- if (t == Types[Runetype.Etype] || t == Runetype) && (x == Types[Runetype.Etype] || x == Runetype) {
+ if (t == Types[RuneType.kind] || t == RuneType) && (x == Types[RuneType.kind] || x == RuneType) {
return CMPeq
}
}
}
- if c := t.Sym.cmpsym(x.Sym); c != CMPeq {
+ if c := t.sym.cmpsym(x.sym); c != CMPeq {
return c
}
- if x.Sym != nil {
+ if x.sym != nil {
// Syms non-nil, if vargens match then equal.
if t.Vargen != x.Vargen {
return cmpForNe(t.Vargen < x.Vargen)
}
// both syms nil, look at structure below.
- switch t.Etype {
+ switch t.kind {
case TBOOL, TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, TUNSAFEPTR, TUINTPTR,
TINT8, TINT16, TINT32, TINT64, TINT, TUINT8, TUINT16, TUINT32, TUINT64, TUINT:
return CMPeq
}
// IsKind reports whether t is a Type of the specified kind.
-func (t *Type) IsKind(et EType) bool {
- return t != nil && t.Etype == et
+func (t *Type) IsKind(et Kind) bool {
+ return t != nil && t.kind == et
}
func (t *Type) IsBoolean() bool {
- return t.Etype == TBOOL
+ return t.kind == TBOOL
}
-var unsignedEType = [...]EType{
+var unsignedEType = [...]Kind{
TINT8: TUINT8,
TUINT8: TUINT8,
TINT16: TUINT16,
// ToUnsigned returns the unsigned equivalent of integer type t.
func (t *Type) ToUnsigned() *Type {
if !t.IsInteger() {
- Fatalf("unsignedType(%v)", t)
+ base.Fatalf("unsignedType(%v)", t)
}
- return Types[unsignedEType[t.Etype]]
+ return Types[unsignedEType[t.kind]]
}
func (t *Type) IsInteger() bool {
- switch t.Etype {
+ switch t.kind {
case TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, TUINT64, TINT, TUINT, TUINTPTR:
return true
}
- return false
+ return t == UntypedInt || t == UntypedRune
}
func (t *Type) IsSigned() bool {
- switch t.Etype {
+ switch t.kind {
case TINT8, TINT16, TINT32, TINT64, TINT:
return true
}
return false
}
+func (t *Type) IsUnsigned() bool {
+ switch t.kind {
+ case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR:
+ return true
+ }
+ return false
+}
+
func (t *Type) IsFloat() bool {
- return t.Etype == TFLOAT32 || t.Etype == TFLOAT64
+ return t.kind == TFLOAT32 || t.kind == TFLOAT64 || t == UntypedFloat
}
func (t *Type) IsComplex() bool {
- return t.Etype == TCOMPLEX64 || t.Etype == TCOMPLEX128
+ return t.kind == TCOMPLEX64 || t.kind == TCOMPLEX128 || t == UntypedComplex
}
// IsPtr reports whether t is a regular Go pointer type.
// This does not include unsafe.Pointer.
func (t *Type) IsPtr() bool {
- return t.Etype == TPTR
+ return t.kind == TPTR
}
// IsPtrElem reports whether t is the element of a pointer (to t).
func (t *Type) IsPtrElem() bool {
- return t.Cache.ptr != nil
+ return t.cache.ptr != nil
}
// IsUnsafePtr reports whether t is an unsafe pointer.
func (t *Type) IsUnsafePtr() bool {
- return t.Etype == TUNSAFEPTR
+ return t.kind == TUNSAFEPTR
}
// IsUintptr reports whether t is an uintptr.
func (t *Type) IsUintptr() bool {
- return t.Etype == TUINTPTR
+ return t.kind == TUINTPTR
}
// IsPtrShaped reports whether t is represented by a single machine pointer.
// that consist of a single pointer shaped type.
// TODO(mdempsky): Should it? See golang.org/issue/15028.
func (t *Type) IsPtrShaped() bool {
- return t.Etype == TPTR || t.Etype == TUNSAFEPTR ||
- t.Etype == TMAP || t.Etype == TCHAN || t.Etype == TFUNC
+ return t.kind == TPTR || t.kind == TUNSAFEPTR ||
+ t.kind == TMAP || t.kind == TCHAN || t.kind == TFUNC
}
// HasNil reports whether the set of values determined by t includes nil.
func (t *Type) HasNil() bool {
- switch t.Etype {
- case TCHAN, TFUNC, TINTER, TMAP, TPTR, TSLICE, TUNSAFEPTR:
+ switch t.kind {
+ case TCHAN, TFUNC, TINTER, TMAP, TNIL, TPTR, TSLICE, TUNSAFEPTR:
return true
}
return false
}
func (t *Type) IsString() bool {
- return t.Etype == TSTRING
+ return t.kind == TSTRING
}
func (t *Type) IsMap() bool {
- return t.Etype == TMAP
+ return t.kind == TMAP
}
func (t *Type) IsChan() bool {
- return t.Etype == TCHAN
+ return t.kind == TCHAN
}
func (t *Type) IsSlice() bool {
- return t.Etype == TSLICE
+ return t.kind == TSLICE
}
func (t *Type) IsArray() bool {
- return t.Etype == TARRAY
+ return t.kind == TARRAY
}
func (t *Type) IsStruct() bool {
- return t.Etype == TSTRUCT
+ return t.kind == TSTRUCT
}
func (t *Type) IsInterface() bool {
- return t.Etype == TINTER
+ return t.kind == TINTER
}
// IsEmptyInterface reports whether t is an empty interface type.
return t.IsInterface() && t.NumFields() == 0
}
+// IsScalar reports whether 't' is a scalar Go type, e.g.
+// bool/int/float/complex. Note that struct and array types consisting
+// of a single scalar element are not considered scalar, likewise
+// pointer types are also not considered scalar.
+func (t *Type) IsScalar() bool {
+ switch t.kind {
+ case TBOOL, TINT8, TUINT8, TINT16, TUINT16, TINT32,
+ TUINT32, TINT64, TUINT64, TINT, TUINT,
+ TUINTPTR, TCOMPLEX64, TCOMPLEX128, TFLOAT32, TFLOAT64:
+ return true
+ }
+ return false
+}
+
func (t *Type) PtrTo() *Type {
return NewPtr(t)
}
return t.Fields().Len()
}
func (t *Type) FieldType(i int) *Type {
- if t.Etype == TTUPLE {
+ if t.kind == TTUPLE {
switch i {
case 0:
return t.Extra.(*Tuple).first
panic("bad tuple index")
}
}
- if t.Etype == TRESULTS {
+ if t.kind == TRESULTS {
return t.Extra.(*Results).Types[i]
}
return t.Field(i).Type
// (and their comprised elements) are excluded from the count.
// struct { x, y [3]int } has six components; [10]struct{ x, y string } has twenty.
func (t *Type) NumComponents(countBlank componentsIncludeBlankFields) int64 {
- switch t.Etype {
+ switch t.kind {
case TSTRUCT:
if t.IsFuncArgStruct() {
- Fatalf("NumComponents func arg struct")
+ base.Fatalf("NumComponents func arg struct")
}
var n int64
for _, f := range t.FieldSlice() {
// if there is exactly one. Otherwise, it returns nil.
// Components are counted as in NumComponents, including blank fields.
func (t *Type) SoleComponent() *Type {
- switch t.Etype {
+ switch t.kind {
case TSTRUCT:
if t.IsFuncArgStruct() {
- Fatalf("SoleComponent func arg struct")
+ base.Fatalf("SoleComponent func arg struct")
}
if t.NumFields() != 1 {
return nil
}
func (t *Type) IsMemory() bool {
- if t == TypeMem || t.Etype == TTUPLE && t.Extra.(*Tuple).second == TypeMem {
+ if t == TypeMem || t.kind == TTUPLE && t.Extra.(*Tuple).second == TypeMem {
return true
}
- if t.Etype == TRESULTS {
+ if t.kind == TRESULTS {
if types := t.Extra.(*Results).Types; len(types) > 0 && types[len(types)-1] == TypeMem {
return true
}
}
func (t *Type) IsFlags() bool { return t == TypeFlags }
func (t *Type) IsVoid() bool { return t == TypeVoid }
-func (t *Type) IsTuple() bool { return t.Etype == TTUPLE }
-func (t *Type) IsResults() bool { return t.Etype == TRESULTS }
+func (t *Type) IsTuple() bool { return t.kind == TTUPLE }
+func (t *Type) IsResults() bool { return t.kind == TRESULTS }
// IsUntyped reports whether t is an untyped type.
func (t *Type) IsUntyped() bool {
if t == UntypedString || t == UntypedBool {
return true
}
- switch t.Etype {
+ switch t.kind {
case TNIL, TIDEAL:
return true
}
// HasPointers reports whether t contains a heap pointer.
// Note that this function ignores pointers to go:notinheap types.
func (t *Type) HasPointers() bool {
- switch t.Etype {
+ switch t.kind {
case TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64,
TUINT64, TUINTPTR, TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, TBOOL, TSSA:
return false
TypeVoid = newSSA("void")
TypeInt128 = newSSA("int128")
)
+
+// NewNamed returns a new named type for the given type name.
+func NewNamed(obj Object) *Type {
+ t := New(TFORW)
+ t.sym = obj.Sym()
+ t.nod = obj
+ return t
+}
+
+// Obj returns the type name for the named type t.
+func (t *Type) Obj() Object {
+ if t.sym != nil {
+ return t.nod
+ }
+ return nil
+}
+
+// SetUnderlying sets the underlying type.
+func (t *Type) SetUnderlying(underlying *Type) {
+ if underlying.kind == TFORW {
+ // This type isn't computed yet; when it is, update n.
+ underlying.ForwardType().Copyto = append(underlying.ForwardType().Copyto, t)
+ return
+ }
+
+ ft := t.ForwardType()
+
+ // TODO(mdempsky): Fix Type rekinding.
+ t.kind = underlying.kind
+ t.Extra = underlying.Extra
+ t.Width = underlying.Width
+ t.Align = underlying.Align
+ t.underlying = underlying.underlying
+
+ if underlying.NotInHeap() {
+ t.SetNotInHeap(true)
+ }
+ if underlying.Broke() {
+ t.SetBroke(true)
+ }
+
+ // spec: "The declared type does not inherit any methods bound
+ // to the existing type, but the method set of an interface
+ // type [...] remains unchanged."
+ if t.IsInterface() {
+ t.methods = underlying.methods
+ t.allMethods = underlying.allMethods
+ }
+
+ // Update types waiting on this type.
+ for _, w := range ft.Copyto {
+ w.SetUnderlying(t)
+ }
+
+ // Double-check use of type as embedded type.
+ if ft.Embedlineno.IsKnown() {
+ if t.IsPtr() || t.IsUnsafePtr() {
+ base.ErrorfAt(ft.Embedlineno, "embedded type cannot be a pointer")
+ }
+ }
+}
+
+// NewBasic returns a new basic type of the given kind.
+func NewBasic(kind Kind, obj Object) *Type {
+ t := New(kind)
+ t.sym = obj.Sym()
+ t.nod = obj
+ return t
+}
+
+// NewInterface returns a new interface for the given methods and
+// embedded types. Embedded types are specified as fields with no Sym.
+func NewInterface(pkg *Pkg, methods []*Field) *Type {
+ t := New(TINTER)
+ t.SetInterface(methods)
+ if anyBroke(methods) {
+ t.SetBroke(true)
+ }
+ t.Extra.(*Interface).pkg = pkg
+ return t
+}
+
+// NewSignature returns a new function type for the given receiver,
+// parameters, and results, any of which may be nil.
+func NewSignature(pkg *Pkg, recv *Field, params, results []*Field) *Type {
+ var recvs []*Field
+ if recv != nil {
+ recvs = []*Field{recv}
+ }
+
+ t := New(TFUNC)
+ ft := t.FuncType()
+
+ funargs := func(fields []*Field, funarg Funarg) *Type {
+ s := NewStruct(NoPkg, fields)
+ s.StructType().Funarg = funarg
+ if s.Broke() {
+ t.SetBroke(true)
+ }
+ return s
+ }
+
+ ft.Receiver = funargs(recvs, FunargRcvr)
+ ft.Params = funargs(params, FunargParams)
+ ft.Results = funargs(results, FunargResults)
+ ft.pkg = pkg
+
+ return t
+}
+
+// NewStruct returns a new struct with the given fields.
+func NewStruct(pkg *Pkg, fields []*Field) *Type {
+ t := New(TSTRUCT)
+ t.SetFields(fields)
+ if anyBroke(fields) {
+ t.SetBroke(true)
+ }
+ t.Extra.(*Struct).pkg = pkg
+ return t
+}
+
+func anyBroke(fields []*Field) bool {
+ for _, f := range fields {
+ if f.Broke() {
+ return true
+ }
+ }
+ return false
+}
import (
"cmd/internal/obj"
- "fmt"
)
const BADWIDTH = -1000000000
var (
Widthptr int
Dowidth func(*Type)
- Fatalf func(string, ...interface{})
- Sconv func(*Sym, int, int) string // orig: func sconv(s *Sym, flag FmtFlag, mode fmtMode) string
- Tconv func(*Type, int, int) string // orig: func tconv(t *Type, flag FmtFlag, mode fmtMode) string
- FormatSym func(*Sym, fmt.State, rune, int) // orig: func symFormat(sym *Sym, s fmt.State, verb rune, mode fmtMode)
- FormatType func(*Type, fmt.State, rune, int) // orig: func typeFormat(t *Type, s fmt.State, verb rune, mode fmtMode)
TypeLinkSym func(*Type) *obj.LSym
- Ctxt *obj.Link
-
- FmtLeft int
- FmtUnsigned int
- FErr int
)
-func (s *Sym) String() string {
- return Sconv(s, 0, FErr)
-}
-
-func (sym *Sym) Format(s fmt.State, verb rune) {
- FormatSym(sym, s, verb, FErr)
-}
-
-func (t *Type) String() string {
- // The implementation of tconv (including typefmt and fldconv)
- // must handle recursive types correctly.
- return Tconv(t, 0, FErr)
-}
-
-// ShortString generates a short description of t.
-// It is used in autogenerated method names, reflection,
-// and itab names.
-func (t *Type) ShortString() string {
- return Tconv(t, FmtLeft, FErr)
-}
-
-// LongString generates a complete description of t.
-// It is useful for reflection,
-// or when a unique fingerprint or hash of a type is required.
-func (t *Type) LongString() string {
- return Tconv(t, FmtLeft|FmtUnsigned, FErr)
-}
-
-func (t *Type) Format(s fmt.State, verb rune) {
- FormatType(t, s, verb, FErr)
-}
-
type bitset8 uint8
func (f *bitset8) set(mask uint8, b bool) {
package wasm
import (
+ "cmd/compile/internal/base"
"cmd/compile/internal/gc"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
return p
}
if cnt%8 != 0 {
- gc.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
+ base.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
}
for i := int64(0); i < cnt; i += 8 {
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
- if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
- gc.Warnl(v.Pos, "generated nil check")
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
}
case ssa.OpWasmLoweredWB:
switch v.Aux.(type) {
case *obj.LSym:
gc.AddAux(&p.From, v)
- case *gc.Node:
+ case *ir.Name:
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
default:
package x86
import (
+ "cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/internal/obj/x86"
"cmd/internal/objabi"
arch.SoftFloat = true
case "387":
fmt.Fprintf(os.Stderr, "unsupported setting GO386=387. Consider using GO386=softfloat instead.\n")
- gc.Exit(1)
+ base.Exit(1)
default:
fmt.Fprintf(os.Stderr, "unsupported setting GO386=%s\n", v)
- gc.Exit(1)
+ base.Exit(1)
}
"fmt"
"math"
+ "cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
p.From.Name = obj.NAME_EXTERN
f := math.Float64frombits(uint64(v.AuxInt))
if v.Op == ssa.Op386MOVSDconst1 {
- p.From.Sym = gc.Ctxt.Float64Sym(f)
+ p.From.Sym = base.Ctxt.Float64Sym(f)
} else {
- p.From.Sym = gc.Ctxt.Float32Sym(float32(f))
+ p.From.Sym = base.Ctxt.Float32Sym(float32(f))
}
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
r := v.Reg()
// See the comments in cmd/internal/obj/x86/obj6.go
// near CanUse1InsnTLS for a detailed explanation of these instructions.
- if x86.CanUse1InsnTLS(gc.Ctxt) {
+ if x86.CanUse1InsnTLS(base.Ctxt) {
// MOVL (TLS), r
p := s.Prog(x86.AMOVL)
p.From.Type = obj.TYPE_MEM
// caller's SP is the address of the first arg
p := s.Prog(x86.AMOVL)
p.From.Type = obj.TYPE_ADDR
- p.From.Offset = -gc.Ctxt.FixedFrameSize() // 0 on 386, just to be consistent with other architectures
+ p.From.Offset = -base.Ctxt.FixedFrameSize() // 0 on 386, just to be consistent with other architectures
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
- if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
- gc.Warnl(v.Pos, "generated nil check")
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
}
case ssa.OpClobber:
p := s.Prog(x86.AMOVL)
"cmd/compile/internal/amd64"
"cmd/compile/internal/arm"
"cmd/compile/internal/arm64"
+ "cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/mips"
"cmd/compile/internal/mips64"
}
gc.Main(archInit)
- gc.Exit(0)
+ base.Exit(0)
}
"cmd/cgo",
"cmd/compile",
"cmd/compile/internal/amd64",
+ "cmd/compile/internal/base",
"cmd/compile/internal/arm",
"cmd/compile/internal/arm64",
"cmd/compile/internal/gc",
+ "cmd/compile/internal/ir",
"cmd/compile/internal/logopt",
"cmd/compile/internal/mips",
"cmd/compile/internal/mips64",
"cmd/internal/sys",
"cmd/link",
"cmd/link/internal/amd64",
+ "cmd/compile/internal/base",
"cmd/link/internal/arm",
"cmd/link/internal/arm64",
"cmd/link/internal/benchmark",
"debug/elf",
"debug/macho",
"debug/pe",
+ "go/constant",
"internal/goversion",
"internal/race",
"internal/unsafeheader",
"math/big",
"math/bits",
"sort",
+ "strconv",
}
// File prefixes that are ignored by go/build anyway, and cause
a.Val = t
}
+func (a *Addr) SetConst(v int64) {
+ a.Sym = nil
+ a.Type = TYPE_CONST
+ a.Offset = v
+}
+
// Prog describes a single machine instruction.
//
// The general instruction form is:
// ContentAddressable indicates this is a content-addressable symbol.
AttrContentAddressable
+ // ABI wrapper is set for compiler-generated text symbols that
+ // convert between ABI0 and ABIInternal calling conventions.
+ AttrABIWrapper
+
// attrABIBase is the value at which the ABI is encoded in
// Attribute. This must be last; all bits after this are
// assumed to be an ABI value.
func (a Attribute) Indexed() bool { return a&AttrIndexed != 0 }
func (a Attribute) UsedInIface() bool { return a&AttrUsedInIface != 0 }
func (a Attribute) ContentAddressable() bool { return a&AttrContentAddressable != 0 }
+func (a Attribute) ABIWrapper() bool { return a&AttrABIWrapper != 0 }
func (a *Attribute) Set(flag Attribute, value bool) {
if value {
{bit: AttrTopFrame, s: "TOPFRAME"},
{bit: AttrIndexed, s: ""},
{bit: AttrContentAddressable, s: ""},
+ {bit: AttrABIWrapper, s: "ABIWRAPPER"},
}
// TextAttrString formats a for printing in as part of a TEXT prog.
}
// The compiler needs *LSym to be assignable to cmd/compile/internal/ssa.Sym.
-func (s *LSym) CanBeAnSSASym() {
-}
+func (*LSym) CanBeAnSSASym() {}
+func (*LSym) CanBeAnSSAAux() {}
type Pcln struct {
// Aux symbols for pcln
if !strings.HasPrefix(s.Name, "\"\".") {
continue
}
+ if s.ABIWrapper() {
+ // Don't create an args_stackmap symbol reference for an ABI
+ // wrapper function
+ continue
+ }
found := false
for p := s.Func().Text; p != nil; p = p.Link {
if p.As == AFUNCDATA && p.From.Type == TYPE_CONST && p.From.Offset == objabi.FUNCDATA_ArgsPointerMaps {
s.Set(AttrNoSplit, flag&NOSPLIT != 0)
s.Set(AttrReflectMethod, flag&REFLECTMETHOD != 0)
s.Set(AttrWrapper, flag&WRAPPER != 0)
+ s.Set(AttrABIWrapper, flag&ABIWRAPPER != 0)
s.Set(AttrNeedCtxt, flag&NEEDCTXT != 0)
s.Set(AttrNoFrame, flag&NOFRAME != 0)
s.Set(AttrTopFrame, flag&TOPFRAME != 0)
// invalid
return fmt.Sprintf("Invalid (%#x)", c)
}
+
+func (CCMask) CanBeAnSSAAux() {}
func (r RotateParams) InMerge(mask uint64) *RotateParams {
return r.OutMerge(bits.RotateLeft64(mask, int(r.Amount)))
}
+
+func (RotateParams) CanBeAnSSAAux() {}
// Function is the top of the call stack. Call stack unwinders should stop
// at this function.
TOPFRAME = 2048
+
+ // Function is an ABI wrapper.
+ ABIWRAPPER = 4096
)
}
}
- if !p.From.Sym.NoSplit() || p.From.Sym.Wrapper() {
+ if !p.From.Sym.NoSplit() || (p.From.Sym.Wrapper() && !p.From.Sym.ABIWrapper()) {
p = obj.Appendp(p, newprog)
p = load_g_cx(ctxt, p, newprog) // load g into CX
}
p.To.Reg = REG_BP
}
- if cursym.Func().Text.From.Sym.Wrapper() {
+ if cursym.Func().Text.From.Sym.Wrapper() && !cursym.Func().Text.From.Sym.ABIWrapper() {
// if g._panic != nil && g._panic.argp == FP {
// g._panic.argp = bottom-of-frame
// }
FlagRound = flag.Int("R", -1, "set address rounding `quantum`")
FlagTextAddr = flag.Int64("T", -1, "set text segment `address`")
flagEntrySymbol = flag.String("E", "", "set `entry` symbol name")
-
- cpuprofile = flag.String("cpuprofile", "", "write cpu profile to `file`")
- memprofile = flag.String("memprofile", "", "write memory profile to `file`")
- memprofilerate = flag.Int64("memprofilerate", 0, "set runtime.MemProfileRate to `rate`")
-
+ cpuprofile = flag.String("cpuprofile", "", "write cpu profile to `file`")
+ memprofile = flag.String("memprofile", "", "write memory profile to `file`")
+ memprofilerate = flag.Int64("memprofilerate", 0, "set runtime.MemProfileRate to `rate`")
+ flagAbiWrap = false
benchmarkFlag = flag.String("benchmark", "", "set to 'mem' or 'cpu' to enable phase benchmarking")
benchmarkFileFlag = flag.String("benchmarkprofile", "", "emit phase profiles to `base`_phase.{cpu,mem}prof")
)
objabi.Flagfn1("X", "add string value `definition` of the form importpath.name=value", func(s string) { addstrdata1(ctxt, s) })
objabi.Flagcount("v", "print link trace", &ctxt.Debugvlog)
objabi.Flagfn1("importcfg", "read import configuration from `file`", ctxt.readImportCfg)
+ if objabi.Regabi_enabled != 0 {
+ flag.BoolVar(&flagAbiWrap, "abiwrap", true, "support ABI wrapper functions")
+ }
objabi.Flagparse(usage)
elfshnum = xosect.Elfsect.(*ElfShdr).shnum
}
+ sname := ldr.SymExtname(x)
+
+ // For functions with ABI wrappers, we have to make sure that we
+ // don't wind up with two elf symbol table entries with the same
+ // name (since this will generated an error from the external
+ // linker). In the CgoExportStatic case, we want the ABI0 symbol
+ // to have the primary symbol table entry (since it's going to be
+ // called from C), so we rename the ABIInternal symbol. In all
+ // other cases, we rename the ABI0 symbol, since we want
+ // cross-load-module calls to target ABIInternal.
+ //
+ // TODO: generalize this for non-ELF (put the rename code in the
+ // loader, and store the rename result in SymExtname).
+ //
+ // TODO: avoid the ldr.Lookup calls below by instead using an aux
+ // sym or marker relocation to associate the wrapper with the
+ // wrapped function.
+ //
+ if flagAbiWrap {
+ if !ldr.IsExternal(x) && ldr.SymType(x) == sym.STEXT {
+ // First case
+ if ldr.SymVersion(x) == sym.SymVerABIInternal {
+ if s2 := ldr.Lookup(sname, sym.SymVerABI0); s2 != 0 && ldr.AttrCgoExportStatic(s2) && ldr.SymType(s2) == sym.STEXT {
+ sname = sname + ".abiinternal"
+ }
+ }
+ // Second case
+ if ldr.SymVersion(x) == sym.SymVerABI0 && !ldr.AttrCgoExportStatic(x) {
+ if s2 := ldr.Lookup(sname, sym.SymVerABIInternal); s2 != 0 && ldr.SymType(s2) == sym.STEXT {
+ sname = sname + ".abi0"
+ }
+ }
+ }
+ }
+
// One pass for each binding: elf.STB_LOCAL, elf.STB_GLOBAL,
// maybe one day elf.STB_WEAK.
bind := elf.STB_GLOBAL
other |= 3 << 5
}
- sname := ldr.SymExtname(x)
-
// When dynamically linking, we create Symbols by reading the names from
// the symbol tables of the shared libraries and so the names need to
// match exactly. Tools like DTrace will have to wait for now.
const kCFAllocatorDefault = 0
const kCFStringEncodingUTF8 = 0x08000100
-//go:linkname x509_CFStringCreateWithBytes x509_CFStringCreateWithBytes
//go:cgo_import_dynamic x509_CFStringCreateWithBytes CFStringCreateWithBytes "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
// StringToCFString returns a copy of the UTF-8 contents of s as a new CFString.
}
func x509_CFStringCreateWithBytes_trampoline()
-//go:linkname x509_CFDictionaryGetValueIfPresent x509_CFDictionaryGetValueIfPresent
//go:cgo_import_dynamic x509_CFDictionaryGetValueIfPresent CFDictionaryGetValueIfPresent "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
func CFDictionaryGetValueIfPresent(dict CFRef, key CFString) (value CFRef, ok bool) {
const kCFNumberSInt32Type = 3
-//go:linkname x509_CFNumberGetValue x509_CFNumberGetValue
//go:cgo_import_dynamic x509_CFNumberGetValue CFNumberGetValue "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
func CFNumberGetValue(num CFRef) (int32, error) {
}
func x509_CFNumberGetValue_trampoline()
-//go:linkname x509_CFDataGetLength x509_CFDataGetLength
//go:cgo_import_dynamic x509_CFDataGetLength CFDataGetLength "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
func CFDataGetLength(data CFRef) int {
}
func x509_CFDataGetLength_trampoline()
-//go:linkname x509_CFDataGetBytePtr x509_CFDataGetBytePtr
//go:cgo_import_dynamic x509_CFDataGetBytePtr CFDataGetBytePtr "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
func CFDataGetBytePtr(data CFRef) uintptr {
}
func x509_CFDataGetBytePtr_trampoline()
-//go:linkname x509_CFArrayGetCount x509_CFArrayGetCount
//go:cgo_import_dynamic x509_CFArrayGetCount CFArrayGetCount "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
func CFArrayGetCount(array CFRef) int {
}
func x509_CFArrayGetCount_trampoline()
-//go:linkname x509_CFArrayGetValueAtIndex x509_CFArrayGetValueAtIndex
//go:cgo_import_dynamic x509_CFArrayGetValueAtIndex CFArrayGetValueAtIndex "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
func CFArrayGetValueAtIndex(array CFRef, index int) CFRef {
}
func x509_CFArrayGetValueAtIndex_trampoline()
-//go:linkname x509_CFEqual x509_CFEqual
//go:cgo_import_dynamic x509_CFEqual CFEqual "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
func CFEqual(a, b CFRef) bool {
}
func x509_CFEqual_trampoline()
-//go:linkname x509_CFRelease x509_CFRelease
//go:cgo_import_dynamic x509_CFRelease CFRelease "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
func CFRelease(ref CFRef) {
const errSecNoTrustSettings = -25263
-//go:linkname x509_SecTrustSettingsCopyCertificates x509_SecTrustSettingsCopyCertificates
//go:cgo_import_dynamic x509_SecTrustSettingsCopyCertificates SecTrustSettingsCopyCertificates "/System/Library/Frameworks/Security.framework/Versions/A/Security"
func SecTrustSettingsCopyCertificates(domain SecTrustSettingsDomain) (certArray CFRef, err error) {
const kSecFormatX509Cert int32 = 9
-//go:linkname x509_SecItemExport x509_SecItemExport
//go:cgo_import_dynamic x509_SecItemExport SecItemExport "/System/Library/Frameworks/Security.framework/Versions/A/Security"
func SecItemExport(cert CFRef) (data CFRef, err error) {
const errSecItemNotFound = -25300
-//go:linkname x509_SecTrustSettingsCopyTrustSettings x509_SecTrustSettingsCopyTrustSettings
//go:cgo_import_dynamic x509_SecTrustSettingsCopyTrustSettings SecTrustSettingsCopyTrustSettings "/System/Library/Frameworks/Security.framework/Versions/A/Security"
func SecTrustSettingsCopyTrustSettings(cert CFRef, domain SecTrustSettingsDomain) (trustSettings CFRef, err error) {
}
func x509_SecTrustSettingsCopyTrustSettings_trampoline()
-//go:linkname x509_SecPolicyCopyProperties x509_SecPolicyCopyProperties
//go:cgo_import_dynamic x509_SecPolicyCopyProperties SecPolicyCopyProperties "/System/Library/Frameworks/Security.framework/Versions/A/Security"
func SecPolicyCopyProperties(policy CFRef) CFRef {
testString(t, string(glass), "glass", "I can eat glass and it doesn't hurt me.\n")
}
-func TestLocal(t *testing.T) {
- //go:embed testdata/k*.txt
- var local embed.FS
- testFiles(t, local, "testdata/ken.txt", "If a program is too slow, it must have a loop.\n")
-
- //go:embed testdata/k*.txt
- var s string
- testString(t, s, "local variable s", "If a program is too slow, it must have a loop.\n")
-
- //go:embed testdata/h*.txt
- var b []byte
- testString(t, string(b), "local variable b", "hello, world\n")
-}
+//go:embed testdata
+var dir embed.FS
-func TestDir(t *testing.T) {
- //go:embed testdata
- var all embed.FS
+//go:embed testdata/*
+var star embed.FS
+func TestDir(t *testing.T) {
+ all := dir
testFiles(t, all, "testdata/hello.txt", "hello, world\n")
testFiles(t, all, "testdata/i/i18n.txt", "internationalization\n")
testFiles(t, all, "testdata/i/j/k/k8s.txt", "kubernetes\n")
}
func TestHidden(t *testing.T) {
- //go:embed testdata
- var dir embed.FS
-
- //go:embed testdata/*
- var star embed.FS
-
t.Logf("//go:embed testdata")
testDir(t, dir, "testdata",
}
bbig[0] = old
}
-
-func TestXLocal(t *testing.T) {
- //go:embed testdata/*o.txt
- var local embed.FS
- testFiles(t, local, "testdata/hello.txt", "hello, world\n")
-
- //go:embed testdata/k*.txt
- var s string
- testString(t, s, "local variable s", "If a program is too slow, it must have a loop.\n")
-
- //go:embed testdata/h*.txt
- var b []byte
- testString(t, string(b), "local variable b", "hello, world\n")
-}
"go/token"
"math"
"math/big"
+ "math/bits"
"strconv"
"strings"
"sync"
func BitLen(x Value) int {
switch x := x.(type) {
case int64Val:
- return i64toi(x).val.BitLen()
+ u := uint64(x)
+ if x < 0 {
+ u = uint64(-x)
+ }
+ return 64 - bits.LeadingZeros64(u)
case intVal:
return x.val.BitLen()
case unknownVal:
}
// ord(x) <= ord(y)
- switch x := x.(type) {
+ // Prefer to return the original x and y arguments when possible,
+ // to avoid unnecessary heap allocations.
+
+ switch x1 := x.(type) {
case boolVal, *stringVal, complexVal:
return x, y
case int64Val:
- switch y := y.(type) {
+ switch y.(type) {
case int64Val:
return x, y
case intVal:
- return i64toi(x), y
+ return i64toi(x1), y
case ratVal:
- return i64tor(x), y
+ return i64tor(x1), y
case floatVal:
- return i64tof(x), y
+ return i64tof(x1), y
case complexVal:
- return vtoc(x), y
+ return vtoc(x1), y
}
case intVal:
- switch y := y.(type) {
+ switch y.(type) {
case intVal:
return x, y
case ratVal:
- return itor(x), y
+ return itor(x1), y
case floatVal:
- return itof(x), y
+ return itof(x1), y
case complexVal:
- return vtoc(x), y
+ return vtoc(x1), y
}
case ratVal:
- switch y := y.(type) {
+ switch y.(type) {
case ratVal:
return x, y
case floatVal:
- return rtof(x), y
+ return rtof(x1), y
case complexVal:
- return vtoc(x), y
+ return vtoc(x1), y
}
case floatVal:
- switch y := y.(type) {
+ switch y.(type) {
case floatVal:
return x, y
case complexVal:
- return vtoc(x), y
+ return vtoc(x1), y
}
}
})
}
}
+
+var bitLenTests = []struct {
+ val int64
+ want int
+}{
+ {0, 0},
+ {1, 1},
+ {-16, 5},
+ {1 << 61, 62},
+ {1 << 62, 63},
+ {-1 << 62, 63},
+ {-1 << 63, 64},
+}
+
+func TestBitLen(t *testing.T) {
+ for _, test := range bitLenTests {
+ if got := BitLen(MakeInt64(test.val)); got != test.want {
+ t.Errorf("%v: got %v, want %v", test.val, got, test.want)
+ }
+ }
+}
testTestDir(t, filepath.Join(runtime.GOROOT(), "test"),
"cmplxdivide.go", // also needs file cmplxdivide1.go - ignore
"directive.go", // tests compiler rejection of bad directive placement - ignore
+ "linkname2.go", // go/types doesn't check validity of //go:xxx directives
)
}
TEXT runtime·panicIndex<ABIInternal>(SB),NOSPLIT,$0-16
MOVQ AX, x+0(FP)
MOVQ CX, y+8(FP)
- JMP runtime·goPanicIndex(SB)
+ JMP runtime·goPanicIndex<ABIInternal>(SB)
TEXT runtime·panicIndexU<ABIInternal>(SB),NOSPLIT,$0-16
MOVQ AX, x+0(FP)
MOVQ CX, y+8(FP)
- JMP runtime·goPanicIndexU(SB)
+ JMP runtime·goPanicIndexU<ABIInternal>(SB)
TEXT runtime·panicSliceAlen<ABIInternal>(SB),NOSPLIT,$0-16
MOVQ CX, x+0(FP)
MOVQ DX, y+8(FP)
- JMP runtime·goPanicSliceAlen(SB)
+ JMP runtime·goPanicSliceAlen<ABIInternal>(SB)
TEXT runtime·panicSliceAlenU<ABIInternal>(SB),NOSPLIT,$0-16
MOVQ CX, x+0(FP)
MOVQ DX, y+8(FP)
- JMP runtime·goPanicSliceAlenU(SB)
+ JMP runtime·goPanicSliceAlenU<ABIInternal>(SB)
TEXT runtime·panicSliceAcap<ABIInternal>(SB),NOSPLIT,$0-16
MOVQ CX, x+0(FP)
MOVQ DX, y+8(FP)
- JMP runtime·goPanicSliceAcap(SB)
+ JMP runtime·goPanicSliceAcap<ABIInternal>(SB)
TEXT runtime·panicSliceAcapU<ABIInternal>(SB),NOSPLIT,$0-16
MOVQ CX, x+0(FP)
MOVQ DX, y+8(FP)
- JMP runtime·goPanicSliceAcapU(SB)
+ JMP runtime·goPanicSliceAcapU<ABIInternal>(SB)
TEXT runtime·panicSliceB<ABIInternal>(SB),NOSPLIT,$0-16
MOVQ AX, x+0(FP)
MOVQ CX, y+8(FP)
- JMP runtime·goPanicSliceB(SB)
+ JMP runtime·goPanicSliceB<ABIInternal>(SB)
TEXT runtime·panicSliceBU<ABIInternal>(SB),NOSPLIT,$0-16
MOVQ AX, x+0(FP)
MOVQ CX, y+8(FP)
- JMP runtime·goPanicSliceBU(SB)
+ JMP runtime·goPanicSliceBU<ABIInternal>(SB)
TEXT runtime·panicSlice3Alen<ABIInternal>(SB),NOSPLIT,$0-16
MOVQ DX, x+0(FP)
MOVQ BX, y+8(FP)
- JMP runtime·goPanicSlice3Alen(SB)
+ JMP runtime·goPanicSlice3Alen<ABIInternal>(SB)
TEXT runtime·panicSlice3AlenU<ABIInternal>(SB),NOSPLIT,$0-16
MOVQ DX, x+0(FP)
MOVQ BX, y+8(FP)
- JMP runtime·goPanicSlice3AlenU(SB)
+ JMP runtime·goPanicSlice3AlenU<ABIInternal>(SB)
TEXT runtime·panicSlice3Acap<ABIInternal>(SB),NOSPLIT,$0-16
MOVQ DX, x+0(FP)
MOVQ BX, y+8(FP)
- JMP runtime·goPanicSlice3Acap(SB)
+ JMP runtime·goPanicSlice3Acap<ABIInternal>(SB)
TEXT runtime·panicSlice3AcapU<ABIInternal>(SB),NOSPLIT,$0-16
MOVQ DX, x+0(FP)
MOVQ BX, y+8(FP)
- JMP runtime·goPanicSlice3AcapU(SB)
+ JMP runtime·goPanicSlice3AcapU<ABIInternal>(SB)
TEXT runtime·panicSlice3B<ABIInternal>(SB),NOSPLIT,$0-16
MOVQ CX, x+0(FP)
MOVQ DX, y+8(FP)
- JMP runtime·goPanicSlice3B(SB)
+ JMP runtime·goPanicSlice3B<ABIInternal>(SB)
TEXT runtime·panicSlice3BU<ABIInternal>(SB),NOSPLIT,$0-16
MOVQ CX, x+0(FP)
MOVQ DX, y+8(FP)
- JMP runtime·goPanicSlice3BU(SB)
+ JMP runtime·goPanicSlice3BU<ABIInternal>(SB)
TEXT runtime·panicSlice3C<ABIInternal>(SB),NOSPLIT,$0-16
MOVQ AX, x+0(FP)
MOVQ CX, y+8(FP)
- JMP runtime·goPanicSlice3C(SB)
+ JMP runtime·goPanicSlice3C<ABIInternal>(SB)
TEXT runtime·panicSlice3CU<ABIInternal>(SB),NOSPLIT,$0-16
MOVQ AX, x+0(FP)
MOVQ CX, y+8(FP)
- JMP runtime·goPanicSlice3CU(SB)
+ JMP runtime·goPanicSlice3CU<ABIInternal>(SB)
#ifdef GOOS_android
// Use the free TLS_SLOT_APP slot #2 on Android Q.
//go:cgo_import_dynamic libc___n_pthreads __n_pthreads "libpthread.a/shr_xpg5_64.o"
//go:cgo_import_dynamic libc___mod_init __mod_init "libc.a/shr_64.o"
-//go:linkname libc___n_pthreads libc___n_pthread
+//go:linkname libc___n_pthreads libc___n_pthreads
//go:linkname libc___mod_init libc___mod_init
var (
- libc___n_pthread,
+ libc___n_pthreads,
libc___mod_init libFunc
)
package race_test
import (
+ "fmt"
"internal/testenv"
"os"
"os/exec"
"GORACE="+test.gorace,
)
got, _ := cmd.CombinedOutput()
- if !regexp.MustCompile(test.re).MatchString(string(got)) {
- t.Fatalf("failed test case %v, expect:\n%v\ngot:\n%s",
- test.name, test.re, got)
+ matched := false
+ for _, re := range test.re {
+ if regexp.MustCompile(re).MatchString(string(got)) {
+ matched = true
+ break
+ }
+ }
+ if !matched {
+ exp := fmt.Sprintf("expect:\n%v\n", test.re[0])
+ if len(test.re) > 1 {
+ exp = fmt.Sprintf("expected one of %d patterns:\n",
+ len(test.re))
+ for k, re := range test.re {
+ exp += fmt.Sprintf("pattern %d:\n%v\n", k, re)
+ }
+ }
+ t.Fatalf("failed test case %v, %sgot:\n%s",
+ test.name, exp, got)
}
}
}
goos string
gorace string
source string
- re string
+ re []string
}{
{"simple", "run", "", "atexit_sleep_ms=0", `
package main
store(x, 42)
done <- true
}
-`, `==================
+`, []string{`==================
WARNING: DATA RACE
Write at 0x[0-9,a-f]+ by goroutine [0-9]:
main\.store\(\)
==================
Found 1 data race\(s\)
exit status 66
-`},
+`}},
{"exitcode", "run", "", "atexit_sleep_ms=0 exitcode=13", `
package main
x = 43
<-done
}
-`, `exit status 13`},
+`, []string{`exit status 13`}},
{"strip_path_prefix", "run", "", "atexit_sleep_ms=0 strip_path_prefix=/main.", `
package main
x = 43
<-done
}
-`, `
+`, []string{`
go:7 \+0x[0-9,a-f]+
-`},
+`}},
{"halt_on_error", "run", "", "atexit_sleep_ms=0 halt_on_error=1", `
package main
x = 43
<-done
}
-`, `
+`, []string{`
==================
exit status 66
-`},
+`}},
{"test_fails_on_race", "test", "", "atexit_sleep_ms=0", `
package main_test
<-done
t.Log(t.Failed())
}
-`, `
+`, []string{`
==================
--- FAIL: TestFail \(0...s\)
.*main_test.go:14: true
.*testing.go:.*: race detected during execution of test
-FAIL`},
+FAIL`}},
{"slicebytetostring_pc", "run", "", "atexit_sleep_ms=0", `
package main
data[0] = 1
<-done
}
-`, `
+`, []string{`
runtime\.slicebytetostring\(\)
.*/runtime/string\.go:.*
main\.main\.func1\(\)
- .*/main.go:7`},
+ .*/main.go:7`}},
// Test for https://golang.org/issue/33309
{"midstack_inlining_traceback", "run", "linux", "atexit_sleep_ms=0", `
func h(c chan int) {
c <- x
}
-`, `==================
+`, []string{`==================
WARNING: DATA RACE
Read at 0x[0-9,a-f]+ by goroutine [0-9]:
main\.h\(\)
==================
Found 1 data race\(s\)
exit status 66
-`},
+`}},
// Test for https://golang.org/issue/17190
{"external_cgo_thread", "run", "linux", "atexit_sleep_ms=0", `
racy++
<- done
}
-`, `==================
+`, []string{`==================
+WARNING: DATA RACE
+Read at 0x[0-9,a-f]+ by main goroutine:
+ main\.main\(\)
+ .*/main\.go:34 \+0x[0-9,a-f]+
+
+Previous write at 0x[0-9,a-f]+ by goroutine [0-9]:
+ main\.goCallback\(\)
+ .*/main\.go:27 \+0x[0-9,a-f]+
+ _cgoexp_[0-9a-z]+_goCallback\(\)
+ .*_cgo_gotypes\.go:[0-9]+ \+0x[0-9,a-f]+
+ _cgoexp_[0-9a-z]+_goCallback\(\)
+ <autogenerated>:1 \+0x[0-9,a-f]+
+
+Goroutine [0-9] \(running\) created at:
+ runtime\.newextram\(\)
+ .*/runtime/proc.go:[0-9]+ \+0x[0-9,a-f]+
+==================`,
+ `==================
WARNING: DATA RACE
Read at 0x[0-9,a-f]+ by .*:
main\..*
Goroutine [0-9] \(running\) created at:
runtime\.newextram\(\)
.*/runtime/proc.go:[0-9]+ \+0x[0-9,a-f]+
-==================`},
+==================`}},
{"second_test_passes", "test", "", "atexit_sleep_ms=0", `
package main_test
import "testing"
func TestPass(t *testing.T) {
}
-`, `
+`, []string{`
==================
--- FAIL: TestFail \(0...s\)
.*testing.go:.*: race detected during execution of test
-FAIL`},
+FAIL`}},
{"mutex", "run", "", "atexit_sleep_ms=0", `
package main
import (
}
wg.Wait()
if (data == iterations*(threads+1)) { fmt.Println("pass") }
-}`, `pass`},
+}`, []string{`pass`}},
// Test for https://github.com/golang/go/issues/37355
{"chanmm", "run", "", "atexit_sleep_ms=0", `
package main
wg.Wait()
_ = data
}
-`, `==================
+`, []string{`==================
WARNING: DATA RACE
Write at 0x[0-9,a-f]+ by goroutine [0-9]:
main\.main\.func2\(\)
Goroutine [0-9] \(running\) created at:
main\.main\(\)
.*/main.go:[0-9]+ \+0x[0-9,a-f]+
-==================`},
+==================`}},
}
//go:cgo_import_dynamic libc_gethostname gethostname "libc.so"
//go:cgo_import_dynamic libc_getpid getpid "libc.so"
//go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
-//go:cgo_import_dynamic libc_pipe pipe "libc.so"
//go:cgo_import_dynamic libc_setgid setgid "libc.so"
//go:cgo_import_dynamic libc_setgroups setgroups "libc.so"
//go:cgo_import_dynamic libc_setsid setsid "libc.so"
//go:linkname libc_gethostname libc_gethostname
//go:linkname libc_getpid libc_getpid
//go:linkname libc_ioctl libc_ioctl
-//go:linkname libc_pipe libc_pipe
//go:linkname libc_setgid libc_setgid
//go:linkname libc_setgroups libc_setgroups
//go:linkname libc_setsid libc_setsid
// Function is the top of the call stack. Call stack unwinders should stop
// at this function.
#define TOPFRAME 2048
+// Function is an ABI wrapper.
+#define ABIWRAPPER 4096
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !compiler_bootstrap
+
+package strconv
+
+import "internal/bytealg"
+
+// contains reports whether the string contains the byte c.
+func contains(s string, c byte) bool {
+ return bytealg.IndexByteString(s, c) != -1
+}
--- /dev/null
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build compiler_bootstrap
+
+package strconv
+
+// contains reports whether the string contains the byte c.
+func contains(s string, c byte) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] == c {
+ return true
+ }
+ }
+ return false
+}
// Exp10 Range.
if man == 0 {
if neg {
- f = math.Float64frombits(0x80000000_00000000) // Negative zero.
+ f = math.Float64frombits(0x8000000000000000) // Negative zero.
}
return f, true
}
// Normalization.
clz := bits.LeadingZeros64(man)
- man <<= clz
+ man <<= uint(clz)
const float64ExponentBias = 1023
retExp2 := uint64(217706*exp10>>16+64+float64ExponentBias) - uint64(clz)
if retExp2-1 >= 0x7FF-1 {
return 0, false
}
- retBits := retExp2<<52 | retMantissa&0x000FFFFF_FFFFFFFF
+ retBits := retExp2<<52 | retMantissa&0x000FFFFFFFFFFFFF
if neg {
- retBits |= 0x80000000_00000000
+ retBits |= 0x8000000000000000
}
return math.Float64frombits(retBits), true
}
// Normalization.
clz := bits.LeadingZeros64(man)
- man <<= clz
+ man <<= uint(clz)
const float32ExponentBias = 127
retExp2 := uint64(217706*exp10>>16+64+float32ExponentBias) - uint64(clz)
xHi, xLo := bits.Mul64(man, detailedPowersOfTen[exp10-detailedPowersOfTenMinExp10][1])
// Wider Approximation.
- if xHi&0x3F_FFFFFFFF == 0x3F_FFFFFFFF && xLo+man < man {
+ if xHi&0x3FFFFFFFFF == 0x3FFFFFFFFF && xLo+man < man {
yHi, yLo := bits.Mul64(man, detailedPowersOfTen[exp10-detailedPowersOfTenMinExp10][0])
mergedHi, mergedLo := xHi, xLo+yHi
if mergedLo < xLo {
mergedHi++
}
- if mergedHi&0x3F_FFFFFFFF == 0x3F_FFFFFFFF && mergedLo+1 == 0 && yLo+man < man {
+ if mergedHi&0x3FFFFFFFFF == 0x3FFFFFFFFF && mergedLo+1 == 0 && yLo+man < man {
return 0, false
}
xHi, xLo = mergedHi, mergedLo
retExp2 -= 1 ^ msb
// Half-way Ambiguity.
- if xLo == 0 && xHi&0x3F_FFFFFFFF == 0 && retMantissa&3 == 1 {
+ if xLo == 0 && xHi&0x3FFFFFFFFF == 0 && retMantissa&3 == 1 {
return 0, false
}
package strconv
import (
- "internal/bytealg"
"unicode/utf8"
)
return string(buf), nil
}
-// contains reports whether the string contains the byte c.
-func contains(s string, c byte) bool {
- return bytealg.IndexByteString(s, c) != -1
-}
-
// bsearch16 returns the smallest i such that a[i] >= x.
// If there is no such i, bsearch16 returns len(a).
func bsearch16(a []uint16, x uint16) int {
$trampolines{$funcname} = 1;
# The assembly trampoline that jumps to the libc routine.
$text .= "func ${funcname}_trampoline()\n";
- # Map syscall.funcname to just plain funcname.
- # (The jump to this function is in the assembly trampoline, generated by mksyscallasm_darwin.go.)
- $text .= "//go:linkname $funcname $funcname\n";
# Tell the linker that funcname can be found in libSystem using varname without the libc_ prefix.
my $basename = substr $funcname, 5;
$text .= "//go:cgo_import_dynamic $funcname $basename \"/usr/lib/libSystem.B.dylib\"\n\n";
func libc_getfsstat_trampoline()
-//go:linkname libc_getfsstat libc_getfsstat
//go:cgo_import_dynamic libc_getfsstat getfsstat "/usr/lib/libSystem.B.dylib"
func setattrlistTimes(path string, times []Timespec) error {
func libc_setattrlist_trampoline()
-//go:linkname libc_setattrlist libc_setattrlist
//go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib"
func utimensat(dirfd int, path string, times *[2]Timespec, flag int) error {
func libc_fdopendir_trampoline()
-//go:linkname libc_fdopendir libc_fdopendir
//go:cgo_import_dynamic libc_fdopendir fdopendir "/usr/lib/libSystem.B.dylib"
func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
func libc_sendfile_trampoline()
-//go:linkname libc_sendfile libc_sendfile
//go:cgo_import_dynamic libc_sendfile sendfile "/usr/lib/libSystem.B.dylib"
// Implemented in the runtime package (runtime/sys_darwin_64.go)
func libc_sendfile_trampoline()
-//go:linkname libc_sendfile libc_sendfile
//go:cgo_import_dynamic libc_sendfile sendfile "/usr/lib/libSystem.B.dylib"
// Implemented in the runtime package (runtime/sys_darwin_64.go)
func libc_getgroups_trampoline()
-//go:linkname libc_getgroups libc_getgroups
//go:cgo_import_dynamic libc_getgroups getgroups "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_setgroups_trampoline()
-//go:linkname libc_setgroups libc_setgroups
//go:cgo_import_dynamic libc_setgroups setgroups "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_wait4_trampoline()
-//go:linkname libc_wait4 libc_wait4
//go:cgo_import_dynamic libc_wait4 wait4 "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_accept_trampoline()
-//go:linkname libc_accept libc_accept
//go:cgo_import_dynamic libc_accept accept "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_bind_trampoline()
-//go:linkname libc_bind libc_bind
//go:cgo_import_dynamic libc_bind bind "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_connect_trampoline()
-//go:linkname libc_connect libc_connect
//go:cgo_import_dynamic libc_connect connect "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_socket_trampoline()
-//go:linkname libc_socket libc_socket
//go:cgo_import_dynamic libc_socket socket "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_getsockopt_trampoline()
-//go:linkname libc_getsockopt libc_getsockopt
//go:cgo_import_dynamic libc_getsockopt getsockopt "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_setsockopt_trampoline()
-//go:linkname libc_setsockopt libc_setsockopt
//go:cgo_import_dynamic libc_setsockopt setsockopt "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_getpeername_trampoline()
-//go:linkname libc_getpeername libc_getpeername
//go:cgo_import_dynamic libc_getpeername getpeername "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_getsockname_trampoline()
-//go:linkname libc_getsockname libc_getsockname
//go:cgo_import_dynamic libc_getsockname getsockname "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_shutdown_trampoline()
-//go:linkname libc_shutdown libc_shutdown
//go:cgo_import_dynamic libc_shutdown shutdown "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_socketpair_trampoline()
-//go:linkname libc_socketpair libc_socketpair
//go:cgo_import_dynamic libc_socketpair socketpair "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_recvfrom_trampoline()
-//go:linkname libc_recvfrom libc_recvfrom
//go:cgo_import_dynamic libc_recvfrom recvfrom "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_sendto_trampoline()
-//go:linkname libc_sendto libc_sendto
//go:cgo_import_dynamic libc_sendto sendto "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_recvmsg_trampoline()
-//go:linkname libc_recvmsg libc_recvmsg
//go:cgo_import_dynamic libc_recvmsg recvmsg "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_sendmsg_trampoline()
-//go:linkname libc_sendmsg libc_sendmsg
//go:cgo_import_dynamic libc_sendmsg sendmsg "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_kevent_trampoline()
-//go:linkname libc_kevent libc_kevent
//go:cgo_import_dynamic libc_kevent kevent "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_utimes_trampoline()
-//go:linkname libc_utimes libc_utimes
//go:cgo_import_dynamic libc_utimes utimes "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_futimes_trampoline()
-//go:linkname libc_futimes libc_futimes
//go:cgo_import_dynamic libc_futimes futimes "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_fcntl_trampoline()
-//go:linkname libc_fcntl libc_fcntl
//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_pipe_trampoline()
-//go:linkname libc_pipe libc_pipe
//go:cgo_import_dynamic libc_pipe pipe "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_kill_trampoline()
-//go:linkname libc_kill libc_kill
//go:cgo_import_dynamic libc_kill kill "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_access_trampoline()
-//go:linkname libc_access libc_access
//go:cgo_import_dynamic libc_access access "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_adjtime_trampoline()
-//go:linkname libc_adjtime libc_adjtime
//go:cgo_import_dynamic libc_adjtime adjtime "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_chdir_trampoline()
-//go:linkname libc_chdir libc_chdir
//go:cgo_import_dynamic libc_chdir chdir "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_chflags_trampoline()
-//go:linkname libc_chflags libc_chflags
//go:cgo_import_dynamic libc_chflags chflags "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_chmod_trampoline()
-//go:linkname libc_chmod libc_chmod
//go:cgo_import_dynamic libc_chmod chmod "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_chown_trampoline()
-//go:linkname libc_chown libc_chown
//go:cgo_import_dynamic libc_chown chown "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_chroot_trampoline()
-//go:linkname libc_chroot libc_chroot
//go:cgo_import_dynamic libc_chroot chroot "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_close_trampoline()
-//go:linkname libc_close libc_close
//go:cgo_import_dynamic libc_close close "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_closedir_trampoline()
-//go:linkname libc_closedir libc_closedir
//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_dup_trampoline()
-//go:linkname libc_dup libc_dup
//go:cgo_import_dynamic libc_dup dup "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_dup2_trampoline()
-//go:linkname libc_dup2 libc_dup2
//go:cgo_import_dynamic libc_dup2 dup2 "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_exchangedata_trampoline()
-//go:linkname libc_exchangedata libc_exchangedata
//go:cgo_import_dynamic libc_exchangedata exchangedata "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_fchdir_trampoline()
-//go:linkname libc_fchdir libc_fchdir
//go:cgo_import_dynamic libc_fchdir fchdir "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_fchflags_trampoline()
-//go:linkname libc_fchflags libc_fchflags
//go:cgo_import_dynamic libc_fchflags fchflags "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_fchmod_trampoline()
-//go:linkname libc_fchmod libc_fchmod
//go:cgo_import_dynamic libc_fchmod fchmod "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_fchown_trampoline()
-//go:linkname libc_fchown libc_fchown
//go:cgo_import_dynamic libc_fchown fchown "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_flock_trampoline()
-//go:linkname libc_flock libc_flock
//go:cgo_import_dynamic libc_flock flock "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_fpathconf_trampoline()
-//go:linkname libc_fpathconf libc_fpathconf
//go:cgo_import_dynamic libc_fpathconf fpathconf "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_fsync_trampoline()
-//go:linkname libc_fsync libc_fsync
//go:cgo_import_dynamic libc_fsync fsync "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_ftruncate_trampoline()
-//go:linkname libc_ftruncate libc_ftruncate
//go:cgo_import_dynamic libc_ftruncate ftruncate "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_getdtablesize_trampoline()
-//go:linkname libc_getdtablesize libc_getdtablesize
//go:cgo_import_dynamic libc_getdtablesize getdtablesize "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_getegid_trampoline()
-//go:linkname libc_getegid libc_getegid
//go:cgo_import_dynamic libc_getegid getegid "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_geteuid_trampoline()
-//go:linkname libc_geteuid libc_geteuid
//go:cgo_import_dynamic libc_geteuid geteuid "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_getgid_trampoline()
-//go:linkname libc_getgid libc_getgid
//go:cgo_import_dynamic libc_getgid getgid "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_getpgid_trampoline()
-//go:linkname libc_getpgid libc_getpgid
//go:cgo_import_dynamic libc_getpgid getpgid "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_getpgrp_trampoline()
-//go:linkname libc_getpgrp libc_getpgrp
//go:cgo_import_dynamic libc_getpgrp getpgrp "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_getpid_trampoline()
-//go:linkname libc_getpid libc_getpid
//go:cgo_import_dynamic libc_getpid getpid "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_getppid_trampoline()
-//go:linkname libc_getppid libc_getppid
//go:cgo_import_dynamic libc_getppid getppid "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_getpriority_trampoline()
-//go:linkname libc_getpriority libc_getpriority
//go:cgo_import_dynamic libc_getpriority getpriority "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_getrlimit_trampoline()
-//go:linkname libc_getrlimit libc_getrlimit
//go:cgo_import_dynamic libc_getrlimit getrlimit "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_getrusage_trampoline()
-//go:linkname libc_getrusage libc_getrusage
//go:cgo_import_dynamic libc_getrusage getrusage "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_getsid_trampoline()
-//go:linkname libc_getsid libc_getsid
//go:cgo_import_dynamic libc_getsid getsid "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_getuid_trampoline()
-//go:linkname libc_getuid libc_getuid
//go:cgo_import_dynamic libc_getuid getuid "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_issetugid_trampoline()
-//go:linkname libc_issetugid libc_issetugid
//go:cgo_import_dynamic libc_issetugid issetugid "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_kqueue_trampoline()
-//go:linkname libc_kqueue libc_kqueue
//go:cgo_import_dynamic libc_kqueue kqueue "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_lchown_trampoline()
-//go:linkname libc_lchown libc_lchown
//go:cgo_import_dynamic libc_lchown lchown "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_link_trampoline()
-//go:linkname libc_link libc_link
//go:cgo_import_dynamic libc_link link "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_listen_trampoline()
-//go:linkname libc_listen libc_listen
//go:cgo_import_dynamic libc_listen listen "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_mkdir_trampoline()
-//go:linkname libc_mkdir libc_mkdir
//go:cgo_import_dynamic libc_mkdir mkdir "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_mkfifo_trampoline()
-//go:linkname libc_mkfifo libc_mkfifo
//go:cgo_import_dynamic libc_mkfifo mkfifo "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_mknod_trampoline()
-//go:linkname libc_mknod libc_mknod
//go:cgo_import_dynamic libc_mknod mknod "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_mlock_trampoline()
-//go:linkname libc_mlock libc_mlock
//go:cgo_import_dynamic libc_mlock mlock "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_mlockall_trampoline()
-//go:linkname libc_mlockall libc_mlockall
//go:cgo_import_dynamic libc_mlockall mlockall "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_mprotect_trampoline()
-//go:linkname libc_mprotect libc_mprotect
//go:cgo_import_dynamic libc_mprotect mprotect "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_munlock_trampoline()
-//go:linkname libc_munlock libc_munlock
//go:cgo_import_dynamic libc_munlock munlock "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_munlockall_trampoline()
-//go:linkname libc_munlockall libc_munlockall
//go:cgo_import_dynamic libc_munlockall munlockall "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_open_trampoline()
-//go:linkname libc_open libc_open
//go:cgo_import_dynamic libc_open open "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_pathconf_trampoline()
-//go:linkname libc_pathconf libc_pathconf
//go:cgo_import_dynamic libc_pathconf pathconf "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_pread_trampoline()
-//go:linkname libc_pread libc_pread
//go:cgo_import_dynamic libc_pread pread "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_pwrite_trampoline()
-//go:linkname libc_pwrite libc_pwrite
//go:cgo_import_dynamic libc_pwrite pwrite "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_read_trampoline()
-//go:linkname libc_read libc_read
//go:cgo_import_dynamic libc_read read "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_readdir_r_trampoline()
-//go:linkname libc_readdir_r libc_readdir_r
//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_readlink_trampoline()
-//go:linkname libc_readlink libc_readlink
//go:cgo_import_dynamic libc_readlink readlink "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_rename_trampoline()
-//go:linkname libc_rename libc_rename
//go:cgo_import_dynamic libc_rename rename "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_revoke_trampoline()
-//go:linkname libc_revoke libc_revoke
//go:cgo_import_dynamic libc_revoke revoke "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_rmdir_trampoline()
-//go:linkname libc_rmdir libc_rmdir
//go:cgo_import_dynamic libc_rmdir rmdir "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_lseek_trampoline()
-//go:linkname libc_lseek libc_lseek
//go:cgo_import_dynamic libc_lseek lseek "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_select_trampoline()
-//go:linkname libc_select libc_select
//go:cgo_import_dynamic libc_select select "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_setegid_trampoline()
-//go:linkname libc_setegid libc_setegid
//go:cgo_import_dynamic libc_setegid setegid "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_seteuid_trampoline()
-//go:linkname libc_seteuid libc_seteuid
//go:cgo_import_dynamic libc_seteuid seteuid "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_setgid_trampoline()
-//go:linkname libc_setgid libc_setgid
//go:cgo_import_dynamic libc_setgid setgid "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_setlogin_trampoline()
-//go:linkname libc_setlogin libc_setlogin
//go:cgo_import_dynamic libc_setlogin setlogin "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_setpgid_trampoline()
-//go:linkname libc_setpgid libc_setpgid
//go:cgo_import_dynamic libc_setpgid setpgid "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_setpriority_trampoline()
-//go:linkname libc_setpriority libc_setpriority
//go:cgo_import_dynamic libc_setpriority setpriority "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_setprivexec_trampoline()
-//go:linkname libc_setprivexec libc_setprivexec
//go:cgo_import_dynamic libc_setprivexec setprivexec "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_setregid_trampoline()
-//go:linkname libc_setregid libc_setregid
//go:cgo_import_dynamic libc_setregid setregid "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_setreuid_trampoline()
-//go:linkname libc_setreuid libc_setreuid
//go:cgo_import_dynamic libc_setreuid setreuid "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_setrlimit_trampoline()
-//go:linkname libc_setrlimit libc_setrlimit
//go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_setsid_trampoline()
-//go:linkname libc_setsid libc_setsid
//go:cgo_import_dynamic libc_setsid setsid "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_settimeofday_trampoline()
-//go:linkname libc_settimeofday libc_settimeofday
//go:cgo_import_dynamic libc_settimeofday settimeofday "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_setuid_trampoline()
-//go:linkname libc_setuid libc_setuid
//go:cgo_import_dynamic libc_setuid setuid "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_symlink_trampoline()
-//go:linkname libc_symlink libc_symlink
//go:cgo_import_dynamic libc_symlink symlink "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_sync_trampoline()
-//go:linkname libc_sync libc_sync
//go:cgo_import_dynamic libc_sync sync "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_truncate_trampoline()
-//go:linkname libc_truncate libc_truncate
//go:cgo_import_dynamic libc_truncate truncate "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_umask_trampoline()
-//go:linkname libc_umask libc_umask
//go:cgo_import_dynamic libc_umask umask "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_undelete_trampoline()
-//go:linkname libc_undelete libc_undelete
//go:cgo_import_dynamic libc_undelete undelete "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_unlink_trampoline()
-//go:linkname libc_unlink libc_unlink
//go:cgo_import_dynamic libc_unlink unlink "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_unmount_trampoline()
-//go:linkname libc_unmount libc_unmount
//go:cgo_import_dynamic libc_unmount unmount "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_write_trampoline()
-//go:linkname libc_write libc_write
//go:cgo_import_dynamic libc_write write "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_writev_trampoline()
-//go:linkname libc_writev libc_writev
//go:cgo_import_dynamic libc_writev writev "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_mmap_trampoline()
-//go:linkname libc_mmap libc_mmap
//go:cgo_import_dynamic libc_mmap mmap "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_munmap_trampoline()
-//go:linkname libc_munmap libc_munmap
//go:cgo_import_dynamic libc_munmap munmap "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_fork_trampoline()
-//go:linkname libc_fork libc_fork
//go:cgo_import_dynamic libc_fork fork "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_ioctl_trampoline()
-//go:linkname libc_ioctl libc_ioctl
//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_execve_trampoline()
-//go:linkname libc_execve libc_execve
//go:cgo_import_dynamic libc_execve execve "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_exit_trampoline()
-//go:linkname libc_exit libc_exit
//go:cgo_import_dynamic libc_exit exit "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_sysctl_trampoline()
-//go:linkname libc_sysctl libc_sysctl
//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_unlinkat_trampoline()
-//go:linkname libc_unlinkat libc_unlinkat
//go:cgo_import_dynamic libc_unlinkat unlinkat "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_openat_trampoline()
-//go:linkname libc_openat libc_openat
//go:cgo_import_dynamic libc_openat openat "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_getcwd_trampoline()
-//go:linkname libc_getcwd libc_getcwd
//go:cgo_import_dynamic libc_getcwd getcwd "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_fstat64_trampoline()
-//go:linkname libc_fstat64 libc_fstat64
//go:cgo_import_dynamic libc_fstat64 fstat64 "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_fstatfs64_trampoline()
-//go:linkname libc_fstatfs64 libc_fstatfs64
//go:cgo_import_dynamic libc_fstatfs64 fstatfs64 "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_gettimeofday_trampoline()
-//go:linkname libc_gettimeofday libc_gettimeofday
//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_lstat64_trampoline()
-//go:linkname libc_lstat64 libc_lstat64
//go:cgo_import_dynamic libc_lstat64 lstat64 "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_stat64_trampoline()
-//go:linkname libc_stat64 libc_stat64
//go:cgo_import_dynamic libc_stat64 stat64 "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_statfs64_trampoline()
-//go:linkname libc_statfs64 libc_statfs64
//go:cgo_import_dynamic libc_statfs64 statfs64 "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_fstatat64_trampoline()
-//go:linkname libc_fstatat64 libc_fstatat64
//go:cgo_import_dynamic libc_fstatat64 fstatat64 "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_ptrace_trampoline()
-//go:linkname libc_ptrace libc_ptrace
//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib"
func libc_getgroups_trampoline()
-//go:linkname libc_getgroups libc_getgroups
//go:cgo_import_dynamic libc_getgroups getgroups "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_setgroups_trampoline()
-//go:linkname libc_setgroups libc_setgroups
//go:cgo_import_dynamic libc_setgroups setgroups "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_wait4_trampoline()
-//go:linkname libc_wait4 libc_wait4
//go:cgo_import_dynamic libc_wait4 wait4 "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_accept_trampoline()
-//go:linkname libc_accept libc_accept
//go:cgo_import_dynamic libc_accept accept "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_bind_trampoline()
-//go:linkname libc_bind libc_bind
//go:cgo_import_dynamic libc_bind bind "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_connect_trampoline()
-//go:linkname libc_connect libc_connect
//go:cgo_import_dynamic libc_connect connect "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_socket_trampoline()
-//go:linkname libc_socket libc_socket
//go:cgo_import_dynamic libc_socket socket "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_getsockopt_trampoline()
-//go:linkname libc_getsockopt libc_getsockopt
//go:cgo_import_dynamic libc_getsockopt getsockopt "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_setsockopt_trampoline()
-//go:linkname libc_setsockopt libc_setsockopt
//go:cgo_import_dynamic libc_setsockopt setsockopt "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_getpeername_trampoline()
-//go:linkname libc_getpeername libc_getpeername
//go:cgo_import_dynamic libc_getpeername getpeername "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_getsockname_trampoline()
-//go:linkname libc_getsockname libc_getsockname
//go:cgo_import_dynamic libc_getsockname getsockname "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_shutdown_trampoline()
-//go:linkname libc_shutdown libc_shutdown
//go:cgo_import_dynamic libc_shutdown shutdown "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_socketpair_trampoline()
-//go:linkname libc_socketpair libc_socketpair
//go:cgo_import_dynamic libc_socketpair socketpair "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_recvfrom_trampoline()
-//go:linkname libc_recvfrom libc_recvfrom
//go:cgo_import_dynamic libc_recvfrom recvfrom "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_sendto_trampoline()
-//go:linkname libc_sendto libc_sendto
//go:cgo_import_dynamic libc_sendto sendto "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_recvmsg_trampoline()
-//go:linkname libc_recvmsg libc_recvmsg
//go:cgo_import_dynamic libc_recvmsg recvmsg "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_sendmsg_trampoline()
-//go:linkname libc_sendmsg libc_sendmsg
//go:cgo_import_dynamic libc_sendmsg sendmsg "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_kevent_trampoline()
-//go:linkname libc_kevent libc_kevent
//go:cgo_import_dynamic libc_kevent kevent "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_utimes_trampoline()
-//go:linkname libc_utimes libc_utimes
//go:cgo_import_dynamic libc_utimes utimes "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_futimes_trampoline()
-//go:linkname libc_futimes libc_futimes
//go:cgo_import_dynamic libc_futimes futimes "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_fcntl_trampoline()
-//go:linkname libc_fcntl libc_fcntl
//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_pipe_trampoline()
-//go:linkname libc_pipe libc_pipe
//go:cgo_import_dynamic libc_pipe pipe "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_kill_trampoline()
-//go:linkname libc_kill libc_kill
//go:cgo_import_dynamic libc_kill kill "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_access_trampoline()
-//go:linkname libc_access libc_access
//go:cgo_import_dynamic libc_access access "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_adjtime_trampoline()
-//go:linkname libc_adjtime libc_adjtime
//go:cgo_import_dynamic libc_adjtime adjtime "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_chdir_trampoline()
-//go:linkname libc_chdir libc_chdir
//go:cgo_import_dynamic libc_chdir chdir "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_chflags_trampoline()
-//go:linkname libc_chflags libc_chflags
//go:cgo_import_dynamic libc_chflags chflags "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_chmod_trampoline()
-//go:linkname libc_chmod libc_chmod
//go:cgo_import_dynamic libc_chmod chmod "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_chown_trampoline()
-//go:linkname libc_chown libc_chown
//go:cgo_import_dynamic libc_chown chown "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_chroot_trampoline()
-//go:linkname libc_chroot libc_chroot
//go:cgo_import_dynamic libc_chroot chroot "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_close_trampoline()
-//go:linkname libc_close libc_close
//go:cgo_import_dynamic libc_close close "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_closedir_trampoline()
-//go:linkname libc_closedir libc_closedir
//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_dup_trampoline()
-//go:linkname libc_dup libc_dup
//go:cgo_import_dynamic libc_dup dup "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_dup2_trampoline()
-//go:linkname libc_dup2 libc_dup2
//go:cgo_import_dynamic libc_dup2 dup2 "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_exchangedata_trampoline()
-//go:linkname libc_exchangedata libc_exchangedata
//go:cgo_import_dynamic libc_exchangedata exchangedata "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_fchdir_trampoline()
-//go:linkname libc_fchdir libc_fchdir
//go:cgo_import_dynamic libc_fchdir fchdir "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_fchflags_trampoline()
-//go:linkname libc_fchflags libc_fchflags
//go:cgo_import_dynamic libc_fchflags fchflags "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_fchmod_trampoline()
-//go:linkname libc_fchmod libc_fchmod
//go:cgo_import_dynamic libc_fchmod fchmod "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_fchown_trampoline()
-//go:linkname libc_fchown libc_fchown
//go:cgo_import_dynamic libc_fchown fchown "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_flock_trampoline()
-//go:linkname libc_flock libc_flock
//go:cgo_import_dynamic libc_flock flock "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_fpathconf_trampoline()
-//go:linkname libc_fpathconf libc_fpathconf
//go:cgo_import_dynamic libc_fpathconf fpathconf "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_fsync_trampoline()
-//go:linkname libc_fsync libc_fsync
//go:cgo_import_dynamic libc_fsync fsync "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_ftruncate_trampoline()
-//go:linkname libc_ftruncate libc_ftruncate
//go:cgo_import_dynamic libc_ftruncate ftruncate "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_getdtablesize_trampoline()
-//go:linkname libc_getdtablesize libc_getdtablesize
//go:cgo_import_dynamic libc_getdtablesize getdtablesize "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_getegid_trampoline()
-//go:linkname libc_getegid libc_getegid
//go:cgo_import_dynamic libc_getegid getegid "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_geteuid_trampoline()
-//go:linkname libc_geteuid libc_geteuid
//go:cgo_import_dynamic libc_geteuid geteuid "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_getgid_trampoline()
-//go:linkname libc_getgid libc_getgid
//go:cgo_import_dynamic libc_getgid getgid "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_getpgid_trampoline()
-//go:linkname libc_getpgid libc_getpgid
//go:cgo_import_dynamic libc_getpgid getpgid "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_getpgrp_trampoline()
-//go:linkname libc_getpgrp libc_getpgrp
//go:cgo_import_dynamic libc_getpgrp getpgrp "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_getpid_trampoline()
-//go:linkname libc_getpid libc_getpid
//go:cgo_import_dynamic libc_getpid getpid "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_getppid_trampoline()
-//go:linkname libc_getppid libc_getppid
//go:cgo_import_dynamic libc_getppid getppid "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_getpriority_trampoline()
-//go:linkname libc_getpriority libc_getpriority
//go:cgo_import_dynamic libc_getpriority getpriority "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_getrlimit_trampoline()
-//go:linkname libc_getrlimit libc_getrlimit
//go:cgo_import_dynamic libc_getrlimit getrlimit "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_getrusage_trampoline()
-//go:linkname libc_getrusage libc_getrusage
//go:cgo_import_dynamic libc_getrusage getrusage "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_getsid_trampoline()
-//go:linkname libc_getsid libc_getsid
//go:cgo_import_dynamic libc_getsid getsid "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_getuid_trampoline()
-//go:linkname libc_getuid libc_getuid
//go:cgo_import_dynamic libc_getuid getuid "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_issetugid_trampoline()
-//go:linkname libc_issetugid libc_issetugid
//go:cgo_import_dynamic libc_issetugid issetugid "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_kqueue_trampoline()
-//go:linkname libc_kqueue libc_kqueue
//go:cgo_import_dynamic libc_kqueue kqueue "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_lchown_trampoline()
-//go:linkname libc_lchown libc_lchown
//go:cgo_import_dynamic libc_lchown lchown "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_link_trampoline()
-//go:linkname libc_link libc_link
//go:cgo_import_dynamic libc_link link "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_listen_trampoline()
-//go:linkname libc_listen libc_listen
//go:cgo_import_dynamic libc_listen listen "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_mkdir_trampoline()
-//go:linkname libc_mkdir libc_mkdir
//go:cgo_import_dynamic libc_mkdir mkdir "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_mkfifo_trampoline()
-//go:linkname libc_mkfifo libc_mkfifo
//go:cgo_import_dynamic libc_mkfifo mkfifo "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_mknod_trampoline()
-//go:linkname libc_mknod libc_mknod
//go:cgo_import_dynamic libc_mknod mknod "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_mlock_trampoline()
-//go:linkname libc_mlock libc_mlock
//go:cgo_import_dynamic libc_mlock mlock "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_mlockall_trampoline()
-//go:linkname libc_mlockall libc_mlockall
//go:cgo_import_dynamic libc_mlockall mlockall "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_mprotect_trampoline()
-//go:linkname libc_mprotect libc_mprotect
//go:cgo_import_dynamic libc_mprotect mprotect "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_munlock_trampoline()
-//go:linkname libc_munlock libc_munlock
//go:cgo_import_dynamic libc_munlock munlock "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_munlockall_trampoline()
-//go:linkname libc_munlockall libc_munlockall
//go:cgo_import_dynamic libc_munlockall munlockall "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_open_trampoline()
-//go:linkname libc_open libc_open
//go:cgo_import_dynamic libc_open open "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_pathconf_trampoline()
-//go:linkname libc_pathconf libc_pathconf
//go:cgo_import_dynamic libc_pathconf pathconf "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_pread_trampoline()
-//go:linkname libc_pread libc_pread
//go:cgo_import_dynamic libc_pread pread "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_pwrite_trampoline()
-//go:linkname libc_pwrite libc_pwrite
//go:cgo_import_dynamic libc_pwrite pwrite "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_read_trampoline()
-//go:linkname libc_read libc_read
//go:cgo_import_dynamic libc_read read "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_readdir_r_trampoline()
-//go:linkname libc_readdir_r libc_readdir_r
//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_readlink_trampoline()
-//go:linkname libc_readlink libc_readlink
//go:cgo_import_dynamic libc_readlink readlink "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_rename_trampoline()
-//go:linkname libc_rename libc_rename
//go:cgo_import_dynamic libc_rename rename "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_revoke_trampoline()
-//go:linkname libc_revoke libc_revoke
//go:cgo_import_dynamic libc_revoke revoke "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_rmdir_trampoline()
-//go:linkname libc_rmdir libc_rmdir
//go:cgo_import_dynamic libc_rmdir rmdir "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_lseek_trampoline()
-//go:linkname libc_lseek libc_lseek
//go:cgo_import_dynamic libc_lseek lseek "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_select_trampoline()
-//go:linkname libc_select libc_select
//go:cgo_import_dynamic libc_select select "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_setegid_trampoline()
-//go:linkname libc_setegid libc_setegid
//go:cgo_import_dynamic libc_setegid setegid "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_seteuid_trampoline()
-//go:linkname libc_seteuid libc_seteuid
//go:cgo_import_dynamic libc_seteuid seteuid "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_setgid_trampoline()
-//go:linkname libc_setgid libc_setgid
//go:cgo_import_dynamic libc_setgid setgid "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_setlogin_trampoline()
-//go:linkname libc_setlogin libc_setlogin
//go:cgo_import_dynamic libc_setlogin setlogin "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_setpgid_trampoline()
-//go:linkname libc_setpgid libc_setpgid
//go:cgo_import_dynamic libc_setpgid setpgid "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_setpriority_trampoline()
-//go:linkname libc_setpriority libc_setpriority
//go:cgo_import_dynamic libc_setpriority setpriority "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_setprivexec_trampoline()
-//go:linkname libc_setprivexec libc_setprivexec
//go:cgo_import_dynamic libc_setprivexec setprivexec "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_setregid_trampoline()
-//go:linkname libc_setregid libc_setregid
//go:cgo_import_dynamic libc_setregid setregid "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_setreuid_trampoline()
-//go:linkname libc_setreuid libc_setreuid
//go:cgo_import_dynamic libc_setreuid setreuid "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_setrlimit_trampoline()
-//go:linkname libc_setrlimit libc_setrlimit
//go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_setsid_trampoline()
-//go:linkname libc_setsid libc_setsid
//go:cgo_import_dynamic libc_setsid setsid "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_settimeofday_trampoline()
-//go:linkname libc_settimeofday libc_settimeofday
//go:cgo_import_dynamic libc_settimeofday settimeofday "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_setuid_trampoline()
-//go:linkname libc_setuid libc_setuid
//go:cgo_import_dynamic libc_setuid setuid "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_symlink_trampoline()
-//go:linkname libc_symlink libc_symlink
//go:cgo_import_dynamic libc_symlink symlink "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_sync_trampoline()
-//go:linkname libc_sync libc_sync
//go:cgo_import_dynamic libc_sync sync "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_truncate_trampoline()
-//go:linkname libc_truncate libc_truncate
//go:cgo_import_dynamic libc_truncate truncate "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_umask_trampoline()
-//go:linkname libc_umask libc_umask
//go:cgo_import_dynamic libc_umask umask "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_undelete_trampoline()
-//go:linkname libc_undelete libc_undelete
//go:cgo_import_dynamic libc_undelete undelete "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_unlink_trampoline()
-//go:linkname libc_unlink libc_unlink
//go:cgo_import_dynamic libc_unlink unlink "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_unmount_trampoline()
-//go:linkname libc_unmount libc_unmount
//go:cgo_import_dynamic libc_unmount unmount "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_write_trampoline()
-//go:linkname libc_write libc_write
//go:cgo_import_dynamic libc_write write "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_writev_trampoline()
-//go:linkname libc_writev libc_writev
//go:cgo_import_dynamic libc_writev writev "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_mmap_trampoline()
-//go:linkname libc_mmap libc_mmap
//go:cgo_import_dynamic libc_mmap mmap "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_munmap_trampoline()
-//go:linkname libc_munmap libc_munmap
//go:cgo_import_dynamic libc_munmap munmap "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_fork_trampoline()
-//go:linkname libc_fork libc_fork
//go:cgo_import_dynamic libc_fork fork "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_ioctl_trampoline()
-//go:linkname libc_ioctl libc_ioctl
//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_execve_trampoline()
-//go:linkname libc_execve libc_execve
//go:cgo_import_dynamic libc_execve execve "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_exit_trampoline()
-//go:linkname libc_exit libc_exit
//go:cgo_import_dynamic libc_exit exit "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_sysctl_trampoline()
-//go:linkname libc_sysctl libc_sysctl
//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_unlinkat_trampoline()
-//go:linkname libc_unlinkat libc_unlinkat
//go:cgo_import_dynamic libc_unlinkat unlinkat "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_openat_trampoline()
-//go:linkname libc_openat libc_openat
//go:cgo_import_dynamic libc_openat openat "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_getcwd_trampoline()
-//go:linkname libc_getcwd libc_getcwd
//go:cgo_import_dynamic libc_getcwd getcwd "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_fstat_trampoline()
-//go:linkname libc_fstat libc_fstat
//go:cgo_import_dynamic libc_fstat fstat "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_fstatfs_trampoline()
-//go:linkname libc_fstatfs libc_fstatfs
//go:cgo_import_dynamic libc_fstatfs fstatfs "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_gettimeofday_trampoline()
-//go:linkname libc_gettimeofday libc_gettimeofday
//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_lstat_trampoline()
-//go:linkname libc_lstat libc_lstat
//go:cgo_import_dynamic libc_lstat lstat "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_stat_trampoline()
-//go:linkname libc_stat libc_stat
//go:cgo_import_dynamic libc_stat stat "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_statfs_trampoline()
-//go:linkname libc_statfs libc_statfs
//go:cgo_import_dynamic libc_statfs statfs "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_fstatat_trampoline()
-//go:linkname libc_fstatat libc_fstatat
//go:cgo_import_dynamic libc_fstatat fstatat "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func libc_ptrace_trampoline()
-//go:linkname libc_ptrace libc_ptrace
//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib"
const LargeC = LargeB * LargeB * LargeB // GC_ERROR "constant multiplication overflow"
const AlsoLargeA = LargeA << 400 << 400 >> 400 >> 400 // GC_ERROR "constant shift overflow"
+
+// Issue #42732.
+
+const a = 1e+500000000
+const b = a * a // ERROR "constant multiplication overflow"
+const c = b * b
+
+const MaxInt512 = (1<<256 - 1) * (1<<256 + 1)
+const _ = MaxInt512 + 1 // ERROR "constant addition overflow"
+const _ = MaxInt512 ^ -1 // ERROR "constant bitwise XOR overflow"
+const _ = ^MaxInt512 // ERROR "constant bitwise complement overflow"
// **in -> heap
func param8(i **int) { // ERROR "i does not escape$"
- sink = **i // ERROR "\* \(\*i\) escapes to heap"
+ sink = **i // ERROR "\*\(\*i\) escapes to heap"
}
func caller8() {
var p *int
v := &Val{&p} // ERROR "&Val{...} does not escape$"
v.param13(&i)
- sink = **v.p // ERROR "\* \(\*v\.p\) escapes to heap"
+ sink = **v.p // ERROR "\*\(\*v\.p\) escapes to heap"
}
type Node struct {
var x interface{}
switch t := x.(type) {
case 0: // ERROR "type"
- t.x = 1 // ERROR "type interface \{\}|reference to undefined field or method|interface with no methods"
+ t.x = 1
+ x.x = 1 // ERROR "type interface \{\}|reference to undefined field or method|interface with no methods"
}
}
}
func main() {
- _ = T {
- os.File: 1, // ERROR "unknown T? ?field"
+ _ = T{
+ os.File: 1, // ERROR "invalid field name os.File|unknown field"
}
}
package main
-const _ = 6e5518446744 // ERROR "malformed constant: 6e5518446744 \(exponent overflow\)"
+const x = 6e5518446744 // ERROR "malformed constant: 6e5518446744"
+const _ = x * x
const _ = 1e-1000000000
-const _ = 1e+1000000000 // ERROR "constant too large"
+const _ = 1e+1000000000 // ERROR "malformed constant: 1e\+1000000000"
// 1
var f byte
-var f interface{} // ERROR "previous declaration at issue20415.go:12|redefinition"
+var f interface{} // ERROR "issue20415.go:12: previous declaration|redefinition"
func _(f int) {
}
func _(g int) {
}
-var g interface{} // ERROR "previous declaration at issue20415.go:20|redefinition"
+var g interface{} // ERROR "issue20415.go:20: previous declaration|redefinition"
// 3
func _(h int) {
var h byte
-var h interface{} // ERROR "previous declaration at issue20415.go:31|redefinition"
+var h interface{} // ERROR "issue20415.go:31: previous declaration|redefinition"
func F() {
slice := []int{1, 2, 3}
len := int(2)
- println(len(slice)) // ERROR "cannot call non-function len .type int., declared at|expected function"
+ println(len(slice)) // ERROR "cannot call non-function len .type int., declared at LINE-1|expected function"
+ const iota = 1
+ println(iota(slice)) // ERROR "cannot call non-function iota .type int., declared at LINE-1|expected function"
}
type T [uintptr(unsafe.Pointer(nil))]int // ERROR "non-constant array bound|array bound is not constant"
func f() {
- _ = complex(1<<uintptr(unsafe.Pointer(nil)), 0) // GCCGO_ERROR "non-integer type for left operand of shift"
+ _ = complex(1<<uintptr(unsafe.Pointer(nil)), 0) // ERROR "shift of type float64|non-integer type for left operand of shift"
}
--- /dev/null
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import . "strings"
+
+var _ = Index // use strings
+
+type t struct{ Index int }
+
+var _ = t{Index: 0}
--- /dev/null
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import . "bytes"
+
+var _ = Index // use bytes
+
+var _ = t{Index: 0}
--- /dev/null
+// compiledir
+
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
--- /dev/null
+// errorcheck
+
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import . "bytes"
+
+var _ Buffer // use package bytes
+
+var Index byte // ERROR "Index redeclared.*\n\tLINE-4: previous declaration during import .bytes.|already declared|redefinition"
--- /dev/null
+// errorcheck
+
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import . "testing" // ERROR "imported and not used"
+
+type S struct {
+ T int
+}
+
+var _ = S{T: 0}
func (t *T2) M() {}
func (t *T2) _() {}
-// Check that nothing satisfies an interface with blank methods.
-var b1 B1 = &T2{} // ERROR "incompatible|missing _ method"
-var b2 B2 = &T2{} // ERROR "incompatible|missing _ method"
+// Already reported about the invalid blank interface method above;
+// no need to report about not implementing it.
+var b1 B1 = &T2{}
+var b2 B2 = &T2{}
--- /dev/null
+// errorcheck
+
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests that errors are reported for misuse of linkname.
+package p
+
+import _ "unsafe" // blank-imported so the linkname directives below are permitted in this file
+
+type t int // a type, not a func or var: an invalid linkname target below
+
+var x, y int
+
+//go:linkname x ok
+
+// ERROR "//go:linkname requires linkname argument or -p compiler flag"
+// BAD: want error "//go:linkname must refer to declared function or variable"
+// BAD: want error "//go:linkname must refer to declared function or variable"
+// ERROR "duplicate //go:linkname for x"
+
+// The two BAD lines are just waiting for #42938 before we can
+// re-enable the errors.
+
+//line linkname2.go:18
+//go:linkname y
+//go:linkname nonexist nonexist
+//go:linkname t notvarfunc
+//go:linkname x duplicate
}
ret := T{}
ret.s[0] = f()
- return ret // ERROR "stack object .autotmp_5 T"
+ return ret // ERROR "stack object .autotmp_[0-9]+ T"
}
--- /dev/null
+// errorcheck
+
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main // the compiler checks the signatures of main and init declared here
+
+func main(int) {} // ERROR "func main must have no arguments and no return values"
+func main() int { return 1 } // ERROR "func main must have no arguments and no return values" "main redeclared in this block"
+
+func init(int) {} // ERROR "func init must have no arguments and no return values"
+func init() int { return 1 } // ERROR "func init must have no arguments and no return values"
log.Fatal(err)
}
- cmd := exec.Command("go", "build")
+ // Turn off ABI0 wrapper generation for now. The problem here is
+ // that in these test cases main.main is an assembly routine,
+ // thus calls to it will have to go through an ABI wrapper. The
+ // ABI wrapper will consume some stack space, which throws off
+ // the numbers.
+ workaround := "-gcflags=-abiwrap=0"
+
+ cmd := exec.Command("go", "build", workaround)
cmd.Dir = dir
output, err := cmd.CombinedOutput()
if err == nil {
--- /dev/null
+// errorcheck
+
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p // fixture declarations for the unused-expression diagnostics exercised below
+
+import "unsafe" // exercised by the Alignof/Offsetof/Sizeof statements
+
+const C = 1
+
+var x, x1, x2 int // x feeds the operator cases; x1 and x2 get dedicated diagnostics
+var b bool
+var s string
+var c chan int
+var cp complex128
+var slice []int
+var array [2]int
+var bytes []byte
+var runes []rune
+var r rune
+
+func f0() {}
+func f1() int { return 1 }
+func f2() (int, int) { return 1, 1 } // two results: exercises multi-value assignment contexts
+
+type T struct{ X int }
+
+func (T) M1() int { return 1 }
+func (T) M0() {}
+func (T) M() {}
+
+var t T
+var tp *T
+
+type I interface{ M() }
+
+var i I
+
+var m map[int]int
+func _() { // compile-only body: each statement below pins one compiler diagnostic (or is explicitly ok)
+ // Note: if the next line changes to x, the error silences the x+x etc below!
+ x1 // ERROR "x1 evaluated but not used"
+
+ nil // ERROR "nil evaluated but not used"
+ C // ERROR "C evaluated but not used"
+ 1 // ERROR "1 evaluated but not used"
+ x + x // ERROR "x \+ x evaluated but not used"
+ x - x // ERROR "x - x evaluated but not used"
+ x | x // ERROR "x \| x evaluated but not used"
+ "a" + s // ERROR ".a. \+ s evaluated but not used"
+ &x // ERROR "&x evaluated but not used"
+ b && b // ERROR "b && b evaluated but not used"
+ append(slice, 1) // ERROR "append\(slice, 1\) evaluated but not used"
+ string(bytes) // ERROR "string\(bytes\) evaluated but not used"
+ string(runes) // ERROR "string\(runes\) evaluated but not used"
+ f0() // ok
+ f1() // ok
+ f2() // ok
+ _ = f0() // ERROR "f0\(\) used as value"
+ _ = f1() // ok
+ _, _ = f2() // ok
+ _ = f2() // ERROR "assignment mismatch: 1 variable but f2 returns 2 values"
+ T.M0 // ERROR "T.M0 evaluated but not used"
+ t.M0 // ERROR "t.M0 evaluated but not used"
+ cap // ERROR "use of builtin cap not in function call"
+ cap(slice) // ERROR "cap\(slice\) evaluated but not used"
+ close(c) // ok
+ _ = close(c) // ERROR "close\(c\) used as value"
+ func() {} // ERROR "func literal evaluated but not used"
+ X{} // ERROR "undefined: X"
+ map[string]int{} // ERROR "map\[string\]int{} evaluated but not used"
+ struct{}{} // ERROR "struct ?{}{} evaluated but not used"
+ [1]int{} // ERROR "\[1\]int{} evaluated but not used"
+ []int{} // ERROR "\[\]int{} evaluated but not used"
+ &struct{}{} // ERROR "&struct ?{}{} evaluated but not used"
+ float32(x) // ERROR "float32\(x\) evaluated but not used"
+ I(t) // ERROR "I\(t\) evaluated but not used"
+ int(x) // ERROR "int\(x\) evaluated but not used"
+ copy(slice, slice) // ok
+ _ = copy(slice, slice) // ok
+ delete(m, 1) // ok
+ _ = delete(m, 1) // ERROR "delete\(m, 1\) used as value"
+ t.X // ERROR "t.X evaluated but not used"
+ tp.X // ERROR "tp.X evaluated but not used"
+ t.M // ERROR "t.M evaluated but not used"
+ I.M // ERROR "I.M evaluated but not used"
+ i.(T) // ERROR "i.\(T\) evaluated but not used"
+ x == x // ERROR "x == x evaluated but not used"
+ x != x // ERROR "x != x evaluated but not used"
+ x != x // ERROR "x != x evaluated but not used"
+ x < x // ERROR "x < x evaluated but not used"
+ x >= x // ERROR "x >= x evaluated but not used"
+ x > x // ERROR "x > x evaluated but not used"
+ *tp // ERROR "\*tp evaluated but not used"
+ slice[0] // ERROR "slice\[0\] evaluated but not used"
+ m[1] // ERROR "m\[1\] evaluated but not used"
+ len(slice) // ERROR "len\(slice\) evaluated but not used"
+ make(chan int) // ERROR "make\(chan int\) evaluated but not used"
+ make(map[int]int) // ERROR "make\(map\[int\]int\) evaluated but not used"
+ make([]int, 1) // ERROR "make\(\[\]int, 1\) evaluated but not used"
+ x * x // ERROR "x \* x evaluated but not used"
+ x / x // ERROR "x / x evaluated but not used"
+ x % x // ERROR "x % x evaluated but not used"
+ x << x // ERROR "x << x evaluated but not used"
+ x >> x // ERROR "x >> x evaluated but not used"
+ x & x // ERROR "x & x evaluated but not used"
+ x &^ x // ERROR "x &\^ x evaluated but not used"
+ new(int) // ERROR "new\(int\) evaluated but not used"
+ !b // ERROR "!b evaluated but not used"
+ ^x // ERROR "\^x evaluated but not used"
+ +x // ERROR "\+x evaluated but not used"
+ -x // ERROR "-x evaluated but not used"
+ b || b // ERROR "b \|\| b evaluated but not used"
+ panic(1) // ok
+ _ = panic(1) // ERROR "panic\(1\) used as value"
+ print(1) // ok
+ _ = print(1) // ERROR "print\(1\) used as value"
+ println(1) // ok
+ _ = println(1) // ERROR "println\(1\) used as value"
+ c <- 1 // ok
+ slice[1:1] // ERROR "slice\[1:1\] evaluated but not used"
+ array[1:1] // ERROR "array\[1:1\] evaluated but not used"
+ s[1:1] // ERROR "s\[1:1\] evaluated but not used"
+ slice[1:1:1] // ERROR "slice\[1:1:1\] evaluated but not used"
+ array[1:1:1] // ERROR "array\[1:1:1\] evaluated but not used"
+ recover() // ok
+ <-c // ok
+ string(r) // ERROR "string\(r\) evaluated but not used"
+ iota // ERROR "undefined: iota"
+ real(cp) // ERROR "real\(cp\) evaluated but not used"
+ imag(cp) // ERROR "imag\(cp\) evaluated but not used"
+ complex(1, 2) // ERROR "complex\(1, 2\) evaluated but not used"
+ unsafe.Alignof(t.X) // ERROR "unsafe.Alignof\(t.X\) evaluated but not used"
+ unsafe.Offsetof(t.X) // ERROR "unsafe.Offsetof\(t.X\) evaluated but not used"
+ unsafe.Sizeof(t) // ERROR "unsafe.Sizeof\(t\) evaluated but not used"
+ _ = int // ERROR "type int is not an expression"
+ (x) // ERROR "x evaluated but not used"
+ _ = new(x2) // ERROR "x2 is not a type"
+ _ = new(1 + 1) // ERROR "1 \+ 1 is not a type"
+}