x86.ACWD: {gc.OK, AX, AX | DX, 0},
x86.ACLD: {gc.OK, 0, 0, 0},
x86.ASTD: {gc.OK, 0, 0, 0},
+ x86.ACMOVQCC: {gc.SizeQ | gc.LeftRead | gc.RightRead | gc.RightWrite | gc.UseCarry, 0, 0, 0},
+ x86.ACMOVQCS: {gc.SizeQ | gc.LeftRead | gc.RightRead | gc.RightWrite | gc.UseCarry, 0, 0, 0},
x86.ACMPB: {gc.SizeB | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
x86.ACMPL: {gc.SizeL | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
x86.ACMPQ: {gc.SizeQ | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
TFORW: "FORW",
TFIELD: "FIELD",
TSTRING: "STRING",
+ TUNSAFEPTR: "TUNSAFEPTR",
TANY: "ANY",
}
return lab
}
+// There is a copy of checkgoto in the new SSA backend.
+// Please keep them in sync.
func checkgoto(from *Node, to *Node) {
if from.Sym == to.Sym {
return
cgen_dcl(n.Left)
case OAS:
- if gen_as_init(n) {
+ if gen_as_init(n, false) {
break
}
Cgen_as(n.Left, n.Right)
Note *string // literal string annotation
// TARRAY
- Bound int64 // negative is dynamic array
+ Bound int64 // negative is slice
// TMAP
Bucket *Type // internal type representing a hash bucket
package gc
import (
+ "cmd/compile/internal/ssa"
"cmd/internal/obj"
"crypto/md5"
"fmt"
var nam *Node
var gcargs *Sym
var gclocals *Sym
+ var ssafn *ssa.Func
+ var usessa bool
if fn.Nbody == nil {
if pure_go != 0 || strings.HasPrefix(fn.Func.Nname.Sym.Name, "init.") {
Yyerror("missing function body for %q", fn.Func.Nname.Sym.Name)
goto ret
}
+ // Build an SSA backend function.
+ // TODO: get rid of usessa.
+ ssafn, usessa = buildssa(Curfn)
+
continpc = nil
breakpc = nil
}
}
+ if ssafn != nil && usessa {
+ genssa(ssafn, ptxt, gcargs, gclocals)
+ return
+ }
Genlist(Curfn.Func.Enter)
Genlist(Curfn.Nbody)
gclean()
return -1
}
+// stataddr sets nam to the static address of n and reports whether it succeeded.
func stataddr(nam *Node, n *Node) bool {
if n == nil {
return false
return &p.E[len(p.E)-1]
}
-func gen_as_init(n *Node) bool {
+// gen_as_init attempts to emit static data for n and reports whether it succeeded.
+// If reportOnly is true, it does not emit static data and does not modify the AST.
+func gen_as_init(n *Node, reportOnly bool) bool {
var nr *Node
var nl *Node
var nam Node
case OSLICEARR:
if nr.Right.Op == OKEY && nr.Right.Left == nil && nr.Right.Right == nil {
nr = nr.Left
- gused(nil) // in case the data is the dest of a goto
+ if !reportOnly {
+ gused(nil) // in case the data is the dest of a goto
+ }
nl := nr
if nr == nil || nr.Op != OADDR {
goto no
goto no
}
- nam.Xoffset += int64(Array_array)
- gdata(&nam, nl, int(Types[Tptr].Width))
+ if !reportOnly {
+ nam.Xoffset += int64(Array_array)
+ gdata(&nam, nl, int(Types[Tptr].Width))
- nam.Xoffset += int64(Array_nel) - int64(Array_array)
- var nod1 Node
- Nodconst(&nod1, Types[TINT], nr.Type.Bound)
- gdata(&nam, &nod1, Widthint)
+ nam.Xoffset += int64(Array_nel) - int64(Array_array)
+ var nod1 Node
+ Nodconst(&nod1, Types[TINT], nr.Type.Bound)
+ gdata(&nam, &nod1, Widthint)
- nam.Xoffset += int64(Array_cap) - int64(Array_nel)
- gdata(&nam, &nod1, Widthint)
+ nam.Xoffset += int64(Array_cap) - int64(Array_nel)
+ gdata(&nam, &nod1, Widthint)
+ }
return true
}
TPTR64,
TFLOAT32,
TFLOAT64:
- gdata(&nam, nr, int(nr.Type.Width))
+ if !reportOnly {
+ gdata(&nam, nr, int(nr.Type.Width))
+ }
case TCOMPLEX64, TCOMPLEX128:
- gdatacomplex(&nam, nr.Val().U.(*Mpcplx))
+ if !reportOnly {
+ gdatacomplex(&nam, nr.Val().U.(*Mpcplx))
+ }
case TSTRING:
- gdatastring(&nam, nr.Val().U.(string))
+ if !reportOnly {
+ gdatastring(&nam, nr.Val().U.(string))
+ }
}
return true
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ "cmd/compile/internal/ssa"
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+)
+
+// buildssa builds an SSA function
+// and reports whether it should be used.
+// Once the SSA implementation is complete,
+// it will never return nil, and the bool can be removed.
+func buildssa(fn *Node) (ssafn *ssa.Func, usessa bool) {
+	name := fn.Func.Nname.Sym.Name
+	// Functions whose name ends in "_ssa" opt in to SSA compilation.
+	usessa = strings.HasSuffix(name, "_ssa")
+
+	if usessa {
+		fmt.Println("generating SSA for", name)
+		dumplist("buildssa-enter", fn.Func.Enter)
+		dumplist("buildssa-body", fn.Nbody)
+	}
+
+	var s state
+	s.pushLine(fn.Lineno)
+	defer s.popLine()
+
+	// TODO(khr): build config just once at the start of the compiler binary
+
+	var e ssaExport
+	e.log = usessa
+	s.config = ssa.NewConfig(Thearch.Thestring, &e)
+	s.f = s.config.NewFunc()
+	s.f.Name = name
+
+	// If SSA support for the function is incomplete,
+	// assume that any panics are due to violated
+	// invariants. Swallow them silently.
+	defer func() {
+		if err := recover(); err != nil {
+			if !e.unimplemented {
+				panic(err)
+			}
+		}
+	}()
+
+	// We construct SSA using an algorithm similar to
+	// Brau, Buchwald, Hack, Leißa, Mallon, and Zwinkau
+	// http://pp.info.uni-karlsruhe.de/uploads/publikationen/braun13cc.pdf
+	// TODO: check this comment
+
+	// Allocate starting block
+	s.f.Entry = s.f.NewBlock(ssa.BlockPlain)
+
+	// Allocate exit block
+	s.exit = s.f.NewBlock(ssa.BlockExit)
+
+	// Allocate starting values
+	s.vars = map[*Node]*ssa.Value{}
+	s.labels = map[string]*ssaLabel{}
+	s.labeledNodes = map[*Node]*ssaLabel{}
+	s.startmem = s.entryNewValue0(ssa.OpArg, ssa.TypeMem)
+	s.sp = s.entryNewValue0(ssa.OpSP, s.config.Uintptr) // TODO: use generic pointer type (unsafe.Pointer?) instead
+	s.sb = s.entryNewValue0(ssa.OpSB, s.config.Uintptr)
+
+	// Generate addresses of local declarations
+	s.decladdrs = map[*Node]*ssa.Value{}
+	for d := fn.Func.Dcl; d != nil; d = d.Next {
+		n := d.N
+		switch n.Class {
+		case PPARAM, PPARAMOUT:
+			aux := &ssa.ArgSymbol{Typ: n.Type, Offset: n.Xoffset, Sym: n.Sym}
+			s.decladdrs[n] = s.entryNewValue1A(ssa.OpAddr, Ptrto(n.Type), aux, s.sp)
+		case PAUTO:
+			aux := &ssa.AutoSymbol{Typ: n.Type, Offset: -1, Sym: n.Sym} // offset TBD by SSA pass
+			s.decladdrs[n] = s.entryNewValue1A(ssa.OpAddr, Ptrto(n.Type), aux, s.sp)
+		default:
+			str := ""
+			if n.Class&PHEAP != 0 {
+				str = ",heap"
+			}
+			s.Unimplementedf("local variable %v with class %s%s unimplemented", n, classnames[n.Class&^PHEAP], str)
+		}
+	}
+	// nodfp is a special argument which is the function's FP.
+	aux := &ssa.ArgSymbol{Typ: s.config.Uintptr, Offset: 0, Sym: nodfp.Sym}
+	s.decladdrs[nodfp] = s.entryNewValue1A(ssa.OpAddr, s.config.Uintptr, aux, s.sp)
+
+	// Convert the AST-based IR to the SSA-based IR
+	s.startBlock(s.f.Entry)
+	s.stmtList(fn.Func.Enter)
+	s.stmtList(fn.Nbody)
+
+	// fallthrough to exit
+	if b := s.endBlock(); b != nil {
+		addEdge(b, s.exit)
+	}
+
+	// Finish up exit block
+	s.startBlock(s.exit)
+	s.exit.Control = s.mem()
+	s.endBlock()
+
+	// Check that we used all labels
+	for name, lab := range s.labels {
+		if !lab.used() && !lab.reported {
+			yyerrorl(int(lab.defNode.Lineno), "label %v defined and not used", name)
+			lab.reported = true
+		}
+		if lab.used() && !lab.defined() && !lab.reported {
+			yyerrorl(int(lab.useNode.Lineno), "label %v not defined", name)
+			lab.reported = true
+		}
+	}
+
+	// Check any forward gotos. Non-forward gotos have already been checked.
+	for _, n := range s.fwdGotos {
+		lab := s.labels[n.Left.Sym.Name]
+		// If the label is undefined, we have already printed an error.
+		if lab.defined() {
+			s.checkgoto(n, lab.defNode)
+		}
+	}
+
+	if nerrors > 0 {
+		return nil, false
+	}
+
+	// Link up variable uses to variable definitions
+	s.linkForwardReferences()
+
+	// Main call to ssa package to compile function
+	ssa.Compile(s.f)
+
+	// Calculate stats about what percentage of functions SSA handles.
+	if false {
+		fmt.Printf("SSA implemented: %t\n", !e.unimplemented)
+	}
+
+	if e.unimplemented {
+		return nil, false
+	}
+
+	// TODO: enable codegen more broadly once the codegen stabilizes
+	// and runtime support is in (gc maps, write barriers, etc.)
+	// The GOSSAFUNC/GOSSAPKG environment variables force SSA codegen
+	// for a single function or package.
+	return s.f, usessa || name == os.Getenv("GOSSAFUNC") || localpkg.Name == os.Getenv("GOSSAPKG")
+}
+
+// state holds the working state used while converting a single
+// function's AST to SSA form.
+type state struct {
+	// configuration (arch) information
+	config *ssa.Config
+
+	// function we're building
+	f *ssa.Func
+
+	// exit block that "return" jumps to (and panics jump to)
+	exit *ssa.Block
+
+	// labels and labeled control flow nodes (OFOR, OSWITCH, OSELECT) in f
+	labels       map[string]*ssaLabel
+	labeledNodes map[*Node]*ssaLabel
+
+	// gotos that jump forward; required for deferred checkgoto calls
+	fwdGotos []*Node
+
+	// unlabeled break and continue statement tracking
+	breakTo    *ssa.Block // current target for plain break statement
+	continueTo *ssa.Block // current target for plain continue statement
+
+	// current location where we're interpreting the AST
+	curBlock *ssa.Block
+
+	// variable assignments in the current block (map from variable symbol to ssa value)
+	// *Node is the unique identifier (an ONAME Node) for the variable.
+	vars map[*Node]*ssa.Value
+
+	// all defined variables at the end of each block. Indexed by block ID.
+	defvars []map[*Node]*ssa.Value
+
+	// addresses of PPARAM, PPARAMOUT, and PAUTO variables.
+	decladdrs map[*Node]*ssa.Value
+
+	// starting values. Memory, frame pointer, and stack pointer
+	startmem *ssa.Value
+	sp       *ssa.Value
+	sb       *ssa.Value
+
+	// line number stack. The current line number is top of stack
+	line []int32
+}
+
+// An ssaLabel records the target blocks and the defining/using AST
+// nodes associated with one label during SSA construction.
+type ssaLabel struct {
+	target         *ssa.Block // block identified by this label
+	breakTarget    *ssa.Block // block to break to in control flow node identified by this label
+	continueTarget *ssa.Block // block to continue to in control flow node identified by this label
+	defNode        *Node      // label definition Node (OLABEL)
+	// Label use Node (OGOTO, OBREAK, OCONTINUE).
+	// Used only for error detection and reporting.
+	// There might be multiple uses, but we only need to track one.
+	useNode  *Node
+	reported bool // reported indicates whether an error has already been reported for this label
+}
+
+// defined reports whether a definition site (an OLABEL node) has been
+// recorded for this label.
+func (l *ssaLabel) defined() bool {
+	return l.defNode != nil
+}
+
+// used reports whether a use of this label (an OGOTO, OBREAK, or
+// OCONTINUE node) has been recorded.
+func (l *ssaLabel) used() bool {
+	return l.useNode != nil
+}
+
+// label returns the ssaLabel for sym, allocating and registering a
+// fresh one the first time sym is seen.
+func (s *state) label(sym *Sym) *ssaLabel {
+	if lab, ok := s.labels[sym.Name]; ok {
+		return lab
+	}
+	lab := &ssaLabel{}
+	s.labels[sym.Name] = lab
+	return lab
+}
+
+// Logf, Fatalf, and Unimplementedf delegate to the underlying ssa.Config.
+func (s *state) Logf(msg string, args ...interface{})   { s.config.Logf(msg, args...) }
+func (s *state) Fatalf(msg string, args ...interface{}) { s.config.Fatalf(msg, args...) }
+func (s *state) Unimplementedf(msg string, args ...interface{}) { s.config.Unimplementedf(msg, args...) }
+
+// memvar is a dummy node for the memory variable.
+// NOTE(review): presumably used as the *Node key for memory values in
+// s.vars/s.defvars — confirm against the variable-linking code.
+var memvar = Node{Op: ONAME, Sym: &Sym{Name: "mem"}}
+
+// startBlock makes b the block that newly generated code is added to.
+// It is an internal error to start a block before the previous one has ended.
+func (s *state) startBlock(b *ssa.Block) {
+	if prev := s.curBlock; prev != nil {
+		s.Fatalf("starting block %v when block %v has not ended", b, prev)
+	}
+	s.curBlock = b
+	s.vars = make(map[*Node]*ssa.Value)
+}
+
+// endBlock marks the end of generating code for the current block.
+// Returns the (former) current block. Returns nil if there is no current
+// block, i.e. if no code flows to the current execution point.
+func (s *state) endBlock() *ssa.Block {
+	b := s.curBlock
+	if b == nil {
+		return nil
+	}
+	// Grow defvars so it can be indexed by block ID, then record the
+	// variable assignments made in this block.
+	for len(s.defvars) <= int(b.ID) {
+		s.defvars = append(s.defvars, nil)
+	}
+	s.defvars[b.ID] = s.vars
+	s.curBlock = nil
+	s.vars = nil
+	b.Line = s.peekLine()
+	return b
+}
+
+// pushLine pushes a line number on the line number stack,
+// making it the current line for newly created values.
+func (s *state) pushLine(line int32) {
+	s.line = append(s.line, line)
+}
+
+// popLine pops the top of the line number stack.
+func (s *state) popLine() {
+	s.line = s.line[:len(s.line)-1]
+}
+
+// peekLine returns the line number on top of the line number stack.
+func (s *state) peekLine() int32 {
+	return s.line[len(s.line)-1]
+}
+
+// Error reports a compile error at the current (innermost) line number.
+func (s *state) Error(msg string, args ...interface{}) {
+	yyerrorl(int(s.peekLine()), msg, args...)
+}
+
+// newValue0 adds a new value with no arguments to the current block,
+// at the current source line.
+func (s *state) newValue0(op ssa.Op, t ssa.Type) *ssa.Value {
+	return s.curBlock.NewValue0(s.peekLine(), op, t)
+}
+
+// newValue0A adds a new value with no arguments and an aux value to the current block.
+func (s *state) newValue0A(op ssa.Op, t ssa.Type, aux interface{}) *ssa.Value {
+	return s.curBlock.NewValue0A(s.peekLine(), op, t, aux)
+}
+
+// newValue1 adds a new value with one argument to the current block.
+func (s *state) newValue1(op ssa.Op, t ssa.Type, arg *ssa.Value) *ssa.Value {
+	return s.curBlock.NewValue1(s.peekLine(), op, t, arg)
+}
+
+// newValue1A adds a new value with one argument and an aux value to the current block.
+func (s *state) newValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
+	return s.curBlock.NewValue1A(s.peekLine(), op, t, aux, arg)
+}
+
+// newValue1I adds a new value with one argument and an auxint value to the current block.
+func (s *state) newValue1I(op ssa.Op, t ssa.Type, aux int64, arg *ssa.Value) *ssa.Value {
+	return s.curBlock.NewValue1I(s.peekLine(), op, t, aux, arg)
+}
+
+// newValue2 adds a new value with two arguments to the current block.
+func (s *state) newValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value {
+	return s.curBlock.NewValue2(s.peekLine(), op, t, arg0, arg1)
+}
+
+// newValue2I adds a new value with two arguments and an auxint value to the current block.
+func (s *state) newValue2I(op ssa.Op, t ssa.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
+	return s.curBlock.NewValue2I(s.peekLine(), op, t, aux, arg0, arg1)
+}
+
+// newValue3 adds a new value with three arguments to the current block.
+func (s *state) newValue3(op ssa.Op, t ssa.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
+	return s.curBlock.NewValue3(s.peekLine(), op, t, arg0, arg1, arg2)
+}
+
+// entryNewValue0 adds a new value with no arguments to the entry block.
+func (s *state) entryNewValue0(op ssa.Op, t ssa.Type) *ssa.Value {
+	return s.f.Entry.NewValue0(s.peekLine(), op, t)
+}
+
+// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
+func (s *state) entryNewValue0A(op ssa.Op, t ssa.Type, aux interface{}) *ssa.Value {
+	return s.f.Entry.NewValue0A(s.peekLine(), op, t, aux)
+}
+
+// entryNewValue1 adds a new value with one argument to the entry block.
+func (s *state) entryNewValue1(op ssa.Op, t ssa.Type, arg *ssa.Value) *ssa.Value {
+	return s.f.Entry.NewValue1(s.peekLine(), op, t, arg)
+}
+
+// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
+func (s *state) entryNewValue1I(op ssa.Op, t ssa.Type, auxint int64, arg *ssa.Value) *ssa.Value {
+	return s.f.Entry.NewValue1I(s.peekLine(), op, t, auxint, arg)
+}
+
+// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
+func (s *state) entryNewValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
+	return s.f.Entry.NewValue1A(s.peekLine(), op, t, aux, arg)
+}
+
+// entryNewValue2 adds a new value with two arguments to the entry block.
+func (s *state) entryNewValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value {
+	return s.f.Entry.NewValue2(s.peekLine(), op, t, arg0, arg1)
+}
+
+// constInt* routines add a new const int value to the entry block.
+func (s *state) constInt8(t ssa.Type, c int8) *ssa.Value {
+	return s.f.ConstInt8(s.peekLine(), t, c)
+}
+func (s *state) constInt16(t ssa.Type, c int16) *ssa.Value {
+	return s.f.ConstInt16(s.peekLine(), t, c)
+}
+func (s *state) constInt32(t ssa.Type, c int32) *ssa.Value {
+	return s.f.ConstInt32(s.peekLine(), t, c)
+}
+func (s *state) constInt64(t ssa.Type, c int64) *ssa.Value {
+	return s.f.ConstInt64(s.peekLine(), t, c)
+}
+func (s *state) constIntPtr(t ssa.Type, c int64) *ssa.Value {
+	// On 32-bit targets a pointer-sized constant must fit in 32 bits.
+	if s.config.PtrSize == 4 && int64(int32(c)) != c {
+		s.Fatalf("pointer constant too big %d", c)
+	}
+	return s.f.ConstIntPtr(s.peekLine(), t, c)
+}
+// constInt adds a constant sized to the target's int width.
+func (s *state) constInt(t ssa.Type, c int64) *ssa.Value {
+	if s.config.IntSize == 8 {
+		return s.constInt64(t, c)
+	}
+	if int64(int32(c)) != c {
+		s.Fatalf("integer constant too big %d", c)
+	}
+	return s.constInt32(t, int32(c))
+}
+
+// stmtList converts the statement list l to SSA and adds it to s.
+func (s *state) stmtList(l *NodeList) {
+	for ; l != nil; l = l.Next {
+		s.stmt(l.N)
+	}
+}
+
+// stmt converts the statement n to SSA and adds it to s.
+func (s *state) stmt(n *Node) {
+	s.pushLine(n.Lineno)
+	defer s.popLine()
+
+	// If s.curBlock is nil, then we're about to generate dead code.
+	// We can't just short-circuit here, though,
+	// because we check labels and gotos as part of SSA generation.
+	// Provide a block for the dead code so that we don't have
+	// to add special cases everywhere else.
+	if s.curBlock == nil {
+		dead := s.f.NewBlock(ssa.BlockPlain)
+		s.startBlock(dead)
+	}
+
+	s.stmtList(n.Ninit)
+	switch n.Op {
+
+	case OBLOCK:
+		s.stmtList(n.List)
+
+	// No-ops
+	case OEMPTY, ODCLCONST, ODCLTYPE:
+
+	// Expression statements
+	case OCALLFUNC, OCALLMETH, OCALLINTER:
+		s.expr(n)
+
+	case ODCL:
+		if n.Left.Class&PHEAP == 0 {
+			return
+		}
+		if compiling_runtime != 0 {
+			Fatal("%v escapes to heap, not allowed in runtime.", n)
+		}
+
+		// TODO: the old pass hides the details of PHEAP
+		// variables behind ONAME nodes. Figure out if it's better
+		// to rewrite the tree and make the heapaddr construct explicit
+		// or to keep this detail hidden behind the scenes.
+		palloc := prealloc[n.Left]
+		if palloc == nil {
+			palloc = callnew(n.Left.Type)
+			prealloc[n.Left] = palloc
+		}
+		s.assign(OAS, n.Left.Name.Heapaddr, palloc)
+
+	case OLABEL:
+		sym := n.Left.Sym
+
+		if isblanksym(sym) {
+			// Empty identifier is valid but useless.
+			// See issues 11589, 11593.
+			return
+		}
+
+		lab := s.label(sym)
+
+		// Associate label with its control flow node, if any
+		if ctl := n.Name.Defn; ctl != nil {
+			switch ctl.Op {
+			case OFOR, OSWITCH, OSELECT:
+				s.labeledNodes[ctl] = lab
+			}
+		}
+
+		if !lab.defined() {
+			lab.defNode = n
+		} else {
+			s.Error("label %v already defined at %v", sym, Ctxt.Line(int(lab.defNode.Lineno)))
+			lab.reported = true
+		}
+		// The label might already have a target block via a goto.
+		if lab.target == nil {
+			lab.target = s.f.NewBlock(ssa.BlockPlain)
+		}
+
+		// go to that label (we pretend "label:" is preceded by "goto label")
+		b := s.endBlock()
+		addEdge(b, lab.target)
+		s.startBlock(lab.target)
+
+	case OGOTO:
+		sym := n.Left.Sym
+
+		lab := s.label(sym)
+		if lab.target == nil {
+			lab.target = s.f.NewBlock(ssa.BlockPlain)
+		}
+		if !lab.used() {
+			lab.useNode = n
+		}
+
+		if lab.defined() {
+			s.checkgoto(n, lab.defNode)
+		} else {
+			s.fwdGotos = append(s.fwdGotos, n)
+		}
+
+		b := s.endBlock()
+		addEdge(b, lab.target)
+
+	case OAS, OASWB:
+		// Check whether we can generate static data rather than code.
+		// If so, ignore n and defer data generation until codegen.
+		// Failure to do this causes writes to readonly symbols.
+		if gen_as_init(n, true) {
+			var data []*Node
+			if s.f.StaticData != nil {
+				data = s.f.StaticData.([]*Node)
+			}
+			s.f.StaticData = append(data, n)
+			return
+		}
+		s.assign(n.Op, n.Left, n.Right)
+
+	case OIF:
+		cond := s.expr(n.Left)
+		b := s.endBlock()
+		b.Kind = ssa.BlockIf
+		b.Control = cond
+		// TODO(khr): likely direction
+
+		bThen := s.f.NewBlock(ssa.BlockPlain)
+		bEnd := s.f.NewBlock(ssa.BlockPlain)
+		var bElse *ssa.Block
+
+		if n.Rlist == nil {
+			addEdge(b, bThen)
+			addEdge(b, bEnd)
+		} else {
+			bElse = s.f.NewBlock(ssa.BlockPlain)
+			addEdge(b, bThen)
+			addEdge(b, bElse)
+		}
+
+		s.startBlock(bThen)
+		s.stmtList(n.Nbody)
+		if b := s.endBlock(); b != nil {
+			addEdge(b, bEnd)
+		}
+
+		if n.Rlist != nil {
+			s.startBlock(bElse)
+			s.stmtList(n.Rlist)
+			if b := s.endBlock(); b != nil {
+				addEdge(b, bEnd)
+			}
+		}
+		s.startBlock(bEnd)
+
+	case ORETURN:
+		s.stmtList(n.List)
+		b := s.endBlock()
+		addEdge(b, s.exit)
+
+	case OCONTINUE, OBREAK:
+		var op string
+		var to *ssa.Block
+		switch n.Op {
+		case OCONTINUE:
+			op = "continue"
+			to = s.continueTo
+		case OBREAK:
+			op = "break"
+			to = s.breakTo
+		}
+		if n.Left == nil {
+			// plain break/continue
+			if to == nil {
+				s.Error("%s is not in a loop", op)
+				return
+			}
+			// nothing to do; "to" is already the correct target
+		} else {
+			// labeled break/continue; look up the target
+			sym := n.Left.Sym
+			lab := s.label(sym)
+			if !lab.used() {
+				lab.useNode = n.Left
+			}
+			if !lab.defined() {
+				s.Error("%s label not defined: %v", op, sym)
+				lab.reported = true
+				return
+			}
+			switch n.Op {
+			case OCONTINUE:
+				to = lab.continueTarget
+			case OBREAK:
+				to = lab.breakTarget
+			}
+			if to == nil {
+				// Valid label but not usable with a break/continue here, e.g.:
+				// for {
+				// 	continue abc
+				// }
+				// abc:
+				// for {}
+				s.Error("invalid %s label %v", op, sym)
+				lab.reported = true
+				return
+			}
+		}
+
+		b := s.endBlock()
+		addEdge(b, to)
+
+	case OFOR:
+		// OFOR: for Ninit; Left; Right { Nbody }
+		bCond := s.f.NewBlock(ssa.BlockPlain)
+		bBody := s.f.NewBlock(ssa.BlockPlain)
+		bIncr := s.f.NewBlock(ssa.BlockPlain)
+		bEnd := s.f.NewBlock(ssa.BlockPlain)
+
+		// first, jump to condition test
+		b := s.endBlock()
+		addEdge(b, bCond)
+
+		// generate code to test condition
+		s.startBlock(bCond)
+		var cond *ssa.Value
+		if n.Left != nil {
+			cond = s.expr(n.Left)
+		} else {
+			// An absent condition is equivalent to "true".
+			cond = s.entryNewValue0A(ssa.OpConstBool, Types[TBOOL], true)
+		}
+		b = s.endBlock()
+		b.Kind = ssa.BlockIf
+		b.Control = cond
+		// TODO(khr): likely direction
+		addEdge(b, bBody)
+		addEdge(b, bEnd)
+
+		// set up for continue/break in body
+		prevContinue := s.continueTo
+		prevBreak := s.breakTo
+		s.continueTo = bIncr
+		s.breakTo = bEnd
+		lab := s.labeledNodes[n]
+		if lab != nil {
+			// labeled for loop
+			lab.continueTarget = bIncr
+			lab.breakTarget = bEnd
+		}
+
+		// generate body
+		s.startBlock(bBody)
+		s.stmtList(n.Nbody)
+
+		// tear down continue/break
+		s.continueTo = prevContinue
+		s.breakTo = prevBreak
+		if lab != nil {
+			lab.continueTarget = nil
+			lab.breakTarget = nil
+		}
+
+		// done with body, goto incr
+		if b := s.endBlock(); b != nil {
+			addEdge(b, bIncr)
+		}
+
+		// generate incr
+		s.startBlock(bIncr)
+		if n.Right != nil {
+			s.stmt(n.Right)
+		}
+		if b := s.endBlock(); b != nil {
+			addEdge(b, bCond)
+		}
+		s.startBlock(bEnd)
+
+	case OSWITCH, OSELECT:
+		// These have been mostly rewritten by the front end into their Nbody fields.
+		// Our main task is to correctly hook up any break statements.
+		bEnd := s.f.NewBlock(ssa.BlockPlain)
+
+		prevBreak := s.breakTo
+		s.breakTo = bEnd
+		lab := s.labeledNodes[n]
+		if lab != nil {
+			// labeled
+			lab.breakTarget = bEnd
+		}
+
+		// generate body code
+		s.stmtList(n.Nbody)
+
+		s.breakTo = prevBreak
+		if lab != nil {
+			lab.breakTarget = nil
+		}
+
+		if b := s.endBlock(); b != nil {
+			addEdge(b, bEnd)
+		}
+		s.startBlock(bEnd)
+
+	case OVARKILL:
+		// TODO(khr): ??? anything to do here? Only for addrtaken variables?
+		// Maybe just link it in the store chain?
+	default:
+		s.Unimplementedf("unhandled stmt %s", opnames[n.Op])
+	}
+}
+
+// An opAndType pairs a Go operator (O* constant) with a concrete
+// operand kind (T* constant); it is the key type of opToSSA.
+type opAndType struct {
+	op    uint8
+	etype uint8
+}
+
+// opToSSA maps a Go operator and a concrete operand kind
+// (as returned by concreteEtype) to the generic SSA opcode
+// implementing it.
+var opToSSA = map[opAndType]ssa.Op{
+	opAndType{OADD, TINT8}:   ssa.OpAdd8,
+	opAndType{OADD, TUINT8}:  ssa.OpAdd8,
+	opAndType{OADD, TINT16}:  ssa.OpAdd16,
+	opAndType{OADD, TUINT16}: ssa.OpAdd16,
+	opAndType{OADD, TINT32}:  ssa.OpAdd32,
+	opAndType{OADD, TUINT32}: ssa.OpAdd32,
+	opAndType{OADD, TPTR32}:  ssa.OpAdd32,
+	opAndType{OADD, TINT64}:  ssa.OpAdd64,
+	opAndType{OADD, TUINT64}: ssa.OpAdd64,
+	opAndType{OADD, TPTR64}:  ssa.OpAdd64,
+
+	opAndType{OSUB, TINT8}:   ssa.OpSub8,
+	opAndType{OSUB, TUINT8}:  ssa.OpSub8,
+	opAndType{OSUB, TINT16}:  ssa.OpSub16,
+	opAndType{OSUB, TUINT16}: ssa.OpSub16,
+	opAndType{OSUB, TINT32}:  ssa.OpSub32,
+	opAndType{OSUB, TUINT32}: ssa.OpSub32,
+	opAndType{OSUB, TINT64}:  ssa.OpSub64,
+	opAndType{OSUB, TUINT64}: ssa.OpSub64,
+
+	opAndType{ONOT, TBOOL}: ssa.OpNot,
+
+	opAndType{OMINUS, TINT8}:   ssa.OpNeg8,
+	opAndType{OMINUS, TUINT8}:  ssa.OpNeg8,
+	opAndType{OMINUS, TINT16}:  ssa.OpNeg16,
+	opAndType{OMINUS, TUINT16}: ssa.OpNeg16,
+	opAndType{OMINUS, TINT32}:  ssa.OpNeg32,
+	opAndType{OMINUS, TUINT32}: ssa.OpNeg32,
+	opAndType{OMINUS, TINT64}:  ssa.OpNeg64,
+	opAndType{OMINUS, TUINT64}: ssa.OpNeg64,
+
+	opAndType{OCOM, TINT8}:   ssa.OpCom8,
+	opAndType{OCOM, TUINT8}:  ssa.OpCom8,
+	opAndType{OCOM, TINT16}:  ssa.OpCom16,
+	opAndType{OCOM, TUINT16}: ssa.OpCom16,
+	opAndType{OCOM, TINT32}:  ssa.OpCom32,
+	opAndType{OCOM, TUINT32}: ssa.OpCom32,
+	opAndType{OCOM, TINT64}:  ssa.OpCom64,
+	opAndType{OCOM, TUINT64}: ssa.OpCom64,
+
+	opAndType{OMUL, TINT8}:   ssa.OpMul8,
+	opAndType{OMUL, TUINT8}:  ssa.OpMul8,
+	opAndType{OMUL, TINT16}:  ssa.OpMul16,
+	opAndType{OMUL, TUINT16}: ssa.OpMul16,
+	opAndType{OMUL, TINT32}:  ssa.OpMul32,
+	opAndType{OMUL, TUINT32}: ssa.OpMul32,
+	opAndType{OMUL, TINT64}:  ssa.OpMul64,
+	opAndType{OMUL, TUINT64}: ssa.OpMul64,
+
+	opAndType{OAND, TINT8}:   ssa.OpAnd8,
+	opAndType{OAND, TUINT8}:  ssa.OpAnd8,
+	opAndType{OAND, TINT16}:  ssa.OpAnd16,
+	opAndType{OAND, TUINT16}: ssa.OpAnd16,
+	opAndType{OAND, TINT32}:  ssa.OpAnd32,
+	opAndType{OAND, TUINT32}: ssa.OpAnd32,
+	opAndType{OAND, TINT64}:  ssa.OpAnd64,
+	opAndType{OAND, TUINT64}: ssa.OpAnd64,
+
+	opAndType{OOR, TINT8}:   ssa.OpOr8,
+	opAndType{OOR, TUINT8}:  ssa.OpOr8,
+	opAndType{OOR, TINT16}:  ssa.OpOr16,
+	opAndType{OOR, TUINT16}: ssa.OpOr16,
+	opAndType{OOR, TINT32}:  ssa.OpOr32,
+	opAndType{OOR, TUINT32}: ssa.OpOr32,
+	opAndType{OOR, TINT64}:  ssa.OpOr64,
+	opAndType{OOR, TUINT64}: ssa.OpOr64,
+
+	opAndType{OEQ, TBOOL}:      ssa.OpEq8,
+	opAndType{OEQ, TINT8}:      ssa.OpEq8,
+	opAndType{OEQ, TUINT8}:     ssa.OpEq8,
+	opAndType{OEQ, TINT16}:     ssa.OpEq16,
+	opAndType{OEQ, TUINT16}:    ssa.OpEq16,
+	opAndType{OEQ, TINT32}:     ssa.OpEq32,
+	opAndType{OEQ, TUINT32}:    ssa.OpEq32,
+	opAndType{OEQ, TINT64}:     ssa.OpEq64,
+	opAndType{OEQ, TUINT64}:    ssa.OpEq64,
+	opAndType{OEQ, TPTR64}:     ssa.OpEq64,
+	opAndType{OEQ, TINTER}:     ssa.OpEqFat, // e == nil only
+	opAndType{OEQ, TARRAY}:     ssa.OpEqFat, // slice only; a == nil only
+	opAndType{OEQ, TFUNC}:      ssa.OpEqPtr,
+	opAndType{OEQ, TMAP}:       ssa.OpEqPtr,
+	opAndType{OEQ, TCHAN}:      ssa.OpEqPtr,
+	opAndType{OEQ, TUINTPTR}:   ssa.OpEqPtr,
+	opAndType{OEQ, TUNSAFEPTR}: ssa.OpEqPtr,
+
+	opAndType{ONE, TBOOL}:      ssa.OpNeq8,
+	opAndType{ONE, TINT8}:      ssa.OpNeq8,
+	opAndType{ONE, TUINT8}:     ssa.OpNeq8,
+	opAndType{ONE, TINT16}:     ssa.OpNeq16,
+	opAndType{ONE, TUINT16}:    ssa.OpNeq16,
+	opAndType{ONE, TINT32}:     ssa.OpNeq32,
+	opAndType{ONE, TUINT32}:    ssa.OpNeq32,
+	opAndType{ONE, TINT64}:     ssa.OpNeq64,
+	opAndType{ONE, TUINT64}:    ssa.OpNeq64,
+	opAndType{ONE, TPTR64}:     ssa.OpNeq64,
+	opAndType{ONE, TINTER}:     ssa.OpNeqFat, // e != nil only
+	opAndType{ONE, TARRAY}:     ssa.OpNeqFat, // slice only; a != nil only
+	opAndType{ONE, TFUNC}:      ssa.OpNeqPtr,
+	opAndType{ONE, TMAP}:       ssa.OpNeqPtr,
+	opAndType{ONE, TCHAN}:      ssa.OpNeqPtr,
+	opAndType{ONE, TUINTPTR}:   ssa.OpNeqPtr,
+	opAndType{ONE, TUNSAFEPTR}: ssa.OpNeqPtr,
+
+	opAndType{OLT, TINT8}:   ssa.OpLess8,
+	opAndType{OLT, TUINT8}:  ssa.OpLess8U,
+	opAndType{OLT, TINT16}:  ssa.OpLess16,
+	opAndType{OLT, TUINT16}: ssa.OpLess16U,
+	opAndType{OLT, TINT32}:  ssa.OpLess32,
+	opAndType{OLT, TUINT32}: ssa.OpLess32U,
+	opAndType{OLT, TINT64}:  ssa.OpLess64,
+	opAndType{OLT, TUINT64}: ssa.OpLess64U,
+
+	opAndType{OGT, TINT8}:   ssa.OpGreater8,
+	opAndType{OGT, TUINT8}:  ssa.OpGreater8U,
+	opAndType{OGT, TINT16}:  ssa.OpGreater16,
+	opAndType{OGT, TUINT16}: ssa.OpGreater16U,
+	opAndType{OGT, TINT32}:  ssa.OpGreater32,
+	opAndType{OGT, TUINT32}: ssa.OpGreater32U,
+	opAndType{OGT, TINT64}:  ssa.OpGreater64,
+	opAndType{OGT, TUINT64}: ssa.OpGreater64U,
+
+	opAndType{OLE, TINT8}:   ssa.OpLeq8,
+	opAndType{OLE, TUINT8}:  ssa.OpLeq8U,
+	opAndType{OLE, TINT16}:  ssa.OpLeq16,
+	opAndType{OLE, TUINT16}: ssa.OpLeq16U,
+	opAndType{OLE, TINT32}:  ssa.OpLeq32,
+	opAndType{OLE, TUINT32}: ssa.OpLeq32U,
+	opAndType{OLE, TINT64}:  ssa.OpLeq64,
+	opAndType{OLE, TUINT64}: ssa.OpLeq64U,
+
+	opAndType{OGE, TINT8}:   ssa.OpGeq8,
+	opAndType{OGE, TUINT8}:  ssa.OpGeq8U,
+	opAndType{OGE, TINT16}:  ssa.OpGeq16,
+	opAndType{OGE, TUINT16}: ssa.OpGeq16U,
+	opAndType{OGE, TINT32}:  ssa.OpGeq32,
+	opAndType{OGE, TUINT32}: ssa.OpGeq32U,
+	opAndType{OGE, TINT64}:  ssa.OpGeq64,
+	opAndType{OGE, TUINT64}: ssa.OpGeq64U,
+}
+
+// concreteEtype returns t's etype, resolving the platform-sized kinds
+// TINT, TUINT, and TUINTPTR to their fixed-size equivalents.
+func (s *state) concreteEtype(t *Type) uint8 {
+	switch e := t.Etype; e {
+	case TINT:
+		if s.config.IntSize == 8 {
+			return TINT64
+		}
+		return TINT32
+	case TUINT:
+		if s.config.IntSize == 8 {
+			return TUINT64
+		}
+		return TUINT32
+	case TUINTPTR:
+		if s.config.PtrSize == 8 {
+			return TUINT64
+		}
+		return TUINT32
+	default:
+		return e
+	}
+}
+
+// ssaOp returns the generic SSA opcode implementing op on operands of type t,
+// reporting an unimplemented error if no mapping exists.
+func (s *state) ssaOp(op uint8, t *Type) ssa.Op {
+	key := opAndType{op, s.concreteEtype(t)}
+	x, ok := opToSSA[key]
+	if !ok {
+		s.Unimplementedf("unhandled binary op %s etype=%s", opnames[op], Econv(int(key.etype), 0))
+	}
+	return x
+}
+
+// An opAndTwoTypes pairs a shift operator with the kinds of its value
+// operand and its shift-count operand; it is the key type of shiftOpToSSA.
+type opAndTwoTypes struct {
+	op     uint8
+	etype1 uint8
+	etype2 uint8
+}
+
+// shiftOpToSSA maps a shift operator plus the concrete kinds of its
+// value and (unsigned) shift-count operands to the generic SSA opcode.
+var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
+	opAndTwoTypes{OLSH, TINT8, TUINT8}:   ssa.OpLsh8x8,
+	opAndTwoTypes{OLSH, TUINT8, TUINT8}:  ssa.OpLsh8x8,
+	opAndTwoTypes{OLSH, TINT8, TUINT16}:  ssa.OpLsh8x16,
+	opAndTwoTypes{OLSH, TUINT8, TUINT16}: ssa.OpLsh8x16,
+	opAndTwoTypes{OLSH, TINT8, TUINT32}:  ssa.OpLsh8x32,
+	opAndTwoTypes{OLSH, TUINT8, TUINT32}: ssa.OpLsh8x32,
+	opAndTwoTypes{OLSH, TINT8, TUINT64}:  ssa.OpLsh8x64,
+	opAndTwoTypes{OLSH, TUINT8, TUINT64}: ssa.OpLsh8x64,
+
+	opAndTwoTypes{OLSH, TINT16, TUINT8}:   ssa.OpLsh16x8,
+	opAndTwoTypes{OLSH, TUINT16, TUINT8}:  ssa.OpLsh16x8,
+	opAndTwoTypes{OLSH, TINT16, TUINT16}:  ssa.OpLsh16x16,
+	opAndTwoTypes{OLSH, TUINT16, TUINT16}: ssa.OpLsh16x16,
+	opAndTwoTypes{OLSH, TINT16, TUINT32}:  ssa.OpLsh16x32,
+	opAndTwoTypes{OLSH, TUINT16, TUINT32}: ssa.OpLsh16x32,
+	opAndTwoTypes{OLSH, TINT16, TUINT64}:  ssa.OpLsh16x64,
+	opAndTwoTypes{OLSH, TUINT16, TUINT64}: ssa.OpLsh16x64,
+
+	opAndTwoTypes{OLSH, TINT32, TUINT8}:   ssa.OpLsh32x8,
+	opAndTwoTypes{OLSH, TUINT32, TUINT8}:  ssa.OpLsh32x8,
+	opAndTwoTypes{OLSH, TINT32, TUINT16}:  ssa.OpLsh32x16,
+	opAndTwoTypes{OLSH, TUINT32, TUINT16}: ssa.OpLsh32x16,
+	opAndTwoTypes{OLSH, TINT32, TUINT32}:  ssa.OpLsh32x32,
+	opAndTwoTypes{OLSH, TUINT32, TUINT32}: ssa.OpLsh32x32,
+	opAndTwoTypes{OLSH, TINT32, TUINT64}:  ssa.OpLsh32x64,
+	opAndTwoTypes{OLSH, TUINT32, TUINT64}: ssa.OpLsh32x64,
+
+	opAndTwoTypes{OLSH, TINT64, TUINT8}:   ssa.OpLsh64x8,
+	opAndTwoTypes{OLSH, TUINT64, TUINT8}:  ssa.OpLsh64x8,
+	opAndTwoTypes{OLSH, TINT64, TUINT16}:  ssa.OpLsh64x16,
+	opAndTwoTypes{OLSH, TUINT64, TUINT16}: ssa.OpLsh64x16,
+	opAndTwoTypes{OLSH, TINT64, TUINT32}:  ssa.OpLsh64x32,
+	opAndTwoTypes{OLSH, TUINT64, TUINT32}: ssa.OpLsh64x32,
+	opAndTwoTypes{OLSH, TINT64, TUINT64}:  ssa.OpLsh64x64,
+	opAndTwoTypes{OLSH, TUINT64, TUINT64}: ssa.OpLsh64x64,
+
+	opAndTwoTypes{ORSH, TINT8, TUINT8}:   ssa.OpRsh8x8,
+	opAndTwoTypes{ORSH, TUINT8, TUINT8}:  ssa.OpRsh8Ux8,
+	opAndTwoTypes{ORSH, TINT8, TUINT16}:  ssa.OpRsh8x16,
+	opAndTwoTypes{ORSH, TUINT8, TUINT16}: ssa.OpRsh8Ux16,
+	opAndTwoTypes{ORSH, TINT8, TUINT32}:  ssa.OpRsh8x32,
+	opAndTwoTypes{ORSH, TUINT8, TUINT32}: ssa.OpRsh8Ux32,
+	opAndTwoTypes{ORSH, TINT8, TUINT64}:  ssa.OpRsh8x64,
+	opAndTwoTypes{ORSH, TUINT8, TUINT64}: ssa.OpRsh8Ux64,
+
+	opAndTwoTypes{ORSH, TINT16, TUINT8}:   ssa.OpRsh16x8,
+	opAndTwoTypes{ORSH, TUINT16, TUINT8}:  ssa.OpRsh16Ux8,
+	opAndTwoTypes{ORSH, TINT16, TUINT16}:  ssa.OpRsh16x16,
+	opAndTwoTypes{ORSH, TUINT16, TUINT16}: ssa.OpRsh16Ux16,
+	opAndTwoTypes{ORSH, TINT16, TUINT32}:  ssa.OpRsh16x32,
+	opAndTwoTypes{ORSH, TUINT16, TUINT32}: ssa.OpRsh16Ux32,
+	opAndTwoTypes{ORSH, TINT16, TUINT64}:  ssa.OpRsh16x64,
+	opAndTwoTypes{ORSH, TUINT16, TUINT64}: ssa.OpRsh16Ux64,
+
+	opAndTwoTypes{ORSH, TINT32, TUINT8}:   ssa.OpRsh32x8,
+	opAndTwoTypes{ORSH, TUINT32, TUINT8}:  ssa.OpRsh32Ux8,
+	opAndTwoTypes{ORSH, TINT32, TUINT16}:  ssa.OpRsh32x16,
+	opAndTwoTypes{ORSH, TUINT32, TUINT16}: ssa.OpRsh32Ux16,
+	opAndTwoTypes{ORSH, TINT32, TUINT32}:  ssa.OpRsh32x32,
+	opAndTwoTypes{ORSH, TUINT32, TUINT32}: ssa.OpRsh32Ux32,
+	opAndTwoTypes{ORSH, TINT32, TUINT64}:  ssa.OpRsh32x64,
+	opAndTwoTypes{ORSH, TUINT32, TUINT64}: ssa.OpRsh32Ux64,
+
+	opAndTwoTypes{ORSH, TINT64, TUINT8}:   ssa.OpRsh64x8,
+	opAndTwoTypes{ORSH, TUINT64, TUINT8}:  ssa.OpRsh64Ux8,
+	opAndTwoTypes{ORSH, TINT64, TUINT16}:  ssa.OpRsh64x16,
+	opAndTwoTypes{ORSH, TUINT64, TUINT16}: ssa.OpRsh64Ux16,
+	opAndTwoTypes{ORSH, TINT64, TUINT32}:  ssa.OpRsh64x32,
+	opAndTwoTypes{ORSH, TUINT64, TUINT32}: ssa.OpRsh64Ux32,
+	opAndTwoTypes{ORSH, TINT64, TUINT64}:  ssa.OpRsh64x64,
+	opAndTwoTypes{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64,
+}
+
+ // ssaShiftOp returns the SSA op for shift operator op applied to a value
+ // of type t with a shift count of type u, resolving named types to their
+ // concrete etypes first.
+ func (s *state) ssaShiftOp(op uint8, t *Type, u *Type) ssa.Op {
+ etype1 := s.concreteEtype(t)
+ etype2 := s.concreteEtype(u)
+ op2, found := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
+ if !found {
+ s.Unimplementedf("unhandled shift op %s etype=%s/%s", opnames[op], Econv(int(etype1), 0), Econv(int(etype2), 0))
+ }
+ return op2
+ }
+
+ // expr converts the expression n to ssa, adds it to s and returns the ssa result.
+ // It may split the current block (e.g. for &&/||, calls, and implicit checks).
+ func (s *state) expr(n *Node) *ssa.Value {
+ s.pushLine(n.Lineno)
+ defer s.popLine()
+
+ s.stmtList(n.Ninit)
+ switch n.Op {
+ case ONAME:
+ if n.Class == PFUNC {
+ // "value" of a function is the address of the function's closure
+ sym := funcsym(n.Sym)
+ aux := &ssa.ExternSymbol{n.Type, sym}
+ return s.entryNewValue1A(ssa.OpAddr, Ptrto(n.Type), aux, s.sb)
+ }
+ if canSSA(n) {
+ return s.variable(n, n.Type)
+ }
+ // Not SSA-able: load from the variable's address.
+ addr := s.addr(n)
+ return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
+ case OLITERAL:
+ switch n.Val().Ctype() {
+ case CTINT:
+ i := Mpgetfix(n.Val().U.(*Mpint))
+ switch n.Type.Size() {
+ case 1:
+ return s.constInt8(n.Type, int8(i))
+ case 2:
+ return s.constInt16(n.Type, int16(i))
+ case 4:
+ return s.constInt32(n.Type, int32(i))
+ case 8:
+ return s.constInt64(n.Type, i)
+ default:
+ s.Fatalf("bad integer size %d", n.Type.Size())
+ return nil
+ }
+ case CTSTR:
+ return s.entryNewValue0A(ssa.OpConstString, n.Type, n.Val().U)
+ case CTBOOL:
+ return s.entryNewValue0A(ssa.OpConstBool, n.Type, n.Val().U)
+ case CTNIL:
+ return s.entryNewValue0(ssa.OpConstNil, n.Type)
+ default:
+ s.Unimplementedf("unhandled OLITERAL %v", n.Val().Ctype())
+ return nil
+ }
+ case OCONVNOP:
+ x := s.expr(n.Left)
+ return s.newValue1(ssa.OpConvNop, n.Type, x)
+ case OCONV:
+ x := s.expr(n.Left)
+ ft := n.Left.Type // from type
+ tt := n.Type // to type
+ if ft.IsInteger() && tt.IsInteger() {
+ // Integer conversions are keyed by 10*fromSize+toSize
+ // (e.g. 84 == 8-byte source truncated to 4 bytes).
+ var op ssa.Op
+ if tt.Size() == ft.Size() {
+ op = ssa.OpConvNop
+ } else if tt.Size() < ft.Size() {
+ // truncation
+ switch 10*ft.Size() + tt.Size() {
+ case 21:
+ op = ssa.OpTrunc16to8
+ case 41:
+ op = ssa.OpTrunc32to8
+ case 42:
+ op = ssa.OpTrunc32to16
+ case 81:
+ op = ssa.OpTrunc64to8
+ case 82:
+ op = ssa.OpTrunc64to16
+ case 84:
+ op = ssa.OpTrunc64to32
+ default:
+ s.Fatalf("weird integer truncation %s -> %s", ft, tt)
+ }
+ } else if ft.IsSigned() {
+ // sign extension
+ switch 10*ft.Size() + tt.Size() {
+ case 12:
+ op = ssa.OpSignExt8to16
+ case 14:
+ op = ssa.OpSignExt8to32
+ case 18:
+ op = ssa.OpSignExt8to64
+ case 24:
+ op = ssa.OpSignExt16to32
+ case 28:
+ op = ssa.OpSignExt16to64
+ case 48:
+ op = ssa.OpSignExt32to64
+ default:
+ s.Fatalf("bad integer sign extension %s -> %s", ft, tt)
+ }
+ } else {
+ // zero extension
+ switch 10*ft.Size() + tt.Size() {
+ case 12:
+ op = ssa.OpZeroExt8to16
+ case 14:
+ op = ssa.OpZeroExt8to32
+ case 18:
+ op = ssa.OpZeroExt8to64
+ case 24:
+ op = ssa.OpZeroExt16to32
+ case 28:
+ op = ssa.OpZeroExt16to64
+ case 48:
+ op = ssa.OpZeroExt32to64
+ default:
+ s.Fatalf("weird integer zero extension %s -> %s", ft, tt)
+ }
+ }
+ return s.newValue1(op, n.Type, x)
+ }
+ s.Unimplementedf("unhandled OCONV %s -> %s", n.Left.Type, n.Type)
+ return nil
+
+ // binary ops
+ case OLT, OEQ, ONE, OLE, OGE, OGT:
+ a := s.expr(n.Left)
+ b := s.expr(n.Right)
+ return s.newValue2(s.ssaOp(n.Op, n.Left.Type), ssa.TypeBool, a, b)
+ case OADD, OSUB, OMUL, OAND, OOR:
+ a := s.expr(n.Left)
+ b := s.expr(n.Right)
+ return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
+ case OLSH, ORSH:
+ a := s.expr(n.Left)
+ b := s.expr(n.Right)
+ return s.newValue2(s.ssaShiftOp(n.Op, n.Type, n.Right.Type), a.Type, a, b)
+ case OANDAND, OOROR:
+ // To implement OANDAND (and OOROR), we introduce a
+ // new temporary variable to hold the result. The
+ // variable is associated with the OANDAND node in the
+ // s.vars table (normally variables are only
+ // associated with ONAME nodes). We convert
+ // A && B
+ // to
+ // var = A
+ // if var {
+ // var = B
+ // }
+ // Using var in the subsequent block introduces the
+ // necessary phi variable.
+ el := s.expr(n.Left)
+ s.vars[n] = el
+
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.Control = el
+
+ bRight := s.f.NewBlock(ssa.BlockPlain)
+ bResult := s.f.NewBlock(ssa.BlockPlain)
+ if n.Op == OANDAND {
+ addEdge(b, bRight)
+ addEdge(b, bResult)
+ } else if n.Op == OOROR {
+ addEdge(b, bResult)
+ addEdge(b, bRight)
+ }
+
+ s.startBlock(bRight)
+ er := s.expr(n.Right)
+ s.vars[n] = er
+
+ b = s.endBlock()
+ addEdge(b, bResult)
+
+ s.startBlock(bResult)
+ return s.variable(n, n.Type)
+
+ // unary ops
+ case ONOT, OMINUS, OCOM:
+ a := s.expr(n.Left)
+ return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
+
+ case OADDR:
+ return s.addr(n.Left)
+
+ case OINDREG:
+ if int(n.Reg) != Thearch.REGSP {
+ s.Unimplementedf("OINDREG of non-SP register %s in expr: %v", obj.Rconv(int(n.Reg)), n)
+ return nil
+ }
+ addr := s.entryNewValue1I(ssa.OpOffPtr, Ptrto(n.Type), n.Xoffset, s.sp)
+ return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
+
+ case OIND:
+ p := s.expr(n.Left)
+ s.nilCheck(p)
+ return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
+
+ case ODOT:
+ v := s.expr(n.Left)
+ return s.newValue1I(ssa.OpStructSelect, n.Type, n.Xoffset, v)
+
+ case ODOTPTR:
+ p := s.expr(n.Left)
+ s.nilCheck(p)
+ p = s.newValue2(ssa.OpAddPtr, p.Type, p, s.constIntPtr(s.config.Uintptr, n.Xoffset))
+ return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
+
+ case OINDEX:
+ if n.Left.Type.Bound >= 0 { // array or string
+ a := s.expr(n.Left)
+ i := s.expr(n.Right)
+ i = s.extendIndex(i)
+ var elemtype *Type
+ var len *ssa.Value
+ if n.Left.Type.IsString() {
+ len = s.newValue1(ssa.OpStringLen, s.config.Int, a)
+ elemtype = Types[TUINT8]
+ } else {
+ len = s.constInt(s.config.Int, n.Left.Type.Bound)
+ elemtype = n.Left.Type.Type
+ }
+ s.boundsCheck(i, len)
+ return s.newValue2(ssa.OpArrayIndex, elemtype, a, i)
+ } else { // slice
+ p := s.addr(n)
+ return s.newValue2(ssa.OpLoad, n.Left.Type.Type, p, s.mem())
+ }
+
+ case OLEN, OCAP:
+ switch {
+ case n.Left.Type.IsSlice():
+ op := ssa.OpSliceLen
+ if n.Op == OCAP {
+ op = ssa.OpSliceCap
+ }
+ return s.newValue1(op, s.config.Int, s.expr(n.Left))
+ case n.Left.Type.IsString(): // string; not reachable for OCAP
+ return s.newValue1(ssa.OpStringLen, s.config.Int, s.expr(n.Left))
+ default: // array
+ return s.constInt(s.config.Int, n.Left.Type.Bound)
+ }
+
+ case OCALLFUNC, OCALLMETH:
+ left := n.Left
+ static := left.Op == ONAME && left.Class == PFUNC
+
+ if n.Op == OCALLMETH {
+ // Rewrite to an OCALLFUNC: (p.f)(...) becomes (f)(p, ...)
+ // Take care not to modify the original AST.
+ if left.Op != ODOTMETH {
+ Fatal("OCALLMETH: n.Left not an ODOTMETH: %v", left)
+ }
+
+ newLeft := *left.Right
+ newLeft.Type = left.Type
+ if newLeft.Op == ONAME {
+ newLeft.Class = PFUNC
+ }
+ left = &newLeft
+ static = true
+ }
+
+ // evaluate closure
+ var closure *ssa.Value
+ if !static {
+ closure = s.expr(left)
+ }
+
+ // run all argument assignments
+ s.stmtList(n.List)
+
+ bNext := s.f.NewBlock(ssa.BlockPlain)
+ var call *ssa.Value
+ if static {
+ call = s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, left.Sym, s.mem())
+ } else {
+ entry := s.newValue2(ssa.OpLoad, s.config.Uintptr, closure, s.mem())
+ call = s.newValue3(ssa.OpClosureCall, ssa.TypeMem, entry, closure, s.mem())
+ }
+ dowidth(left.Type)
+ call.AuxInt = left.Type.Argwid // call operations carry the argsize of the callee along with them
+ b := s.endBlock()
+ b.Kind = ssa.BlockCall
+ b.Control = call
+ addEdge(b, bNext)
+ addEdge(b, s.exit)
+
+ // read result from stack at the start of the fallthrough block
+ s.startBlock(bNext)
+ var titer Iter
+ fp := Structfirst(&titer, Getoutarg(left.Type))
+ if fp == nil {
+ // CALLFUNC has no return value. Continue with the next statement.
+ return nil
+ }
+ a := s.entryNewValue1I(ssa.OpOffPtr, Ptrto(fp.Type), fp.Width, s.sp)
+ return s.newValue2(ssa.OpLoad, fp.Type, a, call)
+ default:
+ s.Unimplementedf("unhandled expr %s", opnames[n.Op])
+ return nil
+ }
+ }
+
+ // assign implements left = right.
+ // right == nil means assign the zero value of left's type.
+ // op is currently unused (see the write-barrier TODO below).
+ func (s *state) assign(op uint8, left *Node, right *Node) {
+ // TODO: do write barrier
+ // if op == OASWB
+ var val *ssa.Value
+ if right == nil {
+ // right == nil means use the zero value of the assigned type.
+ t := left.Type
+ if !canSSA(left) {
+ // if we can't ssa this memory, treat it as just zeroing out the backing memory
+ addr := s.addr(left)
+ s.vars[&memvar] = s.newValue2I(ssa.OpZero, ssa.TypeMem, t.Size(), addr, s.mem())
+ return
+ }
+ val = s.zeroVal(t)
+ } else {
+ val = s.expr(right)
+ }
+ if left.Op == ONAME && canSSA(left) {
+ // Update variable assignment.
+ s.vars[left] = val
+ return
+ }
+ // not ssa-able. Treat as a store.
+ addr := s.addr(left)
+ s.vars[&memvar] = s.newValue3(ssa.OpStore, ssa.TypeMem, addr, val, s.mem())
+ }
+
+ // zeroVal returns the zero value for type t.
+ // Note: if the inner size switch hits its default, Fatalf terminates
+ // compilation; otherwise unhandled types fall through to Unimplementedf.
+ func (s *state) zeroVal(t *Type) *ssa.Value {
+ switch {
+ case t.IsInteger():
+ switch t.Size() {
+ case 1:
+ return s.constInt8(t, 0)
+ case 2:
+ return s.constInt16(t, 0)
+ case 4:
+ return s.constInt32(t, 0)
+ case 8:
+ return s.constInt64(t, 0)
+ default:
+ s.Fatalf("bad sized integer type %s", t)
+ }
+ case t.IsString():
+ return s.entryNewValue0A(ssa.OpConstString, t, "")
+ case t.IsPtr():
+ return s.entryNewValue0(ssa.OpConstNil, t)
+ case t.IsBoolean():
+ return s.entryNewValue0A(ssa.OpConstBool, t, false) // TODO: store bools as 0/1 in AuxInt?
+ }
+ s.Unimplementedf("zero for type %v not implemented", t)
+ return nil
+ }
+
+ // addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
+ // The value that the returned Value represents is guaranteed to be non-nil.
+ func (s *state) addr(n *Node) *ssa.Value {
+ switch n.Op {
+ case ONAME:
+ switch n.Class {
+ case PEXTERN:
+ // global variable
+ aux := &ssa.ExternSymbol{n.Type, n.Sym}
+ v := s.entryNewValue1A(ssa.OpAddr, Ptrto(n.Type), aux, s.sb)
+ // TODO: Make OpAddr use AuxInt as well as Aux.
+ if n.Xoffset != 0 {
+ v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Xoffset, v)
+ }
+ return v
+ case PPARAM, PPARAMOUT, PAUTO:
+ // parameter/result slot or local variable
+ // These addresses were precomputed into s.decladdrs.
+ v := s.decladdrs[n]
+ if v == nil {
+ if flag_race != 0 && n.String() == ".fp" {
+ s.Unimplementedf("race detector mishandles nodfp")
+ }
+ s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
+ }
+ return v
+ case PAUTO | PHEAP:
+ // moved-to-heap variable: its address lives in Heapaddr.
+ return s.expr(n.Name.Heapaddr)
+ default:
+ s.Unimplementedf("variable address of %v not implemented", n)
+ return nil
+ }
+ case OINDREG:
+ // indirect off a register
+ // used for storing/loading arguments/returns to/from callees
+ if int(n.Reg) != Thearch.REGSP {
+ s.Unimplementedf("OINDREG of non-SP register %s in addr: %v", obj.Rconv(int(n.Reg)), n)
+ return nil
+ }
+ return s.entryNewValue1I(ssa.OpOffPtr, Ptrto(n.Type), n.Xoffset, s.sp)
+ case OINDEX:
+ if n.Left.Type.IsSlice() {
+ // Slice element: bounds-check against the dynamic length,
+ // then index off the slice's data pointer.
+ a := s.expr(n.Left)
+ i := s.expr(n.Right)
+ i = s.extendIndex(i)
+ len := s.newValue1(ssa.OpSliceLen, s.config.Uintptr, a)
+ s.boundsCheck(i, len)
+ p := s.newValue1(ssa.OpSlicePtr, Ptrto(n.Left.Type.Type), a)
+ return s.newValue2(ssa.OpPtrIndex, Ptrto(n.Left.Type.Type), p, i)
+ } else { // array
+ // Array element: bounds-check against the constant bound,
+ // then index off the array's address.
+ a := s.addr(n.Left)
+ i := s.expr(n.Right)
+ i = s.extendIndex(i)
+ len := s.constInt(s.config.Int, n.Left.Type.Bound)
+ s.boundsCheck(i, len)
+ return s.newValue2(ssa.OpPtrIndex, Ptrto(n.Left.Type.Type), a, i)
+ }
+ case OIND:
+ p := s.expr(n.Left)
+ s.nilCheck(p)
+ return p
+ case ODOT:
+ p := s.addr(n.Left)
+ return s.newValue2(ssa.OpAddPtr, p.Type, p, s.constIntPtr(s.config.Uintptr, n.Xoffset))
+ case ODOTPTR:
+ p := s.expr(n.Left)
+ s.nilCheck(p)
+ return s.newValue2(ssa.OpAddPtr, p.Type, p, s.constIntPtr(s.config.Uintptr, n.Xoffset))
+ default:
+ s.Unimplementedf("addr: bad op %v", Oconv(int(n.Op), 0))
+ return nil
+ }
+ }
+
+ // canSSA reports whether n is SSA-able.
+ // n must be an ONAME.
+ func canSSA(n *Node) bool {
+ // TODO: try to make more variables SSAable.
+ if n.Op != ONAME || n.Addrtaken || n.Class&PHEAP != 0 || Isfat(n.Type) {
+ return false
+ }
+ switch n.Class {
+ case PEXTERN, PPARAMOUT:
+ return false
+ }
+ return true
+ }
+
+ // nilCheck generates nil pointer checking code.
+ // Starts a new block on return.
+ // Used only for automatically inserted nil checks,
+ // not for user code like 'x != nil'.
+ // The current block ends with a conditional branch: non-nil pointers
+ // continue in a fresh block, nil pointers go to s.exit.
+ func (s *state) nilCheck(ptr *ssa.Value) {
+ c := s.newValue1(ssa.OpIsNonNil, ssa.TypeBool, ptr)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.Control = c
+ bNext := s.f.NewBlock(ssa.BlockPlain)
+ addEdge(b, bNext)
+ addEdge(b, s.exit)
+ s.startBlock(bNext)
+ // TODO(khr): Don't go directly to exit. Go to a stub that calls panicmem first.
+ // TODO: implicit nil checks somehow?
+ }
+
+ // boundsCheck generates bounds checking code. Checks if 0 <= idx < len, branches to exit if not.
+ // Starts a new block on return.
+ // OpIsInBounds encodes both conditions (idx is treated as unsigned).
+ func (s *state) boundsCheck(idx, len *ssa.Value) {
+ // TODO: convert index to full width?
+ // TODO: if index is 64-bit and we're compiling to 32-bit, check that high 32 bits are zero.
+
+ // bounds check
+ cmp := s.newValue2(ssa.OpIsInBounds, ssa.TypeBool, idx, len)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.Control = cmp
+ bNext := s.f.NewBlock(ssa.BlockPlain)
+ addEdge(b, bNext)
+ addEdge(b, s.exit)
+ // TODO: don't go directly to s.exit. Go to a stub that calls panicindex first.
+ s.startBlock(bNext)
+ }
+
+ // checkgoto checks that a goto from from to to does not
+ // jump into a block or jump over variable declarations.
+ // It is a copy of checkgoto in the pre-SSA backend,
+ // modified only for line number handling.
+ // TODO: document how this works and why it is designed the way it is.
+ func (s *state) checkgoto(from *Node, to *Node) {
+ if from.Sym == to.Sym {
+ return
+ }
+
+ // Count the scope-chain depths of both symbol lists.
+ nf := 0
+ for fs := from.Sym; fs != nil; fs = fs.Link {
+ nf++
+ }
+ nt := 0
+ for fs := to.Sym; fs != nil; fs = fs.Link {
+ nt++
+ }
+ // Trim the deeper from-chain so both chains have equal length.
+ fs := from.Sym
+ for ; nf > nt; nf-- {
+ fs = fs.Link
+ }
+ if fs != to.Sym {
+ // decide what to complain about.
+ // prefer to complain about 'into block' over declarations,
+ // so scan backward to find most recent block or else dcl.
+ var block *Sym
+
+ var dcl *Sym
+ ts := to.Sym
+ for ; nt > nf; nt-- {
+ if ts.Pkg == nil {
+ block = ts
+ } else {
+ dcl = ts
+ }
+ ts = ts.Link
+ }
+
+ // Walk both chains in lockstep until they meet.
+ for ts != fs {
+ if ts.Pkg == nil {
+ block = ts
+ } else {
+ dcl = ts
+ }
+ ts = ts.Link
+ fs = fs.Link
+ }
+
+ lno := int(from.Left.Lineno)
+ if block != nil {
+ yyerrorl(lno, "goto %v jumps into block starting at %v", from.Left.Sym, Ctxt.Line(int(block.Lastlineno)))
+ } else {
+ yyerrorl(lno, "goto %v jumps over declaration of %v at %v", from.Left.Sym, dcl, Ctxt.Line(int(dcl.Lastlineno)))
+ }
+ }
+ }
+
+ // variable returns the value of the named variable at the current location,
+ // creating a forward reference (to be resolved by linkForwardReferences)
+ // when the variable has no value recorded yet.
+ func (s *state) variable(name *Node, t ssa.Type) *ssa.Value {
+ if v := s.vars[name]; v != nil {
+ return v
+ }
+ // TODO: get type? Take Sym as arg?
+ v := s.newValue0A(ssa.OpFwdRef, t, name)
+ s.vars[name] = v
+ return v
+ }
+
+ // mem returns the current memory state, tracked like any other
+ // variable under the sentinel key &memvar.
+ func (s *state) mem() *ssa.Value {
+ return s.variable(&memvar, ssa.TypeMem)
+ }
+
+ // linkForwardReferences resolves all OpFwdRef values left behind by
+ // s.variable, rewriting each into a copy of the variable's incoming value.
+ func (s *state) linkForwardReferences() {
+ // Build ssa graph. Each variable on its first use in a basic block
+ // leaves a FwdRef in that block representing the incoming value
+ // of that variable. This function links that ref up with possible definitions,
+ // inserting Phi values as needed. This is essentially the algorithm
+ // described by Brau, Buchwald, Hack, Leißa, Mallon, and Zwinkau:
+ // http://pp.info.uni-karlsruhe.de/uploads/publikationen/braun13cc.pdf
+ for _, b := range s.f.Blocks {
+ for _, v := range b.Values {
+ if v.Op != ssa.OpFwdRef {
+ continue
+ }
+ name := v.Aux.(*Node)
+ v.Op = ssa.OpCopy
+ v.Aux = nil
+ v.SetArgs1(s.lookupVarIncoming(b, v.Type, name))
+ }
+ }
+ }
+
+ // lookupVarIncoming finds the variable's value at the start of block b.
+ // It may insert a Phi when predecessors disagree.
+ func (s *state) lookupVarIncoming(b *ssa.Block, t ssa.Type, name *Node) *ssa.Value {
+ // TODO(khr): have lookupVarIncoming overwrite the fwdRef or copy it
+ // will be used in, instead of having the result used in a copy value.
+ if b == s.f.Entry {
+ if name == &memvar {
+ return s.startmem
+ }
+ // variable is live at the entry block. Load it.
+ addr := s.decladdrs[name]
+ if addr == nil {
+ // TODO: closure args reach here.
+ s.Unimplementedf("variable %s not found", name)
+ }
+ if _, ok := addr.Aux.(*ssa.ArgSymbol); !ok {
+ s.Fatalf("variable live at start of function %s is not an argument %s", b.Func.Name, name)
+ }
+ return s.entryNewValue2(ssa.OpLoad, t, addr, s.startmem)
+ }
+ // Collect the variable's outgoing value from each predecessor.
+ var vals []*ssa.Value
+ for _, p := range b.Preds {
+ vals = append(vals, s.lookupVarOutgoing(p, t, name))
+ }
+ if len(vals) == 0 {
+ // This block is dead; we have no predecessors and we're not the entry block.
+ // It doesn't matter what we use here as long as it is well-formed,
+ // so use the default/zero value.
+ if name == &memvar {
+ return s.startmem
+ }
+ return s.zeroVal(name.Type)
+ }
+ v0 := vals[0]
+ for i := 1; i < len(vals); i++ {
+ if vals[i] != v0 {
+ // need a phi value
+ v := b.NewValue0(s.peekLine(), ssa.OpPhi, t)
+ v.AddArgs(vals...)
+ return v
+ }
+ }
+ // All predecessors agree; no phi needed.
+ return v0
+ }
+
+ // lookupVarOutgoing finds the variable's value at the end of block b.
+ // It is mutually recursive with lookupVarIncoming.
+ func (s *state) lookupVarOutgoing(b *ssa.Block, t ssa.Type, name *Node) *ssa.Value {
+ m := s.defvars[b.ID]
+ if v, ok := m[name]; ok {
+ return v
+ }
+ // The variable is not defined by b and we haven't
+ // looked it up yet. Generate v, a copy value which
+ // will be the outgoing value of the variable. Then
+ // look up w, the incoming value of the variable.
+ // Make v = copy(w). We need the extra copy to
+ // prevent infinite recursion when looking up the
+ // incoming value of the variable.
+ v := b.NewValue0(s.peekLine(), ssa.OpCopy, t)
+ m[name] = v
+ v.AddArg(s.lookupVarIncoming(b, t, name))
+ return v
+ }
+
+// TODO: the above mutually recursive functions can lead to very deep stacks. Fix that.
+
+ // addEdge records that control may flow from b to c by linking
+ // both sides of the CFG edge.
+ func addEdge(b, c *ssa.Block) {
+ c.Preds = append(c.Preds, b)
+ b.Succs = append(b.Succs, c)
+ }
+
+ // branch is an unresolved branch: a jump Prog emitted before its
+ // target block's address is known. Resolved at the end of genssa.
+ type branch struct {
+ p *obj.Prog // branch instruction
+ b *ssa.Block // target
+ }
+
+ // genssa appends entries to ptxt for each instruction in f.
+ // gcargs and gclocals are filled in with pointer maps for the frame.
+ func genssa(f *ssa.Func, ptxt *obj.Prog, gcargs, gclocals *Sym) {
+ // TODO: line numbers
+
+ if f.FrameSize > 1<<31 {
+ Yyerror("stack frame too large (>2GB)")
+ return
+ }
+
+ e := f.Config.Frontend().(*ssaExport)
+ // We're about to emit a bunch of Progs.
+ // Since the only way to get here is to explicitly request it,
+ // just fail on unimplemented instead of trying to unwind our mess.
+ e.mustImplement = true
+
+ // Fill in the TEXT prog's argument and frame sizes.
+ ptxt.To.Type = obj.TYPE_TEXTSIZE
+ ptxt.To.Val = int32(Rnd(Curfn.Type.Argwid, int64(Widthptr))) // arg size
+ ptxt.To.Offset = f.FrameSize - 8 // TODO: arch-dependent
+
+ // Remember where each block starts.
+ bstart := make([]*obj.Prog, f.NumBlocks())
+
+ // Remember all the branch instructions we've seen
+ // and where they would like to go
+ var branches []branch
+
+ // Emit basic blocks
+ for i, b := range f.Blocks {
+ bstart[b.ID] = Pc
+ // Emit values in block
+ for _, v := range b.Values {
+ genValue(v)
+ }
+ // Emit control flow instructions for block
+ var next *ssa.Block
+ if i < len(f.Blocks)-1 {
+ next = f.Blocks[i+1]
+ }
+ branches = genBlock(b, next, branches)
+ }
+
+ // Resolve branches: all block starts are known now, so patch each
+ // recorded jump to point at its target block's first Prog.
+ for _, br := range branches {
+ br.p.To.Val = bstart[br.b.ID]
+ }
+
+ Pc.As = obj.ARET // overwrite AEND
+
+ // Emit static data
+ if f.StaticData != nil {
+ for _, n := range f.StaticData.([]*Node) {
+ if !gen_as_init(n, false) {
+ // Fixed: the format string previously had one %v verb but
+ // two arguments, so f was never printed (vet: printf arg count).
+ Fatal("non-static data marked as static: %v, fn: %v\n\n", n, f)
+ }
+ }
+ }
+
+ // TODO: liveness
+ // TODO: gcargs
+ // TODO: gclocals
+
+ // TODO: dump frame if -f
+
+ // Emit garbage collection symbols. TODO: put something in them
+ //liveness(Curfn, ptxt, gcargs, gclocals)
+ duint32(gcargs, 0, 0)
+ ggloblsym(gcargs, 4, obj.RODATA|obj.DUPOK)
+ duint32(gclocals, 0, 0)
+ ggloblsym(gclocals, 4, obj.RODATA|obj.DUPOK)
+ }
+
+ // genValue emits the Prog(s) implementing a single SSA value v.
+ // Sets lineno so emitted Progs carry v's source line.
+ func genValue(v *ssa.Value) {
+ lineno = v.Line
+ switch v.Op {
+ case ssa.OpAMD64ADDQ:
+ // TODO: use addq instead of leaq if target is in the right register.
+ p := Prog(x86.ALEAQ)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = regnum(v.Args[0])
+ p.From.Scale = 1
+ p.From.Index = regnum(v.Args[1])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = regnum(v)
+ case ssa.OpAMD64ADDL:
+ p := Prog(x86.ALEAL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = regnum(v.Args[0])
+ p.From.Scale = 1
+ p.From.Index = regnum(v.Args[1])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = regnum(v)
+ case ssa.OpAMD64ADDW:
+ p := Prog(x86.ALEAW)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = regnum(v.Args[0])
+ p.From.Scale = 1
+ p.From.Index = regnum(v.Args[1])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = regnum(v)
+ // 2-address opcode arithmetic, symmetric
+ case ssa.OpAMD64ADDB,
+ ssa.OpAMD64ANDQ, ssa.OpAMD64ANDL, ssa.OpAMD64ANDW, ssa.OpAMD64ANDB,
+ ssa.OpAMD64ORQ, ssa.OpAMD64ORL, ssa.OpAMD64ORW, ssa.OpAMD64ORB,
+ ssa.OpAMD64XORQ, ssa.OpAMD64XORL, ssa.OpAMD64XORW, ssa.OpAMD64XORB,
+ ssa.OpAMD64MULQ, ssa.OpAMD64MULL, ssa.OpAMD64MULW:
+ r := regnum(v)
+ x := regnum(v.Args[0])
+ y := regnum(v.Args[1])
+ // Move one operand into the result register if neither is there,
+ // then apply the op with the other operand as source.
+ if x != r && y != r {
+ p := Prog(regMoveAMD64(v.Type.Size()))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ x = r
+ }
+ p := Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ if x == r {
+ p.From.Reg = y
+ } else {
+ p.From.Reg = x
+ }
+ // 2-address opcode arithmetic, not symmetric
+ case ssa.OpAMD64SUBQ, ssa.OpAMD64SUBL, ssa.OpAMD64SUBW, ssa.OpAMD64SUBB:
+ r := regnum(v)
+ x := regnum(v.Args[0])
+ y := regnum(v.Args[1])
+ var neg bool
+ if y == r {
+ // compute -(y-x) instead
+ x, y = y, x
+ neg = true
+ }
+ if x != r {
+ p := Prog(regMoveAMD64(v.Type.Size()))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+
+ p := Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ p.From.Reg = y
+ if neg {
+ p := Prog(x86.ANEGQ) // TODO: use correct size? This is mostly a hack until regalloc does 2-address correctly
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+ case ssa.OpAMD64SHLQ, ssa.OpAMD64SHLL, ssa.OpAMD64SHLW, ssa.OpAMD64SHLB,
+ ssa.OpAMD64SHRQ, ssa.OpAMD64SHRL, ssa.OpAMD64SHRW, ssa.OpAMD64SHRB,
+ ssa.OpAMD64SARQ, ssa.OpAMD64SARL, ssa.OpAMD64SARW, ssa.OpAMD64SARB:
+ x := regnum(v.Args[0])
+ r := regnum(v)
+ if x != r {
+ // x86 variable shifts use CX as the count register, so the
+ // result must not live there.
+ if r == x86.REG_CX {
+ v.Fatalf("can't implement %s, target and shift both in CX", v.LongString())
+ }
+ p := Prog(regMoveAMD64(v.Type.Size()))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+ p := Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = regnum(v.Args[1]) // should be CX
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpAMD64ADDQconst, ssa.OpAMD64ADDLconst, ssa.OpAMD64ADDWconst:
+ // TODO: use addq instead of leaq if target is in the right register.
+ var asm int
+ switch v.Op {
+ case ssa.OpAMD64ADDQconst:
+ asm = x86.ALEAQ
+ case ssa.OpAMD64ADDLconst:
+ asm = x86.ALEAL
+ case ssa.OpAMD64ADDWconst:
+ asm = x86.ALEAW
+ }
+ p := Prog(asm)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = regnum(v.Args[0])
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = regnum(v)
+ case ssa.OpAMD64MULQconst, ssa.OpAMD64MULLconst, ssa.OpAMD64MULWconst:
+ r := regnum(v)
+ x := regnum(v.Args[0])
+ if r != x {
+ p := Prog(regMoveAMD64(v.Type.Size()))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+ p := Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ // TODO: Teach doasm to compile the three-address multiply imul $c, r1, r2
+ // instead of using the MOVQ above.
+ //p.From3 = new(obj.Addr)
+ //p.From3.Type = obj.TYPE_REG
+ //p.From3.Reg = regnum(v.Args[0])
+ case ssa.OpAMD64ADDBconst,
+ ssa.OpAMD64ANDQconst, ssa.OpAMD64ANDLconst, ssa.OpAMD64ANDWconst, ssa.OpAMD64ANDBconst,
+ ssa.OpAMD64ORQconst, ssa.OpAMD64ORLconst, ssa.OpAMD64ORWconst, ssa.OpAMD64ORBconst,
+ ssa.OpAMD64XORQconst, ssa.OpAMD64XORLconst, ssa.OpAMD64XORWconst, ssa.OpAMD64XORBconst,
+ ssa.OpAMD64SUBQconst, ssa.OpAMD64SUBLconst, ssa.OpAMD64SUBWconst, ssa.OpAMD64SUBBconst,
+ ssa.OpAMD64SHLQconst, ssa.OpAMD64SHLLconst, ssa.OpAMD64SHLWconst, ssa.OpAMD64SHLBconst,
+ ssa.OpAMD64SHRQconst, ssa.OpAMD64SHRLconst, ssa.OpAMD64SHRWconst, ssa.OpAMD64SHRBconst,
+ ssa.OpAMD64SARQconst, ssa.OpAMD64SARLconst, ssa.OpAMD64SARWconst, ssa.OpAMD64SARBconst:
+ // This code compensates for the fact that the register allocator
+ // doesn't understand 2-address instructions yet. TODO: fix that.
+ x := regnum(v.Args[0])
+ r := regnum(v)
+ if x != r {
+ p := Prog(regMoveAMD64(v.Type.Size()))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+ p := Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpAMD64SBBQcarrymask, ssa.OpAMD64SBBLcarrymask:
+ // SBB r, r materializes 0 or -1 from the carry flag.
+ r := regnum(v)
+ p := Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpAMD64LEAQ1, ssa.OpAMD64LEAQ2, ssa.OpAMD64LEAQ4, ssa.OpAMD64LEAQ8:
+ p := Prog(x86.ALEAQ)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = regnum(v.Args[0])
+ switch v.Op {
+ case ssa.OpAMD64LEAQ1:
+ p.From.Scale = 1
+ case ssa.OpAMD64LEAQ2:
+ p.From.Scale = 2
+ case ssa.OpAMD64LEAQ4:
+ p.From.Scale = 4
+ case ssa.OpAMD64LEAQ8:
+ p.From.Scale = 8
+ }
+ p.From.Index = regnum(v.Args[1])
+ addAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = regnum(v)
+ case ssa.OpAMD64LEAQ:
+ p := Prog(x86.ALEAQ)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = regnum(v.Args[0])
+ addAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = regnum(v)
+ case ssa.OpAMD64CMPQ, ssa.OpAMD64CMPL, ssa.OpAMD64CMPW, ssa.OpAMD64CMPB,
+ ssa.OpAMD64TESTQ, ssa.OpAMD64TESTL, ssa.OpAMD64TESTW, ssa.OpAMD64TESTB:
+ p := Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = regnum(v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = regnum(v.Args[1])
+ case ssa.OpAMD64CMPQconst, ssa.OpAMD64CMPLconst, ssa.OpAMD64CMPWconst, ssa.OpAMD64CMPBconst,
+ ssa.OpAMD64TESTQconst, ssa.OpAMD64TESTLconst, ssa.OpAMD64TESTWconst, ssa.OpAMD64TESTBconst:
+ p := Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = regnum(v.Args[0])
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = v.AuxInt
+ case ssa.OpAMD64MOVBconst, ssa.OpAMD64MOVWconst, ssa.OpAMD64MOVLconst, ssa.OpAMD64MOVQconst:
+ x := regnum(v)
+ p := Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ // Sign-extend the constant from its stored width.
+ var i int64
+ switch v.Op {
+ case ssa.OpAMD64MOVBconst:
+ i = int64(int8(v.AuxInt))
+ case ssa.OpAMD64MOVWconst:
+ i = int64(int16(v.AuxInt))
+ case ssa.OpAMD64MOVLconst:
+ i = int64(int32(v.AuxInt))
+ case ssa.OpAMD64MOVQconst:
+ i = v.AuxInt
+ }
+ p.From.Offset = i
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x
+ case ssa.OpAMD64MOVQload, ssa.OpAMD64MOVLload, ssa.OpAMD64MOVWload, ssa.OpAMD64MOVBload, ssa.OpAMD64MOVBQSXload, ssa.OpAMD64MOVBQZXload:
+ p := Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = regnum(v.Args[0])
+ addAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = regnum(v)
+ case ssa.OpAMD64MOVQloadidx8:
+ p := Prog(x86.AMOVQ)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = regnum(v.Args[0])
+ addAux(&p.From, v)
+ p.From.Scale = 8
+ p.From.Index = regnum(v.Args[1])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = regnum(v)
+ case ssa.OpAMD64MOVQstore, ssa.OpAMD64MOVLstore, ssa.OpAMD64MOVWstore, ssa.OpAMD64MOVBstore:
+ p := Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = regnum(v.Args[1])
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = regnum(v.Args[0])
+ addAux(&p.To, v)
+ case ssa.OpAMD64MOVQstoreidx8:
+ p := Prog(x86.AMOVQ)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = regnum(v.Args[2])
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = regnum(v.Args[0])
+ p.To.Scale = 8
+ p.To.Index = regnum(v.Args[1])
+ addAux(&p.To, v)
+ case ssa.OpAMD64MOVLQSX, ssa.OpAMD64MOVWQSX, ssa.OpAMD64MOVBQSX, ssa.OpAMD64MOVLQZX, ssa.OpAMD64MOVWQZX, ssa.OpAMD64MOVBQZX:
+ p := Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = regnum(v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = regnum(v)
+ case ssa.OpAMD64MOVXzero:
+ // Zero AuxInt bytes at (reg) using the widest stores that fit.
+ nb := v.AuxInt
+ offset := int64(0)
+ reg := regnum(v.Args[0])
+ for nb >= 8 {
+ nb, offset = movZero(x86.AMOVQ, 8, nb, offset, reg)
+ }
+ for nb >= 4 {
+ nb, offset = movZero(x86.AMOVL, 4, nb, offset, reg)
+ }
+ for nb >= 2 {
+ nb, offset = movZero(x86.AMOVW, 2, nb, offset, reg)
+ }
+ for nb >= 1 {
+ nb, offset = movZero(x86.AMOVB, 1, nb, offset, reg)
+ }
+ case ssa.OpCopy: // TODO: lower to MOVQ earlier?
+ if v.Type.IsMemory() {
+ return
+ }
+ x := regnum(v.Args[0])
+ y := regnum(v)
+ if x != y {
+ p := Prog(x86.AMOVQ)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = y
+ }
+ case ssa.OpLoadReg:
+ if v.Type.IsFlags() {
+ v.Unimplementedf("load flags not implemented: %v", v.LongString())
+ return
+ }
+ p := Prog(movSize(v.Type.Size()))
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = x86.REG_SP
+ p.From.Offset = localOffset(v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = regnum(v)
+ case ssa.OpStoreReg:
+ if v.Type.IsFlags() {
+ v.Unimplementedf("store flags not implemented: %v", v.LongString())
+ return
+ }
+ p := Prog(movSize(v.Type.Size()))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = regnum(v.Args[0])
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = x86.REG_SP
+ p.To.Offset = localOffset(v)
+ case ssa.OpPhi:
+ // just check to make sure regalloc did it right
+ f := v.Block.Func
+ loc := f.RegAlloc[v.ID]
+ for _, a := range v.Args {
+ if f.RegAlloc[a.ID] != loc { // TODO: .Equal() instead?
+ v.Fatalf("phi arg at different location than phi %v %v %v %v", v, loc, a, f.RegAlloc[a.ID])
+ }
+ }
+ case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64, ssa.OpConstString, ssa.OpConstNil, ssa.OpConstBool:
+ if v.Block.Func.RegAlloc[v.ID] != nil {
+ v.Fatalf("const value %v shouldn't have a location", v)
+ }
+ case ssa.OpArg:
+ // memory arg needs no code
+ // TODO: check that only mem arg goes here.
+ case ssa.OpAMD64CALLstatic:
+ p := Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = Linksym(v.Aux.(*Sym))
+ case ssa.OpAMD64CALLclosure:
+ p := Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = regnum(v.Args[0])
+ case ssa.OpAMD64NEGQ, ssa.OpAMD64NEGL, ssa.OpAMD64NEGW, ssa.OpAMD64NEGB,
+ ssa.OpAMD64NOTQ, ssa.OpAMD64NOTL, ssa.OpAMD64NOTW, ssa.OpAMD64NOTB:
+ // NOTE(review): the destination register is taken from Args[0],
+ // not regnum(v) — this assumes regalloc placed the result in the
+ // same register as the input; confirm against the regalloc pass.
+ p := Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = regnum(v.Args[0])
+ case ssa.OpSP, ssa.OpSB:
+ // nothing to do
+ case ssa.OpAMD64SETEQ, ssa.OpAMD64SETNE,
+ ssa.OpAMD64SETL, ssa.OpAMD64SETLE,
+ ssa.OpAMD64SETG, ssa.OpAMD64SETGE,
+ ssa.OpAMD64SETB, ssa.OpAMD64SETBE,
+ ssa.OpAMD64SETA, ssa.OpAMD64SETAE:
+ p := Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = regnum(v)
+ case ssa.OpAMD64InvertFlags:
+ v.Fatalf("InvertFlags should never make it to codegen %v", v)
+ case ssa.OpAMD64REPSTOSQ:
+ Prog(x86.AREP)
+ Prog(x86.ASTOSQ)
+ v.Unimplementedf("REPSTOSQ clobbers not implemented: %s", v.LongString())
+ case ssa.OpAMD64REPMOVSB:
+ Prog(x86.AREP)
+ Prog(x86.AMOVSB)
+ v.Unimplementedf("REPMOVSB clobbers not implemented: %s", v.LongString())
+ default:
+ v.Unimplementedf("genValue not implemented: %s", v.LongString())
+ }
+ }
+
+// movSize returns the MOV instruction of the given width.
+// Valid widths are 1, 2, 4, and 8 bytes; any other width panics.
+func movSize(width int64) int {
+ switch width {
+ case 1:
+ return x86.AMOVB
+ case 2:
+ return x86.AMOVW
+ case 4:
+ return x86.AMOVL
+ case 8:
+ return x86.AMOVQ
+ default:
+ panic(fmt.Errorf("bad movSize %d", width))
+ }
+}
+
+// movZero generates a register indirect move with a 0 immediate and keeps track of bytes left and next offset
+// as is the move opcode, width its operand size in bytes, nbytes the
+// bytes still to zero, offset the current store offset from register
+// regnum. It returns the remaining byte count and the next offset.
+func movZero(as int, width int64, nbytes int64, offset int64, regnum int16) (nleft int64, noff int64) {
+ p := Prog(as)
+ // TODO: use zero register on archs that support it.
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = regnum
+ p.To.Offset = offset
+ offset += width
+ nleft = nbytes - width
+ return nleft, offset
+}
+
+// blockJump maps each conditional block kind to the jump instruction
+// taken when the condition holds (asm) and its inverse (invasm).
+// genBlock uses the inverse form when the taken successor is laid out next.
+var blockJump = [...]struct{ asm, invasm int }{
+ ssa.BlockAMD64EQ: {x86.AJEQ, x86.AJNE},
+ ssa.BlockAMD64NE: {x86.AJNE, x86.AJEQ},
+ ssa.BlockAMD64LT: {x86.AJLT, x86.AJGE},
+ ssa.BlockAMD64GE: {x86.AJGE, x86.AJLT},
+ ssa.BlockAMD64LE: {x86.AJLE, x86.AJGT},
+ ssa.BlockAMD64GT: {x86.AJGT, x86.AJLE},
+ ssa.BlockAMD64ULT: {x86.AJCS, x86.AJCC},
+ ssa.BlockAMD64UGE: {x86.AJCC, x86.AJCS},
+ ssa.BlockAMD64UGT: {x86.AJHI, x86.AJLS},
+ ssa.BlockAMD64ULE: {x86.AJLS, x86.AJHI},
+}
+
+// genBlock generates the control-flow code at the end of block b.
+// next is the block scheduled immediately after b; jumps to next are
+// elided. Unresolved branch fixups are appended to branches, and the
+// updated slice is returned.
+func genBlock(b, next *ssa.Block, branches []branch) []branch {
+ lineno = b.Line
+ switch b.Kind {
+ case ssa.BlockPlain, ssa.BlockCall:
+ // Both kinds simply fall through to Succs[0]; emit a jump only
+ // when that successor is not the next block in the layout.
+ // (The two arms were previously duplicated verbatim.)
+ if b.Succs[0] != next {
+ p := Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ branches = append(branches, branch{p, b.Succs[0]})
+ }
+ case ssa.BlockExit:
+ Prog(obj.ARET)
+ case ssa.BlockAMD64EQ, ssa.BlockAMD64NE,
+ ssa.BlockAMD64LT, ssa.BlockAMD64GE,
+ ssa.BlockAMD64LE, ssa.BlockAMD64GT,
+ ssa.BlockAMD64ULT, ssa.BlockAMD64UGT,
+ ssa.BlockAMD64ULE, ssa.BlockAMD64UGE:
+
+ jmp := blockJump[b.Kind]
+ switch next {
+ case b.Succs[0]:
+ // Taken target is laid out next: invert the condition and
+ // branch to the other successor.
+ p := Prog(jmp.invasm)
+ p.To.Type = obj.TYPE_BRANCH
+ branches = append(branches, branch{p, b.Succs[1]})
+ case b.Succs[1]:
+ // Not-taken target is laid out next: branch on the condition.
+ p := Prog(jmp.asm)
+ p.To.Type = obj.TYPE_BRANCH
+ branches = append(branches, branch{p, b.Succs[0]})
+ default:
+ // Neither successor is next: conditional jump plus an
+ // unconditional jump.
+ p := Prog(jmp.asm)
+ p.To.Type = obj.TYPE_BRANCH
+ branches = append(branches, branch{p, b.Succs[0]})
+ q := Prog(obj.AJMP)
+ q.To.Type = obj.TYPE_BRANCH
+ branches = append(branches, branch{q, b.Succs[1]})
+ }
+
+ default:
+ b.Unimplementedf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
+ }
+ return branches
+}
+
+// addAux adds the offset in the aux fields (AuxInt and Aux) of v to a.
+// a must already be a memory address. The Aux field, when present,
+// further classifies the address as extern, argument, or auto.
+func addAux(a *obj.Addr, v *ssa.Value) {
+ if a.Type != obj.TYPE_MEM {
+ v.Fatalf("bad addAux addr %s", a)
+ }
+ // add integer offset
+ a.Offset += v.AuxInt
+
+ // If no additional symbol offset, we're done.
+ if v.Aux == nil {
+ return
+ }
+ // Add symbol's offset from its base register.
+ switch sym := v.Aux.(type) {
+ case *ssa.ExternSymbol:
+ a.Name = obj.NAME_EXTERN
+ a.Sym = Linksym(sym.Sym.(*Sym))
+ case *ssa.ArgSymbol:
+ // Arguments are addressed relative to the top of the frame
+ // (FrameSize above SP).
+ a.Offset += v.Block.Func.FrameSize + sym.Offset
+ case *ssa.AutoSymbol:
+ // Autos must have had their offsets assigned by stack allocation.
+ if sym.Offset == -1 {
+ v.Fatalf("auto symbol %s offset not calculated", sym.Sym)
+ }
+ a.Offset += sym.Offset
+ default:
+ v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
+ }
+}
+
+// extendIndex extends v to a full pointer width.
+// Pointer-width values are returned unchanged; narrower values are
+// sign- or zero-extended according to their type. Values wider than
+// a pointer are not yet handled (32-bit targets with 64-bit indexes).
+func (s *state) extendIndex(v *ssa.Value) *ssa.Value {
+ size := v.Type.Size()
+ if size == s.config.PtrSize {
+ return v
+ }
+ if size > s.config.PtrSize {
+ // TODO: truncate 64-bit indexes on 32-bit pointer archs. We'd need to test
+ // the high word and branch to out-of-bounds failure if it is not 0.
+ s.Unimplementedf("64->32 index truncation not implemented")
+ return v
+ }
+
+ // Extend value to the required size
+ var op ssa.Op
+ if v.Type.IsSigned() {
+ // Case keys encode (value size, pointer size) as 10*size+PtrSize,
+ // e.g. 18 means a 1-byte value extended to an 8-byte pointer.
+ switch 10*size + s.config.PtrSize {
+ case 14:
+ op = ssa.OpSignExt8to32
+ case 18:
+ op = ssa.OpSignExt8to64
+ case 24:
+ op = ssa.OpSignExt16to32
+ case 28:
+ op = ssa.OpSignExt16to64
+ case 48:
+ op = ssa.OpSignExt32to64
+ default:
+ s.Fatalf("bad signed index extension %s", v.Type)
+ }
+ } else {
+ // Same 10*size+PtrSize encoding as the signed case above.
+ switch 10*size + s.config.PtrSize {
+ case 14:
+ op = ssa.OpZeroExt8to32
+ case 18:
+ op = ssa.OpZeroExt8to64
+ case 24:
+ op = ssa.OpZeroExt16to32
+ case 28:
+ op = ssa.OpZeroExt16to64
+ case 48:
+ op = ssa.OpZeroExt32to64
+ default:
+ s.Fatalf("bad unsigned index extension %s", v.Type)
+ }
+ }
+ return s.newValue1(op, s.config.Uintptr, v)
+}
+
+// ssaRegToReg maps ssa register numbers to obj register numbers.
+// The slice is indexed by ssa.Register.Num (see regnum), so entry
+// order must match the register numbering used by regalloc.
+var ssaRegToReg = [...]int16{
+ x86.REG_AX,
+ x86.REG_CX,
+ x86.REG_DX,
+ x86.REG_BX,
+ x86.REG_SP,
+ x86.REG_BP,
+ x86.REG_SI,
+ x86.REG_DI,
+ x86.REG_R8,
+ x86.REG_R9,
+ x86.REG_R10,
+ x86.REG_R11,
+ x86.REG_R12,
+ x86.REG_R13,
+ x86.REG_R14,
+ x86.REG_R15,
+ x86.REG_X0,
+ x86.REG_X1,
+ x86.REG_X2,
+ x86.REG_X3,
+ x86.REG_X4,
+ x86.REG_X5,
+ x86.REG_X6,
+ x86.REG_X7,
+ x86.REG_X8,
+ x86.REG_X9,
+ x86.REG_X10,
+ x86.REG_X11,
+ x86.REG_X12,
+ x86.REG_X13,
+ x86.REG_X14,
+ x86.REG_X15,
+ 0, // SB isn't a real register. We fill an Addr.Reg field with 0 in this case.
+ // TODO: arch-dependent
+}
+
+// regMoveAMD64 returns the register->register move opcode for the given width.
+// Valid widths are 1, 2, 4, and 8 bytes; any other width panics.
+// TODO: generalize for all architectures?
+func regMoveAMD64(width int64) int {
+ switch width {
+ case 1:
+ return x86.AMOVB
+ case 2:
+ return x86.AMOVW
+ case 4:
+ return x86.AMOVL
+ case 8:
+ return x86.AMOVQ
+ default:
+ // Include the offending width in the panic value, matching movSize.
+ panic(fmt.Errorf("bad register width %d", width))
+ }
+}
+
+// regnum returns the register (in cmd/internal/obj numbering) to
+// which v has been allocated. Panics if v is not assigned to a
+// register (the type assertion on the RegAlloc entry fails).
+func regnum(v *ssa.Value) int16 {
+ return ssaRegToReg[v.Block.Func.RegAlloc[v.ID].(*ssa.Register).Num]
+}
+
+// localOffset returns the offset below the frame pointer where
+// a stack-allocated local has been allocated. Panics if v
+// is not assigned to a local slot.
+// TODO: Make this panic again once it stops happening routinely.
+func localOffset(v *ssa.Value) int64 {
+ reg := v.Block.Func.RegAlloc[v.ID]
+ slot, ok := reg.(*ssa.LocalSlot)
+ if !ok {
+ // Report instead of panicking; 0 is a placeholder offset.
+ v.Unimplementedf("localOffset of non-LocalSlot value: %s", v.LongString())
+ return 0
+ }
+ return slot.Idx
+}
+
+// ssaExport exports a bunch of compiler services for the ssa backend.
+// It implements the ssa.Frontend interface.
+type ssaExport struct {
+ log bool // print messages passed to Logf
+ unimplemented bool // set once Unimplementedf has been called; suppresses further logging
+ mustImplement bool // treat unimplemented features as fatal errors
+}
+
+// StringData returns a symbol (a *Sym wrapped in an interface) which
+// is the data component of a global string constant containing s.
+func (*ssaExport) StringData(s string) interface{} {
+ // TODO: is idealstring correct? It might not matter...
+ _, data := stringsym(s)
+ return &ssa.ExternSymbol{Typ: idealstring, Sym: data}
+}
+
+// Logf logs a message from the compiler when logging is enabled.
+func (e *ssaExport) Logf(msg string, args ...interface{}) {
+ // If e was marked as unimplemented, anything could happen. Ignore.
+ if e.log && !e.unimplemented {
+ fmt.Printf(msg, args...)
+ }
+}
+
+// Fatalf reports a compiler error and exits.
+func (e *ssaExport) Fatalf(msg string, args ...interface{}) {
+ // If e was marked as unimplemented, anything could happen. Ignore.
+ if !e.unimplemented {
+ Fatal(msg, args...)
+ }
+}
+
+// Unimplementedf reports that the function cannot be compiled.
+// It will be removed once SSA work is complete.
+func (e *ssaExport) Unimplementedf(msg string, args ...interface{}) {
+ if e.mustImplement {
+ Fatal(msg, args...)
+ }
+ const alwaysLog = false // enable to calculate top unimplemented features
+ if !e.unimplemented && (e.log || alwaysLog) {
+ // first implementation failure, print explanation
+ fmt.Printf("SSA unimplemented: "+msg+"\n", args...)
+ }
+ e.unimplemented = true
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "bytes"
+ "internal/testenv"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+// TODO: move all these tests elsewhere?
+// Perhaps teach test/run.go how to run them with a new action verb.
+// runTest builds and runs testdata/filename with "go run" and fails
+// the test if the program errors, prints to stdout, or reports an
+// unimplemented SSA feature on stderr.
+func runTest(t *testing.T, filename string) {
+ if runtime.GOARCH != "amd64" {
+ t.Skipf("skipping SSA tests on %s for now", runtime.GOARCH)
+ }
+ testenv.MustHaveGoBuild(t)
+ cmd := exec.Command("go", "run", filepath.Join("testdata", filename))
+ var outbuf, errbuf bytes.Buffer
+ cmd.Stdout = &outbuf
+ cmd.Stderr = &errbuf
+ // TODO: set GOGC=off until we have stackmaps
+ if err := cmd.Run(); err != nil {
+ t.Fatalf("Failed: %v:\nOut: %s\nStderr: %s\n", err, &outbuf, &errbuf)
+ }
+ if out := outbuf.String(); out != "" {
+ t.Errorf("Stdout = %s\nWant empty", out)
+ }
+ if errs := errbuf.String(); strings.Contains(errs, "SSA unimplemented") {
+ t.Errorf("Unimplemented message found in stderr:\n%s", errs)
+ }
+}
+
+// TestShortCircuit tests OANDAND and OOROR expressions and short circuiting.
+func TestShortCircuit(t *testing.T) { runTest(t, "short_ssa.go") }
+
+// TestBreakContinue tests that continue and break statements do what they say.
+func TestBreakContinue(t *testing.T) { runTest(t, "break_ssa.go") }
+
+// TestArithmetic tests that both backends have the same result for arithmetic expressions.
+func TestArithmetic(t *testing.T) { runTest(t, "arith_ssa.go") }
--- /dev/null
+// run
+
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests arithmetic expressions
+
+package main
+
+// test64BitConstMult tests that rewrite rules don't fold 64 bit constants
+// into multiply instructions.
+func test64BitConstMult(a, b int64) {
+ want := 34359738369*a + b*34359738370
+ if got := test64BitConstMult_ssa(a, b); want != got {
+ println("test64BitConstMult failed, wanted", want, "got", got)
+ failed = true
+ }
+}
+
+// test64BitConstMult_ssa is the SSA-compiled version of the expression above.
+func test64BitConstMult_ssa(a, b int64) int64 {
+ switch { // prevent inlining
+ }
+ return 34359738369*a + b*34359738370
+}
+
+// test64BitConstAdd tests that rewrite rules don't fold 64 bit constants
+// into add instructions.
+func test64BitConstAdd(a, b int64) {
+ want := a + 575815584948629622 + b + 2991856197886747025
+ if got := test64BitConstAdd_ssa(a, b); want != got {
+ println("test64BitConstAdd failed, wanted", want, "got", got)
+ failed = true
+ }
+}
+
+// test64BitConstAdd_ssa is the SSA-compiled version of the expression above.
+func test64BitConstAdd_ssa(a, b int64) int64 {
+ switch { // prevent inlining
+ }
+ return a + 575815584948629622 + b + 2991856197886747025
+}
+
+// testRegallocCVSpill tests that regalloc spills a value whose last use is the
+// current value.
+func testRegallocCVSpill(a, b, c, d int8) {
+ want := a + -32 + b + 63*c*-87*d
+ if got := testRegallocCVSpill_ssa(a, b, c, d); want != got {
+ println("testRegallocCVSpill failed, wanted", want, "got", got)
+ failed = true
+ }
+}
+
+// testRegallocCVSpill_ssa is the SSA-compiled version of the expression above.
+func testRegallocCVSpill_ssa(a, b, c, d int8) int8 {
+ switch { // prevent inlining
+ }
+ return a + -32 + b + 63*c*-87*d
+}
+
+// testBitwiseLogic checks AND and OR against precomputed expected values,
+// recording a failure like the other test helpers in this file.
+func testBitwiseLogic() {
+ a, b := uint32(57623283), uint32(1314713839)
+ if want, got := uint32(38551779), testBitwiseAnd_ssa(a, b); want != got {
+ println("testBitwiseAnd failed, wanted", want, "got", got)
+ failed = true
+ }
+ if want, got := uint32(1333785343), testBitwiseOr_ssa(a, b); want != got {
+ // Was mislabeled "testBitwiseAnd" and did not set failed.
+ println("testBitwiseOr failed, wanted", want, "got", got)
+ failed = true
+ }
+}
+
+// testBitwiseAnd_ssa returns a & b; the bare switch prevents inlining
+// so the function is compiled on its own by the SSA backend.
+func testBitwiseAnd_ssa(a, b uint32) uint32 {
+ switch { // prevent inlining
+ }
+ return a & b
+}
+
+// testBitwiseOr_ssa returns a | b; see testBitwiseAnd_ssa.
+func testBitwiseOr_ssa(a, b uint32) uint32 {
+ switch { // prevent inlining
+ }
+ return a | b
+}
+
+var failed = false
+
+// main runs every test helper in this file and panics if any failed.
+func main() {
+
+ test64BitConstMult(1, 2)
+ test64BitConstAdd(1, 2)
+ testRegallocCVSpill(1, 2, 3, 4)
+ // testBitwiseLogic was defined but never invoked.
+ testBitwiseLogic()
+
+ if failed {
+ panic("failed")
+ }
+}
--- /dev/null
+// run
+
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests continue and break.
+
+package main
+
+// continuePlain_ssa exercises plain continue in a single loop.
+func continuePlain_ssa() int {
+ var n int
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ continue
+ }
+ n = i
+ }
+ return n
+}
+
+// continueLabeled_ssa exercises continue with a label on the same loop.
+func continueLabeled_ssa() int {
+ var n int
+Next:
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ continue Next
+ }
+ n = i
+ }
+ return n
+}
+
+// continuePlainInner_ssa exercises plain continue in a nested inner loop.
+func continuePlainInner_ssa() int {
+ var n int
+ for j := 0; j < 30; j += 10 {
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ continue
+ }
+ n = i
+ }
+ n += j
+ }
+ return n
+}
+
+// continueLabeledInner_ssa exercises continue targeting a label on the inner loop.
+func continueLabeledInner_ssa() int {
+ var n int
+ for j := 0; j < 30; j += 10 {
+ Next:
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ continue Next
+ }
+ n = i
+ }
+ n += j
+ }
+ return n
+}
+
+// continueLabeledOuter_ssa exercises continue targeting a label on the outer loop.
+func continueLabeledOuter_ssa() int {
+ var n int
+Next:
+ for j := 0; j < 30; j += 10 {
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ continue Next
+ }
+ n = i
+ }
+ n += j
+ }
+ return n
+}
+
+// breakPlain_ssa exercises plain break in a single loop.
+func breakPlain_ssa() int {
+ var n int
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ break
+ }
+ n = i
+ }
+ return n
+}
+
+// breakLabeled_ssa exercises break with a label on the same loop.
+func breakLabeled_ssa() int {
+ var n int
+Next:
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ break Next
+ }
+ n = i
+ }
+ return n
+}
+
+// breakPlainInner_ssa exercises plain break in a nested inner loop.
+func breakPlainInner_ssa() int {
+ var n int
+ for j := 0; j < 30; j += 10 {
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ break
+ }
+ n = i
+ }
+ n += j
+ }
+ return n
+}
+
+// breakLabeledInner_ssa exercises break targeting a label on the inner loop.
+func breakLabeledInner_ssa() int {
+ var n int
+ for j := 0; j < 30; j += 10 {
+ Next:
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ break Next
+ }
+ n = i
+ }
+ n += j
+ }
+ return n
+}
+
+// breakLabeledOuter_ssa exercises break targeting a label on the outer loop.
+func breakLabeledOuter_ssa() int {
+ var n int
+Next:
+ for j := 0; j < 30; j += 10 {
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ break Next
+ }
+ n = i
+ }
+ n += j
+ }
+ return n
+}
+
+var g, h int // globals to ensure optimizations don't collapse our switch statements
+
+// switchPlain_ssa exercises plain break inside a switch case.
+func switchPlain_ssa() int {
+ var n int
+ switch g {
+ case 0:
+ n = 1
+ break
+ n = 2 // unreachable: break above exits the switch
+ }
+ return n
+}
+
+// switchLabeled_ssa exercises break with a label on the same switch.
+func switchLabeled_ssa() int {
+ var n int
+Done:
+ switch g {
+ case 0:
+ n = 1
+ break Done
+ n = 2 // unreachable: break Done exits the switch
+ }
+ return n
+}
+
+// switchPlainInner_ssa exercises plain break in a nested inner switch;
+// the break exits only the inner switch, so n = 2 still runs.
+func switchPlainInner_ssa() int {
+ var n int
+ switch g {
+ case 0:
+ n = 1
+ switch h {
+ case 0:
+ n += 10
+ break
+ }
+ n = 2
+ }
+ return n
+}
+
+// switchLabeledInner_ssa exercises break targeting a label on the inner
+// switch; n = 2 still runs afterwards.
+func switchLabeledInner_ssa() int {
+ var n int
+ switch g {
+ case 0:
+ n = 1
+ Done:
+ switch h {
+ case 0:
+ n += 10
+ break Done
+ }
+ n = 2
+ }
+ return n
+}
+
+// switchLabeledOuter_ssa exercises break targeting a label on the outer
+// switch from inside the inner one.
+func switchLabeledOuter_ssa() int {
+ var n int
+Done:
+ switch g {
+ case 0:
+ n = 1
+ switch h {
+ case 0:
+ n += 10
+ break Done
+ }
+ n = 2 // unreachable: break Done exits the outer switch
+ }
+ return n
+}
+
+// main runs each test function once and compares its result against
+// the expected value, panicking if any test fails.
+func main() {
+ tests := [...]struct {
+ name string
+ fn func() int
+ want int
+ }{
+ {"continuePlain_ssa", continuePlain_ssa, 9},
+ {"continueLabeled_ssa", continueLabeled_ssa, 9},
+ {"continuePlainInner_ssa", continuePlainInner_ssa, 29},
+ {"continueLabeledInner_ssa", continueLabeledInner_ssa, 29},
+ {"continueLabeledOuter_ssa", continueLabeledOuter_ssa, 5},
+
+ {"breakPlain_ssa", breakPlain_ssa, 5},
+ {"breakLabeled_ssa", breakLabeled_ssa, 5},
+ {"breakPlainInner_ssa", breakPlainInner_ssa, 25},
+ {"breakLabeledInner_ssa", breakLabeledInner_ssa, 25},
+ {"breakLabeledOuter_ssa", breakLabeledOuter_ssa, 5},
+
+ {"switchPlain_ssa", switchPlain_ssa, 1},
+ {"switchLabeled_ssa", switchLabeled_ssa, 1},
+ {"switchPlainInner_ssa", switchPlainInner_ssa, 2},
+ {"switchLabeledInner_ssa", switchLabeledInner_ssa, 2},
+ {"switchLabeledOuter_ssa", switchLabeledOuter_ssa, 11},
+
+ // no select tests; they're identical to switch
+ }
+
+ var failed bool
+ for _, test := range tests {
+ // Call the function once and compare that same result.
+ // The original called test.fn() twice, printing the first
+ // result while comparing the second against want.
+ if got := test.fn(); got != test.want {
+ print(test.name, "()=", got, ", want ", test.want, "\n")
+ failed = true
+ }
+ }
+
+ if failed {
+ panic("failed")
+ }
+}
--- /dev/null
+// run
+
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests short circuiting.
+
+package main
+
+// and_ssa must evaluate rightCall(arg2) only when arg1 is true.
+func and_ssa(arg1, arg2 bool) bool {
+ return arg1 && rightCall(arg2)
+}
+
+// or_ssa must evaluate rightCall(arg2) only when arg1 is false.
+func or_ssa(arg1, arg2 bool) bool {
+ return arg1 || rightCall(arg2)
+}
+
+var rightCalled bool
+
+// rightCall records that it was called and returns v.
+// The dead code after return exists only to bloat the function body.
+func rightCall(v bool) bool {
+ rightCalled = true
+ return v
+ select {} // hack to prevent inlining
+ panic("unreached")
+}
+
+// testAnd and testOr check both the result and whether the right-hand
+// operand was evaluated: && evaluates it only when arg1 is true,
+// || only when arg1 is false.
+func testAnd(arg1, arg2, wantRes bool) { testShortCircuit("AND", arg1, arg2, and_ssa, arg1, wantRes) }
+func testOr(arg1, arg2, wantRes bool) { testShortCircuit("OR", arg1, arg2, or_ssa, !arg1, wantRes) }
+
+// testShortCircuit runs fn and verifies both the evaluation of the
+// right operand (via the rightCalled global) and the result.
+func testShortCircuit(opName string, arg1, arg2 bool, fn func(bool, bool) bool, wantRightCall, wantRes bool) {
+ rightCalled = false
+ got := fn(arg1, arg2)
+ if rightCalled != wantRightCall {
+ println("failed for", arg1, opName, arg2, "; rightCalled=", rightCalled, "want=", wantRightCall)
+ failed = true
+ }
+ if wantRes != got {
+ println("failed for", arg1, opName, arg2, "; res=", got, "want=", wantRes)
+ failed = true
+ }
+}
+
+var failed = false
+
+// main exercises all four input combinations for && and || and
+// panics if any check recorded a failure.
+func main() {
+ testAnd(false, false, false)
+ testAnd(false, true, false)
+ testAnd(true, false, false)
+ testAnd(true, true, true)
+
+ testOr(false, false, false)
+ testOr(false, true, true)
+ testOr(true, false, true)
+ testOr(true, true, true)
+
+ if failed {
+ panic("failed")
+ }
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file provides methods that let us export a Type as an ../ssa:Type.
+// We don't export this package's Type directly because it would lead
+// to an import cycle with this package and ../ssa.
+// TODO: move Type to its own package, then we don't need to dance around import cycles.
+
+package gc
+
+import (
+ "cmd/compile/internal/ssa"
+)
+
+// Size returns the width of t in bytes, computing it if necessary.
+func (t *Type) Size() int64 {
+ dowidth(t)
+ return t.Width
+}
+
+// Alignment returns the required alignment of t in bytes.
+func (t *Type) Alignment() int64 {
+ dowidth(t)
+ return int64(t.Align)
+}
+
+// Equal reports whether u is also a *gc.Type and is equal to t
+// according to Eqtype.
+func (t *Type) Equal(u ssa.Type) bool {
+ if x, ok := u.(*Type); ok {
+ return Eqtype(t, x)
+ }
+ return false
+}
+
+// IsBoolean reports whether t is a boolean type.
+func (t *Type) IsBoolean() bool {
+ return t.Etype == TBOOL
+}
+
+// IsInteger reports whether t is any integer type, including uintptr.
+func (t *Type) IsInteger() bool {
+ switch t.Etype {
+ case TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, TUINT64, TINT, TUINT, TUINTPTR:
+ return true
+ }
+ return false
+}
+
+// IsSigned reports whether t is a signed integer type.
+func (t *Type) IsSigned() bool {
+ switch t.Etype {
+ case TINT8, TINT16, TINT32, TINT64, TINT:
+ return true
+ }
+ return false
+}
+
+// IsFloat reports whether t is a floating-point type.
+func (t *Type) IsFloat() bool {
+ return t.Etype == TFLOAT32 || t.Etype == TFLOAT64
+}
+
+// IsPtr reports whether t is pointer-shaped; maps, channels, and
+// funcs are included along with ordinary and unsafe pointers.
+func (t *Type) IsPtr() bool {
+ return t.Etype == TPTR32 || t.Etype == TPTR64 || t.Etype == TUNSAFEPTR ||
+ t.Etype == TMAP || t.Etype == TCHAN || t.Etype == TFUNC
+}
+
+// IsString reports whether t is the string type.
+func (t *Type) IsString() bool {
+ return t.Etype == TSTRING
+}
+
+// IsSlice reports whether t is a slice: an array type with a
+// negative bound (see the Bound field's comment).
+func (t *Type) IsSlice() bool {
+ return t.Etype == TARRAY && t.Bound < 0
+}
+
+// Elem returns the element type of t (for arrays, slices, pointers, ...).
+func (t *Type) Elem() ssa.Type {
+ return t.Type
+}
+
+// PtrTo returns the type *t.
+func (t *Type) PtrTo() ssa.Type {
+ return Ptrto(t)
+}
+
+// gc types are never the SSA-internal memory or flags types.
+func (t *Type) IsMemory() bool { return false }
+func (t *Type) IsFlags() bool { return false }
--- /dev/null
+This is a list of things that need to be worked on. It is by no means complete.
+
+Allocation
+- Allocation of decls in stackalloc. Decls survive if they are
+ addrtaken or are too large for registerization.
+
+Scheduling
+ - Make sure loads are scheduled correctly with respect to stores.
+ Same for flag type values. We can't have more than one value of
+ mem or flag types live at once.
+ - Reduce register pressure. Schedule instructions which kill
+ variables first.
+
+Values
+ - Store *Type instead of Type? Keep an array of used Types in Func
+ and reference by id? Unify with the type ../gc so we just use a
+ pointer instead of an interface?
+ - Recycle dead values instead of using GC to do that.
+ - A lot of Aux fields are just int64. Add a separate AuxInt field?
+ If not that, then cache the interfaces that wrap int64s.
+ - OpStore uses 3 args. Increase the size of argstorage to 3?
+
+Regalloc
+ - Make less arch-dependent
+ - Don't spill everything at every basic block boundary.
+ - Allow args and return values to be ssa-able.
+ - Handle 2-address instructions.
+ - Floating point registers
+ - Make calls clobber all registers
+ - Make liveness analysis non-quadratic.
+ - Handle in-place instructions (like XORQconst) directly:
+ Use XORQ AX, 1 rather than MOVQ AX, BX; XORQ BX, 1.
+
+StackAlloc:
+ - Sort variables so all ptr-containing ones are first (so stack
+ maps are smaller)
+ - Reuse stack slots for noninterfering and type-compatible variables
+ (both AUTOs and spilled Values). But see issue 8740 for what
+ "type-compatible variables" mean and what DWARF information provides.
+
+Rewrites
+ - Strength reduction (both arch-indep and arch-dependent?)
+ - Start another architecture (arm?)
+ - 64-bit ops on 32-bit machines
+ - <regwidth ops. For example, x+y on int32s on amd64 needs (MOVLQSX (ADDL x y)).
+ Then add rewrites like (MOVLstore (MOVLQSX x) m) -> (MOVLstore x m)
+ to get rid of most of the MOVLQSX.
+ - Determine which nil checks can be done implicitly (by faulting)
+ and which need code generated, and do the code generation.
+
+Common-Subexpression Elimination
+ - Make better decision about which value in an equivalence class we should
+ choose to replace other values in that class.
+ - Can we move control values out of their basic block?
+ This would break nilcheckelim as currently implemented,
+ but it could be replaced by a similar CFG simplification pass.
+ - Investigate type equality. During SSA generation, should we use n.Type or (say) TypeBool?
+ Should we get rid of named types in favor of underlying types during SSA generation?
+ Should we introduce a new type equality routine that is less strict than the frontend's?
+
+Other
+ - Write barriers
+ - For testing, do something more sophisticated than
+ checkOpcodeCounts. Michael Matloob suggests using a similar
+ pattern matcher to the rewrite engine to check for certain
+ expression subtrees in the output.
+ - Implement memory zeroing with REPSTOSQ and DuffZero
+ - make deadstore work with zeroing.
+ - Add a value range propagation optimization pass.
+ Use it for bounds check elimination and bitwidth reduction.
+ - Branch prediction: Respect hints from the frontend, add our own.
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "fmt"
+
+// Block represents a basic block in the control flow graph of a function.
+type Block struct {
+ // A unique identifier for the block. The system will attempt to allocate
+ // these IDs densely, but no guarantees.
+ ID ID
+
+ // The kind of block this is.
+ Kind BlockKind
+
+ // Subsequent blocks, if any. The number and order depend on the block kind.
+ // All successors must be distinct (to make phi values in successors unambiguous).
+ Succs []*Block
+
+ // Inverse of successors.
+ // The order is significant to Phi nodes in the block.
+ Preds []*Block
+ // TODO: predecessors is a pain to maintain. Can we somehow order phi
+ // arguments by block id and have this field computed explicitly when needed?
+
+ // A value that determines how the block is exited. Its value depends on the kind
+ // of the block. For instance, a BlockIf has a boolean control value and BlockExit
+ // has a memory control value.
+ Control *Value
+
+ // The unordered set of Values that define the operation of this block.
+ // The list must include the control value, if any. (TODO: need this last condition?)
+ // After the scheduling pass, this list is ordered.
+ Values []*Value
+
+ // The containing function
+ Func *Func
+
+ // Line number for block's control operation
+ Line int32
+}
+
+// BlockKind classifies how a block is exited. The table below lists
+// each kind's control value and successor shape.
+//
+// kind control successors
+// ------------------------------------------
+// Exit return mem []
+// Plain nil [next]
+// If a boolean Value [then, else]
+// Call mem [nopanic, panic] (control opcode should be OpCall or OpStaticCall)
+type BlockKind int32
+
+// String returns the short form print of b, e.g. "b3".
+func (b *Block) String() string {
+ return fmt.Sprintf("b%d", b.ID)
+}
+
+// LongString returns the long form print of b: its kind, control
+// value (if any), and successor list.
+func (b *Block) LongString() string {
+ s := b.Kind.String()
+ if b.Control != nil {
+ s += fmt.Sprintf(" %s", b.Control)
+ }
+ if len(b.Succs) > 0 {
+ s += " ->"
+ for _, c := range b.Succs {
+ s += " " + c.String()
+ }
+ }
+ return s
+}
+
+// Logging helpers that delegate to the containing function's frontend.
+func (b *Block) Logf(msg string, args ...interface{}) { b.Func.Logf(msg, args...) }
+func (b *Block) Fatalf(msg string, args ...interface{}) { b.Func.Fatalf(msg, args...) }
+func (b *Block) Unimplementedf(msg string, args ...interface{}) { b.Func.Unimplementedf(msg, args...) }
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// checkFunc checks invariants of f.
+// It reports the first violation found via f.Fatalf: duplicate blocks
+// or values, inconsistent pred/succ links, malformed block kinds, and
+// used IDs sitting on the free lists.
+func checkFunc(f *Func) {
+ blockMark := make([]bool, f.NumBlocks())
+ valueMark := make([]bool, f.NumValues())
+
+ for _, b := range f.Blocks {
+ if blockMark[b.ID] {
+ f.Fatalf("block %s appears twice in %s!", b, f.Name)
+ }
+ blockMark[b.ID] = true
+ if b.Func != f {
+ f.Fatalf("%s.Func=%s, want %s", b, b.Func.Name, f.Name)
+ }
+
+ // Successors must be pairwise distinct (n is at most 2, so the
+ // quadratic scan is fine).
+ for i, c := range b.Succs {
+ for j, d := range b.Succs {
+ if i != j && c == d {
+ f.Fatalf("%s.Succs has duplicate block %s", b, c)
+ }
+ }
+ }
+ // Note: duplicate successors are hard in the following case:
+ // if(...) goto x else goto x
+ // x: v = phi(a, b)
+ // If the conditional is true, does v get the value of a or b?
+ // We could solve this other ways, but the easiest is just to
+ // require (by possibly adding empty control-flow blocks) that
+ // all successors are distinct. They will need to be distinct
+ // anyway for register allocation (duplicate successors implies
+ // the existence of critical edges).
+
+ // Every predecessor must list b among its successors.
+ for _, p := range b.Preds {
+ var found bool
+ for _, c := range p.Succs {
+ if c == b {
+ found = true
+ break
+ }
+ }
+ if !found {
+ f.Fatalf("block %s is not a succ of its pred block %s", b, p)
+ }
+ }
+
+ // Per-kind invariants; see the BlockKind table in block.go.
+ switch b.Kind {
+ case BlockExit:
+ if len(b.Succs) != 0 {
+ f.Fatalf("exit block %s has successors", b)
+ }
+ if b.Control == nil {
+ f.Fatalf("exit block %s has no control value", b)
+ }
+ if !b.Control.Type.IsMemory() {
+ f.Fatalf("exit block %s has non-memory control value %s", b, b.Control.LongString())
+ }
+ case BlockDead:
+ if len(b.Succs) != 0 {
+ f.Fatalf("dead block %s has successors", b)
+ }
+ if len(b.Preds) != 0 {
+ f.Fatalf("dead block %s has predecessors", b)
+ }
+ if len(b.Values) != 0 {
+ f.Fatalf("dead block %s has values", b)
+ }
+ if b.Control != nil {
+ f.Fatalf("dead block %s has a control value", b)
+ }
+ case BlockPlain:
+ if len(b.Succs) != 1 {
+ f.Fatalf("plain block %s len(Succs)==%d, want 1", b, len(b.Succs))
+ }
+ if b.Control != nil {
+ f.Fatalf("plain block %s has non-nil control %s", b, b.Control.LongString())
+ }
+ case BlockIf:
+ if len(b.Succs) != 2 {
+ f.Fatalf("if block %s len(Succs)==%d, want 2", b, len(b.Succs))
+ }
+ if b.Control == nil {
+ f.Fatalf("if block %s has no control value", b)
+ }
+ if !b.Control.Type.IsBoolean() {
+ f.Fatalf("if block %s has non-bool control value %s", b, b.Control.LongString())
+ }
+ case BlockCall:
+ if len(b.Succs) != 2 {
+ f.Fatalf("call block %s len(Succs)==%d, want 2", b, len(b.Succs))
+ }
+ if b.Control == nil {
+ f.Fatalf("call block %s has no control value", b)
+ }
+ if !b.Control.Type.IsMemory() {
+ f.Fatalf("call block %s has non-memory control value %s", b, b.Control.LongString())
+ }
+ if b.Succs[1].Kind != BlockExit {
+ f.Fatalf("exception edge from call block %s does not go to exit but %s", b, b.Succs[1])
+ }
+ }
+
+ for _, v := range b.Values {
+ for _, arg := range v.Args {
+ if arg == nil {
+ f.Fatalf("value %v has nil arg", v.LongString())
+ }
+ }
+
+ if valueMark[v.ID] {
+ f.Fatalf("value %s appears twice!", v.LongString())
+ }
+ valueMark[v.ID] = true
+
+ if v.Block != b {
+ f.Fatalf("%s.block != %s", v, b)
+ }
+ if v.Op == OpPhi && len(v.Args) != len(b.Preds) {
+ f.Fatalf("phi length %s does not match pred length %d for block %s", v.LongString(), len(b.Preds), b)
+ }
+
+ if v.Op == OpAddr {
+ if len(v.Args) == 0 {
+ f.Fatalf("no args for OpAddr %s", v.LongString())
+ }
+ if v.Args[0].Op != OpSP && v.Args[0].Op != OpSB {
+ f.Fatalf("bad arg to OpAddr %v", v)
+ }
+ }
+
+ // TODO: check for cycles in values
+ // TODO: check type
+ }
+ }
+
+ // Nothing on the free lists may still be live in the function.
+ for _, id := range f.bid.free {
+ if blockMark[id] {
+ f.Fatalf("used block b%d in free list", id)
+ }
+ }
+ for _, id := range f.vid.free {
+ if valueMark[id] {
+ f.Fatalf("used value v%d in free list", id)
+ }
+ }
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "log"
+
+// Compile is the main entry point for this package.
+// Compile modifies f so that on return:
+// · all Values in f map to 0 or 1 assembly instructions of the target architecture
+// · the order of f.Blocks is the order to emit the Blocks
+// · the order of b.Values is the order to emit the Values in each Block
+// · f has a non-nil regAlloc field
+func Compile(f *Func) {
+ // TODO: debugging - set flags to control verbosity of compiler,
+ // which phases to dump IR before/after, etc.
+ f.Logf("compiling %s\n", f.Name)
+
+ // hook to print function & phase if panic happens
+ // (the deferred func does not recover; it reports the active phase
+ // while the panic unwinds, and is a no-op on normal return because
+ // phaseName is cleared below)
+ phaseName := "init"
+ defer func() {
+ if phaseName != "" {
+ f.Fatalf("panic during %s while compiling %s\n", phaseName, f.Name)
+ }
+ }()
+
+ // Run all the passes
+ printFunc(f)
+ checkFunc(f)
+ for _, p := range passes {
+ phaseName = p.name
+ f.Logf(" pass %s begin\n", p.name)
+ p.fn(f)
+ f.Logf(" pass %s end\n", p.name)
+ // Re-dump and re-check invariants after every pass.
+ printFunc(f)
+ checkFunc(f)
+ }
+
+ // Squash error printing defer
+ phaseName = ""
+}
+
+// pass is a named compiler phase that transforms a function in place.
+type pass struct {
+ name string
+ fn func(*Func)
+}
+
+// list of passes for the compiler
+// The order here is the execution order; it must satisfy every
+// constraint in passOrder below (verified at startup by init).
+var passes = [...]pass{
+ {"phielim", phielim},
+ {"copyelim", copyelim},
+ {"early deadcode", deadcode}, // remove generated dead code to avoid doing pointless work during opt
+ {"opt", opt},
+ {"opt deadcode", deadcode}, // remove any blocks orphaned during opt
+ {"generic cse", cse},
+ {"nilcheckelim", nilcheckelim},
+ {"generic deadcode", deadcode},
+ {"dse", dse},
+ {"fuse", fuse},
+ {"lower", lower},
+ {"lowered cse", cse},
+ {"lowered deadcode", deadcode},
+ {"critical", critical}, // remove critical edges
+ {"layout", layout}, // schedule blocks
+ {"schedule", schedule}, // schedule values
+ {"regalloc", regalloc},
+ {"stackalloc", stackalloc},
+}
+
+// Double-check phase ordering constraints.
+// This code is intended to document the ordering requirements
+// between different phases. It does not override the passes
+// list above.
+type constraint struct {
+ a, b string // a must come before b
+}
+
+// passOrder lists the known must-precede pairs; init() panics at
+// startup if the passes list violates any of them.
+var passOrder = [...]constraint{
+ // common-subexpression before dead-store elim, so that we recognize
+ // when two address expressions are the same.
+ {"generic cse", "dse"},
+ // cse substantially improves nilcheckelim efficacy
+ {"generic cse", "nilcheckelim"},
+ // allow deadcode to clean up after nilcheckelim
+ {"nilcheckelim", "generic deadcode"},
+ // nilcheckelim generates sequences of plain basic blocks
+ {"nilcheckelim", "fuse"},
+ // don't layout blocks until critical edges have been removed
+ {"critical", "layout"},
+ // regalloc requires the removal of all critical edges
+ {"critical", "regalloc"},
+ // regalloc requires all the values in a block to be scheduled
+ {"schedule", "regalloc"},
+ // stack allocation requires register allocation
+ {"regalloc", "stackalloc"},
+}
+
+// init verifies at startup that the passes list satisfies every
+// ordering constraint in passOrder.
+func init() {
+ for _, c := range passOrder {
+ ia, ib := -1, -1
+ for k, p := range passes {
+ if p.name == c.a {
+ ia = k
+ }
+ if p.name == c.b {
+ ib = k
+ }
+ }
+ if ia < 0 {
+ log.Panicf("pass %s not found", c.a)
+ }
+ if ib < 0 {
+ log.Panicf("pass %s not found", c.b)
+ }
+ if ia >= ib {
+ log.Panicf("passes %s and %s out of order", c.a, c.b)
+ }
+ }
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// Config holds target-specific compilation parameters: word sizes,
+// cached types, lowering functions, and the frontend callbacks.
+type Config struct {
+ arch string // "amd64", etc.
+ IntSize int64 // 4 or 8
+ PtrSize int64 // 4 or 8
+ Uintptr Type // pointer arithmetic type
+ Int Type
+ lowerBlock func(*Block) bool // lowering function
+ lowerValue func(*Value, *Config) bool // lowering function
+ fe Frontend // callbacks into compiler frontend
+
+ // TODO: more stuff. Compiler flags of interest, ...
+}
+
+// A Frontend supplies the SSA backend with callbacks into the
+// compiler frontend.
+type Frontend interface {
+ // StringData returns a symbol pointing to the given string's contents.
+ StringData(string) interface{} // returns *gc.Sym
+
+ // Logf logs a message from the compiler.
+ Logf(string, ...interface{})
+
+ // Fatalf reports a compiler error and exits.
+ Fatalf(string, ...interface{})
+
+ // Unimplementedf reports that the function cannot be compiled.
+ // It will be removed once SSA work is complete.
+ Unimplementedf(msg string, args ...interface{})
+}
+
+// NewConfig returns a new configuration object for the given architecture.
+// For an unrecognized arch it reports the problem via fe.Unimplementedf and
+// returns a config with zero sizes (and thus the 32-bit cached types below).
+func NewConfig(arch string, fe Frontend) *Config {
+ c := &Config{arch: arch, fe: fe}
+ switch arch {
+ case "amd64":
+ c.IntSize = 8
+ c.PtrSize = 8
+ c.lowerBlock = rewriteBlockAMD64
+ c.lowerValue = rewriteValueAMD64
+ case "386":
+ c.IntSize = 4
+ c.PtrSize = 4
+ c.lowerBlock = rewriteBlockAMD64
+ c.lowerValue = rewriteValueAMD64 // TODO(khr): full 32-bit support
+ default:
+ fe.Unimplementedf("arch %s not implemented", arch)
+ }
+
+ // cache the frequently-used types in the config
+ // (32-bit defaults, widened below for 8-byte targets)
+ c.Uintptr = TypeUInt32
+ c.Int = TypeInt32
+ if c.PtrSize == 8 {
+ c.Uintptr = TypeUInt64
+ }
+ if c.IntSize == 8 {
+ c.Int = TypeInt64
+ }
+
+ return c
+}
+
+// Frontend returns the frontend callbacks attached to this config.
+func (c *Config) Frontend() Frontend { return c.fe }
+
+// NewFunc returns a new, empty function object
+// associated with this config.
+func (c *Config) NewFunc() *Func {
+ // TODO(khr): should this function take name, type, etc. as arguments?
+ return &Func{Config: c}
+}
+
+// Logf, Fatalf, and Unimplementedf forward to the frontend callbacks.
+func (c *Config) Logf(msg string, args ...interface{}) { c.fe.Logf(msg, args...) }
+func (c *Config) Fatalf(msg string, args ...interface{}) { c.fe.Fatalf(msg, args...) }
+func (c *Config) Unimplementedf(msg string, args ...interface{}) { c.fe.Unimplementedf(msg, args...) }
+
+// TODO(khr): do we really need a separate Config, or can we just
+// store all its fields inside a Func?
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// copyelim removes all copies from f.
+// Every use of a Copy value (whether as a value argument or as a block's
+// control value) is redirected to the end of the Copy chain. The Copy
+// values themselves are left in place for a later deadcode pass.
+func copyelim(f *Func) {
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ for i, w := range v.Args {
+ // Follow the chain of copies to its non-Copy root.
+ x := w
+ for x.Op == OpCopy {
+ x = x.Args[0]
+ }
+ if x != w {
+ v.Args[i] = x
+ }
+ }
+ }
+ // Do the same for the block's control value, if any.
+ v := b.Control
+ if v != nil {
+ for v.Op == OpCopy {
+ v = v.Args[0]
+ }
+ b.Control = v
+ }
+ }
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// critical splits critical edges (those that go from a block with
+// more than one outedge to a block with more than one inedge).
+// Regalloc wants a critical-edge-free CFG so it can implement phi values.
+func critical(f *Func) {
+ for _, b := range f.Blocks {
+ if len(b.Preds) <= 1 {
+ continue
+ }
+
+ // decide if we need to split edges coming into b.
+ // NOTE(review): phis of memory type are exempted here;
+ // presumably they never require an edge split — confirm.
+ hasphi := false
+ for _, v := range b.Values {
+ if v.Op == OpPhi && v.Type != TypeMem {
+ hasphi = true
+ break
+ }
+ }
+ if !hasphi {
+ // no splitting needed
+ continue
+ }
+
+ // split input edges coming from multi-output blocks.
+ for i, c := range b.Preds {
+ if c.Kind == BlockPlain {
+ continue // only single output block
+ }
+
+ // allocate a new block to place on the edge
+ d := f.NewBlock(BlockPlain)
+
+ // splice it in
+ d.Preds = append(d.Preds, c)
+ d.Succs = append(d.Succs, b)
+ b.Preds[i] = d
+ // replace b with d in c's successor list.
+ for j, b2 := range c.Succs {
+ if b2 == b {
+ c.Succs[j] = d
+ break
+ }
+ }
+ }
+ }
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "sort"
+
+// cse does common-subexpression elimination on the Function.
+// Values are just relinked, nothing is deleted. A subsequent deadcode
+// pass is required to actually remove duplicate expressions.
+func cse(f *Func) {
+ // Two values are equivalent if they satisfy the following definition:
+ // equivalent(v, w):
+ // v.op == w.op
+ // v.type == w.type
+ // v.aux == w.aux
+ // v.auxint == w.auxint
+ // len(v.args) == len(w.args)
+ // v.block == w.block if v.op == OpPhi
+ // equivalent(v.args[i], w.args[i]) for i in 0..len(v.args)-1
+
+ // The algorithm searches for a partition of f's values into
+ // equivalence classes using the above definition.
+ // It starts with a coarse partition and iteratively refines it
+ // until it reaches a fixed point.
+
+ // Make initial partition based on opcode, type-name, aux, auxint, nargs, phi-block, and the ops of v's first args
+ type key struct {
+ op Op
+ typ string
+ aux interface{}
+ auxint int64
+ nargs int
+ block ID // block id for phi vars, -1 otherwise
+ arg0op Op // v.Args[0].Op if len(v.Args) > 0, OpInvalid otherwise
+ arg1op Op // v.Args[1].Op if len(v.Args) > 1, OpInvalid otherwise
+ }
+ m := map[key]eqclass{}
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ bid := ID(-1)
+ if v.Op == OpPhi {
+ bid = b.ID
+ }
+ arg0op := OpInvalid
+ if len(v.Args) > 0 {
+ arg0op = v.Args[0].Op
+ }
+ arg1op := OpInvalid
+ if len(v.Args) > 1 {
+ arg1op = v.Args[1].Op
+ }
+ k := key{v.Op, v.Type.String(), v.Aux, v.AuxInt, len(v.Args), bid, arg0op, arg1op}
+ m[k] = append(m[k], v)
+ }
+ }
+
+ // A partition is a set of disjoint eqclasses.
+ var partition []eqclass
+ for _, v := range m {
+ partition = append(partition, v)
+ }
+ // TODO: Sort partition here for perfect reproducibility?
+ // Sort by what? Partition size?
+ // (Could that improve efficiency by discovering splits earlier?)
+
+ // map from value id back to eqclass id
+ valueEqClass := make([]int, f.NumValues())
+ for i, e := range partition {
+ for _, v := range e {
+ valueEqClass[v.ID] = i
+ }
+ }
+
+ // Find an equivalence class where some members of the class have
+ // non-equivalent arguments. Split the equivalence class appropriately.
+ // Repeat until we can't find any more splits.
+ for {
+ changed := false
+
+ // partition can grow in the loop. By not using a range loop here,
+ // we process new additions as they arrive, avoiding O(n^2) behavior.
+ for i := 0; i < len(partition); i++ {
+ e := partition[i]
+ v := e[0]
+ // all values in this equiv class that are not equivalent to v get moved
+ // into another equiv class.
+ // To avoid allocating while building that equivalence class,
+ // move the values equivalent to v to the beginning of e,
+ // other values to the end of e, and track where the split is.
+ allvals := e
+ split := len(e)
+ eqloop:
+ for j := 1; j < len(e); {
+ w := e[j]
+ for i := 0; i < len(v.Args); i++ {
+ if valueEqClass[v.Args[i].ID] != valueEqClass[w.Args[i].ID] || !v.Type.Equal(w.Type) {
+ // w is not equivalent to v.
+ // move it to the end, shrink e, and move the split.
+ e[j], e[len(e)-1] = e[len(e)-1], e[j]
+ e = e[:len(e)-1]
+ split--
+ // w joins the new class appended below.
+ valueEqClass[w.ID] = len(partition)
+ changed = true
+ continue eqloop
+ }
+ }
+ // v and w are equivalent. Keep w in e.
+ j++
+ }
+ partition[i] = e
+ if split < len(allvals) {
+ partition = append(partition, allvals[split:])
+ }
+ }
+
+ if !changed {
+ break
+ }
+ }
+
+ // Compute dominator tree
+ idom := dominators(f)
+
+ // Compute substitutions we would like to do. We substitute v for w
+ // if v and w are in the same equivalence class and v dominates w.
+ rewrite := make([]*Value, f.NumValues())
+ for _, e := range partition {
+ sort.Sort(e) // ensure deterministic ordering
+ for len(e) > 1 {
+ // Find a maximal dominant element in e
+ v := e[0]
+ for _, w := range e[1:] {
+ if dom(w.Block, v.Block, idom) {
+ v = w
+ }
+ }
+
+ // Replace all elements of e which v dominates
+ for i := 0; i < len(e); {
+ w := e[i]
+ if w == v {
+ // drop v itself from e (swap-with-last delete)
+ e, e[i] = e[:len(e)-1], e[len(e)-1]
+ } else if dom(v.Block, w.Block, idom) {
+ rewrite[w.ID] = v
+ e, e[i] = e[:len(e)-1], e[len(e)-1]
+ } else {
+ i++
+ }
+ }
+ // TODO(khr): if value is a control value, do we need to keep it block-local?
+ }
+ }
+
+ // Apply substitutions
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ for i, w := range v.Args {
+ if x := rewrite[w.ID]; x != nil {
+ v.SetArg(i, x)
+ }
+ }
+ }
+ }
+}
+
+// dom reports whether b dominates c, treating "dominates" as
+// reflexive (dom(b, b) == true).
+// TODO(khr): faster
+func dom(b, c *Block, idom []*Block) bool {
+ // Walk up from c in the dominator tree looking for b.
+ for c != nil {
+ if c == b {
+ return true
+ }
+ c = idom[c.ID]
+ }
+ // Reached the entry block, never saw b.
+ return false
+}
+
+// An eqclass approximates an equivalence class. During the
+// algorithm it may represent the union of several of the
+// final equivalence classes.
+type eqclass []*Value
+
+// eqclass implements sort.Interface, ordering by value ID
+// so that class traversal is deterministic.
+func (e eqclass) Len() int { return len(e) }
+func (e eqclass) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
+func (e eqclass) Less(i, j int) bool { return e[i].ID < e[j].ID }
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// deadcode removes dead code from f: values not transitively needed by
+// any control value of a reachable block, and unreachable blocks.
+func deadcode(f *Func) {
+
+ // Find all reachable basic blocks.
+ reachable := make([]bool, f.NumBlocks())
+ reachable[f.Entry.ID] = true
+ p := []*Block{f.Entry} // stack-like worklist
+ for len(p) > 0 {
+ // Pop a reachable block
+ b := p[len(p)-1]
+ p = p[:len(p)-1]
+ // Mark successors as reachable
+ for _, c := range b.Succs {
+ if !reachable[c.ID] {
+ reachable[c.ID] = true
+ p = append(p, c) // push
+ }
+ }
+ }
+
+ // Find all live values
+ live := make([]bool, f.NumValues()) // flag to set for each live value
+ var q []*Value // stack-like worklist of unscanned values
+
+ // Starting set: all control values of reachable blocks are live.
+ for _, b := range f.Blocks {
+ if !reachable[b.ID] {
+ continue
+ }
+ if v := b.Control; v != nil && !live[v.ID] {
+ live[v.ID] = true
+ q = append(q, v)
+ }
+ }
+
+ // Compute transitive closure of live values.
+ for len(q) > 0 {
+ // pop a reachable value
+ v := q[len(q)-1]
+ q = q[:len(q)-1]
+ for i, x := range v.Args {
+ // A phi arg coming in along an unreachable edge does not
+ // keep that arg alive.
+ if v.Op == OpPhi && !reachable[v.Block.Preds[i].ID] {
+ continue
+ }
+ if !live[x.ID] {
+ live[x.ID] = true
+ q = append(q, x) // push
+ }
+ }
+ }
+
+ // Remove dead values from blocks' value list. Return dead
+ // value ids to the allocator.
+ for _, b := range f.Blocks {
+ i := 0
+ for _, v := range b.Values {
+ if live[v.ID] {
+ b.Values[i] = v
+ i++
+ } else {
+ f.vid.put(v.ID)
+ }
+ }
+ // aid GC
+ tail := b.Values[i:]
+ for j := range tail {
+ tail[j] = nil
+ }
+ b.Values = b.Values[:i]
+ }
+
+ // Remove unreachable blocks. Return dead block ids to allocator.
+ i := 0
+ for _, b := range f.Blocks {
+ if reachable[b.ID] {
+ f.Blocks[i] = b
+ i++
+ } else {
+ if len(b.Values) > 0 {
+ b.Fatalf("live values in unreachable block %v: %v", b, b.Values)
+ }
+ f.bid.put(b.ID)
+ }
+ }
+ // zero remainder to help GC
+ tail := f.Blocks[i:]
+ for j := range tail {
+ tail[j] = nil
+ }
+ f.Blocks = f.Blocks[:i]
+
+ // TODO: renumber Blocks and Values densely?
+ // TODO: save dead Values and Blocks for reuse? Or should we just let GC handle it?
+}
+
+// There was an edge b->c. c has been removed from b's successors.
+// Fix up c to handle that fact. Uses a worklist so that blocks made
+// dead by the removal cascade without recursion.
+func (f *Func) removePredecessor(b, c *Block) {
+ work := [][2]*Block{{b, c}}
+
+ for len(work) > 0 {
+ b, c := work[0][0], work[0][1]
+ work = work[1:]
+
+ // find index of b in c's predecessor list
+ var i int
+ for j, p := range c.Preds {
+ if p == b {
+ i = j
+ break
+ }
+ }
+
+ // Delete by swapping with the last predecessor.
+ n := len(c.Preds) - 1
+ c.Preds[i] = c.Preds[n]
+ c.Preds[n] = nil // aid GC
+ c.Preds = c.Preds[:n]
+
+ // rewrite phi ops to match the new predecessor list
+ for _, v := range c.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ v.Args[i] = v.Args[n]
+ v.Args[n] = nil // aid GC
+ v.Args = v.Args[:n]
+ if n == 1 {
+ v.Op = OpCopy
+ // Note: this is trickier than it looks. Replacing
+ // a Phi with a Copy can in general cause problems because
+ // Phi and Copy don't have exactly the same semantics.
+ // Phi arguments always come from a predecessor block,
+ // whereas copies don't. This matters in loops like:
+ // 1: x = (Phi y)
+ // y = (Add x 1)
+ // goto 1
+ // If we replace Phi->Copy, we get
+ // 1: x = (Copy y)
+ // y = (Add x 1)
+ // goto 1
+ // (Phi y) refers to the *previous* value of y, whereas
+ // (Copy y) refers to the *current* value of y.
+ // The modified code has a cycle and the scheduler
+ // will barf on it.
+ //
+ // Fortunately, this situation can only happen for dead
+ // code loops. So although the value graph is transiently
+ // bad, we'll throw away the bad part by the end of
+ // the next deadcode phase.
+ // Proof: If we have a potential bad cycle, we have a
+ // situation like this:
+ // x = (Phi z)
+ // y = (op1 x ...)
+ // z = (op2 y ...)
+ // Where opX are not Phi ops. But such a situation
+ // implies a cycle in the dominator graph. In the
+ // example, x.Block dominates y.Block, y.Block dominates
+ // z.Block, and z.Block dominates x.Block (treating
+ // "dominates" as reflexive). Cycles in the dominator
+ // graph can only happen in an unreachable cycle.
+ }
+ }
+ if n == 0 {
+ // c is now dead--recycle its values
+ for _, v := range c.Values {
+ f.vid.put(v.ID)
+ }
+ c.Values = nil
+ // Also kill any successors of c now, to spare later processing.
+ for _, succ := range c.Succs {
+ work = append(work, [2]*Block{c, succ})
+ }
+ c.Succs = nil
+ c.Kind = BlockDead
+ c.Control = nil
+ }
+ }
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "testing"
+
+// TestDeadLoop checks that an unreachable self-loop block,
+// and the values inside it, are removed by Deadcode.
+func TestDeadLoop(t *testing.T) {
+ c := NewConfig("amd64", DummyFrontend{t})
+ fun := Fun(c, "entry",
+ Bloc("entry",
+ Valu("mem", OpArg, TypeMem, 0, ".mem"),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")),
+ // dead loop
+ Bloc("deadblock",
+ // dead value in dead block
+ Valu("deadval", OpConstBool, TypeBool, 0, true),
+ If("deadval", "deadblock", "exit")))
+
+ CheckFunc(fun.f)
+ Deadcode(fun.f)
+ CheckFunc(fun.f)
+
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["deadblock"] {
+ t.Errorf("dead block not removed")
+ }
+ for _, v := range b.Values {
+ if v == fun.values["deadval"] {
+ t.Errorf("control value of dead block not removed")
+ }
+ }
+ }
+}
+
+// TestDeadValue checks that an unused constant in a live
+// block is removed by Deadcode.
+func TestDeadValue(t *testing.T) {
+ c := NewConfig("amd64", DummyFrontend{t})
+ fun := Fun(c, "entry",
+ Bloc("entry",
+ Valu("mem", OpArg, TypeMem, 0, ".mem"),
+ Valu("deadval", OpConst64, TypeInt64, 37, nil),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ Deadcode(fun.f)
+ CheckFunc(fun.f)
+
+ for _, b := range fun.f.Blocks {
+ for _, v := range b.Values {
+ if v == fun.values["deadval"] {
+ t.Errorf("dead value not removed")
+ }
+ }
+ }
+}
+
+// TestNeverTaken checks that Opt folds a branch on a constant false
+// condition and Deadcode then removes the untaken block and the condition.
+func TestNeverTaken(t *testing.T) {
+ c := NewConfig("amd64", DummyFrontend{t})
+ fun := Fun(c, "entry",
+ Bloc("entry",
+ Valu("cond", OpConstBool, TypeBool, 0, false),
+ Valu("mem", OpArg, TypeMem, 0, ".mem"),
+ If("cond", "then", "else")),
+ Bloc("then",
+ Goto("exit")),
+ Bloc("else",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ Opt(fun.f)
+ Deadcode(fun.f)
+ CheckFunc(fun.f)
+
+ if fun.blocks["entry"].Kind != BlockPlain {
+ t.Errorf("if(false) not simplified")
+ }
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["then"] {
+ t.Errorf("then block still present")
+ }
+ for _, v := range b.Values {
+ if v == fun.values["cond"] {
+ t.Errorf("constant condition still present")
+ }
+ }
+ }
+
+}
+
+// TestNestedDeadBlocks checks that a chain of blocks made unreachable
+// only through other dead blocks is fully removed.
+func TestNestedDeadBlocks(t *testing.T) {
+ c := NewConfig("amd64", DummyFrontend{t})
+ fun := Fun(c, "entry",
+ Bloc("entry",
+ Valu("mem", OpArg, TypeMem, 0, ".mem"),
+ Valu("cond", OpConstBool, TypeBool, 0, false),
+ If("cond", "b2", "b4")),
+ Bloc("b2",
+ If("cond", "b3", "b4")),
+ Bloc("b3",
+ If("cond", "b3", "b4")),
+ Bloc("b4",
+ If("cond", "b3", "exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ Opt(fun.f)
+ CheckFunc(fun.f)
+ Deadcode(fun.f)
+ CheckFunc(fun.f)
+ if fun.blocks["entry"].Kind != BlockPlain {
+ t.Errorf("if(false) not simplified")
+ }
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["b2"] {
+ t.Errorf("b2 block still present")
+ }
+ if b == fun.blocks["b3"] {
+ t.Errorf("b3 block still present")
+ }
+ for _, v := range b.Values {
+ if v == fun.values["cond"] {
+ t.Errorf("constant condition still present")
+ }
+ }
+ }
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// dse does dead-store elimination on the Function.
+// Dead stores are those which are unconditionally followed by
+// another store to the same location, with no intervening load.
+// Eliminated stores are rewritten into Copy ops rather than deleted.
+// This implementation only works within a basic block. TODO: use something more global.
+func dse(f *Func) {
+ var stores []*Value
+ loadUse := newSparseSet(f.NumValues())
+ storeUse := newSparseSet(f.NumValues())
+ shadowed := newSparseSet(f.NumValues())
+ for _, b := range f.Blocks {
+ // Find all the stores in this block. Categorize their uses:
+ // loadUse contains stores which are used by a subsequent load.
+ // storeUse contains stores which are used by a subsequent store.
+ loadUse.clear()
+ storeUse.clear()
+ stores = stores[:0]
+ for _, v := range b.Values {
+ if v.Op == OpPhi {
+ // Ignore phis - they will always be first and can't be eliminated
+ continue
+ }
+ if v.Type.IsMemory() {
+ stores = append(stores, v)
+ for _, a := range v.Args {
+ if a.Block == b && a.Type.IsMemory() {
+ storeUse.add(a.ID)
+ if v.Op != OpStore {
+ // CALL, DUFFCOPY, etc. are both
+ // reads and writes.
+ loadUse.add(a.ID)
+ }
+ }
+ }
+ } else {
+ for _, a := range v.Args {
+ if a.Block == b && a.Type.IsMemory() {
+ loadUse.add(a.ID)
+ }
+ }
+ }
+ }
+ if len(stores) == 0 {
+ continue
+ }
+
+ // find last store in the block
+ // (the one memory-producing value with no in-block store use)
+ var last *Value
+ for _, v := range stores {
+ if storeUse.contains(v.ID) {
+ continue
+ }
+ if last != nil {
+ b.Fatalf("two final stores - simultaneous live stores %s %s", last, v)
+ }
+ last = v
+ }
+ if last == nil {
+ b.Fatalf("no last store found - cycle?")
+ }
+
+ // Walk backwards looking for dead stores. Keep track of shadowed addresses.
+ // An "address" is an SSA Value which encodes both the address and size of
+ // the write. This code will not remove dead stores to the same address
+ // of different types.
+ shadowed.clear()
+ v := last
+
+ walkloop:
+ if loadUse.contains(v.ID) {
+ // Someone might be reading this memory state.
+ // Clear all shadowed addresses.
+ shadowed.clear()
+ }
+ if v.Op == OpStore {
+ if shadowed.contains(v.Args[0].ID) {
+ // Modify store into a copy
+ v.Op = OpCopy
+ v.Aux = nil
+ v.SetArgs1(v.Args[2])
+ } else {
+ shadowed.add(v.Args[0].ID)
+ }
+ }
+ // walk to previous store
+ if v.Op == OpPhi {
+ // (this continue targets the enclosing range over f.Blocks)
+ continue // At start of block. Move on to next block.
+ }
+ for _, a := range v.Args {
+ if a.Block == b && a.Type.IsMemory() {
+ v = a
+ goto walkloop
+ }
+ }
+ }
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "testing"
+)
+
+// TestDeadStore checks that a store shadowed by a later store
+// to the same address is rewritten into a Copy.
+func TestDeadStore(t *testing.T) {
+ c := NewConfig("amd64", DummyFrontend{t})
+ ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
+ fun := Fun(c, "entry",
+ Bloc("entry",
+ Valu("start", OpArg, TypeMem, 0, ".mem"),
+ Valu("sb", OpSB, TypeInvalid, 0, nil),
+ Valu("v", OpConstBool, TypeBool, 0, true),
+ Valu("addr1", OpAddr, ptrType, 0, nil, "sb"),
+ Valu("addr2", OpAddr, ptrType, 0, nil, "sb"),
+ Valu("store1", OpStore, TypeMem, 0, nil, "addr1", "v", "start"),
+ Valu("store2", OpStore, TypeMem, 0, nil, "addr2", "v", "store1"),
+ Valu("store3", OpStore, TypeMem, 0, nil, "addr1", "v", "store2"),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("store3")))
+
+ CheckFunc(fun.f)
+ dse(fun.f)
+ CheckFunc(fun.f)
+
+ v := fun.values["store1"]
+ if v.Op != OpCopy {
+ t.Errorf("dead store not removed")
+ }
+}
+// TestDeadStorePhi checks that dse terminates when the memory
+// chain within a block starts at a phi.
+func TestDeadStorePhi(t *testing.T) {
+ // make sure we don't get into an infinite loop with phi values.
+ c := NewConfig("amd64", DummyFrontend{t})
+ ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
+ fun := Fun(c, "entry",
+ Bloc("entry",
+ Valu("start", OpArg, TypeMem, 0, ".mem"),
+ Valu("sb", OpSB, TypeInvalid, 0, nil),
+ Valu("v", OpConstBool, TypeBool, 0, true),
+ Valu("addr", OpAddr, ptrType, 0, nil, "sb"),
+ Goto("loop")),
+ Bloc("loop",
+ Valu("phi", OpPhi, TypeMem, 0, nil, "start", "store"),
+ Valu("store", OpStore, TypeMem, 0, nil, "addr", "v", "phi"),
+ If("v", "loop", "exit")),
+ Bloc("exit",
+ Exit("store")))
+
+ CheckFunc(fun.f)
+ dse(fun.f)
+ CheckFunc(fun.f)
+}
+
+// TestDeadStoreTypes checks that stores through differently-typed
+// addresses do not shadow each other.
+func TestDeadStoreTypes(t *testing.T) {
+ // Make sure a narrow store can't shadow a wider one. We test an even
+ // stronger restriction, that one store can't shadow another unless the
+ // types of the address fields are identical (where identicalness is
+ // decided by the CSE pass).
+ c := NewConfig("amd64", DummyFrontend{t})
+ t1 := &TypeImpl{Size_: 8, Ptr: true, Name: "t1"}
+ t2 := &TypeImpl{Size_: 4, Ptr: true, Name: "t2"}
+ fun := Fun(c, "entry",
+ Bloc("entry",
+ Valu("start", OpArg, TypeMem, 0, ".mem"),
+ Valu("sb", OpSB, TypeInvalid, 0, nil),
+ Valu("v", OpConstBool, TypeBool, 0, true),
+ Valu("addr1", OpAddr, t1, 0, nil, "sb"),
+ Valu("addr2", OpAddr, t2, 0, nil, "sb"),
+ Valu("store1", OpStore, TypeMem, 0, nil, "addr1", "v", "start"),
+ Valu("store2", OpStore, TypeMem, 0, nil, "addr2", "v", "store1"),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("store2")))
+
+ CheckFunc(fun.f)
+ cse(fun.f)
+ dse(fun.f)
+ CheckFunc(fun.f)
+
+ v := fun.values["store1"]
+ if v.Op == OpCopy {
+ t.Errorf("store %s incorrectly removed", v)
+ }
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// mark values used by postorder; dfs also borrows notFound and
+// notExplored as sentinels in its dfnum array.
+const (
+ notFound = 0 // block has not been discovered yet
+ notExplored = 1 // discovered and in queue, outedges not processed yet
+ explored = 2 // discovered and in queue, outedges processed
+ done = 3 // all done, in output ordering
+)
+
+// This file contains code to compute the dominator tree
+// of a control-flow graph.
+
+// postorder computes a postorder traversal ordering for the
+// basic blocks in f. Unreachable blocks will not appear.
+// The entry block is last in the returned slice.
+func postorder(f *Func) []*Block {
+ mark := make([]byte, f.NumBlocks())
+
+ // result ordering
+ var order []*Block
+
+ // stack of blocks
+ var s []*Block
+ s = append(s, f.Entry)
+ mark[f.Entry.ID] = notExplored
+ for len(s) > 0 {
+ b := s[len(s)-1]
+ switch mark[b.ID] {
+ case explored:
+ // Children have all been visited. Pop & output block.
+ s = s[:len(s)-1]
+ mark[b.ID] = done
+ order = append(order, b)
+ case notExplored:
+ // Children have not been visited yet. Mark as explored
+ // and queue any children we haven't seen yet.
+ mark[b.ID] = explored
+ for _, c := range b.Succs {
+ if mark[c.ID] == notFound {
+ mark[c.ID] = notExplored
+ s = append(s, c)
+ }
+ }
+ default:
+ b.Fatalf("bad stack state %v %d", b, mark[b.ID])
+ }
+ }
+ return order
+}
+
+// linkedBlocks returns the blocks linked to a block, i.e. either
+// its predecessors or its successors, letting the dominator code
+// run forwards or backwards over the CFG.
+type linkedBlocks func(*Block) []*Block
+
+// dfs performs a depth first search over the blocks. dfnum contains a mapping
+// from block id to an int indicating the order the block was reached or
+// notFound if the block was not reached. order contains a mapping from dfnum
+// to block
+// NOTE(review): queued-but-unvisited blocks temporarily carry
+// dfnum == notExplored (1), which collides with the first assigned dfs
+// number; callers only compare against notFound, so this appears
+// benign — confirm.
+func dfs(entry *Block, succFn linkedBlocks) (dfnum []int, order []*Block, parent []*Block) {
+ maxBlockID := entry.Func.NumBlocks()
+
+ dfnum = make([]int, maxBlockID)
+ order = make([]*Block, maxBlockID)
+ parent = make([]*Block, maxBlockID)
+
+ n := 0
+ s := make([]*Block, 0, 256)
+ s = append(s, entry)
+ // entry is its own parent so parent[] is non-nil for every visited block.
+ parent[entry.ID] = entry
+ for len(s) > 0 {
+ node := s[len(s)-1]
+ s = s[:len(s)-1]
+
+ n++
+ for _, w := range succFn(node) {
+ // if it has a dfnum, we've already visited it
+ if dfnum[w.ID] == notFound {
+ s = append(s, w)
+ parent[w.ID] = node
+ dfnum[w.ID] = notExplored
+ }
+ }
+ dfnum[node.ID] = n
+ order[n] = node
+ }
+
+ return
+}
+
+// dominators computes the dominator tree for f. It returns a slice
+// which maps block ID to the immediate dominator of that block.
+// Unreachable blocks map to nil. The entry block maps to nil.
+func dominators(f *Func) []*Block {
+ preds := func(b *Block) []*Block { return b.Preds }
+ succs := func(b *Block) []*Block { return b.Succs }
+
+ //TODO: benchmark and try to find criteria for swapping between
+ // dominatorsSimple and dominatorsLT
+ return dominatorsLT(f.Entry, preds, succs)
+}
+
+// postDominators computes the post-dominator tree for f by running
+// dominatorsLT backwards from the (last) exit block.
+func postDominators(f *Func) []*Block {
+ preds := func(b *Block) []*Block { return b.Preds }
+ succs := func(b *Block) []*Block { return b.Succs }
+
+ if len(f.Blocks) == 0 {
+ return nil
+ }
+
+ // find the exit block, maybe store it as f.Exit instead?
+ var exit *Block
+ for i := len(f.Blocks) - 1; i >= 0; i-- {
+ if f.Blocks[i].Kind == BlockExit {
+ exit = f.Blocks[i]
+ break
+ }
+ }
+
+ // infinite loop with no exit: no block post-dominates another.
+ if exit == nil {
+ return make([]*Block, f.NumBlocks())
+ }
+ return dominatorsLT(exit, succs, preds)
+}
+
+// dominatorsLT runs Lengauer-Tarjan to compute a dominator tree starting at
+// entry and using predFn/succFn to find predecessors/successors to allow
+// computing both dominator and post-dominator trees.
+func dominatorsLT(entry *Block, predFn linkedBlocks, succFn linkedBlocks) []*Block {
+ // Based on Lengauer-Tarjan from Modern Compiler Implementation in C -
+ // Appel with optimizations from Finding Dominators in Practice -
+ // Georgiadis
+
+ // Step 1. Carry out a depth first search of the problem graph. Number
+ // the vertices from 1 to n as they are reached during the search.
+ dfnum, vertex, parent := dfs(entry, succFn)
+
+ maxBlockID := entry.Func.NumBlocks()
+ semi := make([]*Block, maxBlockID)
+ samedom := make([]*Block, maxBlockID)
+ idom := make([]*Block, maxBlockID)
+ ancestor := make([]*Block, maxBlockID)
+ best := make([]*Block, maxBlockID)
+ bucket := make([]*Block, maxBlockID)
+
+ // Step 2. Compute the semidominators of all vertices by applying
+ // Theorem 4. Carry out the computation vertex by vertex in decreasing
+ // order by number.
+ for i := maxBlockID - 1; i > 0; i-- {
+ w := vertex[i]
+ if w == nil {
+ continue
+ }
+
+ if dfnum[w.ID] == notFound {
+ // skip unreachable node
+ continue
+ }
+
+ // Step 3. Implicitly define the immediate dominator of each
+ // vertex by applying Corollary 1. (reordered)
+ for v := bucket[w.ID]; v != nil; v = bucket[v.ID] {
+ u := eval(v, ancestor, semi, dfnum, best)
+ if semi[u.ID] == semi[v.ID] {
+ idom[v.ID] = w // true dominator
+ } else {
+ samedom[v.ID] = u // v has same dominator as u
+ }
+ }
+
+ p := parent[w.ID]
+ s := p // semidominator
+
+ var sp *Block
+ // calculate the semidominator of w
+ for _, v := range w.Preds {
+ if dfnum[v.ID] == notFound {
+ // skip unreachable predecessor
+ continue
+ }
+
+ if dfnum[v.ID] <= dfnum[w.ID] {
+ sp = v
+ } else {
+ sp = semi[eval(v, ancestor, semi, dfnum, best).ID]
+ }
+
+ if dfnum[sp.ID] < dfnum[s.ID] {
+ s = sp
+ }
+ }
+
+ // link
+ ancestor[w.ID] = p
+ best[w.ID] = w
+
+ semi[w.ID] = s
+ if semi[s.ID] != parent[s.ID] {
+ bucket[w.ID] = bucket[s.ID]
+ bucket[s.ID] = w
+ }
+ }
+
+ // Final pass of step 3
+ for v := bucket[0]; v != nil; v = bucket[v.ID] {
+ idom[v.ID] = bucket[0]
+ }
+
+ // Step 4. Explicitly define the immediate dominator of each vertex,
+ // carrying out the computation vertex by vertex in increasing order by
+ // number.
+ for i := 1; i < maxBlockID-1; i++ {
+ w := vertex[i]
+ if w == nil {
+ continue
+ }
+ // w has the same dominator as samedom[w.ID]
+ if samedom[w.ID] != nil {
+ idom[w.ID] = idom[samedom[w.ID].ID]
+ }
+ }
+ return idom
+}
+
+// eval function from LT paper with path compression.
+// It returns the ancestor of v (excluding v's direct tree parent link
+// once compressed) whose semidominator has the smallest dfs number,
+// caching results in best.
+func eval(v *Block, ancestor []*Block, semi []*Block, dfnum []int, best []*Block) *Block {
+ a := ancestor[v.ID]
+ if ancestor[a.ID] != nil {
+ b := eval(a, ancestor, semi, dfnum, best)
+ ancestor[v.ID] = ancestor[a.ID]
+ if dfnum[semi[b.ID].ID] < dfnum[semi[best[v.ID].ID].ID] {
+ best[v.ID] = b
+ }
+ }
+ return best[v.ID]
+}
+
+// dominatorsSimple computes the dominator tree for f. It returns a slice
+// which maps block ID to the immediate dominator of that block.
+// Unreachable blocks map to nil. The entry block maps to nil.
+func dominatorsSimple(f *Func) []*Block {
+ // A simple algorithm for now
+ // Cooper, Harvey, Kennedy
+ idom := make([]*Block, f.NumBlocks())
+
+ // Compute postorder walk
+ post := postorder(f)
+
+ // Make map from block id to order index (for intersect call)
+ postnum := make([]int, f.NumBlocks())
+ for i, b := range post {
+ postnum[b.ID] = i
+ }
+
+ // Make the entry block a self-loop
+ idom[f.Entry.ID] = f.Entry
+ if postnum[f.Entry.ID] != len(post)-1 {
+ f.Fatalf("entry block %v not last in postorder", f.Entry)
+ }
+
+ // Compute relaxation of idom entries
+ for {
+ changed := false
+
+ // Walk blocks in reverse postorder (skipping the entry block).
+ for i := len(post) - 2; i >= 0; i-- {
+ b := post[i]
+ var d *Block
+ for _, p := range b.Preds {
+ if idom[p.ID] == nil {
+ continue
+ }
+ if d == nil {
+ d = p
+ continue
+ }
+ d = intersect(d, p, postnum, idom)
+ }
+ if d != idom[b.ID] {
+ idom[b.ID] = d
+ changed = true
+ }
+ }
+ if !changed {
+ break
+ }
+ }
+ // Set idom of entry block to nil instead of itself.
+ idom[f.Entry.ID] = nil
+ return idom
+}
+
+// intersect finds the closest dominator of both b and c.
+// It requires a postorder numbering of all the blocks.
+func intersect(b, c *Block, postnum []int, idom []*Block) *Block {
+ // TODO: This loop is O(n^2). See BenchmarkNilCheckDeep*.
+ // Walk both chains up until they meet (higher postorder number
+ // is closer to the entry block).
+ for b != c {
+ if postnum[b.ID] < postnum[c.ID] {
+ b = idom[b.ID]
+ } else {
+ c = idom[c.ID]
+ }
+ }
+ return b
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "testing"
+
+// Benchmarks of the dominator computation over 10000-block CFGs
+// with varying shapes (see the gen* builders below).
+func BenchmarkDominatorsLinear(b *testing.B) { benchmarkDominators(b, 10000, genLinear) }
+func BenchmarkDominatorsFwdBack(b *testing.B) { benchmarkDominators(b, 10000, genFwdBack) }
+func BenchmarkDominatorsManyPred(b *testing.B) { benchmarkDominators(b, 10000, genManyPred) }
+func BenchmarkDominatorsMaxPred(b *testing.B) { benchmarkDominators(b, 10000, genMaxPred) }
+func BenchmarkDominatorsMaxPredVal(b *testing.B) { benchmarkDominators(b, 10000, genMaxPredValue) }
+
+// blockGen builds a benchmark CFG description of the given size.
+type blockGen func(size int) []bloc
+
+// genLinear creates an array of blocks that succeed one another
+// b_n -> [b_n+1].
+func genLinear(size int) []bloc {
+ var blocs []bloc
+ blocs = append(blocs,
+ Bloc("entry",
+ Valu("mem", OpArg, TypeMem, 0, ".mem"),
+ Goto(blockn(0)),
+ ),
+ )
+ for i := 0; i < size; i++ {
+ blocs = append(blocs, Bloc(blockn(i),
+ Goto(blockn(i+1))))
+ }
+
+ blocs = append(blocs,
+ Bloc(blockn(size), Goto("exit")),
+ Bloc("exit", Exit("mem")),
+ )
+
+ return blocs
+}
+
+// genFwdBack creates an array of blocks that alternate between
+// b_n -> [b_n+1], b_n -> [b_n+1, b_n-1] , b_n -> [b_n+1, b_n+2]
+func genFwdBack(size int) []bloc {
+ var blocs []bloc
+ blocs = append(blocs,
+ Bloc("entry",
+ Valu("mem", OpArg, TypeMem, 0, ".mem"),
+ Valu("p", OpConstBool, TypeBool, 0, true),
+ Goto(blockn(0)),
+ ),
+ )
+ for i := 0; i < size; i++ {
+ switch i % 2 {
+ case 0:
+ blocs = append(blocs, Bloc(blockn(i),
+ If("p", blockn(i+1), blockn(i+2))))
+ case 1:
+ blocs = append(blocs, Bloc(blockn(i),
+ If("p", blockn(i+1), blockn(i-1))))
+ }
+ }
+
+ blocs = append(blocs,
+ Bloc(blockn(size), Goto("exit")),
+ Bloc("exit", Exit("mem")),
+ )
+
+ return blocs
+}
+
+// genManyPred creates an array of blocks where 1/3rd have a successor of the
+// first block, 1/3rd the last block, and the remaining third are plain.
+func genManyPred(size int) []bloc {
+	var blocs []bloc
+	blocs = append(blocs,
+		Bloc("entry",
+			Valu("mem", OpArg, TypeMem, 0, ".mem"),
+			Valu("p", OpConstBool, TypeBool, 0, true),
+			Goto(blockn(0)),
+		),
+	)
+
+	// We want predecessor lists to be long, so 2/3rds of the blocks have a
+	// successor of the first or last block.
+	for i := 0; i < size; i++ {
+		switch i % 3 {
+		case 0:
+			blocs = append(blocs, Bloc(blockn(i),
+				Valu("a", OpConstBool, TypeBool, 0, true),
+				Goto(blockn(i+1))))
+		case 1:
+			blocs = append(blocs, Bloc(blockn(i),
+				Valu("a", OpConstBool, TypeBool, 0, true),
+				If("p", blockn(i+1), blockn(0))))
+		case 2:
+			blocs = append(blocs, Bloc(blockn(i),
+				Valu("a", OpConstBool, TypeBool, 0, true),
+				If("p", blockn(i+1), blockn(size))))
+		}
+	}
+
+	blocs = append(blocs,
+		Bloc(blockn(size), Goto("exit")),
+		Bloc("exit", Exit("mem")),
+	)
+
+	return blocs
+}
+
+// genMaxPred maximizes the size of the 'exit' predecessor list.
+func genMaxPred(size int) []bloc {
+	var blocs []bloc
+	blocs = append(blocs,
+		Bloc("entry",
+			Valu("mem", OpArg, TypeMem, 0, ".mem"),
+			Valu("p", OpConstBool, TypeBool, 0, true),
+			Goto(blockn(0)),
+		),
+	)
+
+	// Every block conditionally branches straight to exit, so exit
+	// ends up with size+1 predecessors.
+	for i := 0; i < size; i++ {
+		blocs = append(blocs, Bloc(blockn(i),
+			If("p", blockn(i+1), "exit")))
+	}
+
+	blocs = append(blocs,
+		Bloc(blockn(size), Goto("exit")),
+		Bloc("exit", Exit("mem")),
+	)
+
+	return blocs
+}
+
+// genMaxPredValue is identical to genMaxPred but contains an
+// additional value.
+func genMaxPredValue(size int) []bloc {
+	var blocs []bloc
+	blocs = append(blocs,
+		Bloc("entry",
+			Valu("mem", OpArg, TypeMem, 0, ".mem"),
+			Valu("p", OpConstBool, TypeBool, 0, true),
+			Goto(blockn(0)),
+		),
+	)
+
+	for i := 0; i < size; i++ {
+		blocs = append(blocs, Bloc(blockn(i),
+			Valu("a", OpConstBool, TypeBool, 0, true),
+			If("p", blockn(i+1), "exit")))
+	}
+
+	blocs = append(blocs,
+		Bloc(blockn(size), Goto("exit")),
+		Bloc("exit", Exit("mem")),
+	)
+
+	return blocs
+}
+
+// sink for benchmark
+var domBenchRes []*Block
+
+// benchmarkDominators times the dominators pass on a CFG produced
+// by bg with the given number of blocks.
+func benchmarkDominators(b *testing.B, size int, bg blockGen) {
+	c := NewConfig("amd64", DummyFrontend{b})
+	fun := Fun(c, "entry", bg(size)...)
+
+	CheckFunc(fun.f)
+	b.SetBytes(int64(size))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		domBenchRes = dominators(fun.f)
+	}
+}
+
+// domFunc is the signature shared by the dominator computations under test.
+type domFunc func(f *Func) []*Block
+
+// verifyDominators verifies that the dominators of fut (function under test)
+// as determined by domFn, match the map node->dominator.
+func verifyDominators(t *testing.T, fut fun, domFn domFunc, doms map[string]string) {
+	blockNames := map[*Block]string{}
+	for n, b := range fut.blocks {
+		blockNames[b] = n
+	}
+
+	calcDom := domFn(fut.f)
+
+	for n, d := range doms {
+		nblk, ok := fut.blocks[n]
+		if !ok {
+			t.Errorf("invalid block name %s", n)
+		}
+		dblk, ok := fut.blocks[d]
+		if !ok {
+			t.Errorf("invalid block name %s", d)
+		}
+
+		domNode := calcDom[nblk.ID]
+		if domNode == dblk {
+			// Clear the entry so the leftover scan below knows
+			// this dominator has been verified.
+			calcDom[nblk.ID] = nil
+			continue
+		}
+		t.Errorf("expected %s as dominator of %s, found %s", d, n, blockNames[domNode])
+	}
+
+	// Any remaining non-nil entries are dominators that were computed
+	// but not listed in doms.
+	for id, d := range calcDom {
+		// If nil, we've already verified it
+		if d == nil {
+			continue
+		}
+		for _, b := range fut.blocks {
+			if int(b.ID) == id {
+				t.Errorf("unexpected dominator of %s for %s", blockNames[d], blockNames[b])
+			}
+		}
+	}
+}
+
+// TestDominatorsSingleBlock checks the degenerate single-block CFG.
+func TestDominatorsSingleBlock(t *testing.T) {
+	c := NewConfig("amd64", DummyFrontend{t})
+	fun := Fun(c, "entry",
+		Bloc("entry",
+			Valu("mem", OpArg, TypeMem, 0, ".mem"),
+			Exit("mem")))
+
+	// The entry block has no dominator, so the expected set is empty.
+	doms := map[string]string{}
+
+	CheckFunc(fun.f)
+	verifyDominators(t, fun, dominators, doms)
+	verifyDominators(t, fun, dominatorsSimple, doms)
+
+}
+
+// TestDominatorsSimple checks a straight-line chain entry->a->b->c->exit.
+func TestDominatorsSimple(t *testing.T) {
+	c := NewConfig("amd64", DummyFrontend{t})
+	fun := Fun(c, "entry",
+		Bloc("entry",
+			Valu("mem", OpArg, TypeMem, 0, ".mem"),
+			Goto("a")),
+		Bloc("a",
+			Goto("b")),
+		Bloc("b",
+			Goto("c")),
+		Bloc("c",
+			Goto("exit")),
+		Bloc("exit",
+			Exit("mem")))
+
+	doms := map[string]string{
+		"a":    "entry",
+		"b":    "a",
+		"c":    "b",
+		"exit": "c",
+	}
+
+	CheckFunc(fun.f)
+	verifyDominators(t, fun, dominators, doms)
+	verifyDominators(t, fun, dominatorsSimple, doms)
+
+}
+
+// TestDominatorsMultPredFwd checks a CFG with only forward edges where
+// c is reachable along several paths, so only entry dominates it.
+func TestDominatorsMultPredFwd(t *testing.T) {
+	c := NewConfig("amd64", DummyFrontend{t})
+	fun := Fun(c, "entry",
+		Bloc("entry",
+			Valu("mem", OpArg, TypeMem, 0, ".mem"),
+			Valu("p", OpConstBool, TypeBool, 0, true),
+			If("p", "a", "c")),
+		Bloc("a",
+			If("p", "b", "c")),
+		Bloc("b",
+			Goto("c")),
+		Bloc("c",
+			Goto("exit")),
+		Bloc("exit",
+			Exit("mem")))
+
+	doms := map[string]string{
+		"a":    "entry",
+		"b":    "a",
+		"c":    "entry",
+		"exit": "c",
+	}
+
+	CheckFunc(fun.f)
+	verifyDominators(t, fun, dominators, doms)
+	verifyDominators(t, fun, dominatorsSimple, doms)
+}
+
+// TestDominatorsDeadCode checks a CFG that contains an unreachable block (b4).
+func TestDominatorsDeadCode(t *testing.T) {
+	c := NewConfig("amd64", DummyFrontend{t})
+	fun := Fun(c, "entry",
+		Bloc("entry",
+			Valu("mem", OpArg, TypeMem, 0, ".mem"),
+			Valu("p", OpConstBool, TypeBool, 0, false),
+			If("p", "b3", "b5")),
+		Bloc("b2", Exit("mem")),
+		Bloc("b3", Goto("b2")),
+		Bloc("b4", Goto("b2")),
+		Bloc("b5", Goto("b2")))
+
+	// b4 is unreachable and so is expected to have no dominator.
+	doms := map[string]string{
+		"b2": "entry",
+		"b3": "entry",
+		"b5": "entry",
+	}
+
+	CheckFunc(fun.f)
+	verifyDominators(t, fun, dominators, doms)
+	verifyDominators(t, fun, dominatorsSimple, doms)
+}
+
+// TestDominatorsMultPredRev checks a CFG containing back edges
+// (a->entry and c->b).
+func TestDominatorsMultPredRev(t *testing.T) {
+	c := NewConfig("amd64", DummyFrontend{t})
+	fun := Fun(c, "entry",
+		Bloc("entry",
+			Valu("mem", OpArg, TypeMem, 0, ".mem"),
+			Valu("p", OpConstBool, TypeBool, 0, true),
+			Goto("a")),
+		Bloc("a",
+			If("p", "b", "entry")),
+		Bloc("b",
+			Goto("c")),
+		Bloc("c",
+			If("p", "exit", "b")),
+		Bloc("exit",
+			Exit("mem")))
+
+	doms := map[string]string{
+		"a":    "entry",
+		"b":    "a",
+		"c":    "b",
+		"exit": "c",
+	}
+
+	CheckFunc(fun.f)
+	verifyDominators(t, fun, dominators, doms)
+	verifyDominators(t, fun, dominatorsSimple, doms)
+}
+
+// TestDominatorsMultPred checks a CFG where b and c each have
+// multiple predecessors.
+func TestDominatorsMultPred(t *testing.T) {
+	c := NewConfig("amd64", DummyFrontend{t})
+	fun := Fun(c, "entry",
+		Bloc("entry",
+			Valu("mem", OpArg, TypeMem, 0, ".mem"),
+			Valu("p", OpConstBool, TypeBool, 0, true),
+			If("p", "a", "c")),
+		Bloc("a",
+			If("p", "b", "c")),
+		Bloc("b",
+			Goto("c")),
+		Bloc("c",
+			If("p", "b", "exit")),
+		Bloc("exit",
+			Exit("mem")))
+
+	doms := map[string]string{
+		"a":    "entry",
+		"b":    "entry",
+		"c":    "entry",
+		"exit": "c",
+	}
+
+	CheckFunc(fun.f)
+	verifyDominators(t, fun, dominators, doms)
+	verifyDominators(t, fun, dominatorsSimple, doms)
+}
+
+// TestPostDominators checks post-dominators on the same CFG shape
+// as TestDominatorsMultPred.
+func TestPostDominators(t *testing.T) {
+	c := NewConfig("amd64", DummyFrontend{t})
+	fun := Fun(c, "entry",
+		Bloc("entry",
+			Valu("mem", OpArg, TypeMem, 0, ".mem"),
+			Valu("p", OpConstBool, TypeBool, 0, true),
+			If("p", "a", "c")),
+		Bloc("a",
+			If("p", "b", "c")),
+		Bloc("b",
+			Goto("c")),
+		Bloc("c",
+			If("p", "b", "exit")),
+		Bloc("exit",
+			Exit("mem")))
+
+	doms := map[string]string{"entry": "c",
+		"a": "c",
+		"b": "c",
+		"c": "exit",
+	}
+
+	CheckFunc(fun.f)
+	verifyDominators(t, fun, postDominators, doms)
+}
+
+// TestInfiniteLoop checks a CFG that loops forever and never reaches
+// an exit block.
+func TestInfiniteLoop(t *testing.T) {
+	c := NewConfig("amd64", DummyFrontend{t})
+	// note lack of an exit block
+	fun := Fun(c, "entry",
+		Bloc("entry",
+			Valu("mem", OpArg, TypeMem, 0, ".mem"),
+			Valu("p", OpConstBool, TypeBool, 0, true),
+			Goto("a")),
+		Bloc("a",
+			Goto("b")),
+		Bloc("b",
+			Goto("a")))
+
+	CheckFunc(fun.f)
+	doms := map[string]string{"a": "entry",
+		"b": "a"}
+	verifyDominators(t, fun, dominators, doms)
+
+	// no exit block, so there are no post-dominators
+	postDoms := map[string]string{}
+	verifyDominators(t, fun, postDominators, postDoms)
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "testing"
+
+// Trampolines to internal passes so external tests can call them.
+var CheckFunc = checkFunc
+var PrintFunc = printFunc
+var Opt = opt
+var Deadcode = deadcode
+
+// DummyFrontend is a minimal test-only frontend: it routes logging
+// callbacks to a testing.TB and supplies no real static data.
+type DummyFrontend struct {
+	t testing.TB
+}
+
+// StringData returns nil; tests don't need real static string data.
+func (DummyFrontend) StringData(s string) interface{} {
+	return nil
+}
+
+func (d DummyFrontend) Logf(msg string, args ...interface{})           { d.t.Logf(msg, args...) }
+func (d DummyFrontend) Fatalf(msg string, args ...interface{})         { d.t.Fatalf(msg, args...) }
+func (d DummyFrontend) Unimplementedf(msg string, args ...interface{}) { d.t.Fatalf(msg, args...) }
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "sync"
+
+// A Func represents a Go func declaration (or function literal) and
+// its body. This package compiles each Func independently.
+type Func struct {
+	Config     *Config     // architecture information
+	Name       string      // e.g. bytes·Compare
+	Type       Type        // type signature of the function.
+	StaticData interface{} // associated static data, untouched by the ssa package
+	Blocks     []*Block    // unordered set of all basic blocks (note: not indexable by ID)
+	Entry      *Block      // the entry basic block
+	bid        idAlloc     // block ID allocator
+	vid        idAlloc     // value ID allocator
+
+	// when register allocation is done, maps value ids to locations
+	RegAlloc []Location
+	// when stackalloc is done, the size of the stack frame
+	FrameSize int64
+}
+
+// NumBlocks returns an integer larger than the id of any Block in the Func.
+// Useful for sizing per-block side tables indexed by block ID.
+func (f *Func) NumBlocks() int {
+	return f.bid.num()
+}
+
+// NumValues returns an integer larger than the id of any Value in the Func.
+// Useful for sizing per-value side tables indexed by value ID.
+func (f *Func) NumValues() int {
+	return f.vid.num()
+}
+
+const (
+	// blockSize is the number of Blocks allocated per pool chunk.
+	blockSize = 100
+)
+
+// blockPool provides a contiguous array of Blocks which
+// improves the speed of traversing dominator trees.
+type blockPool struct {
+	blocks []Block
+	mu     sync.Mutex
+}
+
+// newBlock hands out the next Block from the pool, refilling the pool
+// with a fresh contiguous chunk when it is exhausted.
+func (bp *blockPool) newBlock() *Block {
+	bp.mu.Lock()
+	defer bp.mu.Unlock()
+
+	if len(bp.blocks) == 0 {
+		// make([]T, n) has capacity n; spelling the capacity again
+		// (make([]Block, n, n)) is redundant.
+		bp.blocks = make([]Block, blockSize)
+	}
+
+	res := &bp.blocks[0]
+	bp.blocks = bp.blocks[1:]
+	return res
+}
+
+// bp is the process-global block pool shared by all Funcs.
+var bp blockPool
+
+// NewBlock returns a new block of the given kind and appends it to f.Blocks.
+func (f *Func) NewBlock(kind BlockKind) *Block {
+	b := bp.newBlock() // blocks come from the global pool for contiguity
+	b.ID = f.bid.get()
+	b.Kind = kind
+	b.Func = f
+	f.Blocks = append(f.Blocks, b)
+	return b
+}
+
+// NewValue0 returns a new value in the block with no arguments and zero aux values.
+func (b *Block) NewValue0(line int32, op Op, t Type) *Value {
+	v := &Value{
+		ID:    b.Func.vid.get(),
+		Op:    op,
+		Type:  t,
+		Block: b,
+		Line:  line,
+	}
+	v.Args = v.argstorage[:0]
+	b.Values = append(b.Values, v)
+	return v
+}
+
+// NewValue0I returns a new value in the block with no arguments and an auxint value.
+func (b *Block) NewValue0I(line int32, op Op, t Type, auxint int64) *Value {
+	v := &Value{
+		ID:     b.Func.vid.get(),
+		Op:     op,
+		Type:   t,
+		AuxInt: auxint,
+		Block:  b,
+		Line:   line,
+	}
+	v.Args = v.argstorage[:0]
+	b.Values = append(b.Values, v)
+	return v
+}
+
+// NewValue0A returns a new value in the block with no arguments and an aux value.
+func (b *Block) NewValue0A(line int32, op Op, t Type, aux interface{}) *Value {
+	if _, ok := aux.(int64); ok {
+		// Disallow int64 aux values. They should be in the auxint field instead.
+		// Maybe we want to allow this at some point, but for now we disallow it
+		// to prevent errors like using NewValue1A instead of NewValue1I.
+		b.Fatalf("aux field has int64 type op=%s type=%s aux=%v", op, t, aux)
+	}
+	v := &Value{
+		ID:    b.Func.vid.get(),
+		Op:    op,
+		Type:  t,
+		Aux:   aux,
+		Block: b,
+		Line:  line,
+	}
+	v.Args = v.argstorage[:0]
+	b.Values = append(b.Values, v)
+	return v
+}
+
+// NewValue0IA returns a new value in the block with no arguments and both an auxint and aux values.
+func (b *Block) NewValue0IA(line int32, op Op, t Type, auxint int64, aux interface{}) *Value {
+	v := &Value{
+		ID:     b.Func.vid.get(),
+		Op:     op,
+		Type:   t,
+		AuxInt: auxint,
+		Aux:    aux,
+		Block:  b,
+		Line:   line,
+	}
+	v.Args = v.argstorage[:0]
+	b.Values = append(b.Values, v)
+	return v
+}
+
+// NewValue1 returns a new value in the block with one argument and zero aux values.
+func (b *Block) NewValue1(line int32, op Op, t Type, arg *Value) *Value {
+	v := &Value{
+		ID:    b.Func.vid.get(),
+		Op:    op,
+		Type:  t,
+		Block: b,
+		Line:  line,
+	}
+	// Use the Value's inline argument storage for the args slice.
+	v.Args = v.argstorage[:1]
+	v.Args[0] = arg
+	b.Values = append(b.Values, v)
+	return v
+}
+
+// NewValue1I returns a new value in the block with one argument and an auxint value.
+func (b *Block) NewValue1I(line int32, op Op, t Type, auxint int64, arg *Value) *Value {
+	v := &Value{
+		ID:     b.Func.vid.get(),
+		Op:     op,
+		Type:   t,
+		AuxInt: auxint,
+		Block:  b,
+		Line:   line,
+	}
+	v.Args = v.argstorage[:1]
+	v.Args[0] = arg
+	b.Values = append(b.Values, v)
+	return v
+}
+
+// NewValue1A returns a new value in the block with one argument and an aux value.
+func (b *Block) NewValue1A(line int32, op Op, t Type, aux interface{}, arg *Value) *Value {
+	v := &Value{
+		ID:    b.Func.vid.get(),
+		Op:    op,
+		Type:  t,
+		Aux:   aux,
+		Block: b,
+		Line:  line,
+	}
+	v.Args = v.argstorage[:1]
+	v.Args[0] = arg
+	b.Values = append(b.Values, v)
+	return v
+}
+
+// NewValue1IA returns a new value in the block with one argument and both an auxint and aux values.
+func (b *Block) NewValue1IA(line int32, op Op, t Type, auxint int64, aux interface{}, arg *Value) *Value {
+	v := &Value{
+		ID:     b.Func.vid.get(),
+		Op:     op,
+		Type:   t,
+		AuxInt: auxint,
+		Aux:    aux,
+		Block:  b,
+		Line:   line,
+	}
+	v.Args = v.argstorage[:1]
+	v.Args[0] = arg
+	b.Values = append(b.Values, v)
+	return v
+}
+
+// NewValue2 returns a new value in the block with two arguments and zero aux values.
+func (b *Block) NewValue2(line int32, op Op, t Type, arg0, arg1 *Value) *Value {
+	v := &Value{
+		ID:    b.Func.vid.get(),
+		Op:    op,
+		Type:  t,
+		Block: b,
+		Line:  line,
+	}
+	v.Args = v.argstorage[:2]
+	v.Args[0] = arg0
+	v.Args[1] = arg1
+	b.Values = append(b.Values, v)
+	return v
+}
+
+// NewValue2I returns a new value in the block with two arguments and an auxint value.
+func (b *Block) NewValue2I(line int32, op Op, t Type, auxint int64, arg0, arg1 *Value) *Value {
+	// The parameter is named auxint (not aux) for consistency with the
+	// other *I constructors: it populates the AuxInt field.
+	v := &Value{
+		ID:     b.Func.vid.get(),
+		Op:     op,
+		Type:   t,
+		AuxInt: auxint,
+		Block:  b,
+		Line:   line,
+	}
+	v.Args = v.argstorage[:2]
+	v.Args[0] = arg0
+	v.Args[1] = arg1
+	b.Values = append(b.Values, v)
+	return v
+}
+
+// NewValue3 returns a new value in the block with three arguments and zero aux values.
+func (b *Block) NewValue3(line int32, op Op, t Type, arg0, arg1, arg2 *Value) *Value {
+	v := &Value{
+		ID:    b.Func.vid.get(),
+		Op:    op,
+		Type:  t,
+		Block: b,
+		Line:  line,
+	}
+	// Unlike the 0/1/2-arg constructors this allocates a fresh slice
+	// rather than using v.argstorage — presumably the inline storage
+	// is too small for three args; TODO confirm against Value's definition.
+	v.Args = []*Value{arg0, arg1, arg2}
+	b.Values = append(b.Values, v)
+	return v
+}
+
+// ConstInt8 returns an int8 constant representing its argument.
+func (f *Func) ConstInt8(line int32, t Type, c int8) *Value {
+	// TODO: cache?
+	return f.Entry.NewValue0I(line, OpConst8, t, int64(c))
+}
+
+// ConstInt16 returns an int16 constant representing its argument.
+func (f *Func) ConstInt16(line int32, t Type, c int16) *Value {
+	// TODO: cache?
+	return f.Entry.NewValue0I(line, OpConst16, t, int64(c))
+}
+
+// ConstInt32 returns an int32 constant representing its argument.
+func (f *Func) ConstInt32(line int32, t Type, c int32) *Value {
+	// TODO: cache?
+	return f.Entry.NewValue0I(line, OpConst32, t, int64(c))
+}
+
+// ConstInt64 returns an int64 constant representing its argument.
+func (f *Func) ConstInt64(line int32, t Type, c int64) *Value {
+	// TODO: cache?
+	return f.Entry.NewValue0I(line, OpConst64, t, c)
+}
+
+// ConstIntPtr returns a pointer-sized integer constant representing its argument.
+func (f *Func) ConstIntPtr(line int32, t Type, c int64) *Value {
+	// TODO: cache?
+	return f.Entry.NewValue0I(line, OpConstPtr, t, c)
+}
+
+// Logging and failure reporting delegate to the frontend via Config.
+func (f *Func) Logf(msg string, args ...interface{})   { f.Config.Logf(msg, args...) }
+func (f *Func) Fatalf(msg string, args ...interface{}) { f.Config.Fatalf(msg, args...) }
+func (f *Func) Unimplementedf(msg string, args ...interface{}) { f.Config.Unimplementedf(msg, args...) }
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains some utility functions to help define Funcs for testing.
+// As an example, the following func
+//
+// b1:
+// v1 = Arg <mem> [.mem]
+// Plain -> b2
+// b2:
+// Exit v1
+// b3:
+// v2 = Const <bool> [true]
+// If v2 -> b3 b2
+//
+// can be defined as
+//
+// fun := Fun("entry",
+// Bloc("entry",
+// Valu("mem", OpArg, TypeMem, 0, ".mem"),
+// Goto("exit")),
+// Bloc("exit",
+// Exit("mem")),
+// Bloc("deadblock",
+// Valu("deadval", OpConstBool, TypeBool, 0, true),
+// If("deadval", "deadblock", "exit")))
+//
+// and the Blocks or Values used in the Func can be accessed
+// like this:
+// fun.blocks["entry"] or fun.values["deadval"]
+
+package ssa
+
+// TODO(matloob): Choose better names for Fun, Bloc, Goto, etc.
+// TODO(matloob): Write a parser for the Func disassembly. Maybe
+// the parser can be used instead of Fun.
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+)
+
+// Compare two Funcs for equivalence. Their CFGs must be isomorphic,
+// and their values must correspond.
+// Requires that values and predecessors are in the same order, even
+// though Funcs could be equivalent when they are not.
+// TODO(matloob): Allow values and predecessors to be in different
+// orders if the CFG are otherwise equivalent.
+func Equiv(f, g *Func) bool {
+	// valcor records the value correspondence discovered so far,
+	// in both directions (f value -> g value and vice versa).
+	valcor := make(map[*Value]*Value)
+	var checkVal func(fv, gv *Value) bool
+	checkVal = func(fv, gv *Value) bool {
+		if fv == nil && gv == nil {
+			return true
+		}
+		if valcor[fv] == nil && valcor[gv] == nil {
+			// First time this pair is seen: record the correspondence
+			// and compare the two values structurally.
+			valcor[fv] = gv
+			valcor[gv] = fv
+			// Ignore ids. Ops and Types are compared for equality.
+			// TODO(matloob): Make sure types are canonical and can
+			// be compared for equality.
+			if fv.Op != gv.Op || fv.Type != gv.Type || fv.AuxInt != gv.AuxInt {
+				return false
+			}
+			if !reflect.DeepEqual(fv.Aux, gv.Aux) {
+				// This makes the assumption that aux values can be compared
+				// using DeepEqual.
+				// TODO(matloob): Aux values may be *gc.Sym pointers in the near
+				// future. Make sure they are canonical.
+				return false
+			}
+			if len(fv.Args) != len(gv.Args) {
+				return false
+			}
+			for i := range fv.Args {
+				if !checkVal(fv.Args[i], gv.Args[i]) {
+					return false
+				}
+			}
+		}
+		// Equivalent only if the recorded correspondence is mutual.
+		return valcor[fv] == gv && valcor[gv] == fv
+	}
+	// blkcor records the block correspondence, in both directions.
+	blkcor := make(map[*Block]*Block)
+	var checkBlk func(fb, gb *Block) bool
+	checkBlk = func(fb, gb *Block) bool {
+		if blkcor[fb] == nil && blkcor[gb] == nil {
+			blkcor[fb] = gb
+			blkcor[gb] = fb
+			// ignore ids
+			if fb.Kind != gb.Kind {
+				return false
+			}
+			if len(fb.Values) != len(gb.Values) {
+				return false
+			}
+			for i := range fb.Values {
+				if !checkVal(fb.Values[i], gb.Values[i]) {
+					return false
+				}
+			}
+			if len(fb.Succs) != len(gb.Succs) {
+				return false
+			}
+			for i := range fb.Succs {
+				if !checkBlk(fb.Succs[i], gb.Succs[i]) {
+					return false
+				}
+			}
+			if len(fb.Preds) != len(gb.Preds) {
+				return false
+			}
+			for i := range fb.Preds {
+				if !checkBlk(fb.Preds[i], gb.Preds[i]) {
+					return false
+				}
+			}
+			return true
+
+		}
+		return blkcor[fb] == gb && blkcor[gb] == fb
+	}
+
+	// The correspondence is seeded at the entry blocks and spreads
+	// through the CFG from there.
+	return checkBlk(f.Entry, g.Entry)
+}
+
+// fun is the return type of Fun. It contains the created func
+// itself as well as indexes from block and value names into the
+// corresponding Blocks and Values.
+type fun struct {
+	f      *Func
+	blocks map[string]*Block // block name -> Block
+	values map[string]*Value // value name -> Value
+}
+
+// Fun takes the name of an entry bloc and a series of Bloc calls, and
+// returns a fun containing the composed Func. entry must be a name
+// supplied to one of the Bloc functions. Each of the bloc names and
+// valu names should be unique across the Fun.
+func Fun(c *Config, entry string, blocs ...bloc) fun {
+	f := new(Func)
+	f.Config = c
+	blocks := make(map[string]*Block)
+	values := make(map[string]*Value)
+	// Create all the blocks and values.
+	for _, bloc := range blocs {
+		b := f.NewBlock(bloc.control.kind)
+		blocks[bloc.name] = b
+		for _, valu := range bloc.valus {
+			// args are filled in the second pass.
+			values[valu.name] = b.NewValue0IA(0, valu.op, valu.t, valu.auxint, valu.aux)
+		}
+	}
+	// Connect the blocks together and specify control values.
+	f.Entry = blocks[entry]
+	for _, bloc := range blocs {
+		b := blocks[bloc.name]
+		// Named ctl (not c) to avoid shadowing the *Config parameter.
+		ctl := bloc.control
+		// Specify control values.
+		if ctl.control != "" {
+			cval, ok := values[ctl.control]
+			if !ok {
+				f.Fatalf("control value for block %s missing", bloc.name)
+			}
+			b.Control = cval
+		}
+		// Fill in args.
+		for _, valu := range bloc.valus {
+			v := values[valu.name]
+			for _, arg := range valu.args {
+				a, ok := values[arg]
+				if !ok {
+					b.Fatalf("arg %s missing for value %s in block %s",
+						arg, valu.name, bloc.name)
+				}
+				v.AddArg(a)
+			}
+		}
+		// Connect to successors.
+		for _, succ := range ctl.succs {
+			addEdge(b, blocks[succ])
+		}
+	}
+	return fun{f, blocks, values}
+}
+
+// Bloc defines a block for Fun. The bloc name should be unique
+// across the containing Fun. entries should consist of calls to Valu,
+// as well as one call to Goto, If, or Exit to specify the block kind.
+func Bloc(name string, entries ...interface{}) bloc {
+	b := bloc{}
+	b.name = name
+	seenCtrl := false
+	for _, e := range entries {
+		switch v := e.(type) {
+		case ctrl:
+			// there should be exactly one Ctrl entry.
+			if seenCtrl {
+				panic(fmt.Sprintf("already seen control for block %s", name))
+			}
+			b.control = v
+			seenCtrl = true
+		case valu:
+			b.valus = append(b.valus, v)
+		}
+	}
+	if !seenCtrl {
+		panic(fmt.Sprintf("block %s doesn't have control", b.name))
+	}
+	return b
+}
+
+// Valu defines a value in a block.
+func Valu(name string, op Op, t Type, auxint int64, aux interface{}, args ...string) valu {
+	return valu{name, op, t, auxint, aux, args}
+}
+
+// Goto specifies that this is a BlockPlain and names the single successor.
+// TODO(matloob): choose a better name.
+func Goto(succ string) ctrl {
+	return ctrl{BlockPlain, "", []string{succ}}
+}
+
+// If specifies a BlockIf.
+func If(cond, sub, alt string) ctrl {
+	return ctrl{BlockIf, cond, []string{sub, alt}}
+}
+
+// Exit specifies a BlockExit.
+func Exit(arg string) ctrl {
+	return ctrl{BlockExit, arg, []string{}}
+}
+
+// bloc, ctrl, and valu are internal structures used by Bloc, Valu, Goto,
+// If, and Exit to help define blocks.
+
+// bloc describes one block: its name, control (kind + successors), and values.
+type bloc struct {
+	name    string
+	control ctrl
+	valus   []valu
+}
+
+// ctrl describes a block's kind, its control value name, and successor names.
+type ctrl struct {
+	kind    BlockKind
+	control string
+	succs   []string
+}
+
+// valu describes one value: its name, op, type, aux data, and argument names.
+type valu struct {
+	name   string
+	op     Op
+	t      Type
+	auxint int64
+	aux    interface{}
+	args   []string
+}
+
+// addEdge records a CFG edge from b to c in both directions.
+func addEdge(b, c *Block) {
+	b.Succs = append(b.Succs, c)
+	c.Preds = append(c.Preds, b)
+}
+
+// TestArgs checks that Fun wires value arguments up in declaration order.
+func TestArgs(t *testing.T) {
+	c := NewConfig("amd64", DummyFrontend{t})
+	fun := Fun(c, "entry",
+		Bloc("entry",
+			Valu("a", OpConst64, TypeInt64, 14, nil),
+			Valu("b", OpConst64, TypeInt64, 26, nil),
+			Valu("sum", OpAdd64, TypeInt64, 0, nil, "a", "b"),
+			Valu("mem", OpArg, TypeMem, 0, ".mem"),
+			Goto("exit")),
+		Bloc("exit",
+			Exit("mem")))
+	sum := fun.values["sum"]
+	for i, name := range []string{"a", "b"} {
+		if sum.Args[i] != fun.values[name] {
+			t.Errorf("arg %d for sum is incorrect: want %s, got %s",
+				i, sum.Args[i], fun.values[name])
+		}
+	}
+}
+
+// TestEquiv checks Equiv against pairs of Funcs that should compare
+// equal (including under block reordering) and pairs that should not.
+func TestEquiv(t *testing.T) {
+	c := NewConfig("amd64", DummyFrontend{t})
+	equivalentCases := []struct{ f, g fun }{
+		// simple case
+		{
+			Fun(c, "entry",
+				Bloc("entry",
+					Valu("a", OpConst64, TypeInt64, 14, nil),
+					Valu("b", OpConst64, TypeInt64, 26, nil),
+					Valu("sum", OpAdd64, TypeInt64, 0, nil, "a", "b"),
+					Valu("mem", OpArg, TypeMem, 0, ".mem"),
+					Goto("exit")),
+				Bloc("exit",
+					Exit("mem"))),
+			Fun(c, "entry",
+				Bloc("entry",
+					Valu("a", OpConst64, TypeInt64, 14, nil),
+					Valu("b", OpConst64, TypeInt64, 26, nil),
+					Valu("sum", OpAdd64, TypeInt64, 0, nil, "a", "b"),
+					Valu("mem", OpArg, TypeMem, 0, ".mem"),
+					Goto("exit")),
+				Bloc("exit",
+					Exit("mem"))),
+		},
+		// block order changed
+		{
+			Fun(c, "entry",
+				Bloc("entry",
+					Valu("a", OpConst64, TypeInt64, 14, nil),
+					Valu("b", OpConst64, TypeInt64, 26, nil),
+					Valu("sum", OpAdd64, TypeInt64, 0, nil, "a", "b"),
+					Valu("mem", OpArg, TypeMem, 0, ".mem"),
+					Goto("exit")),
+				Bloc("exit",
+					Exit("mem"))),
+			Fun(c, "entry",
+				Bloc("exit",
+					Exit("mem")),
+				Bloc("entry",
+					Valu("a", OpConst64, TypeInt64, 14, nil),
+					Valu("b", OpConst64, TypeInt64, 26, nil),
+					Valu("sum", OpAdd64, TypeInt64, 0, nil, "a", "b"),
+					Valu("mem", OpArg, TypeMem, 0, ".mem"),
+					Goto("exit"))),
+		},
+	}
+	// Note: the range variable c below shadows the *Config c above;
+	// harmless here because all the Funs were already constructed.
+	for _, c := range equivalentCases {
+		if !Equiv(c.f.f, c.g.f) {
+			t.Error("expected equivalence. Func definitions:")
+			t.Error(c.f.f)
+			t.Error(c.g.f)
+		}
+	}
+
+	differentCases := []struct{ f, g fun }{
+		// different shape
+		{
+			Fun(c, "entry",
+				Bloc("entry",
+					Valu("mem", OpArg, TypeMem, 0, ".mem"),
+					Goto("exit")),
+				Bloc("exit",
+					Exit("mem"))),
+			Fun(c, "entry",
+				Bloc("entry",
+					Valu("mem", OpArg, TypeMem, 0, ".mem"),
+					Exit("mem"))),
+		},
+		// value order changed
+		{
+			Fun(c, "entry",
+				Bloc("entry",
+					Valu("mem", OpArg, TypeMem, 0, ".mem"),
+					Valu("b", OpConst64, TypeInt64, 26, nil),
+					Valu("a", OpConst64, TypeInt64, 14, nil),
+					Exit("mem"))),
+			Fun(c, "entry",
+				Bloc("entry",
+					Valu("mem", OpArg, TypeMem, 0, ".mem"),
+					Valu("a", OpConst64, TypeInt64, 14, nil),
+					Valu("b", OpConst64, TypeInt64, 26, nil),
+					Exit("mem"))),
+		},
+		// value auxint different
+		{
+			Fun(c, "entry",
+				Bloc("entry",
+					Valu("mem", OpArg, TypeMem, 0, ".mem"),
+					Valu("a", OpConst64, TypeInt64, 14, nil),
+					Exit("mem"))),
+			Fun(c, "entry",
+				Bloc("entry",
+					Valu("mem", OpArg, TypeMem, 0, ".mem"),
+					Valu("a", OpConst64, TypeInt64, 26, nil),
+					Exit("mem"))),
+		},
+		// value aux different
+		{
+			Fun(c, "entry",
+				Bloc("entry",
+					Valu("mem", OpArg, TypeMem, 0, ".mem"),
+					Valu("a", OpConst64, TypeInt64, 0, 14),
+					Exit("mem"))),
+			Fun(c, "entry",
+				Bloc("entry",
+					Valu("mem", OpArg, TypeMem, 0, ".mem"),
+					Valu("a", OpConst64, TypeInt64, 0, 26),
+					Exit("mem"))),
+		},
+		// value args different
+		{
+			Fun(c, "entry",
+				Bloc("entry",
+					Valu("mem", OpArg, TypeMem, 0, ".mem"),
+					Valu("a", OpConst64, TypeInt64, 14, nil),
+					Valu("b", OpConst64, TypeInt64, 26, nil),
+					Valu("sum", OpAdd64, TypeInt64, 0, nil, "a", "b"),
+					Exit("mem"))),
+			Fun(c, "entry",
+				Bloc("entry",
+					Valu("mem", OpArg, TypeMem, 0, ".mem"),
+					Valu("a", OpConst64, TypeInt64, 0, nil),
+					Valu("b", OpConst64, TypeInt64, 14, nil),
+					Valu("sum", OpAdd64, TypeInt64, 0, nil, "b", "a"),
+					Exit("mem"))),
+		},
+	}
+	for _, c := range differentCases {
+		if Equiv(c.f.f, c.g.f) {
+			t.Error("expected difference. Func definitions:")
+			t.Error(c.f.f)
+			t.Error(c.g.f)
+		}
+	}
+}
+
+// opcodeMap returns a map from opcode to the number of times that opcode
+// appears in the function.
+func opcodeMap(f *Func) map[Op]int {
+	m := map[Op]int{}
+	for _, b := range f.Blocks {
+		for _, v := range b.Values {
+			m[v.Op]++
+		}
+	}
+	return m
+}
+
+// checkOpcodeCounts checks that the number of opcodes listed in m agree with the
+// number of opcodes that appear in the function.
+func checkOpcodeCounts(t *testing.T, f *Func, m map[Op]int) {
+	n := opcodeMap(f)
+	for op, cnt := range m {
+		if n[op] != cnt {
+			t.Errorf("%s appears %d times, want %d times", op, n[op], cnt)
+		}
+	}
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// fuse simplifies control flow by joining basic blocks.
+// A plain block b whose single successor c has b as its only
+// predecessor is merged into c, and b is emptied out.
+func fuse(f *Func) {
+	for _, b := range f.Blocks {
+		if b.Kind != BlockPlain {
+			continue
+		}
+		// Succs[0] is b's single successor (b is BlockPlain).
+		c := b.Succs[0]
+		if len(c.Preds) != 1 {
+			// c is reachable from elsewhere too; merging would
+			// change its semantics.
+			continue
+		}
+
+		// move all of b's values to c.
+		for _, v := range b.Values {
+			v.Block = c
+			c.Values = append(c.Values, v)
+		}
+
+		// replace b->c edge with preds(b) -> c
+		c.Preds = b.Preds
+		for _, p := range c.Preds {
+			for i, q := range p.Succs {
+				if q == b {
+					p.Succs[i] = c
+				}
+			}
+		}
+		// If b was the entry block, c takes over that role.
+		if f.Entry == b {
+			f.Entry = c
+		}
+
+		// trash b, just in case
+		b.Kind = blockInvalid
+		b.Values = nil
+		b.Preds = nil
+		b.Succs = nil
+	}
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// x86 register conventions:
+// - Integer types live in the low portion of registers. Upper portions are junk.
+// - Boolean types use the low-order byte of a register. Upper bytes are junk.
+// - We do not use AH,BH,CH,DH registers.
+// - Floating-point types will live in the low natural slot of an sse2 register.
+//   Unused portions are junk.
+
+// Lowering arithmetic
+(Add64 x y) -> (ADDQ x y)
+(AddPtr x y) -> (ADDQ x y)
+(Add32 x y) -> (ADDL x y)
+(Add16 x y) -> (ADDW x y)
+(Add8 x y) -> (ADDB x y)
+
+(Sub64 x y) -> (SUBQ x y)
+(Sub32 x y) -> (SUBL x y)
+(Sub16 x y) -> (SUBW x y)
+(Sub8 x y) -> (SUBB x y)
+
+(Mul64 x y) -> (MULQ x y)
+(MulPtr x y) -> (MULQ x y)
+(Mul32 x y) -> (MULL x y)
+(Mul16 x y) -> (MULW x y)
+// Note: we use 16-bit multiply instructions for 8-bit multiplies because
+// the 16-bit multiply instructions are more forgiving (they operate on
+// any register instead of just AX/DX).
+(Mul8 x y) -> (MULW x y)
+
+(And64 x y) -> (ANDQ x y)
+(And32 x y) -> (ANDL x y)
+(And16 x y) -> (ANDW x y)
+(And8 x y) -> (ANDB x y)
+
+(Or64 x y) -> (ORQ x y)
+(Or32 x y) -> (ORL x y)
+(Or16 x y) -> (ORW x y)
+(Or8 x y) -> (ORB x y)
+
+(Xor64 x y) -> (XORQ x y)
+(Xor32 x y) -> (XORL x y)
+(Xor16 x y) -> (XORW x y)
+(Xor8 x y) -> (XORB x y)
+
+(Neg64 x) -> (NEGQ x)
+(Neg32 x) -> (NEGL x)
+(Neg16 x) -> (NEGW x)
+(Neg8 x) -> (NEGB x)
+
+(Com64 x) -> (NOTQ x)
+(Com32 x) -> (NOTL x)
+(Com16 x) -> (NOTW x)
+(Com8 x) -> (NOTB x)
+
+// Note: we always extend to 64 bits even though some ops don't need that many result bits.
+(SignExt8to16 x) -> (MOVBQSX x)
+(SignExt8to32 x) -> (MOVBQSX x)
+(SignExt8to64 x) -> (MOVBQSX x)
+(SignExt16to32 x) -> (MOVWQSX x)
+(SignExt16to64 x) -> (MOVWQSX x)
+(SignExt32to64 x) -> (MOVLQSX x)
+
+(ZeroExt8to16 x) -> (MOVBQZX x)
+(ZeroExt8to32 x) -> (MOVBQZX x)
+(ZeroExt8to64 x) -> (MOVBQZX x)
+(ZeroExt16to32 x) -> (MOVWQZX x)
+(ZeroExt16to64 x) -> (MOVWQZX x)
+(ZeroExt32to64 x) -> (MOVLQZX x)
+
+// Because we ignore high parts of registers, truncates are just copies.
+(Trunc16to8 x) -> (Copy x)
+(Trunc32to8 x) -> (Copy x)
+(Trunc32to16 x) -> (Copy x)
+(Trunc64to8 x) -> (Copy x)
+(Trunc64to16 x) -> (Copy x)
+(Trunc64to32 x) -> (Copy x)
+
+(ConvNop <t> x) && t == x.Type -> (Copy x)
+(ConvNop <t> x) && t.IsInteger() && x.Type.IsInteger() && t.Size() == x.Type.Size() -> (Copy x)
+// TODO: other ConvNops are safe? Maybe all of them?
+
+// Lowering shifts
+// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
+//   result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffffffffffff)
+// The SBBcarrymask values below materialize that mask: all ones when the
+// shift amount is in range, all zeros otherwise.
+// Note: for small shifts we generate 32 bits of mask even when we don't need it all.
+(Lsh64x64 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst <TypeFlags> [64] y)))
+(Lsh64x32 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst <TypeFlags> [64] y)))
+(Lsh64x16 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst <TypeFlags> [64] y)))
+(Lsh64x8 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst <TypeFlags> [64] y)))
+
+(Lsh32x64 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst <TypeFlags> [32] y)))
+(Lsh32x32 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst <TypeFlags> [32] y)))
+(Lsh32x16 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst <TypeFlags> [32] y)))
+(Lsh32x8 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst <TypeFlags> [32] y)))
+
+(Lsh16x64 <t> x y) -> (ANDW (SHLW <t> x y) (SBBLcarrymask <t> (CMPQconst <TypeFlags> [16] y)))
+(Lsh16x32 <t> x y) -> (ANDW (SHLW <t> x y) (SBBLcarrymask <t> (CMPLconst <TypeFlags> [16] y)))
+(Lsh16x16 <t> x y) -> (ANDW (SHLW <t> x y) (SBBLcarrymask <t> (CMPWconst <TypeFlags> [16] y)))
+(Lsh16x8 <t> x y) -> (ANDW (SHLW <t> x y) (SBBLcarrymask <t> (CMPBconst <TypeFlags> [16] y)))
+
+(Lsh8x64 <t> x y) -> (ANDB (SHLB <t> x y) (SBBLcarrymask <t> (CMPQconst <TypeFlags> [8] y)))
+(Lsh8x32 <t> x y) -> (ANDB (SHLB <t> x y) (SBBLcarrymask <t> (CMPLconst <TypeFlags> [8] y)))
+(Lsh8x16 <t> x y) -> (ANDB (SHLB <t> x y) (SBBLcarrymask <t> (CMPWconst <TypeFlags> [8] y)))
+(Lsh8x8 <t> x y) -> (ANDB (SHLB <t> x y) (SBBLcarrymask <t> (CMPBconst <TypeFlags> [8] y)))
+
+(Rsh64Ux64 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst <TypeFlags> [64] y)))
+(Rsh64Ux32 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst <TypeFlags> [64] y)))
+(Rsh64Ux16 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst <TypeFlags> [64] y)))
+(Rsh64Ux8 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst <TypeFlags> [64] y)))
+
+(Rsh32Ux64 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst <TypeFlags> [32] y)))
+(Rsh32Ux32 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst <TypeFlags> [32] y)))
+(Rsh32Ux16 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst <TypeFlags> [32] y)))
+(Rsh32Ux8 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst <TypeFlags> [32] y)))
+
+(Rsh16Ux64 <t> x y) -> (ANDW (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst <TypeFlags> [16] y)))
+(Rsh16Ux32 <t> x y) -> (ANDW (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst <TypeFlags> [16] y)))
+(Rsh16Ux16 <t> x y) -> (ANDW (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst <TypeFlags> [16] y)))
+(Rsh16Ux8 <t> x y) -> (ANDW (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst <TypeFlags> [16] y)))
+
+(Rsh8Ux64 <t> x y) -> (ANDB (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst <TypeFlags> [8] y)))
+(Rsh8Ux32 <t> x y) -> (ANDB (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst <TypeFlags> [8] y)))
+(Rsh8Ux16 <t> x y) -> (ANDB (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst <TypeFlags> [8] y)))
+(Rsh8Ux8 <t> x y) -> (ANDB (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst <TypeFlags> [8] y)))
+
+// Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value.
+// We implement this by setting the shift amount to -1 (all ones) if the shift amount is >= width.
+// Note: for small shift widths we generate 32 bits of mask even when we don't need it all.
+(Rsh64x64 <t> x y) -> (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst <TypeFlags> [64] y)))))
+(Rsh64x32 <t> x y) -> (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst <TypeFlags> [64] y)))))
+(Rsh64x16 <t> x y) -> (SARQ <t> x (ORW <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst <TypeFlags> [64] y)))))
+(Rsh64x8 <t> x y) -> (SARQ <t> x (ORB <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst <TypeFlags> [64] y)))))
+
+(Rsh32x64 <t> x y) -> (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst <TypeFlags> [32] y)))))
+(Rsh32x32 <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst <TypeFlags> [32] y)))))
+(Rsh32x16 <t> x y) -> (SARL <t> x (ORW <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst <TypeFlags> [32] y)))))
+(Rsh32x8 <t> x y) -> (SARL <t> x (ORB <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst <TypeFlags> [32] y)))))
+
+(Rsh16x64 <t> x y) -> (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst <TypeFlags> [16] y)))))
+(Rsh16x32 <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst <TypeFlags> [16] y)))))
+(Rsh16x16 <t> x y) -> (SARW <t> x (ORW <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst <TypeFlags> [16] y)))))
+(Rsh16x8 <t> x y) -> (SARW <t> x (ORB <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst <TypeFlags> [16] y)))))
+
+(Rsh8x64 <t> x y) -> (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst <TypeFlags> [8] y)))))
+(Rsh8x32 <t> x y) -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst <TypeFlags> [8] y)))))
+(Rsh8x16 <t> x y) -> (SARB <t> x (ORW <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst <TypeFlags> [8] y)))))
+(Rsh8x8 <t> x y) -> (SARB <t> x (ORB <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst <TypeFlags> [8] y)))))
+
+(Less64 x y) -> (SETL (CMPQ <TypeFlags> x y))
+(Less32 x y) -> (SETL (CMPL <TypeFlags> x y))
+(Less16 x y) -> (SETL (CMPW <TypeFlags> x y))
+(Less8 x y) -> (SETL (CMPB <TypeFlags> x y))
+
+(Less64U x y) -> (SETB (CMPQ <TypeFlags> x y))
+(Less32U x y) -> (SETB (CMPL <TypeFlags> x y))
+(Less16U x y) -> (SETB (CMPW <TypeFlags> x y))
+(Less8U x y) -> (SETB (CMPB <TypeFlags> x y))
+
+(Leq64 x y) -> (SETLE (CMPQ <TypeFlags> x y))
+(Leq32 x y) -> (SETLE (CMPL <TypeFlags> x y))
+(Leq16 x y) -> (SETLE (CMPW <TypeFlags> x y))
+(Leq8 x y) -> (SETLE (CMPB <TypeFlags> x y))
+
+(Leq64U x y) -> (SETBE (CMPQ <TypeFlags> x y))
+(Leq32U x y) -> (SETBE (CMPL <TypeFlags> x y))
+(Leq16U x y) -> (SETBE (CMPW <TypeFlags> x y))
+(Leq8U x y) -> (SETBE (CMPB <TypeFlags> x y))
+
+(Greater64 x y) -> (SETG (CMPQ <TypeFlags> x y))
+(Greater32 x y) -> (SETG (CMPL <TypeFlags> x y))
+(Greater16 x y) -> (SETG (CMPW <TypeFlags> x y))
+(Greater8 x y) -> (SETG (CMPB <TypeFlags> x y))
+
+(Greater64U x y) -> (SETA (CMPQ <TypeFlags> x y))
+(Greater32U x y) -> (SETA (CMPL <TypeFlags> x y))
+(Greater16U x y) -> (SETA (CMPW <TypeFlags> x y))
+(Greater8U x y) -> (SETA (CMPB <TypeFlags> x y))
+
+(Geq64 x y) -> (SETGE (CMPQ <TypeFlags> x y))
+(Geq32 x y) -> (SETGE (CMPL <TypeFlags> x y))
+(Geq16 x y) -> (SETGE (CMPW <TypeFlags> x y))
+(Geq8 x y) -> (SETGE (CMPB <TypeFlags> x y))
+
+(Geq64U x y) -> (SETAE (CMPQ <TypeFlags> x y))
+(Geq32U x y) -> (SETAE (CMPL <TypeFlags> x y))
+(Geq16U x y) -> (SETAE (CMPW <TypeFlags> x y))
+(Geq8U x y) -> (SETAE (CMPB <TypeFlags> x y))
+
+(Eq64 x y) -> (SETEQ (CMPQ <TypeFlags> x y))
+(Eq32 x y) -> (SETEQ (CMPL <TypeFlags> x y))
+(Eq16 x y) -> (SETEQ (CMPW <TypeFlags> x y))
+(Eq8 x y) -> (SETEQ (CMPB <TypeFlags> x y))
+(EqPtr x y) -> (SETEQ (CMPQ <TypeFlags> x y))
+
+(Neq64 x y) -> (SETNE (CMPQ <TypeFlags> x y))
+(Neq32 x y) -> (SETNE (CMPL <TypeFlags> x y))
+(Neq16 x y) -> (SETNE (CMPW <TypeFlags> x y))
+(Neq8 x y) -> (SETNE (CMPB <TypeFlags> x y))
+(NeqPtr x y) -> (SETNE (CMPQ <TypeFlags> x y))
+
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVQload ptr mem)
+(Load <t> ptr mem) && is32BitInt(t) -> (MOVLload ptr mem)
+(Load <t> ptr mem) && is16BitInt(t) -> (MOVWload ptr mem)
+(Load <t> ptr mem) && (t.IsBoolean() || is8BitInt(t)) -> (MOVBload ptr mem)
+(Store ptr val mem) && (is64BitInt(val.Type) || isPtr(val.Type)) -> (MOVQstore ptr val mem)
+(Store ptr val mem) && is32BitInt(val.Type) -> (MOVLstore ptr val mem)
+(Store ptr val mem) && is16BitInt(val.Type) -> (MOVWstore ptr val mem)
+(Store ptr val mem) && is8BitInt(val.Type) -> (MOVBstore ptr val mem)
+(Store ptr val mem) && val.Type.IsBoolean() -> (MOVBstore ptr val mem)
+
+// checks
+(IsNonNil p) -> (SETNE (TESTQ <TypeFlags> p p))
+(IsInBounds idx len) -> (SETB (CMPQ <TypeFlags> idx len))
+
+(Move [size] dst src mem) -> (REPMOVSB dst src (MOVQconst <TypeUInt64> [size]) mem)
+
+(Not x) -> (XORBconst [1] x)
+
+(OffPtr [off] ptr) -> (ADDQconst [off] ptr)
+
+(Const8 [val]) -> (MOVBconst [val])
+(Const16 [val]) -> (MOVWconst [val])
+(Const32 [val]) -> (MOVLconst [val])
+(Const64 [val]) -> (MOVQconst [val])
+(ConstPtr [val]) -> (MOVQconst [val])
+(ConstNil) -> (MOVQconst [0])
+(ConstBool {b}) && !b.(bool) -> (MOVBconst [0])
+(ConstBool {b}) && b.(bool) -> (MOVBconst [1])
+
+(Addr {sym} base) -> (LEAQ {sym} base)
+
+// block rewrites
+(If (SETL cmp) yes no) -> (LT cmp yes no)
+(If (SETLE cmp) yes no) -> (LE cmp yes no)
+(If (SETG cmp) yes no) -> (GT cmp yes no)
+(If (SETGE cmp) yes no) -> (GE cmp yes no)
+(If (SETEQ cmp) yes no) -> (EQ cmp yes no)
+(If (SETNE cmp) yes no) -> (NE cmp yes no)
+(If (SETB cmp) yes no) -> (ULT cmp yes no)
+(If (SETBE cmp) yes no) -> (ULE cmp yes no)
+(If (SETA cmp) yes no) -> (UGT cmp yes no)
+(If (SETAE cmp) yes no) -> (UGE cmp yes no)
+(If cond yes no) && cond.Op == OpAMD64MOVBload -> (NE (TESTB <TypeFlags> cond cond) yes no)
+
+(StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
+(ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
+
+// Rules below here apply some simple optimizations after lowering.
+// TODO: Should this be a separate pass?
+
+// fold constants into instructions
+(ADDQ x (MOVQconst [c])) && is32Bit(c) -> (ADDQconst [c] x)
+(ADDQ (MOVQconst [c]) x) && is32Bit(c) -> (ADDQconst [c] x)
+(ADDL x (MOVLconst [c])) -> (ADDLconst [c] x)
+(ADDL (MOVLconst [c]) x) -> (ADDLconst [c] x)
+(ADDW x (MOVWconst [c])) -> (ADDWconst [c] x)
+(ADDW (MOVWconst [c]) x) -> (ADDWconst [c] x)
+(ADDB x (MOVBconst [c])) -> (ADDBconst [c] x)
+(ADDB (MOVBconst [c]) x) -> (ADDBconst [c] x)
+
+(SUBQ x (MOVQconst [c])) && is32Bit(c) -> (SUBQconst x [c])
+(SUBQ (MOVQconst [c]) x) && is32Bit(c) -> (NEGQ (SUBQconst <v.Type> x [c]))
+(SUBL x (MOVLconst [c])) -> (SUBLconst x [c])
+(SUBL (MOVLconst [c]) x) -> (NEGL (SUBLconst <v.Type> x [c]))
+(SUBW x (MOVWconst [c])) -> (SUBWconst x [c])
+(SUBW (MOVWconst [c]) x) -> (NEGW (SUBWconst <v.Type> x [c]))
+(SUBB x (MOVBconst [c])) -> (SUBBconst x [c])
+(SUBB (MOVBconst [c]) x) -> (NEGB (SUBBconst <v.Type> x [c]))
+
+(MULQ x (MOVQconst [c])) && is32Bit(c) -> (MULQconst [c] x)
+(MULQ (MOVQconst [c]) x) && is32Bit(c) -> (MULQconst [c] x)
+(MULL x (MOVLconst [c])) -> (MULLconst [c] x)
+(MULL (MOVLconst [c]) x) -> (MULLconst [c] x)
+(MULW x (MOVWconst [c])) -> (MULWconst [c] x)
+(MULW (MOVWconst [c]) x) -> (MULWconst [c] x)
+
+(ANDQ x (MOVQconst [c])) && is32Bit(c) -> (ANDQconst [c] x)
+(ANDQ (MOVQconst [c]) x) && is32Bit(c) -> (ANDQconst [c] x)
+(ANDL x (MOVLconst [c])) -> (ANDLconst [c] x)
+(ANDL (MOVLconst [c]) x) -> (ANDLconst [c] x)
+(ANDW x (MOVWconst [c])) -> (ANDWconst [c] x)
+(ANDW (MOVWconst [c]) x) -> (ANDWconst [c] x)
+(ANDB x (MOVBconst [c])) -> (ANDBconst [c] x)
+(ANDB (MOVBconst [c]) x) -> (ANDBconst [c] x)
+
+(ORQ x (MOVQconst [c])) && is32Bit(c) -> (ORQconst [c] x)
+(ORQ (MOVQconst [c]) x) && is32Bit(c) -> (ORQconst [c] x)
+(ORL x (MOVLconst [c])) -> (ORLconst [c] x)
+(ORL (MOVLconst [c]) x) -> (ORLconst [c] x)
+(ORW x (MOVWconst [c])) -> (ORWconst [c] x)
+(ORW (MOVWconst [c]) x) -> (ORWconst [c] x)
+(ORB x (MOVBconst [c])) -> (ORBconst [c] x)
+(ORB (MOVBconst [c]) x) -> (ORBconst [c] x)
+
+(XORQ x (MOVQconst [c])) && is32Bit(c) -> (XORQconst [c] x)
+(XORQ (MOVQconst [c]) x) && is32Bit(c) -> (XORQconst [c] x)
+(XORL x (MOVLconst [c])) -> (XORLconst [c] x)
+(XORL (MOVLconst [c]) x) -> (XORLconst [c] x)
+(XORW x (MOVWconst [c])) -> (XORWconst [c] x)
+(XORW (MOVWconst [c]) x) -> (XORWconst [c] x)
+(XORB x (MOVBconst [c])) -> (XORBconst [c] x)
+(XORB (MOVBconst [c]) x) -> (XORBconst [c] x)
+
+(SHLQ x (MOVQconst [c])) -> (SHLQconst [c&63] x)
+(SHLL x (MOVLconst [c])) -> (SHLLconst [c&31] x)
+(SHLW x (MOVWconst [c])) -> (SHLWconst [c&31] x)
+(SHLB x (MOVBconst [c])) -> (SHLBconst [c&31] x)
+
+(SHRQ x (MOVQconst [c])) -> (SHRQconst [c&63] x)
+(SHRL x (MOVLconst [c])) -> (SHRLconst [c&31] x)
+(SHRW x (MOVWconst [c])) -> (SHRWconst [c&31] x)
+(SHRB x (MOVBconst [c])) -> (SHRBconst [c&31] x)
+
+(SARQ x (MOVQconst [c])) -> (SARQconst [c&63] x)
+(SARL x (MOVLconst [c])) -> (SARLconst [c&31] x)
+(SARW x (MOVWconst [c])) -> (SARWconst [c&31] x)
+(SARB x (MOVBconst [c])) -> (SARBconst [c&31] x)
+
+// Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits)
+// because the x86 instructions are defined to use all 5 bits of the shift even
+// for the small shifts. I don't think we'll ever generate a weird shift
+// (e.g. (SHLW x (MOVWconst [24]))), but just in case.
+
+(CMPQ x (MOVQconst [c])) && is32Bit(c) -> (CMPQconst x [c])
+(CMPQ (MOVQconst [c]) x) && is32Bit(c) -> (InvertFlags (CMPQconst <TypeFlags> x [c]))
+(CMPL x (MOVLconst [c])) -> (CMPLconst x [c])
+(CMPL (MOVLconst [c]) x) -> (InvertFlags (CMPLconst <TypeFlags> x [c]))
+(CMPW x (MOVWconst [c])) -> (CMPWconst x [c])
+(CMPW (MOVWconst [c]) x) -> (InvertFlags (CMPWconst <TypeFlags> x [c]))
+(CMPB x (MOVBconst [c])) -> (CMPBconst x [c])
+(CMPB (MOVBconst [c]) x) -> (InvertFlags (CMPBconst <TypeFlags> x [c]))
+
+// strength reduction
+(MULQconst [-1] x) -> (NEGQ x)
+(MULQconst [0] _) -> (MOVQconst [0])
+(MULQconst [1] x) -> (Copy x)
+(MULQconst [3] x) -> (LEAQ2 x x)
+(MULQconst [5] x) -> (LEAQ4 x x)
+(MULQconst [9] x) -> (LEAQ8 x x)
+(MULQconst [c] x) && isPowerOfTwo(c) -> (SHLQconst [log2(c)] x)
+
+// fold add/shift into leaq
+(ADDQ x (SHLQconst [3] y)) -> (LEAQ8 x y)
+(ADDQconst [c] (LEAQ8 [d] x y)) -> (LEAQ8 [addOff(c, d)] x y)
+
+// reverse ordering of compare instruction
+(SETL (InvertFlags x)) -> (SETG x)
+(SETG (InvertFlags x)) -> (SETL x)
+(SETB (InvertFlags x)) -> (SETA x)
+(SETA (InvertFlags x)) -> (SETB x)
+(SETLE (InvertFlags x)) -> (SETGE x)
+(SETGE (InvertFlags x)) -> (SETLE x)
+(SETBE (InvertFlags x)) -> (SETAE x)
+(SETAE (InvertFlags x)) -> (SETBE x)
+(SETEQ (InvertFlags x)) -> (SETEQ x)
+(SETNE (InvertFlags x)) -> (SETNE x)
+
+// sign extended loads
+(MOVBQSX (MOVBload ptr mem)) -> (MOVBQSXload ptr mem)
+(MOVBQZX (MOVBload ptr mem)) -> (MOVBQZXload ptr mem)
+// TODO: more
+
+// Don't extend before storing
+(MOVLstore ptr (MOVLQSX x) mem) -> (MOVLstore ptr x mem)
+(MOVWstore ptr (MOVWQSX x) mem) -> (MOVWstore ptr x mem)
+(MOVBstore ptr (MOVBQSX x) mem) -> (MOVBstore ptr x mem)
+(MOVLstore ptr (MOVLQZX x) mem) -> (MOVLstore ptr x mem)
+(MOVWstore ptr (MOVWQZX x) mem) -> (MOVWstore ptr x mem)
+(MOVBstore ptr (MOVBQZX x) mem) -> (MOVBstore ptr x mem)
+
+// fold constants into memory operations
+// Note that this is not always a good idea because if not all the uses of
+// the ADDQconst get eliminated, we still have to compute the ADDQconst and we now
+// have potentially two live values (ptr and (ADDQconst [off] ptr)) instead of one.
+// Nevertheless, let's do it!
+(MOVQload [off1] (ADDQconst [off2] ptr) mem) -> (MOVQload [addOff(off1, off2)] ptr mem)
+(MOVQstore [off1] (ADDQconst [off2] ptr) val mem) -> (MOVQstore [addOff(off1, off2)] ptr val mem)
+
+(MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && (sym1 == nil || sym2 == nil) ->
+ (MOVQload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
+(MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && (sym1 == nil || sym2 == nil) ->
+ (MOVQstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
+
+// indexed loads and stores
+(MOVQload [off1] (LEAQ8 [off2] ptr idx) mem) -> (MOVQloadidx8 [addOff(off1, off2)] ptr idx mem)
+(MOVQstore [off1] (LEAQ8 [off2] ptr idx) val mem) -> (MOVQstoreidx8 [addOff(off1, off2)] ptr idx val mem)
+
+(MOVQloadidx8 [off1] (ADDQconst [off2] ptr) idx mem) -> (MOVQloadidx8 [addOff(off1, off2)] ptr idx mem)
+(MOVQstoreidx8 [off1] (ADDQconst [off2] ptr) idx val mem) -> (MOVQstoreidx8 [addOff(off1, off2)] ptr idx val mem)
+
+(ADDQconst [0] x) -> (Copy x)
+
+// lower Zero instructions with word sizes
+(Zero [0] _ mem) -> (Copy mem)
+(Zero [1] destptr mem) -> (MOVBstore destptr (MOVBconst <TypeInt8> [0]) mem)
+(Zero [2] destptr mem) -> (MOVWstore destptr (MOVWconst <TypeInt16> [0]) mem)
+(Zero [4] destptr mem) -> (MOVLstore destptr (MOVLconst <TypeInt32> [0]) mem)
+(Zero [8] destptr mem) -> (MOVQstore destptr (MOVQconst <TypeInt64> [0]) mem)
+
+// rewrite anything less than 4 words into a series of MOV[BWLQ] $0, ptr(off) instructions
+(Zero [size] destptr mem) && size < 4*8 -> (MOVXzero [size] destptr mem)
+// Use STOSQ to zero memory. Rewrite this into storing the words with REPSTOSQ and then filling in the remainder with linear moves
+(Zero [size] destptr mem) && size >= 4*8 -> (Zero [size%8] (OffPtr <TypeUInt64> [size-(size%8)] destptr) (REPSTOSQ <TypeMem> destptr (MOVQconst <TypeUInt64> [size/8]) mem))
+
+// Absorb InvertFlags into branches.
+(LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
+(GT (InvertFlags cmp) yes no) -> (LT cmp yes no)
+(LE (InvertFlags cmp) yes no) -> (GE cmp yes no)
+(GE (InvertFlags cmp) yes no) -> (LE cmp yes no)
+(ULT (InvertFlags cmp) yes no) -> (UGT cmp yes no)
+(UGT (InvertFlags cmp) yes no) -> (ULT cmp yes no)
+(ULE (InvertFlags cmp) yes no) -> (UGE cmp yes no)
+(UGE (InvertFlags cmp) yes no) -> (ULE cmp yes no)
+(EQ (InvertFlags cmp) yes no) -> (EQ cmp yes no)
+(NE (InvertFlags cmp) yes no) -> (NE cmp yes no)
+
+// get rid of overflow code for constant shifts
+(SBBQcarrymask (CMPQconst [c] (MOVQconst [d]))) && inBounds(d, c) -> (MOVQconst [-1])
+(SBBQcarrymask (CMPQconst [c] (MOVQconst [d]))) && !inBounds(d, c) -> (MOVQconst [0])
+(SBBQcarrymask (CMPLconst [c] (MOVLconst [d]))) && inBounds(int64(int32(d)), int64(int32(c))) -> (MOVQconst [-1])
+(SBBQcarrymask (CMPLconst [c] (MOVLconst [d]))) && !inBounds(int64(int32(d)), int64(int32(c))) -> (MOVQconst [0])
+(SBBQcarrymask (CMPWconst [c] (MOVWconst [d]))) && inBounds(int64(int16(d)), int64(int16(c))) -> (MOVQconst [-1])
+(SBBQcarrymask (CMPWconst [c] (MOVWconst [d]))) && !inBounds(int64(int16(d)), int64(int16(c))) -> (MOVQconst [0])
+(SBBQcarrymask (CMPBconst [c] (MOVBconst [d]))) && inBounds(int64(int8(d)), int64(int8(c))) -> (MOVQconst [-1])
+(SBBQcarrymask (CMPBconst [c] (MOVBconst [d]))) && !inBounds(int64(int8(d)), int64(int8(c))) -> (MOVQconst [0])
+(ANDQconst [0] _) -> (MOVQconst [0])
+(ANDLconst [c] _) && int32(c)==0 -> (MOVLconst [0])
+(ANDWconst [c] _) && int16(c)==0 -> (MOVWconst [0])
+(ANDBconst [c] _) && int8(c)==0 -> (MOVBconst [0])
+(ANDQconst [-1] x) -> (Copy x)
+(ANDLconst [c] x) && int32(c)==-1 -> (Copy x)
+(ANDWconst [c] x) && int16(c)==-1 -> (Copy x)
+(ANDBconst [c] x) && int8(c)==-1 -> (Copy x)
+(ORQconst [0] x) -> (Copy x)
+(ORLconst [c] x) && int32(c)==0 -> (Copy x)
+(ORWconst [c] x) && int16(c)==0 -> (Copy x)
+(ORBconst [c] x) && int8(c)==0 -> (Copy x)
+(ORQconst [-1] _) -> (MOVQconst [-1])
+(ORLconst [c] _) && int32(c)==-1 -> (MOVLconst [-1])
+(ORWconst [c] _) && int16(c)==-1 -> (MOVWconst [-1])
+(ORBconst [c] _) && int8(c)==-1 -> (MOVBconst [-1])
+
+// generic constant folding
+// TODO: more of this
+(ADDQconst [c] (MOVQconst [d])) -> (MOVQconst [c+d])
+(ADDLconst [c] (MOVLconst [d])) -> (MOVLconst [c+d])
+(ADDWconst [c] (MOVWconst [d])) -> (MOVWconst [c+d])
+(ADDBconst [c] (MOVBconst [d])) -> (MOVBconst [c+d])
+(SUBQconst [c] (MOVQconst [d])) -> (MOVQconst [c-d])
+(SUBLconst [c] (MOVLconst [d])) -> (MOVLconst [c-d])
+(SUBWconst [c] (MOVWconst [d])) -> (MOVWconst [c-d])
+(SUBBconst [c] (MOVBconst [d])) -> (MOVBconst [c-d])
+(MULQconst [c] (MOVQconst [d])) -> (MOVQconst [c*d])
+(MULLconst [c] (MOVLconst [d])) -> (MOVLconst [c*d])
+(MULWconst [c] (MOVWconst [d])) -> (MOVWconst [c*d])
+(ANDQconst [c] (MOVQconst [d])) -> (MOVQconst [c&d])
+(ANDLconst [c] (MOVLconst [d])) -> (MOVLconst [c&d])
+(ANDWconst [c] (MOVWconst [d])) -> (MOVWconst [c&d])
+(ANDBconst [c] (MOVBconst [d])) -> (MOVBconst [c&d])
+(ORQconst [c] (MOVQconst [d])) -> (MOVQconst [c|d])
+(ORLconst [c] (MOVLconst [d])) -> (MOVLconst [c|d])
+(ORWconst [c] (MOVWconst [d])) -> (MOVWconst [c|d])
+(ORBconst [c] (MOVBconst [d])) -> (MOVBconst [c|d])
+(XORQconst [c] (MOVQconst [d])) -> (MOVQconst [c^d])
+(XORLconst [c] (MOVLconst [d])) -> (MOVLconst [c^d])
+(XORWconst [c] (MOVWconst [d])) -> (MOVWconst [c^d])
+(XORBconst [c] (MOVBconst [d])) -> (MOVBconst [c^d])
+(NOTQ (MOVQconst [c])) -> (MOVQconst [^c])
+(NOTL (MOVLconst [c])) -> (MOVLconst [^c])
+(NOTW (MOVWconst [c])) -> (MOVWconst [^c])
+(NOTB (MOVBconst [c])) -> (MOVBconst [^c])
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "strings"
+
+// copied from ../../amd64/reg.go
+var regNamesAMD64 = []string{
+ ".AX",
+ ".CX",
+ ".DX",
+ ".BX",
+ ".SP",
+ ".BP",
+ ".SI",
+ ".DI",
+ ".R8",
+ ".R9",
+ ".R10",
+ ".R11",
+ ".R12",
+ ".R13",
+ ".R14",
+ ".R15",
+ ".X0",
+ ".X1",
+ ".X2",
+ ".X3",
+ ".X4",
+ ".X5",
+ ".X6",
+ ".X7",
+ ".X8",
+ ".X9",
+ ".X10",
+ ".X11",
+ ".X12",
+ ".X13",
+ ".X14",
+ ".X15",
+
+ // pseudo-registers
+ ".SB",
+ ".FLAGS",
+}
+
+func init() {
+ // Make map from reg names to reg integers.
+ if len(regNamesAMD64) > 64 {
+ panic("too many registers")
+ }
+ num := map[string]int{}
+ for i, name := range regNamesAMD64 {
+ if name[0] != '.' {
+ panic("register name " + name + " does not start with '.'")
+ }
+ num[name[1:]] = i
+ }
+ buildReg := func(s string) regMask {
+ m := regMask(0)
+ for _, r := range strings.Split(s, " ") {
+ if n, ok := num[r]; ok {
+ m |= regMask(1) << uint(n)
+ continue
+ }
+ panic("register " + r + " not found")
+ }
+ return m
+ }
+
+ gp := buildReg("AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15")
+ gpsp := gp | buildReg("SP")
+ gpspsb := gpsp | buildReg("SB")
+ flags := buildReg("FLAGS")
+ gp01 := regInfo{[]regMask{}, 0, []regMask{gp}}
+ gp11 := regInfo{[]regMask{gpsp}, 0, []regMask{gp}}
+ gp11sb := regInfo{[]regMask{gpspsb}, 0, []regMask{gp}}
+ gp21 := regInfo{[]regMask{gpsp, gpsp}, 0, []regMask{gp}}
+ gp21sb := regInfo{[]regMask{gpspsb, gpsp}, 0, []regMask{gp}}
+ gp21shift := regInfo{[]regMask{gpsp, buildReg("CX")}, 0, []regMask{gp}}
+ gp2flags := regInfo{[]regMask{gpsp, gpsp}, 0, []regMask{flags}}
+ gp1flags := regInfo{[]regMask{gpsp}, 0, []regMask{flags}}
+ flagsgp1 := regInfo{[]regMask{flags}, 0, []regMask{gp}}
+ gpload := regInfo{[]regMask{gpspsb, 0}, 0, []regMask{gp}}
+ gploadidx := regInfo{[]regMask{gpspsb, gpsp, 0}, 0, []regMask{gp}}
+ gpstore := regInfo{[]regMask{gpspsb, gpsp, 0}, 0, nil}
+ gpstoreconst := regInfo{[]regMask{gpspsb, 0}, 0, nil}
+ gpstoreidx := regInfo{[]regMask{gpspsb, gpsp, gpsp, 0}, 0, nil}
+ flagsgp := regInfo{[]regMask{flags}, 0, []regMask{gp}}
+
+ // Suffixes encode the bit width of various instructions.
+ // Q = 64 bit, L = 32 bit, W = 16 bit, B = 8 bit
+
+ // TODO: 2-address instructions. Mark ops as needing matching input/output regs.
+ var AMD64ops = []opData{
+ // binary ops
+ {name: "ADDQ", reg: gp21, asm: "ADDQ"}, // arg0 + arg1
+ {name: "ADDL", reg: gp21, asm: "ADDL"}, // arg0 + arg1
+ {name: "ADDW", reg: gp21, asm: "ADDW"}, // arg0 + arg1
+ {name: "ADDB", reg: gp21, asm: "ADDB"}, // arg0 + arg1
+ {name: "ADDQconst", reg: gp11, asm: "ADDQ"}, // arg0 + auxint
+ {name: "ADDLconst", reg: gp11, asm: "ADDL"}, // arg0 + auxint
+ {name: "ADDWconst", reg: gp11, asm: "ADDW"}, // arg0 + auxint
+ {name: "ADDBconst", reg: gp11, asm: "ADDB"}, // arg0 + auxint
+
+ {name: "SUBQ", reg: gp21, asm: "SUBQ"}, // arg0 - arg1
+ {name: "SUBL", reg: gp21, asm: "SUBL"}, // arg0 - arg1
+ {name: "SUBW", reg: gp21, asm: "SUBW"}, // arg0 - arg1
+ {name: "SUBB", reg: gp21, asm: "SUBB"}, // arg0 - arg1
+ {name: "SUBQconst", reg: gp11, asm: "SUBQ"}, // arg0 - auxint
+ {name: "SUBLconst", reg: gp11, asm: "SUBL"}, // arg0 - auxint
+ {name: "SUBWconst", reg: gp11, asm: "SUBW"}, // arg0 - auxint
+ {name: "SUBBconst", reg: gp11, asm: "SUBB"}, // arg0 - auxint
+
+ {name: "MULQ", reg: gp21, asm: "IMULQ"}, // arg0 * arg1
+ {name: "MULL", reg: gp21, asm: "IMULL"}, // arg0 * arg1
+ {name: "MULW", reg: gp21, asm: "IMULW"}, // arg0 * arg1
+ {name: "MULQconst", reg: gp11, asm: "IMULQ"}, // arg0 * auxint
+ {name: "MULLconst", reg: gp11, asm: "IMULL"}, // arg0 * auxint
+ {name: "MULWconst", reg: gp11, asm: "IMULW"}, // arg0 * auxint
+
+ {name: "ANDQ", reg: gp21, asm: "ANDQ"}, // arg0 & arg1
+ {name: "ANDL", reg: gp21, asm: "ANDL"}, // arg0 & arg1
+ {name: "ANDW", reg: gp21, asm: "ANDW"}, // arg0 & arg1
+ {name: "ANDB", reg: gp21, asm: "ANDB"}, // arg0 & arg1
+ {name: "ANDQconst", reg: gp11, asm: "ANDQ"}, // arg0 & auxint
+ {name: "ANDLconst", reg: gp11, asm: "ANDL"}, // arg0 & auxint
+ {name: "ANDWconst", reg: gp11, asm: "ANDW"}, // arg0 & auxint
+ {name: "ANDBconst", reg: gp11, asm: "ANDB"}, // arg0 & auxint
+
+ {name: "ORQ", reg: gp21, asm: "ORQ"}, // arg0 | arg1
+ {name: "ORL", reg: gp21, asm: "ORL"}, // arg0 | arg1
+ {name: "ORW", reg: gp21, asm: "ORW"}, // arg0 | arg1
+ {name: "ORB", reg: gp21, asm: "ORB"}, // arg0 | arg1
+ {name: "ORQconst", reg: gp11, asm: "ORQ"}, // arg0 | auxint
+ {name: "ORLconst", reg: gp11, asm: "ORL"}, // arg0 | auxint
+ {name: "ORWconst", reg: gp11, asm: "ORW"}, // arg0 | auxint
+ {name: "ORBconst", reg: gp11, asm: "ORB"}, // arg0 | auxint
+
+ {name: "XORQ", reg: gp21, asm: "XORQ"}, // arg0 ^ arg1
+ {name: "XORL", reg: gp21, asm: "XORL"}, // arg0 ^ arg1
+ {name: "XORW", reg: gp21, asm: "XORW"}, // arg0 ^ arg1
+ {name: "XORB", reg: gp21, asm: "XORB"}, // arg0 ^ arg1
+ {name: "XORQconst", reg: gp11, asm: "XORQ"}, // arg0 ^ auxint
+ {name: "XORLconst", reg: gp11, asm: "XORL"}, // arg0 ^ auxint
+ {name: "XORWconst", reg: gp11, asm: "XORW"}, // arg0 ^ auxint
+ {name: "XORBconst", reg: gp11, asm: "XORB"}, // arg0 ^ auxint
+
+ {name: "CMPQ", reg: gp2flags, asm: "CMPQ"}, // arg0 compare to arg1
+ {name: "CMPL", reg: gp2flags, asm: "CMPL"}, // arg0 compare to arg1
+ {name: "CMPW", reg: gp2flags, asm: "CMPW"}, // arg0 compare to arg1
+ {name: "CMPB", reg: gp2flags, asm: "CMPB"}, // arg0 compare to arg1
+ {name: "CMPQconst", reg: gp1flags, asm: "CMPQ"}, // arg0 compare to auxint
+ {name: "CMPLconst", reg: gp1flags, asm: "CMPL"}, // arg0 compare to auxint
+ {name: "CMPWconst", reg: gp1flags, asm: "CMPW"}, // arg0 compare to auxint
+ {name: "CMPBconst", reg: gp1flags, asm: "CMPB"}, // arg0 compare to auxint
+
+ {name: "TESTQ", reg: gp2flags, asm: "TESTQ"}, // (arg0 & arg1) compare to 0
+ {name: "TESTL", reg: gp2flags, asm: "TESTL"}, // (arg0 & arg1) compare to 0
+ {name: "TESTW", reg: gp2flags, asm: "TESTW"}, // (arg0 & arg1) compare to 0
+ {name: "TESTB", reg: gp2flags, asm: "TESTB"}, // (arg0 & arg1) compare to 0
+ {name: "TESTQconst", reg: gp1flags, asm: "TESTQ"}, // (arg0 & auxint) compare to 0
+ {name: "TESTLconst", reg: gp1flags, asm: "TESTL"}, // (arg0 & auxint) compare to 0
+ {name: "TESTWconst", reg: gp1flags, asm: "TESTW"}, // (arg0 & auxint) compare to 0
+ {name: "TESTBconst", reg: gp1flags, asm: "TESTB"}, // (arg0 & auxint) compare to 0
+
+ {name: "SHLQ", reg: gp21shift, asm: "SHLQ"}, // arg0 << arg1, shift amount is mod 64
+ {name: "SHLL", reg: gp21shift, asm: "SHLL"}, // arg0 << arg1, shift amount is mod 32
+ {name: "SHLW", reg: gp21shift, asm: "SHLW"}, // arg0 << arg1, shift amount is mod 32
+ {name: "SHLB", reg: gp21shift, asm: "SHLB"}, // arg0 << arg1, shift amount is mod 32
+ {name: "SHLQconst", reg: gp11, asm: "SHLQ"}, // arg0 << auxint, shift amount 0-63
+ {name: "SHLLconst", reg: gp11, asm: "SHLL"}, // arg0 << auxint, shift amount 0-31
+ {name: "SHLWconst", reg: gp11, asm: "SHLW"}, // arg0 << auxint, shift amount 0-31
+ {name: "SHLBconst", reg: gp11, asm: "SHLB"}, // arg0 << auxint, shift amount 0-31
		// Note: x86 is weird, the 16- and 8-bit shifts still use all 5 bits of shift amount!
+
+ {name: "SHRQ", reg: gp21shift, asm: "SHRQ"}, // unsigned arg0 >> arg1, shift amount is mod 64
+ {name: "SHRL", reg: gp21shift, asm: "SHRL"}, // unsigned arg0 >> arg1, shift amount is mod 32
+ {name: "SHRW", reg: gp21shift, asm: "SHRW"}, // unsigned arg0 >> arg1, shift amount is mod 32
+ {name: "SHRB", reg: gp21shift, asm: "SHRB"}, // unsigned arg0 >> arg1, shift amount is mod 32
+ {name: "SHRQconst", reg: gp11, asm: "SHRQ"}, // unsigned arg0 >> auxint, shift amount 0-63
+ {name: "SHRLconst", reg: gp11, asm: "SHRL"}, // unsigned arg0 >> auxint, shift amount 0-31
+ {name: "SHRWconst", reg: gp11, asm: "SHRW"}, // unsigned arg0 >> auxint, shift amount 0-31
+ {name: "SHRBconst", reg: gp11, asm: "SHRB"}, // unsigned arg0 >> auxint, shift amount 0-31
+
+ {name: "SARQ", reg: gp21shift, asm: "SARQ"}, // signed arg0 >> arg1, shift amount is mod 64
+ {name: "SARL", reg: gp21shift, asm: "SARL"}, // signed arg0 >> arg1, shift amount is mod 32
+ {name: "SARW", reg: gp21shift, asm: "SARW"}, // signed arg0 >> arg1, shift amount is mod 32
+ {name: "SARB", reg: gp21shift, asm: "SARB"}, // signed arg0 >> arg1, shift amount is mod 32
+ {name: "SARQconst", reg: gp11, asm: "SARQ"}, // signed arg0 >> auxint, shift amount 0-63
+ {name: "SARLconst", reg: gp11, asm: "SARL"}, // signed arg0 >> auxint, shift amount 0-31
+ {name: "SARWconst", reg: gp11, asm: "SARW"}, // signed arg0 >> auxint, shift amount 0-31
+ {name: "SARBconst", reg: gp11, asm: "SARB"}, // signed arg0 >> auxint, shift amount 0-31
+
+ // unary ops
+ {name: "NEGQ", reg: gp11, asm: "NEGQ"}, // -arg0
+ {name: "NEGL", reg: gp11, asm: "NEGL"}, // -arg0
+ {name: "NEGW", reg: gp11, asm: "NEGW"}, // -arg0
+ {name: "NEGB", reg: gp11, asm: "NEGB"}, // -arg0
+
+ {name: "NOTQ", reg: gp11, asm: "NOTQ"}, // ^arg0
+ {name: "NOTL", reg: gp11, asm: "NOTL"}, // ^arg0
+ {name: "NOTW", reg: gp11, asm: "NOTW"}, // ^arg0
+ {name: "NOTB", reg: gp11, asm: "NOTB"}, // ^arg0
+
+ {name: "SBBQcarrymask", reg: flagsgp1, asm: "SBBQ"}, // (int64)(-1) if carry is set, 0 if carry is clear.
+ {name: "SBBLcarrymask", reg: flagsgp1, asm: "SBBL"}, // (int32)(-1) if carry is set, 0 if carry is clear.
+ // Note: SBBW and SBBB are subsumed by SBBL
+
+ {name: "SETEQ", reg: flagsgp, asm: "SETEQ"}, // extract == condition from arg0
+ {name: "SETNE", reg: flagsgp, asm: "SETNE"}, // extract != condition from arg0
+ {name: "SETL", reg: flagsgp, asm: "SETLT"}, // extract signed < condition from arg0
+ {name: "SETLE", reg: flagsgp, asm: "SETLE"}, // extract signed <= condition from arg0
+ {name: "SETG", reg: flagsgp, asm: "SETGT"}, // extract signed > condition from arg0
+ {name: "SETGE", reg: flagsgp, asm: "SETGE"}, // extract signed >= condition from arg0
+ {name: "SETB", reg: flagsgp, asm: "SETCS"}, // extract unsigned < condition from arg0
+ {name: "SETBE", reg: flagsgp, asm: "SETLS"}, // extract unsigned <= condition from arg0
+ {name: "SETA", reg: flagsgp, asm: "SETHI"}, // extract unsigned > condition from arg0
+ {name: "SETAE", reg: flagsgp, asm: "SETCC"}, // extract unsigned >= condition from arg0
+
+ {name: "MOVBQSX", reg: gp11, asm: "MOVBQSX"}, // sign extend arg0 from int8 to int64
+ {name: "MOVBQZX", reg: gp11, asm: "MOVBQZX"}, // zero extend arg0 from int8 to int64
+ {name: "MOVWQSX", reg: gp11, asm: "MOVWQSX"}, // sign extend arg0 from int16 to int64
+ {name: "MOVWQZX", reg: gp11, asm: "MOVWQZX"}, // zero extend arg0 from int16 to int64
+ {name: "MOVLQSX", reg: gp11, asm: "MOVLQSX"}, // sign extend arg0 from int32 to int64
+ {name: "MOVLQZX", reg: gp11, asm: "MOVLQZX"}, // zero extend arg0 from int32 to int64
+
+ {name: "MOVBconst", reg: gp01, asm: "MOVB"}, // 8 low bits of auxint
+ {name: "MOVWconst", reg: gp01, asm: "MOVW"}, // 16 low bits of auxint
+ {name: "MOVLconst", reg: gp01, asm: "MOVL"}, // 32 low bits of auxint
+ {name: "MOVQconst", reg: gp01, asm: "MOVQ"}, // auxint
+
+ {name: "LEAQ", reg: gp11sb}, // arg0 + auxint + offset encoded in aux
+ {name: "LEAQ1", reg: gp21sb}, // arg0 + arg1 + auxint
+ {name: "LEAQ2", reg: gp21sb}, // arg0 + 2*arg1 + auxint
+ {name: "LEAQ4", reg: gp21sb}, // arg0 + 4*arg1 + auxint
+ {name: "LEAQ8", reg: gp21sb}, // arg0 + 8*arg1 + auxint
+
+ {name: "MOVBload", reg: gpload, asm: "MOVB"}, // load byte from arg0+auxint. arg1=mem
+ {name: "MOVBQSXload", reg: gpload, asm: "MOVBQSX"}, // ditto, extend to int64
+ {name: "MOVBQZXload", reg: gpload, asm: "MOVBQZX"}, // ditto, extend to uint64
+ {name: "MOVWload", reg: gpload, asm: "MOVW"}, // load 2 bytes from arg0+auxint. arg1=mem
+ {name: "MOVLload", reg: gpload, asm: "MOVL"}, // load 4 bytes from arg0+auxint. arg1=mem
+ {name: "MOVQload", reg: gpload, asm: "MOVQ"}, // load 8 bytes from arg0+auxint. arg1=mem
+ {name: "MOVQloadidx8", reg: gploadidx, asm: "MOVQ"}, // load 8 bytes from arg0+8*arg1+auxint. arg2=mem
+ {name: "MOVBstore", reg: gpstore, asm: "MOVB"}, // store byte in arg1 to arg0+auxint. arg2=mem
+ {name: "MOVWstore", reg: gpstore, asm: "MOVW"}, // store 2 bytes in arg1 to arg0+auxint. arg2=mem
+ {name: "MOVLstore", reg: gpstore, asm: "MOVL"}, // store 4 bytes in arg1 to arg0+auxint. arg2=mem
+ {name: "MOVQstore", reg: gpstore, asm: "MOVQ"}, // store 8 bytes in arg1 to arg0+auxint. arg2=mem
+ {name: "MOVQstoreidx8", reg: gpstoreidx, asm: "MOVQ"}, // store 8 bytes in arg2 to arg0+8*arg1+auxint. arg3=mem
+
+ {name: "MOVXzero", reg: gpstoreconst}, // store auxint 0 bytes into arg0 using a series of MOV instructions. arg1=mem.
+ // TODO: implement this when register clobbering works
+ {name: "REPSTOSQ", reg: regInfo{[]regMask{buildReg("DI"), buildReg("CX")}, buildReg("DI AX CX"), nil}}, // store arg1 8-byte words containing zero into arg0 using STOSQ. arg2=mem.
+
+ //TODO: set register clobber to everything?
+ {name: "CALLstatic"}, // call static function aux.(*gc.Sym). arg0=mem, returns mem
+ {name: "CALLclosure", reg: regInfo{[]regMask{gpsp, buildReg("DX"), 0}, 0, nil}}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem returns mem
+
+ {name: "REPMOVSB", reg: regInfo{[]regMask{buildReg("DI"), buildReg("SI"), buildReg("CX")}, buildReg("DI SI CX"), nil}}, // move arg2 bytes from arg1 to arg0. arg3=mem, returns memory
+
+ // (InvertFlags (CMPQ a b)) == (CMPQ b a)
+ // So if we want (SETL (CMPQ a b)) but we can't do that because a is a constant,
+ // then we do (SETL (InvertFlags (CMPQ b a))) instead.
+ // Rewrites will convert this to (SETG (CMPQ b a)).
+ // InvertFlags is a pseudo-op which can't appear in assembly output.
+ {name: "InvertFlags"}, // reverse direction of arg0
+ }
+
+ var AMD64blocks = []blockData{
+ {name: "EQ"},
+ {name: "NE"},
+ {name: "LT"},
+ {name: "LE"},
+ {name: "GT"},
+ {name: "GE"},
+ {name: "ULT"},
+ {name: "ULE"},
+ {name: "UGT"},
+ {name: "UGE"},
+ }
+
+ archs = append(archs, arch{"AMD64", AMD64ops, AMD64blocks, regNamesAMD64})
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+This package generates opcode tables, rewrite rules, etc. for the ssa compiler.
+Run it with:
+ go run *.go
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// values are specified using the following format:
+// (op <type> [auxint] {aux} arg0 arg1 ...)
+// the type and aux fields are optional
+// on the matching side
+// - the type, aux, and auxint fields must match if they are specified.
+// on the generated side
+// - the type of the top-level expression is the same as the one on the left-hand side.
+// - the type of any subexpressions must be specified explicitly.
+// - auxint will be 0 if not specified.
+// - aux will be nil if not specified.
+
+// blocks are specified using the following format:
+// (kind controlvalue succ0 succ1 ...)
+// controlvalue must be "nil" or a value expression
+// succ* fields must be variables
+// For now, the generated successors must be a permutation of the matched successors.
+
+// constant folding
+(Add64 (Const64 [c]) (Const64 [d])) -> (Const64 [c+d])
+(AddPtr (ConstPtr [c]) (ConstPtr [d])) -> (ConstPtr [c+d])
+(Mul64 (Const64 [c]) (Const64 [d])) -> (Const64 [c*d])
+(MulPtr (ConstPtr [c]) (ConstPtr [d])) -> (ConstPtr [c*d])
+(IsInBounds (ConstPtr [c]) (ConstPtr [d])) -> (ConstPtr {inBounds(c,d)})
+
+// tear apart slices
+// TODO: anything that generates a slice needs to go in here.
+(SlicePtr (Load ptr mem)) -> (Load ptr mem)
+(SliceLen (Load ptr mem)) -> (Load (AddPtr <ptr.Type> ptr (ConstPtr <config.Uintptr> [config.PtrSize])) mem)
+(SliceCap (Load ptr mem)) -> (Load (AddPtr <ptr.Type> ptr (ConstPtr <config.Uintptr> [config.PtrSize*2])) mem)
+
+// slice and interface comparisons
+// the frontend ensures that we can only compare against nil
+// start by putting nil on the right to simplify the other rules
+(EqFat x y) && x.Op == OpConstNil && y.Op != OpConstNil -> (EqFat y x)
+(NeqFat x y) && x.Op == OpConstNil && y.Op != OpConstNil -> (NeqFat y x)
+// it suffices to check the first word (backing array for slices, dynamic type for interfaces)
+(EqFat (Load ptr mem) (ConstNil)) -> (EqPtr (Load <config.Uintptr> ptr mem) (ConstPtr <config.Uintptr> [0]))
+(NeqFat (Load ptr mem) (ConstNil)) -> (NeqPtr (Load <config.Uintptr> ptr mem) (ConstPtr <config.Uintptr> [0]))
+
+// indexing operations
+// Note: bounds check has already been done
+(ArrayIndex (Load ptr mem) idx) -> (Load (PtrIndex <v.Type.PtrTo()> ptr idx) mem)
+(PtrIndex <t> ptr idx) -> (AddPtr ptr (MulPtr <config.Uintptr> idx (ConstPtr <config.Uintptr> [t.Elem().Size()])))
+(StructSelect [idx] (Load ptr mem)) -> (Load (OffPtr <v.Type.PtrTo()> [idx] ptr) mem)
+
+// big-object moves
+// TODO: fix size
+(Store dst (Load <t> src mem) mem) && t.Size() > 8 -> (Move [t.Size()] dst src mem)
+
+// string ops
+(ConstString {s}) -> (StringMake (Addr <TypeBytePtr> {config.fe.StringData(s.(string))} (SB <config.Uintptr>)) (ConstPtr <config.Uintptr> [int64(len(s.(string)))]))
+(Load <t> ptr mem) && t.IsString() -> (StringMake (Load <TypeBytePtr> ptr mem) (Load <config.Uintptr> (OffPtr <TypeBytePtr> [config.PtrSize] ptr) mem))
+(StringPtr (StringMake ptr _)) -> ptr
+(StringLen (StringMake _ len)) -> len
+(Store dst str mem) && str.Type.IsString() -> (Store (OffPtr <TypeBytePtr> [config.PtrSize] dst) (StringLen <config.Uintptr> str) (Store <TypeMem> dst (StringPtr <TypeBytePtr> str) mem))
+
+(If (Not cond) yes no) -> (If cond no yes)
+(If (ConstBool {c}) yes no) && c.(bool) -> (Plain nil yes)
+(If (ConstBool {c}) yes no) && !c.(bool) -> (Plain nil no)
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+// genericOps lists the machine-independent SSA operations.
+// Each entry becomes an Op* constant and an opcodeTable entry (generic: true).
+var genericOps = []opData{
+	// 2-input arithmetic
+	// Types must be consistent with Go typing. Add, for example, must take two values
+	// of the same type and produces that same type.
+	{name: "Add8"}, // arg0 + arg1
+	{name: "Add16"},
+	{name: "Add32"},
+	{name: "Add64"},
+	{name: "AddPtr"},
+	// TODO: Add32F, Add64F, Add64C, Add128C
+
+	{name: "Sub8"}, // arg0 - arg1
+	{name: "Sub16"},
+	{name: "Sub32"},
+	{name: "Sub64"},
+	// TODO: Sub32F, Sub64F, Sub64C, Sub128C
+
+	{name: "Mul8"}, // arg0 * arg1
+	{name: "Mul16"},
+	{name: "Mul32"},
+	{name: "Mul64"},
+	{name: "MulPtr"}, // MulPtr is used for address calculations
+
+	{name: "And8"}, // arg0 & arg1
+	{name: "And16"},
+	{name: "And32"},
+	{name: "And64"},
+
+	{name: "Or8"}, // arg0 | arg1
+	{name: "Or16"},
+	{name: "Or32"},
+	{name: "Or64"},
+
+	{name: "Xor8"}, // arg0 ^ arg1
+	{name: "Xor16"},
+	{name: "Xor32"},
+	{name: "Xor64"},
+
+	// For shifts, AxB means the shifted value has A bits and the shift amount has B bits.
+	{name: "Lsh8x8"}, // arg0 << arg1
+	{name: "Lsh8x16"},
+	{name: "Lsh8x32"},
+	{name: "Lsh8x64"},
+	{name: "Lsh16x8"},
+	{name: "Lsh16x16"},
+	{name: "Lsh16x32"},
+	{name: "Lsh16x64"},
+	{name: "Lsh32x8"},
+	{name: "Lsh32x16"},
+	{name: "Lsh32x32"},
+	{name: "Lsh32x64"},
+	{name: "Lsh64x8"},
+	{name: "Lsh64x16"},
+	{name: "Lsh64x32"},
+	{name: "Lsh64x64"},
+
+	{name: "Rsh8x8"}, // arg0 >> arg1, signed
+	{name: "Rsh8x16"},
+	{name: "Rsh8x32"},
+	{name: "Rsh8x64"},
+	{name: "Rsh16x8"},
+	{name: "Rsh16x16"},
+	{name: "Rsh16x32"},
+	{name: "Rsh16x64"},
+	{name: "Rsh32x8"},
+	{name: "Rsh32x16"},
+	{name: "Rsh32x32"},
+	{name: "Rsh32x64"},
+	{name: "Rsh64x8"},
+	{name: "Rsh64x16"},
+	{name: "Rsh64x32"},
+	{name: "Rsh64x64"},
+
+	{name: "Rsh8Ux8"}, // arg0 >> arg1, unsigned
+	{name: "Rsh8Ux16"},
+	{name: "Rsh8Ux32"},
+	{name: "Rsh8Ux64"},
+	{name: "Rsh16Ux8"},
+	{name: "Rsh16Ux16"},
+	{name: "Rsh16Ux32"},
+	{name: "Rsh16Ux64"},
+	{name: "Rsh32Ux8"},
+	{name: "Rsh32Ux16"},
+	{name: "Rsh32Ux32"},
+	{name: "Rsh32Ux64"},
+	{name: "Rsh64Ux8"},
+	{name: "Rsh64Ux16"},
+	{name: "Rsh64Ux32"},
+	{name: "Rsh64Ux64"},
+
+	// 2-input comparisons
+	{name: "Eq8"}, // arg0 == arg1
+	{name: "Eq16"},
+	{name: "Eq32"},
+	{name: "Eq64"},
+	{name: "EqPtr"},
+	{name: "EqFat"}, // slice/interface; arg0 or arg1 is nil; other cases handled by frontend
+
+	{name: "Neq8"}, // arg0 != arg1
+	{name: "Neq16"},
+	{name: "Neq32"},
+	{name: "Neq64"},
+	{name: "NeqPtr"},
+	{name: "NeqFat"}, // slice/interface; arg0 or arg1 is nil; other cases handled by frontend
+
+	{name: "Less8"}, // arg0 < arg1
+	{name: "Less8U"},
+	{name: "Less16"},
+	{name: "Less16U"},
+	{name: "Less32"},
+	{name: "Less32U"},
+	{name: "Less64"},
+	{name: "Less64U"},
+
+	{name: "Leq8"}, // arg0 <= arg1
+	{name: "Leq8U"},
+	{name: "Leq16"},
+	{name: "Leq16U"},
+	{name: "Leq32"},
+	{name: "Leq32U"},
+	{name: "Leq64"},
+	{name: "Leq64U"},
+
+	{name: "Greater8"}, // arg0 > arg1
+	{name: "Greater8U"},
+	{name: "Greater16"},
+	{name: "Greater16U"},
+	{name: "Greater32"},
+	{name: "Greater32U"},
+	{name: "Greater64"},
+	{name: "Greater64U"},
+
+	{name: "Geq8"}, // arg0 >= arg1
+	{name: "Geq8U"},
+	{name: "Geq16"},
+	{name: "Geq16U"},
+	{name: "Geq32"},
+	{name: "Geq32U"},
+	{name: "Geq64"},
+	{name: "Geq64U"},
+
+	// 1-input ops
+	{name: "Not"}, // !arg0
+
+	{name: "Neg8"}, // -arg0
+	{name: "Neg16"},
+	{name: "Neg32"},
+	{name: "Neg64"},
+
+	{name: "Com8"}, // ^arg0
+	{name: "Com16"},
+	{name: "Com32"},
+	{name: "Com64"},
+
+	// Data movement
+	{name: "Phi"},  // select an argument based on which predecessor block we came from
+	{name: "Copy"}, // output = arg0
+
+	// constants. Constant values are stored in the aux field.
+	// booleans have a bool aux field, strings have a string aux
+	// field, and so on. All integer types store their value
+	// in the AuxInt field as an int64 (including int, uint64, etc.).
+	// For integer types smaller than 64 bits, only the low-order
+	// bits of the AuxInt field matter.
+	{name: "ConstBool"},
+	{name: "ConstString"},
+	{name: "ConstNil"},
+	{name: "Const8"},
+	{name: "Const16"},
+	{name: "Const32"},
+	{name: "Const64"},
+	{name: "ConstPtr"}, // pointer-sized integer constant
+	// TODO: Const32F, ...
+
+	// Constant-like things
+	{name: "Arg"}, // memory input to the function.
+
+	// The address of a variable. arg0 is the base pointer (SB or SP, depending
+	// on whether it is a global or stack variable). The Aux field identifies the
+	// variable. It will be either an *ExternSymbol (with arg0=SB), *ArgSymbol (arg0=SP),
+	// or *AutoSymbol (arg0=SP).
+	{name: "Addr"}, // Address of a variable. Arg0=SP or SB. Aux identifies the variable.
+
+	{name: "SP"},   // stack pointer
+	{name: "SB"},   // static base pointer (a.k.a. globals pointer)
+	{name: "Func"}, // entry address of a function
+
+	// Memory operations
+	{name: "Load"},  // Load from arg0. arg1=memory
+	{name: "Store"}, // Store arg1 to arg0. arg2=memory. Returns memory.
+	{name: "Move"},  // arg0=destptr, arg1=srcptr, arg2=mem, auxint=size. Returns memory.
+	{name: "Zero"},  // arg0=destptr, arg1=mem, auxint=size. Returns memory.
+
+	// Function calls. Arguments to the call have already been written to the stack.
+	// Return values appear on the stack. The method receiver, if any, is treated
+	// as a phantom first argument.
+	{name: "ClosureCall"}, // arg0=code pointer, arg1=context ptr, arg2=memory. Returns memory.
+	{name: "StaticCall"},  // call function aux.(*gc.Sym), arg0=memory. Returns memory.
+
+	// Conversions: signed extensions, zero (unsigned) extensions, truncations, and no-op (type only)
+	{name: "SignExt8to16"},
+	{name: "SignExt8to32"},
+	{name: "SignExt8to64"},
+	{name: "SignExt16to32"},
+	{name: "SignExt16to64"},
+	{name: "SignExt32to64"},
+	{name: "ZeroExt8to16"},
+	{name: "ZeroExt8to32"},
+	{name: "ZeroExt8to64"},
+	{name: "ZeroExt16to32"},
+	{name: "ZeroExt16to64"},
+	{name: "ZeroExt32to64"},
+	{name: "Trunc16to8"},
+	{name: "Trunc32to8"},
+	{name: "Trunc32to16"},
+	{name: "Trunc64to8"},
+	{name: "Trunc64to16"},
+	{name: "Trunc64to32"},
+
+	{name: "ConvNop"},
+
+	// Automatically inserted safety checks
+	{name: "IsNonNil"},   // arg0 != nil
+	{name: "IsInBounds"}, // 0 <= arg0 < arg1
+
+	// Indexing operations
+	{name: "ArrayIndex"},   // arg0=array, arg1=index. Returns a[i]
+	{name: "PtrIndex"},     // arg0=ptr, arg1=index. Computes ptr+sizeof(*v.type)*index, where index is extended to ptrwidth type
+	{name: "OffPtr"},       // arg0 + auxint (arg0 and result are pointers)
+	{name: "StructSelect"}, // arg0=struct, auxint=field offset. Returns field at that offset (size=size of result type)
+
+	// Slices
+	{name: "SliceMake"}, // arg0=ptr, arg1=len, arg2=cap
+	{name: "SlicePtr"},  // ptr(arg0)
+	{name: "SliceLen"},  // len(arg0)
+	{name: "SliceCap"},  // cap(arg0)
+
+	// Strings
+	{name: "StringMake"}, // arg0=ptr, arg1=len
+	{name: "StringPtr"},  // ptr(arg0)
+	{name: "StringLen"},  // len(arg0)
+
+	// Spill&restore ops for the register allocator. These are
+	// semantically identical to OpCopy; they do not take/return
+	// stores like regular memory ops do. We can get away without memory
+	// args because we know there is no aliasing of spill slots on the stack.
+	{name: "StoreReg"},
+	{name: "LoadReg"},
+
+	// Used during ssa construction. Like Copy, but the arg has not been specified yet.
+	{name: "FwdRef"},
+}
+
+// kind control successors
+// ------------------------------------------
+// Exit return mem []
+// Plain nil [next]
+// If a boolean Value [then, else]
+// Call mem [nopanic, panic] (control opcode should be OpCall or OpStaticCall)
+
+// genericBlocks lists the machine-independent block kinds; each becomes a Block* constant.
+var genericBlocks = []blockData{
+	{name: "Exit"},  // no successors. There should only be 1 of these.
+	{name: "Dead"},  // no successors; determined to be dead but not yet removed
+	{name: "Plain"}, // a single successor
+	{name: "If"},    // 2 successors, if control goto Succs[0] else goto Succs[1]
+	{name: "Call"},  // 2 successors, normal return and panic
+	// TODO(khr): BlockPanic for the built-in panic call, has 1 edge to the exit block
+}
+
+// init registers the generic (machine-independent) op and block sets.
+// The generic arch has no register names (nil): its ops carry no regInfo.
+func init() {
+	archs = append(archs, arch{"generic", genericOps, genericBlocks, nil})
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The gen command generates Go code (in the parent directory) for all
+// the architecture-specific opcodes, blocks, and rewrites.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/format"
+ "io/ioutil"
+ "log"
+ "regexp"
+)
+
+// arch bundles everything the generator knows about one architecture.
+type arch struct {
+	name     string      // e.g. "AMD64", or "generic" for machine-independent ops
+	ops      []opData    // value opcodes
+	blocks   []blockData // block kinds
+	regnames []string    // bit index -> register name, used for regMask comments
+}
+
+// opData describes a single opcode: its name, register constraints,
+// and (for machine ops) the assembler mnemonic it is emitted as.
+type opData struct {
+	name string
+	reg  regInfo
+	asm  string // mnemonic without the "x86.A" prefix; "" if none
+}
+
+// blockData names one block kind.
+type blockData struct {
+	name string
+}
+
+// regInfo describes the register constraints of an op.
+type regInfo struct {
+	inputs   []regMask // allowed registers for each input
+	clobbers regMask   // registers whose contents are destroyed
+	outputs  []regMask // allowed registers for each output
+}
+
+// regMask is a bitmap of machine registers; bit i corresponds to arch.regnames[i].
+type regMask uint64
+
+// regMaskComment returns an end-of-line comment (e.g. " // AX CX")
+// naming the registers set in r, or "" if r is empty.
+func (a arch) regMaskComment(r regMask) string {
+	var buf bytes.Buffer
+	for i := uint64(0); r != 0; i++ {
+		if r&1 != 0 {
+			// Emit the "//" marker only once, before the first register name.
+			if buf.Len() == 0 {
+				buf.WriteString(" //")
+			}
+			buf.WriteString(" ")
+			buf.WriteString(a.regnames[i])
+		}
+		r >>= 1
+	}
+	return buf.String()
+}
+
+// archs accumulates every architecture registered by the per-arch init functions.
+var archs []arch
+
+// main writes the opcode tables (opGen.go) and the per-arch rewrite files.
+func main() {
+	genOp()
+	genLower()
+}
+
+// genOp writes ../opGen.go: the Block* and Op* enum declarations, their
+// String methods, and the opcodeTable describing every op. The output is
+// run through go/format, so the emitted text need not be indented.
+func genOp() {
+	w := new(bytes.Buffer)
+	fmt.Fprintf(w, "// autogenerated: do not edit!\n")
+	fmt.Fprintf(w, "// generated from gen/*Ops.go\n")
+	fmt.Fprintln(w, "package ssa")
+
+	fmt.Fprintln(w, "import \"cmd/internal/obj/x86\"")
+
+	// generate Block* declarations
+	fmt.Fprintln(w, "const (")
+	fmt.Fprintln(w, "blockInvalid BlockKind = iota")
+	for _, a := range archs {
+		fmt.Fprintln(w)
+		for _, d := range a.blocks {
+			fmt.Fprintf(w, "Block%s%s\n", a.Name(), d.name)
+		}
+	}
+	fmt.Fprintln(w, ")")
+
+	// generate block kind string method
+	fmt.Fprintln(w, "var blockString = [...]string{")
+	fmt.Fprintln(w, "blockInvalid:\"BlockInvalid\",")
+	for _, a := range archs {
+		fmt.Fprintln(w)
+		for _, b := range a.blocks {
+			fmt.Fprintf(w, "Block%s%s:\"%s\",\n", a.Name(), b.name, b.name)
+		}
+	}
+	fmt.Fprintln(w, "}")
+	fmt.Fprintln(w, "func (k BlockKind) String() string {return blockString[k]}")
+
+	// generate Op* declarations
+	fmt.Fprintln(w, "const (")
+	fmt.Fprintln(w, "OpInvalid Op = iota")
+	for _, a := range archs {
+		fmt.Fprintln(w)
+		for _, v := range a.ops {
+			fmt.Fprintf(w, "Op%s%s\n", a.Name(), v.name)
+		}
+	}
+	fmt.Fprintln(w, ")")
+
+	// generate OpInfo table
+	fmt.Fprintln(w, "var opcodeTable = [...]opInfo{")
+	fmt.Fprintln(w, " { name: \"OpInvalid\" },")
+	for _, a := range archs {
+		fmt.Fprintln(w)
+		for _, v := range a.ops {
+			fmt.Fprintln(w, "{")
+			fmt.Fprintf(w, "name:\"%s\",\n", v.name)
+			if a.name == "generic" {
+				fmt.Fprintln(w, "generic:true,")
+				fmt.Fprintln(w, "},") // close op
+				// generic ops have no reg info or asm
+				continue
+			}
+			if v.asm != "" {
+				fmt.Fprintf(w, "asm: x86.A%s,\n", v.asm)
+			}
+			fmt.Fprintln(w, "reg:regInfo{")
+			// reg inputs
+			if len(v.reg.inputs) > 0 {
+				fmt.Fprintln(w, "inputs: []regMask{")
+				for _, r := range v.reg.inputs {
+					fmt.Fprintf(w, "%d,%s\n", r, a.regMaskComment(r))
+				}
+				fmt.Fprintln(w, "},")
+			}
+			if v.reg.clobbers > 0 {
+				fmt.Fprintf(w, "clobbers: %d,%s\n", v.reg.clobbers, a.regMaskComment(v.reg.clobbers))
+			}
+			// reg outputs
+			if len(v.reg.outputs) > 0 {
+				fmt.Fprintln(w, "outputs: []regMask{")
+				for _, r := range v.reg.outputs {
+					fmt.Fprintf(w, "%d,%s\n", r, a.regMaskComment(r))
+				}
+				fmt.Fprintln(w, "},")
+			}
+			fmt.Fprintln(w, "},") // close reg info
+			fmt.Fprintln(w, "},") // close op
+		}
+	}
+	fmt.Fprintln(w, "}")
+
+	fmt.Fprintln(w, "func (o Op) Asm() int {return opcodeTable[o].asm}")
+
+	// generate op string method
+	fmt.Fprintln(w, "func (o Op) String() string {return opcodeTable[o].name }")
+
+	// gofmt result
+	b := w.Bytes()
+	var err error
+	b, err = format.Source(b)
+	if err != nil {
+		// A formatting failure means we generated syntactically invalid Go;
+		// that is a bug in this generator, so panic rather than write bad output.
+		panic(err)
+	}
+
+	err = ioutil.WriteFile("../opGen.go", b, 0666)
+	if err != nil {
+		log.Fatalf("can't write output: %v\n", err)
+	}
+
+	// Check that ../gc/ssa.go handles all the arch-specific opcodes.
+	// This is very much a hack, but it is better than nothing.
+	ssa, err := ioutil.ReadFile("../../gc/ssa.go")
+	if err != nil {
+		log.Fatalf("can't read ../../gc/ssa.go: %v", err)
+	}
+	for _, a := range archs {
+		if a.name == "generic" {
+			continue
+		}
+		for _, v := range a.ops {
+			// Look for a use of ssa.Op<arch><name> anywhere in ssa.go.
+			pattern := fmt.Sprintf("\\Wssa[.]Op%s%s\\W", a.name, v.name)
+			match, err := regexp.Match(pattern, ssa)
+			if err != nil {
+				log.Fatalf("bad opcode regexp %s: %v", pattern, err)
+			}
+			if !match {
+				log.Fatalf("Op%s%s has no code generation in ../../gc/ssa.go", a.name, v.name)
+			}
+		}
+	}
+}
+
+// Name returns the name of the architecture for use in Op* and Block* enumerations.
+// The generic architecture maps to the empty string, so its ops read as plain Op<name>.
+func (a arch) Name() string {
+	s := a.name
+	if s == "generic" {
+		s = ""
+	}
+	return s
+}
+
+// genLower writes the rewrite file (../rewrite<arch>.go) for every registered architecture.
+func genLower() {
+	for _, a := range archs {
+		genRules(a)
+	}
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This program generates Go code that applies rewrite rules to a Value.
+// The generated code implements a function of type func (v *Value) bool
+// which returns true iff it did something.
+// Ideas stolen from Swift: http://www.hpl.hp.com/techreports/Compaq-DEC/WRL-2000-2.html
+
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/md5"
+ "fmt"
+ "go/format"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "regexp"
+ "sort"
+ "strings"
+)
+
+// rule syntax:
+// sexpr [&& extra conditions] -> sexpr
+//
+// sexpr are s-expressions (lisp-like parenthesized groupings)
+// sexpr ::= (opcode sexpr*)
+// | variable
+// | <type>
+// | [auxint]
+// | {aux}
+//
+// aux ::= variable | {code}
+// type ::= variable | {code}
+// variable ::= some token
+// opcode ::= one of the opcodes from ../op.go (without the Op prefix)
+
+// extra conditions is just a chunk of Go that evaluates to a boolean. It may use
+// variables declared in the matching sexpr. The variable "v" is predefined to be
+// the value matched by the entire rule.
+
+// If multiple rules match, the first one in file order is selected.
+
+// genRules reads <arch.name>.rules and writes ../rewrite<arch.name>.go,
+// containing rewriteValue<arch> (value rewrites) and rewriteBlock<arch>
+// (block rewrites). Rules may span multiple input lines; a rule is complete
+// once it contains "->", has something after the arrow, and its parens balance.
+func genRules(arch arch) {
+	// Open input file.
+	text, err := os.Open(arch.name + ".rules")
+	if err != nil {
+		log.Fatalf("can't read rule file: %v", err)
+	}
+	// NOTE(review): text is never closed; harmless in a short-lived generator,
+	// but a deferred Close would be tidier.
+
+	// oprules contains a list of rules for each block and opcode
+	blockrules := map[string][]string{}
+	oprules := map[string][]string{}
+
+	// read rule file
+	scanner := bufio.NewScanner(text)
+	rule := ""
+	for scanner.Scan() {
+		line := scanner.Text()
+		if i := strings.Index(line, "//"); i >= 0 {
+			// Remove comments. Note that this isn't string safe, so
+			// it will truncate lines with // inside strings. Oh well.
+			line = line[:i]
+		}
+		rule += " " + line
+		rule = strings.TrimSpace(rule)
+		if rule == "" {
+			continue
+		}
+		// Keep accumulating lines until the rule looks complete.
+		if !strings.Contains(rule, "->") {
+			continue
+		}
+		if strings.HasSuffix(rule, "->") {
+			continue
+		}
+		if unbalanced(rule) {
+			continue
+		}
+		// The opcode is the first token, with its leading '(' stripped.
+		op := strings.Split(rule, " ")[0][1:]
+		if op[len(op)-1] == ')' {
+			op = op[:len(op)-1] // rule has only opcode, e.g. (ConstNil) -> ...
+		}
+		if isBlock(op, arch) {
+			blockrules[op] = append(blockrules[op], rule)
+		} else {
+			oprules[op] = append(oprules[op], rule)
+		}
+		rule = ""
+	}
+	if unbalanced(rule) {
+		log.Fatalf("unbalanced rule: %v\n", rule)
+	}
+	if err := scanner.Err(); err != nil {
+		log.Fatalf("scanner failed: %v\n", err)
+	}
+
+	// Start output buffer, write header.
+	w := new(bytes.Buffer)
+	fmt.Fprintf(w, "// autogenerated from gen/%s.rules: do not edit!\n", arch.name)
+	fmt.Fprintln(w, "// generated with: cd gen; go run *.go")
+	fmt.Fprintln(w, "package ssa")
+	fmt.Fprintf(w, "func rewriteValue%s(v *Value, config *Config) bool {\n", arch.name)
+
+	// generate code for each rule
+	fmt.Fprintf(w, "switch v.Op {\n")
+	// Sort opcodes so the generated file is deterministic (map order is not).
+	var ops []string
+	for op := range oprules {
+		ops = append(ops, op)
+	}
+	sort.Strings(ops)
+	for _, op := range ops {
+		fmt.Fprintf(w, "case %s:\n", opName(op, arch))
+		for _, rule := range oprules[op] {
+			// Note: we use a hash to identify the rule so that its
+			// identity is invariant to adding/removing rules elsewhere
+			// in the rules file. This is useful to squash spurious
+			// diffs that would occur if we used rule index.
+			rulehash := fmt.Sprintf("%02x", md5.Sum([]byte(rule)))
+
+			// split at ->
+			s := strings.Split(rule, "->")
+			if len(s) != 2 {
+				log.Fatalf("rule must contain exactly one arrow: %s", rule)
+			}
+			lhs := strings.TrimSpace(s[0])
+			result := strings.TrimSpace(s[1])
+
+			// split match into matching part and additional condition
+			match := lhs
+			cond := ""
+			if i := strings.Index(match, "&&"); i >= 0 {
+				cond = strings.TrimSpace(match[i+2:])
+				match = strings.TrimSpace(match[:i])
+			}
+
+			fmt.Fprintf(w, "// match: %s\n", match)
+			fmt.Fprintf(w, "// cond: %s\n", cond)
+			fmt.Fprintf(w, "// result: %s\n", result)
+
+			// On any mismatch, jump past this rule to its end label.
+			fail := fmt.Sprintf("{\ngoto end%s\n}\n", rulehash)
+
+			fmt.Fprintf(w, "{\n")
+			genMatch(w, arch, match, fail)
+
+			if cond != "" {
+				fmt.Fprintf(w, "if !(%s) %s", cond, fail)
+			}
+
+			genResult(w, arch, result)
+			fmt.Fprintf(w, "return true\n")
+
+			fmt.Fprintf(w, "}\n")
+			// The unconditional goto "uses" the label so gofmt/vet don't
+			// complain about an unreferenced label.
+			fmt.Fprintf(w, "goto end%s\n", rulehash) // use label
+			fmt.Fprintf(w, "end%s:;\n", rulehash)
+		}
+	}
+	fmt.Fprintf(w, "}\n")
+	fmt.Fprintf(w, "return false\n")
+	fmt.Fprintf(w, "}\n")
+
+	// Generate block rewrite function.
+	fmt.Fprintf(w, "func rewriteBlock%s(b *Block) bool {\n", arch.name)
+	fmt.Fprintf(w, "switch b.Kind {\n")
+	ops = nil
+	for op := range blockrules {
+		ops = append(ops, op)
+	}
+	sort.Strings(ops)
+	for _, op := range ops {
+		fmt.Fprintf(w, "case %s:\n", blockName(op, arch))
+		for _, rule := range blockrules[op] {
+			rulehash := fmt.Sprintf("%02x", md5.Sum([]byte(rule)))
+			// split at ->
+			s := strings.Split(rule, "->")
+			if len(s) != 2 {
+				log.Fatalf("no arrow in rule %s", rule)
+			}
+			lhs := strings.TrimSpace(s[0])
+			result := strings.TrimSpace(s[1])
+
+			// split match into matching part and additional condition
+			match := lhs
+			cond := ""
+			if i := strings.Index(match, "&&"); i >= 0 {
+				cond = strings.TrimSpace(match[i+2:])
+				match = strings.TrimSpace(match[:i])
+			}
+
+			fmt.Fprintf(w, "// match: %s\n", match)
+			fmt.Fprintf(w, "// cond: %s\n", cond)
+			fmt.Fprintf(w, "// result: %s\n", result)
+
+			fail := fmt.Sprintf("{\ngoto end%s\n}\n", rulehash)
+
+			fmt.Fprintf(w, "{\n")
+			s = split(match[1 : len(match)-1]) // remove parens, then split
+
+			// check match of control value
+			if s[1] != "nil" {
+				fmt.Fprintf(w, "v := b.Control\n")
+				genMatch0(w, arch, s[1], "v", fail, map[string]string{}, false)
+			}
+
+			// assign successor names
+			succs := s[2:]
+			for i, a := range succs {
+				if a != "_" {
+					fmt.Fprintf(w, "%s := b.Succs[%d]\n", a, i)
+				}
+			}
+
+			if cond != "" {
+				fmt.Fprintf(w, "if !(%s) %s", cond, fail)
+			}
+
+			// Rule matches. Generate result.
+			t := split(result[1 : len(result)-1]) // remove parens, then split
+			newsuccs := t[2:]
+
+			// Check if newsuccs is a subset of succs.
+			m := map[string]bool{}
+			for _, succ := range succs {
+				if m[succ] {
+					log.Fatalf("can't have a repeat successor name %s in %s", succ, rule)
+				}
+				m[succ] = true
+			}
+			for _, succ := range newsuccs {
+				if !m[succ] {
+					log.Fatalf("unknown successor %s in %s", succ, rule)
+				}
+				delete(m, succ)
+			}
+
+			// Modify predecessor lists for no-longer-reachable blocks
+			// NOTE(review): this emits a reference to v, which is only declared
+			// when the matched control is non-nil — confirm every rule that
+			// drops a successor also matches a control value.
+			for succ := range m {
+				fmt.Fprintf(w, "v.Block.Func.removePredecessor(b, %s)\n", succ)
+			}
+
+			fmt.Fprintf(w, "b.Kind = %s\n", blockName(t[0], arch))
+			if t[1] == "nil" {
+				fmt.Fprintf(w, "b.Control = nil\n")
+			} else {
+				fmt.Fprintf(w, "b.Control = %s\n", genResult0(w, arch, t[1], new(int), false))
+			}
+			if len(newsuccs) < len(succs) {
+				fmt.Fprintf(w, "b.Succs = b.Succs[:%d]\n", len(newsuccs))
+			}
+			for i, a := range newsuccs {
+				fmt.Fprintf(w, "b.Succs[%d] = %s\n", i, a)
+			}
+
+			fmt.Fprintf(w, "return true\n")
+
+			fmt.Fprintf(w, "}\n")
+			fmt.Fprintf(w, "goto end%s\n", rulehash) // use label
+			fmt.Fprintf(w, "end%s:;\n", rulehash)
+		}
+	}
+	fmt.Fprintf(w, "}\n")
+	fmt.Fprintf(w, "return false\n")
+	fmt.Fprintf(w, "}\n")
+
+	// gofmt result
+	b := w.Bytes()
+	b, err = format.Source(b)
+	if err != nil {
+		panic(err)
+	}
+
+	// Write to file
+	err = ioutil.WriteFile("../rewrite"+arch.name+".go", b, 0666)
+	if err != nil {
+		log.Fatalf("can't write output: %v\n", err)
+	}
+}
+
+// genMatch emits matching code for a top-level match expression; the value
+// being matched is in the generated variable "v" and its opcode has already
+// been checked by the enclosing switch.
+func genMatch(w io.Writer, arch arch, match, fail string) {
+	genMatch0(w, arch, match, "v", fail, map[string]string{}, true)
+}
+
+// genMatch0 emits matching code for one match expression (sexpr or variable).
+//   v:    name of the generated Go variable holding the Value being matched
+//   fail: code snippet emitted on mismatch (jumps past the current rule)
+//   m:    maps rule variables to the generated expression that defines them
+//   top:  true for the outermost expression, whose opcode was already
+//         checked by the enclosing switch and is not re-checked here
+func genMatch0(w io.Writer, arch arch, match, v, fail string, m map[string]string, top bool) {
+	if match[0] != '(' {
+		if x, ok := m[match]; ok {
+			// variable already has a definition. Check whether
+			// the old definition and the new definition match.
+			// For example, (add x x). Equality is just pointer equality
+			// on Values (so cse is important to do before lowering).
+			fmt.Fprintf(w, "if %s != %s %s", v, x, fail)
+			return
+		}
+		// remember that this variable references the given value
+		if match == "_" {
+			return
+		}
+		m[match] = v
+		fmt.Fprintf(w, "%s := %s\n", match, v)
+		return
+	}
+
+	// split body up into regions. Split by spaces/tabs, except those
+	// contained in () or {}.
+	s := split(match[1 : len(match)-1]) // remove parens, then split
+
+	// check op
+	if !top {
+		fmt.Fprintf(w, "if %s.Op != %s %s", v, opName(s[0], arch), fail)
+	}
+
+	// check type/aux/args
+	argnum := 0
+	for _, a := range s[1:] {
+		if a[0] == '<' {
+			// type restriction
+			t := a[1 : len(a)-1] // remove <>
+			if !isVariable(t) {
+				// code. We must match the results of this code.
+				fmt.Fprintf(w, "if %s.Type != %s %s", v, t, fail)
+			} else {
+				// variable
+				if u, ok := m[t]; ok {
+					// must match previous variable
+					fmt.Fprintf(w, "if %s.Type != %s %s", v, u, fail)
+				} else {
+					m[t] = v + ".Type"
+					fmt.Fprintf(w, "%s := %s.Type\n", t, v)
+				}
+			}
+		} else if a[0] == '[' {
+			// auxint restriction
+			x := a[1 : len(a)-1] // remove []
+			if !isVariable(x) {
+				// code
+				fmt.Fprintf(w, "if %s.AuxInt != %s %s", v, x, fail)
+			} else {
+				// variable
+				if y, ok := m[x]; ok {
+					fmt.Fprintf(w, "if %s.AuxInt != %s %s", v, y, fail)
+				} else {
+					m[x] = v + ".AuxInt"
+					fmt.Fprintf(w, "%s := %s.AuxInt\n", x, v)
+				}
+			}
+		} else if a[0] == '{' {
+			// aux restriction
+			x := a[1 : len(a)-1] // remove {}
+			if !isVariable(x) {
+				// code
+				fmt.Fprintf(w, "if %s.Aux != %s %s", v, x, fail)
+			} else {
+				// variable
+				if y, ok := m[x]; ok {
+					fmt.Fprintf(w, "if %s.Aux != %s %s", v, y, fail)
+				} else {
+					m[x] = v + ".Aux"
+					fmt.Fprintf(w, "%s := %s.Aux\n", x, v)
+				}
+			}
+		} else {
+			// variable or sexpr
+			genMatch0(w, arch, a, fmt.Sprintf("%s.Args[%d]", v, argnum), fail, m, false)
+			argnum++
+		}
+	}
+}
+
+// genResult emits code constructing the top-level result expression in place of v.
+func genResult(w io.Writer, arch arch, result string) {
+	genResult0(w, arch, result, new(int), true)
+}
+// genResult0 emits code constructing one result expression and returns the
+// name of the generated Go variable holding it. alloc numbers the temporary
+// values (v0, v1, ...). When top is true, the matched value v is rewritten
+// in place instead of allocating a new Value.
+func genResult0(w io.Writer, arch arch, result string, alloc *int, top bool) string {
+	if result[0] != '(' {
+		// variable
+		if top {
+			// Copy the named value's fields onto v, rewriting it in place.
+			fmt.Fprintf(w, "v.Op = %s.Op\n", result)
+			fmt.Fprintf(w, "v.AuxInt = %s.AuxInt\n", result)
+			fmt.Fprintf(w, "v.Aux = %s.Aux\n", result)
+			fmt.Fprintf(w, "v.resetArgs()\n")
+			fmt.Fprintf(w, "v.AddArgs(%s.Args...)\n", result)
+		}
+		return result
+	}
+
+	s := split(result[1 : len(result)-1]) // remove parens, then split
+	var v string
+	var hasType bool
+	if top {
+		v = "v"
+		fmt.Fprintf(w, "v.Op = %s\n", opName(s[0], arch))
+		fmt.Fprintf(w, "v.AuxInt = 0\n")
+		fmt.Fprintf(w, "v.Aux = nil\n")
+		fmt.Fprintf(w, "v.resetArgs()\n")
+		// The top-level value keeps its matched type, so no <type> is required.
+		hasType = true
+	} else {
+		v = fmt.Sprintf("v%d", *alloc)
+		*alloc++
+		fmt.Fprintf(w, "%s := v.Block.NewValue0(v.Line, %s, TypeInvalid)\n", v, opName(s[0], arch))
+	}
+	for _, a := range s[1:] {
+		if a[0] == '<' {
+			// type restriction
+			t := a[1 : len(a)-1] // remove <>
+			fmt.Fprintf(w, "%s.Type = %s\n", v, t)
+			hasType = true
+		} else if a[0] == '[' {
+			// auxint restriction
+			x := a[1 : len(a)-1] // remove []
+			fmt.Fprintf(w, "%s.AuxInt = %s\n", v, x)
+		} else if a[0] == '{' {
+			// aux restriction
+			x := a[1 : len(a)-1] // remove {}
+			fmt.Fprintf(w, "%s.Aux = %s\n", v, x)
+		} else {
+			// regular argument (sexpr or variable)
+			x := genResult0(w, arch, a, alloc, false)
+			fmt.Fprintf(w, "%s.AddArg(%s)\n", v, x)
+		}
+	}
+	if !hasType {
+		log.Fatalf("sub-expression %s must have a type", result)
+	}
+	return v
+}
+
+// split breaks s into its top-level space/tab-separated fields, treating
+// anything nested inside (), {}, [], or <> as part of a single field.
+func split(s string) []string {
+	var r []string
+
+outer:
+	for s != "" {
+		d := 0                   // depth of ({[<
+		var open, close byte     // opening and closing markers ({[< or )}]>
+		nonsp := false           // found a non-space char so far
+		for i := 0; i < len(s); i++ {
+			switch {
+			case d == 0 && s[i] == '(':
+				open, close = '(', ')'
+				d++
+			case d == 0 && s[i] == '<':
+				open, close = '<', '>'
+				d++
+			case d == 0 && s[i] == '[':
+				open, close = '[', ']'
+				d++
+			case d == 0 && s[i] == '{':
+				open, close = '{', '}'
+				d++
+			case d == 0 && (s[i] == ' ' || s[i] == '\t'):
+				if nonsp {
+					// End of a field: emit it and restart on the remainder.
+					r = append(r, strings.TrimSpace(s[:i]))
+					s = s[i:]
+					continue outer
+				}
+			case d > 0 && s[i] == open:
+				d++
+			case d > 0 && s[i] == close:
+				d--
+			default:
+				nonsp = true
+			}
+		}
+		if d != 0 {
+			panic("imbalanced expression: " + s)
+		}
+		if nonsp {
+			r = append(r, strings.TrimSpace(s))
+		}
+		break
+	}
+	return r
+}
+
+// isBlock returns true if this op is a block opcode (generic or arch-specific).
+func isBlock(name string, arch arch) bool {
+	for _, b := range genericBlocks {
+		if b.name == name {
+			return true
+		}
+	}
+	for _, b := range arch.blocks {
+		if b.name == name {
+			return true
+		}
+	}
+	return false
+}
+
+// opName converts from an op name specified in a rule file to an Op enum.
+// if the name matches a generic op, returns "Op" plus the specified name.
+// Otherwise, returns "Op" plus arch name plus op name.
+func opName(name string, arch arch) string {
+	for _, op := range genericOps {
+		if op.name == name {
+			return "Op" + name
+		}
+	}
+	return "Op" + arch.name + name
+}
+
+// blockName converts a block kind name from a rule file into a Block* enum
+// name, inserting the arch name for arch-specific kinds.
+func blockName(name string, arch arch) string {
+	for _, b := range genericBlocks {
+		if b.name == name {
+			return "Block" + name
+		}
+	}
+	return "Block" + arch.name + name
+}
+
+// unbalanced returns true if there aren't the same number of ( and ) in the string.
+// Used to detect rules that continue onto the next input line.
+func unbalanced(s string) bool {
+	var left, right int
+	for _, c := range s {
+		if c == '(' {
+			left++
+		}
+		if c == ')' {
+			right++
+		}
+	}
+	return left != right
+}
+
+// isVariable reports whether s is a single Go alphanumeric identifier.
+// Used by the match/result generators to distinguish rule variables from
+// embedded Go code fragments.
+func isVariable(s string) bool {
+	// The pattern must be anchored: without ^...$, any string that merely
+	// contains an identifier character — e.g. "config.PtrSize" or
+	// "t.Elem().Size()" — would match and be misclassified as a plain
+	// variable instead of a code fragment, generating wrong match code.
+	b, err := regexp.MatchString("^[A-Za-z_][A-Za-z_0-9]*$", s)
+	if err != nil {
+		panic("bad variable regexp")
+	}
+	return b
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
// An ID is a dense small-integer identifier handed out by idAlloc.
type ID int32

// idAlloc provides an allocator for unique integers.
type idAlloc struct {
	last ID   // highest ID minted so far
	free []ID // IDs released via put, available for reuse
}

// get allocates an ID and returns it.
func (a *idAlloc) get() ID {
	// Reuse a previously released ID when one is available (LIFO).
	if n := len(a.free) - 1; n >= 0 {
		id := a.free[n]
		a.free = a.free[:n]
		return id
	}
	// Otherwise mint a fresh one.
	id := a.last + 1
	if id == 1<<31-1 {
		panic("too many ids for this function")
	}
	a.last = id
	return id
}

// put deallocates an ID, making it available to a future get.
func (a *idAlloc) put(x ID) {
	a.free = append(a.free, x)
}

// num returns the maximum ID ever returned + 1.
func (a *idAlloc) num() int {
	return 1 + int(a.last)
}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
// layout orders basic blocks in f with the goal of minimizing control flow instructions.
// After this phase returns, the order of f.Blocks matters and is the order
// in which those blocks will appear in the assembly output.
func layout(f *Func) {
	order := make([]*Block, 0, f.NumBlocks())
	scheduled := make([]bool, f.NumBlocks())   // scheduled[id] is true once the block has been placed in order
	idToBlock := make([]*Block, f.NumBlocks()) // maps a block ID back to its *Block
	indegree := make([]int, f.NumBlocks())     // number of predecessors not yet accounted for
	posdegree := newSparseSet(f.NumBlocks())  // blocks with positive remaining degree
	zerodegree := newSparseSet(f.NumBlocks()) // blocks with zero remaining degree

	// Initialize indegree of each block
	for _, b := range f.Blocks {
		idToBlock[b.ID] = b
		indegree[b.ID] = len(b.Preds)
		if len(b.Preds) == 0 {
			zerodegree.add(b.ID)
		} else {
			posdegree.add(b.ID)
		}
	}

	// bid is the ID of the next block to place; layout starts at the entry block.
	bid := f.Entry.ID
blockloop:
	for {
		// add block to schedule
		b := idToBlock[bid]
		order = append(order, b)
		scheduled[bid] = true
		if len(order) == len(f.Blocks) {
			break
		}

		// Scheduling b reduces the remaining degree of each of its successors.
		for _, c := range b.Succs {
			indegree[c.ID]--
			if indegree[c.ID] == 0 {
				posdegree.remove(c.ID)
				zerodegree.add(c.ID)
			}
		}

		// Pick the next block to schedule
		// Pick among the successor blocks that have not been scheduled yet.
		// Just use degree for now. TODO(khr): use likely direction hints.
		// bid == 0 acts as a "nothing chosen" sentinel; presumably no real
		// block has ID 0 (IDs appear to be allocated starting at 1) — confirm.
		bid = 0
		mindegree := f.NumBlocks()
		for _, c := range order[len(order)-1].Succs {
			if scheduled[c.ID] {
				continue
			}
			if indegree[c.ID] < mindegree {
				mindegree = indegree[c.ID]
				bid = c.ID
			}
		}
		if bid != 0 {
			continue
		}
		// TODO: improve this part
		// No successor of the previously scheduled block works.
		// Pick a zero-degree block if we can.
		for zerodegree.size() > 0 {
			cid := zerodegree.pop()
			if !scheduled[cid] {
				bid = cid
				continue blockloop
			}
		}
		// Still nothing, pick any block.
		for {
			cid := posdegree.pop()
			if !scheduled[cid] {
				bid = cid
				continue blockloop
			}
		}
		// NOTE(review): statically unreachable — the loop above only exits via
		// "continue blockloop". If posdegree can run dry above, pop's behavior
		// on an empty set determines what happens; confirm it cannot.
		b.Fatalf("no block available for layout")
	}
	f.Blocks = order
}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "fmt"
+)
+
// A Location is a place that an ssa variable can reside,
// such as a machine register or a stack slot.
type Location interface {
	Name() string // name to use in assembly templates: %rax, 16(%rsp), ...
}
+
// A Register is a machine register, like %rax.
// They are numbered densely from 0 (for each architecture).
type Register struct {
	Num  int32
	name string // assembly-template name, e.g. %rax
}

// Name returns the register's assembly-template name.
func (r *Register) Name() string {
	return r.name
}
+
// A LocalSlot is a location in the stack frame.
type LocalSlot struct {
	Idx int64 // offset in locals area (distance up from SP)
}

// Name returns the slot's assembly-template name, e.g. "16(SP)".
func (s *LocalSlot) Name() string {
	return fmt.Sprint(s.Idx) + "(SP)"
}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// convert to machine-dependent ops
+func lower(f *Func) {
+ // repeat rewrites until we find no more rewrites
+ applyRewrite(f, f.Config.lowerBlock, f.Config.lowerValue)
+
+ // Check for unlowered opcodes, fail if we find one.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if opcodeTable[v.Op].generic && v.Op != OpSP && v.Op != OpSB && v.Op != OpArg && v.Op != OpCopy && v.Op != OpPhi {
+ f.Unimplementedf("%s not lowered", v.LongString())
+ }
+ }
+ }
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// nilcheckelim eliminates unnecessary nil checks.
+func nilcheckelim(f *Func) {
+ // Exit early if there are no nil checks to eliminate.
+ var found bool
+ for _, b := range f.Blocks {
+ if checkedptr(b) != nil {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return
+ }
+
+ // Eliminate redundant nil checks.
+ // A nil check is redundant if the same
+ // nil check has been performed by a
+ // dominating block.
+ // The efficacy of this pass depends
+ // heavily on the efficacy of the cse pass.
+ idom := dominators(f) // TODO: cache the dominator tree in the function, clearing when the CFG changes?
+ for _, b := range f.Blocks {
+ ptr := checkedptr(b)
+ if ptr == nil {
+ continue
+ }
+ var elim bool
+ // Walk up the dominator tree,
+ // looking for identical nil checks.
+ // TODO: This loop is O(n^2). See BenchmarkNilCheckDeep*.
+ for c := idom[b.ID]; c != nil; c = idom[c.ID] {
+ if checkedptr(c) == ptr {
+ elim = true
+ break
+ }
+ }
+ if elim {
+ // Eliminate the nil check.
+ // The deadcode pass will remove vestigial values,
+ // and the fuse pass will join this block with its successor.
+ b.Kind = BlockPlain
+ b.Control = nil
+ f.removePredecessor(b, b.Succs[1])
+ b.Succs = b.Succs[:1]
+ }
+ }
+
+ // TODO: Eliminate more nil checks.
+ // For example, pointers to function arguments
+ // and pointers to static values cannot be nil.
+ // We could also track pointers constructed by
+ // taking the address of another value.
+ // We can also recursively remove any chain of
+ // fixed offset calculations,
+ // i.e. struct fields and array elements,
+ // even with non-constant indices:
+ // x is non-nil iff x.a.b[i].c is.
+}
+
+// checkedptr returns the Value, if any,
+// that is used in a nil check in b's Control op.
+func checkedptr(b *Block) *Value {
+ if b.Kind == BlockIf && b.Control.Op == OpIsNonNil {
+ return b.Control.Args[0]
+ }
+ return nil
+}
--- /dev/null
+package ssa
+
+import (
+ "strconv"
+ "testing"
+)
+
// Benchmarks at several depths, to observe how nilcheckelim scales.
func BenchmarkNilCheckDeep1(b *testing.B)     { benchmarkNilCheckDeep(b, 1) }
func BenchmarkNilCheckDeep10(b *testing.B)    { benchmarkNilCheckDeep(b, 10) }
func BenchmarkNilCheckDeep100(b *testing.B)   { benchmarkNilCheckDeep(b, 100) }
func BenchmarkNilCheckDeep1000(b *testing.B)  { benchmarkNilCheckDeep(b, 1000) }
func BenchmarkNilCheckDeep10000(b *testing.B) { benchmarkNilCheckDeep(b, 10000) }
+
+// benchmarkNilCheckDeep is a stress test of nilcheckelim.
+// It uses the worst possible input: A linear string of
+// nil checks, none of which can be eliminated.
+// Run with multiple depths to observe big-O behavior.
+func benchmarkNilCheckDeep(b *testing.B, depth int) {
+ ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
+
+ var blocs []bloc
+ blocs = append(blocs,
+ Bloc("entry",
+ Valu("mem", OpArg, TypeMem, 0, ".mem"),
+ Valu("sb", OpSB, TypeInvalid, 0, nil),
+ Goto(blockn(0)),
+ ),
+ )
+ for i := 0; i < depth; i++ {
+ blocs = append(blocs,
+ Bloc(blockn(i),
+ Valu(ptrn(i), OpAddr, ptrType, 0, nil, "sb"),
+ Valu(booln(i), OpIsNonNil, TypeBool, 0, nil, ptrn(i)),
+ If(booln(i), blockn(i+1), "exit"),
+ ),
+ )
+ }
+ blocs = append(blocs,
+ Bloc(blockn(depth), Goto("exit")),
+ Bloc("exit", Exit("mem")),
+ )
+
+ c := NewConfig("amd64", DummyFrontend{b})
+ fun := Fun(c, "entry", blocs...)
+
+ CheckFunc(fun.f)
+ b.SetBytes(int64(depth)) // helps for eyeballing linearity
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ nilcheckelim(fun.f)
+ }
+}
+
// blockn names the n'th chain block, e.g. "b3".
func blockn(n int) string { return "b" + strconv.FormatInt(int64(n), 10) }
// ptrn names the n'th pointer value, e.g. "p3".
func ptrn(n int) string { return "p" + strconv.FormatInt(int64(n), 10) }
// booln names the n'th nil-check boolean, e.g. "c3".
func booln(n int) string { return "c" + strconv.FormatInt(int64(n), 10) }
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
// An Op encodes the specific operation that a Value performs.
// Opcodes' semantics can be modified by the type and aux fields of the Value.
// For instance, OpAdd can be 32 or 64 bit, signed or unsigned, float or complex, depending on Value.Type.
// Semantics of each op are described in the opcode files in gen/*Ops.go.
// There is one file for generic (architecture-independent) ops and one file
// for each architecture.
// Op values are small dense integers suitable for indexing tables such as opcodeTable.
type Op int32
+
// opInfo is the static description of an opcode: its printable name,
// the assembler instruction used when emitting it, and its register
// constraints.
type opInfo struct {
	name    string
	asm     int // assembler instruction, e.g. x86.AADDQ
	reg     regInfo
	generic bool // this is a generic (arch-independent) opcode
}
+
// regInfo describes an op's register constraints: the registers each
// input may occupy, the registers the op clobbers (assumption from the
// name — confirm against the register allocator), and the registers
// each output may occupy.
type regInfo struct {
	inputs   []regMask // allowed registers for each input
	clobbers regMask
	outputs  []regMask // NOTE: values can only have 1 output for now.
}
--- /dev/null
+// autogenerated: do not edit!
+// generated from gen/*Ops.go
+package ssa
+
+import "cmd/internal/obj/x86"
+
+const (
+ blockInvalid BlockKind = iota
+
+ BlockAMD64EQ
+ BlockAMD64NE
+ BlockAMD64LT
+ BlockAMD64LE
+ BlockAMD64GT
+ BlockAMD64GE
+ BlockAMD64ULT
+ BlockAMD64ULE
+ BlockAMD64UGT
+ BlockAMD64UGE
+
+ BlockExit
+ BlockDead
+ BlockPlain
+ BlockIf
+ BlockCall
+)
+
+var blockString = [...]string{
+ blockInvalid: "BlockInvalid",
+
+ BlockAMD64EQ: "EQ",
+ BlockAMD64NE: "NE",
+ BlockAMD64LT: "LT",
+ BlockAMD64LE: "LE",
+ BlockAMD64GT: "GT",
+ BlockAMD64GE: "GE",
+ BlockAMD64ULT: "ULT",
+ BlockAMD64ULE: "ULE",
+ BlockAMD64UGT: "UGT",
+ BlockAMD64UGE: "UGE",
+
+ BlockExit: "Exit",
+ BlockDead: "Dead",
+ BlockPlain: "Plain",
+ BlockIf: "If",
+ BlockCall: "Call",
+}
+
+func (k BlockKind) String() string { return blockString[k] }
+
+const (
+ OpInvalid Op = iota
+
+ OpAMD64ADDQ
+ OpAMD64ADDL
+ OpAMD64ADDW
+ OpAMD64ADDB
+ OpAMD64ADDQconst
+ OpAMD64ADDLconst
+ OpAMD64ADDWconst
+ OpAMD64ADDBconst
+ OpAMD64SUBQ
+ OpAMD64SUBL
+ OpAMD64SUBW
+ OpAMD64SUBB
+ OpAMD64SUBQconst
+ OpAMD64SUBLconst
+ OpAMD64SUBWconst
+ OpAMD64SUBBconst
+ OpAMD64MULQ
+ OpAMD64MULL
+ OpAMD64MULW
+ OpAMD64MULQconst
+ OpAMD64MULLconst
+ OpAMD64MULWconst
+ OpAMD64ANDQ
+ OpAMD64ANDL
+ OpAMD64ANDW
+ OpAMD64ANDB
+ OpAMD64ANDQconst
+ OpAMD64ANDLconst
+ OpAMD64ANDWconst
+ OpAMD64ANDBconst
+ OpAMD64ORQ
+ OpAMD64ORL
+ OpAMD64ORW
+ OpAMD64ORB
+ OpAMD64ORQconst
+ OpAMD64ORLconst
+ OpAMD64ORWconst
+ OpAMD64ORBconst
+ OpAMD64XORQ
+ OpAMD64XORL
+ OpAMD64XORW
+ OpAMD64XORB
+ OpAMD64XORQconst
+ OpAMD64XORLconst
+ OpAMD64XORWconst
+ OpAMD64XORBconst
+ OpAMD64CMPQ
+ OpAMD64CMPL
+ OpAMD64CMPW
+ OpAMD64CMPB
+ OpAMD64CMPQconst
+ OpAMD64CMPLconst
+ OpAMD64CMPWconst
+ OpAMD64CMPBconst
+ OpAMD64TESTQ
+ OpAMD64TESTL
+ OpAMD64TESTW
+ OpAMD64TESTB
+ OpAMD64TESTQconst
+ OpAMD64TESTLconst
+ OpAMD64TESTWconst
+ OpAMD64TESTBconst
+ OpAMD64SHLQ
+ OpAMD64SHLL
+ OpAMD64SHLW
+ OpAMD64SHLB
+ OpAMD64SHLQconst
+ OpAMD64SHLLconst
+ OpAMD64SHLWconst
+ OpAMD64SHLBconst
+ OpAMD64SHRQ
+ OpAMD64SHRL
+ OpAMD64SHRW
+ OpAMD64SHRB
+ OpAMD64SHRQconst
+ OpAMD64SHRLconst
+ OpAMD64SHRWconst
+ OpAMD64SHRBconst
+ OpAMD64SARQ
+ OpAMD64SARL
+ OpAMD64SARW
+ OpAMD64SARB
+ OpAMD64SARQconst
+ OpAMD64SARLconst
+ OpAMD64SARWconst
+ OpAMD64SARBconst
+ OpAMD64NEGQ
+ OpAMD64NEGL
+ OpAMD64NEGW
+ OpAMD64NEGB
+ OpAMD64NOTQ
+ OpAMD64NOTL
+ OpAMD64NOTW
+ OpAMD64NOTB
+ OpAMD64SBBQcarrymask
+ OpAMD64SBBLcarrymask
+ OpAMD64SETEQ
+ OpAMD64SETNE
+ OpAMD64SETL
+ OpAMD64SETLE
+ OpAMD64SETG
+ OpAMD64SETGE
+ OpAMD64SETB
+ OpAMD64SETBE
+ OpAMD64SETA
+ OpAMD64SETAE
+ OpAMD64MOVBQSX
+ OpAMD64MOVBQZX
+ OpAMD64MOVWQSX
+ OpAMD64MOVWQZX
+ OpAMD64MOVLQSX
+ OpAMD64MOVLQZX
+ OpAMD64MOVBconst
+ OpAMD64MOVWconst
+ OpAMD64MOVLconst
+ OpAMD64MOVQconst
+ OpAMD64LEAQ
+ OpAMD64LEAQ1
+ OpAMD64LEAQ2
+ OpAMD64LEAQ4
+ OpAMD64LEAQ8
+ OpAMD64MOVBload
+ OpAMD64MOVBQSXload
+ OpAMD64MOVBQZXload
+ OpAMD64MOVWload
+ OpAMD64MOVLload
+ OpAMD64MOVQload
+ OpAMD64MOVQloadidx8
+ OpAMD64MOVBstore
+ OpAMD64MOVWstore
+ OpAMD64MOVLstore
+ OpAMD64MOVQstore
+ OpAMD64MOVQstoreidx8
+ OpAMD64MOVXzero
+ OpAMD64REPSTOSQ
+ OpAMD64CALLstatic
+ OpAMD64CALLclosure
+ OpAMD64REPMOVSB
+ OpAMD64InvertFlags
+
+ OpAdd8
+ OpAdd16
+ OpAdd32
+ OpAdd64
+ OpAddPtr
+ OpSub8
+ OpSub16
+ OpSub32
+ OpSub64
+ OpMul8
+ OpMul16
+ OpMul32
+ OpMul64
+ OpMulPtr
+ OpAnd8
+ OpAnd16
+ OpAnd32
+ OpAnd64
+ OpOr8
+ OpOr16
+ OpOr32
+ OpOr64
+ OpXor8
+ OpXor16
+ OpXor32
+ OpXor64
+ OpLsh8x8
+ OpLsh8x16
+ OpLsh8x32
+ OpLsh8x64
+ OpLsh16x8
+ OpLsh16x16
+ OpLsh16x32
+ OpLsh16x64
+ OpLsh32x8
+ OpLsh32x16
+ OpLsh32x32
+ OpLsh32x64
+ OpLsh64x8
+ OpLsh64x16
+ OpLsh64x32
+ OpLsh64x64
+ OpRsh8x8
+ OpRsh8x16
+ OpRsh8x32
+ OpRsh8x64
+ OpRsh16x8
+ OpRsh16x16
+ OpRsh16x32
+ OpRsh16x64
+ OpRsh32x8
+ OpRsh32x16
+ OpRsh32x32
+ OpRsh32x64
+ OpRsh64x8
+ OpRsh64x16
+ OpRsh64x32
+ OpRsh64x64
+ OpRsh8Ux8
+ OpRsh8Ux16
+ OpRsh8Ux32
+ OpRsh8Ux64
+ OpRsh16Ux8
+ OpRsh16Ux16
+ OpRsh16Ux32
+ OpRsh16Ux64
+ OpRsh32Ux8
+ OpRsh32Ux16
+ OpRsh32Ux32
+ OpRsh32Ux64
+ OpRsh64Ux8
+ OpRsh64Ux16
+ OpRsh64Ux32
+ OpRsh64Ux64
+ OpEq8
+ OpEq16
+ OpEq32
+ OpEq64
+ OpEqPtr
+ OpEqFat
+ OpNeq8
+ OpNeq16
+ OpNeq32
+ OpNeq64
+ OpNeqPtr
+ OpNeqFat
+ OpLess8
+ OpLess8U
+ OpLess16
+ OpLess16U
+ OpLess32
+ OpLess32U
+ OpLess64
+ OpLess64U
+ OpLeq8
+ OpLeq8U
+ OpLeq16
+ OpLeq16U
+ OpLeq32
+ OpLeq32U
+ OpLeq64
+ OpLeq64U
+ OpGreater8
+ OpGreater8U
+ OpGreater16
+ OpGreater16U
+ OpGreater32
+ OpGreater32U
+ OpGreater64
+ OpGreater64U
+ OpGeq8
+ OpGeq8U
+ OpGeq16
+ OpGeq16U
+ OpGeq32
+ OpGeq32U
+ OpGeq64
+ OpGeq64U
+ OpNot
+ OpNeg8
+ OpNeg16
+ OpNeg32
+ OpNeg64
+ OpCom8
+ OpCom16
+ OpCom32
+ OpCom64
+ OpPhi
+ OpCopy
+ OpConstBool
+ OpConstString
+ OpConstNil
+ OpConst8
+ OpConst16
+ OpConst32
+ OpConst64
+ OpConstPtr
+ OpArg
+ OpAddr
+ OpSP
+ OpSB
+ OpFunc
+ OpLoad
+ OpStore
+ OpMove
+ OpZero
+ OpClosureCall
+ OpStaticCall
+ OpSignExt8to16
+ OpSignExt8to32
+ OpSignExt8to64
+ OpSignExt16to32
+ OpSignExt16to64
+ OpSignExt32to64
+ OpZeroExt8to16
+ OpZeroExt8to32
+ OpZeroExt8to64
+ OpZeroExt16to32
+ OpZeroExt16to64
+ OpZeroExt32to64
+ OpTrunc16to8
+ OpTrunc32to8
+ OpTrunc32to16
+ OpTrunc64to8
+ OpTrunc64to16
+ OpTrunc64to32
+ OpConvNop
+ OpIsNonNil
+ OpIsInBounds
+ OpArrayIndex
+ OpPtrIndex
+ OpOffPtr
+ OpStructSelect
+ OpSliceMake
+ OpSlicePtr
+ OpSliceLen
+ OpSliceCap
+ OpStringMake
+ OpStringPtr
+ OpStringLen
+ OpStoreReg
+ OpLoadReg
+ OpFwdRef
+)
+
+var opcodeTable = [...]opInfo{
+ {name: "OpInvalid"},
+
+ {
+ name: "ADDQ",
+ asm: x86.AADDQ,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "ADDL",
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "ADDW",
+ asm: x86.AADDW,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "ADDB",
+ asm: x86.AADDB,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "ADDQconst",
+ asm: x86.AADDQ,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "ADDLconst",
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "ADDWconst",
+ asm: x86.AADDW,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "ADDBconst",
+ asm: x86.AADDB,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SUBQ",
+ asm: x86.ASUBQ,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SUBL",
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SUBW",
+ asm: x86.ASUBW,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SUBB",
+ asm: x86.ASUBB,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SUBQconst",
+ asm: x86.ASUBQ,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SUBLconst",
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SUBWconst",
+ asm: x86.ASUBW,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SUBBconst",
+ asm: x86.ASUBB,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "MULQ",
+ asm: x86.AIMULQ,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "MULL",
+ asm: x86.AIMULL,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "MULW",
+ asm: x86.AIMULW,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "MULQconst",
+ asm: x86.AIMULQ,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "MULLconst",
+ asm: x86.AIMULL,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "MULWconst",
+ asm: x86.AIMULW,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "ANDQ",
+ asm: x86.AANDQ,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "ANDL",
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "ANDW",
+ asm: x86.AANDW,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "ANDB",
+ asm: x86.AANDB,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "ANDQconst",
+ asm: x86.AANDQ,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "ANDLconst",
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "ANDWconst",
+ asm: x86.AANDW,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "ANDBconst",
+ asm: x86.AANDB,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "ORQ",
+ asm: x86.AORQ,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "ORL",
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "ORW",
+ asm: x86.AORW,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "ORB",
+ asm: x86.AORB,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "ORQconst",
+ asm: x86.AORQ,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "ORLconst",
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "ORWconst",
+ asm: x86.AORW,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "ORBconst",
+ asm: x86.AORB,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "XORQ",
+ asm: x86.AXORQ,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "XORL",
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "XORW",
+ asm: x86.AXORW,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "XORB",
+ asm: x86.AXORB,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "XORQconst",
+ asm: x86.AXORQ,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "XORLconst",
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "XORWconst",
+ asm: x86.AXORW,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "XORBconst",
+ asm: x86.AXORB,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "CMPQ",
+ asm: x86.ACMPQ,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 8589934592, // .FLAGS
+ },
+ },
+ },
+ {
+ name: "CMPL",
+ asm: x86.ACMPL,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 8589934592, // .FLAGS
+ },
+ },
+ },
+ {
+ name: "CMPW",
+ asm: x86.ACMPW,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 8589934592, // .FLAGS
+ },
+ },
+ },
+ {
+ name: "CMPB",
+ asm: x86.ACMPB,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 8589934592, // .FLAGS
+ },
+ },
+ },
+ {
+ name: "CMPQconst",
+ asm: x86.ACMPQ,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 8589934592, // .FLAGS
+ },
+ },
+ },
+ {
+ name: "CMPLconst",
+ asm: x86.ACMPL,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 8589934592, // .FLAGS
+ },
+ },
+ },
+ {
+ name: "CMPWconst",
+ asm: x86.ACMPW,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 8589934592, // .FLAGS
+ },
+ },
+ },
+ {
+ name: "CMPBconst",
+ asm: x86.ACMPB,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 8589934592, // .FLAGS
+ },
+ },
+ },
+ {
+ name: "TESTQ",
+ asm: x86.ATESTQ,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 8589934592, // .FLAGS
+ },
+ },
+ },
+ {
+ name: "TESTL",
+ asm: x86.ATESTL,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 8589934592, // .FLAGS
+ },
+ },
+ },
+ {
+ name: "TESTW",
+ asm: x86.ATESTW,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 8589934592, // .FLAGS
+ },
+ },
+ },
+ {
+ name: "TESTB",
+ asm: x86.ATESTB,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 8589934592, // .FLAGS
+ },
+ },
+ },
+ {
+ name: "TESTQconst",
+ asm: x86.ATESTQ,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 8589934592, // .FLAGS
+ },
+ },
+ },
+ {
+ name: "TESTLconst",
+ asm: x86.ATESTL,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 8589934592, // .FLAGS
+ },
+ },
+ },
+ {
+ name: "TESTWconst",
+ asm: x86.ATESTW,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 8589934592, // .FLAGS
+ },
+ },
+ },
+ {
+ name: "TESTBconst",
+ asm: x86.ATESTB,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 8589934592, // .FLAGS
+ },
+ },
+ },
+ {
+ name: "SHLQ",
+ asm: x86.ASHLQ,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 2, // .CX
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SHLL",
+ asm: x86.ASHLL,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 2, // .CX
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SHLW",
+ asm: x86.ASHLW,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 2, // .CX
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SHLB",
+ asm: x86.ASHLB,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 2, // .CX
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SHLQconst",
+ asm: x86.ASHLQ,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SHLLconst",
+ asm: x86.ASHLL,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SHLWconst",
+ asm: x86.ASHLW,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SHLBconst",
+ asm: x86.ASHLB,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SHRQ",
+ asm: x86.ASHRQ,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 2, // .CX
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SHRL",
+ asm: x86.ASHRL,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 2, // .CX
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SHRW",
+ asm: x86.ASHRW,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 2, // .CX
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SHRB",
+ asm: x86.ASHRB,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 2, // .CX
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SHRQconst",
+ asm: x86.ASHRQ,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SHRLconst",
+ asm: x86.ASHRL,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SHRWconst",
+ asm: x86.ASHRW,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SHRBconst",
+ asm: x86.ASHRB,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SARQ",
+ asm: x86.ASARQ,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 2, // .CX
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SARL",
+ asm: x86.ASARL,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 2, // .CX
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SARW",
+ asm: x86.ASARW,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 2, // .CX
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SARB",
+ asm: x86.ASARB,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 2, // .CX
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SARQconst",
+ asm: x86.ASARQ,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SARLconst",
+ asm: x86.ASARL,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SARWconst",
+ asm: x86.ASARW,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SARBconst",
+ asm: x86.ASARB,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "NEGQ",
+ asm: x86.ANEGQ,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "NEGL",
+ asm: x86.ANEGL,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "NEGW",
+ asm: x86.ANEGW,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "NEGB",
+ asm: x86.ANEGB,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "NOTQ",
+ asm: x86.ANOTQ,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "NOTL",
+ asm: x86.ANOTL,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "NOTW",
+ asm: x86.ANOTW,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "NOTB",
+ asm: x86.ANOTB,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SBBQcarrymask",
+ asm: x86.ASBBQ,
+ reg: regInfo{
+ inputs: []regMask{
+ 8589934592, // .FLAGS
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SBBLcarrymask",
+ asm: x86.ASBBL,
+ reg: regInfo{
+ inputs: []regMask{
+ 8589934592, // .FLAGS
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SETEQ",
+ asm: x86.ASETEQ,
+ reg: regInfo{
+ inputs: []regMask{
+ 8589934592, // .FLAGS
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SETNE",
+ asm: x86.ASETNE,
+ reg: regInfo{
+ inputs: []regMask{
+ 8589934592, // .FLAGS
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SETL",
+ asm: x86.ASETLT,
+ reg: regInfo{
+ inputs: []regMask{
+ 8589934592, // .FLAGS
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SETLE",
+ asm: x86.ASETLE,
+ reg: regInfo{
+ inputs: []regMask{
+ 8589934592, // .FLAGS
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SETG",
+ asm: x86.ASETGT,
+ reg: regInfo{
+ inputs: []regMask{
+ 8589934592, // .FLAGS
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SETGE",
+ asm: x86.ASETGE,
+ reg: regInfo{
+ inputs: []regMask{
+ 8589934592, // .FLAGS
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SETB",
+ asm: x86.ASETCS,
+ reg: regInfo{
+ inputs: []regMask{
+ 8589934592, // .FLAGS
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SETBE",
+ asm: x86.ASETLS,
+ reg: regInfo{
+ inputs: []regMask{
+ 8589934592, // .FLAGS
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SETA",
+ asm: x86.ASETHI,
+ reg: regInfo{
+ inputs: []regMask{
+ 8589934592, // .FLAGS
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "SETAE",
+ asm: x86.ASETCC,
+ reg: regInfo{
+ inputs: []regMask{
+ 8589934592, // .FLAGS
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "MOVBQSX",
+ asm: x86.AMOVBQSX,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "MOVBQZX",
+ asm: x86.AMOVBQZX,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "MOVWQSX",
+ asm: x86.AMOVWQSX,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "MOVWQZX",
+ asm: x86.AMOVWQZX,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "MOVLQSX",
+ asm: x86.AMOVLQSX,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "MOVLQZX",
+ asm: x86.AMOVLQZX,
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "MOVBconst",
+ asm: x86.AMOVB,
+ reg: regInfo{
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "MOVWconst",
+ asm: x86.AMOVW,
+ reg: regInfo{
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "MOVLconst",
+ asm: x86.AMOVL,
+ reg: regInfo{
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "MOVQconst",
+ asm: x86.AMOVQ,
+ reg: regInfo{
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "LEAQ",
+ reg: regInfo{
+ inputs: []regMask{
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "LEAQ1",
+ reg: regInfo{
+ inputs: []regMask{
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "LEAQ2",
+ reg: regInfo{
+ inputs: []regMask{
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "LEAQ4",
+ reg: regInfo{
+ inputs: []regMask{
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "LEAQ8",
+ reg: regInfo{
+ inputs: []regMask{
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "MOVBload",
+ asm: x86.AMOVB,
+ reg: regInfo{
+ inputs: []regMask{
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
+ 0,
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "MOVBQSXload",
+ asm: x86.AMOVBQSX,
+ reg: regInfo{
+ inputs: []regMask{
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
+ 0,
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "MOVBQZXload",
+ asm: x86.AMOVBQZX,
+ reg: regInfo{
+ inputs: []regMask{
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
+ 0,
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "MOVWload",
+ asm: x86.AMOVW,
+ reg: regInfo{
+ inputs: []regMask{
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
+ 0,
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "MOVLload",
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []regMask{
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
+ 0,
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "MOVQload",
+ asm: x86.AMOVQ,
+ reg: regInfo{
+ inputs: []regMask{
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
+ 0,
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "MOVQloadidx8",
+ asm: x86.AMOVQ,
+ reg: regInfo{
+ inputs: []regMask{
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 0,
+ },
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ asm: x86.AMOVB,
+ reg: regInfo{
+ inputs: []regMask{
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 0,
+ },
+ },
+ },
+ {
+ name: "MOVWstore",
+ asm: x86.AMOVW,
+ reg: regInfo{
+ inputs: []regMask{
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 0,
+ },
+ },
+ },
+ {
+ name: "MOVLstore",
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []regMask{
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 0,
+ },
+ },
+ },
+ {
+ name: "MOVQstore",
+ asm: x86.AMOVQ,
+ reg: regInfo{
+ inputs: []regMask{
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 0,
+ },
+ },
+ },
+ {
+ name: "MOVQstoreidx8",
+ asm: x86.AMOVQ,
+ reg: regInfo{
+ inputs: []regMask{
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 0,
+ },
+ },
+ },
+ {
+ name: "MOVXzero",
+ reg: regInfo{
+ inputs: []regMask{
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
+ 0,
+ },
+ },
+ },
+ {
+ name: "REPSTOSQ",
+ reg: regInfo{
+ inputs: []regMask{
+ 128, // .DI
+ 2, // .CX
+ },
+ clobbers: 131, // .AX .CX .DI
+ },
+ },
+ {
+ name: "CALLstatic",
+ reg: regInfo{},
+ },
+ {
+ name: "CALLclosure",
+ reg: regInfo{
+ inputs: []regMask{
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 4, // .DX
+ 0,
+ },
+ },
+ },
+ {
+ name: "REPMOVSB",
+ reg: regInfo{
+ inputs: []regMask{
+ 128, // .DI
+ 64, // .SI
+ 2, // .CX
+ },
+ clobbers: 194, // .CX .SI .DI
+ },
+ },
+ {
+ name: "InvertFlags",
+ reg: regInfo{},
+ },
+
+ {
+ name: "Add8",
+ generic: true,
+ },
+ {
+ name: "Add16",
+ generic: true,
+ },
+ {
+ name: "Add32",
+ generic: true,
+ },
+ {
+ name: "Add64",
+ generic: true,
+ },
+ {
+ name: "AddPtr",
+ generic: true,
+ },
+ {
+ name: "Sub8",
+ generic: true,
+ },
+ {
+ name: "Sub16",
+ generic: true,
+ },
+ {
+ name: "Sub32",
+ generic: true,
+ },
+ {
+ name: "Sub64",
+ generic: true,
+ },
+ {
+ name: "Mul8",
+ generic: true,
+ },
+ {
+ name: "Mul16",
+ generic: true,
+ },
+ {
+ name: "Mul32",
+ generic: true,
+ },
+ {
+ name: "Mul64",
+ generic: true,
+ },
+ {
+ name: "MulPtr",
+ generic: true,
+ },
+ {
+ name: "And8",
+ generic: true,
+ },
+ {
+ name: "And16",
+ generic: true,
+ },
+ {
+ name: "And32",
+ generic: true,
+ },
+ {
+ name: "And64",
+ generic: true,
+ },
+ {
+ name: "Or8",
+ generic: true,
+ },
+ {
+ name: "Or16",
+ generic: true,
+ },
+ {
+ name: "Or32",
+ generic: true,
+ },
+ {
+ name: "Or64",
+ generic: true,
+ },
+ {
+ name: "Xor8",
+ generic: true,
+ },
+ {
+ name: "Xor16",
+ generic: true,
+ },
+ {
+ name: "Xor32",
+ generic: true,
+ },
+ {
+ name: "Xor64",
+ generic: true,
+ },
+ {
+ name: "Lsh8x8",
+ generic: true,
+ },
+ {
+ name: "Lsh8x16",
+ generic: true,
+ },
+ {
+ name: "Lsh8x32",
+ generic: true,
+ },
+ {
+ name: "Lsh8x64",
+ generic: true,
+ },
+ {
+ name: "Lsh16x8",
+ generic: true,
+ },
+ {
+ name: "Lsh16x16",
+ generic: true,
+ },
+ {
+ name: "Lsh16x32",
+ generic: true,
+ },
+ {
+ name: "Lsh16x64",
+ generic: true,
+ },
+ {
+ name: "Lsh32x8",
+ generic: true,
+ },
+ {
+ name: "Lsh32x16",
+ generic: true,
+ },
+ {
+ name: "Lsh32x32",
+ generic: true,
+ },
+ {
+ name: "Lsh32x64",
+ generic: true,
+ },
+ {
+ name: "Lsh64x8",
+ generic: true,
+ },
+ {
+ name: "Lsh64x16",
+ generic: true,
+ },
+ {
+ name: "Lsh64x32",
+ generic: true,
+ },
+ {
+ name: "Lsh64x64",
+ generic: true,
+ },
+ {
+ name: "Rsh8x8",
+ generic: true,
+ },
+ {
+ name: "Rsh8x16",
+ generic: true,
+ },
+ {
+ name: "Rsh8x32",
+ generic: true,
+ },
+ {
+ name: "Rsh8x64",
+ generic: true,
+ },
+ {
+ name: "Rsh16x8",
+ generic: true,
+ },
+ {
+ name: "Rsh16x16",
+ generic: true,
+ },
+ {
+ name: "Rsh16x32",
+ generic: true,
+ },
+ {
+ name: "Rsh16x64",
+ generic: true,
+ },
+ {
+ name: "Rsh32x8",
+ generic: true,
+ },
+ {
+ name: "Rsh32x16",
+ generic: true,
+ },
+ {
+ name: "Rsh32x32",
+ generic: true,
+ },
+ {
+ name: "Rsh32x64",
+ generic: true,
+ },
+ {
+ name: "Rsh64x8",
+ generic: true,
+ },
+ {
+ name: "Rsh64x16",
+ generic: true,
+ },
+ {
+ name: "Rsh64x32",
+ generic: true,
+ },
+ {
+ name: "Rsh64x64",
+ generic: true,
+ },
+ {
+ name: "Rsh8Ux8",
+ generic: true,
+ },
+ {
+ name: "Rsh8Ux16",
+ generic: true,
+ },
+ {
+ name: "Rsh8Ux32",
+ generic: true,
+ },
+ {
+ name: "Rsh8Ux64",
+ generic: true,
+ },
+ {
+ name: "Rsh16Ux8",
+ generic: true,
+ },
+ {
+ name: "Rsh16Ux16",
+ generic: true,
+ },
+ {
+ name: "Rsh16Ux32",
+ generic: true,
+ },
+ {
+ name: "Rsh16Ux64",
+ generic: true,
+ },
+ {
+ name: "Rsh32Ux8",
+ generic: true,
+ },
+ {
+ name: "Rsh32Ux16",
+ generic: true,
+ },
+ {
+ name: "Rsh32Ux32",
+ generic: true,
+ },
+ {
+ name: "Rsh32Ux64",
+ generic: true,
+ },
+ {
+ name: "Rsh64Ux8",
+ generic: true,
+ },
+ {
+ name: "Rsh64Ux16",
+ generic: true,
+ },
+ {
+ name: "Rsh64Ux32",
+ generic: true,
+ },
+ {
+ name: "Rsh64Ux64",
+ generic: true,
+ },
+ {
+ name: "Eq8",
+ generic: true,
+ },
+ {
+ name: "Eq16",
+ generic: true,
+ },
+ {
+ name: "Eq32",
+ generic: true,
+ },
+ {
+ name: "Eq64",
+ generic: true,
+ },
+ {
+ name: "EqPtr",
+ generic: true,
+ },
+ {
+ name: "EqFat",
+ generic: true,
+ },
+ {
+ name: "Neq8",
+ generic: true,
+ },
+ {
+ name: "Neq16",
+ generic: true,
+ },
+ {
+ name: "Neq32",
+ generic: true,
+ },
+ {
+ name: "Neq64",
+ generic: true,
+ },
+ {
+ name: "NeqPtr",
+ generic: true,
+ },
+ {
+ name: "NeqFat",
+ generic: true,
+ },
+ {
+ name: "Less8",
+ generic: true,
+ },
+ {
+ name: "Less8U",
+ generic: true,
+ },
+ {
+ name: "Less16",
+ generic: true,
+ },
+ {
+ name: "Less16U",
+ generic: true,
+ },
+ {
+ name: "Less32",
+ generic: true,
+ },
+ {
+ name: "Less32U",
+ generic: true,
+ },
+ {
+ name: "Less64",
+ generic: true,
+ },
+ {
+ name: "Less64U",
+ generic: true,
+ },
+ {
+ name: "Leq8",
+ generic: true,
+ },
+ {
+ name: "Leq8U",
+ generic: true,
+ },
+ {
+ name: "Leq16",
+ generic: true,
+ },
+ {
+ name: "Leq16U",
+ generic: true,
+ },
+ {
+ name: "Leq32",
+ generic: true,
+ },
+ {
+ name: "Leq32U",
+ generic: true,
+ },
+ {
+ name: "Leq64",
+ generic: true,
+ },
+ {
+ name: "Leq64U",
+ generic: true,
+ },
+ {
+ name: "Greater8",
+ generic: true,
+ },
+ {
+ name: "Greater8U",
+ generic: true,
+ },
+ {
+ name: "Greater16",
+ generic: true,
+ },
+ {
+ name: "Greater16U",
+ generic: true,
+ },
+ {
+ name: "Greater32",
+ generic: true,
+ },
+ {
+ name: "Greater32U",
+ generic: true,
+ },
+ {
+ name: "Greater64",
+ generic: true,
+ },
+ {
+ name: "Greater64U",
+ generic: true,
+ },
+ {
+ name: "Geq8",
+ generic: true,
+ },
+ {
+ name: "Geq8U",
+ generic: true,
+ },
+ {
+ name: "Geq16",
+ generic: true,
+ },
+ {
+ name: "Geq16U",
+ generic: true,
+ },
+ {
+ name: "Geq32",
+ generic: true,
+ },
+ {
+ name: "Geq32U",
+ generic: true,
+ },
+ {
+ name: "Geq64",
+ generic: true,
+ },
+ {
+ name: "Geq64U",
+ generic: true,
+ },
+ {
+ name: "Not",
+ generic: true,
+ },
+ {
+ name: "Neg8",
+ generic: true,
+ },
+ {
+ name: "Neg16",
+ generic: true,
+ },
+ {
+ name: "Neg32",
+ generic: true,
+ },
+ {
+ name: "Neg64",
+ generic: true,
+ },
+ {
+ name: "Com8",
+ generic: true,
+ },
+ {
+ name: "Com16",
+ generic: true,
+ },
+ {
+ name: "Com32",
+ generic: true,
+ },
+ {
+ name: "Com64",
+ generic: true,
+ },
+ {
+ name: "Phi",
+ generic: true,
+ },
+ {
+ name: "Copy",
+ generic: true,
+ },
+ {
+ name: "ConstBool",
+ generic: true,
+ },
+ {
+ name: "ConstString",
+ generic: true,
+ },
+ {
+ name: "ConstNil",
+ generic: true,
+ },
+ {
+ name: "Const8",
+ generic: true,
+ },
+ {
+ name: "Const16",
+ generic: true,
+ },
+ {
+ name: "Const32",
+ generic: true,
+ },
+ {
+ name: "Const64",
+ generic: true,
+ },
+ {
+ name: "ConstPtr",
+ generic: true,
+ },
+ {
+ name: "Arg",
+ generic: true,
+ },
+ {
+ name: "Addr",
+ generic: true,
+ },
+ {
+ name: "SP",
+ generic: true,
+ },
+ {
+ name: "SB",
+ generic: true,
+ },
+ {
+ name: "Func",
+ generic: true,
+ },
+ {
+ name: "Load",
+ generic: true,
+ },
+ {
+ name: "Store",
+ generic: true,
+ },
+ {
+ name: "Move",
+ generic: true,
+ },
+ {
+ name: "Zero",
+ generic: true,
+ },
+ {
+ name: "ClosureCall",
+ generic: true,
+ },
+ {
+ name: "StaticCall",
+ generic: true,
+ },
+ {
+ name: "SignExt8to16",
+ generic: true,
+ },
+ {
+ name: "SignExt8to32",
+ generic: true,
+ },
+ {
+ name: "SignExt8to64",
+ generic: true,
+ },
+ {
+ name: "SignExt16to32",
+ generic: true,
+ },
+ {
+ name: "SignExt16to64",
+ generic: true,
+ },
+ {
+ name: "SignExt32to64",
+ generic: true,
+ },
+ {
+ name: "ZeroExt8to16",
+ generic: true,
+ },
+ {
+ name: "ZeroExt8to32",
+ generic: true,
+ },
+ {
+ name: "ZeroExt8to64",
+ generic: true,
+ },
+ {
+ name: "ZeroExt16to32",
+ generic: true,
+ },
+ {
+ name: "ZeroExt16to64",
+ generic: true,
+ },
+ {
+ name: "ZeroExt32to64",
+ generic: true,
+ },
+ {
+ name: "Trunc16to8",
+ generic: true,
+ },
+ {
+ name: "Trunc32to8",
+ generic: true,
+ },
+ {
+ name: "Trunc32to16",
+ generic: true,
+ },
+ {
+ name: "Trunc64to8",
+ generic: true,
+ },
+ {
+ name: "Trunc64to16",
+ generic: true,
+ },
+ {
+ name: "Trunc64to32",
+ generic: true,
+ },
+ {
+ name: "ConvNop",
+ generic: true,
+ },
+ {
+ name: "IsNonNil",
+ generic: true,
+ },
+ {
+ name: "IsInBounds",
+ generic: true,
+ },
+ {
+ name: "ArrayIndex",
+ generic: true,
+ },
+ {
+ name: "PtrIndex",
+ generic: true,
+ },
+ {
+ name: "OffPtr",
+ generic: true,
+ },
+ {
+ name: "StructSelect",
+ generic: true,
+ },
+ {
+ name: "SliceMake",
+ generic: true,
+ },
+ {
+ name: "SlicePtr",
+ generic: true,
+ },
+ {
+ name: "SliceLen",
+ generic: true,
+ },
+ {
+ name: "SliceCap",
+ generic: true,
+ },
+ {
+ name: "StringMake",
+ generic: true,
+ },
+ {
+ name: "StringPtr",
+ generic: true,
+ },
+ {
+ name: "StringLen",
+ generic: true,
+ },
+ {
+ name: "StoreReg",
+ generic: true,
+ },
+ {
+ name: "LoadReg",
+ generic: true,
+ },
+ {
+ name: "FwdRef",
+ generic: true,
+ },
+}
+
+// Asm returns the obj-package assembler opcode for o
+// (the opInfo.asm field, e.g. x86.AMOVQ), or 0 if o has none.
+func (o Op) Asm() int { return opcodeTable[o].asm }
+
+// String returns the name of o for printing.
+func (o Op) String() string { return opcodeTable[o].name }
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// opt performs machine-independent optimization: it applies the
+// generic (architecture-neutral) rewrite rules to every value and
+// block of f via applyRewrite.
+func opt(f *Func) {
+ applyRewrite(f, rewriteBlockgeneric, rewriteValuegeneric)
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// phielim eliminates redundant phi values from f.
+// A phi is redundant if its arguments are all equal. For
+// purposes of counting, ignore the phi itself. Both of
+// these phis are redundant:
+// v = phi(x,x,x)
+// v = phi(x,v,x,v)
+// Redundant phis are rewritten in place into OpCopy of the
+// single remaining argument; a later pass can remove the copies.
+func phielim(f *Func) {
+ // args holds the set of distinct argument IDs of the current phi.
+ args := newSparseSet(f.NumValues())
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ // Collect the distinct arguments of v, looking through
+ // chains of copies so equal values compare equal.
+ args.clear()
+ for _, x := range v.Args {
+ for x.Op == OpCopy {
+ x = x.Args[0]
+ }
+ args.add(x.ID)
+ }
+ switch {
+ case args.size() == 1:
+ // v = phi(x,x,...,x): v is just x.
+ v.Op = OpCopy
+ v.SetArgs1(v.Args[0])
+ case args.size() == 2 && args.contains(v.ID):
+ // v = phi(x,v,...): ignoring v itself, all args are x,
+ // so v is just x. Find the non-v argument.
+ var w *Value
+ for _, x := range v.Args {
+ if x.ID != v.ID {
+ w = x
+ break
+ }
+ }
+ v.Op = OpCopy
+ v.SetArgs1(w)
+ }
+ }
+ }
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+)
+
+// printFunc writes f's full textual representation (via f.String)
+// to the function's logger.
+func printFunc(f *Func) {
+ f.Logf("%s", f)
+}
+
+// String returns a multi-line textual representation of f,
+// as produced by fprintFunc.
+func (f *Func) String() string {
+ var buf bytes.Buffer
+ fprintFunc(&buf, f)
+ return buf.String()
+}
+
+// fprintFunc writes a textual representation of f to w: the function
+// name and type, then each block with its predecessor list and its
+// values. Within a block, phis are printed first; the remaining values
+// are printed in dependency order (a value after all of its same-block
+// arguments), falling back to arbitrary order if a cycle is detected.
+// NOTE(review): the inner loop below reuses the name w for a *Value,
+// shadowing the io.Writer parameter; harmless but easy to misread.
+func fprintFunc(w io.Writer, f *Func) {
+ fmt.Fprint(w, f.Name)
+ fmt.Fprint(w, " ")
+ fmt.Fprintln(w, f.Type)
+ // printed[id] reports whether the value with that ID has been emitted.
+ printed := make([]bool, f.NumValues())
+ for _, b := range f.Blocks {
+ fmt.Fprintf(w, " b%d:", b.ID)
+ if len(b.Preds) > 0 {
+ io.WriteString(w, " <-")
+ for _, pred := range b.Preds {
+ fmt.Fprintf(w, " b%d", pred.ID)
+ }
+ }
+ io.WriteString(w, "\n")
+ // n counts how many of b's values have been printed so far.
+ n := 0
+
+ // print phis first since all value cycles contain a phi
+ for _, v := range b.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ fmt.Fprint(w, " ")
+ fmt.Fprintln(w, v.LongString())
+ printed[v.ID] = true
+ n++
+ }
+
+ // print rest of values in dependency order
+ for n < len(b.Values) {
+ m := n
+ outer:
+ for _, v := range b.Values {
+ if printed[v.ID] {
+ continue
+ }
+ // Skip v until all of its same-block arguments are printed.
+ for _, w := range v.Args {
+ // w == nil shouldn't happen, but if it does,
+ // don't panic; we'll get a better diagnosis later.
+ if w != nil && w.Block == b && !printed[w.ID] {
+ continue outer
+ }
+ }
+ fmt.Fprint(w, " ")
+ fmt.Fprintln(w, v.LongString())
+ printed[v.ID] = true
+ n++
+ }
+ // No progress on this pass: there is a non-phi value cycle.
+ // Report it and dump the remaining values in block order.
+ if m == n {
+ fmt.Fprintln(w, "dependency cycle!")
+ for _, v := range b.Values {
+ if printed[v.ID] {
+ continue
+ }
+ fmt.Fprint(w, " ")
+ fmt.Fprintln(w, v.LongString())
+ printed[v.ID] = true
+ n++
+ }
+ }
+ }
+
+ fmt.Fprintln(w, " "+b.LongString())
+ }
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "sort"
+
+func setloc(home []Location, v *Value, loc Location) []Location {
+ for v.ID >= ID(len(home)) {
+ home = append(home, nil)
+ }
+ home[v.ID] = loc
+ return home
+}
+
// register is the index of a machine register.
type register uint

// regMask is a bit set of registers; bit i corresponds to registers[i].
type regMask uint64

// numRegs is the number of entries a regMask can describe.
// TODO: make arch-dependent
var numRegs register = 64

// registers is the machine register file, indexed by register number.
var registers = [...]Register{
	Register{0, "AX"},
	Register{1, "CX"},
	Register{2, "DX"},
	Register{3, "BX"},
	Register{4, "SP"},
	Register{5, "BP"},
	Register{6, "SI"},
	Register{7, "DI"},
	Register{8, "R8"},
	Register{9, "R9"},
	Register{10, "R10"},
	Register{11, "R11"},
	Register{12, "R12"},
	Register{13, "R13"},
	Register{14, "R14"},
	Register{15, "R15"},
	Register{16, "X0"},
	Register{17, "X1"},
	Register{18, "X2"},
	Register{19, "X3"},
	Register{20, "X4"},
	Register{21, "X5"},
	Register{22, "X6"},
	Register{23, "X7"},
	Register{24, "X8"},
	Register{25, "X9"},
	Register{26, "X10"},
	Register{27, "X11"},
	Register{28, "X12"},
	Register{29, "X13"},
	Register{30, "X14"},
	Register{31, "X15"},
	Register{32, "SB"}, // pseudo-register for global base pointer (aka %rip)
	Register{33, "FLAGS"},

	// TODO: make arch-dependent
}
+
+// countRegs returns the number of set bits in the register mask.
+func countRegs(r regMask) int {
+ n := 0
+ for r != 0 {
+ n += int(r & 1)
+ r >>= 1
+ }
+ return n
+}
+
+// pickReg picks an arbitrary register from the register mask.
+func pickReg(r regMask) register {
+ // pick the lowest one
+ if r == 0 {
+ panic("can't pick a register from an empty set")
+ }
+ for i := register(0); ; i++ {
+ if r&1 != 0 {
+ return i
+ }
+ r >>= 1
+ }
+}
+
+// regalloc performs register allocation on f. It sets f.RegAlloc
+// to the resulting allocation.
+func regalloc(f *Func) {
+ // For now, a very simple allocator. Everything has a home
+ // location on the stack (TBD as a subsequent stackalloc pass).
+ // Values live in the home locations at basic block boundaries.
+ // We use a simple greedy allocator within a basic block.
+ home := make([]Location, f.NumValues())
+
+ addPhiCopies(f) // add copies of phi inputs in preceeding blocks
+
+ // Compute live values at the end of each block.
+ live := live(f)
+ lastUse := make([]int, f.NumValues())
+
+ var oldSched []*Value
+
+ // Hack to find sp and sb Values and assign them a register. (TODO: make not so hacky)
+ var sp, sb *Value
+ for _, v := range f.Entry.Values {
+ switch v.Op {
+ case OpSP:
+ sp = v
+ home = setloc(home, v, ®isters[4]) // TODO: arch-dependent
+ case OpSB:
+ sb = v
+ home = setloc(home, v, ®isters[32]) // TODO: arch-dependent
+ }
+ }
+
+ // Register allocate each block separately. All live values will live
+ // in home locations (stack slots) between blocks.
+ for _, b := range f.Blocks {
+
+ // Compute the index of the last use of each Value in the Block.
+ // Scheduling has already happened, so Values are totally ordered.
+ // lastUse[x] = max(i) where b.Value[i] uses Value x.
+ for i, v := range b.Values {
+ lastUse[v.ID] = -1
+ for _, w := range v.Args {
+ // could condition this store on w.Block == b, but no need
+ lastUse[w.ID] = i
+ }
+ }
+ // Values which are live at block exit have a lastUse of len(b.Values).
+ if b.Control != nil {
+ lastUse[b.Control.ID] = len(b.Values)
+ }
+ // Values live after block exit have a lastUse of len(b.Values)+1.
+ for _, vid := range live[b.ID] {
+ lastUse[vid] = len(b.Values) + 1
+ }
+
+ // For each register, store which value it contains
+ type regInfo struct {
+ v *Value // stack-homed original value (or nil if empty)
+ c *Value // the register copy of v
+ dirty bool // if the stack-homed copy is out of date
+ }
+ regs := make([]regInfo, numRegs)
+
+ // TODO: hack: initialize fixed registers
+ regs[4] = regInfo{sp, sp, false}
+ regs[32] = regInfo{sb, sb, false}
+
+ var used regMask // has a 1 for each non-nil entry in regs
+ var dirty regMask // has a 1 for each dirty entry in regs
+
+ oldSched = append(oldSched[:0], b.Values...)
+ b.Values = b.Values[:0]
+
+ for idx, v := range oldSched {
+ // For each instruction, do:
+ // set up inputs to v in registers
+ // pick output register
+ // run insn
+ // mark output register as dirty
+ // Note that v represents the Value at "home" (on the stack), and c
+ // is its register equivalent. There are two ways to establish c:
+ // - use of v. c will be a load from v's home.
+ // - definition of v. c will be identical to v but will live in
+ // a register. v will be modified into a spill of c.
+ regspec := opcodeTable[v.Op].reg
+ if v.Op == OpCopy || v.Op == OpConvNop {
+ // TODO: make this less of a hack
+ regspec = opcodeTable[OpAMD64ADDQconst].reg
+ }
+ inputs := regspec.inputs
+ outputs := regspec.outputs
+ if len(inputs) == 0 && len(outputs) == 0 {
+ // No register allocation required (or none specified yet)
+ b.Values = append(b.Values, v)
+ continue
+ }
+ if v.Op == OpCopy && v.Type.IsMemory() {
+ b.Values = append(b.Values, v)
+ continue
+ }
+
+ // Compute a good input ordering. Start with the most constrained input.
+ order := make([]intPair, len(inputs))
+ for i, input := range inputs {
+ order[i] = intPair{countRegs(input), i}
+ }
+ sort.Sort(byKey(order))
+
+ // nospill contains registers that we can't spill because
+ // we already set them up for use by the current instruction.
+ var nospill regMask
+ nospill |= 0x100000010 // SP & SB can't be spilled (TODO: arch-specific)
+
+ // Move inputs into registers
+ for _, o := range order {
+ w := v.Args[o.val]
+ mask := inputs[o.val]
+ if mask == 0 {
+ // Input doesn't need a register
+ continue
+ }
+ // TODO: 2-address overwrite instructions
+
+ // Find registers that w is already in
+ var wreg regMask
+ for r := register(0); r < numRegs; r++ {
+ if regs[r].v == w {
+ wreg |= regMask(1) << r
+ }
+ }
+
+ var r register
+ if mask&wreg != 0 {
+ // w is already in an allowed register. We're done.
+ r = pickReg(mask & wreg)
+ } else {
+ // Pick a register for w
+ // Priorities (in order)
+ // - an unused register
+ // - a clean register
+ // - a dirty register
+ // TODO: for used registers, pick the one whose next use is the
+ // farthest in the future.
+ mask &^= nospill
+ if mask & ^dirty != 0 {
+ mask &^= dirty
+ }
+ if mask & ^used != 0 {
+ mask &^= used
+ }
+ r = pickReg(mask)
+
+ // Kick out whomever is using this register.
+ if regs[r].v != nil {
+ x := regs[r].v
+ c := regs[r].c
+ if regs[r].dirty && lastUse[x.ID] >= idx {
+ // Write x back to home. Its value is currently held in c.
+ x.Op = OpStoreReg
+ x.Aux = nil
+ x.resetArgs()
+ x.AddArg(c)
+ b.Values = append(b.Values, x)
+ regs[r].dirty = false
+ dirty &^= regMask(1) << r
+ }
+ regs[r].v = nil
+ regs[r].c = nil
+ used &^= regMask(1) << r
+ }
+
+ // Load w into this register
+ var c *Value
+ if len(w.Args) == 0 {
+ // Materialize w
+ if w.Op == OpSB {
+ c = w
+ } else if w.Op == OpSP {
+ c = b.NewValue1(w.Line, OpCopy, w.Type, w)
+ } else {
+ c = b.NewValue0IA(w.Line, w.Op, w.Type, w.AuxInt, w.Aux)
+ }
+ } else if len(w.Args) == 1 && (w.Args[0].Op == OpSP || w.Args[0].Op == OpSB) {
+ // Materialize offsets from SP/SB
+ c = b.NewValue1IA(w.Line, w.Op, w.Type, w.AuxInt, w.Aux, w.Args[0])
+ } else if wreg != 0 {
+ // Copy from another register.
+ // Typically just an optimization, but this is
+ // required if w is dirty.
+ s := pickReg(wreg)
+ // inv: s != r
+ c = b.NewValue1(w.Line, OpCopy, w.Type, regs[s].c)
+ } else {
+ // Load from home location
+ c = b.NewValue1(w.Line, OpLoadReg, w.Type, w)
+ }
+ home = setloc(home, c, ®isters[r])
+ // Remember what we did
+ regs[r].v = w
+ regs[r].c = c
+ regs[r].dirty = false
+ used |= regMask(1) << r
+ }
+
+ // Replace w with its in-register copy.
+ v.SetArg(o.val, regs[r].c)
+
+ // Remember not to undo this register assignment until after
+ // the instruction is issued.
+ nospill |= regMask(1) << r
+ }
+
+ // TODO: do any clobbering
+
+ // pick a register for v itself.
+ if len(outputs) > 1 {
+ panic("can't do multi-output yet")
+ }
+ if len(outputs) == 0 || outputs[0] == 0 {
+ // output doesn't need a register
+ b.Values = append(b.Values, v)
+ } else {
+ mask := outputs[0]
+ if mask & ^dirty != 0 {
+ mask &^= dirty
+ }
+ if mask & ^used != 0 {
+ mask &^= used
+ }
+ r := pickReg(mask)
+
+ // Kick out whomever is using this register.
+ if regs[r].v != nil {
+ x := regs[r].v
+ c := regs[r].c
+ if regs[r].dirty && lastUse[x.ID] >= idx {
+ // Write x back to home. Its value is currently held in c.
+ x.Op = OpStoreReg
+ x.Aux = nil
+ x.resetArgs()
+ x.AddArg(c)
+ b.Values = append(b.Values, x)
+ regs[r].dirty = false
+ dirty &^= regMask(1) << r
+ }
+ regs[r].v = nil
+ regs[r].c = nil
+ used &^= regMask(1) << r
+ }
+
+ // Reissue v with new op, with r as its home.
+ c := b.NewValue0IA(v.Line, v.Op, v.Type, v.AuxInt, v.Aux)
+ c.AddArgs(v.Args...)
+ home = setloc(home, c, ®isters[r])
+
+ // Remember what we did
+ regs[r].v = v
+ regs[r].c = c
+ regs[r].dirty = true
+ used |= regMask(1) << r
+ dirty |= regMask(1) << r
+ }
+ }
+
+ // If the block ends in a call, we must put the call after the spill code.
+ var call *Value
+ if b.Kind == BlockCall {
+ call = b.Control
+ if call != b.Values[len(b.Values)-1] {
+ b.Fatalf("call not at end of block %b %v", b, call)
+ }
+ b.Values = b.Values[:len(b.Values)-1]
+ // TODO: do this for all control types?
+ }
+
+ // at the end of the block, spill any remaining dirty, live values
+ for r := register(0); r < numRegs; r++ {
+ if !regs[r].dirty {
+ continue
+ }
+ v := regs[r].v
+ c := regs[r].c
+ if lastUse[v.ID] <= len(oldSched) {
+ if v == v.Block.Control {
+ // link control value to register version
+ v.Block.Control = c
+ }
+ continue // not live after block
+ }
+
+ // change v to be a copy of c
+ v.Op = OpStoreReg
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(c)
+ b.Values = append(b.Values, v)
+ }
+
+ // add call back after spills
+ if b.Kind == BlockCall {
+ b.Values = append(b.Values, call)
+ }
+ }
+ f.RegAlloc = home
+ deadcode(f) // remove values that had all of their uses rematerialized. TODO: separate pass?
+}
+
+// addPhiCopies adds copies of phi inputs in the blocks
+// immediately preceding the phi's block.
+func addPhiCopies(f *Func) {
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op != OpPhi {
+ break // all phis should appear first
+ }
+ if v.Type.IsMemory() { // TODO: only "regallocable" types
+ continue
+ }
+ for i, w := range v.Args {
+ c := b.Preds[i]
+ cpy := c.NewValue1(w.Line, OpCopy, v.Type, w)
+ v.Args[i] = cpy
+ }
+ }
+ }
+}
+
// live returns a map from block ID to a list of value IDs live at the end of that block.
// It computes liveness by iterating a backward dataflow pass to a fixed point.
// TODO: this could be quadratic if lots of variables are live across lots of
// basic blocks. Figure out a way to make this function (or, more precisely, the user
// of this function) require only linear size & time.
func live(f *Func) [][]ID {
	live := make([][]ID, f.NumBlocks())
	var phis []*Value

	s := newSparseSet(f.NumValues())
	t := newSparseSet(f.NumValues())

	// Instead of iterating over f.Blocks, iterate over their postordering.
	// Liveness information flows backward, so starting at the end
	// increases the probability that we will stabilize quickly.
	// TODO: Do a better job yet. Here's one possibility:
	// Calculate the dominator tree and locate all strongly connected components.
	// If a value is live in one block of an SCC, it is live in all.
	// Walk the dominator tree from end to beginning, just once, treating SCC
	// components as single blocks, duplicated calculated liveness information
	// out to all of them.
	po := postorder(f)
	for {
		// Debug output: dump the current liveness estimate on every pass.
		for _, b := range po {
			f.Logf("live %s %v\n", b, live[b.ID])
		}
		changed := false

		for _, b := range po {
			// Start with known live values at the end of the block
			s.clear()
			s.addAll(live[b.ID])

			// Propagate backwards to the start of the block
			// Assumes Values have been scheduled.
			phis := phis[:0] // shadows (and reuses the storage of) the outer phis
			for i := len(b.Values) - 1; i >= 0; i-- {
				v := b.Values[i]
				s.remove(v.ID)
				if v.Op == OpPhi {
					// save phi ops for later;
					// phi inputs are live in the predecessor, not here
					phis = append(phis, v)
					continue
				}
				s.addAllValues(v.Args)
			}

			// for each predecessor of b, expand its list of live-at-end values
			// inv: s contains the values live at the start of b (excluding phi inputs)
			for i, p := range b.Preds {
				t.clear()
				t.addAll(live[p.ID])
				t.addAll(s.contents())
				for _, v := range phis {
					// the phi input coming from predecessor i is live at p's exit
					t.add(v.Args[i].ID)
				}
				if t.size() == len(live[p.ID]) {
					continue
				}
				// grow p's live set
				c := make([]ID, t.size())
				copy(c, t.contents())
				live[p.ID] = c
				changed = true
			}
		}

		if !changed {
			break
		}
	}
	return live
}
+
// intPair is a (key, val) pair of ints, used for sorting by key.
type intPair struct {
	key, val int
}

// byKey implements sort.Interface, ordering intPairs by increasing key.
type byKey []intPair

func (a byKey) Len() int {
	return len(a)
}

func (a byKey) Swap(i, j int) {
	a[i], a[j] = a[j], a[i]
}

func (a byKey) Less(i, j int) bool {
	return a[i].key < a[j].key
}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "fmt"
+
// applyRewrite repeatedly applies the block rewrite function rb and the
// value rewrite function rv to every block and value of f until neither
// reports any further change. If a rewrite panics, the deferred handler
// reports which block or value was being rewritten.
func applyRewrite(f *Func, rb func(*Block) bool, rv func(*Value, *Config) bool) {
	// repeat rewrites until we find no more rewrites
	var curb *Block
	var curv *Value
	defer func() {
		// If we panicked mid-rewrite, say what we were working on.
		if curb != nil {
			curb.Fatalf("panic during rewrite of block %s\n", curb.LongString())
		}
		if curv != nil {
			curv.Fatalf("panic during rewrite of value %s\n", curv.LongString())
			// TODO(khr): print source location also
		}
	}()
	config := f.Config
	for {
		change := false
		for _, b := range f.Blocks {
			if b.Kind == BlockDead {
				continue
			}
			// Chase copy chains on the block's control value.
			if b.Control != nil && b.Control.Op == OpCopy {
				for b.Control.Op == OpCopy {
					b.Control = b.Control.Args[0]
				}
			}
			curb = b
			if rb(b) {
				change = true
			}
			curb = nil
			for _, v := range b.Values {
				// elide any copies generated during rewriting
				for i, a := range v.Args {
					if a.Op != OpCopy {
						continue
					}
					// Rewriting can generate OpCopy loops.
					// They are harmless (see removePredecessor),
					// but take care not to loop forever.
					for a.Op == OpCopy && a != a.Args[0] {
						a = a.Args[0]
					}
					v.Args[i] = a
				}

				// apply rewrite function
				curv = v
				if rv(v, config) {
					change = true
				}
				curv = nil
			}
		}
		if !change {
			return
		}
	}
}
+
// Common functions called from rewriting rules

// is64BitInt reports whether t is a 64-bit integer type.
func is64BitInt(t Type) bool {
	return t.Size() == 8 && t.IsInteger()
}

// is32BitInt reports whether t is a 32-bit integer type.
func is32BitInt(t Type) bool {
	return t.Size() == 4 && t.IsInteger()
}

// is16BitInt reports whether t is a 16-bit integer type.
func is16BitInt(t Type) bool {
	return t.Size() == 2 && t.IsInteger()
}

// is8BitInt reports whether t is an 8-bit integer type.
func is8BitInt(t Type) bool {
	return t.Size() == 1 && t.IsInteger()
}

// isPtr reports whether t is a pointer type.
func isPtr(t Type) bool {
	return t.IsPtr()
}

// isSigned reports whether t is a signed numeric type.
func isSigned(t Type) bool {
	return t.IsSigned()
}

// typeSize returns the size of t in bytes.
func typeSize(t Type) int64 {
	return t.Size()
}
+
// addOff adds two int64 offsets. It panics if the sum wraps
// around the int64 range.
func addOff(x, y int64) int64 {
	z := x + y
	// Overflow happened iff the operands share a sign but the
	// sum's sign differs from theirs.
	if (x >= 0) == (y >= 0) && (z >= 0) != (x >= 0) {
		panic(fmt.Sprintf("offset overflow %d %d", x, y))
	}
	return z
}
+
// mergeSym merges two symbolic offsets; at most one may be non-nil.
// It returns the non-nil one (or nil if both are nil) and panics if
// both x and y are non-nil.
// (The unreachable "return nil" after the panic was removed.)
func mergeSym(x, y interface{}) interface{} {
	if x == nil {
		return y
	}
	if y == nil {
		return x
	}
	panic(fmt.Sprintf("mergeSym with two non-nil syms %s %s", x, y))
}
+
// inBounds reports whether idx is a valid index into a sequence
// of length len, i.e. 0 <= idx < len.
func inBounds(idx, len int64) bool {
	return 0 <= idx && idx < len
}
+
// log2 returns the base-2 logarithm of n.
// n is expected to be a power of 2.
func log2(n int64) (l int64) {
	for ; n > 1; n >>= 1 {
		l++
	}
	return l
}
+
// isPowerOfTwo reports whether n is a positive power of 2.
func isPowerOfTwo(n int64) bool {
	if n <= 0 {
		return false
	}
	return n&(n-1) == 0
}
+
// is32Bit reports whether n can be represented as a signed 32 bit integer.
func is32Bit(n int64) bool {
	return -1<<31 <= n && n < 1<<31
}
--- /dev/null
+// autogenerated from gen/AMD64.rules: do not edit!
+// generated with: cd gen; go run *.go
+package ssa
+
+func rewriteValueAMD64(v *Value, config *Config) bool {
+ switch v.Op {
+ case OpAMD64ADDB:
+ // match: (ADDB x (MOVBconst [c]))
+ // cond:
+ // result: (ADDBconst [c] x)
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVBconst {
+ goto endab690db69bfd8192eea57a2f9f76bf84
+ }
+ c := v.Args[1].AuxInt
+ v.Op = OpAMD64ADDBconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto endab690db69bfd8192eea57a2f9f76bf84
+ endab690db69bfd8192eea57a2f9f76bf84:
+ ;
+ // match: (ADDB (MOVBconst [c]) x)
+ // cond:
+ // result: (ADDBconst [c] x)
+ {
+ if v.Args[0].Op != OpAMD64MOVBconst {
+ goto end28aa1a4abe7e1abcdd64135e9967d39d
+ }
+ c := v.Args[0].AuxInt
+ x := v.Args[1]
+ v.Op = OpAMD64ADDBconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto end28aa1a4abe7e1abcdd64135e9967d39d
+ end28aa1a4abe7e1abcdd64135e9967d39d:
+ ;
+ case OpAMD64ADDBconst:
+ // match: (ADDBconst [c] (MOVBconst [d]))
+ // cond:
+ // result: (MOVBconst [c+d])
+ {
+ c := v.AuxInt
+ if v.Args[0].Op != OpAMD64MOVBconst {
+ goto enda9b1e9e31ccdf0af5f4fe57bf4b1343f
+ }
+ d := v.Args[0].AuxInt
+ v.Op = OpAMD64MOVBconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c + d
+ return true
+ }
+ goto enda9b1e9e31ccdf0af5f4fe57bf4b1343f
+ enda9b1e9e31ccdf0af5f4fe57bf4b1343f:
+ ;
+ case OpAMD64ADDL:
+ // match: (ADDL x (MOVLconst [c]))
+ // cond:
+ // result: (ADDLconst [c] x)
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVLconst {
+ goto end8d6d3b99a7be8da6b7a254b7e709cc95
+ }
+ c := v.Args[1].AuxInt
+ v.Op = OpAMD64ADDLconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto end8d6d3b99a7be8da6b7a254b7e709cc95
+ end8d6d3b99a7be8da6b7a254b7e709cc95:
+ ;
+ // match: (ADDL (MOVLconst [c]) x)
+ // cond:
+ // result: (ADDLconst [c] x)
+ {
+ if v.Args[0].Op != OpAMD64MOVLconst {
+ goto end739561e08a561e26ce3634dc0d5ec733
+ }
+ c := v.Args[0].AuxInt
+ x := v.Args[1]
+ v.Op = OpAMD64ADDLconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto end739561e08a561e26ce3634dc0d5ec733
+ end739561e08a561e26ce3634dc0d5ec733:
+ ;
+ case OpAMD64ADDLconst:
+ // match: (ADDLconst [c] (MOVLconst [d]))
+ // cond:
+ // result: (MOVLconst [c+d])
+ {
+ c := v.AuxInt
+ if v.Args[0].Op != OpAMD64MOVLconst {
+ goto ende04850e987890abf1d66199042a19c23
+ }
+ d := v.Args[0].AuxInt
+ v.Op = OpAMD64MOVLconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c + d
+ return true
+ }
+ goto ende04850e987890abf1d66199042a19c23
+ ende04850e987890abf1d66199042a19c23:
+ ;
+ case OpAMD64ADDQ:
+ // match: (ADDQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (ADDQconst [c] x)
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVQconst {
+ goto end1de8aeb1d043e0dadcffd169a99ce5c0
+ }
+ c := v.Args[1].AuxInt
+ if !(is32Bit(c)) {
+ goto end1de8aeb1d043e0dadcffd169a99ce5c0
+ }
+ v.Op = OpAMD64ADDQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto end1de8aeb1d043e0dadcffd169a99ce5c0
+ end1de8aeb1d043e0dadcffd169a99ce5c0:
+ ;
+ // match: (ADDQ (MOVQconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (ADDQconst [c] x)
+ {
+ if v.Args[0].Op != OpAMD64MOVQconst {
+ goto endca635e3bdecd9e3aeb892f841021dfaa
+ }
+ c := v.Args[0].AuxInt
+ x := v.Args[1]
+ if !(is32Bit(c)) {
+ goto endca635e3bdecd9e3aeb892f841021dfaa
+ }
+ v.Op = OpAMD64ADDQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto endca635e3bdecd9e3aeb892f841021dfaa
+ endca635e3bdecd9e3aeb892f841021dfaa:
+ ;
+ // match: (ADDQ x (SHLQconst [3] y))
+ // cond:
+ // result: (LEAQ8 x y)
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64SHLQconst {
+ goto endc02313d35a0525d1d680cd58992e820d
+ }
+ if v.Args[1].AuxInt != 3 {
+ goto endc02313d35a0525d1d680cd58992e820d
+ }
+ y := v.Args[1].Args[0]
+ v.Op = OpAMD64LEAQ8
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ goto endc02313d35a0525d1d680cd58992e820d
+ endc02313d35a0525d1d680cd58992e820d:
+ ;
+ case OpAMD64ADDQconst:
+ // match: (ADDQconst [c] (LEAQ8 [d] x y))
+ // cond:
+ // result: (LEAQ8 [addOff(c, d)] x y)
+ {
+ c := v.AuxInt
+ if v.Args[0].Op != OpAMD64LEAQ8 {
+ goto ende2cc681c9abf9913288803fb1b39e639
+ }
+ d := v.Args[0].AuxInt
+ x := v.Args[0].Args[0]
+ y := v.Args[0].Args[1]
+ v.Op = OpAMD64LEAQ8
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = addOff(c, d)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ goto ende2cc681c9abf9913288803fb1b39e639
+ ende2cc681c9abf9913288803fb1b39e639:
+ ;
+ // match: (ADDQconst [0] x)
+ // cond:
+ // result: (Copy x)
+ {
+ if v.AuxInt != 0 {
+ goto end288952f259d4a1842f1e8d5c389b3f28
+ }
+ x := v.Args[0]
+ v.Op = OpCopy
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto end288952f259d4a1842f1e8d5c389b3f28
+ end288952f259d4a1842f1e8d5c389b3f28:
+ ;
+ // match: (ADDQconst [c] (MOVQconst [d]))
+ // cond:
+ // result: (MOVQconst [c+d])
+ {
+ c := v.AuxInt
+ if v.Args[0].Op != OpAMD64MOVQconst {
+ goto end09dc54395b4e96e8332cf8e4e7481c52
+ }
+ d := v.Args[0].AuxInt
+ v.Op = OpAMD64MOVQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c + d
+ return true
+ }
+ goto end09dc54395b4e96e8332cf8e4e7481c52
+ end09dc54395b4e96e8332cf8e4e7481c52:
+ ;
+ case OpAMD64ADDW:
+ // match: (ADDW x (MOVWconst [c]))
+ // cond:
+ // result: (ADDWconst [c] x)
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVWconst {
+ goto end1aabd2317de77c7dfc4876fd7e4c5011
+ }
+ c := v.Args[1].AuxInt
+ v.Op = OpAMD64ADDWconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto end1aabd2317de77c7dfc4876fd7e4c5011
+ end1aabd2317de77c7dfc4876fd7e4c5011:
+ ;
+ // match: (ADDW (MOVWconst [c]) x)
+ // cond:
+ // result: (ADDWconst [c] x)
+ {
+ if v.Args[0].Op != OpAMD64MOVWconst {
+ goto ende3aede99966f388afc624f9e86676fd2
+ }
+ c := v.Args[0].AuxInt
+ x := v.Args[1]
+ v.Op = OpAMD64ADDWconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto ende3aede99966f388afc624f9e86676fd2
+ ende3aede99966f388afc624f9e86676fd2:
+ ;
+ case OpAMD64ADDWconst:
+ // match: (ADDWconst [c] (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [c+d])
+ {
+ c := v.AuxInt
+ if v.Args[0].Op != OpAMD64MOVWconst {
+ goto end32541920f2f5a920dfae41d8ebbef00f
+ }
+ d := v.Args[0].AuxInt
+ v.Op = OpAMD64MOVWconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c + d
+ return true
+ }
+ goto end32541920f2f5a920dfae41d8ebbef00f
+ end32541920f2f5a920dfae41d8ebbef00f:
+ ;
+ case OpAMD64ANDB:
+ // match: (ANDB x (MOVBconst [c]))
+ // cond:
+ // result: (ANDBconst [c] x)
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVBconst {
+ goto endd275ec2e73768cb3d201478fc934e06c
+ }
+ c := v.Args[1].AuxInt
+ v.Op = OpAMD64ANDBconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto endd275ec2e73768cb3d201478fc934e06c
+ endd275ec2e73768cb3d201478fc934e06c:
+ ;
+ // match: (ANDB (MOVBconst [c]) x)
+ // cond:
+ // result: (ANDBconst [c] x)
+ {
+ if v.Args[0].Op != OpAMD64MOVBconst {
+ goto end4068edac2ae0f354cf581db210288b98
+ }
+ c := v.Args[0].AuxInt
+ x := v.Args[1]
+ v.Op = OpAMD64ANDBconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto end4068edac2ae0f354cf581db210288b98
+ end4068edac2ae0f354cf581db210288b98:
+ ;
+ case OpAMD64ANDBconst:
+ // match: (ANDBconst [c] _)
+ // cond: int8(c)==0
+ // result: (MOVBconst [0])
+ {
+ c := v.AuxInt
+ if !(int8(c) == 0) {
+ goto end2106d410c949da14d7c00041f40eca76
+ }
+ v.Op = OpAMD64MOVBconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = 0
+ return true
+ }
+ goto end2106d410c949da14d7c00041f40eca76
+ end2106d410c949da14d7c00041f40eca76:
+ ;
+ // match: (ANDBconst [c] x)
+ // cond: int8(c)==-1
+ // result: (Copy x)
+ {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(int8(c) == -1) {
+ goto ende983ac58fd9834f2c8503e92e45d83db
+ }
+ v.Op = OpCopy
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto ende983ac58fd9834f2c8503e92e45d83db
+ ende983ac58fd9834f2c8503e92e45d83db:
+ ;
+ // match: (ANDBconst [c] (MOVBconst [d]))
+ // cond:
+ // result: (MOVBconst [c&d])
+ {
+ c := v.AuxInt
+ if v.Args[0].Op != OpAMD64MOVBconst {
+ goto end946312b1f216933da86febe293eb956f
+ }
+ d := v.Args[0].AuxInt
+ v.Op = OpAMD64MOVBconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c & d
+ return true
+ }
+ goto end946312b1f216933da86febe293eb956f
+ end946312b1f216933da86febe293eb956f:
+ ;
+ case OpAMD64ANDL:
+ // match: (ANDL x (MOVLconst [c]))
+ // cond:
+ // result: (ANDLconst [c] x)
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVLconst {
+ goto end0a4c49d9a26759c0fd21369dafcd7abb
+ }
+ c := v.Args[1].AuxInt
+ v.Op = OpAMD64ANDLconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto end0a4c49d9a26759c0fd21369dafcd7abb
+ end0a4c49d9a26759c0fd21369dafcd7abb:
+ ;
+ // match: (ANDL (MOVLconst [c]) x)
+ // cond:
+ // result: (ANDLconst [c] x)
+ {
+ if v.Args[0].Op != OpAMD64MOVLconst {
+ goto end0529ba323d9b6f15c41add401ef67959
+ }
+ c := v.Args[0].AuxInt
+ x := v.Args[1]
+ v.Op = OpAMD64ANDLconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto end0529ba323d9b6f15c41add401ef67959
+ end0529ba323d9b6f15c41add401ef67959:
+ ;
+ case OpAMD64ANDLconst:
+ // match: (ANDLconst [c] _)
+ // cond: int32(c)==0
+ // result: (MOVLconst [0])
+ {
+ c := v.AuxInt
+ if !(int32(c) == 0) {
+ goto end5efb241208aef28c950b7bcf8d85d5de
+ }
+ v.Op = OpAMD64MOVLconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = 0
+ return true
+ }
+ goto end5efb241208aef28c950b7bcf8d85d5de
+ end5efb241208aef28c950b7bcf8d85d5de:
+ ;
+ // match: (ANDLconst [c] x)
+ // cond: int32(c)==-1
+ // result: (Copy x)
+ {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(int32(c) == -1) {
+ goto enda670b6e074269a5e1fcbdaec05596a28
+ }
+ v.Op = OpCopy
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto enda670b6e074269a5e1fcbdaec05596a28
+ enda670b6e074269a5e1fcbdaec05596a28:
+ ;
+ // match: (ANDLconst [c] (MOVLconst [d]))
+ // cond:
+ // result: (MOVLconst [c&d])
+ {
+ c := v.AuxInt
+ if v.Args[0].Op != OpAMD64MOVLconst {
+ goto end7bfd24059369753eadd235f07e2dd7b8
+ }
+ d := v.Args[0].AuxInt
+ v.Op = OpAMD64MOVLconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c & d
+ return true
+ }
+ goto end7bfd24059369753eadd235f07e2dd7b8
+ end7bfd24059369753eadd235f07e2dd7b8:
+ ;
+ case OpAMD64ANDQ:
+ // match: (ANDQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (ANDQconst [c] x)
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVQconst {
+ goto end048fadc69e81103480015b84b9cafff7
+ }
+ c := v.Args[1].AuxInt
+ if !(is32Bit(c)) {
+ goto end048fadc69e81103480015b84b9cafff7
+ }
+ v.Op = OpAMD64ANDQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto end048fadc69e81103480015b84b9cafff7
+ end048fadc69e81103480015b84b9cafff7:
+ ;
+ // match: (ANDQ (MOVQconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (ANDQconst [c] x)
+ {
+ if v.Args[0].Op != OpAMD64MOVQconst {
+ goto end3035a3bf650b708705fd27dd857ab0a4
+ }
+ c := v.Args[0].AuxInt
+ x := v.Args[1]
+ if !(is32Bit(c)) {
+ goto end3035a3bf650b708705fd27dd857ab0a4
+ }
+ v.Op = OpAMD64ANDQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto end3035a3bf650b708705fd27dd857ab0a4
+ end3035a3bf650b708705fd27dd857ab0a4:
+ ;
+ case OpAMD64ANDQconst:
+ // match: (ANDQconst [0] _)
+ // cond:
+ // result: (MOVQconst [0])
+ {
+ if v.AuxInt != 0 {
+ goto end57018c1d0f54fd721521095b4832bab2
+ }
+ v.Op = OpAMD64MOVQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = 0
+ return true
+ }
+ goto end57018c1d0f54fd721521095b4832bab2
+ end57018c1d0f54fd721521095b4832bab2:
+ ;
+ // match: (ANDQconst [-1] x)
+ // cond:
+ // result: (Copy x)
+ {
+ if v.AuxInt != -1 {
+ goto end993d44ced14a02748f2d0e77230e8991
+ }
+ x := v.Args[0]
+ v.Op = OpCopy
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto end993d44ced14a02748f2d0e77230e8991
+ end993d44ced14a02748f2d0e77230e8991:
+ ;
+ // match: (ANDQconst [c] (MOVQconst [d]))
+ // cond:
+ // result: (MOVQconst [c&d])
+ {
+ c := v.AuxInt
+ if v.Args[0].Op != OpAMD64MOVQconst {
+ goto end67ca66494705b0345a5f22c710225292
+ }
+ d := v.Args[0].AuxInt
+ v.Op = OpAMD64MOVQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c & d
+ return true
+ }
+ goto end67ca66494705b0345a5f22c710225292
+ end67ca66494705b0345a5f22c710225292:
+ ;
+ case OpAMD64ANDW:
+ // match: (ANDW x (MOVWconst [c]))
+ // cond:
+ // result: (ANDWconst [c] x)
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVWconst {
+ goto enda77a39f65a5eb3436a5842eab69a3103
+ }
+ c := v.Args[1].AuxInt
+ v.Op = OpAMD64ANDWconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto enda77a39f65a5eb3436a5842eab69a3103
+ enda77a39f65a5eb3436a5842eab69a3103:
+ ;
+ // match: (ANDW (MOVWconst [c]) x)
+ // cond:
+ // result: (ANDWconst [c] x)
+ {
+ if v.Args[0].Op != OpAMD64MOVWconst {
+ goto endea2a25eb525a5dbf6d5132d84ea4e7a5
+ }
+ c := v.Args[0].AuxInt
+ x := v.Args[1]
+ v.Op = OpAMD64ANDWconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto endea2a25eb525a5dbf6d5132d84ea4e7a5
+ endea2a25eb525a5dbf6d5132d84ea4e7a5:
+ ;
+ case OpAMD64ANDWconst:
+ // match: (ANDWconst [c] _)
+ // cond: int16(c)==0
+ // result: (MOVWconst [0])
+ {
+ c := v.AuxInt
+ if !(int16(c) == 0) {
+ goto end336ece33b4f0fb44dfe1f24981df7b74
+ }
+ v.Op = OpAMD64MOVWconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = 0
+ return true
+ }
+ goto end336ece33b4f0fb44dfe1f24981df7b74
+ end336ece33b4f0fb44dfe1f24981df7b74:
+ ;
+ // match: (ANDWconst [c] x)
+ // cond: int16(c)==-1
+ // result: (Copy x)
+ {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(int16(c) == -1) {
+ goto ende01402832ff041ac3e12fc077684125f
+ }
+ v.Op = OpCopy
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto ende01402832ff041ac3e12fc077684125f
+ ende01402832ff041ac3e12fc077684125f:
+ ;
+ // match: (ANDWconst [c] (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [c&d])
+ {
+ c := v.AuxInt
+ if v.Args[0].Op != OpAMD64MOVWconst {
+ goto end250eb27fcac10bf6c0d96ce66a21726e
+ }
+ d := v.Args[0].AuxInt
+ v.Op = OpAMD64MOVWconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c & d
+ return true
+ }
+ goto end250eb27fcac10bf6c0d96ce66a21726e
+ end250eb27fcac10bf6c0d96ce66a21726e:
+ ;
+ case OpAdd16:
+ // match: (Add16 x y)
+ // cond:
+ // result: (ADDW x y)
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ADDW
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ goto ende604481c6de9fe4574cb2954ba2ddc67
+ ende604481c6de9fe4574cb2954ba2ddc67:
+ ;
+ case OpAdd32:
+ // match: (Add32 x y)
+ // cond:
+ // result: (ADDL x y)
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ADDL
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ goto endc445ea2a65385445676cd684ae9a42b5
+ endc445ea2a65385445676cd684ae9a42b5:
+ ;
+ case OpAdd64:
+ // match: (Add64 x y)
+ // cond:
+ // result: (ADDQ x y)
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ADDQ
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ goto endd88f18b3f39e3ccc201477a616f0abc0
+ endd88f18b3f39e3ccc201477a616f0abc0:
+ ;
+ case OpAdd8:
+ // match: (Add8 x y)
+ // cond:
+ // result: (ADDB x y)
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ADDB
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ goto end6117c84a6b75c1b816b3fb095bc5f656
+ end6117c84a6b75c1b816b3fb095bc5f656:
+ ;
+ case OpAddPtr:
+ // match: (AddPtr x y)
+ // cond:
+ // result: (ADDQ x y)
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ADDQ
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ goto enda1d5640788c7157996f9d4af602dec1c
+ enda1d5640788c7157996f9d4af602dec1c:
+ ;
+ case OpAddr:
+ // match: (Addr {sym} base)
+ // cond:
+ // result: (LEAQ {sym} base)
+ {
+ sym := v.Aux
+ base := v.Args[0]
+ v.Op = OpAMD64LEAQ
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.Aux = sym
+ v.AddArg(base)
+ return true
+ }
+ goto end53cad0c3c9daa5575680e77c14e05e72
+ end53cad0c3c9daa5575680e77c14e05e72:
+ ;
+ case OpAnd16:
+ // match: (And16 x y)
+ // cond:
+ // result: (ANDW x y)
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDW
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ goto end1c01f04a173d86ce1a6d1ef59e753014
+ end1c01f04a173d86ce1a6d1ef59e753014:
+ ;
+ case OpAnd32:
+ // match: (And32 x y)
+ // cond:
+ // result: (ANDL x y)
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDL
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ goto end6b9eb9375b3a859028a6ba6bf6b8ec88
+ end6b9eb9375b3a859028a6ba6bf6b8ec88:
+ ;
+ case OpAnd64:
+ // match: (And64 x y)
+ // cond:
+ // result: (ANDQ x y)
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDQ
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ goto enda0bde5853819d05fa2b7d3b723629552
+ enda0bde5853819d05fa2b7d3b723629552:
+ ;
+ case OpAnd8:
+ // match: (And8 x y)
+ // cond:
+ // result: (ANDB x y)
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDB
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ goto end0f53bee6291f1229b43aa1b5f977b4f2
+ end0f53bee6291f1229b43aa1b5f977b4f2:
+ ;
+ case OpAMD64CMPB:
+ // match: (CMPB x (MOVBconst [c]))
+ // cond:
+ // result: (CMPBconst x [c])
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVBconst {
+ goto end52190c0b8759133aa6c540944965c4c0
+ }
+ c := v.Args[1].AuxInt
+ v.Op = OpAMD64CMPBconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AuxInt = c
+ return true
+ }
+ goto end52190c0b8759133aa6c540944965c4c0
+ end52190c0b8759133aa6c540944965c4c0:
+ ;
+ // match: (CMPB (MOVBconst [c]) x)
+ // cond:
+ // result: (InvertFlags (CMPBconst <TypeFlags> x [c]))
+ {
+ if v.Args[0].Op != OpAMD64MOVBconst {
+ goto end6798593f4f9a27e90de089b3248187fd
+ }
+ c := v.Args[0].AuxInt
+ x := v.Args[1]
+ v.Op = OpAMD64InvertFlags
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPBconst, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AuxInt = c
+ v.AddArg(v0)
+ return true
+ }
+ goto end6798593f4f9a27e90de089b3248187fd
+ end6798593f4f9a27e90de089b3248187fd:
+ ;
+ case OpAMD64CMPL:
+ // match: (CMPL x (MOVLconst [c]))
+ // cond:
+ // result: (CMPLconst x [c])
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVLconst {
+ goto end49ff4559c4bdecb2aef0c905e2d9a6cf
+ }
+ c := v.Args[1].AuxInt
+ v.Op = OpAMD64CMPLconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AuxInt = c
+ return true
+ }
+ goto end49ff4559c4bdecb2aef0c905e2d9a6cf
+ end49ff4559c4bdecb2aef0c905e2d9a6cf:
+ ;
+ // match: (CMPL (MOVLconst [c]) x)
+ // cond:
+ // result: (InvertFlags (CMPLconst <TypeFlags> x [c]))
+ {
+ if v.Args[0].Op != OpAMD64MOVLconst {
+ goto end3c04e861f07a442be9e2f5e0e0d07cce
+ }
+ c := v.Args[0].AuxInt
+ x := v.Args[1]
+ v.Op = OpAMD64InvertFlags
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPLconst, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AuxInt = c
+ v.AddArg(v0)
+ return true
+ }
+ goto end3c04e861f07a442be9e2f5e0e0d07cce
+ end3c04e861f07a442be9e2f5e0e0d07cce:
+ ;
+ case OpAMD64CMPQ:
+ // match: (CMPQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (CMPQconst x [c])
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVQconst {
+ goto end3bbb2c6caa57853a7561738ce3c0c630
+ }
+ c := v.Args[1].AuxInt
+ if !(is32Bit(c)) {
+ goto end3bbb2c6caa57853a7561738ce3c0c630
+ }
+ v.Op = OpAMD64CMPQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AuxInt = c
+ return true
+ }
+ goto end3bbb2c6caa57853a7561738ce3c0c630
+ end3bbb2c6caa57853a7561738ce3c0c630:
+ ;
+ // match: (CMPQ (MOVQconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (InvertFlags (CMPQconst <TypeFlags> x [c]))
+ {
+ if v.Args[0].Op != OpAMD64MOVQconst {
+ goto end5edbe48a495a51ecabd3b2c0ed44a3d3
+ }
+ c := v.Args[0].AuxInt
+ x := v.Args[1]
+ if !(is32Bit(c)) {
+ goto end5edbe48a495a51ecabd3b2c0ed44a3d3
+ }
+ v.Op = OpAMD64InvertFlags
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPQconst, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AuxInt = c
+ v.AddArg(v0)
+ return true
+ }
+ goto end5edbe48a495a51ecabd3b2c0ed44a3d3
+ end5edbe48a495a51ecabd3b2c0ed44a3d3:
+ ;
+ case OpAMD64CMPW:
+ // match: (CMPW x (MOVWconst [c]))
+ // cond:
+ // result: (CMPWconst x [c])
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVWconst {
+ goto end310a9ba58ac35c97587e08c63fe8a46c
+ }
+ c := v.Args[1].AuxInt
+ v.Op = OpAMD64CMPWconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AuxInt = c
+ return true
+ }
+ goto end310a9ba58ac35c97587e08c63fe8a46c
+ end310a9ba58ac35c97587e08c63fe8a46c:
+ ;
+ // match: (CMPW (MOVWconst [c]) x)
+ // cond:
+ // result: (InvertFlags (CMPWconst <TypeFlags> x [c]))
+ {
+ if v.Args[0].Op != OpAMD64MOVWconst {
+ goto end1ce191aaab0f4dd3b98dafdfbfac13ce
+ }
+ c := v.Args[0].AuxInt
+ x := v.Args[1]
+ v.Op = OpAMD64InvertFlags
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPWconst, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AuxInt = c
+ v.AddArg(v0)
+ return true
+ }
+ goto end1ce191aaab0f4dd3b98dafdfbfac13ce
+ end1ce191aaab0f4dd3b98dafdfbfac13ce:
+ ;
+ case OpClosureCall:
+ // match: (ClosureCall [argwid] entry closure mem)
+ // cond:
+ // result: (CALLclosure [argwid] entry closure mem)
+ {
+ argwid := v.AuxInt
+ entry := v.Args[0]
+ closure := v.Args[1]
+ mem := v.Args[2]
+ v.Op = OpAMD64CALLclosure
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = argwid
+ v.AddArg(entry)
+ v.AddArg(closure)
+ v.AddArg(mem)
+ return true
+ }
+ goto endfd75d26316012d86cb71d0dd1214259b
+ endfd75d26316012d86cb71d0dd1214259b:
+ ;
+ case OpCom16:
+ // match: (Com16 x)
+ // cond:
+ // result: (NOTW x)
+ {
+ x := v.Args[0]
+ v.Op = OpAMD64NOTW
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto end1b14ba8d7d7aa585ec0a211827f280ae
+ end1b14ba8d7d7aa585ec0a211827f280ae:
+ ;
+ case OpCom32:
+ // match: (Com32 x)
+ // cond:
+ // result: (NOTL x)
+ {
+ x := v.Args[0]
+ v.Op = OpAMD64NOTL
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto end6eb124ba3bdb3fd6031414370852feb6
+ end6eb124ba3bdb3fd6031414370852feb6:
+ ;
+ case OpCom64:
+ // match: (Com64 x)
+ // cond:
+ // result: (NOTQ x)
+ {
+ x := v.Args[0]
+ v.Op = OpAMD64NOTQ
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto endf5f3b355a87779c347e305719dddda05
+ endf5f3b355a87779c347e305719dddda05:
+ ;
+ case OpCom8:
+ // match: (Com8 x)
+ // cond:
+ // result: (NOTB x)
+ {
+ x := v.Args[0]
+ v.Op = OpAMD64NOTB
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto end1c7c5c055d663ccf1f05fbc4883030c6
+ end1c7c5c055d663ccf1f05fbc4883030c6:
+ ;
+ case OpConst16:
+ // match: (Const16 [val])
+ // cond:
+ // result: (MOVWconst [val])
+ {
+ val := v.AuxInt
+ v.Op = OpAMD64MOVWconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = val
+ return true
+ }
+ goto end2c6c92f297873b8ac12bd035d56d001e
+ end2c6c92f297873b8ac12bd035d56d001e:
+ ;
+ case OpConst32:
+ // match: (Const32 [val])
+ // cond:
+ // result: (MOVLconst [val])
+ {
+ val := v.AuxInt
+ v.Op = OpAMD64MOVLconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = val
+ return true
+ }
+ goto enddae5807662af67143a3ac3ad9c63bae5
+ enddae5807662af67143a3ac3ad9c63bae5:
+ ;
+ case OpConst64:
+ // match: (Const64 [val])
+ // cond:
+ // result: (MOVQconst [val])
+ {
+ val := v.AuxInt
+ v.Op = OpAMD64MOVQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = val
+ return true
+ }
+ goto endc630434ae7f143ab69d5f482a9b52b5f
+ endc630434ae7f143ab69d5f482a9b52b5f:
+ ;
+ case OpConst8:
+ // match: (Const8 [val])
+ // cond:
+ // result: (MOVBconst [val])
+ {
+ val := v.AuxInt
+ v.Op = OpAMD64MOVBconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = val
+ return true
+ }
+ goto end200524c722ed14ca935ba47f8f30327d
+ end200524c722ed14ca935ba47f8f30327d:
+ ;
+ case OpConstBool:
+ // match: (ConstBool {b})
+ // cond: !b.(bool)
+ // result: (MOVBconst [0])
+ {
+ b := v.Aux
+ if !(!b.(bool)) {
+ goto end876159ea073d2dcefcc251667c1a7780
+ }
+ v.Op = OpAMD64MOVBconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = 0
+ return true
+ }
+ goto end876159ea073d2dcefcc251667c1a7780
+ end876159ea073d2dcefcc251667c1a7780:
+ ;
+ // match: (ConstBool {b})
+ // cond: b.(bool)
+ // result: (MOVBconst [1])
+ {
+ b := v.Aux
+ if !(b.(bool)) {
+ goto end0dacad3f7cad53905aad5303391447f6
+ }
+ v.Op = OpAMD64MOVBconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = 1
+ return true
+ }
+ goto end0dacad3f7cad53905aad5303391447f6
+ end0dacad3f7cad53905aad5303391447f6:
+ ;
+ case OpConstNil:
+ // match: (ConstNil)
+ // cond:
+ // result: (MOVQconst [0])
+ {
+ v.Op = OpAMD64MOVQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = 0
+ return true
+ }
+ goto endea557d921056c25b945a49649e4b9b91
+ endea557d921056c25b945a49649e4b9b91:
+ ;
+ case OpConstPtr:
+ // match: (ConstPtr [val])
+ // cond:
+ // result: (MOVQconst [val])
+ {
+ val := v.AuxInt
+ v.Op = OpAMD64MOVQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = val
+ return true
+ }
+ goto endc395c0a53eeccf597e225a07b53047d1
+ endc395c0a53eeccf597e225a07b53047d1:
+ ;
+ case OpConvNop:
+ // match: (ConvNop <t> x)
+ // cond: t == x.Type
+ // result: (Copy x)
+ {
+ t := v.Type
+ x := v.Args[0]
+ if !(t == x.Type) {
+ goto end6c588ed8aedc7dca8c06b4ada77e3ddd
+ }
+ v.Op = OpCopy
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto end6c588ed8aedc7dca8c06b4ada77e3ddd
+ end6c588ed8aedc7dca8c06b4ada77e3ddd:
+ ;
+ // match: (ConvNop <t> x)
+ // cond: t.IsInteger() && x.Type.IsInteger() && t.Size() == x.Type.Size()
+ // result: (Copy x)
+ {
+ t := v.Type
+ x := v.Args[0]
+ if !(t.IsInteger() && x.Type.IsInteger() && t.Size() == x.Type.Size()) {
+ goto endfb3563f9df3468ad8123dbaa962cdbf7
+ }
+ v.Op = OpCopy
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto endfb3563f9df3468ad8123dbaa962cdbf7
+ endfb3563f9df3468ad8123dbaa962cdbf7:
+ ;
+ case OpEq16:
+ // match: (Eq16 x y)
+ // cond:
+ // result: (SETEQ (CMPW <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETEQ
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPW, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto end66a03470b5b3e8457ba205ccfcaccea6
+ end66a03470b5b3e8457ba205ccfcaccea6:
+ ;
+ case OpEq32:
+ // match: (Eq32 x y)
+ // cond:
+ // result: (SETEQ (CMPL <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETEQ
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPL, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto end4d77d0b016f93817fd6e5f60fa0e7ef2
+ end4d77d0b016f93817fd6e5f60fa0e7ef2:
+ ;
+ case OpEq64:
+ // match: (Eq64 x y)
+ // cond:
+ // result: (SETEQ (CMPQ <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETEQ
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto endae6c62e4e20b4f62694b6ee40dbd9211
+ endae6c62e4e20b4f62694b6ee40dbd9211:
+ ;
+ case OpEq8:
+ // match: (Eq8 x y)
+ // cond:
+ // result: (SETEQ (CMPB <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETEQ
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPB, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto end84a692e769900e3adbfe00718d2169e0
+ end84a692e769900e3adbfe00718d2169e0:
+ ;
+ case OpEqPtr:
+ // match: (EqPtr x y)
+ // cond:
+ // result: (SETEQ (CMPQ <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETEQ
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto end6de1d39c9d151e5e503d643bd835356e
+ end6de1d39c9d151e5e503d643bd835356e:
+ ;
+ case OpGeq16:
+ // match: (Geq16 x y)
+ // cond:
+ // result: (SETGE (CMPW <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETGE
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPW, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto end26084bf821f9e418934fee812632b774
+ end26084bf821f9e418934fee812632b774:
+ ;
+ case OpGeq16U:
+ // match: (Geq16U x y)
+ // cond:
+ // result: (SETAE (CMPW <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETAE
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPW, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto end20b00f850ca834cb2013414645c19ad9
+ end20b00f850ca834cb2013414645c19ad9:
+ ;
+ case OpGeq32:
+ // match: (Geq32 x y)
+ // cond:
+ // result: (SETGE (CMPL <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETGE
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPL, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto end713c3dfa0f7247dcc232bcfc916fb044
+ end713c3dfa0f7247dcc232bcfc916fb044:
+ ;
+ case OpGeq32U:
+ // match: (Geq32U x y)
+ // cond:
+ // result: (SETAE (CMPL <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETAE
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPL, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto endac2cde17ec6ab0107eabbda6407d1004
+ endac2cde17ec6ab0107eabbda6407d1004:
+ ;
+ case OpGeq64:
+ // match: (Geq64 x y)
+ // cond:
+ // result: (SETGE (CMPQ <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETGE
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto end63f44e3fec8d92723b5bde42d6d7eea0
+ end63f44e3fec8d92723b5bde42d6d7eea0:
+ ;
+ case OpGeq64U:
+ // match: (Geq64U x y)
+ // cond:
+ // result: (SETAE (CMPQ <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETAE
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto endd8d2d9faa19457f6a7b0635a756d234f
+ endd8d2d9faa19457f6a7b0635a756d234f:
+ ;
+ case OpGeq8:
+ // match: (Geq8 x y)
+ // cond:
+ // result: (SETGE (CMPB <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETGE
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPB, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto endb5f40ee158007e675b2113c3ce962382
+ endb5f40ee158007e675b2113c3ce962382:
+ ;
+ case OpGeq8U:
+ // match: (Geq8U x y)
+ // cond:
+ // result: (SETAE (CMPB <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETAE
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPB, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto endd30ee67afc0284c419cef70261f61452
+ endd30ee67afc0284c419cef70261f61452:
+ ;
+ case OpGreater16:
+ // match: (Greater16 x y)
+ // cond:
+ // result: (SETG (CMPW <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETG
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPW, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto end5bc9fdb7e563a6b949e42d721903cb58
+ end5bc9fdb7e563a6b949e42d721903cb58:
+ ;
+ case OpGreater16U:
+ // match: (Greater16U x y)
+ // cond:
+ // result: (SETA (CMPW <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETA
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPW, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto endd5b646f04fd839d11082a9ff6adb4a3f
+ endd5b646f04fd839d11082a9ff6adb4a3f:
+ ;
+ case OpGreater32:
+ // match: (Greater32 x y)
+ // cond:
+ // result: (SETG (CMPL <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETG
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPL, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto endbf0b2b1368aadff48969a7386eee5795
+ endbf0b2b1368aadff48969a7386eee5795:
+ ;
+ case OpGreater32U:
+ // match: (Greater32U x y)
+ // cond:
+ // result: (SETA (CMPL <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETA
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPL, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto end033c944272dc0af6fafe33f667cf7485
+ end033c944272dc0af6fafe33f667cf7485:
+ ;
+ case OpGreater64:
+ // match: (Greater64 x y)
+ // cond:
+ // result: (SETG (CMPQ <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETG
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto endaef0cfa5e27e23cf5e527061cf251069
+ endaef0cfa5e27e23cf5e527061cf251069:
+ ;
+ case OpGreater64U:
+ // match: (Greater64U x y)
+ // cond:
+ // result: (SETA (CMPQ <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETA
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto end2afc16a19fe1073dfa86770a78eba2b4
+ end2afc16a19fe1073dfa86770a78eba2b4:
+ ;
+ case OpGreater8:
+ // match: (Greater8 x y)
+ // cond:
+ // result: (SETG (CMPB <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETG
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPB, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto endbdb1e5f6b760cf02e0fc2f474622e6be
+ endbdb1e5f6b760cf02e0fc2f474622e6be:
+ ;
+ case OpGreater8U:
+ // match: (Greater8U x y)
+ // cond:
+ // result: (SETA (CMPB <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETA
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPB, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto end22eaafbcfe70447f79d9b3e6cc395bbd
+ end22eaafbcfe70447f79d9b3e6cc395bbd:
+ ;
+ case OpIsInBounds:
+ // match: (IsInBounds idx len)
+ // cond:
+ // result: (SETB (CMPQ <TypeFlags> idx len))
+ {
+ idx := v.Args[0]
+ len := v.Args[1]
+ v.Op = OpAMD64SETB
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(idx)
+ v0.AddArg(len)
+ v.AddArg(v0)
+ return true
+ }
+ goto endb51d371171154c0f1613b687757e0576
+ endb51d371171154c0f1613b687757e0576:
+ ;
+ case OpIsNonNil:
+ // match: (IsNonNil p)
+ // cond:
+ // result: (SETNE (TESTQ <TypeFlags> p p))
+ {
+ p := v.Args[0]
+ v.Op = OpAMD64SETNE
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64TESTQ, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(p)
+ v0.AddArg(p)
+ v.AddArg(v0)
+ return true
+ }
+ goto endff508c3726edfb573abc6128c177e76c
+ endff508c3726edfb573abc6128c177e76c:
+ ;
+ case OpLeq16:
+ // match: (Leq16 x y)
+ // cond:
+ // result: (SETLE (CMPW <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETLE
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPW, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto endc1916dfcb3eae58ab237e40a57e1ff16
+ endc1916dfcb3eae58ab237e40a57e1ff16:
+ ;
+ case OpLeq16U:
+ // match: (Leq16U x y)
+ // cond:
+ // result: (SETBE (CMPW <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETBE
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPW, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto end627e261aea217b5d17177b52711b8c82
+ end627e261aea217b5d17177b52711b8c82:
+ ;
+ case OpLeq32:
+ // match: (Leq32 x y)
+ // cond:
+ // result: (SETLE (CMPL <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETLE
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPL, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto endf422ecc8da0033e22242de9c67112537
+ endf422ecc8da0033e22242de9c67112537:
+ ;
+ case OpLeq32U:
+ // match: (Leq32U x y)
+ // cond:
+ // result: (SETBE (CMPL <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETBE
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPL, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto end1b39c9661896abdff8a29de509311b96
+ end1b39c9661896abdff8a29de509311b96:
+ ;
+ case OpLeq64:
+ // match: (Leq64 x y)
+ // cond:
+ // result: (SETLE (CMPQ <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETLE
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto endf03da5e28dccdb4797671f39e824fb10
+ endf03da5e28dccdb4797671f39e824fb10:
+ ;
+ case OpLeq64U:
+ // match: (Leq64U x y)
+ // cond:
+ // result: (SETBE (CMPQ <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETBE
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto end37302777dd91a5d0c6f410a5444ccb38
+ end37302777dd91a5d0c6f410a5444ccb38:
+ ;
+ case OpLeq8:
+ // match: (Leq8 x y)
+ // cond:
+ // result: (SETLE (CMPB <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETLE
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPB, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto end03be536eea60fdd98d48b17681acaf5a
+ end03be536eea60fdd98d48b17681acaf5a:
+ ;
+ case OpLeq8U:
+ // match: (Leq8U x y)
+ // cond:
+ // result: (SETBE (CMPB <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETBE
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPB, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto end661377f6745450bb1fa7fd0608ef0a86
+ end661377f6745450bb1fa7fd0608ef0a86:
+ ;
+ case OpLess16:
+ // match: (Less16 x y)
+ // cond:
+ // result: (SETL (CMPW <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETL
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPW, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto endeb09704ef62ba2695a967b6fcb42e562
+ endeb09704ef62ba2695a967b6fcb42e562:
+ ;
+ case OpLess16U:
+ // match: (Less16U x y)
+ // cond:
+ // result: (SETB (CMPW <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETB
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPW, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto end2209a57bd887f68ad732aa7da2bc7286
+ end2209a57bd887f68ad732aa7da2bc7286:
+ ;
+ case OpLess32:
+ // match: (Less32 x y)
+ // cond:
+ // result: (SETL (CMPL <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETL
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPL, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto end8da8d2030c0a323a84503c1240c566ae
+ end8da8d2030c0a323a84503c1240c566ae:
+ ;
+ case OpLess32U:
+ // match: (Less32U x y)
+ // cond:
+ // result: (SETB (CMPL <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETB
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPL, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto enddcfbbb482eb194146f4f7c8f12029a7a
+ enddcfbbb482eb194146f4f7c8f12029a7a:
+ ;
+ case OpLess64:
+ // match: (Less64 x y)
+ // cond:
+ // result: (SETL (CMPQ <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETL
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto endf8e7a24c25692045bbcfd2c9356d1a8c
+ endf8e7a24c25692045bbcfd2c9356d1a8c:
+ ;
+ case OpLess64U:
+ // match: (Less64U x y)
+ // cond:
+ // result: (SETB (CMPQ <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETB
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto end2fac0a2c2e972b5e04b5062d5786b87d
+ end2fac0a2c2e972b5e04b5062d5786b87d:
+ ;
+ case OpLess8:
+ // match: (Less8 x y)
+ // cond:
+ // result: (SETL (CMPB <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETL
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPB, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto end445ad05f8d23dfecf246ce083f1ea167
+ end445ad05f8d23dfecf246ce083f1ea167:
+ ;
+ case OpLess8U:
+ // match: (Less8U x y)
+ // cond:
+ // result: (SETB (CMPB <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETB
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPB, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto end816d1dff858c45836dfa337262e04649
+ end816d1dff858c45836dfa337262e04649:
+ ;
+ case OpLoad:
+ // match: (Load <t> ptr mem)
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (MOVQload ptr mem)
+ {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is64BitInt(t) || isPtr(t)) {
+ goto end7c4c53acf57ebc5f03273652ba1d5934
+ }
+ v.Op = OpAMD64MOVQload
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ goto end7c4c53acf57ebc5f03273652ba1d5934
+ end7c4c53acf57ebc5f03273652ba1d5934:
+ ;
+ // match: (Load <t> ptr mem)
+ // cond: is32BitInt(t)
+ // result: (MOVLload ptr mem)
+ {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is32BitInt(t)) {
+ goto ende1cfcb15bfbcfd448ce303d0882a4057
+ }
+ v.Op = OpAMD64MOVLload
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ goto ende1cfcb15bfbcfd448ce303d0882a4057
+ ende1cfcb15bfbcfd448ce303d0882a4057:
+ ;
+ // match: (Load <t> ptr mem)
+ // cond: is16BitInt(t)
+ // result: (MOVWload ptr mem)
+ {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is16BitInt(t)) {
+ goto end2d0a1304501ed9f4e9e2d288505a9c7c
+ }
+ v.Op = OpAMD64MOVWload
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ goto end2d0a1304501ed9f4e9e2d288505a9c7c
+ end2d0a1304501ed9f4e9e2d288505a9c7c:
+ ;
+ // match: (Load <t> ptr mem)
+ // cond: (t.IsBoolean() || is8BitInt(t))
+ // result: (MOVBload ptr mem)
+ {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(t.IsBoolean() || is8BitInt(t)) {
+ goto end8f83bf72293670e75b22d6627bd13f0b
+ }
+ v.Op = OpAMD64MOVBload
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ goto end8f83bf72293670e75b22d6627bd13f0b
+ end8f83bf72293670e75b22d6627bd13f0b:
+ ;
+ case OpLsh16x16:
+ // match: (Lsh16x16 <t> x y)
+ // cond:
+ // result: (ANDW (SHLW <t> x y) (SBBLcarrymask <t> (CMPWconst <TypeFlags> [16] y)))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDW
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SHLW, TypeInvalid)
+ v0.Type = t
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v1.Type = t
+ v2 := v.Block.NewValue0(v.Line, OpAMD64CMPWconst, TypeInvalid)
+ v2.Type = TypeFlags
+ v2.AuxInt = 16
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+ goto end5b63495f0e75ac68c4ce9d4afa1472d4
+ end5b63495f0e75ac68c4ce9d4afa1472d4:
+ ;
+ case OpLsh16x32:
+ // match: (Lsh16x32 <t> x y)
+ // cond:
+ // result: (ANDW (SHLW <t> x y) (SBBLcarrymask <t> (CMPLconst <TypeFlags> [16] y)))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDW
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SHLW, TypeInvalid)
+ v0.Type = t
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v1.Type = t
+ v2 := v.Block.NewValue0(v.Line, OpAMD64CMPLconst, TypeInvalid)
+ v2.Type = TypeFlags
+ v2.AuxInt = 16
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+ goto end6384dd9bdcec3046732d7347250d49f6
+ end6384dd9bdcec3046732d7347250d49f6:
+ ;
+ case OpLsh16x64:
+ // match: (Lsh16x64 <t> x y)
+ // cond:
+ // result: (ANDW (SHLW <t> x y) (SBBLcarrymask <t> (CMPQconst <TypeFlags> [16] y)))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDW
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SHLW, TypeInvalid)
+ v0.Type = t
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v1.Type = t
+ v2 := v.Block.NewValue0(v.Line, OpAMD64CMPQconst, TypeInvalid)
+ v2.Type = TypeFlags
+ v2.AuxInt = 16
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+ goto end0975ca28988350db0ad556c925d8af07
+ end0975ca28988350db0ad556c925d8af07:
+ ;
+ case OpLsh16x8:
+ // match: (Lsh16x8 <t> x y)
+ // cond:
+ // result: (ANDW (SHLW <t> x y) (SBBLcarrymask <t> (CMPBconst <TypeFlags> [16] y)))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDW
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SHLW, TypeInvalid)
+ v0.Type = t
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v1.Type = t
+ v2 := v.Block.NewValue0(v.Line, OpAMD64CMPBconst, TypeInvalid)
+ v2.Type = TypeFlags
+ v2.AuxInt = 16
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+ goto endd17c913707f29d59cfcb5d57d5f5c6ff
+ endd17c913707f29d59cfcb5d57d5f5c6ff:
+ ;
+ case OpLsh32x16:
+ // match: (Lsh32x16 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst <TypeFlags> [32] y)))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDL
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SHLL, TypeInvalid)
+ v0.Type = t
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v1.Type = t
+ v2 := v.Block.NewValue0(v.Line, OpAMD64CMPWconst, TypeInvalid)
+ v2.Type = TypeFlags
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+ goto end027b6f888054cc1dd8911fe16a6315a1
+ end027b6f888054cc1dd8911fe16a6315a1:
+ ;
+ case OpLsh32x32:
+ // match: (Lsh32x32 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst <TypeFlags> [32] y)))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDL
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SHLL, TypeInvalid)
+ v0.Type = t
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v1.Type = t
+ v2 := v.Block.NewValue0(v.Line, OpAMD64CMPLconst, TypeInvalid)
+ v2.Type = TypeFlags
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+ goto endbcc31e2bd8800d5ddb27c09d37f867b9
+ endbcc31e2bd8800d5ddb27c09d37f867b9:
+ ;
+ case OpLsh32x64:
+ // match: (Lsh32x64 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst <TypeFlags> [32] y)))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDL
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SHLL, TypeInvalid)
+ v0.Type = t
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v1.Type = t
+ v2 := v.Block.NewValue0(v.Line, OpAMD64CMPQconst, TypeInvalid)
+ v2.Type = TypeFlags
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+ goto end6797e3a3bbb0fe7eda819fe19a4d4b49
+ end6797e3a3bbb0fe7eda819fe19a4d4b49:
+ ;
+ case OpLsh32x8:
+ // match: (Lsh32x8 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst <TypeFlags> [32] y)))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDL
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SHLL, TypeInvalid)
+ v0.Type = t
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v1.Type = t
+ v2 := v.Block.NewValue0(v.Line, OpAMD64CMPBconst, TypeInvalid)
+ v2.Type = TypeFlags
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+ goto end7dd2c717933f46750e8a0871aab6fc63
+ end7dd2c717933f46750e8a0871aab6fc63:
+ ;
+ case OpLsh64x16:
+ // match: (Lsh64x16 <t> x y)
+ // cond:
+ // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst <TypeFlags> [64] y)))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDQ
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SHLQ, TypeInvalid)
+ v0.Type = t
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64SBBQcarrymask, TypeInvalid)
+ v1.Type = t
+ v2 := v.Block.NewValue0(v.Line, OpAMD64CMPWconst, TypeInvalid)
+ v2.Type = TypeFlags
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+ goto end3a2fda1dddb29e49f46ccde6f5397222
+ end3a2fda1dddb29e49f46ccde6f5397222:
+ ;
+ case OpLsh64x32:
+ // match: (Lsh64x32 <t> x y)
+ // cond:
+ // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst <TypeFlags> [64] y)))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDQ
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SHLQ, TypeInvalid)
+ v0.Type = t
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64SBBQcarrymask, TypeInvalid)
+ v1.Type = t
+ v2 := v.Block.NewValue0(v.Line, OpAMD64CMPLconst, TypeInvalid)
+ v2.Type = TypeFlags
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+ goto end147322aba732027ac2290fd8173d806a
+ end147322aba732027ac2290fd8173d806a:
+ ;
+ case OpLsh64x64:
+ // match: (Lsh64x64 <t> x y)
+ // cond:
+ // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst <TypeFlags> [64] y)))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDQ
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SHLQ, TypeInvalid)
+ v0.Type = t
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64SBBQcarrymask, TypeInvalid)
+ v1.Type = t
+ v2 := v.Block.NewValue0(v.Line, OpAMD64CMPQconst, TypeInvalid)
+ v2.Type = TypeFlags
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+ goto endeb8e78c9c960fa12e29ea07a8519649b
+ endeb8e78c9c960fa12e29ea07a8519649b:
+ ;
+ case OpLsh64x8:
+ // match: (Lsh64x8 <t> x y)
+ // cond:
+ // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst <TypeFlags> [64] y)))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDQ
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SHLQ, TypeInvalid)
+ v0.Type = t
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64SBBQcarrymask, TypeInvalid)
+ v1.Type = t
+ v2 := v.Block.NewValue0(v.Line, OpAMD64CMPBconst, TypeInvalid)
+ v2.Type = TypeFlags
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+ goto end42cdc11c34c81bbd5e8b4ad19ceec1ef
+ end42cdc11c34c81bbd5e8b4ad19ceec1ef:
+ ;
+ case OpLsh8x16:
+ // match: (Lsh8x16 <t> x y)
+ // cond:
+ // result: (ANDB (SHLB <t> x y) (SBBLcarrymask <t> (CMPWconst <TypeFlags> [8] y)))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDB
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SHLB, TypeInvalid)
+ v0.Type = t
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v1.Type = t
+ v2 := v.Block.NewValue0(v.Line, OpAMD64CMPWconst, TypeInvalid)
+ v2.Type = TypeFlags
+ v2.AuxInt = 8
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+ goto end60bf962bf5256e20b547e18e3c886aa5
+ end60bf962bf5256e20b547e18e3c886aa5:
+ ;
+ case OpLsh8x32:
+ // match: (Lsh8x32 <t> x y)
+ // cond:
+ // result: (ANDB (SHLB <t> x y) (SBBLcarrymask <t> (CMPLconst <TypeFlags> [8] y)))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDB
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SHLB, TypeInvalid)
+ v0.Type = t
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v1.Type = t
+ v2 := v.Block.NewValue0(v.Line, OpAMD64CMPLconst, TypeInvalid)
+ v2.Type = TypeFlags
+ v2.AuxInt = 8
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+ goto end8ed3445f6dbba1a87c80b140371445ce
+ end8ed3445f6dbba1a87c80b140371445ce:
+ ;
+ case OpLsh8x64:
+ // match: (Lsh8x64 <t> x y)
+ // cond:
+ // result: (ANDB (SHLB <t> x y) (SBBLcarrymask <t> (CMPQconst <TypeFlags> [8] y)))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDB
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SHLB, TypeInvalid)
+ v0.Type = t
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v1.Type = t
+ v2 := v.Block.NewValue0(v.Line, OpAMD64CMPQconst, TypeInvalid)
+ v2.Type = TypeFlags
+ v2.AuxInt = 8
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+ goto end0a03c9cc48ef1bfd74973de5f5fb02b0
+ end0a03c9cc48ef1bfd74973de5f5fb02b0:
+ ;
+ case OpLsh8x8:
+ // match: (Lsh8x8 <t> x y)
+ // cond:
+ // result: (ANDB (SHLB <t> x y) (SBBLcarrymask <t> (CMPBconst <TypeFlags> [8] y)))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDB
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SHLB, TypeInvalid)
+ v0.Type = t
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v1.Type = t
+ v2 := v.Block.NewValue0(v.Line, OpAMD64CMPBconst, TypeInvalid)
+ v2.Type = TypeFlags
+ v2.AuxInt = 8
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+ goto end781e3a47b186cf99fcb7137afd3432b9
+ end781e3a47b186cf99fcb7137afd3432b9:
+ ;
+ case OpAMD64MOVBQSX:
+ // match: (MOVBQSX (MOVBload ptr mem))
+ // cond:
+ // result: (MOVBQSXload ptr mem)
+ {
+ if v.Args[0].Op != OpAMD64MOVBload {
+ goto enda3a5eeb5767e31f42b0b6c1db8311ebb
+ }
+ ptr := v.Args[0].Args[0]
+ mem := v.Args[0].Args[1]
+ v.Op = OpAMD64MOVBQSXload
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ goto enda3a5eeb5767e31f42b0b6c1db8311ebb
+ enda3a5eeb5767e31f42b0b6c1db8311ebb:
+ ;
+ case OpAMD64MOVBQZX:
+ // match: (MOVBQZX (MOVBload ptr mem))
+ // cond:
+ // result: (MOVBQZXload ptr mem)
+ {
+ if v.Args[0].Op != OpAMD64MOVBload {
+ goto end9510a482da21d9945d53c4233b19e825
+ }
+ ptr := v.Args[0].Args[0]
+ mem := v.Args[0].Args[1]
+ v.Op = OpAMD64MOVBQZXload
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ goto end9510a482da21d9945d53c4233b19e825
+ end9510a482da21d9945d53c4233b19e825:
+ ;
+ case OpAMD64MOVBstore:
+ // match: (MOVBstore ptr (MOVBQSX x) mem)
+ // cond:
+ // result: (MOVBstore ptr x mem)
+ {
+ ptr := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVBQSX {
+ goto endc356ef104095b9217b36b594f85171c6
+ }
+ x := v.Args[1].Args[0]
+ mem := v.Args[2]
+ v.Op = OpAMD64MOVBstore
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ goto endc356ef104095b9217b36b594f85171c6
+ endc356ef104095b9217b36b594f85171c6:
+ ;
+ // match: (MOVBstore ptr (MOVBQZX x) mem)
+ // cond:
+ // result: (MOVBstore ptr x mem)
+ {
+ ptr := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVBQZX {
+ goto end25841a70cce7ac32c6d5e561b992d3df
+ }
+ x := v.Args[1].Args[0]
+ mem := v.Args[2]
+ v.Op = OpAMD64MOVBstore
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ goto end25841a70cce7ac32c6d5e561b992d3df
+ end25841a70cce7ac32c6d5e561b992d3df:
+ ;
+ case OpAMD64MOVLstore:
+ // match: (MOVLstore ptr (MOVLQSX x) mem)
+ // cond:
+ // result: (MOVLstore ptr x mem)
+ {
+ ptr := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVLQSX {
+ goto endf79c699f70cb356abb52dc28f4abf46b
+ }
+ x := v.Args[1].Args[0]
+ mem := v.Args[2]
+ v.Op = OpAMD64MOVLstore
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ goto endf79c699f70cb356abb52dc28f4abf46b
+ endf79c699f70cb356abb52dc28f4abf46b:
+ ;
+ // match: (MOVLstore ptr (MOVLQZX x) mem)
+ // cond:
+ // result: (MOVLstore ptr x mem)
+ {
+ ptr := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVLQZX {
+ goto end67d1549d16d373e4ad6a89298866d1bc
+ }
+ x := v.Args[1].Args[0]
+ mem := v.Args[2]
+ v.Op = OpAMD64MOVLstore
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ goto end67d1549d16d373e4ad6a89298866d1bc
+ end67d1549d16d373e4ad6a89298866d1bc:
+ ;
+ case OpAMD64MOVQload:
+ // match: (MOVQload [off1] (ADDQconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVQload [addOff(off1, off2)] ptr mem)
+ {
+ off1 := v.AuxInt
+ if v.Args[0].Op != OpAMD64ADDQconst {
+ goto end843d29b538c4483b432b632e5666d6e3
+ }
+ off2 := v.Args[0].AuxInt
+ ptr := v.Args[0].Args[0]
+ mem := v.Args[1]
+ v.Op = OpAMD64MOVQload
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = addOff(off1, off2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ goto end843d29b538c4483b432b632e5666d6e3
+ end843d29b538c4483b432b632e5666d6e3:
+ ;
+ // match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: (sym1 == nil || sym2 == nil)
+ // result: (MOVQload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
+ {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ if v.Args[0].Op != OpAMD64LEAQ {
+ goto end227426af95e74caddcf59fdcd30ca8bc
+ }
+ off2 := v.Args[0].AuxInt
+ sym2 := v.Args[0].Aux
+ base := v.Args[0].Args[0]
+ mem := v.Args[1]
+ if !(sym1 == nil || sym2 == nil) {
+ goto end227426af95e74caddcf59fdcd30ca8bc
+ }
+ v.Op = OpAMD64MOVQload
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = addOff(off1, off2)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ goto end227426af95e74caddcf59fdcd30ca8bc
+ end227426af95e74caddcf59fdcd30ca8bc:
+ ;
+ // match: (MOVQload [off1] (LEAQ8 [off2] ptr idx) mem)
+ // cond:
+ // result: (MOVQloadidx8 [addOff(off1, off2)] ptr idx mem)
+ {
+ off1 := v.AuxInt
+ if v.Args[0].Op != OpAMD64LEAQ8 {
+ goto end02f5ad148292c46463e7c20d3b821735
+ }
+ off2 := v.Args[0].AuxInt
+ ptr := v.Args[0].Args[0]
+ idx := v.Args[0].Args[1]
+ mem := v.Args[1]
+ v.Op = OpAMD64MOVQloadidx8
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = addOff(off1, off2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ goto end02f5ad148292c46463e7c20d3b821735
+ end02f5ad148292c46463e7c20d3b821735:
+ ;
+ case OpAMD64MOVQloadidx8:
+ // match: (MOVQloadidx8 [off1] (ADDQconst [off2] ptr) idx mem)
+ // cond:
+ // result: (MOVQloadidx8 [addOff(off1, off2)] ptr idx mem)
+ {
+ off1 := v.AuxInt
+ if v.Args[0].Op != OpAMD64ADDQconst {
+ goto ende81e44bcfb11f90916ccb440c590121f
+ }
+ off2 := v.Args[0].AuxInt
+ ptr := v.Args[0].Args[0]
+ idx := v.Args[1]
+ mem := v.Args[2]
+ v.Op = OpAMD64MOVQloadidx8
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = addOff(off1, off2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ goto ende81e44bcfb11f90916ccb440c590121f
+ ende81e44bcfb11f90916ccb440c590121f:
+ ;
+ case OpAMD64MOVQstore:
+ // match: (MOVQstore [off1] (ADDQconst [off2] ptr) val mem)
+ // cond:
+ // result: (MOVQstore [addOff(off1, off2)] ptr val mem)
+ {
+ off1 := v.AuxInt
+ if v.Args[0].Op != OpAMD64ADDQconst {
+ goto end2108c693a43c79aed10b9246c39c80aa
+ }
+ off2 := v.Args[0].AuxInt
+ ptr := v.Args[0].Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.Op = OpAMD64MOVQstore
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = addOff(off1, off2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ goto end2108c693a43c79aed10b9246c39c80aa
+ end2108c693a43c79aed10b9246c39c80aa:
+ ;
+ // match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: (sym1 == nil || sym2 == nil)
+ // result: (MOVQstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
+ {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ if v.Args[0].Op != OpAMD64LEAQ {
+ goto end5061f48193268a5eb1e1740bdd23c43d
+ }
+ off2 := v.Args[0].AuxInt
+ sym2 := v.Args[0].Aux
+ base := v.Args[0].Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(sym1 == nil || sym2 == nil) {
+ goto end5061f48193268a5eb1e1740bdd23c43d
+ }
+ v.Op = OpAMD64MOVQstore
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = addOff(off1, off2)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ goto end5061f48193268a5eb1e1740bdd23c43d
+ end5061f48193268a5eb1e1740bdd23c43d:
+ ;
+ // match: (MOVQstore [off1] (LEAQ8 [off2] ptr idx) val mem)
+ // cond:
+ // result: (MOVQstoreidx8 [addOff(off1, off2)] ptr idx val mem)
+ {
+ off1 := v.AuxInt
+ if v.Args[0].Op != OpAMD64LEAQ8 {
+ goto endce1db8c8d37c8397c500a2068a65c215
+ }
+ off2 := v.Args[0].AuxInt
+ ptr := v.Args[0].Args[0]
+ idx := v.Args[0].Args[1]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.Op = OpAMD64MOVQstoreidx8
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = addOff(off1, off2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ goto endce1db8c8d37c8397c500a2068a65c215
+ endce1db8c8d37c8397c500a2068a65c215:
+ ;
+ case OpAMD64MOVQstoreidx8:
+ // match: (MOVQstoreidx8 [off1] (ADDQconst [off2] ptr) idx val mem)
+ // cond:
+ // result: (MOVQstoreidx8 [addOff(off1, off2)] ptr idx val mem)
+ {
+ off1 := v.AuxInt
+ if v.Args[0].Op != OpAMD64ADDQconst {
+ goto end01c970657b0fdefeab82458c15022163
+ }
+ off2 := v.Args[0].AuxInt
+ ptr := v.Args[0].Args[0]
+ idx := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.Op = OpAMD64MOVQstoreidx8
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = addOff(off1, off2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ goto end01c970657b0fdefeab82458c15022163
+ end01c970657b0fdefeab82458c15022163:
+ ;
+ case OpAMD64MOVWstore:
+ // match: (MOVWstore ptr (MOVWQSX x) mem)
+ // cond:
+ // result: (MOVWstore ptr x mem)
+ {
+ ptr := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVWQSX {
+ goto endcc13af07a951a61fcfec3299342f7e1f
+ }
+ x := v.Args[1].Args[0]
+ mem := v.Args[2]
+ v.Op = OpAMD64MOVWstore
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ goto endcc13af07a951a61fcfec3299342f7e1f
+ endcc13af07a951a61fcfec3299342f7e1f:
+ ;
+ // match: (MOVWstore ptr (MOVWQZX x) mem)
+ // cond:
+ // result: (MOVWstore ptr x mem)
+ {
+ ptr := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVWQZX {
+ goto end4e7df15ee55bdd73d8ecd61b759134d4
+ }
+ x := v.Args[1].Args[0]
+ mem := v.Args[2]
+ v.Op = OpAMD64MOVWstore
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ goto end4e7df15ee55bdd73d8ecd61b759134d4
+ end4e7df15ee55bdd73d8ecd61b759134d4:
+ ;
+ case OpAMD64MULL:
+ // match: (MULL x (MOVLconst [c]))
+ // cond:
+ // result: (MULLconst [c] x)
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVLconst {
+ goto end893477a261bcad6c2821b77c83075c6c
+ }
+ c := v.Args[1].AuxInt
+ v.Op = OpAMD64MULLconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto end893477a261bcad6c2821b77c83075c6c
+ end893477a261bcad6c2821b77c83075c6c:
+ ;
+ // match: (MULL (MOVLconst [c]) x)
+ // cond:
+ // result: (MULLconst [c] x)
+ {
+ if v.Args[0].Op != OpAMD64MOVLconst {
+ goto end8a0f957c528a54eecb0dbfc5d96e017a
+ }
+ c := v.Args[0].AuxInt
+ x := v.Args[1]
+ v.Op = OpAMD64MULLconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto end8a0f957c528a54eecb0dbfc5d96e017a
+ end8a0f957c528a54eecb0dbfc5d96e017a:
+ ;
+ case OpAMD64MULLconst:
+ // match: (MULLconst [c] (MOVLconst [d]))
+ // cond:
+ // result: (MOVLconst [c*d])
+ {
+ c := v.AuxInt
+ if v.Args[0].Op != OpAMD64MOVLconst {
+ goto endd5732835ed1276ef8b728bcfc1289f73
+ }
+ d := v.Args[0].AuxInt
+ v.Op = OpAMD64MOVLconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c * d
+ return true
+ }
+ goto endd5732835ed1276ef8b728bcfc1289f73
+ endd5732835ed1276ef8b728bcfc1289f73:
+ ;
+ case OpAMD64MULQ:
+ // match: (MULQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (MULQconst [c] x)
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVQconst {
+ goto endb38c6e3e0ddfa25ba0ef9684ac1528c0
+ }
+ c := v.Args[1].AuxInt
+ if !(is32Bit(c)) {
+ goto endb38c6e3e0ddfa25ba0ef9684ac1528c0
+ }
+ v.Op = OpAMD64MULQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto endb38c6e3e0ddfa25ba0ef9684ac1528c0
+ endb38c6e3e0ddfa25ba0ef9684ac1528c0:
+ ;
+ // match: (MULQ (MOVQconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (MULQconst [c] x)
+ {
+ if v.Args[0].Op != OpAMD64MOVQconst {
+ goto end9cb4f29b0bd7141639416735dcbb3b87
+ }
+ c := v.Args[0].AuxInt
+ x := v.Args[1]
+ if !(is32Bit(c)) {
+ goto end9cb4f29b0bd7141639416735dcbb3b87
+ }
+ v.Op = OpAMD64MULQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto end9cb4f29b0bd7141639416735dcbb3b87
+ end9cb4f29b0bd7141639416735dcbb3b87:
+ ;
+ case OpAMD64MULQconst:
+ // match: (MULQconst [-1] x)
+ // cond:
+ // result: (NEGQ x)
+ {
+ if v.AuxInt != -1 {
+ goto end82501cca6b5fb121a7f8b197e55f2fec
+ }
+ x := v.Args[0]
+ v.Op = OpAMD64NEGQ
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto end82501cca6b5fb121a7f8b197e55f2fec
+ end82501cca6b5fb121a7f8b197e55f2fec:
+ ;
+ // match: (MULQconst [0] _)
+ // cond:
+ // result: (MOVQconst [0])
+ {
+ if v.AuxInt != 0 {
+ goto endcb9faa068e3558ff44daaf1d47d091b5
+ }
+ v.Op = OpAMD64MOVQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = 0
+ return true
+ }
+ goto endcb9faa068e3558ff44daaf1d47d091b5
+ endcb9faa068e3558ff44daaf1d47d091b5:
+ ;
+ // match: (MULQconst [1] x)
+ // cond:
+ // result: (Copy x)
+ {
+ if v.AuxInt != 1 {
+ goto endd7217a7c6311fc7a3e0736a1b0b5be73
+ }
+ x := v.Args[0]
+ v.Op = OpCopy
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto endd7217a7c6311fc7a3e0736a1b0b5be73
+ endd7217a7c6311fc7a3e0736a1b0b5be73:
+ ;
+ // match: (MULQconst [3] x)
+ // cond:
+ // result: (LEAQ2 x x)
+ {
+ if v.AuxInt != 3 {
+ goto end34a86f261671b5852bec6c57155fe0da
+ }
+ x := v.Args[0]
+ v.Op = OpAMD64LEAQ2
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AddArg(x)
+ return true
+ }
+ goto end34a86f261671b5852bec6c57155fe0da
+ end34a86f261671b5852bec6c57155fe0da:
+ ;
+ // match: (MULQconst [5] x)
+ // cond:
+ // result: (LEAQ4 x x)
+ {
+ if v.AuxInt != 5 {
+ goto end534601906c45a9171a9fec3e4b82b189
+ }
+ x := v.Args[0]
+ v.Op = OpAMD64LEAQ4
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AddArg(x)
+ return true
+ }
+ goto end534601906c45a9171a9fec3e4b82b189
+ end534601906c45a9171a9fec3e4b82b189:
+ ;
+ // match: (MULQconst [9] x)
+ // cond:
+ // result: (LEAQ8 x x)
+ {
+ if v.AuxInt != 9 {
+ goto end48a2280b6459821289c56073b8354997
+ }
+ x := v.Args[0]
+ v.Op = OpAMD64LEAQ8
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AddArg(x)
+ return true
+ }
+ goto end48a2280b6459821289c56073b8354997
+ end48a2280b6459821289c56073b8354997:
+ ;
+ // match: (MULQconst [c] x)
+ // cond: isPowerOfTwo(c)
+ // result: (SHLQconst [log2(c)] x)
+ {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(isPowerOfTwo(c)) {
+ goto end75076953dbfe022526a153eda99b39b2
+ }
+ v.Op = OpAMD64SHLQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = log2(c)
+ v.AddArg(x)
+ return true
+ }
+ goto end75076953dbfe022526a153eda99b39b2
+ end75076953dbfe022526a153eda99b39b2:
+ ;
+ // match: (MULQconst [c] (MOVQconst [d]))
+ // cond:
+ // result: (MOVQconst [c*d])
+ {
+ c := v.AuxInt
+ if v.Args[0].Op != OpAMD64MOVQconst {
+ goto end55c38c5c405101e610d7ba7fc702ddc0
+ }
+ d := v.Args[0].AuxInt
+ v.Op = OpAMD64MOVQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c * d
+ return true
+ }
+ goto end55c38c5c405101e610d7ba7fc702ddc0
+ end55c38c5c405101e610d7ba7fc702ddc0:
+ ;
+ case OpAMD64MULW:
+ // match: (MULW x (MOVWconst [c]))
+ // cond:
+ // result: (MULWconst [c] x)
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVWconst {
+ goto end542112cc08217d4bdffc1a645d290ffb
+ }
+ c := v.Args[1].AuxInt
+ v.Op = OpAMD64MULWconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto end542112cc08217d4bdffc1a645d290ffb
+ end542112cc08217d4bdffc1a645d290ffb:
+ ;
+ // match: (MULW (MOVWconst [c]) x)
+ // cond:
+ // result: (MULWconst [c] x)
+ {
+ if v.Args[0].Op != OpAMD64MOVWconst {
+ goto endd97b4245ced2b3d27d8c555b06281de4
+ }
+ c := v.Args[0].AuxInt
+ x := v.Args[1]
+ v.Op = OpAMD64MULWconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto endd97b4245ced2b3d27d8c555b06281de4
+ endd97b4245ced2b3d27d8c555b06281de4:
+ ;
+ case OpAMD64MULWconst:
+ // match: (MULWconst [c] (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [c*d])
+ {
+ c := v.AuxInt
+ if v.Args[0].Op != OpAMD64MOVWconst {
+ goto end61dbc9d9e93dd6946a20a1f475b3f74b
+ }
+ d := v.Args[0].AuxInt
+ v.Op = OpAMD64MOVWconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c * d
+ return true
+ }
+ goto end61dbc9d9e93dd6946a20a1f475b3f74b
+ end61dbc9d9e93dd6946a20a1f475b3f74b:
+ ;
+ case OpMove:
+ // match: (Move [size] dst src mem)
+ // cond:
+ // result: (REPMOVSB dst src (MOVQconst <TypeUInt64> [size]) mem)
+ {
+ size := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ v.Op = OpAMD64REPMOVSB
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(dst)
+ v.AddArg(src)
+ v0 := v.Block.NewValue0(v.Line, OpAMD64MOVQconst, TypeInvalid)
+ v0.Type = TypeUInt64
+ v0.AuxInt = size
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ goto end2aab774aedae2c616ee88bfa87cdf30e
+ end2aab774aedae2c616ee88bfa87cdf30e:
+ ;
+ case OpMul16:
+ // match: (Mul16 x y)
+ // cond:
+ // result: (MULW x y)
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64MULW
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ goto end1addf5ea2c885aa1729b8f944859d00c
+ end1addf5ea2c885aa1729b8f944859d00c:
+ ;
+ case OpMul32:
+ // match: (Mul32 x y)
+ // cond:
+ // result: (MULL x y)
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64MULL
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ goto ende144381f85808e5144782804768e2859
+ ende144381f85808e5144782804768e2859:
+ ;
+ case OpMul64:
+ // match: (Mul64 x y)
+ // cond:
+ // result: (MULQ x y)
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64MULQ
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ goto end38da21e77ac329eb643b20e7d97d5853
+ end38da21e77ac329eb643b20e7d97d5853:
+ ;
+ case OpMul8:
+ // match: (Mul8 x y)
+ // cond:
+ // result: (MULW x y)
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64MULW
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ goto end861428e804347e8489a6424f2e6ce71c
+ end861428e804347e8489a6424f2e6ce71c:
+ ;
+ case OpMulPtr:
+ // match: (MulPtr x y)
+ // cond:
+ // result: (MULQ x y)
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64MULQ
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ goto endbbedad106c011a93243e2062afdcc75f
+ endbbedad106c011a93243e2062afdcc75f:
+ ;
+ case OpAMD64NOTB:
+ // match: (NOTB (MOVBconst [c]))
+ // cond:
+ // result: (MOVBconst [^c])
+ {
+ if v.Args[0].Op != OpAMD64MOVBconst {
+ goto end9e383a9ceb29a9e2bf890ec6a67212a8
+ }
+ c := v.Args[0].AuxInt
+ v.Op = OpAMD64MOVBconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = ^c
+ return true
+ }
+ goto end9e383a9ceb29a9e2bf890ec6a67212a8
+ end9e383a9ceb29a9e2bf890ec6a67212a8:
+ ;
+ case OpAMD64NOTL:
+ // match: (NOTL (MOVLconst [c]))
+ // cond:
+ // result: (MOVLconst [^c])
+ {
+ if v.Args[0].Op != OpAMD64MOVLconst {
+ goto endcc73972c088d5e652a1370a96e56502d
+ }
+ c := v.Args[0].AuxInt
+ v.Op = OpAMD64MOVLconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = ^c
+ return true
+ }
+ goto endcc73972c088d5e652a1370a96e56502d
+ endcc73972c088d5e652a1370a96e56502d:
+ ;
+ case OpAMD64NOTQ:
+ // match: (NOTQ (MOVQconst [c]))
+ // cond:
+ // result: (MOVQconst [^c])
+ {
+ if v.Args[0].Op != OpAMD64MOVQconst {
+ goto endb39ddb6bf7339d46f74114baad4333b6
+ }
+ c := v.Args[0].AuxInt
+ v.Op = OpAMD64MOVQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = ^c
+ return true
+ }
+ goto endb39ddb6bf7339d46f74114baad4333b6
+ endb39ddb6bf7339d46f74114baad4333b6:
+ ;
+ case OpAMD64NOTW:
+ // match: (NOTW (MOVWconst [c]))
+ // cond:
+ // result: (MOVWconst [^c])
+ {
+ if v.Args[0].Op != OpAMD64MOVWconst {
+ goto end35848095ebcf894c6957ad3be5f82c43
+ }
+ c := v.Args[0].AuxInt
+ v.Op = OpAMD64MOVWconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = ^c
+ return true
+ }
+ goto end35848095ebcf894c6957ad3be5f82c43
+ end35848095ebcf894c6957ad3be5f82c43:
+ ;
+ case OpNeg16:
+ // match: (Neg16 x)
+ // cond:
+ // result: (NEGW x)
+ {
+ x := v.Args[0]
+ v.Op = OpAMD64NEGW
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto end7a8c652f4ffeb49656119af69512edb2
+ end7a8c652f4ffeb49656119af69512edb2:
+ ;
+ case OpNeg32:
+ // match: (Neg32 x)
+ // cond:
+ // result: (NEGL x)
+ {
+ x := v.Args[0]
+ v.Op = OpAMD64NEGL
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto endce1f7e17fc193f6c076e47d5e401e126
+ endce1f7e17fc193f6c076e47d5e401e126:
+ ;
+ case OpNeg64:
+ // match: (Neg64 x)
+ // cond:
+ // result: (NEGQ x)
+ {
+ x := v.Args[0]
+ v.Op = OpAMD64NEGQ
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto enda06c5b1718f2b96aba10bf5a5c437c6c
+ enda06c5b1718f2b96aba10bf5a5c437c6c:
+ ;
+ case OpNeg8:
+ // match: (Neg8 x)
+ // cond:
+ // result: (NEGB x)
+ {
+ x := v.Args[0]
+ v.Op = OpAMD64NEGB
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto end1e5f495a2ac6cdea47b1ae5ba62aa95d
+ end1e5f495a2ac6cdea47b1ae5ba62aa95d:
+ ;
+ case OpNeq16:
+ // match: (Neq16 x y)
+ // cond:
+ // result: (SETNE (CMPW <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETNE
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPW, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto endf177c3b3868606824e43e11da7804572
+ endf177c3b3868606824e43e11da7804572:
+ ;
+ case OpNeq32:
+ // match: (Neq32 x y)
+ // cond:
+ // result: (SETNE (CMPL <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETNE
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPL, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto end39c4bf6d063f8a0b6f0064c96ce25173
+ end39c4bf6d063f8a0b6f0064c96ce25173:
+ ;
+ case OpNeq64:
+ // match: (Neq64 x y)
+ // cond:
+ // result: (SETNE (CMPQ <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETNE
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto end8ab0bcb910c0d3213dd8726fbcc4848e
+ end8ab0bcb910c0d3213dd8726fbcc4848e:
+ ;
+ case OpNeq8:
+ // match: (Neq8 x y)
+ // cond:
+ // result: (SETNE (CMPB <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETNE
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPB, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto end4aaff28af59a65b3684f4f1897299932
+ end4aaff28af59a65b3684f4f1897299932:
+ ;
+ case OpNeqPtr:
+ // match: (NeqPtr x y)
+ // cond:
+ // result: (SETNE (CMPQ <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SETNE
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto end6e180ffd9583cd55361ed3e465158a4c
+ end6e180ffd9583cd55361ed3e465158a4c:
+ ;
+ case OpNot:
+ // match: (Not x)
+ // cond:
+ // result: (XORBconst [1] x)
+ {
+ x := v.Args[0]
+ v.Op = OpAMD64XORBconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = 1
+ v.AddArg(x)
+ return true
+ }
+ goto end73973101aad60079c62fa64624e21db1
+ end73973101aad60079c62fa64624e21db1:
+ ;
+ case OpAMD64ORB:
+ // match: (ORB x (MOVBconst [c]))
+ // cond:
+ // result: (ORBconst [c] x)
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVBconst {
+ goto end7b63870decde2515cb77ec4f8f76817c
+ }
+ c := v.Args[1].AuxInt
+ v.Op = OpAMD64ORBconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto end7b63870decde2515cb77ec4f8f76817c
+ end7b63870decde2515cb77ec4f8f76817c:
+ ;
+ // match: (ORB (MOVBconst [c]) x)
+ // cond:
+ // result: (ORBconst [c] x)
+ {
+ if v.Args[0].Op != OpAMD64MOVBconst {
+ goto end70b43d531e2097a4f6293f66256a642e
+ }
+ c := v.Args[0].AuxInt
+ x := v.Args[1]
+ v.Op = OpAMD64ORBconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto end70b43d531e2097a4f6293f66256a642e
+ end70b43d531e2097a4f6293f66256a642e:
+ ;
+ case OpAMD64ORBconst:
+ // match: (ORBconst [c] x)
+ // cond: int8(c)==0
+ // result: (Copy x)
+ {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(int8(c) == 0) {
+ goto end3b9f6d1a1a523595d101f89410f453a1
+ }
+ v.Op = OpCopy
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto end3b9f6d1a1a523595d101f89410f453a1
+ end3b9f6d1a1a523595d101f89410f453a1:
+ ;
+ // match: (ORBconst [c] _)
+ // cond: int8(c)==-1
+ // result: (MOVBconst [-1])
+ {
+ c := v.AuxInt
+ if !(int8(c) == -1) {
+ goto end6033c7910d8cd536b31446e179e4610d
+ }
+ v.Op = OpAMD64MOVBconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = -1
+ return true
+ }
+ goto end6033c7910d8cd536b31446e179e4610d
+ end6033c7910d8cd536b31446e179e4610d:
+ ;
+ // match: (ORBconst [c] (MOVBconst [d]))
+ // cond:
+ // result: (MOVBconst [c|d])
+ {
+ c := v.AuxInt
+ if v.Args[0].Op != OpAMD64MOVBconst {
+ goto endbe5263f022dc10a5cf53c118937d79dd
+ }
+ d := v.Args[0].AuxInt
+ v.Op = OpAMD64MOVBconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c | d
+ return true
+ }
+ goto endbe5263f022dc10a5cf53c118937d79dd
+ endbe5263f022dc10a5cf53c118937d79dd:
+ ;
+ case OpAMD64ORL:
+ // match: (ORL x (MOVLconst [c]))
+ // cond:
+ // result: (ORLconst [c] x)
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVLconst {
+ goto end1b883e30d860b6fac14ae98462c4f61a
+ }
+ c := v.Args[1].AuxInt
+ v.Op = OpAMD64ORLconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto end1b883e30d860b6fac14ae98462c4f61a
+ end1b883e30d860b6fac14ae98462c4f61a:
+ ;
+ // match: (ORL (MOVLconst [c]) x)
+ // cond:
+ // result: (ORLconst [c] x)
+ {
+ if v.Args[0].Op != OpAMD64MOVLconst {
+ goto enda5bc49524a0cbd2241f792837d0a48a8
+ }
+ c := v.Args[0].AuxInt
+ x := v.Args[1]
+ v.Op = OpAMD64ORLconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto enda5bc49524a0cbd2241f792837d0a48a8
+ enda5bc49524a0cbd2241f792837d0a48a8:
+ ;
+ case OpAMD64ORLconst:
+ // match: (ORLconst [c] x)
+ // cond: int32(c)==0
+ // result: (Copy x)
+ {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(int32(c) == 0) {
+ goto end800adaf85f4201ebf7a0e38dc1768c86
+ }
+ v.Op = OpCopy
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto end800adaf85f4201ebf7a0e38dc1768c86
+ end800adaf85f4201ebf7a0e38dc1768c86:
+ ;
+ // match: (ORLconst [c] _)
+ // cond: int32(c)==-1
+ // result: (MOVLconst [-1])
+ {
+ c := v.AuxInt
+ if !(int32(c) == -1) {
+ goto end345a8ea439ef2ef54bd84fc8a0f73e97
+ }
+ v.Op = OpAMD64MOVLconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = -1
+ return true
+ }
+ goto end345a8ea439ef2ef54bd84fc8a0f73e97
+ end345a8ea439ef2ef54bd84fc8a0f73e97:
+ ;
+ // match: (ORLconst [c] (MOVLconst [d]))
+ // cond:
+ // result: (MOVLconst [c|d])
+ {
+ c := v.AuxInt
+ if v.Args[0].Op != OpAMD64MOVLconst {
+ goto ende9ca05024248f782c88084715f81d727
+ }
+ d := v.Args[0].AuxInt
+ v.Op = OpAMD64MOVLconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c | d
+ return true
+ }
+ goto ende9ca05024248f782c88084715f81d727
+ ende9ca05024248f782c88084715f81d727:
+ ;
+ case OpAMD64ORQ:
+ // match: (ORQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (ORQconst [c] x)
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVQconst {
+ goto end601f2bb3ccda102e484ff60adeaf6d26
+ }
+ c := v.Args[1].AuxInt
+ if !(is32Bit(c)) {
+ goto end601f2bb3ccda102e484ff60adeaf6d26
+ }
+ v.Op = OpAMD64ORQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto end601f2bb3ccda102e484ff60adeaf6d26
+ end601f2bb3ccda102e484ff60adeaf6d26:
+ ;
+ // match: (ORQ (MOVQconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (ORQconst [c] x)
+ {
+ if v.Args[0].Op != OpAMD64MOVQconst {
+ goto end010afbebcd314e288509d79a16a6d5cc
+ }
+ c := v.Args[0].AuxInt
+ x := v.Args[1]
+ if !(is32Bit(c)) {
+ goto end010afbebcd314e288509d79a16a6d5cc
+ }
+ v.Op = OpAMD64ORQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto end010afbebcd314e288509d79a16a6d5cc
+ end010afbebcd314e288509d79a16a6d5cc:
+ ;
+ case OpAMD64ORQconst:
+ // match: (ORQconst [0] x)
+ // cond:
+ // result: (Copy x)
+ {
+ if v.AuxInt != 0 {
+ goto end98a286fc50bc6cf8ca9f5af523e2b5cd
+ }
+ x := v.Args[0]
+ v.Op = OpCopy
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto end98a286fc50bc6cf8ca9f5af523e2b5cd
+ end98a286fc50bc6cf8ca9f5af523e2b5cd:
+ ;
+ // match: (ORQconst [-1] _)
+ // cond:
+ // result: (MOVQconst [-1])
+ {
+ if v.AuxInt != -1 {
+ goto endcde9b9d7c4527eaa5d50b252f50b43c1
+ }
+ v.Op = OpAMD64MOVQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = -1
+ return true
+ }
+ goto endcde9b9d7c4527eaa5d50b252f50b43c1
+ endcde9b9d7c4527eaa5d50b252f50b43c1:
+ ;
+ // match: (ORQconst [c] (MOVQconst [d]))
+ // cond:
+ // result: (MOVQconst [c|d])
+ {
+ c := v.AuxInt
+ if v.Args[0].Op != OpAMD64MOVQconst {
+ goto enda2488509b71db9abcb06a5115c4ddc2c
+ }
+ d := v.Args[0].AuxInt
+ v.Op = OpAMD64MOVQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c | d
+ return true
+ }
+ goto enda2488509b71db9abcb06a5115c4ddc2c
+ enda2488509b71db9abcb06a5115c4ddc2c:
+ ;
+ case OpAMD64ORW:
+ // match: (ORW x (MOVWconst [c]))
+ // cond:
+ // result: (ORWconst [c] x)
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVWconst {
+ goto end9f98df10892dbf170b49aace86ee0d7f
+ }
+ c := v.Args[1].AuxInt
+ v.Op = OpAMD64ORWconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto end9f98df10892dbf170b49aace86ee0d7f
+ end9f98df10892dbf170b49aace86ee0d7f:
+ ;
+ // match: (ORW (MOVWconst [c]) x)
+ // cond:
+ // result: (ORWconst [c] x)
+ {
+ if v.Args[0].Op != OpAMD64MOVWconst {
+ goto end96405942c9ceb5fcb0ddb85a8709d015
+ }
+ c := v.Args[0].AuxInt
+ x := v.Args[1]
+ v.Op = OpAMD64ORWconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto end96405942c9ceb5fcb0ddb85a8709d015
+ end96405942c9ceb5fcb0ddb85a8709d015:
+ ;
+ case OpAMD64ORWconst:
+ // match: (ORWconst [c] x)
+ // cond: int16(c)==0
+ // result: (Copy x)
+ {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(int16(c) == 0) {
+ goto end61a4fd5308425b3eafd158f13aaf8f13
+ }
+ v.Op = OpCopy
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto end61a4fd5308425b3eafd158f13aaf8f13
+ end61a4fd5308425b3eafd158f13aaf8f13:
+ ;
+ // match: (ORWconst [c] _)
+ // cond: int16(c)==-1
+ // result: (MOVWconst [-1])
+ {
+ c := v.AuxInt
+ if !(int16(c) == -1) {
+ goto ended87a5775f5e04b2d2a117a63d82dd9b
+ }
+ v.Op = OpAMD64MOVWconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = -1
+ return true
+ }
+ goto ended87a5775f5e04b2d2a117a63d82dd9b
+ ended87a5775f5e04b2d2a117a63d82dd9b:
+ ;
+ // match: (ORWconst [c] (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [c|d])
+ {
+ c := v.AuxInt
+ if v.Args[0].Op != OpAMD64MOVWconst {
+ goto endba9221a8462b5c62e8d7c686f64c2778
+ }
+ d := v.Args[0].AuxInt
+ v.Op = OpAMD64MOVWconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c | d
+ return true
+ }
+ goto endba9221a8462b5c62e8d7c686f64c2778
+ endba9221a8462b5c62e8d7c686f64c2778:
+ ;
+ case OpOffPtr:
+ // match: (OffPtr [off] ptr)
+ // cond:
+ // result: (ADDQconst [off] ptr)
+ {
+ off := v.AuxInt
+ ptr := v.Args[0]
+ v.Op = OpAMD64ADDQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = off
+ v.AddArg(ptr)
+ return true
+ }
+ goto end0429f947ee7ac49ff45a243e461a5290
+ end0429f947ee7ac49ff45a243e461a5290:
+ ;
+ case OpOr16:
+ // match: (Or16 x y)
+ // cond:
+ // result: (ORW x y)
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ORW
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ goto end8fedf2c79d5607b7056b0ff015199cbd
+ end8fedf2c79d5607b7056b0ff015199cbd:
+ ;
+ case OpOr32:
+ // match: (Or32 x y)
+ // cond:
+ // result: (ORL x y)
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ORL
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ goto endea45bed9ca97d2995b68b53e6012d384
+ endea45bed9ca97d2995b68b53e6012d384:
+ ;
+ case OpOr64:
+ // match: (Or64 x y)
+ // cond:
+ // result: (ORQ x y)
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ORQ
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ goto end3a446becaf2461f4f1a41faeef313f41
+ end3a446becaf2461f4f1a41faeef313f41:
+ ;
+ case OpOr8:
+ // match: (Or8 x y)
+ // cond:
+ // result: (ORB x y)
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ORB
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ goto end6f8a8c559a167d1f0a5901d09a1fb248
+ end6f8a8c559a167d1f0a5901d09a1fb248:
+ ;
+ case OpRsh16Ux16:
+ // match: (Rsh16Ux16 <t> x y)
+ // cond:
+ // result: (ANDW (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst <TypeFlags> [16] y)))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDW
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SHRW, TypeInvalid)
+ v0.Type = t
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v1.Type = t
+ v2 := v.Block.NewValue0(v.Line, OpAMD64CMPWconst, TypeInvalid)
+ v2.Type = TypeFlags
+ v2.AuxInt = 16
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+ goto end73239750a306668023d2c49875ac442f
+ end73239750a306668023d2c49875ac442f:
+ ;
+ case OpRsh16Ux32:
+ // match: (Rsh16Ux32 <t> x y)
+ // cond:
+ // result: (ANDW (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst <TypeFlags> [16] y)))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDW
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SHRW, TypeInvalid)
+ v0.Type = t
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v1.Type = t
+ v2 := v.Block.NewValue0(v.Line, OpAMD64CMPLconst, TypeInvalid)
+ v2.Type = TypeFlags
+ v2.AuxInt = 16
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+ goto end9951e3b2e92c892256feece722b32219
+ end9951e3b2e92c892256feece722b32219:
+ ;
+ case OpRsh16Ux64:
+ // match: (Rsh16Ux64 <t> x y)
+ // cond:
+ // result: (ANDW (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst <TypeFlags> [16] y)))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDW
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SHRW, TypeInvalid)
+ v0.Type = t
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v1.Type = t
+ v2 := v.Block.NewValue0(v.Line, OpAMD64CMPQconst, TypeInvalid)
+ v2.Type = TypeFlags
+ v2.AuxInt = 16
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+ goto end610d56d808c204abfa40d653447b2c17
+ end610d56d808c204abfa40d653447b2c17:
+ ;
+ case OpRsh16Ux8:
+ // match: (Rsh16Ux8 <t> x y)
+ // cond:
+ // result: (ANDW (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst <TypeFlags> [16] y)))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDW
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SHRW, TypeInvalid)
+ v0.Type = t
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v1.Type = t
+ v2 := v.Block.NewValue0(v.Line, OpAMD64CMPBconst, TypeInvalid)
+ v2.Type = TypeFlags
+ v2.AuxInt = 16
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+ goto end45e76a8d2b004e6802d53cf12b4757b3
+ end45e76a8d2b004e6802d53cf12b4757b3:
+ ;
+ case OpRsh16x16:
+ // match: (Rsh16x16 <t> x y)
+ // cond:
+ // result: (SARW <t> x (ORW <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst <TypeFlags> [16] y)))))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SARW
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.Type = t
+ v.AddArg(x)
+ v0 := v.Block.NewValue0(v.Line, OpAMD64ORW, TypeInvalid)
+ v0.Type = y.Type
+ v0.AddArg(y)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64NOTL, TypeInvalid)
+ v1.Type = y.Type
+ v2 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v2.Type = y.Type
+ v3 := v.Block.NewValue0(v.Line, OpAMD64CMPWconst, TypeInvalid)
+ v3.Type = TypeFlags
+ v3.AuxInt = 16
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ goto endbcd8fd69ada08517f6f94f35da91e1c3
+ endbcd8fd69ada08517f6f94f35da91e1c3:
+ ;
+ case OpRsh16x32:
+ // match: (Rsh16x32 <t> x y)
+ // cond:
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst <TypeFlags> [16] y)))))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SARW
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.Type = t
+ v.AddArg(x)
+ v0 := v.Block.NewValue0(v.Line, OpAMD64ORL, TypeInvalid)
+ v0.Type = y.Type
+ v0.AddArg(y)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64NOTL, TypeInvalid)
+ v1.Type = y.Type
+ v2 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v2.Type = y.Type
+ v3 := v.Block.NewValue0(v.Line, OpAMD64CMPLconst, TypeInvalid)
+ v3.Type = TypeFlags
+ v3.AuxInt = 16
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ goto endec3994083e7f82857ecec05906c29aa6
+ endec3994083e7f82857ecec05906c29aa6:
+ ;
+ case OpRsh16x64:
+ // match: (Rsh16x64 <t> x y)
+ // cond:
+ // result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst <TypeFlags> [16] y)))))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SARW
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.Type = t
+ v.AddArg(x)
+ v0 := v.Block.NewValue0(v.Line, OpAMD64ORQ, TypeInvalid)
+ v0.Type = y.Type
+ v0.AddArg(y)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64NOTQ, TypeInvalid)
+ v1.Type = y.Type
+ v2 := v.Block.NewValue0(v.Line, OpAMD64SBBQcarrymask, TypeInvalid)
+ v2.Type = y.Type
+ v3 := v.Block.NewValue0(v.Line, OpAMD64CMPQconst, TypeInvalid)
+ v3.Type = TypeFlags
+ v3.AuxInt = 16
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ goto end19da3883e21ffa3a45d7fc648ef38b66
+ end19da3883e21ffa3a45d7fc648ef38b66:
+ ;
+ case OpRsh16x8:
+ // match: (Rsh16x8 <t> x y)
+ // cond:
+ // result: (SARW <t> x (ORB <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst <TypeFlags> [16] y)))))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SARW
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.Type = t
+ v.AddArg(x)
+ v0 := v.Block.NewValue0(v.Line, OpAMD64ORB, TypeInvalid)
+ v0.Type = y.Type
+ v0.AddArg(y)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64NOTL, TypeInvalid)
+ v1.Type = y.Type
+ v2 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v2.Type = y.Type
+ v3 := v.Block.NewValue0(v.Line, OpAMD64CMPBconst, TypeInvalid)
+ v3.Type = TypeFlags
+ v3.AuxInt = 16
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ goto end3c989f6931d059ea04e4ba93601b6c51
+ end3c989f6931d059ea04e4ba93601b6c51:
+ ;
+ case OpRsh32Ux16:
+ // match: (Rsh32Ux16 <t> x y)
+ // cond:
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst <TypeFlags> [32] y)))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDL
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SHRL, TypeInvalid)
+ v0.Type = t
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v1.Type = t
+ v2 := v.Block.NewValue0(v.Line, OpAMD64CMPWconst, TypeInvalid)
+ v2.Type = TypeFlags
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+ goto end056ede9885a9fc2f32615a2a03b35388
+ end056ede9885a9fc2f32615a2a03b35388:
+ ;
+ case OpRsh32Ux32:
+ // match: (Rsh32Ux32 <t> x y)
+ // cond:
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst <TypeFlags> [32] y)))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDL
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SHRL, TypeInvalid)
+ v0.Type = t
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v1.Type = t
+ v2 := v.Block.NewValue0(v.Line, OpAMD64CMPLconst, TypeInvalid)
+ v2.Type = TypeFlags
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+ goto end30439bdc3517479ea25ae7f54408ba7f
+ end30439bdc3517479ea25ae7f54408ba7f:
+ ;
+ case OpRsh32Ux64:
+ // match: (Rsh32Ux64 <t> x y)
+ // cond:
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst <TypeFlags> [32] y)))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDL
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SHRL, TypeInvalid)
+ v0.Type = t
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v1.Type = t
+ v2 := v.Block.NewValue0(v.Line, OpAMD64CMPQconst, TypeInvalid)
+ v2.Type = TypeFlags
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+ goto end49b47fd18b54461d8eea51f6e5889cd2
+ end49b47fd18b54461d8eea51f6e5889cd2:
+ ;
+ case OpRsh32Ux8:
+ // match: (Rsh32Ux8 <t> x y)
+ // cond:
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst <TypeFlags> [32] y)))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDL
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SHRL, TypeInvalid)
+ v0.Type = t
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v1.Type = t
+ v2 := v.Block.NewValue0(v.Line, OpAMD64CMPBconst, TypeInvalid)
+ v2.Type = TypeFlags
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+ goto end46e045970a8b1afb9035605fc0e50c69
+ end46e045970a8b1afb9035605fc0e50c69:
+ ;
+ case OpRsh32x16:
+ // match: (Rsh32x16 <t> x y)
+ // cond:
+ // result: (SARL <t> x (ORW <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst <TypeFlags> [32] y)))))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SARL
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.Type = t
+ v.AddArg(x)
+ v0 := v.Block.NewValue0(v.Line, OpAMD64ORW, TypeInvalid)
+ v0.Type = y.Type
+ v0.AddArg(y)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64NOTL, TypeInvalid)
+ v1.Type = y.Type
+ v2 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v2.Type = y.Type
+ v3 := v.Block.NewValue0(v.Line, OpAMD64CMPWconst, TypeInvalid)
+ v3.Type = TypeFlags
+ v3.AuxInt = 32
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ goto end5d1b8d7e1d1e53e621d13bb0eafc9102
+ end5d1b8d7e1d1e53e621d13bb0eafc9102:
+ ;
+ case OpRsh32x32:
+ // match: (Rsh32x32 <t> x y)
+ // cond:
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst <TypeFlags> [32] y)))))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SARL
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.Type = t
+ v.AddArg(x)
+ v0 := v.Block.NewValue0(v.Line, OpAMD64ORL, TypeInvalid)
+ v0.Type = y.Type
+ v0.AddArg(y)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64NOTL, TypeInvalid)
+ v1.Type = y.Type
+ v2 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v2.Type = y.Type
+ v3 := v.Block.NewValue0(v.Line, OpAMD64CMPLconst, TypeInvalid)
+ v3.Type = TypeFlags
+ v3.AuxInt = 32
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ goto end9c27383961c2161a9955012fce808cab
+ end9c27383961c2161a9955012fce808cab:
+ ;
+ case OpRsh32x64:
+ // match: (Rsh32x64 <t> x y)
+ // cond:
+ // result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst <TypeFlags> [32] y)))))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SARL
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.Type = t
+ v.AddArg(x)
+ v0 := v.Block.NewValue0(v.Line, OpAMD64ORQ, TypeInvalid)
+ v0.Type = y.Type
+ v0.AddArg(y)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64NOTQ, TypeInvalid)
+ v1.Type = y.Type
+ v2 := v.Block.NewValue0(v.Line, OpAMD64SBBQcarrymask, TypeInvalid)
+ v2.Type = y.Type
+ v3 := v.Block.NewValue0(v.Line, OpAMD64CMPQconst, TypeInvalid)
+ v3.Type = TypeFlags
+ v3.AuxInt = 32
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ goto end75dc7144497705c800e0c60dcd4a2828
+ end75dc7144497705c800e0c60dcd4a2828:
+ ;
+ case OpRsh32x8:
+ // match: (Rsh32x8 <t> x y)
+ // cond:
+ // result: (SARL <t> x (ORB <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst <TypeFlags> [32] y)))))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SARL
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.Type = t
+ v.AddArg(x)
+ v0 := v.Block.NewValue0(v.Line, OpAMD64ORB, TypeInvalid)
+ v0.Type = y.Type
+ v0.AddArg(y)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64NOTL, TypeInvalid)
+ v1.Type = y.Type
+ v2 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v2.Type = y.Type
+ v3 := v.Block.NewValue0(v.Line, OpAMD64CMPBconst, TypeInvalid)
+ v3.Type = TypeFlags
+ v3.AuxInt = 32
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ goto enda7b94b2fd5cbcd12bb2dcd576bdca481
+ enda7b94b2fd5cbcd12bb2dcd576bdca481:
+ ;
+ case OpRsh64Ux16:
+ // match: (Rsh64Ux16 <t> x y)
+ // cond:
+ // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst <TypeFlags> [64] y)))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDQ
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SHRQ, TypeInvalid)
+ v0.Type = t
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64SBBQcarrymask, TypeInvalid)
+ v1.Type = t
+ v2 := v.Block.NewValue0(v.Line, OpAMD64CMPWconst, TypeInvalid)
+ v2.Type = TypeFlags
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+ goto endc4bdfdc375a5c94978d936bd0db89cc5
+ endc4bdfdc375a5c94978d936bd0db89cc5:
+ ;
+ case OpRsh64Ux32:
+ // match: (Rsh64Ux32 <t> x y)
+ // cond:
+ // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst <TypeFlags> [64] y)))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDQ
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SHRQ, TypeInvalid)
+ v0.Type = t
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64SBBQcarrymask, TypeInvalid)
+ v1.Type = t
+ v2 := v.Block.NewValue0(v.Line, OpAMD64CMPLconst, TypeInvalid)
+ v2.Type = TypeFlags
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+ goto end217f32bca5f6744b9a7de052f4fae13e
+ end217f32bca5f6744b9a7de052f4fae13e:
+ ;
+ case OpRsh64Ux64:
+ // match: (Rsh64Ux64 <t> x y)
+ // cond:
+ // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst <TypeFlags> [64] y)))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDQ
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SHRQ, TypeInvalid)
+ v0.Type = t
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64SBBQcarrymask, TypeInvalid)
+ v1.Type = t
+ v2 := v.Block.NewValue0(v.Line, OpAMD64CMPQconst, TypeInvalid)
+ v2.Type = TypeFlags
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+ goto end530dee0bcadf1cf5d092894b6210ffcd
+ end530dee0bcadf1cf5d092894b6210ffcd:
+ ;
+ case OpRsh64Ux8:
+ // match: (Rsh64Ux8 <t> x y)
+ // cond:
+ // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst <TypeFlags> [64] y)))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDQ
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SHRQ, TypeInvalid)
+ v0.Type = t
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64SBBQcarrymask, TypeInvalid)
+ v1.Type = t
+ v2 := v.Block.NewValue0(v.Line, OpAMD64CMPBconst, TypeInvalid)
+ v2.Type = TypeFlags
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+ goto endf09baf4e0005c5eb4905f71ce4c8b306
+ endf09baf4e0005c5eb4905f71ce4c8b306:
+ ;
+ case OpRsh64x16:
+ // match: (Rsh64x16 <t> x y)
+ // cond:
+ // result: (SARQ <t> x (ORW <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst <TypeFlags> [64] y)))))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SARQ
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.Type = t
+ v.AddArg(x)
+ v0 := v.Block.NewValue0(v.Line, OpAMD64ORW, TypeInvalid)
+ v0.Type = y.Type
+ v0.AddArg(y)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64NOTL, TypeInvalid)
+ v1.Type = y.Type
+ v2 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v2.Type = y.Type
+ v3 := v.Block.NewValue0(v.Line, OpAMD64CMPWconst, TypeInvalid)
+ v3.Type = TypeFlags
+ v3.AuxInt = 64
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ goto endb370ee74ca256a604138321ddca9d543
+ endb370ee74ca256a604138321ddca9d543:
+ ;
+ case OpRsh64x32:
+ // match: (Rsh64x32 <t> x y)
+ // cond:
+ // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst <TypeFlags> [64] y)))))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SARQ
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.Type = t
+ v.AddArg(x)
+ v0 := v.Block.NewValue0(v.Line, OpAMD64ORL, TypeInvalid)
+ v0.Type = y.Type
+ v0.AddArg(y)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64NOTL, TypeInvalid)
+ v1.Type = y.Type
+ v2 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v2.Type = y.Type
+ v3 := v.Block.NewValue0(v.Line, OpAMD64CMPLconst, TypeInvalid)
+ v3.Type = TypeFlags
+ v3.AuxInt = 64
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ goto end3cc6edf5b286a449332757ea12d2d601
+ end3cc6edf5b286a449332757ea12d2d601:
+ ;
+ case OpRsh64x64:
+ // match: (Rsh64x64 <t> x y)
+ // cond:
+ // result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst <TypeFlags> [64] y)))))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SARQ
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.Type = t
+ v.AddArg(x)
+ v0 := v.Block.NewValue0(v.Line, OpAMD64ORQ, TypeInvalid)
+ v0.Type = y.Type
+ v0.AddArg(y)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64NOTQ, TypeInvalid)
+ v1.Type = y.Type
+ v2 := v.Block.NewValue0(v.Line, OpAMD64SBBQcarrymask, TypeInvalid)
+ v2.Type = y.Type
+ v3 := v.Block.NewValue0(v.Line, OpAMD64CMPQconst, TypeInvalid)
+ v3.Type = TypeFlags
+ v3.AuxInt = 64
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ goto end45de7b33396d9fd2ba377bd095f1d7a6
+ end45de7b33396d9fd2ba377bd095f1d7a6:
+ ;
+ case OpRsh64x8:
+ // match: (Rsh64x8 <t> x y)
+ // cond:
+ // result: (SARQ <t> x (ORB <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst <TypeFlags> [64] y)))))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SARQ
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.Type = t
+ v.AddArg(x)
+ v0 := v.Block.NewValue0(v.Line, OpAMD64ORB, TypeInvalid)
+ v0.Type = y.Type
+ v0.AddArg(y)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64NOTL, TypeInvalid)
+ v1.Type = y.Type
+ v2 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v2.Type = y.Type
+ v3 := v.Block.NewValue0(v.Line, OpAMD64CMPBconst, TypeInvalid)
+ v3.Type = TypeFlags
+ v3.AuxInt = 64
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ goto ende03fa68104fd18bb9b2bb94370e0c8b3
+ ende03fa68104fd18bb9b2bb94370e0c8b3:
+ ;
+ case OpRsh8Ux16:
+ // match: (Rsh8Ux16 <t> x y)
+ // cond:
+ // result: (ANDB (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst <TypeFlags> [8] y)))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDB
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SHRB, TypeInvalid)
+ v0.Type = t
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v1.Type = t
+ v2 := v.Block.NewValue0(v.Line, OpAMD64CMPWconst, TypeInvalid)
+ v2.Type = TypeFlags
+ v2.AuxInt = 8
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+ goto enda1adfc560334e10d5e83fbff27a8752f
+ enda1adfc560334e10d5e83fbff27a8752f:
+ ;
+ case OpRsh8Ux32:
+ // match: (Rsh8Ux32 <t> x y)
+ // cond:
+ // result: (ANDB (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst <TypeFlags> [8] y)))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDB
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SHRB, TypeInvalid)
+ v0.Type = t
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v1.Type = t
+ v2 := v.Block.NewValue0(v.Line, OpAMD64CMPLconst, TypeInvalid)
+ v2.Type = TypeFlags
+ v2.AuxInt = 8
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+ goto end17f63b4b712e715a33ac780193b59c2e
+ end17f63b4b712e715a33ac780193b59c2e:
+ ;
+ case OpRsh8Ux64:
+ // match: (Rsh8Ux64 <t> x y)
+ // cond:
+ // result: (ANDB (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst <TypeFlags> [8] y)))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDB
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SHRB, TypeInvalid)
+ v0.Type = t
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v1.Type = t
+ v2 := v.Block.NewValue0(v.Line, OpAMD64CMPQconst, TypeInvalid)
+ v2.Type = TypeFlags
+ v2.AuxInt = 8
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+ goto end77d5c3ef9982ebd27c135d3461b7430b
+ end77d5c3ef9982ebd27c135d3461b7430b:
+ ;
+ case OpRsh8Ux8:
+ // match: (Rsh8Ux8 <t> x y)
+ // cond:
+ // result: (ANDB (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst <TypeFlags> [8] y)))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64ANDB
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SHRB, TypeInvalid)
+ v0.Type = t
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v1.Type = t
+ v2 := v.Block.NewValue0(v.Line, OpAMD64CMPBconst, TypeInvalid)
+ v2.Type = TypeFlags
+ v2.AuxInt = 8
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+ goto end206712ffbda924142afbf384aeb8f09e
+ end206712ffbda924142afbf384aeb8f09e:
+ ;
+ case OpRsh8x16:
+ // match: (Rsh8x16 <t> x y)
+ // cond:
+ // result: (SARB <t> x (ORW <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst <TypeFlags> [8] y)))))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SARB
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.Type = t
+ v.AddArg(x)
+ v0 := v.Block.NewValue0(v.Line, OpAMD64ORW, TypeInvalid)
+ v0.Type = y.Type
+ v0.AddArg(y)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64NOTL, TypeInvalid)
+ v1.Type = y.Type
+ v2 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v2.Type = y.Type
+ v3 := v.Block.NewValue0(v.Line, OpAMD64CMPWconst, TypeInvalid)
+ v3.Type = TypeFlags
+ v3.AuxInt = 8
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ goto endd303f390b49d9716dc783d5c4d57ddd1
+ endd303f390b49d9716dc783d5c4d57ddd1:
+ ;
+ case OpRsh8x32:
+ // match: (Rsh8x32 <t> x y)
+ // cond:
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst <TypeFlags> [8] y)))))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SARB
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.Type = t
+ v.AddArg(x)
+ v0 := v.Block.NewValue0(v.Line, OpAMD64ORL, TypeInvalid)
+ v0.Type = y.Type
+ v0.AddArg(y)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64NOTL, TypeInvalid)
+ v1.Type = y.Type
+ v2 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v2.Type = y.Type
+ v3 := v.Block.NewValue0(v.Line, OpAMD64CMPLconst, TypeInvalid)
+ v3.Type = TypeFlags
+ v3.AuxInt = 8
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ goto ende12a524a6fc68eb245140c6919034337
+ ende12a524a6fc68eb245140c6919034337:
+ ;
+ case OpRsh8x64:
+ // match: (Rsh8x64 <t> x y)
+ // cond:
+ // result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst <TypeFlags> [8] y)))))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SARB
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.Type = t
+ v.AddArg(x)
+ v0 := v.Block.NewValue0(v.Line, OpAMD64ORQ, TypeInvalid)
+ v0.Type = y.Type
+ v0.AddArg(y)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64NOTQ, TypeInvalid)
+ v1.Type = y.Type
+ v2 := v.Block.NewValue0(v.Line, OpAMD64SBBQcarrymask, TypeInvalid)
+ v2.Type = y.Type
+ v3 := v.Block.NewValue0(v.Line, OpAMD64CMPQconst, TypeInvalid)
+ v3.Type = TypeFlags
+ v3.AuxInt = 8
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ goto end6ee53459daa5458d163c86ea02dd2f31
+ end6ee53459daa5458d163c86ea02dd2f31:
+ ;
+ case OpRsh8x8:
+ // match: (Rsh8x8 <t> x y)
+ // cond:
+ // result: (SARB <t> x (ORB <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst <TypeFlags> [8] y)))))
+ {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SARB
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.Type = t
+ v.AddArg(x)
+ v0 := v.Block.NewValue0(v.Line, OpAMD64ORB, TypeInvalid)
+ v0.Type = y.Type
+ v0.AddArg(y)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64NOTL, TypeInvalid)
+ v1.Type = y.Type
+ v2 := v.Block.NewValue0(v.Line, OpAMD64SBBLcarrymask, TypeInvalid)
+ v2.Type = y.Type
+ v3 := v.Block.NewValue0(v.Line, OpAMD64CMPBconst, TypeInvalid)
+ v3.Type = TypeFlags
+ v3.AuxInt = 8
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ goto end07f447a7e25b048c41d412c242330ec0
+ end07f447a7e25b048c41d412c242330ec0:
+ ;
+ case OpAMD64SARB:
+ // match: (SARB x (MOVBconst [c]))
+ // cond:
+ // result: (SARBconst [c&31] x)
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVBconst {
+ goto end3bf3d17717aa6c04462e56d1c87902ce
+ }
+ c := v.Args[1].AuxInt
+ v.Op = OpAMD64SARBconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c & 31
+ v.AddArg(x)
+ return true
+ }
+ goto end3bf3d17717aa6c04462e56d1c87902ce
+ end3bf3d17717aa6c04462e56d1c87902ce:
+ ;
+ case OpAMD64SARL:
+ // match: (SARL x (MOVLconst [c]))
+ // cond:
+ // result: (SARLconst [c&31] x)
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVLconst {
+ goto ende586a72c1b232ee0b63e37c71eeb8470
+ }
+ c := v.Args[1].AuxInt
+ v.Op = OpAMD64SARLconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c & 31
+ v.AddArg(x)
+ return true
+ }
+ goto ende586a72c1b232ee0b63e37c71eeb8470
+ ende586a72c1b232ee0b63e37c71eeb8470:
+ ;
+ case OpAMD64SARQ:
+ // match: (SARQ x (MOVQconst [c]))
+ // cond:
+ // result: (SARQconst [c&63] x)
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVQconst {
+ goto end25e720ab203be2745dded5550e6d8a7c
+ }
+ c := v.Args[1].AuxInt
+ v.Op = OpAMD64SARQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c & 63
+ v.AddArg(x)
+ return true
+ }
+ goto end25e720ab203be2745dded5550e6d8a7c
+ end25e720ab203be2745dded5550e6d8a7c:
+ ;
+ case OpAMD64SARW:
+ // match: (SARW x (MOVWconst [c]))
+ // cond:
+ // result: (SARWconst [c&31] x)
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVWconst {
+ goto endc46e3f211f94238f9a0aec3c498af490
+ }
+ c := v.Args[1].AuxInt
+ v.Op = OpAMD64SARWconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c & 31
+ v.AddArg(x)
+ return true
+ }
+ goto endc46e3f211f94238f9a0aec3c498af490
+ endc46e3f211f94238f9a0aec3c498af490:
+ ;
+ case OpAMD64SBBQcarrymask:
+ // match: (SBBQcarrymask (CMPQconst [c] (MOVQconst [d])))
+ // cond: inBounds(d, c)
+ // result: (MOVQconst [-1])
+ {
+ if v.Args[0].Op != OpAMD64CMPQconst {
+ goto end378de7e659770f877c08b6b269073069
+ }
+ c := v.Args[0].AuxInt
+ if v.Args[0].Args[0].Op != OpAMD64MOVQconst {
+ goto end378de7e659770f877c08b6b269073069
+ }
+ d := v.Args[0].Args[0].AuxInt
+ if !(inBounds(d, c)) {
+ goto end378de7e659770f877c08b6b269073069
+ }
+ v.Op = OpAMD64MOVQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = -1
+ return true
+ }
+ goto end378de7e659770f877c08b6b269073069
+ end378de7e659770f877c08b6b269073069:
+ ;
+ // match: (SBBQcarrymask (CMPQconst [c] (MOVQconst [d])))
+ // cond: !inBounds(d, c)
+ // result: (MOVQconst [0])
+ {
+ if v.Args[0].Op != OpAMD64CMPQconst {
+ goto enda7bfd1974bf83ca79653c560a718a86c
+ }
+ c := v.Args[0].AuxInt
+ if v.Args[0].Args[0].Op != OpAMD64MOVQconst {
+ goto enda7bfd1974bf83ca79653c560a718a86c
+ }
+ d := v.Args[0].Args[0].AuxInt
+ if !(!inBounds(d, c)) {
+ goto enda7bfd1974bf83ca79653c560a718a86c
+ }
+ v.Op = OpAMD64MOVQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = 0
+ return true
+ }
+ goto enda7bfd1974bf83ca79653c560a718a86c
+ enda7bfd1974bf83ca79653c560a718a86c:
+ ;
+ // match: (SBBQcarrymask (CMPLconst [c] (MOVLconst [d])))
+ // cond: inBounds(int64(int32(d)), int64(int32(c)))
+ // result: (MOVQconst [-1])
+ {
+ if v.Args[0].Op != OpAMD64CMPLconst {
+ goto end8c6d39847239120fa0fe953007eb40ae
+ }
+ c := v.Args[0].AuxInt
+ if v.Args[0].Args[0].Op != OpAMD64MOVLconst {
+ goto end8c6d39847239120fa0fe953007eb40ae
+ }
+ d := v.Args[0].Args[0].AuxInt
+ if !(inBounds(int64(int32(d)), int64(int32(c)))) {
+ goto end8c6d39847239120fa0fe953007eb40ae
+ }
+ v.Op = OpAMD64MOVQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = -1
+ return true
+ }
+ goto end8c6d39847239120fa0fe953007eb40ae
+ end8c6d39847239120fa0fe953007eb40ae:
+ ;
+ // match: (SBBQcarrymask (CMPLconst [c] (MOVLconst [d])))
+ // cond: !inBounds(int64(int32(d)), int64(int32(c)))
+ // result: (MOVQconst [0])
+ {
+ if v.Args[0].Op != OpAMD64CMPLconst {
+ goto end20885e855545e16ca77af2b9a2b69ea9
+ }
+ c := v.Args[0].AuxInt
+ if v.Args[0].Args[0].Op != OpAMD64MOVLconst {
+ goto end20885e855545e16ca77af2b9a2b69ea9
+ }
+ d := v.Args[0].Args[0].AuxInt
+ if !(!inBounds(int64(int32(d)), int64(int32(c)))) {
+ goto end20885e855545e16ca77af2b9a2b69ea9
+ }
+ v.Op = OpAMD64MOVQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = 0
+ return true
+ }
+ goto end20885e855545e16ca77af2b9a2b69ea9
+ end20885e855545e16ca77af2b9a2b69ea9:
+ ;
+ // match: (SBBQcarrymask (CMPWconst [c] (MOVWconst [d])))
+ // cond: inBounds(int64(int16(d)), int64(int16(c)))
+ // result: (MOVQconst [-1])
+ {
+ if v.Args[0].Op != OpAMD64CMPWconst {
+ goto end16f61db69d07e67e9f408c2790a9de7c
+ }
+ c := v.Args[0].AuxInt
+ if v.Args[0].Args[0].Op != OpAMD64MOVWconst {
+ goto end16f61db69d07e67e9f408c2790a9de7c
+ }
+ d := v.Args[0].Args[0].AuxInt
+ if !(inBounds(int64(int16(d)), int64(int16(c)))) {
+ goto end16f61db69d07e67e9f408c2790a9de7c
+ }
+ v.Op = OpAMD64MOVQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = -1
+ return true
+ }
+ goto end16f61db69d07e67e9f408c2790a9de7c
+ end16f61db69d07e67e9f408c2790a9de7c:
+ ;
+ // match: (SBBQcarrymask (CMPWconst [c] (MOVWconst [d])))
+ // cond: !inBounds(int64(int16(d)), int64(int16(c)))
+ // result: (MOVQconst [0])
+ {
+ if v.Args[0].Op != OpAMD64CMPWconst {
+ goto end191ca427f7d5d2286bd290920c84a51d
+ }
+ c := v.Args[0].AuxInt
+ if v.Args[0].Args[0].Op != OpAMD64MOVWconst {
+ goto end191ca427f7d5d2286bd290920c84a51d
+ }
+ d := v.Args[0].Args[0].AuxInt
+ if !(!inBounds(int64(int16(d)), int64(int16(c)))) {
+ goto end191ca427f7d5d2286bd290920c84a51d
+ }
+ v.Op = OpAMD64MOVQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = 0
+ return true
+ }
+ goto end191ca427f7d5d2286bd290920c84a51d
+ end191ca427f7d5d2286bd290920c84a51d:
+ ;
+ // match: (SBBQcarrymask (CMPBconst [c] (MOVBconst [d])))
+ // cond: inBounds(int64(int8(d)), int64(int8(c)))
+ // result: (MOVQconst [-1])
+ {
+ if v.Args[0].Op != OpAMD64CMPBconst {
+ goto end3fd3f1e9660b9050c6a41b4fc948f793
+ }
+ c := v.Args[0].AuxInt
+ if v.Args[0].Args[0].Op != OpAMD64MOVBconst {
+ goto end3fd3f1e9660b9050c6a41b4fc948f793
+ }
+ d := v.Args[0].Args[0].AuxInt
+ if !(inBounds(int64(int8(d)), int64(int8(c)))) {
+ goto end3fd3f1e9660b9050c6a41b4fc948f793
+ }
+ v.Op = OpAMD64MOVQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = -1
+ return true
+ }
+ goto end3fd3f1e9660b9050c6a41b4fc948f793
+ end3fd3f1e9660b9050c6a41b4fc948f793:
+ ;
+ // match: (SBBQcarrymask (CMPBconst [c] (MOVBconst [d])))
+ // cond: !inBounds(int64(int8(d)), int64(int8(c)))
+ // result: (MOVQconst [0])
+ {
+ if v.Args[0].Op != OpAMD64CMPBconst {
+ goto ende0d6edd92ae98e6dc041f65029d8b243
+ }
+ c := v.Args[0].AuxInt
+ if v.Args[0].Args[0].Op != OpAMD64MOVBconst {
+ goto ende0d6edd92ae98e6dc041f65029d8b243
+ }
+ d := v.Args[0].Args[0].AuxInt
+ if !(!inBounds(int64(int8(d)), int64(int8(c)))) {
+ goto ende0d6edd92ae98e6dc041f65029d8b243
+ }
+ v.Op = OpAMD64MOVQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = 0
+ return true
+ }
+ goto ende0d6edd92ae98e6dc041f65029d8b243
+ ende0d6edd92ae98e6dc041f65029d8b243:
+ ;
+ case OpAMD64SETA:
+ // match: (SETA (InvertFlags x))
+ // cond:
+ // result: (SETB x)
+ {
+ if v.Args[0].Op != OpAMD64InvertFlags {
+ goto enda4ac36e94fc279d762b5a6c7c6cc665d
+ }
+ x := v.Args[0].Args[0]
+ v.Op = OpAMD64SETB
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto enda4ac36e94fc279d762b5a6c7c6cc665d
+ enda4ac36e94fc279d762b5a6c7c6cc665d:
+ ;
+ case OpAMD64SETAE:
+ // match: (SETAE (InvertFlags x))
+ // cond:
+ // result: (SETBE x)
+ {
+ if v.Args[0].Op != OpAMD64InvertFlags {
+ goto end0468f5be6caf682fdea6b91d6648991e
+ }
+ x := v.Args[0].Args[0]
+ v.Op = OpAMD64SETBE
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto end0468f5be6caf682fdea6b91d6648991e
+ end0468f5be6caf682fdea6b91d6648991e:
+ ;
+ case OpAMD64SETB:
+ // match: (SETB (InvertFlags x))
+ // cond:
+ // result: (SETA x)
+ {
+ if v.Args[0].Op != OpAMD64InvertFlags {
+ goto endc9eba7aa1e54a228570d2f5cc96f3565
+ }
+ x := v.Args[0].Args[0]
+ v.Op = OpAMD64SETA
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto endc9eba7aa1e54a228570d2f5cc96f3565
+ endc9eba7aa1e54a228570d2f5cc96f3565:
+ ;
+ case OpAMD64SETBE:
+ // match: (SETBE (InvertFlags x))
+ // cond:
+ // result: (SETAE x)
+ {
+ if v.Args[0].Op != OpAMD64InvertFlags {
+ goto end9d9031643469798b14b8cad1f5a7a1ba
+ }
+ x := v.Args[0].Args[0]
+ v.Op = OpAMD64SETAE
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto end9d9031643469798b14b8cad1f5a7a1ba
+ end9d9031643469798b14b8cad1f5a7a1ba:
+ ;
+ case OpAMD64SETEQ:
+ // match: (SETEQ (InvertFlags x))
+ // cond:
+ // result: (SETEQ x)
+ {
+ if v.Args[0].Op != OpAMD64InvertFlags {
+ goto end5d2039c9368d8c0cfba23b5a85b459e1
+ }
+ x := v.Args[0].Args[0]
+ v.Op = OpAMD64SETEQ
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto end5d2039c9368d8c0cfba23b5a85b459e1
+ end5d2039c9368d8c0cfba23b5a85b459e1:
+ ;
+ case OpAMD64SETG:
+ // match: (SETG (InvertFlags x))
+ // cond:
+ // result: (SETL x)
+ {
+ if v.Args[0].Op != OpAMD64InvertFlags {
+ goto endf7586738694c9cd0b74ae28bbadb649f
+ }
+ x := v.Args[0].Args[0]
+ v.Op = OpAMD64SETL
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto endf7586738694c9cd0b74ae28bbadb649f
+ endf7586738694c9cd0b74ae28bbadb649f:
+ ;
+ case OpAMD64SETGE:
+ // match: (SETGE (InvertFlags x))
+ // cond:
+ // result: (SETLE x)
+ {
+ if v.Args[0].Op != OpAMD64InvertFlags {
+ goto end82c11eff6f842159f564f2dad3d2eedc
+ }
+ x := v.Args[0].Args[0]
+ v.Op = OpAMD64SETLE
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto end82c11eff6f842159f564f2dad3d2eedc
+ end82c11eff6f842159f564f2dad3d2eedc:
+ ;
+ case OpAMD64SETL:
+ // match: (SETL (InvertFlags x))
+ // cond:
+ // result: (SETG x)
+ {
+ if v.Args[0].Op != OpAMD64InvertFlags {
+ goto ende33160cd86b9d4d3b77e02fb4658d5d3
+ }
+ x := v.Args[0].Args[0]
+ v.Op = OpAMD64SETG
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto ende33160cd86b9d4d3b77e02fb4658d5d3
+ ende33160cd86b9d4d3b77e02fb4658d5d3:
+ ;
+ case OpAMD64SETLE:
+ // match: (SETLE (InvertFlags x))
+ // cond:
+ // result: (SETGE x)
+ {
+ if v.Args[0].Op != OpAMD64InvertFlags {
+ goto end9307d96753efbeb888d1c98a6aba7a29
+ }
+ x := v.Args[0].Args[0]
+ v.Op = OpAMD64SETGE
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto end9307d96753efbeb888d1c98a6aba7a29
+ end9307d96753efbeb888d1c98a6aba7a29:
+ ;
+ case OpAMD64SETNE:
+ // match: (SETNE (InvertFlags x))
+ // cond:
+ // result: (SETNE x)
+ {
+ if v.Args[0].Op != OpAMD64InvertFlags {
+ goto endbc71811b789475308014550f638026eb
+ }
+ x := v.Args[0].Args[0]
+ v.Op = OpAMD64SETNE
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto endbc71811b789475308014550f638026eb
+ endbc71811b789475308014550f638026eb:
+ ;
+ case OpAMD64SHLB:
+ // match: (SHLB x (MOVBconst [c]))
+ // cond:
+ // result: (SHLBconst [c&31] x)
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVBconst {
+ goto end2d0d0111d831d8a575b5627284a6337a
+ }
+ c := v.Args[1].AuxInt
+ v.Op = OpAMD64SHLBconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c & 31
+ v.AddArg(x)
+ return true
+ }
+ goto end2d0d0111d831d8a575b5627284a6337a
+ end2d0d0111d831d8a575b5627284a6337a:
+ ;
+ case OpAMD64SHLL:
+ // match: (SHLL x (MOVLconst [c]))
+ // cond:
+ // result: (SHLLconst [c&31] x)
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVLconst {
+ goto end633f9ddcfbb63374c895a5f78da75d25
+ }
+ c := v.Args[1].AuxInt
+ v.Op = OpAMD64SHLLconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c & 31
+ v.AddArg(x)
+ return true
+ }
+ goto end633f9ddcfbb63374c895a5f78da75d25
+ end633f9ddcfbb63374c895a5f78da75d25:
+ ;
+ case OpAMD64SHLQ:
+ // match: (SHLQ x (MOVQconst [c]))
+ // cond:
+ // result: (SHLQconst [c&63] x)
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVQconst {
+ goto end4d7e3a945cacdd6b6c8c0de6f465d4ae
+ }
+ c := v.Args[1].AuxInt
+ v.Op = OpAMD64SHLQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c & 63
+ v.AddArg(x)
+ return true
+ }
+ goto end4d7e3a945cacdd6b6c8c0de6f465d4ae
+ end4d7e3a945cacdd6b6c8c0de6f465d4ae:
+ ;
+ case OpAMD64SHLW:
+ // match: (SHLW x (MOVWconst [c]))
+ // cond:
+ // result: (SHLWconst [c&31] x)
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVWconst {
+ goto endba96a52aa58d28b3357828051e0e695c
+ }
+ c := v.Args[1].AuxInt
+ v.Op = OpAMD64SHLWconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c & 31
+ v.AddArg(x)
+ return true
+ }
+ goto endba96a52aa58d28b3357828051e0e695c
+ endba96a52aa58d28b3357828051e0e695c:
+ ;
+ case OpAMD64SHRB:
+ // match: (SHRB x (MOVBconst [c]))
+ // cond:
+ // result: (SHRBconst [c&31] x)
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVBconst {
+ goto enddb1cd5aaa826d43fa4f6d1b2b8795e58
+ }
+ c := v.Args[1].AuxInt
+ v.Op = OpAMD64SHRBconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c & 31
+ v.AddArg(x)
+ return true
+ }
+ goto enddb1cd5aaa826d43fa4f6d1b2b8795e58
+ enddb1cd5aaa826d43fa4f6d1b2b8795e58:
+ ;
+ case OpAMD64SHRL:
+ // match: (SHRL x (MOVLconst [c]))
+ // cond:
+ // result: (SHRLconst [c&31] x)
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVLconst {
+ goto end344b8b9202e1925e8d0561f1c21412fc
+ }
+ c := v.Args[1].AuxInt
+ v.Op = OpAMD64SHRLconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c & 31
+ v.AddArg(x)
+ return true
+ }
+ goto end344b8b9202e1925e8d0561f1c21412fc
+ end344b8b9202e1925e8d0561f1c21412fc:
+ ;
+ case OpAMD64SHRQ:
+ // match: (SHRQ x (MOVQconst [c]))
+ // cond:
+ // result: (SHRQconst [c&63] x)
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVQconst {
+ goto end699d35e2d5cfa08b8a3b1c8a183ddcf3
+ }
+ c := v.Args[1].AuxInt
+ v.Op = OpAMD64SHRQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c & 63
+ v.AddArg(x)
+ return true
+ }
+ goto end699d35e2d5cfa08b8a3b1c8a183ddcf3
+ end699d35e2d5cfa08b8a3b1c8a183ddcf3:
+ ;
+ case OpAMD64SHRW:
+ // match: (SHRW x (MOVWconst [c]))
+ // cond:
+ // result: (SHRWconst [c&31] x)
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVWconst {
+ goto endd75ff1f9b3e9ec9c942a39b6179da1b3
+ }
+ c := v.Args[1].AuxInt
+ v.Op = OpAMD64SHRWconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c & 31
+ v.AddArg(x)
+ return true
+ }
+ goto endd75ff1f9b3e9ec9c942a39b6179da1b3
+ endd75ff1f9b3e9ec9c942a39b6179da1b3:
+ ;
+ case OpAMD64SUBB:
+ // match: (SUBB x (MOVBconst [c]))
+ // cond:
+ // result: (SUBBconst x [c])
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVBconst {
+ goto end9ca5d2a70e2df1a5a3ed6786bce1f7b2
+ }
+ c := v.Args[1].AuxInt
+ v.Op = OpAMD64SUBBconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AuxInt = c
+ return true
+ }
+ goto end9ca5d2a70e2df1a5a3ed6786bce1f7b2
+ end9ca5d2a70e2df1a5a3ed6786bce1f7b2:
+ ;
+ // match: (SUBB (MOVBconst [c]) x)
+ // cond:
+ // result: (NEGB (SUBBconst <v.Type> x [c]))
+ {
+ if v.Args[0].Op != OpAMD64MOVBconst {
+ goto endc288755d69b04d24a6aac32a73956411
+ }
+ c := v.Args[0].AuxInt
+ x := v.Args[1]
+ v.Op = OpAMD64NEGB
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SUBBconst, TypeInvalid)
+ v0.Type = v.Type
+ v0.AddArg(x)
+ v0.AuxInt = c
+ v.AddArg(v0)
+ return true
+ }
+ goto endc288755d69b04d24a6aac32a73956411
+ endc288755d69b04d24a6aac32a73956411:
+ ;
+ case OpAMD64SUBBconst:
+ // match: (SUBBconst [c] (MOVBconst [d]))
+ // cond:
+ // result: (MOVBconst [c-d])
+ {
+ c := v.AuxInt
+ if v.Args[0].Op != OpAMD64MOVBconst {
+ goto end0e2d5c3e3c02001a20d5433daa9e8317
+ }
+ d := v.Args[0].AuxInt
+ v.Op = OpAMD64MOVBconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c - d
+ return true
+ }
+ goto end0e2d5c3e3c02001a20d5433daa9e8317
+ end0e2d5c3e3c02001a20d5433daa9e8317:
+ ;
+ case OpAMD64SUBL:
+ // match: (SUBL x (MOVLconst [c]))
+ // cond:
+ // result: (SUBLconst x [c])
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVLconst {
+ goto end178c1d6c86f9c16f6497586c2f7d8625
+ }
+ c := v.Args[1].AuxInt
+ v.Op = OpAMD64SUBLconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AuxInt = c
+ return true
+ }
+ goto end178c1d6c86f9c16f6497586c2f7d8625
+ end178c1d6c86f9c16f6497586c2f7d8625:
+ ;
+ // match: (SUBL (MOVLconst [c]) x)
+ // cond:
+ // result: (NEGL (SUBLconst <v.Type> x [c]))
+ {
+ if v.Args[0].Op != OpAMD64MOVLconst {
+ goto endb0efe6e15ec20486b849534a00483ae2
+ }
+ c := v.Args[0].AuxInt
+ x := v.Args[1]
+ v.Op = OpAMD64NEGL
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SUBLconst, TypeInvalid)
+ v0.Type = v.Type
+ v0.AddArg(x)
+ v0.AuxInt = c
+ v.AddArg(v0)
+ return true
+ }
+ goto endb0efe6e15ec20486b849534a00483ae2
+ endb0efe6e15ec20486b849534a00483ae2:
+ ;
+ case OpAMD64SUBLconst:
+ // match: (SUBLconst [c] (MOVLconst [d]))
+ // cond:
+ // result: (MOVLconst [c-d])
+ {
+ c := v.AuxInt
+ if v.Args[0].Op != OpAMD64MOVLconst {
+ goto endbe7466f3c09d9645544bdfc44c37c922
+ }
+ d := v.Args[0].AuxInt
+ v.Op = OpAMD64MOVLconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c - d
+ return true
+ }
+ goto endbe7466f3c09d9645544bdfc44c37c922
+ endbe7466f3c09d9645544bdfc44c37c922:
+ ;
+ case OpAMD64SUBQ:
+ // match: (SUBQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (SUBQconst x [c])
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVQconst {
+ goto end9bbb7b20824a498752c605942fad89c2
+ }
+ c := v.Args[1].AuxInt
+ if !(is32Bit(c)) {
+ goto end9bbb7b20824a498752c605942fad89c2
+ }
+ v.Op = OpAMD64SUBQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AuxInt = c
+ return true
+ }
+ goto end9bbb7b20824a498752c605942fad89c2
+ end9bbb7b20824a498752c605942fad89c2:
+ ;
+ // match: (SUBQ (MOVQconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (NEGQ (SUBQconst <v.Type> x [c]))
+ {
+ if v.Args[0].Op != OpAMD64MOVQconst {
+ goto end8beb96de3efee9206d1bd4b7d777d2cb
+ }
+ c := v.Args[0].AuxInt
+ x := v.Args[1]
+ if !(is32Bit(c)) {
+ goto end8beb96de3efee9206d1bd4b7d777d2cb
+ }
+ v.Op = OpAMD64NEGQ
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SUBQconst, TypeInvalid)
+ v0.Type = v.Type
+ v0.AddArg(x)
+ v0.AuxInt = c
+ v.AddArg(v0)
+ return true
+ }
+ goto end8beb96de3efee9206d1bd4b7d777d2cb
+ end8beb96de3efee9206d1bd4b7d777d2cb:
+ ;
+ case OpAMD64SUBQconst:
+ // match: (SUBQconst [c] (MOVQconst [d]))
+ // cond:
+ // result: (MOVQconst [c-d])
+ {
+ c := v.AuxInt
+ if v.Args[0].Op != OpAMD64MOVQconst {
+ goto end96c09479fb3c043e875d89d3eb92f1d8
+ }
+ d := v.Args[0].AuxInt
+ v.Op = OpAMD64MOVQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c - d
+ return true
+ }
+ goto end96c09479fb3c043e875d89d3eb92f1d8
+ end96c09479fb3c043e875d89d3eb92f1d8:
+ ;
+ case OpAMD64SUBW:
+ // match: (SUBW x (MOVWconst [c]))
+ // cond:
+ // result: (SUBWconst x [c])
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVWconst {
+ goto end135aa9100b2f61d58b37cede37b63731
+ }
+ c := v.Args[1].AuxInt
+ v.Op = OpAMD64SUBWconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AuxInt = c
+ return true
+ }
+ goto end135aa9100b2f61d58b37cede37b63731
+ end135aa9100b2f61d58b37cede37b63731:
+ ;
+ // match: (SUBW (MOVWconst [c]) x)
+ // cond:
+ // result: (NEGW (SUBWconst <v.Type> x [c]))
+ {
+ if v.Args[0].Op != OpAMD64MOVWconst {
+ goto end44d23f7e65a4b1c42d0e6463f8e493b6
+ }
+ c := v.Args[0].AuxInt
+ x := v.Args[1]
+ v.Op = OpAMD64NEGW
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64SUBWconst, TypeInvalid)
+ v0.Type = v.Type
+ v0.AddArg(x)
+ v0.AuxInt = c
+ v.AddArg(v0)
+ return true
+ }
+ goto end44d23f7e65a4b1c42d0e6463f8e493b6
+ end44d23f7e65a4b1c42d0e6463f8e493b6:
+ ;
+ case OpAMD64SUBWconst:
+ // match: (SUBWconst [c] (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [c-d])
+ {
+ c := v.AuxInt
+ if v.Args[0].Op != OpAMD64MOVWconst {
+ goto end0e5079577fcf00f5925291dbd68306aa
+ }
+ d := v.Args[0].AuxInt
+ v.Op = OpAMD64MOVWconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c - d
+ return true
+ }
+ goto end0e5079577fcf00f5925291dbd68306aa
+ end0e5079577fcf00f5925291dbd68306aa:
+ ;
+ case OpSignExt16to32:
+ // match: (SignExt16to32 x)
+ // cond:
+ // result: (MOVWQSX x)
+ {
+ x := v.Args[0]
+ v.Op = OpAMD64MOVWQSX
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto end21e4271c2b48a5aa3561ccfa8fa67cd9
+ end21e4271c2b48a5aa3561ccfa8fa67cd9:
+ ;
+ case OpSignExt16to64:
+ // match: (SignExt16to64 x)
+ // cond:
+ // result: (MOVWQSX x)
+ {
+ x := v.Args[0]
+ v.Op = OpAMD64MOVWQSX
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto endc6d242ee3a3e195ef0f9e8dae47ada75
+ endc6d242ee3a3e195ef0f9e8dae47ada75:
+ ;
+ case OpSignExt32to64:
+ // match: (SignExt32to64 x)
+ // cond:
+ // result: (MOVLQSX x)
+ {
+ x := v.Args[0]
+ v.Op = OpAMD64MOVLQSX
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto endb9f1a8b2d01eee44964a71a01bca165c
+ endb9f1a8b2d01eee44964a71a01bca165c:
+ ;
+ case OpSignExt8to16:
+ // match: (SignExt8to16 x)
+ // cond:
+ // result: (MOVBQSX x)
+ {
+ x := v.Args[0]
+ v.Op = OpAMD64MOVBQSX
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto end372869f08e147404b80634e5f83fd506
+ end372869f08e147404b80634e5f83fd506:
+ ;
+ case OpSignExt8to32:
+ // match: (SignExt8to32 x)
+ // cond:
+ // result: (MOVBQSX x)
+ {
+ x := v.Args[0]
+ v.Op = OpAMD64MOVBQSX
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto end913e3575e5b4cf7f60585c108db40464
+ end913e3575e5b4cf7f60585c108db40464:
+ ;
+ case OpSignExt8to64:
+ // match: (SignExt8to64 x)
+ // cond:
+ // result: (MOVBQSX x)
+ {
+ x := v.Args[0]
+ v.Op = OpAMD64MOVBQSX
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto endcef6d6001d3f25cf5dacee11a46e5c8c
+ endcef6d6001d3f25cf5dacee11a46e5c8c:
+ ;
+ case OpStaticCall:
+ // match: (StaticCall [argwid] {target} mem)
+ // cond:
+ // result: (CALLstatic [argwid] {target} mem)
+ {
+ argwid := v.AuxInt
+ target := v.Aux
+ mem := v.Args[0]
+ v.Op = OpAMD64CALLstatic
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = argwid
+ v.Aux = target
+ v.AddArg(mem)
+ return true
+ }
+ goto end32c5cbec813d1c2ae94fc9b1090e4b2a
+ end32c5cbec813d1c2ae94fc9b1090e4b2a:
+ ;
+ case OpStore:
+ // match: (Store ptr val mem)
+ // cond: (is64BitInt(val.Type) || isPtr(val.Type))
+ // result: (MOVQstore ptr val mem)
+ {
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is64BitInt(val.Type) || isPtr(val.Type)) {
+ goto endbaeb60123806948cd2433605820d5af1
+ }
+ v.Op = OpAMD64MOVQstore
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ goto endbaeb60123806948cd2433605820d5af1
+ endbaeb60123806948cd2433605820d5af1:
+ ;
+ // match: (Store ptr val mem)
+ // cond: is32BitInt(val.Type)
+ // result: (MOVLstore ptr val mem)
+ {
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32BitInt(val.Type)) {
+ goto end582e895008657c728c141c6b95070de7
+ }
+ v.Op = OpAMD64MOVLstore
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ goto end582e895008657c728c141c6b95070de7
+ end582e895008657c728c141c6b95070de7:
+ ;
+ // match: (Store ptr val mem)
+ // cond: is16BitInt(val.Type)
+ // result: (MOVWstore ptr val mem)
+ {
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is16BitInt(val.Type)) {
+ goto enda3f6a985b6ebb277665f80ad30b178df
+ }
+ v.Op = OpAMD64MOVWstore
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ goto enda3f6a985b6ebb277665f80ad30b178df
+ enda3f6a985b6ebb277665f80ad30b178df:
+ ;
+ // match: (Store ptr val mem)
+ // cond: is8BitInt(val.Type)
+ // result: (MOVBstore ptr val mem)
+ {
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is8BitInt(val.Type)) {
+ goto ende2dee0bc82f631e3c6b0031bf8d224c1
+ }
+ v.Op = OpAMD64MOVBstore
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ goto ende2dee0bc82f631e3c6b0031bf8d224c1
+ ende2dee0bc82f631e3c6b0031bf8d224c1:
+ ;
+ // match: (Store ptr val mem)
+ // cond: val.Type.IsBoolean()
+ // result: (MOVBstore ptr val mem)
+ {
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(val.Type.IsBoolean()) {
+ goto end6f343b676bf49740054e459f972b24f5
+ }
+ v.Op = OpAMD64MOVBstore
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ goto end6f343b676bf49740054e459f972b24f5
+ end6f343b676bf49740054e459f972b24f5:
+ ;
+ case OpSub16:
+ // match: (Sub16 x y)
+ // cond:
+ // result: (SUBW x y)
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SUBW
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ goto end54adc5de883c0460ca71c6ee464d4244
+ end54adc5de883c0460ca71c6ee464d4244:
+ ;
+ case OpSub32:
+ // match: (Sub32 x y)
+ // cond:
+ // result: (SUBL x y)
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SUBL
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ goto enddc3a2a488bda8c5856f93343e5ffe5f8
+ enddc3a2a488bda8c5856f93343e5ffe5f8:
+ ;
+ case OpSub64:
+ // match: (Sub64 x y)
+ // cond:
+ // result: (SUBQ x y)
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SUBQ
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ goto endd88d5646309fd9174584888ecc8aca2c
+ endd88d5646309fd9174584888ecc8aca2c:
+ ;
+ case OpSub8:
+ // match: (Sub8 x y)
+ // cond:
+ // result: (SUBB x y)
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64SUBB
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ goto end7d33bf9bdfa505f96b930563eca7955f
+ end7d33bf9bdfa505f96b930563eca7955f:
+ ;
+ case OpTrunc16to8:
+ // match: (Trunc16to8 x)
+ // cond:
+ // result: (Copy x)
+ {
+ x := v.Args[0]
+ v.Op = OpCopy
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto end18a19bd8418f9079595720df0874e90a
+ end18a19bd8418f9079595720df0874e90a:
+ ;
+ case OpTrunc32to16:
+ // match: (Trunc32to16 x)
+ // cond:
+ // result: (Copy x)
+ {
+ x := v.Args[0]
+ v.Op = OpCopy
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto end217b00780a8b1139d068680ed9d61cb0
+ end217b00780a8b1139d068680ed9d61cb0:
+ ;
+ case OpTrunc32to8:
+ // match: (Trunc32to8 x)
+ // cond:
+ // result: (Copy x)
+ {
+ x := v.Args[0]
+ v.Op = OpCopy
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto end05d10e0a1c707d66b11b2d342634efd0
+ end05d10e0a1c707d66b11b2d342634efd0:
+ ;
+ case OpTrunc64to16:
+ // match: (Trunc64to16 x)
+ // cond:
+ // result: (Copy x)
+ {
+ x := v.Args[0]
+ v.Op = OpCopy
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto end4623ae65eb76feca3936354f22d45fa7
+ end4623ae65eb76feca3936354f22d45fa7:
+ ;
+ case OpTrunc64to32:
+ // match: (Trunc64to32 x)
+ // cond:
+ // result: (Copy x)
+ {
+ x := v.Args[0]
+ v.Op = OpCopy
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto end93e0b16b58a717a3e4f5c2ca67b6be87
+ end93e0b16b58a717a3e4f5c2ca67b6be87:
+ ;
+ case OpTrunc64to8:
+ // match: (Trunc64to8 x)
+ // cond:
+ // result: (Copy x)
+ {
+ x := v.Args[0]
+ v.Op = OpCopy
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto endc4c1a1b86edd0f082339d17eb5096ad0
+ endc4c1a1b86edd0f082339d17eb5096ad0:
+ ;
+ case OpAMD64XORB:
+ // match: (XORB x (MOVBconst [c]))
+ // cond:
+ // result: (XORBconst [c] x)
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVBconst {
+ goto enda9ed9fdd115ffdffa8127c007c34d7b7
+ }
+ c := v.Args[1].AuxInt
+ v.Op = OpAMD64XORBconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto enda9ed9fdd115ffdffa8127c007c34d7b7
+ enda9ed9fdd115ffdffa8127c007c34d7b7:
+ ;
+ // match: (XORB (MOVBconst [c]) x)
+ // cond:
+ // result: (XORBconst [c] x)
+ {
+ if v.Args[0].Op != OpAMD64MOVBconst {
+ goto endb02a07d9dc7b802c59f013116e952f3f
+ }
+ c := v.Args[0].AuxInt
+ x := v.Args[1]
+ v.Op = OpAMD64XORBconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto endb02a07d9dc7b802c59f013116e952f3f
+ endb02a07d9dc7b802c59f013116e952f3f:
+ ;
+ case OpAMD64XORBconst:
+ // match: (XORBconst [c] (MOVBconst [d]))
+ // cond:
+ // result: (MOVBconst [c^d])
+ {
+ c := v.AuxInt
+ if v.Args[0].Op != OpAMD64MOVBconst {
+ goto end6d8d1b612af9d253605c8bc69b822903
+ }
+ d := v.Args[0].AuxInt
+ v.Op = OpAMD64MOVBconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c ^ d
+ return true
+ }
+ goto end6d8d1b612af9d253605c8bc69b822903
+ end6d8d1b612af9d253605c8bc69b822903:
+ ;
+ case OpAMD64XORL:
+ // match: (XORL x (MOVLconst [c]))
+ // cond:
+ // result: (XORLconst [c] x)
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVLconst {
+ goto enda9459d509d3416da67d13a22dd074a9c
+ }
+ c := v.Args[1].AuxInt
+ v.Op = OpAMD64XORLconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto enda9459d509d3416da67d13a22dd074a9c
+ enda9459d509d3416da67d13a22dd074a9c:
+ ;
+ // match: (XORL (MOVLconst [c]) x)
+ // cond:
+ // result: (XORLconst [c] x)
+ {
+ if v.Args[0].Op != OpAMD64MOVLconst {
+ goto end9c1a0af00eeadd8aa325e55f1f3fb89c
+ }
+ c := v.Args[0].AuxInt
+ x := v.Args[1]
+ v.Op = OpAMD64XORLconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto end9c1a0af00eeadd8aa325e55f1f3fb89c
+ end9c1a0af00eeadd8aa325e55f1f3fb89c:
+ ;
+ case OpAMD64XORLconst:
+ // match: (XORLconst [c] (MOVLconst [d]))
+ // cond:
+ // result: (MOVLconst [c^d])
+ {
+ c := v.AuxInt
+ if v.Args[0].Op != OpAMD64MOVLconst {
+ goto end71238075b10b68a226903cc453c4715c
+ }
+ d := v.Args[0].AuxInt
+ v.Op = OpAMD64MOVLconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c ^ d
+ return true
+ }
+ goto end71238075b10b68a226903cc453c4715c
+ end71238075b10b68a226903cc453c4715c:
+ ;
+ case OpAMD64XORQ:
+ // match: (XORQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (XORQconst [c] x)
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVQconst {
+ goto end452341f950062e0483f16438fb9ec500
+ }
+ c := v.Args[1].AuxInt
+ if !(is32Bit(c)) {
+ goto end452341f950062e0483f16438fb9ec500
+ }
+ v.Op = OpAMD64XORQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto end452341f950062e0483f16438fb9ec500
+ end452341f950062e0483f16438fb9ec500:
+ ;
+ // match: (XORQ (MOVQconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (XORQconst [c] x)
+ {
+ if v.Args[0].Op != OpAMD64MOVQconst {
+ goto endd221a7e3daaaaa29ee385ad36e061b57
+ }
+ c := v.Args[0].AuxInt
+ x := v.Args[1]
+ if !(is32Bit(c)) {
+ goto endd221a7e3daaaaa29ee385ad36e061b57
+ }
+ v.Op = OpAMD64XORQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto endd221a7e3daaaaa29ee385ad36e061b57
+ endd221a7e3daaaaa29ee385ad36e061b57:
+ ;
+ case OpAMD64XORQconst:
+ // match: (XORQconst [c] (MOVQconst [d]))
+ // cond:
+ // result: (MOVQconst [c^d])
+ {
+ c := v.AuxInt
+ if v.Args[0].Op != OpAMD64MOVQconst {
+ goto end3f404d4f07362319fbad2e1ba0827a9f
+ }
+ d := v.Args[0].AuxInt
+ v.Op = OpAMD64MOVQconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c ^ d
+ return true
+ }
+ goto end3f404d4f07362319fbad2e1ba0827a9f
+ end3f404d4f07362319fbad2e1ba0827a9f:
+ ;
+ case OpAMD64XORW:
+ // match: (XORW x (MOVWconst [c]))
+ // cond:
+ // result: (XORWconst [c] x)
+ {
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVWconst {
+ goto end2ca109efd66c221a5691a4da95ec6c67
+ }
+ c := v.Args[1].AuxInt
+ v.Op = OpAMD64XORWconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto end2ca109efd66c221a5691a4da95ec6c67
+ end2ca109efd66c221a5691a4da95ec6c67:
+ ;
+ // match: (XORW (MOVWconst [c]) x)
+ // cond:
+ // result: (XORWconst [c] x)
+ {
+ if v.Args[0].Op != OpAMD64MOVWconst {
+ goto end51ee62a06d4301e5a4aed7a6639b1d53
+ }
+ c := v.Args[0].AuxInt
+ x := v.Args[1]
+ v.Op = OpAMD64XORWconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ goto end51ee62a06d4301e5a4aed7a6639b1d53
+ end51ee62a06d4301e5a4aed7a6639b1d53:
+ ;
+ case OpAMD64XORWconst:
+ // match: (XORWconst [c] (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [c^d])
+ {
+ c := v.AuxInt
+ if v.Args[0].Op != OpAMD64MOVWconst {
+ goto ende24881ccdfa8486c4593fd9aa5df1ed6
+ }
+ d := v.Args[0].AuxInt
+ v.Op = OpAMD64MOVWconst
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c ^ d
+ return true
+ }
+ goto ende24881ccdfa8486c4593fd9aa5df1ed6
+ ende24881ccdfa8486c4593fd9aa5df1ed6:
+ ;
+ case OpXor16:
+ // match: (Xor16 x y)
+ // cond:
+ // result: (XORW x y)
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64XORW
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ goto end20efdd5dfd5130abf818de5546a991a0
+ end20efdd5dfd5130abf818de5546a991a0:
+ ;
+ case OpXor32:
+ // match: (Xor32 x y)
+ // cond:
+ // result: (XORL x y)
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64XORL
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ goto end9da6bce98b437e2609488346116a75d8
+ end9da6bce98b437e2609488346116a75d8:
+ ;
+ case OpXor64:
+ // match: (Xor64 x y)
+ // cond:
+ // result: (XORQ x y)
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64XORQ
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ goto endc88cd189c2a6f07ecff324ed94809f8f
+ endc88cd189c2a6f07ecff324ed94809f8f:
+ ;
+ case OpXor8:
+ // match: (Xor8 x y)
+ // cond:
+ // result: (XORB x y)
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.Op = OpAMD64XORB
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ goto end50f4434ef96916d3e65ad3cc236d1723
+ end50f4434ef96916d3e65ad3cc236d1723:
+ ;
+ case OpZero:
+ // match: (Zero [0] _ mem)
+ // cond:
+ // result: (Copy mem)
+ {
+ if v.AuxInt != 0 {
+ goto endb85a34a7d102b0e0d801454f437db5bf
+ }
+ mem := v.Args[1]
+ v.Op = OpCopy
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(mem)
+ return true
+ }
+ goto endb85a34a7d102b0e0d801454f437db5bf
+ endb85a34a7d102b0e0d801454f437db5bf:
+ ;
+ // match: (Zero [1] destptr mem)
+ // cond:
+ // result: (MOVBstore destptr (MOVBconst <TypeInt8> [0]) mem)
+ {
+ if v.AuxInt != 1 {
+ goto end16839f51d2e9cf9548f216848406bd97
+ }
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ v.Op = OpAMD64MOVBstore
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(destptr)
+ v0 := v.Block.NewValue0(v.Line, OpAMD64MOVBconst, TypeInvalid)
+ v0.Type = TypeInt8
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ goto end16839f51d2e9cf9548f216848406bd97
+ end16839f51d2e9cf9548f216848406bd97:
+ ;
+ // match: (Zero [2] destptr mem)
+ // cond:
+ // result: (MOVWstore destptr (MOVWconst <TypeInt16> [0]) mem)
+ {
+ if v.AuxInt != 2 {
+ goto enddc4a090329efde9ca19983ad18174cbb
+ }
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ v.Op = OpAMD64MOVWstore
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(destptr)
+ v0 := v.Block.NewValue0(v.Line, OpAMD64MOVWconst, TypeInvalid)
+ v0.Type = TypeInt16
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ goto enddc4a090329efde9ca19983ad18174cbb
+ enddc4a090329efde9ca19983ad18174cbb:
+ ;
+ // match: (Zero [4] destptr mem)
+ // cond:
+ // result: (MOVLstore destptr (MOVLconst <TypeInt32> [0]) mem)
+ {
+ if v.AuxInt != 4 {
+ goto end365a027b67399ad8d5d2d5eca847f7d8
+ }
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ v.Op = OpAMD64MOVLstore
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(destptr)
+ v0 := v.Block.NewValue0(v.Line, OpAMD64MOVLconst, TypeInvalid)
+ v0.Type = TypeInt32
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ goto end365a027b67399ad8d5d2d5eca847f7d8
+ end365a027b67399ad8d5d2d5eca847f7d8:
+ ;
+ // match: (Zero [8] destptr mem)
+ // cond:
+ // result: (MOVQstore destptr (MOVQconst <TypeInt64> [0]) mem)
+ {
+ if v.AuxInt != 8 {
+ goto end5808a5e9c68555a82c3514db39017e56
+ }
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ v.Op = OpAMD64MOVQstore
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(destptr)
+ v0 := v.Block.NewValue0(v.Line, OpAMD64MOVQconst, TypeInvalid)
+ v0.Type = TypeInt64
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ goto end5808a5e9c68555a82c3514db39017e56
+ end5808a5e9c68555a82c3514db39017e56:
+ ;
+ // match: (Zero [size] destptr mem)
+ // cond: size < 4*8
+ // result: (MOVXzero [size] destptr mem)
+ {
+ size := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(size < 4*8) {
+ goto endf0a22f1506977610ac0a310eee152075
+ }
+ v.Op = OpAMD64MOVXzero
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = size
+ v.AddArg(destptr)
+ v.AddArg(mem)
+ return true
+ }
+ goto endf0a22f1506977610ac0a310eee152075
+ endf0a22f1506977610ac0a310eee152075:
+ ;
+ // match: (Zero [size] destptr mem)
+ // cond: size >= 4*8
+ // result: (Zero [size%8] (OffPtr <TypeUInt64> [size-(size%8)] destptr) (REPSTOSQ <TypeMem> destptr (MOVQconst <TypeUInt64> [size/8]) mem))
+ {
+ size := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(size >= 4*8) {
+ goto endb3058a90f909821d5689fb358519828b
+ }
+ v.Op = OpZero
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = size % 8
+ v0 := v.Block.NewValue0(v.Line, OpOffPtr, TypeInvalid)
+ v0.Type = TypeUInt64
+ v0.AuxInt = size - (size % 8)
+ v0.AddArg(destptr)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpAMD64REPSTOSQ, TypeInvalid)
+ v1.Type = TypeMem
+ v1.AddArg(destptr)
+ v2 := v.Block.NewValue0(v.Line, OpAMD64MOVQconst, TypeInvalid)
+ v2.Type = TypeUInt64
+ v2.AuxInt = size / 8
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ goto endb3058a90f909821d5689fb358519828b
+ endb3058a90f909821d5689fb358519828b:
+ ;
+ case OpZeroExt16to32:
+ // match: (ZeroExt16to32 x)
+ // cond:
+ // result: (MOVWQZX x)
+ {
+ x := v.Args[0]
+ v.Op = OpAMD64MOVWQZX
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto endbfff79412a2cc96095069c66812844b4
+ endbfff79412a2cc96095069c66812844b4:
+ ;
+ case OpZeroExt16to64:
+ // match: (ZeroExt16to64 x)
+ // cond:
+ // result: (MOVWQZX x)
+ {
+ x := v.Args[0]
+ v.Op = OpAMD64MOVWQZX
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto end7a40262c5c856101058d2bd518ed0910
+ end7a40262c5c856101058d2bd518ed0910:
+ ;
+ case OpZeroExt32to64:
+ // match: (ZeroExt32to64 x)
+ // cond:
+ // result: (MOVLQZX x)
+ {
+ x := v.Args[0]
+ v.Op = OpAMD64MOVLQZX
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto enddf83bdc8cc6c5673a9ef7aca7affe45a
+ enddf83bdc8cc6c5673a9ef7aca7affe45a:
+ ;
+ case OpZeroExt8to16:
+ // match: (ZeroExt8to16 x)
+ // cond:
+ // result: (MOVBQZX x)
+ {
+ x := v.Args[0]
+ v.Op = OpAMD64MOVBQZX
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto endd03d53d2a585727e4107ae1a3cc55479
+ endd03d53d2a585727e4107ae1a3cc55479:
+ ;
+ case OpZeroExt8to32:
+ // match: (ZeroExt8to32 x)
+ // cond:
+ // result: (MOVBQZX x)
+ {
+ x := v.Args[0]
+ v.Op = OpAMD64MOVBQZX
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto endcbd33e965b3dab14fced5ae93d8949de
+ endcbd33e965b3dab14fced5ae93d8949de:
+ ;
+ case OpZeroExt8to64:
+ // match: (ZeroExt8to64 x)
+ // cond:
+ // result: (MOVBQZX x)
+ {
+ x := v.Args[0]
+ v.Op = OpAMD64MOVBQZX
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(x)
+ return true
+ }
+ goto end63ae7cc15db9d15189b2f1342604b2cb
+ end63ae7cc15db9d15189b2f1342604b2cb:
+ }
+ return false
+}
+// rewriteBlockAMD64 applies the machine-generated AMD64 block rewrite rules
+// to b and reports whether any rule fired (mutating b.Kind, b.Control and
+// b.Succs in place). Each rule is wrapped in its own block; a failed match
+// jumps to that rule's unique end label (a hash of the rule text) so control
+// falls through to the next candidate rule for the same block kind.
+// NOTE(review): this file appears to be autogenerated like rewritegeneric.go
+// below — change the rules under gen/ and regenerate rather than hand-editing.
+func rewriteBlockAMD64(b *Block) bool {
+	switch b.Kind {
+	case BlockAMD64EQ:
+		// match: (EQ (InvertFlags cmp) yes no)
+		// cond:
+		// result: (EQ cmp yes no)
+		{
+			v := b.Control
+			if v.Op != OpAMD64InvertFlags {
+				goto end6b8e9afc73b1c4d528f31a60d2575fae
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64EQ
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		goto end6b8e9afc73b1c4d528f31a60d2575fae
+	end6b8e9afc73b1c4d528f31a60d2575fae:
+		;
+	case BlockAMD64GE:
+		// match: (GE (InvertFlags cmp) yes no)
+		// cond:
+		// result: (LE cmp yes no)
+		{
+			v := b.Control
+			if v.Op != OpAMD64InvertFlags {
+				goto end0610f000a6988ee8310307ec2ea138f8
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64LE
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		goto end0610f000a6988ee8310307ec2ea138f8
+	end0610f000a6988ee8310307ec2ea138f8:
+		;
+	case BlockAMD64GT:
+		// match: (GT (InvertFlags cmp) yes no)
+		// cond:
+		// result: (LT cmp yes no)
+		{
+			v := b.Control
+			if v.Op != OpAMD64InvertFlags {
+				goto endf60c0660b6a8aa9565c97fc87f04eb34
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64LT
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		goto endf60c0660b6a8aa9565c97fc87f04eb34
+	endf60c0660b6a8aa9565c97fc87f04eb34:
+		;
+	case BlockIf:
+		// match: (If (SETL cmp) yes no)
+		// cond:
+		// result: (LT cmp yes no)
+		{
+			v := b.Control
+			if v.Op != OpAMD64SETL {
+				goto ende4d36879bb8e1bd8facaa8c91ba99dcc
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64LT
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		goto ende4d36879bb8e1bd8facaa8c91ba99dcc
+	ende4d36879bb8e1bd8facaa8c91ba99dcc:
+		;
+		// match: (If (SETLE cmp) yes no)
+		// cond:
+		// result: (LE cmp yes no)
+		{
+			v := b.Control
+			if v.Op != OpAMD64SETLE {
+				goto end40df18679690e8f9005d8642fab44654
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64LE
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		goto end40df18679690e8f9005d8642fab44654
+	end40df18679690e8f9005d8642fab44654:
+		;
+		// match: (If (SETG cmp) yes no)
+		// cond:
+		// result: (GT cmp yes no)
+		{
+			v := b.Control
+			if v.Op != OpAMD64SETG {
+				goto endb1faff07a84ae08a4b05a4a7e71eb740
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64GT
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		goto endb1faff07a84ae08a4b05a4a7e71eb740
+	endb1faff07a84ae08a4b05a4a7e71eb740:
+		;
+		// match: (If (SETGE cmp) yes no)
+		// cond:
+		// result: (GE cmp yes no)
+		{
+			v := b.Control
+			if v.Op != OpAMD64SETGE {
+				goto enda9211ccfa5b0ab8eafc0017630c542b6
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64GE
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		goto enda9211ccfa5b0ab8eafc0017630c542b6
+	enda9211ccfa5b0ab8eafc0017630c542b6:
+		;
+		// match: (If (SETEQ cmp) yes no)
+		// cond:
+		// result: (EQ cmp yes no)
+		{
+			v := b.Control
+			if v.Op != OpAMD64SETEQ {
+				goto endf113deb06abc88613840e6282942921a
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64EQ
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		goto endf113deb06abc88613840e6282942921a
+	endf113deb06abc88613840e6282942921a:
+		;
+		// match: (If (SETNE cmp) yes no)
+		// cond:
+		// result: (NE cmp yes no)
+		{
+			v := b.Control
+			if v.Op != OpAMD64SETNE {
+				goto end5ff1403aaf7b543bc454177ab584e4f5
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64NE
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		goto end5ff1403aaf7b543bc454177ab584e4f5
+	end5ff1403aaf7b543bc454177ab584e4f5:
+		;
+		// match: (If (SETB cmp) yes no)
+		// cond:
+		// result: (ULT cmp yes no)
+		{
+			v := b.Control
+			if v.Op != OpAMD64SETB {
+				goto end04935012db9defeafceef8175f803ea2
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64ULT
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		goto end04935012db9defeafceef8175f803ea2
+	end04935012db9defeafceef8175f803ea2:
+		;
+		// match: (If (SETBE cmp) yes no)
+		// cond:
+		// result: (ULE cmp yes no)
+		{
+			v := b.Control
+			if v.Op != OpAMD64SETBE {
+				goto endfe0178f6f4406945ca8966817d04be60
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64ULE
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		goto endfe0178f6f4406945ca8966817d04be60
+	endfe0178f6f4406945ca8966817d04be60:
+		;
+		// match: (If (SETA cmp) yes no)
+		// cond:
+		// result: (UGT cmp yes no)
+		{
+			v := b.Control
+			if v.Op != OpAMD64SETA {
+				goto endbd22a7d56a98d85e4e132ff952dae262
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64UGT
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		goto endbd22a7d56a98d85e4e132ff952dae262
+	endbd22a7d56a98d85e4e132ff952dae262:
+		;
+		// match: (If (SETAE cmp) yes no)
+		// cond:
+		// result: (UGE cmp yes no)
+		{
+			v := b.Control
+			if v.Op != OpAMD64SETAE {
+				goto end9bea9963c3c5dfb97249a5feb8287f94
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64UGE
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		goto end9bea9963c3c5dfb97249a5feb8287f94
+	end9bea9963c3c5dfb97249a5feb8287f94:
+		;
+		// match: (If cond yes no)
+		// cond: cond.Op == OpAMD64MOVBload
+		// result: (NE (TESTB <TypeFlags> cond cond) yes no)
+		{
+			v := b.Control
+			cond := v
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			if !(cond.Op == OpAMD64MOVBload) {
+				goto end7e22019fb0effc80f85c05ea30bdb5d9
+			}
+			b.Kind = BlockAMD64NE
+			v0 := v.Block.NewValue0(v.Line, OpAMD64TESTB, TypeInvalid)
+			v0.Type = TypeFlags
+			v0.AddArg(cond)
+			v0.AddArg(cond)
+			b.Control = v0
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		goto end7e22019fb0effc80f85c05ea30bdb5d9
+	end7e22019fb0effc80f85c05ea30bdb5d9:
+		;
+	case BlockAMD64LE:
+		// match: (LE (InvertFlags cmp) yes no)
+		// cond:
+		// result: (GE cmp yes no)
+		{
+			v := b.Control
+			if v.Op != OpAMD64InvertFlags {
+				goto end0d49d7d087fe7578e8015cf13dae37e3
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64GE
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		goto end0d49d7d087fe7578e8015cf13dae37e3
+	end0d49d7d087fe7578e8015cf13dae37e3:
+		;
+	case BlockAMD64LT:
+		// match: (LT (InvertFlags cmp) yes no)
+		// cond:
+		// result: (GT cmp yes no)
+		{
+			v := b.Control
+			if v.Op != OpAMD64InvertFlags {
+				goto end6a408cde0fee0ae7b7da0443c8d902bf
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64GT
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		goto end6a408cde0fee0ae7b7da0443c8d902bf
+	end6a408cde0fee0ae7b7da0443c8d902bf:
+		;
+	case BlockAMD64NE:
+		// match: (NE (InvertFlags cmp) yes no)
+		// cond:
+		// result: (NE cmp yes no)
+		{
+			v := b.Control
+			if v.Op != OpAMD64InvertFlags {
+				goto end713001aba794e50b582fbff930e110af
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64NE
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		goto end713001aba794e50b582fbff930e110af
+	end713001aba794e50b582fbff930e110af:
+		;
+	case BlockAMD64UGE:
+		// match: (UGE (InvertFlags cmp) yes no)
+		// cond:
+		// result: (ULE cmp yes no)
+		{
+			v := b.Control
+			if v.Op != OpAMD64InvertFlags {
+				goto ende3e4ddc183ca1a46598b11c2d0d13966
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64ULE
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		goto ende3e4ddc183ca1a46598b11c2d0d13966
+	ende3e4ddc183ca1a46598b11c2d0d13966:
+		;
+	case BlockAMD64UGT:
+		// match: (UGT (InvertFlags cmp) yes no)
+		// cond:
+		// result: (ULT cmp yes no)
+		{
+			v := b.Control
+			if v.Op != OpAMD64InvertFlags {
+				goto end49818853af2e5251175d06c62768cae7
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64ULT
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		goto end49818853af2e5251175d06c62768cae7
+	end49818853af2e5251175d06c62768cae7:
+		;
+	case BlockAMD64ULE:
+		// match: (ULE (InvertFlags cmp) yes no)
+		// cond:
+		// result: (UGE cmp yes no)
+		{
+			v := b.Control
+			if v.Op != OpAMD64InvertFlags {
+				goto endd6698aac0d67261293b558c95ea17b4f
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64UGE
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		goto endd6698aac0d67261293b558c95ea17b4f
+	endd6698aac0d67261293b558c95ea17b4f:
+		;
+	case BlockAMD64ULT:
+		// match: (ULT (InvertFlags cmp) yes no)
+		// cond:
+		// result: (UGT cmp yes no)
+		{
+			v := b.Control
+			if v.Op != OpAMD64InvertFlags {
+				goto end35105dbc9646f02577167e45ae2f2fd2
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64UGT
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		goto end35105dbc9646f02577167e45ae2f2fd2
+	end35105dbc9646f02577167e45ae2f2fd2:
+		;
+	}
+	// No rule matched; b is unchanged.
+	return false
+}
--- /dev/null
+// autogenerated from gen/generic.rules: do not edit!
+// generated with: cd gen; go run *.go
+package ssa
+
+func rewriteValuegeneric(v *Value, config *Config) bool {
+ switch v.Op {
+ case OpAdd64:
+ // match: (Add64 (Const64 [c]) (Const64 [d]))
+ // cond:
+ // result: (Const64 [c+d])
+ {
+ if v.Args[0].Op != OpConst64 {
+ goto end8c46df6f85a11cb1d594076b0e467908
+ }
+ c := v.Args[0].AuxInt
+ if v.Args[1].Op != OpConst64 {
+ goto end8c46df6f85a11cb1d594076b0e467908
+ }
+ d := v.Args[1].AuxInt
+ v.Op = OpConst64
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c + d
+ return true
+ }
+ goto end8c46df6f85a11cb1d594076b0e467908
+ end8c46df6f85a11cb1d594076b0e467908:
+ ;
+ case OpAddPtr:
+ // match: (AddPtr (ConstPtr [c]) (ConstPtr [d]))
+ // cond:
+ // result: (ConstPtr [c+d])
+ {
+ if v.Args[0].Op != OpConstPtr {
+ goto end145c1aec793b2befff34bc8983b48a38
+ }
+ c := v.Args[0].AuxInt
+ if v.Args[1].Op != OpConstPtr {
+ goto end145c1aec793b2befff34bc8983b48a38
+ }
+ d := v.Args[1].AuxInt
+ v.Op = OpConstPtr
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c + d
+ return true
+ }
+ goto end145c1aec793b2befff34bc8983b48a38
+ end145c1aec793b2befff34bc8983b48a38:
+ ;
+ case OpArrayIndex:
+ // match: (ArrayIndex (Load ptr mem) idx)
+ // cond:
+ // result: (Load (PtrIndex <v.Type.PtrTo()> ptr idx) mem)
+ {
+ if v.Args[0].Op != OpLoad {
+ goto end4894dd7b58383fee5f8a92be08437c33
+ }
+ ptr := v.Args[0].Args[0]
+ mem := v.Args[0].Args[1]
+ idx := v.Args[1]
+ v.Op = OpLoad
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpPtrIndex, TypeInvalid)
+ v0.Type = v.Type.PtrTo()
+ v0.AddArg(ptr)
+ v0.AddArg(idx)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ goto end4894dd7b58383fee5f8a92be08437c33
+ end4894dd7b58383fee5f8a92be08437c33:
+ ;
+ case OpConstString:
+ // match: (ConstString {s})
+ // cond:
+ // result: (StringMake (Addr <TypeBytePtr> {config.fe.StringData(s.(string))} (SB <config.Uintptr>)) (ConstPtr <config.Uintptr> [int64(len(s.(string)))]))
+ {
+ s := v.Aux
+ v.Op = OpStringMake
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAddr, TypeInvalid)
+ v0.Type = TypeBytePtr
+ v0.Aux = config.fe.StringData(s.(string))
+ v1 := v.Block.NewValue0(v.Line, OpSB, TypeInvalid)
+ v1.Type = config.Uintptr
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := v.Block.NewValue0(v.Line, OpConstPtr, TypeInvalid)
+ v2.Type = config.Uintptr
+ v2.AuxInt = int64(len(s.(string)))
+ v.AddArg(v2)
+ return true
+ }
+ goto end1a01fc02fad8727f9a3b716cfdac3a44
+ end1a01fc02fad8727f9a3b716cfdac3a44:
+ ;
+ case OpEqFat:
+ // match: (EqFat x y)
+ // cond: x.Op == OpConstNil && y.Op != OpConstNil
+ // result: (EqFat y x)
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(x.Op == OpConstNil && y.Op != OpConstNil) {
+ goto endcea7f7399afcff860c54d82230a9a934
+ }
+ v.Op = OpEqFat
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(y)
+ v.AddArg(x)
+ return true
+ }
+ goto endcea7f7399afcff860c54d82230a9a934
+ endcea7f7399afcff860c54d82230a9a934:
+ ;
+ // match: (EqFat (Load ptr mem) (ConstNil))
+ // cond:
+ // result: (EqPtr (Load <config.Uintptr> ptr mem) (ConstPtr <config.Uintptr> [0]))
+ {
+ if v.Args[0].Op != OpLoad {
+ goto end2597220d1792c84d362da7901d2065d2
+ }
+ ptr := v.Args[0].Args[0]
+ mem := v.Args[0].Args[1]
+ if v.Args[1].Op != OpConstNil {
+ goto end2597220d1792c84d362da7901d2065d2
+ }
+ v.Op = OpEqPtr
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpLoad, TypeInvalid)
+ v0.Type = config.Uintptr
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpConstPtr, TypeInvalid)
+ v1.Type = config.Uintptr
+ v1.AuxInt = 0
+ v.AddArg(v1)
+ return true
+ }
+ goto end2597220d1792c84d362da7901d2065d2
+ end2597220d1792c84d362da7901d2065d2:
+ ;
+ case OpIsInBounds:
+ // match: (IsInBounds (ConstPtr [c]) (ConstPtr [d]))
+ // cond:
+ // result: (ConstPtr {inBounds(c,d)})
+ {
+ if v.Args[0].Op != OpConstPtr {
+ goto enddfd340bc7103ca323354aec96b113c23
+ }
+ c := v.Args[0].AuxInt
+ if v.Args[1].Op != OpConstPtr {
+ goto enddfd340bc7103ca323354aec96b113c23
+ }
+ d := v.Args[1].AuxInt
+ v.Op = OpConstPtr
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.Aux = inBounds(c, d)
+ return true
+ }
+ goto enddfd340bc7103ca323354aec96b113c23
+ enddfd340bc7103ca323354aec96b113c23:
+ ;
+ case OpLoad:
+ // match: (Load <t> ptr mem)
+ // cond: t.IsString()
+ // result: (StringMake (Load <TypeBytePtr> ptr mem) (Load <config.Uintptr> (OffPtr <TypeBytePtr> [config.PtrSize] ptr) mem))
+ {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(t.IsString()) {
+ goto endce3ba169a57b8a9f6b12751d49b4e23a
+ }
+ v.Op = OpStringMake
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpLoad, TypeInvalid)
+ v0.Type = TypeBytePtr
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpLoad, TypeInvalid)
+ v1.Type = config.Uintptr
+ v2 := v.Block.NewValue0(v.Line, OpOffPtr, TypeInvalid)
+ v2.Type = TypeBytePtr
+ v2.AuxInt = config.PtrSize
+ v2.AddArg(ptr)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ goto endce3ba169a57b8a9f6b12751d49b4e23a
+ endce3ba169a57b8a9f6b12751d49b4e23a:
+ ;
+ case OpMul64:
+ // match: (Mul64 (Const64 [c]) (Const64 [d]))
+ // cond:
+ // result: (Const64 [c*d])
+ {
+ if v.Args[0].Op != OpConst64 {
+ goto end7aea1048b5d1230974b97f17238380ae
+ }
+ c := v.Args[0].AuxInt
+ if v.Args[1].Op != OpConst64 {
+ goto end7aea1048b5d1230974b97f17238380ae
+ }
+ d := v.Args[1].AuxInt
+ v.Op = OpConst64
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c * d
+ return true
+ }
+ goto end7aea1048b5d1230974b97f17238380ae
+ end7aea1048b5d1230974b97f17238380ae:
+ ;
+ case OpMulPtr:
+ // match: (MulPtr (ConstPtr [c]) (ConstPtr [d]))
+ // cond:
+ // result: (ConstPtr [c*d])
+ {
+ if v.Args[0].Op != OpConstPtr {
+ goto end808c190f346658bb1ad032bf37a1059f
+ }
+ c := v.Args[0].AuxInt
+ if v.Args[1].Op != OpConstPtr {
+ goto end808c190f346658bb1ad032bf37a1059f
+ }
+ d := v.Args[1].AuxInt
+ v.Op = OpConstPtr
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = c * d
+ return true
+ }
+ goto end808c190f346658bb1ad032bf37a1059f
+ end808c190f346658bb1ad032bf37a1059f:
+ ;
+ case OpNeqFat:
+ // match: (NeqFat x y)
+ // cond: x.Op == OpConstNil && y.Op != OpConstNil
+ // result: (NeqFat y x)
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(x.Op == OpConstNil && y.Op != OpConstNil) {
+ goto end94c68f7dc30c66ed42e507e01c4e5dc7
+ }
+ v.Op = OpNeqFat
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(y)
+ v.AddArg(x)
+ return true
+ }
+ goto end94c68f7dc30c66ed42e507e01c4e5dc7
+ end94c68f7dc30c66ed42e507e01c4e5dc7:
+ ;
+ // match: (NeqFat (Load ptr mem) (ConstNil))
+ // cond:
+ // result: (NeqPtr (Load <config.Uintptr> ptr mem) (ConstPtr <config.Uintptr> [0]))
+ {
+ if v.Args[0].Op != OpLoad {
+ goto end03a0fc8dde062c55439174f70c19e6ce
+ }
+ ptr := v.Args[0].Args[0]
+ mem := v.Args[0].Args[1]
+ if v.Args[1].Op != OpConstNil {
+ goto end03a0fc8dde062c55439174f70c19e6ce
+ }
+ v.Op = OpNeqPtr
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpLoad, TypeInvalid)
+ v0.Type = config.Uintptr
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpConstPtr, TypeInvalid)
+ v1.Type = config.Uintptr
+ v1.AuxInt = 0
+ v.AddArg(v1)
+ return true
+ }
+ goto end03a0fc8dde062c55439174f70c19e6ce
+ end03a0fc8dde062c55439174f70c19e6ce:
+ ;
+ case OpPtrIndex:
+ // match: (PtrIndex <t> ptr idx)
+ // cond:
+ // result: (AddPtr ptr (MulPtr <config.Uintptr> idx (ConstPtr <config.Uintptr> [t.Elem().Size()])))
+ {
+ t := v.Type
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ v.Op = OpAddPtr
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(ptr)
+ v0 := v.Block.NewValue0(v.Line, OpMulPtr, TypeInvalid)
+ v0.Type = config.Uintptr
+ v0.AddArg(idx)
+ v1 := v.Block.NewValue0(v.Line, OpConstPtr, TypeInvalid)
+ v1.Type = config.Uintptr
+ v1.AuxInt = t.Elem().Size()
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ goto endfb3e605edaa4c3c0684c4fa9c8f150ee
+ endfb3e605edaa4c3c0684c4fa9c8f150ee:
+ ;
+ case OpSliceCap:
+ // match: (SliceCap (Load ptr mem))
+ // cond:
+ // result: (Load (AddPtr <ptr.Type> ptr (ConstPtr <config.Uintptr> [config.PtrSize*2])) mem)
+ {
+ if v.Args[0].Op != OpLoad {
+ goto end18c7acae3d96b30b9e5699194df4a687
+ }
+ ptr := v.Args[0].Args[0]
+ mem := v.Args[0].Args[1]
+ v.Op = OpLoad
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAddPtr, TypeInvalid)
+ v0.Type = ptr.Type
+ v0.AddArg(ptr)
+ v1 := v.Block.NewValue0(v.Line, OpConstPtr, TypeInvalid)
+ v1.Type = config.Uintptr
+ v1.AuxInt = config.PtrSize * 2
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ goto end18c7acae3d96b30b9e5699194df4a687
+ end18c7acae3d96b30b9e5699194df4a687:
+ ;
+ case OpSliceLen:
+ // match: (SliceLen (Load ptr mem))
+ // cond:
+ // result: (Load (AddPtr <ptr.Type> ptr (ConstPtr <config.Uintptr> [config.PtrSize])) mem)
+ {
+ if v.Args[0].Op != OpLoad {
+ goto end2dc65aee31bb0d91847032be777777d2
+ }
+ ptr := v.Args[0].Args[0]
+ mem := v.Args[0].Args[1]
+ v.Op = OpLoad
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAddPtr, TypeInvalid)
+ v0.Type = ptr.Type
+ v0.AddArg(ptr)
+ v1 := v.Block.NewValue0(v.Line, OpConstPtr, TypeInvalid)
+ v1.Type = config.Uintptr
+ v1.AuxInt = config.PtrSize
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ goto end2dc65aee31bb0d91847032be777777d2
+ end2dc65aee31bb0d91847032be777777d2:
+ ;
+ case OpSlicePtr:
+ // match: (SlicePtr (Load ptr mem))
+ // cond:
+ // result: (Load ptr mem)
+ {
+ if v.Args[0].Op != OpLoad {
+ goto end459613b83f95b65729d45c2ed663a153
+ }
+ ptr := v.Args[0].Args[0]
+ mem := v.Args[0].Args[1]
+ v.Op = OpLoad
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ goto end459613b83f95b65729d45c2ed663a153
+ end459613b83f95b65729d45c2ed663a153:
+ ;
+ case OpStore:
+ // match: (Store dst (Load <t> src mem) mem)
+ // cond: t.Size() > 8
+ // result: (Move [t.Size()] dst src mem)
+ {
+ dst := v.Args[0]
+ if v.Args[1].Op != OpLoad {
+ goto end324ffb6d2771808da4267f62c854e9c8
+ }
+ t := v.Args[1].Type
+ src := v.Args[1].Args[0]
+ mem := v.Args[1].Args[1]
+ if v.Args[2] != v.Args[1].Args[1] {
+ goto end324ffb6d2771808da4267f62c854e9c8
+ }
+ if !(t.Size() > 8) {
+ goto end324ffb6d2771808da4267f62c854e9c8
+ }
+ v.Op = OpMove
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = t.Size()
+ v.AddArg(dst)
+ v.AddArg(src)
+ v.AddArg(mem)
+ return true
+ }
+ goto end324ffb6d2771808da4267f62c854e9c8
+ end324ffb6d2771808da4267f62c854e9c8:
+ ;
+ // match: (Store dst str mem)
+ // cond: str.Type.IsString()
+ // result: (Store (OffPtr <TypeBytePtr> [config.PtrSize] dst) (StringLen <config.Uintptr> str) (Store <TypeMem> dst (StringPtr <TypeBytePtr> str) mem))
+ {
+ dst := v.Args[0]
+ str := v.Args[1]
+ mem := v.Args[2]
+ if !(str.Type.IsString()) {
+ goto endb47e037c1e5ac54c3a41d53163d8aef6
+ }
+ v.Op = OpStore
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpOffPtr, TypeInvalid)
+ v0.Type = TypeBytePtr
+ v0.AuxInt = config.PtrSize
+ v0.AddArg(dst)
+ v.AddArg(v0)
+ v1 := v.Block.NewValue0(v.Line, OpStringLen, TypeInvalid)
+ v1.Type = config.Uintptr
+ v1.AddArg(str)
+ v.AddArg(v1)
+ v2 := v.Block.NewValue0(v.Line, OpStore, TypeInvalid)
+ v2.Type = TypeMem
+ v2.AddArg(dst)
+ v3 := v.Block.NewValue0(v.Line, OpStringPtr, TypeInvalid)
+ v3.Type = TypeBytePtr
+ v3.AddArg(str)
+ v2.AddArg(v3)
+ v2.AddArg(mem)
+ v.AddArg(v2)
+ return true
+ }
+ goto endb47e037c1e5ac54c3a41d53163d8aef6
+ endb47e037c1e5ac54c3a41d53163d8aef6:
+ ;
+ case OpStringLen:
+ // match: (StringLen (StringMake _ len))
+ // cond:
+ // result: len
+ {
+ if v.Args[0].Op != OpStringMake {
+ goto end0d922460b7e5ca88324034f4bd6c027c
+ }
+ len := v.Args[0].Args[1]
+ v.Op = len.Op
+ v.AuxInt = len.AuxInt
+ v.Aux = len.Aux
+ v.resetArgs()
+ v.AddArgs(len.Args...)
+ return true
+ }
+ goto end0d922460b7e5ca88324034f4bd6c027c
+ end0d922460b7e5ca88324034f4bd6c027c:
+ ;
+ case OpStringPtr:
+ // match: (StringPtr (StringMake ptr _))
+ // cond:
+ // result: ptr
+ {
+ if v.Args[0].Op != OpStringMake {
+ goto end061edc5d85c73ad909089af2556d9380
+ }
+ ptr := v.Args[0].Args[0]
+ v.Op = ptr.Op
+ v.AuxInt = ptr.AuxInt
+ v.Aux = ptr.Aux
+ v.resetArgs()
+ v.AddArgs(ptr.Args...)
+ return true
+ }
+ goto end061edc5d85c73ad909089af2556d9380
+ end061edc5d85c73ad909089af2556d9380:
+ ;
+ case OpStructSelect:
+ // match: (StructSelect [idx] (Load ptr mem))
+ // cond:
+ // result: (Load (OffPtr <v.Type.PtrTo()> [idx] ptr) mem)
+ {
+ idx := v.AuxInt
+ if v.Args[0].Op != OpLoad {
+ goto end16fdb45e1dd08feb36e3cc3fb5ed8935
+ }
+ ptr := v.Args[0].Args[0]
+ mem := v.Args[0].Args[1]
+ v.Op = OpLoad
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpOffPtr, TypeInvalid)
+ v0.Type = v.Type.PtrTo()
+ v0.AuxInt = idx
+ v0.AddArg(ptr)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ goto end16fdb45e1dd08feb36e3cc3fb5ed8935
+ end16fdb45e1dd08feb36e3cc3fb5ed8935:
+ }
+ return false
+}
+// rewriteBlockgeneric applies machine-independent rewrite rules to the
+// control value and successors of b, and reports whether any rule fired.
+// NOTE(review): this follows the layout of machine-generated rewrite-rule
+// code (match/cond/result comments, hashed goto labels) — confirm it is
+// generated from rule files before editing by hand.
+func rewriteBlockgeneric(b *Block) bool {
+ switch b.Kind {
+ case BlockIf:
+ // match: (If (Not cond) yes no)
+ // cond:
+ // result: (If cond no yes)
+ {
+ v := b.Control
+ if v.Op != OpNot {
+ goto endebe19c1c3c3bec068cdb2dd29ef57f96
+ }
+ cond := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ // Branch on the un-negated condition and swap the successors.
+ b.Kind = BlockIf
+ b.Control = cond
+ b.Succs[0] = no
+ b.Succs[1] = yes
+ return true
+ }
+ goto endebe19c1c3c3bec068cdb2dd29ef57f96
+ endebe19c1c3c3bec068cdb2dd29ef57f96:
+ ;
+ // match: (If (ConstBool {c}) yes no)
+ // cond: c.(bool)
+ // result: (Plain nil yes)
+ {
+ v := b.Control
+ if v.Op != OpConstBool {
+ goto end9ff0273f9b1657f4afc287562ca889f0
+ }
+ c := v.Aux
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ if !(c.(bool)) {
+ goto end9ff0273f9b1657f4afc287562ca889f0
+ }
+ // Constant-true branch: drop the "no" edge and fall through to "yes".
+ v.Block.Func.removePredecessor(b, no)
+ b.Kind = BlockPlain
+ b.Control = nil
+ b.Succs = b.Succs[:1]
+ b.Succs[0] = yes
+ return true
+ }
+ goto end9ff0273f9b1657f4afc287562ca889f0
+ end9ff0273f9b1657f4afc287562ca889f0:
+ ;
+ // match: (If (ConstBool {c}) yes no)
+ // cond: !c.(bool)
+ // result: (Plain nil no)
+ {
+ v := b.Control
+ if v.Op != OpConstBool {
+ goto endf401a4553c3c7c6bed64801da7bba076
+ }
+ c := v.Aux
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ if !(!c.(bool)) {
+ goto endf401a4553c3c7c6bed64801da7bba076
+ }
+ // Constant-false branch: drop the "yes" edge and fall through to "no".
+ v.Block.Func.removePredecessor(b, yes)
+ b.Kind = BlockPlain
+ b.Control = nil
+ b.Succs = b.Succs[:1]
+ b.Succs[0] = no
+ return true
+ }
+ goto endf401a4553c3c7c6bed64801da7bba076
+ endf401a4553c3c7c6bed64801da7bba076:
+ }
+ return false
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// Schedule the Values in each Block. After this phase returns, the
+// order of b.Values matters and is the order in which those values
+// will appear in the assembly output. For now it generates an
+// arbitrary valid schedule using a topological sort. TODO(khr):
+// schedule smarter.
+func schedule(f *Func) {
+ // DFS colors for the iterative topological sort below.
+ const (
+ unmarked = 0
+ found = 1
+ expanded = 2
+ done = 3
+ )
+ state := make([]byte, f.NumValues())
+ var queue []*Value // stack-like worklist. Contains found and expanded nodes.
+ var order []*Value
+
+ nextMem := make([]*Value, f.NumValues()) // maps mem values to the next live value
+ additionalEdges := make([][]*Value, f.NumValues())
+ for _, b := range f.Blocks {
+ // Set the nextMem values for this block. If the previous
+ // write is from a different block, then its nextMem entry
+ // might have already been set during processing of an earlier
+ // block. This loop resets the nextMem entries to be correct
+ // for this block.
+ for _, v := range b.Values {
+ if v.Type.IsMemory() {
+ for _, w := range v.Args {
+ if w.Type.IsMemory() {
+ nextMem[w.ID] = v
+ }
+ }
+ }
+ }
+ // Add an anti-dependency between each load v and the memory value n
+ // following the memory value that v loads from.
+ // This will enforce the single-live-mem restriction.
+ for _, v := range b.Values {
+ if v.Type.IsMemory() {
+ continue
+ }
+ for _, w := range v.Args {
+ if w.Type.IsMemory() && nextMem[w.ID] != nil {
+ // Filter for intra-block edges.
+ if n := nextMem[w.ID]; n.Block == b {
+ additionalEdges[n.ID] = append(additionalEdges[n.ID], v)
+ }
+ }
+ }
+ }
+
+ order = order[:0]
+
+ // Schedule phis first
+ for _, v := range b.Values {
+ if v.Op == OpPhi {
+ // TODO: what if a phi is also a control op? It happens for
+ // mem ops all the time, which shouldn't matter. But for
+ // regular ops we might be violating invariants about where
+ // control ops live.
+ if v == b.Control && !v.Type.IsMemory() {
+ f.Unimplementedf("phi is a control op %s %s", v, b)
+ }
+ order = append(order, v)
+ }
+ }
+
+ // Topologically sort the non-phi values in b.
+ // Iterative DFS: a value stays on the queue while its arguments
+ // (and anti-dependency predecessors) are explored, and is emitted
+ // only once everything it depends on has been emitted.
+ for _, v := range b.Values {
+ if v.Op == OpPhi {
+ continue
+ }
+ if v == b.Control {
+ continue
+ }
+ if state[v.ID] != unmarked {
+ if state[v.ID] != done {
+ panic("bad state")
+ }
+ continue
+ }
+ state[v.ID] = found
+ queue = append(queue, v)
+ for len(queue) > 0 {
+ v = queue[len(queue)-1]
+ switch state[v.ID] {
+ case found:
+ state[v.ID] = expanded
+ // Note that v is not popped. We leave it in place
+ // until all its children have been explored.
+ for _, w := range v.Args {
+ if w.Block == b && w.Op != OpPhi && w != b.Control && state[w.ID] == unmarked {
+ state[w.ID] = found
+ queue = append(queue, w)
+ }
+ }
+ for _, w := range additionalEdges[v.ID] {
+ if w.Block == b && w.Op != OpPhi && w != b.Control && state[w.ID] == unmarked {
+ state[w.ID] = found
+ queue = append(queue, w)
+ }
+ }
+ case expanded:
+ queue = queue[:len(queue)-1]
+ state[v.ID] = done
+ order = append(order, v)
+ default:
+ panic("bad state")
+ }
+ }
+ }
+ if b.Control != nil {
+ order = append(order, b.Control)
+ }
+ // Every value of b appears exactly once in order (phis, then the
+ // topologically sorted rest, then the control value), so this
+ // overwrite is length-preserving.
+ copy(b.Values, order)
+ }
+ // TODO: only allow one live flags type (x86)
+ // This restriction will force any flag uses to appear before
+ // the next flag update. This "anti-dependence" is not recorded
+ // explicitly in ssa form.
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "testing"
+
+// TestSchedule feeds schedule a block whose values are deliberately listed
+// out of dependency order (the loads l1/l2 and the adder appear after the
+// stores that consume them) and checks that the resulting order satisfies
+// the single-live-mem restriction.
+func TestSchedule(t *testing.T) {
+ c := NewConfig("amd64", DummyFrontend{t})
+ cases := []fun{
+ Fun(c, "entry",
+ Bloc("entry",
+ Valu("mem0", OpArg, TypeMem, 0, ".mem"),
+ Valu("ptr", OpConst64, TypeInt64, 0xABCD, nil),
+ Valu("v", OpConst64, TypeInt64, 12, nil),
+ Valu("mem1", OpStore, TypeMem, 0, nil, "ptr", "v", "mem0"),
+ Valu("mem2", OpStore, TypeMem, 0, nil, "ptr", "v", "mem1"),
+ // mem3 is a store like mem1/mem2 and is consumed as the exit
+ // memory, so it must have memory type (was TypeInt64, which
+ // broke the store chain's typing).
+ Valu("mem3", OpStore, TypeMem, 0, nil, "ptr", "sum", "mem2"),
+ Valu("l1", OpLoad, TypeInt64, 0, nil, "ptr", "mem1"),
+ Valu("l2", OpLoad, TypeInt64, 0, nil, "ptr", "mem2"),
+ Valu("sum", OpAdd64, TypeInt64, 0, nil, "l1", "l2"),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem3"))),
+ }
+ for _, c := range cases {
+ schedule(c.f)
+ if !isSingleLiveMem(c.f) {
+ t.Error("single-live-mem restriction not enforced by schedule for func:")
+ printFunc(c.f)
+ }
+ }
+}
+
+// isSingleLiveMem reports whether, within every block of f, at most one
+// memory value is live at any point: each value's memory argument must be
+// the most recently defined memory value seen so far in that block.
+func isSingleLiveMem(f *Func) bool {
+ for _, b := range f.Blocks {
+ var liveMem *Value
+ for _, v := range b.Values {
+ for _, w := range v.Args {
+ if w.Type.IsMemory() {
+ if liveMem == nil {
+ // First memory use in the block; adopt it as live.
+ liveMem = w
+ continue
+ }
+ if w != liveMem {
+ // v uses a stale memory value: two mems are live.
+ return false
+ }
+ }
+ }
+ if v.Type.IsMemory() {
+ // v defines a new memory; it becomes the only live one.
+ liveMem = v
+ }
+ }
+ }
+ return true
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "testing"
+)
+
+// TestShiftConstAMD64 checks the code generated for 64-bit shifts by a
+// constant amount. In-range amounts (18) should compile to a single shift
+// instruction with no bounds comparison (CMPQconst) and no masking
+// (ANDQconst). For out-of-range amounts (66), unsigned shifts expect no
+// shift instruction at all, while the signed right shift still expects one
+// SARQconst — presumably the amount is clamped to the maximum; confirm
+// against the rewrite rules.
+func TestShiftConstAMD64(t *testing.T) {
+ c := NewConfig("amd64", DummyFrontend{t})
+ fun := makeConstShiftFunc(c, 18, OpLsh64x64, TypeUInt64)
+ checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHLQconst: 1, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0})
+ fun = makeConstShiftFunc(c, 66, OpLsh64x64, TypeUInt64)
+ checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHLQconst: 0, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0})
+ fun = makeConstShiftFunc(c, 18, OpRsh64Ux64, TypeUInt64)
+ checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHRQconst: 1, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0})
+ fun = makeConstShiftFunc(c, 66, OpRsh64Ux64, TypeUInt64)
+ checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHRQconst: 0, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0})
+ fun = makeConstShiftFunc(c, 18, OpRsh64x64, TypeInt64)
+ checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SARQconst: 1, OpAMD64CMPQconst: 0})
+ fun = makeConstShiftFunc(c, 66, OpRsh64x64, TypeInt64)
+ checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SARQconst: 1, OpAMD64CMPQconst: 0})
+}
+
+// makeConstShiftFunc builds and compiles an SSA function that loads a value
+// of type typ from SP+8, applies op with the constant shift amount, and
+// stores the result at SP+16. The compiled function is returned for
+// opcode inspection.
+func makeConstShiftFunc(c *Config, amount int64, op Op, typ Type) fun {
+ ptyp := &TypeImpl{Size_: 8, Ptr: true, Name: "ptr"}
+ fun := Fun(c, "entry",
+ Bloc("entry",
+ Valu("mem", OpArg, TypeMem, 0, ".mem"),
+ Valu("SP", OpSP, TypeUInt64, 0, nil),
+ Valu("argptr", OpOffPtr, ptyp, 8, nil, "SP"),
+ Valu("resptr", OpOffPtr, ptyp, 16, nil, "SP"),
+ Valu("load", OpLoad, typ, 0, nil, "argptr", "mem"),
+ Valu("c", OpConst64, TypeUInt64, amount, nil),
+ Valu("shift", op, typ, 0, nil, "load", "c"),
+ Valu("store", OpStore, TypeMem, 0, nil, "resptr", "shift", "mem"),
+ Exit("store")))
+ Compile(fun.f)
+ return fun
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// from http://research.swtch.com/sparse
+// in turn, from Briggs and Torczon
+
+// sparseSet is a set of IDs supporting O(1) add, remove, and membership
+// test, and O(1) clear. Entries of sparse for absent elements may be
+// stale; every read of sparse is validated against dense before use.
+type sparseSet struct {
+ dense []ID
+ sparse []int
+}
+
+// newSparseSet returns a sparseSet that can represent
+// integers between 0 and n-1
+func newSparseSet(n int) *sparseSet {
+ return &sparseSet{nil, make([]int, n)}
+}
+
+// size returns the number of elements in the set.
+func (s *sparseSet) size() int {
+ return len(s.dense)
+}
+
+// contains reports whether x is in the set.
+func (s *sparseSet) contains(x ID) bool {
+ i := s.sparse[x]
+ return i < len(s.dense) && s.dense[i] == x
+}
+
+// add inserts x into the set; it is a no-op if x is already present.
+func (s *sparseSet) add(x ID) {
+ i := s.sparse[x]
+ if i < len(s.dense) && s.dense[i] == x {
+ return
+ }
+ s.dense = append(s.dense, x)
+ s.sparse[x] = len(s.dense) - 1
+}
+
+// addAll inserts every ID in a.
+func (s *sparseSet) addAll(a []ID) {
+ for _, x := range a {
+ s.add(x)
+ }
+}
+
+// addAllValues inserts the ID of every value in a.
+func (s *sparseSet) addAllValues(a []*Value) {
+ for _, v := range a {
+ s.add(v.ID)
+ }
+}
+
+// remove deletes x from the set (if present) by swapping it with the
+// last dense element and shrinking dense by one.
+func (s *sparseSet) remove(x ID) {
+ i := s.sparse[x]
+ if i < len(s.dense) && s.dense[i] == x {
+ y := s.dense[len(s.dense)-1]
+ s.dense[i] = y
+ s.sparse[y] = i
+ s.dense = s.dense[:len(s.dense)-1]
+ }
+}
+
+// pop removes an arbitrary element from the set.
+// The set must be nonempty.
+func (s *sparseSet) pop() ID {
+ x := s.dense[len(s.dense)-1]
+ s.dense = s.dense[:len(s.dense)-1]
+ return x
+}
+
+// clear removes all elements; the backing storage is retained for reuse.
+func (s *sparseSet) clear() {
+ s.dense = s.dense[:0]
+}
+
+// contents returns the elements of the set. The returned slice aliases
+// the set's internal storage; callers must not mutate the set while
+// holding it.
+func (s *sparseSet) contents() []ID {
+ return s.dense
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// stackalloc allocates storage in the stack frame for
+// all Values that did not get a register.
+func stackalloc(f *Func) {
+ home := f.RegAlloc
+
+ // Start with space for callee arguments/returns.
+ // The call block's control value carries the required size in
+ // AuxInt (per the log message below); the frame reserves the max
+ // over all calls.
+ var n int64
+ for _, b := range f.Blocks {
+ if b.Kind != BlockCall {
+ continue
+ }
+ v := b.Control
+ if n < v.AuxInt {
+ n = v.AuxInt
+ }
+ }
+ f.Logf("stackalloc: 0-%d for callee arguments/returns\n", n)
+
+ // TODO: group variables by ptr/nonptr, size, etc. Emit ptr vars last
+ // so stackmap is smaller.
+
+ // Assign stack locations to phis first, because we
+ // must also assign the same locations to the phi copies
+ // introduced during regalloc.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ if v.Type.IsMemory() { // TODO: only "regallocable" types
+ continue
+ }
+ n = align(n, v.Type.Alignment())
+ f.Logf("stackalloc: %d-%d for %v\n", n, n+v.Type.Size(), v)
+ loc := &LocalSlot{n}
+ n += v.Type.Size()
+ home = setloc(home, v, loc)
+ // A phi's arguments share its slot so the copies regalloc
+ // inserted resolve to the same stack location.
+ for _, w := range v.Args {
+ home = setloc(home, w, loc)
+ }
+ }
+ }
+
+ // Now do all other unassigned values.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.ID < ID(len(home)) && home[v.ID] != nil {
+ continue
+ }
+ if v.Type.IsMemory() { // TODO: only "regallocable" types
+ continue
+ }
+ if len(v.Args) == 0 {
+ // v will have been materialized wherever it is needed.
+ continue
+ }
+ if len(v.Args) == 1 && (v.Args[0].Op == OpSP || v.Args[0].Op == OpSB) {
+ // SP/SB-relative values need no slot — presumably they are
+ // recomputed at each use; confirm against codegen.
+ continue
+ }
+ n = align(n, v.Type.Alignment())
+ f.Logf("stackalloc: %d-%d for %v\n", n, n+v.Type.Size(), v)
+ loc := &LocalSlot{n}
+ n += v.Type.Size()
+ home = setloc(home, v, loc)
+ }
+ }
+
+ // Finally, allocate space for all autos that we used
+ // (AutoSymbol aux values with a still-unset, i.e. negative, offset).
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ s, ok := v.Aux.(*AutoSymbol)
+ if !ok || s.Offset >= 0 {
+ continue
+ }
+ t := s.Typ
+ n = align(n, t.Alignment())
+ f.Logf("stackalloc: %d-%d for auto %v\n", n, n+t.Size(), v)
+ s.Offset = n
+ n += t.Size()
+ }
+ }
+
+ n = align(n, f.Config.PtrSize)
+ f.Logf("stackalloc: %d-%d for return address\n", n, n+f.Config.PtrSize)
+ n += f.Config.PtrSize // space for return address. TODO: arch-dependent
+ f.RegAlloc = home
+ f.FrameSize = n
+
+ // TODO: share stack slots among noninterfering (& gc type compatible) values
+}
+
+// align rounds n up to the next multiple of a. a must be a power of 2;
+// as a special case, a == 0 returns n unchanged.
+func align(n int64, a int64) int64 {
+ if a == 0 {
+ return n
+ }
+ return (n + a - 1) &^ (a - 1)
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// TODO: use go/types instead?
+
+// A type interface used to import cmd/internal/gc:Type
+// Type instances are not guaranteed to be canonical,
+// so compare types with Equal, not ==.
+type Type interface {
+ Size() int64 // return the size in bytes
+ Alignment() int64 // return the alignment in bytes
+
+ IsBoolean() bool // is a named or unnamed boolean type
+ IsInteger() bool // ... ditto for the others
+ IsSigned() bool
+ IsFloat() bool
+ IsPtr() bool
+ IsString() bool
+
+ IsMemory() bool // special ssa-package-only types
+ IsFlags() bool
+
+ Elem() Type // given []T or *T, return T
+ PtrTo() Type // given T, return *T
+
+ String() string
+ Equal(Type) bool
+}
+
+// Stub implementation for now, until we are completely using ../gc:Type
+type TypeImpl struct {
+ Size_ int64 // trailing underscore avoids a clash with the Size method
+ Align int64
+ Boolean bool
+ Integer bool
+ Signed bool
+ Float bool
+ Ptr bool
+ string bool // lowercase: a field named String would clash with the String method
+
+ Memory bool
+ Flags bool
+
+ Name string
+}
+
+// The accessors below implement the Type interface by reporting the
+// corresponding TypeImpl fields.
+func (t *TypeImpl) Size() int64 { return t.Size_ }
+func (t *TypeImpl) Alignment() int64 { return t.Align }
+func (t *TypeImpl) IsBoolean() bool { return t.Boolean }
+func (t *TypeImpl) IsInteger() bool { return t.Integer }
+func (t *TypeImpl) IsSigned() bool { return t.Signed }
+func (t *TypeImpl) IsFloat() bool { return t.Float }
+func (t *TypeImpl) IsPtr() bool { return t.Ptr }
+func (t *TypeImpl) IsString() bool { return t.string }
+func (t *TypeImpl) IsMemory() bool { return t.Memory }
+func (t *TypeImpl) IsFlags() bool { return t.Flags }
+func (t *TypeImpl) String() string { return t.Name }
+
+// Elem and PtrTo are not needed by this stub implementation.
+// (No return statement after panic: go vet flags unreachable code.)
+func (t *TypeImpl) Elem() Type { panic("not implemented") }
+func (t *TypeImpl) PtrTo() Type { panic("not implemented") }
+
+// Equal reports whether u is the identical *TypeImpl as t. Stub types
+// are compared by pointer identity, not structurally.
+func (t *TypeImpl) Equal(u Type) bool {
+ x, ok := u.(*TypeImpl)
+ if !ok {
+ return false
+ }
+ return x == t
+}
+
+var (
+ // shortcuts for commonly used basic types
+ TypeInt8 = &TypeImpl{Size_: 1, Align: 1, Integer: true, Signed: true, Name: "int8"}
+ TypeInt16 = &TypeImpl{Size_: 2, Align: 2, Integer: true, Signed: true, Name: "int16"}
+ TypeInt32 = &TypeImpl{Size_: 4, Align: 4, Integer: true, Signed: true, Name: "int32"}
+ TypeInt64 = &TypeImpl{Size_: 8, Align: 8, Integer: true, Signed: true, Name: "int64"}
+ TypeUInt8 = &TypeImpl{Size_: 1, Align: 1, Integer: true, Name: "uint8"}
+ TypeUInt16 = &TypeImpl{Size_: 2, Align: 2, Integer: true, Name: "uint16"}
+ TypeUInt32 = &TypeImpl{Size_: 4, Align: 4, Integer: true, Name: "uint32"}
+ TypeUInt64 = &TypeImpl{Size_: 8, Align: 8, Integer: true, Name: "uint64"}
+ TypeBool = &TypeImpl{Size_: 1, Align: 1, Boolean: true, Name: "bool"}
+ //TypeString = types.Typ[types.String]
+ // NOTE(review): TypeBytePtr hard-codes 8-byte pointers (64-bit only).
+ TypeBytePtr = &TypeImpl{Size_: 8, Align: 8, Ptr: true, Name: "*byte"}
+
+ // TypeInvalid is a placeholder for values whose real type is
+ // assigned after construction (see the NewValue0 calls in the
+ // rewrite rules).
+ TypeInvalid = &TypeImpl{Name: "invalid"}
+
+ // Additional compiler-only types go here.
+ TypeMem = &TypeImpl{Memory: true, Name: "mem"}
+ TypeFlags = &TypeImpl{Flags: true, Name: "flags"}
+)
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "fmt"
+
+// A Value represents a value in the SSA representation of the program.
+// The ID and Type fields must not be modified. The remainder may be modified
+// if they preserve the value of the Value (e.g. changing a (mul 2 x) to an (add x x)).
+type Value struct {
+ // A unique identifier for the value. For performance we allocate these IDs
+ // densely starting at 0. There is no guarantee that there won't be occasional holes, though.
+ ID ID
+
+ // The operation that computes this value. See op.go.
+ Op Op
+
+ // The type of this value. Normally this will be a Go type, but there
+ // are a few other pseudo-types, see type.go.
+ Type Type
+
+ // Auxiliary info for this value. The type of this information depends on the opcode and type.
+ // AuxInt is used for integer values, Aux is used for other values.
+ AuxInt int64
+ Aux interface{}
+
+ // Arguments of this value
+ Args []*Value
+
+ // Containing basic block
+ Block *Block
+
+ // Source line number
+ Line int32
+
+ // Storage for the first two args. Args points here for values with
+ // at most two arguments (see resetArgs/AddArg), avoiding a separate
+ // slice allocation in the common case.
+ argstorage [2]*Value
+}
+
+// Examples:
+// Opcode aux args
+// OpAdd nil 2
+// OpConst string 0 string constant
+// OpConst int64 0 int64 constant
+// OpAddcq int64 1 amd64 op: v = arg[0] + constant
+
+// String returns the short form print: just v#.
+func (v *Value) String() string {
+ return fmt.Sprintf("v%d", v.ID)
+}
+
+// LongString returns the long form print: v# = opcode <type> [aux] args [: reg].
+// AuxInt and Aux are printed only when set; the register is printed only
+// after register allocation has run.
+func (v *Value) LongString() string {
+ s := fmt.Sprintf("v%d = %s", v.ID, v.Op.String())
+ s += " <" + v.Type.String() + ">"
+ if v.AuxInt != 0 {
+ s += fmt.Sprintf(" [%d]", v.AuxInt)
+ }
+ if v.Aux != nil {
+ if _, ok := v.Aux.(string); ok {
+ // Quote string aux so empty/whitespace values are visible.
+ s += fmt.Sprintf(" {%q}", v.Aux)
+ } else {
+ s += fmt.Sprintf(" {%v}", v.Aux)
+ }
+ }
+ for _, a := range v.Args {
+ s += fmt.Sprintf(" %v", a)
+ }
+ r := v.Block.Func.RegAlloc
+ if r != nil && r[v.ID] != nil {
+ s += " : " + r[v.ID].Name()
+ }
+ return s
+}
+
+// AddArg appends w to v's arguments.
+func (v *Value) AddArg(w *Value) {
+ if v.Args == nil {
+ v.resetArgs() // use argstorage
+ }
+ v.Args = append(v.Args, w)
+}
+
+// AddArgs appends all of a to v's arguments.
+func (v *Value) AddArgs(a ...*Value) {
+ if v.Args == nil {
+ v.resetArgs() // use argstorage
+ }
+ v.Args = append(v.Args, a...)
+}
+
+// SetArg replaces v's i'th argument with w.
+func (v *Value) SetArg(i int, w *Value) {
+ v.Args[i] = w
+}
+
+// RemoveArg deletes v's i'th argument, preserving the order of the rest.
+func (v *Value) RemoveArg(i int) {
+ copy(v.Args[i:], v.Args[i+1:])
+ v.Args[len(v.Args)-1] = nil // aid GC
+ v.Args = v.Args[:len(v.Args)-1]
+}
+
+// SetArgs1 replaces v's arguments with the single argument a.
+func (v *Value) SetArgs1(a *Value) {
+ v.resetArgs()
+ v.AddArg(a)
+}
+
+// SetArgs2 replaces v's arguments with a and b.
+func (v *Value) SetArgs2(a *Value, b *Value) {
+ v.resetArgs()
+ v.AddArg(a)
+ v.AddArg(b)
+}
+
+// resetArgs empties v's argument list, pointing Args at the inline
+// argstorage array so small argument lists need no extra allocation.
+func (v *Value) resetArgs() {
+ v.argstorage[0] = nil
+ v.argstorage[1] = nil
+ v.Args = v.argstorage[:0]
+}
+
+// Logf, Fatalf, and Unimplementedf forward to v's containing block.
+func (v *Value) Logf(msg string, args ...interface{}) { v.Block.Logf(msg, args...) }
+func (v *Value) Fatalf(msg string, args ...interface{}) { v.Block.Fatalf(msg, args...) }
+func (v *Value) Unimplementedf(msg string, args ...interface{}) { v.Block.Unimplementedf(msg, args...) }
+
+// ExternSymbol is an aux value that encodes a variable's
+// constant offset from the static base pointer.
+type ExternSymbol struct {
+ Typ Type // Go type
+ Sym fmt.Stringer // A *gc.Sym referring to a global variable
+ // Note: the offset for an external symbol is not
+ // calculated until link time.
+}
+
+// ArgSymbol is an aux value that encodes an argument or result
+// variable's constant offset from FP (FP = SP + framesize).
+type ArgSymbol struct {
+ Typ Type // Go type
+ Offset int64 // Distance above frame pointer
+ Sym fmt.Stringer // A *gc.Sym referring to the argument/result variable.
+}
+
+// AutoSymbol is an aux value that encodes a local variable's
+// constant offset from SP.
+type AutoSymbol struct {
+ Typ Type // Go type
+ Offset int64 // Distance above stack pointer. Set by stackalloc in SSA.
+ Sym fmt.Stringer // A *gc.Sym referring to a local (auto) variable.
+}
+
+// String returns the name of the underlying symbol.
+func (s *ExternSymbol) String() string {
+ return s.Sym.String()
+}
+
+// String returns the name of the underlying symbol.
+func (s *ArgSymbol) String() string {
+ return s.Sym.String()
+}
+
+// String returns the name of the underlying symbol.
+func (s *AutoSymbol) String() string {
+ return s.Sym.String()
+}
"compile/internal/big",
"compile/internal/gc",
"compile/internal/ppc64",
+ "compile/internal/ssa",
"compile/internal/x86",
"internal/gcprog",
"internal/obj",
AINTO
AIRETL
AIRETW
- AJCC
- AJCS
+ AJCC // >= unsigned
+ AJCS // < unsigned
AJCXZL
- AJEQ
- AJGE
- AJGT
- AJHI
- AJLE
- AJLS
- AJLT
- AJMI
- AJNE
- AJOC
- AJOS
- AJPC
- AJPL
- AJPS
+ AJEQ // == (zero)
+ AJGE // >= signed
+ AJGT // > signed
+ AJHI // > unsigned
+ AJLE // <= signed
+ AJLS // <= unsigned
+ AJLT // < signed
+ AJMI // sign bit set (negative)
+ AJNE // != (nonzero)
+ AJOC // overflow clear
+ AJOS // overflow set
+ AJPC // parity clear
+ AJPL // sign bit clear (positive)
+ AJPS // parity set
ALAHF
ALARL
ALARW
// goto across declaration not okay
func _() {
goto L // ERROR "goto L jumps over declaration of x at LINE+1|goto jumps over declaration"
- x := 1 // GCCGO_ERROR "defined here"
+ x := 1 // GCCGO_ERROR "defined here"
_ = x
L:
}
x := 1
_ = x
}
- x := 1 // GCCGO_ERROR "defined here"
+ x := 1 // GCCGO_ERROR "defined here"
_ = x
L:
}
// error shows first offending variable
func _() {
goto L // ERROR "goto L jumps over declaration of x at LINE+1|goto jumps over declaration"
- x := 1 // GCCGO_ERROR "defined here"
+ x := 1 // GCCGO_ERROR "defined here"
_ = x
y := 1
_ = y
// goto not okay even if code path is dead
func _() {
goto L // ERROR "goto L jumps over declaration of x at LINE+1|goto jumps over declaration"
- x := 1 // GCCGO_ERROR "defined here"
+ x := 1 // GCCGO_ERROR "defined here"
_ = x
y := 1
_ = y
// goto into inner block not okay
func _() {
goto L // ERROR "goto L jumps into block starting at LINE+1|goto jumps into block"
- { // GCCGO_ERROR "block starts here"
+ { // GCCGO_ERROR "block starts here"
L:
}
}
// goto backward into inner block still not okay
func _() {
- { // GCCGO_ERROR "block starts here"
+ { // GCCGO_ERROR "block starts here"
L:
}
goto L // ERROR "goto L jumps into block starting at LINE-3|goto jumps into block"
goto L // ERROR "goto L jumps into block starting at LINE+1|goto jumps into block"
{
{
- { // GCCGO_ERROR "block starts here"
+ { // GCCGO_ERROR "block starts here"
L:
}
}
goto L // ERROR "goto L jumps into block starting at LINE+3|goto jumps into block"
x := 1
_ = x
- { // GCCGO_ERROR "block starts here"
+ { // GCCGO_ERROR "block starts here"
L:
}
}
}
func _() {
- goto L // ERROR "goto L jumps into block starting at LINE+1|goto jumps into block"
- if true { // GCCGO_ERROR "block starts here"
+ goto L // ERROR "goto L jumps into block starting at LINE+1|goto jumps into block"
+ if true { // GCCGO_ERROR "block starts here"
L:
}
}
func _() {
- goto L // ERROR "goto L jumps into block starting at LINE+1|goto jumps into block"
- if true { // GCCGO_ERROR "block starts here"
+ goto L // ERROR "goto L jumps into block starting at LINE+1|goto jumps into block"
+ if true { // GCCGO_ERROR "block starts here"
L:
} else {
}
func _() {
goto L // ERROR "goto L jumps into block starting at LINE+1|goto jumps into block"
if true {
- } else { // GCCGO_ERROR "block starts here"
+ } else { // GCCGO_ERROR "block starts here"
L:
}
}
func _() {
- if false { // GCCGO_ERROR "block starts here"
+ if false { // GCCGO_ERROR "block starts here"
L:
} else {
goto L // ERROR "goto L jumps into block starting at LINE-3|goto jumps into block"
func _() {
if true {
goto L // ERROR "goto L jumps into block starting at LINE+1|goto jumps into block"
- } else { // GCCGO_ERROR "block starts here"
+ } else { // GCCGO_ERROR "block starts here"
L:
}
}
func _() {
if true {
goto L // ERROR "goto L jumps into block starting at LINE+1|goto jumps into block"
- } else if false { // GCCGO_ERROR "block starts here"
+ } else if false { // GCCGO_ERROR "block starts here"
L:
}
}
func _() {
if true {
goto L // ERROR "goto L jumps into block starting at LINE+1|goto jumps into block"
- } else if false { // GCCGO_ERROR "block starts here"
+ } else if false { // GCCGO_ERROR "block starts here"
L:
} else {
}
if true {
goto L // ERROR "goto L jumps into block starting at LINE+1|goto jumps into block"
} else if false {
- } else { // GCCGO_ERROR "block starts here"
+ } else { // GCCGO_ERROR "block starts here"
L:
}
}
}
func _() {
- for { // GCCGO_ERROR "block starts here"
+ for { // GCCGO_ERROR "block starts here"
L:
}
goto L // ERROR "goto L jumps into block starting at LINE-3|goto jumps into block"
}
func _() {
- for { // GCCGO_ERROR "block starts here"
+ for { // GCCGO_ERROR "block starts here"
goto L
L1:
}
}
func _() {
- for i < n { // GCCGO_ERROR "block starts here"
+ for i < n { // GCCGO_ERROR "block starts here"
L:
}
goto L // ERROR "goto L jumps into block starting at LINE-3|goto jumps into block"
}
func _() {
- for i = 0; i < n; i++ { // GCCGO_ERROR "block starts here"
+ for i = 0; i < n; i++ { // GCCGO_ERROR "block starts here"
L:
}
goto L // ERROR "goto L jumps into block starting at LINE-3|goto jumps into block"
}
func _() {
- for i = range x { // GCCGO_ERROR "block starts here"
+ for i = range x { // GCCGO_ERROR "block starts here"
L:
}
goto L // ERROR "goto L jumps into block starting at LINE-3|goto jumps into block"
}
func _() {
- for i = range c { // GCCGO_ERROR "block starts here"
+ for i = range c { // GCCGO_ERROR "block starts here"
L:
}
goto L // ERROR "goto L jumps into block starting at LINE-3|goto jumps into block"
}
func _() {
- for i = range m { // GCCGO_ERROR "block starts here"
+ for i = range m { // GCCGO_ERROR "block starts here"
L:
}
goto L // ERROR "goto L jumps into block starting at LINE-3|goto jumps into block"
}
func _() {
- for i = range s { // GCCGO_ERROR "block starts here"
+ for i = range s { // GCCGO_ERROR "block starts here"
L:
}
goto L // ERROR "goto L jumps into block starting at LINE-3|goto jumps into block"
goto L // ERROR "goto L jumps into block starting at LINE+1|goto jumps into block"
switch i {
case 0:
- L: // GCCGO_ERROR "block starts here"
+ L: // GCCGO_ERROR "block starts here"
}
}
goto L // ERROR "goto L jumps into block starting at LINE+1|goto jumps into block"
switch i {
case 0:
- L: // GCCGO_ERROR "block starts here"
+ L: // GCCGO_ERROR "block starts here"
;
default:
}
switch i {
case 0:
default:
- L: // GCCGO_ERROR "block starts here"
+ L: // GCCGO_ERROR "block starts here"
}
}
default:
goto L // ERROR "goto L jumps into block starting at LINE+1|goto jumps into block"
case 0:
- L: // GCCGO_ERROR "block starts here"
+ L: // GCCGO_ERROR "block starts here"
}
}
func _() {
switch i {
case 0:
- L: // GCCGO_ERROR "block starts here"
+ L: // GCCGO_ERROR "block starts here"
;
default:
goto L // ERROR "goto L jumps into block starting at LINE-4|goto jumps into block"
goto L // ERROR "goto L jumps into block starting at LINE+2|goto jumps into block"
select {
case c <- 1:
- L: // GCCGO_ERROR "block starts here"
+ L: // GCCGO_ERROR "block starts here"
}
}
goto L // ERROR "goto L jumps into block starting at LINE+2|goto jumps into block"
select {
case c <- 1:
- L: // GCCGO_ERROR "block starts here"
+ L: // GCCGO_ERROR "block starts here"
;
default:
}
select {
case <-c:
default:
- L: // GCCGO_ERROR "block starts here"
+ L: // GCCGO_ERROR "block starts here"
}
}
default:
goto L // ERROR "goto L jumps into block starting at LINE+1|goto jumps into block"
case <-c:
- L: // GCCGO_ERROR "block starts here"
+ L: // GCCGO_ERROR "block starts here"
}
}
func _() {
select {
case <-c:
- L: // GCCGO_ERROR "block starts here"
+ L: // GCCGO_ERROR "block starts here"
;
default:
goto L // ERROR "goto L jumps into block starting at LINE-4|goto jumps into block"
for {
}
L2: // ERROR "label .*L2.* defined and not used"
- select {
- }
+ select {}
L3: // ERROR "label .*L3.* defined and not used"
switch {
}
default:
break L10
}
+
+ goto L10
+
+ goto go2 // ERROR "label go2 not defined"
}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-
// Verify that erroneous labels are caught by the compiler.
// This set is caught by pass 2. That's why this file is label1.go.
// Does not compile.
break L2
}
if x == 1 {
- continue L2 // ERROR "invalid continue label .*L2"
+ continue L2 // ERROR "invalid continue label .*L2|continue is not in a loop"
}
goto L2
}
+ for {
+ if x == 1 {
+ continue L2 // ERROR "invalid continue label .*L2"
+ }
+ }
+
L3:
switch {
case x > 10:
break L3
}
if x == 12 {
- continue L3 // ERROR "invalid continue label .*L3"
+ continue L3 // ERROR "invalid continue label .*L3|continue is not in a loop"
}
goto L3
}
break L4 // ERROR "invalid break label .*L4"
}
if x == 14 {
- continue L4 // ERROR "invalid continue label .*L4"
+ continue L4 // ERROR "invalid continue label .*L4|continue is not in a loop"
}
if x == 15 {
goto L4
break L5 // ERROR "invalid break label .*L5"
}
if x == 17 {
- continue L5 // ERROR "invalid continue label .*L5"
+ continue L5 // ERROR "invalid continue label .*L5|continue is not in a loop"
}
if x == 18 {
goto L5
goto L1
}
}
+
+ continue // ERROR "continue is not in a loop"
+ for {
+ continue on // ERROR "continue label not defined: on"
+ }
+
+ break // ERROR "break is not in a loop"
+ for {
+ break dance // ERROR "break label not defined: dance"
+ }
+
+ for {
+ switch x {
+ case 1:
+ continue
+ }
+ }
}