Cypherpunks.ru repositories - gostls13.git/commitdiff
[dev.ssa] Merge remote-tracking branch 'origin/master' into mergebranch
author     Keith Randall <khr@golang.org>
           Thu, 28 May 2015 20:49:20 +0000 (13:49 -0700)
committer  Keith Randall <khr@golang.org>
           Thu, 28 May 2015 20:51:18 +0000 (13:51 -0700)
Semi-regular merge of tip to dev.ssa.

Complicated a bit by the move of cmd/internal/* to cmd/compile/internal/*.

Change-Id: I1c66d3c29bb95cce4a53c5a3476373aa5245303d
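
The merge is mostly mechanical: tip moved cmd/internal/* under cmd/compile/internal/*, so every import of the SSA package changes its path. The shape of the change, as the first hunk below shows for pgen.go:

    // before the merge, on dev.ssa:
    import (
        "cmd/internal/obj"
        "cmd/internal/ssa"
    )

    // after merging tip's directory move:
    import (
        "cmd/compile/internal/ssa"
        "cmd/internal/obj"
    )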

43 files changed:
src/cmd/compile/internal/gc/pgen.go
src/cmd/compile/internal/gc/ssa.go
src/cmd/compile/internal/gc/type.go
src/cmd/compile/internal/ssa/TODO
src/cmd/compile/internal/ssa/block.go
src/cmd/compile/internal/ssa/blockkind_string.go
src/cmd/compile/internal/ssa/cgen.go
src/cmd/compile/internal/ssa/check.go
src/cmd/compile/internal/ssa/compile.go
src/cmd/compile/internal/ssa/config.go
src/cmd/compile/internal/ssa/copyelim.go
src/cmd/compile/internal/ssa/critical.go
src/cmd/compile/internal/ssa/cse.go
src/cmd/compile/internal/ssa/deadcode.go
src/cmd/compile/internal/ssa/deadcode_test.go
src/cmd/compile/internal/ssa/dom.go
src/cmd/compile/internal/ssa/export_test.go
src/cmd/compile/internal/ssa/func.go
src/cmd/compile/internal/ssa/func_test.go
src/cmd/compile/internal/ssa/fuse.go
src/cmd/compile/internal/ssa/generic.go
src/cmd/compile/internal/ssa/id.go
src/cmd/compile/internal/ssa/layout.go
src/cmd/compile/internal/ssa/location.go
src/cmd/compile/internal/ssa/lower.go
src/cmd/compile/internal/ssa/lowerAmd64.go
src/cmd/compile/internal/ssa/op.go
src/cmd/compile/internal/ssa/op_string.go
src/cmd/compile/internal/ssa/opamd64.go
src/cmd/compile/internal/ssa/opt.go
src/cmd/compile/internal/ssa/phielim.go
src/cmd/compile/internal/ssa/print.go
src/cmd/compile/internal/ssa/regalloc.go
src/cmd/compile/internal/ssa/rewrite.go
src/cmd/compile/internal/ssa/rulegen/generic.rules
src/cmd/compile/internal/ssa/rulegen/lower_amd64.rules
src/cmd/compile/internal/ssa/rulegen/rulegen.go
src/cmd/compile/internal/ssa/schedule.go
src/cmd/compile/internal/ssa/sparseset.go
src/cmd/compile/internal/ssa/stackalloc.go
src/cmd/compile/internal/ssa/type.go
src/cmd/compile/internal/ssa/value.go
src/cmd/dist/buildtool.go
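
A key to the combined-diff markers used below (a merge diff carries two marker columns, one per parent):

    ++ line      added by the merge relative to both parents
     + line      unchanged from the first parent, new relative to the second
    -  line      a parent's line that does not survive into the merge result
    @@@ ... @@@  hunk header of git's combined (three-way) diff format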

index 2c225c877819465ff7e9bc697882de26a90c64da,5fb0776f3c3fd6bae332f76bb1f73ed2afc9e1a8..c170060896a724b74ba1ebaf58d409ad37577a34
@@@ -5,8 -5,7 +5,8 @@@
  package gc
  
  import (
++      "cmd/compile/internal/ssa"
        "cmd/internal/obj"
-       "cmd/internal/ssa"
        "crypto/md5"
        "fmt"
        "strings"
index bb4d2783834dad27a2aceefeb20683e648409a5a,0000000000000000000000000000000000000000..7f78fce17e12142b6cc9ee9115737e094aff7139
mode 100644,000000..100644
--- /dev/null
@@@ -1,909 -1,0 +1,909 @@@
 +// Copyright 2015 The Go Authors. All rights reserved.
 +// Use of this source code is governed by a BSD-style
 +// license that can be found in the LICENSE file.
 +
 +package gc
 +
 +import (
 +      "log"
 +
++      "cmd/compile/internal/ssa"
 +      "cmd/internal/obj"
 +      "cmd/internal/obj/x86" // TODO: remove
-       "cmd/internal/ssa"
 +)
 +
 +func buildssa(fn *Node) *ssa.Func {
 +      dumplist("buildssa", Curfn.Nbody)
 +
 +      var s state
 +
 +      // TODO(khr): build config just once at the start of the compiler binary
 +      s.config = ssa.NewConfig(Thearch.Thestring)
 +      s.f = s.config.NewFunc()
 +      s.f.Name = fn.Nname.Sym.Name
 +
 +      // We construct SSA using an algorithm similar to
 +      // Braun, Buchwald, Hack, Leißa, Mallon, and Zwinkau
 +      // http://pp.info.uni-karlsruhe.de/uploads/publikationen/braun13cc.pdf
 +      // TODO: check this comment
 +
 +      // Allocate starting block
 +      s.f.Entry = s.f.NewBlock(ssa.BlockPlain)
 +
 +      // Allocate exit block
 +      s.exit = s.f.NewBlock(ssa.BlockExit)
 +
 +      // Allocate starting values
 +      s.startmem = s.f.Entry.NewValue(ssa.OpArg, ssa.TypeMem, ".mem")
 +      s.fp = s.f.Entry.NewValue(ssa.OpFP, s.config.Uintptr, nil) // TODO: use generic pointer type (unsafe.Pointer?) instead
 +      s.sp = s.f.Entry.NewValue(ssa.OpSP, s.config.Uintptr, nil)
 +
 +      s.vars = map[string]*ssa.Value{}
 +      s.labels = map[string]*ssa.Block{}
 +      s.argOffsets = map[string]int64{}
 +
 +      // Convert the AST-based IR to the SSA-based IR
 +      s.startBlock(s.f.Entry)
 +      s.stmtList(fn.Nbody)
 +
 +      // fallthrough to exit
 +      if b := s.endBlock(); b != nil {
 +              addEdge(b, s.exit)
 +      }
 +
 +      // Finish up exit block
 +      s.startBlock(s.exit)
 +      s.exit.Control = s.mem()
 +      s.endBlock()
 +
 +      // Link up variable uses to variable definitions
 +      s.linkForwardReferences()
 +
 +      // Main call to ssa package to compile function
 +      ssa.Compile(s.f)
 +
 +      return s.f
 +}
 +
 +type state struct {
 +      // configuration (arch) information
 +      config *ssa.Config
 +
 +      // function we're building
 +      f *ssa.Func
 +
 +      // exit block that "return" jumps to (and panics jump to)
 +      exit *ssa.Block
 +
 +      // the target block for each label in f
 +      labels map[string]*ssa.Block
 +
 +      // current location where we're interpreting the AST
 +      curBlock *ssa.Block
 +
 +      // variable assignments in the current block (map from variable name to ssa value)
 +      vars map[string]*ssa.Value
 +
 +      // all defined variables at the end of each block.  Indexed by block ID.
 +      defvars []map[string]*ssa.Value
 +
 +      // offsets of argument slots
 +      // unnamed and unused args are not listed.
 +      argOffsets map[string]int64
 +
 +      // starting values.  Memory, frame pointer, and stack pointer
 +      startmem *ssa.Value
 +      fp       *ssa.Value
 +      sp       *ssa.Value
 +}
 +
 +// startBlock sets the current block we're generating code in to b.
 +func (s *state) startBlock(b *ssa.Block) {
 +      if s.curBlock != nil {
 +              log.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
 +      }
 +      s.curBlock = b
 +      s.vars = map[string]*ssa.Value{}
 +}
 +
 +// endBlock marks the end of generating code for the current block.
 +// Returns the (former) current block.  Returns nil if there is no current
 +// block, i.e. if no code flows to the current execution point.
 +func (s *state) endBlock() *ssa.Block {
 +      b := s.curBlock
 +      if b == nil {
 +              return nil
 +      }
 +      for len(s.defvars) <= int(b.ID) {
 +              s.defvars = append(s.defvars, nil)
 +      }
 +      s.defvars[b.ID] = s.vars
 +      s.curBlock = nil
 +      s.vars = nil
 +      return b
 +}
 +
 +// stmtList converts the statement list l to SSA and adds it to s.
 +func (s *state) stmtList(l *NodeList) {
 +      for ; l != nil; l = l.Next {
 +              s.stmt(l.N)
 +      }
 +}
 +
 +// stmt converts the statement n to SSA and adds it to s.
 +func (s *state) stmt(n *Node) {
 +      s.stmtList(n.Ninit)
 +      switch n.Op {
 +
 +      case OBLOCK:
 +              s.stmtList(n.List)
 +
 +      case ODCL:
 +              // TODO: ???  Assign 0?
 +
 +      case OLABEL, OGOTO:
 +              // get block at label, or make one
 +              t := s.labels[n.Left.Sym.Name]
 +              if t == nil {
 +                      t = s.f.NewBlock(ssa.BlockPlain)
 +                      s.labels[n.Left.Sym.Name] = t
 +              }
 +              // go to that label (we pretend "label:" is preceded by "goto label")
 +              b := s.endBlock()
 +              addEdge(b, t)
 +
 +              if n.Op == OLABEL {
 +                      // next we work on the label's target block
 +                      s.startBlock(t)
 +              }
 +
 +      case OAS:
 +              // TODO(khr): colas?
 +              val := s.expr(n.Right)
 +              if n.Left.Op == ONAME && !n.Left.Addrtaken && n.Left.Class&PHEAP == 0 && n.Left.Class != PEXTERN && n.Left.Class != PPARAMOUT {
 +                      // ssa-able variable.
 +                      s.vars[n.Left.Sym.Name] = val
 +                      return
 +              }
 +              // not ssa-able.  Treat as a store.
 +              addr := s.addr(n.Left)
 +              s.vars[".mem"] = s.curBlock.NewValue3(ssa.OpStore, ssa.TypeMem, nil, addr, val, s.mem())
 +              // TODO: try to make more variables registerizeable.
 +      case OIF:
 +              cond := s.expr(n.Ntest)
 +              b := s.endBlock()
 +              b.Kind = ssa.BlockIf
 +              b.Control = cond
 +              // TODO(khr): likely direction
 +
 +              bThen := s.f.NewBlock(ssa.BlockPlain)
 +              bEnd := s.f.NewBlock(ssa.BlockPlain)
 +              var bElse *ssa.Block
 +
 +              if n.Nelse == nil {
 +                      addEdge(b, bThen)
 +                      addEdge(b, bEnd)
 +              } else {
 +                      bElse = s.f.NewBlock(ssa.BlockPlain)
 +                      addEdge(b, bThen)
 +                      addEdge(b, bElse)
 +              }
 +
 +              s.startBlock(bThen)
 +              s.stmtList(n.Nbody)
 +              b = s.endBlock()
 +              if b != nil {
 +                      addEdge(b, bEnd)
 +              }
 +
 +              if n.Nelse != nil {
 +                      s.startBlock(bElse)
 +                      s.stmtList(n.Nelse)
 +                      b = s.endBlock()
 +                      if b != nil {
 +                              addEdge(b, bEnd)
 +                      }
 +              }
 +              s.startBlock(bEnd)
 +
 +      case ORETURN:
 +              s.stmtList(n.List)
 +              b := s.endBlock()
 +              addEdge(b, s.exit)
 +
 +      case OFOR:
 +              bCond := s.f.NewBlock(ssa.BlockPlain)
 +              bBody := s.f.NewBlock(ssa.BlockPlain)
 +              bEnd := s.f.NewBlock(ssa.BlockPlain)
 +
 +              // first, jump to condition test
 +              b := s.endBlock()
 +              addEdge(b, bCond)
 +
 +              // generate code to test condition
 +              // TODO(khr): Ntest == nil exception
 +              s.startBlock(bCond)
 +              cond := s.expr(n.Ntest)
 +              b = s.endBlock()
 +              b.Kind = ssa.BlockIf
 +              b.Control = cond
 +              // TODO(khr): likely direction
 +              addEdge(b, bBody)
 +              addEdge(b, bEnd)
 +
 +              // generate body
 +              s.startBlock(bBody)
 +              s.stmtList(n.Nbody)
 +              s.stmt(n.Nincr)
 +              b = s.endBlock()
 +              addEdge(b, bCond)
 +
 +              s.startBlock(bEnd)
 +
 +      case OVARKILL:
 +              // TODO(khr): ??? anything to do here?  Only for addrtaken variables?
 +              // Maybe just link it in the store chain?
 +      default:
 +              log.Fatalf("unhandled stmt %s", opnames[n.Op])
 +      }
 +}
 +
 +// expr converts the expression n to SSA, adds it to s and returns the SSA result.
 +func (s *state) expr(n *Node) *ssa.Value {
 +      if n == nil {
 +              // TODO(khr): is this nil???
 +              return s.f.Entry.NewValue(ssa.OpConst, n.Type, nil)
 +      }
 +      switch n.Op {
 +      case ONAME:
 +              // TODO: remember offsets for PPARAM names
 +              if n.Class == PEXTERN {
 +                      // global variable
 +                      addr := s.f.Entry.NewValue(ssa.OpGlobal, Ptrto(n.Type), n.Sym)
 +                      return s.curBlock.NewValue2(ssa.OpLoad, n.Type, nil, addr, s.mem())
 +              }
 +              s.argOffsets[n.Sym.Name] = n.Xoffset
 +              return s.variable(n.Sym.Name, n.Type)
 +      case OLITERAL:
 +              switch n.Val.Ctype {
 +              case CTINT:
-                       return s.f.ConstInt(n.Type, Mpgetfix(n.Val.U.Xval))
++                      return s.f.ConstInt(n.Type, Mpgetfix(n.Val.U.(*Mpint)))
 +              default:
 +                      log.Fatalf("unhandled OLITERAL %v", n.Val.Ctype)
 +                      return nil
 +              }
 +
 +              // binary ops
 +      case OLT:
 +              a := s.expr(n.Left)
 +              b := s.expr(n.Right)
 +              return s.curBlock.NewValue2(ssa.OpLess, ssa.TypeBool, nil, a, b)
 +      case OADD:
 +              a := s.expr(n.Left)
 +              b := s.expr(n.Right)
 +              return s.curBlock.NewValue2(ssa.OpAdd, a.Type, nil, a, b)
 +      case OSUB:
 +              // TODO(khr): fold code for all binary ops together somehow
 +              a := s.expr(n.Left)
 +              b := s.expr(n.Right)
 +              return s.curBlock.NewValue2(ssa.OpSub, a.Type, nil, a, b)
 +      case OLSH:
 +              a := s.expr(n.Left)
 +              b := s.expr(n.Right)
 +              return s.curBlock.NewValue2(ssa.OpLsh, a.Type, nil, a, b)
 +      case ORSH:
 +              a := s.expr(n.Left)
 +              b := s.expr(n.Right)
 +              return s.curBlock.NewValue2(ssa.OpRsh, a.Type, nil, a, b)
 +
 +      case OADDR:
 +              return s.addr(n.Left)
 +
 +      case OIND:
 +              p := s.expr(n.Left)
 +              s.nilCheck(p)
 +              return s.curBlock.NewValue2(ssa.OpLoad, n.Type, nil, p, s.mem())
 +
 +      case ODOTPTR:
 +              p := s.expr(n.Left)
 +              s.nilCheck(p)
 +              p = s.curBlock.NewValue2(ssa.OpAdd, p.Type, nil, p, s.f.ConstInt(s.config.Uintptr, n.Xoffset))
 +              return s.curBlock.NewValue2(ssa.OpLoad, n.Type, nil, p, s.mem())
 +
 +      case OINDEX:
 +              if n.Left.Type.Bound >= 0 { // array
 +                      a := s.expr(n.Left)
 +                      i := s.expr(n.Right)
 +                      s.boundsCheck(i, s.f.ConstInt(s.config.Uintptr, n.Left.Type.Bound))
 +                      return s.curBlock.NewValue2(ssa.OpArrayIndex, n.Left.Type.Type, nil, a, i)
 +              } else { // slice
 +                      p := s.addr(n)
 +                      return s.curBlock.NewValue2(ssa.OpLoad, n.Left.Type.Type, nil, p, s.mem())
 +              }
 +
 +      case OCALLFUNC:
 +              // run all argument assignments
 +              // TODO(khr): do we need to evaluate function first?
 +              // Or is it already side-effect-free and does not require a call?
 +              s.stmtList(n.List)
 +
 +              if n.Left.Op != ONAME {
 +                      // TODO(khr): closure calls?
 +                      log.Fatalf("can't handle CALLFUNC with non-ONAME fn %s", opnames[n.Left.Op])
 +              }
 +              bNext := s.f.NewBlock(ssa.BlockPlain)
 +              call := s.curBlock.NewValue1(ssa.OpStaticCall, ssa.TypeMem, n.Left.Sym, s.mem())
 +              b := s.endBlock()
 +              b.Kind = ssa.BlockCall
 +              b.Control = call
 +              addEdge(b, bNext)
 +              addEdge(b, s.exit)
 +
 +              // read result from stack at the start of the fallthrough block
 +              s.startBlock(bNext)
 +              var titer Iter
 +              fp := Structfirst(&titer, Getoutarg(n.Left.Type))
 +              a := s.f.Entry.NewValue1(ssa.OpOffPtr, Ptrto(fp.Type), fp.Width, s.sp)
 +              return s.curBlock.NewValue2(ssa.OpLoad, fp.Type, nil, a, call)
 +      default:
 +              log.Fatalf("unhandled expr %s", opnames[n.Op])
 +              return nil
 +      }
 +}
 +
 +// addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
 +func (s *state) addr(n *Node) *ssa.Value {
 +      switch n.Op {
 +      case ONAME:
 +              if n.Class == PEXTERN {
 +                      // global variable
 +                      return s.f.Entry.NewValue(ssa.OpGlobal, Ptrto(n.Type), n.Sym)
 +              }
 +              if n.Class == PPARAMOUT {
 +                      // store to parameter slot
 +                      return s.f.Entry.NewValue1(ssa.OpOffPtr, Ptrto(n.Type), n.Xoffset, s.fp)
 +              }
 +              // TODO: address of locals
 +              log.Fatalf("variable address of %v not implemented", n)
 +              return nil
 +      case OINDREG:
 +              // indirect off a register (TODO: always SP?)
 +              // used for storing/loading arguments/returns to/from callees
 +              return s.f.Entry.NewValue1(ssa.OpOffPtr, Ptrto(n.Type), n.Xoffset, s.sp)
 +      case OINDEX:
 +              if n.Left.Type.Bound >= 0 { // array
 +                      a := s.addr(n.Left)
 +                      i := s.expr(n.Right)
 +                      len := s.f.ConstInt(s.config.Uintptr, n.Left.Type.Bound)
 +                      s.boundsCheck(i, len)
 +                      return s.curBlock.NewValue2(ssa.OpPtrIndex, Ptrto(n.Left.Type.Type), nil, a, i)
 +              } else { // slice
 +                      a := s.expr(n.Left)
 +                      i := s.expr(n.Right)
 +                      len := s.curBlock.NewValue1(ssa.OpSliceLen, s.config.Uintptr, nil, a)
 +                      s.boundsCheck(i, len)
 +                      p := s.curBlock.NewValue1(ssa.OpSlicePtr, Ptrto(n.Left.Type.Type), nil, a)
 +                      return s.curBlock.NewValue2(ssa.OpPtrIndex, Ptrto(n.Left.Type.Type), nil, p, i)
 +              }
 +      default:
 +              log.Fatalf("addr: bad op %v", n.Op)
 +              return nil
 +      }
 +}
 +
 +// nilCheck generates nil pointer checking code.
 +// Starts a new block on return.
 +func (s *state) nilCheck(ptr *ssa.Value) {
 +      c := s.curBlock.NewValue1(ssa.OpIsNonNil, ssa.TypeBool, nil, ptr)
 +      b := s.endBlock()
 +      b.Kind = ssa.BlockIf
 +      b.Control = c
 +      bNext := s.f.NewBlock(ssa.BlockPlain)
 +      addEdge(b, bNext)
 +      addEdge(b, s.exit)
 +      s.startBlock(bNext)
 +      // TODO(khr): Don't go directly to exit.  Go to a stub that calls panicmem first.
 +      // TODO: implicit nil checks somehow?
 +}
 +
 +// boundsCheck generates bounds checking code.  Checks if 0 <= idx < len, branches to exit if not.
 +// Starts a new block on return.
 +func (s *state) boundsCheck(idx, len *ssa.Value) {
 +      // TODO: convert index to full width?
 +      // TODO: if index is 64-bit and we're compiling to 32-bit, check that high 32 bits are zero.
 +
 +      // bounds check
 +      cmp := s.curBlock.NewValue2(ssa.OpIsInBounds, ssa.TypeBool, nil, idx, len)
 +      b := s.endBlock()
 +      b.Kind = ssa.BlockIf
 +      b.Control = cmp
 +      bNext := s.f.NewBlock(ssa.BlockPlain)
 +      addEdge(b, bNext)
 +      addEdge(b, s.exit)
 +      // TODO: don't go directly to s.exit.  Go to a stub that calls panicindex first.
 +      s.startBlock(bNext)
 +}
 +
 +// variable returns the value of a variable at the current location.
 +func (s *state) variable(name string, t ssa.Type) *ssa.Value {
 +      if s.curBlock == nil {
 +              log.Fatalf("nil curblock!")
 +      }
 +      v := s.vars[name]
 +      if v == nil {
 +              // TODO: get type?  Take Sym as arg?
 +              v = s.curBlock.NewValue(ssa.OpFwdRef, t, name)
 +              s.vars[name] = v
 +      }
 +      return v
 +}
 +
 +func (s *state) mem() *ssa.Value {
 +      return s.variable(".mem", ssa.TypeMem)
 +}
 +
 +func (s *state) linkForwardReferences() {
 +      // Build ssa graph.  Each variable on its first use in a basic block
 +      // leaves a FwdRef in that block representing the incoming value
 +      // of that variable.  This function links that ref up with possible definitions,
 +      // inserting Phi values as needed.  This is essentially the algorithm
 +      // described by Braun, Buchwald, Hack, Leißa, Mallon, and Zwinkau:
 +      // http://pp.info.uni-karlsruhe.de/uploads/publikationen/braun13cc.pdf
 +      for _, b := range s.f.Blocks {
 +              for _, v := range b.Values {
 +                      if v.Op != ssa.OpFwdRef {
 +                              continue
 +                      }
 +                      name := v.Aux.(string)
 +                      v.Op = ssa.OpCopy
 +                      v.Aux = nil
 +                      v.SetArgs1(s.lookupVarIncoming(b, v.Type, name))
 +              }
 +      }
 +}
 +
 +// lookupVarIncoming finds the variable's value at the start of block b.
 +func (s *state) lookupVarIncoming(b *ssa.Block, t ssa.Type, name string) *ssa.Value {
 +      // TODO(khr): have lookupVarIncoming overwrite the FwdRef or Copy that
 +      // it will be used in, instead of having the result used in a copy value.
 +      if b == s.f.Entry {
 +              if name == ".mem" {
 +                      return s.startmem
 +              }
 +              // variable is live at the entry block.  Load it.
 +              addr := s.f.Entry.NewValue1(ssa.OpOffPtr, Ptrto(t.(*Type)), s.argOffsets[name], s.fp)
 +              return b.NewValue2(ssa.OpLoad, t, nil, addr, s.startmem)
 +      }
 +      var vals []*ssa.Value
 +      for _, p := range b.Preds {
 +              vals = append(vals, s.lookupVarOutgoing(p, t, name))
 +      }
 +      v0 := vals[0]
 +      for i := 1; i < len(vals); i++ {
 +              if vals[i] != v0 {
 +                      // need a phi value
 +                      v := b.NewValue(ssa.OpPhi, t, nil)
 +                      v.AddArgs(vals...)
 +                      return v
 +              }
 +      }
 +      return v0
 +}
 +
 +// lookupVarOutgoing finds the variable's value at the end of block b.
 +func (s *state) lookupVarOutgoing(b *ssa.Block, t ssa.Type, name string) *ssa.Value {
 +      m := s.defvars[b.ID]
 +      if v, ok := m[name]; ok {
 +              return v
 +      }
 +      // The variable is not defined by b and we haven't
 +      // looked it up yet.  Generate v, a copy value which
 +      // will be the outgoing value of the variable.  Then
 +      // look up w, the incoming value of the variable.
 +      // Make v = copy(w).  We need the extra copy to
 +      // prevent infinite recursion when looking up the
 +      // incoming value of the variable.
 +      v := b.NewValue(ssa.OpCopy, t, nil)
 +      m[name] = v
 +      v.AddArg(s.lookupVarIncoming(b, t, name))
 +      return v
 +}
 +
 +// TODO: the above mutually recursive functions can lead to very deep stacks.  Fix that.
 +
 +// addEdge adds an edge from b to c.
 +func addEdge(b, c *ssa.Block) {
 +      b.Succs = append(b.Succs, c)
 +      c.Preds = append(c.Preds, b)
 +}
 +
 +// an unresolved branch
 +type branch struct {
 +      p *obj.Prog  // branch instruction
 +      b *ssa.Block // target
 +}
 +
 +// genssa appends entries to ptxt for each instruction in f.
 +// gcargs and gclocals are filled in with pointer maps for the frame.
 +func genssa(f *ssa.Func, ptxt *obj.Prog, gcargs, gclocals *Sym) {
 +      // TODO: line numbers
 +
 +      if f.FrameSize > 1<<31 {
 +              Yyerror("stack frame too large (>2GB)")
 +              return
 +      }
 +
 +      ptxt.To.Type = obj.TYPE_TEXTSIZE
 +      ptxt.To.Val = int32(Rnd(Curfn.Type.Argwid, int64(Widthptr))) // arg size
 +      ptxt.To.Offset = f.FrameSize - 8                             // TODO: arch-dependent
 +
 +      // Remember where each block starts.
 +      bstart := make([]*obj.Prog, f.NumBlocks())
 +
 +      // Remember all the branch instructions we've seen
 +      // and where they would like to go
 +      var branches []branch
 +
 +      // Emit basic blocks
 +      for i, b := range f.Blocks {
 +              bstart[b.ID] = Pc
 +              // Emit values in block
 +              for _, v := range b.Values {
 +                      genValue(v)
 +              }
 +              // Emit control flow instructions for block
 +              var next *ssa.Block
 +              if i < len(f.Blocks)-1 {
 +                      next = f.Blocks[i+1]
 +              }
 +              branches = genBlock(b, next, branches)
 +      }
 +
 +      // Resolve branches
 +      for _, br := range branches {
 +              br.p.To.Val = bstart[br.b.ID]
 +      }
 +
 +      Pc.As = obj.ARET // overwrite AEND
 +
 +      // TODO: liveness
 +      // TODO: gcargs
 +      // TODO: gclocals
 +
 +      // TODO: dump frame if -f
 +
 +      // Emit garbage collection symbols.  TODO: put something in them
 +      liveness(Curfn, ptxt, gcargs, gclocals)
 +}
 +
 +func genValue(v *ssa.Value) {
 +      switch v.Op {
 +      case ssa.OpADDQ:
 +              // TODO: use addq instead of leaq if target is in the right register.
 +              p := Prog(x86.ALEAQ)
 +              p.From.Type = obj.TYPE_MEM
 +              p.From.Reg = regnum(v.Args[0])
 +              p.From.Scale = 1
 +              p.From.Index = regnum(v.Args[1])
 +              p.To.Type = obj.TYPE_REG
 +              p.To.Reg = regnum(v)
 +      case ssa.OpADDQconst:
 +              // TODO: use addq instead of leaq if target is in the right register.
 +              p := Prog(x86.ALEAQ)
 +              p.From.Type = obj.TYPE_MEM
 +              p.From.Reg = regnum(v.Args[0])
 +              p.From.Offset = v.Aux.(int64)
 +              p.To.Type = obj.TYPE_REG
 +              p.To.Reg = regnum(v)
 +      case ssa.OpMULQconst:
 +              // TODO: this isn't right.  doasm fails on it.  I don't think obj
 +              // has ever been taught to compile imul $c, r1, r2.
 +              p := Prog(x86.AIMULQ)
 +              p.From.Type = obj.TYPE_CONST
 +              p.From.Offset = v.Aux.(int64)
 +              p.From3.Type = obj.TYPE_REG
 +              p.From3.Reg = regnum(v.Args[0])
 +              p.To.Type = obj.TYPE_REG
 +              p.To.Reg = regnum(v)
 +      case ssa.OpSUBQconst:
 +              // This code compensates for the fact that the register allocator
 +              // doesn't understand 2-address instructions yet.  TODO: fix that.
 +              x := regnum(v.Args[0])
 +              r := regnum(v)
 +              if x != r {
 +                      p := Prog(x86.AMOVQ)
 +                      p.From.Type = obj.TYPE_REG
 +                      p.From.Reg = x
 +                      p.To.Type = obj.TYPE_REG
 +                      p.To.Reg = r
 +                      x = r
 +              }
 +              p := Prog(x86.ASUBQ)
 +              p.From.Type = obj.TYPE_CONST
 +              p.From.Offset = v.Aux.(int64)
 +              p.To.Type = obj.TYPE_REG
 +              p.To.Reg = r
 +      case ssa.OpSHLQconst:
 +              x := regnum(v.Args[0])
 +              r := regnum(v)
 +              if x != r {
 +                      p := Prog(x86.AMOVQ)
 +                      p.From.Type = obj.TYPE_REG
 +                      p.From.Reg = x
 +                      p.To.Type = obj.TYPE_REG
 +                      p.To.Reg = r
 +                      x = r
 +              }
 +              p := Prog(x86.ASHLQ)
 +              p.From.Type = obj.TYPE_CONST
 +              p.From.Offset = v.Aux.(int64)
 +              p.To.Type = obj.TYPE_REG
 +              p.To.Reg = r
 +      case ssa.OpLEAQ:
 +              p := Prog(x86.ALEAQ)
 +              p.From.Type = obj.TYPE_MEM
 +              p.From.Reg = regnum(v.Args[0])
 +              p.From.Scale = 1
 +              p.From.Index = regnum(v.Args[1])
 +              p.From.Offset = v.Aux.(int64)
 +              p.To.Type = obj.TYPE_REG
 +              p.To.Reg = regnum(v)
 +      case ssa.OpCMPQ:
 +              p := Prog(x86.ACMPQ)
 +              p.From.Type = obj.TYPE_REG
 +              p.From.Reg = regnum(v.Args[0])
 +              p.To.Type = obj.TYPE_REG
 +              p.To.Reg = regnum(v.Args[1])
 +      case ssa.OpCMPQconst:
 +              p := Prog(x86.ACMPQ)
 +              p.From.Type = obj.TYPE_REG
 +              p.From.Reg = regnum(v.Args[0])
 +              p.To.Type = obj.TYPE_CONST
 +              p.To.Offset = v.Aux.(int64)
 +      case ssa.OpTESTB:
 +              p := Prog(x86.ATESTB)
 +              p.From.Type = obj.TYPE_REG
 +              p.From.Reg = regnum(v.Args[0])
 +              p.To.Type = obj.TYPE_REG
 +              p.To.Reg = regnum(v.Args[1])
 +      case ssa.OpMOVQconst:
 +              x := regnum(v)
 +              p := Prog(x86.AMOVQ)
 +              p.From.Type = obj.TYPE_CONST
 +              p.From.Offset = v.Aux.(int64)
 +              p.To.Type = obj.TYPE_REG
 +              p.To.Reg = x
 +      case ssa.OpMOVQload:
 +              p := Prog(x86.AMOVQ)
 +              p.From.Type = obj.TYPE_MEM
 +              p.From.Reg = regnum(v.Args[0])
 +              p.From.Offset = v.Aux.(int64)
 +              p.To.Type = obj.TYPE_REG
 +              p.To.Reg = regnum(v)
 +      case ssa.OpMOVBload:
 +              p := Prog(x86.AMOVB)
 +              p.From.Type = obj.TYPE_MEM
 +              p.From.Reg = regnum(v.Args[0])
 +              p.From.Offset = v.Aux.(int64)
 +              p.To.Type = obj.TYPE_REG
 +              p.To.Reg = regnum(v)
 +      case ssa.OpMOVQloadidx8:
 +              p := Prog(x86.AMOVQ)
 +              p.From.Type = obj.TYPE_MEM
 +              p.From.Reg = regnum(v.Args[0])
 +              p.From.Offset = v.Aux.(int64)
 +              p.From.Scale = 8
 +              p.From.Index = regnum(v.Args[1])
 +              p.To.Type = obj.TYPE_REG
 +              p.To.Reg = regnum(v)
 +      case ssa.OpMOVQstore:
 +              p := Prog(x86.AMOVQ)
 +              p.From.Type = obj.TYPE_REG
 +              p.From.Reg = regnum(v.Args[1])
 +              p.To.Type = obj.TYPE_MEM
 +              p.To.Reg = regnum(v.Args[0])
 +              p.To.Offset = v.Aux.(int64)
 +      case ssa.OpCopy:
 +              x := regnum(v.Args[0])
 +              y := regnum(v)
 +              if x != y {
 +                      p := Prog(x86.AMOVQ)
 +                      p.From.Type = obj.TYPE_REG
 +                      p.From.Reg = x
 +                      p.To.Type = obj.TYPE_REG
 +                      p.To.Reg = y
 +              }
 +      case ssa.OpLoadReg8:
 +              p := Prog(x86.AMOVQ)
 +              p.From.Type = obj.TYPE_MEM
 +              p.From.Reg = x86.REG_SP
 +              p.From.Offset = localOffset(v.Args[0])
 +              p.To.Type = obj.TYPE_REG
 +              p.To.Reg = regnum(v)
 +      case ssa.OpStoreReg8:
 +              p := Prog(x86.AMOVQ)
 +              p.From.Type = obj.TYPE_REG
 +              p.From.Reg = regnum(v.Args[0])
 +              p.To.Type = obj.TYPE_MEM
 +              p.To.Reg = x86.REG_SP
 +              p.To.Offset = localOffset(v)
 +      case ssa.OpPhi:
 +              // just check to make sure regalloc did it right
 +              f := v.Block.Func
 +              loc := f.RegAlloc[v.ID]
 +              for _, a := range v.Args {
 +                      if f.RegAlloc[a.ID] != loc { // TODO: .Equal() instead?
 +                              log.Fatalf("phi arg at different location than phi %v %v %v %v", v, loc, a, f.RegAlloc[a.ID])
 +                      }
 +              }
 +      case ssa.OpConst:
 +              if v.Block.Func.RegAlloc[v.ID] != nil {
 +                      log.Fatalf("const value %v shouldn't have a location", v)
 +              }
 +      case ssa.OpArg:
 +              // memory arg needs no code
 +              // TODO: only mem arg goes here.
 +      case ssa.OpLEAQglobal:
 +              g := v.Aux.(ssa.GlobalOffset)
 +              p := Prog(x86.ALEAQ)
 +              p.From.Type = obj.TYPE_MEM
 +              p.From.Name = obj.NAME_EXTERN
 +              p.From.Sym = Linksym(g.Global.(*Sym))
 +              p.From.Offset = g.Offset
 +              p.To.Type = obj.TYPE_REG
 +              p.To.Reg = regnum(v)
 +      case ssa.OpStaticCall:
 +              p := Prog(obj.ACALL)
 +              p.To.Type = obj.TYPE_MEM
 +              p.To.Name = obj.NAME_EXTERN
 +              p.To.Sym = Linksym(v.Aux.(*Sym))
 +      case ssa.OpFP, ssa.OpSP:
 +              // nothing to do
 +      default:
 +              log.Fatalf("value %s not implemented", v.LongString())
 +      }
 +}
 +
 +func genBlock(b, next *ssa.Block, branches []branch) []branch {
 +      switch b.Kind {
 +      case ssa.BlockPlain:
 +              if b.Succs[0] != next {
 +                      p := Prog(obj.AJMP)
 +                      p.To.Type = obj.TYPE_BRANCH
 +                      branches = append(branches, branch{p, b.Succs[0]})
 +              }
 +      case ssa.BlockExit:
 +              Prog(obj.ARET)
 +      case ssa.BlockCall:
 +              if b.Succs[0] != next {
 +                      p := Prog(obj.AJMP)
 +                      p.To.Type = obj.TYPE_BRANCH
 +                      branches = append(branches, branch{p, b.Succs[0]})
 +              }
 +      case ssa.BlockEQ:
 +              if b.Succs[0] == next {
 +                      p := Prog(x86.AJNE)
 +                      p.To.Type = obj.TYPE_BRANCH
 +                      branches = append(branches, branch{p, b.Succs[1]})
 +              } else if b.Succs[1] == next {
 +                      p := Prog(x86.AJEQ)
 +                      p.To.Type = obj.TYPE_BRANCH
 +                      branches = append(branches, branch{p, b.Succs[0]})
 +              } else {
 +                      p := Prog(x86.AJEQ)
 +                      p.To.Type = obj.TYPE_BRANCH
 +                      branches = append(branches, branch{p, b.Succs[0]})
 +                      q := Prog(obj.AJMP)
 +                      q.To.Type = obj.TYPE_BRANCH
 +                      branches = append(branches, branch{q, b.Succs[1]})
 +              }
 +      case ssa.BlockNE:
 +              if b.Succs[0] == next {
 +                      p := Prog(x86.AJEQ)
 +                      p.To.Type = obj.TYPE_BRANCH
 +                      branches = append(branches, branch{p, b.Succs[1]})
 +              } else if b.Succs[1] == next {
 +                      p := Prog(x86.AJNE)
 +                      p.To.Type = obj.TYPE_BRANCH
 +                      branches = append(branches, branch{p, b.Succs[0]})
 +              } else {
 +                      p := Prog(x86.AJNE)
 +                      p.To.Type = obj.TYPE_BRANCH
 +                      branches = append(branches, branch{p, b.Succs[0]})
 +                      q := Prog(obj.AJMP)
 +                      q.To.Type = obj.TYPE_BRANCH
 +                      branches = append(branches, branch{q, b.Succs[1]})
 +              }
 +      case ssa.BlockLT:
 +              if b.Succs[0] == next {
 +                      p := Prog(x86.AJGE)
 +                      p.To.Type = obj.TYPE_BRANCH
 +                      branches = append(branches, branch{p, b.Succs[1]})
 +              } else if b.Succs[1] == next {
 +                      p := Prog(x86.AJLT)
 +                      p.To.Type = obj.TYPE_BRANCH
 +                      branches = append(branches, branch{p, b.Succs[0]})
 +              } else {
 +                      p := Prog(x86.AJLT)
 +                      p.To.Type = obj.TYPE_BRANCH
 +                      branches = append(branches, branch{p, b.Succs[0]})
 +                      q := Prog(obj.AJMP)
 +                      q.To.Type = obj.TYPE_BRANCH
 +                      branches = append(branches, branch{q, b.Succs[1]})
 +              }
 +      case ssa.BlockULT:
 +              if b.Succs[0] == next {
 +                      p := Prog(x86.AJCC)
 +                      p.To.Type = obj.TYPE_BRANCH
 +                      branches = append(branches, branch{p, b.Succs[1]})
 +              } else if b.Succs[1] == next {
 +                      p := Prog(x86.AJCS)
 +                      p.To.Type = obj.TYPE_BRANCH
 +                      branches = append(branches, branch{p, b.Succs[0]})
 +              } else {
 +                      p := Prog(x86.AJCS)
 +                      p.To.Type = obj.TYPE_BRANCH
 +                      branches = append(branches, branch{p, b.Succs[0]})
 +                      q := Prog(obj.AJMP)
 +                      q.To.Type = obj.TYPE_BRANCH
 +                      branches = append(branches, branch{q, b.Succs[1]})
 +              }
 +      case ssa.BlockUGT:
 +              if b.Succs[0] == next {
 +                      p := Prog(x86.AJLS)
 +                      p.To.Type = obj.TYPE_BRANCH
 +                      branches = append(branches, branch{p, b.Succs[1]})
 +              } else if b.Succs[1] == next {
 +                      p := Prog(x86.AJHI)
 +                      p.To.Type = obj.TYPE_BRANCH
 +                      branches = append(branches, branch{p, b.Succs[0]})
 +              } else {
 +                      p := Prog(x86.AJHI)
 +                      p.To.Type = obj.TYPE_BRANCH
 +                      branches = append(branches, branch{p, b.Succs[0]})
 +                      q := Prog(obj.AJMP)
 +                      q.To.Type = obj.TYPE_BRANCH
 +                      branches = append(branches, branch{q, b.Succs[1]})
 +              }
 +
 +      default:
 +              log.Fatalf("branch %s not implemented", b.LongString())
 +      }
 +      return branches
 +}
 +
 +// ssaRegToReg maps ssa register numbers to obj register numbers.
 +var ssaRegToReg = [...]int16{
 +      x86.REG_AX,
 +      x86.REG_CX,
 +      x86.REG_DX,
 +      x86.REG_BX,
 +      x86.REG_SP,
 +      x86.REG_BP,
 +      x86.REG_SI,
 +      x86.REG_DI,
 +      x86.REG_R8,
 +      x86.REG_R9,
 +      x86.REG_R10,
 +      x86.REG_R11,
 +      x86.REG_R12,
 +      x86.REG_R13,
 +      x86.REG_R14,
 +      x86.REG_R15,
 +      // TODO: more
 +      // TODO: arch-dependent
 +}
 +
 +// regnum returns the register (in cmd/internal/obj numbering) to
 +// which v has been allocated.  Panics if v is not assigned to a
 +// register.
 +func regnum(v *ssa.Value) int16 {
 +      return ssaRegToReg[v.Block.Func.RegAlloc[v.ID].(*ssa.Register).Num]
 +}
 +
 +// localOffset returns the offset below the frame pointer where
 +// a stack-allocated local has been allocated.  Panics if v
 +// is not assigned to a local slot.
 +func localOffset(v *ssa.Value) int64 {
 +      return v.Block.Func.RegAlloc[v.ID].(*ssa.LocalSlot).Idx
 +}
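
The forward-reference machinery above (linkForwardReferences, lookupVarIncoming, lookupVarOutgoing) is easier to see on toy types. Below is a self-contained sketch with invented Block/Value stand-ins, not the compiler's types: a variable reassigned on only one arm of an if forces a phi at the join, and the copy inserted by lookupVarOutgoing is what lets the recursion terminate on loop back edges.

    package main

    import "fmt"

    type Block struct {
        ID    int
        Preds []*Block
    }

    type Value struct {
        Op   string
        Args []*Value
    }

    // defvars[b.ID] maps a variable name to its value at the end of b,
    // mirroring state.defvars in the hunk above.
    var defvars []map[string]*Value

    func lookupVarOutgoing(b *Block, name string) *Value {
        if v, ok := defvars[b.ID][name]; ok {
            return v
        }
        // Insert the copy before recursing, so a second visit to b
        // (via a loop back edge) finds it and terminates.
        v := &Value{Op: "Copy"}
        defvars[b.ID][name] = v
        v.Args = append(v.Args, lookupVarIncoming(b, name))
        return v
    }

    func lookupVarIncoming(b *Block, name string) *Value {
        if len(b.Preds) == 0 {
            // entry block: treat the variable as an incoming argument
            return &Value{Op: "Arg"}
        }
        var vals []*Value
        for _, p := range b.Preds {
            vals = append(vals, lookupVarOutgoing(p, name))
        }
        for _, v := range vals[1:] {
            if v != vals[0] {
                return &Value{Op: "Phi", Args: vals} // predecessors disagree: need a phi
            }
        }
        return vals[0]
    }

    func main() {
        entry := &Block{ID: 0}
        bThen := &Block{ID: 1, Preds: []*Block{entry}}
        bElse := &Block{ID: 2, Preds: []*Block{entry}}
        bEnd := &Block{ID: 3, Preds: []*Block{bThen, bElse}}
        defvars = []map[string]*Value{
            {"x": {Op: "Const"}}, // entry defines x
            {"x": {Op: "Add"}},   // the then-branch reassigns x
            {},                   // the else-branch leaves x alone
            {},
        }
        fmt.Println(lookupVarIncoming(bEnd, "x").Op) // prints "Phi"
    }
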
index 6f7830d70aa86e76773206cb83181029db052582,0000000000000000000000000000000000000000..cf1589eb0378cca05a716d15473fe75b46e8c3d5
mode 100644,000000..100644
--- /dev/null
@@@ -1,58 -1,0 +1,58 @@@
 +// Copyright 2015 The Go Authors. All rights reserved.
 +// Use of this source code is governed by a BSD-style
 +// license that can be found in the LICENSE file.
 +
 +// This file provides methods that let us export a Type as an ssa.Type.
 +// We don't export this package's Type directly because it would lead
 +// to an import cycle between this package and ../ssa.
 +// TODO: move Type to its own package, then we don't need to dance around import cycles.
 +
 +package gc
 +
 +import (
-       "cmd/internal/ssa"
++      "cmd/compile/internal/ssa"
 +)
 +
 +func (t *Type) Size() int64 {
 +      dowidth(t)
 +      return t.Width
 +}
 +
 +func (t *Type) IsBoolean() bool {
 +      return t.Etype == TBOOL
 +}
 +
 +func (t *Type) IsInteger() bool {
 +      switch t.Etype {
 +      case TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, TUINT64, TINT, TUINT, TUINTPTR:
 +              return true
 +      }
 +      return false
 +}
 +
 +func (t *Type) IsSigned() bool {
 +      switch t.Etype {
 +      case TINT8, TINT16, TINT32, TINT64, TINT:
 +              return true
 +      }
 +      return false
 +}
 +
 +func (t *Type) IsFloat() bool {
 +      return t.Etype == TFLOAT32 || t.Etype == TFLOAT64
 +}
 +
 +func (t *Type) IsPtr() bool {
 +      return t.Etype == TPTR32 || t.Etype == TPTR64 ||
 +              t.Etype == TMAP || t.Etype == TCHAN || t.Etype == TFUNC
 +}
 +
 +func (t *Type) Elem() ssa.Type {
 +      return t.Type
 +}
 +func (t *Type) PtrTo() ssa.Type {
 +      return Ptrto(t)
 +}
 +
 +func (t *Type) IsMemory() bool { return false }
 +func (t *Type) IsFlags() bool  { return false }
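
The methods above are what let gc's Type satisfy the type abstraction in the ssa package. The interface they imply, reconstructed from this method set (an inference; the real declaration lives in cmd/compile/internal/ssa and may carry more):

    // assumed shape of ssa.Type, inferred from the methods above
    type Type interface {
        Size() int64
        IsBoolean() bool
        IsInteger() bool
        IsSigned() bool
        IsFloat() bool
        IsPtr() bool
        IsMemory() bool
        IsFlags() bool
        Elem() Type
        PtrTo() Type
    }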
index afb723ae4cfc6882516e235b3b02d7362fb5a3d1,0000000000000000000000000000000000000000..afb723ae4cfc6882516e235b3b02d7362fb5a3d1
mode 100644,000000..100644
--- /dev/null
index dcf3676bc20b833ac04685dbe134d7c1a229252b,0000000000000000000000000000000000000000..dcf3676bc20b833ac04685dbe134d7c1a229252b
mode 100644,000000..100644
--- /dev/null
index 6204f1948f30a085340d2d0ee601b7f65ef16deb,0000000000000000000000000000000000000000..6204f1948f30a085340d2d0ee601b7f65ef16deb
mode 100644,000000..100644
--- /dev/null
index 51c72aacd9910e77741d3a6b6af71f67a2d17aad,0000000000000000000000000000000000000000..51c72aacd9910e77741d3a6b6af71f67a2d17aad
mode 100644,000000..100644
--- /dev/null
index 667313ad9f825e34a9417f6fd5329b2a7e95b2ca,0000000000000000000000000000000000000000..667313ad9f825e34a9417f6fd5329b2a7e95b2ca
mode 100644,000000..100644
--- /dev/null
index c1f79567915d2935de16752d2bda1044233c309f,0000000000000000000000000000000000000000..c1f79567915d2935de16752d2bda1044233c309f
mode 100644,000000..100644
--- /dev/null
index 9f1d2a8593ae39dca7da8753ce4dfaf7a46116ad,0000000000000000000000000000000000000000..9f1d2a8593ae39dca7da8753ce4dfaf7a46116ad
mode 100644,000000..100644
--- /dev/null
index 10c2dcc440e85c82681b892036a372f0cff9580c,0000000000000000000000000000000000000000..10c2dcc440e85c82681b892036a372f0cff9580c
mode 100644,000000..100644
--- /dev/null
index 503681ffd3b14b53367d18cb0e88f38b6fbce4da,0000000000000000000000000000000000000000..503681ffd3b14b53367d18cb0e88f38b6fbce4da
mode 100644,000000..100644
--- /dev/null
index aba24aeabcd59c25e2304a6fe7fede990c4f597a,0000000000000000000000000000000000000000..aba24aeabcd59c25e2304a6fe7fede990c4f597a
mode 100644,000000..100644
--- /dev/null
index a8058614893b16c685ecefe1cfc1d49f5c8e9b7f,0000000000000000000000000000000000000000..a8058614893b16c685ecefe1cfc1d49f5c8e9b7f
mode 100644,000000..100644
--- /dev/null
index ced46e524b0d68f0aa6ad5382ef1c9a82da60794,0000000000000000000000000000000000000000..ced46e524b0d68f0aa6ad5382ef1c9a82da60794
mode 100644,000000..100644
--- /dev/null
index aaf3ab3da1f224f5fb027a9f172ebaf59040d4f4,0000000000000000000000000000000000000000..aaf3ab3da1f224f5fb027a9f172ebaf59040d4f4
mode 100644,000000..100644
--- /dev/null
index ab4ab82345caaec6e2db6e3ed8f10f6b8cf449d9,0000000000000000000000000000000000000000..ab4ab82345caaec6e2db6e3ed8f10f6b8cf449d9
mode 100644,000000..100644
--- /dev/null
index 3e41ef3bc12ba61b21ad14ed77d5a008ae061d18,0000000000000000000000000000000000000000..3e41ef3bc12ba61b21ad14ed77d5a008ae061d18
mode 100644,000000..100644
--- /dev/null
index e7619ca4f85241ffcbc008bb6907586c6076cf4b,0000000000000000000000000000000000000000..e7619ca4f85241ffcbc008bb6907586c6076cf4b
mode 100644,000000..100644
--- /dev/null
index af3e8a8e1440782481acd6eff81a07d6bd40a9e8,0000000000000000000000000000000000000000..af3e8a8e1440782481acd6eff81a07d6bd40a9e8
mode 100644,000000..100644
--- /dev/null
index 91f9c17d1139598a02b24f8063f087cbf676e745,0000000000000000000000000000000000000000..91f9c17d1139598a02b24f8063f087cbf676e745
mode 100644,000000..100644
--- /dev/null
index 3f53e1a434663ea23f1e352c2daa9718a325b6c6,0000000000000000000000000000000000000000..3f53e1a434663ea23f1e352c2daa9718a325b6c6
mode 100644,000000..100644
--- /dev/null
index 7123397c4c39a571e7117e8839a1e78ba327be00,0000000000000000000000000000000000000000..7123397c4c39a571e7117e8839a1e78ba327be00
mode 100644,000000..100644
--- /dev/null
index 1b6f6d66c19ba21afd3cd6bae7bf8bd30c5dbfe6,0000000000000000000000000000000000000000..1b6f6d66c19ba21afd3cd6bae7bf8bd30c5dbfe6
mode 100644,000000..100644
--- /dev/null
index 44f0b83fa85aad9d8e372dae7133386c2cb77dbf,0000000000000000000000000000000000000000..44f0b83fa85aad9d8e372dae7133386c2cb77dbf
mode 100644,000000..100644
--- /dev/null
index 51cef97b306b95d5366797b173fcaa8eaf1480c7,0000000000000000000000000000000000000000..51cef97b306b95d5366797b173fcaa8eaf1480c7
mode 100644,000000..100644
--- /dev/null
index f02c1ae0c076d8725e679927bb6fc581d359f550,0000000000000000000000000000000000000000..f02c1ae0c076d8725e679927bb6fc581d359f550
mode 100644,000000..100644
--- /dev/null
index c8f27bb2e4fc1b5a35705ba239cd6d7840ce556b,0000000000000000000000000000000000000000..c8f27bb2e4fc1b5a35705ba239cd6d7840ce556b
mode 100644,000000..100644
--- /dev/null
index 46a0069a1855a5ad39947c0f54211886523a4df4,0000000000000000000000000000000000000000..46a0069a1855a5ad39947c0f54211886523a4df4
mode 100644,000000..100644
--- /dev/null
index ea2bcf0e98b8cc09c60b1f4910ce73eb53039cf6,0000000000000000000000000000000000000000..ea2bcf0e98b8cc09c60b1f4910ce73eb53039cf6
mode 100644,000000..100644
--- /dev/null
index 19c0d077e5b0e2190a42472443d7fbf1b4512b0e,0000000000000000000000000000000000000000..19c0d077e5b0e2190a42472443d7fbf1b4512b0e
mode 100644,000000..100644
--- /dev/null
index eeea30d970b53cb34c6d3a4e2e563bc307801800,0000000000000000000000000000000000000000..eeea30d970b53cb34c6d3a4e2e563bc307801800
mode 100644,000000..100644
--- /dev/null
index c798d2e936591e0c5a59d55b125fd172ef2a3ba3,0000000000000000000000000000000000000000..c798d2e936591e0c5a59d55b125fd172ef2a3ba3
mode 100644,000000..100644
--- /dev/null
index 671270d7f29d0ee19ffcebd9d1e4a089c33d166b,0000000000000000000000000000000000000000..671270d7f29d0ee19ffcebd9d1e4a089c33d166b
mode 100644,000000..100644
--- /dev/null
index c49d9d9f2e0068326cef05455cfd637cf0ed2bd8,0000000000000000000000000000000000000000..c49d9d9f2e0068326cef05455cfd637cf0ed2bd8
mode 100644,000000..100644
--- /dev/null
index dc910b70b1d36ea461e57bf45aea42de8e1a92bf,0000000000000000000000000000000000000000..dc910b70b1d36ea461e57bf45aea42de8e1a92bf
mode 100644,000000..100644
--- /dev/null
index 4ac930298b2f33a967603cae6709520a81c3754c,0000000000000000000000000000000000000000..4ac930298b2f33a967603cae6709520a81c3754c
mode 100644,000000..100644
--- /dev/null
index 0a89ac3773311812b7dd747314e0ac7456e2aa02,0000000000000000000000000000000000000000..0a89ac3773311812b7dd747314e0ac7456e2aa02
mode 100644,000000..100644
--- /dev/null
index b79aee84977894c242d5a1ca71340853578006c6,0000000000000000000000000000000000000000..b79aee84977894c242d5a1ca71340853578006c6
mode 100644,000000..100644
--- /dev/null
index ab686470be575ab6132ba8e0dc7c6ce0067a0267,0000000000000000000000000000000000000000..ab686470be575ab6132ba8e0dc7c6ce0067a0267
mode 100644,000000..100644
--- /dev/null
index 611c85834aad8f9cb69738d80326939b46587655,0000000000000000000000000000000000000000..611c85834aad8f9cb69738d80326939b46587655
mode 100644,000000..100644
--- /dev/null
index dab6239dee48019645fc52b0d9e0264215f1a7c0,0000000000000000000000000000000000000000..dab6239dee48019645fc52b0d9e0264215f1a7c0
mode 100644,000000..100644
--- /dev/null
index f5037fab88e0678f575340f4aee67ce769771f79,2840f71749228ca886f2bd349a9df7334120ac0b..7988129868700c071b5afe381b2fd3d9cf582f8e
@@@ -38,10 -28,16 +28,17 @@@ var bootstrapDirs = []string
        "asm/internal/asm",
        "asm/internal/flags",
        "asm/internal/lex",
+       "compile",
+       "compile/internal/amd64",
+       "compile/internal/arm",
+       "compile/internal/arm64",
+       "compile/internal/big",
+       "compile/internal/gc",
+       "compile/internal/ppc64",
++      "compile/internal/ssa",
+       "compile/internal/x86",
        "internal/asm",
-       "internal/gc/big",
-       "internal/gc",
-       "internal/ld",
+       "internal/gcprog",
        "internal/obj",
        "internal/obj/arm",
        "internal/obj/arm64",