1 // Copyright 2015 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
14 "cmd/compile/internal/ssa"
21 var ssaConfig *ssa.Config
// initssa initializes (if needed) and returns the package-level SSA backend
// configuration shared by all functions compiled in this process.
// NOTE(review): interior lines of this function are elided in this view
// (including the return statement) — confirm against the full file.
24 func initssa() *ssa.Config {
// Reset exporter state for this compilation: nothing unimplemented seen yet,
// and any unimplemented construct encountered should be a hard error.
25 ssaExp.unimplemented = false
26 ssaExp.mustImplement = true
// Build the shared config for the target architecture. The final argument is
// Debug['N'] == 0, i.e. the -N flag is not set — presumably this enables
// optimization; confirm against ssa.NewConfig's parameter list.
28 ssaConfig = ssa.NewConfig(Thearch.LinkArch.Name, &ssaExp, Ctxt, Debug['N'] == 0)
// shouldssa reports whether fn should be compiled with the SSA back end.
// The decision depends on the target architecture (SSA may be test-only on
// some arches, gated by SSATEST) and on the GOSSAFUNC / GOSSAHASH / GOSSAPKG
// environment variables, as documented in the numbered rules below.
// NOTE(review): several interior lines (switch case labels, closing braces)
// are elided in this view; the visible returns belong to different branches.
33 func shouldssa(fn *Node) bool {
34 switch Thearch.LinkArch.Name {
36 // Only available for testing.
37 if os.Getenv("SSATEST") == "" {
40 // Generally available.
47 // Environment variable control of SSA CG
48 // 1. IF GOSSAFUNC == current function name THEN
49 // compile this function with SSA and log output to ssa.html
51 // 2. IF GOSSAHASH == "" THEN
52 // compile this function (and everything else) with SSA
54 // 3. IF GOSSAHASH == "n" or "N"
55 // IF GOSSAPKG == current package name THEN
56 // compile this function (and everything in this package) with SSA
58 // use the old back end for this function.
59 // This is for compatibility with existing test harness and should go away.
61 // 4. IF GOSSAHASH is a suffix of the binary-rendered SHA1 hash of the function name THEN
62 // compile this function with SSA
64 // compile this function with the old back end.
66 // Plan is for 3 to be removed when the tests are revised.
67 // SSA is now default, and is disabled by setting
68 // GOSSAHASH to n or N, or selectively with strings of
// The function's linker-level name, used for GOSSAFUNC matching and for the
// GOSSAHASH suffix match below.
71 name := fn.Func.Nname.Sym.Name
73 funcname := os.Getenv("GOSSAFUNC")
75 // If GOSSAFUNC is set, compile only that function.
76 return name == funcname
79 pkg := os.Getenv("GOSSAPKG")
81 // If GOSSAPKG is set, compile only that package.
82 return localpkg.Name == pkg
// Rule 4: defer to the debug-hash matcher on the shared SSA config.
85 return initssa().DebugHashMatch("GOSSAHASH", name)
88 // buildssa builds an SSA function.
89 func buildssa(fn *Node) *ssa.Func {
90 name := fn.Func.Nname.Sym.Name
91 printssa := name == os.Getenv("GOSSAFUNC")
93 fmt.Println("generating SSA for", name)
94 dumplist("buildssa-enter", fn.Func.Enter)
95 dumplist("buildssa-body", fn.Nbody)
96 dumplist("buildssa-exit", fn.Func.Exit)
100 s.pushLine(fn.Lineno)
103 if fn.Func.Pragma&CgoUnsafeArgs != 0 {
104 s.cgoUnsafeArgs = true
106 if fn.Func.Pragma&Nowritebarrier != 0 {
111 fn.Func.WBLineno = s.WBLineno
114 // TODO(khr): build config just once at the start of the compiler binary
116 ssaExp.log = printssa
119 s.f = s.config.NewFunc()
121 s.exitCode = fn.Func.Exit
122 s.panics = map[funcLine]*ssa.Block{}
124 if name == os.Getenv("GOSSAFUNC") {
125 // TODO: tempfile? it is handy to have the location
126 // of this file be stable, so you can just reload in the browser.
127 s.config.HTML = ssa.NewHTMLWriter("ssa.html", s.config, name)
128 // TODO: generate and print a mapping from nodes to values and blocks
132 s.config.HTML.Close()
136 // Allocate starting block
137 s.f.Entry = s.f.NewBlock(ssa.BlockPlain)
139 // Allocate starting values
140 s.labels = map[string]*ssaLabel{}
141 s.labeledNodes = map[*Node]*ssaLabel{}
142 s.startmem = s.entryNewValue0(ssa.OpInitMem, ssa.TypeMem)
143 s.sp = s.entryNewValue0(ssa.OpSP, Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
144 s.sb = s.entryNewValue0(ssa.OpSB, Types[TUINTPTR])
146 s.startBlock(s.f.Entry)
147 s.vars[&memVar] = s.startmem
149 s.varsyms = map[*Node]interface{}{}
151 // Generate addresses of local declarations
152 s.decladdrs = map[*Node]*ssa.Value{}
153 for _, n := range fn.Func.Dcl {
155 case PPARAM, PPARAMOUT:
156 aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n})
157 s.decladdrs[n] = s.entryNewValue1A(ssa.OpAddr, Ptrto(n.Type), aux, s.sp)
158 if n.Class == PPARAMOUT && s.canSSA(n) {
159 // Save ssa-able PPARAMOUT variables so we can
160 // store them back to the stack at the end of
162 s.returns = append(s.returns, n)
164 if n.Class == PPARAM && s.canSSA(n) && n.Type.IsPtrShaped() {
165 s.ptrargs = append(s.ptrargs, n)
166 n.SetNotLiveAtEnd(true) // SSA takes care of this explicitly
169 // processed at each use, to prevent Addr coming
172 // moved to heap - already handled by frontend
174 // local function - already handled by frontend
176 s.Unimplementedf("local variable with class %s unimplemented", classnames[n.Class])
180 // Convert the AST-based IR to the SSA-based IR
181 s.stmts(fn.Func.Enter)
184 // fallthrough to exit
185 if s.curBlock != nil {
186 s.pushLine(fn.Func.Endlineno)
191 // Check that we used all labels
192 for name, lab := range s.labels {
193 if !lab.used() && !lab.reported {
194 yyerrorl(lab.defNode.Lineno, "label %v defined and not used", name)
197 if lab.used() && !lab.defined() && !lab.reported {
198 yyerrorl(lab.useNode.Lineno, "label %v not defined", name)
203 // Check any forward gotos. Non-forward gotos have already been checked.
204 for _, n := range s.fwdGotos {
205 lab := s.labels[n.Left.Sym.Name]
206 // If the label is undefined, we have already have printed an error.
208 s.checkgoto(n, lab.defNode)
217 prelinkNumvars := s.f.NumValues()
218 sparseDefState := s.locatePotentialPhiFunctions(fn)
220 // Link up variable uses to variable definitions
221 s.linkForwardReferences(sparseDefState)
223 if ssa.BuildStats > 0 {
224 s.f.LogStat("build", s.f.NumBlocks(), "blocks", prelinkNumvars, "vars_before",
225 s.f.NumValues(), "vars_after", prelinkNumvars*s.f.NumBlocks(), "ssa_phi_loc_cutoff_score")
228 // Don't carry reference this around longer than necessary
231 // Main call to ssa package to compile function
238 // configuration (arch) information
241 // function we're building
244 // labels and labeled control flow nodes (OFOR, OSWITCH, OSELECT) in f
245 labels map[string]*ssaLabel
246 labeledNodes map[*Node]*ssaLabel
248 // gotos that jump forward; required for deferred checkgoto calls
250 // Code that must precede any return
251 // (e.g., copying value of heap-escaped paramout back to true paramout)
254 // unlabeled break and continue statement tracking
255 breakTo *ssa.Block // current target for plain break statement
256 continueTo *ssa.Block // current target for plain continue statement
258 // current location where we're interpreting the AST
261 // variable assignments in the current block (map from variable symbol to ssa value)
262 // *Node is the unique identifier (an ONAME Node) for the variable.
263 vars map[*Node]*ssa.Value
265 // all defined variables at the end of each block. Indexed by block ID.
266 defvars []map[*Node]*ssa.Value
268 // addresses of PPARAM and PPARAMOUT variables.
269 decladdrs map[*Node]*ssa.Value
271 // symbols for PEXTERN, PAUTO and PPARAMOUT variables so they can be reused.
272 varsyms map[*Node]interface{}
274 // starting values. Memory, stack pointer, and globals pointer
279 // line number stack. The current line number is top of stack
282 // list of panic calls by function name and line number.
283 // Used to deduplicate panic calls.
284 panics map[funcLine]*ssa.Block
286 // list of FwdRef values.
289 // list of PPARAMOUT (return) variables.
292 // list of PPARAM SSA-able pointer-shaped args. We ensure these are live
293 // throughout the function to help users avoid premature finalizers.
298 WBLineno int32 // line number of first write barrier. 0=no write barriers
301 type funcLine struct {
306 type ssaLabel struct {
307 target *ssa.Block // block identified by this label
308 breakTarget *ssa.Block // block to break to in control flow node identified by this label
309 continueTarget *ssa.Block // block to continue to in control flow node identified by this label
310 defNode *Node // label definition Node (OLABEL)
311 // Label use Node (OGOTO, OBREAK, OCONTINUE).
312 // Used only for error detection and reporting.
313 // There might be multiple uses, but we only need to track one.
315 reported bool // reported indicates whether an error has already been reported for this label
318 // defined reports whether the label has a definition (OLABEL node).
319 func (l *ssaLabel) defined() bool { return l.defNode != nil }
321 // used reports whether the label has a use (OGOTO, OBREAK, or OCONTINUE node).
322 func (l *ssaLabel) used() bool { return l.useNode != nil }
324 // label returns the label associated with sym, creating it if necessary.
325 func (s *state) label(sym *Sym) *ssaLabel {
// Look up an existing entry first; labels are keyed by name in s.labels.
326 lab := s.labels[sym.Name]
// NOTE(review): the lines that allocate a new ssaLabel when the lookup
// misses (and the final return) are elided in this view — this store
// presumably registers that freshly allocated label.
329 s.labels[sym.Name] = lab
334 func (s *state) Logf(msg string, args ...interface{}) { s.config.Logf(msg, args...) }
335 func (s *state) Log() bool { return s.config.Log() }
336 func (s *state) Fatalf(msg string, args ...interface{}) { s.config.Fatalf(s.peekLine(), msg, args...) }
337 func (s *state) Unimplementedf(msg string, args ...interface{}) {
338 s.config.Unimplementedf(s.peekLine(), msg, args...)
340 func (s *state) Warnl(line int32, msg string, args ...interface{}) { s.config.Warnl(line, msg, args...) }
341 func (s *state) Debug_checknil() bool { return s.config.Debug_checknil() }
344 // dummy node for the memory variable
345 memVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "mem"}}
347 // dummy nodes for temporary variables
348 ptrVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "ptr"}}
349 lenVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "len"}}
350 newlenVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "newlen"}}
351 capVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "cap"}}
352 typVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "typ"}}
353 idataVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "idata"}}
354 okVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "ok"}}
355 deltaVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "delta"}}
358 // startBlock sets the current block we're generating code in to b.
359 func (s *state) startBlock(b *ssa.Block) {
360 if s.curBlock != nil {
361 s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
364 s.vars = map[*Node]*ssa.Value{}
367 // endBlock marks the end of generating code for the current block.
368 // Returns the (former) current block. Returns nil if there is no current
369 // block, i.e. if no code flows to the current execution point.
370 func (s *state) endBlock() *ssa.Block {
375 for len(s.defvars) <= int(b.ID) {
376 s.defvars = append(s.defvars, nil)
378 s.defvars[b.ID] = s.vars
381 b.Line = s.peekLine()
385 // pushLine pushes a line number on the line number stack.
386 func (s *state) pushLine(line int32) {
387 s.line = append(s.line, line)
390 // popLine pops the top of the line number stack.
391 func (s *state) popLine() {
392 s.line = s.line[:len(s.line)-1]
395 // peekLine peek the top of the line number stack.
396 func (s *state) peekLine() int32 {
397 return s.line[len(s.line)-1]
400 func (s *state) Error(msg string, args ...interface{}) {
401 yyerrorl(s.peekLine(), msg, args...)
404 // newValue0 adds a new value with no arguments to the current block.
405 func (s *state) newValue0(op ssa.Op, t ssa.Type) *ssa.Value {
406 return s.curBlock.NewValue0(s.peekLine(), op, t)
409 // newValue0A adds a new value with no arguments and an aux value to the current block.
410 func (s *state) newValue0A(op ssa.Op, t ssa.Type, aux interface{}) *ssa.Value {
411 return s.curBlock.NewValue0A(s.peekLine(), op, t, aux)
414 // newValue0I adds a new value with no arguments and an auxint value to the current block.
415 func (s *state) newValue0I(op ssa.Op, t ssa.Type, auxint int64) *ssa.Value {
416 return s.curBlock.NewValue0I(s.peekLine(), op, t, auxint)
419 // newValue1 adds a new value with one argument to the current block.
420 func (s *state) newValue1(op ssa.Op, t ssa.Type, arg *ssa.Value) *ssa.Value {
421 return s.curBlock.NewValue1(s.peekLine(), op, t, arg)
424 // newValue1A adds a new value with one argument and an aux value to the current block.
425 func (s *state) newValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
426 return s.curBlock.NewValue1A(s.peekLine(), op, t, aux, arg)
429 // newValue1I adds a new value with one argument and an auxint value to the current block.
430 func (s *state) newValue1I(op ssa.Op, t ssa.Type, aux int64, arg *ssa.Value) *ssa.Value {
431 return s.curBlock.NewValue1I(s.peekLine(), op, t, aux, arg)
434 // newValue2 adds a new value with two arguments to the current block.
435 func (s *state) newValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value {
436 return s.curBlock.NewValue2(s.peekLine(), op, t, arg0, arg1)
439 // newValue2I adds a new value with two arguments and an auxint value to the current block.
440 func (s *state) newValue2I(op ssa.Op, t ssa.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
441 return s.curBlock.NewValue2I(s.peekLine(), op, t, aux, arg0, arg1)
444 // newValue3 adds a new value with three arguments to the current block.
445 func (s *state) newValue3(op ssa.Op, t ssa.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
446 return s.curBlock.NewValue3(s.peekLine(), op, t, arg0, arg1, arg2)
449 // newValue3I adds a new value with three arguments and an auxint value to the current block.
450 func (s *state) newValue3I(op ssa.Op, t ssa.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
451 return s.curBlock.NewValue3I(s.peekLine(), op, t, aux, arg0, arg1, arg2)
454 // entryNewValue0 adds a new value with no arguments to the entry block.
455 func (s *state) entryNewValue0(op ssa.Op, t ssa.Type) *ssa.Value {
456 return s.f.Entry.NewValue0(s.peekLine(), op, t)
459 // entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
460 func (s *state) entryNewValue0A(op ssa.Op, t ssa.Type, aux interface{}) *ssa.Value {
461 return s.f.Entry.NewValue0A(s.peekLine(), op, t, aux)
464 // entryNewValue0I adds a new value with no arguments and an auxint value to the entry block.
465 func (s *state) entryNewValue0I(op ssa.Op, t ssa.Type, auxint int64) *ssa.Value {
466 return s.f.Entry.NewValue0I(s.peekLine(), op, t, auxint)
469 // entryNewValue1 adds a new value with one argument to the entry block.
470 func (s *state) entryNewValue1(op ssa.Op, t ssa.Type, arg *ssa.Value) *ssa.Value {
471 return s.f.Entry.NewValue1(s.peekLine(), op, t, arg)
474 // entryNewValue1 adds a new value with one argument and an auxint value to the entry block.
475 func (s *state) entryNewValue1I(op ssa.Op, t ssa.Type, auxint int64, arg *ssa.Value) *ssa.Value {
476 return s.f.Entry.NewValue1I(s.peekLine(), op, t, auxint, arg)
479 // entryNewValue1A adds a new value with one argument and an aux value to the entry block.
480 func (s *state) entryNewValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
481 return s.f.Entry.NewValue1A(s.peekLine(), op, t, aux, arg)
484 // entryNewValue2 adds a new value with two arguments to the entry block.
485 func (s *state) entryNewValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value {
486 return s.f.Entry.NewValue2(s.peekLine(), op, t, arg0, arg1)
489 // const* routines add a new const value to the entry block.
490 func (s *state) constSlice(t ssa.Type) *ssa.Value { return s.f.ConstSlice(s.peekLine(), t) }
491 func (s *state) constInterface(t ssa.Type) *ssa.Value { return s.f.ConstInterface(s.peekLine(), t) }
492 func (s *state) constNil(t ssa.Type) *ssa.Value { return s.f.ConstNil(s.peekLine(), t) }
493 func (s *state) constEmptyString(t ssa.Type) *ssa.Value { return s.f.ConstEmptyString(s.peekLine(), t) }
494 func (s *state) constBool(c bool) *ssa.Value {
495 return s.f.ConstBool(s.peekLine(), Types[TBOOL], c)
497 func (s *state) constInt8(t ssa.Type, c int8) *ssa.Value {
498 return s.f.ConstInt8(s.peekLine(), t, c)
500 func (s *state) constInt16(t ssa.Type, c int16) *ssa.Value {
501 return s.f.ConstInt16(s.peekLine(), t, c)
503 func (s *state) constInt32(t ssa.Type, c int32) *ssa.Value {
504 return s.f.ConstInt32(s.peekLine(), t, c)
506 func (s *state) constInt64(t ssa.Type, c int64) *ssa.Value {
507 return s.f.ConstInt64(s.peekLine(), t, c)
509 func (s *state) constFloat32(t ssa.Type, c float64) *ssa.Value {
510 return s.f.ConstFloat32(s.peekLine(), t, c)
512 func (s *state) constFloat64(t ssa.Type, c float64) *ssa.Value {
513 return s.f.ConstFloat64(s.peekLine(), t, c)
515 func (s *state) constInt(t ssa.Type, c int64) *ssa.Value {
516 if s.config.IntSize == 8 {
517 return s.constInt64(t, c)
519 if int64(int32(c)) != c {
520 s.Fatalf("integer constant too big %d", c)
522 return s.constInt32(t, int32(c))
525 func (s *state) stmts(a Nodes) {
526 for _, x := range a.Slice() {
531 // ssaStmtList converts the statement n to SSA and adds it to s.
532 func (s *state) stmtList(l Nodes) {
533 for _, n := range l.Slice() {
538 // ssaStmt converts the statement n to SSA and adds it to s.
539 func (s *state) stmt(n *Node) {
543 // If s.curBlock is nil, then we're about to generate dead code.
544 // We can't just short-circuit here, though,
545 // because we check labels and gotos as part of SSA generation.
546 // Provide a block for the dead code so that we don't have
547 // to add special cases everywhere else.
548 if s.curBlock == nil {
549 dead := s.f.NewBlock(ssa.BlockPlain)
560 case OEMPTY, ODCLCONST, ODCLTYPE, OFALL:
562 // Expression statements
563 case OCALLFUNC, OCALLMETH, OCALLINTER:
564 s.call(n, callNormal)
565 if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class == PFUNC &&
566 (compiling_runtime && n.Left.Sym.Name == "throw" ||
567 n.Left.Sym.Pkg == Runtimepkg && (n.Left.Sym.Name == "gopanic" || n.Left.Sym.Name == "selectgo" || n.Left.Sym.Name == "block")) {
570 b.Kind = ssa.BlockExit
572 // TODO: never rewrite OPANIC to OCALLFUNC in the
573 // first place. Need to wait until all backends
577 s.call(n.Left, callDefer)
579 s.call(n.Left, callGo)
582 res, resok := s.dottype(n.Rlist.First(), true)
583 s.assign(n.List.First(), res, needwritebarrier(n.List.First(), n.Rlist.First()), false, n.Lineno, 0, false)
584 s.assign(n.List.Second(), resok, false, false, n.Lineno, 0, false)
588 if n.Left.Class == PAUTOHEAP {
596 // Empty identifier is valid but useless.
597 // See issues 11589, 11593.
603 // Associate label with its control flow node, if any
604 if ctl := n.Name.Defn; ctl != nil {
606 case OFOR, OSWITCH, OSELECT:
607 s.labeledNodes[ctl] = lab
614 s.Error("label %v already defined at %v", sym, linestr(lab.defNode.Lineno))
617 // The label might already have a target block via a goto.
618 if lab.target == nil {
619 lab.target = s.f.NewBlock(ssa.BlockPlain)
622 // go to that label (we pretend "label:" is preceded by "goto label")
624 b.AddEdgeTo(lab.target)
625 s.startBlock(lab.target)
631 if lab.target == nil {
632 lab.target = s.f.NewBlock(ssa.BlockPlain)
639 s.checkgoto(n, lab.defNode)
641 s.fwdGotos = append(s.fwdGotos, n)
645 b.AddEdgeTo(lab.target)
648 // Check whether we can generate static data rather than code.
649 // If so, ignore n and defer data generation until codegen.
650 // Failure to do this causes writes to readonly symbols.
651 if gen_as_init(n, true) {
653 if s.f.StaticData != nil {
654 data = s.f.StaticData.([]*Node)
656 s.f.StaticData = append(data, n)
660 if n.Left == n.Right && n.Left.Op == ONAME {
661 // An x=x assignment. No point in doing anything
662 // here. In addition, skipping this assignment
663 // prevents generating:
666 // which is bad because x is incorrectly considered
667 // dead before the vardef. See issue #14904.
682 case OSTRUCTLIT, OARRAYLIT:
683 // All literals with nonzero fields have already been
684 // rewritten during walk. Any that remain are just T{}
685 // or equivalents. Use the zero value.
687 Fatalf("literal with nonzero value in SSA: %v", rhs)
691 // If we're writing the result of an append back to the same slice,
692 // handle it specially to avoid write barriers on the fast (non-growth) path.
693 // If the slice can be SSA'd, it'll be on the stack,
694 // so there will be no write barriers,
695 // so there's no need to attempt to prevent them.
696 if samesafeexpr(n.Left, rhs.List.First()) && !s.canSSA(n.Left) {
704 needwb := n.Op == OASWB && rhs != nil
705 deref := !canSSAType(t)
708 r = nil // Signal assign to use OpZero.
710 r, isVolatile = s.addr(rhs, false)
719 if rhs != nil && rhs.Op == OAPPEND {
720 // The frontend gets rid of the write barrier to enable the special OAPPEND
721 // handling above, but since this is not a special case, we need it.
722 // TODO: just add a ptr graying to the end of growslice?
723 // TODO: check whether we need to provide special handling and a write barrier
724 // for ODOTTYPE and ORECV also.
725 // They get similar wb-removal treatment in walk.go:OAS.
730 if rhs != nil && (rhs.Op == OSLICE || rhs.Op == OSLICE3 || rhs.Op == OSLICESTR) && samesafeexpr(rhs.Left, n.Left) {
731 // We're assigning a slicing operation back to its source.
732 // Don't write back fields we aren't changing. See issue #14855.
733 i, j, k := rhs.SliceBounds()
734 if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64() == 0) {
735 // [0:...] is the same as [:...]
738 // TODO: detect defaults for len/cap also.
739 // Currently doesn't really work because (*p)[:len(*p)] appears here as:
742 //if j != nil && (j.Op == OLEN && samesafeexpr(j.Left, n.Left)) {
745 //if k != nil && (k.Op == OCAP && samesafeexpr(k.Left, n.Left)) {
759 s.assign(n.Left, r, needwb, deref, n.Lineno, skip, isVolatile)
762 bThen := s.f.NewBlock(ssa.BlockPlain)
763 bEnd := s.f.NewBlock(ssa.BlockPlain)
765 if n.Rlist.Len() != 0 {
766 bElse = s.f.NewBlock(ssa.BlockPlain)
767 s.condBranch(n.Left, bThen, bElse, n.Likely)
769 s.condBranch(n.Left, bThen, bEnd, n.Likely)
774 if b := s.endBlock(); b != nil {
778 if n.Rlist.Len() != 0 {
781 if b := s.endBlock(); b != nil {
793 b.Kind = ssa.BlockRetJmp // override BlockRet
796 case OCONTINUE, OBREAK:
808 // plain break/continue
810 s.Error("%s is not in a loop", op)
813 // nothing to do; "to" is already the correct target
815 // labeled break/continue; look up the target
822 s.Error("%s label not defined: %v", op, sym)
828 to = lab.continueTarget
833 // Valid label but not usable with a break/continue here, e.g.:
839 s.Error("invalid %s label %v", op, sym)
849 // OFOR: for Ninit; Left; Right { Nbody }
850 bCond := s.f.NewBlock(ssa.BlockPlain)
851 bBody := s.f.NewBlock(ssa.BlockPlain)
852 bIncr := s.f.NewBlock(ssa.BlockPlain)
853 bEnd := s.f.NewBlock(ssa.BlockPlain)
855 // first, jump to condition test
859 // generate code to test condition
862 s.condBranch(n.Left, bBody, bEnd, 1)
865 b.Kind = ssa.BlockPlain
869 // set up for continue/break in body
870 prevContinue := s.continueTo
871 prevBreak := s.breakTo
874 lab := s.labeledNodes[n]
877 lab.continueTarget = bIncr
878 lab.breakTarget = bEnd
885 // tear down continue/break
886 s.continueTo = prevContinue
887 s.breakTo = prevBreak
889 lab.continueTarget = nil
890 lab.breakTarget = nil
893 // done with body, goto incr
894 if b := s.endBlock(); b != nil {
903 if b := s.endBlock(); b != nil {
908 case OSWITCH, OSELECT:
909 // These have been mostly rewritten by the front end into their Nbody fields.
910 // Our main task is to correctly hook up any break statements.
911 bEnd := s.f.NewBlock(ssa.BlockPlain)
913 prevBreak := s.breakTo
915 lab := s.labeledNodes[n]
918 lab.breakTarget = bEnd
921 // generate body code
924 s.breakTo = prevBreak
926 lab.breakTarget = nil
929 // OSWITCH never falls through (s.curBlock == nil here).
930 // OSELECT does not fall through if we're calling selectgo.
931 // OSELECT does fall through if we're calling selectnb{send,recv}[2].
932 // In those latter cases, go to the code after the select.
933 if b := s.endBlock(); b != nil {
939 // Insert a varkill op to record that a variable is no longer live.
940 // We only care about liveness info at call sites, so putting the
941 // varkill in the store chain is enough to keep it correctly ordered
942 // with respect to call ops.
943 if !s.canSSA(n.Left) {
944 s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, ssa.TypeMem, n.Left, s.mem())
948 // Insert a varlive op to record that a variable is still live.
949 if !n.Left.Addrtaken {
950 s.Fatalf("VARLIVE variable %s must have Addrtaken set", n.Left)
952 s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, ssa.TypeMem, n.Left, s.mem())
959 s.Unimplementedf("unhandled stmt %s", n.Op)
963 // exit processes any code that needs to be generated just before returning.
964 // It returns a BlockRet block that ends the control flow. Its control value
965 // will be set to the final memory state.
// NOTE(review): interior lines are elided in this view — the rtcall below is
// presumably guarded by a has-defer condition, and the block construction at
// the end is only partially visible.
966 func (s *state) exit() *ssa.Block {
968 s.rtcall(Deferreturn, true, nil)
971 // Run exit code. Typically, this code copies heap-allocated PPARAMOUT
972 // variables back to the stack.
975 // Store SSAable PPARAMOUT variables back to stack locations.
976 for _, n := range s.returns {
977 addr := s.decladdrs[n]
978 val := s.variable(n, n.Type)
// Emit VarDef before the Store so the store is ordered after the variable's
// definition point in the memory chain.
979 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, n, s.mem())
980 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, n.Type.Size(), addr, val, s.mem())
981 // TODO: if val is ever spilled, we'd like to use the
982 // PPARAMOUT slot for spilling it. That won't happen
986 // Keep input pointer args live until the return. This is a bandaid
987 // fix for 1.7 for what will become in 1.8 explicit runtime.KeepAlive calls.
988 // For <= 1.7 we guarantee that pointer input arguments live to the end of
989 // the function to prevent premature (from the user's point of view)
990 // execution of finalizers. See issue 15277.
991 // TODO: remove for 1.8?
992 for _, n := range s.ptrargs {
// OpKeepAlive threads each pointer arg's current value through the memory
// chain so liveness analysis keeps it live to this point.
993 s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, ssa.TypeMem, s.variable(n, n.Type), s.mem())
999 b.Kind = ssa.BlockRet
1004 type opAndType struct {
1009 var opToSSA = map[opAndType]ssa.Op{
1010 opAndType{OADD, TINT8}: ssa.OpAdd8,
1011 opAndType{OADD, TUINT8}: ssa.OpAdd8,
1012 opAndType{OADD, TINT16}: ssa.OpAdd16,
1013 opAndType{OADD, TUINT16}: ssa.OpAdd16,
1014 opAndType{OADD, TINT32}: ssa.OpAdd32,
1015 opAndType{OADD, TUINT32}: ssa.OpAdd32,
1016 opAndType{OADD, TPTR32}: ssa.OpAdd32,
1017 opAndType{OADD, TINT64}: ssa.OpAdd64,
1018 opAndType{OADD, TUINT64}: ssa.OpAdd64,
1019 opAndType{OADD, TPTR64}: ssa.OpAdd64,
1020 opAndType{OADD, TFLOAT32}: ssa.OpAdd32F,
1021 opAndType{OADD, TFLOAT64}: ssa.OpAdd64F,
1023 opAndType{OSUB, TINT8}: ssa.OpSub8,
1024 opAndType{OSUB, TUINT8}: ssa.OpSub8,
1025 opAndType{OSUB, TINT16}: ssa.OpSub16,
1026 opAndType{OSUB, TUINT16}: ssa.OpSub16,
1027 opAndType{OSUB, TINT32}: ssa.OpSub32,
1028 opAndType{OSUB, TUINT32}: ssa.OpSub32,
1029 opAndType{OSUB, TINT64}: ssa.OpSub64,
1030 opAndType{OSUB, TUINT64}: ssa.OpSub64,
1031 opAndType{OSUB, TFLOAT32}: ssa.OpSub32F,
1032 opAndType{OSUB, TFLOAT64}: ssa.OpSub64F,
1034 opAndType{ONOT, TBOOL}: ssa.OpNot,
1036 opAndType{OMINUS, TINT8}: ssa.OpNeg8,
1037 opAndType{OMINUS, TUINT8}: ssa.OpNeg8,
1038 opAndType{OMINUS, TINT16}: ssa.OpNeg16,
1039 opAndType{OMINUS, TUINT16}: ssa.OpNeg16,
1040 opAndType{OMINUS, TINT32}: ssa.OpNeg32,
1041 opAndType{OMINUS, TUINT32}: ssa.OpNeg32,
1042 opAndType{OMINUS, TINT64}: ssa.OpNeg64,
1043 opAndType{OMINUS, TUINT64}: ssa.OpNeg64,
1044 opAndType{OMINUS, TFLOAT32}: ssa.OpNeg32F,
1045 opAndType{OMINUS, TFLOAT64}: ssa.OpNeg64F,
1047 opAndType{OCOM, TINT8}: ssa.OpCom8,
1048 opAndType{OCOM, TUINT8}: ssa.OpCom8,
1049 opAndType{OCOM, TINT16}: ssa.OpCom16,
1050 opAndType{OCOM, TUINT16}: ssa.OpCom16,
1051 opAndType{OCOM, TINT32}: ssa.OpCom32,
1052 opAndType{OCOM, TUINT32}: ssa.OpCom32,
1053 opAndType{OCOM, TINT64}: ssa.OpCom64,
1054 opAndType{OCOM, TUINT64}: ssa.OpCom64,
1056 opAndType{OIMAG, TCOMPLEX64}: ssa.OpComplexImag,
1057 opAndType{OIMAG, TCOMPLEX128}: ssa.OpComplexImag,
1058 opAndType{OREAL, TCOMPLEX64}: ssa.OpComplexReal,
1059 opAndType{OREAL, TCOMPLEX128}: ssa.OpComplexReal,
1061 opAndType{OMUL, TINT8}: ssa.OpMul8,
1062 opAndType{OMUL, TUINT8}: ssa.OpMul8,
1063 opAndType{OMUL, TINT16}: ssa.OpMul16,
1064 opAndType{OMUL, TUINT16}: ssa.OpMul16,
1065 opAndType{OMUL, TINT32}: ssa.OpMul32,
1066 opAndType{OMUL, TUINT32}: ssa.OpMul32,
1067 opAndType{OMUL, TINT64}: ssa.OpMul64,
1068 opAndType{OMUL, TUINT64}: ssa.OpMul64,
1069 opAndType{OMUL, TFLOAT32}: ssa.OpMul32F,
1070 opAndType{OMUL, TFLOAT64}: ssa.OpMul64F,
1072 opAndType{ODIV, TFLOAT32}: ssa.OpDiv32F,
1073 opAndType{ODIV, TFLOAT64}: ssa.OpDiv64F,
1075 opAndType{OHMUL, TINT8}: ssa.OpHmul8,
1076 opAndType{OHMUL, TUINT8}: ssa.OpHmul8u,
1077 opAndType{OHMUL, TINT16}: ssa.OpHmul16,
1078 opAndType{OHMUL, TUINT16}: ssa.OpHmul16u,
1079 opAndType{OHMUL, TINT32}: ssa.OpHmul32,
1080 opAndType{OHMUL, TUINT32}: ssa.OpHmul32u,
1082 opAndType{ODIV, TINT8}: ssa.OpDiv8,
1083 opAndType{ODIV, TUINT8}: ssa.OpDiv8u,
1084 opAndType{ODIV, TINT16}: ssa.OpDiv16,
1085 opAndType{ODIV, TUINT16}: ssa.OpDiv16u,
1086 opAndType{ODIV, TINT32}: ssa.OpDiv32,
1087 opAndType{ODIV, TUINT32}: ssa.OpDiv32u,
1088 opAndType{ODIV, TINT64}: ssa.OpDiv64,
1089 opAndType{ODIV, TUINT64}: ssa.OpDiv64u,
1091 opAndType{OMOD, TINT8}: ssa.OpMod8,
1092 opAndType{OMOD, TUINT8}: ssa.OpMod8u,
1093 opAndType{OMOD, TINT16}: ssa.OpMod16,
1094 opAndType{OMOD, TUINT16}: ssa.OpMod16u,
1095 opAndType{OMOD, TINT32}: ssa.OpMod32,
1096 opAndType{OMOD, TUINT32}: ssa.OpMod32u,
1097 opAndType{OMOD, TINT64}: ssa.OpMod64,
1098 opAndType{OMOD, TUINT64}: ssa.OpMod64u,
1100 opAndType{OAND, TINT8}: ssa.OpAnd8,
1101 opAndType{OAND, TUINT8}: ssa.OpAnd8,
1102 opAndType{OAND, TINT16}: ssa.OpAnd16,
1103 opAndType{OAND, TUINT16}: ssa.OpAnd16,
1104 opAndType{OAND, TINT32}: ssa.OpAnd32,
1105 opAndType{OAND, TUINT32}: ssa.OpAnd32,
1106 opAndType{OAND, TINT64}: ssa.OpAnd64,
1107 opAndType{OAND, TUINT64}: ssa.OpAnd64,
1109 opAndType{OOR, TINT8}: ssa.OpOr8,
1110 opAndType{OOR, TUINT8}: ssa.OpOr8,
1111 opAndType{OOR, TINT16}: ssa.OpOr16,
1112 opAndType{OOR, TUINT16}: ssa.OpOr16,
1113 opAndType{OOR, TINT32}: ssa.OpOr32,
1114 opAndType{OOR, TUINT32}: ssa.OpOr32,
1115 opAndType{OOR, TINT64}: ssa.OpOr64,
1116 opAndType{OOR, TUINT64}: ssa.OpOr64,
1118 opAndType{OXOR, TINT8}: ssa.OpXor8,
1119 opAndType{OXOR, TUINT8}: ssa.OpXor8,
1120 opAndType{OXOR, TINT16}: ssa.OpXor16,
1121 opAndType{OXOR, TUINT16}: ssa.OpXor16,
1122 opAndType{OXOR, TINT32}: ssa.OpXor32,
1123 opAndType{OXOR, TUINT32}: ssa.OpXor32,
1124 opAndType{OXOR, TINT64}: ssa.OpXor64,
1125 opAndType{OXOR, TUINT64}: ssa.OpXor64,
1127 opAndType{OEQ, TBOOL}: ssa.OpEqB,
1128 opAndType{OEQ, TINT8}: ssa.OpEq8,
1129 opAndType{OEQ, TUINT8}: ssa.OpEq8,
1130 opAndType{OEQ, TINT16}: ssa.OpEq16,
1131 opAndType{OEQ, TUINT16}: ssa.OpEq16,
1132 opAndType{OEQ, TINT32}: ssa.OpEq32,
1133 opAndType{OEQ, TUINT32}: ssa.OpEq32,
1134 opAndType{OEQ, TINT64}: ssa.OpEq64,
1135 opAndType{OEQ, TUINT64}: ssa.OpEq64,
1136 opAndType{OEQ, TINTER}: ssa.OpEqInter,
1137 opAndType{OEQ, TSLICE}: ssa.OpEqSlice,
1138 opAndType{OEQ, TFUNC}: ssa.OpEqPtr,
1139 opAndType{OEQ, TMAP}: ssa.OpEqPtr,
1140 opAndType{OEQ, TCHAN}: ssa.OpEqPtr,
1141 opAndType{OEQ, TPTR32}: ssa.OpEqPtr,
1142 opAndType{OEQ, TPTR64}: ssa.OpEqPtr,
1143 opAndType{OEQ, TUINTPTR}: ssa.OpEqPtr,
1144 opAndType{OEQ, TUNSAFEPTR}: ssa.OpEqPtr,
1145 opAndType{OEQ, TFLOAT64}: ssa.OpEq64F,
1146 opAndType{OEQ, TFLOAT32}: ssa.OpEq32F,
1148 opAndType{ONE, TBOOL}: ssa.OpNeqB,
1149 opAndType{ONE, TINT8}: ssa.OpNeq8,
1150 opAndType{ONE, TUINT8}: ssa.OpNeq8,
1151 opAndType{ONE, TINT16}: ssa.OpNeq16,
1152 opAndType{ONE, TUINT16}: ssa.OpNeq16,
1153 opAndType{ONE, TINT32}: ssa.OpNeq32,
1154 opAndType{ONE, TUINT32}: ssa.OpNeq32,
1155 opAndType{ONE, TINT64}: ssa.OpNeq64,
1156 opAndType{ONE, TUINT64}: ssa.OpNeq64,
1157 opAndType{ONE, TINTER}: ssa.OpNeqInter,
1158 opAndType{ONE, TSLICE}: ssa.OpNeqSlice,
1159 opAndType{ONE, TFUNC}: ssa.OpNeqPtr,
1160 opAndType{ONE, TMAP}: ssa.OpNeqPtr,
1161 opAndType{ONE, TCHAN}: ssa.OpNeqPtr,
1162 opAndType{ONE, TPTR32}: ssa.OpNeqPtr,
1163 opAndType{ONE, TPTR64}: ssa.OpNeqPtr,
1164 opAndType{ONE, TUINTPTR}: ssa.OpNeqPtr,
1165 opAndType{ONE, TUNSAFEPTR}: ssa.OpNeqPtr,
1166 opAndType{ONE, TFLOAT64}: ssa.OpNeq64F,
1167 opAndType{ONE, TFLOAT32}: ssa.OpNeq32F,
1169 opAndType{OLT, TINT8}: ssa.OpLess8,
1170 opAndType{OLT, TUINT8}: ssa.OpLess8U,
1171 opAndType{OLT, TINT16}: ssa.OpLess16,
1172 opAndType{OLT, TUINT16}: ssa.OpLess16U,
1173 opAndType{OLT, TINT32}: ssa.OpLess32,
1174 opAndType{OLT, TUINT32}: ssa.OpLess32U,
1175 opAndType{OLT, TINT64}: ssa.OpLess64,
1176 opAndType{OLT, TUINT64}: ssa.OpLess64U,
1177 opAndType{OLT, TFLOAT64}: ssa.OpLess64F,
1178 opAndType{OLT, TFLOAT32}: ssa.OpLess32F,
1180 opAndType{OGT, TINT8}: ssa.OpGreater8,
1181 opAndType{OGT, TUINT8}: ssa.OpGreater8U,
1182 opAndType{OGT, TINT16}: ssa.OpGreater16,
1183 opAndType{OGT, TUINT16}: ssa.OpGreater16U,
1184 opAndType{OGT, TINT32}: ssa.OpGreater32,
1185 opAndType{OGT, TUINT32}: ssa.OpGreater32U,
1186 opAndType{OGT, TINT64}: ssa.OpGreater64,
1187 opAndType{OGT, TUINT64}: ssa.OpGreater64U,
1188 opAndType{OGT, TFLOAT64}: ssa.OpGreater64F,
1189 opAndType{OGT, TFLOAT32}: ssa.OpGreater32F,
1191 opAndType{OLE, TINT8}: ssa.OpLeq8,
1192 opAndType{OLE, TUINT8}: ssa.OpLeq8U,
1193 opAndType{OLE, TINT16}: ssa.OpLeq16,
1194 opAndType{OLE, TUINT16}: ssa.OpLeq16U,
1195 opAndType{OLE, TINT32}: ssa.OpLeq32,
1196 opAndType{OLE, TUINT32}: ssa.OpLeq32U,
1197 opAndType{OLE, TINT64}: ssa.OpLeq64,
1198 opAndType{OLE, TUINT64}: ssa.OpLeq64U,
1199 opAndType{OLE, TFLOAT64}: ssa.OpLeq64F,
1200 opAndType{OLE, TFLOAT32}: ssa.OpLeq32F,
1202 opAndType{OGE, TINT8}: ssa.OpGeq8,
1203 opAndType{OGE, TUINT8}: ssa.OpGeq8U,
1204 opAndType{OGE, TINT16}: ssa.OpGeq16,
1205 opAndType{OGE, TUINT16}: ssa.OpGeq16U,
1206 opAndType{OGE, TINT32}: ssa.OpGeq32,
1207 opAndType{OGE, TUINT32}: ssa.OpGeq32U,
1208 opAndType{OGE, TINT64}: ssa.OpGeq64,
1209 opAndType{OGE, TUINT64}: ssa.OpGeq64U,
1210 opAndType{OGE, TFLOAT64}: ssa.OpGeq64F,
1211 opAndType{OGE, TFLOAT32}: ssa.OpGeq32F,
1213 opAndType{OLROT, TUINT8}: ssa.OpLrot8,
1214 opAndType{OLROT, TUINT16}: ssa.OpLrot16,
1215 opAndType{OLROT, TUINT32}: ssa.OpLrot32,
1216 opAndType{OLROT, TUINT64}: ssa.OpLrot64,
1218 opAndType{OSQRT, TFLOAT64}: ssa.OpSqrt,
// concreteEtype maps an abstract etype to a concrete fixed-width one.
// NOTE(review): interior lines of this listing are elided (the switch cases
// between the lines below are missing); the visible checks pick the 64-bit
// variant when the target's int/pointer size is 8 bytes — presumably mapping
// TINT/TUINT and TUINTPTR to TINT64/TUINT64 vs. TINT32/TUINT32. Confirm
// against the full source.
1221 func (s *state) concreteEtype(t *Type) EType {
1227 if s.config.IntSize == 8 {
1232 if s.config.IntSize == 8 {
1237 if s.config.PtrSize == 8 {
// ssaOp returns the SSA op for the gc op `op` applied to type t.
// It first collapses t to a concrete fixed-width etype, then looks up the
// (op, etype) pair in the opToSSA table; a missing entry is reported via
// Unimplementedf. NOTE(review): the `if !ok` guard and return statement are
// elided from this listing.
1244 func (s *state) ssaOp(op Op, t *Type) ssa.Op {
1245 etype := s.concreteEtype(t)
1246 x, ok := opToSSA[opAndType{op, etype}]
1248 s.Unimplementedf("unhandled binary op %s %s", op, etype)
// floatForComplex returns the floating-point element type of a complex type:
// TFLOAT32 for complex64 and TFLOAT64 for complex128. NOTE(review): the
// size-test line between the two returns is elided from this listing —
// presumably `if t.Size() == 8` selecting the complex64 branch; confirm.
1253 func floatForComplex(t *Type) *Type {
1255 return Types[TFLOAT32]
1257 return Types[TFLOAT64]
// Key/value types for the conversion and shift tables below.
// NOTE(review): struct fields are largely elided from this listing; only
// twoOpsAndType's intermediateType field is visible. From their uses:
// opAndTwoTypes keys shiftOpToSSA (op + operand etype + shift-count etype),
// twoTypes keys fpConvOpToSSA (source etype + destination etype), and
// twoOpsAndType holds op1, op2, and the intermediate type threaded between
// them (see the op1/op2/intermediateType access in expr's OCONV case).
1261 type opAndTwoTypes struct {
1267 type twoTypes struct {
1272 type twoOpsAndType struct {
1275 intermediateType EType
// fpConvOpToSSA maps a (from, to) etype pair for int<->float and float<->float
// conversions to a two-step recipe: op1 normalizes the source into the
// intermediate type, op2 completes the conversion. ssa.OpCopy marks a no-op
// step; ssa.OpInvalid marks the unsigned-64-bit cases that expr() instead
// lowers to branchy helper code (uint64Tofloat*, float*ToUint64).
// NOTE(review): blank separator lines and the closing brace are elided from
// this listing.
1278 var fpConvOpToSSA = map[twoTypes]twoOpsAndType{
1280 twoTypes{TINT8, TFLOAT32}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, TINT32},
1281 twoTypes{TINT16, TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, TINT32},
1282 twoTypes{TINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, TINT32},
1283 twoTypes{TINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, TINT64},
1285 twoTypes{TINT8, TFLOAT64}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, TINT32},
1286 twoTypes{TINT16, TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, TINT32},
1287 twoTypes{TINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, TINT32},
1288 twoTypes{TINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, TINT64},
1290 twoTypes{TFLOAT32, TINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
1291 twoTypes{TFLOAT32, TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
1292 twoTypes{TFLOAT32, TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, TINT32},
1293 twoTypes{TFLOAT32, TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, TINT64},
1295 twoTypes{TFLOAT64, TINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
1296 twoTypes{TFLOAT64, TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
1297 twoTypes{TFLOAT64, TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, TINT32},
1298 twoTypes{TFLOAT64, TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, TINT64},
1300 twoTypes{TUINT8, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, TINT32},
1301 twoTypes{TUINT16, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, TINT32},
1302 twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, TINT64}, // go wide to dodge unsigned
1303 twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64}, // Cvt64Uto32F, branchy code expansion instead
1305 twoTypes{TUINT8, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, TINT32},
1306 twoTypes{TUINT16, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, TINT32},
1307 twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, TINT64}, // go wide to dodge unsigned
1308 twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64}, // Cvt64Uto64F, branchy code expansion instead
1310 twoTypes{TFLOAT32, TUINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
1311 twoTypes{TFLOAT32, TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
1312 twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
1313 twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64}, // Cvt32Fto64U, branchy code expansion instead
1315 twoTypes{TFLOAT64, TUINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
1316 twoTypes{TFLOAT64, TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
1317 twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
1318 twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64}, // Cvt64Fto64U, branchy code expansion instead
1321 twoTypes{TFLOAT64, TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, TFLOAT32},
1322 twoTypes{TFLOAT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCopy, TFLOAT64},
1323 twoTypes{TFLOAT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCopy, TFLOAT32},
1324 twoTypes{TFLOAT32, TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, TFLOAT64},
1327 // this map is used only for 32-bit arch, and only includes the difference
1328 // on 32-bit arch, don't use int64<->float conversion for uint32
// (expr's OCONV case consults this table first when s.config.IntSize == 4,
// overriding the widening-to-64-bit entries above with direct 32-bit
// unsigned conversions. NOTE(review): closing brace elided from this listing.)
1329 var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{
1330 twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, TUINT32},
1331 twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, TUINT32},
1332 twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, TUINT32},
1333 twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, TUINT32},
// shiftOpToSSA maps (shift op, operand etype, shift-count etype) to the SSA
// shift op. Left shifts share one op per width regardless of operand sign;
// right shifts distinguish arithmetic (Rsh*x*) for signed operands from
// logical (Rsh*Ux*) for unsigned operands. The x8/x16/x32/x64 suffix is the
// width of the shift count. NOTE(review): blank separator lines and the
// closing brace are elided from this listing.
1336 var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
1337 opAndTwoTypes{OLSH, TINT8, TUINT8}: ssa.OpLsh8x8,
1338 opAndTwoTypes{OLSH, TUINT8, TUINT8}: ssa.OpLsh8x8,
1339 opAndTwoTypes{OLSH, TINT8, TUINT16}: ssa.OpLsh8x16,
1340 opAndTwoTypes{OLSH, TUINT8, TUINT16}: ssa.OpLsh8x16,
1341 opAndTwoTypes{OLSH, TINT8, TUINT32}: ssa.OpLsh8x32,
1342 opAndTwoTypes{OLSH, TUINT8, TUINT32}: ssa.OpLsh8x32,
1343 opAndTwoTypes{OLSH, TINT8, TUINT64}: ssa.OpLsh8x64,
1344 opAndTwoTypes{OLSH, TUINT8, TUINT64}: ssa.OpLsh8x64,
1346 opAndTwoTypes{OLSH, TINT16, TUINT8}: ssa.OpLsh16x8,
1347 opAndTwoTypes{OLSH, TUINT16, TUINT8}: ssa.OpLsh16x8,
1348 opAndTwoTypes{OLSH, TINT16, TUINT16}: ssa.OpLsh16x16,
1349 opAndTwoTypes{OLSH, TUINT16, TUINT16}: ssa.OpLsh16x16,
1350 opAndTwoTypes{OLSH, TINT16, TUINT32}: ssa.OpLsh16x32,
1351 opAndTwoTypes{OLSH, TUINT16, TUINT32}: ssa.OpLsh16x32,
1352 opAndTwoTypes{OLSH, TINT16, TUINT64}: ssa.OpLsh16x64,
1353 opAndTwoTypes{OLSH, TUINT16, TUINT64}: ssa.OpLsh16x64,
1355 opAndTwoTypes{OLSH, TINT32, TUINT8}: ssa.OpLsh32x8,
1356 opAndTwoTypes{OLSH, TUINT32, TUINT8}: ssa.OpLsh32x8,
1357 opAndTwoTypes{OLSH, TINT32, TUINT16}: ssa.OpLsh32x16,
1358 opAndTwoTypes{OLSH, TUINT32, TUINT16}: ssa.OpLsh32x16,
1359 opAndTwoTypes{OLSH, TINT32, TUINT32}: ssa.OpLsh32x32,
1360 opAndTwoTypes{OLSH, TUINT32, TUINT32}: ssa.OpLsh32x32,
1361 opAndTwoTypes{OLSH, TINT32, TUINT64}: ssa.OpLsh32x64,
1362 opAndTwoTypes{OLSH, TUINT32, TUINT64}: ssa.OpLsh32x64,
1364 opAndTwoTypes{OLSH, TINT64, TUINT8}: ssa.OpLsh64x8,
1365 opAndTwoTypes{OLSH, TUINT64, TUINT8}: ssa.OpLsh64x8,
1366 opAndTwoTypes{OLSH, TINT64, TUINT16}: ssa.OpLsh64x16,
1367 opAndTwoTypes{OLSH, TUINT64, TUINT16}: ssa.OpLsh64x16,
1368 opAndTwoTypes{OLSH, TINT64, TUINT32}: ssa.OpLsh64x32,
1369 opAndTwoTypes{OLSH, TUINT64, TUINT32}: ssa.OpLsh64x32,
1370 opAndTwoTypes{OLSH, TINT64, TUINT64}: ssa.OpLsh64x64,
1371 opAndTwoTypes{OLSH, TUINT64, TUINT64}: ssa.OpLsh64x64,
1373 opAndTwoTypes{ORSH, TINT8, TUINT8}: ssa.OpRsh8x8,
1374 opAndTwoTypes{ORSH, TUINT8, TUINT8}: ssa.OpRsh8Ux8,
1375 opAndTwoTypes{ORSH, TINT8, TUINT16}: ssa.OpRsh8x16,
1376 opAndTwoTypes{ORSH, TUINT8, TUINT16}: ssa.OpRsh8Ux16,
1377 opAndTwoTypes{ORSH, TINT8, TUINT32}: ssa.OpRsh8x32,
1378 opAndTwoTypes{ORSH, TUINT8, TUINT32}: ssa.OpRsh8Ux32,
1379 opAndTwoTypes{ORSH, TINT8, TUINT64}: ssa.OpRsh8x64,
1380 opAndTwoTypes{ORSH, TUINT8, TUINT64}: ssa.OpRsh8Ux64,
1382 opAndTwoTypes{ORSH, TINT16, TUINT8}: ssa.OpRsh16x8,
1383 opAndTwoTypes{ORSH, TUINT16, TUINT8}: ssa.OpRsh16Ux8,
1384 opAndTwoTypes{ORSH, TINT16, TUINT16}: ssa.OpRsh16x16,
1385 opAndTwoTypes{ORSH, TUINT16, TUINT16}: ssa.OpRsh16Ux16,
1386 opAndTwoTypes{ORSH, TINT16, TUINT32}: ssa.OpRsh16x32,
1387 opAndTwoTypes{ORSH, TUINT16, TUINT32}: ssa.OpRsh16Ux32,
1388 opAndTwoTypes{ORSH, TINT16, TUINT64}: ssa.OpRsh16x64,
1389 opAndTwoTypes{ORSH, TUINT16, TUINT64}: ssa.OpRsh16Ux64,
1391 opAndTwoTypes{ORSH, TINT32, TUINT8}: ssa.OpRsh32x8,
1392 opAndTwoTypes{ORSH, TUINT32, TUINT8}: ssa.OpRsh32Ux8,
1393 opAndTwoTypes{ORSH, TINT32, TUINT16}: ssa.OpRsh32x16,
1394 opAndTwoTypes{ORSH, TUINT32, TUINT16}: ssa.OpRsh32Ux16,
1395 opAndTwoTypes{ORSH, TINT32, TUINT32}: ssa.OpRsh32x32,
1396 opAndTwoTypes{ORSH, TUINT32, TUINT32}: ssa.OpRsh32Ux32,
1397 opAndTwoTypes{ORSH, TINT32, TUINT64}: ssa.OpRsh32x64,
1398 opAndTwoTypes{ORSH, TUINT32, TUINT64}: ssa.OpRsh32Ux64,
1400 opAndTwoTypes{ORSH, TINT64, TUINT8}: ssa.OpRsh64x8,
1401 opAndTwoTypes{ORSH, TUINT64, TUINT8}: ssa.OpRsh64Ux8,
1402 opAndTwoTypes{ORSH, TINT64, TUINT16}: ssa.OpRsh64x16,
1403 opAndTwoTypes{ORSH, TUINT64, TUINT16}: ssa.OpRsh64Ux16,
1404 opAndTwoTypes{ORSH, TINT64, TUINT32}: ssa.OpRsh64x32,
1405 opAndTwoTypes{ORSH, TUINT64, TUINT32}: ssa.OpRsh64Ux32,
1406 opAndTwoTypes{ORSH, TINT64, TUINT64}: ssa.OpRsh64x64,
1407 opAndTwoTypes{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64,
// ssaShiftOp returns the SSA op for shift op `op` with operand type t and
// shift-count type u, via the shiftOpToSSA table. A missing entry is
// reported via Unimplementedf. NOTE(review): the `if !ok` guard and return
// are elided from this listing.
1410 func (s *state) ssaShiftOp(op Op, t *Type, u *Type) ssa.Op {
1411 etype1 := s.concreteEtype(t)
1412 etype2 := s.concreteEtype(u)
1413 x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
1415 s.Unimplementedf("unhandled shift op %s etype=%s/%s", op, etype1, etype2)
// ssaRotateOp returns the SSA op for rotate op `op` on type t. Rotates key
// the shared opToSSA table (the OLROT entries), not shiftOpToSSA.
// NOTE(review): the `if !ok` guard and return are elided from this listing.
1420 func (s *state) ssaRotateOp(op Op, t *Type) ssa.Op {
1421 etype1 := s.concreteEtype(t)
1422 x, ok := opToSSA[opAndType{op, etype1}]
1424 s.Unimplementedf("unhandled rotate op %s etype=%s", op, etype1)
1429 // expr converts the expression n to ssa, adds it to s and returns the ssa result.
// NOTE(review): this listing is sampled — the big `switch n.Op` scaffolding
// (case labels, closing braces, and many statements) is elided, as the gaps
// in the embedded line numbers show. Comments below describe only what the
// visible lines establish; confirm details against the full source.
1430 func (s *state) expr(n *Node) *ssa.Value {
1431 if !(n.Op == ONAME || n.Op == OLITERAL && n.Sym != nil) {
1432 // ONAMEs and named OLITERALs have the line number
1433 // of the decl, not the use. See issue 14742.
1434 s.pushLine(n.Lineno)
// --- name references: externs become OpAddr off the static base (s.sb),
// PFUNC names the closure address, others are SSA variables or loads.
1441 aux := s.lookupSymbol(n, &ssa.ExternSymbol{Typ: n.Type, Sym: n.Left.Sym})
1442 return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb)
1444 if n.Class == PFUNC {
1445 // "value" of a function is the address of the function's closure
1446 sym := funcsym(n.Sym)
1447 aux := &ssa.ExternSymbol{Typ: n.Type, Sym: sym}
1448 return s.entryNewValue1A(ssa.OpAddr, Ptrto(n.Type), aux, s.sb)
1451 return s.variable(n, n.Type)
1453 addr, _ := s.addr(n, false)
1454 return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
1456 addr, _ := s.addr(n, false)
1457 return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
// --- OLITERAL: dispatch on the constant's underlying value kind.
1459 switch u := n.Val().U.(type) {
1462 switch n.Type.Size() {
1464 return s.constInt8(n.Type, int8(i))
1466 return s.constInt16(n.Type, int16(i))
1468 return s.constInt32(n.Type, int32(i))
1470 return s.constInt64(n.Type, i)
1472 s.Fatalf("bad integer size %d", n.Type.Size())
1477 return s.constEmptyString(n.Type)
1479 return s.entryNewValue0A(ssa.OpConstString, n.Type, u)
1481 return s.constBool(u)
1486 return s.constSlice(t)
1487 case t.IsInterface():
1488 return s.constInterface(t)
1490 return s.constNil(t)
1493 switch n.Type.Size() {
1495 return s.constFloat32(n.Type, u.Float32())
1497 return s.constFloat64(n.Type, u.Float64())
1499 s.Fatalf("bad float size %d", n.Type.Size())
1505 switch n.Type.Size() {
1507 pt := Types[TFLOAT32]
1508 return s.newValue2(ssa.OpComplexMake, n.Type,
1509 s.constFloat32(pt, r.Float32()),
1510 s.constFloat32(pt, i.Float32()))
1512 pt := Types[TFLOAT64]
1513 return s.newValue2(ssa.OpComplexMake, n.Type,
1514 s.constFloat64(pt, r.Float64()),
1515 s.constFloat64(pt, i.Float64()))
1517 s.Fatalf("bad float size %d", n.Type.Size())
1522 s.Unimplementedf("unhandled OLITERAL %v", n.Val().Ctype())
// --- OCONVNOP: representation-preserving conversion with sanity checks.
1529 // Assume everything will work out, so set up our return value.
1530 // Anything interesting that happens from here is a fatal.
1533 // Special case for not confusing GC and liveness.
1534 // We don't want pointers accidentally classified
1535 // as not-pointers or vice-versa because of copy
1537 if to.IsPtrShaped() != from.IsPtrShaped() {
1538 return s.newValue2(ssa.OpConvert, to, x, s.mem())
1541 v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type
1544 if to.Etype == TFUNC && from.IsPtrShaped() {
1548 // named <--> unnamed type or typed <--> untyped const
1549 if from.Etype == to.Etype {
1553 // unsafe.Pointer <--> *T
1554 if to.Etype == TUNSAFEPTR && from.IsPtr() || from.Etype == TUNSAFEPTR && to.IsPtr() {
1560 if from.Width != to.Width {
1561 s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width)
1564 if etypesign(from.Etype) != etypesign(to.Etype) {
1565 s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Etype, to, to.Etype)
1570 // These appear to be fine, but they fail the
1571 // integer constraint below, so okay them here.
1572 // Sample non-integer conversion: map[string]string -> *uint8
1576 if etypesign(from.Etype) == 0 {
1577 s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
1581 // integer, same width, same sign
// --- OCONV: integer truncation / sign- or zero-extension, keyed by
// 10*fromSize + toSize (e.g. 84 = 8-byte -> 4-byte).
1586 ft := n.Left.Type // from type
1587 tt := n.Type // to type
1588 if ft.IsInteger() && tt.IsInteger() {
1590 if tt.Size() == ft.Size() {
1592 } else if tt.Size() < ft.Size() {
1594 switch 10*ft.Size() + tt.Size() {
1596 op = ssa.OpTrunc16to8
1598 op = ssa.OpTrunc32to8
1600 op = ssa.OpTrunc32to16
1602 op = ssa.OpTrunc64to8
1604 op = ssa.OpTrunc64to16
1606 op = ssa.OpTrunc64to32
1608 s.Fatalf("weird integer truncation %s -> %s", ft, tt)
1610 } else if ft.IsSigned() {
1612 switch 10*ft.Size() + tt.Size() {
1614 op = ssa.OpSignExt8to16
1616 op = ssa.OpSignExt8to32
1618 op = ssa.OpSignExt8to64
1620 op = ssa.OpSignExt16to32
1622 op = ssa.OpSignExt16to64
1624 op = ssa.OpSignExt32to64
1626 s.Fatalf("bad integer sign extension %s -> %s", ft, tt)
1630 switch 10*ft.Size() + tt.Size() {
1632 op = ssa.OpZeroExt8to16
1634 op = ssa.OpZeroExt8to32
1636 op = ssa.OpZeroExt8to64
1638 op = ssa.OpZeroExt16to32
1640 op = ssa.OpZeroExt16to64
1642 op = ssa.OpZeroExt32to64
1644 s.Fatalf("weird integer sign extension %s -> %s", ft, tt)
1647 return s.newValue1(op, n.Type, x)
// --- float conversions: table-driven via fpConvOpToSSA (with the 32-bit
// override table), plus helper calls for the unsigned-64-bit cases.
1650 if ft.IsFloat() || tt.IsFloat() {
1651 conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
1652 if s.config.IntSize == 4 {
1653 if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
1658 s.Fatalf("weird float conversion %s -> %s", ft, tt)
1660 op1, op2, it := conv.op1, conv.op2, conv.intermediateType
1662 if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid {
1663 // normal case, not tripping over unsigned 64
1664 if op1 == ssa.OpCopy {
1665 if op2 == ssa.OpCopy {
1668 return s.newValue1(op2, n.Type, x)
1670 if op2 == ssa.OpCopy {
1671 return s.newValue1(op1, n.Type, x)
1673 return s.newValue1(op2, n.Type, s.newValue1(op1, Types[it], x))
1675 // Tricky 64-bit unsigned cases.
1677 // therefore tt is float32 or float64, and ft is also unsigned
1679 return s.uint64Tofloat32(n, x, ft, tt)
1682 return s.uint64Tofloat64(n, x, ft, tt)
1684 s.Fatalf("weird unsigned integer to float conversion %s -> %s", ft, tt)
1686 // therefore ft is float32 or float64, and tt is unsigned integer
1688 return s.float32ToUint64(n, x, ft, tt)
1691 return s.float64ToUint64(n, x, ft, tt)
1693 s.Fatalf("weird float to unsigned integer conversion %s -> %s", ft, tt)
// --- complex conversions: convert real and imaginary parts independently.
1697 if ft.IsComplex() && tt.IsComplex() {
1699 if ft.Size() == tt.Size() {
1701 } else if ft.Size() == 8 && tt.Size() == 16 {
1702 op = ssa.OpCvt32Fto64F
1703 } else if ft.Size() == 16 && tt.Size() == 8 {
1704 op = ssa.OpCvt64Fto32F
1706 s.Fatalf("weird complex conversion %s -> %s", ft, tt)
1708 ftp := floatForComplex(ft)
1709 ttp := floatForComplex(tt)
1710 return s.newValue2(ssa.OpComplexMake, tt,
1711 s.newValue1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)),
1712 s.newValue1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x)))
1715 s.Unimplementedf("unhandled OCONV %s -> %s", n.Left.Type.Etype, n.Type.Etype)
1719 res, _ := s.dottype(n, false)
// --- comparisons: complex equality compares real and imag parts and ANDs
// the results (OpAnd8 on TBOOL); ONE is OEQ negated; ordered complex
// compares are a compiler error.
1723 case OLT, OEQ, ONE, OLE, OGE, OGT:
1725 b := s.expr(n.Right)
1726 if n.Left.Type.IsComplex() {
1727 pt := floatForComplex(n.Left.Type)
1728 op := s.ssaOp(OEQ, pt)
1729 r := s.newValue2(op, Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
1730 i := s.newValue2(op, Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
1731 c := s.newValue2(ssa.OpAnd8, Types[TBOOL], r, i)
1736 return s.newValue1(ssa.OpNot, Types[TBOOL], c)
1738 s.Fatalf("ordered complex compare %s", n.Op)
1741 return s.newValue2(s.ssaOp(n.Op, n.Left.Type), Types[TBOOL], a, b)
// --- complex multiply: (ar*br - ai*bi) + (ar*bi + ai*br)i, computed in
// float64 and narrowed back for complex64.
1744 b := s.expr(n.Right)
1745 if n.Type.IsComplex() {
1746 mulop := ssa.OpMul64F
1747 addop := ssa.OpAdd64F
1748 subop := ssa.OpSub64F
1749 pt := floatForComplex(n.Type) // Could be Float32 or Float64
1750 wt := Types[TFLOAT64] // Compute in Float64 to minimize cancellation error
1752 areal := s.newValue1(ssa.OpComplexReal, pt, a)
1753 breal := s.newValue1(ssa.OpComplexReal, pt, b)
1754 aimag := s.newValue1(ssa.OpComplexImag, pt, a)
1755 bimag := s.newValue1(ssa.OpComplexImag, pt, b)
1757 if pt != wt { // Widen for calculation
1758 areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal)
1759 breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal)
1760 aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag)
1761 bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag)
1764 xreal := s.newValue2(subop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag))
1765 ximag := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, bimag), s.newValue2(mulop, wt, aimag, breal))
1767 if pt != wt { // Narrow to store back
1768 xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal)
1769 ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag)
1772 return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
1774 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
// --- division: complex division by conjugate/denominator; integer division
// inserts an explicit zero-divisor check that calls panicdivide.
1778 b := s.expr(n.Right)
1779 if n.Type.IsComplex() {
1780 // TODO this is not executed because the front-end substitutes a runtime call.
1781 // That probably ought to change; with modest optimization the widen/narrow
1782 // conversions could all be elided in larger expression trees.
1783 mulop := ssa.OpMul64F
1784 addop := ssa.OpAdd64F
1785 subop := ssa.OpSub64F
1786 divop := ssa.OpDiv64F
1787 pt := floatForComplex(n.Type) // Could be Float32 or Float64
1788 wt := Types[TFLOAT64] // Compute in Float64 to minimize cancellation error
1790 areal := s.newValue1(ssa.OpComplexReal, pt, a)
1791 breal := s.newValue1(ssa.OpComplexReal, pt, b)
1792 aimag := s.newValue1(ssa.OpComplexImag, pt, a)
1793 bimag := s.newValue1(ssa.OpComplexImag, pt, b)
1795 if pt != wt { // Widen for calculation
1796 areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal)
1797 breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal)
1798 aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag)
1799 bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag)
1802 denom := s.newValue2(addop, wt, s.newValue2(mulop, wt, breal, breal), s.newValue2(mulop, wt, bimag, bimag))
1803 xreal := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag))
1804 ximag := s.newValue2(subop, wt, s.newValue2(mulop, wt, aimag, breal), s.newValue2(mulop, wt, areal, bimag))
1806 // TODO not sure if this is best done in wide precision or narrow
1807 // Double-rounding might be an issue.
1808 // Note that the pre-SSA implementation does the entire calculation
1809 // in wide format, so wide is compatible.
1810 xreal = s.newValue2(divop, wt, xreal, denom)
1811 ximag = s.newValue2(divop, wt, ximag, denom)
1813 if pt != wt { // Narrow to store back
1814 xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal)
1815 ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag)
1817 return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
1819 if n.Type.IsFloat() {
1820 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
1822 // do a size-appropriate check for zero
1823 cmp := s.newValue2(s.ssaOp(ONE, n.Type), Types[TBOOL], b, s.zeroVal(n.Type))
1824 s.check(cmp, panicdivide)
1825 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
// --- modulus: same zero-divisor check as integer division.
1829 b := s.expr(n.Right)
1830 // do a size-appropriate check for zero
1831 cmp := s.newValue2(s.ssaOp(ONE, n.Type), Types[TBOOL], b, s.zeroVal(n.Type))
1832 s.check(cmp, panicdivide)
1833 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
// --- add/sub: complex values are handled component-wise.
1836 b := s.expr(n.Right)
1837 if n.Type.IsComplex() {
1838 pt := floatForComplex(n.Type)
1839 op := s.ssaOp(n.Op, pt)
1840 return s.newValue2(ssa.OpComplexMake, n.Type,
1841 s.newValue2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)),
1842 s.newValue2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)))
1844 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
1845 case OAND, OOR, OHMUL, OXOR:
1847 b := s.expr(n.Right)
1848 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
1851 b := s.expr(n.Right)
1852 return s.newValue2(s.ssaShiftOp(n.Op, n.Type, n.Right.Type), a.Type, a, b)
// --- OLROT: rotate distance must be a constant in [1, bits-1].
1855 i := n.Right.Int64()
1856 if i <= 0 || i >= n.Type.Size()*8 {
1857 s.Fatalf("Wrong rotate distance for LROT, expected 1 through %d, saw %d", n.Type.Size()*8-1, i)
1859 return s.newValue1I(s.ssaRotateOp(n.Op, n.Type), a.Type, i, a)
1860 case OANDAND, OOROR:
1861 // To implement OANDAND (and OOROR), we introduce a
1862 // new temporary variable to hold the result. The
1863 // variable is associated with the OANDAND node in the
1864 // s.vars table (normally variables are only
1865 // associated with ONAME nodes). We convert
1872 // Using var in the subsequent block introduces the
1873 // necessary phi variable.
1874 el := s.expr(n.Left)
1878 b.Kind = ssa.BlockIf
1880 // In theory, we should set b.Likely here based on context.
1881 // However, gc only gives us likeliness hints
1882 // in a single place, for plain OIF statements,
1883 // and passing around context is finnicky, so don't bother for now.
1885 bRight := s.f.NewBlock(ssa.BlockPlain)
1886 bResult := s.f.NewBlock(ssa.BlockPlain)
1887 if n.Op == OANDAND {
1889 b.AddEdgeTo(bResult)
1890 } else if n.Op == OOROR {
1891 b.AddEdgeTo(bResult)
1895 s.startBlock(bRight)
1896 er := s.expr(n.Right)
1900 b.AddEdgeTo(bResult)
1902 s.startBlock(bResult)
1903 return s.variable(n, Types[TBOOL])
1906 i := s.expr(n.Right)
1907 return s.newValue2(ssa.OpComplexMake, n.Type, r, i)
// --- unary minus: complex negation negates both components.
1912 if n.Type.IsComplex() {
1913 tp := floatForComplex(n.Type)
1914 negop := s.ssaOp(n.Op, tp)
1915 return s.newValue2(ssa.OpComplexMake, n.Type,
1916 s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)),
1917 s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a)))
1919 return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
1920 case ONOT, OCOM, OSQRT:
1922 return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
1925 return s.newValue1(s.ssaOp(n.Op, n.Left.Type), n.Type, a)
1927 return s.expr(n.Left)
1930 a, _ := s.addr(n.Left, n.Bounded)
1931 // Note we know the volatile result is false because you can't write &f() in Go.
1935 if int(n.Reg) != Thearch.REGSP {
1936 s.Unimplementedf("OINDREG of non-SP register %s in expr: %v", obj.Rconv(int(n.Reg)), n)
1939 addr := s.entryNewValue1I(ssa.OpOffPtr, Ptrto(n.Type), n.Xoffset, s.sp)
1940 return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
1943 p := s.exprPtr(n.Left, false, n.Lineno)
1944 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
1950 return s.newValue1I(ssa.OpStructSelect, n.Type, int64(fieldIdx(n)), v)
1952 p, _ := s.addr(n, false)
1953 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
1956 p := s.exprPtr(n.Left, false, n.Lineno)
1957 p = s.newValue1I(ssa.OpOffPtr, p.Type, n.Xoffset, p)
1958 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
// --- OINDEX: strings get an explicit bounds check then a byte load;
// slices/arrays go through addr + load.
1962 case n.Left.Type.IsString():
1964 i := s.expr(n.Right)
1965 i = s.extendIndex(i, Panicindex)
1967 len := s.newValue1(ssa.OpStringLen, Types[TINT], a)
1968 s.boundsCheck(i, len)
1970 ptrtyp := Ptrto(Types[TUINT8])
1971 ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
1972 if Isconst(n.Right, CTINT) {
1973 ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64(), ptr)
1975 ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
1977 return s.newValue2(ssa.OpLoad, Types[TUINT8], ptr, s.mem())
1978 case n.Left.Type.IsSlice():
1979 p, _ := s.addr(n, false)
1980 return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem())
1981 case n.Left.Type.IsArray():
1982 // TODO: fix when we can SSA arrays of length 1.
1983 p, _ := s.addr(n, false)
1984 return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem())
1986 s.Fatalf("bad type for index %v", n.Left.Type)
// --- OLEN/OCAP: slice len/cap ops, string len, runtime helper for
// maps/channels, constant for arrays.
1992 case n.Left.Type.IsSlice():
1993 op := ssa.OpSliceLen
1997 return s.newValue1(op, Types[TINT], s.expr(n.Left))
1998 case n.Left.Type.IsString(): // string; not reachable for OCAP
1999 return s.newValue1(ssa.OpStringLen, Types[TINT], s.expr(n.Left))
2000 case n.Left.Type.IsMap(), n.Left.Type.IsChan():
2001 return s.referenceTypeBuiltin(n, s.expr(n.Left))
2003 return s.constInt(Types[TINT], n.Left.Type.NumElem())
2008 if n.Left.Type.IsSlice() {
2009 return s.newValue1(ssa.OpSlicePtr, n.Type, a)
2011 return s.newValue1(ssa.OpStringPtr, n.Type, a)
2016 return s.newValue1(ssa.OpITab, n.Type, a)
// --- eface construction: peel arrays/structs down to the underlying pointer.
2019 tab := s.expr(n.Left)
2020 data := s.expr(n.Right)
2021 // The frontend allows putting things like struct{*byte} in
2022 // the data portion of an eface. But we don't want struct{*byte}
2023 // as a register type because (among other reasons) the liveness
2024 // analysis is confused by the "fat" variables that result from
2025 // such types being spilled.
2026 // So here we ensure that we are selecting the underlying pointer
2027 // when we build an eface.
2028 // TODO: get rid of this now that structs can be SSA'd?
2029 for !data.Type.IsPtrShaped() {
2031 case data.Type.IsArray():
2032 data = s.newValue1I(ssa.OpArrayIndex, data.Type.ElemType(), 0, data)
2033 case data.Type.IsStruct():
2034 for i := data.Type.NumFields() - 1; i >= 0; i-- {
2035 f := data.Type.FieldType(i)
2037 // eface type could also be struct{p *byte; q [0]int}
2040 data = s.newValue1I(ssa.OpStructSelect, f, int64(i), data)
2044 s.Fatalf("type being put into an eface isn't a pointer")
2047 return s.newValue2(ssa.OpIMake, n.Type, tab, data)
// --- slicing and call expressions.
2049 case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR:
2051 var i, j, k *ssa.Value
2052 low, high, max := n.SliceBounds()
2054 i = s.extendIndex(s.expr(low), panicslice)
2057 j = s.extendIndex(s.expr(high), panicslice)
2060 k = s.extendIndex(s.expr(max), panicslice)
2062 p, l, c := s.slice(n.Left.Type, v, i, j, k)
2063 return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c)
2068 low, high, _ := n.SliceBounds()
2070 i = s.extendIndex(s.expr(low), panicslice)
2073 j = s.extendIndex(s.expr(high), panicslice)
2075 p, l, _ := s.slice(n.Left.Type, v, i, j, nil)
2076 return s.newValue2(ssa.OpStringMake, n.Type, p, l)
2079 if isIntrinsicCall1(n) {
2080 return s.intrinsicCall1(n)
2084 case OCALLINTER, OCALLMETH:
2085 a := s.call(n, callNormal)
2086 return s.newValue2(ssa.OpLoad, n.Type, a, s.mem())
2089 return s.newValue1(ssa.OpGetG, n.Type, s.mem())
2092 return s.append(n, false)
2095 s.Unimplementedf("unhandled expr %s", n.Op)
2100 // append converts an OAPPEND node to SSA.
2101 // If inplace is false, it converts the OAPPEND expression n to an ssa.Value,
2102 // adds it to s, and returns the Value.
2103 // If inplace is true, it writes the result of the OAPPEND expression n
2104 // back to the slice being appended to, and returns nil.
2105 // inplace MUST be set to false if the slice can be SSA'd.
// NOTE(review): this listing is sampled — branch scaffolding and closing
// braces are elided (see the gaps in the embedded line numbers). The added
// comments below describe only what the visible lines establish.
2106 func (s *state) append(n *Node, inplace bool) *ssa.Value {
2107 // If inplace is false, process as expression "append(s, e1, e2, e3)":
2109 // ptr, len, cap := s
2110 // newlen := len + 3
2111 // if newlen > cap {
2112 // ptr, len, cap = growslice(s, newlen)
2113 // newlen = len + 3 // recalculate to avoid a spill
2115 // // with write barriers, if needed:
2117 // *(ptr+len+1) = e2
2118 // *(ptr+len+2) = e3
2119 // return makeslice(ptr, newlen, cap)
2122 // If inplace is true, process as statement "s = append(s, e1, e2, e3)":
2125 // ptr, len, cap := s
2126 // newlen := len + 3
2127 // if newlen > cap {
2128 // newptr, len, newcap = growslice(ptr, len, cap, newlen)
2129 // vardef(a) // if necessary, advise liveness we are writing a new a
2130 // *a.cap = newcap // write before ptr to avoid a spill
2131 // *a.ptr = newptr // with write barrier
2133 // newlen = len + 3 // recalculate to avoid a spill
2135 // // with write barriers, if needed:
2137 // *(ptr+len+1) = e2
2138 // *(ptr+len+2) = e3
2144 sn := n.List.First() // the slice node is the first in the list
2146 var slice, addr *ssa.Value
2148 addr, _ = s.addr(sn, false)
2149 slice = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
2154 // Allocate new blocks
2155 grow := s.f.NewBlock(ssa.BlockPlain)
2156 assign := s.f.NewBlock(ssa.BlockPlain)
2158 // Decide if we need to grow
2159 nargs := int64(n.List.Len() - 1)
2160 p := s.newValue1(ssa.OpSlicePtr, pt, slice)
2161 l := s.newValue1(ssa.OpSliceLen, Types[TINT], slice)
2162 c := s.newValue1(ssa.OpSliceCap, Types[TINT], slice)
2163 nl := s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], l, s.constInt(Types[TINT], nargs))
2165 cmp := s.newValue2(s.ssaOp(OGT, Types[TINT]), Types[TBOOL], nl, c)
2169 s.vars[&newlenVar] = nl
// the grow branch is marked unlikely: the common case fits in capacity.
2176 b.Kind = ssa.BlockIf
2177 b.Likely = ssa.BranchUnlikely
// --- grow path: call runtime growslice with the element type descriptor.
2184 taddr := s.newValue1A(ssa.OpAddr, Types[TUINTPTR], &ssa.ExternSymbol{Typ: Types[TUINTPTR], Sym: typenamesym(n.Type.Elem())}, s.sb)
2186 r := s.rtcall(growslice, true, []*Type{pt, Types[TINT], Types[TINT]}, taddr, p, l, c, nl)
2190 // Tell liveness we're about to build a new slice
2191 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, sn, s.mem())
// inplace: store new cap, then ptr (with write barrier), back into *addr.
2193 capaddr := s.newValue1I(ssa.OpOffPtr, pt, int64(Array_cap), addr)
2194 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, capaddr, r[2], s.mem())
2195 s.insertWBstore(pt, addr, r[0], n.Lineno, 0)
2196 // load the value we just stored to avoid having to spill it
2197 s.vars[&ptrVar] = s.newValue2(ssa.OpLoad, pt, addr, s.mem())
2198 s.vars[&lenVar] = r[1] // avoid a spill in the fast path
2200 s.vars[&ptrVar] = r[0]
2201 s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], r[1], s.constInt(Types[TINT], nargs))
2202 s.vars[&capVar] = r[2]
2208 // assign new elements to slots
2209 s.startBlock(assign)
// inplace: recompute newlen from the phi'd len and store it into *addr.
2212 l = s.variable(&lenVar, Types[TINT]) // generates phi for len
2213 nl = s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], l, s.constInt(Types[TINT], nargs))
2214 lenaddr := s.newValue1I(ssa.OpOffPtr, pt, int64(Array_nel), addr)
2215 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenaddr, nl, s.mem())
2219 type argRec struct {
2220 // if store is true, we're appending the value v. If false, we're appending the
2221 // value at *v. If store==false, isVolatile reports whether the source
2222 // is in the outargs section of the stack frame.
2227 args := make([]argRec, 0, nargs)
2228 for _, n := range n.List.Slice()[1:] {
2229 if canSSAType(n.Type) {
2230 args = append(args, argRec{v: s.expr(n), store: true})
2232 v, isVolatile := s.addr(n, false)
2233 args = append(args, argRec{v: v, isVolatile: isVolatile})
2237 p = s.variable(&ptrVar, pt) // generates phi for ptr
2239 nl = s.variable(&newlenVar, Types[TINT]) // generates phi for nl
2240 c = s.variable(&capVar, Types[TINT]) // generates phi for cap
2242 p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l)
2243 // TODO: just one write barrier call for all of these writes?
2244 // TODO: maybe just one writeBarrier.enabled check?
2245 for i, arg := range args {
2246 addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(Types[TINT], int64(i)))
// pointer-containing elements need write barriers; store vs. move depends
// on whether the argument was SSA'able (argRec.store).
2248 if haspointers(et) {
2249 s.insertWBstore(et, addr, arg.v, n.Lineno, 0)
2251 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, et.Size(), addr, arg.v, s.mem())
2254 if haspointers(et) {
2255 s.insertWBmove(et, addr, arg.v, n.Lineno, arg.isVolatile)
2257 s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, et.Size(), addr, arg.v, s.mem())
// drop the temporary phi variables now that the append is materialized.
2262 delete(s.vars, &ptrVar)
2264 delete(s.vars, &lenVar)
2267 delete(s.vars, &newlenVar)
2268 delete(s.vars, &capVar)
2270 return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c)
2273 // condBranch evaluates the boolean expression cond and branches to yes
2274 // if cond is true and no if cond is false.
2275 // This function is intended to handle && and || better than just calling
2276 // s.expr(cond) and branching on the result.
2277 func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) {
2278 if cond.Op == OANDAND {
// Short-circuit &&: test the left operand first; only when it is
// true do we continue in mid and test the right operand.
2279 mid := s.f.NewBlock(ssa.BlockPlain)
2280 s.stmtList(cond.Ninit)
// max8(likely, 0): see the note below — when likely==-1 we cannot
// tell which operand carries the unlikeliness, so the first test
// gets a neutral (0) prediction.
2281 s.condBranch(cond.Left, mid, no, max8(likely, 0))
2283 s.condBranch(cond.Right, yes, no, likely)
2285 // Note: if likely==1, then both recursive calls pass 1.
2286 // If likely==-1, then we don't have enough information to decide
2287 // whether the first branch is likely or not. So we pass 0 for
2288 // the likeliness of the first branch.
2289 // TODO: have the frontend give us branch prediction hints for
2290 // OANDAND and OOROR nodes (if it ever has such info).
2292 if cond.Op == OOROR {
// Short-circuit ||: when the left operand is true we go straight
// to yes; otherwise we continue in mid with the right operand.
2293 mid := s.f.NewBlock(ssa.BlockPlain)
2294 s.stmtList(cond.Ninit)
// min8(likely, 0) mirrors the && case with signs flipped.
2295 s.condBranch(cond.Left, yes, mid, min8(likely, 0))
2297 s.condBranch(cond.Right, yes, no, likely)
2299 // Note: if likely==-1, then both recursive calls pass -1.
2300 // If likely==1, then we don't have enough info to decide
2301 // the likelihood of the first branch.
2303 if cond.Op == ONOT {
// !cond: swap the branch targets and negate the prediction.
2304 s.stmtList(cond.Ninit)
2305 s.condBranch(cond.Left, no, yes, -likely)
// General case: evaluate cond to a boolean SSA value and emit an If block.
2310 b.Kind = ssa.BlockIf
2312 b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness
2320 skipPtr skipMask = 1 << iota
2325 // assign does left = right.
2326 // Right has already been evaluated to ssa, left has not.
2327 // If deref is true, then we do left = *right instead (and right has already been nil-checked).
2328 // If deref is true and right == nil, just do left = 0.
2329 // If deref is true, rightIsVolatile reports whether right points to volatile (clobbered by a call) storage.
2330 // Include a write barrier if wb is true.
2331 // skip indicates assignments (at the top level) that can be avoided.
2332 func (s *state) assign(left *Node, right *ssa.Value, wb, deref bool, line int32, skip skipMask, rightIsVolatile bool) {
// Assignments to the blank identifier produce no code.
2333 if left.Op == ONAME && isblank(left) {
2340 s.Fatalf("can SSA LHS %s but not RHS %s", left, right)
2342 if left.Op == ODOT {
2343 // We're assigning to a field of an ssa-able value.
2344 // We need to build a new structure with the new value for the
2345 // field we're assigning and the old values for the other fields.
2347 // type T struct {a, b, c int}
2350 // For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c}
2352 // Grab information about the structure type.
2355 idx := fieldIdx(left)
2357 // Grab old value of structure.
2358 old := s.expr(left.Left)
2360 // Make new structure.
2361 new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t)
2363 // Add fields as args.
2364 for i := 0; i < nf; i++ {
// Fields other than idx are copied from the old struct value.
2368 new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old))
2372 // Recursively assign the new value we've made to the base of the dot op.
2373 s.assign(left.Left, new, false, false, line, 0, rightIsVolatile)
2374 // TODO: do we need to update named values here?
// SSA-able variable: record the new value in the variable map
// and remember it for debug-info naming. No memory store needed.
2377 // Update variable assignment.
2378 s.vars[left] = right
2379 s.addNamedValue(left, right)
2382 // Left is not ssa-able. Compute its address.
2383 addr, _ := s.addr(left, false)
// VarDef tells liveness that the whole variable is (re)defined here.
2384 if left.Op == ONAME && skip == 0 {
2385 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, left, s.mem())
2388 // Treat as a mem->mem move.
// deref with right == nil means "zero the destination" (see contract above).
2390 s.vars[&memVar] = s.newValue2I(ssa.OpZero, ssa.TypeMem, t.Size(), addr, s.mem())
2394 s.insertWBmove(t, addr, right, line, rightIsVolatile)
2397 s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, t.Size(), addr, right, s.mem())
2400 // Treat as a store.
2402 if skip&skipPtr != 0 {
2403 // Special case: if we don't write back the pointers, don't bother
2404 // doing the write barrier check.
2405 s.storeTypeScalars(t, addr, right, skip)
2408 s.insertWBstore(t, addr, right, line, skip)
// No write barrier needed: store pointer parts first, then scalars.
2412 if skip&skipPtr == 0 {
2413 s.storeTypePtrs(t, addr, right)
2415 s.storeTypeScalars(t, addr, right, skip)
2418 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, t.Size(), addr, right, s.mem())
2421 // zeroVal returns the zero value for type t.
2422 func (s *state) zeroVal(t *Type) *ssa.Value {
// Integer zeros are emitted at the width of t.
2427 return s.constInt8(t, 0)
2429 return s.constInt16(t, 0)
2431 return s.constInt32(t, 0)
2433 return s.constInt64(t, 0)
2435 s.Fatalf("bad sized integer type %s", t)
// Float zeros, also selected by width.
2440 return s.constFloat32(t, 0)
2442 return s.constFloat64(t, 0)
2444 s.Fatalf("bad sized float type %s", t)
// Complex zero = ComplexMake of two float zeros of the element width.
2449 z := s.constFloat32(Types[TFLOAT32], 0)
2450 return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
2452 z := s.constFloat64(Types[TFLOAT64], 0)
2453 return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
2455 s.Fatalf("bad sized complex type %s", t)
2459 return s.constEmptyString(t)
2460 case t.IsPtrShaped():
2461 return s.constNil(t)
2463 return s.constBool(false)
2464 case t.IsInterface():
2465 return s.constInterface(t)
2467 return s.constSlice(t)
// Struct zero is built recursively, one zero per field.
2470 v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t)
2471 for i := 0; i < n; i++ {
2472 v.AddArg(s.zeroVal(t.FieldType(i).(*Type)))
2476 s.Unimplementedf("zero for type %v not implemented", t)
2483 callNormal callKind = iota
2488 // isSSAIntrinsic1 returns true if n is a call to a recognized 1-arg intrinsic
2489 // that can be handled by the SSA backend.
2490 // SSA uses this, but so does the front end to see if should not
2491 // inline a function because it is a candidate for intrinsic
2493 func isSSAIntrinsic1(s *Sym) bool {
2494 // The test below is not quite accurate -- in the event that
2495 // a function is disabled on a per-function basis, for example
2496 // because of hash-keyed binary failure search, SSA might be
2497 // disabled for that function but it would not be noted here,
2498 // and thus an inlining would not occur (in practice, inlining
2499 // so far has only been noticed for Bswap32 and the 16-bit count
2500 // leading/trailing instructions, but heuristics might change
2501 // in the future or on different architectures).
// Intrinsics are only wired up for AMD64 at this point.
2502 if !ssaEnabled || ssa.IntrinsicsDisable || Thearch.LinkArch.Family != sys.AMD64 {
// Only functions from runtime/internal/sys are recognized.
2505 if s != nil && s.Pkg != nil && s.Pkg.Path == "runtime/internal/sys" {
2508 "Ctz64", "Ctz32", "Ctz16",
2509 "Bswap64", "Bswap32":
// isIntrinsicCall1 reports whether n is a call whose target symbol is a
// recognized 1-arg SSA intrinsic (see isSSAIntrinsic1).
2516 func isIntrinsicCall1(n *Node) bool {
2517 if n == nil || n.Left == nil {
2520 return isSSAIntrinsic1(n.Left.Sym)
2523 // intrinsicFirstArg extracts arg from n.List and eval
2524 func (s *state) intrinsicFirstArg(n *Node) *ssa.Value {
// (Body elided in this view — presumably evaluates the first element of
// n.List via s.expr; confirm against the full source.)
2532 // intrinsicCall1 converts a call to a recognized 1-arg intrinsic
2533 // into the intrinsic
2534 func (s *state) intrinsicCall1(n *Node) *ssa.Value {
2535 var result *ssa.Value
// Dispatch on the callee's name; each case lowers directly to the
// corresponding SSA op instead of emitting a call.
2536 switch n.Left.Sym.Name {
2538 result = s.newValue1(ssa.OpCtz64, Types[TUINT64], s.intrinsicFirstArg(n))
2540 result = s.newValue1(ssa.OpCtz32, Types[TUINT32], s.intrinsicFirstArg(n))
2542 result = s.newValue1(ssa.OpCtz16, Types[TUINT16], s.intrinsicFirstArg(n))
2544 result = s.newValue1(ssa.OpBswap64, Types[TUINT64], s.intrinsicFirstArg(n))
2546 result = s.newValue1(ssa.OpBswap32, Types[TUINT32], s.intrinsicFirstArg(n))
2549 Fatalf("Unknown special call: %v", n.Left.Sym)
// Optional debug trace of each substitution.
2551 if ssa.IntrinsicsDebug > 0 {
2552 Warnl(n.Lineno, "intrinsic substitution for %v with %s", n.Left.Sym.Name, result.LongString())
2557 // Calls the function n using the specified call type.
2558 // Returns the address of the return value (or nil if none).
2559 func (s *state) call(n *Node, k callKind) *ssa.Value {
// Exactly one of sym/closure/codeptr will be set, selecting the call form
// (static, closure, or indirect-through-code-pointer).
2560 var sym *Sym // target symbol (if static)
2561 var closure *ssa.Value // ptr to closure to run (if dynamic)
2562 var codeptr *ssa.Value // ptr to target code (if dynamic)
2563 var rcvr *ssa.Value // receiver to set
// Determine the call target from the call node's Op (OCALLFUNC /
// OCALLMETH / OCALLINTER — the switch labels are elided in this view).
2567 if k == callNormal && fn.Op == ONAME && fn.Class == PFUNC {
2571 closure = s.expr(fn)
2573 if fn.Op != ODOTMETH {
2574 Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn)
2576 if k == callNormal {
// go/defer of a method: synthesize a name node for the method func
// and treat it as a closure value.
2580 n2 := newname(fn.Sym)
2582 n2.Lineno = fn.Lineno
2583 closure = s.expr(n2)
2584 // Note: receiver is already assigned in n.List, so we don't
2585 // want to set it here.
2587 if fn.Op != ODOTINTER {
2588 Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op)
// Interface call: split the interface into itab and data words.
2590 i := s.expr(fn.Left)
2591 itab := s.newValue1(ssa.OpITab, Types[TUINTPTR], i)
2592 if k != callNormal {
// Offset of the method's entry in the itab; the 3*Widthptr+8
// skips the itab header fields (see runtime.itab layout).
2595 itabidx := fn.Xoffset + 3*int64(Widthptr) + 8 // offset of fun field in runtime.itab
2596 itab = s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TUINTPTR]), itabidx, itab)
2597 if k == callNormal {
2598 codeptr = s.newValue2(ssa.OpLoad, Types[TUINTPTR], itab, s.mem())
2602 rcvr = s.newValue1(ssa.OpIData, Types[TUINTPTR], i)
2605 stksize := fn.Type.ArgWidth() // includes receiver
2607 // Run all argument assignments. The arg slots have already
2608 // been offset by the appropriate amount (+2*widthptr for go/defer,
2609 // +widthptr for interface calls).
2610 // For OCALLMETH, the receiver is set in these statements.
2613 // Set receiver (for interface calls)
2615 argStart := Ctxt.FixedFrameSize()
2616 if k != callNormal {
// go/defer calls reserve two extra words (argsize + closure) first.
2617 argStart += int64(2 * Widthptr)
2619 addr := s.entryNewValue1I(ssa.OpOffPtr, Ptrto(Types[TUINTPTR]), argStart, s.sp)
2620 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, int64(Widthptr), addr, rcvr, s.mem())
2624 if k != callNormal {
2625 // Write argsize and closure (args to Newproc/Deferproc).
2626 argStart := Ctxt.FixedFrameSize()
2627 argsize := s.constInt32(Types[TUINT32], int32(stksize))
2628 addr := s.entryNewValue1I(ssa.OpOffPtr, Ptrto(Types[TUINT32]), argStart, s.sp)
2629 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, 4, addr, argsize, s.mem())
2630 addr = s.entryNewValue1I(ssa.OpOffPtr, Ptrto(Types[TUINTPTR]), argStart+int64(Widthptr), s.sp)
2631 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, int64(Widthptr), addr, closure, s.mem())
2632 stksize += 2 * int64(Widthptr)
// Emit the call itself; the op chosen depends on the call form.
2636 bNext := s.f.NewBlock(ssa.BlockPlain)
2639 case k == callDefer:
2640 call = s.newValue1(ssa.OpDeferCall, ssa.TypeMem, s.mem())
2642 call = s.newValue1(ssa.OpGoCall, ssa.TypeMem, s.mem())
2643 case closure != nil:
// Load the code pointer out of the closure, then make a
// closure call carrying both code pointer and closure env.
2644 codeptr = s.newValue2(ssa.OpLoad, Types[TUINTPTR], closure, s.mem())
2645 call = s.newValue3(ssa.OpClosureCall, ssa.TypeMem, codeptr, closure, s.mem())
2646 case codeptr != nil:
2647 call = s.newValue2(ssa.OpInterCall, ssa.TypeMem, codeptr, s.mem())
2649 call = s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, sym, s.mem())
2651 Fatalf("bad call type %s %v", n.Op, n)
2653 call.AuxInt = stksize // Call operations carry the argsize of the callee along with them
2655 // Finish call block
2656 s.vars[&memVar] = call
2658 b.Kind = ssa.BlockCall
// Deferred calls may return twice (via recover), so the block gets an
// extra edge to the exit code.
2662 // Add recover edge to exit code.
2663 b.Kind = ssa.BlockDefer
2664 r := s.f.NewBlock(ssa.BlockPlain)
2668 b.Likely = ssa.BranchLikely
2671 // Start exit block, find address of result.
2673 // Keep input pointer args live across calls. This is a bandaid until 1.8.
2674 for _, n := range s.ptrargs {
2675 s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, ssa.TypeMem, s.variable(n, n.Type), s.mem())
2677 res := n.Left.Type.Results()
2678 if res.NumFields() == 0 || k != callNormal {
2679 // call has no return value. Continue with the next statement.
// Return the stack address of the (first) result slot.
2683 return s.entryNewValue1I(ssa.OpOffPtr, Ptrto(fp.Type), fp.Offset+Ctxt.FixedFrameSize(), s.sp)
2686 // etypesign returns the signed-ness of e, for integer/pointer etypes.
2687 // -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer.
2688 func etypesign(e EType) int8 {
// Signed integer kinds (return -1 per the contract above).
2690 case TINT8, TINT16, TINT32, TINT64, TINT:
// Unsigned integer and pointer kinds (return +1).
2692 case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR, TUNSAFEPTR:
2698 // lookupSymbol is used to retrieve the symbol (Extern, Arg or Auto) used for a particular node.
2699 // This improves the effectiveness of cse by using the same Aux values for the
2701 func (s *state) lookupSymbol(n *Node, sym interface{}) interface{} {
// NOTE(review): "uknown" is a typo ("unknown") in the message below —
// worth fixing in a behavior-change commit.
2704 s.Fatalf("sym %v is of uknown type %T", sym, sym)
2705 case *ssa.ExternSymbol, *ssa.ArgSymbol, *ssa.AutoSymbol:
2706 // these are the only valid types
// Return the cached symbol for n, if one was interned earlier.
2709 if lsym, ok := s.varsyms[n]; ok {
2717 // addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
2718 // Also returns a bool reporting whether the returned value is "volatile", that is it
2719 // points to the outargs section and thus the referent will be clobbered by any call.
2720 // The value that the returned Value represents is guaranteed to be non-nil.
2721 // If bounded is true then this address does not require a nil check for its operand
2722 // even if that would otherwise be implied.
2723 func (s *state) addr(n *Node, bounded bool) (*ssa.Value, bool) {
// Global (extern) symbol: address is sb-relative; reuse the interned aux.
2730 aux := s.lookupSymbol(n, &ssa.ExternSymbol{Typ: n.Type, Sym: n.Sym})
2731 v := s.entryNewValue1A(ssa.OpAddr, t, aux, s.sb)
2732 // TODO: Make OpAddr use AuxInt as well as Aux.
2734 v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Xoffset, v)
2743 if n.String() == ".fp" {
2744 // Special arg that points to the frame pointer.
2745 // (Used by the race detector, others?)
2746 aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n})
2747 return s.entryNewValue1A(ssa.OpAddr, t, aux, s.sp), false
2748 s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
// Stack auto: sp-relative address with an AutoSymbol aux.
2752 aux := s.lookupSymbol(n, &ssa.AutoSymbol{Typ: n.Type, Node: n})
2753 return s.newValue1A(ssa.OpAddr, t, aux, s.sp), false
2754 case PPARAMOUT: // Same as PAUTO -- cannot generate LEA early.
2755 // ensure that we reuse symbols for out parameters so
2756 // that cse works on their addresses
2757 aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n})
2758 return s.newValue1A(ssa.OpAddr, t, aux, s.sp), false
2760 s.Unimplementedf("variable address class %v not implemented", classnames[n.Class])
2764 // indirect off a register
2765 // used for storing/loading arguments/returns to/from callees
2766 if int(n.Reg) != Thearch.REGSP {
2767 s.Unimplementedf("OINDREG of non-SP register %s in addr: %v", obj.Rconv(int(n.Reg)), n)
// Outargs-section address: callee calls clobber it, hence volatile=true.
2770 return s.entryNewValue1I(ssa.OpOffPtr, t, n.Xoffset, s.sp), true
// OINDEX: slice indexing uses the runtime slice header...
2772 if n.Left.Type.IsSlice() {
2774 i := s.expr(n.Right)
2775 i = s.extendIndex(i, Panicindex)
2776 len := s.newValue1(ssa.OpSliceLen, Types[TINT], a)
2778 s.boundsCheck(i, len)
2780 p := s.newValue1(ssa.OpSlicePtr, t, a)
2781 return s.newValue2(ssa.OpPtrIndex, t, p, i), false
// ...while array indexing takes the array's own address and a
// compile-time-known length for the bounds check.
2783 a, isVolatile := s.addr(n.Left, bounded)
2784 i := s.expr(n.Right)
2785 i = s.extendIndex(i, Panicindex)
2786 len := s.constInt(Types[TINT], n.Left.Type.NumElem())
2788 s.boundsCheck(i, len)
2790 return s.newValue2(ssa.OpPtrIndex, Ptrto(n.Left.Type.Elem()), a, i), isVolatile
2793 return s.exprPtr(n.Left, bounded, n.Lineno), false
// Field selection: base address plus the field's Xoffset.
2795 p, isVolatile := s.addr(n.Left, bounded)
2796 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p), isVolatile
2798 p := s.exprPtr(n.Left, bounded, n.Lineno)
2799 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p), false
// Closure variable: offset from the closure pointer.
2801 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset,
2802 s.entryNewValue0(ssa.OpGetClosurePtr, Ptrto(Types[TUINT8]))), false
2804 addr, isVolatile := s.addr(n.Left, bounded)
2805 return s.newValue1(ssa.OpCopy, t, addr), isVolatile // ensure that addr has the right type
2806 case OCALLFUNC, OCALLINTER, OCALLMETH:
// Result lives in the outargs section, so it is volatile.
2807 return s.call(n, callNormal), true
2810 s.Unimplementedf("unhandled addr %v", n.Op)
2815 // canSSA reports whether n is SSA-able.
2816 // n must be an ONAME (or an ODOT sequence with an ONAME base).
2817 func (s *state) canSSA(n *Node) bool {
// With optimization disabled (-N), keep everything in memory.
2818 if Debug['N'] != 0 {
2830 if n.isParamHeapCopy() {
2833 if n.Class == PAUTOHEAP {
2834 Fatalf("canSSA of PAUTOHEAP %v", n)
2841 // TODO: handle this case? Named return values must be
2842 // in memory so that the deferred function can see them.
2843 // Maybe do: if !strings.HasPrefix(n.String(), "~") { return false }
2846 if s.cgoUnsafeArgs {
2847 // Cgo effectively takes the address of all result args,
2848 // but the compiler can't see that.
2852 if n.Class == PPARAM && n.String() == ".this" {
2853 // wrappers generated by genwrapper need to update
2854 // the .this pointer in place.
2855 // TODO: treat as a PPARMOUT?
// Finally, the variable's type itself must be SSA-able.
2858 return canSSAType(n.Type)
2859 // TODO: try to make more variables SSAable?
2862 // canSSA reports whether variables of type t are SSA-able.
2863 func canSSAType(t *Type) bool {
2865 if t.Width > int64(4*Widthptr) {
2866 // 4*Widthptr is an arbitrary constant. We want it
2867 // to be at least 3*Widthptr so slices can be registerized.
2868 // Too big and we'll introduce too much register pressure.
2873 // We can't do arrays because dynamic indexing is
2874 // not supported on SSA variables.
2875 // TODO: maybe allow if length is <=1? All indexes
2876 // are constant? Might be good for the arrays
2877 // introduced by the compiler for variadic functions.
// Structs: bounded field count, and every field must itself be SSA-able.
2880 if t.NumFields() > ssa.MaxStruct {
2883 for _, t1 := range t.Fields().Slice() {
2884 if !canSSAType(t1.Type) {
2894 // exprPtr evaluates n to a pointer and nil-checks it.
2895 func (s *state) exprPtr(n *Node, bounded bool, lineno int32) *ssa.Value {
// Skip the nil check when the caller guarantees the pointer is
// non-nil (bounded) or the front end has already proven it (NonNil).
2897 if bounded || n.NonNil {
2898 if s.f.Config.Debug_checknil() && lineno > 1 {
2899 s.f.Config.Warnl(lineno, "removed nil check")
2907 // nilCheck generates nil pointer checking code.
2908 // Starts a new block on return, unless nil checks are disabled.
2909 // Used only for automatically inserted nil checks,
2910 // not for user code like 'x != nil'.
2911 func (s *state) nilCheck(ptr *ssa.Value) {
2912 if Disable_checknil != 0 {
// The NilCheck op faults at runtime if ptr is nil; the current
// block becomes a Check block feeding a fresh plain block.
2915 chk := s.newValue2(ssa.OpNilCheck, ssa.TypeVoid, ptr, s.mem())
2917 b.Kind = ssa.BlockCheck
2919 bNext := s.f.NewBlock(ssa.BlockPlain)
2924 // boundsCheck generates bounds checking code. Checks if 0 <= idx < len, branches to exit if not.
2925 // Starts a new block on return.
2926 // idx is already converted to full int width.
2927 func (s *state) boundsCheck(idx, len *ssa.Value) {
// -B disables bounds checking entirely.
2928 if Debug['B'] != 0 {
2933 cmp := s.newValue2(ssa.OpIsInBounds, Types[TBOOL], idx, len)
2934 s.check(cmp, Panicindex)
2937 // sliceBoundsCheck generates slice bounds checking code. Checks if 0 <= idx <= len, branches to exit if not.
2938 // Starts a new block on return.
2939 // idx and len are already converted to full int width.
2940 func (s *state) sliceBoundsCheck(idx, len *ssa.Value) {
// -B disables bounds checking entirely.
2941 if Debug['B'] != 0 {
// Note: IsSliceInBounds allows idx == len (inclusive upper bound).
2946 cmp := s.newValue2(ssa.OpIsSliceInBounds, Types[TBOOL], idx, len)
2947 s.check(cmp, panicslice)
2950 // If cmp (a bool) is false, panic using the given function.
2951 func (s *state) check(cmp *ssa.Value, fn *Node) {
2953 b.Kind = ssa.BlockIf
// The non-panicking path is the expected one.
2955 b.Likely = ssa.BranchLikely
2956 bNext := s.f.NewBlock(ssa.BlockPlain)
2957 line := s.peekLine()
// Panic blocks are deduplicated per (function, line) so repeated
// checks at the same site share one call.
2958 bPanic := s.panics[funcLine{fn, line}]
2960 bPanic = s.f.NewBlock(ssa.BlockPlain)
2961 s.panics[funcLine{fn, line}] = bPanic
2962 s.startBlock(bPanic)
2963 // The panic call takes/returns memory to ensure that the right
2964 // memory state is observed if the panic happens.
2965 s.rtcall(fn, false, nil)
2972 // rtcall issues a call to the given runtime function fn with the listed args.
2973 // Returns a slice of results of the given result types.
2974 // The call is added to the end of the current block.
2975 // If returns is false, the block is marked as an exit block.
2976 // If returns is true, the block is marked as a call block. A new block
2977 // is started to load the return values.
2978 func (s *state) rtcall(fn *Node, returns bool, results []*Type, args ...*ssa.Value) []*ssa.Value {
2979 // Write args to the stack
// Args are stored at aligned offsets in the outargs section.
2980 off := Ctxt.FixedFrameSize()
2981 for _, arg := range args {
2983 off = Rnd(off, t.Alignment())
2986 ptr = s.newValue1I(ssa.OpOffPtr, t.PtrTo(), off, s.sp)
2989 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, size, ptr, arg, s.mem())
2992 off = Rnd(off, int64(Widthptr))
// Issue the static call and thread the memory state through it.
2995 call := s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, fn.Sym, s.mem())
2996 s.vars[&memVar] = call
// Non-returning (panic) calls terminate the block.
3001 b.Kind = ssa.BlockExit
3004 if len(results) > 0 {
3005 Fatalf("panic call can't have results")
3009 b.Kind = ssa.BlockCall
3011 bNext := s.f.NewBlock(ssa.BlockPlain)
3015 // Keep input pointer args live across calls. This is a bandaid until 1.8.
3016 for _, n := range s.ptrargs {
3017 s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, ssa.TypeMem, s.variable(n, n.Type), s.mem())
// Load results back from the stack, at offsets continuing past the args.
3021 res := make([]*ssa.Value, len(results))
3022 for i, t := range results {
3023 off = Rnd(off, t.Alignment())
3026 ptr = s.newValue1I(ssa.OpOffPtr, Ptrto(t), off, s.sp)
3028 res[i] = s.newValue2(ssa.OpLoad, t, ptr, s.mem())
3031 off = Rnd(off, int64(Widthptr))
3033 // Remember how much callee stack space we needed.
3039 // insertWBmove inserts the assignment *left = *right including a write barrier.
3040 // t is the type being assigned.
3041 func (s *state) insertWBmove(t *Type, left, right *ssa.Value, line int32, rightIsVolatile bool) {
3042 // if writeBarrier.enabled {
3043 // typedmemmove(&t, left, right)
3049 s.Fatalf("write barrier prohibited")
// Record the first line at which a write barrier was emitted (for -d=wb).
3051 if s.WBLineno == 0 {
3052 s.WBLineno = left.Line
3054 bThen := s.f.NewBlock(ssa.BlockPlain)
3055 bElse := s.f.NewBlock(ssa.BlockPlain)
3056 bEnd := s.f.NewBlock(ssa.BlockPlain)
// Test the runtime's writeBarrier flag.
3058 aux := &ssa.ExternSymbol{Typ: Types[TBOOL], Sym: syslook("writeBarrier").Sym}
3059 flagaddr := s.newValue1A(ssa.OpAddr, Ptrto(Types[TUINT32]), aux, s.sb)
3060 // Load word, test word, avoiding partial register write from load byte.
3061 flag := s.newValue2(ssa.OpLoad, Types[TUINT32], flagaddr, s.mem())
3062 flag = s.newValue2(ssa.OpNeq32, Types[TBOOL], flag, s.constInt32(Types[TUINT32], 0))
3064 b.Kind = ssa.BlockIf
// Barrier-enabled is the rare case.
3065 b.Likely = ssa.BranchUnlikely
3072 if !rightIsVolatile {
3073 // Issue typedmemmove call.
3074 taddr := s.newValue1A(ssa.OpAddr, Types[TUINTPTR], &ssa.ExternSymbol{Typ: Types[TUINTPTR], Sym: typenamesym(t)}, s.sb)
3075 s.rtcall(typedmemmove, true, nil, taddr, left, right)
3077 // Copy to temp location if the source is volatile (will be clobbered by
3078 // a function call). Marshaling the args to typedmemmove might clobber the
3079 // value we're trying to move.
3081 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, tmp, s.mem())
3082 tmpaddr, _ := s.addr(tmp, true)
3083 s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, t.Size(), tmpaddr, right, s.mem())
3084 // Issue typedmemmove call.
3085 taddr := s.newValue1A(ssa.OpAddr, Types[TUINTPTR], &ssa.ExternSymbol{Typ: Types[TUINTPTR], Sym: typenamesym(t)}, s.sb)
3086 s.rtcall(typedmemmove, true, nil, taddr, left, tmpaddr)
3087 // Mark temp as dead.
3088 s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, ssa.TypeMem, tmp, s.mem())
3090 s.endBlock().AddEdgeTo(bEnd)
// Barrier-disabled path: plain memory-to-memory move.
3093 s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, t.Size(), left, right, s.mem())
3094 s.endBlock().AddEdgeTo(bEnd)
// -d=wb reporting.
3099 Warnl(line, "write barrier")
3103 // insertWBstore inserts the assignment *left = right including a write barrier.
3104 // t is the type being assigned.
3105 func (s *state) insertWBstore(t *Type, left, right *ssa.Value, line int32, skip skipMask) {
3106 // store scalar fields
3107 // if writeBarrier.enabled {
3108 // writebarrierptr for pointer fields
3110 // store pointer fields
3114 s.Fatalf("write barrier prohibited")
// Record the first line at which a write barrier was emitted (for -d=wb).
3116 if s.WBLineno == 0 {
3117 s.WBLineno = left.Line
// Scalar parts never need a barrier; store them unconditionally first.
3119 s.storeTypeScalars(t, left, right, skip)
3121 bThen := s.f.NewBlock(ssa.BlockPlain)
3122 bElse := s.f.NewBlock(ssa.BlockPlain)
3123 bEnd := s.f.NewBlock(ssa.BlockPlain)
// Test the runtime's writeBarrier flag.
3125 aux := &ssa.ExternSymbol{Typ: Types[TBOOL], Sym: syslook("writeBarrier").Sym}
3126 flagaddr := s.newValue1A(ssa.OpAddr, Ptrto(Types[TUINT32]), aux, s.sb)
3127 // Load word, test word, avoiding partial register write from load byte.
3128 flag := s.newValue2(ssa.OpLoad, Types[TUINT32], flagaddr, s.mem())
3129 flag = s.newValue2(ssa.OpNeq32, Types[TBOOL], flag, s.constInt32(Types[TUINT32], 0))
3131 b.Kind = ssa.BlockIf
// Barrier-enabled is the rare case.
3132 b.Likely = ssa.BranchUnlikely
3137 // Issue write barriers for pointer writes.
3139 s.storeTypePtrsWB(t, left, right)
3140 s.endBlock().AddEdgeTo(bEnd)
3142 // Issue regular stores for pointer writes.
3144 s.storeTypePtrs(t, left, right)
3145 s.endBlock().AddEdgeTo(bEnd)
// -d=wb reporting.
3150 Warnl(line, "write barrier")
3154 // do *left = right for all scalar (non-pointer) parts of t.
3155 func (s *state) storeTypeScalars(t *Type, left, right *ssa.Value, skip skipMask) {
3157 case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex():
// Purely scalar type: one store covers everything.
3158 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, t.Size(), left, right, s.mem())
3159 case t.IsPtrShaped():
3160 // no scalar fields.
// String: the len word (at offset IntSize) is scalar; the ptr word is not.
3162 if skip&skipLen != 0 {
3165 len := s.newValue1(ssa.OpStringLen, Types[TINT], right)
3166 lenAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TINT]), s.config.IntSize, left)
3167 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenAddr, len, s.mem())
// Slice: len and cap words are scalar, each individually skippable.
3169 if skip&skipLen == 0 {
3170 len := s.newValue1(ssa.OpSliceLen, Types[TINT], right)
3171 lenAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TINT]), s.config.IntSize, left)
3172 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenAddr, len, s.mem())
3174 if skip&skipCap == 0 {
3175 cap := s.newValue1(ssa.OpSliceCap, Types[TINT], right)
3176 capAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TINT]), 2*s.config.IntSize, left)
3177 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, capAddr, cap, s.mem())
3179 case t.IsInterface():
3180 // itab field doesn't need a write barrier (even though it is a pointer).
3181 itab := s.newValue1(ssa.OpITab, Ptrto(Types[TUINT8]), right)
3182 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, left, itab, s.mem())
// Struct: recurse into each field at its offset. skip does not
// propagate to fields (passed as 0).
3185 for i := 0; i < n; i++ {
3186 ft := t.FieldType(i)
3187 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
3188 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
3189 s.storeTypeScalars(ft.(*Type), addr, val, 0)
3192 s.Fatalf("bad write barrier type %s", t)
3196 // do *left = right for all pointer parts of t.
3197 func (s *state) storeTypePtrs(t *Type, left, right *ssa.Value) {
3199 case t.IsPtrShaped():
3200 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, right, s.mem())
// String/slice: only the data pointer word (at offset 0) is a pointer.
3202 ptr := s.newValue1(ssa.OpStringPtr, Ptrto(Types[TUINT8]), right)
3203 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, ptr, s.mem())
3205 ptr := s.newValue1(ssa.OpSlicePtr, Ptrto(Types[TUINT8]), right)
3206 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, ptr, s.mem())
3207 case t.IsInterface():
3208 // itab field is treated as a scalar.
3209 idata := s.newValue1(ssa.OpIData, Ptrto(Types[TUINT8]), right)
3210 idataAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TUINT8]), s.config.PtrSize, left)
3211 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, idataAddr, idata, s.mem())
// Struct: recurse into pointer-bearing fields only.
3214 for i := 0; i < n; i++ {
3215 ft := t.FieldType(i)
3216 if !haspointers(ft.(*Type)) {
3219 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
3220 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
3221 s.storeTypePtrs(ft.(*Type), addr, val)
3224 s.Fatalf("bad write barrier type %s", t)
3228 // do *left = right with a write barrier for all pointer parts of t.
3229 func (s *state) storeTypePtrsWB(t *Type, left, right *ssa.Value) {
// Mirrors storeTypePtrs, but each pointer store goes through the
// runtime's writebarrierptr instead of a plain Store.
3231 case t.IsPtrShaped():
3232 s.rtcall(writebarrierptr, true, nil, left, right)
3234 ptr := s.newValue1(ssa.OpStringPtr, Ptrto(Types[TUINT8]), right)
3235 s.rtcall(writebarrierptr, true, nil, left, ptr)
3237 ptr := s.newValue1(ssa.OpSlicePtr, Ptrto(Types[TUINT8]), right)
3238 s.rtcall(writebarrierptr, true, nil, left, ptr)
3239 case t.IsInterface():
// Only the data word is barriered; the itab word is scalar-like.
3240 idata := s.newValue1(ssa.OpIData, Ptrto(Types[TUINT8]), right)
3241 idataAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TUINT8]), s.config.PtrSize, left)
3242 s.rtcall(writebarrierptr, true, nil, idataAddr, idata)
3245 for i := 0; i < n; i++ {
3246 ft := t.FieldType(i)
3247 if !haspointers(ft.(*Type)) {
3250 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
3251 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
3252 s.storeTypePtrsWB(ft.(*Type), addr, val)
3255 s.Fatalf("bad write barrier type %s", t)
3259 // slice computes the slice v[i:j:k] and returns ptr, len, and cap of result.
3260 // i,j,k may be nil, in which case they are set to their default value.
3261 // t is a slice, ptr to array, or string type.
3262 func (s *state) slice(t *Type, v, i, j, k *ssa.Value) (p, l, c *ssa.Value) {
3268 zero := s.constInt(Types[TINT], 0)
// Extract ptr/len/cap from the operand, depending on its kind
// (the switch labels are elided in this view).
3272 ptrtype = Ptrto(elemtype)
3273 ptr = s.newValue1(ssa.OpSlicePtr, ptrtype, v)
3274 len = s.newValue1(ssa.OpSliceLen, Types[TINT], v)
3275 cap = s.newValue1(ssa.OpSliceCap, Types[TINT], v)
// Strings have bytes as elements and no separate cap.
3277 elemtype = Types[TUINT8]
3278 ptrtype = Ptrto(elemtype)
3279 ptr = s.newValue1(ssa.OpStringPtr, ptrtype, v)
3280 len = s.newValue1(ssa.OpStringLen, Types[TINT], v)
// Pointer-to-array: len/cap are the array's compile-time length.
3283 if !t.Elem().IsArray() {
3284 s.Fatalf("bad ptr to array in slice %v\n", t)
3286 elemtype = t.Elem().Elem()
3287 ptrtype = Ptrto(elemtype)
3290 len = s.constInt(Types[TINT], t.Elem().NumElem())
3293 s.Fatalf("bad type in slice %v\n", t)
3296 // Set default values
3307 // Panic if slice indices are not in bounds.
3308 s.sliceBoundsCheck(i, j)
3310 s.sliceBoundsCheck(j, k)
3313 s.sliceBoundsCheck(k, cap)
3316 // Generate the following code assuming that indexes are in bounds.
3317 // The conditional is to make sure that we don't generate a slice
3318 // that points to the next object in memory.
3321 // delta = i*elemsize
3326 // result = (SliceMake rptr rlen rcap)
3327 subOp := s.ssaOp(OSUB, Types[TINT])
3328 eqOp := s.ssaOp(OEQ, Types[TINT])
3329 mulOp := s.ssaOp(OMUL, Types[TINT])
3330 rlen := s.newValue2(subOp, Types[TINT], j, i)
3334 // Capacity of the result is unimportant. However, we use
3335 // rcap to test if we've generated a zero-length slice.
3336 // Use length of strings for that.
3341 rcap = s.newValue2(subOp, Types[TINT], k, i)
3344 // delta = # of elements to offset pointer by.
3345 s.vars[&deltaVar] = i
3347 // Generate code to set delta=0 if the resulting capacity is zero.
// When i is a known zero constant, delta is already zero and the
// conditional can be skipped entirely.
3348 if !((i.Op == ssa.OpConst64 && i.AuxInt == 0) ||
3349 (i.Op == ssa.OpConst32 && int32(i.AuxInt) == 0)) {
3350 cmp := s.newValue2(eqOp, Types[TBOOL], rcap, zero)
3353 b.Kind = ssa.BlockIf
3354 b.Likely = ssa.BranchUnlikely
3357 // Generate block which zeros the delta variable.
3358 nz := s.f.NewBlock(ssa.BlockPlain)
3361 s.vars[&deltaVar] = zero
3365 merge := s.f.NewBlock(ssa.BlockPlain)
3370 // TODO: use conditional moves somehow?
3373 // Compute rptr = ptr + delta * elemsize
3374 rptr := s.newValue2(ssa.OpAddPtr, ptrtype, ptr, s.newValue2(mulOp, Types[TINT], s.variable(&deltaVar, Types[TINT]), s.constInt(Types[TINT], elemtype.Width)))
3375 delete(s.vars, &deltaVar)
3376 return rptr, rlen, rcap
// u2fcvtTab parameterizes the unsigned-int-to-float lowering over the
// integer width and target float width: the ops to use for the sign-bit
// test, the conversion, the mask/shift/or/add steps, and a constructor
// for the constant one at the integer width.
3379 type u2fcvtTab struct {
3380 geq, cvt2F, and, rsh, or, add ssa.Op
3381 one func(*state, ssa.Type, int64) *ssa.Value
3384 var u64_f64 u2fcvtTab = u2fcvtTab{
3386 cvt2F: ssa.OpCvt64to64F,
3388 rsh: ssa.OpRsh64Ux64,
3391 one: (*state).constInt64,
3394 var u64_f32 u2fcvtTab = u2fcvtTab{
3396 cvt2F: ssa.OpCvt64to32F,
3398 rsh: ssa.OpRsh64Ux64,
3401 one: (*state).constInt64,
3404 // Excess generality on a machine with 64-bit integer registers.
3405 // Not used on AMD64.
3406 var u32_f32 u2fcvtTab = u2fcvtTab{
3408 cvt2F: ssa.OpCvt32to32F,
3410 rsh: ssa.OpRsh32Ux32,
3413 one: func(s *state, t ssa.Type, x int64) *ssa.Value {
3414 return s.constInt32(t, int32(x))
3418 func (s *state) uint64Tofloat64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
3419 return s.uintTofloat(&u64_f64, n, x, ft, tt)
3422 func (s *state) uint64Tofloat32(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
3423 return s.uintTofloat(&u64_f32, n, x, ft, tt)
3426 func (s *state) uintTofloat(cvttab *u2fcvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
3428 // result = (floatY) x
3430 // y = uintX(x) ; y = x & 1
3431 // z = uintX(x) ; z = z >> 1
3434 // result = floatY(z)
3435 // result = result + result
3438 // Code borrowed from old code generator.
3439 // What's going on: large 64-bit "unsigned" looks like
3440 // negative number to hardware's integer-to-float
3441 // conversion. However, because the mantissa is only
3442 // 63 bits, we don't need the LSB, so instead we do an
3443 // unsigned right shift (divide by two), convert, and
3444 // double. However, before we do that, we need to be
3445 // sure that we do not lose a "1" if that made the
3446 // difference in the resulting rounding. Therefore, we
3447 // preserve it, and OR (not ADD) it back in. The case
3448 // that matters is when the eleven discarded bits are
3449 // equal to 10000000001; that rounds up, and the 1 cannot
3450 // be lost else it would round down if the LSB of the
3451 // candidate mantissa is 0.
3452 cmp := s.newValue2(cvttab.geq, Types[TBOOL], x, s.zeroVal(ft))
3454 b.Kind = ssa.BlockIf
3456 b.Likely = ssa.BranchLikely
3458 bThen := s.f.NewBlock(ssa.BlockPlain)
3459 bElse := s.f.NewBlock(ssa.BlockPlain)
3460 bAfter := s.f.NewBlock(ssa.BlockPlain)
3464 a0 := s.newValue1(cvttab.cvt2F, tt, x)
3467 bThen.AddEdgeTo(bAfter)
3471 one := cvttab.one(s, ft, 1)
3472 y := s.newValue2(cvttab.and, ft, x, one)
3473 z := s.newValue2(cvttab.rsh, ft, x, one)
3474 z = s.newValue2(cvttab.or, ft, z, y)
3475 a := s.newValue1(cvttab.cvt2F, tt, z)
3476 a1 := s.newValue2(cvttab.add, tt, a, a)
3479 bElse.AddEdgeTo(bAfter)
3481 s.startBlock(bAfter)
3482 return s.variable(n, n.Type)
3485 // referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
3486 func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value {
3487 if !n.Left.Type.IsMap() && !n.Left.Type.IsChan() {
3488 s.Fatalf("node must be a map or a channel")
3494 // return *((*int)n)
3496 // return *(((*int)n)+1)
3499 nilValue := s.constNil(Types[TUINTPTR])
3500 cmp := s.newValue2(ssa.OpEqPtr, Types[TBOOL], x, nilValue)
3502 b.Kind = ssa.BlockIf
3504 b.Likely = ssa.BranchUnlikely
3506 bThen := s.f.NewBlock(ssa.BlockPlain)
3507 bElse := s.f.NewBlock(ssa.BlockPlain)
3508 bAfter := s.f.NewBlock(ssa.BlockPlain)
3510 // length/capacity of a nil map/chan is zero
3513 s.vars[n] = s.zeroVal(lenType)
3515 bThen.AddEdgeTo(bAfter)
3520 // length is stored in the first word for map/chan
3521 s.vars[n] = s.newValue2(ssa.OpLoad, lenType, x, s.mem())
3522 } else if n.Op == OCAP {
3523 // capacity is stored in the second word for chan
3524 sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x)
3525 s.vars[n] = s.newValue2(ssa.OpLoad, lenType, sw, s.mem())
3527 s.Fatalf("op must be OLEN or OCAP")
3530 bElse.AddEdgeTo(bAfter)
3532 s.startBlock(bAfter)
3533 return s.variable(n, lenType)
3536 type f2uCvtTab struct {
3537 ltf, cvt2U, subf ssa.Op
3538 value func(*state, ssa.Type, float64) *ssa.Value
3541 var f32_u64 f2uCvtTab = f2uCvtTab{
3543 cvt2U: ssa.OpCvt32Fto64,
3545 value: (*state).constFloat32,
3548 var f64_u64 f2uCvtTab = f2uCvtTab{
3550 cvt2U: ssa.OpCvt64Fto64,
3552 value: (*state).constFloat64,
3555 func (s *state) float32ToUint64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
3556 return s.floatToUint(&f32_u64, n, x, ft, tt)
3558 func (s *state) float64ToUint64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
3559 return s.floatToUint(&f64_u64, n, x, ft, tt)
3562 func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
3563 // if x < 9223372036854775808.0 {
3564 // result = uintY(x)
3566 // y = x - 9223372036854775808.0
3568 // result = z | -9223372036854775808
3570 twoToThe63 := cvttab.value(s, ft, 9223372036854775808.0)
3571 cmp := s.newValue2(cvttab.ltf, Types[TBOOL], x, twoToThe63)
3573 b.Kind = ssa.BlockIf
3575 b.Likely = ssa.BranchLikely
3577 bThen := s.f.NewBlock(ssa.BlockPlain)
3578 bElse := s.f.NewBlock(ssa.BlockPlain)
3579 bAfter := s.f.NewBlock(ssa.BlockPlain)
3583 a0 := s.newValue1(cvttab.cvt2U, tt, x)
3586 bThen.AddEdgeTo(bAfter)
3590 y := s.newValue2(cvttab.subf, ft, x, twoToThe63)
3591 y = s.newValue1(cvttab.cvt2U, tt, y)
3592 z := s.constInt64(tt, -9223372036854775808)
3593 a1 := s.newValue2(ssa.OpOr64, tt, y, z)
3596 bElse.AddEdgeTo(bAfter)
3598 s.startBlock(bAfter)
3599 return s.variable(n, n.Type)
3602 // ifaceType returns the value for the word containing the type.
3603 // n is the node for the interface expression.
3604 // v is the corresponding value.
3605 func (s *state) ifaceType(n *Node, v *ssa.Value) *ssa.Value {
3606 byteptr := Ptrto(Types[TUINT8]) // type used in runtime prototypes for runtime type (*byte)
3608 if n.Type.IsEmptyInterface() {
3609 // Have *eface. The type is the first word in the struct.
3610 return s.newValue1(ssa.OpITab, byteptr, v)
3614 // The first word in the struct is the *itab.
3615 // If the *itab is nil, return 0.
3616 // Otherwise, the second word in the *itab is the type.
3618 tab := s.newValue1(ssa.OpITab, byteptr, v)
3619 s.vars[&typVar] = tab
3620 isnonnil := s.newValue2(ssa.OpNeqPtr, Types[TBOOL], tab, s.constNil(byteptr))
3622 b.Kind = ssa.BlockIf
3623 b.SetControl(isnonnil)
3624 b.Likely = ssa.BranchLikely
3626 bLoad := s.f.NewBlock(ssa.BlockPlain)
3627 bEnd := s.f.NewBlock(ssa.BlockPlain)
3631 bLoad.AddEdgeTo(bEnd)
3634 off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), tab)
3635 s.vars[&typVar] = s.newValue2(ssa.OpLoad, byteptr, off, s.mem())
3639 typ := s.variable(&typVar, byteptr)
3640 delete(s.vars, &typVar)
3644 // dottype generates SSA for a type assertion node.
3645 // commaok indicates whether to panic or return a bool.
3646 // If commaok is false, resok will be nil.
3647 func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
3648 iface := s.expr(n.Left)
3649 typ := s.ifaceType(n.Left, iface) // actual concrete type
3650 target := s.expr(typename(n.Type)) // target type
3651 if !isdirectiface(n.Type) {
3652 // walk rewrites ODOTTYPE/OAS2DOTTYPE into runtime calls except for this case.
3653 Fatalf("dottype needs a direct iface type %s", n.Type)
3656 if Debug_typeassert > 0 {
3657 Warnl(n.Lineno, "type assertion inlined")
3660 // TODO: If we have a nonempty interface and its itab field is nil,
3661 // then this test is redundant and ifaceType should just branch directly to bFail.
3662 cond := s.newValue2(ssa.OpEqPtr, Types[TBOOL], typ, target)
3664 b.Kind = ssa.BlockIf
3666 b.Likely = ssa.BranchLikely
3668 byteptr := Ptrto(Types[TUINT8])
3670 bOk := s.f.NewBlock(ssa.BlockPlain)
3671 bFail := s.f.NewBlock(ssa.BlockPlain)
3676 // on failure, panic by calling panicdottype
3678 taddr := s.newValue1A(ssa.OpAddr, byteptr, &ssa.ExternSymbol{Typ: byteptr, Sym: typenamesym(n.Left.Type)}, s.sb)
3679 s.rtcall(panicdottype, false, nil, typ, target, taddr)
3681 // on success, return idata field
3683 return s.newValue1(ssa.OpIData, n.Type, iface), nil
3686 // commaok is the more complicated case because we have
3687 // a control flow merge point.
3688 bEnd := s.f.NewBlock(ssa.BlockPlain)
3690 // type assertion succeeded
3692 s.vars[&idataVar] = s.newValue1(ssa.OpIData, n.Type, iface)
3693 s.vars[&okVar] = s.constBool(true)
3697 // type assertion failed
3699 s.vars[&idataVar] = s.constNil(byteptr)
3700 s.vars[&okVar] = s.constBool(false)
3702 bFail.AddEdgeTo(bEnd)
3706 res = s.variable(&idataVar, byteptr)
3707 resok = s.variable(&okVar, Types[TBOOL])
3708 delete(s.vars, &idataVar)
3709 delete(s.vars, &okVar)
3713 // checkgoto checks that a goto from from to to does not
3714 // jump into a block or jump over variable declarations.
3715 // It is a copy of checkgoto in the pre-SSA backend,
3716 // modified only for line number handling.
3717 // TODO: document how this works and why it is designed the way it is.
3718 func (s *state) checkgoto(from *Node, to *Node) {
3719 if from.Sym == to.Sym {
3724 for fs := from.Sym; fs != nil; fs = fs.Link {
3728 for fs := to.Sym; fs != nil; fs = fs.Link {
3732 for ; nf > nt; nf-- {
3736 // decide what to complain about.
3737 // prefer to complain about 'into block' over declarations,
3738 // so scan backward to find most recent block or else dcl.
3743 for ; nt > nf; nt-- {
3762 lno := from.Left.Lineno
3764 yyerrorl(lno, "goto %v jumps into block starting at %v", from.Left.Sym, linestr(block.Lastlineno))
3766 yyerrorl(lno, "goto %v jumps over declaration of %v at %v", from.Left.Sym, dcl, linestr(dcl.Lastlineno))
3771 // variable returns the value of a variable at the current location.
3772 func (s *state) variable(name *Node, t ssa.Type) *ssa.Value {
3775 v = s.newValue0A(ssa.OpFwdRef, t, name)
3776 s.fwdRefs = append(s.fwdRefs, v)
3778 s.addNamedValue(name, v)
3783 func (s *state) mem() *ssa.Value {
3784 return s.variable(&memVar, ssa.TypeMem)
3787 func (s *state) linkForwardReferences(dm *sparseDefState) {
3789 // Build SSA graph. Each variable on its first use in a basic block
3790 // leaves a FwdRef in that block representing the incoming value
3791 // of that variable. This function links that ref up with possible definitions,
3792 // inserting Phi values as needed. This is essentially the algorithm
3793 // described by Braun, Buchwald, Hack, Leißa, Mallon, and Zwinkau:
3794 // http://pp.info.uni-karlsruhe.de/uploads/publikationen/braun13cc.pdf
3796 // - We use FwdRef nodes to postpone phi building until the CFG is
3797 // completely built. That way we can avoid the notion of "sealed"
3799 // - Phi optimization is a separate pass (in ../ssa/phielim.go).
3800 for len(s.fwdRefs) > 0 {
3801 v := s.fwdRefs[len(s.fwdRefs)-1]
3802 s.fwdRefs = s.fwdRefs[:len(s.fwdRefs)-1]
3803 s.resolveFwdRef(v, dm)
3807 // resolveFwdRef modifies v to be the variable's value at the start of its block.
3808 // v must be a FwdRef op.
3809 func (s *state) resolveFwdRef(v *ssa.Value, dm *sparseDefState) {
3811 name := v.Aux.(*Node)
3814 // Live variable at start of function.
3816 if strings.HasPrefix(name.Sym.Name, "autotmp_") {
3817 // It's likely that this is an uninitialized variable in the entry block.
3818 s.Fatalf("Treating auto as if it were arg, func %s, node %v, value %v", b.Func.Name, name, v)
3824 // Not SSAable. Load it.
3825 addr := s.decladdrs[name]
3827 // TODO: closure args reach here.
3828 s.Unimplementedf("unhandled closure arg %s at entry to function %s", name, b.Func.Name)
3830 if _, ok := addr.Aux.(*ssa.ArgSymbol); !ok {
3831 s.Fatalf("variable live at start of function %s is not an argument %s", b.Func.Name, name)
3834 v.AddArgs(addr, s.startmem)
3837 if len(b.Preds) == 0 {
3838 // This block is dead; we have no predecessors and we're not the entry block.
3839 // It doesn't matter what we use here as long as it is well-formed.
3840 v.Op = ssa.OpUnknown
3843 // Find variable value on each predecessor.
3844 var argstore [4]*ssa.Value
3845 args := argstore[:0]
3846 for _, e := range b.Preds {
3848 p = dm.FindBetterDefiningBlock(name, p) // try sparse improvement on p
3849 args = append(args, s.lookupVarOutgoing(p, v.Type, name, v.Line))
3852 // Decide if we need a phi or not. We need a phi if there
3853 // are two different args (which are both not v).
3855 for _, a := range args {
3857 continue // self-reference
3860 continue // already have this witness
3863 // two witnesses, need a phi value
3868 w = a // save witness
3871 s.Fatalf("no witness for reachable phi %s", v)
3873 // One witness. Make v a copy of w.
3878 // lookupVarOutgoing finds the variable's value at the end of block b.
3879 func (s *state) lookupVarOutgoing(b *ssa.Block, t ssa.Type, name *Node, line int32) *ssa.Value {
3881 if v, ok := s.defvars[b.ID][name]; ok {
3884 // The variable is not defined by b and we haven't looked it up yet.
3885 // If b has exactly one predecessor, loop to look it up there.
3886 // Otherwise, give up and insert a new FwdRef and resolve it later.
3887 if len(b.Preds) != 1 {
3890 b = b.Preds[0].Block()
3892 // Generate a FwdRef for the variable and return that.
3893 v := b.NewValue0A(line, ssa.OpFwdRef, t, name)
3894 s.fwdRefs = append(s.fwdRefs, v)
3895 s.defvars[b.ID][name] = v
3896 s.addNamedValue(name, v)
3900 func (s *state) addNamedValue(n *Node, v *ssa.Value) {
3901 if n.Class == Pxxx {
3902 // Don't track our dummy nodes (&memVar etc.).
3905 if strings.HasPrefix(n.Sym.Name, "autotmp_") {
3906 // Don't track autotmp_ variables.
3909 if n.Class == PPARAMOUT {
3910 // Don't track named output values. This prevents return values
3911 // from being assigned too early. See #14591 and #14762. TODO: allow this.
3914 if n.Class == PAUTO && n.Xoffset != 0 {
3915 s.Fatalf("AUTO var with offset %s %d", n, n.Xoffset)
3917 loc := ssa.LocalSlot{N: n, Type: n.Type, Off: 0}
3918 values, ok := s.f.NamedValues[loc]
3920 s.f.Names = append(s.f.Names, loc)
3922 s.f.NamedValues[loc] = append(values, v)
3925 // Branch is an unresolved branch.
3926 type Branch struct {
3927 P *obj.Prog // branch instruction
3928 B *ssa.Block // target
3931 // SSAGenState contains state needed during Prog generation.
3932 type SSAGenState struct {
3933 // Branches remembers all the branch instructions we've seen
3934 // and where they would like to go.
3937 // bstart remembers where each block starts (indexed by block ID)
3941 // Pc returns the current Prog.
3942 func (s *SSAGenState) Pc() *obj.Prog {
3946 // SetLineno sets the current source line number.
3947 func (s *SSAGenState) SetLineno(l int32) {
3951 // genssa appends entries to ptxt for each instruction in f.
3952 // gcargs and gclocals are filled in with pointer maps for the frame.
3953 func genssa(f *ssa.Func, ptxt *obj.Prog, gcargs, gclocals *Sym) {
3956 e := f.Config.Frontend().(*ssaExport)
3957 // We're about to emit a bunch of Progs.
3958 // Since the only way to get here is to explicitly request it,
3959 // just fail on unimplemented instead of trying to unwind our mess.
3960 e.mustImplement = true
3962 // Remember where each block starts.
3963 s.bstart = make([]*obj.Prog, f.NumBlocks())
3965 var valueProgs map[*obj.Prog]*ssa.Value
3966 var blockProgs map[*obj.Prog]*ssa.Block
3967 var logProgs = e.log
3969 valueProgs = make(map[*obj.Prog]*ssa.Value, f.NumValues())
3970 blockProgs = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
3971 f.Logf("genssa %s\n", f.Name)
3972 blockProgs[Pc] = f.Blocks[0]
3975 // Emit basic blocks
3976 for i, b := range f.Blocks {
3978 // Emit values in block
3979 Thearch.SSAMarkMoves(&s, b)
3980 for _, v := range b.Values {
3982 Thearch.SSAGenValue(&s, v)
3984 for ; x != Pc; x = x.Link {
3989 // Emit control flow instructions for block
3991 if i < len(f.Blocks)-1 && (Debug['N'] == 0 || b.Kind == ssa.BlockCall) {
3992 // If -N, leave next==nil so every block with successors
3993 // ends in a JMP (except call blocks - plive doesn't like
3994 // select{send,recv} followed by a JMP call). Helps keep
3995 // line numbers for otherwise empty blocks.
3996 next = f.Blocks[i+1]
3999 Thearch.SSAGenBlock(&s, b, next)
4001 for ; x != Pc; x = x.Link {
4008 for _, br := range s.Branches {
4009 br.P.To.Val = s.bstart[br.B.ID]
4013 for p := ptxt; p != nil; p = p.Link {
4015 if v, ok := valueProgs[p]; ok {
4017 } else if b, ok := blockProgs[p]; ok {
4020 s = " " // most value and branch strings are 2-3 characters long
4022 f.Logf("%s\t%s\n", s, p)
4024 if f.Config.HTML != nil {
4025 saved := ptxt.Ctxt.LineHist.PrintFilenameOnly
4026 ptxt.Ctxt.LineHist.PrintFilenameOnly = true
4027 var buf bytes.Buffer
4028 buf.WriteString("<code>")
4029 buf.WriteString("<dl class=\"ssa-gen\">")
4030 for p := ptxt; p != nil; p = p.Link {
4031 buf.WriteString("<dt class=\"ssa-prog-src\">")
4032 if v, ok := valueProgs[p]; ok {
4033 buf.WriteString(v.HTML())
4034 } else if b, ok := blockProgs[p]; ok {
4035 buf.WriteString(b.HTML())
4037 buf.WriteString("</dt>")
4038 buf.WriteString("<dd class=\"ssa-prog\">")
4039 buf.WriteString(html.EscapeString(p.String()))
4040 buf.WriteString("</dd>")
4041 buf.WriteString("</li>")
4043 buf.WriteString("</dl>")
4044 buf.WriteString("</code>")
4045 f.Config.HTML.WriteColumn("genssa", buf.String())
4046 ptxt.Ctxt.LineHist.PrintFilenameOnly = saved
4051 if f.StaticData != nil {
4052 for _, n := range f.StaticData.([]*Node) {
4053 if !gen_as_init(n, false) {
4054 Fatalf("non-static data marked as static: %v\n\n", n)
4059 // Allocate stack frame
4062 // Generate gc bitmaps.
4063 liveness(Curfn, ptxt, gcargs, gclocals)
4065 // Add frame prologue. Zero ambiguously live variables.
4066 Thearch.Defframe(ptxt)
4067 if Debug['f'] != 0 {
4071 // Remove leftover instrumentation from the instruction stream.
4074 f.Config.HTML.Close()
4077 // movZero generates a register indirect move with a 0 immediate and keeps track of bytes left and next offset
4078 func movZero(as obj.As, width int64, nbytes int64, offset int64, regnum int16) (nleft int64, noff int64) {
4080 // TODO: use zero register on archs that support it.
4081 p.From.Type = obj.TYPE_CONST
4083 p.To.Type = obj.TYPE_MEM
4085 p.To.Offset = offset
4087 nleft = nbytes - width
4088 return nleft, offset
4091 type FloatingEQNEJump struct {
4096 func oneFPJump(b *ssa.Block, jumps *FloatingEQNEJump, likely ssa.BranchPrediction, branches []Branch) []Branch {
4097 p := Prog(jumps.Jump)
4098 p.To.Type = obj.TYPE_BRANCH
4100 branches = append(branches, Branch{p, b.Succs[to].Block()})
4104 // liblink reorders the instruction stream as it sees fit.
4105 // Pass along what we know so liblink can make use of it.
4106 // TODO: Once we've fully switched to SSA,
4107 // make liblink leave our output alone.
4109 case ssa.BranchUnlikely:
4110 p.From.Type = obj.TYPE_CONST
4112 case ssa.BranchLikely:
4113 p.From.Type = obj.TYPE_CONST
4119 func SSAGenFPJump(s *SSAGenState, b, next *ssa.Block, jumps *[2][2]FloatingEQNEJump) {
4122 case b.Succs[0].Block():
4123 s.Branches = oneFPJump(b, &jumps[0][0], likely, s.Branches)
4124 s.Branches = oneFPJump(b, &jumps[0][1], likely, s.Branches)
4125 case b.Succs[1].Block():
4126 s.Branches = oneFPJump(b, &jumps[1][0], likely, s.Branches)
4127 s.Branches = oneFPJump(b, &jumps[1][1], likely, s.Branches)
4129 s.Branches = oneFPJump(b, &jumps[1][0], likely, s.Branches)
4130 s.Branches = oneFPJump(b, &jumps[1][1], likely, s.Branches)
4132 q.To.Type = obj.TYPE_BRANCH
4133 s.Branches = append(s.Branches, Branch{q, b.Succs[1].Block()})
4137 // AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a.
4138 func AddAux(a *obj.Addr, v *ssa.Value) {
4139 AddAux2(a, v, v.AuxInt)
4141 func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
4142 if a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR {
4143 v.Fatalf("bad AddAux addr %v", a)
4145 // add integer offset
4148 // If no additional symbol offset, we're done.
4152 // Add symbol's offset from its base register.
4153 switch sym := v.Aux.(type) {
4154 case *ssa.ExternSymbol:
4155 a.Name = obj.NAME_EXTERN
4156 switch s := sym.Sym.(type) {
4162 v.Fatalf("ExternSymbol.Sym is %T", s)
4164 case *ssa.ArgSymbol:
4165 n := sym.Node.(*Node)
4166 a.Name = obj.NAME_PARAM
4168 a.Sym = Linksym(n.Orig.Sym)
4169 a.Offset += n.Xoffset // TODO: why do I have to add this here? I don't for auto variables.
4170 case *ssa.AutoSymbol:
4171 n := sym.Node.(*Node)
4172 a.Name = obj.NAME_AUTO
4174 a.Sym = Linksym(n.Sym)
4176 v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
4180 // extendIndex extends v to a full int width.
4181 // panic using the given function if v does not fit in an int (only on 32-bit archs).
4182 func (s *state) extendIndex(v *ssa.Value, panicfn *Node) *ssa.Value {
4183 size := v.Type.Size()
4184 if size == s.config.IntSize {
4187 if size > s.config.IntSize {
4188 // truncate 64-bit indexes on 32-bit pointer archs. Test the
4189 // high word and branch to out-of-bounds failure if it is not 0.
4190 if Debug['B'] == 0 {
4191 hi := s.newValue1(ssa.OpInt64Hi, Types[TUINT32], v)
4192 cmp := s.newValue2(ssa.OpEq32, Types[TBOOL], hi, s.constInt32(Types[TUINT32], 0))
4193 s.check(cmp, panicfn)
4195 return s.newValue1(ssa.OpTrunc64to32, Types[TINT], v)
4198 // Extend value to the required size
4200 if v.Type.IsSigned() {
4201 switch 10*size + s.config.IntSize {
4203 op = ssa.OpSignExt8to32
4205 op = ssa.OpSignExt8to64
4207 op = ssa.OpSignExt16to32
4209 op = ssa.OpSignExt16to64
4211 op = ssa.OpSignExt32to64
4213 s.Fatalf("bad signed index extension %s", v.Type)
4216 switch 10*size + s.config.IntSize {
4218 op = ssa.OpZeroExt8to32
4220 op = ssa.OpZeroExt8to64
4222 op = ssa.OpZeroExt16to32
4224 op = ssa.OpZeroExt16to64
4226 op = ssa.OpZeroExt32to64
4228 s.Fatalf("bad unsigned index extension %s", v.Type)
4231 return s.newValue1(op, Types[TINT], v)
4234 // SSARegNum returns the register (in cmd/internal/obj numbering) to
4235 // which v has been allocated. Panics if v is not assigned to a
4237 // TODO: Make this panic again once it stops happening routinely.
4238 func SSARegNum(v *ssa.Value) int16 {
4239 reg := v.Block.Func.RegAlloc[v.ID]
4241 v.Unimplementedf("nil regnum for value: %s\n%s\n", v.LongString(), v.Block.Func)
4244 return Thearch.SSARegToReg[reg.(*ssa.Register).Num]
4247 // AutoVar returns a *Node and int64 representing the auto variable and offset within it
4248 // where v should be spilled.
4249 func AutoVar(v *ssa.Value) (*Node, int64) {
4250 loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot)
4251 if v.Type.Size() > loc.Type.Size() {
4252 v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type)
4254 return loc.N.(*Node), loc.Off
4257 // fieldIdx finds the index of the field referred to by the ODOT node n.
4258 func fieldIdx(n *Node) int {
4262 panic("ODOT's LHS is not a struct")
4266 for _, t1 := range t.Fields().Slice() {
4271 if t1.Offset != n.Xoffset {
4272 panic("field offset doesn't match")
4276 panic(fmt.Sprintf("can't find field in expr %s\n", n))
4278 // TODO: keep the result of this function somewhere in the ODOT Node
4279 // so we don't have to recompute it each time we need it.
4282 // ssaExport exports a bunch of compiler services for the ssa backend.
4283 type ssaExport struct {
4289 func (s *ssaExport) TypeBool() ssa.Type { return Types[TBOOL] }
4290 func (s *ssaExport) TypeInt8() ssa.Type { return Types[TINT8] }
4291 func (s *ssaExport) TypeInt16() ssa.Type { return Types[TINT16] }
4292 func (s *ssaExport) TypeInt32() ssa.Type { return Types[TINT32] }
4293 func (s *ssaExport) TypeInt64() ssa.Type { return Types[TINT64] }
4294 func (s *ssaExport) TypeUInt8() ssa.Type { return Types[TUINT8] }
4295 func (s *ssaExport) TypeUInt16() ssa.Type { return Types[TUINT16] }
4296 func (s *ssaExport) TypeUInt32() ssa.Type { return Types[TUINT32] }
4297 func (s *ssaExport) TypeUInt64() ssa.Type { return Types[TUINT64] }
4298 func (s *ssaExport) TypeFloat32() ssa.Type { return Types[TFLOAT32] }
4299 func (s *ssaExport) TypeFloat64() ssa.Type { return Types[TFLOAT64] }
4300 func (s *ssaExport) TypeInt() ssa.Type { return Types[TINT] }
4301 func (s *ssaExport) TypeUintptr() ssa.Type { return Types[TUINTPTR] }
4302 func (s *ssaExport) TypeString() ssa.Type { return Types[TSTRING] }
4303 func (s *ssaExport) TypeBytePtr() ssa.Type { return Ptrto(Types[TUINT8]) }
4305 // StringData returns a symbol (a *Sym wrapped in an interface) which
4306 // is the data component of a global string constant containing s.
4307 func (*ssaExport) StringData(s string) interface{} {
4308 // TODO: is idealstring correct? It might not matter...
4309 _, data := stringsym(s)
4310 return &ssa.ExternSymbol{Typ: idealstring, Sym: data}
4313 func (e *ssaExport) Auto(t ssa.Type) ssa.GCNode {
4314 n := temp(t.(*Type)) // Note: adds new auto to Curfn.Func.Dcl list
4315 e.mustImplement = true // This modifies the input to SSA, so we want to make sure we succeed from here!
4319 func (e *ssaExport) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
4321 ptrType := Ptrto(Types[TUINT8])
4322 lenType := Types[TINT]
4323 if n.Class == PAUTO && !n.Addrtaken {
4324 // Split this string up into two separate variables.
4325 p := e.namedAuto(n.Sym.Name+".ptr", ptrType)
4326 l := e.namedAuto(n.Sym.Name+".len", lenType)
4327 return ssa.LocalSlot{N: p, Type: ptrType, Off: 0}, ssa.LocalSlot{N: l, Type: lenType, Off: 0}
4329 // Return the two parts of the larger variable.
4330 return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off}, ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)}
4333 func (e *ssaExport) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
4335 t := Ptrto(Types[TUINT8])
4336 if n.Class == PAUTO && !n.Addrtaken {
4337 // Split this interface up into two separate variables.
4339 if n.Type.IsEmptyInterface() {
4342 c := e.namedAuto(n.Sym.Name+f, t)
4343 d := e.namedAuto(n.Sym.Name+".data", t)
4344 return ssa.LocalSlot{N: c, Type: t, Off: 0}, ssa.LocalSlot{N: d, Type: t, Off: 0}
4346 // Return the two parts of the larger variable.
4347 return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + int64(Widthptr)}
4350 func (e *ssaExport) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) {
4352 ptrType := Ptrto(name.Type.ElemType().(*Type))
4353 lenType := Types[TINT]
4354 if n.Class == PAUTO && !n.Addrtaken {
4355 // Split this slice up into three separate variables.
4356 p := e.namedAuto(n.Sym.Name+".ptr", ptrType)
4357 l := e.namedAuto(n.Sym.Name+".len", lenType)
4358 c := e.namedAuto(n.Sym.Name+".cap", lenType)
4359 return ssa.LocalSlot{N: p, Type: ptrType, Off: 0}, ssa.LocalSlot{N: l, Type: lenType, Off: 0}, ssa.LocalSlot{N: c, Type: lenType, Off: 0}
4361 // Return the three parts of the larger variable.
4362 return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off},
4363 ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)},
4364 ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(2*Widthptr)}
4367 func (e *ssaExport) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
4369 s := name.Type.Size() / 2
4376 if n.Class == PAUTO && !n.Addrtaken {
4377 // Split this complex up into two separate variables.
4378 c := e.namedAuto(n.Sym.Name+".real", t)
4379 d := e.namedAuto(n.Sym.Name+".imag", t)
4380 return ssa.LocalSlot{N: c, Type: t, Off: 0}, ssa.LocalSlot{N: d, Type: t, Off: 0}
4382 // Return the two parts of the larger variable.
4383 return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + s}
4386 func (e *ssaExport) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
4389 if name.Type.IsSigned() {
4394 if n.Class == PAUTO && !n.Addrtaken {
4395 // Split this int64 up into two separate variables.
4396 h := e.namedAuto(n.Sym.Name+".hi", t)
4397 l := e.namedAuto(n.Sym.Name+".lo", Types[TUINT32])
4398 return ssa.LocalSlot{N: h, Type: t, Off: 0}, ssa.LocalSlot{N: l, Type: Types[TUINT32], Off: 0}
4400 // Return the two parts of the larger variable.
4401 // Assuming little endian (we don't support big endian 32-bit architecture yet)
4402 return ssa.LocalSlot{N: n, Type: t, Off: name.Off + 4}, ssa.LocalSlot{N: n, Type: Types[TUINT32], Off: name.Off}
4405 func (e *ssaExport) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
4408 ft := st.FieldType(i)
4409 if n.Class == PAUTO && !n.Addrtaken {
4410 // Note: the _ field may appear several times. But
4411 // have no fear, identically-named but distinct Autos are
4412 // ok, albeit maybe confusing for a debugger.
4413 x := e.namedAuto(n.Sym.Name+"."+st.FieldName(i), ft)
4414 return ssa.LocalSlot{N: x, Type: ft, Off: 0}
4416 return ssa.LocalSlot{N: n, Type: ft, Off: name.Off + st.FieldOff(i)}
4419 // namedAuto returns a new AUTO variable with the given name and type.
4420 func (e *ssaExport) namedAuto(name string, typ ssa.Type) ssa.GCNode {
4422 s := &Sym{Name: name, Pkg: autopkg}
4423 n := Nod(ONAME, nil, nil)
4433 n.Name.Curfn = Curfn
4434 Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
4437 e.mustImplement = true
4442 func (e *ssaExport) CanSSA(t ssa.Type) bool {
4443 return canSSAType(t.(*Type))
4446 func (e *ssaExport) Line(line int32) string {
4447 return linestr(line)
4450 // Log logs a message from the compiler.
4451 func (e *ssaExport) Logf(msg string, args ...interface{}) {
4452 // If e was marked as unimplemented, anything could happen. Ignore.
4453 if e.log && !e.unimplemented {
4454 fmt.Printf(msg, args...)
4458 func (e *ssaExport) Log() bool {
4462 // Fatal reports a compiler error and exits.
4463 func (e *ssaExport) Fatalf(line int32, msg string, args ...interface{}) {
4464 // If e was marked as unimplemented, anything could happen. Ignore.
4465 if !e.unimplemented {
4467 Fatalf(msg, args...)
4471 // Unimplemented reports that the function cannot be compiled.
4472 // It will be removed once SSA work is complete.
4473 func (e *ssaExport) Unimplementedf(line int32, msg string, args ...interface{}) {
4474 if e.mustImplement {
4476 Fatalf(msg, args...)
4478 const alwaysLog = false // enable to calculate top unimplemented features
4479 if !e.unimplemented && (e.log || alwaysLog) {
4480 // first implementation failure, print explanation
4481 fmt.Printf("SSA unimplemented: "+msg+"\n", args...)
4483 e.unimplemented = true
4486 // Warnl reports a "warning", which is usually flag-triggered
4487 // logging output for the benefit of tests.
4488 func (e *ssaExport) Warnl(line int32, fmt_ string, args ...interface{}) {
4489 Warnl(line, fmt_, args...)
4492 func (e *ssaExport) Debug_checknil() bool {
4493 return Debug_checknil != 0
4496 func (n *Node) Typ() ssa.Type {