1 // Copyright 2015 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
14 "cmd/compile/internal/ssa"
// ssaConfig is the SSA backend configuration shared across all functions
// compiled in this run; it is assigned by initssa.
var ssaConfig *ssa.Config

// initssa returns the SSA backend configuration, resetting the exporter
// flags so that constructs the SSA backend cannot handle are treated as
// hard errors rather than silently skipped.
func initssa() *ssa.Config {
	ssaExp.unimplemented = false
	ssaExp.mustImplement = true
	// NOTE(review): Debug['N'] presumably corresponds to the -N
	// (disable optimization) flag, so the last argument enables
	// optimization when -N is not set — confirm against lex.go.
	ssaConfig = ssa.NewConfig(Thearch.LinkArch.Name, &ssaExp, Ctxt, Debug['N'] == 0)
// shouldssa reports whether function fn should be compiled with the SSA
// backend, based on the target architecture and the GOSSAFUNC / GOSSAPKG /
// GOSSAHASH environment variables described below.
func shouldssa(fn *Node) bool {
	switch Thearch.LinkArch.Name {
		// Only available for testing.
		if os.Getenv("SSATEST") == "" {
	// Generally available.
	// Environment variable control of SSA CG
	// 1. IF GOSSAFUNC == current function name THEN
	//       compile this function with SSA and log output to ssa.html
	// 2. IF GOSSAHASH == "" THEN
	//       compile this function (and everything else) with SSA
	// 3. IF GOSSAHASH == "n" or "N"
	//       IF GOSSAPKG == current package name THEN
	//          compile this function (and everything in this package) with SSA
	//       use the old back end for this function.
	// This is for compatibility with existing test harness and should go away.
	// 4. IF GOSSAHASH is a suffix of the binary-rendered SHA1 hash of the function name THEN
	//       compile this function with SSA
	//       compile this function with the old back end.
	// Plan is for 3 to be removed when the tests are revised.
	// SSA is now default, and is disabled by setting
	// GOSSAHASH to n or N, or selectively with strings of
	name := fn.Func.Nname.Sym.Name
	funcname := os.Getenv("GOSSAFUNC")
		// If GOSSAFUNC is set, compile only that function.
		return name == funcname
	pkg := os.Getenv("GOSSAPKG")
		// If GOSSAPKG is set, compile only that package.
		return localpkg.Name == pkg
	// Default: hash-based selection (see rule 4 above).
	return initssa().DebugHashMatch("GOSSAHASH", name)
// buildssa builds an SSA function from the AST representation of fn.
// It converts the function body statement by statement (s.stmts), then
// links forward variable references and returns the completed ssa.Func.
func buildssa(fn *Node) *ssa.Func {
	name := fn.Func.Nname.Sym.Name
	// printssa enables dumping of the SSA form for the function named
	// by the GOSSAFUNC environment variable.
	printssa := name == os.Getenv("GOSSAFUNC")
	fmt.Println("generating SSA for", name)
	dumplist("buildssa-enter", fn.Func.Enter)
	dumplist("buildssa-body", fn.Nbody)
	dumplist("buildssa-exit", fn.Func.Exit)
	s.pushLine(fn.Lineno)
	if fn.Func.Pragma&CgoUnsafeArgs != 0 {
		s.cgoUnsafeArgs = true
	if fn.Func.Pragma&Nowritebarrier != 0 {
	fn.Func.WBLineno = s.WBLineno
	// TODO(khr): build config just once at the start of the compiler binary
	ssaExp.log = printssa
	s.f = s.config.NewFunc()
	s.exitCode = fn.Func.Exit
	s.panics = map[funcLine]*ssa.Block{}
	if name == os.Getenv("GOSSAFUNC") {
		// TODO: tempfile? it is handy to have the location
		// of this file be stable, so you can just reload in the browser.
		s.config.HTML = ssa.NewHTMLWriter("ssa.html", s.config, name)
		// TODO: generate and print a mapping from nodes to values and blocks
	s.config.HTML.Close()
	// Allocate starting block
	s.f.Entry = s.f.NewBlock(ssa.BlockPlain)
	// Allocate starting values
	s.labels = map[string]*ssaLabel{}
	s.labeledNodes = map[*Node]*ssaLabel{}
	s.startmem = s.entryNewValue0(ssa.OpInitMem, ssa.TypeMem)
	s.sp = s.entryNewValue0(ssa.OpSP, Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
	s.sb = s.entryNewValue0(ssa.OpSB, Types[TUINTPTR])
	s.startBlock(s.f.Entry)
	s.vars[&memVar] = s.startmem
	s.varsyms = map[*Node]interface{}{}
	// Generate addresses of local declarations
	s.decladdrs = map[*Node]*ssa.Value{}
	for _, n := range fn.Func.Dcl {
		case PPARAM, PPARAMOUT:
			aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n})
			s.decladdrs[n] = s.entryNewValue1A(ssa.OpAddr, Ptrto(n.Type), aux, s.sp)
			if n.Class == PPARAMOUT && s.canSSA(n) {
				// Save ssa-able PPARAMOUT variables so we can
				// store them back to the stack at the end of
				s.returns = append(s.returns, n)
			if n.Class == PPARAM && s.canSSA(n) && n.Type.IsPtrShaped() {
				s.ptrargs = append(s.ptrargs, n)
				n.SetNotLiveAtEnd(true) // SSA takes care of this explicitly
		// processed at each use, to prevent Addr coming
		// moved to heap - already handled by frontend
		// local function - already handled by frontend
		s.Unimplementedf("local variable with class %s unimplemented", classnames[n.Class])
	// Convert the AST-based IR to the SSA-based IR
	s.stmts(fn.Func.Enter)
	// fallthrough to exit
	if s.curBlock != nil {
		s.pushLine(fn.Func.Endlineno)
	// Check that we used all labels
	for name, lab := range s.labels {
		if !lab.used() && !lab.reported {
			yyerrorl(lab.defNode.Lineno, "label %v defined and not used", name)
		if lab.used() && !lab.defined() && !lab.reported {
			yyerrorl(lab.useNode.Lineno, "label %v not defined", name)
	// Check any forward gotos. Non-forward gotos have already been checked.
	for _, n := range s.fwdGotos {
		lab := s.labels[n.Left.Sym.Name]
		// If the label is undefined, we have already printed an error.
		s.checkgoto(n, lab.defNode)
	prelinkNumvars := s.f.NumValues()
	sparseDefState := s.locatePotentialPhiFunctions(fn)
	// Link up variable uses to variable definitions
	s.linkForwardReferences(sparseDefState)
	if ssa.BuildStats > 0 {
		s.f.LogStat("build", s.f.NumBlocks(), "blocks", prelinkNumvars, "vars_before",
			s.f.NumValues(), "vars_after", prelinkNumvars*s.f.NumBlocks(), "ssa_phi_loc_cutoff_score")
	// Don't carry this reference around longer than necessary
231 // Main call to ssa package to compile function
238 // configuration (arch) information
241 // function we're building
244 // labels and labeled control flow nodes (OFOR, OSWITCH, OSELECT) in f
245 labels map[string]*ssaLabel
246 labeledNodes map[*Node]*ssaLabel
248 // gotos that jump forward; required for deferred checkgoto calls
250 // Code that must precede any return
251 // (e.g., copying value of heap-escaped paramout back to true paramout)
254 // unlabeled break and continue statement tracking
255 breakTo *ssa.Block // current target for plain break statement
256 continueTo *ssa.Block // current target for plain continue statement
258 // current location where we're interpreting the AST
261 // variable assignments in the current block (map from variable symbol to ssa value)
262 // *Node is the unique identifier (an ONAME Node) for the variable.
263 vars map[*Node]*ssa.Value
265 // all defined variables at the end of each block. Indexed by block ID.
266 defvars []map[*Node]*ssa.Value
268 // addresses of PPARAM and PPARAMOUT variables.
269 decladdrs map[*Node]*ssa.Value
271 // symbols for PEXTERN, PAUTO and PPARAMOUT variables so they can be reused.
272 varsyms map[*Node]interface{}
274 // starting values. Memory, stack pointer, and globals pointer
279 // line number stack. The current line number is top of stack
282 // list of panic calls by function name and line number.
283 // Used to deduplicate panic calls.
284 panics map[funcLine]*ssa.Block
286 // list of FwdRef values.
289 // list of PPARAMOUT (return) variables.
292 // list of PPARAM SSA-able pointer-shaped args. We ensure these are live
293 // throughout the function to help users avoid premature finalizers.
298 WBLineno int32 // line number of first write barrier. 0=no write barriers
// funcLine identifies a (function, line) pair. It is used as the key of
// the panics map to deduplicate panic calls emitted for the same site.
type funcLine struct {
// ssaLabel records the SSA state of one Go label: the blocks that the
// label (and any labeled break/continue) resolves to, plus the AST nodes
// that define and use it, kept for error detection and reporting.
type ssaLabel struct {
	target *ssa.Block // block identified by this label
	breakTarget *ssa.Block // block to break to in control flow node identified by this label
	continueTarget *ssa.Block // block to continue to in control flow node identified by this label
	defNode *Node // label definition Node (OLABEL)
	// Label use Node (OGOTO, OBREAK, OCONTINUE).
	// Used only for error detection and reporting.
	// There might be multiple uses, but we only need to track one.
	reported bool // reported indicates whether an error has already been reported for this label
318 // defined reports whether the label has a definition (OLABEL node).
319 func (l *ssaLabel) defined() bool { return l.defNode != nil }
321 // used reports whether the label has a use (OGOTO, OBREAK, or OCONTINUE node).
322 func (l *ssaLabel) used() bool { return l.useNode != nil }
// label returns the label associated with sym, creating it if necessary.
// Created labels are cached in s.labels so that repeated lookups of the
// same symbol return the same *ssaLabel.
func (s *state) label(sym *Sym) *ssaLabel {
	lab := s.labels[sym.Name]
		s.labels[sym.Name] = lab
334 func (s *state) Logf(msg string, args ...interface{}) { s.config.Logf(msg, args...) }
335 func (s *state) Log() bool { return s.config.Log() }
336 func (s *state) Fatalf(msg string, args ...interface{}) { s.config.Fatalf(s.peekLine(), msg, args...) }
// Unimplementedf reports an unimplemented-construct condition at the
// current (topmost) source line, delegating to the backend configuration.
func (s *state) Unimplementedf(msg string, args ...interface{}) {
	s.config.Unimplementedf(s.peekLine(), msg, args...)
340 func (s *state) Warnl(line int32, msg string, args ...interface{}) { s.config.Warnl(line, msg, args...) }
341 func (s *state) Debug_checknil() bool { return s.config.Debug_checknil() }
344 // dummy node for the memory variable
345 memVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "mem"}}
347 // dummy nodes for temporary variables
348 ptrVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "ptr"}}
349 lenVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "len"}}
350 newlenVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "newlen"}}
351 capVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "cap"}}
352 typVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "typ"}}
353 idataVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "idata"}}
354 okVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "ok"}}
355 deltaVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "delta"}}
// startBlock sets the current block we're generating code in to b.
// It is a fatal error to start a block while another is still open;
// the previous block must be closed with endBlock first.
func (s *state) startBlock(b *ssa.Block) {
	if s.curBlock != nil {
		s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
	// Reset the per-block variable-assignment tracking for the new block.
	s.vars = map[*Node]*ssa.Value{}
// endBlock marks the end of generating code for the current block.
// Returns the (former) current block. Returns nil if there is no current
// block, i.e. if no code flows to the current execution point.
func (s *state) endBlock() *ssa.Block {
	// Grow defvars so it can be indexed by the block's ID.
	for len(s.defvars) <= int(b.ID) {
		s.defvars = append(s.defvars, nil)
	// Record the variable assignments live at the end of this block.
	s.defvars[b.ID] = s.vars
	b.Line = s.peekLine()
// pushLine pushes a line number on the line number stack.
// The top of the stack is read by peekLine; balance with popLine.
func (s *state) pushLine(line int32) {
	s.line = append(s.line, line)
// popLine pops the top of the line number stack.
// Must balance a preceding pushLine; popping an empty stack would panic.
func (s *state) popLine() {
	s.line = s.line[:len(s.line)-1]
// peekLine peeks at the top of the line number stack, i.e. the source
// line currently being compiled.
func (s *state) peekLine() int32 {
	return s.line[len(s.line)-1]
// Error reports a compile error attributed to the current (topmost)
// source line on the line stack.
func (s *state) Error(msg string, args ...interface{}) {
	yyerrorl(s.peekLine(), msg, args...)
// The newValue* helpers below construct SSA values in the current block,
// stamped with the current (topmost) source line. The numeric part of the
// name is the argument count; the A/I suffixes attach aux / auxint data.

// newValue0 adds a new value with no arguments to the current block.
func (s *state) newValue0(op ssa.Op, t ssa.Type) *ssa.Value {
	return s.curBlock.NewValue0(s.peekLine(), op, t)
// newValue0A adds a new value with no arguments and an aux value to the current block.
func (s *state) newValue0A(op ssa.Op, t ssa.Type, aux interface{}) *ssa.Value {
	return s.curBlock.NewValue0A(s.peekLine(), op, t, aux)
// newValue0I adds a new value with no arguments and an auxint value to the current block.
func (s *state) newValue0I(op ssa.Op, t ssa.Type, auxint int64) *ssa.Value {
	return s.curBlock.NewValue0I(s.peekLine(), op, t, auxint)
// newValue1 adds a new value with one argument to the current block.
func (s *state) newValue1(op ssa.Op, t ssa.Type, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1(s.peekLine(), op, t, arg)
// newValue1A adds a new value with one argument and an aux value to the current block.
func (s *state) newValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1A(s.peekLine(), op, t, aux, arg)
// newValue1I adds a new value with one argument and an auxint value to the current block.
func (s *state) newValue1I(op ssa.Op, t ssa.Type, aux int64, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1I(s.peekLine(), op, t, aux, arg)
// newValue2 adds a new value with two arguments to the current block.
func (s *state) newValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2(s.peekLine(), op, t, arg0, arg1)
// newValue2I adds a new value with two arguments and an auxint value to the current block.
func (s *state) newValue2I(op ssa.Op, t ssa.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2I(s.peekLine(), op, t, aux, arg0, arg1)
// newValue3 adds a new value with three arguments to the current block.
func (s *state) newValue3(op ssa.Op, t ssa.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3(s.peekLine(), op, t, arg0, arg1, arg2)
// newValue3I adds a new value with three arguments and an auxint value to the current block.
func (s *state) newValue3I(op ssa.Op, t ssa.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3I(s.peekLine(), op, t, aux, arg0, arg1, arg2)
// The entryNewValue* helpers mirror the newValue* family but place the
// value in the function's entry block rather than the current block.

// entryNewValue0 adds a new value with no arguments to the entry block.
func (s *state) entryNewValue0(op ssa.Op, t ssa.Type) *ssa.Value {
	return s.f.Entry.NewValue0(s.peekLine(), op, t)
// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
func (s *state) entryNewValue0A(op ssa.Op, t ssa.Type, aux interface{}) *ssa.Value {
	return s.f.Entry.NewValue0A(s.peekLine(), op, t, aux)
// entryNewValue0I adds a new value with no arguments and an auxint value to the entry block.
func (s *state) entryNewValue0I(op ssa.Op, t ssa.Type, auxint int64) *ssa.Value {
	return s.f.Entry.NewValue0I(s.peekLine(), op, t, auxint)
// entryNewValue1 adds a new value with one argument to the entry block.
func (s *state) entryNewValue1(op ssa.Op, t ssa.Type, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1(s.peekLine(), op, t, arg)
// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
func (s *state) entryNewValue1I(op ssa.Op, t ssa.Type, auxint int64, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1I(s.peekLine(), op, t, auxint, arg)
// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
func (s *state) entryNewValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1A(s.peekLine(), op, t, aux, arg)
// entryNewValue2 adds a new value with two arguments to the entry block.
func (s *state) entryNewValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue2(s.peekLine(), op, t, arg0, arg1)
// const* routines add a new const value to the entry block.
func (s *state) constSlice(t ssa.Type) *ssa.Value { return s.f.ConstSlice(s.peekLine(), t) }
func (s *state) constInterface(t ssa.Type) *ssa.Value { return s.f.ConstInterface(s.peekLine(), t) }
func (s *state) constNil(t ssa.Type) *ssa.Value { return s.f.ConstNil(s.peekLine(), t) }
func (s *state) constEmptyString(t ssa.Type) *ssa.Value { return s.f.ConstEmptyString(s.peekLine(), t) }
func (s *state) constBool(c bool) *ssa.Value {
	return s.f.ConstBool(s.peekLine(), Types[TBOOL], c)
func (s *state) constInt8(t ssa.Type, c int8) *ssa.Value {
	return s.f.ConstInt8(s.peekLine(), t, c)
func (s *state) constInt16(t ssa.Type, c int16) *ssa.Value {
	return s.f.ConstInt16(s.peekLine(), t, c)
func (s *state) constInt32(t ssa.Type, c int32) *ssa.Value {
	return s.f.ConstInt32(s.peekLine(), t, c)
func (s *state) constInt64(t ssa.Type, c int64) *ssa.Value {
	return s.f.ConstInt64(s.peekLine(), t, c)
func (s *state) constFloat32(t ssa.Type, c float64) *ssa.Value {
	return s.f.ConstFloat32(s.peekLine(), t, c)
func (s *state) constFloat64(t ssa.Type, c float64) *ssa.Value {
	return s.f.ConstFloat64(s.peekLine(), t, c)
// constInt emits an integer constant of the target's native int width:
// a 64-bit constant when IntSize is 8, otherwise a 32-bit constant after
// checking that c actually fits in an int32.
func (s *state) constInt(t ssa.Type, c int64) *ssa.Value {
	if s.config.IntSize == 8 {
		return s.constInt64(t, c)
	// 32-bit target: c must round-trip through int32.
	if int64(int32(c)) != c {
		s.Fatalf("integer constant too big %d", c)
	return s.constInt32(t, int32(c))
// stmts converts each statement in the list a to SSA and adds it to s.
func (s *state) stmts(a Nodes) {
	for _, x := range a.Slice() {
// stmtList converts each statement in the list l to SSA and adds it to s.
func (s *state) stmtList(l Nodes) {
	for _, n := range l.Slice() {
// stmt converts the statement n to SSA and adds it to s.
func (s *state) stmt(n *Node) {
	// If s.curBlock is nil, then we're about to generate dead code.
	// We can't just short-circuit here, though,
	// because we check labels and gotos as part of SSA generation.
	// Provide a block for the dead code so that we don't have
	// to add special cases everywhere else.
	if s.curBlock == nil {
		dead := s.f.NewBlock(ssa.BlockPlain)
	case OEMPTY, ODCLCONST, ODCLTYPE, OFALL:
	// Expression statements
	case OCALLFUNC, OCALLMETH, OCALLINTER:
		s.call(n, callNormal)
		if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class == PFUNC &&
			(compiling_runtime && n.Left.Sym.Name == "throw" ||
				n.Left.Sym.Pkg == Runtimepkg && (n.Left.Sym.Name == "gopanic" || n.Left.Sym.Name == "selectgo" || n.Left.Sym.Name == "block")) {
			b.Kind = ssa.BlockExit
			// TODO: never rewrite OPANIC to OCALLFUNC in the
			// first place. Need to wait until all backends
		s.call(n.Left, callDefer)
		s.call(n.Left, callGo)
		res, resok := s.dottype(n.Rlist.First(), true)
		s.assign(n.List.First(), res, needwritebarrier(n.List.First(), n.Rlist.First()), false, n.Lineno, 0, false)
		s.assign(n.List.Second(), resok, false, false, n.Lineno, 0, false)
		if n.Left.Class == PAUTOHEAP {
		// Empty identifier is valid but useless.
		// See issues 11589, 11593.
		// Associate label with its control flow node, if any
		if ctl := n.Name.Defn; ctl != nil {
			case OFOR, OSWITCH, OSELECT:
				s.labeledNodes[ctl] = lab
			s.Error("label %v already defined at %v", sym, linestr(lab.defNode.Lineno))
		// The label might already have a target block via a goto.
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		// go to that label (we pretend "label:" is preceded by "goto label")
			b.AddEdgeTo(lab.target)
		s.startBlock(lab.target)
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
			s.checkgoto(n, lab.defNode)
			s.fwdGotos = append(s.fwdGotos, n)
			b.AddEdgeTo(lab.target)
		// Check whether we can generate static data rather than code.
		// If so, ignore n and defer data generation until codegen.
		// Failure to do this causes writes to readonly symbols.
		if gen_as_init(n, true) {
			if s.f.StaticData != nil {
				data = s.f.StaticData.([]*Node)
			s.f.StaticData = append(data, n)
		if n.Left == n.Right && n.Left.Op == ONAME {
			// An x=x assignment. No point in doing anything
			// here. In addition, skipping this assignment
			// prevents generating:
			// which is bad because x is incorrectly considered
			// dead before the vardef. See issue #14904.
		case OSTRUCTLIT, OARRAYLIT:
			// All literals with nonzero fields have already been
			// rewritten during walk. Any that remain are just T{}
			// or equivalents. Use the zero value.
				Fatalf("literal with nonzero value in SSA: %v", rhs)
			// If we're writing the result of an append back to the same slice,
			// handle it specially to avoid write barriers on the fast (non-growth) path.
			// If the slice can be SSA'd, it'll be on the stack,
			// so there will be no write barriers,
			// so there's no need to attempt to prevent them.
			if samesafeexpr(n.Left, rhs.List.First()) && !s.canSSA(n.Left) {
		needwb := n.Op == OASWB && rhs != nil
		deref := !canSSAType(t)
				r = nil // Signal assign to use OpZero.
				r, isVolatile = s.addr(rhs, false)
		if rhs != nil && rhs.Op == OAPPEND {
			// The frontend gets rid of the write barrier to enable the special OAPPEND
			// handling above, but since this is not a special case, we need it.
			// TODO: just add a ptr graying to the end of growslice?
			// TODO: check whether we need to provide special handling and a write barrier
			// for ODOTTYPE and ORECV also.
			// They get similar wb-removal treatment in walk.go:OAS.
		if rhs != nil && (rhs.Op == OSLICE || rhs.Op == OSLICE3 || rhs.Op == OSLICESTR) && samesafeexpr(rhs.Left, n.Left) {
			// We're assigning a slicing operation back to its source.
			// Don't write back fields we aren't changing. See issue #14855.
			i, j, k := rhs.SliceBounds()
			if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64() == 0) {
				// [0:...] is the same as [:...]
			// TODO: detect defaults for len/cap also.
			// Currently doesn't really work because (*p)[:len(*p)] appears here as:
			//if j != nil && (j.Op == OLEN && samesafeexpr(j.Left, n.Left)) {
			//if k != nil && (k.Op == OCAP && samesafeexpr(k.Left, n.Left)) {
		s.assign(n.Left, r, needwb, deref, n.Lineno, skip, isVolatile)
		bThen := s.f.NewBlock(ssa.BlockPlain)
		bEnd := s.f.NewBlock(ssa.BlockPlain)
		if n.Rlist.Len() != 0 {
			bElse = s.f.NewBlock(ssa.BlockPlain)
			s.condBranch(n.Left, bThen, bElse, n.Likely)
			s.condBranch(n.Left, bThen, bEnd, n.Likely)
		if b := s.endBlock(); b != nil {
		if n.Rlist.Len() != 0 {
			if b := s.endBlock(); b != nil {
			b.Kind = ssa.BlockRetJmp // override BlockRet
	case OCONTINUE, OBREAK:
			// plain break/continue
				s.Error("%s is not in a loop", op)
			// nothing to do; "to" is already the correct target
			// labeled break/continue; look up the target
				s.Error("%s label not defined: %v", op, sym)
				to = lab.continueTarget
			// Valid label but not usable with a break/continue here, e.g.:
				s.Error("invalid %s label %v", op, sym)
		// OFOR: for Ninit; Left; Right { Nbody }
		bCond := s.f.NewBlock(ssa.BlockPlain)
		bBody := s.f.NewBlock(ssa.BlockPlain)
		bIncr := s.f.NewBlock(ssa.BlockPlain)
		bEnd := s.f.NewBlock(ssa.BlockPlain)
		// first, jump to condition test
		// generate code to test condition
			s.condBranch(n.Left, bBody, bEnd, 1)
			b.Kind = ssa.BlockPlain
		// set up for continue/break in body
		prevContinue := s.continueTo
		prevBreak := s.breakTo
		lab := s.labeledNodes[n]
			lab.continueTarget = bIncr
			lab.breakTarget = bEnd
		// tear down continue/break
		s.continueTo = prevContinue
		s.breakTo = prevBreak
			lab.continueTarget = nil
			lab.breakTarget = nil
		// done with body, goto incr
		if b := s.endBlock(); b != nil {
		if b := s.endBlock(); b != nil {
	case OSWITCH, OSELECT:
		// These have been mostly rewritten by the front end into their Nbody fields.
		// Our main task is to correctly hook up any break statements.
		bEnd := s.f.NewBlock(ssa.BlockPlain)
		prevBreak := s.breakTo
		lab := s.labeledNodes[n]
			lab.breakTarget = bEnd
		// generate body code
		s.breakTo = prevBreak
			lab.breakTarget = nil
		// OSWITCH never falls through (s.curBlock == nil here).
		// OSELECT does not fall through if we're calling selectgo.
		// OSELECT does fall through if we're calling selectnb{send,recv}[2].
		// In those latter cases, go to the code after the select.
		if b := s.endBlock(); b != nil {
		// Insert a varkill op to record that a variable is no longer live.
		// We only care about liveness info at call sites, so putting the
		// varkill in the store chain is enough to keep it correctly ordered
		// with respect to call ops.
		if !s.canSSA(n.Left) {
			s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, ssa.TypeMem, n.Left, s.mem())
		// Insert a varlive op to record that a variable is still live.
		if !n.Left.Addrtaken {
			s.Fatalf("VARLIVE variable %s must have Addrtaken set", n.Left)
		s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, ssa.TypeMem, n.Left, s.mem())
		s.Unimplementedf("unhandled stmt %s", n.Op)
// exit processes any code that needs to be generated just before returning.
// It returns a BlockRet block that ends the control flow. Its control value
// will be set to the final memory state.
func (s *state) exit() *ssa.Block {
		s.rtcall(Deferreturn, true, nil)
	// Run exit code. Typically, this code copies heap-allocated PPARAMOUT
	// variables back to the stack.
	// Store SSAable PPARAMOUT variables back to stack locations.
	for _, n := range s.returns {
		addr := s.decladdrs[n]
		val := s.variable(n, n.Type)
		// VarDef precedes the store in the memory chain — NOTE(review):
		// presumably so liveness sees the slot as defined; confirm.
		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, n, s.mem())
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, n.Type.Size(), addr, val, s.mem())
		// TODO: if val is ever spilled, we'd like to use the
		// PPARAMOUT slot for spilling it. That won't happen
	// Keep input pointer args live until the return. This is a bandaid
	// fix for 1.7 for what will become in 1.8 explicit runtime.KeepAlive calls.
	// For <= 1.7 we guarantee that pointer input arguments live to the end of
	// the function to prevent premature (from the user's point of view)
	// execution of finalizers. See issue 15277.
	// TODO: remove for 1.8?
	for _, n := range s.ptrargs {
		s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, ssa.TypeMem, s.variable(n, n.Type), s.mem())
	b.Kind = ssa.BlockRet
// opAndType is the key of the opToSSA table: an AST operator paired with
// a concrete element type (see concreteEtype).
type opAndType struct {
// opToSSA maps an (AST op, concrete element type) pair to the SSA opcode
// that implements it; consulted by ssaOp after type normalization.
var opToSSA = map[opAndType]ssa.Op{
1010 opAndType{OADD, TINT8}: ssa.OpAdd8,
1011 opAndType{OADD, TUINT8}: ssa.OpAdd8,
1012 opAndType{OADD, TINT16}: ssa.OpAdd16,
1013 opAndType{OADD, TUINT16}: ssa.OpAdd16,
1014 opAndType{OADD, TINT32}: ssa.OpAdd32,
1015 opAndType{OADD, TUINT32}: ssa.OpAdd32,
1016 opAndType{OADD, TPTR32}: ssa.OpAdd32,
1017 opAndType{OADD, TINT64}: ssa.OpAdd64,
1018 opAndType{OADD, TUINT64}: ssa.OpAdd64,
1019 opAndType{OADD, TPTR64}: ssa.OpAdd64,
1020 opAndType{OADD, TFLOAT32}: ssa.OpAdd32F,
1021 opAndType{OADD, TFLOAT64}: ssa.OpAdd64F,
1023 opAndType{OSUB, TINT8}: ssa.OpSub8,
1024 opAndType{OSUB, TUINT8}: ssa.OpSub8,
1025 opAndType{OSUB, TINT16}: ssa.OpSub16,
1026 opAndType{OSUB, TUINT16}: ssa.OpSub16,
1027 opAndType{OSUB, TINT32}: ssa.OpSub32,
1028 opAndType{OSUB, TUINT32}: ssa.OpSub32,
1029 opAndType{OSUB, TINT64}: ssa.OpSub64,
1030 opAndType{OSUB, TUINT64}: ssa.OpSub64,
1031 opAndType{OSUB, TFLOAT32}: ssa.OpSub32F,
1032 opAndType{OSUB, TFLOAT64}: ssa.OpSub64F,
1034 opAndType{ONOT, TBOOL}: ssa.OpNot,
1036 opAndType{OMINUS, TINT8}: ssa.OpNeg8,
1037 opAndType{OMINUS, TUINT8}: ssa.OpNeg8,
1038 opAndType{OMINUS, TINT16}: ssa.OpNeg16,
1039 opAndType{OMINUS, TUINT16}: ssa.OpNeg16,
1040 opAndType{OMINUS, TINT32}: ssa.OpNeg32,
1041 opAndType{OMINUS, TUINT32}: ssa.OpNeg32,
1042 opAndType{OMINUS, TINT64}: ssa.OpNeg64,
1043 opAndType{OMINUS, TUINT64}: ssa.OpNeg64,
1044 opAndType{OMINUS, TFLOAT32}: ssa.OpNeg32F,
1045 opAndType{OMINUS, TFLOAT64}: ssa.OpNeg64F,
1047 opAndType{OCOM, TINT8}: ssa.OpCom8,
1048 opAndType{OCOM, TUINT8}: ssa.OpCom8,
1049 opAndType{OCOM, TINT16}: ssa.OpCom16,
1050 opAndType{OCOM, TUINT16}: ssa.OpCom16,
1051 opAndType{OCOM, TINT32}: ssa.OpCom32,
1052 opAndType{OCOM, TUINT32}: ssa.OpCom32,
1053 opAndType{OCOM, TINT64}: ssa.OpCom64,
1054 opAndType{OCOM, TUINT64}: ssa.OpCom64,
1056 opAndType{OIMAG, TCOMPLEX64}: ssa.OpComplexImag,
1057 opAndType{OIMAG, TCOMPLEX128}: ssa.OpComplexImag,
1058 opAndType{OREAL, TCOMPLEX64}: ssa.OpComplexReal,
1059 opAndType{OREAL, TCOMPLEX128}: ssa.OpComplexReal,
1061 opAndType{OMUL, TINT8}: ssa.OpMul8,
1062 opAndType{OMUL, TUINT8}: ssa.OpMul8,
1063 opAndType{OMUL, TINT16}: ssa.OpMul16,
1064 opAndType{OMUL, TUINT16}: ssa.OpMul16,
1065 opAndType{OMUL, TINT32}: ssa.OpMul32,
1066 opAndType{OMUL, TUINT32}: ssa.OpMul32,
1067 opAndType{OMUL, TINT64}: ssa.OpMul64,
1068 opAndType{OMUL, TUINT64}: ssa.OpMul64,
1069 opAndType{OMUL, TFLOAT32}: ssa.OpMul32F,
1070 opAndType{OMUL, TFLOAT64}: ssa.OpMul64F,
1072 opAndType{ODIV, TFLOAT32}: ssa.OpDiv32F,
1073 opAndType{ODIV, TFLOAT64}: ssa.OpDiv64F,
1075 opAndType{OHMUL, TINT8}: ssa.OpHmul8,
1076 opAndType{OHMUL, TUINT8}: ssa.OpHmul8u,
1077 opAndType{OHMUL, TINT16}: ssa.OpHmul16,
1078 opAndType{OHMUL, TUINT16}: ssa.OpHmul16u,
1079 opAndType{OHMUL, TINT32}: ssa.OpHmul32,
1080 opAndType{OHMUL, TUINT32}: ssa.OpHmul32u,
1082 opAndType{ODIV, TINT8}: ssa.OpDiv8,
1083 opAndType{ODIV, TUINT8}: ssa.OpDiv8u,
1084 opAndType{ODIV, TINT16}: ssa.OpDiv16,
1085 opAndType{ODIV, TUINT16}: ssa.OpDiv16u,
1086 opAndType{ODIV, TINT32}: ssa.OpDiv32,
1087 opAndType{ODIV, TUINT32}: ssa.OpDiv32u,
1088 opAndType{ODIV, TINT64}: ssa.OpDiv64,
1089 opAndType{ODIV, TUINT64}: ssa.OpDiv64u,
1091 opAndType{OMOD, TINT8}: ssa.OpMod8,
1092 opAndType{OMOD, TUINT8}: ssa.OpMod8u,
1093 opAndType{OMOD, TINT16}: ssa.OpMod16,
1094 opAndType{OMOD, TUINT16}: ssa.OpMod16u,
1095 opAndType{OMOD, TINT32}: ssa.OpMod32,
1096 opAndType{OMOD, TUINT32}: ssa.OpMod32u,
1097 opAndType{OMOD, TINT64}: ssa.OpMod64,
1098 opAndType{OMOD, TUINT64}: ssa.OpMod64u,
1100 opAndType{OAND, TINT8}: ssa.OpAnd8,
1101 opAndType{OAND, TUINT8}: ssa.OpAnd8,
1102 opAndType{OAND, TINT16}: ssa.OpAnd16,
1103 opAndType{OAND, TUINT16}: ssa.OpAnd16,
1104 opAndType{OAND, TINT32}: ssa.OpAnd32,
1105 opAndType{OAND, TUINT32}: ssa.OpAnd32,
1106 opAndType{OAND, TINT64}: ssa.OpAnd64,
1107 opAndType{OAND, TUINT64}: ssa.OpAnd64,
1109 opAndType{OOR, TINT8}: ssa.OpOr8,
1110 opAndType{OOR, TUINT8}: ssa.OpOr8,
1111 opAndType{OOR, TINT16}: ssa.OpOr16,
1112 opAndType{OOR, TUINT16}: ssa.OpOr16,
1113 opAndType{OOR, TINT32}: ssa.OpOr32,
1114 opAndType{OOR, TUINT32}: ssa.OpOr32,
1115 opAndType{OOR, TINT64}: ssa.OpOr64,
1116 opAndType{OOR, TUINT64}: ssa.OpOr64,
1118 opAndType{OXOR, TINT8}: ssa.OpXor8,
1119 opAndType{OXOR, TUINT8}: ssa.OpXor8,
1120 opAndType{OXOR, TINT16}: ssa.OpXor16,
1121 opAndType{OXOR, TUINT16}: ssa.OpXor16,
1122 opAndType{OXOR, TINT32}: ssa.OpXor32,
1123 opAndType{OXOR, TUINT32}: ssa.OpXor32,
1124 opAndType{OXOR, TINT64}: ssa.OpXor64,
1125 opAndType{OXOR, TUINT64}: ssa.OpXor64,
1127 opAndType{OEQ, TBOOL}: ssa.OpEqB,
1128 opAndType{OEQ, TINT8}: ssa.OpEq8,
1129 opAndType{OEQ, TUINT8}: ssa.OpEq8,
1130 opAndType{OEQ, TINT16}: ssa.OpEq16,
1131 opAndType{OEQ, TUINT16}: ssa.OpEq16,
1132 opAndType{OEQ, TINT32}: ssa.OpEq32,
1133 opAndType{OEQ, TUINT32}: ssa.OpEq32,
1134 opAndType{OEQ, TINT64}: ssa.OpEq64,
1135 opAndType{OEQ, TUINT64}: ssa.OpEq64,
1136 opAndType{OEQ, TINTER}: ssa.OpEqInter,
1137 opAndType{OEQ, TSLICE}: ssa.OpEqSlice,
1138 opAndType{OEQ, TFUNC}: ssa.OpEqPtr,
1139 opAndType{OEQ, TMAP}: ssa.OpEqPtr,
1140 opAndType{OEQ, TCHAN}: ssa.OpEqPtr,
1141 opAndType{OEQ, TPTR64}: ssa.OpEqPtr,
1142 opAndType{OEQ, TUINTPTR}: ssa.OpEqPtr,
1143 opAndType{OEQ, TUNSAFEPTR}: ssa.OpEqPtr,
1144 opAndType{OEQ, TFLOAT64}: ssa.OpEq64F,
1145 opAndType{OEQ, TFLOAT32}: ssa.OpEq32F,
1147 opAndType{ONE, TBOOL}: ssa.OpNeqB,
1148 opAndType{ONE, TINT8}: ssa.OpNeq8,
1149 opAndType{ONE, TUINT8}: ssa.OpNeq8,
1150 opAndType{ONE, TINT16}: ssa.OpNeq16,
1151 opAndType{ONE, TUINT16}: ssa.OpNeq16,
1152 opAndType{ONE, TINT32}: ssa.OpNeq32,
1153 opAndType{ONE, TUINT32}: ssa.OpNeq32,
1154 opAndType{ONE, TINT64}: ssa.OpNeq64,
1155 opAndType{ONE, TUINT64}: ssa.OpNeq64,
1156 opAndType{ONE, TINTER}: ssa.OpNeqInter,
1157 opAndType{ONE, TSLICE}: ssa.OpNeqSlice,
1158 opAndType{ONE, TFUNC}: ssa.OpNeqPtr,
1159 opAndType{ONE, TMAP}: ssa.OpNeqPtr,
1160 opAndType{ONE, TCHAN}: ssa.OpNeqPtr,
1161 opAndType{ONE, TPTR64}: ssa.OpNeqPtr,
1162 opAndType{ONE, TUINTPTR}: ssa.OpNeqPtr,
1163 opAndType{ONE, TUNSAFEPTR}: ssa.OpNeqPtr,
1164 opAndType{ONE, TFLOAT64}: ssa.OpNeq64F,
1165 opAndType{ONE, TFLOAT32}: ssa.OpNeq32F,
1167 opAndType{OLT, TINT8}: ssa.OpLess8,
1168 opAndType{OLT, TUINT8}: ssa.OpLess8U,
1169 opAndType{OLT, TINT16}: ssa.OpLess16,
1170 opAndType{OLT, TUINT16}: ssa.OpLess16U,
1171 opAndType{OLT, TINT32}: ssa.OpLess32,
1172 opAndType{OLT, TUINT32}: ssa.OpLess32U,
1173 opAndType{OLT, TINT64}: ssa.OpLess64,
1174 opAndType{OLT, TUINT64}: ssa.OpLess64U,
1175 opAndType{OLT, TFLOAT64}: ssa.OpLess64F,
1176 opAndType{OLT, TFLOAT32}: ssa.OpLess32F,
1178 opAndType{OGT, TINT8}: ssa.OpGreater8,
1179 opAndType{OGT, TUINT8}: ssa.OpGreater8U,
1180 opAndType{OGT, TINT16}: ssa.OpGreater16,
1181 opAndType{OGT, TUINT16}: ssa.OpGreater16U,
1182 opAndType{OGT, TINT32}: ssa.OpGreater32,
1183 opAndType{OGT, TUINT32}: ssa.OpGreater32U,
1184 opAndType{OGT, TINT64}: ssa.OpGreater64,
1185 opAndType{OGT, TUINT64}: ssa.OpGreater64U,
1186 opAndType{OGT, TFLOAT64}: ssa.OpGreater64F,
1187 opAndType{OGT, TFLOAT32}: ssa.OpGreater32F,
1189 opAndType{OLE, TINT8}: ssa.OpLeq8,
1190 opAndType{OLE, TUINT8}: ssa.OpLeq8U,
1191 opAndType{OLE, TINT16}: ssa.OpLeq16,
1192 opAndType{OLE, TUINT16}: ssa.OpLeq16U,
1193 opAndType{OLE, TINT32}: ssa.OpLeq32,
1194 opAndType{OLE, TUINT32}: ssa.OpLeq32U,
1195 opAndType{OLE, TINT64}: ssa.OpLeq64,
1196 opAndType{OLE, TUINT64}: ssa.OpLeq64U,
1197 opAndType{OLE, TFLOAT64}: ssa.OpLeq64F,
1198 opAndType{OLE, TFLOAT32}: ssa.OpLeq32F,
1200 opAndType{OGE, TINT8}: ssa.OpGeq8,
1201 opAndType{OGE, TUINT8}: ssa.OpGeq8U,
1202 opAndType{OGE, TINT16}: ssa.OpGeq16,
1203 opAndType{OGE, TUINT16}: ssa.OpGeq16U,
1204 opAndType{OGE, TINT32}: ssa.OpGeq32,
1205 opAndType{OGE, TUINT32}: ssa.OpGeq32U,
1206 opAndType{OGE, TINT64}: ssa.OpGeq64,
1207 opAndType{OGE, TUINT64}: ssa.OpGeq64U,
1208 opAndType{OGE, TFLOAT64}: ssa.OpGeq64F,
1209 opAndType{OGE, TFLOAT32}: ssa.OpGeq32F,
1211 opAndType{OLROT, TUINT8}: ssa.OpLrot8,
1212 opAndType{OLROT, TUINT16}: ssa.OpLrot16,
1213 opAndType{OLROT, TUINT32}: ssa.OpLrot32,
1214 opAndType{OLROT, TUINT64}: ssa.OpLrot64,
1216 opAndType{OSQRT, TFLOAT64}: ssa.OpSqrt,
// concreteEtype maps an abstract etype (one whose size depends on the
// target) to the fixed-size etype it concretely has for this build,
// consulting s.config.IntSize / s.config.PtrSize.
// NOTE(review): excerpt is elided — the switch arms and return
// statements between the lines below are not visible here.
1219 func (s *state) concreteEtype(t *Type) EType {
1225 if s.config.IntSize == 8 {
1230 if s.config.IntSize == 8 {
1235 if s.config.PtrSize == 8 {
// ssaOp returns the generic SSA op implementing the gc operator op on
// operands of type t. It first concretizes t's etype so that the
// opToSSA table only needs fixed-size entries; a missing table entry is
// reported as unimplemented.
1242 func (s *state) ssaOp(op Op, t *Type) ssa.Op {
1243 etype := s.concreteEtype(t)
1244 x, ok := opToSSA[opAndType{op, etype}]
1246 s.Unimplementedf("unhandled binary op %s %s", op, etype)
// floatForComplex returns the float type holding one component
// (real or imaginary part) of the complex type t:
// float32 for complex64, float64 for complex128.
1251 func floatForComplex(t *Type) *Type {
1253 return Types[TFLOAT32]
1255 return Types[TFLOAT64]
// opAndTwoTypes keys shiftOpToSSA: a gc operator plus the etypes of the
// shifted value and of the shift count.
1259 type opAndTwoTypes struct {
// twoTypes keys fpConvOpToSSA: the (from, to) etypes of a conversion.
1265 type twoTypes struct {
// twoOpsAndType is the value in fpConvOpToSSA: the two SSA ops that
// implement a conversion, plus the intermediate type produced between
// them.
1270 type twoOpsAndType struct {
1273 intermediateType EType
// fpConvOpToSSA maps a (from, to) pair of concrete etypes to the pair
// of SSA ops that implement the conversion, plus the intermediate type
// the first op produces. ssa.OpCopy marks a leg needing no real
// conversion; ssa.OpInvalid marks the unsigned-64-bit cases that are
// instead expanded into branchy code (see the OCONV handling in expr).
1276 var fpConvOpToSSA = map[twoTypes]twoOpsAndType{
// signed int -> float32: widen (or copy) to 32/64 bits, then convert.
1278 twoTypes{TINT8, TFLOAT32}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, TINT32},
1279 twoTypes{TINT16, TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, TINT32},
1280 twoTypes{TINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, TINT32},
1281 twoTypes{TINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, TINT64},
// signed int -> float64
1283 twoTypes{TINT8, TFLOAT64}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, TINT32},
1284 twoTypes{TINT16, TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, TINT32},
1285 twoTypes{TINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, TINT32},
1286 twoTypes{TINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, TINT64},
// float32 -> signed int: convert, then truncate (or copy).
1288 twoTypes{TFLOAT32, TINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
1289 twoTypes{TFLOAT32, TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
1290 twoTypes{TFLOAT32, TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, TINT32},
1291 twoTypes{TFLOAT32, TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, TINT64},
// float64 -> signed int
1293 twoTypes{TFLOAT64, TINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
1294 twoTypes{TFLOAT64, TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
1295 twoTypes{TFLOAT64, TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, TINT32},
1296 twoTypes{TFLOAT64, TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, TINT64},
// unsigned int -> float32: zero-extend, widening past the unsigned
// width where needed so the signed conversion op is correct.
1298 twoTypes{TUINT8, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, TINT32},
1299 twoTypes{TUINT16, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, TINT32},
1300 twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, TINT64}, // go wide to dodge unsigned
1301 twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64},            // Cvt64Uto32F, branchy code expansion instead
// unsigned int -> float64
1303 twoTypes{TUINT8, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, TINT32},
1304 twoTypes{TUINT16, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, TINT32},
1305 twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, TINT64}, // go wide to dodge unsigned
1306 twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64},            // Cvt64Uto64F, branchy code expansion instead
// float32 -> unsigned int
1308 twoTypes{TFLOAT32, TUINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
1309 twoTypes{TFLOAT32, TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
1310 twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
1311 twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64},           // Cvt32Fto64U, branchy code expansion instead
// float64 -> unsigned int
1313 twoTypes{TFLOAT64, TUINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
1314 twoTypes{TFLOAT64, TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
1315 twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
1316 twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64},           // Cvt64Fto64U, branchy code expansion instead
// float <-> float
1319 twoTypes{TFLOAT64, TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, TFLOAT32},
1320 twoTypes{TFLOAT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCopy, TFLOAT64},
1321 twoTypes{TFLOAT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCopy, TFLOAT32},
1322 twoTypes{TFLOAT32, TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, TFLOAT64},
// shiftOpToSSA selects the SSA shift op from the gc operator, the etype
// of the value being shifted (which fixes the width, and for ORSH the
// signedness: arithmetic x vs. logical Ux), and the etype of the shift
// count.
1325 var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
// left shift, 8-bit value (signed and unsigned shift left identically)
1326 opAndTwoTypes{OLSH, TINT8, TUINT8}: ssa.OpLsh8x8,
1327 opAndTwoTypes{OLSH, TUINT8, TUINT8}: ssa.OpLsh8x8,
1328 opAndTwoTypes{OLSH, TINT8, TUINT16}: ssa.OpLsh8x16,
1329 opAndTwoTypes{OLSH, TUINT8, TUINT16}: ssa.OpLsh8x16,
1330 opAndTwoTypes{OLSH, TINT8, TUINT32}: ssa.OpLsh8x32,
1331 opAndTwoTypes{OLSH, TUINT8, TUINT32}: ssa.OpLsh8x32,
1332 opAndTwoTypes{OLSH, TINT8, TUINT64}: ssa.OpLsh8x64,
1333 opAndTwoTypes{OLSH, TUINT8, TUINT64}: ssa.OpLsh8x64,
// left shift, 16-bit value
1335 opAndTwoTypes{OLSH, TINT16, TUINT8}: ssa.OpLsh16x8,
1336 opAndTwoTypes{OLSH, TUINT16, TUINT8}: ssa.OpLsh16x8,
1337 opAndTwoTypes{OLSH, TINT16, TUINT16}: ssa.OpLsh16x16,
1338 opAndTwoTypes{OLSH, TUINT16, TUINT16}: ssa.OpLsh16x16,
1339 opAndTwoTypes{OLSH, TINT16, TUINT32}: ssa.OpLsh16x32,
1340 opAndTwoTypes{OLSH, TUINT16, TUINT32}: ssa.OpLsh16x32,
1341 opAndTwoTypes{OLSH, TINT16, TUINT64}: ssa.OpLsh16x64,
1342 opAndTwoTypes{OLSH, TUINT16, TUINT64}: ssa.OpLsh16x64,
// left shift, 32-bit value
1344 opAndTwoTypes{OLSH, TINT32, TUINT8}: ssa.OpLsh32x8,
1345 opAndTwoTypes{OLSH, TUINT32, TUINT8}: ssa.OpLsh32x8,
1346 opAndTwoTypes{OLSH, TINT32, TUINT16}: ssa.OpLsh32x16,
1347 opAndTwoTypes{OLSH, TUINT32, TUINT16}: ssa.OpLsh32x16,
1348 opAndTwoTypes{OLSH, TINT32, TUINT32}: ssa.OpLsh32x32,
1349 opAndTwoTypes{OLSH, TUINT32, TUINT32}: ssa.OpLsh32x32,
1350 opAndTwoTypes{OLSH, TINT32, TUINT64}: ssa.OpLsh32x64,
1351 opAndTwoTypes{OLSH, TUINT32, TUINT64}: ssa.OpLsh32x64,
// left shift, 64-bit value
1353 opAndTwoTypes{OLSH, TINT64, TUINT8}: ssa.OpLsh64x8,
1354 opAndTwoTypes{OLSH, TUINT64, TUINT8}: ssa.OpLsh64x8,
1355 opAndTwoTypes{OLSH, TINT64, TUINT16}: ssa.OpLsh64x16,
1356 opAndTwoTypes{OLSH, TUINT64, TUINT16}: ssa.OpLsh64x16,
1357 opAndTwoTypes{OLSH, TINT64, TUINT32}: ssa.OpLsh64x32,
1358 opAndTwoTypes{OLSH, TUINT64, TUINT32}: ssa.OpLsh64x32,
1359 opAndTwoTypes{OLSH, TINT64, TUINT64}: ssa.OpLsh64x64,
1360 opAndTwoTypes{OLSH, TUINT64, TUINT64}: ssa.OpLsh64x64,
// right shift, 8-bit value: arithmetic (Rsh..x..) for signed,
// logical (Rsh..Ux..) for unsigned
1362 opAndTwoTypes{ORSH, TINT8, TUINT8}: ssa.OpRsh8x8,
1363 opAndTwoTypes{ORSH, TUINT8, TUINT8}: ssa.OpRsh8Ux8,
1364 opAndTwoTypes{ORSH, TINT8, TUINT16}: ssa.OpRsh8x16,
1365 opAndTwoTypes{ORSH, TUINT8, TUINT16}: ssa.OpRsh8Ux16,
1366 opAndTwoTypes{ORSH, TINT8, TUINT32}: ssa.OpRsh8x32,
1367 opAndTwoTypes{ORSH, TUINT8, TUINT32}: ssa.OpRsh8Ux32,
1368 opAndTwoTypes{ORSH, TINT8, TUINT64}: ssa.OpRsh8x64,
1369 opAndTwoTypes{ORSH, TUINT8, TUINT64}: ssa.OpRsh8Ux64,
// right shift, 16-bit value
1371 opAndTwoTypes{ORSH, TINT16, TUINT8}: ssa.OpRsh16x8,
1372 opAndTwoTypes{ORSH, TUINT16, TUINT8}: ssa.OpRsh16Ux8,
1373 opAndTwoTypes{ORSH, TINT16, TUINT16}: ssa.OpRsh16x16,
1374 opAndTwoTypes{ORSH, TUINT16, TUINT16}: ssa.OpRsh16Ux16,
1375 opAndTwoTypes{ORSH, TINT16, TUINT32}: ssa.OpRsh16x32,
1376 opAndTwoTypes{ORSH, TUINT16, TUINT32}: ssa.OpRsh16Ux32,
1377 opAndTwoTypes{ORSH, TINT16, TUINT64}: ssa.OpRsh16x64,
1378 opAndTwoTypes{ORSH, TUINT16, TUINT64}: ssa.OpRsh16Ux64,
// right shift, 32-bit value
1380 opAndTwoTypes{ORSH, TINT32, TUINT8}: ssa.OpRsh32x8,
1381 opAndTwoTypes{ORSH, TUINT32, TUINT8}: ssa.OpRsh32Ux8,
1382 opAndTwoTypes{ORSH, TINT32, TUINT16}: ssa.OpRsh32x16,
1383 opAndTwoTypes{ORSH, TUINT32, TUINT16}: ssa.OpRsh32Ux16,
1384 opAndTwoTypes{ORSH, TINT32, TUINT32}: ssa.OpRsh32x32,
1385 opAndTwoTypes{ORSH, TUINT32, TUINT32}: ssa.OpRsh32Ux32,
1386 opAndTwoTypes{ORSH, TINT32, TUINT64}: ssa.OpRsh32x64,
1387 opAndTwoTypes{ORSH, TUINT32, TUINT64}: ssa.OpRsh32Ux64,
// right shift, 64-bit value
1389 opAndTwoTypes{ORSH, TINT64, TUINT8}: ssa.OpRsh64x8,
1390 opAndTwoTypes{ORSH, TUINT64, TUINT8}: ssa.OpRsh64Ux8,
1391 opAndTwoTypes{ORSH, TINT64, TUINT16}: ssa.OpRsh64x16,
1392 opAndTwoTypes{ORSH, TUINT64, TUINT16}: ssa.OpRsh64Ux16,
1393 opAndTwoTypes{ORSH, TINT64, TUINT32}: ssa.OpRsh64x32,
1394 opAndTwoTypes{ORSH, TUINT64, TUINT32}: ssa.OpRsh64Ux32,
1395 opAndTwoTypes{ORSH, TINT64, TUINT64}: ssa.OpRsh64x64,
1396 opAndTwoTypes{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64,
// ssaShiftOp returns the SSA op implementing shift operator op for a
// value of type t shifted by a count of type u, via shiftOpToSSA on the
// concretized etypes. A missing entry is reported as unimplemented.
1399 func (s *state) ssaShiftOp(op Op, t *Type, u *Type) ssa.Op {
1400 etype1 := s.concreteEtype(t)
1401 etype2 := s.concreteEtype(u)
1402 x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
1404 s.Unimplementedf("unhandled shift op %s etype=%s/%s", op, etype1, etype2)
// ssaRotateOp returns the SSA op implementing rotate operator op
// (OLROT) for a value of type t. Rotates are keyed in the same opToSSA
// table as the ordinary unary/binary ops.
1409 func (s *state) ssaRotateOp(op Op, t *Type) ssa.Op {
1410 etype1 := s.concreteEtype(t)
1411 x, ok := opToSSA[opAndType{op, etype1}]
1413 s.Unimplementedf("unhandled rotate op %s etype=%s", op, etype1)
1418 // expr converts the expression n to ssa, adds it to s and returns the ssa result.
// NOTE(review): this excerpt is elided — most case labels and closing
// braces of the big switch on n.Op are not visible here. The comments
// below only annotate what the visible lines themselves establish.
1419 func (s *state) expr(n *Node) *ssa.Value {
1420 if !(n.Op == ONAME || n.Op == OLITERAL && n.Sym != nil) {
1421 // ONAMEs and named OLITERALs have the line number
1422 // of the decl, not the use. See issue 14742.
1423 s.pushLine(n.Lineno)
// External symbol: its value is an address computed off the static base.
1430 aux := s.lookupSymbol(n, &ssa.ExternSymbol{Typ: n.Type, Sym: n.Left.Sym})
1431 return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb)
1433 if n.Class == PFUNC {
1434 // "value" of a function is the address of the function's closure
1435 sym := funcsym(n.Sym)
1436 aux := &ssa.ExternSymbol{Typ: n.Type, Sym: sym}
1437 return s.entryNewValue1A(ssa.OpAddr, Ptrto(n.Type), aux, s.sb)
1440 return s.variable(n, n.Type)
// Not SSA-able: take the address and emit a load.
1442 addr, _ := s.addr(n, false)
1443 return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
1445 addr, _ := s.addr(n, false)
1446 return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
// OLITERAL: dispatch on the constant's kind, then on its size.
1448 switch u := n.Val().U.(type) {
1451 switch n.Type.Size() {
1453 return s.constInt8(n.Type, int8(i))
1455 return s.constInt16(n.Type, int16(i))
1457 return s.constInt32(n.Type, int32(i))
1459 return s.constInt64(n.Type, i)
1461 s.Fatalf("bad integer size %d", n.Type.Size())
1466 return s.constEmptyString(n.Type)
1468 return s.entryNewValue0A(ssa.OpConstString, n.Type, u)
1470 return s.constBool(u)
1475 return s.constSlice(t)
1476 case t.IsInterface():
1477 return s.constInterface(t)
1479 return s.constNil(t)
1482 switch n.Type.Size() {
1484 return s.constFloat32(n.Type, u.Float32())
1486 return s.constFloat64(n.Type, u.Float64())
1488 s.Fatalf("bad float size %d", n.Type.Size())
// Complex constant: built from its two float components.
1494 switch n.Type.Size() {
1496 pt := Types[TFLOAT32]
1497 return s.newValue2(ssa.OpComplexMake, n.Type,
1498 s.constFloat32(pt, r.Float32()),
1499 s.constFloat32(pt, i.Float32()))
1501 pt := Types[TFLOAT64]
1502 return s.newValue2(ssa.OpComplexMake, n.Type,
1503 s.constFloat64(pt, r.Float64()),
1504 s.constFloat64(pt, i.Float64()))
1506 s.Fatalf("bad float size %d", n.Type.Size())
1511 s.Unimplementedf("unhandled OLITERAL %v", n.Val().Ctype())
// OCONVNOP: representation-preserving conversion, with sanity checks.
1518 // Assume everything will work out, so set up our return value.
1519 // Anything interesting that happens from here is a fatal.
1522 // Special case for not confusing GC and liveness.
1523 // We don't want pointers accidentally classified
1524 // as not-pointers or vice-versa because of copy
1526 if to.IsPtrShaped() != from.IsPtrShaped() {
1527 return s.newValue2(ssa.OpConvert, to, x, s.mem())
1530 v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type
1533 if to.Etype == TFUNC && from.IsPtrShaped() {
1537 // named <--> unnamed type or typed <--> untyped const
1538 if from.Etype == to.Etype {
1542 // unsafe.Pointer <--> *T
1543 if to.Etype == TUNSAFEPTR && from.IsPtr() || from.Etype == TUNSAFEPTR && to.IsPtr() {
1549 if from.Width != to.Width {
1550 s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width)
1553 if etypesign(from.Etype) != etypesign(to.Etype) {
1554 s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Etype, to, to.Etype)
1559 // These appear to be fine, but they fail the
1560 // integer constraint below, so okay them here.
1561 // Sample non-integer conversion: map[string]string -> *uint8
1565 if etypesign(from.Etype) == 0 {
1566 s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
1570 // integer, same width, same sign
// OCONV: real conversions between numeric types.
1575 ft := n.Left.Type // from type
1576 tt := n.Type      // to type
1577 if ft.IsInteger() && tt.IsInteger() {
1579 if tt.Size() == ft.Size() {
1581 } else if tt.Size() < ft.Size() {
// 10*from+to encodes the (from, to) size pair in one switch key.
1583 switch 10*ft.Size() + tt.Size() {
1585 op = ssa.OpTrunc16to8
1587 op = ssa.OpTrunc32to8
1589 op = ssa.OpTrunc32to16
1591 op = ssa.OpTrunc64to8
1593 op = ssa.OpTrunc64to16
1595 op = ssa.OpTrunc64to32
1597 s.Fatalf("weird integer truncation %s -> %s", ft, tt)
1599 } else if ft.IsSigned() {
1601 switch 10*ft.Size() + tt.Size() {
1603 op = ssa.OpSignExt8to16
1605 op = ssa.OpSignExt8to32
1607 op = ssa.OpSignExt8to64
1609 op = ssa.OpSignExt16to32
1611 op = ssa.OpSignExt16to64
1613 op = ssa.OpSignExt32to64
1615 s.Fatalf("bad integer sign extension %s -> %s", ft, tt)
1619 switch 10*ft.Size() + tt.Size() {
1621 op = ssa.OpZeroExt8to16
1623 op = ssa.OpZeroExt8to32
1625 op = ssa.OpZeroExt8to64
1627 op = ssa.OpZeroExt16to32
1629 op = ssa.OpZeroExt16to64
1631 op = ssa.OpZeroExt32to64
1633 s.Fatalf("weird integer sign extension %s -> %s", ft, tt)
1636 return s.newValue1(op, n.Type, x)
// Float conversions are table-driven via fpConvOpToSSA.
1639 if ft.IsFloat() || tt.IsFloat() {
1640 conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
1642 s.Fatalf("weird float conversion %s -> %s", ft, tt)
1644 op1, op2, it := conv.op1, conv.op2, conv.intermediateType
1646 if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid {
1647 // normal case, not tripping over unsigned 64
1648 if op1 == ssa.OpCopy {
1649 if op2 == ssa.OpCopy {
1652 return s.newValue1(op2, n.Type, x)
1654 if op2 == ssa.OpCopy {
1655 return s.newValue1(op1, n.Type, x)
1657 return s.newValue1(op2, n.Type, s.newValue1(op1, Types[it], x))
1659 // Tricky 64-bit unsigned cases.
1661 // therefore tt is float32 or float64, and ft is also unsigned
1663 return s.uint64Tofloat32(n, x, ft, tt)
1666 return s.uint64Tofloat64(n, x, ft, tt)
1668 s.Fatalf("weird unsigned integer to float conversion %s -> %s", ft, tt)
1670 // therefore ft is float32 or float64, and tt is unsigned integer
1672 return s.float32ToUint64(n, x, ft, tt)
1675 return s.float64ToUint64(n, x, ft, tt)
1677 s.Fatalf("weird float to unsigned integer conversion %s -> %s", ft, tt)
// Complex conversions: convert real and imaginary parts independently.
1681 if ft.IsComplex() && tt.IsComplex() {
1683 if ft.Size() == tt.Size() {
1685 } else if ft.Size() == 8 && tt.Size() == 16 {
1686 op = ssa.OpCvt32Fto64F
1687 } else if ft.Size() == 16 && tt.Size() == 8 {
1688 op = ssa.OpCvt64Fto32F
1690 s.Fatalf("weird complex conversion %s -> %s", ft, tt)
1692 ftp := floatForComplex(ft)
1693 ttp := floatForComplex(tt)
1694 return s.newValue2(ssa.OpComplexMake, tt,
1695 s.newValue1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)),
1696 s.newValue1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x)))
1699 s.Unimplementedf("unhandled OCONV %s -> %s", n.Left.Type.Etype, n.Type.Etype)
1703 res, _ := s.dottype(n, false)
// Comparisons: complex types compare componentwise for (in)equality;
// ordered comparison of complex is a fatal error.
1707 case OLT, OEQ, ONE, OLE, OGE, OGT:
1709 b := s.expr(n.Right)
1710 if n.Left.Type.IsComplex() {
1711 pt := floatForComplex(n.Left.Type)
1712 op := s.ssaOp(OEQ, pt)
1713 r := s.newValue2(op, Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
1714 i := s.newValue2(op, Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
1715 c := s.newValue2(ssa.OpAnd8, Types[TBOOL], r, i)
1720 return s.newValue1(ssa.OpNot, Types[TBOOL], c)
1722 s.Fatalf("ordered complex compare %s", n.Op)
1725 return s.newValue2(s.ssaOp(n.Op, n.Left.Type), Types[TBOOL], a, b)
// Complex multiply: (ar+ai*i)*(br+bi*i), computed in float64 to reduce
// cancellation error, then narrowed back if the result is complex64.
1728 b := s.expr(n.Right)
1729 if n.Type.IsComplex() {
1730 mulop := ssa.OpMul64F
1731 addop := ssa.OpAdd64F
1732 subop := ssa.OpSub64F
1733 pt := floatForComplex(n.Type) // Could be Float32 or Float64
1734 wt := Types[TFLOAT64]         // Compute in Float64 to minimize cancellation error
1736 areal := s.newValue1(ssa.OpComplexReal, pt, a)
1737 breal := s.newValue1(ssa.OpComplexReal, pt, b)
1738 aimag := s.newValue1(ssa.OpComplexImag, pt, a)
1739 bimag := s.newValue1(ssa.OpComplexImag, pt, b)
1741 if pt != wt { // Widen for calculation
1742 areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal)
1743 breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal)
1744 aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag)
1745 bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag)
1748 xreal := s.newValue2(subop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag))
1749 ximag := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, bimag), s.newValue2(mulop, wt, aimag, breal))
1751 if pt != wt { // Narrow to store back
1752 xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal)
1753 ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag)
1756 return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
1758 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
// Division: complex division uses the standard denominator formula;
// integer division gets an explicit divide-by-zero check.
1762 b := s.expr(n.Right)
1763 if n.Type.IsComplex() {
1764 // TODO this is not executed because the front-end substitutes a runtime call.
1765 // That probably ought to change; with modest optimization the widen/narrow
1766 // conversions could all be elided in larger expression trees.
1767 mulop := ssa.OpMul64F
1768 addop := ssa.OpAdd64F
1769 subop := ssa.OpSub64F
1770 divop := ssa.OpDiv64F
1771 pt := floatForComplex(n.Type) // Could be Float32 or Float64
1772 wt := Types[TFLOAT64]         // Compute in Float64 to minimize cancellation error
1774 areal := s.newValue1(ssa.OpComplexReal, pt, a)
1775 breal := s.newValue1(ssa.OpComplexReal, pt, b)
1776 aimag := s.newValue1(ssa.OpComplexImag, pt, a)
1777 bimag := s.newValue1(ssa.OpComplexImag, pt, b)
1779 if pt != wt { // Widen for calculation
1780 areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal)
1781 breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal)
1782 aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag)
1783 bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag)
1786 denom := s.newValue2(addop, wt, s.newValue2(mulop, wt, breal, breal), s.newValue2(mulop, wt, bimag, bimag))
1787 xreal := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag))
1788 ximag := s.newValue2(subop, wt, s.newValue2(mulop, wt, aimag, breal), s.newValue2(mulop, wt, areal, bimag))
1790 // TODO not sure if this is best done in wide precision or narrow
1791 // Double-rounding might be an issue.
1792 // Note that the pre-SSA implementation does the entire calculation
1793 // in wide format, so wide is compatible.
1794 xreal = s.newValue2(divop, wt, xreal, denom)
1795 ximag = s.newValue2(divop, wt, ximag, denom)
1797 if pt != wt { // Narrow to store back
1798 xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal)
1799 ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag)
1801 return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
1803 if n.Type.IsFloat() {
1804 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
1806 // do a size-appropriate check for zero
1807 cmp := s.newValue2(s.ssaOp(ONE, n.Type), Types[TBOOL], b, s.zeroVal(n.Type))
1808 s.check(cmp, panicdivide)
1809 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
// Modulus: also guarded by a divide-by-zero check.
1813 b := s.expr(n.Right)
1814 // do a size-appropriate check for zero
1815 cmp := s.newValue2(s.ssaOp(ONE, n.Type), Types[TBOOL], b, s.zeroVal(n.Type))
1816 s.check(cmp, panicdivide)
1817 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
// Add/sub: complex operands are handled componentwise.
1820 b := s.expr(n.Right)
1821 if n.Type.IsComplex() {
1822 pt := floatForComplex(n.Type)
1823 op := s.ssaOp(n.Op, pt)
1824 return s.newValue2(ssa.OpComplexMake, n.Type,
1825 s.newValue2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)),
1826 s.newValue2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)))
1828 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
1829 case OAND, OOR, OHMUL, OXOR:
1831 b := s.expr(n.Right)
1832 return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
// Shifts: op selected by both operand types (see ssaShiftOp).
1835 b := s.expr(n.Right)
1836 return s.newValue2(s.ssaShiftOp(n.Op, n.Type, n.Right.Type), a.Type, a, b)
// OLROT: rotate by a compile-time constant distance.
1839 i := n.Right.Int64()
1840 if i <= 0 || i >= n.Type.Size()*8 {
1841 s.Fatalf("Wrong rotate distance for LROT, expected 1 through %d, saw %d", n.Type.Size()*8-1, i)
1843 return s.newValue1I(s.ssaRotateOp(n.Op, n.Type), a.Type, i, a)
1844 case OANDAND, OOROR:
1845 // To implement OANDAND (and OOROR), we introduce a
1846 // new temporary variable to hold the result. The
1847 // variable is associated with the OANDAND node in the
1848 // s.vars table (normally variables are only
1849 // associated with ONAME nodes). We convert
1856 // Using var in the subsequent block introduces the
1857 // necessary phi variable.
1858 el := s.expr(n.Left)
1862 b.Kind = ssa.BlockIf
1864 // In theory, we should set b.Likely here based on context.
1865 // However, gc only gives us likeliness hints
1866 // in a single place, for plain OIF statements,
1867 // and passing around context is finnicky, so don't bother for now.
1869 bRight := s.f.NewBlock(ssa.BlockPlain)
1870 bResult := s.f.NewBlock(ssa.BlockPlain)
1871 if n.Op == OANDAND {
1873 b.AddEdgeTo(bResult)
1874 } else if n.Op == OOROR {
1875 b.AddEdgeTo(bResult)
1879 s.startBlock(bRight)
1880 er := s.expr(n.Right)
1884 b.AddEdgeTo(bResult)
1886 s.startBlock(bResult)
1887 return s.variable(n, Types[TBOOL])
// OCOMPLEX: build a complex value from its two parts.
1890 i := s.expr(n.Right)
1891 return s.newValue2(ssa.OpComplexMake, n.Type, r, i)
// Unary ops: complex negation negates both components.
1896 if n.Type.IsComplex() {
1897 tp := floatForComplex(n.Type)
1898 negop := s.ssaOp(n.Op, tp)
1899 return s.newValue2(ssa.OpComplexMake, n.Type,
1900 s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)),
1901 s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a)))
1903 return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
1904 case ONOT, OCOM, OSQRT:
1906 return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
1909 return s.newValue1(s.ssaOp(n.Op, n.Left.Type), n.Type, a)
1911 return s.expr(n.Left)
// Address-of: delegate to s.addr.
1914 a, _ := s.addr(n.Left, n.Bounded)
1915 // Note we know the volatile result is false because you can't write &f() in Go.
// OINDREG: only loads off SP are supported here.
1919 if int(n.Reg) != Thearch.REGSP {
1920 s.Unimplementedf("OINDREG of non-SP register %s in expr: %v", obj.Rconv(int(n.Reg)), n)
1923 addr := s.entryNewValue1I(ssa.OpOffPtr, Ptrto(n.Type), n.Xoffset, s.sp)
1924 return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
1927 p := s.exprPtr(n.Left, false, n.Lineno)
1928 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
// Field access: select directly from an SSA struct value when possible,
// otherwise load through the field's address.
1934 return s.newValue1I(ssa.OpStructSelect, n.Type, int64(fieldIdx(n)), v)
1936 p, _ := s.addr(n, false)
1937 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
1940 p := s.exprPtr(n.Left, false, n.Lineno)
1941 p = s.newValue1I(ssa.OpOffPtr, p.Type, n.Xoffset, p)
1942 return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
// Indexing: strings are bounds-checked and loaded byte-wise;
// slices and arrays go through addr + load.
1946 case n.Left.Type.IsString():
1948 i := s.expr(n.Right)
1949 i = s.extendIndex(i)
1951 len := s.newValue1(ssa.OpStringLen, Types[TINT], a)
1952 s.boundsCheck(i, len)
1954 ptrtyp := Ptrto(Types[TUINT8])
1955 ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
1956 if Isconst(n.Right, CTINT) {
1957 ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64(), ptr)
1959 ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
1961 return s.newValue2(ssa.OpLoad, Types[TUINT8], ptr, s.mem())
1962 case n.Left.Type.IsSlice():
1963 p, _ := s.addr(n, false)
1964 return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem())
1965 case n.Left.Type.IsArray():
1966 // TODO: fix when we can SSA arrays of length 1.
1967 p, _ := s.addr(n, false)
1968 return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem())
1970 s.Fatalf("bad type for index %v", n.Left.Type)
// len/cap: op depends on the operand kind.
1976 case n.Left.Type.IsSlice():
1977 op := ssa.OpSliceLen
1981 return s.newValue1(op, Types[TINT], s.expr(n.Left))
1982 case n.Left.Type.IsString(): // string; not reachable for OCAP
1983 return s.newValue1(ssa.OpStringLen, Types[TINT], s.expr(n.Left))
1984 case n.Left.Type.IsMap(), n.Left.Type.IsChan():
1985 return s.referenceTypeBuiltin(n, s.expr(n.Left))
1987 return s.constInt(Types[TINT], n.Left.Type.NumElem())
1992 if n.Left.Type.IsSlice() {
1993 return s.newValue1(ssa.OpSlicePtr, n.Type, a)
1995 return s.newValue1(ssa.OpStringPtr, n.Type, a)
2000 return s.newValue1(ssa.OpITab, n.Type, a)
// OEFACE: build an empty interface from a type word and a data word.
2003 tab := s.expr(n.Left)
2004 data := s.expr(n.Right)
2005 // The frontend allows putting things like struct{*byte} in
2006 // the data portion of an eface. But we don't want struct{*byte}
2007 // as a register type because (among other reasons) the liveness
2008 // analysis is confused by the "fat" variables that result from
2009 // such types being spilled.
2010 // So here we ensure that we are selecting the underlying pointer
2011 // when we build an eface.
2012 // TODO: get rid of this now that structs can be SSA'd?
2013 for !data.Type.IsPtrShaped() {
2015 case data.Type.IsArray():
2016 data = s.newValue1I(ssa.OpArrayIndex, data.Type.ElemType(), 0, data)
2017 case data.Type.IsStruct():
2018 for i := data.Type.NumFields() - 1; i >= 0; i-- {
2019 f := data.Type.FieldType(i)
2021 // eface type could also be struct{p *byte; q [0]int}
2024 data = s.newValue1I(ssa.OpStructSelect, f, int64(i), data)
2028 s.Fatalf("type being put into an eface isn't a pointer")
2031 return s.newValue2(ssa.OpIMake, n.Type, tab, data)
// Slicing expressions: compute optional index operands, then slice.
2033 case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR:
2035 var i, j, k *ssa.Value
2036 low, high, max := n.SliceBounds()
2038 i = s.extendIndex(s.expr(low))
2041 j = s.extendIndex(s.expr(high))
2044 k = s.extendIndex(s.expr(max))
2046 p, l, c := s.slice(n.Left.Type, v, i, j, k)
2047 return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c)
2052 low, high, _ := n.SliceBounds()
2054 i = s.extendIndex(s.expr(low))
2057 j = s.extendIndex(s.expr(high))
2059 p, l, _ := s.slice(n.Left.Type, v, i, j, nil)
2060 return s.newValue2(ssa.OpStringMake, n.Type, p, l)
// Calls: intrinsics are expanded inline; otherwise emit a call and
// load the result from the returned address.
2063 if isIntrinsicCall1(n) {
2064 return s.intrinsicCall1(n)
2068 case OCALLINTER, OCALLMETH:
2069 a := s.call(n, callNormal)
2070 return s.newValue2(ssa.OpLoad, n.Type, a, s.mem())
2073 return s.newValue1(ssa.OpGetG, n.Type, s.mem())
2076 return s.append(n, false)
2079 s.Unimplementedf("unhandled expr %s", n.Op)
2084 // append converts an OAPPEND node to SSA.
2085 // If inplace is false, it converts the OAPPEND expression n to an ssa.Value,
2086 // adds it to s, and returns the Value.
2087 // If inplace is true, it writes the result of the OAPPEND expression n
2088 // back to the slice being appended to, and returns nil.
2089 // inplace MUST be set to false if the slice can be SSA'd.
// NOTE(review): excerpt is elided — several interior lines (branches,
// closing braces) are not visible here.
2090 func (s *state) append(n *Node, inplace bool) *ssa.Value {
2091 // If inplace is false, process as expression "append(s, e1, e2, e3)":
2093 // ptr, len, cap := s
2094 // newlen := len + 3
2095 // if newlen > cap {
2096 // ptr, len, cap = growslice(s, newlen)
2097 // newlen = len + 3 // recalculate to avoid a spill
2099 // // with write barriers, if needed:
2101 // *(ptr+len+1) = e2
2102 // *(ptr+len+2) = e3
2103 // return makeslice(ptr, newlen, cap)
2106 // If inplace is true, process as statement "s = append(s, e1, e2, e3)":
2109 // ptr, len, cap := s
2110 // newlen := len + 3
2111 // if newlen > cap {
2112 // newptr, len, newcap = growslice(ptr, len, cap, newlen)
2113 // vardef(a) // if necessary, advise liveness we are writing a new a
2114 // *a.cap = newcap // write before ptr to avoid a spill
2115 // *a.ptr = newptr // with write barrier
2117 // newlen = len + 3 // recalculate to avoid a spill
2119 // // with write barriers, if needed:
2121 // *(ptr+len+1) = e2
2122 // *(ptr+len+2) = e3
2128 sn := n.List.First() // the slice node is the first in the list
// Load the current slice value (through its address when not SSA-able).
2130 var slice, addr *ssa.Value
2132 addr, _ = s.addr(sn, false)
2133 slice = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
2138 // Allocate new blocks
2139 grow := s.f.NewBlock(ssa.BlockPlain)
2140 assign := s.f.NewBlock(ssa.BlockPlain)
2142 // Decide if we need to grow
2143 nargs := int64(n.List.Len() - 1)
2144 p := s.newValue1(ssa.OpSlicePtr, pt, slice)
2145 l := s.newValue1(ssa.OpSliceLen, Types[TINT], slice)
2146 c := s.newValue1(ssa.OpSliceCap, Types[TINT], slice)
2147 nl := s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], l, s.constInt(Types[TINT], nargs))
2149 cmp := s.newValue2(s.ssaOp(OGT, Types[TINT]), Types[TBOOL], nl, c)
2153 s.vars[&newlenVar] = nl
// Growth is assumed rare: branch to the grow block only when newlen > cap.
2160 b.Kind = ssa.BlockIf
2161 b.Likely = ssa.BranchUnlikely
// Call growslice; taddr is the address of the element type descriptor.
2168 taddr := s.newValue1A(ssa.OpAddr, Types[TUINTPTR], &ssa.ExternSymbol{Typ: Types[TUINTPTR], Sym: typenamesym(n.Type.Elem())}, s.sb)
2170 r := s.rtcall(growslice, true, []*Type{pt, Types[TINT], Types[TINT]}, taddr, p, l, c, nl)
2174 // Tell liveness we're about to build a new slice
2175 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, sn, s.mem())
// In-place: write cap, then ptr (with write barrier), back to the slice header.
2177 capaddr := s.newValue1I(ssa.OpOffPtr, pt, int64(Array_cap), addr)
2178 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, capaddr, r[2], s.mem())
2179 s.insertWBstore(pt, addr, r[0], n.Lineno, 0)
2180 // load the value we just stored to avoid having to spill it
2181 s.vars[&ptrVar] = s.newValue2(ssa.OpLoad, pt, addr, s.mem())
2182 s.vars[&lenVar] = r[1] // avoid a spill in the fast path
2184 s.vars[&ptrVar] = r[0]
2185 s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], r[1], s.constInt(Types[TINT], nargs))
2186 s.vars[&capVar] = r[2]
2192 // assign new elements to slots
2193 s.startBlock(assign)
2196 l = s.variable(&lenVar, Types[TINT]) // generates phi for len
2197 nl = s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], l, s.constInt(Types[TINT], nargs))
2198 lenaddr := s.newValue1I(ssa.OpOffPtr, pt, int64(Array_nel), addr)
2199 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenaddr, nl, s.mem())
2203 type argRec struct {
2204 // if store is true, we're appending the value v. If false, we're appending the
2205 // value at *v. If store==false, isVolatile reports whether the source
2206 // is in the outargs section of the stack frame.
// Evaluate the appended operands: SSA-able values directly, others by address.
2211 args := make([]argRec, 0, nargs)
2212 for _, n := range n.List.Slice()[1:] {
2213 if canSSAType(n.Type) {
2214 args = append(args, argRec{v: s.expr(n), store: true})
2216 v, isVolatile := s.addr(n, false)
2217 args = append(args, argRec{v: v, isVolatile: isVolatile})
2221 p = s.variable(&ptrVar, pt) // generates phi for ptr
2223 nl = s.variable(&newlenVar, Types[TINT]) // generates phi for nl
2224 c = s.variable(&capVar, Types[TINT])     // generates phi for cap
2226 p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l)
2227 // TODO: just one write barrier call for all of these writes?
2228 // TODO: maybe just one writeBarrier.enabled check?
2229 for i, arg := range args {
2230 addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(Types[TINT], int64(i)))
2232 if haspointers(et) {
2233 s.insertWBstore(et, addr, arg.v, n.Lineno, 0)
2235 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, et.Size(), addr, arg.v, s.mem())
2238 if haspointers(et) {
2239 s.insertWBmove(et, addr, arg.v, n.Lineno, arg.isVolatile)
2241 s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, et.Size(), addr, arg.v, s.mem())
// Drop the temporaries from the variable table now that they are dead.
2246 delete(s.vars, &ptrVar)
2248 delete(s.vars, &lenVar)
2251 delete(s.vars, &newlenVar)
2252 delete(s.vars, &capVar)
2254 return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c)
2257 // condBranch evaluates the boolean expression cond and branches to yes
2258 // if cond is true and no if cond is false.
2259 // This function is intended to handle && and || better than just calling
2260 // s.expr(cond) and branching on the result.
2261 func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) {
// a && b: if a is false jump straight to no; otherwise continue (via mid)
// to test b. The hint for the first test is clamped to >=0 because we
// can't tell which operand the caller's hint refers to.
2262 if cond.Op == OANDAND {
2263 mid := s.f.NewBlock(ssa.BlockPlain)
2264 s.stmtList(cond.Ninit)
2265 s.condBranch(cond.Left, mid, no, max8(likely, 0))
2267 s.condBranch(cond.Right, yes, no, likely)
2269 // Note: if likely==1, then both recursive calls pass 1.
2270 // If likely==-1, then we don't have enough information to decide
2271 // whether the first branch is likely or not. So we pass 0 for
2272 // the likeliness of the first branch.
2273 // TODO: have the frontend give us branch prediction hints for
2274 // OANDAND and OOROR nodes (if it ever has such info).
// a || b: mirror image of &&. If a is true jump straight to yes;
// otherwise test b. The first test's hint is clamped to <=0.
2276 if cond.Op == OOROR {
2277 mid := s.f.NewBlock(ssa.BlockPlain)
2278 s.stmtList(cond.Ninit)
2279 s.condBranch(cond.Left, yes, mid, min8(likely, 0))
2281 s.condBranch(cond.Right, yes, no, likely)
2283 // Note: if likely==-1, then both recursive calls pass -1.
2284 // If likely==1, then we don't have enough info to decide
2285 // the likelihood of the first branch.
// !a: swap the branch targets and negate the prediction hint.
2287 if cond.Op == ONOT {
2288 s.stmtList(cond.Ninit)
2289 s.condBranch(cond.Left, no, yes, -likely)
// General case: evaluate cond to a boolean SSA value and emit a
// two-way conditional block on the result.
2294 b.Kind = ssa.BlockIf
2296 b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness
2304 skipPtr skipMask = 1 << iota
2309 // assign does left = right.
2310 // Right has already been evaluated to ssa, left has not.
2311 // If deref is true, then we do left = *right instead (and right has already been nil-checked).
2312 // If deref is true and right == nil, just do left = 0.
2313 // If deref is true, rightIsVolatile reports whether right points to volatile (clobbered by a call) storage.
2314 // Include a write barrier if wb is true.
2315 // skip indicates assignments (at the top level) that can be avoided.
2316 func (s *state) assign(left *Node, right *ssa.Value, wb, deref bool, line int32, skip skipMask, rightIsVolatile bool) {
// Assignment to blank: left has no storage, nothing to update.
2317 if left.Op == ONAME && isblank(left) {
2324 s.Fatalf("can SSA LHS %s but not RHS %s", left, right)
2326 if left.Op == ODOT {
2327 // We're assigning to a field of an ssa-able value.
2328 // We need to build a new structure with the new value for the
2329 // field we're assigning and the old values for the other fields.
2331 // type T struct {a, b, c int}
2334 // For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c}
2336 // Grab information about the structure type.
2339 idx := fieldIdx(left)
2341 // Grab old value of structure.
2342 old := s.expr(left.Left)
2344 // Make new structure.
2345 new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t)
2347 // Add fields as args.
// Each field is either the freshly assigned value (at index idx,
// presumably selected in an elided branch) or selected from old.
2348 for i := 0; i < nf; i++ {
2352 new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old))
2356 // Recursively assign the new value we've made to the base of the dot op.
2357 s.assign(left.Left, new, false, false, line, 0, rightIsVolatile)
2358 // TODO: do we need to update named values here?
2361 // Update variable assignment.
2362 s.vars[left] = right
2363 s.addNamedValue(left, right)
2366 // Left is not ssa-able. Compute its address.
2367 addr, _ := s.addr(left, false)
// VarDef marks the whole variable as (re)defined for liveness.
2368 if left.Op == ONAME && skip == 0 {
2369 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, left, s.mem())
2372 // Treat as a mem->mem move.
// right == nil means "assign the zero value": emit an OpZero.
2374 s.vars[&memVar] = s.newValue2I(ssa.OpZero, ssa.TypeMem, t.Size(), addr, s.mem())
// Deref move with a write barrier when required.
2378 s.insertWBmove(t, addr, right, line, rightIsVolatile)
2381 s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, t.Size(), addr, right, s.mem())
2384 // Treat as a store.
2386 if skip&skipPtr != 0 {
2387 // Special case: if we don't write back the pointers, don't bother
2388 // doing the write barrier check.
2389 s.storeTypeScalars(t, addr, right, skip)
2392 s.insertWBstore(t, addr, right, line, skip)
// No write barrier needed: store pointer and scalar parts directly.
2396 if skip&skipPtr == 0 {
2397 s.storeTypePtrs(t, addr, right)
2399 s.storeTypeScalars(t, addr, right, skip)
2402 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, t.Size(), addr, right, s.mem())
2405 // zeroVal returns the zero value for type t.
2406 func (s *state) zeroVal(t *Type) *ssa.Value {
// Integers: pick the constant constructor matching the type's size.
2411 return s.constInt8(t, 0)
2413 return s.constInt16(t, 0)
2415 return s.constInt32(t, 0)
2417 return s.constInt64(t, 0)
2419 s.Fatalf("bad sized integer type %s", t)
// Floats: 32- or 64-bit zero constant.
2424 return s.constFloat32(t, 0)
2426 return s.constFloat64(t, 0)
2428 s.Fatalf("bad sized float type %s", t)
// Complex values are built from two float zeros (real, imag).
2433 z := s.constFloat32(Types[TFLOAT32], 0)
2434 return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
2436 z := s.constFloat64(Types[TFLOAT64], 0)
2437 return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
2439 s.Fatalf("bad sized complex type %s", t)
2443 return s.constEmptyString(t)
2444 case t.IsPtrShaped():
2445 return s.constNil(t)
2447 return s.constBool(false)
2448 case t.IsInterface():
2449 return s.constInterface(t)
2451 return s.constSlice(t)
// Structs: build the value field-by-field, recursing for each field's zero.
2454 v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t)
2455 for i := 0; i < n; i++ {
2456 v.AddArg(s.zeroVal(t.FieldType(i).(*Type)))
2460 s.Unimplementedf("zero for type %v not implemented", t)
2467 callNormal callKind = iota
2472 // isSSAIntrinsic1 returns true if n is a call to a recognized 1-arg intrinsic
2473 // that can be handled by the SSA backend.
2474 // SSA uses this, but so does the front end to see if should not
2475 // inline a function because it is a candidate for intrinsic
2477 func isSSAIntrinsic1(s *Sym) bool {
2478 // The test below is not quite accurate -- in the event that
2479 // a function is disabled on a per-function basis, for example
2480 // because of hash-keyed binary failure search, SSA might be
2481 // disabled for that function but it would not be noted here,
2482 // and thus an inlining would not occur (in practice, inlining
2483 // so far has only been noticed for Bswap32 and the 16-bit count
2484 // leading/trailing instructions, but heuristics might change
2485 // in the future or on different architectures).
// Intrinsics are currently only implemented for AMD64.
2486 if !ssaEnabled || ssa.IntrinsicsDisable || Thearch.LinkArch.Family != sys.AMD64 {
// Only specific functions from runtime/internal/sys are recognized.
2489 if s != nil && s.Pkg != nil && s.Pkg.Path == "runtime/internal/sys" {
2492 "Ctz64", "Ctz32", "Ctz16",
2493 "Bswap64", "Bswap32":
// isIntrinsicCall1 reports whether n is a call whose callee symbol is a
// recognized 1-argument SSA intrinsic.
2500 func isIntrinsicCall1(n *Node) bool {
2501 if n == nil || n.Left == nil {
2504 return isSSAIntrinsic1(n.Left.Sym)
2507 // intrinsicFirstArg extracts the first argument from n.List and evaluates it to an SSA value.
2508 func (s *state) intrinsicFirstArg(n *Node) *ssa.Value {
2516 // intrinsicCall1 converts a call to a recognized 1-arg intrinsic
2517 // into the intrinsic
2518 func (s *state) intrinsicCall1(n *Node) *ssa.Value {
2519 var result *ssa.Value
// Dispatch on callee name; each case lowers the call to a single SSA op.
2520 switch n.Left.Sym.Name {
2522 result = s.newValue1(ssa.OpCtz64, Types[TUINT64], s.intrinsicFirstArg(n))
2524 result = s.newValue1(ssa.OpCtz32, Types[TUINT32], s.intrinsicFirstArg(n))
2526 result = s.newValue1(ssa.OpCtz16, Types[TUINT16], s.intrinsicFirstArg(n))
2528 result = s.newValue1(ssa.OpBswap64, Types[TUINT64], s.intrinsicFirstArg(n))
2530 result = s.newValue1(ssa.OpBswap32, Types[TUINT32], s.intrinsicFirstArg(n))
2533 Fatalf("Unknown special call: %v", n.Left.Sym)
// Optionally report the substitution for debugging.
2535 if ssa.IntrinsicsDebug > 0 {
2536 Warnl(n.Lineno, "intrinsic substitution for %v with %s", n.Left.Sym.Name, result.LongString())
2541 // Calls the function n using the specified call type.
2542 // Returns the address of the return value (or nil if none).
2543 func (s *state) call(n *Node, k callKind) *ssa.Value {
2544 var sym *Sym // target symbol (if static)
2545 var closure *ssa.Value // ptr to closure to run (if dynamic)
2546 var codeptr *ssa.Value // ptr to target code (if dynamic)
2547 var rcvr *ssa.Value // receiver to set
// NOTE(review): fn is the callee expression (presumably n.Left; the
// assignment is in an elided line) — confirm against the full source.
// Direct call to a package-level function uses a static symbol.
2551 if k == callNormal && fn.Op == ONAME && fn.Class == PFUNC {
2555 closure = s.expr(fn)
// Method call: the method value is looked up via a generated name.
2557 if fn.Op != ODOTMETH {
2558 Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn)
2560 if k == callNormal {
2564 n2 := newname(fn.Sym)
2566 n2.Lineno = fn.Lineno
2567 closure = s.expr(n2)
2568 // Note: receiver is already assigned in n.List, so we don't
2569 // want to set it here.
// Interface call: load the code pointer out of the itab.
2571 if fn.Op != ODOTINTER {
2572 Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op)
2574 i := s.expr(fn.Left)
2575 itab := s.newValue1(ssa.OpITab, Types[TUINTPTR], i)
2576 if k != callNormal {
2579 itabidx := fn.Xoffset + 3*int64(Widthptr) + 8 // offset of fun field in runtime.itab
2580 itab = s.newValue1I(ssa.OpOffPtr, Types[TUINTPTR], itabidx, itab)
2581 if k == callNormal {
2582 codeptr = s.newValue2(ssa.OpLoad, Types[TUINTPTR], itab, s.mem())
2586 rcvr = s.newValue1(ssa.OpIData, Types[TUINTPTR], i)
2589 stksize := fn.Type.ArgWidth() // includes receiver
2591 // Run all argument assignments. The arg slots have already
2592 // been offset by the appropriate amount (+2*widthptr for go/defer,
2593 // +widthptr for interface calls).
2594 // For OCALLMETH, the receiver is set in these statements.
2597 // Set receiver (for interface calls)
2599 argStart := Ctxt.FixedFrameSize()
2600 if k != callNormal {
2601 argStart += int64(2 * Widthptr)
2603 addr := s.entryNewValue1I(ssa.OpOffPtr, Types[TUINTPTR], argStart, s.sp)
2604 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, int64(Widthptr), addr, rcvr, s.mem())
2608 if k != callNormal {
2609 // Write argsize and closure (args to Newproc/Deferproc).
2610 argsize := s.constInt32(Types[TUINT32], int32(stksize))
2611 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, 4, s.sp, argsize, s.mem())
2612 addr := s.entryNewValue1I(ssa.OpOffPtr, Ptrto(Types[TUINTPTR]), int64(Widthptr), s.sp)
2613 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, int64(Widthptr), addr, closure, s.mem())
2614 stksize += 2 * int64(Widthptr)
// Emit the call op itself; the variant depends on how we're calling.
2618 bNext := s.f.NewBlock(ssa.BlockPlain)
2621 case k == callDefer:
2622 call = s.newValue1(ssa.OpDeferCall, ssa.TypeMem, s.mem())
2624 call = s.newValue1(ssa.OpGoCall, ssa.TypeMem, s.mem())
2625 case closure != nil:
2626 codeptr = s.newValue2(ssa.OpLoad, Types[TUINTPTR], closure, s.mem())
2627 call = s.newValue3(ssa.OpClosureCall, ssa.TypeMem, codeptr, closure, s.mem())
2628 case codeptr != nil:
2629 call = s.newValue2(ssa.OpInterCall, ssa.TypeMem, codeptr, s.mem())
2631 call = s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, sym, s.mem())
2633 Fatalf("bad call type %s %v", n.Op, n)
2635 call.AuxInt = stksize // Call operations carry the argsize of the callee along with them
2637 // Finish call block
2638 s.vars[&memVar] = call
2640 b.Kind = ssa.BlockCall
2644 // Add recover edge to exit code.
2645 b.Kind = ssa.BlockDefer
2646 r := s.f.NewBlock(ssa.BlockPlain)
2650 b.Likely = ssa.BranchLikely
2653 // Start exit block, find address of result.
2655 // Keep input pointer args live across calls. This is a bandaid until 1.8.
2656 for _, n := range s.ptrargs {
2657 s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, ssa.TypeMem, s.variable(n, n.Type), s.mem())
2659 res := n.Left.Type.Results()
2660 if res.NumFields() == 0 || k != callNormal {
2661 // call has no return value. Continue with the next statement.
// Return the address of the (single visible) result slot on the stack.
2665 return s.entryNewValue1I(ssa.OpOffPtr, Ptrto(fp.Type), fp.Offset+Ctxt.FixedFrameSize(), s.sp)
2668 // etypesign returns the signed-ness of e, for integer/pointer etypes.
2669 // -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer.
2670 func etypesign(e EType) int8 {
2672 case TINT8, TINT16, TINT32, TINT64, TINT:
// Pointer etypes (TUINTPTR, TUNSAFEPTR) are classified as unsigned.
2674 case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR, TUNSAFEPTR:
2680 // lookupSymbol is used to retrieve the symbol (Extern, Arg or Auto) used for a particular node.
2681 // This improves the effectiveness of cse by using the same Aux values for the
2683 func (s *state) lookupSymbol(n *Node, sym interface{}) interface{} {
2686 s.Fatalf("sym %v is of uknown type %T", sym, sym)
2687 case *ssa.ExternSymbol, *ssa.ArgSymbol, *ssa.AutoSymbol:
2688 // these are the only valid types
2691 if lsym, ok := s.varsyms[n]; ok {
2699 // addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
2700 // Also returns a bool reporting whether the returned value is "volatile", that is it
2701 // points to the outargs section and thus the referent will be clobbered by any call.
2702 // The value that the returned Value represents is guaranteed to be non-nil.
2703 // If bounded is true then this address does not require a nil check for its operand
2704 // even if that would otherwise be implied.
2705 func (s *state) addr(n *Node, bounded bool) (*ssa.Value, bool) {
// Global variable: address is the static-base (sb) plus the symbol,
// with any field/element offset folded in as an OffPtr.
2712 aux := s.lookupSymbol(n, &ssa.ExternSymbol{Typ: n.Type, Sym: n.Sym})
2713 v := s.entryNewValue1A(ssa.OpAddr, t, aux, s.sb)
2714 // TODO: Make OpAddr use AuxInt as well as Aux.
2716 v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Xoffset, v)
2725 if n.String() == ".fp" {
2726 // Special arg that points to the frame pointer.
2727 // (Used by the race detector, others?)
2728 aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n})
2729 return s.entryNewValue1A(ssa.OpAddr, t, aux, s.sp), false
2731 s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
// Stack auto: address relative to SP via an AutoSymbol.
2734 aux := s.lookupSymbol(n, &ssa.AutoSymbol{Typ: n.Type, Node: n})
2735 return s.newValue1A(ssa.OpAddr, t, aux, s.sp), false
2736 case PPARAMOUT: // Same as PAUTO -- cannot generate LEA early.
2737 // ensure that we reuse symbols for out parameters so
2738 // that cse works on their addresses
2739 aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n})
2740 return s.newValue1A(ssa.OpAddr, t, aux, s.sp), false
2742 s.Unimplementedf("variable address class %v not implemented", classnames[n.Class])
2746 // indirect off a register
2747 // used for storing/loading arguments/returns to/from callees
2748 if int(n.Reg) != Thearch.REGSP {
2749 s.Unimplementedf("OINDREG of non-SP register %s in addr: %v", obj.Rconv(int(n.Reg)), n)
// SP-relative slots live in the outargs area, hence volatile=true.
2752 return s.entryNewValue1I(ssa.OpOffPtr, t, n.Xoffset, s.sp), true
// Index expression: slices bounds-check against the dynamic length...
2754 if n.Left.Type.IsSlice() {
2756 i := s.expr(n.Right)
2757 i = s.extendIndex(i)
2758 len := s.newValue1(ssa.OpSliceLen, Types[TINT], a)
2760 s.boundsCheck(i, len)
2762 p := s.newValue1(ssa.OpSlicePtr, t, a)
2763 return s.newValue2(ssa.OpPtrIndex, t, p, i), false
// ...while arrays bounds-check against the constant element count,
// and propagate the volatility of the array's own address.
2765 a, isVolatile := s.addr(n.Left, bounded)
2766 i := s.expr(n.Right)
2767 i = s.extendIndex(i)
2768 len := s.constInt(Types[TINT], n.Left.Type.NumElem())
2770 s.boundsCheck(i, len)
2772 return s.newValue2(ssa.OpPtrIndex, Ptrto(n.Left.Type.Elem()), a, i), isVolatile
// Dereference: the pointer value itself is the address (nil-checked).
2775 return s.exprPtr(n.Left, bounded, n.Lineno), false
// Field selection: base address plus the field offset.
2777 p, isVolatile := s.addr(n.Left, bounded)
2778 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p), isVolatile
2780 p := s.exprPtr(n.Left, bounded, n.Lineno)
2781 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p), false
// Closure variable: offset from the closure pointer.
2783 return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset,
2784 s.entryNewValue0(ssa.OpGetClosurePtr, Ptrto(Types[TUINT8]))), false
// No-op conversion: reuse the operand's address, retyped via OpCopy.
2786 addr, isVolatile := s.addr(n.Left, bounded)
2787 return s.newValue1(ssa.OpCopy, t, addr), isVolatile // ensure that addr has the right type
2788 case OCALLFUNC, OCALLINTER, OCALLMETH:
// Call results live in the outargs area: volatile by definition.
2789 return s.call(n, callNormal), true
2792 s.Unimplementedf("unhandled addr %v", n.Op)
2797 // canSSA reports whether n is SSA-able.
2798 // n must be an ONAME (or an ODOT sequence with an ONAME base).
2799 func (s *state) canSSA(n *Node) bool {
// -N disables optimization; keep all variables in memory.
2800 if Debug['N'] != 0 {
// Heap-allocated (escaping) variables cannot live in SSA registers.
2812 if n.isParamHeapCopy() {
2815 if n.Class == PAUTOHEAP {
2816 Fatalf("canSSA of PAUTOHEAP %v", n)
2823 // TODO: handle this case? Named return values must be
2824 // in memory so that the deferred function can see them.
2825 // Maybe do: if !strings.HasPrefix(n.String(), "~") { return false }
2828 if s.cgoUnsafeArgs {
2829 // Cgo effectively takes the address of all result args,
2830 // but the compiler can't see that.
2834 if n.Class == PPARAM && n.String() == ".this" {
2835 // wrappers generated by genwrapper need to update
2836 // the .this pointer in place.
2837 // TODO: treat as a PPARMOUT?
// Finally, the variable's type itself must be representable in SSA.
2840 return canSSAType(n.Type)
2841 // TODO: try to make more variables SSAable?
2844 // canSSAType reports whether variables of type t are SSA-able.
2845 func canSSAType(t *Type) bool {
2847 if t.Width > int64(4*Widthptr) {
2848 // 4*Widthptr is an arbitrary constant. We want it
2849 // to be at least 3*Widthptr so slices can be registerized.
2850 // Too big and we'll introduce too much register pressure.
2855 // We can't do arrays because dynamic indexing is
2856 // not supported on SSA variables.
2857 // TODO: maybe allow if length is <=1? All indexes
2858 // are constant? Might be good for the arrays
2859 // introduced by the compiler for variadic functions.
// Structs: limited field count, and every field must itself be SSA-able.
2862 if t.NumFields() > ssa.MaxStruct {
2865 for _, t1 := range t.Fields().Slice() {
2866 if !canSSAType(t1.Type) {
2876 // exprPtr evaluates n to a pointer and nil-checks it.
2877 func (s *state) exprPtr(n *Node, bounded bool, lineno int32) *ssa.Value {
// The nil check is skipped when the access is marked bounded or the
// pointer is statically known non-nil; optionally report the elision.
2879 if bounded || n.NonNil {
2880 if s.f.Config.Debug_checknil() && lineno > 1 {
2881 s.f.Config.Warnl(lineno, "removed nil check")
2889 // nilCheck generates nil pointer checking code.
2890 // Starts a new block on return, unless nil checks are disabled.
2891 // Used only for automatically inserted nil checks,
2892 // not for user code like 'x != nil'.
2893 func (s *state) nilCheck(ptr *ssa.Value) {
// Respect the -d=checknil-style global disable flag.
2894 if Disable_checknil != 0 {
// The check is modeled as a value consumed by a BlockCheck block.
2897 chk := s.newValue2(ssa.OpNilCheck, ssa.TypeVoid, ptr, s.mem())
2899 b.Kind = ssa.BlockCheck
2901 bNext := s.f.NewBlock(ssa.BlockPlain)
2906 // boundsCheck generates bounds checking code. Checks if 0 <= idx < len, branches to exit if not.
2907 // Starts a new block on return.
2908 func (s *state) boundsCheck(idx, len *ssa.Value) {
// -B disables bounds checking entirely.
2909 if Debug['B'] != 0 {
2912 // TODO: convert index to full width?
2913 // TODO: if index is 64-bit and we're compiling to 32-bit, check that high 32 bits are zero.
2916 cmp := s.newValue2(ssa.OpIsInBounds, Types[TBOOL], idx, len)
2917 s.check(cmp, Panicindex)
2920 // sliceBoundsCheck generates slice bounds checking code. Checks if 0 <= idx <= len, branches to exit if not.
2921 // Starts a new block on return.
2922 func (s *state) sliceBoundsCheck(idx, len *ssa.Value) {
// -B disables bounds checking entirely.
2923 if Debug['B'] != 0 {
2926 // TODO: convert index to full width?
2927 // TODO: if index is 64-bit and we're compiling to 32-bit, check that high 32 bits are zero.
// Note: IsSliceInBounds allows idx == len (slicing, not indexing).
2930 cmp := s.newValue2(ssa.OpIsSliceInBounds, Types[TBOOL], idx, len)
2931 s.check(cmp, panicslice)
2934 // If cmp (a bool) is true, panic using the given function.
2935 func (s *state) check(cmp *ssa.Value, fn *Node) {
2937 b.Kind = ssa.BlockIf
2939 b.Likely = ssa.BranchLikely
2940 bNext := s.f.NewBlock(ssa.BlockPlain)
// Panic blocks are deduplicated per (runtime function, source line),
// so repeated checks on the same line share a single panic block.
2941 line := s.peekLine()
2942 bPanic := s.panics[funcLine{fn, line}]
2944 bPanic = s.f.NewBlock(ssa.BlockPlain)
2945 s.panics[funcLine{fn, line}] = bPanic
2946 s.startBlock(bPanic)
2947 // The panic call takes/returns memory to ensure that the right
2948 // memory state is observed if the panic happens.
2949 s.rtcall(fn, false, nil)
2956 // rtcall issues a call to the given runtime function fn with the listed args.
2957 // Returns a slice of results of the given result types.
2958 // The call is added to the end of the current block.
2959 // If returns is false, the block is marked as an exit block.
2960 // If returns is true, the block is marked as a call block. A new block
2961 // is started to load the return values.
2962 func (s *state) rtcall(fn *Node, returns bool, results []*Type, args ...*ssa.Value) []*ssa.Value {
2963 // Write args to the stack
2964 var off int64 // TODO: arch-dependent starting offset?
2965 for _, arg := range args {
// Each arg is stored at its naturally aligned offset from SP.
2967 off = Rnd(off, t.Alignment())
2970 ptr = s.newValue1I(ssa.OpOffPtr, Types[TUINTPTR], off, s.sp)
2973 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, size, ptr, arg, s.mem())
2976 off = Rnd(off, int64(Widthptr))
// Issue the static call; the call value becomes the new memory state.
2979 call := s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, fn.Sym, s.mem())
2980 s.vars[&memVar] = call
// No-return (panic) path: current block exits, results are disallowed.
2985 b.Kind = ssa.BlockExit
2988 if len(results) > 0 {
2989 Fatalf("panic call can't have results")
// Returning path: finish the call block and continue in a fresh one.
2993 b.Kind = ssa.BlockCall
2995 bNext := s.f.NewBlock(ssa.BlockPlain)
2999 // Keep input pointer args live across calls. This is a bandaid until 1.8.
3000 for _, n := range s.ptrargs {
3001 s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, ssa.TypeMem, s.variable(n, n.Type), s.mem())
// Load results back from their stack slots, aligned like the args.
3005 res := make([]*ssa.Value, len(results))
3006 for i, t := range results {
3007 off = Rnd(off, t.Alignment())
3010 ptr = s.newValue1I(ssa.OpOffPtr, Types[TUINTPTR], off, s.sp)
3012 res[i] = s.newValue2(ssa.OpLoad, t, ptr, s.mem())
3015 off = Rnd(off, int64(Widthptr))
3017 // Remember how much callee stack space we needed.
3023 // insertWBmove inserts the assignment *left = *right including a write barrier.
3024 // t is the type being assigned.
3025 func (s *state) insertWBmove(t *Type, left, right *ssa.Value, line int32, rightIsVolatile bool) {
3026 // if writeBarrier.enabled {
3027 // typedmemmove(&t, left, right)
// Write barriers must not be requested in contexts that forbid them.
3033 s.Fatalf("write barrier prohibited")
3035 if s.WBLineno == 0 {
3036 s.WBLineno = left.Line
// Diamond control flow: test writeBarrier.enabled, then either the
// runtime call (bThen) or a plain move (bElse), merging at bEnd.
3038 bThen := s.f.NewBlock(ssa.BlockPlain)
3039 bElse := s.f.NewBlock(ssa.BlockPlain)
3040 bEnd := s.f.NewBlock(ssa.BlockPlain)
3042 aux := &ssa.ExternSymbol{Typ: Types[TBOOL], Sym: syslook("writeBarrier").Sym}
3043 flagaddr := s.newValue1A(ssa.OpAddr, Ptrto(Types[TUINT32]), aux, s.sb)
3044 // TODO: select the .enabled field. It is currently first, so not needed for now.
3045 // Load word, test byte, avoiding partial register write from load byte.
3046 flag := s.newValue2(ssa.OpLoad, Types[TUINT32], flagaddr, s.mem())
3047 flag = s.newValue1(ssa.OpTrunc64to8, Types[TBOOL], flag)
3049 b.Kind = ssa.BlockIf
3050 b.Likely = ssa.BranchUnlikely
3057 if !rightIsVolatile {
3058 // Issue typedmemmove call.
3059 taddr := s.newValue1A(ssa.OpAddr, Types[TUINTPTR], &ssa.ExternSymbol{Typ: Types[TUINTPTR], Sym: typenamesym(t)}, s.sb)
3060 s.rtcall(typedmemmove, true, nil, taddr, left, right)
3062 // Copy to temp location if the source is volatile (will be clobbered by
3063 // a function call). Marshaling the args to typedmemmove might clobber the
3064 // value we're trying to move.
3066 s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, tmp, s.mem())
3067 tmpaddr, _ := s.addr(tmp, true)
3068 s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, t.Size(), tmpaddr, right, s.mem())
3069 // Issue typedmemmove call.
3070 taddr := s.newValue1A(ssa.OpAddr, Types[TUINTPTR], &ssa.ExternSymbol{Typ: Types[TUINTPTR], Sym: typenamesym(t)}, s.sb)
3071 s.rtcall(typedmemmove, true, nil, taddr, left, tmpaddr)
3072 // Mark temp as dead.
3073 s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, ssa.TypeMem, tmp, s.mem())
3075 s.endBlock().AddEdgeTo(bEnd)
// Barrier disabled: a plain memory-to-memory move suffices.
3078 s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, t.Size(), left, right, s.mem())
3079 s.endBlock().AddEdgeTo(bEnd)
3084 Warnl(line, "write barrier")
3088 // insertWBstore inserts the assignment *left = right including a write barrier.
3089 // t is the type being assigned.
3090 func (s *state) insertWBstore(t *Type, left, right *ssa.Value, line int32, skip skipMask) {
3091 // store scalar fields
3092 // if writeBarrier.enabled {
3093 // writebarrierptr for pointer fields
3095 // store pointer fields
// Write barriers must not be requested in contexts that forbid them.
3099 s.Fatalf("write barrier prohibited")
3101 if s.WBLineno == 0 {
3102 s.WBLineno = left.Line
// Scalars never need a barrier; store them unconditionally up front.
3104 s.storeTypeScalars(t, left, right, skip)
// Diamond control flow on writeBarrier.enabled for the pointer parts.
3106 bThen := s.f.NewBlock(ssa.BlockPlain)
3107 bElse := s.f.NewBlock(ssa.BlockPlain)
3108 bEnd := s.f.NewBlock(ssa.BlockPlain)
3110 aux := &ssa.ExternSymbol{Typ: Types[TBOOL], Sym: syslook("writeBarrier").Sym}
3111 flagaddr := s.newValue1A(ssa.OpAddr, Ptrto(Types[TUINT32]), aux, s.sb)
3112 // TODO: select the .enabled field. It is currently first, so not needed for now.
3113 // Load word, test byte, avoiding partial register write from load byte.
3114 flag := s.newValue2(ssa.OpLoad, Types[TUINT32], flagaddr, s.mem())
3115 flag = s.newValue1(ssa.OpTrunc64to8, Types[TBOOL], flag)
3117 b.Kind = ssa.BlockIf
3118 b.Likely = ssa.BranchUnlikely
3123 // Issue write barriers for pointer writes.
3125 s.storeTypePtrsWB(t, left, right)
3126 s.endBlock().AddEdgeTo(bEnd)
3128 // Issue regular stores for pointer writes.
3130 s.storeTypePtrs(t, left, right)
3131 s.endBlock().AddEdgeTo(bEnd)
3136 Warnl(line, "write barrier")
3140 // do *left = right for all scalar (non-pointer) parts of t.
3141 func (s *state) storeTypeScalars(t *Type, left, right *ssa.Value, skip skipMask) {
3143 case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex():
3144 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, t.Size(), left, right, s.mem())
3145 case t.IsPtrShaped():
3146 // no scalar fields.
// String: the length word (at offset IntSize) is the scalar part.
3148 if skip&skipLen != 0 {
3151 len := s.newValue1(ssa.OpStringLen, Types[TINT], right)
3152 lenAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TINT]), s.config.IntSize, left)
3153 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenAddr, len, s.mem())
// Slice: len and cap words are the scalar parts; each can be skipped
// independently via the skip mask.
3155 if skip&skipLen == 0 {
3156 len := s.newValue1(ssa.OpSliceLen, Types[TINT], right)
3157 lenAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TINT]), s.config.IntSize, left)
3158 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenAddr, len, s.mem())
3160 if skip&skipCap == 0 {
3161 cap := s.newValue1(ssa.OpSliceCap, Types[TINT], right)
3162 capAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TINT]), 2*s.config.IntSize, left)
3163 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, capAddr, cap, s.mem())
3165 case t.IsInterface():
3166 // itab field doesn't need a write barrier (even though it is a pointer).
3167 itab := s.newValue1(ssa.OpITab, Ptrto(Types[TUINT8]), right)
3168 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, left, itab, s.mem())
// Struct: recurse into each field at its offset.
3171 for i := 0; i < n; i++ {
3172 ft := t.FieldType(i)
3173 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
3174 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
3175 s.storeTypeScalars(ft.(*Type), addr, val, 0)
3178 s.Fatalf("bad write barrier type %s", t)
3182 // do *left = right for all pointer parts of t.
3183 func (s *state) storeTypePtrs(t *Type, left, right *ssa.Value) {
3185 case t.IsPtrShaped():
3186 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, right, s.mem())
// String/slice: only the data pointer (first word) is a pointer.
3188 ptr := s.newValue1(ssa.OpStringPtr, Ptrto(Types[TUINT8]), right)
3189 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, ptr, s.mem())
3191 ptr := s.newValue1(ssa.OpSlicePtr, Ptrto(Types[TUINT8]), right)
3192 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, ptr, s.mem())
3193 case t.IsInterface():
3194 // itab field is treated as a scalar.
3195 idata := s.newValue1(ssa.OpIData, Ptrto(Types[TUINT8]), right)
3196 idataAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TUINT8]), s.config.PtrSize, left)
3197 s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, idataAddr, idata, s.mem())
// Struct: recurse into pointer-bearing fields only.
3200 for i := 0; i < n; i++ {
3201 ft := t.FieldType(i)
3202 if !haspointers(ft.(*Type)) {
3205 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
3206 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
3207 s.storeTypePtrs(ft.(*Type), addr, val)
3210 s.Fatalf("bad write barrier type %s", t)
3214 // do *left = right with a write barrier for all pointer parts of t.
3215 func (s *state) storeTypePtrsWB(t *Type, left, right *ssa.Value) {
// Same shape as storeTypePtrs, but each pointer store goes through the
// runtime's writebarrierptr instead of a plain OpStore.
3217 case t.IsPtrShaped():
3218 s.rtcall(writebarrierptr, true, nil, left, right)
3220 ptr := s.newValue1(ssa.OpStringPtr, Ptrto(Types[TUINT8]), right)
3221 s.rtcall(writebarrierptr, true, nil, left, ptr)
3223 ptr := s.newValue1(ssa.OpSlicePtr, Ptrto(Types[TUINT8]), right)
3224 s.rtcall(writebarrierptr, true, nil, left, ptr)
3225 case t.IsInterface():
3226 idata := s.newValue1(ssa.OpIData, Ptrto(Types[TUINT8]), right)
3227 idataAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TUINT8]), s.config.PtrSize, left)
3228 s.rtcall(writebarrierptr, true, nil, idataAddr, idata)
// Struct: recurse into pointer-bearing fields only.
3231 for i := 0; i < n; i++ {
3232 ft := t.FieldType(i)
3233 if !haspointers(ft.(*Type)) {
3236 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
3237 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
3238 s.storeTypePtrsWB(ft.(*Type), addr, val)
3241 s.Fatalf("bad write barrier type %s", t)
3245 // slice computes the slice v[i:j:k] and returns ptr, len, and cap of result.
3246 // i,j,k may be nil, in which case they are set to their default value.
3247 // t is a slice, ptr to array, or string type.
3248 func (s *state) slice(t *Type, v, i, j, k *ssa.Value) (p, l, c *ssa.Value) {
3254 zero := s.constInt(Types[TINT], 0)
// Decompose the operand into ptr/len/cap according to its kind.
3258 ptrtype = Ptrto(elemtype)
3259 ptr = s.newValue1(ssa.OpSlicePtr, ptrtype, v)
3260 len = s.newValue1(ssa.OpSliceLen, Types[TINT], v)
3261 cap = s.newValue1(ssa.OpSliceCap, Types[TINT], v)
// Strings: byte elements, no distinct capacity.
3263 elemtype = Types[TUINT8]
3264 ptrtype = Ptrto(elemtype)
3265 ptr = s.newValue1(ssa.OpStringPtr, ptrtype, v)
3266 len = s.newValue1(ssa.OpStringLen, Types[TINT], v)
// Pointer-to-array: length is the array's constant element count.
3269 if !t.Elem().IsArray() {
3270 s.Fatalf("bad ptr to array in slice %v\n", t)
3272 elemtype = t.Elem().Elem()
3273 ptrtype = Ptrto(elemtype)
3276 len = s.constInt(Types[TINT], t.Elem().NumElem())
3279 s.Fatalf("bad type in slice %v\n", t)
3282 // Set default values
3293 // Panic if slice indices are not in bounds.
3294 s.sliceBoundsCheck(i, j)
3296 s.sliceBoundsCheck(j, k)
3299 s.sliceBoundsCheck(k, cap)
3302 // Generate the following code assuming that indexes are in bounds.
3303 // The conditional is to make sure that we don't generate a slice
3304 // that points to the next object in memory.
3307 // delta = i*elemsize
3312 // result = (SliceMake rptr rlen rcap)
3313 subOp := s.ssaOp(OSUB, Types[TINT])
3314 eqOp := s.ssaOp(OEQ, Types[TINT])
3315 mulOp := s.ssaOp(OMUL, Types[TINT])
3316 rlen := s.newValue2(subOp, Types[TINT], j, i)
3320 // Capacity of the result is unimportant. However, we use
3321 // rcap to test if we've generated a zero-length slice.
3322 // Use length of strings for that.
3327 rcap = s.newValue2(subOp, Types[TINT], k, i)
3330 // delta = # of elements to offset pointer by.
3331 s.vars[&deltaVar] = i
3333 // Generate code to set delta=0 if the resulting capacity is zero.
// Skip the zero-cap guard entirely when i is a compile-time zero
// constant: delta is already zero then.
3334 if !((i.Op == ssa.OpConst64 && i.AuxInt == 0) ||
3335 (i.Op == ssa.OpConst32 && int32(i.AuxInt) == 0)) {
3336 cmp := s.newValue2(eqOp, Types[TBOOL], rcap, zero)
3339 b.Kind = ssa.BlockIf
3340 b.Likely = ssa.BranchUnlikely
3343 // Generate block which zeros the delta variable.
3344 nz := s.f.NewBlock(ssa.BlockPlain)
3347 s.vars[&deltaVar] = zero
3351 merge := s.f.NewBlock(ssa.BlockPlain)
3356 // TODO: use conditional moves somehow?
3359 // Compute rptr = ptr + delta * elemsize
3360 rptr := s.newValue2(ssa.OpAddPtr, ptrtype, ptr, s.newValue2(mulOp, Types[TINT], s.variable(&deltaVar, Types[TINT]), s.constInt(Types[TINT], elemtype.Width)))
3361 delete(s.vars, &deltaVar)
3362 return rptr, rlen, rcap
// u2fcvtTab parameterizes unsigned-integer-to-float conversion: the SSA
// ops for the compare/convert/mask/shift/or/add steps, plus a constructor
// for the integer constant one of the source width.
3365 type u2fcvtTab struct {
3366 geq, cvt2F, and, rsh, or, add ssa.Op
3367 one func(*state, ssa.Type, int64) *ssa.Value
// Conversion table: uint64 -> float64.
3370 var u64_f64 u2fcvtTab = u2fcvtTab{
3372 cvt2F: ssa.OpCvt64to64F,
3374 rsh: ssa.OpRsh64Ux64,
3377 one: (*state).constInt64,
// Conversion table: uint64 -> float32.
3380 var u64_f32 u2fcvtTab = u2fcvtTab{
3382 cvt2F: ssa.OpCvt64to32F,
3384 rsh: ssa.OpRsh64Ux64,
3387 one: (*state).constInt64,
3390 // Excess generality on a machine with 64-bit integer registers.
3391 // Not used on AMD64.
3392 var u32_f32 u2fcvtTab = u2fcvtTab{
3394 cvt2F: ssa.OpCvt32to32F,
3396 rsh: ssa.OpRsh32Ux32,
3399 one: func(s *state, t ssa.Type, x int64) *ssa.Value {
3400 return s.constInt32(t, int32(x))
// uint64Tofloat64 converts x (a uint64-typed value) to float64.
3404 func (s *state) uint64Tofloat64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
3405 return s.uintTofloat(&u64_f64, n, x, ft, tt)
3408 func (s *state) uint64Tofloat32(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
3409 return s.uintTofloat(&u64_f32, n, x, ft, tt)
// uintTofloat converts the unsigned integer x (for node n) to a float,
// using the op table cvttab. ft is the source integer type, tt the
// destination float type.
func (s *state) uintTofloat(cvttab *u2fcvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	//    result = (floatY) x
	// 	  y = uintX(x) ; y = x & 1
	// 	  z = uintX(x) ; z = z >> 1
	//    result = floatY(z)
	//    result = result + result

	// Code borrowed from old code generator.
	// What's going on: large 64-bit "unsigned" looks like
	// negative number to hardware's integer-to-float
	// conversion. However, because the mantissa is only
	// 63 bits, we don't need the LSB, so instead we do an
	// unsigned right shift (divide by two), convert, and
	// double. However, before we do that, we need to be
	// sure that we do not lose a "1" if that made the
	// difference in the resulting rounding. Therefore, we
	// preserve it, and OR (not ADD) it back in. The case
	// that matters is when the eleven discarded bits are
	// equal to 10000000001; that rounds up, and the 1 cannot
	// be lost else it would round down if the LSB of the
	// candidate mantissa is 0.
	cmp := s.newValue2(cvttab.geq, Types[TBOOL], x, s.zeroVal(ft))
	b.Kind = ssa.BlockIf
	b.Likely = ssa.BranchLikely

	bThen := s.f.NewBlock(ssa.BlockPlain)
	bElse := s.f.NewBlock(ssa.BlockPlain)
	bAfter := s.f.NewBlock(ssa.BlockPlain)

	// Then branch (x fits in a signed value): a direct conversion is exact.
	a0 := s.newValue1(cvttab.cvt2F, tt, x)
	bThen.AddEdgeTo(bAfter)

	// Else branch: halve, convert, double, OR-ing the dropped LSB back in.
	one := cvttab.one(s, ft, 1)
	y := s.newValue2(cvttab.and, ft, x, one)
	z := s.newValue2(cvttab.rsh, ft, x, one)
	z = s.newValue2(cvttab.or, ft, z, y)
	a := s.newValue1(cvttab.cvt2F, tt, z)
	a1 := s.newValue2(cvttab.add, tt, a, a)
	bElse.AddEdgeTo(bAfter)

	// Merge: read the result phi via the variable mechanism.
	s.startBlock(bAfter)
	return s.variable(n, n.Type)
// referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
// x is the map/chan pointer value; the result has the builtin's int type.
func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value {
	if !n.Left.Type.IsMap() && !n.Left.Type.IsChan() {
		s.Fatalf("node must be a map or a channel")
	// if n == nil { result = 0 } else:
	// len: return *((*int)n)
	// cap: return *(((*int)n)+1)
	nilValue := s.constNil(Types[TUINTPTR])
	cmp := s.newValue2(ssa.OpEqPtr, Types[TBOOL], x, nilValue)
	b.Kind = ssa.BlockIf
	b.Likely = ssa.BranchUnlikely

	bThen := s.f.NewBlock(ssa.BlockPlain)
	bElse := s.f.NewBlock(ssa.BlockPlain)
	bAfter := s.f.NewBlock(ssa.BlockPlain)

	// length/capacity of a nil map/chan is zero
	s.vars[n] = s.zeroVal(lenType)
	bThen.AddEdgeTo(bAfter)

	// length is stored in the first word for map/chan
	s.vars[n] = s.newValue2(ssa.OpLoad, lenType, x, s.mem())
	} else if n.Op == OCAP {
		// capacity is stored in the second word for chan
		sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x)
		s.vars[n] = s.newValue2(ssa.OpLoad, lenType, sw, s.mem())
	s.Fatalf("op must be OLEN or OCAP")
	bElse.AddEdgeTo(bAfter)

	// Merge: result phi materialized through the variable mechanism.
	s.startBlock(bAfter)
	return s.variable(n, lenType)
// f2uCvtTab parameterizes floatToUint: less-than compare, float->int
// conversion, and float subtraction ops for a given source float type.
type f2uCvtTab struct {
	ltf, cvt2U, subf ssa.Op
	// value builds a float constant in the source float type.
	value func(*state, ssa.Type, float64) *ssa.Value
// f32_u64 converts float32 -> uint64.
var f32_u64 f2uCvtTab = f2uCvtTab{
	cvt2U: ssa.OpCvt32Fto64,
	value: (*state).constFloat32,

// f64_u64 converts float64 -> uint64.
var f64_u64 f2uCvtTab = f2uCvtTab{
	cvt2U: ssa.OpCvt64Fto64,
	value: (*state).constFloat64,
// float32ToUint64 converts the float32 value x (for node n) to uint64.
func (s *state) float32ToUint64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	return s.floatToUint(&f32_u64, n, x, ft, tt)

// float64ToUint64 converts the float64 value x (for node n) to uint64.
func (s *state) float64ToUint64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	return s.floatToUint(&f64_u64, n, x, ft, tt)
// floatToUint converts the float value x (for node n) to an unsigned
// integer, using the op table cvttab. Values >= 2^63 are handled by
// subtracting 2^63 before converting and OR-ing the sign bit back in.
func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	// if x < 9223372036854775808.0 {
	// 	result = uintY(x)
	// 	y = x - 9223372036854775808.0
	// 	result = z | -9223372036854775808
	twoToThe63 := cvttab.value(s, ft, 9223372036854775808.0)
	cmp := s.newValue2(cvttab.ltf, Types[TBOOL], x, twoToThe63)
	b.Kind = ssa.BlockIf
	b.Likely = ssa.BranchLikely

	bThen := s.f.NewBlock(ssa.BlockPlain)
	bElse := s.f.NewBlock(ssa.BlockPlain)
	bAfter := s.f.NewBlock(ssa.BlockPlain)

	// Then: x fits in the signed range; a direct conversion is correct.
	a0 := s.newValue1(cvttab.cvt2U, tt, x)
	bThen.AddEdgeTo(bAfter)

	// Else: bias down by 2^63, convert, then set the high bit.
	y := s.newValue2(cvttab.subf, ft, x, twoToThe63)
	y = s.newValue1(cvttab.cvt2U, tt, y)
	z := s.constInt64(tt, -9223372036854775808)
	a1 := s.newValue2(ssa.OpOr64, tt, y, z)
	bElse.AddEdgeTo(bAfter)

	// Merge: read result phi via the variable mechanism.
	s.startBlock(bAfter)
	return s.variable(n, n.Type)
// ifaceType returns the value for the word containing the type.
// n is the node for the interface expression.
// v is the corresponding value.
func (s *state) ifaceType(n *Node, v *ssa.Value) *ssa.Value {
	byteptr := Ptrto(Types[TUINT8]) // type used in runtime prototypes for runtime type (*byte)

	if n.Type.IsEmptyInterface() {
		// Have *eface. The type is the first word in the struct.
		return s.newValue1(ssa.OpITab, byteptr, v)

	// Have *iface.
	// The first word in the struct is the *itab.
	// If the *itab is nil, return 0.
	// Otherwise, the second word in the *itab is the type.
	tab := s.newValue1(ssa.OpITab, byteptr, v)
	s.vars[&typVar] = tab
	isnonnil := s.newValue2(ssa.OpNeqPtr, Types[TBOOL], tab, s.constNil(byteptr))
	b.Kind = ssa.BlockIf
	b.SetControl(isnonnil)
	b.Likely = ssa.BranchLikely

	bLoad := s.f.NewBlock(ssa.BlockPlain)
	bEnd := s.f.NewBlock(ssa.BlockPlain)
	bLoad.AddEdgeTo(bEnd)

	// Load the type word out of the itab, one pointer-width past its start.
	off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), tab)
	s.vars[&typVar] = s.newValue2(ssa.OpLoad, byteptr, off, s.mem())

	// Merge: typVar holds nil (from the nil-itab path) or the loaded type.
	typ := s.variable(&typVar, byteptr)
	delete(s.vars, &typVar)
// dottype generates SSA for a type assertion node.
// commaok indicates whether to panic or return a bool.
// If commaok is false, resok will be nil.
func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
	iface := s.expr(n.Left)
	typ := s.ifaceType(n.Left, iface)  // actual concrete type
	target := s.expr(typename(n.Type)) // target type
	if !isdirectiface(n.Type) {
		// walk rewrites ODOTTYPE/OAS2DOTTYPE into runtime calls except for this case.
		Fatalf("dottype needs a direct iface type %s", n.Type)
	if Debug_typeassert > 0 {
		Warnl(n.Lineno, "type assertion inlined")

	// TODO: If we have a nonempty interface and its itab field is nil,
	// then this test is redundant and ifaceType should just branch directly to bFail.
	cond := s.newValue2(ssa.OpEqPtr, Types[TBOOL], typ, target)
	b.Kind = ssa.BlockIf
	b.Likely = ssa.BranchLikely

	byteptr := Ptrto(Types[TUINT8])

	bOk := s.f.NewBlock(ssa.BlockPlain)
	bFail := s.f.NewBlock(ssa.BlockPlain)

	// Non-commaok path:
	// on failure, panic by calling panicdottype
	taddr := s.newValue1A(ssa.OpAddr, byteptr, &ssa.ExternSymbol{Typ: byteptr, Sym: typenamesym(n.Left.Type)}, s.sb)
	s.rtcall(panicdottype, false, nil, typ, target, taddr)

	// on success, return idata field
	return s.newValue1(ssa.OpIData, n.Type, iface), nil

	// commaok is the more complicated case because we have
	// a control flow merge point.
	bEnd := s.f.NewBlock(ssa.BlockPlain)

	// type assertion succeeded
	s.vars[&idataVar] = s.newValue1(ssa.OpIData, n.Type, iface)
	s.vars[&okVar] = s.constBool(true)

	// type assertion failed
	s.vars[&idataVar] = s.constNil(byteptr)
	s.vars[&okVar] = s.constBool(false)
	bFail.AddEdgeTo(bEnd)

	// Merge point: read both phis, then drop the temporaries.
	res = s.variable(&idataVar, byteptr)
	resok = s.variable(&okVar, Types[TBOOL])
	delete(s.vars, &idataVar)
	delete(s.vars, &okVar)
// checkgoto checks that a goto from from to to does not
// jump into a block or jump over variable declarations.
// It is a copy of checkgoto in the pre-SSA backend,
// modified only for line number handling.
// TODO: document how this works and why it is designed the way it is.
func (s *state) checkgoto(from *Node, to *Node) {
	// Same scope: trivially legal.
	if from.Sym == to.Sym {
	// Walk both scope chains via Sym.Link (depth counters on elided lines).
	for fs := from.Sym; fs != nil; fs = fs.Link {
	for fs := to.Sym; fs != nil; fs = fs.Link {
	// Pop the deeper chain until both are at equal depth.
	for ; nf > nt; nf-- {
	// decide what to complain about.
	// prefer to complain about 'into block' over declarations,
	// so scan backward to find most recent block or else dcl.
	for ; nt > nf; nt-- {
	// Report at the goto's own line, not the offending declaration's.
	lno := from.Left.Lineno
	yyerrorl(lno, "goto %v jumps into block starting at %v", from.Left.Sym, linestr(block.Lastlineno))
	yyerrorl(lno, "goto %v jumps over declaration of %v at %v", from.Left.Sym, dcl, linestr(dcl.Lastlineno))
// variable returns the value of a variable at the current location.
// If the variable has no definition in the current block yet, a FwdRef
// placeholder is created and queued for later resolution.
func (s *state) variable(name *Node, t ssa.Type) *ssa.Value {
	v = s.newValue0A(ssa.OpFwdRef, t, name)
	s.fwdRefs = append(s.fwdRefs, v)
	s.addNamedValue(name, v)
// mem returns the current memory pseudo-value, tracked like any other
// variable via the dummy memVar node.
func (s *state) mem() *ssa.Value {
	return s.variable(&memVar, ssa.TypeMem)
// linkForwardReferences resolves all pending FwdRef values, inserting
// phis where needed. dm provides a sparse lookup of defining blocks.
func (s *state) linkForwardReferences(dm *sparseDefState) {

	// Build SSA graph. Each variable on its first use in a basic block
	// leaves a FwdRef in that block representing the incoming value
	// of that variable. This function links that ref up with possible definitions,
	// inserting Phi values as needed. This is essentially the algorithm
	// described by Braun, Buchwald, Hack, Leißa, Mallon, and Zwinkau:
	// http://pp.info.uni-karlsruhe.de/uploads/publikationen/braun13cc.pdf
	// Differences:
	// - We use FwdRef nodes to postpone phi building until the CFG is
	//   completely built. That way we can avoid the notion of "sealed"
	// - Phi optimization is a separate pass (in ../ssa/phielim.go).
	// Work stack: resolving one ref may push more.
	for len(s.fwdRefs) > 0 {
		v := s.fwdRefs[len(s.fwdRefs)-1]
		s.fwdRefs = s.fwdRefs[:len(s.fwdRefs)-1]
		s.resolveFwdRef(v, dm)
// resolveFwdRef modifies v to be the variable's value at the start of its block.
// v must be a FwdRef op.
func (s *state) resolveFwdRef(v *ssa.Value, dm *sparseDefState) {
	name := v.Aux.(*Node)
	// Live variable at start of function.
	if strings.HasPrefix(name.Sym.Name, "autotmp_") {
		// It's likely that this is an uninitialized variable in the entry block.
		s.Fatalf("Treating auto as if it were arg, func %s, node %v, value %v", b.Func.Name, name, v)
	// Not SSAable. Load it.
	addr := s.decladdrs[name]
	// TODO: closure args reach here.
	s.Unimplementedf("unhandled closure arg %s at entry to function %s", name, b.Func.Name)
	if _, ok := addr.Aux.(*ssa.ArgSymbol); !ok {
		s.Fatalf("variable live at start of function %s is not an argument %s", b.Func.Name, name)
	// Turn the FwdRef into a load from the argument's address at entry.
	v.AddArgs(addr, s.startmem)
	if len(b.Preds) == 0 {
		// This block is dead; we have no predecessors and we're not the entry block.
		// It doesn't matter what we use here as long as it is well-formed.
		v.Op = ssa.OpUnknown
	// Find variable value on each predecessor.
	var argstore [4]*ssa.Value
	args := argstore[:0]
	for _, e := range b.Preds {
		p = dm.FindBetterDefiningBlock(name, p) // try sparse improvement on p
		args = append(args, s.lookupVarOutgoing(p, v.Type, name, v.Line))
	// Decide if we need a phi or not. We need a phi if there
	// are two different args (which are both not v).
	for _, a := range args {
		continue // self-reference
		continue // already have this witness
	// two witnesses, need a phi value
	w = a // save witness
	s.Fatalf("no witness for reachable phi %s", v)
	// One witness. Make v a copy of w.
// lookupVarOutgoing finds the variable's value at the end of block b.
func (s *state) lookupVarOutgoing(b *ssa.Block, t ssa.Type, name *Node, line int32) *ssa.Value {
	if v, ok := s.defvars[b.ID][name]; ok {
	// The variable is not defined by b and we haven't looked it up yet.
	// If b has exactly one predecessor, loop to look it up there.
	// Otherwise, give up and insert a new FwdRef and resolve it later.
	if len(b.Preds) != 1 {
	b = b.Preds[0].Block()

	// Generate a FwdRef for the variable and return that.
	v := b.NewValue0A(line, ssa.OpFwdRef, t, name)
	s.fwdRefs = append(s.fwdRefs, v)
	// Cache it so repeated lookups in b reuse the same ref.
	s.defvars[b.ID][name] = v
	s.addNamedValue(name, v)
// addNamedValue records v as a value of the named variable n in
// f.NamedValues, for use by debugging and stack maps. Dummy, autotmp,
// and named-output nodes are deliberately not tracked.
func (s *state) addNamedValue(n *Node, v *ssa.Value) {
	if n.Class == Pxxx {
		// Don't track our dummy nodes (&memVar etc.).
	if strings.HasPrefix(n.Sym.Name, "autotmp_") {
		// Don't track autotmp_ variables.
	if n.Class == PPARAMOUT {
		// Don't track named output values. This prevents return values
		// from being assigned too early. See #14591 and #14762. TODO: allow this.
	if n.Class == PAUTO && n.Xoffset != 0 {
		s.Fatalf("AUTO var with offset %s %d", n, n.Xoffset)
	loc := ssa.LocalSlot{N: n, Type: n.Type, Off: 0}
	values, ok := s.f.NamedValues[loc]
	// First value for this slot: register the slot name too.
	s.f.Names = append(s.f.Names, loc)
	s.f.NamedValues[loc] = append(values, v)
// Branch is an unresolved branch: an emitted branch instruction whose
// target Prog is not yet known because the target block hasn't been
// emitted. Targets are patched in after all blocks are generated.
type Branch struct {
	P *obj.Prog  // branch instruction
	B *ssa.Block // target
// SSAGenState contains state needed during Prog generation.
type SSAGenState struct {
	// Branches remembers all the branch instructions we've seen
	// and where they would like to go.

	// bstart remembers where each block starts (indexed by block ID)
// Pc returns the current Prog.
func (s *SSAGenState) Pc() *obj.Prog {

// SetLineno sets the current source line number.
func (s *SSAGenState) SetLineno(l int32) {
3937 // genssa appends entries to ptxt for each instruction in f.
3938 // gcargs and gclocals are filled in with pointer maps for the frame.
3939 func genssa(f *ssa.Func, ptxt *obj.Prog, gcargs, gclocals *Sym) {
3942 e := f.Config.Frontend().(*ssaExport)
3943 // We're about to emit a bunch of Progs.
3944 // Since the only way to get here is to explicitly request it,
3945 // just fail on unimplemented instead of trying to unwind our mess.
3946 e.mustImplement = true
3948 // Remember where each block starts.
3949 s.bstart = make([]*obj.Prog, f.NumBlocks())
3951 var valueProgs map[*obj.Prog]*ssa.Value
3952 var blockProgs map[*obj.Prog]*ssa.Block
3953 var logProgs = e.log
3955 valueProgs = make(map[*obj.Prog]*ssa.Value, f.NumValues())
3956 blockProgs = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
3957 f.Logf("genssa %s\n", f.Name)
3958 blockProgs[Pc] = f.Blocks[0]
3961 // Emit basic blocks
3962 for i, b := range f.Blocks {
3964 // Emit values in block
3965 Thearch.SSAMarkMoves(&s, b)
3966 for _, v := range b.Values {
3968 Thearch.SSAGenValue(&s, v)
3970 for ; x != Pc; x = x.Link {
3975 // Emit control flow instructions for block
3977 if i < len(f.Blocks)-1 && (Debug['N'] == 0 || b.Kind == ssa.BlockCall) {
3978 // If -N, leave next==nil so every block with successors
3979 // ends in a JMP (except call blocks - plive doesn't like
3980 // select{send,recv} followed by a JMP call). Helps keep
3981 // line numbers for otherwise empty blocks.
3982 next = f.Blocks[i+1]
3985 Thearch.SSAGenBlock(&s, b, next)
3987 for ; x != Pc; x = x.Link {
3994 for _, br := range s.Branches {
3995 br.P.To.Val = s.bstart[br.B.ID]
3999 for p := ptxt; p != nil; p = p.Link {
4001 if v, ok := valueProgs[p]; ok {
4003 } else if b, ok := blockProgs[p]; ok {
4006 s = " " // most value and branch strings are 2-3 characters long
4008 f.Logf("%s\t%s\n", s, p)
4010 if f.Config.HTML != nil {
4011 saved := ptxt.Ctxt.LineHist.PrintFilenameOnly
4012 ptxt.Ctxt.LineHist.PrintFilenameOnly = true
4013 var buf bytes.Buffer
4014 buf.WriteString("<code>")
4015 buf.WriteString("<dl class=\"ssa-gen\">")
4016 for p := ptxt; p != nil; p = p.Link {
4017 buf.WriteString("<dt class=\"ssa-prog-src\">")
4018 if v, ok := valueProgs[p]; ok {
4019 buf.WriteString(v.HTML())
4020 } else if b, ok := blockProgs[p]; ok {
4021 buf.WriteString(b.HTML())
4023 buf.WriteString("</dt>")
4024 buf.WriteString("<dd class=\"ssa-prog\">")
4025 buf.WriteString(html.EscapeString(p.String()))
4026 buf.WriteString("</dd>")
4027 buf.WriteString("</li>")
4029 buf.WriteString("</dl>")
4030 buf.WriteString("</code>")
4031 f.Config.HTML.WriteColumn("genssa", buf.String())
4032 ptxt.Ctxt.LineHist.PrintFilenameOnly = saved
4037 if f.StaticData != nil {
4038 for _, n := range f.StaticData.([]*Node) {
4039 if !gen_as_init(n, false) {
4040 Fatalf("non-static data marked as static: %v\n\n", n)
4045 // Allocate stack frame
4048 // Generate gc bitmaps.
4049 liveness(Curfn, ptxt, gcargs, gclocals)
4051 // Add frame prologue. Zero ambiguously live variables.
4052 Thearch.Defframe(ptxt)
4053 if Debug['f'] != 0 {
4057 // Remove leftover instrumentation from the instruction stream.
4060 f.Config.HTML.Close()
// movZero generates a register indirect move with a 0 immediate and keeps track of bytes left and next offset
// as: the move opcode to use; width: bytes written by one move;
// nbytes: bytes remaining before this move; offset: store offset;
// regnum: base register holding the destination address.
func movZero(as obj.As, width int64, nbytes int64, offset int64, regnum int16) (nleft int64, noff int64) {
	// TODO: use zero register on archs that support it.
	p.From.Type = obj.TYPE_CONST
	p.To.Type = obj.TYPE_MEM
	p.To.Offset = offset
	// Account for the bytes just written; offset is advanced on an elided line.
	nleft = nbytes - width
	return nleft, offset
// FloatingEQNEJump describes one branch of the two-instruction jump
// sequences needed for floating-point ==/!= (which must also account
// for NaN via the parity flag); used by SSAGenFPJump.
type FloatingEQNEJump struct {
// oneFPJump emits a single conditional jump described by jumps for
// block b, records it as an unresolved Branch, and returns the
// updated branches slice.
func oneFPJump(b *ssa.Block, jumps *FloatingEQNEJump, likely ssa.BranchPrediction, branches []Branch) []Branch {
	p := Prog(jumps.Jump)
	p.To.Type = obj.TYPE_BRANCH
	branches = append(branches, Branch{p, b.Succs[to].Block()})

	// liblink reorders the instruction stream as it sees fit.
	// Pass along what we know so liblink can make use of it.
	// TODO: Once we've fully switched to SSA,
	// make liblink leave our output alone.
	case ssa.BranchUnlikely:
		p.From.Type = obj.TYPE_CONST
	case ssa.BranchLikely:
		p.From.Type = obj.TYPE_CONST
// SSAGenFPJump emits the jump sequence for a floating-point EQ/NE
// block b. next is the block laid out immediately after b; the choice
// of which jumps[i] pair to use depends on whether next is a successor.
func SSAGenFPJump(s *SSAGenState, b, next *ssa.Block, jumps *[2][2]FloatingEQNEJump) {
	case b.Succs[0].Block():
		s.Branches = oneFPJump(b, &jumps[0][0], likely, s.Branches)
		s.Branches = oneFPJump(b, &jumps[0][1], likely, s.Branches)
	case b.Succs[1].Block():
		s.Branches = oneFPJump(b, &jumps[1][0], likely, s.Branches)
		s.Branches = oneFPJump(b, &jumps[1][1], likely, s.Branches)
	// Neither successor is next: emit both jumps plus an unconditional
	// jump to the second successor.
	s.Branches = oneFPJump(b, &jumps[1][0], likely, s.Branches)
	s.Branches = oneFPJump(b, &jumps[1][1], likely, s.Branches)
	q.To.Type = obj.TYPE_BRANCH
	s.Branches = append(s.Branches, Branch{q, b.Succs[1].Block()})
// AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a.
func AddAux(a *obj.Addr, v *ssa.Value) {
	AddAux2(a, v, v.AuxInt)

// AddAux2 is like AddAux but takes the integer offset explicitly
// instead of reading v.AuxInt.
func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
	if a.Type != obj.TYPE_MEM {
		v.Fatalf("bad AddAux addr %v", a)
	// add integer offset
	// If no additional symbol offset, we're done.
	// Add symbol's offset from its base register.
	switch sym := v.Aux.(type) {
	case *ssa.ExternSymbol:
		a.Name = obj.NAME_EXTERN
		switch s := sym.Sym.(type) {
			v.Fatalf("ExternSymbol.Sym is %T", s)
	case *ssa.ArgSymbol:
		n := sym.Node.(*Node)
		a.Name = obj.NAME_PARAM
		a.Sym = Linksym(n.Orig.Sym)
		a.Offset += n.Xoffset // TODO: why do I have to add this here? I don't for auto variables.
	case *ssa.AutoSymbol:
		n := sym.Node.(*Node)
		a.Name = obj.NAME_AUTO
		a.Sym = Linksym(n.Sym)
		v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
// extendIndex extends v to a full int width.
// Values already at int width are returned unchanged; narrower values
// are sign- or zero-extended according to their type's signedness.
func (s *state) extendIndex(v *ssa.Value) *ssa.Value {
	size := v.Type.Size()
	if size == s.config.IntSize {
	if size > s.config.IntSize {
		// TODO: truncate 64-bit indexes on 32-bit pointer archs. We'd need to test
		// the high word and branch to out-of-bounds failure if it is not 0.
		s.Unimplementedf("64->32 index truncation not implemented")

	// Extend value to the required size
	// The switch key encodes (source size, target int size) as one number,
	// e.g. 8-bit source on a 4-byte-int machine is 10*1+4 = 14.
	if v.Type.IsSigned() {
		switch 10*size + s.config.IntSize {
			op = ssa.OpSignExt8to32
			op = ssa.OpSignExt8to64
			op = ssa.OpSignExt16to32
			op = ssa.OpSignExt16to64
			op = ssa.OpSignExt32to64
			s.Fatalf("bad signed index extension %s", v.Type)
	switch 10*size + s.config.IntSize {
		op = ssa.OpZeroExt8to32
		op = ssa.OpZeroExt8to64
		op = ssa.OpZeroExt16to32
		op = ssa.OpZeroExt16to64
		op = ssa.OpZeroExt32to64
		s.Fatalf("bad unsigned index extension %s", v.Type)
	return s.newValue1(op, Types[TINT], v)
// SSARegNum returns the register (in cmd/internal/obj numbering) to
// which v has been allocated. Panics if v is not assigned to a
// register.
// TODO: Make this panic again once it stops happening routinely.
func SSARegNum(v *ssa.Value) int16 {
	reg := v.Block.Func.RegAlloc[v.ID]
	v.Unimplementedf("nil regnum for value: %s\n%s\n", v.LongString(), v.Block.Func)
	// Translate the SSA register number to the obj package's numbering.
	return Thearch.SSARegToReg[reg.(*ssa.Register).Num]
// AutoVar returns a *Node and int64 representing the auto variable and offset within it
// where v should be spilled.
func AutoVar(v *ssa.Value) (*Node, int64) {
	loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot)
	// Sanity check: the spill slot must be large enough for v.
	if v.Type.Size() > loc.Type.Size() {
		v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type)
	return loc.N.(*Node), loc.Off
// fieldIdx finds the index of the field referred to by the ODOT node n.
// Panics if n does not name a field of its struct type.
func fieldIdx(n *Node) int {
	panic("ODOT's LHS is not a struct")

	// Scan the struct's fields looking for one at n's offset.
	for _, t1 := range t.Fields().Slice() {
		if t1.Offset != n.Xoffset {
			panic("field offset doesn't match")
	panic(fmt.Sprintf("can't find field in expr %s\n", n))

	// TODO: keep the result of this function somewhere in the ODOT Node
	// so we don't have to recompute it each time we need it.
// ssaExport exports a bunch of compiler services for the ssa backend.
// It implements the frontend interface the ssa package compiles against.
type ssaExport struct {
4270 func (s *ssaExport) TypeBool() ssa.Type { return Types[TBOOL] }
4271 func (s *ssaExport) TypeInt8() ssa.Type { return Types[TINT8] }
4272 func (s *ssaExport) TypeInt16() ssa.Type { return Types[TINT16] }
4273 func (s *ssaExport) TypeInt32() ssa.Type { return Types[TINT32] }
4274 func (s *ssaExport) TypeInt64() ssa.Type { return Types[TINT64] }
4275 func (s *ssaExport) TypeUInt8() ssa.Type { return Types[TUINT8] }
4276 func (s *ssaExport) TypeUInt16() ssa.Type { return Types[TUINT16] }
4277 func (s *ssaExport) TypeUInt32() ssa.Type { return Types[TUINT32] }
4278 func (s *ssaExport) TypeUInt64() ssa.Type { return Types[TUINT64] }
4279 func (s *ssaExport) TypeFloat32() ssa.Type { return Types[TFLOAT32] }
4280 func (s *ssaExport) TypeFloat64() ssa.Type { return Types[TFLOAT64] }
4281 func (s *ssaExport) TypeInt() ssa.Type { return Types[TINT] }
4282 func (s *ssaExport) TypeUintptr() ssa.Type { return Types[TUINTPTR] }
4283 func (s *ssaExport) TypeString() ssa.Type { return Types[TSTRING] }
4284 func (s *ssaExport) TypeBytePtr() ssa.Type { return Ptrto(Types[TUINT8]) }
// StringData returns a symbol (a *Sym wrapped in an interface) which
// is the data component of a global string constant containing s.
func (*ssaExport) StringData(s string) interface{} {
	// TODO: is idealstring correct? It might not matter...
	_, data := stringsym(s)
	return &ssa.ExternSymbol{Typ: idealstring, Sym: data}
// Auto allocates a new automatic (stack) variable of type t for the
// SSA backend, registering it with the current function.
func (e *ssaExport) Auto(t ssa.Type) ssa.GCNode {
	n := temp(t.(*Type))   // Note: adds new auto to Curfn.Func.Dcl list
	e.mustImplement = true // This modifies the input to SSA, so we want to make sure we succeed from here!
// SplitString decomposes a string-typed LocalSlot into its pointer and
// length halves, either as fresh named autos (for SSA-able autos) or
// as offset views into the original slot.
func (e *ssaExport) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
	ptrType := Ptrto(Types[TUINT8])
	lenType := Types[TINT]
	if n.Class == PAUTO && !n.Addrtaken {
		// Split this string up into two separate variables.
		p := e.namedAuto(n.Sym.Name+".ptr", ptrType)
		l := e.namedAuto(n.Sym.Name+".len", lenType)
		return ssa.LocalSlot{N: p, Type: ptrType, Off: 0}, ssa.LocalSlot{N: l, Type: lenType, Off: 0}
	// Return the two parts of the larger variable.
	return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off}, ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)}
// SplitInterface decomposes an interface-typed LocalSlot into its two
// pointer words (type/itab and data), analogous to SplitString.
func (e *ssaExport) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
	t := Ptrto(Types[TUINT8])
	if n.Class == PAUTO && !n.Addrtaken {
		// Split this interface up into two separate variables.
		// First-word suffix depends on eface vs. iface (set on an elided line).
		if n.Type.IsEmptyInterface() {
		c := e.namedAuto(n.Sym.Name+f, t)
		d := e.namedAuto(n.Sym.Name+".data", t)
		return ssa.LocalSlot{N: c, Type: t, Off: 0}, ssa.LocalSlot{N: d, Type: t, Off: 0}
	// Return the two parts of the larger variable.
	return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + int64(Widthptr)}
// SplitSlice decomposes a slice-typed LocalSlot into pointer, length,
// and capacity parts, analogous to SplitString.
func (e *ssaExport) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) {
	ptrType := Ptrto(name.Type.ElemType().(*Type))
	lenType := Types[TINT]
	if n.Class == PAUTO && !n.Addrtaken {
		// Split this slice up into three separate variables.
		p := e.namedAuto(n.Sym.Name+".ptr", ptrType)
		l := e.namedAuto(n.Sym.Name+".len", lenType)
		c := e.namedAuto(n.Sym.Name+".cap", lenType)
		return ssa.LocalSlot{N: p, Type: ptrType, Off: 0}, ssa.LocalSlot{N: l, Type: lenType, Off: 0}, ssa.LocalSlot{N: c, Type: lenType, Off: 0}
	// Return the three parts of the larger variable.
	return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off},
		ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)},
		ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(2*Widthptr)}
// SplitComplex decomposes a complex-typed LocalSlot into its real and
// imaginary float halves (each half the size of the complex value).
func (e *ssaExport) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
	s := name.Type.Size() / 2
	if n.Class == PAUTO && !n.Addrtaken {
		// Split this complex up into two separate variables.
		c := e.namedAuto(n.Sym.Name+".real", t)
		d := e.namedAuto(n.Sym.Name+".imag", t)
		return ssa.LocalSlot{N: c, Type: t, Off: 0}, ssa.LocalSlot{N: d, Type: t, Off: 0}
	// Return the two parts of the larger variable.
	return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + s}
// SplitStruct returns a LocalSlot for field i of the struct-typed
// slot name, as a fresh named auto when possible or as an offset view.
func (e *ssaExport) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
	ft := st.FieldType(i)
	if n.Class == PAUTO && !n.Addrtaken {
		// Note: the _ field may appear several times. But
		// have no fear, identically-named but distinct Autos are
		// ok, albeit maybe confusing for a debugger.
		x := e.namedAuto(n.Sym.Name+"."+st.FieldName(i), ft)
		return ssa.LocalSlot{N: x, Type: ft, Off: 0}
	return ssa.LocalSlot{N: n, Type: ft, Off: name.Off + st.FieldOff(i)}
// namedAuto returns a new AUTO variable with the given name and type.
// The node is registered with the current function's declaration list.
func (e *ssaExport) namedAuto(name string, typ ssa.Type) ssa.GCNode {
	s := &Sym{Name: name, Pkg: autopkg}
	n := Nod(ONAME, nil, nil)
	n.Name.Curfn = Curfn
	Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
	// This modifies the input to SSA; from here on we must succeed.
	e.mustImplement = true
// CanSSA reports whether values of type t can be represented in SSA form.
func (e *ssaExport) CanSSA(t ssa.Type) bool {
	return canSSAType(t.(*Type))

// Line formats a line number for diagnostics.
func (e *ssaExport) Line(line int32) string {
	return linestr(line)
// Logf logs a message from the compiler.
func (e *ssaExport) Logf(msg string, args ...interface{}) {
	// If e was marked as unimplemented, anything could happen. Ignore.
	if e.log && !e.unimplemented {
		fmt.Printf(msg, args...)

// Log reports whether logging is enabled.
func (e *ssaExport) Log() bool {
// Fatalf reports a compiler error and exits.
func (e *ssaExport) Fatalf(line int32, msg string, args ...interface{}) {
	// If e was marked as unimplemented, anything could happen. Ignore.
	if !e.unimplemented {
		Fatalf(msg, args...)
// Unimplementedf reports that the function cannot be compiled.
// It will be removed once SSA work is complete.
func (e *ssaExport) Unimplementedf(line int32, msg string, args ...interface{}) {
	// If the backend was required to succeed, this is fatal.
	if e.mustImplement {
		Fatalf(msg, args...)
	const alwaysLog = false // enable to calculate top unimplemented features
	if !e.unimplemented && (e.log || alwaysLog) {
		// first implementation failure, print explanation
		fmt.Printf("SSA unimplemented: "+msg+"\n", args...)
	// Remember the failure so later diagnostics are suppressed.
	e.unimplemented = true
// Warnl reports a "warning", which is usually flag-triggered
// logging output for the benefit of tests.
func (e *ssaExport) Warnl(line int32, fmt_ string, args ...interface{}) {
	Warnl(line, fmt_, args...)

// Debug_checknil reports whether nil-check debugging is enabled.
func (e *ssaExport) Debug_checknil() bool {
	return Debug_checknil != 0
4458 func (n *Node) Typ() ssa.Type {