1 // Copyright 2012 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
11 "cmd/compile/internal/base"
12 "cmd/compile/internal/ir"
13 "cmd/compile/internal/reflectdata"
14 "cmd/compile/internal/staticinit"
15 "cmd/compile/internal/typecheck"
16 "cmd/compile/internal/types"
20 // Rewrite tree to use separate statements to enforce
21 // order of evaluation. Makes walk easier, because it
22 // can (after this runs) reorder at will within an expression.
24 // Rewrite m[k] op= r into m[k] = m[k] op r if op is / or %.
26 // Introduce temporaries as needed by runtime routines.
27 // For example, the map runtime routines take the map key
28 // by reference, so make sure all map keys are addressable
29 // by copying them to temporaries as needed.
30 // The same is true for channel operations.
32 // Arrange that map index expressions only appear in direct
33 // assignments x = m[k] or m[k] = x, never in larger expressions.
35 // Arrange that receive expressions only appear in direct assignments
36 // x = <-c or as standalone statements <-c, never in larger expressions.
38 // TODO(rsc): The temporary introduction during multiple assignments
39 // should be moved into this file, so that the temporaries can be cleaned
40 // and so that conversions implicit in the OAS2FUNC and OAS2RECV
41 // nodes can be made explicit and then have their temporaries cleaned.
43 // TODO(rsc): Goto and multilevel break/continue can jump over
44 // inserted VARKILL annotations. Work out a way to handle these.
45 // The current implementation is safe, in that it will execute correctly.
46 // But it won't reuse temporaries as aggressively as it might, and
47 // it can result in unnecessary zeroing of those variables in the function
50 // orderState holds state during the ordering process.
// NOTE(review): this listing elides some lines (the embedded line numbers
// skip); any fields past 55 and the closing brace are not shown here.
51 type orderState struct {
52 out []ir.Node // list of generated statements
53 temp []*ir.Name // stack of temporary variables
54 free map[string][]*ir.Name // free list of unused temporaries, by type.LinkString().
55 edit func(ir.Node) ir.Node // cached closure of o.exprNoLHS
58 // Order rewrites fn.Nbody to apply the ordering constraints
59 // described in the comment at the top of the file.
60 func order(fn *ir.Func) {
// Debug dump of the body before ordering. NOTE(review): the guard
// condition around this dump is elided in this listing.
62 s := fmt.Sprintf("\nbefore order %v", fn.Sym())
63 ir.DumpList(s, fn.Body)
// Order the whole body with a fresh (empty) free-temporary map.
66 orderBlock(&fn.Body, map[string][]*ir.Name{})
69 // append typechecks stmt and appends it to out.
70 func (o *orderState) append(stmt ir.Node) {
// Statements generated during ordering must be typechecked before emission.
71 o.out = append(o.out, typecheck.Stmt(stmt))
74 // newTemp allocates a new temporary with the given type,
75 // pushes it onto the temp stack, and returns it.
76 // If clear is true, newTemp emits code to zero the temporary.
77 func (o *orderState) newTemp(t *types.Type, clear bool) *ir.Name {
// Reuse a free temporary of the same type if one is available.
// NOTE(review): the computation of key (presumably t.LinkString()) and
// the fresh-allocation fallback path are elided in this listing.
80 if a := o.free[key]; len(a) > 0 {
// Sanity check: the pooled temp must have exactly the requested type.
82 if !types.Identical(t, v.Type()) {
83 base.Fatalf("expected %L to have type %v", v, t)
// Pop the reused temp off the free list.
85 o.free[key] = a[:len(a)-1]
// Zero the temporary via v = nil when the caller requested clearing.
90 o.append(ir.NewAssignStmt(base.Pos, v, nil))
93 o.temp = append(o.temp, v)
97 // copyExpr behaves like newTemp but also emits
98 // code to initialize the temporary to the value n.
// See copyExprClear for the variant that zeroes the temp first.
99 func (o *orderState) copyExpr(n ir.Node) *ir.Name {
100 return o.copyExpr1(n, false)
103 // copyExprClear is like copyExpr but clears the temp before assignment.
104 // It is provided for use when the evaluation of tmp = n turns into
105 // a function call that is passed a pointer to the temporary as the output space.
106 // If the call blocks before tmp has been written,
107 // the garbage collector will still treat the temporary as live,
108 // so we must zero it before entering that call.
109 // Today, this only happens for channel receive operations.
110 // (The other candidate would be map access, but map access
111 // returns a pointer to the result data instead of taking a pointer
// to be filled in — so no clearing is required there.)
113 func (o *orderState) copyExprClear(n ir.Node) *ir.Name {
114 return o.copyExpr1(n, true)
// copyExpr1 is the common implementation behind copyExpr and copyExprClear:
// allocate a temp, emit tmp = n, and return the temp.
// NOTE(review): the computation of t (presumably n.Type()) and the final
// return are elided in this listing.
117 func (o *orderState) copyExpr1(n ir.Node, clear bool) *ir.Name {
119 v := o.newTemp(t, clear)
120 o.append(ir.NewAssignStmt(base.Pos, v, n))
124 // cheapExpr returns a cheap version of n.
125 // The definition of cheap is that n is a variable or constant.
126 // If not, cheapExpr allocates a new tmp, emits tmp = n,
127 // and then returns tmp.
128 func (o *orderState) cheapExpr(n ir.Node) ir.Node {
// Names and constants are already cheap; return them as-is
// (the return statement itself is elided in this listing).
134 case ir.ONAME, ir.OLITERAL, ir.ONIL:
// len/cap of a cheap operand is itself cheap: recurse on the operand
// and rebuild the expression only if the operand changed.
136 case ir.OLEN, ir.OCAP:
137 n := n.(*ir.UnaryExpr)
138 l := o.cheapExpr(n.X)
142 a := ir.SepCopy(n).(*ir.UnaryExpr)
144 return typecheck.Expr(a)
150 // safeExpr returns a safe version of n.
151 // The definition of safe is that n can appear multiple times
152 // without violating the semantics of the original program,
153 // and that assigning to the safe version has the same effect
154 // as assigning to the original n.
156 // The intended use is to apply to x when rewriting x += y into x = x + y.
// NOTE(review): several case labels and the fast-path returns are elided
// in this listing; each case rebuilds the node over cheap sub-expressions.
157 func (o *orderState) safeExpr(n ir.Node) ir.Node {
159 case ir.ONAME, ir.OLITERAL, ir.ONIL:
162 case ir.OLEN, ir.OCAP:
163 n := n.(*ir.UnaryExpr)
168 a := ir.SepCopy(n).(*ir.UnaryExpr)
170 return typecheck.Expr(a)
// Field selection: make the receiver cheap, then rebuild the selector.
173 n := n.(*ir.SelectorExpr)
178 a := ir.SepCopy(n).(*ir.SelectorExpr)
180 return typecheck.Expr(a)
183 n := n.(*ir.SelectorExpr)
184 l := o.cheapExpr(n.X)
188 a := ir.SepCopy(n).(*ir.SelectorExpr)
190 return typecheck.Expr(a)
// Pointer dereference: same pattern with a StarExpr.
193 n := n.(*ir.StarExpr)
194 l := o.cheapExpr(n.X)
198 a := ir.SepCopy(n).(*ir.StarExpr)
200 return typecheck.Expr(a)
202 case ir.OINDEX, ir.OINDEXMAP:
203 n := n.(*ir.IndexExpr)
// Arrays need an addressable base (handling elided here);
// both base and index are made cheap before rebuilding.
205 if n.X.Type().IsArray() {
210 r := o.cheapExpr(n.Index)
211 if l == n.X && r == n.Index {
214 a := ir.SepCopy(n).(*ir.IndexExpr)
217 return typecheck.Expr(a)
// Any other op cannot be made safe here.
220 base.Fatalf("order.safeExpr %v", n.Op())
221 return nil // not reached
225 // isaddrokay reports whether it is okay to pass n's address to runtime routines.
226 // Taking the address of a variable makes the liveness and optimization analyses
227 // lose track of where the variable's lifetime ends. To avoid hurting the analyses
228 // of ordinary stack variables, those are not 'isaddrokay'. Temporaries are okay,
229 // because we emit explicit VARKILL instructions marking the end of those
230 // temporaries' lifetimes.
// Globals (PEXTERN) are also okay: their lifetime is the whole program.
231 func isaddrokay(n ir.Node) bool {
232 return ir.IsAddressable(n) && (n.Op() != ir.ONAME || n.(*ir.Name).Class == ir.PEXTERN || ir.IsAutoTmp(n))
235 // addrTemp ensures that n is okay to pass by address to runtime routines.
236 // If the original argument n is not okay, addrTemp creates a tmp, emits
237 // tmp = n, and then returns tmp.
238 // The result of addrTemp MUST be assigned back to n, e.g.
239 // n.Left = o.addrTemp(n.Left)
// NOTE(review): the general (non-constant) fallback path is elided in
// this listing; only the constant fast path is visible.
240 func (o *orderState) addrTemp(n ir.Node) ir.Node {
241 if n.Op() == ir.OLITERAL || n.Op() == ir.ONIL {
242 // TODO: expand this to all static composite literal nodes?
243 n = typecheck.DefaultLit(n, nil)
244 types.CalcSize(n.Type())
// Materialize the constant as a read-only static, so its address can
// be taken without any runtime copy.
245 vstat := readonlystaticname(n.Type())
246 var s staticinit.Schedule
247 s.StaticAssign(vstat, 0, n, n.Type())
// A constant's static assignment must never need runtime code.
249 base.Fatalf("staticassign of const generated code: %+v", n)
251 vstat = typecheck.Expr(vstat).(*ir.Name)
260 // mapKeyTemp prepares n to be a key in a map runtime call and returns n.
261 // It should only be used for map runtime calls which have *_fast* versions.
262 func (o *orderState) mapKeyTemp(t *types.Type, n ir.Node) ir.Node {
263 // Most map calls need to take the address of the key.
264 // Exception: map*_fast* calls. See golang.org/issue/19015.
// Pick the parameter type kt expected by the selected fast routine.
// NOTE(review): the switch head and some case labels are elided here.
272 kt = types.Types[types.TUINT32]
274 kt = types.Types[types.TUINT64]
275 case mapfast32ptr, mapfast64ptr:
276 kt = types.Types[types.TUNSAFEPTR]
278 kt = types.Types[types.TSTRING]
// Convert the key n (type nt) to kt, choosing the cheapest legal way.
284 case nt.Kind() == kt.Kind(), nt.IsPtrShaped() && kt.IsPtrShaped():
285 // can directly convert (e.g. named type to underlying type, or one pointer to another)
286 return typecheck.Expr(ir.NewConvExpr(n.Pos(), ir.OCONVNOP, kt, n))
287 case nt.IsInteger() && kt.IsInteger():
288 // can directly convert (e.g. int32 to uint32)
289 if n.Op() == ir.OLITERAL && nt.IsSigned() {
290 // avoid constant overflow error
291 n = ir.NewConstExpr(constant.MakeUint64(uint64(ir.Int64Val(n))), n)
295 return typecheck.Expr(ir.NewConvExpr(n.Pos(), ir.OCONV, kt, n))
297 // Unsafe cast through memory.
298 // We'll need to do a load with type kt. Create a temporary of type kt to
299 // ensure sufficient alignment. nt may be under-aligned.
300 if kt.Align < nt.Align {
301 base.Fatalf("mapKeyTemp: key type is not sufficiently aligned, kt=%v nt=%v", kt, nt)
303 tmp := o.newTemp(kt, true)
// Store n into the temp through a pointer reinterpreted as *nt:
// *(*nt)(&tmp) = n, then the temp (as kt) is the key.
305 var e ir.Node = typecheck.NodAddr(tmp)
306 e = ir.NewConvExpr(n.Pos(), ir.OCONVNOP, nt.PtrTo(), e)
307 e = ir.NewStarExpr(n.Pos(), e)
308 o.append(ir.NewAssignStmt(base.Pos, e, n))
313 // mapKeyReplaceStrConv replaces OBYTES2STR by OBYTES2STRTMP
314 // in n to avoid string allocations for keys in map lookups.
315 // Returns a bool that signals if a modification was made.
// For:
319 // x = m[T1{... Tn{..., string(k), ...}]
320 // where k is []byte, T1 to Tn is a nesting of struct and array literals,
321 // the allocation of backing bytes for the string can be avoided
322 // by reusing the []byte backing array. These are special cases
323 // for avoiding allocations when converting byte slices to strings.
324 // It would be nice to handle these generally, but because
325 // []byte keys are not allowed in maps, the use of string(k)
326 // comes up in important cases in practice. See issue 3512.
// NOTE(review): the switch head, some case labels, and the result
// bookkeeping are elided in this listing.
327 func mapKeyReplaceStrConv(n ir.Node) bool {
// Direct conversion: rewrite in place.
331 n := n.(*ir.ConvExpr)
332 n.SetOp(ir.OBYTES2STRTMP)
// Struct literal: recurse into each field value.
335 n := n.(*ir.CompLitExpr)
336 for _, elem := range n.List {
337 elem := elem.(*ir.StructKeyExpr)
338 if mapKeyReplaceStrConv(elem.Value) {
// Array literal: recurse into each element (unwrapping OKEY entries).
343 n := n.(*ir.CompLitExpr)
344 for _, elem := range n.List {
345 if elem.Op() == ir.OKEY {
346 elem = elem.(*ir.KeyExpr).Value
348 if mapKeyReplaceStrConv(elem) {
358 // markTemp returns the top of the temporary variable stack.
// The returned mark is later passed to popTemp/cleanTemp to release
// every temporary created after this point.
359 func (o *orderState) markTemp() ordermarker {
360 return ordermarker(len(o.temp))
363 // popTemp pops temporaries off the stack until reaching the mark,
364 // which must have been returned by markTemp.
// Each popped temp is returned to the per-type free list so newTemp
// can reuse it.
365 func (o *orderState) popTemp(mark ordermarker) {
366 for _, n := range o.temp[mark:] {
367 key := n.Type().LinkString()
368 o.free[key] = append(o.free[key], n)
370 o.temp = o.temp[:mark]
373 // cleanTempNoPop emits VARKILL instructions to *out
374 // for each temporary above the mark on the temporary stack.
375 // It does not pop the temporaries from the stack.
376 func (o *orderState) cleanTempNoPop(mark ordermarker) []ir.Node {
// Walk from the top of the stack down to the mark, emitting a VARKILL
// for each temp. NOTE(review): the declaration of out/n and the final
// return are elided in this listing.
378 for i := len(o.temp) - 1; i >= int(mark); i-- {
380 out = append(out, typecheck.Stmt(ir.NewUnaryExpr(base.Pos, ir.OVARKILL, n)))
385 // cleanTemp emits VARKILL instructions for each temporary above the
386 // mark on the temporary stack and removes them from the stack.
// Combines cleanTempNoPop (emit kills) with popTemp (recycle temps).
387 func (o *orderState) cleanTemp(top ordermarker) {
388 o.out = append(o.out, o.cleanTempNoPop(top)...)
392 // stmtList orders each of the statements in the list.
// NOTE(review): the loop over the list and the per-statement o.stmt call
// are elided in this listing; only the make-slice-copy peephole is shown,
// which inspects each statement together with its successor.
393 func (o *orderState) stmtList(l ir.Nodes) {
396 orderMakeSliceCopy(s[i:])
401 // orderMakeSliceCopy matches the pattern:
402 // m = OMAKESLICE([]T, x); OCOPY(m, s)
403 // and rewrites it to:
404 // m = OMAKESLICECOPY([]T, x, s); nil
// The fused form lets the runtime allocate and copy in one step.
// Disabled under -N and when instrumenting (race/msan), where the two
// separate operations must remain observable.
405 func orderMakeSliceCopy(s []ir.Node) {
406 if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
// Need at least an OAS followed immediately by an OCOPY.
409 if len(s) < 2 || s[0] == nil || s[0].Op() != ir.OAS || s[1] == nil || s[1].Op() != ir.OCOPY {
413 as := s[0].(*ir.AssignStmt)
414 cp := s[1].(*ir.BinaryExpr)
415 if as.Y == nil || as.Y.Op() != ir.OMAKESLICE || ir.IsBlank(as.X) ||
416 as.X.Op() != ir.ONAME || cp.X.Op() != ir.ONAME || cp.Y.Op() != ir.ONAME ||
417 as.X.Name() != cp.X.Name() || cp.X.Name() == cp.Y.Name() {
418 // The line above this one is correct with the differing equality operators:
419 // we want as.X and cp.X to be the same name,
420 // but we want the initial data to be coming from a different name.
// Bail: pattern not matched (early return elided in this listing).
424 mk := as.Y.(*ir.MakeExpr)
// Only a heap-escaping make([]T, len) with no explicit cap qualifies.
425 if mk.Esc() == ir.EscNone || mk.Len == nil || mk.Cap != nil {
428 mk.SetOp(ir.OMAKESLICECOPY)
430 // Set bounded when m = OMAKESLICE([]T, len(s)); OCOPY(m, s)
431 mk.SetBounded(mk.Len.Op() == ir.OLEN && ir.SameSafeExpr(mk.Len.(*ir.UnaryExpr).X, cp.Y))
432 as.Y = typecheck.Expr(mk)
433 s[1] = nil // remove separate copy call
436 // edge inserts coverage instrumentation for libfuzzer.
// No-op unless -d=libfuzzer is enabled.
437 func (o *orderState) edge() {
438 if base.Debug.Libfuzzer == 0 {
442 // Create a new uint8 counter to be allocated in section
443 // __libfuzzer_extra_counters.
444 counter := staticinit.StaticName(types.Types[types.TUINT8])
445 counter.SetLibfuzzerExtraCounter(true)
// Emit counter += 1 at this edge. NOTE(review): the append of incr to
// o.out is elided in this listing.
448 incr := ir.NewAssignOpStmt(base.Pos, ir.OADD, counter, ir.NewInt(1))
452 // orderBlock orders the block of statements in n into a new slice,
453 // and then replaces the old slice in n with the new slice.
454 // free is a map that can be used to obtain temporary variables by type.
// NOTE(review): construction of the local orderState, the stmtList call,
// and the write-back of order.out into *n are elided in this listing.
455 func orderBlock(n *ir.Nodes, free map[string][]*ir.Name) {
458 mark := order.markTemp()
461 order.cleanTemp(mark)
465 // exprInPlace orders the side effects in *np and
466 // leaves them as the init list of the final *np.
467 // The result of exprInPlace MUST be assigned back to n, e.g.
468 // n.Left = o.exprInPlace(n.Left)
// A nested orderState ("order") collects the side effects; they are then
// attached to n as its init list rather than emitted to o.out.
469 func (o *orderState) exprInPlace(n ir.Node) ir.Node {
472 n = order.expr(n, nil)
473 n = ir.InitExpr(order.out, n)
475 // insert new temporaries from order
476 // at head of outer list.
477 o.temp = append(o.temp, order.temp...)
481 // orderStmtInPlace orders the side effects of the single statement *np
482 // and replaces it with the resulting statement list.
483 // The result of orderStmtInPlace MUST be assigned back to n, e.g.
484 // n.Left = orderStmtInPlace(n.Left)
485 // free is a map that can be used to obtain temporary variables by type.
// NOTE(review): construction of the local orderState and its stmt(n) call
// are elided in this listing.
486 func orderStmtInPlace(n ir.Node, free map[string][]*ir.Name) ir.Node {
489 mark := order.markTemp()
491 order.cleanTemp(mark)
// Wrap the ordered output in a block so it substitutes for one statement.
492 return ir.NewBlockStmt(src.NoXPos, order.out)
495 // init moves n's init list to o.out.
496 func (o *orderState) init(n ir.Node) {
497 if ir.MayBeShared(n) {
498 // For concurrency safety, don't mutate potentially shared nodes.
499 // First, ensure that no work is required here.
500 if len(n.Init()) > 0 {
501 base.Fatalf("order.init shared node with ninit")
// Detach the init list from n and order each statement into o.out.
505 o.stmtList(ir.TakeInit(n))
508 // call orders the call expression n.
509 // n.Op is OCALLFUNC/OCALLINTER or a builtin like OCOPY.
510 func (o *orderState) call(nn ir.Node) {
511 if len(nn.Init()) > 0 {
512 // Caller should have already called o.init(nn).
513 base.Fatalf("%v with unexpected ninit", nn.Op())
// OCALLMETH should have been rewritten away by typecheck.
515 if nn.Op() == ir.OCALLMETH {
516 base.FatalfAt(nn.Pos(), "OCALLMETH missed by typecheck")
519 // Builtin functions.
// NOTE(review): the case labels naming which builtin each branch handles
// are elided in this listing; the shapes suggest unary ops, binary ops,
// and a make-like op — confirm against the upstream source.
520 if nn.Op() != ir.OCALLFUNC && nn.Op() != ir.OCALLINTER {
521 switch n := nn.(type) {
523 base.Fatalf("unexpected call: %+v", n)
525 n.X = o.expr(n.X, nil)
527 n.X = o.expr(n.X, nil)
529 n.X = o.expr(n.X, nil)
530 n.Y = o.expr(n.Y, nil)
532 n.Len = o.expr(n.Len, nil)
533 n.Cap = o.expr(n.Cap, nil)
// Ordinary function/interface call.
540 n := nn.(*ir.CallExpr)
541 typecheck.FixVariadicCall(n)
543 if isFuncPCIntrinsic(n) && isIfaceOfFunc(n.Args[0]) {
544 // For internal/abi.FuncPCABIxxx(fn), if fn is a defined function,
545 // do not introduce temporaries here, so it is easier to rewrite it
546 // to symbol address reference later in walk.
550 n.X = o.expr(n.X, nil)
554 // mapAssign appends n to o.out.
// For assignments into a map index, the RHS is first made cheap so that
// its evaluation cannot panic mid-insert (see safeMapRHS).
555 func (o *orderState) mapAssign(n ir.Node) {
558 base.Fatalf("order.mapAssign %v", n.Op())
// Plain assignment: m[k] = v.
561 n := n.(*ir.AssignStmt)
562 if n.X.Op() == ir.OINDEXMAP {
563 n.Y = o.safeMapRHS(n.Y)
565 o.out = append(o.out, n)
// Compound assignment: m[k] op= v.
567 n := n.(*ir.AssignOpStmt)
568 if n.X.Op() == ir.OINDEXMAP {
569 n.Y = o.safeMapRHS(n.Y)
571 o.out = append(o.out, n)
// safeMapRHS returns a version of r that is safe to evaluate as the RHS
// of a map assignment.
575 func (o *orderState) safeMapRHS(r ir.Node) ir.Node {
576 // Make sure we evaluate the RHS before starting the map insert.
577 // We need to make sure the RHS won't panic. See issue 22881.
578 if r.Op() == ir.OAPPEND {
579 r := r.(*ir.CallExpr)
// For append, it suffices to make the appended operands cheap.
// NOTE(review): the slicing of r.Args into s is elided in this listing.
581 for i, n := range s {
582 s[i] = o.cheapExpr(n)
586 return o.cheapExpr(r)
589 // stmt orders the statement n, appending to o.out.
590 // Temporaries created during the statement are cleaned
591 // up using VARKILL instructions as possible.
// NOTE(review): this listing elides many lines of this function — the
// switch head, most case labels, closing braces, and several statements.
// The visible fragments are annotated below; confirm details upstream.
592 func (o *orderState) stmt(n ir.Node) {
602 base.Fatalf("order.stmt %v", n.Op())
// Markers pass through unchanged.
604 case ir.OVARKILL, ir.OVARLIVE, ir.OINLMARK:
605 o.out = append(o.out, n)
// Single assignment: order LHS, then RHS with the LHS as a copy hint.
608 n := n.(*ir.AssignStmt)
610 n.X = o.expr(n.X, nil)
611 n.Y = o.expr(n.Y, n.X)
// Compound assignment (x op= y).
616 n := n.(*ir.AssignOpStmt)
618 n.X = o.expr(n.X, nil)
619 n.Y = o.expr(n.Y, nil)
621 if base.Flag.Cfg.Instrumenting || n.X.Op() == ir.OINDEXMAP && (n.AsOp == ir.ODIV || n.AsOp == ir.OMOD) {
622 // Rewrite m[k] op= r into m[k] = m[k] op r so
623 // that we can ensure that if op panics
624 // because r is zero, the panic happens before
625 // the map assignment.
626 // DeepCopy is a big hammer here, but safeExpr
627 // makes sure there is nothing too deep being copied.
628 l1 := o.safeExpr(n.X)
629 l2 := ir.DeepCopy(src.NoXPos, l1)
630 if l2.Op() == ir.OINDEXMAP {
631 l2 := l2.(*ir.IndexExpr)
635 r := o.expr(typecheck.Expr(ir.NewBinaryExpr(n.Pos(), n.AsOp, l2, n.Y)), nil)
636 as := typecheck.Stmt(ir.NewAssignStmt(n.Pos(), l1, r))
// Multi-assignment (OAS2-style); list handling elided here.
646 n := n.(*ir.AssignListStmt)
650 o.out = append(o.out, n)
653 // Special: avoid copy of func call n.Right
655 n := n.(*ir.AssignListStmt)
// Inlined calls feed their return vars directly into the assignment.
660 if ic, ok := call.(*ir.InlinedCallExpr); ok {
664 n.Rhs = ic.ReturnVars
667 o.out = append(o.out, n)
674 // Special: use temporary variables to hold result,
675 // so that runtime can take address of temporary.
676 // No temporary for blank assignment.
678 // OAS2MAPR: make sure key is addressable if needed,
679 // and make sure OINDEXMAP is not copied out.
680 case ir.OAS2DOTTYPE, ir.OAS2RECV, ir.OAS2MAPR:
681 n := n.(*ir.AssignListStmt)
685 switch r := n.Rhs[0]; r.Op() {
687 r := r.(*ir.TypeAssertExpr)
688 r.X = o.expr(r.X, nil)
689 case ir.ODYNAMICDOTTYPE2:
690 r := r.(*ir.DynamicTypeAssertExpr)
691 r.X = o.expr(r.X, nil)
692 r.T = o.expr(r.T, nil)
694 r := r.(*ir.UnaryExpr)
695 r.X = o.expr(r.X, nil)
697 r := r.(*ir.IndexExpr)
698 r.X = o.expr(r.X, nil)
699 r.Index = o.expr(r.Index, nil)
700 // See similar conversion for OINDEXMAP below.
701 _ = mapKeyReplaceStrConv(r.Index)
702 r.Index = o.mapKeyTemp(r.X.Type(), r.Index)
704 base.Fatalf("order.stmt: %v", r.Op())
710 // Special: does not save n onto out.
712 n := n.(*ir.BlockStmt)
715 // Special: n->left is not an expression; save as is.
725 o.out = append(o.out, n)
727 // Special: handle call arguments.
728 case ir.OCALLFUNC, ir.OCALLINTER:
729 n := n.(*ir.CallExpr)
732 o.out = append(o.out, n)
// Inlined call used as a statement.
736 n := n.(*ir.InlinedCallExpr)
739 // discard results; double-check for no side effects
740 for _, result := range n.ReturnVars {
741 if staticinit.AnySideEffects(result) {
742 base.FatalfAt(result.Pos(), "inlined call result has side effects: %v", result)
746 case ir.OCHECKNIL, ir.OCLOSE, ir.OPANIC, ir.ORECV:
747 n := n.(*ir.UnaryExpr)
749 n.X = o.expr(n.X, nil)
750 o.out = append(o.out, n)
// Binary builtin statement (case label elided; likely OCOPY).
754 n := n.(*ir.BinaryExpr)
756 n.X = o.expr(n.X, nil)
757 n.Y = o.expr(n.Y, nil)
758 o.out = append(o.out, n)
761 case ir.OPRINT, ir.OPRINTN, ir.ORECOVERFP:
762 n := n.(*ir.CallExpr)
765 o.out = append(o.out, n)
768 // Special: order arguments to inner call but not call itself.
769 case ir.ODEFER, ir.OGO:
770 n := n.(*ir.GoDeferStmt)
774 o.out = append(o.out, n)
// Map delete (case label elided): key must be addressable.
778 n := n.(*ir.CallExpr)
780 n.Args[0] = o.expr(n.Args[0], nil)
781 n.Args[1] = o.expr(n.Args[1], nil)
782 n.Args[1] = o.mapKeyTemp(n.Args[0].Type(), n.Args[1])
783 o.out = append(o.out, n)
786 // Clean temporaries from condition evaluation at
787 // beginning of loop body and after for statement.
791 n.Cond = o.exprInPlace(n.Cond)
792 n.Body.Prepend(o.cleanTempNoPop(t)...)
793 orderBlock(&n.Body, o.free)
794 n.Post = orderStmtInPlace(n.Post, o.free)
795 o.out = append(o.out, n)
798 // Clean temporaries from condition at
799 // beginning of both branches.
803 n.Cond = o.exprInPlace(n.Cond)
804 n.Body.Prepend(o.cleanTempNoPop(t)...)
805 n.Else.Prepend(o.cleanTempNoPop(t)...)
807 orderBlock(&n.Body, o.free)
808 orderBlock(&n.Else, o.free)
809 o.out = append(o.out, n)
812 // n.Right is the expression being ranged over.
813 // order it, and then make a copy if we need one.
814 // We almost always do, to ensure that we don't
815 // see any value changes made during the loop.
816 // Usually the copy is cheap (e.g., array pointer,
817 // chan, slice, string are all tiny).
818 // The exception is ranging over an array value
819 // (not a slice, not a pointer to array),
820 // which must make a copy to avoid seeing updates made during
821 // the range body. Ranging over an array value is uncommon though.
823 // Mark []byte(str) range expression to reuse string backing storage.
824 // It is safe because the storage cannot be mutated.
825 n := n.(*ir.RangeStmt)
826 if n.X.Op() == ir.OSTR2BYTES {
827 n.X.(*ir.ConvExpr).SetOp(ir.OSTR2BYTESTMP)
831 n.X = o.expr(n.X, nil)
834 xt := typecheck.RangeExprType(n.X.Type())
837 base.Fatalf("order.stmt range %v", n.Type())
839 case types.TARRAY, types.TSLICE:
840 if n.Value == nil || ir.IsBlank(n.Value) {
841 // for i := range x will only use x once, to compute len(x).
842 // No need to copy it.
847 case types.TCHAN, types.TSTRING:
848 // chan, string, slice, array ranges use value multiple times.
// Normalize named string types to plain string for the runtime call.
852 if r.Type().IsString() && r.Type() != types.Types[types.TSTRING] {
853 r = ir.NewConvExpr(base.Pos, ir.OCONV, nil, r)
854 r.SetType(types.Types[types.TSTRING])
855 r = typecheck.Expr(r)
862 // Preserve the body of the map clear pattern so it can
863 // be detected during walk. The loop body will not be used
864 // when optimizing away the range loop to a runtime call.
869 // copy the map value in case it is a map literal.
870 // TODO(rsc): Make tmp = literal expressions reuse tmp.
871 // For maps tmp is just one word so it hardly matters.
875 // n.Prealloc is the temp for the iterator.
876 // MapIterType contains pointers and needs to be zeroed.
877 n.Prealloc = o.newTemp(reflectdata.MapIterType(xt), true)
879 n.Key = o.exprInPlace(n.Key)
880 n.Value = o.exprInPlace(n.Value)
882 orderBlock(&n.Body, o.free)
884 o.out = append(o.out, n)
// Return: order the result expressions.
888 n := n.(*ir.ReturnStmt)
889 o.exprList(n.Results)
890 o.out = append(o.out, n)
892 // Special: clean case temporaries in each block entry.
893 // Select must enter one of its blocks, so there is no
894 // need for a cleaning at the end.
895 // Doubly special: evaluation order for select is stricter
896 // than ordinary expressions. Even something like p.c
897 // has to be hoisted into a temporary, so that it cannot be
898 // reordered after the channel evaluation for a different
899 // case (if p were nil, then the timing of the fault would
902 n := n.(*ir.SelectStmt)
904 for _, ncas := range n.Cases {
908 // Append any new body prologue to ninit.
909 // The next loop will insert ninit into nbody.
910 if len(ncas.Init()) != 0 {
911 base.Fatalf("order select ninit")
918 ir.Dump("select case", r)
919 base.Fatalf("unknown op in select %v", r.Op())
// Receive case: case x[, ok] = <-ch.
923 r := r.(*ir.AssignListStmt)
924 recv := r.Rhs[0].(*ir.UnaryExpr)
925 recv.X = o.expr(recv.X, nil)
926 if !ir.IsAutoTmp(recv.X) {
927 recv.X = o.copyExpr(recv.X)
929 init := ir.TakeInit(r)
// do rewrites destination i (0 = value, 1 = ok) to receive into a temp
// and assign the temp inside the case body.
932 do := func(i int, t *types.Type) {
937 // If this is case x := <-ch or case x, y := <-ch, the case has
938 // the ODCL nodes to declare x and y. We want to delay that
939 // declaration (and possible allocation) until inside the case body.
940 // Delete the ODCL nodes here and recreate them inside the body below.
942 if len(init) > 0 && init[0].Op() == ir.ODCL && init[0].(*ir.Decl).X == n {
945 dcl := typecheck.Stmt(ir.NewDecl(base.Pos, ir.ODCL, n.(*ir.Name)))
946 ncas.PtrInit().Append(dcl)
948 tmp := o.newTemp(t, t.HasPointers())
949 as := typecheck.Stmt(ir.NewAssignStmt(base.Pos, n, typecheck.Conv(tmp, n.Type())))
950 ncas.PtrInit().Append(as)
953 do(0, recv.X.Type().Elem())
954 do(1, types.Types[types.TBOOL])
956 ir.DumpList("ninit", r.Init())
957 base.Fatalf("ninit on select recv")
959 orderBlock(ncas.PtrInit(), o.free)
// Send case: case ch <- v.
962 r := r.(*ir.SendStmt)
963 if len(r.Init()) != 0 {
964 ir.DumpList("ninit", r.Init())
965 base.Fatalf("ninit on select send")
969 // r->left is c, r->right is x, both are always evaluated.
970 r.Chan = o.expr(r.Chan, nil)
972 if !ir.IsAutoTmp(r.Chan) {
973 r.Chan = o.copyExpr(r.Chan)
975 r.Value = o.expr(r.Value, nil)
976 if !ir.IsAutoTmp(r.Value) {
977 r.Value = o.copyExpr(r.Value)
981 // Now that we have accumulated all the temporaries, clean them.
982 // Also insert any ninit queued during the previous loop.
983 // (The temporary cleaning must follow that ninit work.)
984 for _, cas := range n.Cases {
985 orderBlock(&cas.Body, o.free)
986 cas.Body.Prepend(o.cleanTempNoPop(t)...)
988 // TODO(mdempsky): Is this actually necessary?
989 // walkSelect appears to walk Ninit.
990 cas.Body.Prepend(ir.TakeInit(cas)...)
993 o.out = append(o.out, n)
996 // Special: value being sent is passed as a pointer; make it addressable.
998 n := n.(*ir.SendStmt)
1000 n.Chan = o.expr(n.Chan, nil)
1001 n.Value = o.expr(n.Value, nil)
1002 if base.Flag.Cfg.Instrumenting {
1003 // Force copying to the stack so that (chan T)(nil) <- x
1004 // is still instrumented as a read of x.
1005 n.Value = o.copyExpr(n.Value)
1007 n.Value = o.addrTemp(n.Value)
1009 o.out = append(o.out, n)
1012 // TODO(rsc): Clean temporaries more aggressively.
1013 // Note that because walkSwitch will rewrite some of the
1014 // switch into a binary search, this is not as easy as it looks.
1015 // (If we ran that code here we could invoke order.stmt on
1016 // the if-else chain instead.)
1017 // For now just clean all the temporaries at the end.
1018 // In practice that's fine.
1020 n := n.(*ir.SwitchStmt)
1021 if base.Debug.Libfuzzer != 0 && !hasDefaultCase(n) {
1022 // Add empty "default:" case for instrumentation.
1023 n.Cases = append(n.Cases, ir.NewCaseStmt(base.Pos, nil, nil))
1027 n.Tag = o.expr(n.Tag, nil)
1028 for _, ncas := range n.Cases {
1029 o.exprListInPlace(ncas.List)
1030 orderBlock(&ncas.Body, o.free)
1033 o.out = append(o.out, n)
// hasDefaultCase reports whether the switch has a default clause
// (a case with an empty expression list).
1040 func hasDefaultCase(n *ir.SwitchStmt) bool {
1041 for _, ncas := range n.Cases {
1042 if len(ncas.List) == 0 {
1049 // exprList orders the expression list l into o.
// Side effects are appended to o.out as each element is ordered.
// NOTE(review): the loop header over s is elided in this listing.
1050 func (o *orderState) exprList(l ir.Nodes) {
1053 s[i] = o.expr(s[i], nil)
1057 // exprListInPlace orders the expression list l but saves
1058 // the side effects on the individual expression ninit lists.
// NOTE(review): the loop header over s is elided in this listing.
1059 func (o *orderState) exprListInPlace(l ir.Nodes) {
1062 s[i] = o.exprInPlace(s[i])
// exprNoLHS is expr with no assignment target; it exists so a single
// closure of it can be cached in o.edit for ir.EditChildren.
1066 func (o *orderState) exprNoLHS(n ir.Node) ir.Node {
1067 return o.expr(n, nil)
1070 // expr orders a single expression, appending side
1071 // effects to o.out as needed.
1072 // If this is part of an assignment lhs = *np, lhs is given.
1073 // Otherwise lhs == nil. (When lhs != nil it may be possible
1074 // to avoid copying the result of the expression to a temporary.)
1075 // The result of expr MUST be assigned back to n, e.g.
1076 // n.Left = o.expr(n.Left, lhs)
// NOTE(review): the body (position bookkeeping around expr1) is elided
// in this listing.
1077 func (o *orderState) expr(n, lhs ir.Node) ir.Node {
1087 func (o *orderState) expr1(n, lhs ir.Node) ir.Node {
1093 o.edit = o.exprNoLHS // create closure once
1095 ir.EditChildren(n, o.edit)
1098 // Addition of strings turns into a function call.
1099 // Allocate a temporary to hold the strings.
1100 // Fewer than 5 strings use direct runtime helpers.
1102 n := n.(*ir.AddStringExpr)
1105 if len(n.List) > 5 {
1106 t := types.NewArray(types.Types[types.TSTRING], int64(len(n.List)))
1107 n.Prealloc = o.newTemp(t, false)
1110 // Mark string(byteSlice) arguments to reuse byteSlice backing
1111 // buffer during conversion. String concatenation does not
1112 // memorize the strings for later use, so it is safe.
1113 // However, we can do it only if there is at least one non-empty string literal.
1114 // Otherwise if all other arguments are empty strings,
1115 // concatstrings will return the reference to the temp string
1120 for _, n1 := range n.List {
1121 hasbyte = hasbyte || n1.Op() == ir.OBYTES2STR
1122 haslit = haslit || n1.Op() == ir.OLITERAL && len(ir.StringVal(n1)) != 0
1125 if haslit && hasbyte {
1126 for _, n2 := range n.List {
1127 if n2.Op() == ir.OBYTES2STR {
1128 n2 := n2.(*ir.ConvExpr)
1129 n2.SetOp(ir.OBYTES2STRTMP)
1136 n := n.(*ir.IndexExpr)
1137 n.X = o.expr(n.X, nil)
1138 n.Index = o.expr(n.Index, nil)
1142 // Enforce that any []byte slices we are not copying
1143 // can not be changed before the map index by forcing
1144 // the map index to happen immediately following the
1145 // conversions. See copyExpr a few lines below.
1146 needCopy = mapKeyReplaceStrConv(n.Index)
1148 if base.Flag.Cfg.Instrumenting {
1149 // Race detector needs the copy.
1154 // key must be addressable
1155 n.Index = o.mapKeyTemp(n.X.Type(), n.Index)
1157 return o.copyExpr(n)
1161 // concrete type (not interface) argument might need an addressable
1162 // temporary to pass to the runtime conversion routine.
1163 case ir.OCONVIFACE, ir.OCONVIDATA:
1164 n := n.(*ir.ConvExpr)
1165 n.X = o.expr(n.X, nil)
1166 if n.X.Type().IsInterface() {
1169 if _, _, needsaddr := dataWordFuncName(n.X.Type()); needsaddr || isStaticCompositeLiteral(n.X) {
1170 // Need a temp if we need to pass the address to the conversion function.
1171 // We also process static composite literal node here, making a named static global
1172 // whose address we can put directly in an interface (see OCONVIFACE/OCONVIDATA case in walk).
1173 n.X = o.addrTemp(n.X)
1178 n := n.(*ir.ConvExpr)
1179 if n.X.Op() == ir.OCALLMETH {
1180 base.FatalfAt(n.X.Pos(), "OCALLMETH missed by typecheck")
1182 if n.Type().IsKind(types.TUNSAFEPTR) && n.X.Type().IsKind(types.TUINTPTR) && (n.X.Op() == ir.OCALLFUNC || n.X.Op() == ir.OCALLINTER) {
1183 call := n.X.(*ir.CallExpr)
1184 // When reordering unsafe.Pointer(f()) into a separate
1185 // statement, the conversion and function call must stay
1186 // together. See golang.org/issue/15329.
		// NOTE(review): this excerpt begins mid-case; the enclosing case
		// label is above the visible span. The pattern below recurs for
		// call-like results: unless the result is being assigned directly
		// to a simple name (and we are not instrumenting), copy it into a
		// temporary so larger expressions never contain the raw result.
		if lhs == nil || lhs.Op() != ir.ONAME || base.Flag.Cfg.Instrumenting {
			return o.copyExpr(n)
		n.X = o.expr(n.X, nil)

	case ir.OANDAND, ir.OOROR:
		// Rewrite short-circuit evaluation into explicit statements using a
		// temporary r, so walk can reorder freely afterwards:
		//	r = LHS
		// if r { // or !r, for OROR
		//	r = RHS
		n := n.(*ir.LogicalExpr)
		r := o.newTemp(n.Type(), false)

		// Evaluate left-hand side.
		lhs := o.expr(n.X, nil)
		o.out = append(o.out, typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, lhs)))

		// Evaluate right-hand side, save generated code.
		rhs := o.expr(n.Y, nil)
		o.out = append(o.out, typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, rhs)))

		// If left-hand side doesn't cause a short-circuit, issue right-hand side.
		nif := ir.NewIfStmt(base.Pos, r, nil, nil)
		if n.Op() == ir.OANDAND {
		o.out = append(o.out, nif)

		// OCALLMETH should have been rewritten away before this phase;
		// reaching here is an internal compiler error.
		base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
		panic("unreachable")

		// len([]rune(s)) is rewritten to runtime.countrunes(s) later.
		conv := n.(*ir.UnaryExpr).X.(*ir.ConvExpr)
		conv.X = o.expr(conv.X, nil)

		// Call-like result: copy to a temporary unless assigned directly
		// to a name (and not instrumenting).
		if lhs == nil || lhs.Op() != ir.ONAME || base.Flag.Cfg.Instrumenting {
			return o.copyExpr(n)

		// Inlined call: its single result can be used directly.
		n := n.(*ir.InlinedCallExpr)
		return n.SingleResult()

		// Check for append(x, make([]T, y)...) .
		n := n.(*ir.CallExpr)
		if isAppendOfMake(n) {
			n.Args[0] = o.expr(n.Args[0], nil) // order x
			mk := n.Args[1].(*ir.MakeExpr)
			mk.Len = o.expr(mk.Len, nil) // order y

		// append may assign back to its own first argument in place
		// (x = append(x, ...)); any other destination needs a temporary.
		if lhs == nil || lhs.Op() != ir.ONAME && !ir.SameSafeExpr(lhs, n.Args[0]) {
			return o.copyExpr(n)

	case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR:
		n := n.(*ir.SliceExpr)
		n.X = o.expr(n.X, nil)
		n.Low = o.cheapExpr(o.expr(n.Low, nil))
		n.High = o.cheapExpr(o.expr(n.High, nil))
		n.Max = o.cheapExpr(o.expr(n.Max, nil))
		// Slicing in place (x = x[i:j]) is safe; otherwise copy to a temp.
		if lhs == nil || lhs.Op() != ir.ONAME && !ir.SameSafeExpr(lhs, n.X) {
			return o.copyExpr(n)

		// Transient closure with captured variables: preallocate its storage
		// as a temporary instead of heap-allocating.
		n := n.(*ir.ClosureExpr)
		if n.Transient() && len(n.Func.ClosureVars) > 0 {
			n.Prealloc = o.newTemp(typecheck.ClosureType(n), false)

		// Method value: order the receiver, then preallocate storage for the
		// bound-method closure.
		n := n.(*ir.SelectorExpr)
		n.X = o.expr(n.X, nil)
		t := typecheck.MethodValueType(n)
		n.Prealloc = o.newTemp(t, false)

		// Slice literal: preallocate the backing array as a temporary.
		n := n.(*ir.CompLitExpr)
		t := types.NewArray(n.Type().Elem(), n.Len)
		n.Prealloc = o.newTemp(t, false)

	case ir.ODOTTYPE, ir.ODOTTYPE2:
		n := n.(*ir.TypeAssertExpr)
		n.X = o.expr(n.X, nil)
		// Non-direct-interface results (or instrumented builds) need a
		// cleared temporary copy.
		if !types.IsDirectIface(n.Type()) || base.Flag.Cfg.Instrumenting {
			return o.copyExprClear(n)

		// Receive: always copied out, so <-c only appears in direct
		// assignments or standalone statements (see file header comment).
		n := n.(*ir.UnaryExpr)
		n.X = o.expr(n.X, nil)
		return o.copyExprClear(n)

	case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
		n := n.(*ir.BinaryExpr)
		n.X = o.expr(n.X, nil)
		n.Y = o.expr(n.Y, nil)

		// Mark string(byteSlice) arguments to reuse byteSlice backing
		// buffer during conversion. String comparison does not
		// memorize the strings for later use, so it is safe.
		if n.X.Op() == ir.OBYTES2STR {
			n.X.(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP)
		if n.Y.Op() == ir.OBYTES2STR {
			n.Y.(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP)
		case t.IsStruct() || t.IsArray():
			// for complex comparisons, we need both args to be
			// addressable so we can pass them to the runtime.
			n.X = o.addrTemp(n.X)
			n.Y = o.addrTemp(n.Y)

		// Order map by converting:
		// m := map[int]int{}
		// Then order the result.
		// Without this special case, order would otherwise compute all
		// the keys and values before storing any of them to the map.
		n := n.(*ir.CompLitExpr)
		// Partition entries: fully static ones stay in the literal,
		// dynamic ones are inserted one at a time after map creation.
		statics := entries[:0]
		var dynamics []*ir.KeyExpr
		for _, r := range entries {
			r := r.(*ir.KeyExpr)

			if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) {
				dynamics = append(dynamics, r)

			// Recursively ordering some static entries can change them to dynamic;
			// e.g., OCONVIFACE nodes. See #31777.
			r = o.expr(r, nil).(*ir.KeyExpr)
			if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) {
				dynamics = append(dynamics, r)

			statics = append(statics, r)

		// All entries static: no rewriting needed.
		if len(dynamics) == 0 {

		// Emit the creation of the map (with all its static entries).
		m := o.newTemp(n.Type(), false)
		as := ir.NewAssignStmt(base.Pos, m, n)

		// Emit eval+insert of dynamic entries, one at a time.
		for _, r := range dynamics {
			as := ir.NewAssignStmt(base.Pos, ir.NewIndexExpr(base.Pos, m, r.Key), r.Value)
			typecheck.Stmt(as) // Note: this converts the OINDEX to an OINDEXMAP

	// No return - type-assertions above. Each case must return for itself.
1428 // as2func orders OAS2FUNC nodes. It creates temporaries to ensure left-to-right assignment.
1429 // The caller should order the right-hand side of the assignment before calling order.as2func.
1433 // tmp1, tmp2, tmp3 = ...
1434 // a, b, a = tmp1, tmp2, tmp3
1435 // This is necessary to ensure left to right assignment order.
1436 func (o *orderState) as2func(n *ir.AssignListStmt) {
1437 results := n.Rhs[0].Type()
1438 as := ir.NewAssignListStmt(n.Pos(), ir.OAS2, nil, nil)
1439 for i, nl := range n.Lhs {
1440 if !ir.IsBlank(nl) {
1441 typ := results.Field(i).Type
1442 tmp := o.newTemp(typ, typ.HasPointers())
1444 as.Lhs = append(as.Lhs, nl)
1445 as.Rhs = append(as.Rhs, tmp)
1449 o.out = append(o.out, n)
1450 o.stmt(typecheck.Stmt(as))
1453 // as2ok orders OAS2XXX with ok.
1454 // Just like as2func, this also adds temporaries to ensure left-to-right assignment.
1455 func (o *orderState) as2ok(n *ir.AssignListStmt) {
1456 as := ir.NewAssignListStmt(n.Pos(), ir.OAS2, nil, nil)
1458 do := func(i int, typ *types.Type) {
1459 if nl := n.Lhs[i]; !ir.IsBlank(nl) {
1460 var tmp ir.Node = o.newTemp(typ, typ.HasPointers())
1462 as.Lhs = append(as.Lhs, nl)
1464 // The "ok" result is an untyped boolean according to the Go
1465 // spec. We need to explicitly convert it to the LHS type in
1466 // case the latter is a defined boolean type (#8475).
1467 tmp = typecheck.Conv(tmp, nl.Type())
1469 as.Rhs = append(as.Rhs, tmp)
1473 do(0, n.Rhs[0].Type())
1474 do(1, types.Types[types.TBOOL])
1476 o.out = append(o.out, n)
1477 o.stmt(typecheck.Stmt(as))
1480 // isFuncPCIntrinsic returns whether n is a direct call of internal/abi.FuncPCABIxxx functions.
1481 func isFuncPCIntrinsic(n *ir.CallExpr) bool {
1482 if n.Op() != ir.OCALLFUNC || n.X.Op() != ir.ONAME {
1485 fn := n.X.(*ir.Name).Sym()
1486 return (fn.Name == "FuncPCABI0" || fn.Name == "FuncPCABIInternal") &&
1487 (fn.Pkg.Path == "internal/abi" || fn.Pkg == types.LocalPkg && base.Ctxt.Pkgpath == "internal/abi")
1490 // isIfaceOfFunc returns whether n is an interface conversion from a direct reference of a func.
1491 func isIfaceOfFunc(n ir.Node) bool {
1492 return n.Op() == ir.OCONVIFACE && n.(*ir.ConvExpr).X.Op() == ir.ONAME && n.(*ir.ConvExpr).X.(*ir.Name).Class == ir.PFUNC