// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
11 "cmd/compile/internal/base"
12 "cmd/compile/internal/ir"
13 "cmd/compile/internal/reflectdata"
14 "cmd/compile/internal/staticinit"
15 "cmd/compile/internal/typecheck"
16 "cmd/compile/internal/types"
// Rewrite tree to use separate statements to enforce
// order of evaluation. Makes walk easier, because it
// can (after this runs) reorder at will within an expression.
//
// Rewrite m[k] op= r into m[k] = m[k] op r if op is / or %.
//
// Introduce temporaries as needed by runtime routines.
// For example, the map runtime routines take the map key
// by reference, so make sure all map keys are addressable
// by copying them to temporaries as needed.
// The same is true for channel operations.
//
// Arrange that map index expressions only appear in direct
// assignments x = m[k] or m[k] = x, never in larger expressions.
//
// Arrange that receive expressions only appear in direct assignments
// x = <-c or as standalone statements <-c, never in larger expressions.
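//
// For example (an illustrative sketch, not literal compiler output),
// a statement such as
//
//	x = f(m[k]) + <-c
//
// is rewritten into a sequence of simple statements:
//
//	t1 := m[k]
//	t2 := f(t1)
//	t3 := <-c
//	x = t2 + t3
//
// so that map indexing and receives appear only in direct assignments.
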
// orderState holds state during the ordering process.
type orderState struct {
	out  []ir.Node             // list of generated statements
	temp []*ir.Name            // stack of temporary variables
	free map[string][]*ir.Name // free list of unused temporaries, by type.LinkString().
	edit func(ir.Node) ir.Node // cached closure of o.exprNoLHS
}
// order rewrites fn.Nbody to apply the ordering constraints
// described in the comment at the top of the file.
func order(fn *ir.Func) {
	if base.Flag.LowerW > 1 {
		s := fmt.Sprintf("\nbefore order %v", fn.Sym())
		ir.DumpList(s, fn.Body)
	}
	ir.SetPos(fn) // Set reasonable position for instrumenting code. See issue 53688.
	orderBlock(&fn.Body, map[string][]*ir.Name{})
}

// append typechecks stmt and appends it to out.
func (o *orderState) append(stmt ir.Node) {
	o.out = append(o.out, typecheck.Stmt(stmt))
}

// newTemp allocates a new temporary with the given type,
// pushes it onto the temp stack, and returns it.
// If clear is true, newTemp emits code to zero the temporary.
func (o *orderState) newTemp(t *types.Type, clear bool) *ir.Name {
	var v *ir.Name
	key := t.LinkString()
	if a := o.free[key]; len(a) > 0 {
		v = a[len(a)-1]
		if !types.Identical(t, v.Type()) {
			base.Fatalf("expected %L to have type %v", v, t)
		}
		o.free[key] = a[:len(a)-1]
	} else {
		v = typecheck.TempAt(base.Pos, ir.CurFunc, t)
	}
	if clear {
		o.append(ir.NewAssignStmt(base.Pos, v, nil))
	}

	o.temp = append(o.temp, v)
	return v
}

// copyExpr behaves like newTemp but also emits
// code to initialize the temporary to the value n.
func (o *orderState) copyExpr(n ir.Node) *ir.Name {
	return o.copyExpr1(n, false)
}

// copyExprClear is like copyExpr but clears the temp before assignment.
// It is provided for use when the evaluation of tmp = n turns into
// a function call that is passed a pointer to the temporary as the output space.
// If the call blocks before tmp has been written,
// the garbage collector will still treat the temporary as live,
// so we must zero it before entering that call.
// Today, this only happens for channel receive operations.
// (The other candidate would be map access, but map access
// returns a pointer to the result data instead of taking a pointer
// to be filled in.)
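//
// For example (an illustrative sketch): for
//
//	tmp = <-c
//
// the receive is lowered to a runtime call of the form chanrecv1(c, &tmp),
// so tmp is zeroed first; if the receive blocks, the garbage collector then
// sees a valid (zero) value in tmp rather than stale bits.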
func (o *orderState) copyExprClear(n ir.Node) *ir.Name {
	return o.copyExpr1(n, true)
}

func (o *orderState) copyExpr1(n ir.Node, clear bool) *ir.Name {
	t := n.Type()
	v := o.newTemp(t, clear)
	o.append(ir.NewAssignStmt(base.Pos, v, n))
	return v
}

// cheapExpr returns a cheap version of n.
// The definition of cheap is that n is a variable or constant.
// If not, cheapExpr allocates a new tmp, emits tmp = n,
// and then returns tmp.
func (o *orderState) cheapExpr(n ir.Node) ir.Node {
	if n == nil {
		return nil
	}

	switch n.Op() {
	case ir.ONAME, ir.OLITERAL, ir.ONIL:
		return n
	case ir.OLEN, ir.OCAP:
		n := n.(*ir.UnaryExpr)
		l := o.cheapExpr(n.X)
		if l == n.X {
			return n
		}
		a := ir.Copy(n).(*ir.UnaryExpr)
		a.X = l
		return typecheck.Expr(a)
	}

	return o.copyExpr(n)
}

// safeExpr returns a safe version of n.
// The definition of safe is that n can appear multiple times
// without violating the semantics of the original program,
// and that assigning to the safe version has the same effect
// as assigning to the original n.
//
// The intended use is to apply to x when rewriting x += y into x = x + y.
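//
// For example (conceptually), rewriting
//
//	m[k] += y
//
// as m[k] = m[k] + y must evaluate m and k only once, so safeExpr
// hoists them into temporaries first:
//
//	t1 := m
//	t2 := k
//	t1[t2] = t1[t2] + y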
func (o *orderState) safeExpr(n ir.Node) ir.Node {
	switch n.Op() {
	case ir.ONAME, ir.OLITERAL, ir.ONIL:
		return n

	case ir.OLEN, ir.OCAP:
		n := n.(*ir.UnaryExpr)
		l := o.safeExpr(n.X)
		if l == n.X {
			return n
		}
		a := ir.Copy(n).(*ir.UnaryExpr)
		a.X = l
		return typecheck.Expr(a)

	case ir.ODOT:
		n := n.(*ir.SelectorExpr)
		l := o.safeExpr(n.X)
		if l == n.X {
			return n
		}
		a := ir.Copy(n).(*ir.SelectorExpr)
		a.X = l
		return typecheck.Expr(a)

	case ir.ODOTPTR:
		n := n.(*ir.SelectorExpr)
		l := o.cheapExpr(n.X)
		if l == n.X {
			return n
		}
		a := ir.Copy(n).(*ir.SelectorExpr)
		a.X = l
		return typecheck.Expr(a)

	case ir.ODEREF:
		n := n.(*ir.StarExpr)
		l := o.cheapExpr(n.X)
		if l == n.X {
			return n
		}
		a := ir.Copy(n).(*ir.StarExpr)
		a.X = l
		return typecheck.Expr(a)

	case ir.OINDEX, ir.OINDEXMAP:
		n := n.(*ir.IndexExpr)
		var l ir.Node
		if n.X.Type().IsArray() {
			l = o.safeExpr(n.X)
		} else {
			l = o.cheapExpr(n.X)
		}
		r := o.cheapExpr(n.Index)
		if l == n.X && r == n.Index {
			return n
		}
		a := ir.Copy(n).(*ir.IndexExpr)
		a.X = l
		a.Index = r
		return typecheck.Expr(a)

	default:
		base.Fatalf("order.safeExpr %v", n.Op())
		return nil // not reached
	}
}

// addrTemp ensures that n is okay to pass by address to runtime routines.
// If the original argument n is not okay, addrTemp creates a tmp, emits
// tmp = n, and then returns tmp.
// The result of addrTemp MUST be assigned back to n, e.g.
//
//	n.Left = o.addrTemp(n.Left)
func (o *orderState) addrTemp(n ir.Node) ir.Node {
	if n.Op() == ir.OLITERAL || n.Op() == ir.ONIL {
		// TODO: expand this to all static composite literal nodes?
		n = typecheck.DefaultLit(n, nil)
		types.CalcSize(n.Type())
		vstat := readonlystaticname(n.Type())
		var s staticinit.Schedule
		s.StaticAssign(vstat, 0, n, n.Type())
		if s.Plans != nil || s.Temps != nil {
			base.Fatalf("staticassign of const generated code: %+v", n)
		}
		vstat = typecheck.Expr(vstat).(*ir.Name)
		return vstat
	}
	if ir.IsAddressable(n) {
		return n
	}
	return o.copyExpr(n)
}

// mapKeyTemp prepares n to be a key in a map runtime call and returns n.
// It should only be used for map runtime calls which have *_fast* versions.
// The first parameter is the position of n's containing node, for use when
// n's position is not unique (e.g., if n is an ONAME).
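//
// For example (an assumed shape): for a map[int32]V looked up via a
// *_fast32 runtime helper, which takes its key by value as a uint32,
// a key k of defined type K (underlying type int32) is rewritten to a
// conversion equivalent to uint32(k) instead of being spilled to an
// addressable temporary.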
func (o *orderState) mapKeyTemp(outerPos src.XPos, t *types.Type, n ir.Node) ir.Node {
	pos := outerPos
	if ir.HasUniquePos(n) {
		pos = n.Pos()
	}
	// Most map calls need to take the address of the key.
	// Exception: map*_fast* calls. See golang.org/issue/19015.
	alg := mapfast(t)
	if alg == mapslow {
		return o.addrTemp(n)
	}
	var kt *types.Type
	switch alg {
	case mapfast32:
		kt = types.Types[types.TUINT32]
	case mapfast64:
		kt = types.Types[types.TUINT64]
	case mapfast32ptr, mapfast64ptr:
		kt = types.Types[types.TUNSAFEPTR]
	case mapfaststr:
		kt = types.Types[types.TSTRING]
	}
	nt := n.Type()
	switch {
	case nt == kt:
		return n
	case nt.Kind() == kt.Kind(), nt.IsPtrShaped() && kt.IsPtrShaped():
		// can directly convert (e.g. named type to underlying type, or one pointer to another)
		return typecheck.Expr(ir.NewConvExpr(pos, ir.OCONVNOP, kt, n))
	case nt.IsInteger() && kt.IsInteger():
		// can directly convert (e.g. int32 to uint32)
		if n.Op() == ir.OLITERAL && nt.IsSigned() {
			// avoid constant overflow error
			n = ir.NewConstExpr(constant.MakeUint64(uint64(ir.Int64Val(n))), n)
			n.SetType(kt)
			n.SetTypecheck(1)
		}
		return typecheck.Expr(ir.NewConvExpr(pos, ir.OCONV, kt, n))
	default:
		// Unsafe cast through memory.
		// We'll need to do a load with type kt. Create a temporary of type kt to
		// ensure sufficient alignment. nt may be under-aligned.
		if uint8(kt.Alignment()) < uint8(nt.Alignment()) {
			base.Fatalf("mapKeyTemp: key type is not sufficiently aligned, kt=%v nt=%v", kt, nt)
		}
		tmp := o.newTemp(kt, true)
		// *(*nt)(&tmp) = n
		var e ir.Node = typecheck.NodAddr(tmp)
		e = ir.NewConvExpr(pos, ir.OCONVNOP, nt.PtrTo(), e)
		e = ir.NewStarExpr(pos, e)
		o.append(ir.NewAssignStmt(pos, e, n))
		return tmp
	}
}

// mapKeyReplaceStrConv replaces OBYTES2STR by OBYTES2STRTMP
// in n to avoid string allocations for keys in map lookups.
// Returns a bool that signals if a modification was made.
//
// For:
//
//	x = m[string(k)]
//	x = m[T1{... Tn{..., string(k), ...}}]
//
// where k is []byte, T1 to Tn is a nesting of struct and array literals,
// the allocation of backing bytes for the string can be avoided
// by reusing the []byte backing array. These are special cases
// for avoiding allocations when converting byte slices to strings.
// It would be nice to handle these generally, but because
// []byte keys are not allowed in maps, the use of string(k)
// comes up in important cases in practice. See issue 3512.
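//
// Conceptually, OBYTES2STRTMP converts k without copying its bytes; that
// is safe here only because the map lookup consumes the temporary string
// before k can be mutated.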
func mapKeyReplaceStrConv(n ir.Node) bool {
	var replaced bool
	switch n.Op() {
	case ir.OBYTES2STR:
		n := n.(*ir.ConvExpr)
		n.SetOp(ir.OBYTES2STRTMP)
		replaced = true
	case ir.OSTRUCTLIT:
		n := n.(*ir.CompLitExpr)
		for _, elem := range n.List {
			elem := elem.(*ir.StructKeyExpr)
			if mapKeyReplaceStrConv(elem.Value) {
				replaced = true
			}
		}
	case ir.OARRAYLIT:
		n := n.(*ir.CompLitExpr)
		for _, elem := range n.List {
			if elem.Op() == ir.OKEY {
				elem = elem.(*ir.KeyExpr).Value
			}
			if mapKeyReplaceStrConv(elem) {
				replaced = true
			}
		}
	}
	return replaced
}

// markTemp returns the top of the temporary variable stack.
func (o *orderState) markTemp() ordermarker {
	return ordermarker(len(o.temp))
}

// popTemp pops temporaries off the stack until reaching the mark,
// which must have been returned by markTemp.
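//
// Typical usage (a sketch), as in orderBlock below:
//
//	mark := o.markTemp()
//	// ...emit statements that may allocate temporaries...
//	o.popTemp(mark)
//
// so the temporaries become reusable once the statement is done.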
func (o *orderState) popTemp(mark ordermarker) {
	for _, n := range o.temp[mark:] {
		key := n.Type().LinkString()
		o.free[key] = append(o.free[key], n)
	}
	o.temp = o.temp[:mark]
}

// stmtList orders each of the statements in the list.
func (o *orderState) stmtList(l ir.Nodes) {
	s := l
	for i := range s {
		orderMakeSliceCopy(s[i:])
		o.stmt(s[i])
	}
}

// orderMakeSliceCopy matches the pattern:
//
//	m = OMAKESLICE([]T, x); OCOPY(m, s)
//
// and rewrites it to:
//
//	m = OMAKESLICECOPY([]T, x, s); nil
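//
// In Go source terms (an illustrative sketch):
//
//	m := make([]T, n)
//	copy(m, s)
//
// becomes a single combined allocate-and-copy operation, avoiding a
// separate zeroing of m before the copy.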
func orderMakeSliceCopy(s []ir.Node) {
	if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
		return
	}
	if len(s) < 2 || s[0] == nil || s[0].Op() != ir.OAS || s[1] == nil || s[1].Op() != ir.OCOPY {
		return
	}

	as := s[0].(*ir.AssignStmt)
	cp := s[1].(*ir.BinaryExpr)
	if as.Y == nil || as.Y.Op() != ir.OMAKESLICE || ir.IsBlank(as.X) ||
		as.X.Op() != ir.ONAME || cp.X.Op() != ir.ONAME || cp.Y.Op() != ir.ONAME ||
		as.X.Name() != cp.X.Name() || cp.X.Name() == cp.Y.Name() {
		// The conditions above are correct with the differing equality operators:
		// we want as.X and cp.X to be the same name,
		// but we want the initial data to be coming from a different name.
		return
	}

	mk := as.Y.(*ir.MakeExpr)
	if mk.Esc() == ir.EscNone || mk.Len == nil || mk.Cap != nil {
		return
	}
	mk.SetOp(ir.OMAKESLICECOPY)
	mk.Cap = cp.Y
	// Set bounded when m = OMAKESLICE([]T, len(s)); OCOPY(m, s)
	mk.SetBounded(mk.Len.Op() == ir.OLEN && ir.SameSafeExpr(mk.Len.(*ir.UnaryExpr).X, cp.Y))
	as.Y = typecheck.Expr(mk)
	s[1] = nil // remove separate copy call
}

// edge inserts coverage instrumentation for libfuzzer.
func (o *orderState) edge() {
	if base.Debug.Libfuzzer == 0 {
		return
	}

	// Create a new uint8 counter to be allocated in section __sancov_cntrs
	counter := staticinit.StaticName(types.Types[types.TUINT8])
	counter.SetLibfuzzer8BitCounter(true)
	// As well as setting SetLibfuzzer8BitCounter, we preemptively set the
	// symbol type to SLIBFUZZER_8BIT_COUNTER so that the race detector
	// instrumentation pass (which does not have access to the flags set by
	// SetLibfuzzer8BitCounter) knows to ignore them. This information is
	// lost by the time it reaches the compile step, so SetLibfuzzer8BitCounter
	// is still necessary.
	counter.Linksym().Type = objabi.SLIBFUZZER_8BIT_COUNTER
	// We guarantee that the counter never becomes zero again once it has been
	// incremented once. This implementation follows the NeverZero optimization
	// presented by the paper:
	// "AFL++: Combining Incremental Steps of Fuzzing Research"
	// The NeverZero policy avoids the overflow to 0 by setting the counter to one
	// after it reaches 255 and so, if an edge is executed at least one time, the entry is
	// never 0.
	// Another policy presented in the paper is the Saturated Counters policy which
	// freezes the counter when it reaches the value of 255. However, a range
	// of experiments showed that doing so decreases overall performance.
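	//
	// The instrumentation emitted below is equivalent to:
	//
	//	if counter == 0xff {
	//		counter = 1
	//	} else {
	//		counter += 1
	//	}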
	o.append(ir.NewIfStmt(base.Pos,
		ir.NewBinaryExpr(base.Pos, ir.OEQ, counter, ir.NewInt(base.Pos, 0xff)),
		[]ir.Node{ir.NewAssignStmt(base.Pos, counter, ir.NewInt(base.Pos, 1))},
		[]ir.Node{ir.NewAssignOpStmt(base.Pos, ir.OADD, counter, ir.NewInt(base.Pos, 1))}))
}

// orderBlock orders the block of statements in n into a new slice,
// and then replaces the old slice in n with the new slice.
// free is a map that can be used to obtain temporary variables by type.
func orderBlock(n *ir.Nodes, free map[string][]*ir.Name) {
	if len(*n) != 0 {
		// Set reasonable position for instrumenting code. See issue 53688.
		// It would be nice if ir.Nodes had a position (the opening {, probably),
		// but it doesn't. So we use the first statement's position instead.
		ir.SetPos((*n)[0])
	}

	order := orderState{free: free}
	mark := order.markTemp()
	order.edge()
	order.stmtList(*n)
	order.popTemp(mark)
	*n = order.out
}

// exprInPlace orders the side effects in *np and
// leaves them as the init list of the final *np.
// The result of exprInPlace MUST be assigned back to n, e.g.
//
//	n.Left = o.exprInPlace(n.Left)
func (o *orderState) exprInPlace(n ir.Node) ir.Node {
	var order orderState
	order.free = o.free
	n = order.expr(n, nil)
	n = ir.InitExpr(order.out, n)

	// insert new temporaries from order
	// at head of outer list.
	o.temp = append(o.temp, order.temp...)
	return n
}

// orderStmtInPlace orders the side effects of the single statement *np
// and replaces it with the resulting statement list.
// The result of orderStmtInPlace MUST be assigned back to n, e.g.
//
//	n.Left = orderStmtInPlace(n.Left)
//
// free is a map that can be used to obtain temporary variables by type.
func orderStmtInPlace(n ir.Node, free map[string][]*ir.Name) ir.Node {
	var order orderState
	order.free = free
	mark := order.markTemp()
	order.stmt(n)
	order.popTemp(mark)
	return ir.NewBlockStmt(src.NoXPos, order.out)
}

// init moves n's init list to o.out.
func (o *orderState) init(n ir.Node) {
	if ir.MayBeShared(n) {
		// For concurrency safety, don't mutate potentially shared nodes.
		// First, ensure that no work is required here.
		if len(n.Init()) > 0 {
			base.Fatalf("order.init shared node with ninit")
		}
		return
	}
	o.stmtList(ir.TakeInit(n))
}

// call orders the call expression n.
// n.Op is OCALLFUNC/OCALLINTER or a builtin like OCOPY.
func (o *orderState) call(nn ir.Node) {
	if len(nn.Init()) > 0 {
		// Caller should have already called o.init(nn).
		base.Fatalf("%v with unexpected ninit", nn.Op())
	}
	if nn.Op() == ir.OCALLMETH {
		base.FatalfAt(nn.Pos(), "OCALLMETH missed by typecheck")
	}

	// Builtin functions.
	if nn.Op() != ir.OCALLFUNC && nn.Op() != ir.OCALLINTER {
		switch n := nn.(type) {
		default:
			base.Fatalf("unexpected call: %+v", n)
		case *ir.UnaryExpr:
			n.X = o.expr(n.X, nil)
		case *ir.ConvExpr:
			n.X = o.expr(n.X, nil)
		case *ir.BinaryExpr:
			n.X = o.expr(n.X, nil)
			n.Y = o.expr(n.Y, nil)
		case *ir.MakeExpr:
			n.Len = o.expr(n.Len, nil)
			n.Cap = o.expr(n.Cap, nil)
		case *ir.CallExpr:
			o.exprList(n.Args)
		}
		return
	}

	n := nn.(*ir.CallExpr)
	typecheck.AssertFixedCall(n)

	if ir.IsFuncPCIntrinsic(n) && isIfaceOfFunc(n.Args[0]) {
		// For internal/abi.FuncPCABIxxx(fn), if fn is a defined function,
		// do not introduce temporaries here, so it is easier to rewrite it
		// to symbol address reference later in walk.
		return
	}

	n.X = o.expr(n.X, nil)
	o.exprList(n.Args)
}

// mapAssign appends n to o.out.
func (o *orderState) mapAssign(n ir.Node) {
	switch n.Op() {
	default:
		base.Fatalf("order.mapAssign %v", n.Op())

	case ir.OAS:
		n := n.(*ir.AssignStmt)
		if n.X.Op() == ir.OINDEXMAP {
			n.Y = o.safeMapRHS(n.Y)
		}
		o.out = append(o.out, n)

	case ir.OASOP:
		n := n.(*ir.AssignOpStmt)
		if n.X.Op() == ir.OINDEXMAP {
			n.Y = o.safeMapRHS(n.Y)
		}
		o.out = append(o.out, n)
	}
}

func (o *orderState) safeMapRHS(r ir.Node) ir.Node {
	// Make sure we evaluate the RHS before starting the map insert.
	// We need to make sure the RHS won't panic. See issue 22881.
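	//
	// For example (a sketch of the issue 22881 shape):
	//
	//	m[k] = append(m[k], v...)
	//
	// append may panic, and any such panic must happen before the map
	// insert begins rather than while the map is being updated.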
	if r.Op() == ir.OAPPEND {
		r := r.(*ir.CallExpr)
		s := r.Args[1:]
		for i, n := range s {
			s[i] = o.cheapExpr(n)
		}
		return r
	}
	return o.cheapExpr(r)
}

// stmt orders the statement n, appending to o.out.
func (o *orderState) stmt(n ir.Node) {
	if n == nil {
		return
	}

	lno := ir.SetPos(n)
	o.init(n)

	switch n.Op() {
	default:
		base.Fatalf("order.stmt %v", n.Op())

	case ir.OVARLIVE, ir.OINLMARK:
		o.out = append(o.out, n)

	case ir.OAS:
		n := n.(*ir.AssignStmt)
		t := o.markTemp()
		n.X = o.expr(n.X, nil)
		n.Y = o.expr(n.Y, n.X)
		o.mapAssign(n)
		o.popTemp(t)

	case ir.OASOP:
		n := n.(*ir.AssignOpStmt)
		t := o.markTemp()
		n.X = o.expr(n.X, nil)
		n.Y = o.expr(n.Y, nil)

		if base.Flag.Cfg.Instrumenting || n.X.Op() == ir.OINDEXMAP && (n.AsOp == ir.ODIV || n.AsOp == ir.OMOD) {
			// Rewrite m[k] op= r into m[k] = m[k] op r so
			// that we can ensure that if op panics
			// because r is zero, the panic happens before
			// the map assignment.
			// DeepCopy is a big hammer here, but safeExpr
			// makes sure there is nothing too deep being copied.
			l1 := o.safeExpr(n.X)
			l2 := ir.DeepCopy(src.NoXPos, l1)
			if l2.Op() == ir.OINDEXMAP {
				l2 := l2.(*ir.IndexExpr)
				l2.Assigned = false
			}
			r := o.expr(typecheck.Expr(ir.NewBinaryExpr(n.Pos(), n.AsOp, l2, n.Y)), nil)
			as := typecheck.Stmt(ir.NewAssignStmt(n.Pos(), l1, r))
			o.mapAssign(as)
			o.popTemp(t)
			return
		}

		o.mapAssign(n)
		o.popTemp(t)
	case ir.OAS2:
		n := n.(*ir.AssignListStmt)
		t := o.markTemp()
		o.exprList(n.Lhs)
		o.exprList(n.Rhs)
		o.out = append(o.out, n)
		o.popTemp(t)

	// Special: avoid copy of func call n.Right
	case ir.OAS2FUNC:
		n := n.(*ir.AssignListStmt)
		t := o.markTemp()
		o.exprList(n.Lhs)
		call := n.Rhs[0]
		o.init(call)
		if ic, ok := call.(*ir.InlinedCallExpr); ok {
			o.stmtList(ic.Body)

			n.SetOp(ir.OAS2)
			n.Rhs = ic.ReturnVars

			o.exprList(n.Rhs)
			o.out = append(o.out, n)
		} else {
			o.call(call)
			o.as2func(n)
		}
		o.popTemp(t)
	// Special: use temporary variables to hold result,
	// so that runtime can take address of temporary.
	// No temporary for blank assignment.
	//
	// OAS2MAPR: make sure key is addressable if needed,
	// and make sure OINDEXMAP is not copied out.
	case ir.OAS2DOTTYPE, ir.OAS2RECV, ir.OAS2MAPR:
		n := n.(*ir.AssignListStmt)
		t := o.markTemp()
		o.exprList(n.Lhs)

		switch r := n.Rhs[0]; r.Op() {
		case ir.ODOTTYPE2:
			r := r.(*ir.TypeAssertExpr)
			r.X = o.expr(r.X, nil)
		case ir.ODYNAMICDOTTYPE2:
			r := r.(*ir.DynamicTypeAssertExpr)
			r.X = o.expr(r.X, nil)
			r.RType = o.expr(r.RType, nil)
			r.ITab = o.expr(r.ITab, nil)
		case ir.ORECV:
			r := r.(*ir.UnaryExpr)
			r.X = o.expr(r.X, nil)
		case ir.OINDEXMAP:
			r := r.(*ir.IndexExpr)
			r.X = o.expr(r.X, nil)
			r.Index = o.expr(r.Index, nil)
			// See similar conversion for OINDEXMAP below.
			_ = mapKeyReplaceStrConv(r.Index)
			r.Index = o.mapKeyTemp(r.Pos(), r.X.Type(), r.Index)
		default:
			base.Fatalf("order.stmt: %v", r.Op())
		}

		o.as2ok(n)
		o.popTemp(t)
	// Special: does not save n onto out.
	case ir.OBLOCK:
		n := n.(*ir.BlockStmt)
		o.stmtList(n.List)

	// Special: n->left is not an expression; save as is.
	case ir.OBREAK,
		ir.OCONTINUE,
		ir.ODCL,
		ir.OFALL,
		ir.OGOTO,
		ir.OLABEL,
		ir.OTAILCALL:
		o.out = append(o.out, n)
	// Special: handle call arguments.
	case ir.OCALLFUNC, ir.OCALLINTER:
		n := n.(*ir.CallExpr)
		t := o.markTemp()
		o.call(n)
		o.out = append(o.out, n)
		o.popTemp(t)
	case ir.OINLCALL:
		n := n.(*ir.InlinedCallExpr)
		o.stmtList(n.Body)

		// discard results; double-check for no side effects
		for _, result := range n.ReturnVars {
			if staticinit.AnySideEffects(result) {
				base.FatalfAt(result.Pos(), "inlined call result has side effects: %v", result)
			}
		}
	case ir.OCHECKNIL, ir.OCLEAR, ir.OCLOSE, ir.OPANIC, ir.ORECV:
		n := n.(*ir.UnaryExpr)
		t := o.markTemp()
		n.X = o.expr(n.X, nil)
		o.out = append(o.out, n)
		o.popTemp(t)

	case ir.OCOPY:
		n := n.(*ir.BinaryExpr)
		t := o.markTemp()
		n.X = o.expr(n.X, nil)
		n.Y = o.expr(n.Y, nil)
		o.out = append(o.out, n)
		o.popTemp(t)
	case ir.OPRINT, ir.OPRINTN, ir.ORECOVERFP:
		n := n.(*ir.CallExpr)
		t := o.markTemp()
		o.call(n)
		o.out = append(o.out, n)
		o.popTemp(t)
	// Special: order arguments to inner call but not call itself.
	case ir.ODEFER, ir.OGO:
		n := n.(*ir.GoDeferStmt)
		t := o.markTemp()
		o.init(n.Call)
		o.call(n.Call)
		o.out = append(o.out, n)
		o.popTemp(t)
	case ir.ODELETE:
		n := n.(*ir.CallExpr)
		t := o.markTemp()
		n.Args[0] = o.expr(n.Args[0], nil)
		n.Args[1] = o.expr(n.Args[1], nil)
		n.Args[1] = o.mapKeyTemp(n.Pos(), n.Args[0].Type(), n.Args[1])
		o.out = append(o.out, n)
		o.popTemp(t)
	// Clean temporaries from condition evaluation at
	// beginning of loop body and after for statement.
	case ir.OFOR:
		n := n.(*ir.ForStmt)
		t := o.markTemp()
		n.Cond = o.exprInPlace(n.Cond)
		orderBlock(&n.Body, o.free)
		n.Post = orderStmtInPlace(n.Post, o.free)
		o.out = append(o.out, n)
		o.popTemp(t)
	// Clean temporaries from condition at
	// beginning of both branches.
	case ir.OIF:
		n := n.(*ir.IfStmt)
		t := o.markTemp()
		n.Cond = o.exprInPlace(n.Cond)
		o.popTemp(t)
		orderBlock(&n.Body, o.free)
		orderBlock(&n.Else, o.free)
		o.out = append(o.out, n)
	// n.X is the expression being ranged over.
	// order it, and then make a copy if we need one.
	// We almost always do, to ensure that we don't
	// see any value changes made during the loop.
	// Usually the copy is cheap (e.g., array pointer,
	// chan, slice, string are all tiny).
	// The exception is ranging over an array value
	// (not a slice, not a pointer to array),
	// which must make a copy to avoid seeing updates made during
	// the range body. Ranging over an array value is uncommon though.
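	//
	// For example:
	//
	//	a := [4]int{1, 2, 3, 4}
	//	for i, v := range a {
	//		a[3] = 100 // not observed: the loop iterates over a copy of a
	//		_, _ = i, v
	//	}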
	// Mark []byte(str) range expression to reuse string backing storage.
	// It is safe because the storage cannot be mutated.
	case ir.ORANGE:
		n := n.(*ir.RangeStmt)
		if x, ok := n.X.(*ir.ConvExpr); ok {
			switch x.Op() {
			case ir.OSTR2BYTES:
				x.SetOp(ir.OSTR2BYTESTMP)
				fallthrough
			case ir.OSTR2BYTESTMP:
				x.MarkNonNil() // "range []byte(nil)" is fine
			}
		}

		t := o.markTemp()
		n.X = o.expr(n.X, nil)

		orderBody := true
		xt := typecheck.RangeExprType(n.X.Type())
		switch xt.Kind() {
		default:
			base.Fatalf("order.stmt range %v", n.Type())

		case types.TARRAY, types.TSLICE:
			if n.Value == nil || ir.IsBlank(n.Value) {
				// for i := range x will only use x once, to compute len(x).
				// No need to copy it.
				break
			}
			fallthrough

		case types.TCHAN, types.TSTRING:
			// chan, string, slice, array ranges use value multiple times.
			// make copy.
			r := n.X

			if r.Type().IsString() && r.Type() != types.Types[types.TSTRING] {
				r = ir.NewConvExpr(base.Pos, ir.OCONV, nil, r)
				r.SetType(types.Types[types.TSTRING])
				r = typecheck.Expr(r)
			}

			n.X = o.copyExpr(r)

		case types.TMAP:
			if isMapClear(n) {
				// Preserve the body of the map clear pattern so it can
				// be detected during walk. The loop body will not be used
				// when optimizing away the range loop to a runtime call.
				orderBody = false
				break
			}

			// copy the map value in case it is a map literal.
			// TODO(rsc): Make tmp = literal expressions reuse tmp.
			// For maps tmp is just one word so it hardly matters.
			r := n.X
			n.X = o.copyExpr(r)

			// n.Prealloc is the temp for the iterator.
			// MapIterType contains pointers and needs to be zeroed.
			n.Prealloc = o.newTemp(reflectdata.MapIterType(), true)
		}
		n.Key = o.exprInPlace(n.Key)
		n.Value = o.exprInPlace(n.Value)
		if orderBody {
			orderBlock(&n.Body, o.free)
		}
		o.out = append(o.out, n)
		o.popTemp(t)
	case ir.ORETURN:
		n := n.(*ir.ReturnStmt)
		o.exprList(n.Results)
		o.out = append(o.out, n)
	// Special: clean case temporaries in each block entry.
	// Select must enter one of its blocks, so there is no
	// need for a cleaning at the end.
	// Doubly special: evaluation order for select is stricter
	// than ordinary expressions. Even something like p.c
	// has to be hoisted into a temporary, so that it cannot be
	// reordered after the channel evaluation for a different
	// case (if p were nil, then the timing of the fault would
	// change).
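	//
	// For example (sketch):
	//
	//	select {
	//	case v := <-p.c:
	//		// ...
	//	case q.c <- v:
	//		// ...
	//	}
	//
	// p.c and q.c must both be evaluated (and fault on a nil p or q)
	// before either channel operation is committed.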
	case ir.OSELECT:
		n := n.(*ir.SelectStmt)
		t := o.markTemp()
		for _, ncas := range n.Cases {
			r := ncas.Comm
			ir.SetPos(ncas)

			// Append any new body prologue to ninit.
			// The next loop will insert ninit into nbody.
			if len(ncas.Init()) != 0 {
				base.Fatalf("order select ninit")
			}
			if r == nil {
				continue
			}
			switch r.Op() {
			default:
				ir.Dump("select case", r)
				base.Fatalf("unknown op in select %v", r.Op())

			case ir.OSELRECV2:
				// case x, ok = <-c
				r := r.(*ir.AssignListStmt)
				recv := r.Rhs[0].(*ir.UnaryExpr)
				recv.X = o.expr(recv.X, nil)
				if !ir.IsAutoTmp(recv.X) {
					recv.X = o.copyExpr(recv.X)
				}
				init := ir.TakeInit(r)

				colas := r.Def
				do := func(i int, t *types.Type) {
					n := r.Lhs[i]
					if ir.IsBlank(n) {
						return
					}
					// If this is case x := <-ch or case x, y := <-ch, the case has
					// the ODCL nodes to declare x and y. We want to delay that
					// declaration (and possible allocation) until inside the case body.
					// Delete the ODCL nodes here and recreate them inside the body below.
					if colas {
						if len(init) > 0 && init[0].Op() == ir.ODCL && init[0].(*ir.Decl).X == n {
							init = init[1:]

							// iimport may have added a default initialization assignment,
							// due to how it handles ODCL statements.
							if len(init) > 0 && init[0].Op() == ir.OAS && init[0].(*ir.AssignStmt).X == n {
								init = init[1:]
							}
						}
						dcl := typecheck.Stmt(ir.NewDecl(base.Pos, ir.ODCL, n.(*ir.Name)))
						ncas.PtrInit().Append(dcl)
					}
					tmp := o.newTemp(t, t.HasPointers())
					as := typecheck.Stmt(ir.NewAssignStmt(base.Pos, n, typecheck.Conv(tmp, n.Type())))
					ncas.PtrInit().Append(as)
					r.Lhs[i] = tmp
				}
				do(0, recv.X.Type().Elem())
				do(1, types.Types[types.TBOOL])
				if len(init) != 0 {
					ir.DumpList("ninit", init)
					base.Fatalf("ninit on select recv")
				}

				orderBlock(ncas.PtrInit(), o.free)

			case ir.OSEND:
				r := r.(*ir.SendStmt)
				if len(r.Init()) != 0 {
					ir.DumpList("ninit", r.Init())
					base.Fatalf("ninit on select send")
				}

				// case c <- x
				// r.Chan is c, r.Value is x; both are always evaluated.
				r.Chan = o.expr(r.Chan, nil)

				if !ir.IsAutoTmp(r.Chan) {
					r.Chan = o.copyExpr(r.Chan)
				}
				r.Value = o.expr(r.Value, nil)
				if !ir.IsAutoTmp(r.Value) {
					r.Value = o.copyExpr(r.Value)
				}
			}
		}
		// Now that we have accumulated all the temporaries, clean them.
		// Also insert any ninit queued during the previous loop.
		// (The temporary cleaning must follow that ninit work.)
		for _, cas := range n.Cases {
			orderBlock(&cas.Body, o.free)

			// TODO(mdempsky): Is this actually necessary?
			// walkSelect appears to walk Ninit.
			cas.Body.Prepend(ir.TakeInit(cas)...)
		}

		o.out = append(o.out, n)
		o.popTemp(t)
	// Special: value being sent is passed as a pointer; make it addressable.
	case ir.OSEND:
		n := n.(*ir.SendStmt)
		t := o.markTemp()
		n.Chan = o.expr(n.Chan, nil)
		n.Value = o.expr(n.Value, nil)
		if base.Flag.Cfg.Instrumenting {
			// Force copying to the stack so that (chan T)(nil) <- x
			// is still instrumented as a read of x.
			n.Value = o.copyExpr(n.Value)
		} else {
			n.Value = o.addrTemp(n.Value)
		}
		o.out = append(o.out, n)
		o.popTemp(t)
	// TODO(rsc): Clean temporaries more aggressively.
	// Note that because walkSwitch will rewrite some of the
	// switch into a binary search, this is not as easy as it looks.
	// (If we ran that code here we could invoke order.stmt on
	// the if-else chain instead.)
	// For now just clean all the temporaries at the end.
	// In practice that's fine.
	case ir.OSWITCH:
		n := n.(*ir.SwitchStmt)
		if base.Debug.Libfuzzer != 0 && !hasDefaultCase(n) {
			// Add empty "default:" case for instrumentation.
			n.Cases = append(n.Cases, ir.NewCaseStmt(base.Pos, nil, nil))
		}

		t := o.markTemp()
		n.Tag = o.expr(n.Tag, nil)
		for _, ncas := range n.Cases {
			o.exprListInPlace(ncas.List)
			orderBlock(&ncas.Body, o.free)
		}

		o.out = append(o.out, n)
		o.popTemp(t)
	}

	base.Pos = lno
}

func hasDefaultCase(n *ir.SwitchStmt) bool {
	for _, ncas := range n.Cases {
		if len(ncas.List) == 0 {
			return true
		}
	}
	return false
}

// exprList orders the expression list l into o.
func (o *orderState) exprList(l ir.Nodes) {
	s := l
	for i := range s {
		s[i] = o.expr(s[i], nil)
	}
}

// exprListInPlace orders the expression list l but saves
// the side effects on the individual expression ninit lists.
func (o *orderState) exprListInPlace(l ir.Nodes) {
	s := l
	for i := range s {
		s[i] = o.exprInPlace(s[i])
	}
}

func (o *orderState) exprNoLHS(n ir.Node) ir.Node {
	return o.expr(n, nil)
}

// expr orders a single expression, appending side
// effects to o.out as needed.
// If this is part of an assignment lhs = *np, lhs is given.
// Otherwise lhs == nil. (When lhs != nil it may be possible
// to avoid copying the result of the expression to a temporary.)
// The result of expr MUST be assigned back to n, e.g.
//
//	n.Left = o.expr(n.Left, lhs)
func (o *orderState) expr(n, lhs ir.Node) ir.Node {
	if n == nil {
		return n
	}
	lno := ir.SetPos(n)
	n = o.expr1(n, lhs)
	base.Pos = lno
	return n
}

func (o *orderState) expr1(n, lhs ir.Node) ir.Node {
	o.init(n)

	switch n.Op() {
	default:
		if o.edit == nil {
			o.edit = o.exprNoLHS // create closure once
		}
		ir.EditChildren(n, o.edit)
		return n
	// Addition of strings turns into a function call.
	// Allocate a temporary to hold the strings.
	// Fewer than 5 strings use direct runtime helpers.
	case ir.OADDSTR:
		n := n.(*ir.AddStringExpr)
		o.exprList(n.List)

		if len(n.List) > 5 {
			t := types.NewArray(types.Types[types.TSTRING], int64(len(n.List)))
			n.Prealloc = o.newTemp(t, false)
		}

		// Mark string(byteSlice) arguments to reuse byteSlice backing
		// buffer during conversion. String concatenation does not
		// retain the strings for later use, so it is safe.
		// However, we can do it only if there is at least one non-empty string literal.
		// Otherwise if all other arguments are empty strings,
		// concatstrings will return the reference to the temp string
		// to the caller.
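		//
		// For example (sketch):
		//
		//	s := "prefix: " + string(b) // b is []byte
		//
		// can build the result directly from b's bytes, since the
		// concatenation reads them before b can be mutated.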
		hasbyte := false

		haslit := false
		for _, n1 := range n.List {
			hasbyte = hasbyte || n1.Op() == ir.OBYTES2STR
			haslit = haslit || n1.Op() == ir.OLITERAL && len(ir.StringVal(n1)) != 0
		}

		if haslit && hasbyte {
			for _, n2 := range n.List {
				if n2.Op() == ir.OBYTES2STR {
					n2 := n2.(*ir.ConvExpr)
					n2.SetOp(ir.OBYTES2STRTMP)
				}
			}
		}
		return n
	case ir.OINDEXMAP:
		n := n.(*ir.IndexExpr)
		n.X = o.expr(n.X, nil)
		n.Index = o.expr(n.Index, nil)
		needCopy := false

		if !n.Assigned {
			// Enforce that any []byte slices we are not copying
			// cannot be changed before the map index by forcing
			// the map index to happen immediately following the
			// conversions. See copyExpr a few lines below.
			needCopy = mapKeyReplaceStrConv(n.Index)

			if base.Flag.Cfg.Instrumenting {
				// Race detector needs the copy.
				needCopy = true
			}
		}

		// key must be addressable
		n.Index = o.mapKeyTemp(n.Pos(), n.X.Type(), n.Index)
		if needCopy {
			return o.copyExpr(n)
		}
		return n
	// concrete type (not interface) argument might need an addressable
	// temporary to pass to the runtime conversion routine.
	case ir.OCONVIFACE:
		n := n.(*ir.ConvExpr)
		n.X = o.expr(n.X, nil)
		if n.X.Type().IsInterface() {
			return n
		}
		if _, _, needsaddr := dataWordFuncName(n.X.Type()); needsaddr || isStaticCompositeLiteral(n.X) {
			// Need a temp if we need to pass the address to the conversion function.
			// We also process static composite literal node here, making a named static global
			// whose address we can put directly in an interface (see OCONVIFACE case in walk).
			n.X = o.addrTemp(n.X)
		}
		return n
	case ir.OCONVNOP:
		n := n.(*ir.ConvExpr)
		if n.X.Op() == ir.OCALLMETH {
			base.FatalfAt(n.X.Pos(), "OCALLMETH missed by typecheck")
		}
		if n.Type().IsKind(types.TUNSAFEPTR) && n.X.Type().IsKind(types.TUINTPTR) && (n.X.Op() == ir.OCALLFUNC || n.X.Op() == ir.OCALLINTER) {
			call := n.X.(*ir.CallExpr)
			// When reordering unsafe.Pointer(f()) into a separate
			// statement, the conversion and function call must stay
			// together. See golang.org/issue/15329.
			o.init(call)
			o.call(call)
			if lhs == nil || lhs.Op() != ir.ONAME || base.Flag.Cfg.Instrumenting {
				return o.copyExpr(n)
			}
		} else {
			n.X = o.expr(n.X, nil)
		}
		return n
	case ir.OANDAND, ir.OOROR:
		// ... = LHS && RHS
		// is rewritten (conceptually) into
		//
		//	r = LHS
		//	if r { // or !r, for OROR
		//		r = RHS
		//	}
		//	... = r
		n := n.(*ir.LogicalExpr)
		r := o.newTemp(n.Type(), false)

		// Evaluate left-hand side.
		lhs := o.expr(n.X, nil)
		o.out = append(o.out, typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, lhs)))

		// Evaluate right-hand side, save generated code.
		saveout := o.out
		o.out = nil
		t := o.markTemp()
		o.edge()
		rhs := o.expr(n.Y, nil)
		o.out = append(o.out, typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, rhs)))
		o.popTemp(t)
		gen := o.out
		o.out = saveout

		// If left-hand side doesn't cause a short-circuit, issue right-hand side.
		nif := ir.NewIfStmt(base.Pos, r, nil, nil)
		if n.Op() == ir.OANDAND {
			nif.Body = gen
		} else {
			nif.Else = gen
		}
		o.out = append(o.out, nif)
		return r

	case ir.OCALLMETH:
		base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
		panic("unreachable")
	case ir.OCALLFUNC,
		ir.OCALLINTER,
		ir.OCAP,
		ir.OCOMPLEX,
		ir.OCOPY,
		ir.OIMAG,
		ir.OLEN,
		ir.OMAKECHAN,
		ir.OMAKEMAP,
		ir.OMAKESLICE,
		ir.OMAKESLICECOPY,
		ir.ONEW,
		ir.OREAL,
		ir.ORECOVERFP,
		ir.OSTR2BYTES,
		ir.OSTR2BYTESTMP,
		ir.OSTR2RUNES:

		if isRuneCount(n) {
			// len([]rune(s)) is rewritten to runtime.countrunes(s) later.
			conv := n.(*ir.UnaryExpr).X.(*ir.ConvExpr)
			conv.X = o.expr(conv.X, nil)
		} else {
			o.call(n)
		}

		if lhs == nil || lhs.Op() != ir.ONAME || base.Flag.Cfg.Instrumenting {
			return o.copyExpr(n)
		}
		return n

	case ir.OINLCALL:
		n := n.(*ir.InlinedCallExpr)
		o.stmtList(n.Body)
		return n.SingleResult()
	case ir.OAPPEND:
		// Check for append(x, make([]T, y)...) .
		n := n.(*ir.CallExpr)
		if isAppendOfMake(n) {
			n.Args[0] = o.expr(n.Args[0], nil) // order x
			mk := n.Args[1].(*ir.MakeExpr)
			mk.Len = o.expr(mk.Len, nil) // order y
		} else {
			o.exprList(n.Args)
		}

		if lhs == nil || lhs.Op() != ir.ONAME && !ir.SameSafeExpr(lhs, n.Args[0]) {
			return o.copyExpr(n)
		}
		return n
	case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR:
		n := n.(*ir.SliceExpr)
		n.X = o.expr(n.X, nil)
		n.Low = o.cheapExpr(o.expr(n.Low, nil))
		n.High = o.cheapExpr(o.expr(n.High, nil))
		n.Max = o.cheapExpr(o.expr(n.Max, nil))
		if lhs == nil || lhs.Op() != ir.ONAME && !ir.SameSafeExpr(lhs, n.X) {
			return o.copyExpr(n)
		}
		return n
	case ir.OCLOSURE:
		n := n.(*ir.ClosureExpr)
		if n.Transient() && len(n.Func.ClosureVars) > 0 {
			n.Prealloc = o.newTemp(typecheck.ClosureType(n), false)
		}
		return n

	case ir.OMETHVALUE:
		n := n.(*ir.SelectorExpr)
		n.X = o.expr(n.X, nil)
		if n.Transient() {
			t := typecheck.MethodValueType(n)
			n.Prealloc = o.newTemp(t, false)
		}
		return n

	case ir.OSLICELIT:
		n := n.(*ir.CompLitExpr)
		if n.Transient() {
			t := types.NewArray(n.Type().Elem(), n.Len)
			n.Prealloc = o.newTemp(t, false)
		}
		return n
	case ir.ODOTTYPE, ir.ODOTTYPE2:
		n := n.(*ir.TypeAssertExpr)
		n.X = o.expr(n.X, nil)
		if !types.IsDirectIface(n.Type()) || base.Flag.Cfg.Instrumenting {
			return o.copyExprClear(n)
		}
		return n

	case ir.ORECV:
		n := n.(*ir.UnaryExpr)
		n.X = o.expr(n.X, nil)
		return o.copyExprClear(n)
	case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
		n := n.(*ir.BinaryExpr)
		n.X = o.expr(n.X, nil)
		n.Y = o.expr(n.Y, nil)

		t := n.X.Type()
		switch {
		case t.IsString():
			// Mark string(byteSlice) arguments to reuse byteSlice backing
			// buffer during conversion. String comparison does not
			// retain the strings for later use, so it is safe.
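			//
			// For example (sketch):
			//
			//	if string(b) == s { ... }
			//
			// compares b's bytes against s directly, with no allocation
			// for the conversion.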
			if n.X.Op() == ir.OBYTES2STR {
				n.X.(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP)
			}
			if n.Y.Op() == ir.OBYTES2STR {
				n.Y.(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP)
			}

		case t.IsStruct() || t.IsArray():
			// for complex comparisons, we need both args to be
			// addressable so we can pass them to the runtime.
			n.X = o.addrTemp(n.X)
			n.Y = o.addrTemp(n.Y)
		}
		return n
	case ir.OMAPLIT:
		// Order map by converting:
		//
		//	map[int]int{
		//		a: b,
		//		c: d,
		//	}
		//
		// to
		//
		//	m := map[int]int{}
		//	m[a] = b
		//	m[c] = d
		//
		// Then order the result.
		// Without this special case, order would otherwise compute all
		// the keys and values before storing any of them to the map.
		n := n.(*ir.CompLitExpr)
		entries := n.List
		statics := entries[:0]
		var dynamics []*ir.KeyExpr
		for _, r := range entries {
			r := r.(*ir.KeyExpr)

			if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) {
				dynamics = append(dynamics, r)
				continue
			}

			// Recursively ordering some static entries can change them to dynamic;
			// e.g., OCONVIFACE nodes. See #31777.
			r = o.expr(r, nil).(*ir.KeyExpr)
			if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) {
				dynamics = append(dynamics, r)
				continue
			}

			statics = append(statics, r)
		}
		n.List = statics

		if len(dynamics) == 0 {
			return n
		}

		// Emit the creation of the map (with all its static entries).
		m := o.newTemp(n.Type(), false)
		as := ir.NewAssignStmt(base.Pos, m, n)
		typecheck.Stmt(as)
		o.stmt(as)

		// Emit eval+insert of dynamic entries, one at a time.
		for _, r := range dynamics {
			lhs := typecheck.AssignExpr(ir.NewIndexExpr(base.Pos, m, r.Key)).(*ir.IndexExpr)
			base.AssertfAt(lhs.Op() == ir.OINDEXMAP, lhs.Pos(), "want OINDEXMAP, have %+v", lhs)

			as := ir.NewAssignStmt(base.Pos, lhs, r.Value)
			typecheck.Stmt(as)
			o.stmt(as)
		}

		// Remember that we issued these assignments so we can include that count
		// in the map alloc hint.
		// We're assuming here that all the keys in the map literal are distinct.
		// If any are equal, this will be an overcount. Probably not worth accounting
		// for that, as equal keys in map literals are rare, and at worst we waste
		// a bit of memory.
		n.Len += int64(len(dynamics))

		return m
	}

	// No return - type-assertions above. Each case must return for itself.
}

// as2func orders OAS2FUNC nodes. It creates temporaries to ensure left-to-right assignment.
// The caller should order the right-hand side of the assignment before calling order.as2func.
// It rewrites,
//
//	a, b, a = ...
//
// as
//
//	tmp1, tmp2, tmp3 = ...
//	a, b, a = tmp1, tmp2, tmp3
//
// This is necessary to ensure left to right assignment order.
func (o *orderState) as2func(n *ir.AssignListStmt) {
	results := n.Rhs[0].Type()
	as := ir.NewAssignListStmt(n.Pos(), ir.OAS2, nil, nil)
	for i, nl := range n.Lhs {
		if !ir.IsBlank(nl) {
			typ := results.Field(i).Type
			tmp := o.newTemp(typ, typ.HasPointers())
			n.Lhs[i] = tmp
			as.Lhs = append(as.Lhs, nl)
			as.Rhs = append(as.Rhs, tmp)
		}
	}

	o.out = append(o.out, n)
	o.stmt(typecheck.Stmt(as))
}

// as2ok orders OAS2XXX with ok.
// Just like as2func, this also adds temporaries to ensure left-to-right assignment.
func (o *orderState) as2ok(n *ir.AssignListStmt) {
	as := ir.NewAssignListStmt(n.Pos(), ir.OAS2, nil, nil)

	do := func(i int, typ *types.Type) {
		if nl := n.Lhs[i]; !ir.IsBlank(nl) {
			var tmp ir.Node = o.newTemp(typ, typ.HasPointers())
			n.Lhs[i] = tmp
			as.Lhs = append(as.Lhs, nl)
			if i == 1 {
				// The "ok" result is an untyped boolean according to the Go
				// spec. We need to explicitly convert it to the LHS type in
				// case the latter is a defined boolean type (#8475).
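				//
				// For example:
				//
				//	type B bool
				//	var v T
				//	var ok B
				//	v, ok = x.(T) // ok's type is B, so the temp bool needs a conversion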
				tmp = typecheck.Conv(tmp, nl.Type())
			}
			as.Rhs = append(as.Rhs, tmp)
		}
	}

	do(0, n.Rhs[0].Type())
	do(1, types.Types[types.TBOOL])

	o.out = append(o.out, n)
	o.stmt(typecheck.Stmt(as))
}

// isIfaceOfFunc returns whether n is an interface conversion from a direct reference of a func.
func isIfaceOfFunc(n ir.Node) bool {
	return n.Op() == ir.OCONVIFACE && n.(*ir.ConvExpr).X.Op() == ir.ONAME && n.(*ir.ConvExpr).X.(*ir.Name).Class == ir.PFUNC
}