1 // Copyright 2012 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
11 "cmd/compile/internal/base"
12 "cmd/compile/internal/escape"
13 "cmd/compile/internal/ir"
14 "cmd/compile/internal/reflectdata"
15 "cmd/compile/internal/staticinit"
16 "cmd/compile/internal/typecheck"
17 "cmd/compile/internal/types"
22 // Rewrite tree to use separate statements to enforce
23 // order of evaluation. Makes walk easier, because it
24 // can (after this runs) reorder at will within an expression.
26 // Rewrite m[k] op= r into m[k] = m[k] op r if op is / or %.
28 // Introduce temporaries as needed by runtime routines.
29 // For example, the map runtime routines take the map key
30 // by reference, so make sure all map keys are addressable
31 // by copying them to temporaries as needed.
32 // The same is true for channel operations.
34 // Arrange that map index expressions only appear in direct
35 // assignments x = m[k] or m[k] = x, never in larger expressions.
37 // Arrange that receive expressions only appear in direct assignments
38 // x = <-c or as standalone statements <-c, never in larger expressions.
40 // TODO(rsc): The temporary introduction during multiple assignments
41 // should be moved into this file, so that the temporaries can be cleaned
42 // and so that conversions implicit in the OAS2FUNC and OAS2RECV
43 // nodes can be made explicit and then have their temporaries cleaned.
45 // TODO(rsc): Goto and multilevel break/continue can jump over
46 // inserted VARKILL annotations. Work out a way to handle these.
47 // The current implementation is safe, in that it will execute correctly.
48 // But it won't reuse temporaries as aggressively as it might, and
49 // it can result in unnecessary zeroing of those variables in the function
52 // orderState holds state during the ordering process.
// NOTE(review): this excerpt elides lines (embedded original numbering jumps);
// the struct's closing brace is not visible here.
53 type orderState struct {
54 out []ir.Node // list of generated statements
55 temp []*ir.Name // stack of temporary variables
// free allows popTemp/newTemp to recycle temporaries of the same type
// (keyed by LongString; see newTemp's Identical double-check).
56 free map[string][]*ir.Name // free list of unused temporaries, by type.LongString().
57 edit func(ir.Node) ir.Node // cached closure of o.exprNoLHS
60 // Order rewrites fn.Nbody to apply the ordering constraints
61 // described in the comment at the top of the file.
62 func order(fn *ir.Func) {
// NOTE(review): excerpt — a line is elided before the dump below (numbering
// jumps 62→64); presumably a debug-flag guard around the dump. TODO confirm.
64 s := fmt.Sprintf("\nbefore order %v", fn.Sym())
65 ir.DumpList(s, fn.Body)
// Start ordering with an empty free-temp map; orderBlock threads it through.
68 orderBlock(&fn.Body, map[string][]*ir.Name{})
71 // append typechecks stmt and appends it to out.
72 func (o *orderState) append(stmt ir.Node) {
// Every generated statement passes through typecheck.Stmt before emission.
73 o.out = append(o.out, typecheck.Stmt(stmt))
76 // newTemp allocates a new temporary with the given type,
77 // pushes it onto the temp stack, and returns it.
78 // If clear is true, newTemp emits code to zero the temporary.
// NOTE(review): excerpt — the free-list lookup and the fresh-allocation
// branch are elided here (numbering jumps 86→98); only fragments remain.
79 func (o *orderState) newTemp(t *types.Type, clear bool) *ir.Name {
81 // Note: LongString is close to the type equality we want,
82 // but not exactly. We still need to double-check with types.Identical.
86 if types.Identical(t, n.Type()) {
// clear==true: emit v = <zero value> (assignment with nil RHS zeroes v).
98 o.append(ir.NewAssignStmt(base.Pos, v, nil))
101 o.temp = append(o.temp, v)
105 // copyExpr behaves like newTemp but also emits
106 // code to initialize the temporary to the value n.
107 func (o *orderState) copyExpr(n ir.Node) *ir.Name {
108 return o.copyExpr1(n, false)
111 // copyExprClear is like copyExpr but clears the temp before assignment.
112 // It is provided for use when the evaluation of tmp = n turns into
113 // a function call that is passed a pointer to the temporary as the output space.
114 // If the call blocks before tmp has been written,
115 // the garbage collector will still treat the temporary as live,
116 // so we must zero it before entering that call.
117 // Today, this only happens for channel receive operations.
118 // (The other candidate would be map access, but map access
119 // returns a pointer to the result data instead of taking a pointer
// NOTE(review): excerpt — the closing line of this comment is elided.
121 func (o *orderState) copyExprClear(n ir.Node) *ir.Name {
122 return o.copyExpr1(n, true)
// copyExpr1 is the shared implementation of copyExpr and copyExprClear:
// allocate a temp for n's type (zeroed when clear is true), emit v = n,
// and return v. (Excerpt: the line binding t and the return are elided.)
125 func (o *orderState) copyExpr1(n ir.Node, clear bool) *ir.Name {
127 v := o.newTemp(t, clear)
128 o.append(ir.NewAssignStmt(base.Pos, v, n))
132 // cheapExpr returns a cheap version of n.
133 // The definition of cheap is that n is a variable or constant.
134 // If not, cheapExpr allocates a new tmp, emits tmp = n,
135 // and then returns tmp.
// NOTE(review): excerpt — the switch header, return statements, and the
// default (copy-to-temp) branch are elided (numbering jumps 146→150→152).
136 func (o *orderState) cheapExpr(n ir.Node) ir.Node {
// Names and constants are already cheap: returned as-is (return elided).
142 case ir.ONAME, ir.OLITERAL, ir.ONIL:
// len/cap: make the operand cheap, then rebuild the unary expression.
144 case ir.OLEN, ir.OCAP:
145 n := n.(*ir.UnaryExpr)
146 l := o.cheapExpr(n.X)
150 a := ir.SepCopy(n).(*ir.UnaryExpr)
152 return typecheck.Expr(a)
158 // safeExpr returns a safe version of n.
159 // The definition of safe is that n can appear multiple times
160 // without violating the semantics of the original program,
161 // and that assigning to the safe version has the same effect
162 // as assigning to the original n.
164 // The intended use is to apply to x when rewriting x += y into x = x + y.
// NOTE(review): excerpt — the switch header, several case labels (the
// selector/star cases below have lost their `case` lines), early-return
// fast paths, and assignments into the copied nodes are elided.
165 func (o *orderState) safeExpr(n ir.Node) ir.Node {
167 case ir.ONAME, ir.OLITERAL, ir.ONIL:
168 case ir.OLEN, ir.OCAP:
171 n := n.(*ir.UnaryExpr)
176 a := ir.SepCopy(n).(*ir.UnaryExpr)
178 return typecheck.Expr(a)
// Presumably the ODOT case — its case label is elided. TODO confirm.
181 n := n.(*ir.SelectorExpr)
186 a := ir.SepCopy(n).(*ir.SelectorExpr)
188 return typecheck.Expr(a)
// Presumably the ODOTPTR case — its case label is elided. TODO confirm.
191 n := n.(*ir.SelectorExpr)
192 l := o.cheapExpr(n.X)
196 a := ir.SepCopy(n).(*ir.SelectorExpr)
198 return typecheck.Expr(a)
// Presumably the ODEREF case — its case label is elided. TODO confirm.
201 n := n.(*ir.StarExpr)
202 l := o.cheapExpr(n.X)
206 a := ir.SepCopy(n).(*ir.StarExpr)
208 return typecheck.Expr(a)
210 case ir.OINDEX, ir.OINDEXMAP:
211 n := n.(*ir.IndexExpr)
213 if n.X.Type().IsArray() {
218 r := o.cheapExpr(n.Index)
// If neither operand needed a copy, the original node is already safe.
219 if l == n.X && r == n.Index {
222 a := ir.SepCopy(n).(*ir.IndexExpr)
225 return typecheck.Expr(a)
// Any other op reaching here is a compiler bug.
228 base.Fatalf("order.safeExpr %v", n.Op())
229 return nil // not reached
233 // isaddrokay reports whether it is okay to pass n's address to runtime routines.
234 // Taking the address of a variable makes the liveness and optimization analyses
235 // lose track of where the variable's lifetime ends. To avoid hurting the analyses
236 // of ordinary stack variables, those are not 'isaddrokay'. Temporaries are okay,
237 // because we emit explicit VARKILL instructions marking the end of those
238 // temporaries' lifetimes.
239 func isaddrokay(n ir.Node) bool {
// Addressable, and either not a plain name, a global (PEXTERN), or an
// autotmp — i.e. anything except an ordinary named stack variable.
240 return ir.IsAddressable(n) && (n.Op() != ir.ONAME || n.(*ir.Name).Class == ir.PEXTERN || ir.IsAutoTmp(n))
243 // addrTemp ensures that n is okay to pass by address to runtime routines.
244 // If the original argument n is not okay, addrTemp creates a tmp, emits
245 // tmp = n, and then returns tmp.
246 // The result of addrTemp MUST be assigned back to n, e.g.
247 // n.Left = o.addrTemp(n.Left)
// NOTE(review): excerpt — the error-check guard before the Fatalf, the
// return of vstat, and the non-literal fallback path are elided.
248 func (o *orderState) addrTemp(n ir.Node) ir.Node {
// Constants get promoted to read-only static data instead of a stack temp.
249 if n.Op() == ir.OLITERAL || n.Op() == ir.ONIL {
250 // TODO: expand this to all static composite literal nodes?
251 n = typecheck.DefaultLit(n, nil)
252 types.CalcSize(n.Type())
253 vstat := readonlystaticname(n.Type())
254 var s staticinit.Schedule
255 s.StaticAssign(vstat, 0, n, n.Type())
// A constant must be fully static-initializable; code generation here is a bug.
257 base.Fatalf("staticassign of const generated code: %+v", n)
259 vstat = typecheck.Expr(vstat).(*ir.Name)
268 // mapKeyTemp prepares n to be a key in a map runtime call and returns n.
269 // It should only be used for map runtime calls which have *_fast* versions.
// NOTE(review): excerpt — the mapfast switch header, the default
// (address-of-key) path, and several returns are elided here.
270 func (o *orderState) mapKeyTemp(t *types.Type, n ir.Node) ir.Node {
271 // Most map calls need to take the address of the key.
272 // Exception: map*_fast* calls. See golang.org/issue/19015.
// Fast variants pass the key by value with a fixed type kt.
279 case mapfast32, mapfast32ptr:
280 kt = types.Types[types.TUINT32]
281 case mapfast64, mapfast64ptr:
282 kt = types.Types[types.TUINT64]
// Presumably the mapfaststr case — its case label is elided. TODO confirm.
284 kt = types.Types[types.TSTRING]
290 case nt.Kind() == kt.Kind():
291 // can directly convert (e.g. named type to underlying type)
292 return typecheck.Expr(ir.NewConvExpr(n.Pos(), ir.OCONVNOP, kt, n))
293 case nt.IsInteger() && kt.IsInteger():
294 // can directly convert (e.g. int32 to uint32)
295 if n.Op() == ir.OLITERAL && nt.IsSigned() {
296 // avoid constant overflow error
297 n = ir.NewConstExpr(constant.MakeUint64(uint64(ir.Int64Val(n))), n)
301 return typecheck.Expr(ir.NewConvExpr(n.Pos(), ir.OCONV, kt, n))
303 // Unsafe cast through memory.
304 // We'll need to do a load with type kt. Create a temporary of type kt to
305 // ensure sufficient alignment. nt may be under-aligned.
306 if kt.Align < nt.Align {
307 base.Fatalf("mapKeyTemp: key type is not sufficiently aligned, kt=%v nt=%v", kt, nt)
309 tmp := o.newTemp(kt, true)
// Build *(*nt)(&tmp) = n, i.e. store the key into the kt-aligned temp
// through a pointer reinterpreted as *nt.
311 var e ir.Node = typecheck.NodAddr(tmp)
312 e = ir.NewConvExpr(n.Pos(), ir.OCONVNOP, nt.PtrTo(), e)
313 e = ir.NewStarExpr(n.Pos(), e)
314 o.append(ir.NewAssignStmt(base.Pos, e, n))
319 // mapKeyReplaceStrConv replaces OBYTES2STR by OBYTES2STRTMP
320 // in n to avoid string allocations for keys in map lookups.
321 // Returns a bool that signals if a modification was made.
// NOTE(review): excerpt — the switch header, case labels for the
// conversion/struct-literal/array-literal branches, and the
// replaced-flag bookkeeping are elided here.
325 // x = m[T1{... Tn{..., string(k), ...}]
326 // where k is []byte, T1 to Tn is a nesting of struct and array literals,
327 // the allocation of backing bytes for the string can be avoided
328 // by reusing the []byte backing array. These are special cases
329 // for avoiding allocations when converting byte slices to strings.
330 // It would be nice to handle these generally, but because
331 // []byte keys are not allowed in maps, the use of string(k)
332 // comes up in important cases in practice. See issue 3512.
333 func mapKeyReplaceStrConv(n ir.Node) bool {
// OBYTES2STR conversion: retag so walk reuses the byte slice's backing array.
337 n := n.(*ir.ConvExpr)
338 n.SetOp(ir.OBYTES2STRTMP)
// Struct literal: recurse into each field value.
341 n := n.(*ir.CompLitExpr)
342 for _, elem := range n.List {
343 elem := elem.(*ir.StructKeyExpr)
344 if mapKeyReplaceStrConv(elem.Value) {
// Array literal: recurse into each element (unwrapping explicit keys).
349 n := n.(*ir.CompLitExpr)
350 for _, elem := range n.List {
351 if elem.Op() == ir.OKEY {
352 elem = elem.(*ir.KeyExpr).Value
354 if mapKeyReplaceStrConv(elem) {
364 // markTemp returns the top of the temporary variable stack.
365 func (o *orderState) markTemp() ordermarker {
366 return ordermarker(len(o.temp))
369 // popTemp pops temporaries off the stack until reaching the mark,
370 // which must have been returned by markTemp.
// Popped temporaries are returned to the per-type free list so that
// newTemp can reuse them.
371 func (o *orderState) popTemp(mark ordermarker) {
372 for _, n := range o.temp[mark:] {
373 key := n.Type().LongString()
374 o.free[key] = append(o.free[key], n)
376 o.temp = o.temp[:mark]
379 // cleanTempNoPop emits VARKILL instructions to *out
380 // for each temporary above the mark on the temporary stack.
381 // It does not pop the temporaries from the stack.
// NOTE(review): excerpt — the out declaration, the binding of n inside
// the loop, and the return are elided (numbering jumps 382→384→386).
382 func (o *orderState) cleanTempNoPop(mark ordermarker) []ir.Node {
// Iterate top-down so kills are emitted in reverse allocation order.
384 for i := len(o.temp) - 1; i >= int(mark); i-- {
386 out = append(out, typecheck.Stmt(ir.NewUnaryExpr(base.Pos, ir.OVARKILL, n)))
391 // cleanTemp emits VARKILL instructions for each temporary above the
392 // mark on the temporary stack and removes them from the stack.
// Combines cleanTempNoPop (emit kills) with popTemp (recycle temps);
// the popTemp call after this line is elided in this excerpt.
393 func (o *orderState) cleanTemp(top ordermarker) {
394 o.out = append(o.out, o.cleanTempNoPop(top)...)
398 // stmtList orders each of the statements in the list.
// NOTE(review): excerpt — the loop over the list and the o.stmt call are
// elided; only the peephole hook on each suffix remains visible.
399 func (o *orderState) stmtList(l ir.Nodes) {
402 orderMakeSliceCopy(s[i:])
407 // orderMakeSliceCopy matches the pattern:
408 // m = OMAKESLICE([]T, x); OCOPY(m, s)
409 // and rewrites it to:
410 // m = OMAKESLICECOPY([]T, x, s); nil
// NOTE(review): excerpt — the early `return` statements inside the guard
// conditions below are elided (numbering jumps 412→415, 423/426→430, etc.).
411 func orderMakeSliceCopy(s []ir.Node) {
// Skip the optimization when optimizations are off (-N) or under
// instrumentation (race/msan), where the separate copy must be visible.
412 if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
415 if len(s) < 2 || s[0] == nil || s[0].Op() != ir.OAS || s[1] == nil || s[1].Op() != ir.OCOPY {
419 as := s[0].(*ir.AssignStmt)
420 cp := s[1].(*ir.BinaryExpr)
421 if as.Y == nil || as.Y.Op() != ir.OMAKESLICE || ir.IsBlank(as.X) ||
422 as.X.Op() != ir.ONAME || cp.X.Op() != ir.ONAME || cp.Y.Op() != ir.ONAME ||
423 as.X.Name() != cp.X.Name() || cp.X.Name() == cp.Y.Name() {
424 // The line above this one is correct with the differing equality operators:
425 // we want as.X and cp.X to be the same name,
426 // but we want the initial data to be coming from a different name.
430 mk := as.Y.(*ir.MakeExpr)
431 if mk.Esc() == ir.EscNone || mk.Len == nil || mk.Cap != nil {
434 mk.SetOp(ir.OMAKESLICECOPY)
436 // Set bounded when m = OMAKESLICE([]T, len(s)); OCOPY(m, s)
437 mk.SetBounded(mk.Len.Op() == ir.OLEN && ir.SameSafeExpr(mk.Len.(*ir.UnaryExpr).X, cp.Y))
438 as.Y = typecheck.Expr(mk)
439 s[1] = nil // remove separate copy call
442 // edge inserts coverage instrumentation for libfuzzer.
// NOTE(review): excerpt — the early return inside the guard and the
// emission of incr into o.out are elided here.
443 func (o *orderState) edge() {
// No-op unless libfuzzer instrumentation is enabled.
444 if base.Debug.Libfuzzer == 0 {
448 // Create a new uint8 counter to be allocated in section
449 // __libfuzzer_extra_counters.
450 counter := staticinit.StaticName(types.Types[types.TUINT8])
451 counter.SetLibfuzzerExtraCounter(true)
// counter += 1 at each edge; the fuzzer reads the counters for coverage.
454 incr := ir.NewAssignOpStmt(base.Pos, ir.OADD, counter, ir.NewInt(1))
458 // orderBlock orders the block of statements in n into a new slice,
459 // and then replaces the old slice in n with the new slice.
460 // free is a map that can be used to obtain temporary variables by type.
// NOTE(review): excerpt — construction of the local `order` state, the
// stmtList call, and the write-back of order.out into *n are elided.
461 func orderBlock(n *ir.Nodes, free map[string][]*ir.Name) {
464 mark := order.markTemp()
467 order.cleanTemp(mark)
471 // exprInPlace orders the side effects in *np and
472 // leaves them as the init list of the final *np.
473 // The result of exprInPlace MUST be assigned back to n, e.g.
474 // n.Left = o.exprInPlace(n.Left)
// NOTE(review): excerpt — creation of the nested `order` state and the
// final return of n are elided here.
475 func (o *orderState) exprInPlace(n ir.Node) ir.Node {
// Order n with a fresh state, then fold the generated statements into
// n's init list rather than into o.out.
478 n = order.expr(n, nil)
479 n = ir.InitExpr(order.out, n)
481 // insert new temporaries from order
482 // at head of outer list.
483 o.temp = append(o.temp, order.temp...)
487 // orderStmtInPlace orders the side effects of the single statement *np
488 // and replaces it with the resulting statement list.
489 // The result of orderStmtInPlace MUST be assigned back to n, e.g.
490 // n.Left = orderStmtInPlace(n.Left)
491 // free is a map that can be used to obtain temporary variables by type.
// NOTE(review): excerpt — creation of the local `order` state and the
// order.stmt(n) call are elided here.
492 func orderStmtInPlace(n ir.Node, free map[string][]*ir.Name) ir.Node {
493 mark := order.markTemp()
497 order.cleanTemp(mark)
498 return ir.NewBlockStmt(src.NoXPos, order.out)
501 // init moves n's init list to o.out.
// NOTE(review): excerpt — the return after the shared-node check and the
// closing braces are elided here.
502 func (o *orderState) init(n ir.Node) {
503 if ir.MayBeShared(n) {
504 // For concurrency safety, don't mutate potentially shared nodes.
505 // First, ensure that no work is required here.
506 if len(n.Init()) > 0 {
507 base.Fatalf("order.init shared node with ninit")
// Detach n's init list and order its statements into o.out.
511 o.stmtList(ir.TakeInit(n))
514 // call orders the call expression n.
515 // n.Op is OCALLMETH/OCALLFUNC/OCALLINTER or a builtin like OCOPY.
// NOTE(review): excerpt — the type-switch case labels for the builtin
// branches, the exprList call on the arguments, the early return for
// interface calls, and the keepAlive application loop are elided.
516 func (o *orderState) call(nn ir.Node) {
517 if len(nn.Init()) > 0 {
518 // Caller should have already called o.init(nn).
519 base.Fatalf("%v with unexpected ninit", nn.Op())
522 // Builtin functions.
523 if nn.Op() != ir.OCALLFUNC && nn.Op() != ir.OCALLMETH && nn.Op() != ir.OCALLINTER {
524 switch n := nn.(type) {
// default: any unhandled builtin op is a compiler bug.
526 base.Fatalf("unexpected call: %+v", n)
// Unary/binary/make-style builtins: order each operand in turn
// (case labels elided in this excerpt).
528 n.X = o.expr(n.X, nil)
530 n.X = o.expr(n.X, nil)
532 n.X = o.expr(n.X, nil)
533 n.Y = o.expr(n.Y, nil)
535 n.Len = o.expr(n.Len, nil)
536 n.Cap = o.expr(n.Cap, nil)
// Regular function/method/interface call.
543 n := nn.(*ir.CallExpr)
544 typecheck.FixVariadicCall(n)
545 n.X = o.expr(n.X, nil)
548 if n.Op() == ir.OCALLINTER {
551 keepAlive := func(arg ir.Node) {
552 // If the argument is really a pointer being converted to uintptr,
553 // arrange for the pointer to be kept alive until the call returns,
554 // by copying it into a temp and marking that temp
555 // still alive when we pop the temp stack.
556 if arg.Op() == ir.OCONVNOP {
557 arg := arg.(*ir.ConvExpr)
558 if arg.X.Type().IsUnsafePtr() {
559 x := o.copyExpr(arg.X)
561 x.SetAddrtaken(true) // ensure SSA keeps the x variable
562 n.KeepAlive = append(n.KeepAlive, x)
567 // Check for "unsafe-uintptr" tag provided by escape analysis.
568 for i, param := range n.X.Type().Params().FieldSlice() {
569 if param.Note == escape.UnsafeUintptrNote || param.Note == escape.UintptrEscapesNote {
570 if arg := n.Args[i]; arg.Op() == ir.OSLICELIT {
571 arg := arg.(*ir.CompLitExpr)
572 for _, elt := range arg.List {
582 // mapAssign appends n to o.out.
// For assignments whose LHS is a map index, the RHS is first made safe
// (cannot panic mid-insert) via safeMapRHS.
// NOTE(review): excerpt — the switch header and the OAS/OASOP case labels
// are elided (numbering jumps 583→586→589).
583 func (o *orderState) mapAssign(n ir.Node) {
// default: any other statement op is a compiler bug.
586 base.Fatalf("order.mapAssign %v", n.Op())
// Plain assignment (case label elided).
589 n := n.(*ir.AssignStmt)
590 if n.X.Op() == ir.OINDEXMAP {
591 n.Y = o.safeMapRHS(n.Y)
593 o.out = append(o.out, n)
// Op-assignment, e.g. m[k] += v (case label elided).
595 n := n.(*ir.AssignOpStmt)
596 if n.X.Op() == ir.OINDEXMAP {
597 n.Y = o.safeMapRHS(n.Y)
599 o.out = append(o.out, n)
// safeMapRHS returns a version of r that is safe to evaluate as the RHS
// of a map assignment: for append, only the appended arguments are made
// cheap (the slice head is left alone); everything else is made cheap
// outright so it cannot panic after the map insert has begun.
603 func (o *orderState) safeMapRHS(r ir.Node) ir.Node {
604 // Make sure we evaluate the RHS before starting the map insert.
605 // We need to make sure the RHS won't panic. See issue 22881.
606 if r.Op() == ir.OAPPEND {
607 r := r.(*ir.CallExpr)
// NOTE(review): excerpt — the line binding s to the appended args and the
// return of r are elided here (numbering jumps 607→609).
609 for i, n := range s {
610 s[i] = o.cheapExpr(n)
614 return o.cheapExpr(r)
617 // stmt orders the statement n, appending to o.out.
618 // Temporaries created during the statement are cleaned
619 // up using VARKILL instructions as possible.
620 func (o *orderState) stmt(n ir.Node) {
630 base.Fatalf("order.stmt %v", n.Op())
632 case ir.OVARKILL, ir.OVARLIVE, ir.OINLMARK:
633 o.out = append(o.out, n)
636 n := n.(*ir.AssignStmt)
638 n.X = o.expr(n.X, nil)
639 n.Y = o.expr(n.Y, n.X)
644 n := n.(*ir.AssignOpStmt)
646 n.X = o.expr(n.X, nil)
647 n.Y = o.expr(n.Y, nil)
649 if base.Flag.Cfg.Instrumenting || n.X.Op() == ir.OINDEXMAP && (n.AsOp == ir.ODIV || n.AsOp == ir.OMOD) {
650 // Rewrite m[k] op= r into m[k] = m[k] op r so
651 // that we can ensure that if op panics
652 // because r is zero, the panic happens before
653 // the map assignment.
654 // DeepCopy is a big hammer here, but safeExpr
655 // makes sure there is nothing too deep being copied.
656 l1 := o.safeExpr(n.X)
657 l2 := ir.DeepCopy(src.NoXPos, l1)
658 if l2.Op() == ir.OINDEXMAP {
659 l2 := l2.(*ir.IndexExpr)
663 r := o.expr(typecheck.Expr(ir.NewBinaryExpr(n.Pos(), n.AsOp, l2, n.Y)), nil)
664 as := typecheck.Stmt(ir.NewAssignStmt(n.Pos(), l1, r))
674 n := n.(*ir.AssignListStmt)
678 o.out = append(o.out, n)
681 // Special: avoid copy of func call n.Right
683 n := n.(*ir.AssignListStmt)
691 // Special: use temporary variables to hold result,
692 // so that runtime can take address of temporary.
693 // No temporary for blank assignment.
695 // OAS2MAPR: make sure key is addressable if needed,
696 // and make sure OINDEXMAP is not copied out.
697 case ir.OAS2DOTTYPE, ir.OAS2RECV, ir.OAS2MAPR:
698 n := n.(*ir.AssignListStmt)
702 switch r := n.Rhs[0]; r.Op() {
704 r := r.(*ir.TypeAssertExpr)
705 r.X = o.expr(r.X, nil)
707 r := r.(*ir.UnaryExpr)
708 r.X = o.expr(r.X, nil)
710 r := r.(*ir.IndexExpr)
711 r.X = o.expr(r.X, nil)
712 r.Index = o.expr(r.Index, nil)
713 // See similar conversion for OINDEXMAP below.
714 _ = mapKeyReplaceStrConv(r.Index)
715 r.Index = o.mapKeyTemp(r.X.Type(), r.Index)
717 base.Fatalf("order.stmt: %v", r.Op())
723 // Special: does not save n onto out.
725 n := n.(*ir.BlockStmt)
728 // Special: n->left is not an expression; save as is.
738 o.out = append(o.out, n)
740 // Special: handle call arguments.
741 case ir.OCALLFUNC, ir.OCALLINTER, ir.OCALLMETH:
742 n := n.(*ir.CallExpr)
745 o.out = append(o.out, n)
748 case ir.OCLOSE, ir.ORECV:
749 n := n.(*ir.UnaryExpr)
751 n.X = o.expr(n.X, nil)
752 o.out = append(o.out, n)
756 n := n.(*ir.BinaryExpr)
758 n.X = o.expr(n.X, nil)
759 n.Y = o.expr(n.Y, nil)
760 o.out = append(o.out, n)
763 case ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
764 n := n.(*ir.CallExpr)
767 o.out = append(o.out, n)
770 // Special: order arguments to inner call but not call itself.
771 case ir.ODEFER, ir.OGO:
772 n := n.(*ir.GoDeferStmt)
776 if n.Call.Op() == ir.ORECOVER {
777 // Special handling of "defer recover()". We need to evaluate the FP
778 // argument before wrapping.
780 n.Call = walkRecover(n.Call.(*ir.CallExpr), &init)
783 if objabi.Experiment.RegabiDefer {
786 o.out = append(o.out, n)
790 n := n.(*ir.CallExpr)
792 n.Args[0] = o.expr(n.Args[0], nil)
793 n.Args[1] = o.expr(n.Args[1], nil)
794 n.Args[1] = o.mapKeyTemp(n.Args[0].Type(), n.Args[1])
795 o.out = append(o.out, n)
798 // Clean temporaries from condition evaluation at
799 // beginning of loop body and after for statement.
803 n.Cond = o.exprInPlace(n.Cond)
804 n.Body.Prepend(o.cleanTempNoPop(t)...)
805 orderBlock(&n.Body, o.free)
806 n.Post = orderStmtInPlace(n.Post, o.free)
807 o.out = append(o.out, n)
810 // Clean temporaries from condition at
811 // beginning of both branches.
815 n.Cond = o.exprInPlace(n.Cond)
816 n.Body.Prepend(o.cleanTempNoPop(t)...)
817 n.Else.Prepend(o.cleanTempNoPop(t)...)
819 orderBlock(&n.Body, o.free)
820 orderBlock(&n.Else, o.free)
821 o.out = append(o.out, n)
824 n := n.(*ir.UnaryExpr)
826 n.X = o.expr(n.X, nil)
827 if !n.X.Type().IsEmptyInterface() {
828 base.FatalfAt(n.Pos(), "bad argument to panic: %L", n.X)
830 o.out = append(o.out, n)
834 // n.Right is the expression being ranged over.
835 // order it, and then make a copy if we need one.
836 // We almost always do, to ensure that we don't
837 // see any value changes made during the loop.
838 // Usually the copy is cheap (e.g., array pointer,
839 // chan, slice, string are all tiny).
840 // The exception is ranging over an array value
841 // (not a slice, not a pointer to array),
842 // which must make a copy to avoid seeing updates made during
843 // the range body. Ranging over an array value is uncommon though.
845 // Mark []byte(str) range expression to reuse string backing storage.
846 // It is safe because the storage cannot be mutated.
847 n := n.(*ir.RangeStmt)
848 if n.X.Op() == ir.OSTR2BYTES {
849 n.X.(*ir.ConvExpr).SetOp(ir.OSTR2BYTESTMP)
853 n.X = o.expr(n.X, nil)
856 xt := typecheck.RangeExprType(n.X.Type())
859 base.Fatalf("order.stmt range %v", n.Type())
861 case types.TARRAY, types.TSLICE:
862 if n.Value == nil || ir.IsBlank(n.Value) {
863 // for i := range x will only use x once, to compute len(x).
864 // No need to copy it.
869 case types.TCHAN, types.TSTRING:
870 // chan, string, slice, array ranges use value multiple times.
874 if r.Type().IsString() && r.Type() != types.Types[types.TSTRING] {
875 r = ir.NewConvExpr(base.Pos, ir.OCONV, nil, r)
876 r.SetType(types.Types[types.TSTRING])
877 r = typecheck.Expr(r)
884 // Preserve the body of the map clear pattern so it can
885 // be detected during walk. The loop body will not be used
886 // when optimizing away the range loop to a runtime call.
891 // copy the map value in case it is a map literal.
892 // TODO(rsc): Make tmp = literal expressions reuse tmp.
893 // For maps tmp is just one word so it hardly matters.
897 // n.Prealloc is the temp for the iterator.
898 // MapIterType contains pointers and needs to be zeroed.
899 n.Prealloc = o.newTemp(reflectdata.MapIterType(xt), true)
901 n.Key = o.exprInPlace(n.Key)
902 n.Value = o.exprInPlace(n.Value)
904 orderBlock(&n.Body, o.free)
906 o.out = append(o.out, n)
910 n := n.(*ir.ReturnStmt)
911 o.exprList(n.Results)
912 o.out = append(o.out, n)
914 // Special: clean case temporaries in each block entry.
915 // Select must enter one of its blocks, so there is no
916 // need for a cleaning at the end.
917 // Doubly special: evaluation order for select is stricter
918 // than ordinary expressions. Even something like p.c
919 // has to be hoisted into a temporary, so that it cannot be
920 // reordered after the channel evaluation for a different
921 // case (if p were nil, then the timing of the fault would
924 n := n.(*ir.SelectStmt)
926 for _, ncas := range n.Cases {
930 // Append any new body prologue to ninit.
931 // The next loop will insert ninit into nbody.
932 if len(ncas.Init()) != 0 {
933 base.Fatalf("order select ninit")
940 ir.Dump("select case", r)
941 base.Fatalf("unknown op in select %v", r.Op())
945 r := r.(*ir.AssignListStmt)
946 recv := r.Rhs[0].(*ir.UnaryExpr)
947 recv.X = o.expr(recv.X, nil)
948 if !ir.IsAutoTmp(recv.X) {
949 recv.X = o.copyExpr(recv.X)
951 init := ir.TakeInit(r)
954 do := func(i int, t *types.Type) {
959 // If this is case x := <-ch or case x, y := <-ch, the case has
960 // the ODCL nodes to declare x and y. We want to delay that
961 // declaration (and possible allocation) until inside the case body.
962 // Delete the ODCL nodes here and recreate them inside the body below.
964 if len(init) > 0 && init[0].Op() == ir.ODCL && init[0].(*ir.Decl).X == n {
967 dcl := typecheck.Stmt(ir.NewDecl(base.Pos, ir.ODCL, n.(*ir.Name)))
968 ncas.PtrInit().Append(dcl)
970 tmp := o.newTemp(t, t.HasPointers())
971 as := typecheck.Stmt(ir.NewAssignStmt(base.Pos, n, typecheck.Conv(tmp, n.Type())))
972 ncas.PtrInit().Append(as)
975 do(0, recv.X.Type().Elem())
976 do(1, types.Types[types.TBOOL])
978 ir.DumpList("ninit", r.Init())
979 base.Fatalf("ninit on select recv")
981 orderBlock(ncas.PtrInit(), o.free)
984 r := r.(*ir.SendStmt)
985 if len(r.Init()) != 0 {
986 ir.DumpList("ninit", r.Init())
987 base.Fatalf("ninit on select send")
991 // r->left is c, r->right is x, both are always evaluated.
992 r.Chan = o.expr(r.Chan, nil)
994 if !ir.IsAutoTmp(r.Chan) {
995 r.Chan = o.copyExpr(r.Chan)
997 r.Value = o.expr(r.Value, nil)
998 if !ir.IsAutoTmp(r.Value) {
999 r.Value = o.copyExpr(r.Value)
1003 // Now that we have accumulated all the temporaries, clean them.
1004 // Also insert any ninit queued during the previous loop.
1005 // (The temporary cleaning must follow that ninit work.)
1006 for _, cas := range n.Cases {
1007 orderBlock(&cas.Body, o.free)
1008 cas.Body.Prepend(o.cleanTempNoPop(t)...)
1010 // TODO(mdempsky): Is this actually necessary?
1011 // walkSelect appears to walk Ninit.
1012 cas.Body.Prepend(ir.TakeInit(cas)...)
1015 o.out = append(o.out, n)
1018 // Special: value being sent is passed as a pointer; make it addressable.
1020 n := n.(*ir.SendStmt)
1022 n.Chan = o.expr(n.Chan, nil)
1023 n.Value = o.expr(n.Value, nil)
1024 if base.Flag.Cfg.Instrumenting {
1025 // Force copying to the stack so that (chan T)(nil) <- x
1026 // is still instrumented as a read of x.
1027 n.Value = o.copyExpr(n.Value)
1029 n.Value = o.addrTemp(n.Value)
1031 o.out = append(o.out, n)
1034 // TODO(rsc): Clean temporaries more aggressively.
1035 // Note that because walkSwitch will rewrite some of the
1036 // switch into a binary search, this is not as easy as it looks.
1037 // (If we ran that code here we could invoke order.stmt on
1038 // the if-else chain instead.)
1039 // For now just clean all the temporaries at the end.
1040 // In practice that's fine.
1042 n := n.(*ir.SwitchStmt)
1043 if base.Debug.Libfuzzer != 0 && !hasDefaultCase(n) {
1044 // Add empty "default:" case for instrumentation.
1045 n.Cases = append(n.Cases, ir.NewCaseStmt(base.Pos, nil, nil))
1049 n.Tag = o.expr(n.Tag, nil)
1050 for _, ncas := range n.Cases {
1051 o.exprListInPlace(ncas.List)
1052 orderBlock(&ncas.Body, o.free)
1055 o.out = append(o.out, n)
// hasDefaultCase reports whether the switch statement n contains a
// default clause (a case with an empty expression list).
// NOTE(review): excerpt — the return statements and closing braces are elided.
1062 func hasDefaultCase(n *ir.SwitchStmt) bool {
1063 for _, ncas := range n.Cases {
// An empty List marks the default clause.
1064 if len(ncas.List) == 0 {
1071 // exprList orders the expression list l into o.
// NOTE(review): excerpt — the binding of s and the loop header are elided.
1072 func (o *orderState) exprList(l ir.Nodes) {
1075 s[i] = o.expr(s[i], nil)
1079 // exprListInPlace orders the expression list l but saves
1080 // the side effects on the individual expression ninit lists.
// NOTE(review): excerpt — the binding of s and the loop header are elided.
1081 func (o *orderState) exprListInPlace(l ir.Nodes) {
1084 s[i] = o.exprInPlace(s[i])
// exprNoLHS is the no-lhs form of expr; it exists so that o.edit can cache
// a single closure over it (see the orderState.edit field).
1088 func (o *orderState) exprNoLHS(n ir.Node) ir.Node {
1089 return o.expr(n, nil)
1092 // expr orders a single expression, appending side
1093 // effects to o.out as needed.
1094 // If this is part of an assignment lhs = *np, lhs is given.
1095 // Otherwise lhs == nil. (When lhs != nil it may be possible
1096 // to avoid copying the result of the expression to a temporary.)
1097 // The result of expr MUST be assigned back to n, e.g.
1098 // n.Left = o.expr(n.Left, lhs)
// NOTE(review): excerpt — the entire body of expr (which wraps expr1) is
// elided here (numbering jumps 1099→1109).
1099 func (o *orderState) expr(n, lhs ir.Node) ir.Node {
1109 func (o *orderState) expr1(n, lhs ir.Node) ir.Node {
1115 o.edit = o.exprNoLHS // create closure once
1117 ir.EditChildren(n, o.edit)
1120 // Addition of strings turns into a function call.
1121 // Allocate a temporary to hold the strings.
1122 // Fewer than 5 strings use direct runtime helpers.
1124 n := n.(*ir.AddStringExpr)
1127 if len(n.List) > 5 {
1128 t := types.NewArray(types.Types[types.TSTRING], int64(len(n.List)))
1129 n.Prealloc = o.newTemp(t, false)
1132 // Mark string(byteSlice) arguments to reuse byteSlice backing
1133 // buffer during conversion. String concatenation does not
1134 // memorize the strings for later use, so it is safe.
1135 // However, we can do it only if there is at least one non-empty string literal.
1136 // Otherwise if all other arguments are empty strings,
1137 // concatstrings will return the reference to the temp string
1142 for _, n1 := range n.List {
1143 hasbyte = hasbyte || n1.Op() == ir.OBYTES2STR
1144 haslit = haslit || n1.Op() == ir.OLITERAL && len(ir.StringVal(n1)) != 0
1147 if haslit && hasbyte {
1148 for _, n2 := range n.List {
1149 if n2.Op() == ir.OBYTES2STR {
1150 n2 := n2.(*ir.ConvExpr)
1151 n2.SetOp(ir.OBYTES2STRTMP)
1158 n := n.(*ir.IndexExpr)
1159 n.X = o.expr(n.X, nil)
1160 n.Index = o.expr(n.Index, nil)
1164 // Enforce that any []byte slices we are not copying
1165 // can not be changed before the map index by forcing
1166 // the map index to happen immediately following the
1167 // conversions. See copyExpr a few lines below.
1168 needCopy = mapKeyReplaceStrConv(n.Index)
1170 if base.Flag.Cfg.Instrumenting {
1171 // Race detector needs the copy.
1176 // key must be addressable
1177 n.Index = o.mapKeyTemp(n.X.Type(), n.Index)
1179 return o.copyExpr(n)
1183 // concrete type (not interface) argument might need an addressable
1184 // temporary to pass to the runtime conversion routine.
1186 n := n.(*ir.ConvExpr)
1187 n.X = o.expr(n.X, nil)
1188 if n.X.Type().IsInterface() {
1191 if _, _, needsaddr := convFuncName(n.X.Type(), n.Type()); needsaddr || isStaticCompositeLiteral(n.X) {
1192 // Need a temp if we need to pass the address to the conversion function.
1193 // We also process static composite literal node here, making a named static global
1194 // whose address we can put directly in an interface (see OCONVIFACE case in walk).
1195 n.X = o.addrTemp(n.X)
1200 n := n.(*ir.ConvExpr)
1201 if n.Type().IsKind(types.TUNSAFEPTR) && n.X.Type().IsKind(types.TUINTPTR) && (n.X.Op() == ir.OCALLFUNC || n.X.Op() == ir.OCALLINTER || n.X.Op() == ir.OCALLMETH) {
1202 call := n.X.(*ir.CallExpr)
1203 // When reordering unsafe.Pointer(f()) into a separate
1204 // statement, the conversion and function call must stay
1205 // together. See golang.org/issue/15329.
1208 if lhs == nil || lhs.Op() != ir.ONAME || base.Flag.Cfg.Instrumenting {
1209 return o.copyExpr(n)
1212 n.X = o.expr(n.X, nil)
1216 case ir.OANDAND, ir.OOROR:
1221 // if r { // or !r, for OROR
1226 n := n.(*ir.LogicalExpr)
1227 r := o.newTemp(n.Type(), false)
1229 // Evaluate left-hand side.
1230 lhs := o.expr(n.X, nil)
1231 o.out = append(o.out, typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, lhs)))
1233 // Evaluate right-hand side, save generated code.
1238 rhs := o.expr(n.Y, nil)
1239 o.out = append(o.out, typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, rhs)))
1244 // If left-hand side doesn't cause a short-circuit, issue right-hand side.
1245 nif := ir.NewIfStmt(base.Pos, r, nil, nil)
1246 if n.Op() == ir.OANDAND {
1251 o.out = append(o.out, nif)
1274 // len([]rune(s)) is rewritten to runtime.countrunes(s) later.
1275 conv := n.(*ir.UnaryExpr).X.(*ir.ConvExpr)
1276 conv.X = o.expr(conv.X, nil)
1281 if lhs == nil || lhs.Op() != ir.ONAME || base.Flag.Cfg.Instrumenting {
1282 return o.copyExpr(n)
1287 // Check for append(x, make([]T, y)...) .
1288 n := n.(*ir.CallExpr)
1289 if isAppendOfMake(n) {
1290 n.Args[0] = o.expr(n.Args[0], nil) // order x
1291 mk := n.Args[1].(*ir.MakeExpr)
1292 mk.Len = o.expr(mk.Len, nil) // order y
1297 if lhs == nil || lhs.Op() != ir.ONAME && !ir.SameSafeExpr(lhs, n.Args[0]) {
1298 return o.copyExpr(n)
1302 case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR:
1303 n := n.(*ir.SliceExpr)
1304 n.X = o.expr(n.X, nil)
1305 n.Low = o.cheapExpr(o.expr(n.Low, nil))
1306 n.High = o.cheapExpr(o.expr(n.High, nil))
1307 n.Max = o.cheapExpr(o.expr(n.Max, nil))
1308 if lhs == nil || lhs.Op() != ir.ONAME && !ir.SameSafeExpr(lhs, n.X) {
1309 return o.copyExpr(n)
1314 n := n.(*ir.ClosureExpr)
1315 if n.Transient() && len(n.Func.ClosureVars) > 0 {
1316 n.Prealloc = o.newTemp(typecheck.ClosureType(n), false)
1321 n := n.(*ir.SelectorExpr)
1322 n.X = o.expr(n.X, nil)
1324 t := typecheck.PartialCallType(n)
1325 n.Prealloc = o.newTemp(t, false)
1330 n := n.(*ir.CompLitExpr)
1333 t := types.NewArray(n.Type().Elem(), n.Len)
1334 n.Prealloc = o.newTemp(t, false)
1338 case ir.ODOTTYPE, ir.ODOTTYPE2:
1339 n := n.(*ir.TypeAssertExpr)
1340 n.X = o.expr(n.X, nil)
1341 if !types.IsDirectIface(n.Type()) || base.Flag.Cfg.Instrumenting {
1342 return o.copyExprClear(n)
1347 n := n.(*ir.UnaryExpr)
1348 n.X = o.expr(n.X, nil)
1349 return o.copyExprClear(n)
1351 case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
1352 n := n.(*ir.BinaryExpr)
1353 n.X = o.expr(n.X, nil)
1354 n.Y = o.expr(n.Y, nil)
1359 // Mark string(byteSlice) arguments to reuse byteSlice backing
1360 // buffer during conversion. String comparison does not
1361 // memorize the strings for later use, so it is safe.
1362 if n.X.Op() == ir.OBYTES2STR {
1363 n.X.(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP)
1365 if n.Y.Op() == ir.OBYTES2STR {
1366 n.Y.(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP)
1369 case t.IsStruct() || t.IsArray():
1370 // for complex comparisons, we need both args to be
1371 // addressable so we can pass them to the runtime.
1372 n.X = o.addrTemp(n.X)
1373 n.Y = o.addrTemp(n.Y)
1378 // Order map by converting:
1385 // m := map[int]int{}
1389 // Then order the result.
1390 // Without this special case, order would otherwise compute all
1391 // the keys and values before storing any of them to the map.
1393 n := n.(*ir.CompLitExpr)
1395 statics := entries[:0]
1396 var dynamics []*ir.KeyExpr
1397 for _, r := range entries {
1398 r := r.(*ir.KeyExpr)
1400 if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) {
1401 dynamics = append(dynamics, r)
1405 // Recursively ordering some static entries can change them to dynamic;
1406 // e.g., OCONVIFACE nodes. See #31777.
1407 r = o.expr(r, nil).(*ir.KeyExpr)
1408 if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) {
1409 dynamics = append(dynamics, r)
1413 statics = append(statics, r)
1417 if len(dynamics) == 0 {
1421 // Emit the creation of the map (with all its static entries).
1422 m := o.newTemp(n.Type(), false)
1423 as := ir.NewAssignStmt(base.Pos, m, n)
1427 // Emit eval+insert of dynamic entries, one at a time.
1428 for _, r := range dynamics {
1429 as := ir.NewAssignStmt(base.Pos, ir.NewIndexExpr(base.Pos, m, r.Key), r.Value)
1430 typecheck.Stmt(as) // Note: this converts the OINDEX to an OINDEXMAP
1436 // No return - type-assertions above. Each case must return for itself.
1439 // as2func orders OAS2FUNC nodes. It creates temporaries to ensure left-to-right assignment.
1440 // The caller should order the right-hand side of the assignment before calling order.as2func.
1444 // tmp1, tmp2, tmp3 = ...
1445 // a, b, a = tmp1, tmp2, tmp3
1446 // This is necessary to ensure left to right assignment order.
1447 func (o *orderState) as2func(n *ir.AssignListStmt) {
1448 results := n.Rhs[0].Type()
1449 as := ir.NewAssignListStmt(n.Pos(), ir.OAS2, nil, nil)
1450 for i, nl := range n.Lhs {
1451 if !ir.IsBlank(nl) {
1452 typ := results.Field(i).Type
1453 tmp := o.newTemp(typ, typ.HasPointers())
1455 as.Lhs = append(as.Lhs, nl)
1456 as.Rhs = append(as.Rhs, tmp)
1460 o.out = append(o.out, n)
1461 o.stmt(typecheck.Stmt(as))
1464 // as2ok orders OAS2XXX with ok.
1465 // Just like as2func, this also adds temporaries to ensure left-to-right assignment.
1466 func (o *orderState) as2ok(n *ir.AssignListStmt) {
1467 as := ir.NewAssignListStmt(n.Pos(), ir.OAS2, nil, nil)
1469 do := func(i int, typ *types.Type) {
1470 if nl := n.Lhs[i]; !ir.IsBlank(nl) {
1471 var tmp ir.Node = o.newTemp(typ, typ.HasPointers())
1473 as.Lhs = append(as.Lhs, nl)
1475 // The "ok" result is an untyped boolean according to the Go
1476 // spec. We need to explicitly convert it to the LHS type in
1477 // case the latter is a defined boolean type (#8475).
1478 tmp = typecheck.Conv(tmp, nl.Type())
1480 as.Rhs = append(as.Rhs, tmp)
1484 do(0, n.Rhs[0].Type())
1485 do(1, types.Types[types.TBOOL])
1487 o.out = append(o.out, n)
1488 o.stmt(typecheck.Stmt(as))
// wrapGoDefer_prgen is a counter used by wrapGoDefer to generate unique
// names ("…·dwrap·N") for the wrapper closures it creates.
var wrapGoDefer_prgen int
// wrapGoDefer wraps the target of a "go" or "defer" statement with a
// new "function with no arguments" closure, e.g. rewriting a call with
// copied arguments into
//
//	defer func() { f(x1, y1) }()
//
// This is primarily to enable a quicker bringup of defers under the
// new register ABI; by doing this conversion, we can simplify the
// code in the runtime that invokes defers on the panic path.
//
// NOTE(review): several lines of this function (closing braces, some
// switch case labels, and its final statements) fall outside this chunk
// of the file; comments below describe only what is visible.
func (o *orderState) wrapGoDefer(n *ir.GoDeferStmt) {
	var callX ir.Node // thing being called
	var callArgs []ir.Node // call arguments
	var keepAlive []*ir.Name // KeepAlive list from call, if present

	// A helper to recreate the call within the closure.
	var mkNewCall func(pos src.XPos, op ir.Op, fun ir.Node, args []ir.Node) ir.Node

	// Defer calls come in many shapes and sizes; not all of them
	// are ir.CallExpr's. Examine the type to see what we're dealing with.
	switch x := call.(type) {
		keepAlive = x.KeepAlive
		mkNewCall = func(pos src.XPos, op ir.Op, fun ir.Node, args []ir.Node) ir.Node {
			newcall := ir.NewCallExpr(pos, op, fun, args)
			// Preserve variadic (...) marking on the rebuilt call.
			newcall.IsDDD = x.IsDDD
			return ir.Node(newcall)
	case *ir.UnaryExpr: // ex: OCLOSE
		callArgs = []ir.Node{x.X}
		mkNewCall = func(pos src.XPos, op ir.Op, fun ir.Node, args []ir.Node) ir.Node {
				panic("internal error, expecting single arg")
			return ir.Node(ir.NewUnaryExpr(pos, op, args[0]))
	case *ir.BinaryExpr: // ex: OCOPY
		callArgs = []ir.Node{x.X, x.Y}
		mkNewCall = func(pos src.XPos, op ir.Op, fun ir.Node, args []ir.Node) ir.Node {
				panic("internal error, expecting two args")
			return ir.Node(ir.NewBinaryExpr(pos, op, args[0], args[1]))
		panic("unhandled op")

	// No need to wrap if called func has no args, no receiver, and no results.
	// However in the case of "defer func() { ... }()" we need to
	// protect against the possibility of directClosureCall rewriting
	// things so that the call does have arguments.
	//
	// Do wrap method calls (OCALLMETH, OCALLINTER), because it has
	// a receiver (treated like an argument).
	//
	// Also do wrap builtin functions, because they may be expanded to
	// calls with arguments (e.g. ORECOVER).
	//
	// TODO: maybe not wrap if the called function has no arguments and
	// only in-register results?
	if len(callArgs) == 0 && call.Op() == ir.OCALLFUNC && callX.Type().NumResults() == 0 {
		if c, ok := call.(*ir.CallExpr); ok && callX != nil && callX.Op() == ir.OCLOSURE {
			cloFunc := callX.(*ir.ClosureExpr).Func
			// Not wrapping: keep the closure from being treated as
			// directly called, and protect it from directClosureCall.
			cloFunc.SetClosureCalled(false)
			c.PreserveClosure = true

	if c, ok := call.(*ir.CallExpr); ok {
		// To simplify things, turn f(a, b, []T{c, d, e}...) back
		// into f(a, b, c, d, e) -- when the final call is run through the
		// type checker below, it will rebuild the proper slice literal.

	// This is set to true if the closure we're generating escapes
	// (needs heap allocation).
	cloEscapes := func() bool {
		if n.Op() == ir.OGO {
			// For "go", assume that all closures escape (with an
			// exception for the runtime, which doesn't permit
			// heap-allocated closures).
			return base.Ctxt.Pkgpath != "runtime"
		// For defer, just use whatever result escape analysis
		// has determined for the defer.
		return n.Esc() != ir.EscNever

	// A helper for making a copy of an argument.
	mkArgCopy := func(arg ir.Node) *ir.Name {
		argCopy := o.copyExpr(arg)
		// The value of 128 below is meant to be consistent with code
		// in escape analysis that picks byval/byaddr based on size.
		argCopy.SetByval(argCopy.Type().Size() <= 128 || cloEscapes)

	// getUnsafeArg looks for an unsafe.Pointer arg that has been
	// previously captured into the call's keepalive list, returning
	// the name node for it if found.
	getUnsafeArg := func(arg ir.Node) *ir.Name {
		// Look for uintptr(unsafe.Pointer(name))
		if arg.Op() != ir.OCONVNOP {
		if !arg.Type().IsUintptr() {
		if !arg.(*ir.ConvExpr).X.Type().IsUnsafePtr() {
		arg = arg.(*ir.ConvExpr).X
		argname, ok := arg.(*ir.Name)
		// Only return a match from the call's keepalive list.
		for i := range keepAlive {
			if argname == keepAlive[i] {

	// Copy the arguments to the function into temps.
	//
	// For calls with uintptr(unsafe.Pointer(...)) args that are being
	// kept alive (see code in (*orderState).call that does this), use
	// the existing arg copy instead of creating a new copy.
	unsafeArgs := make([]*ir.Name, len(callArgs))
	origArgs := callArgs
	var newNames []*ir.Name
	for i := range callArgs {
		var argname *ir.Name
		unsafeArgName := getUnsafeArg(arg)
		if unsafeArgName != nil {
			// arg has been copied already, use keepalive copy
			argname = unsafeArgName
			unsafeArgs[i] = unsafeArgName
			argname = mkArgCopy(arg)
		newNames = append(newNames, argname)

	// Deal with cases where the function expression (what we're
	// calling) is not a simple function symbol.
	var methSelectorExpr *ir.SelectorExpr
	case callX.Op() == ir.ODOTMETH || callX.Op() == ir.ODOTINTER:
		// Handle defer of a method call, e.g. "defer v.MyMethod(x, y)"
		n := callX.(*ir.SelectorExpr)
		// Copy the receiver now, at the point of the defer/go statement.
		n.X = mkArgCopy(n.X)
		methSelectorExpr = n
		if callX.Op() == ir.ODOTINTER {
			// Currently for "defer i.M()" if i is nil it panics at the
			// point of defer statement, not when deferred function is called.
			// (I think there is an issue discussing what is the intended
			// behavior but I cannot find it.)
			// We need to do the nil check outside of the wrapper.
			tab := typecheck.Expr(ir.NewUnaryExpr(base.Pos, ir.OITAB, n.X))
			c := ir.NewUnaryExpr(n.Pos(), ir.OCHECKNIL, tab)
	case !(callX.Op() == ir.ONAME && callX.(*ir.Name).Class == ir.PFUNC):
		// Deal with "defer returnsafunc()(x, y)" (for
		// example) by copying the callee expression.
		fnExpr = mkArgCopy(callX)
		if callX.Op() == ir.OCLOSURE {
			// For "defer func(...)", in addition to copying the
			// closure into a temp, mark it as no longer directly
			// called.
			callX.(*ir.ClosureExpr).Func.SetClosureCalled(false)

	// Create a new no-argument function that we'll hand off to defer.
	var noFuncArgs []*ir.Field
	noargst := ir.NewFuncType(base.Pos, nil, noFuncArgs, nil)
	outerfn := ir.CurFunc
	// Generate a unique hidden-closure name; see wrapGoDefer_prgen.
	wrapname := fmt.Sprintf("%v·dwrap·%d", outerfn, wrapGoDefer_prgen)
	sym := types.LocalPkg.Lookup(wrapname)
	fn := typecheck.DeclFunc(sym, noargst)
	fn.SetIsHiddenClosure(true)

	// helper for capturing reference to a var declared in an outer scope.
	capName := func(pos src.XPos, fn *ir.Func, n *ir.Name) *ir.Name {
		cv := ir.CaptureName(pos, fn, n)
		return typecheck.Expr(cv).(*ir.Name)

	// Call args (x1, y1) need to be captured as part of the newly
	// created closure.
	newCallArgs := []ir.Node{}
	for i := range newNames {
		arg = capName(callArgs[i].Pos(), fn, newNames[i])
		if unsafeArgs[i] != nil {
			// Rebuild the original uintptr(unsafe.Pointer(...))
			// conversion around the captured copy.
			arg = ir.NewConvExpr(arg.Pos(), origArgs[i].Op(), origArgs[i].Type(), arg)
		newCallArgs = append(newCallArgs, arg)

	// Also capture the function or method expression (if needed) into
	// the closure.
	callX = capName(callX.Pos(), fn, fnExpr)
	if methSelectorExpr != nil {
		methSelectorExpr.X = capName(callX.Pos(), fn, methSelectorExpr.X.(*ir.Name))
	ir.FinishCaptureNames(n.Pos(), outerfn, fn)

	// This flags a builtin as opposed to a regular call.
	irregular := (call.Op() != ir.OCALLFUNC &&
		call.Op() != ir.OCALLMETH &&
		call.Op() != ir.OCALLINTER)

	// Construct new function body: f(x1, y1)
	newcall := mkNewCall(call.Pos(), op, callX, newCallArgs)

	// Type-check the result.
	typecheck.Call(newcall.(*ir.CallExpr))
	typecheck.Stmt(newcall)

	// Finalize body, register function on the main decls list.
	fn.Body = []ir.Node{newcall}
	typecheck.FinishFuncBody()
	typecheck.Target.Decls = append(typecheck.Target.Decls, fn)

	// Create closure expr
	clo := ir.NewClosureExpr(n.Pos(), fn)
	clo.SetType(fn.Type())

	// Set escape properties for closure.
	if n.Op() == ir.OGO {
		// For "go", assume that the closure is going to escape
		// (with an exception for the runtime, which doesn't
		// permit heap-allocated closures).
		if base.Ctxt.Pkgpath != "runtime" {
			clo.SetEsc(ir.EscHeap)
		// For defer, just use whatever result escape analysis
		// has determined for the defer.
		if n.Esc() == ir.EscNever {
			clo.SetTransient(true)
			clo.SetEsc(ir.EscNone)

	// Create new top level call to closure over argless function.
	topcall := ir.NewCallExpr(n.Pos(), ir.OCALL, clo, []ir.Node{})
	typecheck.Call(topcall)

	// Tag the call to insure that directClosureCall doesn't undo our work.
	topcall.PreserveClosure = true
	fn.SetClosureCalled(false)

	// Finally, point the defer statement at the newly generated call.