1 // Copyright 2012 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
10 "cmd/compile/internal/base"
11 "cmd/compile/internal/escape"
12 "cmd/compile/internal/ir"
13 "cmd/compile/internal/reflectdata"
14 "cmd/compile/internal/staticinit"
15 "cmd/compile/internal/typecheck"
16 "cmd/compile/internal/types"
21 // Rewrite tree to use separate statements to enforce
22 // order of evaluation. Makes walk easier, because it
23 // can (after this runs) reorder at will within an expression.
25 // Rewrite m[k] op= r into m[k] = m[k] op r if op is / or %.
27 // Introduce temporaries as needed by runtime routines.
28 // For example, the map runtime routines take the map key
29 // by reference, so make sure all map keys are addressable
30 // by copying them to temporaries as needed.
31 // The same is true for channel operations.
33 // Arrange that map index expressions only appear in direct
34 // assignments x = m[k] or m[k] = x, never in larger expressions.
36 // Arrange that receive expressions only appear in direct assignments
37 // x = <-c or as standalone statements <-c, never in larger expressions.
39 // TODO(rsc): The temporary introduction during multiple assignments
40 // should be moved into this file, so that the temporaries can be cleaned
41 // and so that conversions implicit in the OAS2FUNC and OAS2RECV
42 // nodes can be made explicit and then have their temporaries cleaned.
44 // TODO(rsc): Goto and multilevel break/continue can jump over
45 // inserted VARKILL annotations. Work out a way to handle these.
46 // The current implementation is safe, in that it will execute correctly.
47 // But it won't reuse temporaries as aggressively as it might, and
48 // it can result in unnecessary zeroing of those variables in the function
49 // prologue.
51 // orderState holds state during the ordering process.
52 type orderState struct {
53 out []ir.Node // list of generated statements
54 temp []*ir.Name // stack of temporary variables
// Temporaries popped from the stack are recycled through free rather than
// discarded; the key is the type's LongString, double-checked with
// types.Identical when reused (see newTemp).
55 free map[string][]*ir.Name // free list of unused temporaries, by type.LongString().
56 edit func(ir.Node) ir.Node // cached closure of o.exprNoLHS
59 // Order rewrites fn.Nbody to apply the ordering constraints
60 // described in the comment at the top of the file.
61 func order(fn *ir.Func) {
// Debug dump of the function body before ordering (presumably gated by a
// compiler debug flag on the elided line above — confirm in full source).
63 s := fmt.Sprintf("\nbefore order %v", fn.Sym())
64 ir.DumpList(s, fn.Body)
// Start with an empty free list of temporaries for this function.
67 orderBlock(&fn.Body, map[string][]*ir.Name{})
70 // append typechecks stmt and appends it to out.
71 func (o *orderState) append(stmt ir.Node) {
// Every generated statement is typechecked before being emitted so that
// later passes can rely on fully-typed IR.
72 o.out = append(o.out, typecheck.Stmt(stmt))
75 // newTemp allocates a new temporary with the given type,
76 // pushes it onto the temp stack, and returns it.
77 // If clear is true, newTemp emits code to zero the temporary.
78 func (o *orderState) newTemp(t *types.Type, clear bool) *ir.Name {
// Try to reuse a free temporary of the same type before allocating a new one.
80 // Note: LongString is close to the type equality we want,
81 // but not exactly. We still need to double-check with types.Identical.
85 if types.Identical(t, n.Type()) {
// Zero the temporary with "v = nil-RHS" assignment when requested.
97 o.append(ir.NewAssignStmt(base.Pos, v, nil))
// Record the temporary on the stack so popTemp can recycle it later.
100 o.temp = append(o.temp, v)
104 // copyExpr behaves like newTemp but also emits
105 // code to initialize the temporary to the value n.
106 func (o *orderState) copyExpr(n ir.Node) *ir.Name {
// clear=false: the temp is immediately assigned, so no zeroing is needed.
107 return o.copyExpr1(n, false)
110 // copyExprClear is like copyExpr but clears the temp before assignment.
111 // It is provided for use when the evaluation of tmp = n turns into
112 // a function call that is passed a pointer to the temporary as the output space.
113 // If the call blocks before tmp has been written,
114 // the garbage collector will still treat the temporary as live,
115 // so we must zero it before entering that call.
116 // Today, this only happens for channel receive operations.
117 // (The other candidate would be map access, but map access
118 // returns a pointer to the result data instead of taking a pointer
// (continued: ... to be filled in — the remainder of this sentence is on an
// elided line in the original source.)
120 func (o *orderState) copyExprClear(n ir.Node) *ir.Name {
121 return o.copyExpr1(n, true)
// copyExpr1 is the shared implementation of copyExpr and copyExprClear:
// it allocates a temporary (optionally zeroed) and emits tmp = n.
124 func (o *orderState) copyExpr1(n ir.Node, clear bool) *ir.Name {
126 v := o.newTemp(t, clear)
127 o.append(ir.NewAssignStmt(base.Pos, v, n))
131 // cheapExpr returns a cheap version of n.
132 // The definition of cheap is that n is a variable or constant.
133 // If not, cheapExpr allocates a new tmp, emits tmp = n,
134 // and then returns tmp.
135 func (o *orderState) cheapExpr(n ir.Node) ir.Node {
// Names, literals, and nil are already cheap: return them unchanged.
141 case ir.ONAME, ir.OLITERAL, ir.ONIL:
// len/cap of a cheap operand stays cheap; only the operand is copied.
143 case ir.OLEN, ir.OCAP:
144 n := n.(*ir.UnaryExpr)
145 l := o.cheapExpr(n.X)
// Rebuild the unary expression over the cheapened operand and retypecheck.
149 a := ir.SepCopy(n).(*ir.UnaryExpr)
151 return typecheck.Expr(a)
157 // safeExpr returns a safe version of n.
158 // The definition of safe is that n can appear multiple times
159 // without violating the semantics of the original program,
160 // and that assigning to the safe version has the same effect
161 // as assigning to the original n.
162 //
163 // The intended use is to apply to x when rewriting x += y into x = x + y.
164 func (o *orderState) safeExpr(n ir.Node) ir.Node {
// Names, literals, and nil can be repeated freely.
166 case ir.ONAME, ir.OLITERAL, ir.ONIL:
169 case ir.OLEN, ir.OCAP:
170 n := n.(*ir.UnaryExpr)
175 a := ir.SepCopy(n).(*ir.UnaryExpr)
177 return typecheck.Expr(a)
// Field selection: safe if the base is safe (base operand handling is on
// elided lines — presumably safeExpr on n.X; confirm in full source).
180 n := n.(*ir.SelectorExpr)
185 a := ir.SepCopy(n).(*ir.SelectorExpr)
187 return typecheck.Expr(a)
// Selection through a pointer: the pointer itself only needs to be cheap,
// since re-evaluating the cheap pointer is harmless.
190 n := n.(*ir.SelectorExpr)
191 l := o.cheapExpr(n.X)
195 a := ir.SepCopy(n).(*ir.SelectorExpr)
197 return typecheck.Expr(a)
// Pointer dereference: likewise, cheapen the pointer.
200 n := n.(*ir.StarExpr)
201 l := o.cheapExpr(n.X)
205 a := ir.SepCopy(n).(*ir.StarExpr)
207 return typecheck.Expr(a)
209 case ir.OINDEX, ir.OINDEXMAP:
210 n := n.(*ir.IndexExpr)
// Indexing an array value (not slice/map): handled on elided lines.
212 if n.X.Type().IsArray() {
217 r := o.cheapExpr(n.Index)
// If neither base nor index needed a copy, the original is already safe.
218 if l == n.X && r == n.Index {
221 a := ir.SepCopy(n).(*ir.IndexExpr)
224 return typecheck.Expr(a)
// Any other operator cannot be made safe; this is an internal error.
227 base.Fatalf("order.safeExpr %v", n.Op())
228 return nil // not reached
232 // isaddrokay reports whether it is okay to pass n's address to runtime routines.
233 // Taking the address of a variable makes the liveness and optimization analyses
234 // lose track of where the variable's lifetime ends. To avoid hurting the analyses
235 // of ordinary stack variables, those are not 'isaddrokay'. Temporaries are okay,
236 // because we emit explicit VARKILL instructions marking the end of those
237 // temporaries' lifetimes.
238 func isaddrokay(n ir.Node) bool {
// Addressable, and either not a plain name (e.g. an indexing expression),
// a global (PEXTERN), or a compiler-generated autotmp.
239 return ir.IsAddressable(n) && (n.Op() != ir.ONAME || n.(*ir.Name).Class == ir.PEXTERN || ir.IsAutoTmp(n))
242 // addrTemp ensures that n is okay to pass by address to runtime routines.
243 // If the original argument n is not okay, addrTemp creates a tmp, emits
244 // tmp = n, and then returns tmp.
245 // The result of addrTemp MUST be assigned back to n, e.g.
246 // n.Left = o.addrTemp(n.Left)
247 func (o *orderState) addrTemp(n ir.Node) ir.Node {
248 if n.Op() == ir.OLITERAL || n.Op() == ir.ONIL {
249 // TODO: expand this to all static composite literal nodes?
// Constants get a read-only static symbol instead of a stack temporary,
// initialized at compile time via the staticinit scheduler.
250 n = typecheck.DefaultLit(n, nil)
251 types.CalcSize(n.Type())
252 vstat := readonlystaticname(n.Type())
253 var s staticinit.Schedule
254 s.StaticAssign(vstat, 0, n, n.Type())
// A constant must be fully assignable statically; generated code here
// indicates a compiler bug.
256 base.Fatalf("staticassign of const generated code: %+v", n)
258 vstat = typecheck.Expr(vstat).(*ir.Name)
267 // mapKeyTemp prepares n to be a key in a map runtime call and returns n.
268 // It should only be used for map runtime calls which have *_fast* versions.
269 func (o *orderState) mapKeyTemp(t *types.Type, n ir.Node) ir.Node {
270 // Most map calls need to take the address of the key.
271 // Exception: map*_fast* calls. See golang.org/issue/19015.
// Only the slow (generic) map routines take the key by address; fast
// variants take the key by value, so no temporary is required for them.
272 if mapfast(t) == mapslow {
278 // mapKeyReplaceStrConv replaces OBYTES2STR by OBYTES2STRTMP
279 // in n to avoid string allocations for keys in map lookups.
280 // Returns a bool that signals if a modification was made.
281 //
282 // For
283 //
284 // x = m[T1{... Tn{..., string(k), ...}]
285 // where k is []byte, T1 to Tn is a nesting of struct and array literals,
286 // the allocation of backing bytes for the string can be avoided
287 // by reusing the []byte backing array. These are special cases
288 // for avoiding allocations when converting byte slices to strings.
289 // It would be nice to handle these generally, but because
290 // []byte keys are not allowed in maps, the use of string(k)
291 // comes up in important cases in practice. See issue 3512.
292 func mapKeyReplaceStrConv(n ir.Node) bool {
// Direct string(k) conversion: rewrite to the temporary-reusing form.
296 n := n.(*ir.ConvExpr)
297 n.SetOp(ir.OBYTES2STRTMP)
// Struct literal: recurse into each keyed field value.
300 n := n.(*ir.CompLitExpr)
301 for _, elem := range n.List {
302 elem := elem.(*ir.StructKeyExpr)
303 if mapKeyReplaceStrConv(elem.Value) {
// Array literal: recurse into each element, unwrapping OKEY entries.
308 n := n.(*ir.CompLitExpr)
309 for _, elem := range n.List {
310 if elem.Op() == ir.OKEY {
311 elem = elem.(*ir.KeyExpr).Value
313 if mapKeyReplaceStrConv(elem) {
323 // markTemp returns the top of the temporary variable stack.
// The returned ordermarker is later passed to popTemp/cleanTemp to release
// every temporary allocated after this point.
324 func (o *orderState) markTemp() ordermarker {
325 return ordermarker(len(o.temp))
328 // popTemp pops temporaries off the stack until reaching the mark,
329 // which must have been returned by markTemp.
330 func (o *orderState) popTemp(mark ordermarker) {
// Each popped temporary goes onto the per-type free list so a later
// newTemp of an identical type can reuse it instead of allocating.
331 for _, n := range o.temp[mark:] {
332 key := n.Type().LongString()
333 o.free[key] = append(o.free[key], n)
// Truncate the stack back to the mark.
335 o.temp = o.temp[:mark]
338 // cleanTempNoPop emits VARKILL instructions to *out
339 // for each temporary above the mark on the temporary stack.
340 // It does not pop the temporaries from the stack.
341 func (o *orderState) cleanTempNoPop(mark ordermarker) []ir.Node {
// Walk the stack top-down so temporaries die in reverse allocation order.
343 for i := len(o.temp) - 1; i >= int(mark); i-- {
345 out = append(out, typecheck.Stmt(ir.NewUnaryExpr(base.Pos, ir.OVARKILL, n)))
350 // cleanTemp emits VARKILL instructions for each temporary above the
351 // mark on the temporary stack and removes them from the stack.
// Combines cleanTempNoPop (emit kills) with popTemp (recycle temps);
// the popTemp call is on an elided line below.
352 func (o *orderState) cleanTemp(top ordermarker) {
353 o.out = append(o.out, o.cleanTempNoPop(top)...)
357 // stmtList orders each of the statements in the list.
358 func (o *orderState) stmtList(l ir.Nodes) {
// Before ordering each statement, try the OMAKESLICE+OCOPY fusion on the
// remaining window of the list.
361 orderMakeSliceCopy(s[i:])
366 // orderMakeSliceCopy matches the pattern:
367 // m = OMAKESLICE([]T, x); OCOPY(m, s)
368 // and rewrites it to:
369 // m = OMAKESLICECOPY([]T, x, s); nil
370 func orderMakeSliceCopy(s []ir.Node) {
// Skip the optimization when optimizations are off (-N) or under
// race/msan instrumentation.
371 if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
374 if len(s) < 2 || s[0] == nil || s[0].Op() != ir.OAS || s[1] == nil || s[1].Op() != ir.OCOPY {
378 as := s[0].(*ir.AssignStmt)
379 cp := s[1].(*ir.BinaryExpr)
380 if as.Y == nil || as.Y.Op() != ir.OMAKESLICE || ir.IsBlank(as.X) ||
381 as.X.Op() != ir.ONAME || cp.X.Op() != ir.ONAME || cp.Y.Op() != ir.ONAME ||
382 as.X.Name() != cp.X.Name() || cp.X.Name() == cp.Y.Name() {
383 // The line above this one is correct with the differing equality operators:
384 // we want as.X and cp.X to be the same name,
385 // but we want the initial data to be coming from a different name.
389 mk := as.Y.(*ir.MakeExpr)
// Only safe when the slice escapes (EscNone would be stack-allocated —
// confirm exact escape condition against full source) and the make has a
// length but no separate capacity.
390 if mk.Esc() == ir.EscNone || mk.Len == nil || mk.Cap != nil {
393 mk.SetOp(ir.OMAKESLICECOPY)
395 // Set bounded when m = OMAKESLICE([]T, len(s)); OCOPY(m, s)
396 mk.SetBounded(mk.Len.Op() == ir.OLEN && ir.SameSafeExpr(mk.Len.(*ir.UnaryExpr).X, cp.Y))
397 as.Y = typecheck.Expr(mk)
398 s[1] = nil // remove separate copy call
401 // edge inserts coverage instrumentation for libfuzzer.
402 func (o *orderState) edge() {
// No-op unless compiling with -d=libfuzzer.
403 if base.Debug.Libfuzzer == 0 {
407 // Create a new uint8 counter to be allocated in section
408 // __libfuzzer_extra_counters.
409 counter := staticinit.StaticName(types.Types[types.TUINT8])
410 counter.SetLibfuzzerExtraCounter(true)
// counter += 1; the racy increment is acceptable for coverage counters.
413 incr := ir.NewAssignOpStmt(base.Pos, ir.OADD, counter, ir.NewInt(1))
417 // orderBlock orders the block of statements in n into a new slice,
418 // and then replaces the old slice in n with the new slice.
419 // free is a map that can be used to obtain temporary variables by type.
420 func orderBlock(n *ir.Nodes, free map[string][]*ir.Name) {
// All temporaries created while ordering this block are killed and
// recycled at the end of the block.
423 mark := order.markTemp()
426 order.cleanTemp(mark)
430 // exprInPlace orders the side effects in *np and
431 // leaves them as the init list of the final *np.
432 // The result of exprInPlace MUST be assigned back to n, e.g.
433 // n.Left = o.exprInPlace(n.Left)
434 func (o *orderState) exprInPlace(n ir.Node) ir.Node {
// A fresh nested orderState collects the side-effect statements, which
// are then attached to n as its init list rather than emitted to o.out.
437 n = order.expr(n, nil)
438 n = ir.InitExpr(order.out, n)
440 // insert new temporaries from order
441 // at head of outer list.
442 o.temp = append(o.temp, order.temp...)
446 // orderStmtInPlace orders the side effects of the single statement *np
447 // and replaces it with the resulting statement list.
448 // The result of orderStmtInPlace MUST be assigned back to n, e.g.
449 // n.Left = orderStmtInPlace(n.Left)
450 // free is a map that can be used to obtain temporary variables by type.
451 func orderStmtInPlace(n ir.Node, free map[string][]*ir.Name) ir.Node {
452 var order orderState
454 mark := order.markTemp()
456 order.cleanTemp(mark)
// Wrap the ordered statements in a block so the caller can substitute a
// single node for the original statement.
457 return ir.NewBlockStmt(src.NoXPos, order.out)
460 // init moves n's init list to o.out.
461 func (o *orderState) init(n ir.Node) {
462 if ir.MayBeShared(n) {
463 // For concurrency safety, don't mutate potentially shared nodes.
464 // First, ensure that no work is required here.
465 if len(n.Init()) > 0 {
466 base.Fatalf("order.init shared node with ninit")
// TakeInit both returns and clears n's init list before the statements
// are ordered into o.out.
470 o.stmtList(ir.TakeInit(n))
473 // call orders the call expression n.
474 // n.Op is OCALLMETH/OCALLFUNC/OCALLINTER or a builtin like OCOPY.
475 func (o *orderState) call(nn ir.Node) {
476 if len(nn.Init()) > 0 {
477 // Caller should have already called o.init(nn).
478 base.Fatalf("%v with unexpected ninit", nn.Op())
481 // Builtin functions.
482 if nn.Op() != ir.OCALLFUNC && nn.Op() != ir.OCALLMETH && nn.Op() != ir.OCALLINTER {
// Each builtin shape orders its own operands; the case labels are on
// elided lines (unary, binary, and make-like forms are visible below).
483 switch n := nn.(type) {
485 base.Fatalf("unexpected call: %+v", n)
487 n.X = o.expr(n.X, nil)
489 n.X = o.expr(n.X, nil)
491 n.X = o.expr(n.X, nil)
492 n.Y = o.expr(n.Y, nil)
494 n.Len = o.expr(n.Len, nil)
495 n.Cap = o.expr(n.Cap, nil)
// Real function/method/interface calls from here on.
502 n := nn.(*ir.CallExpr)
503 typecheck.FixVariadicCall(n)
504 n.X = o.expr(n.X, nil)
507 if n.Op() == ir.OCALLINTER {
510 keepAlive := func(arg ir.Node) {
511 // If the argument is really a pointer being converted to uintptr,
512 // arrange for the pointer to be kept alive until the call returns,
513 // by copying it into a temp and marking that temp
514 // still alive when we pop the temp stack.
515 if arg.Op() == ir.OCONVNOP {
516 arg := arg.(*ir.ConvExpr)
517 if arg.X.Type().IsUnsafePtr() {
518 x := o.copyExpr(arg.X)
520 x.SetAddrtaken(true) // ensure SSA keeps the x variable
521 n.KeepAlive = append(n.KeepAlive, x)
526 // Check for "unsafe-uintptr" tag provided by escape analysis.
527 for i, param := range n.X.Type().Params().FieldSlice() {
528 if param.Note == escape.UnsafeUintptrNote || param.Note == escape.UintptrEscapesNote {
// A variadic slice of uintptr args: keep each element's pointer alive.
529 if arg := n.Args[i]; arg.Op() == ir.OSLICELIT {
530 arg := arg.(*ir.CompLitExpr)
531 for _, elt := range arg.List {
541 // mapAssign appends n to o.out.
// When the LHS is a map index, the RHS is first made panic-safe via
// safeMapRHS so a panicking RHS cannot leave a partial map insert
// (see issue 22881).
542 func (o *orderState) mapAssign(n ir.Node) {
545 base.Fatalf("order.mapAssign %v", n.Op())
548 n := n.(*ir.AssignStmt)
549 if n.X.Op() == ir.OINDEXMAP {
550 n.Y = o.safeMapRHS(n.Y)
552 o.out = append(o.out, n)
// Same treatment for op= assignments.
554 n := n.(*ir.AssignOpStmt)
555 if n.X.Op() == ir.OINDEXMAP {
556 n.Y = o.safeMapRHS(n.Y)
558 o.out = append(o.out, n)
// safeMapRHS returns a version of r that is safe to evaluate as the RHS of
// a map assignment: it is cheapened so that evaluating it cannot panic
// after the map insert has begun.
562 func (o *orderState) safeMapRHS(r ir.Node) ir.Node {
563 // Make sure we evaluate the RHS before starting the map insert.
564 // We need to make sure the RHS won't panic. See issue 22881.
565 if r.Op() == ir.OAPPEND {
// For append, cheapening the arguments (rather than the whole call)
// is sufficient; s is presumably the argument list from an elided line.
566 r := r.(*ir.CallExpr)
568 for i, n := range s {
569 s[i] = o.cheapExpr(n)
573 return o.cheapExpr(r)
576 // stmt orders the statement n, appending to o.out.
577 // Temporaries created during the statement are cleaned
578 // up using VARKILL instructions as possible.
// NOTE(review): this chunk of the file is sparsely sampled; several case
// labels and statements between the visible lines are elided. Comments
// below describe only what the visible lines establish.
579 func (o *orderState) stmt(n ir.Node) {
589 base.Fatalf("order.stmt %v", n.Op())
// Marker statements pass through unchanged.
591 case ir.OVARKILL, ir.OVARLIVE, ir.OINLMARK:
592 o.out = append(o.out, n)
// Simple assignment: the LHS is passed as the lhs hint when ordering the
// RHS, which may let expr avoid an extra temporary.
595 n := n.(*ir.AssignStmt)
597 n.X = o.expr(n.X, nil)
598 n.Y = o.expr(n.Y, n.X)
603 n := n.(*ir.AssignOpStmt)
605 n.X = o.expr(n.X, nil)
606 n.Y = o.expr(n.Y, nil)
608 if base.Flag.Cfg.Instrumenting || n.X.Op() == ir.OINDEXMAP && (n.AsOp == ir.ODIV || n.AsOp == ir.OMOD) {
609 // Rewrite m[k] op= r into m[k] = m[k] op r so
610 // that we can ensure that if op panics
611 // because r is zero, the panic happens before
612 // the map assignment.
613 // DeepCopy is a big hammer here, but safeExpr
614 // makes sure there is nothing too deep being copied.
615 l1 := o.safeExpr(n.X)
616 l2 := ir.DeepCopy(src.NoXPos, l1)
617 if l2.Op() == ir.OINDEXMAP {
618 l2 := l2.(*ir.IndexExpr)
622 r := o.expr(typecheck.Expr(ir.NewBinaryExpr(n.Pos(), n.AsOp, l2, n.Y)), nil)
623 as := typecheck.Stmt(ir.NewAssignStmt(n.Pos(), l1, r))
633 n := n.(*ir.AssignListStmt)
637 o.out = append(o.out, n)
640 // Special: avoid copy of func call n.Right
642 n := n.(*ir.AssignListStmt)
650 // Special: use temporary variables to hold result,
651 // so that runtime can take address of temporary.
652 // No temporary for blank assignment.
653 //
654 // OAS2MAPR: make sure key is addressable if needed,
655 // and make sure OINDEXMAP is not copied out.
656 case ir.OAS2DOTTYPE, ir.OAS2RECV, ir.OAS2MAPR:
657 n := n.(*ir.AssignListStmt)
661 switch r := n.Rhs[0]; r.Op() {
663 r := r.(*ir.TypeAssertExpr)
664 r.X = o.expr(r.X, nil)
666 r := r.(*ir.UnaryExpr)
667 r.X = o.expr(r.X, nil)
669 r := r.(*ir.IndexExpr)
670 r.X = o.expr(r.X, nil)
671 r.Index = o.expr(r.Index, nil)
672 // See similar conversion for OINDEXMAP below.
673 _ = mapKeyReplaceStrConv(r.Index)
674 r.Index = o.mapKeyTemp(r.X.Type(), r.Index)
676 base.Fatalf("order.stmt: %v", r.Op())
682 // Special: does not save n onto out.
684 n := n.(*ir.BlockStmt)
687 // Special: n->left is not an expression; save as is.
697 o.out = append(o.out, n)
699 // Special: handle call arguments.
700 case ir.OCALLFUNC, ir.OCALLINTER, ir.OCALLMETH:
701 n := n.(*ir.CallExpr)
704 o.out = append(o.out, n)
707 case ir.OCLOSE, ir.ORECV:
708 n := n.(*ir.UnaryExpr)
710 n.X = o.expr(n.X, nil)
711 o.out = append(o.out, n)
715 n := n.(*ir.BinaryExpr)
717 n.X = o.expr(n.X, nil)
718 n.Y = o.expr(n.Y, nil)
719 o.out = append(o.out, n)
722 case ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
723 n := n.(*ir.CallExpr)
726 o.out = append(o.out, n)
729 // Special: order arguments to inner call but not call itself.
730 case ir.ODEFER, ir.OGO:
731 n := n.(*ir.GoDeferStmt)
735 if objabi.Experiment.RegabiDefer {
738 o.out = append(o.out, n)
// Map delete: order the map and key; the key must be addressable for the
// slow mapdelete path.
742 n := n.(*ir.CallExpr)
744 n.Args[0] = o.expr(n.Args[0], nil)
745 n.Args[1] = o.expr(n.Args[1], nil)
746 n.Args[1] = o.mapKeyTemp(n.Args[0].Type(), n.Args[1])
747 o.out = append(o.out, n)
750 // Clean temporaries from condition evaluation at
751 // beginning of loop body and after for statement.
755 n.Cond = o.exprInPlace(n.Cond)
756 n.Body.Prepend(o.cleanTempNoPop(t)...)
757 orderBlock(&n.Body, o.free)
758 n.Post = orderStmtInPlace(n.Post, o.free)
759 o.out = append(o.out, n)
762 // Clean temporaries from condition at
763 // beginning of both branches.
767 n.Cond = o.exprInPlace(n.Cond)
768 n.Body.Prepend(o.cleanTempNoPop(t)...)
769 n.Else.Prepend(o.cleanTempNoPop(t)...)
771 orderBlock(&n.Body, o.free)
772 orderBlock(&n.Else, o.free)
773 o.out = append(o.out, n)
// panic: argument must already be an empty interface at this point.
776 n := n.(*ir.UnaryExpr)
778 n.X = o.expr(n.X, nil)
779 if !n.X.Type().IsEmptyInterface() {
780 base.FatalfAt(n.Pos(), "bad argument to panic: %L", n.X)
782 o.out = append(o.out, n)
786 // n.Right is the expression being ranged over.
787 // order it, and then make a copy if we need one.
788 // We almost always do, to ensure that we don't
789 // see any value changes made during the loop.
790 // Usually the copy is cheap (e.g., array pointer,
791 // chan, slice, string are all tiny).
792 // The exception is ranging over an array value
793 // (not a slice, not a pointer to array),
794 // which must make a copy to avoid seeing updates made during
795 // the range body. Ranging over an array value is uncommon though.
796 //
797 // Mark []byte(str) range expression to reuse string backing storage.
798 // It is safe because the storage cannot be mutated.
799 n := n.(*ir.RangeStmt)
800 if n.X.Op() == ir.OSTR2BYTES {
801 n.X.(*ir.ConvExpr).SetOp(ir.OSTR2BYTESTMP)
805 n.X = o.expr(n.X, nil)
808 xt := typecheck.RangeExprType(n.X.Type())
811 base.Fatalf("order.stmt range %v", n.Type())
813 case types.TARRAY, types.TSLICE:
814 if n.Value == nil || ir.IsBlank(n.Value) {
815 // for i := range x will only use x once, to compute len(x).
816 // No need to copy it.
821 case types.TCHAN, types.TSTRING:
822 // chan, string, slice, array ranges use value multiple times.
// Normalize named string types to plain string before copying.
826 if r.Type().IsString() && r.Type() != types.Types[types.TSTRING] {
827 r = ir.NewConvExpr(base.Pos, ir.OCONV, nil, r)
828 r.SetType(types.Types[types.TSTRING])
829 r = typecheck.Expr(r)
836 // Preserve the body of the map clear pattern so it can
837 // be detected during walk. The loop body will not be used
838 // when optimizing away the range loop to a runtime call.
843 // copy the map value in case it is a map literal.
844 // TODO(rsc): Make tmp = literal expressions reuse tmp.
845 // For maps tmp is just one word so it hardly matters.
849 // n.Prealloc is the temp for the iterator.
850 // MapIterType contains pointers and needs to be zeroed.
851 n.Prealloc = o.newTemp(reflectdata.MapIterType(xt), true)
853 n.Key = o.exprInPlace(n.Key)
854 n.Value = o.exprInPlace(n.Value)
856 orderBlock(&n.Body, o.free)
858 o.out = append(o.out, n)
862 n := n.(*ir.ReturnStmt)
863 o.exprList(n.Results)
864 o.out = append(o.out, n)
866 // Special: clean case temporaries in each block entry.
867 // Select must enter one of its blocks, so there is no
868 // need for a cleaning at the end.
869 // Doubly special: evaluation order for select is stricter
870 // than ordinary expressions. Even something like p.c
871 // has to be hoisted into a temporary, so that it cannot be
872 // reordered after the channel evaluation for a different
873 // case (if p were nil, then the timing of the fault would
876 n := n.(*ir.SelectStmt)
878 for _, ncas := range n.Cases {
882 // Append any new body prologue to ninit.
883 // The next loop will insert ninit into nbody.
884 if len(ncas.Init()) != 0 {
885 base.Fatalf("order select ninit")
892 ir.Dump("select case", r)
893 base.Fatalf("unknown op in select %v", r.Op())
// case x, ok := <-ch: hoist the channel into a temp if needed.
897 r := r.(*ir.AssignListStmt)
898 recv := r.Rhs[0].(*ir.UnaryExpr)
899 recv.X = o.expr(recv.X, nil)
900 if !ir.IsAutoTmp(recv.X) {
901 recv.X = o.copyExpr(recv.X)
903 init := ir.TakeInit(r)
// do rewrites the i'th receive result to go through a fresh temporary,
// with the real assignment delayed into the case body.
906 do := func(i int, t *types.Type) {
911 // If this is case x := <-ch or case x, y := <-ch, the case has
912 // the ODCL nodes to declare x and y. We want to delay that
913 // declaration (and possible allocation) until inside the case body.
914 // Delete the ODCL nodes here and recreate them inside the body below.
916 if len(init) > 0 && init[0].Op() == ir.ODCL && init[0].(*ir.Decl).X == n {
919 dcl := typecheck.Stmt(ir.NewDecl(base.Pos, ir.ODCL, n.(*ir.Name)))
920 ncas.PtrInit().Append(dcl)
922 tmp := o.newTemp(t, t.HasPointers())
923 as := typecheck.Stmt(ir.NewAssignStmt(base.Pos, n, typecheck.Conv(tmp, n.Type())))
924 ncas.PtrInit().Append(as)
927 do(0, recv.X.Type().Elem())
928 do(1, types.Types[types.TBOOL])
930 ir.DumpList("ninit", r.Init())
931 base.Fatalf("ninit on select recv")
933 orderBlock(ncas.PtrInit(), o.free)
// case ch <- v: both channel and value must live in temporaries so
// their evaluation cannot be reordered across other cases.
936 r := r.(*ir.SendStmt)
937 if len(r.Init()) != 0 {
938 ir.DumpList("ninit", r.Init())
939 base.Fatalf("ninit on select send")
943 // r->left is c, r->right is x, both are always evaluated.
944 r.Chan = o.expr(r.Chan, nil)
946 if !ir.IsAutoTmp(r.Chan) {
947 r.Chan = o.copyExpr(r.Chan)
949 r.Value = o.expr(r.Value, nil)
950 if !ir.IsAutoTmp(r.Value) {
951 r.Value = o.copyExpr(r.Value)
955 // Now that we have accumulated all the temporaries, clean them.
956 // Also insert any ninit queued during the previous loop.
957 // (The temporary cleaning must follow that ninit work.)
958 for _, cas := range n.Cases {
959 orderBlock(&cas.Body, o.free)
960 cas.Body.Prepend(o.cleanTempNoPop(t)...)
962 // TODO(mdempsky): Is this actually necessary?
963 // walkSelect appears to walk Ninit.
964 cas.Body.Prepend(ir.TakeInit(cas)...)
967 o.out = append(o.out, n)
970 // Special: value being sent is passed as a pointer; make it addressable.
972 n := n.(*ir.SendStmt)
974 n.Chan = o.expr(n.Chan, nil)
975 n.Value = o.expr(n.Value, nil)
976 if base.Flag.Cfg.Instrumenting {
977 // Force copying to the stack so that (chan T)(nil) <- x
978 // is still instrumented as a read of x.
979 n.Value = o.copyExpr(n.Value)
981 n.Value = o.addrTemp(n.Value)
983 o.out = append(o.out, n)
986 // TODO(rsc): Clean temporaries more aggressively.
987 // Note that because walkSwitch will rewrite some of the
988 // switch into a binary search, this is not as easy as it looks.
989 // (If we ran that code here we could invoke order.stmt on
990 // the if-else chain instead.)
991 // For now just clean all the temporaries at the end.
992 // In practice that's fine.
994 n := n.(*ir.SwitchStmt)
995 if base.Debug.Libfuzzer != 0 && !hasDefaultCase(n) {
996 // Add empty "default:" case for instrumentation.
997 n.Cases = append(n.Cases, ir.NewCaseStmt(base.Pos, nil, nil))
1001 n.Tag = o.expr(n.Tag, nil)
1002 for _, ncas := range n.Cases {
1003 o.exprListInPlace(ncas.List)
1004 orderBlock(&ncas.Body, o.free)
1007 o.out = append(o.out, n)
// hasDefaultCase reports whether switch statement n has a default case
// (a case clause with an empty expression list).
1014 func hasDefaultCase(n *ir.SwitchStmt) bool {
1015 for _, ncas := range n.Cases {
1016 if len(ncas.List) == 0 {
1023 // exprList orders the expression list l into o.
1024 func (o *orderState) exprList(l ir.Nodes) {
// Side effects go to o.out; each element is replaced by its ordered form.
1027 s[i] = o.expr(s[i], nil)
1031 // exprListInPlace orders the expression list l but saves
1032 // the side effects on the individual expression ninit lists.
1033 func (o *orderState) exprListInPlace(l ir.Nodes) {
1036 s[i] = o.exprInPlace(s[i])
// exprNoLHS is expr with no assignment-destination hint; it exists so a
// single closure of it can be cached in o.edit for ir.EditChildren.
1040 func (o *orderState) exprNoLHS(n ir.Node) ir.Node {
1041 return o.expr(n, nil)
1044 // expr orders a single expression, appending side
1045 // effects to o.out as needed.
1046 // If this is part of an assignment lhs = *np, lhs is given.
1047 // Otherwise lhs == nil. (When lhs != nil it may be possible
1048 // to avoid copying the result of the expression to a temporary.)
1049 // The result of expr MUST be assigned back to n, e.g.
1050 // n.Left = o.expr(n.Left, lhs)
// This is a thin wrapper; the real work is in expr1 (elided body lines
// presumably handle nil and position bookkeeping — confirm in full source).
1051 func (o *orderState) expr(n, lhs ir.Node) ir.Node {
// expr1 implements expr: it recursively orders n's children, hoisting
// side effects into o.out and introducing temporaries where runtime
// routines require addressable or stable operands.
// NOTE(review): this chunk is sparsely sampled; several case labels
// between the visible lines are elided.
1061 func (o *orderState) expr1(n, lhs ir.Node) ir.Node {
// Default handling: order all children with the cached edit closure.
1067 o.edit = o.exprNoLHS // create closure once
1069 ir.EditChildren(n, o.edit)
1072 // Addition of strings turns into a function call.
1073 // Allocate a temporary to hold the strings.
1074 // Fewer than 5 strings use direct runtime helpers.
1076 n := n.(*ir.AddStringExpr)
// More than 5 operands: preallocate the backing array for the []string
// argument to the concatstrings runtime call.
1079 if len(n.List) > 5 {
1080 t := types.NewArray(types.Types[types.TSTRING], int64(len(n.List)))
1081 n.Prealloc = o.newTemp(t, false)
1084 // Mark string(byteSlice) arguments to reuse byteSlice backing
1085 // buffer during conversion. String concatenation does not
1086 // memorize the strings for later use, so it is safe.
1087 // However, we can do it only if there is at least one non-empty string literal.
1088 // Otherwise if all other arguments are empty strings,
1089 // concatstrings will return the reference to the temp string
1094 for _, n1 := range n.List {
1095 hasbyte = hasbyte || n1.Op() == ir.OBYTES2STR
1096 haslit = haslit || n1.Op() == ir.OLITERAL && len(ir.StringVal(n1)) != 0
1099 if haslit && hasbyte {
1100 for _, n2 := range n.List {
1101 if n2.Op() == ir.OBYTES2STR {
1102 n2 := n2.(*ir.ConvExpr)
1103 n2.SetOp(ir.OBYTES2STRTMP)
// Map index read: key may need a temp, and the result must be copied out
// because the runtime returns a pointer into the map.
1110 n := n.(*ir.IndexExpr)
1111 n.X = o.expr(n.X, nil)
1112 n.Index = o.expr(n.Index, nil)
1116 // Enforce that any []byte slices we are not copying
1117 // can not be changed before the map index by forcing
1118 // the map index to happen immediately following the
1119 // conversions. See copyExpr a few lines below.
1120 needCopy = mapKeyReplaceStrConv(n.Index)
1122 if base.Flag.Cfg.Instrumenting {
1123 // Race detector needs the copy.
1128 // key must be addressable
1129 n.Index = o.mapKeyTemp(n.X.Type(), n.Index)
1131 return o.copyExpr(n)
1135 // concrete type (not interface) argument might need an addressable
1136 // temporary to pass to the runtime conversion routine.
1138 n := n.(*ir.ConvExpr)
1139 n.X = o.expr(n.X, nil)
1140 if n.X.Type().IsInterface() {
1143 if _, needsaddr := convFuncName(n.X.Type(), n.Type()); needsaddr || isStaticCompositeLiteral(n.X) {
1144 // Need a temp if we need to pass the address to the conversion function.
1145 // We also process static composite literal node here, making a named static global
1146 // whose address we can put directly in an interface (see OCONVIFACE case in walk).
1147 n.X = o.addrTemp(n.X)
1152 n := n.(*ir.ConvExpr)
1153 if n.Type().IsKind(types.TUNSAFEPTR) && n.X.Type().IsKind(types.TUINTPTR) && (n.X.Op() == ir.OCALLFUNC || n.X.Op() == ir.OCALLINTER || n.X.Op() == ir.OCALLMETH) {
1154 call := n.X.(*ir.CallExpr)
1155 // When reordering unsafe.Pointer(f()) into a separate
1156 // statement, the conversion and function call must stay
1157 // together. See golang.org/issue/15329.
1160 if lhs == nil || lhs.Op() != ir.ONAME || base.Flag.Cfg.Instrumenting {
1161 return o.copyExpr(n)
1164 n.X = o.expr(n.X, nil)
1168 case ir.OANDAND, ir.OOROR:
1169 // ... = LHS && RHS
1170 //
1171 // var r bool
1172 // r = LHS
1173 // if r { // or !r, for OROR
1178 n := n.(*ir.LogicalExpr)
1179 r := o.newTemp(n.Type(), false)
1181 // Evaluate left-hand side.
1182 lhs := o.expr(n.X, nil)
1183 o.out = append(o.out, typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, lhs)))
1185 // Evaluate right-hand side, save generated code.
1190 rhs := o.expr(n.Y, nil)
1191 o.out = append(o.out, typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, rhs)))
1196 // If left-hand side doesn't cause a short-circuit, issue right-hand side.
1197 nif := ir.NewIfStmt(base.Pos, r, nil, nil)
1198 if n.Op() == ir.OANDAND {
1203 o.out = append(o.out, nif)
1226 // len([]rune(s)) is rewritten to runtime.countrunes(s) later.
1227 conv := n.(*ir.UnaryExpr).X.(*ir.ConvExpr)
1228 conv.X = o.expr(conv.X, nil)
// Result must land in a temp unless the destination is already a name.
1233 if lhs == nil || lhs.Op() != ir.ONAME || base.Flag.Cfg.Instrumenting {
1234 return o.copyExpr(n)
1239 // Check for append(x, make([]T, y)...) .
1240 n := n.(*ir.CallExpr)
1241 if isAppendOfMake(n) {
1242 n.Args[0] = o.expr(n.Args[0], nil) // order x
1243 mk := n.Args[1].(*ir.MakeExpr)
1244 mk.Len = o.expr(mk.Len, nil) // order y
// append may be done in place only when assigned back to its own
// first argument (x = append(x, ...)).
1249 if lhs == nil || lhs.Op() != ir.ONAME && !ir.SameSafeExpr(lhs, n.Args[0]) {
1250 return o.copyExpr(n)
1254 case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR:
1255 n := n.(*ir.SliceExpr)
1256 n.X = o.expr(n.X, nil)
1257 n.Low = o.cheapExpr(o.expr(n.Low, nil))
1258 n.High = o.cheapExpr(o.expr(n.High, nil))
1259 n.Max = o.cheapExpr(o.expr(n.Max, nil))
1260 if lhs == nil || lhs.Op() != ir.ONAME && !ir.SameSafeExpr(lhs, n.X) {
1261 return o.copyExpr(n)
// Non-escaping closures with captured variables get a preallocated
// stack temporary for the closure struct.
1266 n := n.(*ir.ClosureExpr)
1267 if n.Transient() && len(n.Func.ClosureVars) > 0 {
1268 n.Prealloc = o.newTemp(typecheck.ClosureType(n), false)
// Method value: likewise preallocate the partial-call struct.
1273 n := n.(*ir.SelectorExpr)
1274 n.X = o.expr(n.X, nil)
1276 t := typecheck.PartialCallType(n)
1277 n.Prealloc = o.newTemp(t, false)
// Slice literal: preallocate the backing array.
1282 n := n.(*ir.CompLitExpr)
1285 t := types.NewArray(n.Type().Elem(), n.Len)
1286 n.Prealloc = o.newTemp(t, false)
1290 case ir.ODOTTYPE, ir.ODOTTYPE2:
1291 n := n.(*ir.TypeAssertExpr)
1292 n.X = o.expr(n.X, nil)
// Non-direct-iface assertions call into the runtime with an output
// pointer, so the temp must be cleared first (see copyExprClear).
1293 if !types.IsDirectIface(n.Type()) || base.Flag.Cfg.Instrumenting {
1294 return o.copyExprClear(n)
// Channel receive: always through a cleared temp (output pointer).
1299 n := n.(*ir.UnaryExpr)
1300 n.X = o.expr(n.X, nil)
1301 return o.copyExprClear(n)
1303 case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
1304 n := n.(*ir.BinaryExpr)
1305 n.X = o.expr(n.X, nil)
1306 n.Y = o.expr(n.Y, nil)
1311 // Mark string(byteSlice) arguments to reuse byteSlice backing
1312 // buffer during conversion. String comparison does not
1313 // memorize the strings for later use, so it is safe.
1314 if n.X.Op() == ir.OBYTES2STR {
1315 n.X.(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP)
1317 if n.Y.Op() == ir.OBYTES2STR {
1318 n.Y.(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP)
1321 case t.IsStruct() || t.IsArray():
1322 // for complex comparisons, we need both args to be
1323 // addressable so we can pass them to the runtime.
1324 n.X = o.addrTemp(n.X)
1325 n.Y = o.addrTemp(n.Y)
1330 // Order map by converting:
1337 // m := map[int]int{}
1341 // Then order the result.
1342 // Without this special case, order would otherwise compute all
1343 // the keys and values before storing any of them to the map.
1345 n := n.(*ir.CompLitExpr)
// Partition entries into static (compile-time constant key and value)
// and dynamic; statics stay in the literal, dynamics become inserts.
1347 statics := entries[:0]
1348 var dynamics []*ir.KeyExpr
1349 for _, r := range entries {
1350 r := r.(*ir.KeyExpr)
1352 if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) {
1353 dynamics = append(dynamics, r)
1357 // Recursively ordering some static entries can change them to dynamic;
1358 // e.g., OCONVIFACE nodes. See #31777.
1359 r = o.expr(r, nil).(*ir.KeyExpr)
1360 if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) {
1361 dynamics = append(dynamics, r)
1365 statics = append(statics, r)
1369 if len(dynamics) == 0 {
1373 // Emit the creation of the map (with all its static entries).
1374 m := o.newTemp(n.Type(), false)
1375 as := ir.NewAssignStmt(base.Pos, m, n)
1379 // Emit eval+insert of dynamic entries, one at a time.
1380 for _, r := range dynamics {
1381 as := ir.NewAssignStmt(base.Pos, ir.NewIndexExpr(base.Pos, m, r.Key), r.Value)
1382 typecheck.Stmt(as) // Note: this converts the OINDEX to an OINDEXMAP
1388 // No return - type-assertions above. Each case must return for itself.
1391 // as2func orders OAS2FUNC nodes. It creates temporaries to ensure left-to-right assignment.
1392 // The caller should order the right-hand side of the assignment before calling order.as2func.
1396 //	tmp1, tmp2, tmp3 = ...
1397 //	a, b, a = tmp1, tmp2, tmp3
1398 // This is necessary to ensure left to right assignment order.
1399 func (o *orderState) as2func(n *ir.AssignListStmt) {
1400 	results := n.Rhs[0].Type()
// as accumulates the second assignment (a, b, a = tmp1, tmp2, tmp3),
// which copies the temporaries into the real destinations after the
// call in n has produced all of its results.
1401 	as := ir.NewAssignListStmt(n.Pos(), ir.OAS2, nil, nil)
1402 	for i, nl := range n.Lhs {
// Blank destinations need no temporary: nothing is stored to them,
// so they cannot participate in an aliasing hazard.
1403 		if !ir.IsBlank(nl) {
// The temporary has the type of the i'th function result; it needs
// pointer tracking iff that type contains pointers.
1404 			typ := results.Field(i).Type
1405 			tmp := o.newTemp(typ, typ.HasPointers())
1407 			as.Lhs = append(as.Lhs, nl)
1408 			as.Rhs = append(as.Rhs, tmp)
// Emit the (rewritten) original assignment first, then order the
// follow-up copy statement so its own needs are handled too.
1412 	o.out = append(o.out, n)
1413 	o.stmt(typecheck.Stmt(as))
1416 // as2ok orders OAS2XXX with ok.
1417 // Just like as2func, this also adds temporaries to ensure left-to-right assignment.
1418 func (o *orderState) as2ok(n *ir.AssignListStmt) {
// as is the follow-up copy statement that moves the temporaries into
// the user-visible destinations once n has executed.
1419 	as := ir.NewAssignListStmt(n.Pos(), ir.OAS2, nil, nil)
// do handles one of the two destinations (i==0: the value, i==1: the
// "ok" boolean), introducing a temporary unless the destination is blank.
1421 	do := func(i int, typ *types.Type) {
1422 		if nl := n.Lhs[i]; !ir.IsBlank(nl) {
1423 			var tmp ir.Node = o.newTemp(typ, typ.HasPointers())
1425 			as.Lhs = append(as.Lhs, nl)
1427 			// The "ok" result is an untyped boolean according to the Go
1428 			// spec. We need to explicitly convert it to the LHS type in
1429 			// case the latter is a defined boolean type (#8475).
1430 			tmp = typecheck.Conv(tmp, nl.Type())
1432 			as.Rhs = append(as.Rhs, tmp)
1436 	do(0, n.Rhs[0].Type())
1437 	do(1, types.Types[types.TBOOL])
// Emit the rewritten original statement, then order the copy-back.
1439 	o.out = append(o.out, n)
1440 	o.stmt(typecheck.Stmt(as))
1443 var wrapGoDefer_prgen int
1445 // wrapGoDefer wraps the target of a "go" or "defer" statement with a
1446 // new "function with no arguments" closure. Specifically, it converts
1453 // defer func() { f(x1, y1) }()
1455 // This is primarily to enable a quicker bringup of defers under the
1456 // new register ABI; by doing this conversion, we can simplify the
1457 // code in the runtime that invokes defers on the panic path.
1458 func (o *orderState) wrapGoDefer(n *ir.GoDeferStmt) {
1461 var callX ir.Node // thing being called
1462 var callArgs []ir.Node // call arguments
1464 // A helper to recreate the call within the closure.
1465 var mkNewCall func(pos src.XPos, op ir.Op, fun ir.Node, args []ir.Node) ir.Node
1467 // Defer calls come in many shapes and sizes; not all of them
1468 // are ir.CallExpr's. Examine the type to see what we're dealing with.
1469 switch x := call.(type) {
1473 mkNewCall = func(pos src.XPos, op ir.Op, fun ir.Node, args []ir.Node) ir.Node {
1474 newcall := ir.NewCallExpr(pos, op, fun, args)
1475 newcall.IsDDD = x.IsDDD
1476 return ir.Node(newcall)
1478 case *ir.UnaryExpr: // ex: OCLOSE
1479 callArgs = []ir.Node{x.X}
1480 mkNewCall = func(pos src.XPos, op ir.Op, fun ir.Node, args []ir.Node) ir.Node {
1482 panic("internal error, expecting single arg to close")
1484 return ir.Node(ir.NewUnaryExpr(pos, op, args[0]))
1486 case *ir.BinaryExpr: // ex: OCOPY
1487 callArgs = []ir.Node{x.X, x.Y}
1488 mkNewCall = func(pos src.XPos, op ir.Op, fun ir.Node, args []ir.Node) ir.Node {
1490 panic("internal error, expecting two args")
1492 return ir.Node(ir.NewBinaryExpr(pos, op, args[0], args[1]))
1495 panic("unhandled op")
1498 // No need to wrap if called func has no args. However in the case
1499 // of "defer func() { ... }()" we need to protect against the
1500 // possibility of directClosureCall rewriting things so that the
1501 // call does have arguments.
1502 if len(callArgs) == 0 {
1503 if c, ok := call.(*ir.CallExpr); ok && callX != nil && callX.Op() == ir.OCLOSURE {
1504 cloFunc := callX.(*ir.ClosureExpr).Func
1505 cloFunc.SetClosureCalled(false)
1506 c.PreserveClosure = true
1511 if c, ok := call.(*ir.CallExpr); ok {
1512 // To simplify things, turn f(a, b, []T{c, d, e}...) back
1513 // into f(a, b, c, d, e) -- when the final call is run through the
1514 // type checker below, it will rebuild the proper slice literal.
1520 // This is set to true if the closure we're generating escapes
1521 // (needs heap allocation).
1522 cloEscapes := func() bool {
1523 if n.Op() == ir.OGO {
1524 // For "go", assume that all closures escape (with an
1525 // exception for the runtime, which doesn't permit
1526 // heap-allocated closures).
1527 return base.Ctxt.Pkgpath != "runtime"
1529 // For defer, just use whatever result escape analysis
1530 // has determined for the defer.
1531 return n.Esc() != ir.EscNever
1534 // A helper for making a copy of an argument.
1535 mkArgCopy := func(arg ir.Node) *ir.Name {
1536 argCopy := o.copyExpr(arg)
1537 // The value of 128 below is meant to be consistent with code
1538 // in escape analysis that picks byval/byaddr based on size.
1539 argCopy.SetByval(argCopy.Type().Size() <= 128 || cloEscapes)
1543 unsafeArgs := make([]*ir.Name, len(callArgs))
1544 origArgs := callArgs
1546 // Copy the arguments to the function into temps.
1548 outerfn := ir.CurFunc
1549 var newNames []*ir.Name
1550 for i := range callArgs {
1552 var argname *ir.Name
1553 if arg.Op() == ir.OCONVNOP && arg.Type().IsUintptr() && arg.(*ir.ConvExpr).X.Type().IsUnsafePtr() {
1554 // No need for copy here; orderState.call() above has already inserted one.
1555 arg = arg.(*ir.ConvExpr).X
1556 argname = arg.(*ir.Name)
1557 unsafeArgs[i] = argname
1559 argname = mkArgCopy(arg)
1561 newNames = append(newNames, argname)
1564 // Deal with cases where the function expression (what we're
1565 // calling) is not a simple function symbol.
1567 var methSelectorExpr *ir.SelectorExpr
1570 case callX.Op() == ir.ODOTMETH || callX.Op() == ir.ODOTINTER:
1571 // Handle defer of a method call, e.g. "defer v.MyMethod(x, y)"
1572 n := callX.(*ir.SelectorExpr)
1573 n.X = mkArgCopy(n.X)
1574 methSelectorExpr = n
1575 case !(callX.Op() == ir.ONAME && callX.(*ir.Name).Class == ir.PFUNC):
1576 // Deal with "defer returnsafunc()(x, y)" (for
1577 // example) by copying the callee expression.
1578 fnExpr = mkArgCopy(callX)
1579 if callX.Op() == ir.OCLOSURE {
1580 // For "defer func(...)", in addition to copying the
1581 // closure into a temp, mark it as no longer directly
1583 callX.(*ir.ClosureExpr).Func.SetClosureCalled(false)
1588 // Create a new no-argument function that we'll hand off to defer.
1589 var noFuncArgs []*ir.Field
1590 noargst := ir.NewFuncType(base.Pos, nil, noFuncArgs, nil)
1592 wrapname := fmt.Sprintf("%v·dwrap·%d", outerfn, wrapGoDefer_prgen)
1593 sym := types.LocalPkg.Lookup(wrapname)
1594 fn := typecheck.DeclFunc(sym, noargst)
1595 fn.SetIsHiddenClosure(true)
1598 // helper for capturing reference to a var declared in an outer scope.
1599 capName := func(pos src.XPos, fn *ir.Func, n *ir.Name) *ir.Name {
1601 cv := ir.CaptureName(pos, fn, n)
1603 return typecheck.Expr(cv).(*ir.Name)
1606 // Call args (x1, y1) need to be captured as part of the newly
1608 newCallArgs := []ir.Node{}
1609 for i := range newNames {
1611 arg = capName(callArgs[i].Pos(), fn, newNames[i])
1612 if unsafeArgs[i] != nil {
1613 arg = ir.NewConvExpr(arg.Pos(), origArgs[i].Op(), origArgs[i].Type(), arg)
1615 newCallArgs = append(newCallArgs, arg)
1617 // Also capture the function or method expression (if needed) into
1620 callX = capName(callX.Pos(), fn, fnExpr)
1622 if methSelectorExpr != nil {
1623 methSelectorExpr.X = capName(callX.Pos(), fn, methSelectorExpr.X.(*ir.Name))
1625 ir.FinishCaptureNames(pos, outerfn, fn)
1627 // This flags a builtin as opposed to a regular call.
1628 irregular := (call.Op() != ir.OCALLFUNC &&
1629 call.Op() != ir.OCALLMETH &&
1630 call.Op() != ir.OCALLINTER)
1632 // Construct new function body: f(x1, y1)
1637 newcall := mkNewCall(call.Pos(), op, callX, newCallArgs)
1639 // Type-check the result.
1641 typecheck.Call(newcall.(*ir.CallExpr))
1643 typecheck.Stmt(newcall)
1646 // Finalize body, register function on the main decls list.
1647 fn.Body = []ir.Node{newcall}
1648 typecheck.FinishFuncBody()
1650 typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
1652 // Create closure expr
1653 clo := ir.NewClosureExpr(pos, fn)
1655 clo.SetType(fn.Type())
1657 // Set escape properties for closure.
1658 if n.Op() == ir.OGO {
1659 // For "go", assume that the closure is going to escape
1660 // (with an exception for the runtime, which doesn't
1661 // permit heap-allocated closures).
1662 if base.Ctxt.Pkgpath != "runtime" {
1663 clo.SetEsc(ir.EscHeap)
1666 // For defer, just use whatever result escape analysis
1667 // has determined for the defer.
1668 if n.Esc() == ir.EscNever {
1669 clo.SetTransient(true)
1670 clo.SetEsc(ir.EscNone)
1674 // Create new top level call to closure over argless function.
1675 topcall := ir.NewCallExpr(pos, ir.OCALL, clo, []ir.Node{})
1676 typecheck.Call(topcall)
1678 // Tag the call to insure that directClosureCall doesn't undo our work.
1679 topcall.PreserveClosure = true
1681 fn.SetClosureCalled(false)
1683 // Finally, point the defer statement at the newly generated call.