// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The inlining facility makes 2 passes: first CanInline determines which
// functions are suitable for inlining, and, for those that are, it
// saves a copy of the body. Then InlineCalls walks each function body to
// expand calls to inlinable functions.
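//
// For a quick illustration (an editorial sketch, not part of the original
// comments): given
//
//	func add(x, y int) int { return x + y }
//	func caller() int { return add(1, 2) }
//
// the first pass (CanInline) records a copy of add's body in add.Inl, and the
// second pass (InlineCalls) rewrites the call add(1, 2) inside caller into an
// OINLCALL node carrying that body copy.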
// The Debug.l flag controls the aggressiveness. Note that main() swaps level 0 and 1,
// making 1 the default and -l disable. Additional levels (beyond -l) may be buggy and
// are not supported.
//      0: disabled
//      1: 80-nodes leaf functions, oneliners, panic, lazy typechecking (default)
//      2: (unassigned)
//      3: (unassigned)
//      4: allow non-leaf functions
//
// At some point this may get another default and become switch-offable with -N.
// The -d typecheckinl flag enables early typechecking of all imported bodies,
// which is useful to flush out bugs.
// The Debug.m flag enables diagnostic output. A single -m is useful for verifying
// which calls get inlined or not; more is for debugging, and may go away at any point.
35 "cmd/compile/internal/base"
36 "cmd/compile/internal/inline/inlheur"
37 "cmd/compile/internal/ir"
38 "cmd/compile/internal/logopt"
39 "cmd/compile/internal/pgo"
40 "cmd/compile/internal/typecheck"
41 "cmd/compile/internal/types"
// Inlining budget parameters, gathered in one place
const (
	inlineMaxBudget       = 80
	inlineExtraAppendCost = 0
	// default is to inline if there's at most one call. -l=4 overrides this by using 1 instead.
	inlineExtraCallCost  = 57              // 57 was benchmarked to provide the most benefit with no bad surprises; see https://github.com/golang/go/issues/19348#issuecomment-439370742
	inlineExtraPanicCost = 1               // do not penalize inlining panics.
	inlineExtraThrowCost = inlineMaxBudget // with current (2018-05/1.11) code, inlining runtime.throw does not help.

	inlineBigFunctionNodes   = 5000 // Functions with this many nodes are considered "big".
	inlineBigFunctionMaxCost = 20   // Max cost of inlinee when inlining into a "big" function.
)
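
// Editorial note on the arithmetic above (not part of the original comments):
// most nodes cost 1 against the inlineMaxBudget of 80, so a leaf function
// stays inlinable up to roughly 80 visited nodes, while each call to a
// non-inlinable, non-intrinsic function adds inlineExtraCallCost (57) and
// quickly exhausts the budget.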
var (
	// List of all hot callee nodes.
	// TODO(prattmic): Make this non-global.
	candHotCalleeMap = make(map[*pgo.IRNode]struct{})

	// List of all hot call sites. CallSiteInfo.Callee is always nil.
	// TODO(prattmic): Make this non-global.
	candHotEdgeMap = make(map[pgo.CallSiteInfo]struct{})

	// Threshold in percentage for hot callsite inlining.
	inlineHotCallSiteThresholdPercent float64

	// Threshold in CDF percentage for hot callsite inlining,
	// that is, for a threshold of X the hottest callsites that
	// make up the top X% of total edge weight will be
	// considered hot for inlining candidates.
	inlineCDFHotCallSiteThresholdPercent = float64(99)

	// Budget increased due to hotness.
	inlineHotMaxBudget int32 = 2000
)
// pgoInlinePrologue records the hot call sites from the IR graph.
func pgoInlinePrologue(p *pgo.Profile, decls []ir.Node) {
	if base.Debug.PGOInlineCDFThreshold != "" {
		if s, err := strconv.ParseFloat(base.Debug.PGOInlineCDFThreshold, 64); err == nil && s >= 0 && s <= 100 {
			inlineCDFHotCallSiteThresholdPercent = s
		} else {
			base.Fatalf("invalid PGOInlineCDFThreshold, must be between 0 and 100")
		}
	}

	var hotCallsites []pgo.NodeMapKey
	inlineHotCallSiteThresholdPercent, hotCallsites = hotNodesFromCDF(p)
	if base.Debug.PGODebug > 0 {
		fmt.Printf("hot-callsite-thres-from-CDF=%v\n", inlineHotCallSiteThresholdPercent)
	}

	if x := base.Debug.PGOInlineBudget; x != 0 {
		inlineHotMaxBudget = int32(x)
	}

	for _, n := range hotCallsites {
		// mark inlineable callees from hot edges
		if callee := p.WeightedCG.IRNodes[n.CalleeName]; callee != nil {
			candHotCalleeMap[callee] = struct{}{}
		}
		// mark hot call sites
		if caller := p.WeightedCG.IRNodes[n.CallerName]; caller != nil && caller.AST != nil {
			csi := pgo.CallSiteInfo{LineOffset: n.CallSiteOffset, Caller: caller.AST}
			candHotEdgeMap[csi] = struct{}{}
		}
	}

	if base.Debug.PGODebug >= 3 {
		fmt.Printf("hot-cg before inline in dot format:")
		p.PrintWeightedCallGraphDOT(inlineHotCallSiteThresholdPercent)
	}
}
// hotNodesFromCDF computes an edge weight threshold and the list of hot
// nodes that make up the given percentage of the CDF. The threshold, as
// a percent, is the lower bound of weight for nodes to be considered hot
// (currently only used in debug prints) (in case of equal weights,
// comparing with the threshold may not accurately reflect which nodes are
// considered hot).
func hotNodesFromCDF(p *pgo.Profile) (float64, []pgo.NodeMapKey) {
	nodes := make([]pgo.NodeMapKey, len(p.NodeMap))
	i := 0
	for n := range p.NodeMap {
		nodes[i] = n
		i++
	}
	sort.Slice(nodes, func(i, j int) bool {
		ni, nj := nodes[i], nodes[j]
		if wi, wj := p.NodeMap[ni].EWeight, p.NodeMap[nj].EWeight; wi != wj {
			return wi > wj // want larger weight first
		}
		// same weight, order by name/line number
		if ni.CallerName != nj.CallerName {
			return ni.CallerName < nj.CallerName
		}
		if ni.CalleeName != nj.CalleeName {
			return ni.CalleeName < nj.CalleeName
		}
		return ni.CallSiteOffset < nj.CallSiteOffset
	})
	cum := int64(0)
	for i, n := range nodes {
		w := p.NodeMap[n].EWeight
		cum += w
		if pgo.WeightInPercentage(cum, p.TotalEdgeWeight) > inlineCDFHotCallSiteThresholdPercent {
			// nodes[:i+1] to include the very last node that makes it to go over the threshold.
			// (Say, if the CDF threshold is 50% and one hot node takes 60% of weight, we want to
			// include that node instead of excluding it.)
			return pgo.WeightInPercentage(w, p.TotalEdgeWeight), nodes[:i+1]
		}
	}
	return 0, nodes
}
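
// Editorial worked example (not part of the original comments): with edges of
// weight 60, 30, and 10 and a CDF threshold of 50%, the cumulative weight
// crosses 50% at the first (heaviest) edge, so hotNodesFromCDF returns 60
// (that edge's weight in percent) as the threshold and nodes[:1] as the hot
// list.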
// InlinePackage finds functions that can be inlined and clones them before walk expands them.
func InlinePackage(p *pgo.Profile) {
	if base.Debug.PGOInline == 0 {
		p = nil
	}

	InlineDecls(p, typecheck.Target.Decls, true)

	// Perform a garbage collection of hidden closure functions that
	// are no longer reachable from top-level functions following
	// inlining. See #59404 and #59638 for more context.
	garbageCollectUnreferencedHiddenClosures()

	if base.Debug.DumpInlFuncProps != "" {
		inlheur.DumpFuncProps(nil, base.Debug.DumpInlFuncProps)
	}
}
// InlineDecls applies inlining to the given batch of declarations.
func InlineDecls(p *pgo.Profile, decls []ir.Node, doInline bool) {
	if p != nil {
		pgoInlinePrologue(p, decls)
	}

	doCanInline := func(n *ir.Func, recursive bool, numfns int) {
		if !recursive || numfns > 1 {
			// We allow inlining if there is no
			// recursion, or the recursion cycle is
			// across more than one function.
			CanInline(n, p)
		} else {
			if base.Flag.LowerM > 1 && n.OClosure == nil {
				fmt.Printf("%v: cannot inline %v: recursive\n", ir.Line(n), n.Nname)
			}
		}
	}
	ir.VisitFuncsBottomUp(decls, func(list []*ir.Func, recursive bool) {
		numfns := numNonClosures(list)
		// We visit functions within an SCC in fairly arbitrary order,
		// so by computing inlinability for all functions in the SCC
		// before performing any inlining, the results are less
		// sensitive to the order within the SCC (see #58905 for an
		// example).

		// First compute inlinability for all functions in the SCC ...
		for _, n := range list {
			doCanInline(n, recursive, numfns)
		}

		// ... then make a second pass to do inlining of calls.
		if doInline {
			for _, n := range list {
				InlineCalls(n, p)
			}
		}
	})
}
// garbageCollectUnreferencedHiddenClosures makes a pass over all the
// top-level (non-hidden-closure) functions looking for nested closure
// functions that are reachable, then sweeps through the Target.Decls
// list and marks any non-reachable hidden closure function as dead.
// See issues #59404 and #59638 for more context.
func garbageCollectUnreferencedHiddenClosures() {

	liveFuncs := make(map[*ir.Func]bool)

	var markLiveFuncs func(fn *ir.Func)
	markLiveFuncs = func(fn *ir.Func) {
		if liveFuncs[fn] {
			return
		}
		liveFuncs[fn] = true
		ir.Visit(fn, func(n ir.Node) {
			if clo, ok := n.(*ir.ClosureExpr); ok {
				markLiveFuncs(clo.Func)
			}
		})
	}
	for i := 0; i < len(typecheck.Target.Decls); i++ {
		if fn, ok := typecheck.Target.Decls[i].(*ir.Func); ok {
			if fn.IsHiddenClosure() {
				continue
			}
			markLiveFuncs(fn)
		}
	}
	for i := 0; i < len(typecheck.Target.Decls); i++ {
		if fn, ok := typecheck.Target.Decls[i].(*ir.Func); ok {
			if !fn.IsHiddenClosure() {
				continue
			}
			if fn.IsDeadcodeClosure() {
				continue
			}
			if liveFuncs[fn] {
				continue
			}
			fn.SetIsDeadcodeClosure(true)
			if base.Flag.LowerM > 2 {
				fmt.Printf("%v: unreferenced closure %v marked as dead\n", ir.Line(fn), fn)
			}
			if fn.Inl != nil && fn.LSym == nil {
				ir.InitLSym(fn, true)
			}
		}
	}
}
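
// Editorial example (not part of the original comments): a hidden closure
// whose only reference sat inside a call expression that inlining replaced
// wholesale is no longer reachable from any top-level function; the sweep
// above marks such a closure as dead so it is not compiled needlessly.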
// inlineBudget determines the max budget for function 'fn' prior to
// analyzing the hairiness of the body of 'fn'. We pass in the pgo
// profile if available, which can change the budget. If 'verbose' is
// set, then print a remark where we boost the budget due to PGO.
func inlineBudget(fn *ir.Func, profile *pgo.Profile, verbose bool) int32 {
	// Update the budget for profile-guided inlining.
	budget := int32(inlineMaxBudget)
	if profile != nil {
		if n, ok := profile.WeightedCG.IRNodes[ir.LinkFuncName(fn)]; ok {
			if _, ok := candHotCalleeMap[n]; ok {
				budget = int32(inlineHotMaxBudget)
				if verbose {
					fmt.Printf("hot-node enabled increased budget=%v for func=%v\n", budget, ir.PkgFuncName(fn))
				}
			}
		}
	}
	return budget
}
// CanInline determines whether fn is inlineable.
// If so, CanInline saves copies of fn.Body and fn.Dcl in fn.Inl.
// fn and fn.Body will already have been typechecked.
func CanInline(fn *ir.Func, profile *pgo.Profile) {
	if fn.Nname == nil {
		base.Fatalf("CanInline no nname %+v", fn)
	}

	if base.Debug.DumpInlFuncProps != "" {
		defer inlheur.DumpFuncProps(fn, base.Debug.DumpInlFuncProps)
	}

	var reason string // reason, if any, that the function was not inlined
	if base.Flag.LowerM > 1 || logopt.Enabled() {
		defer func() {
			if reason != "" {
				if base.Flag.LowerM > 1 {
					fmt.Printf("%v: cannot inline %v: %s\n", ir.Line(fn), fn.Nname, reason)
				}
				if logopt.Enabled() {
					logopt.LogOpt(fn.Pos(), "cannotInlineFunction", "inline", ir.FuncName(fn), reason)
				}
			}
		}()
	}

	reason = InlineImpossible(fn)
	if reason != "" {
		return
	}
	if fn.Typecheck() == 0 {
		base.Fatalf("CanInline on non-typechecked function %v", fn)
	}

	n := fn.Nname
	if n.Func.InlinabilityChecked() {
		return
	}
	defer n.Func.SetInlinabilityChecked(true)
	cc := int32(inlineExtraCallCost)
	if base.Flag.LowerL == 4 {
		cc = 1 // this appears to yield better performance than 0.
	}

	// Compute the inline budget for this function.
	budget := inlineBudget(fn, profile, base.Debug.PGODebug > 0)

	// At this point in the game the function we're looking at may
	// have "stale" autos, vars that still appear in the Dcl list, but
	// which no longer have any uses in the function body (due to
	// elimination by deadcode). We'd like to exclude these dead vars
	// when creating the "Inline.Dcl" field below; to accomplish this,
	// the hairyVisitor below builds up a map of used/referenced
	// locals, and we use this map to produce a pruned Inline.Dcl
	// list. See issue 25249 for more context.

	visitor := hairyVisitor{
		curFunc:       fn,
		budget:        budget,
		maxBudget:     budget,
		extraCallCost: cc,
		profile:       profile,
	}
	if visitor.tooHairy(fn) {
		reason = visitor.reason
		return
	}

	n.Func.Inl = &ir.Inline{
		Cost: budget - visitor.budget,
		Dcl:  pruneUnusedAutos(n.Defn.(*ir.Func).Dcl, &visitor),
		Body: inlcopylist(fn.Body),

		CanDelayResults: canDelayResults(fn),
	}

	if base.Flag.LowerM > 1 {
		fmt.Printf("%v: can inline %v with cost %d as: %v { %v }\n", ir.Line(fn), n, budget-visitor.budget, fn.Type(), ir.Nodes(n.Func.Inl.Body))
	} else if base.Flag.LowerM != 0 {
		fmt.Printf("%v: can inline %v\n", ir.Line(fn), n)
	}
	if logopt.Enabled() {
		logopt.LogOpt(fn.Pos(), "canInlineFunction", "inline", ir.FuncName(fn), fmt.Sprintf("cost: %d", budget-visitor.budget))
	}
}
// InlineImpossible returns a non-empty reason string if fn is impossible to
// inline regardless of cost or contents.
func InlineImpossible(fn *ir.Func) string {
	var reason string // reason, if any, that the function cannot be inlined.

	// If marked "go:noinline", don't inline.
	if fn.Pragma&ir.Noinline != 0 {
		reason = "marked go:noinline"
		return reason
	}

	// If marked "go:norace" and -race compilation, don't inline.
	if base.Flag.Race && fn.Pragma&ir.Norace != 0 {
		reason = "marked go:norace with -race compilation"
		return reason
	}

	// If marked "go:nocheckptr" and -d checkptr compilation, don't inline.
	if base.Debug.Checkptr != 0 && fn.Pragma&ir.NoCheckPtr != 0 {
		reason = "marked go:nocheckptr"
		return reason
	}

	// If marked "go:cgo_unsafe_args", don't inline, since the function
	// makes assumptions about its argument frame layout.
	if fn.Pragma&ir.CgoUnsafeArgs != 0 {
		reason = "marked go:cgo_unsafe_args"
		return reason
	}

	// If marked as "go:uintptrkeepalive", don't inline, since the keep
	// alive information is lost during inlining.
	//
	// TODO(prattmic): This is handled on calls during escape analysis,
	// which is after inlining. Move prior to inlining so the keep-alive is
	// maintained after inlining.
	if fn.Pragma&ir.UintptrKeepAlive != 0 {
		reason = "marked as having a keep-alive uintptr argument"
		return reason
	}

	// If marked as "go:uintptrescapes", don't inline, since the escape
	// information is lost during inlining.
	if fn.Pragma&ir.UintptrEscapes != 0 {
		reason = "marked as having an escaping uintptr argument"
		return reason
	}

	// The nowritebarrierrec checker currently works at function
	// granularity, so inlining yeswritebarrierrec functions can confuse it
	// (#22342). As a workaround, disallow inlining them for now.
	if fn.Pragma&ir.Yeswritebarrierrec != 0 {
		reason = "marked go:yeswritebarrierrec"
		return reason
	}

	// If a local function has no fn.Body (it is defined outside of Go), we
	// cannot inline it. Imported functions don't have fn.Body but might have
	// an inline body in fn.Inl.
	if len(fn.Body) == 0 && !typecheck.HaveInlineBody(fn) {
		reason = "no function body"
		return reason
	}

	// If fn is a synthetic hash or eq function, we cannot inline it.
	// Such functions are not generated by the Unified IR frontend at this moment.
	if ir.IsEqOrHashFunc(fn) {
		reason = "type eq/hash function"
		return reason
	}

	return ""
}
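
// Editorial example (not part of the original comments): a function annotated
//
//	//go:noinline
//	func f() { ... }
//
// is rejected by InlineImpossible with reason "marked go:noinline" before any
// cost analysis runs.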
// canDelayResults reports whether inlined calls to fn can delay
// declaring the result parameter until the "return" statement.
func canDelayResults(fn *ir.Func) bool {
	// We can delay declaring+initializing result parameters if:
	// (1) there's exactly one "return" statement in the inlined function;
	// (2) it's not an empty return statement (#44355); and
	// (3) the result parameters aren't named.

	nreturns := 0
	ir.VisitList(fn.Body, func(n ir.Node) {
		if n, ok := n.(*ir.ReturnStmt); ok {
			nreturns++
			if len(n.Results) == 0 {
				nreturns++ // empty return statement (case 2)
			}
		}
	})

	if nreturns != 1 {
		return false // not exactly one return statement (case 1)
	}

	// temporaries for return values.
	for _, param := range fn.Type().Results().FieldSlice() {
		if sym := types.OrigSym(param.Sym); sym != nil && !sym.IsBlank() {
			return false // found a named result parameter (case 3)
		}
	}

	return true
}
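
// Editorial examples (not part of the original comments):
//
//	func a() int { return 42 }     // delayable: one non-empty return, unnamed result
//	func b() (x int) { return 42 } // not delayable: named result parameter (case 3)
//	func c() { return }            // not delayable: empty return statement (case 2)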
// hairyVisitor visits a function body to determine its inlining
// hairiness and whether or not it can be inlined.
type hairyVisitor struct {
	// This is needed to access the current caller in the doNode function.
	curFunc       *ir.Func
	budget        int32
	maxBudget     int32
	reason        string
	extraCallCost int32
	usedLocals    ir.NameSet
	do            func(ir.Node) bool
	profile       *pgo.Profile
}
func (v *hairyVisitor) tooHairy(fn *ir.Func) bool {
	v.do = v.doNode // cache closure
	if ir.DoChildren(fn, v.do) {
		return true
	}
	if v.budget < 0 {
		v.reason = fmt.Sprintf("function too complex: cost %d exceeds budget %d", v.maxBudget-v.budget, v.maxBudget)
		return true
	}
	return false
}

// doNode visits n and its children, updates the state in v, and returns true if
// n makes the current function too hairy for inlining.
func (v *hairyVisitor) doNode(n ir.Node) bool {
	if n == nil {
		return false
	}
	switch n.Op() {
	// Call is okay if inlinable and we have the budget for the body.
	case ir.OCALLFUNC:
		n := n.(*ir.CallExpr)
		// Functions that call runtime.getcaller{pc,sp} cannot be inlined
		// because getcaller{pc,sp} expect a pointer to the caller's first argument.
		//
		// runtime.throw is a "cheap call" like panic in normal code.
		var cheap bool
		if n.X.Op() == ir.ONAME {
			name := n.X.(*ir.Name)
			if name.Class == ir.PFUNC && types.IsRuntimePkg(name.Sym().Pkg) {
				fn := name.Sym().Name
				if fn == "getcallerpc" || fn == "getcallersp" {
					v.reason = "call to " + fn
					return true
				}
				if fn == "throw" {
					v.budget -= inlineExtraThrowCost
					break
				}
			}
			// Special case for reflect.noescape. It does just type
			// conversions to appease the escape analysis, and doesn't
			// generate code.
			if name.Class == ir.PFUNC && types.IsReflectPkg(name.Sym().Pkg) {
				if name.Sym().Name == "noescape" {
					cheap = true
				}
			}
			// Special case for coverage counter updates; although
			// these correspond to real operations, we treat them as
			// zero cost for the moment. This is due to the existence
			// of tests that are sensitive to inlining -- if the
			// insertion of coverage instrumentation happens to tip a
			// given function over the threshold and move it from
			// "inlinable" to "not-inlinable", this can cause changes
			// in allocation behavior, which can then result in test
			// failures (a good example is the TestAllocations in
			// crypto/ed25519).
			if isAtomicCoverageCounterUpdate(n) {
				return false
			}
		}
		if n.X.Op() == ir.OMETHEXPR {
			if meth := ir.MethodExprName(n.X); meth != nil {
				if fn := meth.Func; fn != nil {
					s := fn.Sym()
					if types.IsRuntimePkg(s.Pkg) && s.Name == "heapBits.nextArena" {
						// Special case: explicitly allow mid-stack inlining of
						// runtime.heapBits.next even though it calls slow-path
						// runtime.heapBits.nextArena.
						cheap = true
					}
					// Special case: on architectures that can do unaligned loads,
					// explicitly mark encoding/binary methods as cheap,
					// because in practice they are, even though our inlining
					// budgeting system does not see that. See issue 42958.
					if base.Ctxt.Arch.CanMergeLoads && s.Pkg.Path == "encoding/binary" {
						switch s.Name {
						case "littleEndian.Uint64", "littleEndian.Uint32", "littleEndian.Uint16",
							"bigEndian.Uint64", "bigEndian.Uint32", "bigEndian.Uint16",
							"littleEndian.PutUint64", "littleEndian.PutUint32", "littleEndian.PutUint16",
							"bigEndian.PutUint64", "bigEndian.PutUint32", "bigEndian.PutUint16",
							"littleEndian.AppendUint64", "littleEndian.AppendUint32", "littleEndian.AppendUint16",
							"bigEndian.AppendUint64", "bigEndian.AppendUint32", "bigEndian.AppendUint16":
							cheap = true
						}
					}
				}
			}
		}
		if cheap {
			break // treat like any other node, that is, cost of 1
		}

		// Determine if the callee edge is for an inlinable hot callee or not.
		if v.profile != nil && v.curFunc != nil {
			if fn := inlCallee(n.X, v.profile); fn != nil && typecheck.HaveInlineBody(fn) {
				lineOffset := pgo.NodeLineOffset(n, fn)
				csi := pgo.CallSiteInfo{LineOffset: lineOffset, Caller: v.curFunc}
				if _, o := candHotEdgeMap[csi]; o {
					if base.Debug.PGODebug > 0 {
						fmt.Printf("hot-callsite identified at line=%v for func=%v\n", ir.Line(n), ir.PkgFuncName(v.curFunc))
					}
				}
			}
		}

		if ir.IsIntrinsicCall(n) {
			// Treat like any other node.
			break
		}

		if fn := inlCallee(n.X, v.profile); fn != nil && typecheck.HaveInlineBody(fn) {
			v.budget -= fn.Inl.Cost
			break
		}

		// Call cost for non-leaf inlining.
		v.budget -= v.extraCallCost

	case ir.OCALLMETH:
		base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")

	// Things that are too hairy, irrespective of the budget
	case ir.OCALL, ir.OCALLINTER:
		// Call cost for non-leaf inlining.
		v.budget -= v.extraCallCost

	case ir.OPANIC:
		n := n.(*ir.UnaryExpr)
		if n.X.Op() == ir.OCONVIFACE && n.X.(*ir.ConvExpr).Implicit() {
			// Hack to keep reflect.flag.mustBe inlinable for TestIntendedInlining.
			// Before CL 284412, these conversions were introduced later in the
			// compiler, so they didn't count against inlining budget.
			v.budget++
		}
		v.budget -= inlineExtraPanicCost

	case ir.ORECOVER:
		// recover matches the argument frame pointer to find
		// the right panic value, so it needs an argument frame.
		v.reason = "call to recover"
		return true

	case ir.OCLOSURE:
		if base.Debug.InlFuncsWithClosures == 0 {
			v.reason = "not inlining functions with closures"
			return true
		}

		// TODO(danscales): Maybe make budget proportional to number of closure
		// variables, e.g.:
		//v.budget -= int32(len(n.(*ir.ClosureExpr).Func.ClosureVars) * 3)
		// TODO(austin): However, if we're able to inline this closure into
		// v.curFunc, then we actually pay nothing for the closure captures. We
		// should try to account for that if we're going to account for captures.
		v.budget -= 15

	case ir.OGO,
		ir.ODEFER,
		ir.ODCLTYPE, // can't print yet
		ir.OTAILCALL:
		v.reason = "unhandled op " + n.Op().String()
		return true

	case ir.OAPPEND:
		v.budget -= inlineExtraAppendCost

	case ir.OADDR:
		n := n.(*ir.AddrExpr)
		// Make "&s.f" cost 0 when f's offset is zero.
		if dot, ok := n.X.(*ir.SelectorExpr); ok && (dot.Op() == ir.ODOT || dot.Op() == ir.ODOTPTR) {
			if _, ok := dot.X.(*ir.Name); ok && dot.Selection.Offset == 0 {
				v.budget += 2 // undo ir.OADDR+ir.ODOT/ir.ODOTPTR
			}
		}

	case ir.ODEREF:
		// *(*X)(unsafe.Pointer(&x)) is low-cost
		n := n.(*ir.StarExpr)

		ptr := n.X
		for ptr.Op() == ir.OCONVNOP {
			ptr = ptr.(*ir.ConvExpr).X
		}
		if ptr.Op() == ir.OADDR {
			v.budget += 1 // undo half of default cost of ir.ODEREF+ir.OADDR
		}

	case ir.OCONVNOP:
		// This doesn't produce code, but the children might.
		v.budget++ // undo default cost

	case ir.ODCLCONST, ir.OFALL, ir.OTYPE:
		// These nodes don't produce code; omit from inlining budget.
		return false

	case ir.OIF:
		n := n.(*ir.IfStmt)
		if ir.IsConst(n.Cond, constant.Bool) {
			// This if and the condition cost nothing.
			if doList(n.Init(), v.do) {
				return true
			}
			if ir.BoolVal(n.Cond) {
				return doList(n.Body, v.do)
			}
			return doList(n.Else, v.do)
		}

	case ir.ONAME:
		n := n.(*ir.Name)
		if n.Class == ir.PAUTO {
			v.usedLocals.Add(n)
		}

	case ir.OBLOCK:
		// The only OBLOCK we should see at this point is an empty one.
		// In any event, let the visitList(n.List()) below take care of the statements,
		// and don't charge for the OBLOCK itself. The ++ undoes the -- below.
		v.budget++

	case ir.OMETHVALUE, ir.OSLICELIT:
		v.budget-- // Hack for toolstash -cmp.

	case ir.OMETHEXPR:
		v.budget++ // Hack for toolstash -cmp.

	case ir.OAS2:
		n := n.(*ir.AssignListStmt)

		// Unified IR unconditionally rewrites:
		//
		//	a, b = f()
		//
		// into:
		//
		//	DCL tmp1
		//	DCL tmp2
		//	tmp1, tmp2 = f()
		//	a, b = tmp1, tmp2
		//
		// so that it can insert implicit conversions as necessary. To
		// minimize impact to the existing inlining heuristics (in
		// particular, to avoid breaking the existing inlinability regress
		// tests), we need to compensate for this here.
		//
		// See also identical logic in isBigFunc.
		if init := n.Rhs[0].Init(); len(init) == 1 {
			if _, ok := init[0].(*ir.AssignListStmt); ok {
				// 4 for each value, because each temporary variable now
				// appears 3 times (DCL, LHS, RHS), plus an extra DCL node.
				//
				// 1 for the extra "tmp1, tmp2 = f()" assignment statement.
				v.budget += 4*int32(len(n.Lhs)) + 1
			}
		}

	case ir.OAS:
		// Special case for coverage counter updates and coverage
		// function registrations. Although these correspond to real
		// operations, we treat them as zero cost for the moment. This
		// is primarily due to the existence of tests that are
		// sensitive to inlining -- if the insertion of coverage
		// instrumentation happens to tip a given function over the
		// threshold and move it from "inlinable" to "not-inlinable",
		// this can cause changes in allocation behavior, which can
		// then result in test failures (a good example is the
		// TestAllocations in crypto/ed25519).
		n := n.(*ir.AssignStmt)
		if n.X.Op() == ir.OINDEX && isIndexingCoverageCounter(n.X) {
			return false
		}
	}

	v.budget--

	// When debugging, don't stop early, to get full cost of inlining this function
	if v.budget < 0 && base.Flag.LowerM < 2 && !logopt.Enabled() {
		v.reason = "too expensive"
		return true
	}

	return ir.DoChildren(n, v.do)
}
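
// Editorial note (not part of the original comments): the v.budget-- above is
// the default charge, so an ordinary node costs 1; the case arms adjust around
// that default, e.g. an inlinable call costs its callee's Inl.Cost, while a
// non-inlinable, non-intrinsic call costs extraCallCost (57 by default, 1
// under -l=4).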
func isBigFunc(fn *ir.Func) bool {
	budget := inlineBigFunctionNodes
	return ir.Any(fn, func(n ir.Node) bool {
		// See logic in hairyVisitor.doNode, explaining unified IR's
		// handling of "a, b = f()" assignments.
		if n, ok := n.(*ir.AssignListStmt); ok && n.Op() == ir.OAS2 {
			if init := n.Rhs[0].Init(); len(init) == 1 {
				if _, ok := init[0].(*ir.AssignListStmt); ok {
					budget += 4*len(n.Lhs) + 1
				}
			}
		}

		budget--
		return budget <= 0
	})
}
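
// Editorial note (not part of the original comments): isBigFunc charges 1 per
// node against inlineBigFunctionNodes (5000), so a body of about 5000 nodes or
// more is "big" and, via inlineCostOK below, only accepts inlinees costing at
// most inlineBigFunctionMaxCost (20).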
// inlcopylist (together with inlcopy) recursively copies a list of nodes, except
// that it keeps the same ONAME, OTYPE, and OLITERAL nodes. It is used for copying
// the body and dcls of an inlineable function.
func inlcopylist(ll []ir.Node) []ir.Node {
	s := make([]ir.Node, len(ll))
	for i, n := range ll {
		s[i] = inlcopy(n)
	}
	return s
}
// inlcopy is like DeepCopy(), but does extra work to copy closures.
func inlcopy(n ir.Node) ir.Node {
	var edit func(ir.Node) ir.Node
	edit = func(x ir.Node) ir.Node {
		switch x.Op() {
		case ir.ONAME, ir.OTYPE, ir.OLITERAL, ir.ONIL:
			return x
		}
		m := ir.Copy(x)
		ir.EditChildren(m, edit)
		if x.Op() == ir.OCLOSURE {
			x := x.(*ir.ClosureExpr)
			// Need to save/duplicate x.Func.Nname,
			// x.Func.Nname.Ntype, x.Func.Dcl, x.Func.ClosureVars, and
			// x.Func.Body for iexport and local inlining.
			oldfn := x.Func
			newfn := ir.NewFunc(oldfn.Pos())
			m.(*ir.ClosureExpr).Func = newfn
			newfn.Nname = ir.NewNameAt(oldfn.Nname.Pos(), oldfn.Nname.Sym())
			// XXX OK to share fn.Type() ??
			newfn.Nname.SetType(oldfn.Nname.Type())
			newfn.Body = inlcopylist(oldfn.Body)
			// Make shallow copy of the Dcl and ClosureVar slices
			newfn.Dcl = append([]*ir.Name(nil), oldfn.Dcl...)
			newfn.ClosureVars = append([]*ir.Name(nil), oldfn.ClosureVars...)
		}
		return m
	}
	return edit(n)
}
// InlineCalls/inlnode walks fn's statements and expressions and substitutes any
// calls made to inlineable functions. This is the external entry point.
func InlineCalls(fn *ir.Func, profile *pgo.Profile) {
	savefn := ir.CurFunc
	ir.CurFunc = fn
	bigCaller := isBigFunc(fn)
	if bigCaller && base.Flag.LowerM > 1 {
		fmt.Printf("%v: function %v considered 'big'; reducing max cost of inlinees\n", ir.Line(fn), fn)
	}
	var inlCalls []*ir.InlinedCallExpr
	var edit func(ir.Node) ir.Node
	edit = func(n ir.Node) ir.Node {
		return inlnode(n, bigCaller, &inlCalls, edit, profile)
	}
	ir.EditChildren(fn, edit)

	// If we inlined any calls, we want to recursively visit their
	// bodies for further inlining. However, we need to wait until
	// *after* the original function body has been expanded, or else
	// inlCallee can have false positives (e.g., #54632).
	for len(inlCalls) > 0 {
		call := inlCalls[0]
		inlCalls = inlCalls[1:]
		ir.EditChildren(call, edit)
	}

	ir.CurFunc = savefn
}
// inlnode recurses over the tree to find inlineable calls, which will
// be turned into OINLCALLs by mkinlcall. When the recursion comes
// back up, it will examine left, right, list, rlist, ninit, ntest, nincr,
// nbody and nelse and use one of the 4 inlconv/glue functions above
// to turn the OINLCALL into an expression, a statement, or patch it
// into this node's list or rlist as appropriate.
// NOTE it makes no sense to pass the glue functions down the
// recursion to the level where the OINLCALL gets created because they
// have to edit /this/ n, so you'd have to push that one down as well,
// but then you may as well do it here. So this is cleaner and
// shorter and less complicated.
// The result of inlnode MUST be assigned back to n, e.g.
//
//	n.Left = inlnode(n.Left)
func inlnode(n ir.Node, bigCaller bool, inlCalls *[]*ir.InlinedCallExpr, edit func(ir.Node) ir.Node, profile *pgo.Profile) ir.Node {
	if n == nil {
		return n
	}

	switch n.Op() {
	case ir.ODEFER, ir.OGO:
		n := n.(*ir.GoDeferStmt)
		switch call := n.Call; call.Op() {
		case ir.OCALLMETH:
			base.FatalfAt(call.Pos(), "OCALLMETH missed by typecheck")
		case ir.OCALLFUNC:
			call := call.(*ir.CallExpr)
			call.NoInline = true
		}
	case ir.OTAILCALL:
		n := n.(*ir.TailCallStmt)
		n.Call.NoInline = true // Do not inline a tail call for now. Maybe we could inline it just like RETURN fn(arg)?

	// TODO do them here (or earlier),
	// so escape analysis can avoid more heapmoves.
	case ir.OCLOSURE:
		return n
	case ir.OCALLMETH:
		base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
	case ir.OCALLFUNC:
		n := n.(*ir.CallExpr)
		if n.X.Op() == ir.OMETHEXPR {
			// Prevent inlining some reflect.Value methods when using checkptr,
			// even when package reflect was compiled without it (#35073).
			if meth := ir.MethodExprName(n.X); meth != nil {
				s := meth.Sym()
				if base.Debug.Checkptr != 0 && types.IsReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") {
					return n
				}
			}
		}
	}

	lno := ir.SetPos(n)

	ir.EditChildren(n, edit)

	// with all the branches out of the way, it is now time to
	// transmogrify this node itself unless inhibited by the
	// switch at the top of this function.
	switch n.Op() {
	case ir.OCALLMETH:
		base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")

	case ir.OCALLFUNC:
		call := n.(*ir.CallExpr)
		if call.NoInline {
			break
		}
		if base.Flag.LowerM > 3 {
			fmt.Printf("%v:call to func %+v\n", ir.Line(n), call.X)
		}
		if ir.IsIntrinsicCall(call) {
			break
		}
		if fn := inlCallee(call.X, profile); fn != nil && typecheck.HaveInlineBody(fn) {
			n = mkinlcall(call, fn, bigCaller, inlCalls)
		}
	}

	base.Pos = lno

	return n
}
// inlCallee takes a function-typed expression and returns the underlying function ONAME
// that it refers to if statically known. Otherwise, it returns nil.
func inlCallee(fn ir.Node, profile *pgo.Profile) *ir.Func {
	fn = ir.StaticValue(fn)
	switch fn.Op() {
	case ir.OMETHEXPR:
		fn := fn.(*ir.SelectorExpr)
		n := ir.MethodExprName(fn)
		// Check that receiver type matches fn.X.
		// TODO(mdempsky): Handle implicit dereference
		// of pointer receiver argument?
		if n == nil || !types.Identical(n.Type().Recv().Type, fn.X.Type()) {
			return nil
		}
		return n.Func
	case ir.ONAME:
		fn := fn.(*ir.Name)
		if fn.Class == ir.PFUNC {
			return fn.Func
		}
	case ir.OCLOSURE:
		fn := fn.(*ir.ClosureExpr)
		c := fn.Func
		CanInline(c, profile)
		return c
	}
	return nil
}
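
// Editorial examples (not part of the original comments) of calls whose callee
// inlCallee can resolve statically:
//
//	f()                  // ONAME: a direct reference to a declared function
//	T.M(recv)            // OMETHEXPR: a method expression
//	func() { work() }()  // OCLOSURE: a directly called closure literal
//
// A dynamically chosen function value, e.g. fns[i](), resolves to nil and is
// not inlined.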
// SSADumpInline gives the SSA back end a chance to dump the function
// when producing output for debugging the compiler itself.
var SSADumpInline = func(*ir.Func) {}

// InlineCall allows the inliner implementation to be overridden.
// If it returns nil, the function will not be inlined.
var InlineCall = func(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExpr {
	base.Fatalf("inline.InlineCall not overridden")
	panic("unreachable")
}
// inlineCostOK returns true if call n from caller to callee is cheap enough to
// inline. bigCaller indicates that caller is a big function.
//
// If inlineCostOK returns false, it also returns the max cost that the callee
// exceeded.
func inlineCostOK(n *ir.CallExpr, caller, callee *ir.Func, bigCaller bool) (bool, int32) {
	maxCost := int32(inlineMaxBudget)
	if bigCaller {
		// We use this to restrict inlining into very big functions.
		// See issue 26546 and 17566.
		maxCost = inlineBigFunctionMaxCost
	}

	if callee.Inl.Cost <= maxCost {
		// Simple case. Function is already cheap enough.
		return true, 0
	}

	// We'll also allow inlining of hot functions below inlineHotMaxBudget,
	// but only in small functions.

	lineOffset := pgo.NodeLineOffset(n, caller)
	csi := pgo.CallSiteInfo{LineOffset: lineOffset, Caller: caller}
	if _, ok := candHotEdgeMap[csi]; !ok {
		// Cold
		return false, maxCost
	}

	// Hot

	if bigCaller {
		if base.Debug.PGODebug > 0 {
			fmt.Printf("hot-big check disallows inlining for call %s (cost %d) at %v in big function %s\n", ir.PkgFuncName(callee), callee.Inl.Cost, ir.Line(n), ir.PkgFuncName(caller))
		}
		return false, maxCost
	}

	if callee.Inl.Cost > inlineHotMaxBudget {
		return false, inlineHotMaxBudget
	}

	if base.Debug.PGODebug > 0 {
		fmt.Printf("hot-budget check allows inlining for call %s (cost %d) at %v in function %s\n", ir.PkgFuncName(callee), callee.Inl.Cost, ir.Line(n), ir.PkgFuncName(caller))
	}

	return true, 0
}
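
// Editorial worked example (not part of the original comments): a callee with
// Inl.Cost 120 exceeds the normal maxCost of 80, but if the call site is in
// candHotEdgeMap and the caller is not big, the hot budget of 2000 applies
// and the call is still inlined; the same callee at a cold call site yields
// (false, 80).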
// If n is an OCALLFUNC node, and fn is an ONAME node for a
// function with an inlinable body, return an OINLCALL node that can replace n.
// The returned node's Ninit has the parameter assignments, the Nbody is the
// inlined function body, and (List, Rlist) contain the (input, output)
// parameters.
// The result of mkinlcall MUST be assigned back to n, e.g.
//
//	n.Left = mkinlcall(n.Left, fn, isddd)
func mkinlcall(n *ir.CallExpr, fn *ir.Func, bigCaller bool, inlCalls *[]*ir.InlinedCallExpr) ir.Node {
	if fn.Inl == nil {
		if logopt.Enabled() {
			logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(ir.CurFunc),
				fmt.Sprintf("%s cannot be inlined", ir.PkgFuncName(fn)))
		}
		return n
	}

	if ok, maxCost := inlineCostOK(n, ir.CurFunc, fn, bigCaller); !ok {
		if logopt.Enabled() {
			logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(ir.CurFunc),
				fmt.Sprintf("cost %d of %s exceeds max caller cost %d", fn.Inl.Cost, ir.PkgFuncName(fn), maxCost))
		}
		return n
	}

	if fn == ir.CurFunc {
		// Can't recursively inline a function into itself.
		if logopt.Enabled() {
			logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", fmt.Sprintf("recursive call to %s", ir.FuncName(ir.CurFunc)))
		}
		return n
	}

	if base.Flag.Cfg.Instrumenting && types.IsNoInstrumentPkg(fn.Sym().Pkg) {
		// Runtime package must not be instrumented.
		// Instrumentation skips the runtime package. However, some runtime
		// code can be inlined into other packages and instrumented there. To
		// avoid this, we disable inlining of runtime functions when
		// instrumenting. The example that we observed is inlining of
		// LockOSThread, which led to false race reports on m contents.
		return n
	}
	if base.Flag.Race && types.IsNoRacePkg(fn.Sym().Pkg) {
		return n
	}

	parent := base.Ctxt.PosTable.Pos(n.Pos()).Base().InliningIndex()
	sym := fn.Linksym()

	// Check if we've already inlined this function at this particular
	// call site, in order to stop inlining when we reach the beginning
	// of a recursion cycle again. We don't inline immediately recursive
	// functions, but allow inlining if there is a recursion cycle of
	// many functions. Most likely, the inlining will stop before we
	// even hit the beginning of the cycle again, but this catches the
	// unusual case.
	for inlIndex := parent; inlIndex >= 0; inlIndex = base.Ctxt.InlTree.Parent(inlIndex) {
		if base.Ctxt.InlTree.InlinedFunction(inlIndex) == sym {
			if base.Flag.LowerM > 1 {
				fmt.Printf("%v: cannot inline %v into %v: repeated recursive cycle\n", ir.Line(n), fn, ir.FuncName(ir.CurFunc))
			}
			return n
		}
	}

	typecheck.AssertFixedCall(n)

	inlIndex := base.Ctxt.InlTree.Add(parent, n.Pos(), sym, ir.FuncName(fn))
	closureInitLSym := func(n *ir.CallExpr, fn *ir.Func) {
		// The linker needs FuncInfo metadata for all inlined
		// functions. This is typically handled by gc.enqueueFunc
		// calling ir.InitLSym for all function declarations in
		// typecheck.Target.Decls (ir.UseClosure adds all closures to
		// Decls).
		//
		// However, non-trivial closures in Decls are ignored, and are
		// instead enqueued when walk of the calling function
		// discovers them.
		//
		// This presents a problem for direct calls to closures.
		// Inlining will replace the entire closure definition with its
		// body, which hides the closure from walk and thus suppresses
		// symbol creation.
		//
		// Explicitly create a symbol early in this edge case to ensure
		// we keep this metadata.
		//
		// TODO: Refactor to keep a reference so this can all be done
		// by enqueueFunc.

		if n.Op() != ir.OCALLFUNC {
			// Not a standard call.
			return
		}
		if n.X.Op() != ir.OCLOSURE {
			// Not a direct closure call.
			return
		}

		clo := n.X.(*ir.ClosureExpr)
		if ir.IsTrivialClosure(clo) {
			// enqueueFunc will handle trivial closures anyways.
			return
		}

		ir.InitLSym(fn, true)
	}

	closureInitLSym(n, fn)
	if base.Flag.GenDwarfInl > 0 {
		if !sym.WasInlined() {
			base.Ctxt.DwFixups.SetPrecursorFunc(sym, fn)
			sym.Set(obj.AttrWasInlined, true)
		}
	}

	if base.Flag.LowerM != 0 {
		fmt.Printf("%v: inlining call to %v\n", ir.Line(n), fn)
	}
	if base.Flag.LowerM > 2 {
		fmt.Printf("%v: Before inlining: %+v\n", ir.Line(n), n)
	}

	res := InlineCall(n, fn, inlIndex)

	if res == nil {
		base.FatalfAt(n.Pos(), "inlining call to %v failed", fn)
	}

	if base.Flag.LowerM > 2 {
		fmt.Printf("%v: After inlining %+v\n\n", ir.Line(res), res)
	}

	*inlCalls = append(*inlCalls, res)

	return res
}
// CalleeEffects appends any side effects from evaluating callee to init.
func CalleeEffects(init *ir.Nodes, callee ir.Node) {
	for {
		init.Append(ir.TakeInit(callee)...)

		switch callee.Op() {
		case ir.ONAME, ir.OCLOSURE, ir.OMETHEXPR:
			return // done

		case ir.OCONVNOP:
			conv := callee.(*ir.ConvExpr)
			callee = conv.X

		case ir.OINLCALL:
			ic := callee.(*ir.InlinedCallExpr)
			init.Append(ic.Body.Take()...)
			callee = ic.SingleResult()

		default:
			base.FatalfAt(callee.Pos(), "unexpected callee expression: %v", callee)
		}
	}
}
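
// Editorial example (not part of the original comments): for a call like
// getHandler()(), where the inner getHandler() call was itself inlined into
// an OINLCALL, CalleeEffects hoists the inlined body into init and unwraps
// the single result, so the work of computing the callee still runs exactly
// once.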
func pruneUnusedAutos(ll []*ir.Name, vis *hairyVisitor) []*ir.Name {
	s := make([]*ir.Name, 0, len(ll))
	for _, n := range ll {
		if n.Class == ir.PAUTO {
			if !vis.usedLocals.Has(n) {
				continue
			}
		}
		s = append(s, n)
	}
	return s
}
// numNonClosures returns the number of functions in list which are not closures.
func numNonClosures(list []*ir.Func) int {
	count := 0
	for _, fn := range list {
		if fn.OClosure == nil {
			count++
		}
	}
	return count
}
func doList(list []ir.Node, do func(ir.Node) bool) bool {
	for _, x := range list {
		if x != nil {
			if do(x) {
				return true
			}
		}
	}
	return false
}
// isIndexingCoverageCounter returns true if the specified node 'n' is indexing
// into a coverage counter array.
func isIndexingCoverageCounter(n ir.Node) bool {
	if n.Op() != ir.OINDEX {
		return false
	}
	ixn := n.(*ir.IndexExpr)
	if ixn.X.Op() != ir.ONAME || !ixn.X.Type().IsArray() {
		return false
	}
	nn := ixn.X.(*ir.Name)
	return nn.CoverageCounter()
}
// isAtomicCoverageCounterUpdate examines the specified node to
// determine whether it represents a call to sync/atomic.AddUint32 to
// increment a coverage counter.
func isAtomicCoverageCounterUpdate(cn *ir.CallExpr) bool {
	if cn.X.Op() != ir.ONAME {
		return false
	}
	name := cn.X.(*ir.Name)
	if name.Class != ir.PFUNC {
		return false
	}
	fn := name.Sym().Name
	if name.Sym().Pkg.Path != "sync/atomic" ||
		(fn != "AddUint32" && fn != "StoreUint32") {
		return false
	}
	if len(cn.Args) != 2 || cn.Args[0].Op() != ir.OADDR {
		return false
	}
	adn := cn.Args[0].(*ir.AddrExpr)
	v := isIndexingCoverageCounter(adn.X)
	return v
}
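
// Editorial example (not part of the original comments; the counter name is
// hypothetical): the instrumentation pattern recognized above looks like
//
//	atomic.AddUint32(&__covcounter_f[3], 1)
//
// where __covcounter_f is an array variable flagged as a coverage counter.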