// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The inlining facility makes 2 passes: first CanInline determines which
// functions are suitable for inlining, and for those that are it
// saves a copy of the body. Then InlineCalls walks each function body to
// expand calls to inlinable functions.
// The Debug.l flag controls the aggressiveness. Note that main() swaps level 0 and 1,
// making 1 the default and -l disable inlining. Additional levels (beyond -l) may be
// buggy and are not supported.
// 1: 80-node leaf functions, oneliners, panic, lazy typechecking (default)
// 4: allow non-leaf functions
// At some point this may get another default and become switch-offable with -N.
// The -d typecheckinl flag enables early typechecking of all imported bodies,
// which is useful to flush out bugs.
// The Debug.m flag enables diagnostic output. A single -m is useful for verifying
// which calls get inlined or not; more is for debugging, and may go away at any point.
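//
// For example (illustrative invocations, not taken from this file; the flag
// spellings below are the standard go tool ones):
//
//	go build -gcflags=-m ./...      // report which calls get inlined
//	go build -gcflags='-m -m' ./... // also explain why candidates are rejected
//	go build -gcflags=-l ./...      // disable inlining entirely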
35 "cmd/compile/internal/base"
36 "cmd/compile/internal/inline/inlheur"
37 "cmd/compile/internal/ir"
38 "cmd/compile/internal/logopt"
39 "cmd/compile/internal/pgo"
40 "cmd/compile/internal/typecheck"
41 "cmd/compile/internal/types"
// Inlining budget parameters, gathered in one place
inlineExtraAppendCost = 0
// default is to inline if there's at most one call. -l=4 overrides this by using 1 instead.
inlineExtraCallCost = 57 // 57 was benchmarked to provide the most benefit with no bad surprises; see https://github.com/golang/go/issues/19348#issuecomment-439370742
inlineExtraPanicCost = 1 // do not penalize inlining panics.
inlineExtraThrowCost = inlineMaxBudget // with current (2018-05/1.11) code, inlining runtime.throw does not help.
inlineBigFunctionNodes = 5000 // Functions with this many nodes are considered "big".
inlineBigFunctionMaxCost = 20 // Max cost of inlinee when inlining into a "big" function.
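// Worked example (illustrative, using the defaults above and the 80-node
// budget mentioned in the header comment): a leaf function whose body costs 75
// fits the budget and is an inline candidate; one additional call to a
// non-inlinable function charges inlineExtraCallCost (57) and typically pushes
// it over, while an extra panic only charges inlineExtraPanicCost (1).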
// List of all hot callee nodes.
// TODO(prattmic): Make this non-global.
candHotCalleeMap = make(map[*pgo.IRNode]struct{})
// List of all hot call sites. CallSiteInfo.Callee is always nil.
// TODO(prattmic): Make this non-global.
candHotEdgeMap = make(map[pgo.CallSiteInfo]struct{})
// Threshold in percentage for hot callsite inlining.
inlineHotCallSiteThresholdPercent float64
// Threshold in CDF percentage for hot callsite inlining,
// that is, for a threshold of X the hottest callsites that
// make up the top X% of total edge weight will be
// considered hot for inlining candidates.
inlineCDFHotCallSiteThresholdPercent = float64(99)
// Budget increased due to hotness.
inlineHotMaxBudget int32 = 2000
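// The PGO knobs above are typically driven by compiler debug options
// (illustrative; the exact -d spellings are assumptions derived from the
// base.Debug field names used below): -d=pgoinlinecdfthreshold=N overrides the
// CDF threshold, -d=pgoinlinebudget=N overrides the hot-callee budget, and
// -d=pgodebug=N raises the verbosity of the PGO inliner diagnostics.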
// pgoInlinePrologue records the hot callsites from ir-graph.
func pgoInlinePrologue(p *pgo.Profile, funcs []*ir.Func) {
if base.Debug.PGOInlineCDFThreshold != "" {
if s, err := strconv.ParseFloat(base.Debug.PGOInlineCDFThreshold, 64); err == nil && s >= 0 && s <= 100 {
inlineCDFHotCallSiteThresholdPercent = s
base.Fatalf("invalid PGOInlineCDFThreshold, must be between 0 and 100")
var hotCallsites []pgo.NodeMapKey
inlineHotCallSiteThresholdPercent, hotCallsites = hotNodesFromCDF(p)
if base.Debug.PGODebug > 0 {
fmt.Printf("hot-callsite-thres-from-CDF=%v\n", inlineHotCallSiteThresholdPercent)
if x := base.Debug.PGOInlineBudget; x != 0 {
inlineHotMaxBudget = int32(x)
for _, n := range hotCallsites {
// mark inlineable callees from hot edges
if callee := p.WeightedCG.IRNodes[n.CalleeName]; callee != nil {
candHotCalleeMap[callee] = struct{}{}
// mark hot call sites
if caller := p.WeightedCG.IRNodes[n.CallerName]; caller != nil && caller.AST != nil {
csi := pgo.CallSiteInfo{LineOffset: n.CallSiteOffset, Caller: caller.AST}
candHotEdgeMap[csi] = struct{}{}
if base.Debug.PGODebug >= 3 {
fmt.Printf("hot-cg before inline in dot format:")
p.PrintWeightedCallGraphDOT(inlineHotCallSiteThresholdPercent)
// hotNodesFromCDF computes an edge weight threshold and the list of hot
// nodes that make up the given percentage of the CDF. The threshold, as
// a percent, is the lower bound of weight for nodes to be considered hot
// (currently only used in debug prints) (in case of equal weights,
// comparing with the threshold may not accurately reflect which nodes are
// considered hot).
func hotNodesFromCDF(p *pgo.Profile) (float64, []pgo.NodeMapKey) {
nodes := make([]pgo.NodeMapKey, len(p.NodeMap))
for n := range p.NodeMap {
sort.Slice(nodes, func(i, j int) bool {
ni, nj := nodes[i], nodes[j]
if wi, wj := p.NodeMap[ni].EWeight, p.NodeMap[nj].EWeight; wi != wj {
return wi > wj // want larger weight first
// same weight, order by name/line number
if ni.CallerName != nj.CallerName {
return ni.CallerName < nj.CallerName
if ni.CalleeName != nj.CalleeName {
return ni.CalleeName < nj.CalleeName
return ni.CallSiteOffset < nj.CallSiteOffset
for i, n := range nodes {
w := p.NodeMap[n].EWeight
if pgo.WeightInPercentage(cum, p.TotalEdgeWeight) > inlineCDFHotCallSiteThresholdPercent {
// nodes[:i+1] to include the node whose weight first pushes the cumulative total over the threshold.
// (Say, if the CDF threshold is 50% and one hot node takes 60% of weight, we want to
// include that node instead of excluding it.)
return pgo.WeightInPercentage(w, p.TotalEdgeWeight), nodes[:i+1]
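// hotPrefixLen is an illustrative sketch (not used by the compiler) of the
// cutoff computed by the loop above: given edge weights sorted in descending
// order and their total, it reports how many of the heaviest edges are needed
// before their cumulative share first exceeds thresholdPercent. The name and
// signature here are hypothetical.
func hotPrefixLen(sortedWeights []int64, total int64, thresholdPercent float64) int {
	var cum int64
	for i, w := range sortedWeights {
		cum += w
		if float64(cum)/float64(total)*100 > thresholdPercent {
			return i + 1 // include the edge that crosses the threshold
		}
	}
	return len(sortedWeights)
}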
// InlinePackage finds functions that can be inlined and clones them before walk expands them.
func InlinePackage(p *pgo.Profile) {
if base.Debug.PGOInline == 0 {
InlineDecls(p, typecheck.Target.Funcs, true)
// Perform a garbage collection of hidden closure functions that
// are no longer reachable from top-level functions following
// inlining. See #59404 and #59638 for more context.
garbageCollectUnreferencedHiddenClosures()
if base.Debug.DumpInlFuncProps != "" {
inlheur.DumpFuncProps(nil, base.Debug.DumpInlFuncProps)
// InlineDecls applies inlining to the given batch of declarations.
func InlineDecls(p *pgo.Profile, funcs []*ir.Func, doInline bool) {
pgoInlinePrologue(p, funcs)
doCanInline := func(n *ir.Func, recursive bool, numfns int) {
if !recursive || numfns > 1 {
// We allow inlining if there is no
// recursion, or the recursion cycle is
// across more than one function.
if base.Flag.LowerM > 1 && n.OClosure == nil {
fmt.Printf("%v: cannot inline %v: recursive\n", ir.Line(n), n.Nname)
ir.VisitFuncsBottomUp(funcs, func(list []*ir.Func, recursive bool) {
numfns := numNonClosures(list)
// We visit functions within an SCC in fairly arbitrary order,
// so by computing inlinability for all functions in the SCC
// before performing any inlining, the results are less
// sensitive to the order within the SCC (see #58905 for an
// example).
// First compute inlinability for all functions in the SCC ...
for _, n := range list {
doCanInline(n, recursive, numfns)
// ... then make a second pass to do inlining of calls.
for _, n := range list {
// garbageCollectUnreferencedHiddenClosures makes a pass over all the
// top-level (non-hidden-closure) functions looking for nested closure
// functions that are reachable, then sweeps through the Target.Decls
// list and marks any non-reachable hidden closure function as dead.
// See issues #59404 and #59638 for more context.
func garbageCollectUnreferencedHiddenClosures() {
liveFuncs := make(map[*ir.Func]bool)
var markLiveFuncs func(fn *ir.Func)
markLiveFuncs = func(fn *ir.Func) {
ir.Visit(fn, func(n ir.Node) {
if clo, ok := n.(*ir.ClosureExpr); ok {
markLiveFuncs(clo.Func)
for i := 0; i < len(typecheck.Target.Funcs); i++ {
fn := typecheck.Target.Funcs[i]
if fn.IsHiddenClosure() {
for i := 0; i < len(typecheck.Target.Funcs); i++ {
fn := typecheck.Target.Funcs[i]
if !fn.IsHiddenClosure() {
if fn.IsDeadcodeClosure() {
fn.SetIsDeadcodeClosure(true)
if base.Flag.LowerM > 2 {
fmt.Printf("%v: unreferenced closure %v marked as dead\n", ir.Line(fn), fn)
if fn.Inl != nil && fn.LSym == nil {
ir.InitLSym(fn, true)
// inlineBudget determines the max budget for function 'fn' prior to
// analyzing the hairiness of the body of 'fn'. We pass in the pgo
// profile if available, which can change the budget. If 'verbose' is
// set, then print a remark where we boost the budget due to PGO.
func inlineBudget(fn *ir.Func, profile *pgo.Profile, verbose bool) int32 {
// Update the budget for profile-guided inlining.
budget := int32(inlineMaxBudget)
if n, ok := profile.WeightedCG.IRNodes[ir.LinkFuncName(fn)]; ok {
if _, ok := candHotCalleeMap[n]; ok {
budget = int32(inlineHotMaxBudget)
fmt.Printf("hot-node enabled increased budget=%v for func=%v\n", budget, ir.PkgFuncName(fn))
// CanInline determines whether fn is inlineable.
// If so, CanInline saves copies of fn.Body and fn.Dcl in fn.Inl.
// fn and fn.Body will already have been typechecked.
func CanInline(fn *ir.Func, profile *pgo.Profile) {
base.Fatalf("CanInline no nname %+v", fn)
if base.Debug.DumpInlFuncProps != "" {
defer inlheur.DumpFuncProps(fn, base.Debug.DumpInlFuncProps)
var reason string // reason, if any, that the function was not inlined
if base.Flag.LowerM > 1 || logopt.Enabled() {
if base.Flag.LowerM > 1 {
fmt.Printf("%v: cannot inline %v: %s\n", ir.Line(fn), fn.Nname, reason)
if logopt.Enabled() {
logopt.LogOpt(fn.Pos(), "cannotInlineFunction", "inline", ir.FuncName(fn), reason)
reason = InlineImpossible(fn)
if fn.Typecheck() == 0 {
base.Fatalf("CanInline on non-typechecked function %v", fn)
if n.Func.InlinabilityChecked() {
defer n.Func.SetInlinabilityChecked(true)
cc := int32(inlineExtraCallCost)
if base.Flag.LowerL == 4 {
cc = 1 // this appears to yield better performance than 0.
// Compute the inline budget for this function.
budget := inlineBudget(fn, profile, base.Debug.PGODebug > 0)
// At this point in the game the function we're looking at may
// have "stale" autos, vars that still appear in the Dcl list, but
// which no longer have any uses in the function body (due to
// elimination by deadcode). We'd like to exclude these dead vars
// when creating the "Inline.Dcl" field below; to accomplish this,
// the hairyVisitor below builds up a map of used/referenced
// locals, and we use this map to produce a pruned Inline.Dcl
// list. See issue 25249 for more context.
visitor := hairyVisitor{
if visitor.tooHairy(fn) {
reason = visitor.reason
n.Func.Inl = &ir.Inline{
Cost: budget - visitor.budget,
Dcl: pruneUnusedAutos(n.Defn.(*ir.Func).Dcl, &visitor),
Body: inlcopylist(fn.Body),
CanDelayResults: canDelayResults(fn),
if base.Flag.LowerM > 1 {
fmt.Printf("%v: can inline %v with cost %d as: %v { %v }\n", ir.Line(fn), n, budget-visitor.budget, fn.Type(), ir.Nodes(n.Func.Inl.Body))
} else if base.Flag.LowerM != 0 {
fmt.Printf("%v: can inline %v\n", ir.Line(fn), n)
if logopt.Enabled() {
logopt.LogOpt(fn.Pos(), "canInlineFunction", "inline", ir.FuncName(fn), fmt.Sprintf("cost: %d", budget-visitor.budget))
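// For illustration, the -m diagnostics above print lines of the following
// shape (the file positions, names, and costs here are made up):
//
//	main.go:10:6: can inline add with cost 4 as: func(int, int) int { return x + y }
//	main.go:20:6: cannot inline fill: function too complex: cost 196 exceeds budget 80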
// InlineImpossible returns a non-empty reason string if fn is impossible to
// inline regardless of cost or contents.
func InlineImpossible(fn *ir.Func) string {
var reason string // reason, if any, that the function can not be inlined.
// If marked "go:noinline", don't inline.
if fn.Pragma&ir.Noinline != 0 {
reason = "marked go:noinline"
// If marked "go:norace" and -race compilation, don't inline.
if base.Flag.Race && fn.Pragma&ir.Norace != 0 {
reason = "marked go:norace with -race compilation"
// If marked "go:nocheckptr" and -d checkptr compilation, don't inline.
if base.Debug.Checkptr != 0 && fn.Pragma&ir.NoCheckPtr != 0 {
reason = "marked go:nocheckptr"
// If marked "go:cgo_unsafe_args", don't inline, since the function
// makes assumptions about its argument frame layout.
if fn.Pragma&ir.CgoUnsafeArgs != 0 {
reason = "marked go:cgo_unsafe_args"
// If marked as "go:uintptrkeepalive", don't inline, since the keep
// alive information is lost during inlining.
// TODO(prattmic): This is handled on calls during escape analysis,
// which is after inlining. Move prior to inlining so the keep-alive is
// maintained after inlining.
if fn.Pragma&ir.UintptrKeepAlive != 0 {
reason = "marked as having a keep-alive uintptr argument"
// If marked as "go:uintptrescapes", don't inline, since the escape
// information is lost during inlining.
if fn.Pragma&ir.UintptrEscapes != 0 {
reason = "marked as having an escaping uintptr argument"
// The nowritebarrierrec checker currently works at function
// granularity, so inlining yeswritebarrierrec functions can confuse it
// (#22342). As a workaround, disallow inlining them for now.
if fn.Pragma&ir.Yeswritebarrierrec != 0 {
reason = "marked go:yeswritebarrierrec"
// If a local function has no fn.Body (it is defined outside of Go), it cannot be inlined.
// Imported functions don't have fn.Body but might have an inline body in fn.Inl.
if len(fn.Body) == 0 && !typecheck.HaveInlineBody(fn) {
reason = "no function body"
// If fn is a synthetic hash or eq function, it cannot be inlined.
// The function is not generated by the Unified IR frontend at this moment.
if ir.IsEqOrHashFunc(fn) {
reason = "type eq/hash function"
// canDelayResults reports whether inlined calls to fn can delay
// declaring the result parameter until the "return" statement.
func canDelayResults(fn *ir.Func) bool {
// We can delay declaring+initializing result parameters if:
// (1) there's exactly one "return" statement in the inlined function;
// (2) it's not an empty return statement (#44355); and
// (3) the result parameters aren't named.
ir.VisitList(fn.Body, func(n ir.Node) {
if n, ok := n.(*ir.ReturnStmt); ok {
if len(n.Results) == 0 {
nreturns++ // empty return statement (case 2)
return false // not exactly one return statement (case 1)
// temporaries for return values.
for _, param := range fn.Type().Results().FieldSlice() {
if sym := types.OrigSym(param.Sym); sym != nil && !sym.IsBlank() {
return false // found a named result parameter (case 3)
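// Illustrative examples of the three cases (not part of the compiler; cond()
// is a placeholder):
//
//	func a() int { return 42 }                        // one non-empty return, unnamed result: can delay
//	func b() int { if cond() { return 1 }; return 2 } // two return statements: cannot delay (case 1)
//	func c() (x int) { x = 1; return }                // empty return and named result: cannot delay (cases 2 and 3)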
// hairyVisitor visits a function body to determine its inlining
// hairiness and whether or not it can be inlined.
type hairyVisitor struct {
// This is needed to access the current caller in the doNode function.
usedLocals ir.NameSet
do func(ir.Node) bool
func (v *hairyVisitor) tooHairy(fn *ir.Func) bool {
v.do = v.doNode // cache closure
if ir.DoChildren(fn, v.do) {
v.reason = fmt.Sprintf("function too complex: cost %d exceeds budget %d", v.maxBudget-v.budget, v.maxBudget)
// doNode visits n and its children, updates the state in v, and returns true if
// n makes the current function too hairy for inlining.
func (v *hairyVisitor) doNode(n ir.Node) bool {
// Call is okay if inlinable and we have the budget for the body.
n := n.(*ir.CallExpr)
// Functions that call runtime.getcaller{pc,sp} can not be inlined
// because getcaller{pc,sp} expect a pointer to the caller's first argument.
// runtime.throw is a "cheap call" like panic in normal code.
if n.X.Op() == ir.ONAME {
name := n.X.(*ir.Name)
if name.Class == ir.PFUNC && types.IsRuntimePkg(name.Sym().Pkg) {
fn := name.Sym().Name
if fn == "getcallerpc" || fn == "getcallersp" {
v.reason = "call to " + fn
v.budget -= inlineExtraThrowCost
// Special case for reflect.noescape. It does just type
// conversions to appease the escape analysis, and doesn't
// generate any code.
if name.Class == ir.PFUNC && types.IsReflectPkg(name.Sym().Pkg) {
if name.Sym().Name == "noescape" {
// Special case for coverage counter updates; although
// these correspond to real operations, we treat them as
// zero cost for the moment. This is due to the existence
// of tests that are sensitive to inlining-- if the
// insertion of coverage instrumentation happens to tip a
// given function over the threshold and move it from
// "inlinable" to "not-inlinable", this can cause changes
// in allocation behavior, which can then result in test
// failures (a good example is the TestAllocations in
// crypto/ed25519).
if isAtomicCoverageCounterUpdate(n) {
if n.X.Op() == ir.OMETHEXPR {
if meth := ir.MethodExprName(n.X); meth != nil {
if fn := meth.Func; fn != nil {
if types.IsRuntimePkg(s.Pkg) && s.Name == "heapBits.nextArena" {
// Special case: explicitly allow mid-stack inlining of
// runtime.heapBits.next even though it calls slow-path
// runtime.heapBits.nextArena.
// Special case: on architectures that can do unaligned loads,
// explicitly mark encoding/binary methods as cheap,
// because in practice they are, even though our inlining
// budgeting system does not see that. See issue 42958.
if base.Ctxt.Arch.CanMergeLoads && s.Pkg.Path == "encoding/binary" {
case "littleEndian.Uint64", "littleEndian.Uint32", "littleEndian.Uint16",
"bigEndian.Uint64", "bigEndian.Uint32", "bigEndian.Uint16",
"littleEndian.PutUint64", "littleEndian.PutUint32", "littleEndian.PutUint16",
"bigEndian.PutUint64", "bigEndian.PutUint32", "bigEndian.PutUint16",
"littleEndian.AppendUint64", "littleEndian.AppendUint32", "littleEndian.AppendUint16",
"bigEndian.AppendUint64", "bigEndian.AppendUint32", "bigEndian.AppendUint16":
break // treat like any other node, that is, cost of 1
// Determine if the callee edge is for an inlinable hot callee or not.
if v.profile != nil && v.curFunc != nil {
if fn := inlCallee(n.X, v.profile); fn != nil && typecheck.HaveInlineBody(fn) {
lineOffset := pgo.NodeLineOffset(n, fn)
csi := pgo.CallSiteInfo{LineOffset: lineOffset, Caller: v.curFunc}
if _, o := candHotEdgeMap[csi]; o {
if base.Debug.PGODebug > 0 {
fmt.Printf("hot-callsite identified at line=%v for func=%v\n", ir.Line(n), ir.PkgFuncName(v.curFunc))
if ir.IsIntrinsicCall(n) {
// Treat like any other node.
if fn := inlCallee(n.X, v.profile); fn != nil && typecheck.HaveInlineBody(fn) {
v.budget -= fn.Inl.Cost
// Call cost for non-leaf inlining.
v.budget -= v.extraCallCost
base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
// Things that are too hairy, irrespective of the budget
case ir.OCALL, ir.OCALLINTER:
// Call cost for non-leaf inlining.
v.budget -= v.extraCallCost
n := n.(*ir.UnaryExpr)
if n.X.Op() == ir.OCONVIFACE && n.X.(*ir.ConvExpr).Implicit() {
// Hack to keep reflect.flag.mustBe inlinable for TestIntendedInlining.
// Before CL 284412, these conversions were introduced later in the
// compiler, so they didn't count against inlining budget.
v.budget -= inlineExtraPanicCost
// recover matches the argument frame pointer to find
// the right panic value, so it needs an argument frame.
v.reason = "call to recover"
if base.Debug.InlFuncsWithClosures == 0 {
v.reason = "not inlining functions with closures"
// TODO(danscales): Maybe make budget proportional to number of closure
// variables, e.g.:
//v.budget -= int32(len(n.(*ir.ClosureExpr).Func.ClosureVars) * 3)
// TODO(austin): However, if we're able to inline this closure into
// v.curFunc, then we actually pay nothing for the closure captures. We
// should try to account for that if we're going to account for captures.
ir.ODCLTYPE, // can't print yet
v.reason = "unhandled op " + n.Op().String()
v.budget -= inlineExtraAppendCost
n := n.(*ir.AddrExpr)
// Make "&s.f" cost 0 when f's offset is zero.
if dot, ok := n.X.(*ir.SelectorExpr); ok && (dot.Op() == ir.ODOT || dot.Op() == ir.ODOTPTR) {
if _, ok := dot.X.(*ir.Name); ok && dot.Selection.Offset == 0 {
v.budget += 2 // undo ir.OADDR+ir.ODOT/ir.ODOTPTR
// *(*X)(unsafe.Pointer(&x)) is low-cost
n := n.(*ir.StarExpr)
for ptr.Op() == ir.OCONVNOP {
ptr = ptr.(*ir.ConvExpr).X
if ptr.Op() == ir.OADDR {
v.budget += 1 // undo half of default cost of ir.ODEREF+ir.OADDR
// This doesn't produce code, but the children might.
v.budget++ // undo default cost
case ir.ODCLCONST, ir.OFALL, ir.OTYPE:
// These nodes don't produce code; omit from inlining budget.
if ir.IsConst(n.Cond, constant.Bool) {
// This if and the condition cost nothing.
if doList(n.Init(), v.do) {
if ir.BoolVal(n.Cond) {
return doList(n.Body, v.do)
return doList(n.Else, v.do)
if n.Class == ir.PAUTO {
// The only OBLOCK we should see at this point is an empty one.
// In any event, let the visitList(n.List()) below take care of the statements,
// and don't charge for the OBLOCK itself. The ++ undoes the -- below.
case ir.OMETHVALUE, ir.OSLICELIT:
v.budget-- // Hack for toolstash -cmp.
v.budget++ // Hack for toolstash -cmp.
n := n.(*ir.AssignListStmt)
// Unified IR unconditionally rewrites:
//
//	a, b = f()
//
// into:
//
//	DCL tmp1
//	DCL tmp2
//	tmp1, tmp2 = f()
//	a, b = tmp1, tmp2
//
// so that it can insert implicit conversions as necessary. To
// minimize impact to the existing inlining heuristics (in
// particular, to avoid breaking the existing inlinability regress
// tests), we need to compensate for this here.
// See also identical logic in isBigFunc.
if init := n.Rhs[0].Init(); len(init) == 1 {
if _, ok := init[0].(*ir.AssignListStmt); ok {
// 4 for each value, because each temporary variable now
// appears 3 times (DCL, LHS, RHS), plus an extra DCL node.
// 1 for the extra "tmp1, tmp2 = f()" assignment statement.
v.budget += 4*int32(len(n.Lhs)) + 1
// Special case for coverage counter updates and coverage
// function registrations. Although these correspond to real
// operations, we treat them as zero cost for the moment. This
// is primarily due to the existence of tests that are
// sensitive to inlining-- if the insertion of coverage
// instrumentation happens to tip a given function over the
// threshold and move it from "inlinable" to "not-inlinable",
// this can cause changes in allocation behavior, which can
// then result in test failures (a good example is the
// TestAllocations in crypto/ed25519).
n := n.(*ir.AssignStmt)
if n.X.Op() == ir.OINDEX && isIndexingCoverageCounter(n.X) {
// When debugging, don't stop early, to get full cost of inlining this function
if v.budget < 0 && base.Flag.LowerM < 2 && !logopt.Enabled() {
v.reason = "too expensive"
return ir.DoChildren(n, v.do)
func isBigFunc(fn *ir.Func) bool {
budget := inlineBigFunctionNodes
return ir.Any(fn, func(n ir.Node) bool {
// See logic in hairyVisitor.doNode, explaining unified IR's
// handling of "a, b = f()" assignments.
if n, ok := n.(*ir.AssignListStmt); ok && n.Op() == ir.OAS2 {
if init := n.Rhs[0].Init(); len(init) == 1 {
if _, ok := init[0].(*ir.AssignListStmt); ok {
budget += 4*len(n.Lhs) + 1
// inlcopylist (together with inlcopy) recursively copies a list of nodes, except
// that it keeps the same ONAME, OTYPE, and OLITERAL nodes. It is used for copying
// the body and dcls of an inlineable function.
func inlcopylist(ll []ir.Node) []ir.Node {
s := make([]ir.Node, len(ll))
for i, n := range ll {
// inlcopy is like DeepCopy(), but does extra work to copy closures.
func inlcopy(n ir.Node) ir.Node {
var edit func(ir.Node) ir.Node
edit = func(x ir.Node) ir.Node {
case ir.ONAME, ir.OTYPE, ir.OLITERAL, ir.ONIL:
ir.EditChildren(m, edit)
if x.Op() == ir.OCLOSURE {
x := x.(*ir.ClosureExpr)
// Need to save/duplicate x.Func.Nname,
// x.Func.Nname.Ntype, x.Func.Dcl, x.Func.ClosureVars, and
// x.Func.Body for iexport and local inlining.
newfn := ir.NewFunc(oldfn.Pos())
m.(*ir.ClosureExpr).Func = newfn
newfn.Nname = ir.NewNameAt(oldfn.Nname.Pos(), oldfn.Nname.Sym())
// XXX OK to share fn.Type() ??
newfn.Nname.SetType(oldfn.Nname.Type())
newfn.Body = inlcopylist(oldfn.Body)
// Make shallow copy of the Dcl and ClosureVar slices
newfn.Dcl = append([]*ir.Name(nil), oldfn.Dcl...)
newfn.ClosureVars = append([]*ir.Name(nil), oldfn.ClosureVars...)
// InlineCalls/inlnode walks fn's statements and expressions and substitutes any
// calls made to inlineable functions. This is the external entry point.
func InlineCalls(fn *ir.Func, profile *pgo.Profile) {
bigCaller := isBigFunc(fn)
if bigCaller && base.Flag.LowerM > 1 {
fmt.Printf("%v: function %v considered 'big'; reducing max cost of inlinees\n", ir.Line(fn), fn)
var inlCalls []*ir.InlinedCallExpr
var edit func(ir.Node) ir.Node
edit = func(n ir.Node) ir.Node {
return inlnode(n, bigCaller, &inlCalls, edit, profile)
ir.EditChildren(fn, edit)
// If we inlined any calls, we want to recursively visit their
// bodies for further inlining. However, we need to wait until
// *after* the original function body has been expanded, or else
// inlCallee can have false positives (e.g., #54632).
for len(inlCalls) > 0 {
inlCalls = inlCalls[1:]
ir.EditChildren(call, edit)
// inlnode recurses over the tree to find inlineable calls, which will
// be turned into OINLCALLs by mkinlcall. When the recursion comes
// back up, it examines left, right, list, rlist, ninit, ntest, nincr,
// nbody and nelse and uses one of the 4 inlconv/glue functions above
// to turn the OINLCALL into an expression, a statement, or to patch it
// into this node's list or rlist as appropriate.
// NOTE it makes no sense to pass the glue functions down the
// recursion to the level where the OINLCALL gets created, because they
// have to edit /this/ n, so you'd have to push that one down as well,
// but then you may as well do it here; this is cleaner and
// shorter and less complicated.
// The result of inlnode MUST be assigned back to n, e.g.
// n.Left = inlnode(n.Left)
func inlnode(n ir.Node, bigCaller bool, inlCalls *[]*ir.InlinedCallExpr, edit func(ir.Node) ir.Node, profile *pgo.Profile) ir.Node {
case ir.ODEFER, ir.OGO:
n := n.(*ir.GoDeferStmt)
switch call := n.Call; call.Op() {
base.FatalfAt(call.Pos(), "OCALLMETH missed by typecheck")
call := call.(*ir.CallExpr)
n := n.(*ir.TailCallStmt)
n.Call.NoInline = true // Do not inline a tail call for now. Maybe we could inline it just like RETURN fn(arg)?
// TODO do them here (or earlier),
// so escape analysis can avoid more heapmoves.
base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
n := n.(*ir.CallExpr)
if n.X.Op() == ir.OMETHEXPR {
// Prevent inlining some reflect.Value methods when using checkptr,
// even when package reflect was compiled without it (#35073).
if meth := ir.MethodExprName(n.X); meth != nil {
if base.Debug.Checkptr != 0 && types.IsReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") {
ir.EditChildren(n, edit)
// with all the branches out of the way, it is now time to
// transmogrify this node itself unless inhibited by the
// switch at the top of this function.
base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
call := n.(*ir.CallExpr)
if base.Flag.LowerM > 3 {
fmt.Printf("%v:call to func %+v\n", ir.Line(n), call.X)
if ir.IsIntrinsicCall(call) {
if fn := inlCallee(call.X, profile); fn != nil && typecheck.HaveInlineBody(fn) {
n = mkinlcall(call, fn, bigCaller, inlCalls)
// inlCallee takes a function-typed expression and returns the underlying function ONAME
// that it refers to if statically known. Otherwise, it returns nil.
func inlCallee(fn ir.Node, profile *pgo.Profile) *ir.Func {
fn = ir.StaticValue(fn)
fn := fn.(*ir.SelectorExpr)
n := ir.MethodExprName(fn)
// Check that receiver type matches fn.X.
// TODO(mdempsky): Handle implicit dereference
// of pointer receiver argument?
if n == nil || !types.Identical(n.Type().Recv().Type, fn.X.Type()) {
if fn.Class == ir.PFUNC {
fn := fn.(*ir.ClosureExpr)
CanInline(c, profile)
// SSADumpInline gives the SSA back end a chance to dump the function
// when producing output for debugging the compiler itself.
var SSADumpInline = func(*ir.Func) {}
// InlineCall allows the inliner implementation to be overridden.
// If it returns nil, the function will not be inlined.
var InlineCall = func(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExpr {
base.Fatalf("inline.InlineCall not overridden")
// inlineCostOK returns true if call n from caller to callee is cheap enough to
// inline. bigCaller indicates that caller is a big function.
// If inlineCostOK returns false, it also returns the max cost that the callee
// exceeded.
func inlineCostOK(n *ir.CallExpr, caller, callee *ir.Func, bigCaller bool) (bool, int32) {
maxCost := int32(inlineMaxBudget)
// We use this to restrict inlining into very big functions.
// See issue 26546 and 17566.
maxCost = inlineBigFunctionMaxCost
if callee.Inl.Cost <= maxCost {
// Simple case. Function is already cheap enough.
// We'll also allow inlining of hot functions below inlineHotMaxBudget,
// but only in small functions.
lineOffset := pgo.NodeLineOffset(n, caller)
csi := pgo.CallSiteInfo{LineOffset: lineOffset, Caller: caller}
if _, ok := candHotEdgeMap[csi]; !ok {
return false, maxCost
if base.Debug.PGODebug > 0 {
fmt.Printf("hot-big check disallows inlining for call %s (cost %d) at %v in big function %s\n", ir.PkgFuncName(callee), callee.Inl.Cost, ir.Line(n), ir.PkgFuncName(caller))
return false, maxCost
if callee.Inl.Cost > inlineHotMaxBudget {
return false, inlineHotMaxBudget
if base.Debug.PGODebug > 0 {
fmt.Printf("hot-budget check allows inlining for call %s (cost %d) at %v in function %s\n", ir.PkgFuncName(callee), callee.Inl.Cost, ir.Line(n), ir.PkgFuncName(caller))
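// For example (illustrative summary of the checks above): at a normal call
// site the callee must cost at most inlineMaxBudget (80); if the caller is
// "big" the limit drops to inlineBigFunctionMaxCost (20); a PGO-hot call site
// can raise the limit to inlineHotMaxBudget (2000), except that hot calls
// inside big callers are still rejected once they exceed the small limit.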
// If n is an OCALLFUNC node, and fn is an ONAME node for a
// function with an inlinable body, return an OINLCALL node that can replace n.
// The returned node's Ninit has the parameter assignments, the Nbody is the
// inlined function body, and (List, Rlist) contain the (input, output)
// parameters.
// The result of mkinlcall MUST be assigned back to n, e.g.
// n.Left = mkinlcall(n.Left, fn, isddd)
func mkinlcall(n *ir.CallExpr, fn *ir.Func, bigCaller bool, inlCalls *[]*ir.InlinedCallExpr) ir.Node {
if logopt.Enabled() {
logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(ir.CurFunc),
fmt.Sprintf("%s cannot be inlined", ir.PkgFuncName(fn)))
if ok, maxCost := inlineCostOK(n, ir.CurFunc, fn, bigCaller); !ok {
if logopt.Enabled() {
logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(ir.CurFunc),
fmt.Sprintf("cost %d of %s exceeds max caller cost %d", fn.Inl.Cost, ir.PkgFuncName(fn), maxCost))
if fn == ir.CurFunc {
// Can't recursively inline a function into itself.
if logopt.Enabled() {
logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", fmt.Sprintf("recursive call to %s", ir.FuncName(ir.CurFunc)))
if base.Flag.Cfg.Instrumenting && types.IsNoInstrumentPkg(fn.Sym().Pkg) {
// Runtime package must not be instrumented.
// Instrument skips runtime package. However, some runtime code can be
// inlined into other packages and instrumented there. To avoid this,
// we disable inlining of runtime functions when instrumenting.
// The example that we observed is inlining of LockOSThread,
// which led to false race reports on m contents.
if base.Flag.Race && types.IsNoRacePkg(fn.Sym().Pkg) {
parent := base.Ctxt.PosTable.Pos(n.Pos()).Base().InliningIndex()
// Check if we've already inlined this function at this particular
// call site, in order to stop inlining when we reach the beginning
// of a recursion cycle again. We don't inline immediately recursive
// functions, but allow inlining if there is a recursion cycle of
// many functions. Most likely, the inlining will stop before we
// even hit the beginning of the cycle again, but this catches the
// unusual case.
for inlIndex := parent; inlIndex >= 0; inlIndex = base.Ctxt.InlTree.Parent(inlIndex) {
if base.Ctxt.InlTree.InlinedFunction(inlIndex) == sym {
if base.Flag.LowerM > 1 {
fmt.Printf("%v: cannot inline %v into %v: repeated recursive cycle\n", ir.Line(n), fn, ir.FuncName(ir.CurFunc))
typecheck.AssertFixedCall(n)
inlIndex := base.Ctxt.InlTree.Add(parent, n.Pos(), sym, ir.FuncName(fn))
closureInitLSym := func(n *ir.CallExpr, fn *ir.Func) {
// The linker needs FuncInfo metadata for all inlined
// functions. This is typically handled by gc.enqueueFunc
// calling ir.InitLSym for all function declarations in
// typecheck.Target.Decls (ir.UseClosure adds all closures to
// Decls).
// However, non-trivial closures in Decls are ignored, and are
// instead enqueued when walk of the calling function
// discovers them.
// This presents a problem for direct calls to closures.
// Inlining will replace the entire closure definition with its
// body, which hides the closure from walk and thus suppresses
// symbol creation.
// Explicitly create a symbol early in this edge case to ensure
// we keep this metadata.
// TODO: Refactor to keep a reference so this can all be done
// by enqueueFunc.
if n.Op() != ir.OCALLFUNC {
// Not a standard call.
if n.X.Op() != ir.OCLOSURE {
// Not a direct closure call.
clo := n.X.(*ir.ClosureExpr)
if ir.IsTrivialClosure(clo) {
// enqueueFunc will handle trivial closures anyways.
ir.InitLSym(fn, true)
closureInitLSym(n, fn)
if base.Flag.GenDwarfInl > 0 {
if !sym.WasInlined() {
base.Ctxt.DwFixups.SetPrecursorFunc(sym, fn)
sym.Set(obj.AttrWasInlined, true)
if base.Flag.LowerM != 0 {
fmt.Printf("%v: inlining call to %v\n", ir.Line(n), fn)
if base.Flag.LowerM > 2 {
fmt.Printf("%v: Before inlining: %+v\n", ir.Line(n), n)
res := InlineCall(n, fn, inlIndex)
base.FatalfAt(n.Pos(), "inlining call to %v failed", fn)
if base.Flag.LowerM > 2 {
fmt.Printf("%v: After inlining %+v\n\n", ir.Line(res), res)
*inlCalls = append(*inlCalls, res)
// CalleeEffects appends any side effects from evaluating callee to init.
func CalleeEffects(init *ir.Nodes, callee ir.Node) {
init.Append(ir.TakeInit(callee)...)
switch callee.Op() {
case ir.ONAME, ir.OCLOSURE, ir.OMETHEXPR:
conv := callee.(*ir.ConvExpr)
ic := callee.(*ir.InlinedCallExpr)
init.Append(ic.Body.Take()...)
callee = ic.SingleResult()
base.FatalfAt(callee.Pos(), "unexpected callee expression: %v", callee)
func pruneUnusedAutos(ll []*ir.Name, vis *hairyVisitor) []*ir.Name {
s := make([]*ir.Name, 0, len(ll))
for _, n := range ll {
if n.Class == ir.PAUTO {
if !vis.usedLocals.Has(n) {
// numNonClosures returns the number of functions in list which are not closures.
func numNonClosures(list []*ir.Func) int {
for _, fn := range list {
if fn.OClosure == nil {
func doList(list []ir.Node, do func(ir.Node) bool) bool {
for _, x := range list {
// isIndexingCoverageCounter returns true if the specified node 'n' is indexing
// into a coverage counter array.
func isIndexingCoverageCounter(n ir.Node) bool {
if n.Op() != ir.OINDEX {
ixn := n.(*ir.IndexExpr)
if ixn.X.Op() != ir.ONAME || !ixn.X.Type().IsArray() {
nn := ixn.X.(*ir.Name)
return nn.CoverageCounter()
// isAtomicCoverageCounterUpdate examines the specified node to
// determine whether it represents a call to sync/atomic.AddUint32 or
// sync/atomic.StoreUint32 that updates a coverage counter.
func isAtomicCoverageCounterUpdate(cn *ir.CallExpr) bool {
if cn.X.Op() != ir.ONAME {
name := cn.X.(*ir.Name)
if name.Class != ir.PFUNC {
fn := name.Sym().Name
if name.Sym().Pkg.Path != "sync/atomic" ||
(fn != "AddUint32" && fn != "StoreUint32") {
if len(cn.Args) != 2 || cn.Args[0].Op() != ir.OADDR {
adn := cn.Args[0].(*ir.AddrExpr)
v := isIndexingCoverageCounter(adn.X)
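// For illustration, the shape that the two helpers above are matching is an
// atomic update of a slot in a compiler-generated coverage counter array,
// roughly:
//
//	atomic.AddUint32(&counters[3], 1)
//
// where "counters" stands for an ONAME whose CoverageCounter flag is set; the
// identifier here is hypothetical.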