1 // Copyright 2017 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
20 // TestIntendedInlining tests that specific functions are inlined.
21 // This allows refactoring for code clarity and re-use without fear that
22 // changes to the compiler will cause silent performance regressions.
23 func TestIntendedInlining(t *testing.T) {
24 if testing.Short() && testenv.Builder() == "" {
25 t.Skip("skipping in short mode")
27 testenv.MustHaveGoRun(t)
30 // want is the list of function names (by package) that should
31 // be inlinable. If they have no callers in their packages, they
32 // might not actually be inlined anywhere.
33 want := map[string][]string{
75 "writeHeapBitsForAddr",
82 "(*gcWork).tryGetFast",
84 "(*markBits).advance",
85 "(*mspan).allocBitsForIndex",
87 "(*mspan).markBitsForBase",
88 "(*mspan).markBitsForIndex",
92 "runtime/internal/sys": {},
93 "runtime/internal/math": {
103 "(*Buffer).ReadByte",
106 "(*Buffer).UnreadByte",
107 "(*Buffer).tryGrowByReslice",
113 "(*dictDecoder).tryWriteCopy",
133 "Value.CanInterface",
146 "Value.OverflowComplex",
147 "Value.OverflowFloat",
149 "Value.OverflowUint",
158 "flag.mustBeAssignable",
159 "flag.mustBeExported",
168 // The following functions require the math_big_pure_go build tag.
173 "(*rngSource).Int63",
174 "(*rngSource).Uint64",
177 "(*UDPConn).ReadFromUDP",
180 // (*Bool).CompareAndSwap handled below.
185 "(*Int32).CompareAndSwap",
190 "(*Int64).CompareAndSwap",
195 "(*Uint32).CompareAndSwap",
200 "(*Uint64).CompareAndSwap",
205 "(*Uintptr).CompareAndSwap",
209 // (*Pointer[T])'s methods' handled below.
213 if runtime.GOARCH != "386" && runtime.GOARCH != "loong64" && runtime.GOARCH != "mips64" && runtime.GOARCH != "mips64le" && runtime.GOARCH != "riscv64" {
214 // nextFreeFast calls sys.Ctz64, which on 386 is implemented in asm and is not inlinable.
215 // We currently don't have midstack inlining so nextFreeFast is also not inlinable on 386.
217 // On loong64, mips64x and riscv64, Ctz64 is not intrinsified, which makes nextFreeFast too expensive
218 // to inline (Issue 22239).
218 want["runtime"] = append(want["runtime"], "nextFreeFast")
219 // Same behavior for heapBits.nextFast.
220 want["runtime"] = append(want["runtime"], "heapBits.nextFast")
222 if runtime.GOARCH != "386" {
223 // As explained above, Ctz64 and Ctz32 are not Go code on 386.
224 // The same applies to Bswap32.
225 want["runtime/internal/sys"] = append(want["runtime/internal/sys"], "Ctz64")
226 want["runtime/internal/sys"] = append(want["runtime/internal/sys"], "Ctz32")
227 want["runtime/internal/sys"] = append(want["runtime/internal/sys"], "Bswap32")
229 if bits.UintSize == 64 {
230 // mix is only defined on 64-bit architectures
231 want["runtime"] = append(want["runtime"], "mix")
232 // (*Bool).CompareAndSwap is just over budget on 32-bit systems (386, arm).
233 want["sync/atomic"] = append(want["sync/atomic"], "(*Bool).CompareAndSwap")
235 if buildcfg.Experiment.Unified {
236 // Non-unified IR does not report "inlining call ..." for atomic.Pointer[T]'s methods.
237 // TODO(cuonglm): remove once non-unified IR frontend gone.
238 want["sync/atomic"] = append(want["sync/atomic"], "(*Pointer[go.shape.int]).CompareAndSwap")
239 want["sync/atomic"] = append(want["sync/atomic"], "(*Pointer[go.shape.int]).Load")
240 want["sync/atomic"] = append(want["sync/atomic"], "(*Pointer[go.shape.int]).Store")
241 want["sync/atomic"] = append(want["sync/atomic"], "(*Pointer[go.shape.int]).Swap")
244 switch runtime.GOARCH {
245 case "386", "wasm", "arm":
247 // TODO(mvdan): As explained in /test/inline_sync.go, some
248 // architectures don't have atomic intrinsics, so these go over
249 // the inlining budget. Move back to the main table once that
250 // problem is solved.
251 want["sync"] = []string{
255 "(*RWMutex).RUnlock",
260 // Functions that must actually be inlined; they must have actual callers.
261 must := map[string]bool{
262 "compress/flate.byLiteral.Len": true,
263 "compress/flate.byLiteral.Less": true,
264 "compress/flate.byLiteral.Swap": true,
267 notInlinedReason := make(map[string]string)
268 pkgs := make([]string, 0, len(want))
269 for pname, fnames := range want {
270 pkgs = append(pkgs, pname)
271 for _, fname := range fnames {
272 fullName := pname + "." + fname
273 if _, ok := notInlinedReason[fullName]; ok {
274 t.Errorf("duplicate func: %s", fullName)
276 notInlinedReason[fullName] = "unknown reason"
280 args := append([]string{"build", "-gcflags=-m -m", "-tags=math_big_pure_go"}, pkgs...)
281 cmd := testenv.CleanCmdEnv(exec.Command(testenv.GoToolPath(t), args...))
285 cmdErr := make(chan error, 1)
290 scanner := bufio.NewScanner(pr)
292 canInline := regexp.MustCompile(`: can inline ([^ ]*)`)
293 haveInlined := regexp.MustCompile(`: inlining call to ([^ ]*)`)
294 cannotInline := regexp.MustCompile(`: cannot inline ([^ ]*): (.*)`)
296 line := scanner.Text()
297 if strings.HasPrefix(line, "# ") {
301 if m := haveInlined.FindStringSubmatch(line); m != nil {
303 delete(notInlinedReason, curPkg+"."+fname)
306 if m := canInline.FindStringSubmatch(line); m != nil {
308 fullname := curPkg + "." + fname
309 // If function must be inlined somewhere, being inlinable is not enough
310 if _, ok := must[fullname]; !ok {
311 delete(notInlinedReason, fullname)
315 if m := cannotInline.FindStringSubmatch(line); m != nil {
316 fname, reason := m[1], m[2]
317 fullName := curPkg + "." + fname
318 if _, ok := notInlinedReason[fullName]; ok {
319 // cmd/compile gave us a reason why
320 notInlinedReason[fullName] = reason
325 if err := <-cmdErr; err != nil {
328 if err := scanner.Err(); err != nil {
331 for fullName, reason := range notInlinedReason {
332 t.Errorf("%s was not inlined: %s", fullName, reason)