1 // Copyright 2021 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
16 "cmd/compile/internal/base"
17 "cmd/compile/internal/inline"
18 "cmd/compile/internal/ir"
19 "cmd/compile/internal/typecheck"
20 "cmd/compile/internal/types"
21 "cmd/compile/internal/types2"
// localPkgReader holds the package reader used for reading the local
// package. It exists so the unified IR linker can refer back to it
// later.
var localPkgReader *pkgReader
// unified constructs the local package's Internal Representation (IR)
// from its syntax tree (AST).
//
// The pipeline contains 2 steps:
//
//  1. Generate the export data "stub".
//
//  2. Generate the IR from the export data above.
//
// The package data "stub" at step (1) contains everything from the local package,
// but nothing that has been imported. When we're actually writing out export data
// to the output files (see writeNewExport), we run the "linker", which:
//
//   - Updates compiler extensions data (e.g. inlining cost, escape analysis results).
//
//   - Handles re-exporting any transitive dependencies.
//
//   - Prunes out any unnecessary details (e.g. non-inlineable functions, because any
//     downstream importers only care about inlinable functions).
//
// The source files are typechecked twice: once before writing the export data
// using types2, and again after reading the export data using gc/typecheck.
// The duplication of work will go away once we only use the types2 type checker,
// removing the gc/typecheck step. For now, it is kept because:
//
//   - It reduces the engineering costs in maintaining a fork of typecheck
//     (e.g. no need to backport fixes like CL 327651).
//
//   - It makes it easier to pass toolstash -cmp.
//
//   - Historically, we would always re-run the typechecker after importing a package,
//     even though we know the imported data is valid. It's not ideal, but it's
//     not causing any problems either.
//
//   - gc/typecheck is still in charge of some transformations, such as rewriting
//     multi-valued function calls or transforming ir.OINDEX to ir.OINDEXMAP.
//
// Using the syntax tree with types2, which has a complete representation of generics,
// the unified IR has the full typed AST needed for introspection during step (1).
// In other words, we have all the necessary information to build the generic IR form
// (see writer.captureVars for an example).
func unified(m posMap, noders []*noder) {
	// Install the unified-IR hooks used by the inliner.
	inline.InlineCall = unifiedInlineCall
	typecheck.HaveInlineBody = unifiedHaveInlineBody

	// Step (1): typecheck with types2 and write the export data stub.
	data := writePkgStub(m, noders)

	// We already passed base.Flag.Lang to types2 to handle validating
	// the user's source code. Bump it up now to the current version and
	// re-parse, so typecheck doesn't complain if we construct IR that
	// utilizes newer Go features.
	base.Flag.Lang = fmt.Sprintf("go1.%d", goversion.Version)
	types.ParseLangFlag()

	target := typecheck.Target

	typecheck.TypecheckAllowed = true

	// Step (2): read the stub back in to construct the gc IR.
	localPkgReader = newPkgReader(pkgbits.NewPkgDecoder(types.LocalPkg.Path, data))
	readPackage(localPkgReader, types.LocalPkg, true)

	r := localPkgReader.newReader(pkgbits.RelocMeta, pkgbits.PrivateRootIdx, pkgbits.SyncPrivate)
	r.pkgInit(types.LocalPkg, target)

	readBodies(target, false)

	// Check that nothing snuck past typechecking.
	for _, fn := range target.Funcs {
		if fn.Typecheck() == 0 {
			base.FatalfAt(fn.Pos(), "missed typecheck: %v", fn)
		}

		// For functions, check that at least their first statement (if
		// any) was typechecked too.
		if len(fn.Body) != 0 {
			if stmt := fn.Body[0]; stmt.Typecheck() == 0 {
				base.FatalfAt(stmt.Pos(), "missed typecheck: %v", stmt)
			}
		}
	}

	// For functions that originally came from package runtime,
	// mark as norace to prevent instrumenting, see issue #60439.
	for _, fn := range target.Funcs {
		if !base.Flag.CompilingRuntime && types.IsRuntimePkg(fn.Sym().Pkg) {
			fn.Pragma |= ir.Norace
		}
	}

	base.ExitIfErrors() // just in case
}
// readBodies iteratively expands all pending dictionaries and
// function bodies.
//
// If duringInlining is true, then the inline.InlineDecls is called as
// necessary on instantiations of imported generic functions, so their
// inlining costs can be computed.
func readBodies(target *ir.Package, duringInlining bool) {
	var inlDecls []*ir.Func

	// Don't use range--bodyIdx can add closures to todoBodies.
	for {
		// The order we expand dictionaries and bodies doesn't matter, so
		// pop from the end to reduce todoBodies reallocations if it grows
		// further.
		//
		// However, we do at least need to flush any pending dictionaries
		// before reading bodies, because bodies might reference the
		// dictionaries.

		if len(todoDicts) > 0 {
			fn := todoDicts[len(todoDicts)-1]
			todoDicts = todoDicts[:len(todoDicts)-1]
			fn()
			continue
		}

		if len(todoBodies) > 0 {
			fn := todoBodies[len(todoBodies)-1]
			todoBodies = todoBodies[:len(todoBodies)-1]

			pri, ok := bodyReader[fn]
			assert(ok)
			pri.funcBody(fn)

			// Instantiated generic function: add to Decls for typechecking
			// and compilation.
			if fn.OClosure == nil && len(pri.dict.targs) != 0 {
				// cmd/link does not support a type symbol referencing a method symbol
				// across DSO boundary, so force re-compiling methods on a generic type
				// even it was seen from imported package in linkshared mode, see #58966.
				canSkipNonGenericMethod := !(base.Ctxt.Flag_linkshared && ir.IsMethod(fn))
				if duringInlining && canSkipNonGenericMethod {
					inlDecls = append(inlDecls, fn)
				} else {
					target.Funcs = append(target.Funcs, fn)
				}
			}

			continue
		}

		break
	}

	// All work queues drained; release them for GC.
	todoDicts = nil
	todoBodies = nil

	if len(inlDecls) != 0 {
		// If we instantiated any generic functions during inlining, we need
		// to call CanInline on them so they'll be transitively inlined
		// correctly (#56280).
		//
		// We know these functions were already compiled in an imported
		// package though, so we don't need to actually apply InlineCalls or
		// save the function bodies any further than this.
		//
		// We can also lower the -m flag to 0, to suppress duplicate "can
		// inline" diagnostics reported against the imported package. Again,
		// we already reported those diagnostics in the original package, so
		// it's pointless repeating them here.

		oldLowerM := base.Flag.LowerM
		base.Flag.LowerM = 0
		inline.InlineDecls(nil, inlDecls, false)
		base.Flag.LowerM = oldLowerM

		for _, fn := range inlDecls {
			fn.Body = nil // free memory
		}
	}
}
// writePkgStub type checks the given parsed source files,
// writes an export data package stub representing them,
// and returns the result.
func writePkgStub(m posMap, noders []*noder) string {
	// Typecheck with types2; pkg/info hold the types2 results.
	pkg, info := checkFiles(m, noders)

	pw := newPkgWriter(m, pkg, info)

	pw.collectDecls(noders)

	publicRootWriter := pw.newWriter(pkgbits.RelocMeta, pkgbits.SyncPublic)
	privateRootWriter := pw.newWriter(pkgbits.RelocMeta, pkgbits.SyncPrivate)

	// The readers rely on these two roots living at fixed indices.
	assert(publicRootWriter.Idx == pkgbits.PublicRootIdx)
	assert(privateRootWriter.Idx == pkgbits.PrivateRootIdx)

	{
		w := publicRootWriter
		w.pkg(pkg)
		w.Bool(false) // TODO(mdempsky): Remove; was "has init"

		// Write all package-scope objects, in the deterministic order
		// provided by scope.Names.
		scope := pkg.Scope()
		names := scope.Names()
		w.Len(len(names))
		for _, name := range names {
			w.obj(scope.Lookup(name), nil)
		}

		w.Sync(pkgbits.SyncEOF)
		w.Flush()
	}

	{
		w := privateRootWriter
		w.pkgInit(noders)
		w.Flush()
	}

	var sb strings.Builder
	pw.DumpTo(&sb)

	// At this point, we're done with types2. Make sure the package is
	// garbage collected.
	freePackage(pkg)

	return sb.String()
}
// freePackage ensures the given package is garbage collected.
func freePackage(pkg *types2.Package) {
	// The GC test below relies on a precise GC that runs finalizers as
	// soon as objects are unreachable. Our implementation provides
	// this, but other/older implementations may not (e.g., Go 1.4 does
	// not because of #22350). To avoid imposing unnecessary
	// restrictions on the GOROOT_BOOTSTRAP toolchain, we skip the test
	// during bootstrapping.
	if base.CompilerBootstrap || base.Debug.GCCheck == 0 {
		*pkg = types2.Package{}
		return
	}

	// Set a finalizer on pkg so we can detect if/when it's collected.
	done := make(chan struct{})
	runtime.SetFinalizer(pkg, func(*types2.Package) { close(done) })

	// Important: objects involved in cycles are not finalized, so zero
	// out pkg to break its cycles and allow the finalizer to run.
	*pkg = types2.Package{}

	// It typically takes just 1 or 2 cycles to release pkg, but it
	// doesn't hurt to try a few more times.
	for i := 0; i < 10; i++ {
		select {
		case <-done:
			return
		default:
			runtime.GC()
		}
	}

	base.Fatalf("package never finalized")
}
// readPackage reads package export data from pr to populate
// importpkg.
//
// localStub indicates whether pr is reading the stub export data for
// the local package, as opposed to relocated export data for an
// import.
func readPackage(pr *pkgReader, importpkg *types.Pkg, localStub bool) {
	{
		r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic)

		pkg := r.pkg()
		base.Assertf(pkg == importpkg, "have package %q (%p), want package %q (%p)", pkg.Path, pkg, importpkg.Path, importpkg)

		r.Bool() // TODO(mdempsky): Remove; was "has init"

		// Record a lazy reader index for each non-stub object, so it
		// can be expanded on demand later.
		for i, n := 0, r.Len(); i < n; i++ {
			r.Sync(pkgbits.SyncObject)
			assert(!r.Bool())
			idx := r.Reloc(pkgbits.RelocObj)
			assert(r.Len() == 0)

			path, name, code := r.p.PeekObj(idx)
			if code != pkgbits.ObjStub {
				objReader[types.NewPkg(path, "").Lookup(name)] = pkgReaderIndex{pr, idx, nil, nil, nil}
			}
		}

		r.Sync(pkgbits.SyncEOF)
	}

	if !localStub {
		r := pr.newReader(pkgbits.RelocMeta, pkgbits.PrivateRootIdx, pkgbits.SyncPrivate)

		if r.Bool() {
			sym := importpkg.Lookup(".inittask")
			task := ir.NewNameAt(src.NoXPos, sym, nil)
			task.Class = ir.PEXTERN
			sym.Def = task
		}

		// Record where to find each imported function body; first
		// writer wins if a symbol appears more than once.
		for i, n := 0, r.Len(); i < n; i++ {
			path := r.String()
			name := r.String()
			idx := r.Reloc(pkgbits.RelocBody)

			sym := types.NewPkg(path, "").Lookup(name)
			if _, ok := importBodyReader[sym]; !ok {
				importBodyReader[sym] = pkgReaderIndex{pr, idx, nil, nil, nil}
			}
		}

		r.Sync(pkgbits.SyncEOF)
	}
}
// writeUnifiedExport writes to `out` the finalized, self-contained
// Unified IR export data file for the current compilation unit.
func writeUnifiedExport(out io.Writer) {
	l := linker{
		pw: pkgbits.NewPkgEncoder(base.Debug.SyncFrames),

		pkgs:   make(map[string]pkgbits.Index),
		decls:  make(map[*types.Sym]pkgbits.Index),
		bodies: make(map[*types.Sym]pkgbits.Index),
	}

	publicRootWriter := l.pw.NewEncoder(pkgbits.RelocMeta, pkgbits.SyncPublic)
	privateRootWriter := l.pw.NewEncoder(pkgbits.RelocMeta, pkgbits.SyncPrivate)
	assert(publicRootWriter.Idx == pkgbits.PublicRootIdx)
	assert(privateRootWriter.Idx == pkgbits.PrivateRootIdx)

	var selfPkgIdx pkgbits.Index

	{
		// Walk the local package stub's public root, relocating every
		// exported object into the output encoder.
		pr := localPkgReader
		r := pr.NewDecoder(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic)

		r.Sync(pkgbits.SyncPkg)
		selfPkgIdx = l.relocIdx(pr, pkgbits.RelocPkg, r.Reloc(pkgbits.RelocPkg))

		r.Bool() // TODO(mdempsky): Remove; was "has init"

		for i, n := 0, r.Len(); i < n; i++ {
			r.Sync(pkgbits.SyncObject)
			assert(!r.Bool())
			idx := r.Reloc(pkgbits.RelocObj)
			assert(r.Len() == 0)

			xpath, xname, xtag := pr.PeekObj(idx)
			assert(xpath == pr.PkgPath())
			assert(xtag != pkgbits.ObjStub)

			if types.IsExported(xname) {
				l.relocIdx(pr, pkgbits.RelocObj, idx)
			}
		}

		r.Sync(pkgbits.SyncEOF)
	}

	{
		// Emit the public root, listing the relocated declarations in
		// deterministic (sorted-index) order.
		var idxs []pkgbits.Index
		for _, idx := range l.decls {
			idxs = append(idxs, idx)
		}
		sort.Slice(idxs, func(i, j int) bool { return idxs[i] < idxs[j] })

		w := publicRootWriter

		w.Sync(pkgbits.SyncPkg)
		w.Reloc(pkgbits.RelocPkg, selfPkgIdx)
		w.Bool(false) // TODO(mdempsky): Remove; was "has init"

		w.Len(len(idxs))
		for _, idx := range idxs {
			w.Sync(pkgbits.SyncObject)
			w.Bool(false)
			w.Reloc(pkgbits.RelocObj, idx)
			w.Len(0)
		}

		w.Sync(pkgbits.SyncEOF)
		w.Flush()
	}

	{
		// Emit the private root: the ".inittask" flag plus the table of
		// exported function bodies, again in deterministic order.
		type symIdx struct {
			sym *types.Sym
			idx pkgbits.Index
		}
		var bodies []symIdx
		for sym, idx := range l.bodies {
			bodies = append(bodies, symIdx{sym, idx})
		}
		sort.Slice(bodies, func(i, j int) bool { return bodies[i].idx < bodies[j].idx })

		w := privateRootWriter

		w.Bool(typecheck.Lookup(".inittask").Def != nil)

		w.Len(len(bodies))
		for _, body := range bodies {
			w.String(body.sym.Pkg.Path)
			w.String(body.sym.Name)
			w.Reloc(pkgbits.RelocBody, body.idx)
		}

		w.Sync(pkgbits.SyncEOF)
		w.Flush()
	}

	base.Ctxt.Fingerprint = l.pw.DumpTo(out)
}