// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
20 "cmd/compile/internal/abi"
21 "cmd/compile/internal/base"
22 "cmd/compile/internal/ir"
23 "cmd/compile/internal/liveness"
24 "cmd/compile/internal/objw"
25 "cmd/compile/internal/reflectdata"
26 "cmd/compile/internal/ssa"
27 "cmd/compile/internal/staticdata"
28 "cmd/compile/internal/typecheck"
29 "cmd/compile/internal/types"
var ssaConfig *ssa.Config
var ssaCaches []ssa.Cache

var ssaDump string     // early copy of $GOSSAFUNC; the func name to dump output for
var ssaDir string      // optional destination for ssa dump file
var ssaDumpStdout bool // whether to dump to stdout
var ssaDumpCFG string  // generate CFGs for these phases

const ssaDumpFile = "ssa.html"
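
// Illustrative examples of the settings above (InitEnv below does the
// actual parsing; the function name and phase list here are made up):
//
//	GOSSAFUNC=Foo           dump SSA for Foo to ssa.html
//	GOSSAFUNC=Foo+          also copy the dump to stdout
//	GOSSAFUNC=Foo:opt       additionally generate CFGs for the "opt" phase
//	GOSSADIR=/tmp/ssadumps  place the dump file under this directory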
// ssaDumpInlined holds all inlined functions when ssaDump contains a function name.
var ssaDumpInlined []*ir.Func

func DumpInline(fn *ir.Func) {
	if ssaDump != "" && ssaDump == ir.FuncName(fn) {
		ssaDumpInlined = append(ssaDumpInlined, fn)
	}
}

func InitEnv() {
	ssaDump = os.Getenv("GOSSAFUNC")
	ssaDir = os.Getenv("GOSSADIR")
	if ssaDump != "" {
		if strings.HasSuffix(ssaDump, "+") {
			ssaDump = ssaDump[:len(ssaDump)-1]
			ssaDumpStdout = true
		}
		spl := strings.Split(ssaDump, ":")
		if len(spl) > 1 {
			ssaDump = spl[0]
			ssaDumpCFG = spl[1]
		}
	}
}

func InitConfig() {
	types_ := ssa.NewTypes()

	// Generate a few pointer types that are uncommon in the frontend but common in the backend.
	// Caching is disabled in the backend, so generating these here avoids allocations.
	_ = types.NewPtr(types.Types[types.TINTER])                             // *interface{}
	_ = types.NewPtr(types.NewPtr(types.Types[types.TSTRING]))              // **string
	_ = types.NewPtr(types.NewSlice(types.Types[types.TINTER]))             // *[]interface{}
	_ = types.NewPtr(types.NewPtr(types.ByteType))                          // **byte
	_ = types.NewPtr(types.NewSlice(types.ByteType))                        // *[]byte
	_ = types.NewPtr(types.NewSlice(types.Types[types.TSTRING]))            // *[]string
	_ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[types.TUINT8]))) // ***uint8
	_ = types.NewPtr(types.Types[types.TINT16])                             // *int16
	_ = types.NewPtr(types.Types[types.TINT64])                             // *int64
	_ = types.NewPtr(types.ErrorType)                                       // *error
	_ = types.NewPtr(reflectdata.MapType())                                 // *runtime.hmap
	_ = types.NewPtr(deferstruct())                                         // *runtime._defer
	types.NewPtrCacheEnabled = false
	ssaConfig = ssa.NewConfig(base.Ctxt.Arch.Name, *types_, base.Ctxt, base.Flag.N == 0, Arch.SoftFloat)
	ssaConfig.Race = base.Flag.Race
	ssaCaches = make([]ssa.Cache, base.Flag.LowerC)

	// Set up some runtime functions we'll need to call.
	ir.Syms.AssertE2I = typecheck.LookupRuntimeFunc("assertE2I")
	ir.Syms.AssertE2I2 = typecheck.LookupRuntimeFunc("assertE2I2")
	ir.Syms.AssertI2I = typecheck.LookupRuntimeFunc("assertI2I")
	ir.Syms.AssertI2I2 = typecheck.LookupRuntimeFunc("assertI2I2")
	ir.Syms.CgoCheckMemmove = typecheck.LookupRuntimeFunc("cgoCheckMemmove")
	ir.Syms.CgoCheckPtrWrite = typecheck.LookupRuntimeFunc("cgoCheckPtrWrite")
	ir.Syms.CheckPtrAlignment = typecheck.LookupRuntimeFunc("checkptrAlignment")
	ir.Syms.Deferproc = typecheck.LookupRuntimeFunc("deferproc")
	ir.Syms.Deferprocat = typecheck.LookupRuntimeFunc("deferprocat")
	ir.Syms.DeferprocStack = typecheck.LookupRuntimeFunc("deferprocStack")
	ir.Syms.Deferreturn = typecheck.LookupRuntimeFunc("deferreturn")
	ir.Syms.Duffcopy = typecheck.LookupRuntimeFunc("duffcopy")
	ir.Syms.Duffzero = typecheck.LookupRuntimeFunc("duffzero")
	ir.Syms.GCWriteBarrier[0] = typecheck.LookupRuntimeFunc("gcWriteBarrier1")
	ir.Syms.GCWriteBarrier[1] = typecheck.LookupRuntimeFunc("gcWriteBarrier2")
	ir.Syms.GCWriteBarrier[2] = typecheck.LookupRuntimeFunc("gcWriteBarrier3")
	ir.Syms.GCWriteBarrier[3] = typecheck.LookupRuntimeFunc("gcWriteBarrier4")
	ir.Syms.GCWriteBarrier[4] = typecheck.LookupRuntimeFunc("gcWriteBarrier5")
	ir.Syms.GCWriteBarrier[5] = typecheck.LookupRuntimeFunc("gcWriteBarrier6")
	ir.Syms.GCWriteBarrier[6] = typecheck.LookupRuntimeFunc("gcWriteBarrier7")
	ir.Syms.GCWriteBarrier[7] = typecheck.LookupRuntimeFunc("gcWriteBarrier8")
	ir.Syms.Goschedguarded = typecheck.LookupRuntimeFunc("goschedguarded")
	ir.Syms.Growslice = typecheck.LookupRuntimeFunc("growslice")
	ir.Syms.InterfaceSwitch = typecheck.LookupRuntimeFunc("interfaceSwitch")
	ir.Syms.Memmove = typecheck.LookupRuntimeFunc("memmove")
	ir.Syms.Msanread = typecheck.LookupRuntimeFunc("msanread")
	ir.Syms.Msanwrite = typecheck.LookupRuntimeFunc("msanwrite")
	ir.Syms.Msanmove = typecheck.LookupRuntimeFunc("msanmove")
	ir.Syms.Asanread = typecheck.LookupRuntimeFunc("asanread")
	ir.Syms.Asanwrite = typecheck.LookupRuntimeFunc("asanwrite")
	ir.Syms.Newobject = typecheck.LookupRuntimeFunc("newobject")
	ir.Syms.Newproc = typecheck.LookupRuntimeFunc("newproc")
	ir.Syms.Panicdivide = typecheck.LookupRuntimeFunc("panicdivide")
	ir.Syms.PanicdottypeE = typecheck.LookupRuntimeFunc("panicdottypeE")
	ir.Syms.PanicdottypeI = typecheck.LookupRuntimeFunc("panicdottypeI")
	ir.Syms.Panicnildottype = typecheck.LookupRuntimeFunc("panicnildottype")
	ir.Syms.Panicoverflow = typecheck.LookupRuntimeFunc("panicoverflow")
	ir.Syms.Panicshift = typecheck.LookupRuntimeFunc("panicshift")
	ir.Syms.Racefuncenter = typecheck.LookupRuntimeFunc("racefuncenter")
	ir.Syms.Racefuncexit = typecheck.LookupRuntimeFunc("racefuncexit")
	ir.Syms.Raceread = typecheck.LookupRuntimeFunc("raceread")
	ir.Syms.Racereadrange = typecheck.LookupRuntimeFunc("racereadrange")
	ir.Syms.Racewrite = typecheck.LookupRuntimeFunc("racewrite")
	ir.Syms.Racewriterange = typecheck.LookupRuntimeFunc("racewriterange")
	ir.Syms.TypeAssert = typecheck.LookupRuntimeFunc("typeAssert")
	ir.Syms.WBZero = typecheck.LookupRuntimeFunc("wbZero")
	ir.Syms.WBMove = typecheck.LookupRuntimeFunc("wbMove")
	ir.Syms.X86HasPOPCNT = typecheck.LookupRuntimeVar("x86HasPOPCNT")       // bool
	ir.Syms.X86HasSSE41 = typecheck.LookupRuntimeVar("x86HasSSE41")         // bool
	ir.Syms.X86HasFMA = typecheck.LookupRuntimeVar("x86HasFMA")             // bool
	ir.Syms.ARMHasVFPv4 = typecheck.LookupRuntimeVar("armHasVFPv4")         // bool
	ir.Syms.ARM64HasATOMICS = typecheck.LookupRuntimeVar("arm64HasATOMICS") // bool
	ir.Syms.Staticuint64s = typecheck.LookupRuntimeVar("staticuint64s")
	ir.Syms.Typedmemmove = typecheck.LookupRuntimeFunc("typedmemmove")
	ir.Syms.Udiv = typecheck.LookupRuntimeVar("udiv")                 // asm func with special ABI
	ir.Syms.WriteBarrier = typecheck.LookupRuntimeVar("writeBarrier") // struct { bool; ... }
	ir.Syms.Zerobase = typecheck.LookupRuntimeVar("zerobase")

	if Arch.LinkArch.Family == sys.Wasm {
		BoundsCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeFunc("goPanicIndex")
		BoundsCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeFunc("goPanicIndexU")
		BoundsCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeFunc("goPanicSliceAlen")
		BoundsCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeFunc("goPanicSliceAlenU")
		BoundsCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeFunc("goPanicSliceAcap")
		BoundsCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeFunc("goPanicSliceAcapU")
		BoundsCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeFunc("goPanicSliceB")
		BoundsCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeFunc("goPanicSliceBU")
		BoundsCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeFunc("goPanicSlice3Alen")
		BoundsCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeFunc("goPanicSlice3AlenU")
		BoundsCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeFunc("goPanicSlice3Acap")
		BoundsCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeFunc("goPanicSlice3AcapU")
		BoundsCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeFunc("goPanicSlice3B")
		BoundsCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeFunc("goPanicSlice3BU")
		BoundsCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeFunc("goPanicSlice3C")
		BoundsCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeFunc("goPanicSlice3CU")
		BoundsCheckFunc[ssa.BoundsConvert] = typecheck.LookupRuntimeFunc("goPanicSliceConvert")
	} else {
		BoundsCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeFunc("panicIndex")
		BoundsCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeFunc("panicIndexU")
		BoundsCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeFunc("panicSliceAlen")
		BoundsCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeFunc("panicSliceAlenU")
		BoundsCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeFunc("panicSliceAcap")
		BoundsCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeFunc("panicSliceAcapU")
		BoundsCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeFunc("panicSliceB")
		BoundsCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeFunc("panicSliceBU")
		BoundsCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeFunc("panicSlice3Alen")
		BoundsCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeFunc("panicSlice3AlenU")
		BoundsCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeFunc("panicSlice3Acap")
		BoundsCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeFunc("panicSlice3AcapU")
		BoundsCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeFunc("panicSlice3B")
		BoundsCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeFunc("panicSlice3BU")
		BoundsCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeFunc("panicSlice3C")
		BoundsCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeFunc("panicSlice3CU")
		BoundsCheckFunc[ssa.BoundsConvert] = typecheck.LookupRuntimeFunc("panicSliceConvert")
	}
	if Arch.LinkArch.PtrSize == 4 {
		ExtendCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeVar("panicExtendIndex")
		ExtendCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeVar("panicExtendIndexU")
		ExtendCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeVar("panicExtendSliceAlen")
		ExtendCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeVar("panicExtendSliceAlenU")
		ExtendCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeVar("panicExtendSliceAcap")
		ExtendCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeVar("panicExtendSliceAcapU")
		ExtendCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeVar("panicExtendSliceB")
		ExtendCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeVar("panicExtendSliceBU")
		ExtendCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeVar("panicExtendSlice3Alen")
		ExtendCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeVar("panicExtendSlice3AlenU")
		ExtendCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeVar("panicExtendSlice3Acap")
		ExtendCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeVar("panicExtendSlice3AcapU")
		ExtendCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeVar("panicExtendSlice3B")
		ExtendCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeVar("panicExtendSlice3BU")
		ExtendCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeVar("panicExtendSlice3C")
		ExtendCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeVar("panicExtendSlice3CU")
	}

	// Wasm (all asm funcs with special ABIs)
	ir.Syms.WasmDiv = typecheck.LookupRuntimeVar("wasmDiv")
	ir.Syms.WasmTruncS = typecheck.LookupRuntimeVar("wasmTruncS")
	ir.Syms.WasmTruncU = typecheck.LookupRuntimeVar("wasmTruncU")
	ir.Syms.SigPanic = typecheck.LookupRuntimeFunc("sigpanic")
}

// AbiForBodylessFuncStackMap returns the ABI for a bodyless function's stack map.
// This is not necessarily the ABI used to call it.
// Currently (1.17 dev) such a stack map is always ABI0;
// any ABI wrapper that is present is nosplit, hence a precise
// stack map is not needed there (the parameters survive only long
// enough to call the wrapped assembly function).
// This always returns a freshly copied ABI.
func AbiForBodylessFuncStackMap(fn *ir.Func) *abi.ABIConfig {
	return ssaConfig.ABI0.Copy() // No idea what races will result, be safe
}

// abiForFunc implements ABI policy for a function, but does not return a copy of the ABI.
// Passing a nil function returns the default ABI based on experiment configuration.
func abiForFunc(fn *ir.Func, abi0, abi1 *abi.ABIConfig) *abi.ABIConfig {
	if buildcfg.Experiment.RegabiArgs {
		// Select the ABI based on the function's defining ABI.
		if fn == nil {
			return abi1
		}
		switch fn.ABI {
		case obj.ABI0:
			return abi0
		case obj.ABIInternal:
			// TODO(austin): Clean up the nomenclature here.
			// It's not clear that "abi1" is ABIInternal.
			return abi1
		}
		base.Fatalf("function %v has unknown ABI %v", fn, fn.ABI)
		panic("not reachable")
	}

	a := abi0

	if fn.Pragma&ir.RegisterParams != 0 { // TODO(register args) remove after register abi is working
		a = abi1
	}

	return a
}

// dvarint writes a varint v to the funcdata in symbol x and returns the new offset.
func dvarint(x *obj.LSym, off int, v int64) int {
	if v < 0 {
		panic(fmt.Sprintf("dvarint: bad offset for funcdata - %v", v))
	}
	var buf [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(buf[:], uint64(v))
	for _, b := range buf[:n] {
		off = objw.Uint8(x, off, b)
	}
	return off
}
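
// A worked example of the encoding (illustrative): dvarint(x, off, 48)
// emits the single byte 0x30, while dvarint(x, off, 300) emits 0xAC 0x02,
// since binary.PutUvarint stores 7 bits per byte starting with the
// least-significant group and sets the high bit to mark continuation
// (300 = 0b10_0101100 -> 0xAC, 0x02).
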
// emitOpenDeferInfo emits FUNCDATA information about the defers in a function
// that is using open-coded defers. This funcdata is used to determine the active
// defers in a function and execute those defers during panic processing.
//
// The funcdata is all encoded in varints (since values will almost always be less than
// 128, but stack offsets could potentially be up to 2Gbyte). All "locations" (offsets)
// for stack variables are specified as the number of bytes below varp (pointer to the
// top of the local variables) for their starting address. The format is:
//
//   - Offset of the deferBits variable
//   - Offset of the first closure slot (the rest are laid out consecutively).
func (s *state) emitOpenDeferInfo() {
	firstOffset := s.openDefers[0].closureNode.FrameOffset()

	// Verify that cmpstackvarlt laid out the slots in order.
	for i, r := range s.openDefers {
		have := r.closureNode.FrameOffset()
		want := firstOffset + int64(i)*int64(types.PtrSize)
		if have != want {
			base.FatalfAt(s.curfn.Pos(), "unexpected frame offset for open-coded defer slot #%v: have %v, want %v", i, have, want)
		}
	}

	x := base.Ctxt.Lookup(s.curfn.LSym.Name + ".opendefer")
	x.Set(obj.AttrContentAddressable, true)
	s.curfn.LSym.Func().OpenCodedDeferInfo = x

	off := 0
	off = dvarint(x, off, -s.deferBitsTemp.FrameOffset())
	off = dvarint(x, off, -firstOffset)
}
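
// For illustration only: a consumer could decode this funcdata by mirroring
// the two dvarint calls above. decodeOpenDeferInfo is a hypothetical helper,
// not something the compiler or runtime defines; both results are distances
// in bytes below varp.
//
//	func decodeOpenDeferInfo(data []byte) (deferBitsOff, firstSlotOff uint64) {
//		deferBitsOff, n := binary.Uvarint(data)
//		firstSlotOff, _ = binary.Uvarint(data[n:])
//		return
//	}
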
// buildssa builds an SSA function for fn.
// worker indicates which of the backend workers is doing the processing.
func buildssa(fn *ir.Func, worker int) *ssa.Func {
	name := ir.FuncName(fn)

	abiSelf := abiForFunc(fn, ssaConfig.ABI0, ssaConfig.ABI1)

	printssa := false
	// match either a simple name e.g. "(*Reader).Reset", package.name e.g. "compress/gzip.(*Reader).Reset", or subpackage name "gzip.(*Reader).Reset"
	// optionally allows an ABI suffix specification in the GOSSAFUNC, e.g. "(*Reader).Reset<0>" etc
	if strings.Contains(ssaDump, name) { // in all the cases the function name is entirely contained within the GOSSAFUNC string.
		nameOptABI := name
		if strings.Contains(ssaDump, ",") { // ABI specification
			nameOptABI = ssa.FuncNameABI(name, abiSelf.Which())
		} else if strings.HasSuffix(ssaDump, ">") { // if they use the linker syntax instead....
			l := len(ssaDump)
			if l >= 3 && ssaDump[l-3] == '<' {
				nameOptABI = ssa.FuncNameABI(name, abiSelf.Which())
				ssaDump = ssaDump[:l-3] + "," + ssaDump[l-2:l-1]
			}
		}
		pkgDotName := base.Ctxt.Pkgpath + "." + nameOptABI
		printssa = nameOptABI == ssaDump || // "(*Reader).Reset"
			pkgDotName == ssaDump || // "compress/gzip.(*Reader).Reset"
			strings.HasSuffix(pkgDotName, ssaDump) && strings.HasSuffix(pkgDotName, "/"+ssaDump) // "gzip.(*Reader).Reset"
	}

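	// For example (illustrative): if this package were compress/gzip, each of
	// GOSSAFUNC="(*Reader).Reset", GOSSAFUNC="gzip.(*Reader).Reset", and
	// GOSSAFUNC="compress/gzip.(*Reader).Reset" would select the same method,
	// and a trailing "<1>" (linker syntax) or ",1" narrows it to one ABI.
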
	var astBuf *bytes.Buffer
	if printssa {
		astBuf = &bytes.Buffer{}
		ir.FDumpList(astBuf, "buildssa-body", fn.Body)
		if ssaDumpStdout {
			fmt.Println("generating SSA for", name)
			fmt.Print(astBuf.String())
		}
	}

	var s state
	s.pushLine(fn.Pos())
	defer s.popLine()

	s.hasdefer = fn.HasDefer()
	if fn.Pragma&ir.CgoUnsafeArgs != 0 {
		s.cgoUnsafeArgs = true
	}
	s.checkPtrEnabled = ir.ShouldCheckPtr(fn, 1)

	if base.Flag.Cfg.Instrumenting && fn.Pragma&ir.Norace == 0 && !fn.Linksym().ABIWrapper() {
		if !base.Flag.Race || !objabi.LookupPkgSpecial(fn.Sym().Pkg.Path).NoRaceFunc {
			s.instrumentMemory = true
		}
		if base.Flag.Race {
			s.instrumentEnterExit = true
		}
	}

	fe := ssafn{
		curfn: fn,
		log:   printssa && ssaDumpStdout,
	}
	s.curfn = fn

	cache := &ssaCaches[worker]
	cache.Reset()

	s.f = ssaConfig.NewFunc(&fe, cache)
	s.config = ssaConfig
	s.f.Type = fn.Type()
	s.f.Name = name
	s.f.PrintOrHtmlSSA = printssa
	if fn.Pragma&ir.Nosplit != 0 {
		s.f.NoSplit = true
	}
	s.f.ABI0 = ssaConfig.ABI0
	s.f.ABI1 = ssaConfig.ABI1
	s.f.ABIDefault = abiForFunc(nil, ssaConfig.ABI0, ssaConfig.ABI1)
	s.f.ABISelf = abiSelf

	s.panics = map[funcLine]*ssa.Block{}
	s.softFloat = s.config.SoftFloat

	// Allocate starting block
	s.f.Entry = s.f.NewBlock(ssa.BlockPlain)
	s.f.Entry.Pos = fn.Pos()

	if printssa {
		ssaDF := ssaDumpFile
		if ssaDir != "" {
			ssaDF = filepath.Join(ssaDir, base.Ctxt.Pkgpath+"."+s.f.NameABI()+".html")
			ssaD := filepath.Dir(ssaDF)
			os.MkdirAll(ssaD, 0755)
		}
		s.f.HTMLWriter = ssa.NewHTMLWriter(ssaDF, s.f, ssaDumpCFG)
		// TODO: generate and print a mapping from nodes to values and blocks
		dumpSourcesColumn(s.f.HTMLWriter, fn)
		s.f.HTMLWriter.WriteAST("AST", astBuf)
	}

	// Allocate starting values
	s.labels = map[string]*ssaLabel{}
	s.fwdVars = map[ir.Node]*ssa.Value{}
	s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)

	s.hasOpenDefers = base.Flag.N == 0 && s.hasdefer && !s.curfn.OpenCodedDeferDisallowed()
	switch {
	case base.Debug.NoOpenDefer != 0:
		s.hasOpenDefers = false
	case s.hasOpenDefers && (base.Ctxt.Flag_shared || base.Ctxt.Flag_dynlink) && base.Ctxt.Arch.Name == "386":
		// Don't support open-coded defers for 386 ONLY when using shared
		// libraries, because there is extra code (added by rewriteToUseGot())
		// preceding the deferreturn/ret code that we don't track correctly.
		s.hasOpenDefers = false
	}
	if s.hasOpenDefers && s.instrumentEnterExit {
		// Skip doing open defers if we need to instrument function
		// returns for the race detector, since we will not generate that
		// code in the case of the extra deferreturn/ret segment.
		s.hasOpenDefers = false
	}
	if s.hasOpenDefers {
		// Similarly, skip if there are any heap-allocated result
		// parameters that need to be copied back to their stack slots.
		for _, f := range s.curfn.Type().Results() {
			if !f.Nname.(*ir.Name).OnStack() {
				s.hasOpenDefers = false
				break
			}
		}
	}
	if s.hasOpenDefers &&
		s.curfn.NumReturns*s.curfn.NumDefers > 15 {
		// Since we are generating defer calls at every exit for
		// open-coded defers, skip doing open-coded defers if there are
		// too many returns (especially if there are multiple defers).
		// Open-coded defers are most important for improving performance
		// for smaller functions (which don't have many returns).
		s.hasOpenDefers = false
	}

	s.sp = s.entryNewValue0(ssa.OpSP, types.Types[types.TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
	s.sb = s.entryNewValue0(ssa.OpSB, types.Types[types.TUINTPTR])

	s.startBlock(s.f.Entry)
	s.vars[memVar] = s.startmem
	if s.hasOpenDefers {
		// Create the deferBits variable and stack slot. deferBits is a
		// bitmask showing which of the open-coded defers in this function
		// have been activated.
		deferBitsTemp := typecheck.TempAt(src.NoXPos, s.curfn, types.Types[types.TUINT8])
		deferBitsTemp.SetAddrtaken(true)
		s.deferBitsTemp = deferBitsTemp
		// For this value, AuxInt is initialized to zero by default
		startDeferBits := s.entryNewValue0(ssa.OpConst8, types.Types[types.TUINT8])
		s.vars[deferBitsVar] = startDeferBits
		s.deferBitsAddr = s.addr(deferBitsTemp)
		s.store(types.Types[types.TUINT8], s.deferBitsAddr, startDeferBits)
		// Make sure that the deferBits stack slot is kept alive (for use
		// by panics) and stores to deferBits are not eliminated, even if
		// all checking code on deferBits in the function exit can be
		// eliminated, because the defer statements were all
		// unconditional.
		s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, deferBitsTemp, s.mem(), false)
	}

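	// To illustrate the scheme (not literal generated code): with three
	// open-coded defers, running the i'th defer statement sets bit i,
	//
	//	deferBits |= 1 << i
	//
	// and each function exit tests the bits in reverse order so the defers
	// run last-in-first-out, e.g.
	//
	//	if deferBits&4 != 0 { deferBits &^= 4; run defer 2 }
	//	if deferBits&2 != 0 { deferBits &^= 2; run defer 1 }
	//	if deferBits&1 != 0 { deferBits &^= 1; run defer 0 }
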
	var params *abi.ABIParamResultInfo
	params = s.f.ABISelf.ABIAnalyze(fn.Type(), true)

	// The backend's stackframe pass prunes away entries from the fn's
	// Dcl list, including PARAMOUT nodes that correspond to output
	// params passed in registers. Walk the Dcl list and capture these
	// nodes to a side list, so that we'll have them available during
	// DWARF-gen later on. See issue 48573 for more details.
	var debugInfo ssa.FuncDebug
	for _, n := range fn.Dcl {
		if n.Class == ir.PPARAMOUT && n.IsOutputParamInRegisters() {
			debugInfo.RegOutputParams = append(debugInfo.RegOutputParams, n)
		}
	}
	fn.DebugInfo = &debugInfo

	// Generate addresses of local declarations
	s.decladdrs = map[*ir.Name]*ssa.Value{}
	for _, n := range fn.Dcl {
		switch n.Class {
		case ir.PPARAM:
			// Be aware that blank and unnamed input parameters will not appear here, but do appear in the type
			s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem)
		case ir.PPARAMOUT:
			s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem)
		case ir.PAUTO:
			// processed at each use, to prevent Addr coming
			// before the decl.
		default:
			s.Fatalf("local variable with class %v unimplemented", n.Class)
		}
	}
	s.f.OwnAux = ssa.OwnAuxCall(fn.LSym, params)

	// Populate SSAable arguments.
	for _, n := range fn.Dcl {
		if n.Class == ir.PPARAM {
			if s.canSSA(n) {
				v := s.newValue0A(ssa.OpArg, n.Type(), n)
				s.vars[n] = v
				s.addNamedValue(n, v) // This helps with debugging information, not needed for compilation itself.
			} else { // address was taken AND/OR too large for SSA
				paramAssignment := ssa.ParamAssignmentForArgName(s.f, n)
				if len(paramAssignment.Registers) > 0 {
					if ssa.CanSSA(n.Type()) { // SSA-able type, so address was taken -- receive value in OpArg, DO NOT bind to var, store immediately to memory.
						v := s.newValue0A(ssa.OpArg, n.Type(), n)
						s.store(n.Type(), s.decladdrs[n], v)
					} else { // Too big for SSA.
						// Brute force, and early, do a bunch of stores from registers
						// Note that expand calls knows about this and doesn't trouble itself with larger-than-SSA-able Args in registers.
						s.storeParameterRegsToStack(s.f.ABISelf, paramAssignment, n, s.decladdrs[n], false)
					}
				}
			}
		}
	}

	// Populate closure variables.
	if fn.Needctxt() {
		clo := s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr)
		offset := int64(types.PtrSize) // PtrSize to skip past function entry PC field
		for _, n := range fn.ClosureVars {
			typ := n.Type()
			if !n.Byval() {
				typ = types.NewPtr(typ)
			}

			offset = types.RoundUp(offset, typ.Alignment())
			ptr := s.newValue1I(ssa.OpOffPtr, types.NewPtr(typ), offset, clo)
			offset += typ.Size()

			// If n is a small variable captured by value, promote
			// it to PAUTO so it can be converted to SSA.
			//
			// Note: While we never capture a variable by value if
			// the user took its address, we may have generated
			// runtime calls that did (#43701). Since we don't
			// convert Addrtaken variables to SSA anyway, no point
			// in promoting them either.
			if n.Byval() && !n.Addrtaken() && ssa.CanSSA(n.Type()) {
				n.Class = ir.PAUTO
				fn.Dcl = append(fn.Dcl, n)
				s.assign(n, s.load(n.Type(), ptr), false, 0)
				continue
			}

			if !n.Byval() {
				ptr = s.load(typ, ptr)
			}
			s.setHeapaddr(fn.Pos(), n, ptr)
		}
	}

	// Convert the AST-based IR to the SSA-based IR
	if s.instrumentEnterExit {
		s.rtcall(ir.Syms.Racefuncenter, true, nil, s.newValue0(ssa.OpGetCallerPC, types.Types[types.TUINTPTR]))
	}
	s.zeroResults()
	s.paramsToHeap()
	s.stmtList(fn.Body)

	// fallthrough to exit
	if s.curBlock != nil {
		s.pushLine(fn.Endlineno)
		s.exit()
		s.popLine()
	}

	for _, b := range s.f.Blocks {
		if b.Pos != src.NoXPos {
			s.updateUnsetPredPos(b)
		}
	}

	s.f.HTMLWriter.WritePhase("before insert phis", "before insert phis")

	s.insertPhis()

	// Main call to ssa package to compile function
	ssa.Compile(s.f)

	fe.AllocFrame(s.f)

	if len(s.openDefers) != 0 {
		s.emitOpenDeferInfo()
	}

	// Record incoming parameter spill information for morestack calls emitted in the assembler.
	// This is done here, using all the parameters (used, partially used, and unused) because
	// it mimics the behavior of the former ABI (everything stored) and because it's not 100%
	// clear if naming conventions are respected in autogenerated code.
	// TODO figure out exactly what's unused, don't spill it. Make liveness fine-grained, also.
	for _, p := range params.InParams() {
		typs, offs := p.RegisterTypesAndOffsets()
		for i, t := range typs {
			o := offs[i]                // offset within parameter
			fo := p.FrameOffset(params) // offset of parameter in frame
			reg := ssa.ObjRegForAbiReg(p.Registers[i], s.f.Config)
			s.f.RegArgs = append(s.f.RegArgs, ssa.Spill{Reg: reg, Offset: fo + o, Type: t})
		}
	}

	return s.f
}

func (s *state) storeParameterRegsToStack(abi *abi.ABIConfig, paramAssignment *abi.ABIParamAssignment, n *ir.Name, addr *ssa.Value, pointersOnly bool) {
	typs, offs := paramAssignment.RegisterTypesAndOffsets()
	for i, t := range typs {
		if pointersOnly && !t.IsPtrShaped() {
			continue
		}
		r := paramAssignment.Registers[i]
		o := offs[i]
		op, reg := ssa.ArgOpAndRegisterFor(r, abi)
		aux := &ssa.AuxNameOffset{Name: n, Offset: o}
		v := s.newValue0I(op, t, reg)
		v.Aux = aux
		p := s.newValue1I(ssa.OpOffPtr, types.NewPtr(t), o, addr)
		s.store(t, p, v)
	}
}

// zeroResults zeros the return values at the start of the function.
// We need to do this very early in the function. Defer might stop a
// panic and show the return values as they exist at the time of
// panic. For precise stacks, the garbage collector assumes results
// are always live, so we need to zero them before any allocations,
// even allocations to move params/results to the heap.
func (s *state) zeroResults() {
	for _, f := range s.curfn.Type().Results() {
		n := f.Nname.(*ir.Name)
		if !n.OnStack() {
			// The local which points to the return value is the
			// thing that needs zeroing. This is already handled
			// by a Needzero annotation in plive.go:(*liveness).epilogue.
			continue
		}
		// Zero the stack location containing f.
		if typ := n.Type(); ssa.CanSSA(typ) {
			s.assign(n, s.zeroVal(typ), false, 0)
		} else {
			if typ.HasPointers() {
				s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
			}
			s.zero(n.Type(), s.decladdrs[n])
		}
	}
}

// paramsToHeap produces code to allocate memory for heap-escaped parameters
// and to copy non-result parameters' values from the stack.
func (s *state) paramsToHeap() {
	do := func(params []*types.Field) {
		for _, f := range params {
			if f.Nname == nil {
				continue // anonymous or blank parameter
			}
			n := f.Nname.(*ir.Name)
			if ir.IsBlank(n) || n.OnStack() {
				continue
			}
			s.newHeapaddr(n)
			if n.Class == ir.PPARAM {
				s.move(n.Type(), s.expr(n.Heapaddr), s.decladdrs[n])
			}
		}
	}

	typ := s.curfn.Type()
	do(typ.Recvs())
	do(typ.Params())
	do(typ.Results())
}

// newHeapaddr allocates heap memory for n and sets its heap address.
func (s *state) newHeapaddr(n *ir.Name) {
	s.setHeapaddr(n.Pos(), n, s.newObject(n.Type(), nil))
}

// setHeapaddr allocates a new PAUTO variable to store ptr (which must be non-nil)
// and then sets it as n's heap address.
func (s *state) setHeapaddr(pos src.XPos, n *ir.Name, ptr *ssa.Value) {
	if !ptr.Type.IsPtr() || !types.Identical(n.Type(), ptr.Type.Elem()) {
		base.FatalfAt(n.Pos(), "setHeapaddr %L with type %v", n, ptr.Type)
	}

	// Declare variable to hold address.
	sym := &types.Sym{Name: "&" + n.Sym().Name, Pkg: types.LocalPkg}
	addr := s.curfn.NewLocal(pos, sym, types.NewPtr(n.Type()))
	addr.SetUsed(true)
	types.CalcSize(addr.Type())

	if n.Class == ir.PPARAMOUT {
		addr.SetIsOutputParamHeapAddr(true)
	}

	n.Heapaddr = addr
	s.assign(addr, ptr, false, 0)
}

// newObject returns an SSA value denoting new(typ).
func (s *state) newObject(typ *types.Type, rtype *ssa.Value) *ssa.Value {
	if typ.Size() == 0 {
		return s.newValue1A(ssa.OpAddr, types.NewPtr(typ), ir.Syms.Zerobase, s.sb)
	}
	if rtype == nil {
		rtype = s.reflectType(typ)
	}
	return s.rtcall(ir.Syms.Newobject, true, []*types.Type{types.NewPtr(typ)}, rtype)[0]
}
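
// For example, new(struct{}) allocates nothing: any zero-sized type takes the
// &runtime.zerobase fast path above, while new(int64) becomes a call to
// runtime.newobject passing *int64's type descriptor.
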
func (s *state) checkPtrAlignment(n *ir.ConvExpr, v *ssa.Value, count *ssa.Value) {
	if !n.Type().IsPtr() {
		s.Fatalf("expected pointer type: %v", n.Type())
	}
	elem, rtypeExpr := n.Type().Elem(), n.ElemRType
	if count != nil {
		if !elem.IsArray() {
			s.Fatalf("expected array type: %v", elem)
		}
		elem, rtypeExpr = elem.Elem(), n.ElemElemRType
	}
	size := elem.Size()
	// Casting from larger type to smaller one is ok, so for smallest type, do nothing.
	if elem.Alignment() == 1 && (size == 0 || size == 1 || count == nil) {
		return
	}
	if count == nil {
		count = s.constInt(types.Types[types.TUINTPTR], 1)
	}
	if count.Type.Size() != s.config.PtrSize {
		s.Fatalf("expected count fit to a uintptr size, have: %d, want: %d", count.Type.Size(), s.config.PtrSize)
	}
	var rtype *ssa.Value
	if rtypeExpr != nil {
		rtype = s.expr(rtypeExpr)
	} else {
		rtype = s.reflectType(elem)
	}
	s.rtcall(ir.Syms.CheckPtrAlignment, true, nil, v, rtype, count)
}
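
// For example (illustrative), with -d=checkptr enabled a conversion such as
//
//	p := (*int64)(unsafe.Pointer(u))
//
// reaches this function with elem = int64, emitting a runtime check that u
// is suitably aligned for int64; a conversion to *byte needs no check, per
// the alignment-1 early return above.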

// reflectType returns an SSA value representing a pointer to typ's
// reflection type descriptor.
func (s *state) reflectType(typ *types.Type) *ssa.Value {
	// TODO(mdempsky): Make this Fatalf under Unified IR; frontend needs
	// to supply RType expressions.
	lsym := reflectdata.TypeLinksym(typ)
	return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(types.Types[types.TUINT8]), lsym, s.sb)
}

func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *ir.Func) {
	// Read sources of target function fn.
	fname := base.Ctxt.PosTable.Pos(fn.Pos()).Filename()
	targetFn, err := readFuncLines(fname, fn.Pos().Line(), fn.Endlineno.Line())
	if err != nil {
		writer.Logf("cannot read sources for function %v: %v", fn, err)
	}

	// Read sources of inlined functions.
	var inlFns []*ssa.FuncLines
	for _, fi := range ssaDumpInlined {
		elno := fi.Endlineno
		fname := base.Ctxt.PosTable.Pos(fi.Pos()).Filename()
		fnLines, err := readFuncLines(fname, fi.Pos().Line(), elno.Line())
		if err != nil {
			writer.Logf("cannot read sources for inlined function %v: %v", fi, err)
			continue
		}
		inlFns = append(inlFns, fnLines)
	}

	sort.Sort(ssa.ByTopo(inlFns))
	if targetFn != nil {
		inlFns = append([]*ssa.FuncLines{targetFn}, inlFns...)
	}

	writer.WriteSources("sources", inlFns)
}

func readFuncLines(file string, start, end uint) (*ssa.FuncLines, error) {
	f, err := os.Open(os.ExpandEnv(file))
	if err != nil {
		return nil, err
	}
	defer f.Close()
	var lines []string
	ln := uint(1)
	scanner := bufio.NewScanner(f)
	for scanner.Scan() && ln <= end {
		if ln >= start {
			lines = append(lines, scanner.Text())
		}
		ln++
	}
	return &ssa.FuncLines{Filename: file, StartLineno: start, Lines: lines}, nil
}

// updateUnsetPredPos propagates the earliest-value position information for b
// towards all of b's predecessors that need a position, and recurs on that
// predecessor if its position is updated. B should have a non-empty position.
func (s *state) updateUnsetPredPos(b *ssa.Block) {
	if b.Pos == src.NoXPos {
		s.Fatalf("Block %s should have a position", b)
	}
	bestPos := src.NoXPos
	for _, e := range b.Preds {
		p := e.Block()
		if !p.LackingPos() {
			continue
		}
		if bestPos == src.NoXPos {
			bestPos = b.Pos
			for _, v := range b.Values {
				if v.LackingPos() {
					continue
				}
				if v.Pos != src.NoXPos {
					// Assume values are still in roughly textual order;
					// TODO: could also seek minimum position?
					bestPos = v.Pos
					break
				}
			}
		}
		p.Pos = bestPos
		s.updateUnsetPredPos(p) // We do not expect long chains of these, thus recursion is okay.
	}
}

// Information about each open-coded defer.
type openDeferInfo struct {
	// The node representing the call of the defer
	n *ir.CallExpr
	// If defer call is closure call, the address of the argtmp where the
	// closure is stored.
	closure *ssa.Value
	// The node representing the argtmp where the closure is stored - used for
	// function, method, or interface call, to store a closure that panic
	// processing can use for this defer.
	closureNode *ir.Name
}

type state struct {
	// configuration (arch) information
	config *ssa.Config

	// function we're building
	f *ssa.Func

	// Node for function
	curfn *ir.Func

	// labels in f
	labels map[string]*ssaLabel

	// unlabeled break and continue statement tracking
	breakTo    *ssa.Block // current target for plain break statement
	continueTo *ssa.Block // current target for plain continue statement

	// current location where we're interpreting the AST
	curBlock *ssa.Block

	// variable assignments in the current block (map from variable symbol to ssa value)
	// *Node is the unique identifier (an ONAME Node) for the variable.
	// TODO: keep a single varnum map, then make all of these maps slices instead?
	vars map[ir.Node]*ssa.Value

	// fwdVars are variables that are used before they are defined in the current block.
	// This map exists just to coalesce multiple references into a single FwdRef op.
	// *Node is the unique identifier (an ONAME Node) for the variable.
	fwdVars map[ir.Node]*ssa.Value

	// all defined variables at the end of each block. Indexed by block ID.
	defvars []map[ir.Node]*ssa.Value

	// addresses of PPARAM and PPARAMOUT variables on the stack.
	decladdrs map[*ir.Name]*ssa.Value

	// starting values. Memory, stack pointer, and globals pointer
	startmem *ssa.Value
	sp       *ssa.Value
	sb       *ssa.Value
	// value representing address of where deferBits autotmp is stored
	deferBitsAddr *ssa.Value
	deferBitsTemp *ir.Name

	// line number stack. The current line number is top of stack
	line []src.XPos
	// the last line number processed; it may have been popped
	lastPos src.XPos

	// list of panic calls by function name and line number.
	// Used to deduplicate panic calls.
	panics map[funcLine]*ssa.Block

	cgoUnsafeArgs       bool
	hasdefer            bool // whether the function contains a defer statement
	softFloat           bool
	hasOpenDefers       bool // whether we are doing open-coded defers
	checkPtrEnabled     bool // whether to insert checkptr instrumentation
	instrumentEnterExit bool // whether to instrument function enter/exit
	instrumentMemory    bool // whether to instrument memory operations

	// If doing open-coded defers, list of info about the defer calls in
	// scanning order. Hence, at exit we should run these defers in reverse
	// order of this list
	openDefers []*openDeferInfo
	// For open-coded defers, this is the beginning and end blocks of the last
	// defer exit code that we have generated so far. We use these to share
	// code between exits if the shareDeferExits option (disabled by default)
	// is on.
	lastDeferExit       *ssa.Block // Entry block of last defer exit code we generated
	lastDeferFinalBlock *ssa.Block // Final block of last defer exit code we generated
	lastDeferCount      int        // Number of defers encountered at that point

	prevCall *ssa.Value // the previous call; use this to tie results to the call op.
}

type funcLine struct {
	f    *obj.LSym
	base *src.PosBase
	line uint
}

type ssaLabel struct {
	target         *ssa.Block // block identified by this label
	breakTarget    *ssa.Block // block to break to in control flow node identified by this label
	continueTarget *ssa.Block // block to continue to in control flow node identified by this label
}

// label returns the label associated with sym, creating it if necessary.
func (s *state) label(sym *types.Sym) *ssaLabel {
	lab := s.labels[sym.Name]
	if lab == nil {
		lab = new(ssaLabel)
		s.labels[sym.Name] = lab
	}
	return lab
}

func (s *state) Logf(msg string, args ...interface{}) { s.f.Logf(msg, args...) }
func (s *state) Log() bool                            { return s.f.Log() }
func (s *state) Fatalf(msg string, args ...interface{}) {
	s.f.Frontend().Fatalf(s.peekPos(), msg, args...)
}
func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) }
func (s *state) Debug_checknil() bool                                { return s.f.Frontend().Debug_checknil() }

func ssaMarker(name string) *ir.Name {
	return ir.NewNameAt(base.Pos, &types.Sym{Name: name}, nil)
}

var (
	// marker node for the memory variable
	memVar = ssaMarker("mem")

	// marker nodes for temporary variables
	ptrVar       = ssaMarker("ptr")
	lenVar       = ssaMarker("len")
	capVar       = ssaMarker("cap")
	typVar       = ssaMarker("typ")
	okVar        = ssaMarker("ok")
	deferBitsVar = ssaMarker("deferBits")
	hashVar      = ssaMarker("hash")
)

// startBlock sets the current block we're generating code in to b.
func (s *state) startBlock(b *ssa.Block) {
	if s.curBlock != nil {
		s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
	}
	s.curBlock = b
	s.vars = map[ir.Node]*ssa.Value{}
	for n := range s.fwdVars {
		delete(s.fwdVars, n)
	}
}

// endBlock marks the end of generating code for the current block.
// Returns the (former) current block. Returns nil if there is no current
// block, i.e. if no code flows to the current execution point.
func (s *state) endBlock() *ssa.Block {
	b := s.curBlock
	if b == nil {
		return nil
	}
	for len(s.defvars) <= int(b.ID) {
		s.defvars = append(s.defvars, nil)
	}
	s.defvars[b.ID] = s.vars
	s.curBlock = nil
	s.vars = nil
	if b.LackingPos() {
		// Empty plain blocks get the line of their successor (handled after all blocks created),
		// except for increment blocks in For statements (handled in ssa conversion of OFOR),
		// and for blocks ending in GOTO/BREAK/CONTINUE.
		b.Pos = src.NoXPos
	} else {
		b.Pos = s.lastPos
	}
	return b
}

// pushLine pushes a line number on the line number stack.
func (s *state) pushLine(line src.XPos) {
	if !line.IsKnown() {
		// the frontend may emit a node with a missing line number;
		// use the parent line number in this case.
		line = s.peekPos()
		if base.Flag.K != 0 {
			base.Warn("buildssa: unknown position (line 0)")
		}
	} else {
		s.lastPos = line
	}

	s.line = append(s.line, line)
}

// popLine pops the top of the line number stack.
func (s *state) popLine() {
	s.line = s.line[:len(s.line)-1]
}

// peekPos peeks the top of the line number stack.
func (s *state) peekPos() src.XPos {
	return s.line[len(s.line)-1]
}

// newValue0 adds a new value with no arguments to the current block.
func (s *state) newValue0(op ssa.Op, t *types.Type) *ssa.Value {
	return s.curBlock.NewValue0(s.peekPos(), op, t)
}

// newValue0A adds a new value with no arguments and an aux value to the current block.
func (s *state) newValue0A(op ssa.Op, t *types.Type, aux ssa.Aux) *ssa.Value {
	return s.curBlock.NewValue0A(s.peekPos(), op, t, aux)
}

// newValue0I adds a new value with no arguments and an auxint value to the current block.
func (s *state) newValue0I(op ssa.Op, t *types.Type, auxint int64) *ssa.Value {
	return s.curBlock.NewValue0I(s.peekPos(), op, t, auxint)
}

// newValue1 adds a new value with one argument to the current block.
func (s *state) newValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1(s.peekPos(), op, t, arg)
}

// newValue1A adds a new value with one argument and an aux value to the current block.
func (s *state) newValue1A(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
}

// newValue1Apos adds a new value with one argument and an aux value to the current block.
// isStmt determines whether the created values may be a statement or not
// (i.e., false means never, true means maybe).
func (s *state) newValue1Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value, isStmt bool) *ssa.Value {
	if isStmt {
		return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
	}
	return s.curBlock.NewValue1A(s.peekPos().WithNotStmt(), op, t, aux, arg)
}

// newValue1I adds a new value with one argument and an auxint value to the current block.
func (s *state) newValue1I(op ssa.Op, t *types.Type, aux int64, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1I(s.peekPos(), op, t, aux, arg)
}

// newValue2 adds a new value with two arguments to the current block.
func (s *state) newValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2(s.peekPos(), op, t, arg0, arg1)
}

// newValue2A adds a new value with two arguments and an aux value to the current block.
func (s *state) newValue2A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1)
}

// newValue2Apos adds a new value with two arguments and an aux value to the current block.
// isStmt determines whether the created values may be a statement or not
// (i.e., false means never, true means maybe).
func (s *state) newValue2Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value, isStmt bool) *ssa.Value {
	if isStmt {
		return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1)
	}
	return s.curBlock.NewValue2A(s.peekPos().WithNotStmt(), op, t, aux, arg0, arg1)
}

// newValue2I adds a new value with two arguments and an auxint value to the current block.
func (s *state) newValue2I(op ssa.Op, t *types.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2I(s.peekPos(), op, t, aux, arg0, arg1)
}

// newValue3 adds a new value with three arguments to the current block.
func (s *state) newValue3(op ssa.Op, t *types.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3(s.peekPos(), op, t, arg0, arg1, arg2)
}

// newValue3I adds a new value with three arguments and an auxint value to the current block.
func (s *state) newValue3I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3I(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}

// newValue3A adds a new value with three arguments and an aux value to the current block.
func (s *state) newValue3A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}

// newValue3Apos adds a new value with three arguments and an aux value to the current block.
// isStmt determines whether the created values may be a statement or not
// (i.e., false means never, true means maybe).
func (s *state) newValue3Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1, arg2 *ssa.Value, isStmt bool) *ssa.Value {
	if isStmt {
		return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
	}
	return s.curBlock.NewValue3A(s.peekPos().WithNotStmt(), op, t, aux, arg0, arg1, arg2)
}

// newValue4 adds a new value with four arguments to the current block.
func (s *state) newValue4(op ssa.Op, t *types.Type, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue4(s.peekPos(), op, t, arg0, arg1, arg2, arg3)
}

// newValue4I adds a new value with four arguments and an auxint value to the current block.
func (s *state) newValue4I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue4I(s.peekPos(), op, t, aux, arg0, arg1, arg2, arg3)
}

func (s *state) entryBlock() *ssa.Block {
	b := s.f.Entry
	if base.Flag.N > 0 && s.curBlock != nil {
		// If optimizations are off, allocate in current block instead. Since with -N
		// we're not doing the CSE or tighten passes, putting lots of stuff in the
		// entry block leads to O(n^2) entries in the live value map during regalloc.
		b = s.curBlock
	}
	return b
}

// entryNewValue0 adds a new value with no arguments to the entry block.
func (s *state) entryNewValue0(op ssa.Op, t *types.Type) *ssa.Value {
	return s.entryBlock().NewValue0(src.NoXPos, op, t)
}

// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
func (s *state) entryNewValue0A(op ssa.Op, t *types.Type, aux ssa.Aux) *ssa.Value {
	return s.entryBlock().NewValue0A(src.NoXPos, op, t, aux)
}

// entryNewValue1 adds a new value with one argument to the entry block.
func (s *state) entryNewValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
	return s.entryBlock().NewValue1(src.NoXPos, op, t, arg)
}

// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
func (s *state) entryNewValue1I(op ssa.Op, t *types.Type, auxint int64, arg *ssa.Value) *ssa.Value {
	return s.entryBlock().NewValue1I(src.NoXPos, op, t, auxint, arg)
}

// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
func (s *state) entryNewValue1A(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value) *ssa.Value {
	return s.entryBlock().NewValue1A(src.NoXPos, op, t, aux, arg)
}

// entryNewValue2 adds a new value with two arguments to the entry block.
func (s *state) entryNewValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.entryBlock().NewValue2(src.NoXPos, op, t, arg0, arg1)
}

// entryNewValue2A adds a new value with two arguments and an aux value to the entry block.
func (s *state) entryNewValue2A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.entryBlock().NewValue2A(src.NoXPos, op, t, aux, arg0, arg1)
}

// const* routines add a new const value to the entry block.
func (s *state) constSlice(t *types.Type) *ssa.Value {
	return s.f.ConstSlice(t)
}
func (s *state) constInterface(t *types.Type) *ssa.Value {
	return s.f.ConstInterface(t)
}
func (s *state) constNil(t *types.Type) *ssa.Value { return s.f.ConstNil(t) }
func (s *state) constEmptyString(t *types.Type) *ssa.Value {
	return s.f.ConstEmptyString(t)
}
func (s *state) constBool(c bool) *ssa.Value {
	return s.f.ConstBool(types.Types[types.TBOOL], c)
}
func (s *state) constInt8(t *types.Type, c int8) *ssa.Value {
	return s.f.ConstInt8(t, c)
}
func (s *state) constInt16(t *types.Type, c int16) *ssa.Value {
	return s.f.ConstInt16(t, c)
}
func (s *state) constInt32(t *types.Type, c int32) *ssa.Value {
	return s.f.ConstInt32(t, c)
}
func (s *state) constInt64(t *types.Type, c int64) *ssa.Value {
	return s.f.ConstInt64(t, c)
}
func (s *state) constFloat32(t *types.Type, c float64) *ssa.Value {
	return s.f.ConstFloat32(t, c)
}
func (s *state) constFloat64(t *types.Type, c float64) *ssa.Value {
	return s.f.ConstFloat64(t, c)
}
func (s *state) constInt(t *types.Type, c int64) *ssa.Value {
	if s.config.PtrSize == 8 {
		return s.constInt64(t, c)
	}
	if int64(int32(c)) != c {
		s.Fatalf("integer constant too big %d", c)
	}
	return s.constInt32(t, int32(c))
}
func (s *state) constOffPtrSP(t *types.Type, c int64) *ssa.Value {
	return s.f.ConstOffPtrSP(t, c, s.sp)
}

// newValueOrSfCall* are wrappers around newValue*, which may create a call to a
// soft-float runtime function instead (when emitting soft-float code).
func (s *state) newValueOrSfCall1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
	if s.softFloat {
		if c, ok := s.sfcall(op, arg); ok {
			return c
		}
	}
	return s.newValue1(op, t, arg)
}
func (s *state) newValueOrSfCall2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	if s.softFloat {
		if c, ok := s.sfcall(op, arg0, arg1); ok {
			return c
		}
	}
	return s.newValue2(op, t, arg0, arg1)
}

type instrumentKind uint8

const (
	instrumentRead = iota
	instrumentWrite
	instrumentMove
)

func (s *state) instrument(t *types.Type, addr *ssa.Value, kind instrumentKind) {
	s.instrument2(t, addr, nil, kind)
}

// instrumentFields instruments a read/write operation on addr.
// If it is instrumenting for MSAN or ASAN and t is a struct type, it instruments
// the operation for each field, instead of for the whole struct.
func (s *state) instrumentFields(t *types.Type, addr *ssa.Value, kind instrumentKind) {
	if !(base.Flag.MSan || base.Flag.ASan) || !t.IsStruct() {
		s.instrument(t, addr, kind)
		return
	}
	for _, f := range t.Fields() {
		if f.Sym.IsBlank() {
			continue
		}
		offptr := s.newValue1I(ssa.OpOffPtr, types.NewPtr(f.Type), f.Offset, addr)
		s.instrumentFields(f.Type, offptr, kind)
	}
}

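// For example (illustrative): with t = struct{ a int8; b int64 } under MSAN
// or ASAN, a read is reported as two field accesses, one at offset 0 (a) and
// one at offset 8 (b), rather than one 16-byte access, and blank (_) fields
// are skipped entirely.
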
func (s *state) instrumentMove(t *types.Type, dst, src *ssa.Value) {
	if base.Flag.MSan {
		s.instrument2(t, dst, src, instrumentMove)
	} else {
		s.instrument(t, src, instrumentRead)
		s.instrument(t, dst, instrumentWrite)
	}
}

func (s *state) instrument2(t *types.Type, addr, addr2 *ssa.Value, kind instrumentKind) {
	if !s.instrumentMemory {
		return
	}

	w := t.Size()
	if w == 0 {
		return // can't race on zero-sized things
	}

	if ssa.IsSanitizerSafeAddr(addr) {
		return
	}

	var fn *obj.LSym
	needWidth := false

	if addr2 != nil && kind != instrumentMove {
		panic("instrument2: non-nil addr2 for non-move instrumentation")
	}

	if base.Flag.MSan {
		switch kind {
		case instrumentRead:
			fn = ir.Syms.Msanread
		case instrumentWrite:
			fn = ir.Syms.Msanwrite
		case instrumentMove:
			fn = ir.Syms.Msanmove
		default:
			panic("unreachable")
		}
		needWidth = true
	} else if base.Flag.Race && t.NumComponents(types.CountBlankFields) > 1 {
		// for composite objects we have to write every address
		// because a write might happen to any subobject.
		// composites with only one element don't have subobjects, though.
		switch kind {
		case instrumentRead:
			fn = ir.Syms.Racereadrange
		case instrumentWrite:
			fn = ir.Syms.Racewriterange
		default:
			panic("unreachable")
		}
		needWidth = true
	} else if base.Flag.Race {
		// for non-composite objects we can write just the start
		// address, as any write must write the first byte.
		switch kind {
		case instrumentRead:
			fn = ir.Syms.Raceread
		case instrumentWrite:
			fn = ir.Syms.Racewrite
		default:
			panic("unreachable")
		}
	} else if base.Flag.ASan {
		switch kind {
		case instrumentRead:
			fn = ir.Syms.Asanread
		case instrumentWrite:
			fn = ir.Syms.Asanwrite
		default:
			panic("unreachable")
		}
		needWidth = true
	} else {
		panic("unreachable")
	}

	args := []*ssa.Value{addr}
	if addr2 != nil {
		args = append(args, addr2)
	}
	if needWidth {
		args = append(args, s.constInt(types.Types[types.TUINTPTR], w))
	}
	s.rtcall(fn, true, nil, args...)
}

func (s *state) load(t *types.Type, src *ssa.Value) *ssa.Value {
	s.instrumentFields(t, src, instrumentRead)
	return s.rawLoad(t, src)
}

func (s *state) rawLoad(t *types.Type, src *ssa.Value) *ssa.Value {
	return s.newValue2(ssa.OpLoad, t, src, s.mem())
}

func (s *state) store(t *types.Type, dst, val *ssa.Value) {
	s.vars[memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, dst, val, s.mem())
}

func (s *state) zero(t *types.Type, dst *ssa.Value) {
	s.instrument(t, dst, instrumentWrite)
	store := s.newValue2I(ssa.OpZero, types.TypeMem, t.Size(), dst, s.mem())
	store.Aux = t
	s.vars[memVar] = store
}

func (s *state) move(t *types.Type, dst, src *ssa.Value) {
	s.moveWhichMayOverlap(t, dst, src, false)
}
func (s *state) moveWhichMayOverlap(t *types.Type, dst, src *ssa.Value, mayOverlap bool) {
	s.instrumentMove(t, dst, src)
	if mayOverlap && t.IsArray() && t.NumElem() > 1 && !ssa.IsInlinableMemmove(dst, src, t.Size(), s.f.Config) {
		// Normally, when moving Go values of type T from one location to another,
		// we don't need to worry about partial overlaps. The two Ts must either be
		// in disjoint (nonoverlapping) memory or in exactly the same location.
		// There are 2 cases where this isn't true:
		//  1) Using unsafe you can arrange partial overlaps.
		//  2) Since Go 1.17, you can use a cast from a slice to a ptr-to-array.
		//     https://go.dev/ref/spec#Conversions_from_slice_to_array_pointer
		//     This feature can be used to construct partial overlaps of array types.
		//       var a [3]int
		//       p := (*[2]int)(a[:])
		//       q := (*[2]int)(a[1:])
		//
		// We don't care about solving 1. Or at least, we haven't historically
		// and no one has complained.
		// For 2, we need to ensure that if there might be partial overlap,
		// then we can't use OpMove; we must use memmove instead.
		// (memmove handles partial overlap by copying in the correct
		// direction. OpMove does not.)
		//
		// Note that we have to be careful here not to introduce a call when
		// we're marshaling arguments to a call or unmarshaling results from a call.
		// Cases where this is happening must pass mayOverlap as false.
		// (Currently this only happens when unmarshaling results of a call.)
		if t.HasPointers() {
			s.rtcall(ir.Syms.Typedmemmove, true, nil, s.reflectType(t), dst, src)
			// We would have otherwise implemented this move with straightline code,
			// including a write barrier. Pretend we issue a write barrier here,
			// so that the write barrier tests work. (Otherwise they'd need to know
			// the details of IsInlinableMemmove.)
			s.curfn.SetWBPos(s.peekPos())
		} else {
			s.rtcall(ir.Syms.Memmove, true, nil, dst, src, s.constInt(types.Types[types.TUINTPTR], t.Size()))
		}
		ssa.LogLargeCopy(s.f.Name, s.peekPos(), t.Size())
		return
	}
	store := s.newValue3I(ssa.OpMove, types.TypeMem, t.Size(), dst, src, s.mem())
	store.Aux = t
	s.vars[memVar] = store
}

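// Making the hazard above concrete: with a = [3]int{1, 2, 3}, the arrays
// *p (a[0:2]) and *q (a[1:3]) share a[1]. For *p = *q, copying elements in
// increasing address order yields the correct a = [2 3 3], while copying in
// decreasing order would read a[1] only after overwriting it. memmove picks
// a safe direction at run time; OpMove assumes the operands don't partially
// overlap.
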
// stmtList converts the statement list n to SSA and adds it to s.
func (s *state) stmtList(l ir.Nodes) {
	for _, n := range l {
		s.stmt(n)
	}
}

// stmt converts the statement n to SSA and adds it to s.
func (s *state) stmt(n ir.Node) {
	s.pushLine(n.Pos())
	defer s.popLine()

	// If s.curBlock is nil, and n isn't a label (which might have an associated goto somewhere),
	// then this code is dead. Stop here.
	if s.curBlock == nil && n.Op() != ir.OLABEL {
		return
	}

	s.stmtList(n.Init())
	switch n.Op() {

	case ir.OBLOCK:
		n := n.(*ir.BlockStmt)
		s.stmtList(n.List)

	case ir.OFALL: // no-op

	// Expression statements
	case ir.OCALLFUNC:
		n := n.(*ir.CallExpr)
		if ir.IsIntrinsicCall(n) {
			s.intrinsicCall(n)
			return
		}
		fallthrough

	case ir.OCALLINTER:
		n := n.(*ir.CallExpr)
		s.callResult(n, callNormal)
		if n.Op() == ir.OCALLFUNC && n.Fun.Op() == ir.ONAME && n.Fun.(*ir.Name).Class == ir.PFUNC {
			if fn := n.Fun.Sym().Name; base.Flag.CompilingRuntime && fn == "throw" ||
				n.Fun.Sym().Pkg == ir.Pkgs.Runtime && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap" || fn == "panicunsafeslicelen" || fn == "panicunsafeslicenilptr" || fn == "panicunsafestringlen" || fn == "panicunsafestringnilptr") {
				m := s.mem()
				b := s.endBlock()
				b.Kind = ssa.BlockExit
				b.SetControl(m)
				// TODO: never rewrite OPANIC to OCALLFUNC in the
				// first place. Need to wait until all backends
				// go through SSA.
			}
		}

	case ir.ODEFER:
		n := n.(*ir.GoDeferStmt)
		if base.Debug.Defer > 0 {
			var defertype string
			if s.hasOpenDefers {
				defertype = "open-coded"
			} else if n.Esc() == ir.EscNever {
				defertype = "stack-allocated"
			} else {
				defertype = "heap-allocated"
			}
			base.WarnfAt(n.Pos(), "%s defer", defertype)
		}
		if s.hasOpenDefers {
			s.openDeferRecord(n.Call.(*ir.CallExpr))
		} else {
			d := callDefer
			if n.Esc() == ir.EscNever && n.DeferAt == nil {
				d = callDeferStack
			}
			s.call(n.Call.(*ir.CallExpr), d, false, n.DeferAt)
		}

	case ir.OGO:
		n := n.(*ir.GoDeferStmt)
		s.callResult(n.Call.(*ir.CallExpr), callGo)

	case ir.OAS2DOTTYPE:
		n := n.(*ir.AssignListStmt)
		var res, resok *ssa.Value
		if n.Rhs[0].Op() == ir.ODOTTYPE2 {
			res, resok = s.dottype(n.Rhs[0].(*ir.TypeAssertExpr), true)
		} else {
			res, resok = s.dynamicDottype(n.Rhs[0].(*ir.DynamicTypeAssertExpr), true)
		}
		deref := false
		if !ssa.CanSSA(n.Rhs[0].Type()) {
			if res.Op != ssa.OpLoad {
				s.Fatalf("dottype of non-load")
			}
			mem := s.mem()
			if res.Args[1] != mem {
				s.Fatalf("memory no longer live from 2-result dottype load")
			}
			deref = true
			res = res.Args[0]
		}
		s.assign(n.Lhs[0], res, deref, 0)
		s.assign(n.Lhs[1], resok, false, 0)
		return

	case ir.OAS2FUNC:
		// We come here only when it is an intrinsic call returning two values.
		n := n.(*ir.AssignListStmt)
		call := n.Rhs[0].(*ir.CallExpr)
		if !ir.IsIntrinsicCall(call) {
			s.Fatalf("non-intrinsic AS2FUNC not expanded %v", call)
		}
		v := s.intrinsicCall(call)
		v1 := s.newValue1(ssa.OpSelect0, n.Lhs[0].Type(), v)
		v2 := s.newValue1(ssa.OpSelect1, n.Lhs[1].Type(), v)
		s.assign(n.Lhs[0], v1, false, 0)
		s.assign(n.Lhs[1], v2, false, 0)
		return

	case ir.ODCL:
		n := n.(*ir.Decl)
		if v := n.X; v.Esc() == ir.EscHeap {
			s.newHeapaddr(v)
		}

	case ir.OLABEL:
		n := n.(*ir.LabelStmt)
		sym := n.Label
		if sym.IsBlank() {
			// Nothing to do because the label isn't targetable. See issue 52278.
			break
		}
		lab := s.label(sym)

		// The label might already have a target block via a goto.
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}

		// Go to that label.
		// (We pretend "label:" is preceded by "goto label", unless the predecessor is unreachable.)
		if s.curBlock != nil {
			b := s.endBlock()
			b.AddEdgeTo(lab.target)
		}
		s.startBlock(lab.target)

	case ir.OGOTO:
		n := n.(*ir.BranchStmt)
		sym := n.Label

		lab := s.label(sym)
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}

		b := s.endBlock()
		b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
		b.AddEdgeTo(lab.target)

	case ir.OAS:
		n := n.(*ir.AssignStmt)
		if n.X == n.Y && n.X.Op() == ir.ONAME {
			// An x=x assignment. No point in doing anything
			// here. In addition, skipping this assignment
			// prevents generating:
			//	VARDEF x
			//	COPY x -> x
			// which is bad because x is incorrectly considered
			// dead before the vardef. See issue #14904.
			return
		}

		// mayOverlap keeps track of whether the LHS and RHS might
		// refer to partially overlapping memory. Partial overlapping can
		// only happen for arrays, see the comment in moveWhichMayOverlap.
		//
		// If both sides of the assignment are not dereferences, then partial
		// overlap can't happen. Partial overlap can only occur when the
		// arrays referenced are strictly smaller parts of the same base array.
		// If one side of the assignment is a full array, then partial overlap
		// can't happen. (The arrays are either disjoint or identical.)
		mayOverlap := n.X.Op() == ir.ODEREF && (n.Y != nil && n.Y.Op() == ir.ODEREF)
		if n.Y != nil && n.Y.Op() == ir.ODEREF {
			p := n.Y.(*ir.StarExpr).X
			for p.Op() == ir.OCONVNOP {
				p = p.(*ir.ConvExpr).X
			}
			if p.Op() == ir.OSPTR && p.(*ir.UnaryExpr).X.Type().IsString() {
				// Pointer fields of strings point to unmodifiable memory.
				// That memory can't overlap with the memory being written.
				mayOverlap = false
			}
		}

		rhs := n.Y
		if rhs != nil {
			switch rhs.Op() {
			case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT:
				// All literals with nonzero fields have already been
				// rewritten during walk. Any that remain are just T{}
				// or equivalents. Use the zero value.
				if !ir.IsZero(rhs) {
					s.Fatalf("literal with nonzero value in SSA: %v", rhs)
				}
				rhs = nil
			case ir.OAPPEND:
				rhs := rhs.(*ir.CallExpr)
				// Check whether we're writing the result of an append back to the same slice.
				// If so, we handle it specially to avoid write barriers on the fast
				// (non-growth) path.
				if !ir.SameSafeExpr(n.X, rhs.Args[0]) || base.Flag.N != 0 {
					break
				}
				// If the slice can be SSA'd, it'll be on the stack,
				// so there will be no write barriers,
				// so there's no need to attempt to prevent them.
				if s.canSSA(n.X) {
					if base.Debug.Append > 0 { // replicating old diagnostic message
						base.WarnfAt(n.Pos(), "append: len-only update (in local slice)")
					}
					break
				}
				if base.Debug.Append > 0 {
					base.WarnfAt(n.Pos(), "append: len-only update")
				}
				s.append(rhs, true)
				return
			}
		}

1665 if ir.IsBlank(n.X) {
1667 // Just evaluate rhs for side-effects.
1682 deref := !ssa.CanSSA(t)
1685 r = nil // Signal assign to use OpZero.
1698 if rhs != nil && (rhs.Op() == ir.OSLICE || rhs.Op() == ir.OSLICE3 || rhs.Op() == ir.OSLICESTR) && ir.SameSafeExpr(rhs.(*ir.SliceExpr).X, n.X) {
1699 // We're assigning a slicing operation back to its source.
1700 // Don't write back fields we aren't changing. See issue #14855.
1701 rhs := rhs.(*ir.SliceExpr)
1702 i, j, k := rhs.Low, rhs.High, rhs.Max
1703 if i != nil && (i.Op() == ir.OLITERAL && i.Val().Kind() == constant.Int && ir.Int64Val(i) == 0) {
1704 // [0:...] is the same as [:...]
1707 // TODO: detect defaults for len/cap also.
1708 // Currently doesn't really work because (*p)[:len(*p)] appears here as:
1711 // if j != nil && (j.Op == OLEN && SameSafeExpr(j.Left, n.Left)) {
1714 // if k != nil && (k.Op == OCAP && SameSafeExpr(k.Left, n.Left)) {
1728 s.assignWhichMayOverlap(n.X, r, deref, skip, mayOverlap)
1732 if ir.IsConst(n.Cond, constant.Bool) {
1733 s.stmtList(n.Cond.Init())
1734 if ir.BoolVal(n.Cond) {
1742 bEnd := s.f.NewBlock(ssa.BlockPlain)
1747 var bThen *ssa.Block
1748 if len(n.Body) != 0 {
1749 bThen = s.f.NewBlock(ssa.BlockPlain)
1753 var bElse *ssa.Block
1754 if len(n.Else) != 0 {
1755 bElse = s.f.NewBlock(ssa.BlockPlain)
1759 s.condBranch(n.Cond, bThen, bElse, likely)
1761 if len(n.Body) != 0 {
1764 if b := s.endBlock(); b != nil {
1768 if len(n.Else) != 0 {
1771 if b := s.endBlock(); b != nil {
1778 n := n.(*ir.ReturnStmt)
1779 s.stmtList(n.Results)
1781 b.Pos = s.lastPos.WithIsStmt()
1784 n := n.(*ir.TailCallStmt)
1785 s.callResult(n.Call, callTail)
1788 b.Kind = ssa.BlockRetJmp // could use BlockExit. BlockRetJmp is mostly for clarity.
1791 case ir.OCONTINUE, ir.OBREAK:
1792 n := n.(*ir.BranchStmt)
1795 // plain break/continue
1803 // labeled break/continue; look up the target
1808 to = lab.continueTarget
1810 to = lab.breakTarget
1815 b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
1819 // OFOR: for Ninit; Cond; Post { Body }
1820 // cond (Cond); body (Body); incr (Post)
1821 n := n.(*ir.ForStmt)
1822 base.Assert(!n.DistinctVars) // Should all be rewritten before escape analysis
1823 bCond := s.f.NewBlock(ssa.BlockPlain)
1824 bBody := s.f.NewBlock(ssa.BlockPlain)
1825 bIncr := s.f.NewBlock(ssa.BlockPlain)
1826 bEnd := s.f.NewBlock(ssa.BlockPlain)
1828 // ensure empty for loops have correct position; issue #30167
1831 // first, jump to condition test
1835 // generate code to test condition
1838 s.condBranch(n.Cond, bBody, bEnd, 1)
1841 b.Kind = ssa.BlockPlain
1845 // set up for continue/break in body
1846 prevContinue := s.continueTo
1847 prevBreak := s.breakTo
1848 s.continueTo = bIncr
1851 if sym := n.Label; sym != nil {
1854 lab.continueTarget = bIncr
1855 lab.breakTarget = bEnd
1862 // tear down continue/break
1863 s.continueTo = prevContinue
1864 s.breakTo = prevBreak
1866 lab.continueTarget = nil
1867 lab.breakTarget = nil
1870 // done with body, goto incr
1871 if b := s.endBlock(); b != nil {
1880 if b := s.endBlock(); b != nil {
1882 // It can happen that bIncr ends in a block containing only VARKILL,
1883 // and that muddles the debugging experience.
1884 if b.Pos == src.NoXPos {
1891 case ir.OSWITCH, ir.OSELECT:
1892 // These have been mostly rewritten by the front end into their Nbody fields.
1893 // Our main task is to correctly hook up any break statements.
1894 bEnd := s.f.NewBlock(ssa.BlockPlain)
1896 prevBreak := s.breakTo
1900 if n.Op() == ir.OSWITCH {
1901 n := n.(*ir.SwitchStmt)
1905 n := n.(*ir.SelectStmt)
1914 lab.breakTarget = bEnd
1917 // generate body code
1920 s.breakTo = prevBreak
1922 lab.breakTarget = nil
1925 // walk adds explicit OBREAK nodes to the end of all reachable code paths.
1926 // If we still have a current block here, then mark it unreachable.
1927 if s.curBlock != nil {
1930 b.Kind = ssa.BlockExit
1936 n := n.(*ir.JumpTableStmt)
1938 // Make blocks we'll need.
1939 jt := s.f.NewBlock(ssa.BlockJumpTable)
1940 bEnd := s.f.NewBlock(ssa.BlockPlain)
1942 // The only thing that needs evaluating is the index we're looking up.
1943 idx := s.expr(n.Idx)
1944 unsigned := idx.Type.IsUnsigned()
1946 // Extend so we can do everything in uintptr arithmetic.
1947 t := types.Types[types.TUINTPTR]
1948 idx = s.conv(nil, idx, idx.Type, t)
1950 // The ending condition for the current block decides whether we'll use
1951 // the jump table at all.
1952 // We check that min <= idx <= max and jump around the jump table
1953 // if that test fails.
1954 // We implement min <= idx <= max with 0 <= idx-min <= max-min, because
1955 // we'll need idx-min anyway as the control value for the jump table.
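// Worked example: with cases 5..12, the test becomes idx-5 <= 7 in unsigned
// arithmetic. idx < 5 wraps to a huge unsigned value and idx > 12 exceeds 7,
// so a single unsigned compare rejects both out-of-range sides.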
1958 min, _ = constant.Uint64Val(n.Cases[0])
1959 max, _ = constant.Uint64Val(n.Cases[len(n.Cases)-1])
1961 mn, _ := constant.Int64Val(n.Cases[0])
1962 mx, _ := constant.Int64Val(n.Cases[len(n.Cases)-1])
1966 // Compare idx-min with max-min, to see if we can use the jump table.
1967 idx = s.newValue2(s.ssaOp(ir.OSUB, t), t, idx, s.uintptrConstant(min))
1968 width := s.uintptrConstant(max - min)
1969 cmp := s.newValue2(s.ssaOp(ir.OLE, t), types.Types[types.TBOOL], idx, width)
1971 b.Kind = ssa.BlockIf
1973 b.AddEdgeTo(jt) // in range - use jump table
1974 b.AddEdgeTo(bEnd) // out of range - no case in the jump table will trigger
1975 b.Likely = ssa.BranchLikely // TODO: assumes missing the table entirely is unlikely. True?
1977 // Build jump table block.
1980 if base.Flag.Cfg.SpectreIndex {
1981 idx = s.newValue2(ssa.OpSpectreSliceIndex, t, idx, width)
1985 // Figure out where we should go for each index in the table.
1986 table := make([]*ssa.Block, max-min+1)
1987 for i := range table {
1988 table[i] = bEnd // default target
1990 for i := range n.Targets {
1992 lab := s.label(n.Targets[i])
1993 if lab.target == nil {
1994 lab.target = s.f.NewBlock(ssa.BlockPlain)
1998 val, _ = constant.Uint64Val(c)
2000 vl, _ := constant.Int64Val(c)
2003 // Overwrite the default target.
2004 table[val-min] = lab.target
2006 for _, t := range table {
2013 case ir.OINTERFACESWITCH:
2014 n := n.(*ir.InterfaceSwitchStmt)
2015 typs := s.f.Config.Types
2017 t := s.expr(n.RuntimeType)
2019 d := s.newValue1A(ssa.OpAddr, typs.BytePtr, n.Descriptor, s.sb)
2021 // Check the cache first.
2022 var merge *ssa.Block
2023 if base.Flag.N == 0 && rtabi.UseInterfaceSwitchCache(Arch.LinkArch.Name) {
2024 // Note: we can only use the cache if we have the right atomic load instruction.
2025 // Double-check that here.
2026 if _, ok := intrinsics[intrinsicKey{Arch.LinkArch.Arch, "runtime/internal/atomic", "Loadp"}]; !ok {
2027 s.Fatalf("atomic load not available")
2029 merge = s.f.NewBlock(ssa.BlockPlain)
2030 cacheHit := s.f.NewBlock(ssa.BlockPlain)
2031 cacheMiss := s.f.NewBlock(ssa.BlockPlain)
2032 loopHead := s.f.NewBlock(ssa.BlockPlain)
2033 loopBody := s.f.NewBlock(ssa.BlockPlain)
2035 // Pick right size ops.
2036 var mul, and, add, zext ssa.Op
2037 if s.config.PtrSize == 4 {
2046 zext = ssa.OpZeroExt32to64
2049 // Load cache pointer out of descriptor, with an atomic load so
2050 // we ensure that we see a fully written cache.
2051 atomicLoad := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(typs.BytePtr, types.TypeMem), d, s.mem())
2052 cache := s.newValue1(ssa.OpSelect0, typs.BytePtr, atomicLoad)
2053 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, atomicLoad)
2055 // Initialize hash variable.
2056 s.vars[hashVar] = s.newValue1(zext, typs.Uintptr, h)
2058 // Load mask from cache.
2059 mask := s.newValue2(ssa.OpLoad, typs.Uintptr, cache, s.mem())
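// Layout assumed by the loads here (inferred from the offsets used in this
// function, not an authoritative description): the cache begins with one
// uintptr mask, followed by entries of three words each, {Typ, Case, Itab},
// indexed by hash&mask.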
2060 // Jump to loop head.
2062 b.AddEdgeTo(loopHead)
2064 // At loop head, get pointer to the cache entry.
2065 // e := &cache.Entries[hash&mask]
2066 s.startBlock(loopHead)
2067 entries := s.newValue2(ssa.OpAddPtr, typs.UintptrPtr, cache, s.uintptrConstant(uint64(s.config.PtrSize)))
2068 idx := s.newValue2(and, typs.Uintptr, s.variable(hashVar, typs.Uintptr), mask)
2069 idx = s.newValue2(mul, typs.Uintptr, idx, s.uintptrConstant(uint64(3*s.config.PtrSize)))
2070 e := s.newValue2(ssa.OpAddPtr, typs.UintptrPtr, entries, idx)
2072 s.vars[hashVar] = s.newValue2(add, typs.Uintptr, s.variable(hashVar, typs.Uintptr), s.uintptrConstant(1))
2074 // Look for a cache hit.
2075 // if e.Typ == t { goto hit }
2076 eTyp := s.newValue2(ssa.OpLoad, typs.Uintptr, e, s.mem())
2077 cmp1 := s.newValue2(ssa.OpEqPtr, typs.Bool, t, eTyp)
2079 b.Kind = ssa.BlockIf
2081 b.AddEdgeTo(cacheHit)
2082 b.AddEdgeTo(loopBody)
2084 // Look for an empty entry, the tombstone for this hash table.
2085 // if e.Typ == nil { goto miss }
2086 s.startBlock(loopBody)
2087 cmp2 := s.newValue2(ssa.OpEqPtr, typs.Bool, eTyp, s.constNil(typs.BytePtr))
2089 b.Kind = ssa.BlockIf
2091 b.AddEdgeTo(cacheMiss)
2092 b.AddEdgeTo(loopHead)
2094 // On a hit, load the data fields of the cache entry.
2097 s.startBlock(cacheHit)
2098 eCase := s.newValue2(ssa.OpLoad, typs.Int, s.newValue1I(ssa.OpOffPtr, typs.IntPtr, s.config.PtrSize, e), s.mem())
2099 eItab := s.newValue2(ssa.OpLoad, typs.BytePtr, s.newValue1I(ssa.OpOffPtr, typs.BytePtrPtr, 2*s.config.PtrSize, e), s.mem())
2100 s.assign(n.Case, eCase, false, 0)
2101 s.assign(n.Itab, eItab, false, 0)
2105 // On a miss, call into the runtime to get the answer.
2106 s.startBlock(cacheMiss)
2109 r := s.rtcall(ir.Syms.InterfaceSwitch, true, []*types.Type{typs.Int, typs.BytePtr}, d, t)
2110 s.assign(n.Case, r[0], false, 0)
2111 s.assign(n.Itab, r[1], false, 0)
2114 // Cache hits merge in here.
2116 b.Kind = ssa.BlockPlain
2122 n := n.(*ir.UnaryExpr)
2127 n := n.(*ir.InlineMarkStmt)
2128 s.newValue1I(ssa.OpInlMark, types.TypeVoid, n.Index, s.mem())
2131 s.Fatalf("unhandled stmt %v", n.Op())
2135 // If true, share as many open-coded defer exits as possible (with the downside of
2136 // worse line-number information)
2137 const shareDeferExits = false
2139 // exit processes any code that needs to be generated just before returning.
2140 // It returns a BlockRet block that ends the control flow. Its control value
2141 // will be set to the final memory state.
2142 func (s *state) exit() *ssa.Block {
2144 if s.hasOpenDefers {
2145 if shareDeferExits && s.lastDeferExit != nil && len(s.openDefers) == s.lastDeferCount {
2146 if s.curBlock.Kind != ssa.BlockPlain {
2147 panic("Block for an exit should be BlockPlain")
2149 s.curBlock.AddEdgeTo(s.lastDeferExit)
2151 return s.lastDeferFinalBlock
2155 s.rtcall(ir.Syms.Deferreturn, true, nil)
2159 // Do actual return.
2160 // These currently turn into self-copies (in many cases).
2161 resultFields := s.curfn.Type().Results()
2162 results := make([]*ssa.Value, len(resultFields)+1, len(resultFields)+1)
2163 // Store SSAable and heap-escaped PPARAMOUT variables back to stack locations.
2164 for i, f := range resultFields {
2165 n := f.Nname.(*ir.Name)
2166 if s.canSSA(n) { // result is in some SSA variable
2167 if !n.IsOutputParamInRegisters() && n.Type().HasPointers() {
2168 // We are about to store to the result slot.
2169 s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
2171 results[i] = s.variable(n, n.Type())
2172 } else if !n.OnStack() { // result is actually heap allocated
2173 // We are about to copy the in-heap result to the result slot.
2174 if n.Type().HasPointers() {
2175 s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
2177 ha := s.expr(n.Heapaddr)
2178 s.instrumentFields(n.Type(), ha, instrumentRead)
2179 results[i] = s.newValue2(ssa.OpDereference, n.Type(), ha, s.mem())
2180 } else { // result is not SSA-able; not escaped, so not on heap, but too large for SSA.
2181 // Before the register ABI this ought to be a self-move, home=dest.
2182 // With the register ABI, it's still a self-move if the parameter is on the stack (i.e., too big or overflowed).
2183 // No VarDef, as the result slot is already holding a live value.
2184 results[i] = s.newValue2(ssa.OpDereference, n.Type(), s.addr(n), s.mem())
2188 // In -race mode, we need to call racefuncexit.
2189 // Note: This has to happen after we load any heap-allocated results,
2190 // otherwise races will be attributed to the caller instead.
2191 if s.instrumentEnterExit {
2192 s.rtcall(ir.Syms.Racefuncexit, true, nil)
2195 results[len(results)-1] = s.mem()
2196 m := s.newValue0(ssa.OpMakeResult, s.f.OwnAux.LateExpansionResultType())
2197 m.AddArgs(results...)
2200 b.Kind = ssa.BlockRet
2202 if s.hasdefer && s.hasOpenDefers {
2203 s.lastDeferFinalBlock = b
2208 type opAndType struct {
2213 var opToSSA = map[opAndType]ssa.Op{
2214 {ir.OADD, types.TINT8}: ssa.OpAdd8,
2215 {ir.OADD, types.TUINT8}: ssa.OpAdd8,
2216 {ir.OADD, types.TINT16}: ssa.OpAdd16,
2217 {ir.OADD, types.TUINT16}: ssa.OpAdd16,
2218 {ir.OADD, types.TINT32}: ssa.OpAdd32,
2219 {ir.OADD, types.TUINT32}: ssa.OpAdd32,
2220 {ir.OADD, types.TINT64}: ssa.OpAdd64,
2221 {ir.OADD, types.TUINT64}: ssa.OpAdd64,
2222 {ir.OADD, types.TFLOAT32}: ssa.OpAdd32F,
2223 {ir.OADD, types.TFLOAT64}: ssa.OpAdd64F,
2225 {ir.OSUB, types.TINT8}: ssa.OpSub8,
2226 {ir.OSUB, types.TUINT8}: ssa.OpSub8,
2227 {ir.OSUB, types.TINT16}: ssa.OpSub16,
2228 {ir.OSUB, types.TUINT16}: ssa.OpSub16,
2229 {ir.OSUB, types.TINT32}: ssa.OpSub32,
2230 {ir.OSUB, types.TUINT32}: ssa.OpSub32,
2231 {ir.OSUB, types.TINT64}: ssa.OpSub64,
2232 {ir.OSUB, types.TUINT64}: ssa.OpSub64,
2233 {ir.OSUB, types.TFLOAT32}: ssa.OpSub32F,
2234 {ir.OSUB, types.TFLOAT64}: ssa.OpSub64F,
2236 {ir.ONOT, types.TBOOL}: ssa.OpNot,
2238 {ir.ONEG, types.TINT8}: ssa.OpNeg8,
2239 {ir.ONEG, types.TUINT8}: ssa.OpNeg8,
2240 {ir.ONEG, types.TINT16}: ssa.OpNeg16,
2241 {ir.ONEG, types.TUINT16}: ssa.OpNeg16,
2242 {ir.ONEG, types.TINT32}: ssa.OpNeg32,
2243 {ir.ONEG, types.TUINT32}: ssa.OpNeg32,
2244 {ir.ONEG, types.TINT64}: ssa.OpNeg64,
2245 {ir.ONEG, types.TUINT64}: ssa.OpNeg64,
2246 {ir.ONEG, types.TFLOAT32}: ssa.OpNeg32F,
2247 {ir.ONEG, types.TFLOAT64}: ssa.OpNeg64F,
2249 {ir.OBITNOT, types.TINT8}: ssa.OpCom8,
2250 {ir.OBITNOT, types.TUINT8}: ssa.OpCom8,
2251 {ir.OBITNOT, types.TINT16}: ssa.OpCom16,
2252 {ir.OBITNOT, types.TUINT16}: ssa.OpCom16,
2253 {ir.OBITNOT, types.TINT32}: ssa.OpCom32,
2254 {ir.OBITNOT, types.TUINT32}: ssa.OpCom32,
2255 {ir.OBITNOT, types.TINT64}: ssa.OpCom64,
2256 {ir.OBITNOT, types.TUINT64}: ssa.OpCom64,
2258 {ir.OIMAG, types.TCOMPLEX64}: ssa.OpComplexImag,
2259 {ir.OIMAG, types.TCOMPLEX128}: ssa.OpComplexImag,
2260 {ir.OREAL, types.TCOMPLEX64}: ssa.OpComplexReal,
2261 {ir.OREAL, types.TCOMPLEX128}: ssa.OpComplexReal,
2263 {ir.OMUL, types.TINT8}: ssa.OpMul8,
2264 {ir.OMUL, types.TUINT8}: ssa.OpMul8,
2265 {ir.OMUL, types.TINT16}: ssa.OpMul16,
2266 {ir.OMUL, types.TUINT16}: ssa.OpMul16,
2267 {ir.OMUL, types.TINT32}: ssa.OpMul32,
2268 {ir.OMUL, types.TUINT32}: ssa.OpMul32,
2269 {ir.OMUL, types.TINT64}: ssa.OpMul64,
2270 {ir.OMUL, types.TUINT64}: ssa.OpMul64,
2271 {ir.OMUL, types.TFLOAT32}: ssa.OpMul32F,
2272 {ir.OMUL, types.TFLOAT64}: ssa.OpMul64F,
2274 {ir.ODIV, types.TFLOAT32}: ssa.OpDiv32F,
2275 {ir.ODIV, types.TFLOAT64}: ssa.OpDiv64F,
2277 {ir.ODIV, types.TINT8}: ssa.OpDiv8,
2278 {ir.ODIV, types.TUINT8}: ssa.OpDiv8u,
2279 {ir.ODIV, types.TINT16}: ssa.OpDiv16,
2280 {ir.ODIV, types.TUINT16}: ssa.OpDiv16u,
2281 {ir.ODIV, types.TINT32}: ssa.OpDiv32,
2282 {ir.ODIV, types.TUINT32}: ssa.OpDiv32u,
2283 {ir.ODIV, types.TINT64}: ssa.OpDiv64,
2284 {ir.ODIV, types.TUINT64}: ssa.OpDiv64u,
2286 {ir.OMOD, types.TINT8}: ssa.OpMod8,
2287 {ir.OMOD, types.TUINT8}: ssa.OpMod8u,
2288 {ir.OMOD, types.TINT16}: ssa.OpMod16,
2289 {ir.OMOD, types.TUINT16}: ssa.OpMod16u,
2290 {ir.OMOD, types.TINT32}: ssa.OpMod32,
2291 {ir.OMOD, types.TUINT32}: ssa.OpMod32u,
2292 {ir.OMOD, types.TINT64}: ssa.OpMod64,
2293 {ir.OMOD, types.TUINT64}: ssa.OpMod64u,
2295 {ir.OAND, types.TINT8}: ssa.OpAnd8,
2296 {ir.OAND, types.TUINT8}: ssa.OpAnd8,
2297 {ir.OAND, types.TINT16}: ssa.OpAnd16,
2298 {ir.OAND, types.TUINT16}: ssa.OpAnd16,
2299 {ir.OAND, types.TINT32}: ssa.OpAnd32,
2300 {ir.OAND, types.TUINT32}: ssa.OpAnd32,
2301 {ir.OAND, types.TINT64}: ssa.OpAnd64,
2302 {ir.OAND, types.TUINT64}: ssa.OpAnd64,
2304 {ir.OOR, types.TINT8}: ssa.OpOr8,
2305 {ir.OOR, types.TUINT8}: ssa.OpOr8,
2306 {ir.OOR, types.TINT16}: ssa.OpOr16,
2307 {ir.OOR, types.TUINT16}: ssa.OpOr16,
2308 {ir.OOR, types.TINT32}: ssa.OpOr32,
2309 {ir.OOR, types.TUINT32}: ssa.OpOr32,
2310 {ir.OOR, types.TINT64}: ssa.OpOr64,
2311 {ir.OOR, types.TUINT64}: ssa.OpOr64,
2313 {ir.OXOR, types.TINT8}: ssa.OpXor8,
2314 {ir.OXOR, types.TUINT8}: ssa.OpXor8,
2315 {ir.OXOR, types.TINT16}: ssa.OpXor16,
2316 {ir.OXOR, types.TUINT16}: ssa.OpXor16,
2317 {ir.OXOR, types.TINT32}: ssa.OpXor32,
2318 {ir.OXOR, types.TUINT32}: ssa.OpXor32,
2319 {ir.OXOR, types.TINT64}: ssa.OpXor64,
2320 {ir.OXOR, types.TUINT64}: ssa.OpXor64,
2322 {ir.OEQ, types.TBOOL}: ssa.OpEqB,
2323 {ir.OEQ, types.TINT8}: ssa.OpEq8,
2324 {ir.OEQ, types.TUINT8}: ssa.OpEq8,
2325 {ir.OEQ, types.TINT16}: ssa.OpEq16,
2326 {ir.OEQ, types.TUINT16}: ssa.OpEq16,
2327 {ir.OEQ, types.TINT32}: ssa.OpEq32,
2328 {ir.OEQ, types.TUINT32}: ssa.OpEq32,
2329 {ir.OEQ, types.TINT64}: ssa.OpEq64,
2330 {ir.OEQ, types.TUINT64}: ssa.OpEq64,
2331 {ir.OEQ, types.TINTER}: ssa.OpEqInter,
2332 {ir.OEQ, types.TSLICE}: ssa.OpEqSlice,
2333 {ir.OEQ, types.TFUNC}: ssa.OpEqPtr,
2334 {ir.OEQ, types.TMAP}: ssa.OpEqPtr,
2335 {ir.OEQ, types.TCHAN}: ssa.OpEqPtr,
2336 {ir.OEQ, types.TPTR}: ssa.OpEqPtr,
2337 {ir.OEQ, types.TUINTPTR}: ssa.OpEqPtr,
2338 {ir.OEQ, types.TUNSAFEPTR}: ssa.OpEqPtr,
2339 {ir.OEQ, types.TFLOAT64}: ssa.OpEq64F,
2340 {ir.OEQ, types.TFLOAT32}: ssa.OpEq32F,
2342 {ir.ONE, types.TBOOL}: ssa.OpNeqB,
2343 {ir.ONE, types.TINT8}: ssa.OpNeq8,
2344 {ir.ONE, types.TUINT8}: ssa.OpNeq8,
2345 {ir.ONE, types.TINT16}: ssa.OpNeq16,
2346 {ir.ONE, types.TUINT16}: ssa.OpNeq16,
2347 {ir.ONE, types.TINT32}: ssa.OpNeq32,
2348 {ir.ONE, types.TUINT32}: ssa.OpNeq32,
2349 {ir.ONE, types.TINT64}: ssa.OpNeq64,
2350 {ir.ONE, types.TUINT64}: ssa.OpNeq64,
2351 {ir.ONE, types.TINTER}: ssa.OpNeqInter,
2352 {ir.ONE, types.TSLICE}: ssa.OpNeqSlice,
2353 {ir.ONE, types.TFUNC}: ssa.OpNeqPtr,
2354 {ir.ONE, types.TMAP}: ssa.OpNeqPtr,
2355 {ir.ONE, types.TCHAN}: ssa.OpNeqPtr,
2356 {ir.ONE, types.TPTR}: ssa.OpNeqPtr,
2357 {ir.ONE, types.TUINTPTR}: ssa.OpNeqPtr,
2358 {ir.ONE, types.TUNSAFEPTR}: ssa.OpNeqPtr,
2359 {ir.ONE, types.TFLOAT64}: ssa.OpNeq64F,
2360 {ir.ONE, types.TFLOAT32}: ssa.OpNeq32F,
2362 {ir.OLT, types.TINT8}: ssa.OpLess8,
2363 {ir.OLT, types.TUINT8}: ssa.OpLess8U,
2364 {ir.OLT, types.TINT16}: ssa.OpLess16,
2365 {ir.OLT, types.TUINT16}: ssa.OpLess16U,
2366 {ir.OLT, types.TINT32}: ssa.OpLess32,
2367 {ir.OLT, types.TUINT32}: ssa.OpLess32U,
2368 {ir.OLT, types.TINT64}: ssa.OpLess64,
2369 {ir.OLT, types.TUINT64}: ssa.OpLess64U,
2370 {ir.OLT, types.TFLOAT64}: ssa.OpLess64F,
2371 {ir.OLT, types.TFLOAT32}: ssa.OpLess32F,
2373 {ir.OLE, types.TINT8}: ssa.OpLeq8,
2374 {ir.OLE, types.TUINT8}: ssa.OpLeq8U,
2375 {ir.OLE, types.TINT16}: ssa.OpLeq16,
2376 {ir.OLE, types.TUINT16}: ssa.OpLeq16U,
2377 {ir.OLE, types.TINT32}: ssa.OpLeq32,
2378 {ir.OLE, types.TUINT32}: ssa.OpLeq32U,
2379 {ir.OLE, types.TINT64}: ssa.OpLeq64,
2380 {ir.OLE, types.TUINT64}: ssa.OpLeq64U,
2381 {ir.OLE, types.TFLOAT64}: ssa.OpLeq64F,
2382 {ir.OLE, types.TFLOAT32}: ssa.OpLeq32F,
2385 func (s *state) concreteEtype(t *types.Type) types.Kind {
2391 if s.config.PtrSize == 8 {
2396 if s.config.PtrSize == 8 {
2397 return types.TUINT64
2399 return types.TUINT32
2400 case types.TUINTPTR:
2401 if s.config.PtrSize == 8 {
2402 return types.TUINT64
2404 return types.TUINT32
2408 func (s *state) ssaOp(op ir.Op, t *types.Type) ssa.Op {
2409 etype := s.concreteEtype(t)
2410 x, ok := opToSSA[opAndType{op, etype}]
2412 s.Fatalf("unhandled binary op %v %s", op, etype)
2417 type opAndTwoTypes struct {
2423 type twoTypes struct {
2428 type twoOpsAndType struct {
2431 intermediateType types.Kind
2434 var fpConvOpToSSA = map[twoTypes]twoOpsAndType{
2436 {types.TINT8, types.TFLOAT32}: {ssa.OpSignExt8to32, ssa.OpCvt32to32F, types.TINT32},
2437 {types.TINT16, types.TFLOAT32}: {ssa.OpSignExt16to32, ssa.OpCvt32to32F, types.TINT32},
2438 {types.TINT32, types.TFLOAT32}: {ssa.OpCopy, ssa.OpCvt32to32F, types.TINT32},
2439 {types.TINT64, types.TFLOAT32}: {ssa.OpCopy, ssa.OpCvt64to32F, types.TINT64},
2441 {types.TINT8, types.TFLOAT64}: {ssa.OpSignExt8to32, ssa.OpCvt32to64F, types.TINT32},
2442 {types.TINT16, types.TFLOAT64}: {ssa.OpSignExt16to32, ssa.OpCvt32to64F, types.TINT32},
2443 {types.TINT32, types.TFLOAT64}: {ssa.OpCopy, ssa.OpCvt32to64F, types.TINT32},
2444 {types.TINT64, types.TFLOAT64}: {ssa.OpCopy, ssa.OpCvt64to64F, types.TINT64},
2446 {types.TFLOAT32, types.TINT8}: {ssa.OpCvt32Fto32, ssa.OpTrunc32to8, types.TINT32},
2447 {types.TFLOAT32, types.TINT16}: {ssa.OpCvt32Fto32, ssa.OpTrunc32to16, types.TINT32},
2448 {types.TFLOAT32, types.TINT32}: {ssa.OpCvt32Fto32, ssa.OpCopy, types.TINT32},
2449 {types.TFLOAT32, types.TINT64}: {ssa.OpCvt32Fto64, ssa.OpCopy, types.TINT64},
2451 {types.TFLOAT64, types.TINT8}: {ssa.OpCvt64Fto32, ssa.OpTrunc32to8, types.TINT32},
2452 {types.TFLOAT64, types.TINT16}: {ssa.OpCvt64Fto32, ssa.OpTrunc32to16, types.TINT32},
2453 {types.TFLOAT64, types.TINT32}: {ssa.OpCvt64Fto32, ssa.OpCopy, types.TINT32},
2454 {types.TFLOAT64, types.TINT64}: {ssa.OpCvt64Fto64, ssa.OpCopy, types.TINT64},
2456 {types.TUINT8, types.TFLOAT32}: {ssa.OpZeroExt8to32, ssa.OpCvt32to32F, types.TINT32},
2457 {types.TUINT16, types.TFLOAT32}: {ssa.OpZeroExt16to32, ssa.OpCvt32to32F, types.TINT32},
2458 {types.TUINT32, types.TFLOAT32}: {ssa.OpZeroExt32to64, ssa.OpCvt64to32F, types.TINT64}, // go wide to dodge unsigned
2459 {types.TUINT64, types.TFLOAT32}: {ssa.OpCopy, ssa.OpInvalid, types.TUINT64}, // Cvt64Uto32F, branchy code expansion instead
2461 {types.TUINT8, types.TFLOAT64}: {ssa.OpZeroExt8to32, ssa.OpCvt32to64F, types.TINT32},
2462 {types.TUINT16, types.TFLOAT64}: {ssa.OpZeroExt16to32, ssa.OpCvt32to64F, types.TINT32},
2463 {types.TUINT32, types.TFLOAT64}: {ssa.OpZeroExt32to64, ssa.OpCvt64to64F, types.TINT64}, // go wide to dodge unsigned
2464 {types.TUINT64, types.TFLOAT64}: {ssa.OpCopy, ssa.OpInvalid, types.TUINT64}, // Cvt64Uto64F, branchy code expansion instead
2466 {types.TFLOAT32, types.TUINT8}: {ssa.OpCvt32Fto32, ssa.OpTrunc32to8, types.TINT32},
2467 {types.TFLOAT32, types.TUINT16}: {ssa.OpCvt32Fto32, ssa.OpTrunc32to16, types.TINT32},
2468 {types.TFLOAT32, types.TUINT32}: {ssa.OpCvt32Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned
2469 {types.TFLOAT32, types.TUINT64}: {ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt32Fto64U, branchy code expansion instead
2471 {types.TFLOAT64, types.TUINT8}: {ssa.OpCvt64Fto32, ssa.OpTrunc32to8, types.TINT32},
2472 {types.TFLOAT64, types.TUINT16}: {ssa.OpCvt64Fto32, ssa.OpTrunc32to16, types.TINT32},
2473 {types.TFLOAT64, types.TUINT32}: {ssa.OpCvt64Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned
2474 {types.TFLOAT64, types.TUINT64}: {ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt64Fto64U, branchy code expansion instead
2477 {types.TFLOAT64, types.TFLOAT32}: {ssa.OpCvt64Fto32F, ssa.OpCopy, types.TFLOAT32},
2478 {types.TFLOAT64, types.TFLOAT64}: {ssa.OpRound64F, ssa.OpCopy, types.TFLOAT64},
2479 {types.TFLOAT32, types.TFLOAT32}: {ssa.OpRound32F, ssa.OpCopy, types.TFLOAT32},
2480 {types.TFLOAT32, types.TFLOAT64}: {ssa.OpCvt32Fto64F, ssa.OpCopy, types.TFLOAT64},
2483 // this map is used only for 32-bit archs, and only includes the differences:
2484 // on 32-bit archs, don't use int64<->float conversions for uint32.
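// For example, on such a target uint32 -> float64 lowers directly via
// OpCvt32Uto64F, rather than taking the zero-extend-to-int64 path in
// fpConvOpToSSA above.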
2485 var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{
2486 {types.TUINT32, types.TFLOAT32}: {ssa.OpCopy, ssa.OpCvt32Uto32F, types.TUINT32},
2487 {types.TUINT32, types.TFLOAT64}: {ssa.OpCopy, ssa.OpCvt32Uto64F, types.TUINT32},
2488 {types.TFLOAT32, types.TUINT32}: {ssa.OpCvt32Fto32U, ssa.OpCopy, types.TUINT32},
2489 {types.TFLOAT64, types.TUINT32}: {ssa.OpCvt64Fto32U, ssa.OpCopy, types.TUINT32},
2492 // uint64<->float conversions, only on machines that have instructions for that
2493 var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{
2494 {types.TUINT64, types.TFLOAT32}: {ssa.OpCopy, ssa.OpCvt64Uto32F, types.TUINT64},
2495 {types.TUINT64, types.TFLOAT64}: {ssa.OpCopy, ssa.OpCvt64Uto64F, types.TUINT64},
2496 {types.TFLOAT32, types.TUINT64}: {ssa.OpCvt32Fto64U, ssa.OpCopy, types.TUINT64},
2497 {types.TFLOAT64, types.TUINT64}: {ssa.OpCvt64Fto64U, ssa.OpCopy, types.TUINT64},
2500 var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
2501 {ir.OLSH, types.TINT8, types.TUINT8}: ssa.OpLsh8x8,
2502 {ir.OLSH, types.TUINT8, types.TUINT8}: ssa.OpLsh8x8,
2503 {ir.OLSH, types.TINT8, types.TUINT16}: ssa.OpLsh8x16,
2504 {ir.OLSH, types.TUINT8, types.TUINT16}: ssa.OpLsh8x16,
2505 {ir.OLSH, types.TINT8, types.TUINT32}: ssa.OpLsh8x32,
2506 {ir.OLSH, types.TUINT8, types.TUINT32}: ssa.OpLsh8x32,
2507 {ir.OLSH, types.TINT8, types.TUINT64}: ssa.OpLsh8x64,
2508 {ir.OLSH, types.TUINT8, types.TUINT64}: ssa.OpLsh8x64,
2510 {ir.OLSH, types.TINT16, types.TUINT8}: ssa.OpLsh16x8,
2511 {ir.OLSH, types.TUINT16, types.TUINT8}: ssa.OpLsh16x8,
2512 {ir.OLSH, types.TINT16, types.TUINT16}: ssa.OpLsh16x16,
2513 {ir.OLSH, types.TUINT16, types.TUINT16}: ssa.OpLsh16x16,
2514 {ir.OLSH, types.TINT16, types.TUINT32}: ssa.OpLsh16x32,
2515 {ir.OLSH, types.TUINT16, types.TUINT32}: ssa.OpLsh16x32,
2516 {ir.OLSH, types.TINT16, types.TUINT64}: ssa.OpLsh16x64,
2517 {ir.OLSH, types.TUINT16, types.TUINT64}: ssa.OpLsh16x64,
2519 {ir.OLSH, types.TINT32, types.TUINT8}: ssa.OpLsh32x8,
2520 {ir.OLSH, types.TUINT32, types.TUINT8}: ssa.OpLsh32x8,
2521 {ir.OLSH, types.TINT32, types.TUINT16}: ssa.OpLsh32x16,
2522 {ir.OLSH, types.TUINT32, types.TUINT16}: ssa.OpLsh32x16,
2523 {ir.OLSH, types.TINT32, types.TUINT32}: ssa.OpLsh32x32,
2524 {ir.OLSH, types.TUINT32, types.TUINT32}: ssa.OpLsh32x32,
2525 {ir.OLSH, types.TINT32, types.TUINT64}: ssa.OpLsh32x64,
2526 {ir.OLSH, types.TUINT32, types.TUINT64}: ssa.OpLsh32x64,
2528 {ir.OLSH, types.TINT64, types.TUINT8}: ssa.OpLsh64x8,
2529 {ir.OLSH, types.TUINT64, types.TUINT8}: ssa.OpLsh64x8,
2530 {ir.OLSH, types.TINT64, types.TUINT16}: ssa.OpLsh64x16,
2531 {ir.OLSH, types.TUINT64, types.TUINT16}: ssa.OpLsh64x16,
2532 {ir.OLSH, types.TINT64, types.TUINT32}: ssa.OpLsh64x32,
2533 {ir.OLSH, types.TUINT64, types.TUINT32}: ssa.OpLsh64x32,
2534 {ir.OLSH, types.TINT64, types.TUINT64}: ssa.OpLsh64x64,
2535 {ir.OLSH, types.TUINT64, types.TUINT64}: ssa.OpLsh64x64,
2537 {ir.ORSH, types.TINT8, types.TUINT8}: ssa.OpRsh8x8,
2538 {ir.ORSH, types.TUINT8, types.TUINT8}: ssa.OpRsh8Ux8,
2539 {ir.ORSH, types.TINT8, types.TUINT16}: ssa.OpRsh8x16,
2540 {ir.ORSH, types.TUINT8, types.TUINT16}: ssa.OpRsh8Ux16,
2541 {ir.ORSH, types.TINT8, types.TUINT32}: ssa.OpRsh8x32,
2542 {ir.ORSH, types.TUINT8, types.TUINT32}: ssa.OpRsh8Ux32,
2543 {ir.ORSH, types.TINT8, types.TUINT64}: ssa.OpRsh8x64,
2544 {ir.ORSH, types.TUINT8, types.TUINT64}: ssa.OpRsh8Ux64,
2546 {ir.ORSH, types.TINT16, types.TUINT8}: ssa.OpRsh16x8,
2547 {ir.ORSH, types.TUINT16, types.TUINT8}: ssa.OpRsh16Ux8,
2548 {ir.ORSH, types.TINT16, types.TUINT16}: ssa.OpRsh16x16,
2549 {ir.ORSH, types.TUINT16, types.TUINT16}: ssa.OpRsh16Ux16,
2550 {ir.ORSH, types.TINT16, types.TUINT32}: ssa.OpRsh16x32,
2551 {ir.ORSH, types.TUINT16, types.TUINT32}: ssa.OpRsh16Ux32,
2552 {ir.ORSH, types.TINT16, types.TUINT64}: ssa.OpRsh16x64,
2553 {ir.ORSH, types.TUINT16, types.TUINT64}: ssa.OpRsh16Ux64,
2555 {ir.ORSH, types.TINT32, types.TUINT8}: ssa.OpRsh32x8,
2556 {ir.ORSH, types.TUINT32, types.TUINT8}: ssa.OpRsh32Ux8,
2557 {ir.ORSH, types.TINT32, types.TUINT16}: ssa.OpRsh32x16,
2558 {ir.ORSH, types.TUINT32, types.TUINT16}: ssa.OpRsh32Ux16,
2559 {ir.ORSH, types.TINT32, types.TUINT32}: ssa.OpRsh32x32,
2560 {ir.ORSH, types.TUINT32, types.TUINT32}: ssa.OpRsh32Ux32,
2561 {ir.ORSH, types.TINT32, types.TUINT64}: ssa.OpRsh32x64,
2562 {ir.ORSH, types.TUINT32, types.TUINT64}: ssa.OpRsh32Ux64,
2564 {ir.ORSH, types.TINT64, types.TUINT8}: ssa.OpRsh64x8,
2565 {ir.ORSH, types.TUINT64, types.TUINT8}: ssa.OpRsh64Ux8,
2566 {ir.ORSH, types.TINT64, types.TUINT16}: ssa.OpRsh64x16,
2567 {ir.ORSH, types.TUINT64, types.TUINT16}: ssa.OpRsh64Ux16,
2568 {ir.ORSH, types.TINT64, types.TUINT32}: ssa.OpRsh64x32,
2569 {ir.ORSH, types.TUINT64, types.TUINT32}: ssa.OpRsh64Ux32,
2570 {ir.ORSH, types.TINT64, types.TUINT64}: ssa.OpRsh64x64,
2571 {ir.ORSH, types.TUINT64, types.TUINT64}: ssa.OpRsh64Ux64,
2574 func (s *state) ssaShiftOp(op ir.Op, t *types.Type, u *types.Type) ssa.Op {
2575 etype1 := s.concreteEtype(t)
2576 etype2 := s.concreteEtype(u)
2577 x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
2579 s.Fatalf("unhandled shift op %v etype=%s/%s", op, etype1, etype2)
2584 func (s *state) uintptrConstant(v uint64) *ssa.Value {
2585 if s.config.PtrSize == 4 {
2586 return s.newValue0I(ssa.OpConst32, types.Types[types.TUINTPTR], int64(v))
2588 return s.newValue0I(ssa.OpConst64, types.Types[types.TUINTPTR], int64(v))
2591 func (s *state) conv(n ir.Node, v *ssa.Value, ft, tt *types.Type) *ssa.Value {
2592 if ft.IsBoolean() && tt.IsKind(types.TUINT8) {
2593 // Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
2594 return s.newValue1(ssa.OpCvtBoolToUint8, tt, v)
2596 if ft.IsInteger() && tt.IsInteger() {
2598 if tt.Size() == ft.Size() {
2600 } else if tt.Size() < ft.Size() {
2602 switch 10*ft.Size() + tt.Size() {
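// The switch key packs both byte sizes into one int: for example, a
// 4-byte to 2-byte truncation yields 10*4+2 == 42, selecting OpTrunc32to16.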
2604 op = ssa.OpTrunc16to8
2606 op = ssa.OpTrunc32to8
2608 op = ssa.OpTrunc32to16
2610 op = ssa.OpTrunc64to8
2612 op = ssa.OpTrunc64to16
2614 op = ssa.OpTrunc64to32
2616 s.Fatalf("weird integer truncation %v -> %v", ft, tt)
2618 } else if ft.IsSigned() {
2620 switch 10*ft.Size() + tt.Size() {
2622 op = ssa.OpSignExt8to16
2624 op = ssa.OpSignExt8to32
2626 op = ssa.OpSignExt8to64
2628 op = ssa.OpSignExt16to32
2630 op = ssa.OpSignExt16to64
2632 op = ssa.OpSignExt32to64
2634 s.Fatalf("bad integer sign extension %v -> %v", ft, tt)
2638 switch 10*ft.Size() + tt.Size() {
2640 op = ssa.OpZeroExt8to16
2642 op = ssa.OpZeroExt8to32
2644 op = ssa.OpZeroExt8to64
2646 op = ssa.OpZeroExt16to32
2648 op = ssa.OpZeroExt16to64
2650 op = ssa.OpZeroExt32to64
2652 s.Fatalf("weird integer zero extension %v -> %v", ft, tt)
2655 return s.newValue1(op, tt, v)
2658 if ft.IsComplex() && tt.IsComplex() {
2660 if ft.Size() == tt.Size() {
2667 s.Fatalf("weird complex conversion %v -> %v", ft, tt)
2669 } else if ft.Size() == 8 && tt.Size() == 16 {
2670 op = ssa.OpCvt32Fto64F
2671 } else if ft.Size() == 16 && tt.Size() == 8 {
2672 op = ssa.OpCvt64Fto32F
2674 s.Fatalf("weird complex conversion %v -> %v", ft, tt)
2676 ftp := types.FloatForComplex(ft)
2677 ttp := types.FloatForComplex(tt)
2678 return s.newValue2(ssa.OpComplexMake, tt,
2679 s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, v)),
2680 s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, v)))
2683 if tt.IsComplex() { // and ft is not complex
2684 // Needed for generics support - can't happen in normal Go code.
2685 et := types.FloatForComplex(tt)
2686 v = s.conv(n, v, ft, et)
2687 return s.newValue2(ssa.OpComplexMake, tt, v, s.zeroVal(et))
2690 if ft.IsFloat() || tt.IsFloat() {
2691 conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
2692 if s.config.RegSize == 4 && Arch.LinkArch.Family != sys.MIPS && !s.softFloat {
2693 if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
2697 if Arch.LinkArch.Family == sys.ARM64 || Arch.LinkArch.Family == sys.Wasm || Arch.LinkArch.Family == sys.S390X || s.softFloat {
2698 if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
2703 if Arch.LinkArch.Family == sys.MIPS && !s.softFloat {
2704 if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {
2705 // tt is float32 or float64, and ft is also unsigned
2707 return s.uint32Tofloat32(n, v, ft, tt)
2710 return s.uint32Tofloat64(n, v, ft, tt)
2712 } else if tt.Size() == 4 && tt.IsInteger() && !tt.IsSigned() {
2713 // ft is float32 or float64, and tt is unsigned integer
2715 return s.float32ToUint32(n, v, ft, tt)
2718 return s.float64ToUint32(n, v, ft, tt)
2724 s.Fatalf("weird float conversion %v -> %v", ft, tt)
2726 op1, op2, it := conv.op1, conv.op2, conv.intermediateType
2728 if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid {
2729 // normal case, not tripping over unsigned 64
2730 if op1 == ssa.OpCopy {
2731 if op2 == ssa.OpCopy {
2734 return s.newValueOrSfCall1(op2, tt, v)
2736 if op2 == ssa.OpCopy {
2737 return s.newValueOrSfCall1(op1, tt, v)
2739 return s.newValueOrSfCall1(op2, tt, s.newValueOrSfCall1(op1, types.Types[it], v))
2741 // Tricky 64-bit unsigned cases.
2743 // tt is float32 or float64, and ft is also unsigned
2745 return s.uint64Tofloat32(n, v, ft, tt)
2748 return s.uint64Tofloat64(n, v, ft, tt)
2750 s.Fatalf("weird unsigned integer to float conversion %v -> %v", ft, tt)
2752 // ft is float32 or float64, and tt is unsigned integer
2754 return s.float32ToUint64(n, v, ft, tt)
2757 return s.float64ToUint64(n, v, ft, tt)
2759 s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt)
2763 s.Fatalf("unhandled OCONV %s -> %s", ft.Kind(), tt.Kind())
2767 // expr converts the expression n to ssa, adds it to s and returns the ssa result.
2768 func (s *state) expr(n ir.Node) *ssa.Value {
2769 return s.exprCheckPtr(n, true)
2772 func (s *state) exprCheckPtr(n ir.Node, checkPtrOK bool) *ssa.Value {
2773 if ir.HasUniquePos(n) {
2774 // ONAMEs and named OLITERALs have the line number
2775 // of the decl, not the use. See issue 14742.
2780 s.stmtList(n.Init())
2782 case ir.OBYTES2STRTMP:
2783 n := n.(*ir.ConvExpr)
2784 slice := s.expr(n.X)
2785 ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
2786 len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice)
2787 return s.newValue2(ssa.OpStringMake, n.Type(), ptr, len)
2788 case ir.OSTR2BYTESTMP:
2789 n := n.(*ir.ConvExpr)
2791 ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str)
2793 // We need to ensure []byte("") evaluates to []byte{}, and not []byte(nil).
2795 // TODO(mdempsky): Investigate using "len != 0" instead of "ptr != nil".
2796 cond := s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], ptr, s.constNil(ptr.Type))
2797 zerobase := s.newValue1A(ssa.OpAddr, ptr.Type, ir.Syms.Zerobase, s.sb)
2798 ptr = s.ternary(cond, ptr, zerobase)
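// Illustrative: when the source string is "", its data pointer may be nil,
// so the ternary swaps in zerobase, yielding an empty but non-nil []byte as
// the comment above requires.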
2800 len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], str)
2801 return s.newValue3(ssa.OpSliceMake, n.Type(), ptr, len, len)
2803 n := n.(*ir.UnaryExpr)
2804 aux := n.X.(*ir.Name).Linksym()
2805 // OCFUNC is used to build function values, which must
2806 // always reference ABIInternal entry points.
2807 if aux.ABI() != obj.ABIInternal {
2808 s.Fatalf("expected ABIInternal: %v", aux.ABI())
2810 return s.entryNewValue1A(ssa.OpAddr, n.Type(), aux, s.sb)
2813 if n.Class == ir.PFUNC {
2814 // "value" of a function is the address of the function's closure
2815 sym := staticdata.FuncLinksym(n)
2816 return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type()), sym, s.sb)
2819 return s.variable(n, n.Type())
2821 return s.load(n.Type(), s.addr(n))
2822 case ir.OLINKSYMOFFSET:
2823 n := n.(*ir.LinksymOffsetExpr)
2824 return s.load(n.Type(), s.addr(n))
2826 n := n.(*ir.NilExpr)
2830 return s.constSlice(t)
2831 case t.IsInterface():
2832 return s.constInterface(t)
2834 return s.constNil(t)
2837 switch u := n.Val(); u.Kind() {
2839 i := ir.IntVal(n.Type(), u)
2840 switch n.Type().Size() {
2842 return s.constInt8(n.Type(), int8(i))
2844 return s.constInt16(n.Type(), int16(i))
2846 return s.constInt32(n.Type(), int32(i))
2848 return s.constInt64(n.Type(), i)
2850 s.Fatalf("bad integer size %d", n.Type().Size())
2853 case constant.String:
2854 i := constant.StringVal(u)
2856 return s.constEmptyString(n.Type())
2858 return s.entryNewValue0A(ssa.OpConstString, n.Type(), ssa.StringToAux(i))
2860 return s.constBool(constant.BoolVal(u))
2861 case constant.Float:
2862 f, _ := constant.Float64Val(u)
2863 switch n.Type().Size() {
2865 return s.constFloat32(n.Type(), f)
2867 return s.constFloat64(n.Type(), f)
2869 s.Fatalf("bad float size %d", n.Type().Size())
2872 case constant.Complex:
2873 re, _ := constant.Float64Val(constant.Real(u))
2874 im, _ := constant.Float64Val(constant.Imag(u))
2875 switch n.Type().Size() {
2877 pt := types.Types[types.TFLOAT32]
2878 return s.newValue2(ssa.OpComplexMake, n.Type(),
2879 s.constFloat32(pt, re),
2880 s.constFloat32(pt, im))
2882 pt := types.Types[types.TFLOAT64]
2883 return s.newValue2(ssa.OpComplexMake, n.Type(),
2884 s.constFloat64(pt, re),
2885 s.constFloat64(pt, im))
2887 s.Fatalf("bad complex size %d", n.Type().Size())
2891 s.Fatalf("unhandled OLITERAL %v", u.Kind())
2895 n := n.(*ir.ConvExpr)
2899 // Assume everything will work out, so set up our return value.
2900 // Anything interesting that happens from here is a fatal.
2906 // Special case to avoid confusing GC and liveness.
2907 // We don't want pointers accidentally classified
2908 // as not-pointers or vice-versa because of copy
2909 // elision.
2910 if to.IsPtrShaped() != from.IsPtrShaped() {
2911 return s.newValue2(ssa.OpConvert, to, x, s.mem())
2914 v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type
2917 if to.Kind() == types.TFUNC && from.IsPtrShaped() {
2921 // named <--> unnamed type or typed <--> untyped const
2922 if from.Kind() == to.Kind() {
2926 // unsafe.Pointer <--> *T
2927 if to.IsUnsafePtr() && from.IsPtrShaped() || from.IsUnsafePtr() && to.IsPtrShaped() {
2928 if s.checkPtrEnabled && checkPtrOK && to.IsPtr() && from.IsUnsafePtr() {
2929 s.checkPtrAlignment(n, v, nil)
2935 if to.Kind() == types.TMAP && from == types.NewPtr(reflectdata.MapType()) {
2939 types.CalcSize(from)
2941 if from.Size() != to.Size() {
2942 s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Size(), to, to.Size())
2945 if etypesign(from.Kind()) != etypesign(to.Kind()) {
2946 s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Kind(), to, to.Kind())
2950 if base.Flag.Cfg.Instrumenting {
2951 // These appear to be fine, but they fail the
2952 // integer constraint below, so okay them here.
2953 // Sample non-integer conversion: map[string]string -> *uint8
2957 if etypesign(from.Kind()) == 0 {
2958 s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
2962 // integer, same width, same sign
2966 n := n.(*ir.ConvExpr)
2968 return s.conv(n, x, n.X.Type(), n.Type())
2971 n := n.(*ir.TypeAssertExpr)
2972 res, _ := s.dottype(n, false)
2975 case ir.ODYNAMICDOTTYPE:
2976 n := n.(*ir.DynamicTypeAssertExpr)
2977 res, _ := s.dynamicDottype(n, false)
2981 case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
2982 n := n.(*ir.BinaryExpr)
2985 if n.X.Type().IsComplex() {
2986 pt := types.FloatForComplex(n.X.Type())
2987 op := s.ssaOp(ir.OEQ, pt)
2988 r := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
2989 i := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
2990 c := s.newValue2(ssa.OpAndB, types.Types[types.TBOOL], r, i)
2995 return s.newValue1(ssa.OpNot, types.Types[types.TBOOL], c)
2997 s.Fatalf("ordered complex compare %v", n.Op())
3001 // Convert OGE and OGT into OLE and OLT.
3005 op, a, b = ir.OLE, b, a
3007 op, a, b = ir.OLT, b, a
3009 if n.X.Type().IsFloat() {
3011 return s.newValueOrSfCall2(s.ssaOp(op, n.X.Type()), types.Types[types.TBOOL], a, b)
3013 // integer comparison
3014 return s.newValue2(s.ssaOp(op, n.X.Type()), types.Types[types.TBOOL], a, b)
3016 n := n.(*ir.BinaryExpr)
3019 if n.Type().IsComplex() {
3020 mulop := ssa.OpMul64F
3021 addop := ssa.OpAdd64F
3022 subop := ssa.OpSub64F
3023 pt := types.FloatForComplex(n.Type()) // Could be Float32 or Float64
3024 wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error
3026 areal := s.newValue1(ssa.OpComplexReal, pt, a)
3027 breal := s.newValue1(ssa.OpComplexReal, pt, b)
3028 aimag := s.newValue1(ssa.OpComplexImag, pt, a)
3029 bimag := s.newValue1(ssa.OpComplexImag, pt, b)
3031 if pt != wt { // Widen for calculation
3032 areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal)
3033 breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal)
3034 aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag)
3035 bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag)
3038 xreal := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag))
3039 ximag := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, bimag), s.newValueOrSfCall2(mulop, wt, aimag, breal))
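// i.e. the standard identity (ar+ai*i)*(br+bi*i) = (ar*br - ai*bi) + (ar*bi + ai*br)*i.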
3041 if pt != wt { // Narrow to store back
3042 xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
3043 ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
3046 return s.newValue2(ssa.OpComplexMake, n.Type(), xreal, ximag)
3049 if n.Type().IsFloat() {
3050 return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
3053 return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
3056 n := n.(*ir.BinaryExpr)
3059 if n.Type().IsComplex() {
3060 // TODO this is not executed because the front-end substitutes a runtime call.
3061 // That probably ought to change; with modest optimization the widen/narrow
3062 // conversions could all be elided in larger expression trees.
3063 mulop := ssa.OpMul64F
3064 addop := ssa.OpAdd64F
3065 subop := ssa.OpSub64F
3066 divop := ssa.OpDiv64F
3067 pt := types.FloatForComplex(n.Type()) // Could be Float32 or Float64
3068 wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error
3070 areal := s.newValue1(ssa.OpComplexReal, pt, a)
3071 breal := s.newValue1(ssa.OpComplexReal, pt, b)
3072 aimag := s.newValue1(ssa.OpComplexImag, pt, a)
3073 bimag := s.newValue1(ssa.OpComplexImag, pt, b)
3075 if pt != wt { // Widen for calculation
3076 areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal)
3077 breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal)
3078 aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag)
3079 bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag)
3082 denom := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, breal, breal), s.newValueOrSfCall2(mulop, wt, bimag, bimag))
3083 xreal := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag))
3084 ximag := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, aimag, breal), s.newValueOrSfCall2(mulop, wt, areal, bimag))
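// This is the textbook formula a/b = a*conj(b)/|b|^2, applied below:
//   real = (ar*br + ai*bi) / (br*br + bi*bi)
//   imag = (ai*br - ar*bi) / (br*br + bi*bi)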
3086 // TODO: not sure if this is best done in wide precision or narrow.
3087 // Double-rounding might be an issue.
3088 // Note that the pre-SSA implementation does the entire calculation
3089 // in wide format, so wide is compatible.
3090 xreal = s.newValueOrSfCall2(divop, wt, xreal, denom)
3091 ximag = s.newValueOrSfCall2(divop, wt, ximag, denom)
3093 if pt != wt { // Narrow to store back
3094 xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
3095 ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
3097 return s.newValue2(ssa.OpComplexMake, n.Type(), xreal, ximag)
3099 if n.Type().IsFloat() {
3100 return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
3102 return s.intDivide(n, a, b)
3104 n := n.(*ir.BinaryExpr)
3107 return s.intDivide(n, a, b)
3108 case ir.OADD, ir.OSUB:
3109 n := n.(*ir.BinaryExpr)
3112 if n.Type().IsComplex() {
3113 pt := types.FloatForComplex(n.Type())
3114 op := s.ssaOp(n.Op(), pt)
3115 return s.newValue2(ssa.OpComplexMake, n.Type(),
3116 s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)),
3117 s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)))
3119 if n.Type().IsFloat() {
3120 return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
3122 return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
3123 case ir.OAND, ir.OOR, ir.OXOR:
3124 n := n.(*ir.BinaryExpr)
3127 return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
3129 n := n.(*ir.BinaryExpr)
3132 b = s.newValue1(s.ssaOp(ir.OBITNOT, b.Type), b.Type, b)
3133 return s.newValue2(s.ssaOp(ir.OAND, n.Type()), a.Type, a, b)
3134 case ir.OLSH, ir.ORSH:
3135 n := n.(*ir.BinaryExpr)
3140 cmp := s.newValue2(s.ssaOp(ir.OLE, bt), types.Types[types.TBOOL], s.zeroVal(bt), b)
3141 s.check(cmp, ir.Syms.Panicshift)
3142 bt = bt.ToUnsigned()
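// Illustrative: for x << y with y of a signed type, a negative y panics at
// run time via the check above; after that, y is safely treated as unsigned.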
3144 return s.newValue2(s.ssaShiftOp(n.Op(), n.Type(), bt), a.Type, a, b)
3145 case ir.OANDAND, ir.OOROR:
3146 // To implement OANDAND (and OOROR), we introduce a
3147 // new temporary variable to hold the result. The
3148 // variable is associated with the OANDAND node in the
3149 // s.vars table (normally variables are only
3150 // associated with ONAME nodes). We convert
3151 //     A && B
3152 // to
3153 //     var = A
3154 //     if var {
3155 //         var = B
3156 //     }
3157 // Using var in the subsequent block introduces the
3158 // necessary phi variable.
3159 n := n.(*ir.LogicalExpr)
3164 b.Kind = ssa.BlockIf
3166 // In theory, we should set b.Likely here based on context.
3167 // However, gc only gives us likeliness hints
3168 // in a single place, for plain OIF statements,
3169 // and passing around context is finicky, so don't bother for now.
3171 bRight := s.f.NewBlock(ssa.BlockPlain)
3172 bResult := s.f.NewBlock(ssa.BlockPlain)
3173 if n.Op() == ir.OANDAND {
3175 b.AddEdgeTo(bResult)
3176 } else if n.Op() == ir.OOROR {
3177 b.AddEdgeTo(bResult)
3181 s.startBlock(bRight)
3186 b.AddEdgeTo(bResult)
3188 s.startBlock(bResult)
3189 return s.variable(n, types.Types[types.TBOOL])
3191 n := n.(*ir.BinaryExpr)
3194 return s.newValue2(ssa.OpComplexMake, n.Type(), r, i)
3198 n := n.(*ir.UnaryExpr)
3200 if n.Type().IsComplex() {
3201 tp := types.FloatForComplex(n.Type())
3202 negop := s.ssaOp(n.Op(), tp)
3203 return s.newValue2(ssa.OpComplexMake, n.Type(),
3204 s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)),
3205 s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a)))
3207 return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a)
3208 case ir.ONOT, ir.OBITNOT:
3209 n := n.(*ir.UnaryExpr)
3211 return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a)
3212 case ir.OIMAG, ir.OREAL:
3213 n := n.(*ir.UnaryExpr)
3215 return s.newValue1(s.ssaOp(n.Op(), n.X.Type()), n.Type(), a)
3217 n := n.(*ir.UnaryExpr)
3221 n := n.(*ir.AddrExpr)
3225 n := n.(*ir.ResultExpr)
3226 if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall {
3227 panic("Expected to see a previous call")
3231 panic(fmt.Errorf("ORESULT %v does not match call %s", n, s.prevCall))
3233 return s.resultOfCall(s.prevCall, which, n.Type())
3236 n := n.(*ir.StarExpr)
3237 p := s.exprPtr(n.X, n.Bounded(), n.Pos())
3238 return s.load(n.Type(), p)
3241 n := n.(*ir.SelectorExpr)
3242 if n.X.Op() == ir.OSTRUCTLIT {
3243 // All literals with nonzero fields have already been
3244 // rewritten during walk. Any that remain are just T{}
3245 // or equivalents. Use the zero value.
3246 if !ir.IsZero(n.X) {
3247 s.Fatalf("literal with nonzero value in SSA: %v", n.X)
3249 return s.zeroVal(n.Type())
3251 // If n is addressable and can't be represented in
3252 // SSA, then load just the selected field. This
3253 // prevents false memory dependencies in race/msan/asan
3254 // instrumented programs.
3255 if ir.IsAddressable(n) && !s.canSSA(n) {
3257 return s.load(n.Type(), p)
3260 return s.newValue1I(ssa.OpStructSelect, n.Type(), int64(fieldIdx(n)), v)
3263 n := n.(*ir.SelectorExpr)
3264 p := s.exprPtr(n.X, n.Bounded(), n.Pos())
3265 p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type()), n.Offset(), p)
3266 return s.load(n.Type(), p)
3269 n := n.(*ir.IndexExpr)
3271 case n.X.Type().IsString():
3272 if n.Bounded() && ir.IsConst(n.X, constant.String) && ir.IsConst(n.Index, constant.Int) {
3273 // Replace "abc"[1] with 'b'.
3274 // Delayed until now because "abc"[1] is not an ideal constant.
3275 // See test/fixedbugs/issue11370.go.
3276 return s.newValue0I(ssa.OpConst8, types.Types[types.TUINT8], int64(int8(ir.StringVal(n.X)[ir.Int64Val(n.Index)])))
3279 i := s.expr(n.Index)
3280 len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], a)
3281 i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
3282 ptrtyp := s.f.Config.Types.BytePtr
3283 ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
3284 if ir.IsConst(n.Index, constant.Int) {
3285 ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, ir.Int64Val(n.Index), ptr)
3287 ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
3289 return s.load(types.Types[types.TUINT8], ptr)
3290 case n.X.Type().IsSlice():
3292 return s.load(n.X.Type().Elem(), p)
3293 case n.X.Type().IsArray():
3294 if ssa.CanSSA(n.X.Type()) {
3295 // SSA can handle arrays of length at most 1.
3296 bound := n.X.Type().NumElem()
3298 i := s.expr(n.Index)
3300 // Bounds check will never succeed. Might as well
3301 // use constants for the bounds check.
3302 z := s.constInt(types.Types[types.TINT], 0)
3303 s.boundsCheck(z, z, ssa.BoundsIndex, false)
3304 // The return value won't be live; return junk.
3305 // But not quite junk, in case bounds checks are turned off. See issue 48092.
3306 return s.zeroVal(n.Type())
3308 len := s.constInt(types.Types[types.TINT], bound)
3309 s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded()) // checks i == 0
3310 return s.newValue1I(ssa.OpArraySelect, n.Type(), 0, a)
3313 return s.load(n.X.Type().Elem(), p)
3315 s.Fatalf("bad type for index %v", n.X.Type())
3319 case ir.OLEN, ir.OCAP:
3320 n := n.(*ir.UnaryExpr)
3322 case n.X.Type().IsSlice():
3323 op := ssa.OpSliceLen
3324 if n.Op() == ir.OCAP {
3327 return s.newValue1(op, types.Types[types.TINT], s.expr(n.X))
3328 case n.X.Type().IsString(): // string; not reachable for OCAP
3329 return s.newValue1(ssa.OpStringLen, types.Types[types.TINT], s.expr(n.X))
3330 case n.X.Type().IsMap(), n.X.Type().IsChan():
3331 return s.referenceTypeBuiltin(n, s.expr(n.X))
3333 return s.constInt(types.Types[types.TINT], n.X.Type().NumElem())
3337 n := n.(*ir.UnaryExpr)
3339 if n.X.Type().IsSlice() {
3341 return s.newValue1(ssa.OpSlicePtr, n.Type(), a)
3343 return s.newValue1(ssa.OpSlicePtrUnchecked, n.Type(), a)
3345 return s.newValue1(ssa.OpStringPtr, n.Type(), a)
3349 n := n.(*ir.UnaryExpr)
3351 return s.newValue1(ssa.OpITab, n.Type(), a)
3354 n := n.(*ir.UnaryExpr)
3356 return s.newValue1(ssa.OpIData, n.Type(), a)
3359 n := n.(*ir.BinaryExpr)
3362 return s.newValue2(ssa.OpIMake, n.Type(), tab, data)
3364 case ir.OSLICEHEADER:
3365 n := n.(*ir.SliceHeaderExpr)
3369 return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c)
3371 case ir.OSTRINGHEADER:
3372 n := n.(*ir.StringHeaderExpr)
3375 return s.newValue2(ssa.OpStringMake, n.Type(), p, l)
3377 case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR:
3378 n := n.(*ir.SliceExpr)
3379 check := s.checkPtrEnabled && n.Op() == ir.OSLICE3ARR && n.X.Op() == ir.OCONVNOP && n.X.(*ir.ConvExpr).X.Type().IsUnsafePtr()
3380 v := s.exprCheckPtr(n.X, !check)
3381 var i, j, k *ssa.Value
3391 p, l, c := s.slice(v, i, j, k, n.Bounded())
3393 // Emit checkptr instrumentation after the bounds check to prevent a false positive, see #46938.
3394 s.checkPtrAlignment(n.X.(*ir.ConvExpr), v, s.conv(n.Max, k, k.Type, types.Types[types.TUINTPTR]))
3396 return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c)
3399 n := n.(*ir.SliceExpr)
3408 p, l, _ := s.slice(v, i, j, nil, n.Bounded())
3409 return s.newValue2(ssa.OpStringMake, n.Type(), p, l)
3411 case ir.OSLICE2ARRPTR:
3412 // if arrlen > slice.len {
3413 //   panic(...)
3414 // }
3415 // slice.ptr
3416 n := n.(*ir.ConvExpr)
3418 nelem := n.Type().Elem().NumElem()
3419 arrlen := s.constInt(types.Types[types.TINT], nelem)
3420 cap := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], v)
3421 s.boundsCheck(arrlen, cap, ssa.BoundsConvert, false)
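// At the Go level (illustrative): p := (*[4]int)(s) panics when len(s) < 4.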
3422 op := ssa.OpSlicePtr
3424 op = ssa.OpSlicePtrUnchecked
3426 return s.newValue1(op, n.Type(), v)
3429 n := n.(*ir.CallExpr)
3430 if ir.IsIntrinsicCall(n) {
3431 return s.intrinsicCall(n)
3436 n := n.(*ir.CallExpr)
3437 return s.callResult(n, callNormal)
3440 n := n.(*ir.CallExpr)
3441 return s.newValue1(ssa.OpGetG, n.Type(), s.mem())
3443 case ir.OGETCALLERPC:
3444 n := n.(*ir.CallExpr)
3445 return s.newValue0(ssa.OpGetCallerPC, n.Type())
3447 case ir.OGETCALLERSP:
3448 n := n.(*ir.CallExpr)
3449 return s.newValue1(ssa.OpGetCallerSP, n.Type(), s.mem())
3452 return s.append(n.(*ir.CallExpr), false)
3454 case ir.OMIN, ir.OMAX:
3455 return s.minMax(n.(*ir.CallExpr))
3457 case ir.OSTRUCTLIT, ir.OARRAYLIT:
3458 // All literals with nonzero fields have already been
3459 // rewritten during walk. Any that remain are just T{}
3460 // or equivalents. Use the zero value.
3461 n := n.(*ir.CompLitExpr)
3463 s.Fatalf("literal with nonzero value in SSA: %v", n)
3465 return s.zeroVal(n.Type())
3468 n := n.(*ir.UnaryExpr)
3469 var rtype *ssa.Value
3470 if x, ok := n.X.(*ir.DynamicType); ok && x.Op() == ir.ODYNAMICTYPE {
3471 rtype = s.expr(x.RType)
3473 return s.newObject(n.Type().Elem(), rtype)
3476 n := n.(*ir.BinaryExpr)
3480 // Force len to uintptr to prevent misuse of garbage bits in the
3481 // upper part of the register (#48536).
3482 len = s.conv(n, len, len.Type, types.Types[types.TUINTPTR])
3484 return s.newValue2(ssa.OpAddPtr, n.Type(), ptr, len)
3487 s.Fatalf("unhandled expr %v", n.Op())
3492 func (s *state) resultOfCall(c *ssa.Value, which int64, t *types.Type) *ssa.Value {
3493 aux := c.Aux.(*ssa.AuxCall)
3494 pa := aux.ParamAssignmentForResult(which)
3495 // TODO(register args) determine if in-memory TypeOK is better loaded early from SelectNAddr or later when SelectN is expanded.
3496 // SelectN is better for pattern-matching and possible call-aware analysis we might want to do in the future.
3497 if len(pa.Registers) == 0 && !ssa.CanSSA(t) {
3498 addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), which, c)
3499 return s.rawLoad(t, addr)
3501 return s.newValue1I(ssa.OpSelectN, t, which, c)
3504 func (s *state) resultAddrOfCall(c *ssa.Value, which int64, t *types.Type) *ssa.Value {
3505 aux := c.Aux.(*ssa.AuxCall)
3506 pa := aux.ParamAssignmentForResult(which)
3507 if len(pa.Registers) == 0 {
3508 return s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), which, c)
3510 _, addr := s.temp(c.Pos, t)
3511 rval := s.newValue1I(ssa.OpSelectN, t, which, c)
3512 s.vars[memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, addr, rval, s.mem(), false)
3516 // append converts an OAPPEND node to SSA.
3517 // If inplace is false, it converts the OAPPEND expression n to an ssa.Value,
3518 // adds it to s, and returns the Value.
3519 // If inplace is true, it writes the result of the OAPPEND expression n
3520 // back to the slice being appended to, and returns nil.
3521 // inplace MUST be set to false if the slice can be SSA'd.
3522 // Note: this code only handles fixed-count appends. Dotdotdot appends
3523 // have already been rewritten at this point (by walk).
3524 func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value {
3525 // If inplace is false, process as expression "append(s, e1, e2, e3)":
3527 // ptr, len, cap := s
3529 // if uint(len) > uint(cap) {
3530 // ptr, len, cap = growslice(ptr, len, cap, 3, typ)
3531 // Note that len is unmodified by growslice.
3533 // // with write barriers, if needed:
3534 // *(ptr+(len-3)) = e1
3535 // *(ptr+(len-2)) = e2
3536 // *(ptr+(len-1)) = e3
3537 // return makeslice(ptr, len, cap)
3540 // If inplace is true, process as statement "s = append(s, e1, e2, e3)":
3543 // ptr, len, cap := s
3545 // if uint(len) > uint(cap) {
3546 // ptr, len, cap = growslice(ptr, len, cap, 3, typ)
3547 // vardef(a) // if necessary, advise liveness we are writing a new a
3548 // *a.cap = cap // write before ptr to avoid a spill
3549 // *a.ptr = ptr // with write barrier
3552 // // with write barriers, if needed:
3553 // *(ptr+(len-3)) = e1
3554 // *(ptr+(len-2)) = e2
3555 // *(ptr+(len-1)) = e3
3557 et := n.Type().Elem()
3558 pt := types.NewPtr(et)
3561 sn := n.Args[0] // the slice node is the first in the list
3562 var slice, addr *ssa.Value
3565 slice = s.load(n.Type(), addr)
3570 // Allocate new blocks
3571 grow := s.f.NewBlock(ssa.BlockPlain)
3572 assign := s.f.NewBlock(ssa.BlockPlain)
3574 // Decompose input slice.
3575 p := s.newValue1(ssa.OpSlicePtr, pt, slice)
3576 l := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice)
3577 c := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], slice)
3579 // Add number of new elements to length.
3580 nargs := s.constInt(types.Types[types.TINT], int64(len(n.Args)-1))
3581 l = s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], l, nargs)
3583 // Decide if we need to grow
3584 cmp := s.newValue2(s.ssaOp(ir.OLT, types.Types[types.TUINT]), types.Types[types.TBOOL], c, l)
3586 // Record values of ptr/len/cap before branch.
3594 b.Kind = ssa.BlockIf
3595 b.Likely = ssa.BranchUnlikely
3602 taddr := s.expr(n.Fun)
3603 r := s.rtcall(ir.Syms.Growslice, true, []*types.Type{n.Type()}, p, l, c, nargs, taddr)
3605 // Decompose output slice
3606 p = s.newValue1(ssa.OpSlicePtr, pt, r[0])
3607 l = s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], r[0])
3608 c = s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], r[0])
3614 if sn.Op() == ir.ONAME {
3616 if sn.Class != ir.PEXTERN {
3617 // Tell liveness we're about to build a new slice
3618 s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem())
3621 capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, types.SliceCapOffset, addr)
3622 s.store(types.Types[types.TINT], capaddr, c)
3623 s.store(pt, addr, p)
3629 // assign new elements to slots
3630 s.startBlock(assign)
3631 p = s.variable(ptrVar, pt) // generates phi for ptr
3632 l = s.variable(lenVar, types.Types[types.TINT]) // generates phi for len
3634 c = s.variable(capVar, types.Types[types.TINT]) // generates phi for cap
3638 // Update length in place.
3639 // We have to wait until here to make sure growslice succeeded.
3640 lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, types.SliceLenOffset, addr)
3641 s.store(types.Types[types.TINT], lenaddr, l)
3645 type argRec struct {
3646 // if store is true, we're appending the value v. If false, we're appending the value at *v.
3651 args := make([]argRec, 0, len(n.Args[1:]))
3652 for _, n := range n.Args[1:] {
3653 if ssa.CanSSA(n.Type()) {
3654 args = append(args, argRec{v: s.expr(n), store: true})
3657 args = append(args, argRec{v: v})
3661 // Write args into slice.
3662 oldLen := s.newValue2(s.ssaOp(ir.OSUB, types.Types[types.TINT]), types.Types[types.TINT], l, nargs)
3663 p2 := s.newValue2(ssa.OpPtrIndex, pt, p, oldLen)
3664 for i, arg := range args {
3665 addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(types.Types[types.TINT], int64(i)))
3667 s.storeType(et, addr, arg.v, 0, true)
3669 s.move(et, addr, arg.v)
3673 // The following deletions have no practical effect at this time
3674 // because state.vars has been reset by the preceding state.startBlock.
3675 // They only enforce the fact that these variables are no longer needed in
3676 // the current scope.
3677 delete(s.vars, ptrVar)
3678 delete(s.vars, lenVar)
3680 delete(s.vars, capVar)
3687 return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c)
3690 // minMax converts an OMIN/OMAX builtin call into SSA.
3691 func (s *state) minMax(n *ir.CallExpr) *ssa.Value {
3692 // The OMIN/OMAX builtin is variadic, but its semantics are
3693 // equivalent to left-folding a binary min/max operation across the arguments.
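// For example, min(a, b, c) is evaluated as if it were the left fold
// (sketch using a hypothetical binary helper min2):
//
//	min2(min2(a, b), c)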
3695 fold := func(op func(x, a *ssa.Value) *ssa.Value) *ssa.Value {
3696 x := s.expr(n.Args[0])
3697 for _, arg := range n.Args[1:] {
3698 x = op(x, s.expr(arg))
3705 if typ.IsFloat() || typ.IsString() {
3706 // min/max semantics for floats are tricky because of NaNs and
3707 // negative zero. Some architectures have instructions which
3708 // we can use to generate the right result. For others we must
3709 // call into the runtime instead.
3711 // Strings are conceptually simpler, but we currently desugar
3712 // string comparisons during walk, not ssagen.
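// As a reminder of the semantics that make this tricky (per the Go spec):
//
//	min(x, NaN) is NaN      // any NaN argument propagates to the result
//	min(-0.0, +0.0) is -0.0 // negative zero is ordered below positive zero
//
// A naive compare-and-branch lowering gets both of these cases wrong.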
3715 switch Arch.LinkArch.Family {
3716 case sys.AMD64, sys.ARM64:
3719 case typ.Kind() == types.TFLOAT64 && n.Op() == ir.OMIN:
3721 case typ.Kind() == types.TFLOAT64 && n.Op() == ir.OMAX:
3723 case typ.Kind() == types.TFLOAT32 && n.Op() == ir.OMIN:
3725 case typ.Kind() == types.TFLOAT32 && n.Op() == ir.OMAX:
3728 return fold(func(x, a *ssa.Value) *ssa.Value {
3729 return s.newValue2(op, typ, x, a)
3735 case types.TFLOAT32:
3742 case types.TFLOAT64:
3757 fn := typecheck.LookupRuntimeFunc(name)
3759 return fold(func(x, a *ssa.Value) *ssa.Value {
3760 return s.rtcall(fn, true, []*types.Type{typ}, x, a)[0]
3764 lt := s.ssaOp(ir.OLT, typ)
3766 return fold(func(x, a *ssa.Value) *ssa.Value {
3770 return s.ternary(s.newValue2(lt, types.Types[types.TBOOL], a, x), a, x)
3773 return s.ternary(s.newValue2(lt, types.Types[types.TBOOL], x, a), a, x)
3775 panic("unreachable")
3779 // ternary emits code to evaluate cond ? x : y.
3780 func (s *state) ternary(cond, x, y *ssa.Value) *ssa.Value {
3781 // Note that we need a new ternaryVar each time (unlike okVar where we can
3782 // reuse the variable) because it might have a different type every time.
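//
// Schematically, the generated control flow is:
//
//	if cond {
//		ternaryVar = x
//	} else {
//		ternaryVar = y
//	}
//	result = ternaryVar // becomes a phi at the join block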
3783 ternaryVar := ssaMarker("ternary")
3785 bThen := s.f.NewBlock(ssa.BlockPlain)
3786 bElse := s.f.NewBlock(ssa.BlockPlain)
3787 bEnd := s.f.NewBlock(ssa.BlockPlain)
3790 b.Kind = ssa.BlockIf
3796 s.vars[ternaryVar] = x
3797 s.endBlock().AddEdgeTo(bEnd)
3800 s.vars[ternaryVar] = y
3801 s.endBlock().AddEdgeTo(bEnd)
3804 r := s.variable(ternaryVar, x.Type)
3805 delete(s.vars, ternaryVar)
3809 // condBranch evaluates the boolean expression cond and branches to yes
3810 // if cond is true and no if cond is false.
3811 // This function is intended to handle && and || better than just calling
3812 // s.expr(cond) and branching on the result.
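//
// For example, "a && b" is lowered (schematically) as:
//
//	if a {
//		goto mid
//	}
//	goto no
//	mid:
//	if b {
//		goto yes
//	}
//	goto no
//
// so that b is evaluated only when a is true.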
3813 func (s *state) condBranch(cond ir.Node, yes, no *ssa.Block, likely int8) {
3816 cond := cond.(*ir.LogicalExpr)
3817 mid := s.f.NewBlock(ssa.BlockPlain)
3818 s.stmtList(cond.Init())
3819 s.condBranch(cond.X, mid, no, max8(likely, 0))
3821 s.condBranch(cond.Y, yes, no, likely)
3823 // Note: if likely==1, then both recursive calls pass 1.
3824 // If likely==-1, then we don't have enough information to decide
3825 // whether the first branch is likely or not. So we pass 0 for
3826 // the likeliness of the first branch.
3827 // TODO: have the frontend give us branch prediction hints for
3828 // OANDAND and OOROR nodes (if it ever has such info).
3830 cond := cond.(*ir.LogicalExpr)
3831 mid := s.f.NewBlock(ssa.BlockPlain)
3832 s.stmtList(cond.Init())
3833 s.condBranch(cond.X, yes, mid, min8(likely, 0))
3835 s.condBranch(cond.Y, yes, no, likely)
3837 // Note: if likely==-1, then both recursive calls pass -1.
3838 // If likely==1, then we don't have enough info to decide
3839 // the likelihood of the first branch.
3841 cond := cond.(*ir.UnaryExpr)
3842 s.stmtList(cond.Init())
3843 s.condBranch(cond.X, no, yes, -likely)
3846 cond := cond.(*ir.ConvExpr)
3847 s.stmtList(cond.Init())
3848 s.condBranch(cond.X, yes, no, likely)
3853 b.Kind = ssa.BlockIf
3855 b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness
3863 skipPtr skipMask = 1 << iota
3868 // assign does left = right.
3869 // Right has already been evaluated to ssa, left has not.
3870 // If deref is true, then we do left = *right instead (and right has already been nil-checked).
3871 // If deref is true and right == nil, just do left = 0.
3872 // skip indicates assignments (at the top level) that can be avoided.
3873 // mayOverlap indicates whether left&right might partially overlap in memory. Default is false.
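//
// An illustrative (hypothetical) case where mayOverlap matters:
//
//	*p = *q // p, q *T, possibly pointing into the same backing array
//
// When the compiler cannot prove the source and destination are disjoint,
// the assignment must be compiled as an overlap-safe move.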
3874 func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask) {
3875 s.assignWhichMayOverlap(left, right, deref, skip, false)
3877 func (s *state) assignWhichMayOverlap(left ir.Node, right *ssa.Value, deref bool, skip skipMask, mayOverlap bool) {
3878 if left.Op() == ir.ONAME && ir.IsBlank(left) {
3885 s.Fatalf("can SSA LHS %v but not RHS %s", left, right)
3887 if left.Op() == ir.ODOT {
3888 // We're assigning to a field of an ssa-able value.
3889 // We need to build a new structure with the new value for the
3890 // field we're assigning and the old values for the other fields.
3892 // type T struct {a, b, c int}
3895 // For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c}
3897 // Grab information about the structure type.
3898 left := left.(*ir.SelectorExpr)
3901 idx := fieldIdx(left)
3903 // Grab old value of structure.
3904 old := s.expr(left.X)
3906 // Make new structure.
3907 new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t)
3909 // Add fields as args.
3910 for i := 0; i < nf; i++ {
3914 new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old))
3918 // Recursively assign the new value we've made to the base of the dot op.
3919 s.assign(left.X, new, false, 0)
3920 // TODO: do we need to update named values here?
3923 if left.Op() == ir.OINDEX && left.(*ir.IndexExpr).X.Type().IsArray() {
3924 left := left.(*ir.IndexExpr)
3925 s.pushLine(left.Pos())
3927 // We're assigning to an element of an ssa-able array.
3932 i := s.expr(left.Index) // index
3934 // The bounds check must fail. Might as well
3935 // ignore the actual index and just use zeros.
3936 z := s.constInt(types.Types[types.TINT], 0)
3937 s.boundsCheck(z, z, ssa.BoundsIndex, false)
3941 s.Fatalf("assigning to non-1-length array")
3943 // Rewrite to a = [1]{v}
3944 len := s.constInt(types.Types[types.TINT], 1)
3945 s.boundsCheck(i, len, ssa.BoundsIndex, false) // checks i == 0
3946 v := s.newValue1(ssa.OpArrayMake1, t, right)
3947 s.assign(left.X, v, false, 0)
3950 left := left.(*ir.Name)
3951 // Update variable assignment.
3952 s.vars[left] = right
3953 s.addNamedValue(left, right)
3957 // If this assignment clobbers an entire local variable, then emit
3958 // OpVarDef so liveness analysis knows the variable is redefined.
3959 if base, ok := clobberBase(left).(*ir.Name); ok && base.OnStack() && skip == 0 && t.HasPointers() {
3960 s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base, s.mem(), !ir.IsAutoTmp(base))
3963 // Left is not ssa-able. Compute its address.
3964 addr := s.addr(left)
3965 if ir.IsReflectHeaderDataField(left) {
3966 // Package unsafe's documentation says storing pointers into
3967 // reflect.SliceHeader and reflect.StringHeader's Data fields
3968 // is valid, even though they have type uintptr (#19168).
3969 // Mark it pointer type to signal the writebarrier pass to
3970 // insert a write barrier.
3971 t = types.Types[types.TUNSAFEPTR]
3974 // Treat as a mem->mem move.
3978 s.moveWhichMayOverlap(t, addr, right, mayOverlap)
3982 // Treat as a store.
3983 s.storeType(t, addr, right, skip, !ir.IsAutoTmp(left))
3986 // zeroVal returns the zero value for type t.
3987 func (s *state) zeroVal(t *types.Type) *ssa.Value {
3992 return s.constInt8(t, 0)
3994 return s.constInt16(t, 0)
3996 return s.constInt32(t, 0)
3998 return s.constInt64(t, 0)
4000 s.Fatalf("bad sized integer type %v", t)
4005 return s.constFloat32(t, 0)
4007 return s.constFloat64(t, 0)
4009 s.Fatalf("bad sized float type %v", t)
4014 z := s.constFloat32(types.Types[types.TFLOAT32], 0)
4015 return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
4017 z := s.constFloat64(types.Types[types.TFLOAT64], 0)
4018 return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
4020 s.Fatalf("bad sized complex type %v", t)
4024 return s.constEmptyString(t)
4025 case t.IsPtrShaped():
4026 return s.constNil(t)
4028 return s.constBool(false)
4029 case t.IsInterface():
4030 return s.constInterface(t)
4032 return s.constSlice(t)
4035 v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t)
4036 for i := 0; i < n; i++ {
4037 v.AddArg(s.zeroVal(t.FieldType(i)))
4041 switch t.NumElem() {
4043 return s.entryNewValue0(ssa.OpArrayMake0, t)
4045 return s.entryNewValue1(ssa.OpArrayMake1, t, s.zeroVal(t.Elem()))
4048 s.Fatalf("zero for type %v not implemented", t)
4055 callNormal callKind = iota
4062 type sfRtCallDef struct {
4067 var softFloatOps map[ssa.Op]sfRtCallDef
4069 func softfloatInit() {
4070 // Some of these operations get transformed by sfcall.
4071 softFloatOps = map[ssa.Op]sfRtCallDef{
4072 ssa.OpAdd32F: {typecheck.LookupRuntimeFunc("fadd32"), types.TFLOAT32},
4073 ssa.OpAdd64F: {typecheck.LookupRuntimeFunc("fadd64"), types.TFLOAT64},
4074 ssa.OpSub32F: {typecheck.LookupRuntimeFunc("fadd32"), types.TFLOAT32},
4075 ssa.OpSub64F: {typecheck.LookupRuntimeFunc("fadd64"), types.TFLOAT64},
4076 ssa.OpMul32F: {typecheck.LookupRuntimeFunc("fmul32"), types.TFLOAT32},
4077 ssa.OpMul64F: {typecheck.LookupRuntimeFunc("fmul64"), types.TFLOAT64},
4078 ssa.OpDiv32F: {typecheck.LookupRuntimeFunc("fdiv32"), types.TFLOAT32},
4079 ssa.OpDiv64F: {typecheck.LookupRuntimeFunc("fdiv64"), types.TFLOAT64},
4081 ssa.OpEq64F: {typecheck.LookupRuntimeFunc("feq64"), types.TBOOL},
4082 ssa.OpEq32F: {typecheck.LookupRuntimeFunc("feq32"), types.TBOOL},
4083 ssa.OpNeq64F: {typecheck.LookupRuntimeFunc("feq64"), types.TBOOL},
4084 ssa.OpNeq32F: {typecheck.LookupRuntimeFunc("feq32"), types.TBOOL},
4085 ssa.OpLess64F: {typecheck.LookupRuntimeFunc("fgt64"), types.TBOOL},
4086 ssa.OpLess32F: {typecheck.LookupRuntimeFunc("fgt32"), types.TBOOL},
4087 ssa.OpLeq64F: {typecheck.LookupRuntimeFunc("fge64"), types.TBOOL},
4088 ssa.OpLeq32F: {typecheck.LookupRuntimeFunc("fge32"), types.TBOOL},
4090 ssa.OpCvt32to32F: {typecheck.LookupRuntimeFunc("fint32to32"), types.TFLOAT32},
4091 ssa.OpCvt32Fto32: {typecheck.LookupRuntimeFunc("f32toint32"), types.TINT32},
4092 ssa.OpCvt64to32F: {typecheck.LookupRuntimeFunc("fint64to32"), types.TFLOAT32},
4093 ssa.OpCvt32Fto64: {typecheck.LookupRuntimeFunc("f32toint64"), types.TINT64},
4094 ssa.OpCvt64Uto32F: {typecheck.LookupRuntimeFunc("fuint64to32"), types.TFLOAT32},
4095 ssa.OpCvt32Fto64U: {typecheck.LookupRuntimeFunc("f32touint64"), types.TUINT64},
4096 ssa.OpCvt32to64F: {typecheck.LookupRuntimeFunc("fint32to64"), types.TFLOAT64},
4097 ssa.OpCvt64Fto32: {typecheck.LookupRuntimeFunc("f64toint32"), types.TINT32},
4098 ssa.OpCvt64to64F: {typecheck.LookupRuntimeFunc("fint64to64"), types.TFLOAT64},
4099 ssa.OpCvt64Fto64: {typecheck.LookupRuntimeFunc("f64toint64"), types.TINT64},
4100 ssa.OpCvt64Uto64F: {typecheck.LookupRuntimeFunc("fuint64to64"), types.TFLOAT64},
4101 ssa.OpCvt64Fto64U: {typecheck.LookupRuntimeFunc("f64touint64"), types.TUINT64},
4102 ssa.OpCvt32Fto64F: {typecheck.LookupRuntimeFunc("f32to64"), types.TFLOAT64},
4103 ssa.OpCvt64Fto32F: {typecheck.LookupRuntimeFunc("f64to32"), types.TFLOAT32},
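
// Note the deliberate reuse of helpers in the table above; sfcall rewrites
// its arguments before the call, roughly:
//
//	x - y  =>  fadd(x, -y)  // Sub via Add with a negated second operand
//	x < y  =>  fgt(y, x)    // Less via Greater with swapped operands
//	x != y =>  !feq(x, y)   // Neq via negating the Eq result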
4107 // TODO: do not emit sfcall if operation can be optimized to constant in later phases.
4109 func (s *state) sfcall(op ssa.Op, args ...*ssa.Value) (*ssa.Value, bool) {
4110 f2i := func(t *types.Type) *types.Type {
4112 case types.TFLOAT32:
4113 return types.Types[types.TUINT32]
4114 case types.TFLOAT64:
4115 return types.Types[types.TUINT64]
4120 if callDef, ok := softFloatOps[op]; ok {
4126 args[0], args[1] = args[1], args[0]
4129 args[1] = s.newValue1(s.ssaOp(ir.ONEG, types.Types[callDef.rtype]), args[1].Type, args[1])
4132 // Runtime functions take uints for floats and return uints.
4133 // Convert to uints so we use the right calling convention.
4134 for i, a := range args {
4135 if a.Type.IsFloat() {
4136 args[i] = s.newValue1(ssa.OpCopy, f2i(a.Type), a)
4140 rt := types.Types[callDef.rtype]
4141 result := s.rtcall(callDef.rtfn, true, []*types.Type{f2i(rt)}, args...)[0]
4143 result = s.newValue1(ssa.OpCopy, rt, result)
4145 if op == ssa.OpNeq32F || op == ssa.OpNeq64F {
4146 result = s.newValue1(ssa.OpNot, result.Type, result)
4153 var intrinsics map[intrinsicKey]intrinsicBuilder
4155 // An intrinsicBuilder converts a call node n into an ssa value that
4156 // implements that call as an intrinsic. args is a list of arguments to the func.
4157 type intrinsicBuilder func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value
4159 type intrinsicKey struct {
4166 intrinsics = map[intrinsicKey]intrinsicBuilder{}
4171 var lwatomics []*sys.Arch
4172 for _, a := range &sys.Archs {
4173 all = append(all, a)
4179 if a.Family != sys.PPC64 {
4180 lwatomics = append(lwatomics, a)
4184 // add adds the intrinsic b for pkg.fn for the given list of architectures.
4185 add := func(pkg, fn string, b intrinsicBuilder, archs ...*sys.Arch) {
4186 for _, a := range archs {
4187 intrinsics[intrinsicKey{a, pkg, fn}] = b
4190 // addF does the same as add but operates on architecture families.
4191 addF := func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily) {
4193 for _, f := range archFamilies {
4195 panic("too many architecture families")
4199 for _, a := range all {
4200 if m>>uint(a.Family)&1 != 0 {
4201 intrinsics[intrinsicKey{a, pkg, fn}] = b
4205 // alias defines pkg.fn = pkg2.fn2 for all architectures in archs for which pkg2.fn2 exists.
4206 alias := func(pkg, fn, pkg2, fn2 string, archs ...*sys.Arch) {
4208 for _, a := range archs {
4209 if b, ok := intrinsics[intrinsicKey{a, pkg2, fn2}]; ok {
4210 intrinsics[intrinsicKey{a, pkg, fn}] = b
4215 panic(fmt.Sprintf("attempted to alias undefined intrinsic: %s.%s", pkg, fn))
4219 /******** runtime ********/
4220 if !base.Flag.Cfg.Instrumenting {
4221 add("runtime", "slicebytetostringtmp",
4222 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4223 // Compiler frontend optimizations emit OBYTES2STRTMP nodes
4224 // for the backend instead of slicebytetostringtmp calls
4225 // when not instrumenting.
4226 return s.newValue2(ssa.OpStringMake, n.Type(), args[0], args[1])
4230 addF("runtime/internal/math", "MulUintptr",
4231 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4232 if s.config.PtrSize == 4 {
4233 return s.newValue2(ssa.OpMul32uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1])
4235 return s.newValue2(ssa.OpMul64uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1])
4237 sys.AMD64, sys.I386, sys.Loong64, sys.MIPS64, sys.RISCV64, sys.ARM64)
4238 add("runtime", "KeepAlive",
4239 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4240 data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0])
4241 s.vars[memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem())
4245 add("runtime", "getclosureptr",
4246 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4247 return s.newValue0(ssa.OpGetClosurePtr, s.f.Config.Types.Uintptr)
4251 add("runtime", "getcallerpc",
4252 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4253 return s.newValue0(ssa.OpGetCallerPC, s.f.Config.Types.Uintptr)
4257 add("runtime", "getcallersp",
4258 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4259 return s.newValue1(ssa.OpGetCallerSP, s.f.Config.Types.Uintptr, s.mem())
4263 addF("runtime", "publicationBarrier",
4264 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4265 s.vars[memVar] = s.newValue1(ssa.OpPubBarrier, types.TypeMem, s.mem())
4268 sys.ARM64, sys.PPC64, sys.RISCV64)
4270 brev_arch := []sys.ArchFamily{sys.AMD64, sys.I386, sys.ARM64, sys.ARM, sys.S390X}
4271 if buildcfg.GOPPC64 >= 10 {
4272 // Use this only on Power10, as the new byte-reverse instructions
4273 // available there make it worthwhile as an intrinsic.
4274 brev_arch = append(brev_arch, sys.PPC64)
4276 /******** runtime/internal/sys ********/
4277 addF("runtime/internal/sys", "Bswap32",
4278 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4279 return s.newValue1(ssa.OpBswap32, types.Types[types.TUINT32], args[0])
4282 addF("runtime/internal/sys", "Bswap64",
4283 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4284 return s.newValue1(ssa.OpBswap64, types.Types[types.TUINT64], args[0])
4288 /****** Prefetch ******/
4289 makePrefetchFunc := func(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4290 return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4291 s.vars[memVar] = s.newValue2(op, types.TypeMem, args[0], s.mem())
4296 // Make Prefetch intrinsics for supported platforms.
4297 // On unsupported platforms, the stub function will be eliminated.
4298 addF("runtime/internal/sys", "Prefetch", makePrefetchFunc(ssa.OpPrefetchCache),
4299 sys.AMD64, sys.ARM64, sys.PPC64)
4300 addF("runtime/internal/sys", "PrefetchStreamed", makePrefetchFunc(ssa.OpPrefetchCacheStreamed),
4301 sys.AMD64, sys.ARM64, sys.PPC64)
4303 /******** runtime/internal/atomic ********/
4304 addF("runtime/internal/atomic", "Load",
4305 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4306 v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem())
4307 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
4308 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
4310 sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
4311 addF("runtime/internal/atomic", "Load8",
4312 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4313 v := s.newValue2(ssa.OpAtomicLoad8, types.NewTuple(types.Types[types.TUINT8], types.TypeMem), args[0], s.mem())
4314 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
4315 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT8], v)
4317 sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
4318 addF("runtime/internal/atomic", "Load64",
4319 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4320 v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem())
4321 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
4322 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
4324 sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
4325 addF("runtime/internal/atomic", "LoadAcq",
4326 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4327 v := s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem())
4328 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
4329 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
4331 sys.PPC64, sys.S390X)
4332 addF("runtime/internal/atomic", "LoadAcq64",
4333 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4334 v := s.newValue2(ssa.OpAtomicLoadAcq64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem())
4335 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
4336 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
4339 addF("runtime/internal/atomic", "Loadp",
4340 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4341 v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem())
4342 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
4343 return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v)
4345 sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
4347 addF("runtime/internal/atomic", "Store",
4348 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4349 s.vars[memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem())
4352 sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
4353 addF("runtime/internal/atomic", "Store8",
4354 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4355 s.vars[memVar] = s.newValue3(ssa.OpAtomicStore8, types.TypeMem, args[0], args[1], s.mem())
4358 sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
4359 addF("runtime/internal/atomic", "Store64",
4360 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4361 s.vars[memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem())
4364 sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
4365 addF("runtime/internal/atomic", "StorepNoWB",
4366 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4367 s.vars[memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem())
4370 sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.RISCV64, sys.S390X)
4371 addF("runtime/internal/atomic", "StoreRel",
4372 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4373 s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel32, types.TypeMem, args[0], args[1], s.mem())
4376 sys.PPC64, sys.S390X)
4377 addF("runtime/internal/atomic", "StoreRel64",
4378 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4379 s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel64, types.TypeMem, args[0], args[1], s.mem())
4384 addF("runtime/internal/atomic", "Xchg",
4385 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4386 v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem())
4387 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
4388 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
4390 sys.AMD64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
4391 addF("runtime/internal/atomic", "Xchg64",
4392 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4393 v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem())
4394 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
4395 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
4397 sys.AMD64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
4399 type atomicOpEmitter func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind)
4401 makeAtomicGuardedIntrinsicARM64 := func(op0, op1 ssa.Op, typ, rtyp types.Kind, emit atomicOpEmitter) intrinsicBuilder {
4403 return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4404 // Whether the target supports the atomic instructions is detected dynamically.
4405 addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), ir.Syms.ARM64HasATOMICS, s.sb)
4406 v := s.load(types.Types[types.TBOOL], addr)
4408 b.Kind = ssa.BlockIf
4410 bTrue := s.f.NewBlock(ssa.BlockPlain)
4411 bFalse := s.f.NewBlock(ssa.BlockPlain)
4412 bEnd := s.f.NewBlock(ssa.BlockPlain)
4415 b.Likely = ssa.BranchLikely
4417 // We have atomic instructions - use them directly.
4419 emit(s, n, args, op1, typ)
4420 s.endBlock().AddEdgeTo(bEnd)
4422 // Use original instruction sequence.
4423 s.startBlock(bFalse)
4424 emit(s, n, args, op0, typ)
4425 s.endBlock().AddEdgeTo(bEnd)
4429 if rtyp == types.TNIL {
4432 return s.variable(n, types.Types[rtyp])
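
// Schematically, a guarded intrinsic built above expands to:
//
//	if ARM64HasATOMICS { // read once from a global, set by CPU detection
//		result = op1(args...) // e.g. an LSE atomic instruction
//	} else {
//		result = op0(args...) // the original LL/SC-style sequence
//	}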
4437 atomicXchgXaddEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
4438 v := s.newValue3(op, types.NewTuple(types.Types[typ], types.TypeMem), args[0], args[1], s.mem())
4439 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
4440 s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
4442 addF("runtime/internal/atomic", "Xchg",
4443 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange32, ssa.OpAtomicExchange32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64),
4445 addF("runtime/internal/atomic", "Xchg64",
4446 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange64, ssa.OpAtomicExchange64Variant, types.TUINT64, types.TUINT64, atomicXchgXaddEmitterARM64),
4449 addF("runtime/internal/atomic", "Xadd",
4450 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4451 v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem())
4452 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
4453 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
4455 sys.AMD64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
4456 addF("runtime/internal/atomic", "Xadd64",
4457 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4458 v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem())
4459 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
4460 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
4462 sys.AMD64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
4464 addF("runtime/internal/atomic", "Xadd",
4465 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd32, ssa.OpAtomicAdd32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64),
4467 addF("runtime/internal/atomic", "Xadd64",
4468 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd64, ssa.OpAtomicAdd64Variant, types.TUINT64, types.TUINT64, atomicXchgXaddEmitterARM64),
4471 addF("runtime/internal/atomic", "Cas",
4472 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4473 v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
4474 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
4475 return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
4477 sys.AMD64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
4478 addF("runtime/internal/atomic", "Cas64",
4479 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4480 v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
4481 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
4482 return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
4484 sys.AMD64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
4485 addF("runtime/internal/atomic", "CasRel",
4486 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4487 v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
4488 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
4489 return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
4493 atomicCasEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
4494 v := s.newValue4(op, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
4495 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
4496 s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
4499 addF("runtime/internal/atomic", "Cas",
4500 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap32, ssa.OpAtomicCompareAndSwap32Variant, types.TUINT32, types.TBOOL, atomicCasEmitterARM64),
4502 addF("runtime/internal/atomic", "Cas64",
4503 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap64, ssa.OpAtomicCompareAndSwap64Variant, types.TUINT64, types.TBOOL, atomicCasEmitterARM64),
4506 addF("runtime/internal/atomic", "And8",
4507 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4508 s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem())
4511 sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
4512 addF("runtime/internal/atomic", "And",
4513 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4514 s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd32, types.TypeMem, args[0], args[1], s.mem())
4517 sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
4518 addF("runtime/internal/atomic", "Or8",
4519 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4520 s.vars[memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem())
4523 sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
4524 addF("runtime/internal/atomic", "Or",
4525 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4526 s.vars[memVar] = s.newValue3(ssa.OpAtomicOr32, types.TypeMem, args[0], args[1], s.mem())
4529 sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
4531 atomicAndOrEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
4532 s.vars[memVar] = s.newValue3(op, types.TypeMem, args[0], args[1], s.mem())
4535 addF("runtime/internal/atomic", "And8",
4536 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd8, ssa.OpAtomicAnd8Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
4538 addF("runtime/internal/atomic", "And",
4539 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd32, ssa.OpAtomicAnd32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
4541 addF("runtime/internal/atomic", "Or8",
4542 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr8, ssa.OpAtomicOr8Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
4544 addF("runtime/internal/atomic", "Or",
4545 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr32, ssa.OpAtomicOr32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
4548 // Aliases for atomic load operations
4549 alias("runtime/internal/atomic", "Loadint32", "runtime/internal/atomic", "Load", all...)
4550 alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...)
4551 alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load", p4...)
4552 alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load64", p8...)
4553 alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load", p4...)
4554 alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load64", p8...)
4555 alias("runtime/internal/atomic", "LoadAcq", "runtime/internal/atomic", "Load", lwatomics...)
4556 alias("runtime/internal/atomic", "LoadAcq64", "runtime/internal/atomic", "Load64", lwatomics...)
4557 alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...)
4558 alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...) // linknamed
4559 alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...)
4560 alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...) // linknamed
4562 // Aliases for atomic store operations
4563 alias("runtime/internal/atomic", "Storeint32", "runtime/internal/atomic", "Store", all...)
4564 alias("runtime/internal/atomic", "Storeint64", "runtime/internal/atomic", "Store64", all...)
4565 alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...)
4566 alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...)
4567 alias("runtime/internal/atomic", "StoreRel", "runtime/internal/atomic", "Store", lwatomics...)
4568 alias("runtime/internal/atomic", "StoreRel64", "runtime/internal/atomic", "Store64", lwatomics...)
4569 alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...)
4570 alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...) // linknamed
4571 alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...)
4572 alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...) // linknamed
4574 // Aliases for atomic swap operations
4575 alias("runtime/internal/atomic", "Xchgint32", "runtime/internal/atomic", "Xchg", all...)
4576 alias("runtime/internal/atomic", "Xchgint64", "runtime/internal/atomic", "Xchg64", all...)
4577 alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...)
4578 alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...)
4580 // Aliases for atomic add operations
4581 alias("runtime/internal/atomic", "Xaddint32", "runtime/internal/atomic", "Xadd", all...)
4582 alias("runtime/internal/atomic", "Xaddint64", "runtime/internal/atomic", "Xadd64", all...)
4583 alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...)
4584 alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd64", p8...)
4586 // Aliases for atomic CAS operations
4587 alias("runtime/internal/atomic", "Casint32", "runtime/internal/atomic", "Cas", all...)
4588 alias("runtime/internal/atomic", "Casint64", "runtime/internal/atomic", "Cas64", all...)
4589 alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas", p4...)
4590 alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas64", p8...)
4591 alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas", p4...)
4592 alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas64", p8...)
4593 alias("runtime/internal/atomic", "CasRel", "runtime/internal/atomic", "Cas", lwatomics...)
4595 /******** math ********/
4596 addF("math", "sqrt",
4597 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4598 return s.newValue1(ssa.OpSqrt, types.Types[types.TFLOAT64], args[0])
4600 sys.I386, sys.AMD64, sys.ARM, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm)
4601 addF("math", "Trunc",
4602 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4603 return s.newValue1(ssa.OpTrunc, types.Types[types.TFLOAT64], args[0])
4605 sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
4606 addF("math", "Ceil",
4607 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4608 return s.newValue1(ssa.OpCeil, types.Types[types.TFLOAT64], args[0])
4610 sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
4611 addF("math", "Floor",
4612 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4613 return s.newValue1(ssa.OpFloor, types.Types[types.TFLOAT64], args[0])
4615 sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
4616 addF("math", "Round",
4617 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4618 return s.newValue1(ssa.OpRound, types.Types[types.TFLOAT64], args[0])
4620 sys.ARM64, sys.PPC64, sys.S390X)
4621 addF("math", "RoundToEven",
4622 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4623 return s.newValue1(ssa.OpRoundToEven, types.Types[types.TFLOAT64], args[0])
4625 sys.ARM64, sys.S390X, sys.Wasm)
4627 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4628 return s.newValue1(ssa.OpAbs, types.Types[types.TFLOAT64], args[0])
4630 sys.ARM64, sys.ARM, sys.PPC64, sys.RISCV64, sys.Wasm, sys.MIPS, sys.MIPS64)
4631 addF("math", "Copysign",
4632 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4633 return s.newValue2(ssa.OpCopysign, types.Types[types.TFLOAT64], args[0], args[1])
4635 sys.PPC64, sys.RISCV64, sys.Wasm)
4637 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4638 return s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
4640 sys.ARM64, sys.PPC64, sys.RISCV64, sys.S390X)
4642 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4643 if !s.config.UseFMA {
4644 s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
4645 return s.variable(n, types.Types[types.TFLOAT64])
4648 if buildcfg.GOAMD64 >= 3 {
4649 return s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
4652 v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasFMA)
4654 b.Kind = ssa.BlockIf
4656 bTrue := s.f.NewBlock(ssa.BlockPlain)
4657 bFalse := s.f.NewBlock(ssa.BlockPlain)
4658 bEnd := s.f.NewBlock(ssa.BlockPlain)
4661 b.Likely = ssa.BranchLikely // >= haswell cpus are common
4663 // We have the intrinsic - use it directly.
4665 s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
4666 s.endBlock().AddEdgeTo(bEnd)
4668 // Call the pure Go version.
4669 s.startBlock(bFalse)
4670 s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
4671 s.endBlock().AddEdgeTo(bEnd)
4675 return s.variable(n, types.Types[types.TFLOAT64])
4679 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4680 if !s.config.UseFMA {
4681 s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
4682 return s.variable(n, types.Types[types.TFLOAT64])
4684 addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), ir.Syms.ARMHasVFPv4, s.sb)
4685 v := s.load(types.Types[types.TBOOL], addr)
4687 b.Kind = ssa.BlockIf
4689 bTrue := s.f.NewBlock(ssa.BlockPlain)
4690 bFalse := s.f.NewBlock(ssa.BlockPlain)
4691 bEnd := s.f.NewBlock(ssa.BlockPlain)
4694 b.Likely = ssa.BranchLikely
4696 // We have the intrinsic - use it directly.
4698 s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
4699 s.endBlock().AddEdgeTo(bEnd)
4701 // Call the pure Go version.
4702 s.startBlock(bFalse)
4703 s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
4704 s.endBlock().AddEdgeTo(bEnd)
4708 return s.variable(n, types.Types[types.TFLOAT64])
4712 makeRoundAMD64 := func(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4713 return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4714 if buildcfg.GOAMD64 >= 2 {
4715 return s.newValue1(op, types.Types[types.TFLOAT64], args[0])
4718 v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasSSE41)
4720 b.Kind = ssa.BlockIf
4722 bTrue := s.f.NewBlock(ssa.BlockPlain)
4723 bFalse := s.f.NewBlock(ssa.BlockPlain)
4724 bEnd := s.f.NewBlock(ssa.BlockPlain)
4727 b.Likely = ssa.BranchLikely // most machines have sse4.1 nowadays
4729 // We have the intrinsic - use it directly.
4731 s.vars[n] = s.newValue1(op, types.Types[types.TFLOAT64], args[0])
4732 s.endBlock().AddEdgeTo(bEnd)
4734 // Call the pure Go version.
4735 s.startBlock(bFalse)
4736 s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
4737 s.endBlock().AddEdgeTo(bEnd)
4741 return s.variable(n, types.Types[types.TFLOAT64])
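
// Design note: GOAMD64 >= 2 guarantees SSE4.1 in the target baseline, so
// the rounding instruction can be emitted unconditionally; otherwise the
// HasCPUFeature check above is materialized once in the entry block and
// each call branches between the instruction and the pure Go fallback.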
4744 addF("math", "RoundToEven",
4745 makeRoundAMD64(ssa.OpRoundToEven),
4747 addF("math", "Floor",
4748 makeRoundAMD64(ssa.OpFloor),
4750 addF("math", "Ceil",
4751 makeRoundAMD64(ssa.OpCeil),
4753 addF("math", "Trunc",
4754 makeRoundAMD64(ssa.OpTrunc),
4757 /******** math/bits ********/
4758 addF("math/bits", "TrailingZeros64",
4759 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4760 return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], args[0])
4762 sys.AMD64, sys.I386, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
4763 addF("math/bits", "TrailingZeros32",
4764 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4765 return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], args[0])
4767 sys.AMD64, sys.I386, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
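
// The widening variants of TrailingZeros16 below serve targets without a
// native 16-bit count-trailing-zeros: they zero-extend the argument and OR
// in a guard bit at position 16. For a zero input the guard bit makes the
// wider Ctz return exactly 16, the correct answer for TrailingZeros16(0);
// for any other input the guard bit lies above the first set bit and has
// no effect.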
4768 addF("math/bits", "TrailingZeros16",
4769 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4770 x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0])
4771 c := s.constInt32(types.Types[types.TUINT32], 1<<16)
4772 y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c)
4773 return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], y)
4776 addF("math/bits", "TrailingZeros16",
4777 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4778 return s.newValue1(ssa.OpCtz16, types.Types[types.TINT], args[0])
4780 sys.AMD64, sys.I386, sys.ARM, sys.ARM64, sys.Wasm)
4781 addF("math/bits", "TrailingZeros16",
4782 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4783 x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0])
4784 c := s.constInt64(types.Types[types.TUINT64], 1<<16)
4785 y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c)
4786 return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], y)
4788 sys.S390X, sys.PPC64)
4789 addF("math/bits", "TrailingZeros8",
4790 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4791 x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0])
4792 c := s.constInt32(types.Types[types.TUINT32], 1<<8)
4793 y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c)
4794 return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], y)
4797 addF("math/bits", "TrailingZeros8",
4798 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4799 return s.newValue1(ssa.OpCtz8, types.Types[types.TINT], args[0])
4801 sys.AMD64, sys.I386, sys.ARM, sys.ARM64, sys.Wasm)
4802 addF("math/bits", "TrailingZeros8",
4803 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4804 x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0])
4805 c := s.constInt64(types.Types[types.TUINT64], 1<<8)
4806 y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c)
4807 return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], y)
4810 alias("math/bits", "ReverseBytes64", "runtime/internal/sys", "Bswap64", all...)
4811 alias("math/bits", "ReverseBytes32", "runtime/internal/sys", "Bswap32", all...)
4812 // ReverseBytes inlines correctly; no need to intrinsify it.
4813 // Nothing special is needed for targets where ReverseBytes16 lowers to a rotate.
4814 // On Power10, a 16-bit rotate is not available, so use the BRH instruction instead.
4815 if buildcfg.GOPPC64 >= 10 {
4816 addF("math/bits", "ReverseBytes16",
4817 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4818 return s.newValue1(ssa.OpBswap16, types.Types[types.TUINT], args[0])
4823 addF("math/bits", "Len64",
4824 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4825 return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0])
4827 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
4828 addF("math/bits", "Len32",
4829 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4830 return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
4832 sys.AMD64, sys.ARM64, sys.PPC64)
4833 addF("math/bits", "Len32",
4834 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4835 if s.config.PtrSize == 4 {
4836 return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
4838 x := s.newValue1(ssa.OpZeroExt32to64, types.Types[types.TUINT64], args[0])
4839 return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
4841 sys.ARM, sys.S390X, sys.MIPS, sys.Wasm)
4842 addF("math/bits", "Len16",
4843 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4844 if s.config.PtrSize == 4 {
4845 x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0])
4846 return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x)
4848 x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0])
4849 return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
4851 sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
4852 addF("math/bits", "Len16",
4853 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4854 return s.newValue1(ssa.OpBitLen16, types.Types[types.TINT], args[0])
4857 addF("math/bits", "Len8",
4858 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4859 if s.config.PtrSize == 4 {
4860 x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0])
4861 return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x)
4863 x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0])
4864 return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
4866 sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
4867 addF("math/bits", "Len8",
4868 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4869 return s.newValue1(ssa.OpBitLen8, types.Types[types.TINT], args[0])
4872 addF("math/bits", "Len",
4873 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4874 if s.config.PtrSize == 4 {
4875 return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
4877 return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0])
4879 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
4880 // LeadingZeros is handled because it trivially calls Len.
4881 addF("math/bits", "Reverse64",
4882 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4883 return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0])
4886 addF("math/bits", "Reverse32",
4887 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4888 return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0])
4891 addF("math/bits", "Reverse16",
4892 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4893 return s.newValue1(ssa.OpBitRev16, types.Types[types.TINT], args[0])
4896 addF("math/bits", "Reverse8",
4897 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4898 return s.newValue1(ssa.OpBitRev8, types.Types[types.TINT], args[0])
4901 addF("math/bits", "Reverse",
4902 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4903 return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0])
4906 addF("math/bits", "RotateLeft8",
4907 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4908 return s.newValue2(ssa.OpRotateLeft8, types.Types[types.TUINT8], args[0], args[1])
4911 addF("math/bits", "RotateLeft16",
4912 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4913 return s.newValue2(ssa.OpRotateLeft16, types.Types[types.TUINT16], args[0], args[1])
4916 addF("math/bits", "RotateLeft32",
4917 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4918 return s.newValue2(ssa.OpRotateLeft32, types.Types[types.TUINT32], args[0], args[1])
4920 sys.AMD64, sys.ARM, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm, sys.Loong64)
4921 addF("math/bits", "RotateLeft64",
4922 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4923 return s.newValue2(ssa.OpRotateLeft64, types.Types[types.TUINT64], args[0], args[1])
4925 sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm, sys.Loong64)
4926 alias("math/bits", "RotateLeft", "math/bits", "RotateLeft64", p8...)
4928 makeOnesCountAMD64 := func(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4929 return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4930 if buildcfg.GOAMD64 >= 2 {
4931 return s.newValue1(op, types.Types[types.TINT], args[0])
4934 v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasPOPCNT)
4936 b.Kind = ssa.BlockIf
4938 bTrue := s.f.NewBlock(ssa.BlockPlain)
4939 bFalse := s.f.NewBlock(ssa.BlockPlain)
4940 bEnd := s.f.NewBlock(ssa.BlockPlain)
4943 b.Likely = ssa.BranchLikely // most machines have popcnt nowadays
4945 // We have the intrinsic - use it directly.
4947 s.vars[n] = s.newValue1(op, types.Types[types.TINT], args[0])
4948 s.endBlock().AddEdgeTo(bEnd)
4950 // Call the pure Go version.
4951 s.startBlock(bFalse)
4952 s.vars[n] = s.callResult(n, callNormal) // types.Types[TINT]
4953 s.endBlock().AddEdgeTo(bEnd)
4957 return s.variable(n, types.Types[types.TINT])
4960 addF("math/bits", "OnesCount64",
4961 makeOnesCountAMD64(ssa.OpPopCount64),
4963 addF("math/bits", "OnesCount64",
4964 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4965 return s.newValue1(ssa.OpPopCount64, types.Types[types.TINT], args[0])
4967 sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
4968 addF("math/bits", "OnesCount32",
4969 makeOnesCountAMD64(ssa.OpPopCount32),
4971 addF("math/bits", "OnesCount32",
4972 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4973 return s.newValue1(ssa.OpPopCount32, types.Types[types.TINT], args[0])
4975 sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
4976 addF("math/bits", "OnesCount16",
4977 makeOnesCountAMD64(ssa.OpPopCount16),
4979 addF("math/bits", "OnesCount16",
4980 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4981 return s.newValue1(ssa.OpPopCount16, types.Types[types.TINT], args[0])
4983 sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
4984 addF("math/bits", "OnesCount8",
4985 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4986 return s.newValue1(ssa.OpPopCount8, types.Types[types.TINT], args[0])
4988 sys.S390X, sys.PPC64, sys.Wasm)
4989 addF("math/bits", "OnesCount",
4990 makeOnesCountAMD64(ssa.OpPopCount64),
4992 addF("math/bits", "Mul64",
4993 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4994 return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1])
4996 sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.MIPS64, sys.RISCV64, sys.Loong64)
4997 alias("math/bits", "Mul", "math/bits", "Mul64", p8...)
4998 alias("runtime/internal/math", "Mul64", "math/bits", "Mul64", p8...)
4999 addF("math/bits", "Add64",
5000 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
5001 return s.newValue3(ssa.OpAdd64carry, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
5003 sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.RISCV64, sys.Loong64, sys.MIPS64)
5004 alias("math/bits", "Add", "math/bits", "Add64", p8...)
5005 alias("runtime/internal/math", "Add64", "math/bits", "Add64", all...)
5006 addF("math/bits", "Sub64",
5007 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
5008 return s.newValue3(ssa.OpSub64borrow, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
5010 sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.RISCV64, sys.Loong64, sys.MIPS64)
5011 alias("math/bits", "Sub", "math/bits", "Sub64", p8...)
5012 addF("math/bits", "Div64",
5013 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
5014 // Check for divide-by-zero (divisor == 0) and overflow (hi >= divisor) and panic with the appropriate message.
5015 cmpZero := s.newValue2(s.ssaOp(ir.ONE, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[2], s.zeroVal(types.Types[types.TUINT64]))
5016 s.check(cmpZero, ir.Syms.Panicdivide)
5017 cmpOverflow := s.newValue2(s.ssaOp(ir.OLT, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[0], args[2])
5018 s.check(cmpOverflow, ir.Syms.Panicoverflow)
5019 return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
5022 alias("math/bits", "Div", "math/bits", "Div64", sys.ArchAMD64)
5024 alias("runtime/internal/sys", "TrailingZeros8", "math/bits", "TrailingZeros8", all...)
5025 alias("runtime/internal/sys", "TrailingZeros32", "math/bits", "TrailingZeros32", all...)
5026 alias("runtime/internal/sys", "TrailingZeros64", "math/bits", "TrailingZeros64", all...)
5027 alias("runtime/internal/sys", "Len8", "math/bits", "Len8", all...)
5028 alias("runtime/internal/sys", "Len64", "math/bits", "Len64", all...)
5029 alias("runtime/internal/sys", "OnesCount64", "math/bits", "OnesCount64", all...)
5031 /******** sync/atomic ********/
5033 // Note: these are disabled by flag_race in findIntrinsic below.
5034 alias("sync/atomic", "LoadInt32", "runtime/internal/atomic", "Load", all...)
5035 alias("sync/atomic", "LoadInt64", "runtime/internal/atomic", "Load64", all...)
5036 alias("sync/atomic", "LoadPointer", "runtime/internal/atomic", "Loadp", all...)
5037 alias("sync/atomic", "LoadUint32", "runtime/internal/atomic", "Load", all...)
5038 alias("sync/atomic", "LoadUint64", "runtime/internal/atomic", "Load64", all...)
5039 alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load", p4...)
5040 alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load64", p8...)
5042 alias("sync/atomic", "StoreInt32", "runtime/internal/atomic", "Store", all...)
5043 alias("sync/atomic", "StoreInt64", "runtime/internal/atomic", "Store64", all...)
5044 // Note: not StorePointer, that needs a write barrier. Same below for {CompareAnd}Swap.
5045 alias("sync/atomic", "StoreUint32", "runtime/internal/atomic", "Store", all...)
5046 alias("sync/atomic", "StoreUint64", "runtime/internal/atomic", "Store64", all...)
5047 alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store", p4...)
5048 alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store64", p8...)
5050 alias("sync/atomic", "SwapInt32", "runtime/internal/atomic", "Xchg", all...)
5051 alias("sync/atomic", "SwapInt64", "runtime/internal/atomic", "Xchg64", all...)
5052 alias("sync/atomic", "SwapUint32", "runtime/internal/atomic", "Xchg", all...)
5053 alias("sync/atomic", "SwapUint64", "runtime/internal/atomic", "Xchg64", all...)
5054 alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg", p4...)
5055 alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg64", p8...)
5057 alias("sync/atomic", "CompareAndSwapInt32", "runtime/internal/atomic", "Cas", all...)
5058 alias("sync/atomic", "CompareAndSwapInt64", "runtime/internal/atomic", "Cas64", all...)
5059 alias("sync/atomic", "CompareAndSwapUint32", "runtime/internal/atomic", "Cas", all...)
5060 alias("sync/atomic", "CompareAndSwapUint64", "runtime/internal/atomic", "Cas64", all...)
5061 alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas", p4...)
5062 alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas64", p8...)
5064 alias("sync/atomic", "AddInt32", "runtime/internal/atomic", "Xadd", all...)
5065 alias("sync/atomic", "AddInt64", "runtime/internal/atomic", "Xadd64", all...)
5066 alias("sync/atomic", "AddUint32", "runtime/internal/atomic", "Xadd", all...)
5067 alias("sync/atomic", "AddUint64", "runtime/internal/atomic", "Xadd64", all...)
5068 alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd", p4...)
5069 alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd64", p8...)
5071 /******** math/big ********/
5072 alias("math/big", "mulWW", "math/bits", "Mul64", p8...)
5075 // findIntrinsic returns a function which builds the SSA equivalent of the
5076 // function identified by the symbol sym. If sym is not an intrinsic call, returns nil.
5077 func findIntrinsic(sym *types.Sym) intrinsicBuilder {
5078 if sym == nil || sym.Pkg == nil {
5082 if sym.Pkg == ir.Pkgs.Runtime {
5085 if base.Flag.Race && pkg == "sync/atomic" {
5086 // The race detector needs to be able to intercept these calls.
5087 // We can't intrinsify them.
5090 // Skip intrinsifying math functions (which may contain hard-float
5091 // instructions) when soft-float mode is enabled.
5092 if Arch.SoftFloat && pkg == "math" {
5097 if ssa.IntrinsicsDisable {
5098 if pkg == "runtime" && (fn == "getcallerpc" || fn == "getcallersp" || fn == "getclosureptr") {
5099 // These runtime functions don't have definitions, so they must be intrinsics.
5104 return intrinsics[intrinsicKey{Arch.LinkArch.Arch, pkg, fn}]
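// Illustration: the table lookup above is keyed by (architecture, package,
// function). For example, on amd64 a call to math/bits.TrailingZeros64 is
// found via
//
//	intrinsics[intrinsicKey{Arch.LinkArch.Arch, "math/bits", "TrailingZeros64"}]
//
// and the returned builder emits a count-trailing-zeros SSA op in place of
// the call. (Example chosen for illustration; the table is built above.)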
5107 func IsIntrinsicCall(n *ir.CallExpr) bool {
5111 name, ok := n.Fun.(*ir.Name)
5115 return findIntrinsic(name.Sym()) != nil
5118 // intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation.
5119 func (s *state) intrinsicCall(n *ir.CallExpr) *ssa.Value {
5120 v := findIntrinsic(n.Fun.Sym())(s, n, s.intrinsicArgs(n))
5121 if ssa.IntrinsicsDebug > 0 {
5126 if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 {
5129 base.WarnfAt(n.Pos(), "intrinsic substitution for %v with %s", n.Fun.Sym().Name, x.LongString())
5134 // intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them.
5135 func (s *state) intrinsicArgs(n *ir.CallExpr) []*ssa.Value {
5136 args := make([]*ssa.Value, len(n.Args))
5137 for i, n := range n.Args {
5143 // openDeferRecord adds code to evaluate and store the function for an open-coded defer
5144 // call, and records info about the defer, so we can generate proper code on the
5145 // exit paths. n is the sub-node of the defer node that is the actual function
5146 // call. We will also record funcdata information on where the function is stored
5147 // (as well as the deferBits variable), and this will enable us to run the proper
5148 // defer calls during panics.
5149 func (s *state) openDeferRecord(n *ir.CallExpr) {
5150 if len(n.Args) != 0 || n.Op() != ir.OCALLFUNC || n.Fun.Type().NumResults() != 0 {
5151 s.Fatalf("defer call with arguments or results: %v", n)
5154 opendefer := &openDeferInfo{
5158 // We must always store the function value in a stack slot for the
5159 // runtime panic code to use. But in the defer exit code, we will
5160 // call the function directly if it is a static function.
5161 closureVal := s.expr(fn)
5162 closure := s.openDeferSave(fn.Type(), closureVal)
5163 opendefer.closureNode = closure.Aux.(*ir.Name)
5164 if !(fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC) {
5165 opendefer.closure = closure
5167 index := len(s.openDefers)
5168 s.openDefers = append(s.openDefers, opendefer)
5170 // Update deferBits only after the function value has been successfully
5171 // evaluated and stored to the stack.
5172 bitvalue := s.constInt8(types.Types[types.TUINT8], 1<<uint(index))
5173 newDeferBits := s.newValue2(ssa.OpOr8, types.Types[types.TUINT8], s.variable(deferBitsVar, types.Types[types.TUINT8]), bitvalue)
5174 s.vars[deferBitsVar] = newDeferBits
5175 s.store(types.Types[types.TUINT8], s.deferBitsAddr, newDeferBits)
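// Illustration (hypothetical user code): in a function like
//
//	func f() {
//		defer a() // recorded as bit 0 of deferBits
//		if cond {
//			defer b() // recorded as bit 1 of deferBits
//		}
//	}
//
// each defer statement that executes ORs its bit into deferBits, so the exit
// path (and the runtime, during a panic) knows exactly which calls to run.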
5178 // openDeferSave generates SSA nodes to store a value (with type t) for an
5179 // open-coded defer at an explicit autotmp location on the stack, so it can be
5180 // reloaded and used for the appropriate call on exit. Type t must be a function type
5181 // (therefore SSAable). val is the value to be stored. The function returns an SSA
5182 // value representing a pointer to the autotmp location.
5183 func (s *state) openDeferSave(t *types.Type, val *ssa.Value) *ssa.Value {
5185 s.Fatalf("openDeferSave of non-SSA-able type %v val=%v", t, val)
5187 if !t.HasPointers() {
5188 s.Fatalf("openDeferSave of pointerless type %v val=%v", t, val)
5191 temp := typecheck.TempAt(pos.WithNotStmt(), s.curfn, t)
5192 temp.SetOpenDeferSlot(true)
5193 temp.SetFrameOffset(int64(len(s.openDefers))) // so cmpstackvarlt can order them
5194 var addrTemp *ssa.Value
5195 // Use OpVarLive to make sure the stack slot for the closure is not removed
5196 // by dead-store elimination.
5197 if s.curBlock.ID != s.f.Entry.ID {
5198 // Force the tmp storing this defer function to be declared in the entry
5199 // block, so that it will be live for the defer exit code (which will
5200 // actually access it only if the associated defer call has been activated).
5201 if t.HasPointers() {
5202 s.defvars[s.f.Entry.ID][memVar] = s.f.Entry.NewValue1A(src.NoXPos, ssa.OpVarDef, types.TypeMem, temp, s.defvars[s.f.Entry.ID][memVar])
5204 s.defvars[s.f.Entry.ID][memVar] = s.f.Entry.NewValue1A(src.NoXPos, ssa.OpVarLive, types.TypeMem, temp, s.defvars[s.f.Entry.ID][memVar])
5205 addrTemp = s.f.Entry.NewValue2A(src.NoXPos, ssa.OpLocalAddr, types.NewPtr(temp.Type()), temp, s.sp, s.defvars[s.f.Entry.ID][memVar])
5207 // Special case if we're still in the entry block. We can't use
5208 // the above code, since s.defvars[s.f.Entry.ID] isn't defined
5209 // until we end the entry block with s.endBlock().
5210 if t.HasPointers() {
5211 s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, temp, s.mem(), false)
5213 s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, temp, s.mem(), false)
5214 addrTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(temp.Type()), temp, s.sp, s.mem(), false)
5216 // Since we may use this temp during exit depending on the
5217 // deferBits, we must define it unconditionally on entry.
5218 // Therefore, we must make sure it is zeroed out in the entry
5219 // block if it contains pointers, else GC may wrongly follow an
5220 // uninitialized pointer value.
5221 temp.SetNeedzero(true)
5222 // We are storing to the stack, hence we can avoid the full checks in
5223 // storeType() (no write barrier) and do a simple store().
5224 s.store(t, addrTemp, val)
5228 // openDeferExit generates SSA for processing all the open-coded defers at exit.
5229 // The code involves loading deferBits, and checking each of the bits to see if
5230 // the corresponding defer statement was executed. For each bit that is turned
5231 // on, the associated defer call is made.
5232 func (s *state) openDeferExit() {
5233 deferExit := s.f.NewBlock(ssa.BlockPlain)
5234 s.endBlock().AddEdgeTo(deferExit)
5235 s.startBlock(deferExit)
5236 s.lastDeferExit = deferExit
5237 s.lastDeferCount = len(s.openDefers)
5238 zeroval := s.constInt8(types.Types[types.TUINT8], 0)
5239 // Test for and run defers in reverse order
5240 for i := len(s.openDefers) - 1; i >= 0; i-- {
5241 r := s.openDefers[i]
5242 bCond := s.f.NewBlock(ssa.BlockPlain)
5243 bEnd := s.f.NewBlock(ssa.BlockPlain)
5245 deferBits := s.variable(deferBitsVar, types.Types[types.TUINT8])
5246 // Generate code to check if the bit associated with the current
5247 // defer is set.
5248 bitval := s.constInt8(types.Types[types.TUINT8], 1<<uint(i))
5249 andval := s.newValue2(ssa.OpAnd8, types.Types[types.TUINT8], deferBits, bitval)
5250 eqVal := s.newValue2(ssa.OpEq8, types.Types[types.TBOOL], andval, zeroval)
5252 b.Kind = ssa.BlockIf
5256 bCond.AddEdgeTo(bEnd)
5259 // Clear this bit in deferBits and force a store back to the stack, so
5260 // we will not try to re-run this defer call if it panics.
5261 nbitval := s.newValue1(ssa.OpCom8, types.Types[types.TUINT8], bitval)
5262 maskedval := s.newValue2(ssa.OpAnd8, types.Types[types.TUINT8], deferBits, nbitval)
5263 s.store(types.Types[types.TUINT8], s.deferBitsAddr, maskedval)
5264 // Use this value for the following tests, so that previously
5265 // cleared bits stay cleared.
5266 s.vars[deferBitsVar] = maskedval
5268 // Generate code to call the deferred function, using the closure
5269 // that was stored in an argtmp at the point of the defer
5270 // statement.
5272 stksize := fn.Type().ArgWidth()
5273 var callArgs []*ssa.Value
5275 if r.closure != nil {
5276 v := s.load(r.closure.Type.Elem(), r.closure)
5277 s.maybeNilCheckClosure(v, callDefer)
5278 codeptr := s.rawLoad(types.Types[types.TUINTPTR], v)
5279 aux := ssa.ClosureAuxCall(s.f.ABIDefault.ABIAnalyzeTypes(nil, nil))
5280 call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, v)
5282 aux := ssa.StaticAuxCall(fn.(*ir.Name).Linksym(), s.f.ABIDefault.ABIAnalyzeTypes(nil, nil))
5283 call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
5285 callArgs = append(callArgs, s.mem())
5286 call.AddArgs(callArgs...)
5287 call.AuxInt = stksize
5288 s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, 0, call)
5289 // Make sure that the stack slots with pointers are kept live
5290 // through the call (which is a preemption point). Also, we will
5291 // use the first call of the last defer exit to compute liveness
5292 // for the deferreturn, so we want all stack slots to be live.
5293 if r.closureNode != nil {
5294 s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode, s.mem(), false)
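// Illustration: for defer number i, the exit code generated above behaves
// like the following sketch (fn_i stands for the closure saved by
// openDeferSave):
//
//	if deferBits&(1<<i) != 0 {
//		deferBits &^= 1 << i // clear the bit first, in case the call panics
//		fn_i()
//	}
//
// with the defers tested in reverse order of their defer statements.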
5302 func (s *state) callResult(n *ir.CallExpr, k callKind) *ssa.Value {
5303 return s.call(n, k, false, nil)
5306 func (s *state) callAddr(n *ir.CallExpr, k callKind) *ssa.Value {
5307 return s.call(n, k, true, nil)
5310 // Calls the function n using the specified call type.
5311 // Returns the address of the return value (or nil if none).
5312 func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool, deferExtra ir.Expr) *ssa.Value {
5314 var callee *ir.Name // target function (if static)
5315 var closure *ssa.Value // ptr to closure to run (if dynamic)
5316 var codeptr *ssa.Value // ptr to target code (if dynamic)
5317 var dextra *ssa.Value // defer extra arg
5318 var rcvr *ssa.Value // receiver to set
5320 var ACArgs []*types.Type // AuxCall args
5321 var ACResults []*types.Type // AuxCall results
5322 var callArgs []*ssa.Value // For late-expansion, the args themselves (not stored, args to the call instead).
5324 callABI := s.f.ABIDefault
5326 if k != callNormal && k != callTail && (len(n.Args) != 0 || n.Op() == ir.OCALLINTER || n.Fun.Type().NumResults() != 0) {
5327 s.Fatalf("go/defer call with arguments: %v", n)
5332 if (k == callNormal || k == callTail) && fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC {
5335 if buildcfg.Experiment.RegabiArgs {
5336 // This is a static call, so it may be
5337 // a direct call to a non-ABIInternal
5338 // function. fn.Func may be nil for
5339 // some compiler-generated functions,
5340 // but those are all ABIInternal.
5342 callABI = abiForFunc(fn.Func, s.f.ABI0, s.f.ABI1)
5345 // TODO(register args) remove after register abi is working
5346 inRegistersImported := fn.Pragma()&ir.RegisterParams != 0
5347 inRegistersSamePackage := fn.Func != nil && fn.Func.Pragma&ir.RegisterParams != 0
5348 if inRegistersImported || inRegistersSamePackage {
5354 closure = s.expr(fn)
5355 if k != callDefer && k != callDeferStack {
5356 // A deferred nil function needs to panic when the function is invoked,
5357 // not at the point of the defer statement.
5358 s.maybeNilCheckClosure(closure, k)
5361 if fn.Op() != ir.ODOTINTER {
5362 s.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op())
5364 fn := fn.(*ir.SelectorExpr)
5365 var iclosure *ssa.Value
5366 iclosure, rcvr = s.getClosureAndRcvr(fn)
5367 if k == callNormal {
5368 codeptr = s.load(types.Types[types.TUINTPTR], iclosure)
5373 if deferExtra != nil {
5374 dextra = s.expr(deferExtra)
5377 params := callABI.ABIAnalyze(n.Fun.Type(), false /* Do not set (register) nNames from caller side -- can cause races. */)
5378 types.CalcSize(fn.Type())
5379 stksize := params.ArgWidth() // includes receiver, args, and results
5381 res := n.Fun.Type().Results()
5382 if k == callNormal || k == callTail {
5383 for _, p := range params.OutParams() {
5384 ACResults = append(ACResults, p.Type)
5389 if k == callDeferStack {
5391 s.Fatalf("deferprocStack with non-zero stack size %d: %v", stksize, n)
5393 // Make a defer struct on the stack.
5395 _, addr := s.temp(n.Pos(), t)
5396 s.store(closure.Type,
5397 s.newValue1I(ssa.OpOffPtr, closure.Type.PtrTo(), t.FieldOff(deferStructFnField), addr),
5400 // Call runtime.deferprocStack with pointer to _defer record.
5401 ACArgs = append(ACArgs, types.Types[types.TUINTPTR])
5402 aux := ssa.StaticAuxCall(ir.Syms.DeferprocStack, s.f.ABIDefault.ABIAnalyzeTypes(ACArgs, ACResults))
5403 callArgs = append(callArgs, addr, s.mem())
5404 call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
5405 call.AddArgs(callArgs...)
5406 call.AuxInt = int64(types.PtrSize) // deferprocStack takes a *_defer arg
5408 // Store arguments to stack, including defer/go arguments and receiver for method calls.
5409 // These are written in SP-offset order.
5410 argStart := base.Ctxt.Arch.FixedFrameSize
5412 if k != callNormal && k != callTail {
5413 // Write closure (arg to newproc/deferproc).
5414 ACArgs = append(ACArgs, types.Types[types.TUINTPTR]) // not argExtra
5415 callArgs = append(callArgs, closure)
5416 stksize += int64(types.PtrSize)
5417 argStart += int64(types.PtrSize)
5419 // Extra token of type any for deferprocat
5420 ACArgs = append(ACArgs, types.Types[types.TINTER])
5421 callArgs = append(callArgs, dextra)
5422 stksize += 2 * int64(types.PtrSize)
5423 argStart += 2 * int64(types.PtrSize)
5427 // Set receiver (for interface calls).
5429 callArgs = append(callArgs, rcvr)
5436 for _, p := range params.InParams() { // includes receiver for interface calls
5437 ACArgs = append(ACArgs, p.Type)
5440 // Split the entry block if there are open defers, because later calls to
5441 // openDeferSave may cause a mismatch between the mem for an OpDereference
5442 // and the call site which uses it. See #49282.
5443 if s.curBlock.ID == s.f.Entry.ID && s.hasOpenDefers {
5445 b.Kind = ssa.BlockPlain
5446 curb := s.f.NewBlock(ssa.BlockPlain)
5451 for i, n := range args {
5452 callArgs = append(callArgs, s.putArg(n, t.Param(i).Type))
5455 callArgs = append(callArgs, s.mem())
5459 case k == callDefer:
5460 sym := ir.Syms.Deferproc
5462 sym = ir.Syms.Deferprocat
5464 aux := ssa.StaticAuxCall(sym, s.f.ABIDefault.ABIAnalyzeTypes(ACArgs, ACResults)) // TODO paramResultInfo for Deferproc(at)
5465 call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
5467 aux := ssa.StaticAuxCall(ir.Syms.Newproc, s.f.ABIDefault.ABIAnalyzeTypes(ACArgs, ACResults))
5468 call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) // TODO paramResultInfo for Newproc
5469 case closure != nil:
5470 // rawLoad because loading the code pointer from a
5471 // closure is always safe, but IsSanitizerSafeAddr
5472 // can't always figure that out currently, and it's
5473 // critical that we not clobber any arguments already
5474 // stored onto the stack.
5475 codeptr = s.rawLoad(types.Types[types.TUINTPTR], closure)
5476 aux := ssa.ClosureAuxCall(callABI.ABIAnalyzeTypes(ACArgs, ACResults))
5477 call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, closure)
5478 case codeptr != nil:
5479 // Note that the "receiver" parameter is nil because the actual receiver is the first input parameter.
5480 aux := ssa.InterfaceAuxCall(params)
5481 call = s.newValue1A(ssa.OpInterLECall, aux.LateExpansionResultType(), aux, codeptr)
5483 aux := ssa.StaticAuxCall(callTargetLSym(callee), params)
5484 call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
5486 call.Op = ssa.OpTailLECall
5487 stksize = 0 // Tail call does not use stack. We reuse caller's frame.
5490 s.Fatalf("bad call type %v %v", n.Op(), n)
5492 call.AddArgs(callArgs...)
5493 call.AuxInt = stksize // Call operations carry the argsize of the callee along with them
5496 s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
5497 // Insert VarLive opcodes.
5498 for _, v := range n.KeepAlive {
5500 s.Fatalf("KeepAlive variable %v must have Addrtaken set", v)
5503 case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT:
5505 s.Fatalf("KeepAlive variable %v must be Auto or Arg", v)
5507 s.vars[memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, v, s.mem())
5510 // Finish block for defers
5511 if k == callDefer || k == callDeferStack {
5513 b.Kind = ssa.BlockDefer
5515 bNext := s.f.NewBlock(ssa.BlockPlain)
5517 // Add recover edge to exit code.
5518 r := s.f.NewBlock(ssa.BlockPlain)
5522 b.Likely = ssa.BranchLikely
5526 if len(res) == 0 || k != callNormal {
5527 // call has no return value. Continue with the next statement.
5531 if returnResultAddr {
5532 return s.resultAddrOfCall(call, 0, fp.Type)
5534 return s.newValue1I(ssa.OpSelectN, fp.Type, 0, call)
5537 // maybeNilCheckClosure checks if a nil check of a closure is needed in some
5538 // architecture-dependent situations and, if so, emits the nil check.
5539 func (s *state) maybeNilCheckClosure(closure *ssa.Value, k callKind) {
5540 if Arch.LinkArch.Family == sys.Wasm || buildcfg.GOOS == "aix" && k != callGo {
5541 // On AIX, the closure needs to be verified as fn can be nil, except for go calls; that case must be handled by the runtime so it can produce the "go of nil func value" error.
5542 // TODO(neelance): On other architectures this should be eliminated by the optimization steps
5547 // getClosureAndRcvr returns values for the appropriate closure and receiver of an
5549 func (s *state) getClosureAndRcvr(fn *ir.SelectorExpr) (*ssa.Value, *ssa.Value) {
5551 itab := s.newValue1(ssa.OpITab, types.Types[types.TUINTPTR], i)
5553 itabidx := fn.Offset() + 2*int64(types.PtrSize) + 8 // offset of fun field in runtime.itab
5554 closure := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab)
5555 rcvr := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, i)
5556 return closure, rcvr
5559 // etypesign returns the signed-ness of e, for integer/pointer etypes.
5560 // -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer.
5561 func etypesign(e types.Kind) int8 {
5563 case types.TINT8, types.TINT16, types.TINT32, types.TINT64, types.TINT:
5565 case types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINT, types.TUINTPTR, types.TUNSAFEPTR:
5571 // addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
5572 // The value that the returned Value represents is guaranteed to be non-nil.
5573 func (s *state) addr(n ir.Node) *ssa.Value {
5574 if n.Op() != ir.ONAME {
5580 s.Fatalf("addr of canSSA expression: %+v", n)
5583 t := types.NewPtr(n.Type())
5584 linksymOffset := func(lsym *obj.LSym, offset int64) *ssa.Value {
5585 v := s.entryNewValue1A(ssa.OpAddr, t, lsym, s.sb)
5586 // TODO: Make OpAddr use AuxInt as well as Aux.
5588 v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, offset, v)
5593 case ir.OLINKSYMOFFSET:
5594 no := n.(*ir.LinksymOffsetExpr)
5595 return linksymOffset(no.Linksym, no.Offset_)
5598 if n.Heapaddr != nil {
5599 return s.expr(n.Heapaddr)
5604 return linksymOffset(n.Linksym(), 0)
5611 s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
5614 return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), !ir.IsAutoTmp(n))
5616 case ir.PPARAMOUT: // Same as PAUTO -- cannot generate LEA early.
5617 // ensure that we reuse symbols for out parameters so
5618 // that cse works on their addresses
5619 return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), true)
5621 s.Fatalf("variable address class %v not implemented", n.Class)
5625 // load return from callee
5626 n := n.(*ir.ResultExpr)
5627 return s.resultAddrOfCall(s.prevCall, n.Index, n.Type())
5629 n := n.(*ir.IndexExpr)
5630 if n.X.Type().IsSlice() {
5632 i := s.expr(n.Index)
5633 len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], a)
5634 i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
5635 p := s.newValue1(ssa.OpSlicePtr, t, a)
5636 return s.newValue2(ssa.OpPtrIndex, t, p, i)
5639 i := s.expr(n.Index)
5640 len := s.constInt(types.Types[types.TINT], n.X.Type().NumElem())
5641 i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
5642 return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.X.Type().Elem()), a, i)
5645 n := n.(*ir.StarExpr)
5646 return s.exprPtr(n.X, n.Bounded(), n.Pos())
5648 n := n.(*ir.SelectorExpr)
5650 return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p)
5652 n := n.(*ir.SelectorExpr)
5653 p := s.exprPtr(n.X, n.Bounded(), n.Pos())
5654 return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p)
5656 n := n.(*ir.ConvExpr)
5657 if n.Type() == n.X.Type() {
5661 return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type
5662 case ir.OCALLFUNC, ir.OCALLINTER:
5663 n := n.(*ir.CallExpr)
5664 return s.callAddr(n, callNormal)
5665 case ir.ODOTTYPE, ir.ODYNAMICDOTTYPE:
5667 if n.Op() == ir.ODOTTYPE {
5668 v, _ = s.dottype(n.(*ir.TypeAssertExpr), false)
5670 v, _ = s.dynamicDottype(n.(*ir.DynamicTypeAssertExpr), false)
5672 if v.Op != ssa.OpLoad {
5673 s.Fatalf("dottype of non-load")
5675 if v.Args[1] != s.mem() {
5676 s.Fatalf("memory no longer live from dottype load")
5680 s.Fatalf("unhandled addr %v", n.Op())
5685 // canSSA reports whether n is SSA-able.
5686 // n must be an ONAME (or an ODOT sequence with an ONAME base).
5687 func (s *state) canSSA(n ir.Node) bool {
5688 if base.Flag.N != 0 {
5693 if nn.Op() == ir.ODOT {
5694 nn := nn.(*ir.SelectorExpr)
5698 if nn.Op() == ir.OINDEX {
5699 nn := nn.(*ir.IndexExpr)
5700 if nn.X.Type().IsArray() {
5707 if n.Op() != ir.ONAME {
5710 return s.canSSAName(n.(*ir.Name)) && ssa.CanSSA(n.Type())
5713 func (s *state) canSSAName(name *ir.Name) bool {
5714 if name.Addrtaken() || !name.OnStack() {
5720 // TODO: handle this case? Named return values must be
5721 // in memory so that the deferred function can see them.
5722 // Maybe do: if !strings.HasPrefix(n.String(), "~") { return false }
5723 // Or maybe not, see issue 18860. Even unnamed return values
5724 // must be written back so if a defer recovers, the caller can see them.
5727 if s.cgoUnsafeArgs {
5728 // Cgo effectively takes the address of all result args,
5729 // but the compiler can't see that.
5734 // TODO: try to make more variables SSAable?
5737 // exprPtr evaluates n to a pointer and nil-checks it.
5738 func (s *state) exprPtr(n ir.Node, bounded bool, lineno src.XPos) *ssa.Value {
5740 if bounded || n.NonNil() {
5741 if s.f.Frontend().Debug_checknil() && lineno.Line() > 1 {
5742 s.f.Warnl(lineno, "removed nil check")
5750 // nilCheck generates nil pointer checking code.
5751 // Used only for automatically inserted nil checks,
5752 // not for user code like 'x != nil'.
5753 func (s *state) nilCheck(ptr *ssa.Value) {
5754 if base.Debug.DisableNil != 0 || s.curfn.NilCheckDisabled() {
5757 s.newValue2(ssa.OpNilCheck, types.TypeVoid, ptr, s.mem())
5760 // boundsCheck generates bounds checking code. Checks if 0 <= idx <[=] len, branches to exit if not.
5761 // Starts a new block on return.
5762 // On input, len must be converted to full int width and be nonnegative.
5763 // Returns idx converted to full int width.
5764 // If bounded is true, then the caller guarantees the index is not out of bounds
5765 // (but boundsCheck will still extend the index to full int width).
5766 func (s *state) boundsCheck(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value {
5767 idx = s.extendIndex(idx, len, kind, bounded)
5769 if bounded || base.Flag.B != 0 {
5770 // If bounded or bounds checking is flag-disabled, then no check necessary,
5771 // just return the extended index.
5773 // Here, bounded == true if the compiler generated the index itself,
5774 // such as in the expansion of a slice initializer. These indexes are
5775 // compiler-generated, not Go program variables, so they cannot be
5776 // attacker-controlled, so we can omit Spectre masking as well.
5778 // Note that we do not want to omit Spectre masking in code like:
5780 //	if 0 <= i && i < len(x) {
5781 //		use(x[i])
5782 //	}
5784 // Lucky for us, bounded==false for that code.
5785 // In that case (handled below), we emit a bound check (and Spectre mask)
5786 // and then the prove pass will remove the bounds check.
5787 // In theory the prove pass could potentially remove certain
5788 // Spectre masks, but it's very delicate and probably better
5789 // to be conservative and leave them all in.
5793 bNext := s.f.NewBlock(ssa.BlockPlain)
5794 bPanic := s.f.NewBlock(ssa.BlockExit)
5796 if !idx.Type.IsSigned() {
5798 case ssa.BoundsIndex:
5799 kind = ssa.BoundsIndexU
5800 case ssa.BoundsSliceAlen:
5801 kind = ssa.BoundsSliceAlenU
5802 case ssa.BoundsSliceAcap:
5803 kind = ssa.BoundsSliceAcapU
5804 case ssa.BoundsSliceB:
5805 kind = ssa.BoundsSliceBU
5806 case ssa.BoundsSlice3Alen:
5807 kind = ssa.BoundsSlice3AlenU
5808 case ssa.BoundsSlice3Acap:
5809 kind = ssa.BoundsSlice3AcapU
5810 case ssa.BoundsSlice3B:
5811 kind = ssa.BoundsSlice3BU
5812 case ssa.BoundsSlice3C:
5813 kind = ssa.BoundsSlice3CU
5818 if kind == ssa.BoundsIndex || kind == ssa.BoundsIndexU {
5819 cmp = s.newValue2(ssa.OpIsInBounds, types.Types[types.TBOOL], idx, len)
5821 cmp = s.newValue2(ssa.OpIsSliceInBounds, types.Types[types.TBOOL], idx, len)
5824 b.Kind = ssa.BlockIf
5826 b.Likely = ssa.BranchLikely
5830 s.startBlock(bPanic)
5831 if Arch.LinkArch.Family == sys.Wasm {
5832 // TODO(khr): figure out how to do "register" based calling convention for bounds checks.
5833 // Should be similar to gcWriteBarrier, but I can't make it work.
5834 s.rtcall(BoundsCheckFunc[kind], false, nil, idx, len)
5836 mem := s.newValue3I(ssa.OpPanicBounds, types.TypeMem, int64(kind), idx, len, s.mem())
5837 s.endBlock().SetControl(mem)
5841 // In Spectre index mode, apply an appropriate mask to avoid speculative out-of-bounds accesses.
5842 if base.Flag.Cfg.SpectreIndex {
5843 op := ssa.OpSpectreIndex
5844 if kind != ssa.BoundsIndex && kind != ssa.BoundsIndexU {
5845 op = ssa.OpSpectreSliceIndex
5847 idx = s.newValue2(op, types.Types[types.TINT], idx, len)
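// Sketch of the mitigation above: after a passing bounds check, the index is
// additionally masked so that even mispredicted speculative execution cannot
// form an out-of-range address. Conceptually:
//
//	idx = SpectreIndex(idx, len) // idx if 0 <= idx < len, 0 otherwise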
5853 // If cmp (a bool) is false, panic using the given function.
5854 func (s *state) check(cmp *ssa.Value, fn *obj.LSym) {
5856 b.Kind = ssa.BlockIf
5858 b.Likely = ssa.BranchLikely
5859 bNext := s.f.NewBlock(ssa.BlockPlain)
5861 pos := base.Ctxt.PosTable.Pos(line)
5862 fl := funcLine{f: fn, base: pos.Base(), line: pos.Line()}
5863 bPanic := s.panics[fl]
5865 bPanic = s.f.NewBlock(ssa.BlockPlain)
5866 s.panics[fl] = bPanic
5867 s.startBlock(bPanic)
5868 // The panic call takes/returns memory to ensure that the right
5869 // memory state is observed if the panic happens.
5870 s.rtcall(fn, false, nil)
5877 func (s *state) intDivide(n ir.Node, a, b *ssa.Value) *ssa.Value {
5880 case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64:
5886 // do a size-appropriate check for zero
5887 cmp := s.newValue2(s.ssaOp(ir.ONE, n.Type()), types.Types[types.TBOOL], b, s.zeroVal(n.Type()))
5888 s.check(cmp, ir.Syms.Panicdivide)
5890 return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
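// Illustration: with a non-constant divisor, a Go expression a / b is
// compiled roughly as
//
//	if b == 0 {
//		runtime.panicdivide()
//	}
//	q := a / b
//
// while a nonzero constant divisor (recognized above) skips the test.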
5893 // rtcall issues a call to the given runtime function fn with the listed args.
5894 // Returns a slice of results of the given result types.
5895 // The call is added to the end of the current block.
5896 // If returns is false, the block is marked as an exit block.
5897 func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args ...*ssa.Value) []*ssa.Value {
5899 // Write args to the stack
5900 off := base.Ctxt.Arch.FixedFrameSize
5901 var callArgs []*ssa.Value
5902 var callArgTypes []*types.Type
5904 for _, arg := range args {
5906 off = types.RoundUp(off, t.Alignment())
5908 callArgs = append(callArgs, arg)
5909 callArgTypes = append(callArgTypes, t)
5912 off = types.RoundUp(off, int64(types.RegSize))
5916 aux := ssa.StaticAuxCall(fn, s.f.ABIDefault.ABIAnalyzeTypes(callArgTypes, results))
5917 callArgs = append(callArgs, s.mem())
5918 call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
5919 call.AddArgs(callArgs...)
5920 s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(results)), call)
5925 b.Kind = ssa.BlockExit
5927 call.AuxInt = off - base.Ctxt.Arch.FixedFrameSize
5928 if len(results) > 0 {
5929 s.Fatalf("panic call can't have results")
5935 res := make([]*ssa.Value, len(results))
5936 for i, t := range results {
5937 off = types.RoundUp(off, t.Alignment())
5938 res[i] = s.resultOfCall(call, int64(i), t)
5941 off = types.RoundUp(off, int64(types.PtrSize))
5943 // Remember how much callee stack space we needed.
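// Examples of use, as seen elsewhere in this file: a no-result panic call
// such as
//
//	s.rtcall(ir.Syms.Panicnildottype, false, nil, target)
//
// marks the block as an exit block, while a returning call such as
//
//	itab = s.rtcall(ir.Syms.TypeAssert, true, []*types.Type{byteptr}, d, typ)[0]
//
// yields one result value (see dottype1 below).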
5949 // do *left = right for type t.
5950 func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask, leftIsStmt bool) {
5951 s.instrument(t, left, instrumentWrite)
5953 if skip == 0 && (!t.HasPointers() || ssa.IsStackAddr(left)) {
5954 // Known to not have write barrier. Store the whole type.
5955 s.vars[memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, left, right, s.mem(), leftIsStmt)
5959 // store scalar fields first, so write barrier stores for
5960 // pointer fields can be grouped together, and scalar values
5961 // don't need to be live across the write barrier call.
5962 // TODO: if the writebarrier pass knows how to reorder stores,
5963 // we can do a single store here as long as skip==0.
5964 s.storeTypeScalars(t, left, right, skip)
5965 if skip&skipPtr == 0 && t.HasPointers() {
5966 s.storeTypePtrs(t, left, right)
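// Illustration (hypothetical type): when storing a value of type
//
//	struct { n int; p *T }
//
// the scalar field n is written first and the pointer field p second, so
// scalar values are never live across the write barrier call made for p.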
5970 // do *left = right for all scalar (non-pointer) parts of t.
5971 func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip skipMask) {
5973 case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex():
5974 s.store(t, left, right)
5975 case t.IsPtrShaped():
5976 if t.IsPtr() && t.Elem().NotInHeap() {
5977 s.store(t, left, right) // see issue 42032
5979 // otherwise, no scalar fields.
5981 if skip&skipLen != 0 {
5984 len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], right)
5985 lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
5986 s.store(types.Types[types.TINT], lenAddr, len)
5988 if skip&skipLen == 0 {
5989 len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], right)
5990 lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
5991 s.store(types.Types[types.TINT], lenAddr, len)
5993 if skip&skipCap == 0 {
5994 cap := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], right)
5995 capAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, 2*s.config.PtrSize, left)
5996 s.store(types.Types[types.TINT], capAddr, cap)
5998 case t.IsInterface():
5999 // itab field doesn't need a write barrier (even though it is a pointer).
6000 itab := s.newValue1(ssa.OpITab, s.f.Config.Types.BytePtr, right)
6001 s.store(types.Types[types.TUINTPTR], left, itab)
6004 for i := 0; i < n; i++ {
6005 ft := t.FieldType(i)
6006 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
6007 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
6008 s.storeTypeScalars(ft, addr, val, 0)
6010 case t.IsArray() && t.NumElem() == 0:
6012 case t.IsArray() && t.NumElem() == 1:
6013 s.storeTypeScalars(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right), 0)
6015 s.Fatalf("bad write barrier type %v", t)
6019 // do *left = right for all pointer parts of t.
6020 func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
6022 case t.IsPtrShaped():
6023 if t.IsPtr() && t.Elem().NotInHeap() {
6024 break // see issue 42032
6026 s.store(t, left, right)
6028 ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, right)
6029 s.store(s.f.Config.Types.BytePtr, left, ptr)
6031 elType := types.NewPtr(t.Elem())
6032 ptr := s.newValue1(ssa.OpSlicePtr, elType, right)
6033 s.store(elType, left, ptr)
6034 case t.IsInterface():
6035 // itab field is treated as a scalar.
6036 idata := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, right)
6037 idataAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.BytePtrPtr, s.config.PtrSize, left)
6038 s.store(s.f.Config.Types.BytePtr, idataAddr, idata)
6041 for i := 0; i < n; i++ {
6042 ft := t.FieldType(i)
6043 if !ft.HasPointers() {
6046 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
6047 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
6048 s.storeTypePtrs(ft, addr, val)
6050 case t.IsArray() && t.NumElem() == 0:
6052 case t.IsArray() && t.NumElem() == 1:
6053 s.storeTypePtrs(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right))
6055 s.Fatalf("bad write barrier type %v", t)
6059 // putArg evaluates n for the purpose of passing it as an argument to a function and returns the value for the call.
6060 func (s *state) putArg(n ir.Node, t *types.Type) *ssa.Value {
6063 a = s.newValue2(ssa.OpDereference, t, s.addr(n), s.mem())
6070 func (s *state) storeArgWithBase(n ir.Node, t *types.Type, base *ssa.Value, off int64) {
6071 pt := types.NewPtr(t)
6074 // Use special routine that avoids allocation on duplicate offsets.
6075 addr = s.constOffPtrSP(pt, off)
6077 addr = s.newValue1I(ssa.OpOffPtr, pt, off, base)
6087 s.storeType(t, addr, a, 0, false)
6090 // slice computes the slice v[i:j:k] and returns ptr, len, and cap of result.
6091 // i,j,k may be nil, in which case they are set to their default value.
6092 // v may be a slice, string or pointer to an array.
6093 func (s *state) slice(v, i, j, k *ssa.Value, bounded bool) (p, l, c *ssa.Value) {
6095 var ptr, len, cap *ssa.Value
6098 ptr = s.newValue1(ssa.OpSlicePtr, types.NewPtr(t.Elem()), v)
6099 len = s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], v)
6100 cap = s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], v)
6102 ptr = s.newValue1(ssa.OpStringPtr, types.NewPtr(types.Types[types.TUINT8]), v)
6103 len = s.newValue1(ssa.OpStringLen, types.Types[types.TINT], v)
6106 if !t.Elem().IsArray() {
6107 s.Fatalf("bad ptr to array in slice %v\n", t)
6110 ptr = s.newValue1(ssa.OpCopy, types.NewPtr(t.Elem().Elem()), v)
6111 len = s.constInt(types.Types[types.TINT], t.Elem().NumElem())
6114 s.Fatalf("bad type in slice %v\n", t)
6117 // Set default values
6119 i = s.constInt(types.Types[types.TINT], 0)
6130 // Panic if slice indices are not in bounds.
6131 // Make sure we check these in reverse order so that we're always
6132 // comparing against a value known to be nonnegative. See issue 28797.
6135 kind := ssa.BoundsSlice3Alen
6137 kind = ssa.BoundsSlice3Acap
6139 k = s.boundsCheck(k, cap, kind, bounded)
6142 j = s.boundsCheck(j, k, ssa.BoundsSlice3B, bounded)
6144 i = s.boundsCheck(i, j, ssa.BoundsSlice3C, bounded)
6147 kind := ssa.BoundsSliceAlen
6149 kind = ssa.BoundsSliceAcap
6151 j = s.boundsCheck(j, k, kind, bounded)
6153 i = s.boundsCheck(i, j, ssa.BoundsSliceB, bounded)
6156 // Word-sized integer operations.
6157 subOp := s.ssaOp(ir.OSUB, types.Types[types.TINT])
6158 mulOp := s.ssaOp(ir.OMUL, types.Types[types.TINT])
6159 andOp := s.ssaOp(ir.OAND, types.Types[types.TINT])
6161 // Calculate the length (rlen) and capacity (rcap) of the new slice.
6162 // For strings the capacity of the result is unimportant. However,
6163 // we use rcap to test if we've generated a zero-length slice.
6164 // For strings, use the length for that test instead.
6165 rlen := s.newValue2(subOp, types.Types[types.TINT], j, i)
6167 if j != k && !t.IsString() {
6168 rcap = s.newValue2(subOp, types.Types[types.TINT], k, i)
6171 if (i.Op == ssa.OpConst64 || i.Op == ssa.OpConst32) && i.AuxInt == 0 {
6172 // No pointer arithmetic necessary.
6173 return ptr, rlen, rcap
6176 // Calculate the base pointer (rptr) for the new slice.
6178 // Generate the following code assuming that indexes are in bounds.
6179 // The masking is to make sure that we don't generate a slice
6180 // that points to the next object in memory. We cannot just set
6181 // the pointer to nil because then we would create a nil slice or
6182 // string.
6186 // rptr = ptr + (mask(rcap) & (i * stride))
6188 // Where mask(x) is 0 if x==0 and -1 if x>0 and stride is the width
6189 // of the element type.
6190 stride := s.constInt(types.Types[types.TINT], ptr.Type.Elem().Size())
6192 // The delta is the number of bytes to offset ptr by.
6193 delta := s.newValue2(mulOp, types.Types[types.TINT], i, stride)
6195 // If we're slicing to the point where the capacity is zero,
6196 // zero out the delta.
6197 mask := s.newValue1(ssa.OpSlicemask, types.Types[types.TINT], rcap)
6198 delta = s.newValue2(andOp, types.Types[types.TINT], delta, mask)
6200 // Compute rptr = ptr + delta.
6201 rptr := s.newValue2(ssa.OpAddPtr, ptr.Type, ptr, delta)
6203 return rptr, rlen, rcap
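// Worked example (illustrative): for s := make([]int64, 2), the slice s[2:2]
// has rcap == 0, so mask(rcap) == 0 and delta = 0 & (2*8) = 0; rptr stays at
// ptr rather than pointing one past the backing array. For s[1:2], rcap == 1,
// the mask is -1, and rptr = ptr + 1*8.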
6206 type u642fcvtTab struct {
6207 leq, cvt2F, and, rsh, or, add ssa.Op
6208 one func(*state, *types.Type, int64) *ssa.Value
6211 var u64_f64 = u642fcvtTab{
6213 cvt2F: ssa.OpCvt64to64F,
6215 rsh: ssa.OpRsh64Ux64,
6218 one: (*state).constInt64,
6221 var u64_f32 = u642fcvtTab{
6223 cvt2F: ssa.OpCvt64to32F,
6225 rsh: ssa.OpRsh64Ux64,
6228 one: (*state).constInt64,
6231 func (s *state) uint64Tofloat64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
6232 return s.uint64Tofloat(&u64_f64, n, x, ft, tt)
6235 func (s *state) uint64Tofloat32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
6236 return s.uint64Tofloat(&u64_f32, n, x, ft, tt)
6239 func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
6240 // if x >= 0 {
6241 //	result = (floatY) x
6242 // } else {
6243 //	y = uintX(x) ; y = x & 1
6244 //	z = uintX(x) ; z = z >> 1
6245 //	z = z | y
6246 //	result = floatY(z)
6247 //	result = result + result
6248 // }
6250 // Code borrowed from old code generator.
6251 // What's going on: large 64-bit "unsigned" looks like
6252 // negative number to hardware's integer-to-float
6253 // conversion. However, because the mantissa is only
6254 // 63 bits, we don't need the LSB, so instead we do an
6255 // unsigned right shift (divide by two), convert, and
6256 // double. However, before we do that, we need to be
6257 // sure that we do not lose a "1" if that made the
6258 // difference in the resulting rounding. Therefore, we
6259 // preserve it, and OR (not ADD) it back in. The case
6260 // that matters is when the eleven discarded bits are
6261 // equal to 10000000001; that rounds up, and the 1 cannot
6262 // be lost else it would round down if the LSB of the
6263 // candidate mantissa is 0.
6264 cmp := s.newValue2(cvttab.leq, types.Types[types.TBOOL], s.zeroVal(ft), x)
6266 b.Kind = ssa.BlockIf
6268 b.Likely = ssa.BranchLikely
6270 bThen := s.f.NewBlock(ssa.BlockPlain)
6271 bElse := s.f.NewBlock(ssa.BlockPlain)
6272 bAfter := s.f.NewBlock(ssa.BlockPlain)
6276 a0 := s.newValue1(cvttab.cvt2F, tt, x)
6279 bThen.AddEdgeTo(bAfter)
6283 one := cvttab.one(s, ft, 1)
6284 y := s.newValue2(cvttab.and, ft, x, one)
6285 z := s.newValue2(cvttab.rsh, ft, x, one)
6286 z = s.newValue2(cvttab.or, ft, z, y)
6287 a := s.newValue1(cvttab.cvt2F, tt, z)
6288 a1 := s.newValue2(cvttab.add, tt, a, a)
6291 bElse.AddEdgeTo(bAfter)
6293 s.startBlock(bAfter)
6294 return s.variable(n, n.Type())
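// A pure-Go sketch of the conversion above (uint64 -> float64 case,
// illustrative):
//
//	func u64toF64(x uint64) float64 {
//		if int64(x) >= 0 { // high bit clear: direct signed conversion is exact
//			return float64(int64(x))
//		}
//		z := x>>1 | x&1 // halve, ORing the low bit back in to preserve rounding
//		return 2 * float64(int64(z))
//	}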
6297 type u322fcvtTab struct {
6298 cvtI2F, cvtF2F ssa.Op
6301 var u32_f64 = u322fcvtTab{
6302 cvtI2F: ssa.OpCvt32to64F,
6306 var u32_f32 = u322fcvtTab{
6307 cvtI2F: ssa.OpCvt32to32F,
6308 cvtF2F: ssa.OpCvt64Fto32F,
6311 func (s *state) uint32Tofloat64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
6312 return s.uint32Tofloat(&u32_f64, n, x, ft, tt)
6315 func (s *state) uint32Tofloat32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
6316 return s.uint32Tofloat(&u32_f32, n, x, ft, tt)
6319 func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
6320 // if x >= 0 {
6321 //	result = floatY(x)
6322 // } else {
6323 //	result = floatY(float64(x) + (1<<32))
6324 // }
6325 cmp := s.newValue2(ssa.OpLeq32, types.Types[types.TBOOL], s.zeroVal(ft), x)
6327 b.Kind = ssa.BlockIf
6329 b.Likely = ssa.BranchLikely
6331 bThen := s.f.NewBlock(ssa.BlockPlain)
6332 bElse := s.f.NewBlock(ssa.BlockPlain)
6333 bAfter := s.f.NewBlock(ssa.BlockPlain)
6337 a0 := s.newValue1(cvttab.cvtI2F, tt, x)
6340 bThen.AddEdgeTo(bAfter)
6344 a1 := s.newValue1(ssa.OpCvt32to64F, types.Types[types.TFLOAT64], x)
6345 twoToThe32 := s.constFloat64(types.Types[types.TFLOAT64], float64(1<<32))
6346 a2 := s.newValue2(ssa.OpAdd64F, types.Types[types.TFLOAT64], a1, twoToThe32)
6347 a3 := s.newValue1(cvttab.cvtF2F, tt, a2)
6351 bElse.AddEdgeTo(bAfter)
6353 s.startBlock(bAfter)
6354 return s.variable(n, n.Type())
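// Equivalent Go sketch (uint32 -> float64 case, illustrative): when the high
// bit is set, int32(x) is negative, and adding 2^32 back recovers the
// unsigned value:
//
//	func u32toF64(x uint32) float64 {
//		if int32(x) >= 0 {
//			return float64(int32(x))
//		}
//		return float64(int32(x)) + (1 << 32)
//	}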
6357 // referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
6358 func (s *state) referenceTypeBuiltin(n *ir.UnaryExpr, x *ssa.Value) *ssa.Value {
6359 if !n.X.Type().IsMap() && !n.X.Type().IsChan() {
6360 s.Fatalf("node must be a map or a channel")
6362 // if n == nil {
6363 //	return 0
6364 // } else {
6365 //	// len
6366 //	return *((*int)n)
6367 //	// cap
6368 //	return *(((*int)n)+1)
6369 // }
6371 nilValue := s.constNil(types.Types[types.TUINTPTR])
6372 cmp := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], x, nilValue)
6374 b.Kind = ssa.BlockIf
6376 b.Likely = ssa.BranchUnlikely
6378 bThen := s.f.NewBlock(ssa.BlockPlain)
6379 bElse := s.f.NewBlock(ssa.BlockPlain)
6380 bAfter := s.f.NewBlock(ssa.BlockPlain)
6382 // length/capacity of a nil map/chan is zero
6385 s.vars[n] = s.zeroVal(lenType)
6387 bThen.AddEdgeTo(bAfter)
6393 // length is stored in the first word for map/chan
6394 s.vars[n] = s.load(lenType, x)
6396 // capacity is stored in the second word for chan
6397 sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Size(), x)
6398 s.vars[n] = s.load(lenType, sw)
6400 s.Fatalf("op must be OLEN or OCAP")
6403 bElse.AddEdgeTo(bAfter)
6405 s.startBlock(bAfter)
6406 return s.variable(n, lenType)
6409 type f2uCvtTab struct {
6410 ltf, cvt2U, subf, or ssa.Op
6411 floatValue func(*state, *types.Type, float64) *ssa.Value
6412 intValue func(*state, *types.Type, int64) *ssa.Value
6416 var f32_u64 = f2uCvtTab{
6418 cvt2U: ssa.OpCvt32Fto64,
6421 floatValue: (*state).constFloat32,
6422 intValue: (*state).constInt64,
6426 var f64_u64 = f2uCvtTab{
6428 cvt2U: ssa.OpCvt64Fto64,
6431 floatValue: (*state).constFloat64,
6432 intValue: (*state).constInt64,
6436 var f32_u32 = f2uCvtTab{
6438 cvt2U: ssa.OpCvt32Fto32,
6441 floatValue: (*state).constFloat32,
6442 intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
6446 var f64_u32 = f2uCvtTab{
6448 cvt2U: ssa.OpCvt64Fto32,
6451 floatValue: (*state).constFloat64,
6452 intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
6456 func (s *state) float32ToUint64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
6457 return s.floatToUint(&f32_u64, n, x, ft, tt)
6459 func (s *state) float64ToUint64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
6460 return s.floatToUint(&f64_u64, n, x, ft, tt)
6463 func (s *state) float32ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
6464 return s.floatToUint(&f32_u32, n, x, ft, tt)
6467 func (s *state) float64ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
6468 return s.floatToUint(&f64_u32, n, x, ft, tt)
6471 func (s *state) floatToUint(cvttab *f2uCvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
6472 // cutoff:=1<<(intY_Size-1)
6473 // if x < floatX(cutoff) {
6474 //	result = uintY(x)
6475 // } else {
6476 //	y = x - floatX(cutoff)
6477 //	z = uintY(y)
6478 //	result = z | -(cutoff)
6479 // }
6480 cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff))
6481 cmp := s.newValue2(cvttab.ltf, types.Types[types.TBOOL], x, cutoff)
6483 b.Kind = ssa.BlockIf
6485 b.Likely = ssa.BranchLikely
6487 bThen := s.f.NewBlock(ssa.BlockPlain)
6488 bElse := s.f.NewBlock(ssa.BlockPlain)
6489 bAfter := s.f.NewBlock(ssa.BlockPlain)
6493 a0 := s.newValue1(cvttab.cvt2U, tt, x)
6496 bThen.AddEdgeTo(bAfter)
6500 y := s.newValue2(cvttab.subf, ft, x, cutoff)
6501 y = s.newValue1(cvttab.cvt2U, tt, y)
6502 z := cvttab.intValue(s, tt, int64(-cvttab.cutoff))
6503 a1 := s.newValue2(cvttab.or, tt, y, z)
6506 bElse.AddEdgeTo(bAfter)
6508 s.startBlock(bAfter)
6509 return s.variable(n, n.Type())
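// Equivalent Go sketch (float64 -> uint64 case, illustrative):
//
//	func f64toU64(x float64) uint64 {
//		const cutoff = 1 << 63
//		if x < cutoff {
//			return uint64(int64(x))
//		}
//		y := x - cutoff
//		return uint64(int64(y)) | (1 << 63) // -cutoff has only the sign bit set
//	}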
6512 // dottype generates SSA for a type assertion node.
6513 // commaok indicates whether to panic or return a bool.
6514 // If commaok is false, resok will be nil.
6515 func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Value) {
6516 iface := s.expr(n.X) // input interface
6517 target := s.reflectType(n.Type()) // target type
6518 var targetItab *ssa.Value
6520 targetItab = s.expr(n.ITab)
6522 return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, nil, target, targetItab, commaok, n.Descriptor)
6525 func (s *state) dynamicDottype(n *ir.DynamicTypeAssertExpr, commaok bool) (res, resok *ssa.Value) {
6526 iface := s.expr(n.X)
6527 var source, target, targetItab *ssa.Value
6528 if n.SrcRType != nil {
6529 source = s.expr(n.SrcRType)
6531 if !n.X.Type().IsEmptyInterface() && !n.Type().IsInterface() {
6532 byteptr := s.f.Config.Types.BytePtr
6533 targetItab = s.expr(n.ITab)
6534 // TODO(mdempsky): Investigate whether compiling n.RType could be
6535 // better than loading itab.typ.
6536 target = s.load(byteptr, s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), targetItab)) // itab.typ
6538 target = s.expr(n.RType)
6540 return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, source, target, targetItab, commaok, nil)
6543 // dottype1 implements an x.(T) operation. iface is the argument (x), dst is the type we're asserting to (T)
6544 // and src is the type we're asserting from.
6545 // source is the *runtime._type of src
6546 // target is the *runtime._type of dst.
6547 // If src is a nonempty interface and dst is not an interface, targetItab is an itab representing (dst, src). Otherwise it is nil.
6548 // commaok is true if the caller wants a boolean success value. Otherwise, the generated code panics if the conversion fails.
6549 // descriptor is a compiler-allocated internal/abi.TypeAssert whose address is passed to runtime.typeAssert when
6550 // the target type is a compile-time-known non-empty interface. It may be nil.
6551 func (s *state) dottype1(pos src.XPos, src, dst *types.Type, iface, source, target, targetItab *ssa.Value, commaok bool, descriptor *obj.LSym) (res, resok *ssa.Value) {
6552 typs := s.f.Config.Types
6553 byteptr := typs.BytePtr
6554 if dst.IsInterface() {
6555 if dst.IsEmptyInterface() {
6556 // Converting to an empty interface.
6557 // Input could be an empty or nonempty interface.
6558 if base.Debug.TypeAssert > 0 {
6559 base.WarnfAt(pos, "type assertion inlined")
6562 // Get itab/type field from input.
6563 itab := s.newValue1(ssa.OpITab, byteptr, iface)
6564 // Conversion succeeds iff that field is not nil.
6565 cond := s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], itab, s.constNil(byteptr))
6567 if src.IsEmptyInterface() && commaok {
6568 // Converting empty interface to empty interface with ,ok is just a nil check.
6572 // Branch on nilness.
6574 b.Kind = ssa.BlockIf
6576 b.Likely = ssa.BranchLikely
6577 bOk := s.f.NewBlock(ssa.BlockPlain)
6578 bFail := s.f.NewBlock(ssa.BlockPlain)
6583 // On failure, panic by calling panicnildottype.
6585 s.rtcall(ir.Syms.Panicnildottype, false, nil, target)
6587 // On success, return (perhaps modified) input interface.
6589 if src.IsEmptyInterface() {
6590 res = iface // Use input interface unchanged.
6593 // Load type out of itab, build interface with existing idata.
6594 off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab)
6595 typ := s.load(byteptr, off)
6596 idata := s.newValue1(ssa.OpIData, byteptr, iface)
6597 res = s.newValue2(ssa.OpIMake, dst, typ, idata)
6602 // nonempty -> empty
6603 // Need to load type from itab
6604 off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab)
6605 s.vars[typVar] = s.load(byteptr, off)
6608 // itab is nil, might as well use that as the nil result.
6610 s.vars[typVar] = itab
6614 bEnd := s.f.NewBlock(ssa.BlockPlain)
6616 bFail.AddEdgeTo(bEnd)
6618 idata := s.newValue1(ssa.OpIData, byteptr, iface)
6619 res = s.newValue2(ssa.OpIMake, dst, s.variable(typVar, byteptr), idata)
6621 delete(s.vars, typVar) // no practical effect, just to indicate typVar is no longer live.
6624 // converting to a nonempty interface needs a runtime call.
6625 if base.Debug.TypeAssert > 0 {
6626 base.WarnfAt(pos, "type assertion not inlined")
6629 itab := s.newValue1(ssa.OpITab, byteptr, iface)
6630 data := s.newValue1(ssa.OpIData, types.Types[types.TUNSAFEPTR], iface)
6632 // First, check for nil.
6633 bNil := s.f.NewBlock(ssa.BlockPlain)
6634 bNonNil := s.f.NewBlock(ssa.BlockPlain)
6635 bMerge := s.f.NewBlock(ssa.BlockPlain)
6636 cond := s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], itab, s.constNil(byteptr))
6638 b.Kind = ssa.BlockIf
6640 b.Likely = ssa.BranchLikely
6641 b.AddEdgeTo(bNonNil)
6646 s.vars[typVar] = itab // which will be nil
6650 // Panic if input is nil.
6651 s.rtcall(ir.Syms.Panicnildottype, false, nil, target)
6654 // Get typ, possibly by loading out of itab.
6655 s.startBlock(bNonNil)
6657 if !src.IsEmptyInterface() {
6658 typ = s.load(byteptr, s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab))
6661 // Check the cache first.
6663 if descriptor != nil {
6664 d = s.newValue1A(ssa.OpAddr, byteptr, descriptor, s.sb)
6665 if base.Flag.N == 0 && rtabi.UseInterfaceSwitchCache(Arch.LinkArch.Name) {
6666 // Note: we can only use the cache if we have the right atomic load instruction.
6667 // Double-check that here.
6668 if _, ok := intrinsics[intrinsicKey{Arch.LinkArch.Arch, "runtime/internal/atomic", "Loadp"}]; !ok {
6669 s.Fatalf("atomic load not available")
6671 // Pick right size ops.
6672 var mul, and, add, zext ssa.Op
6673 if s.config.PtrSize == 4 {
6682 zext = ssa.OpZeroExt32to64
6685 loopHead := s.f.NewBlock(ssa.BlockPlain)
6686 loopBody := s.f.NewBlock(ssa.BlockPlain)
6687 cacheHit := s.f.NewBlock(ssa.BlockPlain)
6688 cacheMiss := s.f.NewBlock(ssa.BlockPlain)
6690 // Load cache pointer out of descriptor, with an atomic load so
6691 // we ensure that we see a fully written cache.
6692 atomicLoad := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(typs.BytePtr, types.TypeMem), d, s.mem())
6693 cache := s.newValue1(ssa.OpSelect0, typs.BytePtr, atomicLoad)
6694 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, atomicLoad)
6696 // Load hash from type or itab.
6698 if src.IsEmptyInterface() {
6699 hash = s.newValue2(ssa.OpLoad, typs.UInt32, s.newValue1I(ssa.OpOffPtr, typs.UInt32Ptr, 2*s.config.PtrSize, typ), s.mem())
6701 hash = s.newValue2(ssa.OpLoad, typs.UInt32, s.newValue1I(ssa.OpOffPtr, typs.UInt32Ptr, 2*s.config.PtrSize, itab), s.mem())
6703 hash = s.newValue1(zext, typs.Uintptr, hash)
6704 s.vars[hashVar] = hash
6705 // Load mask from cache.
6706 mask := s.newValue2(ssa.OpLoad, typs.Uintptr, cache, s.mem())
6707 // Jump to loop head.
6709 b.AddEdgeTo(loopHead)
6711 // At loop head, get pointer to the cache entry.
6712 // e := &cache.Entries[hash&mask]
6713 s.startBlock(loopHead)
6714 idx := s.newValue2(and, typs.Uintptr, s.variable(hashVar, typs.Uintptr), mask)
6715 idx = s.newValue2(mul, typs.Uintptr, idx, s.uintptrConstant(uint64(2*s.config.PtrSize)))
6716 idx = s.newValue2(add, typs.Uintptr, idx, s.uintptrConstant(uint64(s.config.PtrSize)))
6717 e := s.newValue2(ssa.OpAddPtr, typs.UintptrPtr, cache, idx)
6719 s.vars[hashVar] = s.newValue2(add, typs.Uintptr, s.variable(hashVar, typs.Uintptr), s.uintptrConstant(1))
6721 // Look for a cache hit.
6722 // if e.Typ == typ { goto hit }
6723 eTyp := s.newValue2(ssa.OpLoad, typs.Uintptr, e, s.mem())
6724 cmp1 := s.newValue2(ssa.OpEqPtr, typs.Bool, typ, eTyp)
6726 b.Kind = ssa.BlockIf
6728 b.AddEdgeTo(cacheHit)
6729 b.AddEdgeTo(loopBody)
6731 // Look for an empty entry, the tombstone for this hash table.
6732 // if e.Typ == nil { goto miss }
6733 s.startBlock(loopBody)
6734 cmp2 := s.newValue2(ssa.OpEqPtr, typs.Bool, eTyp, s.constNil(typs.BytePtr))
6736 b.Kind = ssa.BlockIf
6738 b.AddEdgeTo(cacheMiss)
6739 b.AddEdgeTo(loopHead)
6741 // On a hit, load the data fields of the cache entry.
6743 s.startBlock(cacheHit)
6744 eItab := s.newValue2(ssa.OpLoad, typs.BytePtr, s.newValue1I(ssa.OpOffPtr, typs.BytePtrPtr, s.config.PtrSize, e), s.mem())
6745 s.vars[typVar] = eItab
6749 // On a miss, call into the runtime to get the answer.
6750 s.startBlock(cacheMiss)
6754 // Call into runtime to get itab for result.
6755 if descriptor != nil {
6756 itab = s.rtcall(ir.Syms.TypeAssert, true, []*types.Type{byteptr}, d, typ)[0]
6760 fn = ir.Syms.AssertE2I2
6762 fn = ir.Syms.AssertE2I
6764 itab = s.rtcall(fn, true, []*types.Type{byteptr}, target, typ)[0]
6766 s.vars[typVar] = itab
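// Sketch of the cache probe built above (illustrative pseudo-Go;
// runtimeTypeAssert stands for the runtime.typeAssert call made on a miss):
//
//	e := &cache.Entries[hash&mask]
//	for {
//		if e.Typ == typ { // hit: reuse the cached answer
//			itab = e.Itab
//			break
//		}
//		if e.Typ == nil { // empty slot: miss
//			itab = runtimeTypeAssert(d, typ)
//			break
//		}
//		hash++ // linear probing; the next iteration wraps via hash&mask
//		e = &cache.Entries[hash&mask]
//	}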
6770 // Build resulting interface.
6771 s.startBlock(bMerge)
6772 itab = s.variable(typVar, byteptr)
6775 ok = s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], itab, s.constNil(byteptr))
6777 return s.newValue2(ssa.OpIMake, dst, itab, data), ok
6780 if base.Debug.TypeAssert > 0 {
6781 base.WarnfAt(pos, "type assertion inlined")
6784 // Converting to a concrete type.
6785 direct := types.IsDirectIface(dst)
6786 itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface
6787 if base.Debug.TypeAssert > 0 {
6788 base.WarnfAt(pos, "type assertion inlined")
6790 var wantedFirstWord *ssa.Value
6791 if src.IsEmptyInterface() {
6792 // Looking for pointer to target type.
6793 wantedFirstWord = target
6795 // Looking for pointer to itab for target type and source interface.
6796 wantedFirstWord = targetItab
6799 var tmp ir.Node // temporary for use with large types
6800 var addr *ssa.Value // address of tmp
6801 if commaok && !ssa.CanSSA(dst) {
6802 // unSSAable type, use temporary.
6803 // TODO: get rid of some of these temporaries.
6804 tmp, addr = s.temp(pos, dst)
6807 cond := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], itab, wantedFirstWord)
6809 b.Kind = ssa.BlockIf
6811 b.Likely = ssa.BranchLikely
6813 bOk := s.f.NewBlock(ssa.BlockPlain)
6814 bFail := s.f.NewBlock(ssa.BlockPlain)
6819 // on failure, panic by calling panicdottype
6823 taddr = s.reflectType(src)
6825 if src.IsEmptyInterface() {
6826 s.rtcall(ir.Syms.PanicdottypeE, false, nil, itab, target, taddr)
6828 s.rtcall(ir.Syms.PanicdottypeI, false, nil, itab, target, taddr)
6831 // on success, return data from interface
6834 return s.newValue1(ssa.OpIData, dst, iface), nil
6836 p := s.newValue1(ssa.OpIData, types.NewPtr(dst), iface)
6837 return s.load(dst, p), nil
6840 // commaok is the more complicated case because we have
6841 // a control flow merge point.
6842 bEnd := s.f.NewBlock(ssa.BlockPlain)
6843 // Note that we need a new valVar each time (unlike okVar where we can
6844 // reuse the variable) because it might have a different type every time.
6845 valVar := ssaMarker("val")
6847 // type assertion succeeded
6851 s.vars[valVar] = s.newValue1(ssa.OpIData, dst, iface)
6853 p := s.newValue1(ssa.OpIData, types.NewPtr(dst), iface)
6854 s.vars[valVar] = s.load(dst, p)
6857 p := s.newValue1(ssa.OpIData, types.NewPtr(dst), iface)
6858 s.move(dst, addr, p)
6860 s.vars[okVar] = s.constBool(true)
6864 // type assertion failed
6867 s.vars[valVar] = s.zeroVal(dst)
6871 s.vars[okVar] = s.constBool(false)
6873 bFail.AddEdgeTo(bEnd)
6878 res = s.variable(valVar, dst)
6879 delete(s.vars, valVar) // no practical effect, just to indicate valVar is no longer live.
6881 res = s.load(dst, addr)
6883 resok = s.variable(okVar, types.Types[types.TBOOL])
6884 delete(s.vars, okVar) // ditto
6888 // temp allocates a temp of type t at position pos
6889 func (s *state) temp(pos src.XPos, t *types.Type) (*ir.Name, *ssa.Value) {
6890 tmp := typecheck.TempAt(pos, s.curfn, t)
6891 if t.HasPointers() {
6892 s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem())
6898 // variable returns the value of a variable at the current location.
6899 func (s *state) variable(n ir.Node, t *types.Type) *ssa.Value {
6909 if s.curBlock == s.f.Entry {
6910 // No variable should be live at entry.
6911 s.f.Fatalf("value %v (%v) incorrectly live at entry", n, v)
6913 // Make a FwdRef, which records a value that's live on block input.
6914 // We'll find the matching definition as part of insertPhis.
6915 v = s.newValue0A(ssa.OpFwdRef, t, fwdRefAux{N: n})
6917 if n.Op() == ir.ONAME {
6918 s.addNamedValue(n.(*ir.Name), v)
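// Illustration: reading a variable whose definitions are all in predecessor
// blocks yields an OpFwdRef that insertPhis later resolves. For example
// (user code):
//
//	var x int
//	if c {
//		x = 1
//	} else {
//		x = 2
//	}
//	_ = x // reading x here emits FwdRef{x}, later replaced by a Phi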
6923 func (s *state) mem() *ssa.Value {
6924 return s.variable(memVar, types.TypeMem)
6927 func (s *state) addNamedValue(n *ir.Name, v *ssa.Value) {
6928 if n.Class == ir.Pxxx {
6929 // Don't track our marker nodes (memVar etc.).
6932 if ir.IsAutoTmp(n) {
6933 // Don't track temporary variables.
6936 if n.Class == ir.PPARAMOUT {
6937 // Don't track named output values. This prevents return values
6938 // from being assigned too early. See #14591 and #14762. TODO: allow this.
6941 loc := ssa.LocalSlot{N: n, Type: n.Type(), Off: 0}
6942 values, ok := s.f.NamedValues[loc]
6944 s.f.Names = append(s.f.Names, &loc)
6945 s.f.CanonicalLocalSlots[loc] = &loc
6947 s.f.NamedValues[loc] = append(values, v)
6950 // Branch is an unresolved branch.
6951 type Branch struct {
6952 P *obj.Prog // branch instruction
6953 B *ssa.Block // target
6956 // State contains state needed during Prog generation.
6962 // Branches remembers all the branch instructions we've seen
6963 // and where they would like to go.
6966 // JumpTables remembers all the jump tables we've seen.
6967 JumpTables []*ssa.Block
6969 // bstart remembers where each block starts (indexed by block ID)
6972 maxarg int64 // largest frame size for arguments to calls made by the function
6974 // Map from GC safe points to liveness index, generated by
6975 // liveness analysis.
6976 livenessMap liveness.Map
6978 // partLiveArgs includes arguments that may be partially live, for which we
6979 // need to generate instructions that spill the argument registers.
6980 partLiveArgs map[*ir.Name]bool
6982 // lineRunStart records the beginning of the current run of instructions
6983 // within a single block sharing the same line number.
6984 // Used to move statement marks to the beginning of such runs.
6985 lineRunStart *obj.Prog
6987 // wasm: The number of values on the WebAssembly stack. This is only used as a safeguard.
6988 OnWasmStackSkipped int
6991 func (s *State) FuncInfo() *obj.FuncInfo {
6992 return s.pp.CurFunc.LSym.Func()
6995 // Prog appends a new Prog.
6996 func (s *State) Prog(as obj.As) *obj.Prog {
6998 if objw.LosesStmtMark(as) {
7001 // Float a statement start to the beginning of any same-line run.
7002 // lineRunStart is reset at block boundaries, which appears to work well.
7003 if s.lineRunStart == nil || s.lineRunStart.Pos.Line() != p.Pos.Line() {
7005 } else if p.Pos.IsStmt() == src.PosIsStmt {
7006 s.lineRunStart.Pos = s.lineRunStart.Pos.WithIsStmt()
7007 p.Pos = p.Pos.WithNotStmt()
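// Illustrative example (not from the original source): if three Progs share
// line 42 and only the second carries the statement mark, the mark migrates
// to the first Prog of the run and is cleared here on the second, so a
// debugger stops once, at the start of the line.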
7012 // Pc returns the current Prog.
7013 func (s *State) Pc() *obj.Prog {
7017 // SetPos sets the current source position.
7018 func (s *State) SetPos(pos src.XPos) {
7022 // Br emits a single branch instruction and returns the instruction.
7023 // Not all architectures need the returned instruction, but otherwise
7024 // the boilerplate is common to all.
7025 func (s *State) Br(op obj.As, target *ssa.Block) *obj.Prog {
7027 p.To.Type = obj.TYPE_BRANCH
7028 s.Branches = append(s.Branches, Branch{P: p, B: target})
7032 // DebugFriendlySetPosFrom adjusts Pos.IsStmt subject to heuristics
7033 // that reduce "jumpy" line number churn when debugging.
7034 // Spill/fill/copy instructions from the register allocator,
7035 // phi functions, and instructions with a no-pos position
7036 // are examples of instructions that can cause churn.
7037 func (s *State) DebugFriendlySetPosFrom(v *ssa.Value) {
7039 case ssa.OpPhi, ssa.OpCopy, ssa.OpLoadReg, ssa.OpStoreReg:
7040 // These are not statements
7041 s.SetPos(v.Pos.WithNotStmt())
7044 if p != src.NoXPos {
7045 // If the position is defined, update the position.
7046 // Also convert default IsStmt to NotStmt; only
7047 // explicit statement boundaries should appear
7048 // in the generated code.
7049 if p.IsStmt() != src.PosIsStmt {
7050 if s.pp.Pos.IsStmt() == src.PosIsStmt && s.pp.Pos.SameFileAndLine(p) {
7051 // If s.pp.Pos already has a statement mark, then it was set here (below) for
7052 // the previous value. If an actual instruction had been emitted for that
7053 // value, then the statement mark would have been reset. Since the statement
7054 // mark of s.pp.Pos was not reset, this position (file/line) still needs a
7055 // statement mark on an instruction. If file and line for this value are
7056 // the same as the previous value, then the first instruction for this
7057 // value will work to take the statement mark. Return early to avoid
7058 // resetting the statement mark.
7060 // The reset of s.pp.Pos occurs in (*Progs).Prog() -- if it emits
7061 // an instruction, and the instruction's statement mark was set,
7062 // and it is not one of the LosesStmtMark instructions,
7063 // then Prog() resets the statement mark on the (*Progs).Pos.
7067 // Calls use the pos attached to v, but copy the statement mark from State
7071 s.SetPos(s.pp.Pos.WithNotStmt())
7076 // emitArgInfo emits the argument info (locations on stack) for traceback.
7077 func emitArgInfo(e *ssafn, f *ssa.Func, pp *objw.Progs) {
7078 ft := e.curfn.Type()
7079 if ft.NumRecvs() == 0 && ft.NumParams() == 0 {
7083 x := EmitArgInfo(e.curfn, f.OwnAux.ABIInfo())
7084 x.Set(obj.AttrContentAddressable, true)
7085 e.curfn.LSym.Func().ArgInfo = x
7087 // Emit a funcdata pointing at the arg info data.
7088 p := pp.Prog(obj.AFUNCDATA)
7089 p.From.SetConst(rtabi.FUNCDATA_ArgInfo)
7090 p.To.Type = obj.TYPE_MEM
7091 p.To.Name = obj.NAME_EXTERN
7095 // EmitArgInfo generates the argument info (locations on stack) of f for traceback and returns its symbol.
7096 func EmitArgInfo(f *ir.Func, abiInfo *abi.ABIParamResultInfo) *obj.LSym {
7097 x := base.Ctxt.Lookup(fmt.Sprintf("%s.arginfo%d", f.LSym.Name, f.ABI))
7098 // NOTE: do not set ContentAddressable here. This may be referenced from
7099 // assembly code by name (in this case f is a declaration).
7100 // Instead, set it in emitArgInfo above.
7102 PtrSize := int64(types.PtrSize)
7103 uintptrTyp := types.Types[types.TUINTPTR]
7105 isAggregate := func(t *types.Type) bool {
7106 return t.IsStruct() || t.IsArray() || t.IsComplex() || t.IsInterface() || t.IsString() || t.IsSlice()
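// For example, complex128, [4]byte, string, and []int all count as aggregates
// here, while plain scalars and pointers do not.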
7109 // Populate the data.
7110 // The data is a stream of bytes, which contains the offsets and sizes of the
7111 // non-aggregate arguments or non-aggregate fields/elements of aggregate-typed
7112 // arguments, along with special "operators". Specifically,
7113 // - for each non-aggregate arg/field/element, its offset from FP (1 byte) and
7114 //   size (1 byte)
7115 // - special operators:
7116 // - 0xff - end of sequence
7117 // - 0xfe - print { (at the start of an aggregate-typed argument)
7118 // - 0xfd - print } (at the end of an aggregate-typed argument)
7119 // - 0xfc - print ... (more args/fields/elements)
7120 // - 0xfb - print _ (offset too large)
7121 // These constants need to be in sync with runtime/traceback.go:printArgs.
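// As an illustrative sketch (not from the original source): for
// func f(i uint64, s string), with i at FP+0 and the string header at FP+8
// on a 64-bit arch, the stream would be roughly
//   00 08  fe  08 08  10 08  fd  ff
// that is, i (offset 0, size 8), '{', s.ptr (offset 8, size 8),
// s.len (offset 16, size 8), '}', end of sequence.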
7127 _offsetTooLarge = 0xfb
7128 _special = 0xf0 // above this are operators, below this are ordinary offsets
7132 limit = 10 // print no more than 10 args/components
7133 maxDepth = 5 // no more than 5 layers of nesting
7135 // maxLen is a (conservative) upper bound of the byte stream length. For
7136 // each arg/component, it has no more than 2 bytes of data (size, offset),
7137 // and no more than one {, }, ... at each level (it cannot have both the
7138 // data and ... unless it is the last one, just be conservative). Plus 1
7139 // for the end-of-sequence byte.
7140 maxLen = (maxDepth*3+2)*limit + 1
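// With the limits above this evaluates to (5*3+2)*10 + 1 = 171 bytes.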
7145 writebyte := func(o uint8) { wOff = objw.Uint8(x, wOff, o) }
7147 // Write one non-aggregate arg/field/element.
7148 write1 := func(sz, offset int64) {
7149 if offset >= _special {
7150 writebyte(_offsetTooLarge)
7152 writebyte(uint8(offset))
7153 writebyte(uint8(sz))
7158 // Visit t recursively and write it out.
7159 // Returns whether to continue visiting.
7160 var visitType func(baseOffset int64, t *types.Type, depth int) bool
7161 visitType = func(baseOffset int64, t *types.Type, depth int) bool {
7163 writebyte(_dotdotdot)
7166 if !isAggregate(t) {
7167 write1(t.Size(), baseOffset)
7170 writebyte(_startAgg)
7172 if depth >= maxDepth {
7173 writebyte(_dotdotdot)
7179 case t.IsInterface(), t.IsString():
7180 _ = visitType(baseOffset, uintptrTyp, depth) &&
7181 visitType(baseOffset+PtrSize, uintptrTyp, depth)
7183 _ = visitType(baseOffset, uintptrTyp, depth) &&
7184 visitType(baseOffset+PtrSize, uintptrTyp, depth) &&
7185 visitType(baseOffset+PtrSize*2, uintptrTyp, depth)
7187 _ = visitType(baseOffset, types.FloatForComplex(t), depth) &&
7188 visitType(baseOffset+t.Size()/2, types.FloatForComplex(t), depth)
7190 if t.NumElem() == 0 {
7191 n++ // {} counts as a component
7194 for i := int64(0); i < t.NumElem(); i++ {
7195 if !visitType(baseOffset, t.Elem(), depth) {
7198 baseOffset += t.Elem().Size()
7201 if t.NumFields() == 0 {
7202 n++ // {} counts as a component
7205 for _, field := range t.Fields() {
7206 if !visitType(baseOffset+field.Offset, field.Type, depth) {
7216 if strings.Contains(f.LSym.Name, "[") {
7217 // Skip the dictionary argument - it is implicit and the user doesn't need to see it.
7221 for _, a := range abiInfo.InParams()[start:] {
7222 if !visitType(a.FrameOffset(abiInfo), a.Type, 0) {
7228 base.Fatalf("ArgInfo too large")
7234 // emitWrappedFuncInfo emits the info of the wrapped function for a wrapper.
7235 func emitWrappedFuncInfo(e *ssafn, pp *objw.Progs) {
7236 if base.Ctxt.Flag_linkshared {
7237 // Relative reference (SymPtrOff) to another shared object doesn't work.
7242 wfn := e.curfn.WrappedFunc
7247 wsym := wfn.Linksym()
7248 x := base.Ctxt.LookupInit(fmt.Sprintf("%s.wrapinfo", wsym.Name), func(x *obj.LSym) {
7249 objw.SymPtrOff(x, 0, wsym)
7250 x.Set(obj.AttrContentAddressable, true)
7252 e.curfn.LSym.Func().WrapInfo = x
7254 // Emit a funcdata pointing at the wrap info data.
7255 p := pp.Prog(obj.AFUNCDATA)
7256 p.From.SetConst(rtabi.FUNCDATA_WrapInfo)
7257 p.To.Type = obj.TYPE_MEM
7258 p.To.Name = obj.NAME_EXTERN
7262 // genssa appends entries to pp for each instruction in f.
7263 func genssa(f *ssa.Func, pp *objw.Progs) {
7265 s.ABI = f.OwnAux.Fn.ABI()
7267 e := f.Frontend().(*ssafn)
7269 s.livenessMap, s.partLiveArgs = liveness.Compute(e.curfn, f, e.stkptrsize, pp)
7270 emitArgInfo(e, f, pp)
7271 argLiveBlockMap, argLiveValueMap := liveness.ArgLiveness(e.curfn, f, pp)
7273 openDeferInfo := e.curfn.LSym.Func().OpenCodedDeferInfo
7274 if openDeferInfo != nil {
7275 // This function uses open-coded defers -- write out the funcdata
7276 // info that we computed at the end of genssa.
7277 p := pp.Prog(obj.AFUNCDATA)
7278 p.From.SetConst(rtabi.FUNCDATA_OpenCodedDeferInfo)
7279 p.To.Type = obj.TYPE_MEM
7280 p.To.Name = obj.NAME_EXTERN
7281 p.To.Sym = openDeferInfo
7284 emitWrappedFuncInfo(e, pp)
7286 // Remember where each block starts.
7287 s.bstart = make([]*obj.Prog, f.NumBlocks())
7289 var progToValue map[*obj.Prog]*ssa.Value
7290 var progToBlock map[*obj.Prog]*ssa.Block
7291 var valueToProgAfter []*obj.Prog // The first Prog following computation of a value v; v is visible at this point.
7292 gatherPrintInfo := f.PrintOrHtmlSSA || ssa.GenssaDump[f.Name]
7293 if gatherPrintInfo {
7294 progToValue = make(map[*obj.Prog]*ssa.Value, f.NumValues())
7295 progToBlock = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
7296 f.Logf("genssa %s\n", f.Name)
7297 progToBlock[s.pp.Next] = f.Blocks[0]
7300 if base.Ctxt.Flag_locationlists {
7301 if cap(f.Cache.ValueToProgAfter) < f.NumValues() {
7302 f.Cache.ValueToProgAfter = make([]*obj.Prog, f.NumValues())
7304 valueToProgAfter = f.Cache.ValueToProgAfter[:f.NumValues()]
7305 for i := range valueToProgAfter {
7306 valueToProgAfter[i] = nil
7310 // If the very first instruction is not tagged as a statement,
7311 // debuggers may attribute it to the previous function in the program.
7312 firstPos := src.NoXPos
7313 for _, v := range f.Entry.Values {
7314 if v.Pos.IsStmt() == src.PosIsStmt && v.Op != ssa.OpArg && v.Op != ssa.OpArgIntReg && v.Op != ssa.OpArgFloatReg && v.Op != ssa.OpLoadReg && v.Op != ssa.OpStoreReg {
7316 v.Pos = firstPos.WithDefaultStmt()
7321 // inlMarks has an entry for each Prog that implements an inline mark.
7322 // It maps from that Prog to the global inlining id of the inlined body
7323 // which should unwind to this Prog's location.
7324 var inlMarks map[*obj.Prog]int32
7325 var inlMarkList []*obj.Prog
7327 // inlMarksByPos maps from a (column 1) source position to the set of
7328 // Progs that are in the set above and have that source position.
7329 var inlMarksByPos map[src.XPos][]*obj.Prog
7331 var argLiveIdx int = -1 // argument liveness info index
7333 // Emit basic blocks
7334 for i, b := range f.Blocks {
7335 s.bstart[b.ID] = s.pp.Next
7336 s.lineRunStart = nil
7337 s.SetPos(s.pp.Pos.WithNotStmt()) // It needs a non-empty Pos, but cannot be a statement boundary (yet).
7339 if idx, ok := argLiveBlockMap[b.ID]; ok && idx != argLiveIdx {
7341 p := s.pp.Prog(obj.APCDATA)
7342 p.From.SetConst(rtabi.PCDATA_ArgLiveIndex)
7343 p.To.SetConst(int64(idx))
7346 // Emit values in block
7347 Arch.SSAMarkMoves(&s, b)
7348 for _, v := range b.Values {
7350 s.DebugFriendlySetPosFrom(v)
7352 if v.Op.ResultInArg0() && v.ResultReg() != v.Args[0].Reg() {
7353 v.Fatalf("input[0] and output not in same register %s", v.LongString())
7358 // memory arg needs no code
7360 // input args need no code
7361 case ssa.OpSP, ssa.OpSB:
7363 case ssa.OpSelect0, ssa.OpSelect1, ssa.OpSelectN, ssa.OpMakeResult:
7366 // nothing to do when there's a g register,
7367 // and checkLower complains if there's not
7368 case ssa.OpVarDef, ssa.OpVarLive, ssa.OpKeepAlive, ssa.OpWBend:
7369 // nothing to do; already used by liveness
7373 // nothing to do; no-op conversion for liveness
7374 if v.Args[0].Reg() != v.Reg() {
7375 v.Fatalf("OpConvert should be a no-op: %s; %s", v.Args[0].LongString(), v.LongString())
7378 p := Arch.Ginsnop(s.pp)
7379 if inlMarks == nil {
7380 inlMarks = map[*obj.Prog]int32{}
7381 inlMarksByPos = map[src.XPos][]*obj.Prog{}
7383 inlMarks[p] = v.AuxInt32()
7384 inlMarkList = append(inlMarkList, p)
7385 pos := v.Pos.AtColumn1()
7386 inlMarksByPos[pos] = append(inlMarksByPos[pos], p)
7387 firstPos = src.NoXPos
7390 // Special case for first line in function; move it to the start (which cannot be a register-valued instruction)
7391 if firstPos != src.NoXPos && v.Op != ssa.OpArgIntReg && v.Op != ssa.OpArgFloatReg && v.Op != ssa.OpLoadReg && v.Op != ssa.OpStoreReg {
7393 firstPos = src.NoXPos
7395 // Attach this safe point to the next
7397 s.pp.NextLive = s.livenessMap.Get(v)
7398 s.pp.NextUnsafe = s.livenessMap.GetUnsafe(v)
7400 // let the backend handle it
7401 Arch.SSAGenValue(&s, v)
7404 if idx, ok := argLiveValueMap[v.ID]; ok && idx != argLiveIdx {
7406 p := s.pp.Prog(obj.APCDATA)
7407 p.From.SetConst(rtabi.PCDATA_ArgLiveIndex)
7408 p.To.SetConst(int64(idx))
7411 if base.Ctxt.Flag_locationlists {
7412 valueToProgAfter[v.ID] = s.pp.Next
7415 if gatherPrintInfo {
7416 for ; x != s.pp.Next; x = x.Link {
7421 // If this is an empty infinite loop, stick a hardware NOP in there so that debuggers are less confused.
7422 if s.bstart[b.ID] == s.pp.Next && len(b.Succs) == 1 && b.Succs[0].Block() == b {
7423 p := Arch.Ginsnop(s.pp)
7424 p.Pos = p.Pos.WithIsStmt()
7425 if b.Pos == src.NoXPos {
7426 b.Pos = p.Pos // It needs a file, otherwise a no-file non-zero line causes confusion. See #35652.
7427 if b.Pos == src.NoXPos {
7428 b.Pos = pp.Text.Pos // Sometimes p.Pos is empty. See #35695.
7431 b.Pos = b.Pos.WithBogusLine() // Debuggers are not good with infinite loops; force a change in line number
7434 // Set unsafe mark for any end-of-block generated instructions
7435 // (normally, conditional or unconditional branches).
7436 // This is particularly important for empty blocks, as there
7437 // are no values to inherit the unsafe mark from.
7438 s.pp.NextUnsafe = s.livenessMap.GetUnsafeBlock(b)
7440 // Emit control flow instructions for block
7442 if i < len(f.Blocks)-1 && base.Flag.N == 0 {
7443 // If -N, leave next==nil so every block with successors
7444 // ends in a JMP (except call blocks - plive doesn't like
7445 // select{send,recv} followed by a JMP call). Helps keep
7446 // line numbers for otherwise empty blocks.
7447 next = f.Blocks[i+1]
7451 Arch.SSAGenBlock(&s, b, next)
7452 if gatherPrintInfo {
7453 for ; x != s.pp.Next; x = x.Link {
7458 if f.Blocks[len(f.Blocks)-1].Kind == ssa.BlockExit {
7459 // We need the return address of a panic call to
7460 // still be inside the function in question. So if
7461 // it ends in a call which doesn't return, add a
7462 // nop (which will never execute) after the call.
7465 if openDeferInfo != nil {
7466 // When doing open-coded defers, generate a disconnected call to
7467 // deferreturn and a return. This will be used during panic
7468 // recovery to unwind the stack and return to the runtime.
7469 s.pp.NextLive = s.livenessMap.DeferReturn
7470 p := pp.Prog(obj.ACALL)
7471 p.To.Type = obj.TYPE_MEM
7472 p.To.Name = obj.NAME_EXTERN
7473 p.To.Sym = ir.Syms.Deferreturn
7475 // Load results into registers, so that when a deferred function
7476 // recovers a panic, it returns to the caller with the right results.
7477 // The results are already in memory, because they are not SSA'd
7478 // when the function has defers (see canSSAName).
7479 for _, o := range f.OwnAux.ABIInfo().OutParams() {
7481 rts, offs := o.RegisterTypesAndOffsets()
7482 for i := range o.Registers {
7483 Arch.LoadRegResult(&s, f, rts[i], ssa.ObjRegForAbiReg(o.Registers[i], f.Config), n, offs[i])
7490 if inlMarks != nil {
7493 // We have some inline marks. Try to find other instructions we're
7494 // going to emit anyway, and use those instructions instead of the
7495 // inline marks.
7496 for p := pp.Text; p != nil; p = p.Link {
7497 if p.As == obj.ANOP || p.As == obj.AFUNCDATA || p.As == obj.APCDATA || p.As == obj.ATEXT || p.As == obj.APCALIGN || Arch.LinkArch.Family == sys.Wasm {
7498 // Don't use 0-sized instructions as inline marks, because we need
7499 // to identify inline mark instructions by pc offset.
7500 // (Some of these instructions are sometimes zero-sized, sometimes not.
7501 // We must not use anything that even might be zero-sized.)
7502 // TODO: are there others?
7505 if _, ok := inlMarks[p]; ok {
7506 // Don't use inline marks themselves. We don't know
7507 // whether they will be zero-sized or not yet.
7510 if p.As == obj.ACALL || p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO {
7513 pos := p.Pos.AtColumn1()
7514 s := inlMarksByPos[pos]
7518 for _, m := range s {
7519 // We found an instruction with the same source position as
7520 // some of the inline marks.
7521 // Use this instruction instead.
7522 p.Pos = p.Pos.WithIsStmt() // promote position to a statement
7523 pp.CurFunc.LSym.Func().AddInlMark(p, inlMarks[m])
7524 // Make the inline mark a real nop, so it doesn't generate any code.
7530 delete(inlMarksByPos, pos)
7532 // Any unmatched inline marks now need to be added to the inlining tree (and will generate a nop instruction).
7533 for _, p := range inlMarkList {
7534 if p.As != obj.ANOP {
7535 pp.CurFunc.LSym.Func().AddInlMark(p, inlMarks[p])
7539 if e.stksize == 0 && !hasCall {
7540 // Frameless leaf function. It doesn't need any preamble,
7541 // so make sure its first instruction isn't from an inlined callee.
7542 // If it is, add a nop at the start of the function with a position
7543 // equal to the start of the function.
7544 // This ensures that runtime.FuncForPC(uintptr(reflect.ValueOf(fn).Pointer())).Name()
7545 // returns the right answer. See issue 58300.
7546 for p := pp.Text; p != nil; p = p.Link {
7547 if p.As == obj.AFUNCDATA || p.As == obj.APCDATA || p.As == obj.ATEXT || p.As == obj.ANOP {
7550 if base.Ctxt.PosTable.Pos(p.Pos).Base().InliningIndex() >= 0 {
7551 // Make a real (not 0-sized) nop.
7552 nop := Arch.Ginsnop(pp)
7553 nop.Pos = e.curfn.Pos().WithIsStmt()
7555 // Unfortunately, Ginsnop puts the instruction at the
7556 // end of the list. Move it up to just before p.
7558 // Unlink from the current list.
7559 for x := pp.Text; x != nil; x = x.Link {
7565 // Splice in right before p.
7566 for x := pp.Text; x != nil; x = x.Link {
7579 if base.Ctxt.Flag_locationlists {
7580 var debugInfo *ssa.FuncDebug
7581 debugInfo = e.curfn.DebugInfo.(*ssa.FuncDebug)
7582 if e.curfn.ABI == obj.ABIInternal && base.Flag.N != 0 {
7583 ssa.BuildFuncDebugNoOptimized(base.Ctxt, f, base.Debug.LocationLists > 1, StackOffset, debugInfo)
7585 ssa.BuildFuncDebug(base.Ctxt, f, base.Debug.LocationLists, StackOffset, debugInfo)
7588 idToIdx := make([]int, f.NumBlocks())
7589 for i, b := range f.Blocks {
7592 // Note that at this moment, Prog.Pc is a sequence number; it's
7593 // not a real PC until after assembly, so this mapping has to
7594 // be done later.
7595 debugInfo.GetPC = func(b, v ssa.ID) int64 {
7597 case ssa.BlockStart.ID:
7598 if b == f.Entry.ID {
7599 return 0 // Start at the very beginning, at the assembler-generated prologue.
7600 // this should only happen for function args (ssa.OpArg)
7603 case ssa.BlockEnd.ID:
7604 blk := f.Blocks[idToIdx[b]]
7605 nv := len(blk.Values)
7606 return valueToProgAfter[blk.Values[nv-1].ID].Pc
7607 case ssa.FuncEnd.ID:
7608 return e.curfn.LSym.Size
7610 return valueToProgAfter[v].Pc
7615 // Resolve branches, and relax DefaultStmt into NotStmt
7616 for _, br := range s.Branches {
7617 br.P.To.SetTarget(s.bstart[br.B.ID])
7618 if br.P.Pos.IsStmt() != src.PosIsStmt {
7619 br.P.Pos = br.P.Pos.WithNotStmt()
7620 } else if v0 := br.B.FirstPossibleStmtValue(); v0 != nil && v0.Pos.Line() == br.P.Pos.Line() && v0.Pos.IsStmt() == src.PosIsStmt {
7621 br.P.Pos = br.P.Pos.WithNotStmt()
7626 // Resolve jump table destinations.
7627 for _, jt := range s.JumpTables {
7628 // Convert from *Block targets to *Prog targets.
7629 targets := make([]*obj.Prog, len(jt.Succs))
7630 for i, e := range jt.Succs {
7631 targets[i] = s.bstart[e.Block().ID]
7633 // Add to list of jump tables to be resolved at assembly time.
7634 // The assembler converts from *Prog entries to absolute addresses
7635 // once it knows instruction byte offsets.
7636 fi := pp.CurFunc.LSym.Func()
7637 fi.JumpTables = append(fi.JumpTables, obj.JumpTable{Sym: jt.Aux.(*obj.LSym), Targets: targets})
7640 if e.log { // spew to stdout
7642 for p := pp.Text; p != nil; p = p.Link {
7643 if p.Pos.IsKnown() && p.InnermostFilename() != filename {
7644 filename = p.InnermostFilename()
7645 f.Logf("# %s\n", filename)
7649 if v, ok := progToValue[p]; ok {
7651 } else if b, ok := progToBlock[p]; ok {
7654 s = " " // most value and branch strings are 2-3 characters long
7656 f.Logf(" %-6s\t%.5d (%s)\t%s\n", s, p.Pc, p.InnermostLineNumber(), p.InstructionString())
7659 if f.HTMLWriter != nil { // spew to ssa.html
7660 var buf strings.Builder
7661 buf.WriteString("<code>")
7662 buf.WriteString("<dl class=\"ssa-gen\">")
7664 for p := pp.Text; p != nil; p = p.Link {
7665 // Don't spam every line with the file name, which is often huge.
7666 // Only print changes, and "unknown" is not a change.
7667 if p.Pos.IsKnown() && p.InnermostFilename() != filename {
7668 filename = p.InnermostFilename()
7669 buf.WriteString("<dt class=\"ssa-prog-src\"></dt><dd class=\"ssa-prog\">")
7670 buf.WriteString(html.EscapeString("# " + filename))
7671 buf.WriteString("</dd>")
7674 buf.WriteString("<dt class=\"ssa-prog-src\">")
7675 if v, ok := progToValue[p]; ok {
7676 buf.WriteString(v.HTML())
7677 } else if b, ok := progToBlock[p]; ok {
7678 buf.WriteString("<b>" + b.HTML() + "</b>")
7680 buf.WriteString("</dt>")
7681 buf.WriteString("<dd class=\"ssa-prog\">")
7682 fmt.Fprintf(&buf, "%.5d <span class=\"l%v line-number\">(%s)</span> %s", p.Pc, p.InnermostLineNumber(), p.InnermostLineNumberHTML(), html.EscapeString(p.InstructionString()))
7683 buf.WriteString("</dd>")
7685 buf.WriteString("</dl>")
7686 buf.WriteString("</code>")
7687 f.HTMLWriter.WriteColumn("genssa", "genssa", "ssa-prog", buf.String())
7689 if ssa.GenssaDump[f.Name] {
7690 fi := f.DumpFileForPhase("genssa")
7693 // inliningDiffers reports whether any filename changes or whether any line number except the innermost (last index) changes.
7694 inliningDiffers := func(a, b []src.Pos) bool {
7695 if len(a) != len(b) {
7699 if a[i].Filename() != b[i].Filename() {
7702 if i != len(a)-1 && a[i].Line() != b[i].Line() {
7709 var allPosOld []src.Pos
7710 var allPos []src.Pos
7712 for p := pp.Text; p != nil; p = p.Link {
7713 if p.Pos.IsKnown() {
7715 p.Ctxt.AllPos(p.Pos, func(pos src.Pos) { allPos = append(allPos, pos) })
7716 if inliningDiffers(allPos, allPosOld) {
7717 for _, pos := range allPos {
7718 fmt.Fprintf(fi, "# %s:%d\n", pos.Filename(), pos.Line())
7720 allPos, allPosOld = allPosOld, allPos // swap, not copy, so that they do not share slice storage.
7725 if v, ok := progToValue[p]; ok {
7727 } else if b, ok := progToBlock[p]; ok {
7730 s = " " // most value and branch strings are 2-3 characters long
7732 fmt.Fprintf(fi, " %-6s\t%.5d %s\t%s\n", s, p.Pc, ssa.StmtString(p.Pos), p.InstructionString())
7740 f.HTMLWriter.Close()
7744 func defframe(s *State, e *ssafn, f *ssa.Func) {
7747 s.maxarg = types.RoundUp(s.maxarg, e.stkalign)
7748 frame := s.maxarg + e.stksize
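// Illustrative numbers (not from the original source): with maxarg=24,
// stkalign=16, and stksize=64, maxarg rounds up to 32 and frame starts at 96
// before any architecture-specific padding below.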
7749 if Arch.PadFrame != nil {
7750 frame = Arch.PadFrame(frame)
7753 // Fill in argument and frame size.
7754 pp.Text.To.Type = obj.TYPE_TEXTSIZE
7755 pp.Text.To.Val = int32(types.RoundUp(f.OwnAux.ArgWidth(), int64(types.RegSize)))
7756 pp.Text.To.Offset = frame
7760 // Insert code to spill argument registers if the named slot may be partially
7761 // live. That is, the named slot is considered live by liveness analysis,
7762 // (because a part of it is live), but we may not spill all parts into the
7763 // slot. This can only happen with aggregate-typed arguments that are SSA-able
7764 // and not address-taken (for non-SSA-able or address-taken arguments we always
7765 // spill upfront).
7766 // Note: spilling is unnecessary in the -N/no-optimize case, since all values
7767 // will be considered non-SSAable and spilled up front.
7768 // TODO(register args) Make liveness more fine-grained so that partial spilling is okay.
7769 if f.OwnAux.ABIInfo().InRegistersUsed() != 0 && base.Flag.N == 0 {
7770 // First, see if it is already spilled before it may be live. Look for a spill
7771 // in the entry block up to the first safepoint.
7772 type nameOff struct {
7776 partLiveArgsSpilled := make(map[nameOff]bool)
7777 for _, v := range f.Entry.Values {
7781 if v.Op != ssa.OpStoreReg || v.Args[0].Op != ssa.OpArgIntReg {
7784 n, off := ssa.AutoVar(v)
7785 if n.Class != ir.PPARAM || n.Addrtaken() || !ssa.CanSSA(n.Type()) || !s.partLiveArgs[n] {
7788 partLiveArgsSpilled[nameOff{n, off}] = true
7791 // Then, insert code to spill registers if not already.
7792 for _, a := range f.OwnAux.ABIInfo().InParams() {
7794 if n == nil || n.Addrtaken() || !ssa.CanSSA(n.Type()) || !s.partLiveArgs[n] || len(a.Registers) <= 1 {
7797 rts, offs := a.RegisterTypesAndOffsets()
7798 for i := range a.Registers {
7799 if !rts[i].HasPointers() {
7802 if partLiveArgsSpilled[nameOff{n, offs[i]}] {
7803 continue // already spilled
7805 reg := ssa.ObjRegForAbiReg(a.Registers[i], f.Config)
7806 p = Arch.SpillArgReg(pp, p, f, rts[i], reg, n, offs[i])
7811 // Insert code to zero ambiguously live variables so that the
7812 // garbage collector only sees initialized values when it
7813 // looks for pointers.
7816 // Opaque state for backend to use. Current backends use it to
7817 // keep track of which helper registers have been zeroed.
7820 // Iterate through declarations. Autos are sorted in decreasing
7821 // frame offset order.
7822 for _, n := range e.curfn.Dcl {
7826 if n.Class != ir.PAUTO {
7827 e.Fatalf(n.Pos(), "needzero class %d", n.Class)
7829 if n.Type().Size()%int64(types.PtrSize) != 0 || n.FrameOffset()%int64(types.PtrSize) != 0 || n.Type().Size() == 0 {
7830 e.Fatalf(n.Pos(), "var %L has size %d offset %d", n, n.Type().Size(), n.Offset_)
7833 if lo != hi && n.FrameOffset()+n.Type().Size() >= lo-int64(2*types.RegSize) {
7834 // Merge with range we already have.
7835 lo = n.FrameOffset()
7840 p = Arch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
7843 lo = n.FrameOffset()
7844 hi = lo + n.Type().Size()
7847 // Zero final range.
7848 Arch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
7851 // IndexJump is used when generating consecutive jump instructions to model a specific branching.
7852 type IndexJump struct {
7857 func (s *State) oneJump(b *ssa.Block, jump *IndexJump) {
7858 p := s.Br(jump.Jump, b.Succs[jump.Index].Block())
7862 // CombJump generates combinational instructions (currently 2) for a block jump,
7863 // so that the behavior of non-standard condition codes can be simulated.
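// For example, amd64 lowers floating-point equality this way: the result must
// combine an inequality jump with a parity-flag jump to treat NaN correctly
// (see the eqfJumps pair table in cmd/compile/internal/amd64; cited here as an
// illustrative reference, not part of this file).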
7864 func (s *State) CombJump(b, next *ssa.Block, jumps *[2][2]IndexJump) {
7866 case b.Succs[0].Block():
7867 s.oneJump(b, &jumps[0][0])
7868 s.oneJump(b, &jumps[0][1])
7869 case b.Succs[1].Block():
7870 s.oneJump(b, &jumps[1][0])
7871 s.oneJump(b, &jumps[1][1])
7874 if b.Likely != ssa.BranchUnlikely {
7875 s.oneJump(b, &jumps[1][0])
7876 s.oneJump(b, &jumps[1][1])
7877 q = s.Br(obj.AJMP, b.Succs[1].Block())
7879 s.oneJump(b, &jumps[0][0])
7880 s.oneJump(b, &jumps[0][1])
7881 q = s.Br(obj.AJMP, b.Succs[0].Block())
7887 // AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a.
7888 func AddAux(a *obj.Addr, v *ssa.Value) {
7889 AddAux2(a, v, v.AuxInt)
7891 func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
7892 if a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR {
7893 v.Fatalf("bad AddAux addr %v", a)
7895 // add integer offset
7898 // If no additional symbol offset, we're done.
7902 // Add symbol's offset from its base register.
7903 switch n := v.Aux.(type) {
7905 a.Name = obj.NAME_EXTERN
7908 a.Name = obj.NAME_EXTERN
7911 if n.Class == ir.PPARAM || (n.Class == ir.PPARAMOUT && !n.IsOutputParamInRegisters()) {
7912 a.Name = obj.NAME_PARAM
7914 a.Name = obj.NAME_AUTO
7917 a.Offset += n.FrameOffset()
7919 v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
7923 // extendIndex extends idx to a full int width.
7924 // It panics with the given kind if idx does not fit in an int (only on 32-bit archs).
7925 func (s *state) extendIndex(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value {
7926 size := idx.Type.Size()
7927 if size == s.config.PtrSize {
7930 if size > s.config.PtrSize {
7931 // truncate 64-bit indexes on 32-bit pointer archs. Test the
7932 // high word and branch to out-of-bounds failure if it is not 0.
7934 if idx.Type.IsSigned() {
7935 lo = s.newValue1(ssa.OpInt64Lo, types.Types[types.TINT], idx)
7937 lo = s.newValue1(ssa.OpInt64Lo, types.Types[types.TUINT], idx)
7939 if bounded || base.Flag.B != 0 {
7942 bNext := s.f.NewBlock(ssa.BlockPlain)
7943 bPanic := s.f.NewBlock(ssa.BlockExit)
7944 hi := s.newValue1(ssa.OpInt64Hi, types.Types[types.TUINT32], idx)
7945 cmp := s.newValue2(ssa.OpEq32, types.Types[types.TBOOL], hi, s.constInt32(types.Types[types.TUINT32], 0))
7946 if !idx.Type.IsSigned() {
7948 case ssa.BoundsIndex:
7949 kind = ssa.BoundsIndexU
7950 case ssa.BoundsSliceAlen:
7951 kind = ssa.BoundsSliceAlenU
7952 case ssa.BoundsSliceAcap:
7953 kind = ssa.BoundsSliceAcapU
7954 case ssa.BoundsSliceB:
7955 kind = ssa.BoundsSliceBU
7956 case ssa.BoundsSlice3Alen:
7957 kind = ssa.BoundsSlice3AlenU
7958 case ssa.BoundsSlice3Acap:
7959 kind = ssa.BoundsSlice3AcapU
7960 case ssa.BoundsSlice3B:
7961 kind = ssa.BoundsSlice3BU
7962 case ssa.BoundsSlice3C:
7963 kind = ssa.BoundsSlice3CU
7967 b.Kind = ssa.BlockIf
7969 b.Likely = ssa.BranchLikely
7973 s.startBlock(bPanic)
7974 mem := s.newValue4I(ssa.OpPanicExtend, types.TypeMem, int64(kind), hi, lo, len, s.mem())
7975 s.endBlock().SetControl(mem)
7981 // Extend value to the required size
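// The switch key below encodes (index size, pointer size) as 10*size+PtrSize;
// for example, a 1-byte signed index on a 64-bit arch yields 18 and selects
// OpSignExt8to64.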
7983 if idx.Type.IsSigned() {
7984 switch 10*size + s.config.PtrSize {
7986 op = ssa.OpSignExt8to32
7988 op = ssa.OpSignExt8to64
7990 op = ssa.OpSignExt16to32
7992 op = ssa.OpSignExt16to64
7994 op = ssa.OpSignExt32to64
7996 s.Fatalf("bad signed index extension %s", idx.Type)
7999 switch 10*size + s.config.PtrSize {
8001 op = ssa.OpZeroExt8to32
8003 op = ssa.OpZeroExt8to64
8005 op = ssa.OpZeroExt16to32
8007 op = ssa.OpZeroExt16to64
8009 op = ssa.OpZeroExt32to64
8011 s.Fatalf("bad unsigned index extension %s", idx.Type)
8014 return s.newValue1(op, types.Types[types.TINT], idx)
8017 // CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values.
8018 // Called during ssaGenValue.
8019 func CheckLoweredPhi(v *ssa.Value) {
8020 if v.Op != ssa.OpPhi {
8021 v.Fatalf("CheckLoweredPhi called with non-phi value: %v", v.LongString())
8023 if v.Type.IsMemory() {
8027 loc := f.RegAlloc[v.ID]
8028 for _, a := range v.Args {
8029 if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead?
8030 v.Fatalf("phi arg at different location than phi: %v @ %s, but arg %v @ %s\n%s\n", v, loc, a, aloc, v.Block.Func)
8035 // CheckLoweredGetClosurePtr checks that v is the first instruction in the function's entry block,
8036 // except for incoming in-register arguments.
8037 // The output of LoweredGetClosurePtr is generally hardwired to the correct register.
8038 // That register contains the closure pointer on closure entry.
8039 func CheckLoweredGetClosurePtr(v *ssa.Value) {
8040 entry := v.Block.Func.Entry
8041 if entry != v.Block {
8042 base.Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
8044 for _, w := range entry.Values {
8049 case ssa.OpArgIntReg, ssa.OpArgFloatReg:
8052 base.Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
8057 // CheckArgReg ensures that v is in the function's entry block.
8058 func CheckArgReg(v *ssa.Value) {
8059 entry := v.Block.Func.Entry
8060 if entry != v.Block {
8061 base.Fatalf("in %s, badly placed ArgIReg or ArgFReg: %v %v", v.Block.Func.Name, v.Block, v)
8065 func AddrAuto(a *obj.Addr, v *ssa.Value) {
8066 n, off := ssa.AutoVar(v)
8067 a.Type = obj.TYPE_MEM
8069 a.Reg = int16(Arch.REGSP)
8070 a.Offset = n.FrameOffset() + off
8071 if n.Class == ir.PPARAM || (n.Class == ir.PPARAMOUT && !n.IsOutputParamInRegisters()) {
8072 a.Name = obj.NAME_PARAM
8074 a.Name = obj.NAME_AUTO
8078 // Call returns a new CALL instruction for the SSA value v.
8079 // It uses PrepareCall to prepare the call.
8080 func (s *State) Call(v *ssa.Value) *obj.Prog {
8081 pPosIsStmt := s.pp.Pos.IsStmt() // The statement-ness of the call comes from ssaGenState
8084 p := s.Prog(obj.ACALL)
8085 if pPosIsStmt == src.PosIsStmt {
8086 p.Pos = v.Pos.WithIsStmt()
8088 p.Pos = v.Pos.WithNotStmt()
8090 if sym, ok := v.Aux.(*ssa.AuxCall); ok && sym.Fn != nil {
8091 p.To.Type = obj.TYPE_MEM
8092 p.To.Name = obj.NAME_EXTERN
8095 // TODO(mdempsky): Can these differences be eliminated?
8096 switch Arch.LinkArch.Family {
8097 case sys.AMD64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm:
8098 p.To.Type = obj.TYPE_REG
8099 case sys.ARM, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64:
8100 p.To.Type = obj.TYPE_MEM
8102 base.Fatalf("unknown indirect call family")
8104 p.To.Reg = v.Args[0].Reg()
8109 // TailCall returns a new tail call instruction for the SSA value v.
8110 // It is like Call, but for a tail call.
8111 func (s *State) TailCall(v *ssa.Value) *obj.Prog {
8117 // PrepareCall prepares to emit a CALL instruction for v and does call-related bookkeeping.
8118 // It must be called immediately before emitting the actual CALL instruction,
8119 // since it emits PCDATA for the stack map at the call (calls are safe points).
8120 func (s *State) PrepareCall(v *ssa.Value) {
8121 idx := s.livenessMap.Get(v)
8122 if !idx.StackMapValid() {
8123 // See Liveness.hasStackMap.
8124 if sym, ok := v.Aux.(*ssa.AuxCall); !ok || !(sym.Fn == ir.Syms.WBZero || sym.Fn == ir.Syms.WBMove) {
8125 base.Fatalf("missing stack map index for %v", v.LongString())
8129 call, ok := v.Aux.(*ssa.AuxCall)
8132 // Record call graph information for nowritebarrierrec
8134 if nowritebarrierrecCheck != nil {
8135 nowritebarrierrecCheck.recordCall(s.pp.CurFunc, call.Fn, v.Pos)
8139 if s.maxarg < v.AuxInt {
8144 // UseArgs records the fact that an instruction needs a certain amount of
8145 // callee args space for its use.
8146 func (s *State) UseArgs(n int64) {
8152 // fieldIdx finds the index of the field referred to by the ODOT node n.
8153 func fieldIdx(n *ir.SelectorExpr) int {
8156 panic("ODOT's LHS is not a struct")
8159 for i, f := range t.Fields() {
8161 if f.Offset != n.Offset() {
8162 panic("field offset doesn't match")
8167 panic(fmt.Sprintf("can't find field in expr %v\n", n))
8169 // TODO: keep the result of this function somewhere in the ODOT Node
8170 // so we don't have to recompute it each time we need it.
8173 // ssafn holds frontend information about a function that the backend is processing.
8174 // It also exports a bunch of compiler services for the ssa backend.
8177 strings map[string]*obj.LSym // map from constant string to data symbols
8178 stksize int64 // stack size for current frame
8179 stkptrsize int64 // prefix of stack containing pointers
8181 // alignment for current frame.
8182 // NOTE: when stkalign > PtrSize, currently this only ensures the offsets of
8183 // objects in the stack frame are aligned. The stack pointer is still aligned
8184 // only to PtrSize.
8187 log bool // print ssa debug to stdout
8190 // StringData returns a symbol which
8191 // is the data component of a global string constant containing s.
8192 func (e *ssafn) StringData(s string) *obj.LSym {
8193 if aux, ok := e.strings[s]; ok {
8196 if e.strings == nil {
8197 e.strings = make(map[string]*obj.LSym)
8199 data := staticdata.StringSym(e.curfn.Pos(), s)
8204 // SplitSlot returns a slot representing the data of parent starting at offset.
8205 func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
8208 if node.Class != ir.PAUTO || node.Addrtaken() {
8209 // addressed things and non-autos retain their parents (i.e., cannot truly be split)
8210 return ssa.LocalSlot{N: node, Type: t, Off: parent.Off + offset}
8213 sym := &types.Sym{Name: node.Sym().Name + suffix, Pkg: types.LocalPkg}
8214 n := e.curfn.NewLocal(parent.N.Pos(), sym, t)
8216 n.SetEsc(ir.EscNever)
8218 return ssa.LocalSlot{N: n, Type: t, Off: 0, SplitOf: parent, SplitOffset: offset}
8221 // Logf logs a message from the compiler.
8222 func (e *ssafn) Logf(msg string, args ...interface{}) {
8224 fmt.Printf(msg, args...)
8228 func (e *ssafn) Log() bool {
8232 // Fatalf reports a compiler error and exits.
8233 func (e *ssafn) Fatalf(pos src.XPos, msg string, args ...interface{}) {
8235 nargs := append([]interface{}{ir.FuncName(e.curfn)}, args...)
8236 base.Fatalf("'%s': "+msg, nargs...)
8239 // Warnl reports a "warning", which is usually flag-triggered
8240 // logging output for the benefit of tests.
8241 func (e *ssafn) Warnl(pos src.XPos, fmt_ string, args ...interface{}) {
8242 base.WarnfAt(pos, fmt_, args...)
8245 func (e *ssafn) Debug_checknil() bool {
8246 return base.Debug.Nil != 0
8249 func (e *ssafn) UseWriteBarrier() bool {
8253 func (e *ssafn) Syslook(name string) *obj.LSym {
8255 case "goschedguarded":
8256 return ir.Syms.Goschedguarded
8257 case "writeBarrier":
8258 return ir.Syms.WriteBarrier
8260 return ir.Syms.WBZero
8262 return ir.Syms.WBMove
8263 case "cgoCheckMemmove":
8264 return ir.Syms.CgoCheckMemmove
8265 case "cgoCheckPtrWrite":
8266 return ir.Syms.CgoCheckPtrWrite
8268 e.Fatalf(src.NoXPos, "unknown Syslook func %v", name)
8272 func (e *ssafn) Func() *ir.Func {
8276 func clobberBase(n ir.Node) ir.Node {
8277 if n.Op() == ir.ODOT {
8278 n := n.(*ir.SelectorExpr)
8279 if n.X.Type().NumFields() == 1 {
8280 return clobberBase(n.X)
8283 if n.Op() == ir.OINDEX {
8284 n := n.(*ir.IndexExpr)
8285 if n.X.Type().IsArray() && n.X.Type().NumElem() == 1 {
8286 return clobberBase(n.X)
8292 // callTargetLSym returns the correct LSym to call 'callee' using its ABI.
8293 func callTargetLSym(callee *ir.Name) *obj.LSym {
8294 if callee.Func == nil {
8295 // TODO(austin): This happens in case of interface method I.M from imported package.
8296 // It's ABIInternal, and would be better if callee.Func was never nil and we didn't
8297 // need this case.
8298 return callee.Linksym()
8301 return callee.LinksymABI(callee.Func.ABI)
8304 func min8(a, b int8) int8 {
8311 func max8(a, b int8) int8 {
8318 // deferStructFnField is the field index of _defer.fn.
8319 const deferStructFnField = 4
8321 var deferType *types.Type
8323 // deferstruct returns a type interchangeable with runtime._defer.
8324 // Make sure this stays in sync with runtime/runtime2.go:_defer.
8325 func deferstruct() *types.Type {
8326 if deferType != nil {
8330 makefield := func(name string, t *types.Type) *types.Field {
8331 sym := (*types.Pkg)(nil).Lookup(name)
8332 return types.NewField(src.NoXPos, sym, t)
8335 fields := []*types.Field{
8336 makefield("heap", types.Types[types.TBOOL]),
8337 makefield("rangefunc", types.Types[types.TBOOL]),
8338 makefield("sp", types.Types[types.TUINTPTR]),
8339 makefield("pc", types.Types[types.TUINTPTR]),
8340 // Note: the types here don't really matter. Defer structures
8341 // are always scanned explicitly during stack copying and GC,
8342 // so we make them uintptr type even though they are real pointers.
8343 makefield("fn", types.Types[types.TUINTPTR]),
8344 makefield("link", types.Types[types.TUINTPTR]),
8345 makefield("head", types.Types[types.TUINTPTR]),
8347 if name := fields[deferStructFnField].Sym.Name; name != "fn" {
8348 base.Fatalf("deferStructFnField is %q, not fn", name)
8351 n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("_defer"))
8352 typ := types.NewNamed(n)
8356 // build struct holding the above fields
8357 typ.SetUnderlying(types.NewStruct(fields))
8358 types.CalcStructSize(typ)
8364 // SpillSlotAddr uses LocalSlot information to initialize an obj.Addr.
8365 // The resulting addr is used in a non-standard context -- in the prologue
8366 // of a function, before the frame has been constructed, so the standard
8367 // addressing for the parameters will be wrong.
8368 func SpillSlotAddr(spill ssa.Spill, baseReg int16, extraOffset int64) obj.Addr {
8370 Name: obj.NAME_NONE,
8373 Offset: spill.Offset + extraOffset,
8378 BoundsCheckFunc [ssa.BoundsKindCount]*obj.LSym
8379 ExtendCheckFunc [ssa.BoundsKindCount]*obj.LSym