// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
10 "cmd/compile/internal/abi"
20 "cmd/compile/internal/base"
21 "cmd/compile/internal/ir"
22 "cmd/compile/internal/liveness"
23 "cmd/compile/internal/objw"
24 "cmd/compile/internal/reflectdata"
25 "cmd/compile/internal/ssa"
26 "cmd/compile/internal/staticdata"
27 "cmd/compile/internal/typecheck"
28 "cmd/compile/internal/types"
30 "cmd/internal/obj/x86"
var ssaConfig *ssa.Config
var ssaCaches []ssa.Cache

var ssaDump string     // early copy of $GOSSAFUNC; the func name to dump output for
var ssaDir string      // optional destination for ssa dump file
var ssaDumpStdout bool // whether to dump to stdout
var ssaDumpCFG string  // generate CFGs for these phases

const ssaDumpFile = "ssa.html"
// ssaDumpInlined holds all inlined functions when ssaDump contains a function name.
var ssaDumpInlined []*ir.Func

func DumpInline(fn *ir.Func) {
	if ssaDump != "" && ssaDump == ir.FuncName(fn) {
		ssaDumpInlined = append(ssaDumpInlined, fn)
	}
}

func InitEnv() {
	ssaDump = os.Getenv("GOSSAFUNC")
	ssaDir = os.Getenv("GOSSADIR")
	if ssaDump != "" {
		if strings.HasSuffix(ssaDump, "+") {
			ssaDump = ssaDump[:len(ssaDump)-1]
			ssaDumpStdout = true
		}
		spl := strings.Split(ssaDump, ":")
		if len(spl) == 2 {
			ssaDump = spl[0]
			ssaDumpCFG = spl[1]
		}
	}
}
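// Illustrative invocations (not an exhaustive list of forms):
//
//	GOSSAFUNC=Foo go build .           dump SSA for Foo to ssa.html
//	GOSSAFUNC=Foo+ go build .          also echo the dump to stdout
//	GOSSAFUNC=Foo:deadcode go build .  additionally render a CFG for the deadcode phase
//	GOSSADIR=/tmp/ssa go build .       write the HTML dump files under /tmp/ssa
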
func InitConfig() {
	types_ := ssa.NewTypes()

	if Arch.SoftFloat {
		softfloatInit()
	}

	// Generate a few pointer types that are uncommon in the frontend but common in the backend.
	// Caching is disabled in the backend, so generating these here avoids allocations.
	_ = types.NewPtr(types.Types[types.TINTER])                             // *interface{}
	_ = types.NewPtr(types.NewPtr(types.Types[types.TSTRING]))              // **string
	_ = types.NewPtr(types.NewSlice(types.Types[types.TINTER]))             // *[]interface{}
	_ = types.NewPtr(types.NewPtr(types.ByteType))                          // **byte
	_ = types.NewPtr(types.NewSlice(types.ByteType))                        // *[]byte
	_ = types.NewPtr(types.NewSlice(types.Types[types.TSTRING]))            // *[]string
	_ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[types.TUINT8]))) // ***uint8
	_ = types.NewPtr(types.Types[types.TINT16])                             // *int16
	_ = types.NewPtr(types.Types[types.TINT64])                             // *int64
	_ = types.NewPtr(types.ErrorType)                                       // *error
	types.NewPtrCacheEnabled = false
	ssaConfig = ssa.NewConfig(base.Ctxt.Arch.Name, *types_, base.Ctxt, base.Flag.N == 0, Arch.SoftFloat)
	ssaConfig.Race = base.Flag.Race
	ssaCaches = make([]ssa.Cache, base.Flag.LowerC)
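	// Note: base.Flag.LowerC is the compiler's -c flag (the number of
	// concurrent backend workers). One ssa.Cache is allocated per worker,
	// so workers can reuse scratch state without synchronizing.
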
	// Set up some runtime functions we'll need to call.
	ir.Syms.AssertE2I = typecheck.LookupRuntimeFunc("assertE2I")
	ir.Syms.AssertE2I2 = typecheck.LookupRuntimeFunc("assertE2I2")
	ir.Syms.AssertI2I = typecheck.LookupRuntimeFunc("assertI2I")
	ir.Syms.AssertI2I2 = typecheck.LookupRuntimeFunc("assertI2I2")
	ir.Syms.CheckPtrAlignment = typecheck.LookupRuntimeFunc("checkptrAlignment")
	ir.Syms.Deferproc = typecheck.LookupRuntimeFunc("deferproc")
	ir.Syms.DeferprocStack = typecheck.LookupRuntimeFunc("deferprocStack")
	ir.Syms.Deferreturn = typecheck.LookupRuntimeFunc("deferreturn")
	ir.Syms.Duffcopy = typecheck.LookupRuntimeFunc("duffcopy")
	ir.Syms.Duffzero = typecheck.LookupRuntimeFunc("duffzero")
	ir.Syms.GCWriteBarrier = typecheck.LookupRuntimeFunc("gcWriteBarrier")
	ir.Syms.Goschedguarded = typecheck.LookupRuntimeFunc("goschedguarded")
	ir.Syms.Growslice = typecheck.LookupRuntimeFunc("growslice")
	ir.Syms.Memmove = typecheck.LookupRuntimeFunc("memmove")
	ir.Syms.Msanread = typecheck.LookupRuntimeFunc("msanread")
	ir.Syms.Msanwrite = typecheck.LookupRuntimeFunc("msanwrite")
	ir.Syms.Msanmove = typecheck.LookupRuntimeFunc("msanmove")
	ir.Syms.Asanread = typecheck.LookupRuntimeFunc("asanread")
	ir.Syms.Asanwrite = typecheck.LookupRuntimeFunc("asanwrite")
	ir.Syms.Newobject = typecheck.LookupRuntimeFunc("newobject")
	ir.Syms.Newproc = typecheck.LookupRuntimeFunc("newproc")
	ir.Syms.Panicdivide = typecheck.LookupRuntimeFunc("panicdivide")
	ir.Syms.PanicdottypeE = typecheck.LookupRuntimeFunc("panicdottypeE")
	ir.Syms.PanicdottypeI = typecheck.LookupRuntimeFunc("panicdottypeI")
	ir.Syms.Panicnildottype = typecheck.LookupRuntimeFunc("panicnildottype")
	ir.Syms.Panicoverflow = typecheck.LookupRuntimeFunc("panicoverflow")
	ir.Syms.Panicshift = typecheck.LookupRuntimeFunc("panicshift")
	ir.Syms.Raceread = typecheck.LookupRuntimeFunc("raceread")
	ir.Syms.Racereadrange = typecheck.LookupRuntimeFunc("racereadrange")
	ir.Syms.Racewrite = typecheck.LookupRuntimeFunc("racewrite")
	ir.Syms.Racewriterange = typecheck.LookupRuntimeFunc("racewriterange")
	ir.Syms.X86HasPOPCNT = typecheck.LookupRuntimeVar("x86HasPOPCNT")       // bool
	ir.Syms.X86HasSSE41 = typecheck.LookupRuntimeVar("x86HasSSE41")         // bool
	ir.Syms.X86HasFMA = typecheck.LookupRuntimeVar("x86HasFMA")             // bool
	ir.Syms.ARMHasVFPv4 = typecheck.LookupRuntimeVar("armHasVFPv4")         // bool
	ir.Syms.ARM64HasATOMICS = typecheck.LookupRuntimeVar("arm64HasATOMICS") // bool
	ir.Syms.Staticuint64s = typecheck.LookupRuntimeVar("staticuint64s")
	ir.Syms.Typedmemclr = typecheck.LookupRuntimeFunc("typedmemclr")
	ir.Syms.Typedmemmove = typecheck.LookupRuntimeFunc("typedmemmove")
	ir.Syms.Udiv = typecheck.LookupRuntimeVar("udiv")                 // asm func with special ABI
	ir.Syms.WriteBarrier = typecheck.LookupRuntimeVar("writeBarrier") // struct { bool; ... }
	ir.Syms.Zerobase = typecheck.LookupRuntimeVar("zerobase")

	// asm funcs with special ABI
	if base.Ctxt.Arch.Name == "amd64" {
		GCWriteBarrierReg = map[int16]*obj.LSym{
			x86.REG_AX: typecheck.LookupRuntimeFunc("gcWriteBarrier"),
			x86.REG_CX: typecheck.LookupRuntimeFunc("gcWriteBarrierCX"),
			x86.REG_DX: typecheck.LookupRuntimeFunc("gcWriteBarrierDX"),
			x86.REG_BX: typecheck.LookupRuntimeFunc("gcWriteBarrierBX"),
			x86.REG_BP: typecheck.LookupRuntimeFunc("gcWriteBarrierBP"),
			x86.REG_SI: typecheck.LookupRuntimeFunc("gcWriteBarrierSI"),
			x86.REG_R8: typecheck.LookupRuntimeFunc("gcWriteBarrierR8"),
			x86.REG_R9: typecheck.LookupRuntimeFunc("gcWriteBarrierR9"),
		}
	}
	if Arch.LinkArch.Family == sys.Wasm {
		BoundsCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeFunc("goPanicIndex")
		BoundsCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeFunc("goPanicIndexU")
		BoundsCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeFunc("goPanicSliceAlen")
		BoundsCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeFunc("goPanicSliceAlenU")
		BoundsCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeFunc("goPanicSliceAcap")
		BoundsCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeFunc("goPanicSliceAcapU")
		BoundsCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeFunc("goPanicSliceB")
		BoundsCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeFunc("goPanicSliceBU")
		BoundsCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeFunc("goPanicSlice3Alen")
		BoundsCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeFunc("goPanicSlice3AlenU")
		BoundsCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeFunc("goPanicSlice3Acap")
		BoundsCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeFunc("goPanicSlice3AcapU")
		BoundsCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeFunc("goPanicSlice3B")
		BoundsCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeFunc("goPanicSlice3BU")
		BoundsCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeFunc("goPanicSlice3C")
		BoundsCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeFunc("goPanicSlice3CU")
		BoundsCheckFunc[ssa.BoundsConvert] = typecheck.LookupRuntimeFunc("goPanicSliceConvert")
	} else {
		BoundsCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeFunc("panicIndex")
		BoundsCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeFunc("panicIndexU")
		BoundsCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeFunc("panicSliceAlen")
		BoundsCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeFunc("panicSliceAlenU")
		BoundsCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeFunc("panicSliceAcap")
		BoundsCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeFunc("panicSliceAcapU")
		BoundsCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeFunc("panicSliceB")
		BoundsCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeFunc("panicSliceBU")
		BoundsCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeFunc("panicSlice3Alen")
		BoundsCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeFunc("panicSlice3AlenU")
		BoundsCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeFunc("panicSlice3Acap")
		BoundsCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeFunc("panicSlice3AcapU")
		BoundsCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeFunc("panicSlice3B")
		BoundsCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeFunc("panicSlice3BU")
		BoundsCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeFunc("panicSlice3C")
		BoundsCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeFunc("panicSlice3CU")
		BoundsCheckFunc[ssa.BoundsConvert] = typecheck.LookupRuntimeFunc("panicSliceConvert")
	}
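	// Note: the U-suffixed variants (e.g. BoundsIndexU) correspond to bounds
	// checks whose index is an unsigned value, so only the upper bound needs
	// checking. The goPanic* forms above are the Go-implemented versions used
	// on Wasm; the panic* forms are assembly shims with special ABIs.
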
	if Arch.LinkArch.PtrSize == 4 {
		ExtendCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeVar("panicExtendIndex")
		ExtendCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeVar("panicExtendIndexU")
		ExtendCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeVar("panicExtendSliceAlen")
		ExtendCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeVar("panicExtendSliceAlenU")
		ExtendCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeVar("panicExtendSliceAcap")
		ExtendCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeVar("panicExtendSliceAcapU")
		ExtendCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeVar("panicExtendSliceB")
		ExtendCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeVar("panicExtendSliceBU")
		ExtendCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeVar("panicExtendSlice3Alen")
		ExtendCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeVar("panicExtendSlice3AlenU")
		ExtendCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeVar("panicExtendSlice3Acap")
		ExtendCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeVar("panicExtendSlice3AcapU")
		ExtendCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeVar("panicExtendSlice3B")
		ExtendCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeVar("panicExtendSlice3BU")
		ExtendCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeVar("panicExtendSlice3C")
		ExtendCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeVar("panicExtendSlice3CU")
	}

	// Wasm (all asm funcs with special ABIs)
	ir.Syms.WasmDiv = typecheck.LookupRuntimeVar("wasmDiv")
	ir.Syms.WasmTruncS = typecheck.LookupRuntimeVar("wasmTruncS")
	ir.Syms.WasmTruncU = typecheck.LookupRuntimeVar("wasmTruncU")
	ir.Syms.SigPanic = typecheck.LookupRuntimeFunc("sigpanic")
}
// AbiForBodylessFuncStackMap returns the ABI for a bodyless function's stack map.
// This is not necessarily the ABI used to call it.
// Currently (1.17 dev) such a stack map is always ABI0;
// any ABI wrapper that is present is nosplit, hence a precise
// stack map is not needed there (the parameters survive only long
// enough to call the wrapped assembly function).
// This always returns a freshly copied ABI.
func AbiForBodylessFuncStackMap(fn *ir.Func) *abi.ABIConfig {
	return ssaConfig.ABI0.Copy() // No idea what races will result, be safe
}

// abiForFunc implements ABI policy for a function, but does not return a copy of the ABI.
// Passing a nil function returns the default ABI based on experiment configuration.
func abiForFunc(fn *ir.Func, abi0, abi1 *abi.ABIConfig) *abi.ABIConfig {
	if buildcfg.Experiment.RegabiArgs {
		// Select the ABI based on the function's defining ABI.
		if fn == nil {
			return abi1
		}
		switch fn.ABI {
		case obj.ABI0:
			return abi0
		case obj.ABIInternal:
			// TODO(austin): Clean up the nomenclature here.
			// It's not clear that "abi1" is ABIInternal.
			return abi1
		}
		base.Fatalf("function %v has unknown ABI %v", fn, fn.ABI)
		panic("not reachable")
	}

	a := abi0
	if fn != nil {
		if fn.Pragma&ir.RegisterParams != 0 { // TODO(register args) remove after register abi is working
			a = abi1
		}
	}
	return a
}
// dvarint writes a varint v to the funcdata in symbol x and returns the new offset.
func dvarint(x *obj.LSym, off int, v int64) int {
	if v < 0 || v > 1e9 {
		panic(fmt.Sprintf("dvarint: bad offset for funcdata - %v", v))
	}
	if v < 1<<7 {
		return objw.Uint8(x, off, uint8(v))
	}
	off = objw.Uint8(x, off, uint8((v&127)|128))
	if v < 1<<14 {
		return objw.Uint8(x, off, uint8(v>>7))
	}
	off = objw.Uint8(x, off, uint8(((v>>7)&127)|128))
	if v < 1<<21 {
		return objw.Uint8(x, off, uint8(v>>14))
	}
	off = objw.Uint8(x, off, uint8(((v>>14)&127)|128))
	if v < 1<<28 {
		return objw.Uint8(x, off, uint8(v>>21))
	}
	off = objw.Uint8(x, off, uint8(((v>>21)&127)|128))
	return objw.Uint8(x, off, uint8(v>>28))
}
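// For example, dvarint(x, off, 300) emits two bytes: 0xAC (the low seven
// bits, 0b0101100, with the continuation bit set) followed by 0x02 (the
// remaining bits) - the usual little-endian base-128 varint encoding.
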
// emitOpenDeferInfo emits FUNCDATA information about the defers in a function
// that is using open-coded defers. This funcdata is used to determine the active
// defers in a function and execute those defers during panic processing.
//
// The funcdata is all encoded in varints (since values will almost always be less than
// 128, but stack offsets could potentially be up to 2Gbyte). All "locations" (offsets)
// for stack variables are specified as the number of bytes below varp (pointer to the
// top of the local variables) for their starting address. The format is:
//
//   - Offset of the deferBits variable
//   - Number of defers in the function
//   - Information about each defer call, in reverse order of appearance in the function:
//     - Offset of the closure value to call
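//
// Illustrative encoding: a function with two open-coded defers whose deferBits
// slot is 8 bytes below varp would emit the varints [8, 2, <offset of the
// second defer's closure>, <offset of the first defer's closure>].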
func (s *state) emitOpenDeferInfo() {
	x := base.Ctxt.Lookup(s.curfn.LSym.Name + ".opendefer")
	x.Set(obj.AttrContentAddressable, true)
	s.curfn.LSym.Func().OpenCodedDeferInfo = x
	off := 0
	off = dvarint(x, off, -s.deferBitsTemp.FrameOffset())
	off = dvarint(x, off, int64(len(s.openDefers)))

	// Write in reverse order, for ease of running in that order at runtime.
	for i := len(s.openDefers) - 1; i >= 0; i-- {
		r := s.openDefers[i]
		off = dvarint(x, off, -r.closureNode.FrameOffset())
	}
}

func okOffset(offset int64) int64 {
	if offset == types.BOGUS_FUNARG_OFFSET {
		panic(fmt.Errorf("Bogus offset %d", offset))
	}
	return offset
}
// buildssa builds an SSA function for fn.
// worker indicates which of the backend workers is doing the processing.
func buildssa(fn *ir.Func, worker int) *ssa.Func {
	name := ir.FuncName(fn)
	printssa := false
	if ssaDump != "" { // match either a simple name e.g. "(*Reader).Reset", package.name e.g. "compress/gzip.(*Reader).Reset", or subpackage name "gzip.(*Reader).Reset"
		pkgDotName := base.Ctxt.Pkgpath + "." + name
		printssa = name == ssaDump ||
			strings.HasSuffix(pkgDotName, ssaDump) && (pkgDotName == ssaDump || strings.HasSuffix(pkgDotName, "/"+ssaDump))
	}
	var astBuf *bytes.Buffer
	if printssa {
		astBuf = &bytes.Buffer{}
		ir.FDumpList(astBuf, "buildssa-enter", fn.Enter)
		ir.FDumpList(astBuf, "buildssa-body", fn.Body)
		ir.FDumpList(astBuf, "buildssa-exit", fn.Exit)
		if ssaDumpStdout {
			fmt.Println("generating SSA for", name)
			fmt.Print(astBuf.String())
		}
	}

	var s state
	s.pushLine(fn.Pos())
	defer s.popLine()

	s.hasdefer = fn.HasDefer()
	if fn.Pragma&ir.CgoUnsafeArgs != 0 {
		s.cgoUnsafeArgs = true
	}
	s.checkPtrEnabled = ir.ShouldCheckPtr(fn, 1)

	fe := ssafn{
		curfn: fn,
		log:   printssa && ssaDumpStdout,
	}
	s.curfn = fn

	s.f = ssa.NewFunc(&fe)
	s.config = ssaConfig
	s.f.Type = fn.Type()
	s.f.Config = ssaConfig
	s.f.Cache = &ssaCaches[worker]
	s.f.Cache.Reset()
	s.f.Name = name
	s.f.PrintOrHtmlSSA = printssa
	if fn.Pragma&ir.Nosplit != 0 {
		s.f.NoSplit = true
	}
	s.f.ABI0 = ssaConfig.ABI0.Copy() // Make a copy to avoid racy map operations in type-register-width cache.
	s.f.ABI1 = ssaConfig.ABI1.Copy()
	s.f.ABIDefault = abiForFunc(nil, s.f.ABI0, s.f.ABI1)
	s.f.ABISelf = abiForFunc(fn, s.f.ABI0, s.f.ABI1)

	s.panics = map[funcLine]*ssa.Block{}
	s.softFloat = s.config.SoftFloat

	// Allocate starting block
	s.f.Entry = s.f.NewBlock(ssa.BlockPlain)
	s.f.Entry.Pos = fn.Pos()

	if printssa {
		ssaDF := ssaDumpFile
		if ssaDir != "" {
			ssaDF = filepath.Join(ssaDir, base.Ctxt.Pkgpath+"."+name+".html")
			ssaD := filepath.Dir(ssaDF)
			os.MkdirAll(ssaD, 0755)
		}
		s.f.HTMLWriter = ssa.NewHTMLWriter(ssaDF, s.f, ssaDumpCFG)
		// TODO: generate and print a mapping from nodes to values and blocks
		dumpSourcesColumn(s.f.HTMLWriter, fn)
		s.f.HTMLWriter.WriteAST("AST", astBuf)
	}

	// Allocate starting values
	s.labels = map[string]*ssaLabel{}
	s.fwdVars = map[ir.Node]*ssa.Value{}
	s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)

	s.hasOpenDefers = base.Flag.N == 0 && s.hasdefer && !s.curfn.OpenCodedDeferDisallowed()
	switch {
	case base.Debug.NoOpenDefer != 0:
		s.hasOpenDefers = false
	case s.hasOpenDefers && (base.Ctxt.Flag_shared || base.Ctxt.Flag_dynlink) && base.Ctxt.Arch.Name == "386":
		// Don't support open-coded defers for 386 ONLY when using shared
		// libraries, because there is extra code (added by rewriteToUseGot())
		// preceding the deferreturn/ret code that we don't track correctly.
		s.hasOpenDefers = false
	}
	if s.hasOpenDefers && len(s.curfn.Exit) > 0 {
		// Skip doing open defers if there is any extra exit code (likely
		// race detection), since we will not generate that code in the
		// case of the extra deferreturn/ret segment.
		s.hasOpenDefers = false
	}
	if s.hasOpenDefers {
		// Similarly, skip if there are any heap-allocated result
		// parameters that need to be copied back to their stack slots.
		for _, f := range s.curfn.Type().Results().FieldSlice() {
			if !f.Nname.(*ir.Name).OnStack() {
				s.hasOpenDefers = false
				break
			}
		}
	}
	if s.hasOpenDefers &&
		s.curfn.NumReturns*s.curfn.NumDefers > 15 {
		// Since we are generating defer calls at every exit for
		// open-coded defers, skip doing open-coded defers if there are
		// too many returns (especially if there are multiple defers).
		// Open-coded defers are most important for improving performance
		// for smaller functions (which don't have many returns).
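		// For example, 4 defers in a function with 4 returns would emit
		// 4*4 = 16 defer-call sequences, just over this threshold.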
		s.hasOpenDefers = false
	}

	s.sp = s.entryNewValue0(ssa.OpSP, types.Types[types.TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
	s.sb = s.entryNewValue0(ssa.OpSB, types.Types[types.TUINTPTR])

	s.startBlock(s.f.Entry)
	s.vars[memVar] = s.startmem
	if s.hasOpenDefers {
		// Create the deferBits variable and stack slot. deferBits is a
		// bitmask showing which of the open-coded defers in this function
		// have been activated.
		deferBitsTemp := typecheck.TempAt(src.NoXPos, s.curfn, types.Types[types.TUINT8])
		deferBitsTemp.SetAddrtaken(true)
		s.deferBitsTemp = deferBitsTemp
		// For this value, AuxInt is initialized to zero by default
		startDeferBits := s.entryNewValue0(ssa.OpConst8, types.Types[types.TUINT8])
		s.vars[deferBitsVar] = startDeferBits
		s.deferBitsAddr = s.addr(deferBitsTemp)
		s.store(types.Types[types.TUINT8], s.deferBitsAddr, startDeferBits)
		// Make sure that the deferBits stack slot is kept alive (for use
		// by panics) and stores to deferBits are not eliminated, even if
		// all checking code on deferBits in the function exit can be
		// eliminated, because the defer statements were all
		// unconditional.
		s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, deferBitsTemp, s.mem(), false)
	}
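	// (Illustration: with three open-coded defers, deferBits starts as 0b000;
	// executing the second defer statement sets bit 1, giving 0b010. At exit,
	// or during panic processing, only defers whose bit is set are invoked.)
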
	var params *abi.ABIParamResultInfo
	params = s.f.ABISelf.ABIAnalyze(fn.Type(), true)

	// The backend's stackframe pass prunes away entries from the fn's
	// Dcl list, including PARAMOUT nodes that correspond to output
	// params passed in registers. Walk the Dcl list and capture these
	// nodes to a side list, so that we'll have them available during
	// DWARF-gen later on. See issue 48573 for more details.
	var debugInfo ssa.FuncDebug
	for _, n := range fn.Dcl {
		if n.Class == ir.PPARAMOUT && n.IsOutputParamInRegisters() {
			debugInfo.RegOutputParams = append(debugInfo.RegOutputParams, n)
		}
	}
	fn.DebugInfo = &debugInfo

	// Generate addresses of local declarations
	s.decladdrs = map[*ir.Name]*ssa.Value{}
	for _, n := range fn.Dcl {
		switch n.Class {
		case ir.PPARAM:
			// Be aware that blank and unnamed input parameters will not appear here, but do appear in the type
			s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem)
		case ir.PPARAMOUT:
			s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem)
		case ir.PAUTO:
			// processed at each use, to prevent Addr coming
			// before the decl.
		default:
			s.Fatalf("local variable with class %v unimplemented", n.Class)
		}
	}

	s.f.OwnAux = ssa.OwnAuxCall(fn.LSym, params)

	// Populate SSAable arguments.
	for _, n := range fn.Dcl {
		if n.Class == ir.PPARAM {
			if s.canSSA(n) {
				v := s.newValue0A(ssa.OpArg, n.Type(), n)
				s.vars[n] = v
				s.addNamedValue(n, v) // This helps with debugging information, not needed for compilation itself.
			} else { // address was taken AND/OR too large for SSA
				paramAssignment := ssa.ParamAssignmentForArgName(s.f, n)
				if len(paramAssignment.Registers) > 0 {
					if TypeOK(n.Type()) { // SSA-able type, so address was taken -- receive value in OpArg, DO NOT bind to var, store immediately to memory.
						v := s.newValue0A(ssa.OpArg, n.Type(), n)
						s.store(n.Type(), s.decladdrs[n], v)
					} else { // Too big for SSA.
						// Brute force, and early, do a bunch of stores from registers
						// TODO fix the nasty storeArgOrLoad recursion in ssa/expand_calls.go so this Just Works with store of a big Arg.
						s.storeParameterRegsToStack(s.f.ABISelf, paramAssignment, n, s.decladdrs[n], false)
					}
				}
			}
		}
	}

	// Populate closure variables.
	if fn.Needctxt() {
		clo := s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr)
		offset := int64(types.PtrSize) // PtrSize to skip past function entry PC field
		for _, n := range fn.ClosureVars {
			typ := n.Type()
			if !n.Byval() {
				typ = types.NewPtr(typ)
			}

			offset = types.RoundUp(offset, typ.Alignment())
			ptr := s.newValue1I(ssa.OpOffPtr, types.NewPtr(typ), offset, clo)
			offset += typ.Size()

			// If n is a small variable captured by value, promote
			// it to PAUTO so it can be converted to SSA.
			//
			// Note: While we never capture a variable by value if
			// the user took its address, we may have generated
			// runtime calls that did (#43701). Since we don't
			// convert Addrtaken variables to SSA anyway, no point
			// in promoting them either.
			if n.Byval() && !n.Addrtaken() && TypeOK(n.Type()) {
				n.Class = ir.PAUTO
				fn.Dcl = append(fn.Dcl, n)
				s.assign(n, s.load(n.Type(), ptr), false, 0)
				continue
			}

			if !n.Byval() {
				ptr = s.load(typ, ptr)
			}
			s.setHeapaddr(fn.Pos(), n, ptr)
		}
	}
	// Convert the AST-based IR to the SSA-based IR
	s.zeroResults()
	s.paramsToHeap()
	s.stmtList(fn.Enter)
	s.stmtList(fn.Body)

	// fallthrough to exit
	if s.curBlock != nil {
		s.pushLine(fn.Endlineno)
		s.exit()
		s.popLine()
	}

	for _, b := range s.f.Blocks {
		if b.Pos != src.NoXPos {
			s.updateUnsetPredPos(b)
		}
	}

	s.f.HTMLWriter.WritePhase("before insert phis", "before insert phis")

	s.insertPhis()

	// Main call to ssa package to compile function
	ssa.Compile(s.f)

	if s.hasOpenDefers {
		s.emitOpenDeferInfo()
	}

	// Record incoming parameter spill information for morestack calls emitted in the assembler.
	// This is done here, using all the parameters (used, partially used, and unused) because
	// it mimics the behavior of the former ABI (everything stored) and because it's not 100%
	// clear if naming conventions are respected in autogenerated code.
	// TODO figure out exactly what's unused, don't spill it. Make liveness fine-grained, also.
	for _, p := range params.InParams() {
		typs, offs := p.RegisterTypesAndOffsets()
		for i, t := range typs {
			o := offs[i]                // offset within parameter
			fo := p.FrameOffset(params) // offset of parameter in frame
			reg := ssa.ObjRegForAbiReg(p.Registers[i], s.f.Config)
			s.f.RegArgs = append(s.f.RegArgs, ssa.Spill{Reg: reg, Offset: fo + o, Type: t})
		}
	}

	return s.f
}

func (s *state) storeParameterRegsToStack(abi *abi.ABIConfig, paramAssignment *abi.ABIParamAssignment, n *ir.Name, addr *ssa.Value, pointersOnly bool) {
	typs, offs := paramAssignment.RegisterTypesAndOffsets()
	for i, t := range typs {
		if pointersOnly && !t.IsPtrShaped() {
			continue
		}
		r := paramAssignment.Registers[i]
		o := offs[i]
		op, reg := ssa.ArgOpAndRegisterFor(r, abi)
		aux := &ssa.AuxNameOffset{Name: n, Offset: o}
		v := s.newValue0I(op, t, reg)
		v.Aux = aux
		p := s.newValue1I(ssa.OpOffPtr, types.NewPtr(t), o, addr)
		s.store(t, p, v)
	}
}
// zeroResults zeros the return values at the start of the function.
// We need to do this very early in the function. Defer might stop a
// panic and show the return values as they exist at the time of
// panic. For precise stacks, the garbage collector assumes results
// are always live, so we need to zero them before any allocations,
// even allocations to move params/results to the heap.
func (s *state) zeroResults() {
	for _, f := range s.curfn.Type().Results().FieldSlice() {
		n := f.Nname.(*ir.Name)
		if !n.OnStack() {
			// The local which points to the return value is the
			// thing that needs zeroing. This is already handled
			// by a Needzero annotation in plive.go:(*liveness).epilogue.
			continue
		}
		// Zero the stack location containing f.
		if typ := n.Type(); TypeOK(typ) {
			s.assign(n, s.zeroVal(typ), false, 0)
		} else {
			if typ.HasPointers() {
				s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
			}
			s.zero(n.Type(), s.decladdrs[n])
		}
	}
}

// paramsToHeap produces code to allocate memory for heap-escaped parameters
// and to copy non-result parameters' values from the stack.
func (s *state) paramsToHeap() {
	do := func(params *types.Type) {
		for _, f := range params.FieldSlice() {
			if f.Nname == nil {
				continue // anonymous or blank parameter
			}
			n := f.Nname.(*ir.Name)
			if ir.IsBlank(n) || n.OnStack() {
				continue
			}
			s.newHeapaddr(n)
			if n.Class == ir.PPARAM {
				s.move(n.Type(), s.expr(n.Heapaddr), s.decladdrs[n])
			}
		}
	}

	typ := s.curfn.Type()
	do(typ.Recvs())
	do(typ.Params())
	do(typ.Results())
}

// newHeapaddr allocates heap memory for n and sets its heap address.
func (s *state) newHeapaddr(n *ir.Name) {
	s.setHeapaddr(n.Pos(), n, s.newObject(n.Type(), nil))
}
// setHeapaddr allocates a new PAUTO variable to store ptr (which must be non-nil)
// and then sets it as n's heap address.
func (s *state) setHeapaddr(pos src.XPos, n *ir.Name, ptr *ssa.Value) {
	if !ptr.Type.IsPtr() || !types.Identical(n.Type(), ptr.Type.Elem()) {
		base.FatalfAt(n.Pos(), "setHeapaddr %L with type %v", n, ptr.Type)
	}

	// Declare variable to hold address.
	addr := ir.NewNameAt(pos, &types.Sym{Name: "&" + n.Sym().Name, Pkg: types.LocalPkg})
	addr.SetType(types.NewPtr(n.Type()))
	addr.Class = ir.PAUTO
	addr.SetUsed(true)
	addr.Curfn = s.curfn
	s.curfn.Dcl = append(s.curfn.Dcl, addr)
	types.CalcSize(addr.Type())

	if n.Class == ir.PPARAMOUT {
		addr.SetIsOutputParamHeapAddr(true)
	}

	n.Heapaddr = addr
	s.assign(addr, ptr, false, 0)
}

// newObject returns an SSA value denoting new(typ).
func (s *state) newObject(typ *types.Type, rtype *ssa.Value) *ssa.Value {
	if typ.Size() == 0 {
		return s.newValue1A(ssa.OpAddr, types.NewPtr(typ), ir.Syms.Zerobase, s.sb)
	}
	if rtype == nil {
		rtype = s.reflectType(typ)
	}
	return s.rtcall(ir.Syms.Newobject, true, []*types.Type{types.NewPtr(typ)}, rtype)[0]
}
func (s *state) checkPtrAlignment(n *ir.ConvExpr, v *ssa.Value, count *ssa.Value) {
	if !n.Type().IsPtr() {
		s.Fatalf("expected pointer type: %v", n.Type())
	}
	elem, rtypeExpr := n.Type().Elem(), n.ElemRType
	if count != nil {
		if !elem.IsArray() {
			s.Fatalf("expected array type: %v", elem)
		}
		elem, rtypeExpr = elem.Elem(), n.ElemElemRType
	}
	size := elem.Size()
	// Casting from larger type to smaller one is ok, so for smallest type, do nothing.
	if elem.Alignment() == 1 && (size == 0 || size == 1 || count == nil) {
		return
	}
	if count == nil {
		count = s.constInt(types.Types[types.TUINTPTR], 1)
	}
	if count.Type.Size() != s.config.PtrSize {
		s.Fatalf("expected count fit to an uintptr size, have: %d, want: %d", count.Type.Size(), s.config.PtrSize)
	}
	var rtype *ssa.Value
	if rtypeExpr != nil {
		rtype = s.expr(rtypeExpr)
	} else {
		rtype = s.reflectType(elem)
	}
	s.rtcall(ir.Syms.CheckPtrAlignment, true, nil, v, rtype, count)
}

// reflectType returns an SSA value representing a pointer to typ's
// reflection type descriptor.
func (s *state) reflectType(typ *types.Type) *ssa.Value {
	// TODO(mdempsky): Make this Fatalf under Unified IR; frontend needs
	// to supply RType expressions.
	lsym := reflectdata.TypeLinksym(typ)
	return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(types.Types[types.TUINT8]), lsym, s.sb)
}
func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *ir.Func) {
	// Read sources of target function fn.
	fname := base.Ctxt.PosTable.Pos(fn.Pos()).Filename()
	targetFn, err := readFuncLines(fname, fn.Pos().Line(), fn.Endlineno.Line())
	if err != nil {
		writer.Logf("cannot read sources for function %v: %v", fn, err)
	}

	// Read sources of inlined functions.
	var inlFns []*ssa.FuncLines
	for _, fi := range ssaDumpInlined {
		elno := fi.Endlineno
		fname := base.Ctxt.PosTable.Pos(fi.Pos()).Filename()
		fnLines, err := readFuncLines(fname, fi.Pos().Line(), elno.Line())
		if err != nil {
			writer.Logf("cannot read sources for inlined function %v: %v", fi, err)
			continue
		}
		inlFns = append(inlFns, fnLines)
	}

	sort.Sort(ssa.ByTopo(inlFns))
	if targetFn != nil {
		inlFns = append([]*ssa.FuncLines{targetFn}, inlFns...)
	}

	writer.WriteSources("sources", inlFns)
}

func readFuncLines(file string, start, end uint) (*ssa.FuncLines, error) {
	f, err := os.Open(os.ExpandEnv(file))
	if err != nil {
		return nil, err
	}
	defer f.Close()
	var lines []string
	ln := uint(1)
	scanner := bufio.NewScanner(f)
	for scanner.Scan() && ln <= end {
		if ln >= start {
			lines = append(lines, scanner.Text())
		}
		ln++
	}
	return &ssa.FuncLines{Filename: file, StartLineno: start, Lines: lines}, nil
}
// updateUnsetPredPos propagates the earliest-value position information for b
// towards all of b's predecessors that need a position, and recurs on that
// predecessor if its position is updated. B should have a non-empty position.
func (s *state) updateUnsetPredPos(b *ssa.Block) {
	if b.Pos == src.NoXPos {
		s.Fatalf("Block %s should have a position", b)
	}
	bestPos := src.NoXPos
	for _, e := range b.Preds {
		p := e.Block()
		if !p.LackingPos() {
			continue
		}
		if bestPos == src.NoXPos {
			bestPos = b.Pos
			for _, v := range b.Values {
				if v.LackingPos() {
					continue
				}
				if v.Pos != src.NoXPos {
					// Assume values are still in roughly textual order;
					// TODO: could also seek minimum position?
					bestPos = v.Pos
					break
				}
			}
		}
		p.Pos = bestPos
		s.updateUnsetPredPos(p) // We do not expect long chains of these, thus recursion is okay.
	}
}

// Information about each open-coded defer.
type openDeferInfo struct {
	// The node representing the call of the defer
	n *ir.CallExpr
	// If defer call is closure call, the address of the argtmp where the
	// closure is stored.
	closure *ssa.Value
	// The node representing the argtmp where the closure is stored - used for
	// function, method, or interface call, to store a closure that panic
	// processing can use for this defer.
	closureNode *ir.Name
}
type state struct {
	// configuration (arch) information
	config *ssa.Config

	// function we're building
	f *ssa.Func

	// Node for function
	curfn *ir.Func

	// labels in f
	labels map[string]*ssaLabel

	// unlabeled break and continue statement tracking
	breakTo    *ssa.Block // current target for plain break statement
	continueTo *ssa.Block // current target for plain continue statement

	// current location where we're interpreting the AST
	curBlock *ssa.Block

	// variable assignments in the current block (map from variable symbol to ssa value)
	// *Node is the unique identifier (an ONAME Node) for the variable.
	// TODO: keep a single varnum map, then make all of these maps slices instead?
	vars map[ir.Node]*ssa.Value

	// fwdVars are variables that are used before they are defined in the current block.
	// This map exists just to coalesce multiple references into a single FwdRef op.
	// *Node is the unique identifier (an ONAME Node) for the variable.
	fwdVars map[ir.Node]*ssa.Value

	// all defined variables at the end of each block. Indexed by block ID.
	defvars []map[ir.Node]*ssa.Value

	// addresses of PPARAM and PPARAMOUT variables on the stack.
	decladdrs map[*ir.Name]*ssa.Value

	// starting values. Memory, stack pointer, and globals pointer
	startmem *ssa.Value
	sp       *ssa.Value
	sb       *ssa.Value
	// value representing address of where deferBits autotmp is stored
	deferBitsAddr *ssa.Value
	deferBitsTemp *ir.Name

	// line number stack. The current line number is top of stack
	line []src.XPos
	// the last line number processed; it may have been popped
	lastPos src.XPos

	// list of panic calls by function name and line number.
	// Used to deduplicate panic calls.
	panics map[funcLine]*ssa.Block

	cgoUnsafeArgs   bool
	hasdefer        bool // whether the function contains a defer statement
	softFloat       bool
	hasOpenDefers   bool // whether we are doing open-coded defers
	checkPtrEnabled bool // whether to insert checkptr instrumentation

	// If doing open-coded defers, list of info about the defer calls in
	// scanning order. Hence, at exit we should run these defers in reverse
	// order of this list
	openDefers []*openDeferInfo
	// For open-coded defers, this is the beginning and end blocks of the last
	// defer exit code that we have generated so far. We use these to share
	// code between exits if the shareDeferExits option (disabled by default)
	// is on.
	lastDeferExit       *ssa.Block // Entry block of last defer exit code we generated
	lastDeferFinalBlock *ssa.Block // Final block of last defer exit code we generated
	lastDeferCount      int        // Number of defers encountered at that point

	prevCall *ssa.Value // the previous call; use this to tie results to the call op.
}
type funcLine struct {
	f    *obj.LSym
	base *src.PosBase
	line uint
}

type ssaLabel struct {
	target         *ssa.Block // block identified by this label
	breakTarget    *ssa.Block // block to break to in control flow node identified by this label
	continueTarget *ssa.Block // block to continue to in control flow node identified by this label
}

// label returns the label associated with sym, creating it if necessary.
func (s *state) label(sym *types.Sym) *ssaLabel {
	lab := s.labels[sym.Name]
	if lab == nil {
		lab = new(ssaLabel)
		s.labels[sym.Name] = lab
	}
	return lab
}

func (s *state) Logf(msg string, args ...interface{}) { s.f.Logf(msg, args...) }
func (s *state) Log() bool                            { return s.f.Log() }
func (s *state) Fatalf(msg string, args ...interface{}) {
	s.f.Frontend().Fatalf(s.peekPos(), msg, args...)
}
func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) }
func (s *state) Debug_checknil() bool                                { return s.f.Frontend().Debug_checknil() }
func ssaMarker(name string) *ir.Name {
	return typecheck.NewName(&types.Sym{Name: name})
}

var (
	// marker node for the memory variable
	memVar = ssaMarker("mem")

	// marker nodes for temporary variables
	ptrVar       = ssaMarker("ptr")
	lenVar       = ssaMarker("len")
	capVar       = ssaMarker("cap")
	typVar       = ssaMarker("typ")
	okVar        = ssaMarker("ok")
	deferBitsVar = ssaMarker("deferBits")
)
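// (These markers never correspond to source-level variables; for example, the
// append codegen tracks a slice's pointer, length, and capacity components
// across blocks as the pseudo-variables ptrVar, lenVar, and capVar.)
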
// startBlock sets the current block we're generating code in to b.
func (s *state) startBlock(b *ssa.Block) {
	if s.curBlock != nil {
		s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
	}
	s.curBlock = b
	s.vars = map[ir.Node]*ssa.Value{}
	for n := range s.fwdVars {
		delete(s.fwdVars, n)
	}
}

// endBlock marks the end of generating code for the current block.
// Returns the (former) current block. Returns nil if there is no current
// block, i.e. if no code flows to the current execution point.
func (s *state) endBlock() *ssa.Block {
	b := s.curBlock
	if b == nil {
		return nil
	}
	for len(s.defvars) <= int(b.ID) {
		s.defvars = append(s.defvars, nil)
	}
	s.defvars[b.ID] = s.vars
	s.curBlock = nil
	s.vars = nil
	if b.LackingPos() {
		// Empty plain blocks get the line of their successor (handled after all blocks created),
		// except for increment blocks in For statements (handled in ssa conversion of OFOR),
		// and for blocks ending in GOTO/BREAK/CONTINUE.
		b.Pos = src.NoXPos
	} else {
		b.Pos = s.lastPos
	}
	return b
}

// pushLine pushes a line number on the line number stack.
func (s *state) pushLine(line src.XPos) {
	if !line.IsKnown() {
		// the frontend may emit a node with a missing line number;
		// use the parent line number in this case.
		line = s.peekPos()
		if base.Flag.K != 0 {
			base.Warn("buildssa: unknown position (line 0)")
		}
	} else {
		s.lastPos = line
	}

	s.line = append(s.line, line)
}

// popLine pops the top of the line number stack.
func (s *state) popLine() {
	s.line = s.line[:len(s.line)-1]
}

// peekPos peeks the top of the line number stack.
func (s *state) peekPos() src.XPos {
	return s.line[len(s.line)-1]
}
// newValue0 adds a new value with no arguments to the current block.
func (s *state) newValue0(op ssa.Op, t *types.Type) *ssa.Value {
	return s.curBlock.NewValue0(s.peekPos(), op, t)
}

// newValue0A adds a new value with no arguments and an aux value to the current block.
func (s *state) newValue0A(op ssa.Op, t *types.Type, aux ssa.Aux) *ssa.Value {
	return s.curBlock.NewValue0A(s.peekPos(), op, t, aux)
}

// newValue0I adds a new value with no arguments and an auxint value to the current block.
func (s *state) newValue0I(op ssa.Op, t *types.Type, auxint int64) *ssa.Value {
	return s.curBlock.NewValue0I(s.peekPos(), op, t, auxint)
}

// newValue1 adds a new value with one argument to the current block.
func (s *state) newValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1(s.peekPos(), op, t, arg)
}

// newValue1A adds a new value with one argument and an aux value to the current block.
func (s *state) newValue1A(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
}

// newValue1Apos adds a new value with one argument and an aux value to the current block.
// isStmt determines whether the created values may be a statement or not
// (i.e., false means never, true means maybe).
func (s *state) newValue1Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value, isStmt bool) *ssa.Value {
	if isStmt {
		return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
	}
	return s.curBlock.NewValue1A(s.peekPos().WithNotStmt(), op, t, aux, arg)
}

// newValue1I adds a new value with one argument and an auxint value to the current block.
func (s *state) newValue1I(op ssa.Op, t *types.Type, aux int64, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1I(s.peekPos(), op, t, aux, arg)
}

// newValue2 adds a new value with two arguments to the current block.
func (s *state) newValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2(s.peekPos(), op, t, arg0, arg1)
}

// newValue2A adds a new value with two arguments and an aux value to the current block.
func (s *state) newValue2A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1)
}

// newValue2Apos adds a new value with two arguments and an aux value to the current block.
// isStmt determines whether the created values may be a statement or not
// (i.e., false means never, true means maybe).
func (s *state) newValue2Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value, isStmt bool) *ssa.Value {
	if isStmt {
		return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1)
	}
	return s.curBlock.NewValue2A(s.peekPos().WithNotStmt(), op, t, aux, arg0, arg1)
}

// newValue2I adds a new value with two arguments and an auxint value to the current block.
func (s *state) newValue2I(op ssa.Op, t *types.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2I(s.peekPos(), op, t, aux, arg0, arg1)
}

// newValue3 adds a new value with three arguments to the current block.
func (s *state) newValue3(op ssa.Op, t *types.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3(s.peekPos(), op, t, arg0, arg1, arg2)
}

// newValue3I adds a new value with three arguments and an auxint value to the current block.
func (s *state) newValue3I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3I(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}

// newValue3A adds a new value with three arguments and an aux value to the current block.
func (s *state) newValue3A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}

// newValue3Apos adds a new value with three arguments and an aux value to the current block.
// isStmt determines whether the created values may be a statement or not
// (i.e., false means never, true means maybe).
func (s *state) newValue3Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1, arg2 *ssa.Value, isStmt bool) *ssa.Value {
	if isStmt {
		return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
	}
	return s.curBlock.NewValue3A(s.peekPos().WithNotStmt(), op, t, aux, arg0, arg1, arg2)
}

// newValue4 adds a new value with four arguments to the current block.
func (s *state) newValue4(op ssa.Op, t *types.Type, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue4(s.peekPos(), op, t, arg0, arg1, arg2, arg3)
}

// newValue4I adds a new value with four arguments and an auxint value to the current block.
func (s *state) newValue4I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue4I(s.peekPos(), op, t, aux, arg0, arg1, arg2, arg3)
}
func (s *state) entryBlock() *ssa.Block {
	b := s.f.Entry
	if base.Flag.N > 0 && s.curBlock != nil {
		// If optimizations are off, allocate in current block instead. Since with -N
		// we're not doing the CSE or tighten passes, putting lots of stuff in the
		// entry block leads to O(n^2) entries in the live value map during regalloc.
		// See issue 45897.
		b = s.curBlock
	}
	return b
}

// entryNewValue0 adds a new value with no arguments to the entry block.
func (s *state) entryNewValue0(op ssa.Op, t *types.Type) *ssa.Value {
	return s.entryBlock().NewValue0(src.NoXPos, op, t)
}

// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
func (s *state) entryNewValue0A(op ssa.Op, t *types.Type, aux ssa.Aux) *ssa.Value {
	return s.entryBlock().NewValue0A(src.NoXPos, op, t, aux)
}

// entryNewValue1 adds a new value with one argument to the entry block.
func (s *state) entryNewValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
	return s.entryBlock().NewValue1(src.NoXPos, op, t, arg)
}

// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
func (s *state) entryNewValue1I(op ssa.Op, t *types.Type, auxint int64, arg *ssa.Value) *ssa.Value {
	return s.entryBlock().NewValue1I(src.NoXPos, op, t, auxint, arg)
}

// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
func (s *state) entryNewValue1A(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value) *ssa.Value {
	return s.entryBlock().NewValue1A(src.NoXPos, op, t, aux, arg)
}

// entryNewValue2 adds a new value with two arguments to the entry block.
func (s *state) entryNewValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.entryBlock().NewValue2(src.NoXPos, op, t, arg0, arg1)
}

// entryNewValue2A adds a new value with two arguments and an aux value to the entry block.
func (s *state) entryNewValue2A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.entryBlock().NewValue2A(src.NoXPos, op, t, aux, arg0, arg1)
}
// const* routines add a new const value to the entry block.
func (s *state) constSlice(t *types.Type) *ssa.Value {
	return s.f.ConstSlice(t)
}
func (s *state) constInterface(t *types.Type) *ssa.Value {
	return s.f.ConstInterface(t)
}
func (s *state) constNil(t *types.Type) *ssa.Value { return s.f.ConstNil(t) }
func (s *state) constEmptyString(t *types.Type) *ssa.Value {
	return s.f.ConstEmptyString(t)
}
func (s *state) constBool(c bool) *ssa.Value {
	return s.f.ConstBool(types.Types[types.TBOOL], c)
}
func (s *state) constInt8(t *types.Type, c int8) *ssa.Value {
	return s.f.ConstInt8(t, c)
}
func (s *state) constInt16(t *types.Type, c int16) *ssa.Value {
	return s.f.ConstInt16(t, c)
}
func (s *state) constInt32(t *types.Type, c int32) *ssa.Value {
	return s.f.ConstInt32(t, c)
}
func (s *state) constInt64(t *types.Type, c int64) *ssa.Value {
	return s.f.ConstInt64(t, c)
}
func (s *state) constFloat32(t *types.Type, c float64) *ssa.Value {
	return s.f.ConstFloat32(t, c)
}
func (s *state) constFloat64(t *types.Type, c float64) *ssa.Value {
	return s.f.ConstFloat64(t, c)
}
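// constInt emits an integer constant matching the target's pointer width:
// a 64-bit constant on 64-bit targets, otherwise a 32-bit one. For example,
// on a 32-bit target a constant such as 1<<40 does not fit and trips Fatalf.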
func (s *state) constInt(t *types.Type, c int64) *ssa.Value {
	if s.config.PtrSize == 8 {
		return s.constInt64(t, c)
	}
	if int64(int32(c)) != c {
		s.Fatalf("integer constant too big %d", c)
	}
	return s.constInt32(t, int32(c))
}
func (s *state) constOffPtrSP(t *types.Type, c int64) *ssa.Value {
	return s.f.ConstOffPtrSP(t, c, s.sp)
}

// newValueOrSfCall* are wrappers around newValue*, which may create a call to a
// soft-float runtime function instead (when emitting soft-float code).
func (s *state) newValueOrSfCall1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
	if s.softFloat {
		if c, ok := s.sfcall(op, arg); ok {
			return c
		}
	}
	return s.newValue1(op, t, arg)
}
func (s *state) newValueOrSfCall2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	if s.softFloat {
		if c, ok := s.sfcall(op, arg0, arg1); ok {
			return c
		}
	}
	return s.newValue2(op, t, arg0, arg1)
}

type instrumentKind uint8

const (
	instrumentRead = iota
	instrumentWrite
	instrumentMove
)

func (s *state) instrument(t *types.Type, addr *ssa.Value, kind instrumentKind) {
	s.instrument2(t, addr, nil, kind)
}
// instrumentFields instruments a read/write operation on addr.
// If it is instrumenting for MSAN or ASAN and t is a struct type, it instruments
// the operation for each field, instead of for the whole struct.
func (s *state) instrumentFields(t *types.Type, addr *ssa.Value, kind instrumentKind) {
	if !(base.Flag.MSan || base.Flag.ASan) || !t.IsStruct() {
		s.instrument(t, addr, kind)
		return
	}
	for _, f := range t.Fields().Slice() {
		if f.Sym.IsBlank() {
			continue
		}
		offptr := s.newValue1I(ssa.OpOffPtr, types.NewPtr(f.Type), f.Offset, addr)
		s.instrumentFields(f.Type, offptr, kind)
	}
}
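// (Illustration: for a struct {A int64; _ int64; B bool} under msan or asan,
// A and B are instrumented separately at their respective field offsets,
// and the blank field is skipped entirely.)
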
func (s *state) instrumentMove(t *types.Type, dst, src *ssa.Value) {
	if base.Flag.MSan {
		s.instrument2(t, dst, src, instrumentMove)
	} else {
		s.instrument(t, src, instrumentRead)
		s.instrument(t, dst, instrumentWrite)
	}
}

func (s *state) instrument2(t *types.Type, addr, addr2 *ssa.Value, kind instrumentKind) {
	if !s.curfn.InstrumentBody() {
		return
	}

	w := t.Size()
	if w == 0 {
		return // can't race on zero-sized things
	}

	if ssa.IsSanitizerSafeAddr(addr) {
		return
	}

	var fn *obj.LSym
	needWidth := false

	if addr2 != nil && kind != instrumentMove {
		panic("instrument2: non-nil addr2 for non-move instrumentation")
	}

	if base.Flag.MSan {
		switch kind {
		case instrumentRead:
			fn = ir.Syms.Msanread
		case instrumentWrite:
			fn = ir.Syms.Msanwrite
		case instrumentMove:
			fn = ir.Syms.Msanmove
		default:
			panic("unreachable")
		}
		needWidth = true
	} else if base.Flag.Race && t.NumComponents(types.CountBlankFields) > 1 {
		// for composite objects we have to write every address
		// because a write might happen to any subobject.
		// composites with only one element don't have subobjects, though.
		switch kind {
		case instrumentRead:
			fn = ir.Syms.Racereadrange
		case instrumentWrite:
			fn = ir.Syms.Racewriterange
		default:
			panic("unreachable")
		}
		needWidth = true
	} else if base.Flag.Race {
		// for non-composite objects we can write just the start
		// address, as any write must write the first byte.
		switch kind {
		case instrumentRead:
			fn = ir.Syms.Raceread
		case instrumentWrite:
			fn = ir.Syms.Racewrite
		default:
			panic("unreachable")
		}
	} else if base.Flag.ASan {
		switch kind {
		case instrumentRead:
			fn = ir.Syms.Asanread
		case instrumentWrite:
			fn = ir.Syms.Asanwrite
		default:
			panic("unreachable")
		}
		needWidth = true
	} else {
		panic("unreachable")
	}

	args := []*ssa.Value{addr}
	if addr2 != nil {
		args = append(args, addr2)
	}
	if needWidth {
		args = append(args, s.constInt(types.Types[types.TUINTPTR], w))
	}
	s.rtcall(fn, true, nil, args...)
}

func (s *state) load(t *types.Type, src *ssa.Value) *ssa.Value {
	s.instrumentFields(t, src, instrumentRead)
	return s.rawLoad(t, src)
}

func (s *state) rawLoad(t *types.Type, src *ssa.Value) *ssa.Value {
	return s.newValue2(ssa.OpLoad, t, src, s.mem())
}

func (s *state) store(t *types.Type, dst, val *ssa.Value) {
	s.vars[memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, dst, val, s.mem())
}

func (s *state) zero(t *types.Type, dst *ssa.Value) {
	s.instrument(t, dst, instrumentWrite)
	store := s.newValue2I(ssa.OpZero, types.TypeMem, t.Size(), dst, s.mem())
	store.Aux = t
	s.vars[memVar] = store
}
func (s *state) move(t *types.Type, dst, src *ssa.Value) {
	s.moveWhichMayOverlap(t, dst, src, false)
}
func (s *state) moveWhichMayOverlap(t *types.Type, dst, src *ssa.Value, mayOverlap bool) {
	s.instrumentMove(t, dst, src)
	if mayOverlap && t.IsArray() && t.NumElem() > 1 && !ssa.IsInlinableMemmove(dst, src, t.Size(), s.f.Config) {
		// Normally, when moving Go values of type T from one location to another,
		// we don't need to worry about partial overlaps. The two Ts must either be
		// in disjoint (nonoverlapping) memory or in exactly the same location.
		// There are 2 cases where this isn't true:
		//  1) Using unsafe you can arrange partial overlaps.
		//  2) Since Go 1.17, you can use a cast from a slice to a ptr-to-array.
		//     https://go.dev/ref/spec#Conversions_from_slice_to_array_pointer
		//     This feature can be used to construct partial overlaps of array types.
		//       var a [3]int
		//       p := (*[2]int)(a[:])
		//       q := (*[2]int)(a[1:])
		//       *p = *q
		// We don't care about solving 1. Or at least, we haven't historically
		// and no one has complained.
		// For 2, we need to ensure that if there might be partial overlap,
		// then we can't use OpMove; we must use memmove instead.
		// (memmove handles partial overlap by copying in the correct
		// direction. OpMove does not.)
		//
		// Note that we have to be careful here not to introduce a call when
		// we're marshaling arguments to a call or unmarshaling results from a call.
		// Cases where this is happening must pass mayOverlap as false.
		// (Currently this only happens when unmarshaling results of a call.)
		if t.HasPointers() {
			s.rtcall(ir.Syms.Typedmemmove, true, nil, s.reflectType(t), dst, src)
			// We would have otherwise implemented this move with straightline code,
			// including a write barrier. Pretend we issue a write barrier here,
			// so that the write barrier tests work. (Otherwise they'd need to know
			// the details of IsInlineableMemmove.)
			s.curfn.SetWBPos(s.peekPos())
		} else {
			s.rtcall(ir.Syms.Memmove, true, nil, dst, src, s.constInt(types.Types[types.TUINTPTR], t.Size()))
		}
		ssa.LogLargeCopy(s.f.Name, s.peekPos(), t.Size())
		return
	}
	store := s.newValue3I(ssa.OpMove, types.TypeMem, t.Size(), dst, src, s.mem())
	store.Aux = t
	s.vars[memVar] = store
}
// stmtList converts the statement list n to SSA and adds it to s.
func (s *state) stmtList(l ir.Nodes) {
	for _, n := range l {
		s.stmt(n)
	}
}

// stmt converts the statement n to SSA and adds it to s.
func (s *state) stmt(n ir.Node) {
	s.pushLine(n.Pos())
	defer s.popLine()

	// If s.curBlock is nil, and n isn't a label (which might have an associated goto somewhere),
	// then this code is dead. Stop here.
	if s.curBlock == nil && n.Op() != ir.OLABEL {
		return
	}

	s.stmtList(n.Init())
	switch n.Op() {

	case ir.OBLOCK:
		n := n.(*ir.BlockStmt)
		s.stmtList(n.List)

	// No-ops
	case ir.ODCLCONST, ir.ODCLTYPE, ir.OFALL:

	// Expression statements
	case ir.OCALLFUNC:
		n := n.(*ir.CallExpr)
		if ir.IsIntrinsicCall(n) {
			s.intrinsicCall(n)
			return
		}
		fallthrough

	case ir.OCALLINTER:
		n := n.(*ir.CallExpr)
		s.callResult(n, callNormal)
		if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.ONAME && n.X.(*ir.Name).Class == ir.PFUNC {
			if fn := n.X.Sym().Name; base.Flag.CompilingRuntime && fn == "throw" ||
				n.X.Sym().Pkg == ir.Pkgs.Runtime && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap" || fn == "panicunsafeslicelen" || fn == "panicunsafeslicenilptr" || fn == "panicunsafestringlen" || fn == "panicunsafestringnilptr") {
				m := s.mem()
				b := s.endBlock()
				b.Kind = ssa.BlockExit
				b.SetControl(m)
				// TODO: never rewrite OPANIC to OCALLFUNC in the
				// first place. Need to wait until all backends
				// go through SSA.
			}
		}
	case ir.ODEFER:
		n := n.(*ir.GoDeferStmt)
		if base.Debug.Defer > 0 {
			var defertype string
			if s.hasOpenDefers {
				defertype = "open-coded"
			} else if n.Esc() == ir.EscNever {
				defertype = "stack-allocated"
			} else {
				defertype = "heap-allocated"
			}
			base.WarnfAt(n.Pos(), "%s defer", defertype)
		}
		if s.hasOpenDefers {
			s.openDeferRecord(n.Call.(*ir.CallExpr))
		} else {
			d := callDefer
			if n.Esc() == ir.EscNever {
				d = callDeferStack
			}
			s.callResult(n.Call.(*ir.CallExpr), d)
		}
	case ir.OGO:
		n := n.(*ir.GoDeferStmt)
		s.callResult(n.Call.(*ir.CallExpr), callGo)

	case ir.OAS2DOTTYPE:
		n := n.(*ir.AssignListStmt)
		var res, resok *ssa.Value
		if n.Rhs[0].Op() == ir.ODOTTYPE2 {
			res, resok = s.dottype(n.Rhs[0].(*ir.TypeAssertExpr), true)
		} else {
			res, resok = s.dynamicDottype(n.Rhs[0].(*ir.DynamicTypeAssertExpr), true)
		}
		deref := false
		if !TypeOK(n.Rhs[0].Type()) {
			if res.Op != ssa.OpLoad {
				s.Fatalf("dottype of non-load")
			}
			mem := s.mem()
			if res.Args[1] != mem {
				s.Fatalf("memory no longer live from 2-result dottype load")
			}
			deref = true
			res = res.Args[0]
		}
		s.assign(n.Lhs[0], res, deref, 0)
		s.assign(n.Lhs[1], resok, false, 0)
		return
	case ir.OAS2FUNC:
		// We come here only when it is an intrinsic call returning two values.
		n := n.(*ir.AssignListStmt)
		call := n.Rhs[0].(*ir.CallExpr)
		if !ir.IsIntrinsicCall(call) {
			s.Fatalf("non-intrinsic AS2FUNC not expanded %v", call)
		}
		v := s.intrinsicCall(call)
		v1 := s.newValue1(ssa.OpSelect0, n.Lhs[0].Type(), v)
		v2 := s.newValue1(ssa.OpSelect1, n.Lhs[1].Type(), v)
		s.assign(n.Lhs[0], v1, false, 0)
		s.assign(n.Lhs[1], v2, false, 0)
		return

	case ir.ODCL:
		n := n.(*ir.Decl)
		if v := n.X; v.Esc() == ir.EscHeap {
			s.newHeapaddr(v)
		}

	case ir.OLABEL:
		n := n.(*ir.LabelStmt)
		sym := n.Label
		if sym.IsBlank() {
			// Nothing to do because the label isn't targetable. See issue 52278.
			break
		}
		lab := s.label(sym)

		// The label might already have a target block via a goto.
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}

		// Go to that label.
		// (We pretend "label:" is preceded by "goto label", unless the predecessor is unreachable.)
		if s.curBlock != nil {
			b := s.endBlock()
			b.AddEdgeTo(lab.target)
		}
		s.startBlock(lab.target)

	case ir.OGOTO:
		n := n.(*ir.BranchStmt)
		sym := n.Label

		lab := s.label(sym)
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}

		b := s.endBlock()
		b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
		b.AddEdgeTo(lab.target)
	case ir.OAS:
		n := n.(*ir.AssignStmt)
		if n.X == n.Y && n.X.Op() == ir.ONAME {
			// An x=x assignment. No point in doing anything
			// here. In addition, skipping this assignment
			// prevents generating:
			//	VARDEF x
			//	COPY x -> x
			// which is bad because x is incorrectly considered
			// dead before the vardef. See issue #14904.
			return
		}

		// mayOverlap keeps track of whether the LHS and RHS might
		// refer to partially overlapping memory. Partial overlap can
		// only happen for arrays, see the comment in moveWhichMayOverlap.
		//
		// If both sides of the assignment are not dereferences, then partial
		// overlap can't happen. Partial overlap can only occur when the
		// arrays referenced are strictly smaller parts of the same base array.
		// If one side of the assignment is a full array, then partial overlap
		// can't happen. (The arrays are either disjoint or identical.)
		mayOverlap := n.X.Op() == ir.ODEREF && (n.Y != nil && n.Y.Op() == ir.ODEREF)
		if n.Y != nil && n.Y.Op() == ir.ODEREF {
			p := n.Y.(*ir.StarExpr).X
			for p.Op() == ir.OCONVNOP {
				p = p.(*ir.ConvExpr).X
			}
			if p.Op() == ir.OSPTR && p.(*ir.UnaryExpr).X.Type().IsString() {
				// Pointer fields of strings point to unmodifiable memory.
				// That memory can't overlap with the memory being written.
				mayOverlap = false
			}
		}
1614 case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT:
1615 // All literals with nonzero fields have already been
1616 // rewritten during walk. Any that remain are just T{}
1617 // or equivalents. Use the zero value.
1618 if !ir.IsZero(rhs) {
1619 s.Fatalf("literal with nonzero value in SSA: %v", rhs)
1623 rhs := rhs.(*ir.CallExpr)
1624 // Check whether we're writing the result of an append back to the same slice.
1625 // If so, we handle it specially to avoid write barriers on the fast
1626 // (non-growth) path.
1627 if !ir.SameSafeExpr(n.X, rhs.Args[0]) || base.Flag.N != 0 {
1630 // If the slice can be SSA'd, it'll be on the stack,
1631 // so there will be no write barriers,
1632 // so there's no need to attempt to prevent them.
1634 if base.Debug.Append > 0 { // replicating old diagnostic message
1635 base.WarnfAt(n.Pos(), "append: len-only update (in local slice)")
1639 if base.Debug.Append > 0 {
1640 base.WarnfAt(n.Pos(), "append: len-only update")
1647 if ir.IsBlank(n.X) {
1649 // Just evaluate rhs for side-effects.
1667 r = nil // Signal assign to use OpZero.
1680 if rhs != nil && (rhs.Op() == ir.OSLICE || rhs.Op() == ir.OSLICE3 || rhs.Op() == ir.OSLICESTR) && ir.SameSafeExpr(rhs.(*ir.SliceExpr).X, n.X) {
1681 // We're assigning a slicing operation back to its source.
1682 // Don't write back fields we aren't changing. See issue #14855.
1683 rhs := rhs.(*ir.SliceExpr)
1684 i, j, k := rhs.Low, rhs.High, rhs.Max
1685 if i != nil && (i.Op() == ir.OLITERAL && i.Val().Kind() == constant.Int && ir.Int64Val(i) == 0) {
1686 // [0:...] is the same as [:...]
1689 // TODO: detect defaults for len/cap also.
1690 // Currently doesn't really work because (*p)[:len(*p)] appears here as:
1693 // if j != nil && (j.Op == OLEN && SameSafeExpr(j.Left, n.Left)) {
1696 // if k != nil && (k.Op == OCAP && SameSafeExpr(k.Left, n.Left)) {
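// Sketch of the effect: for s = s[:n], only the len field of s is
// rewritten; the ptr and cap stores are suppressed via the skip mask,
// since those words are unchanged by the slicing operation.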
1710 s.assignWhichMayOverlap(n.X, r, deref, skip, mayOverlap)
1714 if ir.IsConst(n.Cond, constant.Bool) {
1715 s.stmtList(n.Cond.Init())
1716 if ir.BoolVal(n.Cond) {
1724 bEnd := s.f.NewBlock(ssa.BlockPlain)
1729 var bThen *ssa.Block
1730 if len(n.Body) != 0 {
1731 bThen = s.f.NewBlock(ssa.BlockPlain)
1735 var bElse *ssa.Block
1736 if len(n.Else) != 0 {
1737 bElse = s.f.NewBlock(ssa.BlockPlain)
1741 s.condBranch(n.Cond, bThen, bElse, likely)
1743 if len(n.Body) != 0 {
1746 if b := s.endBlock(); b != nil {
1750 if len(n.Else) != 0 {
1753 if b := s.endBlock(); b != nil {
1760 n := n.(*ir.ReturnStmt)
1761 s.stmtList(n.Results)
1763 b.Pos = s.lastPos.WithIsStmt()
1766 n := n.(*ir.TailCallStmt)
1767 s.callResult(n.Call, callTail)
1770 b.Kind = ssa.BlockRetJmp // could use BlockExit. BlockRetJmp is mostly for clarity.
1773 case ir.OCONTINUE, ir.OBREAK:
1774 n := n.(*ir.BranchStmt)
1777 // plain break/continue
1785 // labeled break/continue; look up the target
1790 to = lab.continueTarget
1792 to = lab.breakTarget
1797 b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
1801 // OFOR: for Ninit; Cond; Post { Body }
1802 // cond (Cond); body (Body); incr (Post)
1803 n := n.(*ir.ForStmt)
1804 bCond := s.f.NewBlock(ssa.BlockPlain)
1805 bBody := s.f.NewBlock(ssa.BlockPlain)
1806 bIncr := s.f.NewBlock(ssa.BlockPlain)
1807 bEnd := s.f.NewBlock(ssa.BlockPlain)
1809 // ensure empty for loops have correct position; issue #30167
1812 // first, jump to condition test
1816 // generate code to test condition
1819 s.condBranch(n.Cond, bBody, bEnd, 1)
1822 b.Kind = ssa.BlockPlain
1826 // set up for continue/break in body
1827 prevContinue := s.continueTo
1828 prevBreak := s.breakTo
1829 s.continueTo = bIncr
1832 if sym := n.Label; sym != nil {
1835 lab.continueTarget = bIncr
1836 lab.breakTarget = bEnd
1843 // tear down continue/break
1844 s.continueTo = prevContinue
1845 s.breakTo = prevBreak
1847 lab.continueTarget = nil
1848 lab.breakTarget = nil
1851 // done with body, goto incr
1852 if b := s.endBlock(); b != nil {
1861 if b := s.endBlock(); b != nil {
1863 // It can happen that bIncr ends in a block containing only VARKILL,
1864 // and that muddles the debugging experience.
1865 if b.Pos == src.NoXPos {
1872 case ir.OSWITCH, ir.OSELECT:
1873 // These have been mostly rewritten by the front end into their bodies.
1874 // Our main task is to correctly hook up any break statements.
1875 bEnd := s.f.NewBlock(ssa.BlockPlain)
1877 prevBreak := s.breakTo
1881 if n.Op() == ir.OSWITCH {
1882 n := n.(*ir.SwitchStmt)
1886 n := n.(*ir.SelectStmt)
1895 lab.breakTarget = bEnd
1898 // generate body code
1901 s.breakTo = prevBreak
1903 lab.breakTarget = nil
1906 // walk adds explicit OBREAK nodes to the end of all reachable code paths.
1907 // If we still have a current block here, then mark it unreachable.
1908 if s.curBlock != nil {
1911 b.Kind = ssa.BlockExit
1917 n := n.(*ir.JumpTableStmt)
1919 // Make blocks we'll need.
1920 jt := s.f.NewBlock(ssa.BlockJumpTable)
1921 bEnd := s.f.NewBlock(ssa.BlockPlain)
1923 // The only thing that needs evaluating is the index we're looking up.
1924 idx := s.expr(n.Idx)
1925 unsigned := idx.Type.IsUnsigned()
1927 // Extend so we can do everything in uintptr arithmetic.
1928 t := types.Types[types.TUINTPTR]
1929 idx = s.conv(nil, idx, idx.Type, t)
1931 // The ending condition for the current block decides whether we'll use
1932 // the jump table at all.
1933 // We check that min <= idx <= max and jump around the jump table
1934 // if that test fails.
1935 // We implement min <= idx <= max with 0 <= idx-min <= max-min, because
1936 // we'll need idx-min anyway as the control value for the jump table.
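// Worked example (sketch): for cases 3..10, min = 3 and max = 10, so the
// test below is idx-3 <= 7 in unsigned arithmetic. If idx < 3, the
// subtraction wraps around to a huge unsigned value and the comparison
// fails, so a single unsigned compare implements both bounds.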
1939 min, _ = constant.Uint64Val(n.Cases[0])
1940 max, _ = constant.Uint64Val(n.Cases[len(n.Cases)-1])
1942 mn, _ := constant.Int64Val(n.Cases[0])
1943 mx, _ := constant.Int64Val(n.Cases[len(n.Cases)-1])
1947 // Compare idx-min with max-min, to see if we can use the jump table.
1948 idx = s.newValue2(s.ssaOp(ir.OSUB, t), t, idx, s.uintptrConstant(min))
1949 width := s.uintptrConstant(max - min)
1950 cmp := s.newValue2(s.ssaOp(ir.OLE, t), types.Types[types.TBOOL], idx, width)
1952 b.Kind = ssa.BlockIf
1954 b.AddEdgeTo(jt) // in range - use jump table
1955 b.AddEdgeTo(bEnd) // out of range - no case in the jump table will trigger
1956 b.Likely = ssa.BranchLikely // TODO: assumes missing the table entirely is unlikely. True?
1958 // Build jump table block.
1961 if base.Flag.Cfg.SpectreIndex {
1962 idx = s.newValue2(ssa.OpSpectreSliceIndex, t, idx, width)
1966 // Figure out where we should go for each index in the table.
1967 table := make([]*ssa.Block, max-min+1)
1968 for i := range table {
1969 table[i] = bEnd // default target
1971 for i := range n.Targets {
1973 lab := s.label(n.Targets[i])
1974 if lab.target == nil {
1975 lab.target = s.f.NewBlock(ssa.BlockPlain)
1979 val, _ = constant.Uint64Val(c)
1981 vl, _ := constant.Int64Val(c)
1984 // Overwrite the default target.
1985 table[val-min] = lab.target
1987 for _, t := range table {
1995 n := n.(*ir.UnaryExpr)
2000 n := n.(*ir.InlineMarkStmt)
2001 s.newValue1I(ssa.OpInlMark, types.TypeVoid, n.Index, s.mem())
2004 s.Fatalf("unhandled stmt %v", n.Op())
2008 // If true, share as many open-coded defer exits as possible (with the downside of
2009 // worse line-number information)
2010 const shareDeferExits = false
2012 // exit processes any code that needs to be generated just before returning.
2013 // It returns a BlockRet block that ends the control flow. Its control value
2014 // will be set to the final memory state.
2015 func (s *state) exit() *ssa.Block {
2017 if s.hasOpenDefers {
2018 if shareDeferExits && s.lastDeferExit != nil && len(s.openDefers) == s.lastDeferCount {
2019 if s.curBlock.Kind != ssa.BlockPlain {
2020 panic("Block for an exit should be BlockPlain")
2022 s.curBlock.AddEdgeTo(s.lastDeferExit)
2024 return s.lastDeferFinalBlock
2028 s.rtcall(ir.Syms.Deferreturn, true, nil)
2034 // Do actual return.
2035 // These currently turn into self-copies (in many cases).
2036 resultFields := s.curfn.Type().Results().FieldSlice()
2037 results := make([]*ssa.Value, len(resultFields)+1)
2038 m = s.newValue0(ssa.OpMakeResult, s.f.OwnAux.LateExpansionResultType())
2039 // Store SSAable and heap-escaped PPARAMOUT variables back to stack locations.
2040 for i, f := range resultFields {
2041 n := f.Nname.(*ir.Name)
2042 if s.canSSA(n) { // result is in some SSA variable
2043 if !n.IsOutputParamInRegisters() && n.Type().HasPointers() {
2044 // We are about to store to the result slot.
2045 s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
2047 results[i] = s.variable(n, n.Type())
2048 } else if !n.OnStack() { // result is actually heap allocated
2049 // We are about to copy the in-heap result to the result slot.
2050 if n.Type().HasPointers() {
2051 s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
2053 ha := s.expr(n.Heapaddr)
2054 s.instrumentFields(n.Type(), ha, instrumentRead)
2055 results[i] = s.newValue2(ssa.OpDereference, n.Type(), ha, s.mem())
2056 } else { // result is not SSA-able; not escaped, so not on heap, but too large for SSA.
2057 // Before the register ABI this ought to be a self-move, home=dest.
2058 // With the register ABI, it's still a self-move if the parameter is on the stack (i.e., too big or overflowed).
2059 // No VarDef, as the result slot is already holding a live value.
2060 results[i] = s.newValue2(ssa.OpDereference, n.Type(), s.addr(n), s.mem())
2064 // Run exit code. Today, this is just racefuncexit, in -race mode.
2065 // TODO(register args) this seems risky here with a register-ABI, but not clear it is right to do it earlier either.
2066 // Spills in register allocation might just fix it.
2067 s.stmtList(s.curfn.Exit)
2069 results[len(results)-1] = s.mem()
2070 m.AddArgs(results...)
2073 b.Kind = ssa.BlockRet
2075 if s.hasdefer && s.hasOpenDefers {
2076 s.lastDeferFinalBlock = b
2081 type opAndType struct {
2086 var opToSSA = map[opAndType]ssa.Op{
2087 {ir.OADD, types.TINT8}: ssa.OpAdd8,
2088 {ir.OADD, types.TUINT8}: ssa.OpAdd8,
2089 {ir.OADD, types.TINT16}: ssa.OpAdd16,
2090 {ir.OADD, types.TUINT16}: ssa.OpAdd16,
2091 {ir.OADD, types.TINT32}: ssa.OpAdd32,
2092 {ir.OADD, types.TUINT32}: ssa.OpAdd32,
2093 {ir.OADD, types.TINT64}: ssa.OpAdd64,
2094 {ir.OADD, types.TUINT64}: ssa.OpAdd64,
2095 {ir.OADD, types.TFLOAT32}: ssa.OpAdd32F,
2096 {ir.OADD, types.TFLOAT64}: ssa.OpAdd64F,
2098 {ir.OSUB, types.TINT8}: ssa.OpSub8,
2099 {ir.OSUB, types.TUINT8}: ssa.OpSub8,
2100 {ir.OSUB, types.TINT16}: ssa.OpSub16,
2101 {ir.OSUB, types.TUINT16}: ssa.OpSub16,
2102 {ir.OSUB, types.TINT32}: ssa.OpSub32,
2103 {ir.OSUB, types.TUINT32}: ssa.OpSub32,
2104 {ir.OSUB, types.TINT64}: ssa.OpSub64,
2105 {ir.OSUB, types.TUINT64}: ssa.OpSub64,
2106 {ir.OSUB, types.TFLOAT32}: ssa.OpSub32F,
2107 {ir.OSUB, types.TFLOAT64}: ssa.OpSub64F,
2109 {ir.ONOT, types.TBOOL}: ssa.OpNot,
2111 {ir.ONEG, types.TINT8}: ssa.OpNeg8,
2112 {ir.ONEG, types.TUINT8}: ssa.OpNeg8,
2113 {ir.ONEG, types.TINT16}: ssa.OpNeg16,
2114 {ir.ONEG, types.TUINT16}: ssa.OpNeg16,
2115 {ir.ONEG, types.TINT32}: ssa.OpNeg32,
2116 {ir.ONEG, types.TUINT32}: ssa.OpNeg32,
2117 {ir.ONEG, types.TINT64}: ssa.OpNeg64,
2118 {ir.ONEG, types.TUINT64}: ssa.OpNeg64,
2119 {ir.ONEG, types.TFLOAT32}: ssa.OpNeg32F,
2120 {ir.ONEG, types.TFLOAT64}: ssa.OpNeg64F,
2122 {ir.OBITNOT, types.TINT8}: ssa.OpCom8,
2123 {ir.OBITNOT, types.TUINT8}: ssa.OpCom8,
2124 {ir.OBITNOT, types.TINT16}: ssa.OpCom16,
2125 {ir.OBITNOT, types.TUINT16}: ssa.OpCom16,
2126 {ir.OBITNOT, types.TINT32}: ssa.OpCom32,
2127 {ir.OBITNOT, types.TUINT32}: ssa.OpCom32,
2128 {ir.OBITNOT, types.TINT64}: ssa.OpCom64,
2129 {ir.OBITNOT, types.TUINT64}: ssa.OpCom64,
2131 {ir.OIMAG, types.TCOMPLEX64}: ssa.OpComplexImag,
2132 {ir.OIMAG, types.TCOMPLEX128}: ssa.OpComplexImag,
2133 {ir.OREAL, types.TCOMPLEX64}: ssa.OpComplexReal,
2134 {ir.OREAL, types.TCOMPLEX128}: ssa.OpComplexReal,
2136 {ir.OMUL, types.TINT8}: ssa.OpMul8,
2137 {ir.OMUL, types.TUINT8}: ssa.OpMul8,
2138 {ir.OMUL, types.TINT16}: ssa.OpMul16,
2139 {ir.OMUL, types.TUINT16}: ssa.OpMul16,
2140 {ir.OMUL, types.TINT32}: ssa.OpMul32,
2141 {ir.OMUL, types.TUINT32}: ssa.OpMul32,
2142 {ir.OMUL, types.TINT64}: ssa.OpMul64,
2143 {ir.OMUL, types.TUINT64}: ssa.OpMul64,
2144 {ir.OMUL, types.TFLOAT32}: ssa.OpMul32F,
2145 {ir.OMUL, types.TFLOAT64}: ssa.OpMul64F,
2147 {ir.ODIV, types.TFLOAT32}: ssa.OpDiv32F,
2148 {ir.ODIV, types.TFLOAT64}: ssa.OpDiv64F,
2150 {ir.ODIV, types.TINT8}: ssa.OpDiv8,
2151 {ir.ODIV, types.TUINT8}: ssa.OpDiv8u,
2152 {ir.ODIV, types.TINT16}: ssa.OpDiv16,
2153 {ir.ODIV, types.TUINT16}: ssa.OpDiv16u,
2154 {ir.ODIV, types.TINT32}: ssa.OpDiv32,
2155 {ir.ODIV, types.TUINT32}: ssa.OpDiv32u,
2156 {ir.ODIV, types.TINT64}: ssa.OpDiv64,
2157 {ir.ODIV, types.TUINT64}: ssa.OpDiv64u,
2159 {ir.OMOD, types.TINT8}: ssa.OpMod8,
2160 {ir.OMOD, types.TUINT8}: ssa.OpMod8u,
2161 {ir.OMOD, types.TINT16}: ssa.OpMod16,
2162 {ir.OMOD, types.TUINT16}: ssa.OpMod16u,
2163 {ir.OMOD, types.TINT32}: ssa.OpMod32,
2164 {ir.OMOD, types.TUINT32}: ssa.OpMod32u,
2165 {ir.OMOD, types.TINT64}: ssa.OpMod64,
2166 {ir.OMOD, types.TUINT64}: ssa.OpMod64u,
2168 {ir.OAND, types.TINT8}: ssa.OpAnd8,
2169 {ir.OAND, types.TUINT8}: ssa.OpAnd8,
2170 {ir.OAND, types.TINT16}: ssa.OpAnd16,
2171 {ir.OAND, types.TUINT16}: ssa.OpAnd16,
2172 {ir.OAND, types.TINT32}: ssa.OpAnd32,
2173 {ir.OAND, types.TUINT32}: ssa.OpAnd32,
2174 {ir.OAND, types.TINT64}: ssa.OpAnd64,
2175 {ir.OAND, types.TUINT64}: ssa.OpAnd64,
2177 {ir.OOR, types.TINT8}: ssa.OpOr8,
2178 {ir.OOR, types.TUINT8}: ssa.OpOr8,
2179 {ir.OOR, types.TINT16}: ssa.OpOr16,
2180 {ir.OOR, types.TUINT16}: ssa.OpOr16,
2181 {ir.OOR, types.TINT32}: ssa.OpOr32,
2182 {ir.OOR, types.TUINT32}: ssa.OpOr32,
2183 {ir.OOR, types.TINT64}: ssa.OpOr64,
2184 {ir.OOR, types.TUINT64}: ssa.OpOr64,
2186 {ir.OXOR, types.TINT8}: ssa.OpXor8,
2187 {ir.OXOR, types.TUINT8}: ssa.OpXor8,
2188 {ir.OXOR, types.TINT16}: ssa.OpXor16,
2189 {ir.OXOR, types.TUINT16}: ssa.OpXor16,
2190 {ir.OXOR, types.TINT32}: ssa.OpXor32,
2191 {ir.OXOR, types.TUINT32}: ssa.OpXor32,
2192 {ir.OXOR, types.TINT64}: ssa.OpXor64,
2193 {ir.OXOR, types.TUINT64}: ssa.OpXor64,
2195 {ir.OEQ, types.TBOOL}: ssa.OpEqB,
2196 {ir.OEQ, types.TINT8}: ssa.OpEq8,
2197 {ir.OEQ, types.TUINT8}: ssa.OpEq8,
2198 {ir.OEQ, types.TINT16}: ssa.OpEq16,
2199 {ir.OEQ, types.TUINT16}: ssa.OpEq16,
2200 {ir.OEQ, types.TINT32}: ssa.OpEq32,
2201 {ir.OEQ, types.TUINT32}: ssa.OpEq32,
2202 {ir.OEQ, types.TINT64}: ssa.OpEq64,
2203 {ir.OEQ, types.TUINT64}: ssa.OpEq64,
2204 {ir.OEQ, types.TINTER}: ssa.OpEqInter,
2205 {ir.OEQ, types.TSLICE}: ssa.OpEqSlice,
2206 {ir.OEQ, types.TFUNC}: ssa.OpEqPtr,
2207 {ir.OEQ, types.TMAP}: ssa.OpEqPtr,
2208 {ir.OEQ, types.TCHAN}: ssa.OpEqPtr,
2209 {ir.OEQ, types.TPTR}: ssa.OpEqPtr,
2210 {ir.OEQ, types.TUINTPTR}: ssa.OpEqPtr,
2211 {ir.OEQ, types.TUNSAFEPTR}: ssa.OpEqPtr,
2212 {ir.OEQ, types.TFLOAT64}: ssa.OpEq64F,
2213 {ir.OEQ, types.TFLOAT32}: ssa.OpEq32F,
2215 {ir.ONE, types.TBOOL}: ssa.OpNeqB,
2216 {ir.ONE, types.TINT8}: ssa.OpNeq8,
2217 {ir.ONE, types.TUINT8}: ssa.OpNeq8,
2218 {ir.ONE, types.TINT16}: ssa.OpNeq16,
2219 {ir.ONE, types.TUINT16}: ssa.OpNeq16,
2220 {ir.ONE, types.TINT32}: ssa.OpNeq32,
2221 {ir.ONE, types.TUINT32}: ssa.OpNeq32,
2222 {ir.ONE, types.TINT64}: ssa.OpNeq64,
2223 {ir.ONE, types.TUINT64}: ssa.OpNeq64,
2224 {ir.ONE, types.TINTER}: ssa.OpNeqInter,
2225 {ir.ONE, types.TSLICE}: ssa.OpNeqSlice,
2226 {ir.ONE, types.TFUNC}: ssa.OpNeqPtr,
2227 {ir.ONE, types.TMAP}: ssa.OpNeqPtr,
2228 {ir.ONE, types.TCHAN}: ssa.OpNeqPtr,
2229 {ir.ONE, types.TPTR}: ssa.OpNeqPtr,
2230 {ir.ONE, types.TUINTPTR}: ssa.OpNeqPtr,
2231 {ir.ONE, types.TUNSAFEPTR}: ssa.OpNeqPtr,
2232 {ir.ONE, types.TFLOAT64}: ssa.OpNeq64F,
2233 {ir.ONE, types.TFLOAT32}: ssa.OpNeq32F,
2235 {ir.OLT, types.TINT8}: ssa.OpLess8,
2236 {ir.OLT, types.TUINT8}: ssa.OpLess8U,
2237 {ir.OLT, types.TINT16}: ssa.OpLess16,
2238 {ir.OLT, types.TUINT16}: ssa.OpLess16U,
2239 {ir.OLT, types.TINT32}: ssa.OpLess32,
2240 {ir.OLT, types.TUINT32}: ssa.OpLess32U,
2241 {ir.OLT, types.TINT64}: ssa.OpLess64,
2242 {ir.OLT, types.TUINT64}: ssa.OpLess64U,
2243 {ir.OLT, types.TFLOAT64}: ssa.OpLess64F,
2244 {ir.OLT, types.TFLOAT32}: ssa.OpLess32F,
2246 {ir.OLE, types.TINT8}: ssa.OpLeq8,
2247 {ir.OLE, types.TUINT8}: ssa.OpLeq8U,
2248 {ir.OLE, types.TINT16}: ssa.OpLeq16,
2249 {ir.OLE, types.TUINT16}: ssa.OpLeq16U,
2250 {ir.OLE, types.TINT32}: ssa.OpLeq32,
2251 {ir.OLE, types.TUINT32}: ssa.OpLeq32U,
2252 {ir.OLE, types.TINT64}: ssa.OpLeq64,
2253 {ir.OLE, types.TUINT64}: ssa.OpLeq64U,
2254 {ir.OLE, types.TFLOAT64}: ssa.OpLeq64F,
2255 {ir.OLE, types.TFLOAT32}: ssa.OpLeq32F,
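// Example lookup (sketch): opToSSA[opAndType{ir.OADD, types.TINT64}] is
// ssa.OpAdd64. ssaOp below performs this lookup after concreteEtype has
// normalized the platform-sized kinds (TINT, TUINT, TUINTPTR) to an
// explicit 32- or 64-bit kind based on the pointer size.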
2258 func (s *state) concreteEtype(t *types.Type) types.Kind {
2264 if s.config.PtrSize == 8 {
2269 if s.config.PtrSize == 8 {
2270 return types.TUINT64
2272 return types.TUINT32
2273 case types.TUINTPTR:
2274 if s.config.PtrSize == 8 {
2275 return types.TUINT64
2277 return types.TUINT32
2281 func (s *state) ssaOp(op ir.Op, t *types.Type) ssa.Op {
2282 etype := s.concreteEtype(t)
2283 x, ok := opToSSA[opAndType{op, etype}]
2285 s.Fatalf("unhandled binary op %v %s", op, etype)
2290 type opAndTwoTypes struct {
2296 type twoTypes struct {
2301 type twoOpsAndType struct {
2304 intermediateType types.Kind
2307 var fpConvOpToSSA = map[twoTypes]twoOpsAndType{
2309 {types.TINT8, types.TFLOAT32}: {ssa.OpSignExt8to32, ssa.OpCvt32to32F, types.TINT32},
2310 {types.TINT16, types.TFLOAT32}: {ssa.OpSignExt16to32, ssa.OpCvt32to32F, types.TINT32},
2311 {types.TINT32, types.TFLOAT32}: {ssa.OpCopy, ssa.OpCvt32to32F, types.TINT32},
2312 {types.TINT64, types.TFLOAT32}: {ssa.OpCopy, ssa.OpCvt64to32F, types.TINT64},
2314 {types.TINT8, types.TFLOAT64}: {ssa.OpSignExt8to32, ssa.OpCvt32to64F, types.TINT32},
2315 {types.TINT16, types.TFLOAT64}: {ssa.OpSignExt16to32, ssa.OpCvt32to64F, types.TINT32},
2316 {types.TINT32, types.TFLOAT64}: {ssa.OpCopy, ssa.OpCvt32to64F, types.TINT32},
2317 {types.TINT64, types.TFLOAT64}: {ssa.OpCopy, ssa.OpCvt64to64F, types.TINT64},
2319 {types.TFLOAT32, types.TINT8}: {ssa.OpCvt32Fto32, ssa.OpTrunc32to8, types.TINT32},
2320 {types.TFLOAT32, types.TINT16}: {ssa.OpCvt32Fto32, ssa.OpTrunc32to16, types.TINT32},
2321 {types.TFLOAT32, types.TINT32}: {ssa.OpCvt32Fto32, ssa.OpCopy, types.TINT32},
2322 {types.TFLOAT32, types.TINT64}: {ssa.OpCvt32Fto64, ssa.OpCopy, types.TINT64},
2324 {types.TFLOAT64, types.TINT8}: {ssa.OpCvt64Fto32, ssa.OpTrunc32to8, types.TINT32},
2325 {types.TFLOAT64, types.TINT16}: {ssa.OpCvt64Fto32, ssa.OpTrunc32to16, types.TINT32},
2326 {types.TFLOAT64, types.TINT32}: {ssa.OpCvt64Fto32, ssa.OpCopy, types.TINT32},
2327 {types.TFLOAT64, types.TINT64}: {ssa.OpCvt64Fto64, ssa.OpCopy, types.TINT64},
2329 {types.TUINT8, types.TFLOAT32}: {ssa.OpZeroExt8to32, ssa.OpCvt32to32F, types.TINT32},
2330 {types.TUINT16, types.TFLOAT32}: {ssa.OpZeroExt16to32, ssa.OpCvt32to32F, types.TINT32},
2331 {types.TUINT32, types.TFLOAT32}: {ssa.OpZeroExt32to64, ssa.OpCvt64to32F, types.TINT64}, // go wide to dodge unsigned
2332 {types.TUINT64, types.TFLOAT32}: {ssa.OpCopy, ssa.OpInvalid, types.TUINT64}, // Cvt64Uto32F, branchy code expansion instead
2334 {types.TUINT8, types.TFLOAT64}: {ssa.OpZeroExt8to32, ssa.OpCvt32to64F, types.TINT32},
2335 {types.TUINT16, types.TFLOAT64}: {ssa.OpZeroExt16to32, ssa.OpCvt32to64F, types.TINT32},
2336 {types.TUINT32, types.TFLOAT64}: {ssa.OpZeroExt32to64, ssa.OpCvt64to64F, types.TINT64}, // go wide to dodge unsigned
2337 {types.TUINT64, types.TFLOAT64}: {ssa.OpCopy, ssa.OpInvalid, types.TUINT64}, // Cvt64Uto64F, branchy code expansion instead
2339 {types.TFLOAT32, types.TUINT8}: {ssa.OpCvt32Fto32, ssa.OpTrunc32to8, types.TINT32},
2340 {types.TFLOAT32, types.TUINT16}: {ssa.OpCvt32Fto32, ssa.OpTrunc32to16, types.TINT32},
2341 {types.TFLOAT32, types.TUINT32}: {ssa.OpCvt32Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned
2342 {types.TFLOAT32, types.TUINT64}: {ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt32Fto64U, branchy code expansion instead
2344 {types.TFLOAT64, types.TUINT8}: {ssa.OpCvt64Fto32, ssa.OpTrunc32to8, types.TINT32},
2345 {types.TFLOAT64, types.TUINT16}: {ssa.OpCvt64Fto32, ssa.OpTrunc32to16, types.TINT32},
2346 {types.TFLOAT64, types.TUINT32}: {ssa.OpCvt64Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned
2347 {types.TFLOAT64, types.TUINT64}: {ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt64Fto64U, branchy code expansion instead
2350 {types.TFLOAT64, types.TFLOAT32}: {ssa.OpCvt64Fto32F, ssa.OpCopy, types.TFLOAT32},
2351 {types.TFLOAT64, types.TFLOAT64}: {ssa.OpRound64F, ssa.OpCopy, types.TFLOAT64},
2352 {types.TFLOAT32, types.TFLOAT32}: {ssa.OpRound32F, ssa.OpCopy, types.TFLOAT32},
2353 {types.TFLOAT32, types.TFLOAT64}: {ssa.OpCvt32Fto64F, ssa.OpCopy, types.TFLOAT64},
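// Reading the table (sketch): a uint8 -> float32 conversion uses the
// entry {ssa.OpZeroExt8to32, ssa.OpCvt32to32F, types.TINT32}: first
// zero-extend the uint8 to the TINT32 intermediate type, then convert
// that intermediate value to float32.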
2356 // This map is used only on 32-bit archs, and includes only the differences:
2357 // on 32-bit archs, don't use int64<->float conversions for uint32.
2358 var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{
2359 {types.TUINT32, types.TFLOAT32}: {ssa.OpCopy, ssa.OpCvt32Uto32F, types.TUINT32},
2360 {types.TUINT32, types.TFLOAT64}: {ssa.OpCopy, ssa.OpCvt32Uto64F, types.TUINT32},
2361 {types.TFLOAT32, types.TUINT32}: {ssa.OpCvt32Fto32U, ssa.OpCopy, types.TUINT32},
2362 {types.TFLOAT64, types.TUINT32}: {ssa.OpCvt64Fto32U, ssa.OpCopy, types.TUINT32},
2365 // uint64<->float conversions, only on machines that have instructions for that
2366 var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{
2367 {types.TUINT64, types.TFLOAT32}: {ssa.OpCopy, ssa.OpCvt64Uto32F, types.TUINT64},
2368 {types.TUINT64, types.TFLOAT64}: {ssa.OpCopy, ssa.OpCvt64Uto64F, types.TUINT64},
2369 {types.TFLOAT32, types.TUINT64}: {ssa.OpCvt32Fto64U, ssa.OpCopy, types.TUINT64},
2370 {types.TFLOAT64, types.TUINT64}: {ssa.OpCvt64Fto64U, ssa.OpCopy, types.TUINT64},
2373 var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
2374 {ir.OLSH, types.TINT8, types.TUINT8}: ssa.OpLsh8x8,
2375 {ir.OLSH, types.TUINT8, types.TUINT8}: ssa.OpLsh8x8,
2376 {ir.OLSH, types.TINT8, types.TUINT16}: ssa.OpLsh8x16,
2377 {ir.OLSH, types.TUINT8, types.TUINT16}: ssa.OpLsh8x16,
2378 {ir.OLSH, types.TINT8, types.TUINT32}: ssa.OpLsh8x32,
2379 {ir.OLSH, types.TUINT8, types.TUINT32}: ssa.OpLsh8x32,
2380 {ir.OLSH, types.TINT8, types.TUINT64}: ssa.OpLsh8x64,
2381 {ir.OLSH, types.TUINT8, types.TUINT64}: ssa.OpLsh8x64,
2383 {ir.OLSH, types.TINT16, types.TUINT8}: ssa.OpLsh16x8,
2384 {ir.OLSH, types.TUINT16, types.TUINT8}: ssa.OpLsh16x8,
2385 {ir.OLSH, types.TINT16, types.TUINT16}: ssa.OpLsh16x16,
2386 {ir.OLSH, types.TUINT16, types.TUINT16}: ssa.OpLsh16x16,
2387 {ir.OLSH, types.TINT16, types.TUINT32}: ssa.OpLsh16x32,
2388 {ir.OLSH, types.TUINT16, types.TUINT32}: ssa.OpLsh16x32,
2389 {ir.OLSH, types.TINT16, types.TUINT64}: ssa.OpLsh16x64,
2390 {ir.OLSH, types.TUINT16, types.TUINT64}: ssa.OpLsh16x64,
2392 {ir.OLSH, types.TINT32, types.TUINT8}: ssa.OpLsh32x8,
2393 {ir.OLSH, types.TUINT32, types.TUINT8}: ssa.OpLsh32x8,
2394 {ir.OLSH, types.TINT32, types.TUINT16}: ssa.OpLsh32x16,
2395 {ir.OLSH, types.TUINT32, types.TUINT16}: ssa.OpLsh32x16,
2396 {ir.OLSH, types.TINT32, types.TUINT32}: ssa.OpLsh32x32,
2397 {ir.OLSH, types.TUINT32, types.TUINT32}: ssa.OpLsh32x32,
2398 {ir.OLSH, types.TINT32, types.TUINT64}: ssa.OpLsh32x64,
2399 {ir.OLSH, types.TUINT32, types.TUINT64}: ssa.OpLsh32x64,
2401 {ir.OLSH, types.TINT64, types.TUINT8}: ssa.OpLsh64x8,
2402 {ir.OLSH, types.TUINT64, types.TUINT8}: ssa.OpLsh64x8,
2403 {ir.OLSH, types.TINT64, types.TUINT16}: ssa.OpLsh64x16,
2404 {ir.OLSH, types.TUINT64, types.TUINT16}: ssa.OpLsh64x16,
2405 {ir.OLSH, types.TINT64, types.TUINT32}: ssa.OpLsh64x32,
2406 {ir.OLSH, types.TUINT64, types.TUINT32}: ssa.OpLsh64x32,
2407 {ir.OLSH, types.TINT64, types.TUINT64}: ssa.OpLsh64x64,
2408 {ir.OLSH, types.TUINT64, types.TUINT64}: ssa.OpLsh64x64,
2410 {ir.ORSH, types.TINT8, types.TUINT8}: ssa.OpRsh8x8,
2411 {ir.ORSH, types.TUINT8, types.TUINT8}: ssa.OpRsh8Ux8,
2412 {ir.ORSH, types.TINT8, types.TUINT16}: ssa.OpRsh8x16,
2413 {ir.ORSH, types.TUINT8, types.TUINT16}: ssa.OpRsh8Ux16,
2414 {ir.ORSH, types.TINT8, types.TUINT32}: ssa.OpRsh8x32,
2415 {ir.ORSH, types.TUINT8, types.TUINT32}: ssa.OpRsh8Ux32,
2416 {ir.ORSH, types.TINT8, types.TUINT64}: ssa.OpRsh8x64,
2417 {ir.ORSH, types.TUINT8, types.TUINT64}: ssa.OpRsh8Ux64,
2419 {ir.ORSH, types.TINT16, types.TUINT8}: ssa.OpRsh16x8,
2420 {ir.ORSH, types.TUINT16, types.TUINT8}: ssa.OpRsh16Ux8,
2421 {ir.ORSH, types.TINT16, types.TUINT16}: ssa.OpRsh16x16,
2422 {ir.ORSH, types.TUINT16, types.TUINT16}: ssa.OpRsh16Ux16,
2423 {ir.ORSH, types.TINT16, types.TUINT32}: ssa.OpRsh16x32,
2424 {ir.ORSH, types.TUINT16, types.TUINT32}: ssa.OpRsh16Ux32,
2425 {ir.ORSH, types.TINT16, types.TUINT64}: ssa.OpRsh16x64,
2426 {ir.ORSH, types.TUINT16, types.TUINT64}: ssa.OpRsh16Ux64,
2428 {ir.ORSH, types.TINT32, types.TUINT8}: ssa.OpRsh32x8,
2429 {ir.ORSH, types.TUINT32, types.TUINT8}: ssa.OpRsh32Ux8,
2430 {ir.ORSH, types.TINT32, types.TUINT16}: ssa.OpRsh32x16,
2431 {ir.ORSH, types.TUINT32, types.TUINT16}: ssa.OpRsh32Ux16,
2432 {ir.ORSH, types.TINT32, types.TUINT32}: ssa.OpRsh32x32,
2433 {ir.ORSH, types.TUINT32, types.TUINT32}: ssa.OpRsh32Ux32,
2434 {ir.ORSH, types.TINT32, types.TUINT64}: ssa.OpRsh32x64,
2435 {ir.ORSH, types.TUINT32, types.TUINT64}: ssa.OpRsh32Ux64,
2437 {ir.ORSH, types.TINT64, types.TUINT8}: ssa.OpRsh64x8,
2438 {ir.ORSH, types.TUINT64, types.TUINT8}: ssa.OpRsh64Ux8,
2439 {ir.ORSH, types.TINT64, types.TUINT16}: ssa.OpRsh64x16,
2440 {ir.ORSH, types.TUINT64, types.TUINT16}: ssa.OpRsh64Ux16,
2441 {ir.ORSH, types.TINT64, types.TUINT32}: ssa.OpRsh64x32,
2442 {ir.ORSH, types.TUINT64, types.TUINT32}: ssa.OpRsh64Ux32,
2443 {ir.ORSH, types.TINT64, types.TUINT64}: ssa.OpRsh64x64,
2444 {ir.ORSH, types.TUINT64, types.TUINT64}: ssa.OpRsh64Ux64,
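// Example (sketch): shiftOpToSSA[opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT8}]
// is ssa.OpLsh32x8. The first kind selects the width of the shifted value
// and the second the width of the shift count; the signedness of the
// value matters only for ORSH (arithmetic vs. logical right shift).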
2447 func (s *state) ssaShiftOp(op ir.Op, t *types.Type, u *types.Type) ssa.Op {
2448 etype1 := s.concreteEtype(t)
2449 etype2 := s.concreteEtype(u)
2450 x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
2452 s.Fatalf("unhandled shift op %v etype=%s/%s", op, etype1, etype2)
2457 func (s *state) uintptrConstant(v uint64) *ssa.Value {
2458 if s.config.PtrSize == 4 {
2459 return s.newValue0I(ssa.OpConst32, types.Types[types.TUINTPTR], int64(v))
2461 return s.newValue0I(ssa.OpConst64, types.Types[types.TUINTPTR], int64(v))
2464 func (s *state) conv(n ir.Node, v *ssa.Value, ft, tt *types.Type) *ssa.Value {
2465 if ft.IsBoolean() && tt.IsKind(types.TUINT8) {
2466 // Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
2467 return s.newValue1(ssa.OpCvtBoolToUint8, tt, v)
2469 if ft.IsInteger() && tt.IsInteger() {
2471 if tt.Size() == ft.Size() {
2473 } else if tt.Size() < ft.Size() {
2475 switch 10*ft.Size() + tt.Size() {
2477 op = ssa.OpTrunc16to8
2479 op = ssa.OpTrunc32to8
2481 op = ssa.OpTrunc32to16
2483 op = ssa.OpTrunc64to8
2485 op = ssa.OpTrunc64to16
2487 op = ssa.OpTrunc64to32
2489 s.Fatalf("weird integer truncation %v -> %v", ft, tt)
2491 } else if ft.IsSigned() {
2492 // sign extension
2493 switch 10*ft.Size() + tt.Size() {
2495 op = ssa.OpSignExt8to16
2497 op = ssa.OpSignExt8to32
2499 op = ssa.OpSignExt8to64
2501 op = ssa.OpSignExt16to32
2503 op = ssa.OpSignExt16to64
2505 op = ssa.OpSignExt32to64
2507 s.Fatalf("bad integer sign extension %v -> %v", ft, tt)
2509 } else {
2510 // zero extension
2511 switch 10*ft.Size() + tt.Size() {
2513 op = ssa.OpZeroExt8to16
2515 op = ssa.OpZeroExt8to32
2517 op = ssa.OpZeroExt8to64
2519 op = ssa.OpZeroExt16to32
2521 op = ssa.OpZeroExt16to64
2523 op = ssa.OpZeroExt32to64
2525 s.Fatalf("weird integer zero extension %v -> %v", ft, tt)
2528 return s.newValue1(op, tt, v)
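// For example (sketch): int16 -> int64 takes the sign-extension branch
// with key 10*2+8 == 28 and selects ssa.OpSignExt16to64, while
// uint32 -> uint8 takes the truncation branch with key 10*4+1 == 41 and
// selects ssa.OpTrunc32to8.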
2531 if ft.IsComplex() && tt.IsComplex() {
2533 if ft.Size() == tt.Size() {
2540 s.Fatalf("weird complex conversion %v -> %v", ft, tt)
2542 } else if ft.Size() == 8 && tt.Size() == 16 {
2543 op = ssa.OpCvt32Fto64F
2544 } else if ft.Size() == 16 && tt.Size() == 8 {
2545 op = ssa.OpCvt64Fto32F
2547 s.Fatalf("weird complex conversion %v -> %v", ft, tt)
2549 ftp := types.FloatForComplex(ft)
2550 ttp := types.FloatForComplex(tt)
2551 return s.newValue2(ssa.OpComplexMake, tt,
2552 s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, v)),
2553 s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, v)))
2556 if tt.IsComplex() { // and ft is not complex
2557 // Needed for generics support - can't happen in normal Go code.
2558 et := types.FloatForComplex(tt)
2559 v = s.conv(n, v, ft, et)
2560 return s.newValue2(ssa.OpComplexMake, tt, v, s.zeroVal(et))
2563 if ft.IsFloat() || tt.IsFloat() {
2564 conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
2565 if s.config.RegSize == 4 && Arch.LinkArch.Family != sys.MIPS && !s.softFloat {
2566 if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
2570 if Arch.LinkArch.Family == sys.ARM64 || Arch.LinkArch.Family == sys.Wasm || Arch.LinkArch.Family == sys.S390X || s.softFloat {
2571 if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
2576 if Arch.LinkArch.Family == sys.MIPS && !s.softFloat {
2577 if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {
2578 // tt is float32 or float64, and ft is also unsigned
2580 return s.uint32Tofloat32(n, v, ft, tt)
2583 return s.uint32Tofloat64(n, v, ft, tt)
2585 } else if tt.Size() == 4 && tt.IsInteger() && !tt.IsSigned() {
2586 // ft is float32 or float64, and tt is unsigned integer
2588 return s.float32ToUint32(n, v, ft, tt)
2591 return s.float64ToUint32(n, v, ft, tt)
2597 s.Fatalf("weird float conversion %v -> %v", ft, tt)
2599 op1, op2, it := conv.op1, conv.op2, conv.intermediateType
2601 if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid {
2602 // normal case, not tripping over unsigned 64
2603 if op1 == ssa.OpCopy {
2604 if op2 == ssa.OpCopy {
2607 return s.newValueOrSfCall1(op2, tt, v)
2609 if op2 == ssa.OpCopy {
2610 return s.newValueOrSfCall1(op1, tt, v)
2612 return s.newValueOrSfCall1(op2, tt, s.newValueOrSfCall1(op1, types.Types[it], v))
2614 // Tricky 64-bit unsigned cases.
2616 // tt is float32 or float64, and ft is also unsigned
2618 return s.uint64Tofloat32(n, v, ft, tt)
2621 return s.uint64Tofloat64(n, v, ft, tt)
2623 s.Fatalf("weird unsigned integer to float conversion %v -> %v", ft, tt)
2625 // ft is float32 or float64, and tt is unsigned integer
2627 return s.float32ToUint64(n, v, ft, tt)
2630 return s.float64ToUint64(n, v, ft, tt)
2632 s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt)
2636 s.Fatalf("unhandled OCONV %s -> %s", ft.Kind(), tt.Kind())
2640 // expr converts the expression n to ssa, adds it to s and returns the ssa result.
2641 func (s *state) expr(n ir.Node) *ssa.Value {
2642 return s.exprCheckPtr(n, true)
2645 func (s *state) exprCheckPtr(n ir.Node, checkPtrOK bool) *ssa.Value {
2646 if ir.HasUniquePos(n) {
2647 // ONAMEs and named OLITERALs have the line number
2648 // of the decl, not the use. See issue 14742.
2653 s.stmtList(n.Init())
2655 case ir.OBYTES2STRTMP:
2656 n := n.(*ir.ConvExpr)
2657 slice := s.expr(n.X)
2658 ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
2659 len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice)
2660 return s.newValue2(ssa.OpStringMake, n.Type(), ptr, len)
2661 case ir.OSTR2BYTESTMP:
2662 n := n.(*ir.ConvExpr)
2664 ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str)
2665 len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], str)
2666 return s.newValue3(ssa.OpSliceMake, n.Type(), ptr, len, len)
2668 n := n.(*ir.UnaryExpr)
2669 aux := n.X.(*ir.Name).Linksym()
2670 // OCFUNC is used to build function values, which must
2671 // always reference ABIInternal entry points.
2672 if aux.ABI() != obj.ABIInternal {
2673 s.Fatalf("expected ABIInternal: %v", aux.ABI())
2675 return s.entryNewValue1A(ssa.OpAddr, n.Type(), aux, s.sb)
2678 if n.Class == ir.PFUNC {
2679 // "value" of a function is the address of the function's closure
2680 sym := staticdata.FuncLinksym(n)
2681 return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type()), sym, s.sb)
2684 return s.variable(n, n.Type())
2686 return s.load(n.Type(), s.addr(n))
2687 case ir.OLINKSYMOFFSET:
2688 n := n.(*ir.LinksymOffsetExpr)
2689 return s.load(n.Type(), s.addr(n))
2691 n := n.(*ir.NilExpr)
2695 return s.constSlice(t)
2696 case t.IsInterface():
2697 return s.constInterface(t)
2699 return s.constNil(t)
2702 switch u := n.Val(); u.Kind() {
2704 i := ir.IntVal(n.Type(), u)
2705 switch n.Type().Size() {
2707 return s.constInt8(n.Type(), int8(i))
2709 return s.constInt16(n.Type(), int16(i))
2711 return s.constInt32(n.Type(), int32(i))
2713 return s.constInt64(n.Type(), i)
2715 s.Fatalf("bad integer size %d", n.Type().Size())
2718 case constant.String:
2719 i := constant.StringVal(u)
2721 return s.constEmptyString(n.Type())
2723 return s.entryNewValue0A(ssa.OpConstString, n.Type(), ssa.StringToAux(i))
2725 return s.constBool(constant.BoolVal(u))
2726 case constant.Float:
2727 f, _ := constant.Float64Val(u)
2728 switch n.Type().Size() {
2730 return s.constFloat32(n.Type(), f)
2732 return s.constFloat64(n.Type(), f)
2734 s.Fatalf("bad float size %d", n.Type().Size())
2737 case constant.Complex:
2738 re, _ := constant.Float64Val(constant.Real(u))
2739 im, _ := constant.Float64Val(constant.Imag(u))
2740 switch n.Type().Size() {
2742 pt := types.Types[types.TFLOAT32]
2743 return s.newValue2(ssa.OpComplexMake, n.Type(),
2744 s.constFloat32(pt, re),
2745 s.constFloat32(pt, im))
2747 pt := types.Types[types.TFLOAT64]
2748 return s.newValue2(ssa.OpComplexMake, n.Type(),
2749 s.constFloat64(pt, re),
2750 s.constFloat64(pt, im))
2752 s.Fatalf("bad complex size %d", n.Type().Size())
2756 s.Fatalf("unhandled OLITERAL %v", u.Kind())
2760 n := n.(*ir.ConvExpr)
2764 // Assume everything will work out, so set up our return value.
2765 // Anything interesting that happens from here is a fatal.
2771 // Special case for not confusing GC and liveness.
2772 // We don't want pointers accidentally classified
2773 // as not-pointers or vice-versa because of copy
2774 // elision.
2775 if to.IsPtrShaped() != from.IsPtrShaped() {
2776 return s.newValue2(ssa.OpConvert, to, x, s.mem())
2779 v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type
2782 if to.Kind() == types.TFUNC && from.IsPtrShaped() {
2786 // named <--> unnamed type or typed <--> untyped const
2787 if from.Kind() == to.Kind() {
2791 // unsafe.Pointer <--> *T
2792 if to.IsUnsafePtr() && from.IsPtrShaped() || from.IsUnsafePtr() && to.IsPtrShaped() {
2793 if s.checkPtrEnabled && checkPtrOK && to.IsPtr() && from.IsUnsafePtr() {
2794 s.checkPtrAlignment(n, v, nil)
2800 if to.Kind() == types.TMAP && from.IsPtr() &&
2801 to.MapType().Hmap == from.Elem() {
2805 types.CalcSize(from)
2807 if from.Size() != to.Size() {
2808 s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Size(), to, to.Size())
2811 if etypesign(from.Kind()) != etypesign(to.Kind()) {
2812 s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Kind(), to, to.Kind())
2816 if base.Flag.Cfg.Instrumenting {
2817 // These appear to be fine, but they fail the
2818 // integer constraint below, so okay them here.
2819 // Sample non-integer conversion: map[string]string -> *uint8
2823 if etypesign(from.Kind()) == 0 {
2824 s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
2828 // integer, same width, same sign
2832 n := n.(*ir.ConvExpr)
2834 return s.conv(n, x, n.X.Type(), n.Type())
2837 n := n.(*ir.TypeAssertExpr)
2838 res, _ := s.dottype(n, false)
2841 case ir.ODYNAMICDOTTYPE:
2842 n := n.(*ir.DynamicTypeAssertExpr)
2843 res, _ := s.dynamicDottype(n, false)
2847 case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
2848 n := n.(*ir.BinaryExpr)
2851 if n.X.Type().IsComplex() {
2852 pt := types.FloatForComplex(n.X.Type())
2853 op := s.ssaOp(ir.OEQ, pt)
2854 r := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
2855 i := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
2856 c := s.newValue2(ssa.OpAndB, types.Types[types.TBOOL], r, i)
2861 return s.newValue1(ssa.OpNot, types.Types[types.TBOOL], c)
2863 s.Fatalf("ordered complex compare %v", n.Op())
2867 // Convert OGE and OGT into OLE and OLT.
2871 op, a, b = ir.OLE, b, a
2873 op, a, b = ir.OLT, b, a
2875 if n.X.Type().IsFloat() {
2877 return s.newValueOrSfCall2(s.ssaOp(op, n.X.Type()), types.Types[types.TBOOL], a, b)
2879 // integer comparison
2880 return s.newValue2(s.ssaOp(op, n.X.Type()), types.Types[types.TBOOL], a, b)
2882 n := n.(*ir.BinaryExpr)
2885 if n.Type().IsComplex() {
2886 mulop := ssa.OpMul64F
2887 addop := ssa.OpAdd64F
2888 subop := ssa.OpSub64F
2889 pt := types.FloatForComplex(n.Type()) // Could be Float32 or Float64
2890 wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error
2892 areal := s.newValue1(ssa.OpComplexReal, pt, a)
2893 breal := s.newValue1(ssa.OpComplexReal, pt, b)
2894 aimag := s.newValue1(ssa.OpComplexImag, pt, a)
2895 bimag := s.newValue1(ssa.OpComplexImag, pt, b)
2897 if pt != wt { // Widen for calculation
2898 areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal)
2899 breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal)
2900 aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag)
2901 bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag)
2904 xreal := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag))
2905 ximag := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, bimag), s.newValueOrSfCall2(mulop, wt, aimag, breal))
2907 if pt != wt { // Narrow to store back
2908 xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
2909 ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
2912 return s.newValue2(ssa.OpComplexMake, n.Type(), xreal, ximag)
2915 if n.Type().IsFloat() {
2916 return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
2919 return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
2922 n := n.(*ir.BinaryExpr)
2925 if n.Type().IsComplex() {
2926 // TODO this is not executed because the front-end substitutes a runtime call.
2927 // That probably ought to change; with modest optimization the widen/narrow
2928 // conversions could all be elided in larger expression trees.
2929 mulop := ssa.OpMul64F
2930 addop := ssa.OpAdd64F
2931 subop := ssa.OpSub64F
2932 divop := ssa.OpDiv64F
2933 pt := types.FloatForComplex(n.Type()) // Could be Float32 or Float64
2934 wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error
2936 areal := s.newValue1(ssa.OpComplexReal, pt, a)
2937 breal := s.newValue1(ssa.OpComplexReal, pt, b)
2938 aimag := s.newValue1(ssa.OpComplexImag, pt, a)
2939 bimag := s.newValue1(ssa.OpComplexImag, pt, b)
2941 if pt != wt { // Widen for calculation
2942 areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal)
2943 breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal)
2944 aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag)
2945 bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag)
2948 denom := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, breal, breal), s.newValueOrSfCall2(mulop, wt, bimag, bimag))
2949 xreal := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag))
2950 ximag := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, aimag, breal), s.newValueOrSfCall2(mulop, wt, areal, bimag))
2952 // TODO not sure if this is best done in wide precision or narrow
2953 // Double-rounding might be an issue.
2954 // Note that the pre-SSA implementation does the entire calculation
2955 // in wide format, so wide is compatible.
2956 xreal = s.newValueOrSfCall2(divop, wt, xreal, denom)
2957 ximag = s.newValueOrSfCall2(divop, wt, ximag, denom)
2959 if pt != wt { // Narrow to store back
2960 xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
2961 ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
2963 return s.newValue2(ssa.OpComplexMake, n.Type(), xreal, ximag)
2965 if n.Type().IsFloat() {
2966 return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
2968 return s.intDivide(n, a, b)
2970 n := n.(*ir.BinaryExpr)
2973 return s.intDivide(n, a, b)
2974 case ir.OADD, ir.OSUB:
2975 n := n.(*ir.BinaryExpr)
2978 if n.Type().IsComplex() {
2979 pt := types.FloatForComplex(n.Type())
2980 op := s.ssaOp(n.Op(), pt)
2981 return s.newValue2(ssa.OpComplexMake, n.Type(),
2982 s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)),
2983 s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)))
2985 if n.Type().IsFloat() {
2986 return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
2988 return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
2989 case ir.OAND, ir.OOR, ir.OXOR:
2990 n := n.(*ir.BinaryExpr)
2993 return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
2995 n := n.(*ir.BinaryExpr)
2998 b = s.newValue1(s.ssaOp(ir.OBITNOT, b.Type), b.Type, b)
2999 return s.newValue2(s.ssaOp(ir.OAND, n.Type()), a.Type, a, b)
3000 case ir.OLSH, ir.ORSH:
3001 n := n.(*ir.BinaryExpr)
3006 cmp := s.newValue2(s.ssaOp(ir.OLE, bt), types.Types[types.TBOOL], s.zeroVal(bt), b)
3007 s.check(cmp, ir.Syms.Panicshift)
3008 bt = bt.ToUnsigned()
3010 return s.newValue2(s.ssaShiftOp(n.Op(), n.Type(), bt), a.Type, a, b)
3011 case ir.OANDAND, ir.OOROR:
3012 // To implement OANDAND (and OOROR), we introduce a
3013 // new temporary variable to hold the result. The
3014 // variable is associated with the OANDAND node in the
3015 // s.vars table (normally variables are only
3016 // associated with ONAME nodes). We convert
3017 //     A && B
3018 // to
3019 //     var = A
3020 //     if var {
3021 //         var = B
3022 //     }
3023 // Using var in the subsequent block introduces the
3024 // necessary phi variable.
3025 n := n.(*ir.LogicalExpr)
3030 b.Kind = ssa.BlockIf
3032 // In theory, we should set b.Likely here based on context.
3033 // However, gc only gives us likeliness hints
3034 // in a single place, for plain OIF statements,
3035 // and passing around context is finicky, so don't bother for now.
3037 bRight := s.f.NewBlock(ssa.BlockPlain)
3038 bResult := s.f.NewBlock(ssa.BlockPlain)
3039 if n.Op() == ir.OANDAND {
3041 b.AddEdgeTo(bResult)
3042 } else if n.Op() == ir.OOROR {
3043 b.AddEdgeTo(bResult)
3047 s.startBlock(bRight)
3052 b.AddEdgeTo(bResult)
3054 s.startBlock(bResult)
3055 return s.variable(n, types.Types[types.TBOOL])
3057 n := n.(*ir.BinaryExpr)
3060 return s.newValue2(ssa.OpComplexMake, n.Type(), r, i)
3064 n := n.(*ir.UnaryExpr)
3066 if n.Type().IsComplex() {
3067 tp := types.FloatForComplex(n.Type())
3068 negop := s.ssaOp(n.Op(), tp)
3069 return s.newValue2(ssa.OpComplexMake, n.Type(),
3070 s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)),
3071 s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a)))
3073 return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a)
3074 case ir.ONOT, ir.OBITNOT:
3075 n := n.(*ir.UnaryExpr)
3077 return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a)
3078 case ir.OIMAG, ir.OREAL:
3079 n := n.(*ir.UnaryExpr)
3081 return s.newValue1(s.ssaOp(n.Op(), n.X.Type()), n.Type(), a)
3083 n := n.(*ir.UnaryExpr)
3087 n := n.(*ir.AddrExpr)
3091 n := n.(*ir.ResultExpr)
3092 if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall {
3093 panic("Expected to see a previous call")
3097 panic(fmt.Errorf("ORESULT %v does not match call %s", n, s.prevCall))
3099 return s.resultOfCall(s.prevCall, which, n.Type())
3102 n := n.(*ir.StarExpr)
3103 p := s.exprPtr(n.X, n.Bounded(), n.Pos())
3104 return s.load(n.Type(), p)
3107 n := n.(*ir.SelectorExpr)
3108 if n.X.Op() == ir.OSTRUCTLIT {
3109 // All literals with nonzero fields have already been
3110 // rewritten during walk. Any that remain are just T{}
3111 // or equivalents. Use the zero value.
3112 if !ir.IsZero(n.X) {
3113 s.Fatalf("literal with nonzero value in SSA: %v", n.X)
3115 return s.zeroVal(n.Type())
3117 // If n is addressable and can't be represented in
3118 // SSA, then load just the selected field. This
3119 // prevents false memory dependencies in race/msan/asan
3120 // instrumentation.
3121 if ir.IsAddressable(n) && !s.canSSA(n) {
3123 return s.load(n.Type(), p)
3126 return s.newValue1I(ssa.OpStructSelect, n.Type(), int64(fieldIdx(n)), v)
3129 n := n.(*ir.SelectorExpr)
3130 p := s.exprPtr(n.X, n.Bounded(), n.Pos())
3131 p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type()), n.Offset(), p)
3132 return s.load(n.Type(), p)
3135 n := n.(*ir.IndexExpr)
3137 case n.X.Type().IsString():
3138 if n.Bounded() && ir.IsConst(n.X, constant.String) && ir.IsConst(n.Index, constant.Int) {
3139 // Replace "abc"[1] with 'b'.
3140 // Delayed until now because "abc"[1] is not an ideal constant.
3141 // See test/fixedbugs/issue11370.go.
3142 return s.newValue0I(ssa.OpConst8, types.Types[types.TUINT8], int64(int8(ir.StringVal(n.X)[ir.Int64Val(n.Index)])))
3145 i := s.expr(n.Index)
3146 len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], a)
3147 i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
3148 ptrtyp := s.f.Config.Types.BytePtr
3149 ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
3150 if ir.IsConst(n.Index, constant.Int) {
3151 ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, ir.Int64Val(n.Index), ptr)
3153 ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
3155 return s.load(types.Types[types.TUINT8], ptr)
3156 case n.X.Type().IsSlice():
3158 return s.load(n.X.Type().Elem(), p)
3159 case n.X.Type().IsArray():
3160 if TypeOK(n.X.Type()) {
3161 // SSA can handle arrays of length at most 1.
3162 bound := n.X.Type().NumElem()
3164 i := s.expr(n.Index)
3166 // Bounds check will never succeed. Might as well
3167 // use constants for the bounds check.
3168 z := s.constInt(types.Types[types.TINT], 0)
3169 s.boundsCheck(z, z, ssa.BoundsIndex, false)
3170 // The return value won't be live, return junk.
3171 // But not quite junk, in case bounds checks are turned off. See issue 48092.
3172 return s.zeroVal(n.Type())
3174 len := s.constInt(types.Types[types.TINT], bound)
3175 s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded()) // checks i == 0
3176 return s.newValue1I(ssa.OpArraySelect, n.Type(), 0, a)
3179 return s.load(n.X.Type().Elem(), p)
3181 s.Fatalf("bad type for index %v", n.X.Type())
3185 case ir.OLEN, ir.OCAP:
3186 n := n.(*ir.UnaryExpr)
3188 case n.X.Type().IsSlice():
3189 op := ssa.OpSliceLen
3190 if n.Op() == ir.OCAP {
3193 return s.newValue1(op, types.Types[types.TINT], s.expr(n.X))
3194 case n.X.Type().IsString(): // string; not reachable for OCAP
3195 return s.newValue1(ssa.OpStringLen, types.Types[types.TINT], s.expr(n.X))
3196 case n.X.Type().IsMap(), n.X.Type().IsChan():
3197 return s.referenceTypeBuiltin(n, s.expr(n.X))
3199 return s.constInt(types.Types[types.TINT], n.X.Type().NumElem())
3203 n := n.(*ir.UnaryExpr)
3205 if n.X.Type().IsSlice() {
3206 return s.newValue1(ssa.OpSlicePtr, n.Type(), a)
3208 return s.newValue1(ssa.OpStringPtr, n.Type(), a)
3212 n := n.(*ir.UnaryExpr)
3214 return s.newValue1(ssa.OpITab, n.Type(), a)
3217 n := n.(*ir.UnaryExpr)
3219 return s.newValue1(ssa.OpIData, n.Type(), a)
3222 n := n.(*ir.BinaryExpr)
3225 return s.newValue2(ssa.OpIMake, n.Type(), tab, data)
3227 case ir.OSLICEHEADER:
3228 n := n.(*ir.SliceHeaderExpr)
3232 return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c)
3234 case ir.OSTRINGHEADER:
3235 n := n.(*ir.StringHeaderExpr)
3238 return s.newValue2(ssa.OpStringMake, n.Type(), p, l)
3240 case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR:
3241 n := n.(*ir.SliceExpr)
3242 check := s.checkPtrEnabled && n.Op() == ir.OSLICE3ARR && n.X.Op() == ir.OCONVNOP && n.X.(*ir.ConvExpr).X.Type().IsUnsafePtr()
3243 v := s.exprCheckPtr(n.X, !check)
3244 var i, j, k *ssa.Value
3254 p, l, c := s.slice(v, i, j, k, n.Bounded())
3256 // Emit checkptr instrumentation after bound check to prevent false positive, see #46938.
3257 s.checkPtrAlignment(n.X.(*ir.ConvExpr), v, s.conv(n.Max, k, k.Type, types.Types[types.TUINTPTR]))
3259 return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c)
3262 n := n.(*ir.SliceExpr)
3271 p, l, _ := s.slice(v, i, j, nil, n.Bounded())
3272 return s.newValue2(ssa.OpStringMake, n.Type(), p, l)
3274 case ir.OSLICE2ARRPTR:
3275 // if arrlen > slice.len {
3276 //   panic(...)
3277 // }
3279 n := n.(*ir.ConvExpr)
3281 arrlen := s.constInt(types.Types[types.TINT], n.Type().Elem().NumElem())
3282 cap := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], v)
3283 s.boundsCheck(arrlen, cap, ssa.BoundsConvert, false)
3284 return s.newValue1(ssa.OpSlicePtrUnchecked, n.Type(), v)
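// Example (sketch): for p := (*[4]T)(s), arrlen is 4 and the bounds check
// panics unless len(s) >= 4; on success the result is s's data pointer,
// reinterpreted as *[4]T.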
3287 n := n.(*ir.CallExpr)
3288 if ir.IsIntrinsicCall(n) {
3289 return s.intrinsicCall(n)
3294 n := n.(*ir.CallExpr)
3295 return s.callResult(n, callNormal)
3298 n := n.(*ir.CallExpr)
3299 return s.newValue1(ssa.OpGetG, n.Type(), s.mem())
3301 case ir.OGETCALLERPC:
3302 n := n.(*ir.CallExpr)
3303 return s.newValue0(ssa.OpGetCallerPC, n.Type())
3305 case ir.OGETCALLERSP:
3306 n := n.(*ir.CallExpr)
3307 return s.newValue1(ssa.OpGetCallerSP, n.Type(), s.mem())
3310 return s.append(n.(*ir.CallExpr), false)
3312 case ir.OSTRUCTLIT, ir.OARRAYLIT:
3313 // All literals with nonzero fields have already been
3314 // rewritten during walk. Any that remain are just T{}
3315 // or equivalents. Use the zero value.
3316 n := n.(*ir.CompLitExpr)
3318 s.Fatalf("literal with nonzero value in SSA: %v", n)
3320 return s.zeroVal(n.Type())
3323 n := n.(*ir.UnaryExpr)
3324 var rtype *ssa.Value
3325 if x, ok := n.X.(*ir.DynamicType); ok && x.Op() == ir.ODYNAMICTYPE {
3326 rtype = s.expr(x.RType)
3328 return s.newObject(n.Type().Elem(), rtype)
3331 n := n.(*ir.BinaryExpr)
3335 // Force len to uintptr to prevent misuse of garbage bits in the
3336 // upper part of the register (#48536).
3337 len = s.conv(n, len, len.Type, types.Types[types.TUINTPTR])
3339 return s.newValue2(ssa.OpAddPtr, n.Type(), ptr, len)
3342 s.Fatalf("unhandled expr %v", n.Op())
3347 func (s *state) resultOfCall(c *ssa.Value, which int64, t *types.Type) *ssa.Value {
3348 aux := c.Aux.(*ssa.AuxCall)
3349 pa := aux.ParamAssignmentForResult(which)
3350 // TODO(register args) determine if in-memory TypeOK is better loaded early from SelectNAddr or later when SelectN is expanded.
3351 // SelectN is better for pattern-matching and possible call-aware analysis we might want to do in the future.
3352 if len(pa.Registers) == 0 && !TypeOK(t) {
3353 addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), which, c)
3354 return s.rawLoad(t, addr)
3356 return s.newValue1I(ssa.OpSelectN, t, which, c)
3359 func (s *state) resultAddrOfCall(c *ssa.Value, which int64, t *types.Type) *ssa.Value {
3360 aux := c.Aux.(*ssa.AuxCall)
3361 pa := aux.ParamAssignmentForResult(which)
3362 if len(pa.Registers) == 0 {
3363 return s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), which, c)
3365 _, addr := s.temp(c.Pos, t)
3366 rval := s.newValue1I(ssa.OpSelectN, t, which, c)
3367 s.vars[memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, addr, rval, s.mem(), false)
3371 // append converts an OAPPEND node to SSA.
3372 // If inplace is false, it converts the OAPPEND expression n to an ssa.Value,
3373 // adds it to s, and returns the Value.
3374 // If inplace is true, it writes the result of the OAPPEND expression n
3375 // back to the slice being appended to, and returns nil.
3376 // inplace MUST be set to false if the slice can be SSA'd.
3377 // Note: this code only handles fixed-count appends. Dotdotdot appends
3378 // have already been rewritten at this point (by walk).
3379 func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value {
3380 // If inplace is false, process as expression "append(s, e1, e2, e3)":
3382 //	ptr, len, cap := s
3383 //	len += 3
3384 // if uint(len) > uint(cap) {
3385 // ptr, len, cap = growslice(ptr, len, cap, 3, typ)
3386 //	    Note that len is unmodified by growslice.
3387 //	}
3388 // // with write barriers, if needed:
3389 // *(ptr+(len-3)) = e1
3390 // *(ptr+(len-2)) = e2
3391 // *(ptr+(len-1)) = e3
3392 // return makeslice(ptr, len, cap)
3395 // If inplace is true, process as statement "s = append(s, e1, e2, e3)":
3397 //	a := &s
3398 //	ptr, len, cap := s
3399 //	len += 3
3400 // if uint(len) > uint(cap) {
3401 // ptr, len, cap = growslice(ptr, len, cap, 3, typ)
3402 // vardef(a) // if necessary, advise liveness we are writing a new a
3403 // *a.cap = cap // write before ptr to avoid a spill
3404 //	*a.ptr = ptr // with write barrier
3405 //	}
3406 //	*a.len = len
3407 // // with write barriers, if needed:
3408 // *(ptr+(len-3)) = e1
3409 // *(ptr+(len-2)) = e2
3410 // *(ptr+(len-1)) = e3
3412 et := n.Type().Elem()
3413 pt := types.NewPtr(et)
3416 sn := n.Args[0] // the slice node is the first in the list
3417 var slice, addr *ssa.Value
3420 slice = s.load(n.Type(), addr)
3425 // Allocate new blocks
3426 grow := s.f.NewBlock(ssa.BlockPlain)
3427 assign := s.f.NewBlock(ssa.BlockPlain)
3429 // Decompose input slice.
3430 p := s.newValue1(ssa.OpSlicePtr, pt, slice)
3431 l := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice)
3432 c := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], slice)
3434 // Add number of new elements to length.
3435 nargs := s.constInt(types.Types[types.TINT], int64(len(n.Args)-1))
3436 l = s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], l, nargs)
3438 // Decide if we need to grow
3439 cmp := s.newValue2(s.ssaOp(ir.OLT, types.Types[types.TUINT]), types.Types[types.TBOOL], c, l)
3441 // Record values of ptr/len/cap before branch.
3449 b.Kind = ssa.BlockIf
3450 b.Likely = ssa.BranchUnlikely
3457 taddr := s.expr(n.X)
3458 r := s.rtcall(ir.Syms.Growslice, true, []*types.Type{n.Type()}, p, l, c, nargs, taddr)
3460 // Decompose output slice
3461 p = s.newValue1(ssa.OpSlicePtr, pt, r[0])
3462 l = s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], r[0])
3463 c = s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], r[0])
3469 if sn.Op() == ir.ONAME {
3471 if sn.Class != ir.PEXTERN {
3472 // Tell liveness we're about to build a new slice
3473 s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem())
3476 capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, types.SliceCapOffset, addr)
3477 s.store(types.Types[types.TINT], capaddr, c)
3478 s.store(pt, addr, p)
3484 // assign new elements to slots
3485 s.startBlock(assign)
3486 p = s.variable(ptrVar, pt) // generates phi for ptr
3487 l = s.variable(lenVar, types.Types[types.TINT]) // generates phi for len
3489 c = s.variable(capVar, types.Types[types.TINT]) // generates phi for cap
3493 // Update length in place.
3494 // We have to wait until here to make sure growslice succeeded.
3495 lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, types.SliceLenOffset, addr)
3496 s.store(types.Types[types.TINT], lenaddr, l)
3500 type argRec struct {
3501 // if store is true, we're appending the value v. If false, we're appending the
3502 // value at *v.
3506 args := make([]argRec, 0, len(n.Args[1:]))
3507 for _, n := range n.Args[1:] {
3508 if TypeOK(n.Type()) {
3509 args = append(args, argRec{v: s.expr(n), store: true})
3512 args = append(args, argRec{v: v})
3516 // Write args into slice.
3517 oldLen := s.newValue2(s.ssaOp(ir.OSUB, types.Types[types.TINT]), types.Types[types.TINT], l, nargs)
3518 p2 := s.newValue2(ssa.OpPtrIndex, pt, p, oldLen)
3519 for i, arg := range args {
3520 addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(types.Types[types.TINT], int64(i)))
3522 s.storeType(et, addr, arg.v, 0, true)
3524 s.move(et, addr, arg.v)
3528 delete(s.vars, ptrVar)
3529 delete(s.vars, lenVar)
3531 delete(s.vars, capVar)
3538 return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c)
3541 // condBranch evaluates the boolean expression cond and branches to yes
3542 // if cond is true and no if cond is false.
3543 // This function is intended to handle && and || better than just calling
3544 // s.expr(cond) and branching on the result.
3545 func (s *state) condBranch(cond ir.Node, yes, no *ssa.Block, likely int8) {
3548 cond := cond.(*ir.LogicalExpr)
3549 mid := s.f.NewBlock(ssa.BlockPlain)
3550 s.stmtList(cond.Init())
3551 s.condBranch(cond.X, mid, no, max8(likely, 0))
3553 s.condBranch(cond.Y, yes, no, likely)
3555 // Note: if likely==1, then both recursive calls pass 1.
3556 // If likely==-1, then we don't have enough information to decide
3557 // whether the first branch is likely or not. So we pass 0 for
3558 // the likeliness of the first branch.
3559 // TODO: have the frontend give us branch prediction hints for
3560 // OANDAND and OOROR nodes (if it ever has such info).
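// Example (sketch): condBranch(a && b, yes, no, 1) branches on a to
// (mid, no) and then, in mid, branches on b to (yes, no); with
// likely == 1 both generated branches are marked likely, as noted above.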
3562 cond := cond.(*ir.LogicalExpr)
3563 mid := s.f.NewBlock(ssa.BlockPlain)
3564 s.stmtList(cond.Init())
3565 s.condBranch(cond.X, yes, mid, min8(likely, 0))
3567 s.condBranch(cond.Y, yes, no, likely)
3569 // Note: if likely==-1, then both recursive calls pass -1.
3570 // If likely==1, then we don't have enough info to decide
3571 // the likelihood of the first branch.
3573 cond := cond.(*ir.UnaryExpr)
3574 s.stmtList(cond.Init())
3575 s.condBranch(cond.X, no, yes, -likely)
3578 cond := cond.(*ir.ConvExpr)
3579 s.stmtList(cond.Init())
3580 s.condBranch(cond.X, yes, no, likely)
3585 b.Kind = ssa.BlockIf
3587 b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness
3595 skipPtr skipMask = 1 << iota
3600 // assign does left = right.
3601 // Right has already been evaluated to ssa, left has not.
3602 // If deref is true, then we do left = *right instead (and right has already been nil-checked).
3603 // If deref is true and right == nil, just do left = 0.
3604 // skip indicates assignments (at the top level) that can be avoided.
3605 // mayOverlap indicates whether left&right might partially overlap in memory. Default is false.
3606 func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask) {
3607 s.assignWhichMayOverlap(left, right, deref, skip, false)
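// Illustrative calls (sketch, using the skipMask bits declared above):
//
//	s.assign(lhs, rhs, false, 0)     // plain lhs = rhs
//	s.assign(lhs, nil, true, 0)      // lhs = zero value (OpZero)
//	s.assign(lhs, p, true, skipPtr)  // lhs = *p, but don't store the ptr word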
3609 func (s *state) assignWhichMayOverlap(left ir.Node, right *ssa.Value, deref bool, skip skipMask, mayOverlap bool) {
3610 if left.Op() == ir.ONAME && ir.IsBlank(left) {
3617 s.Fatalf("can SSA LHS %v but not RHS %s", left, right)
3619 if left.Op() == ir.ODOT {
3620 // We're assigning to a field of an ssa-able value.
3621 // We need to build a new structure with the new value for the
3622 // field we're assigning and the old values for the other fields.
3624 // type T struct {a, b, c int}
3627 // For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c}
3629 // Grab information about the structure type.
3630 left := left.(*ir.SelectorExpr)
3633 idx := fieldIdx(left)
3635 // Grab old value of structure.
3636 old := s.expr(left.X)
3638 // Make new structure.
3639 new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t)
3641 // Add fields as args.
3642 for i := 0; i < nf; i++ {
3646 new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old))
3650 // Recursively assign the new value we've made to the base of the dot op.
3651 s.assign(left.X, new, false, 0)
3652 // TODO: do we need to update named values here?
3655 if left.Op() == ir.OINDEX && left.(*ir.IndexExpr).X.Type().IsArray() {
3656 left := left.(*ir.IndexExpr)
3657 s.pushLine(left.Pos())
3659 // We're assigning to an element of an ssa-able array.
3664 i := s.expr(left.Index) // index
3666 // The bounds check must fail. Might as well
3667 // ignore the actual index and just use zeros.
3668 z := s.constInt(types.Types[types.TINT], 0)
3669 s.boundsCheck(z, z, ssa.BoundsIndex, false)
3673 s.Fatalf("assigning to non-1-length array")
3675 // Rewrite to a = [1]{v}
3676 len := s.constInt(types.Types[types.TINT], 1)
3677 s.boundsCheck(i, len, ssa.BoundsIndex, false) // checks i == 0
3678 v := s.newValue1(ssa.OpArrayMake1, t, right)
3679 s.assign(left.X, v, false, 0)
3682 left := left.(*ir.Name)
3683 // Update variable assignment.
3684 s.vars[left] = right
3685 s.addNamedValue(left, right)
3689 // If this assignment clobbers an entire local variable, then emit
3690 // OpVarDef so liveness analysis knows the variable is redefined.
3691 if base, ok := clobberBase(left).(*ir.Name); ok && base.OnStack() && skip == 0 && t.HasPointers() {
3692 s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base, s.mem(), !ir.IsAutoTmp(base))
3695 // Left is not ssa-able. Compute its address.
3696 addr := s.addr(left)
3697 if ir.IsReflectHeaderDataField(left) {
3698 // Package unsafe's documentation says storing pointers into
3699 // reflect.SliceHeader and reflect.StringHeader's Data fields
3700 // is valid, even though they have type uintptr (#19168).
3701 // Mark it pointer type to signal the writebarrier pass to
3702 // insert a write barrier.
3703 t = types.Types[types.TUNSAFEPTR]
3706 // Treat as a mem->mem move.
3710 s.moveWhichMayOverlap(t, addr, right, mayOverlap)
3714 // Treat as a store.
3715 s.storeType(t, addr, right, skip, !ir.IsAutoTmp(left))
3718 // zeroVal returns the zero value for type t.
3719 func (s *state) zeroVal(t *types.Type) *ssa.Value {
3724 return s.constInt8(t, 0)
3726 return s.constInt16(t, 0)
3728 return s.constInt32(t, 0)
3730 return s.constInt64(t, 0)
3732 s.Fatalf("bad sized integer type %v", t)
3737 return s.constFloat32(t, 0)
3739 return s.constFloat64(t, 0)
3741 s.Fatalf("bad sized float type %v", t)
3746 z := s.constFloat32(types.Types[types.TFLOAT32], 0)
3747 return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
3749 z := s.constFloat64(types.Types[types.TFLOAT64], 0)
3750 return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
3752 s.Fatalf("bad sized complex type %v", t)
3756 return s.constEmptyString(t)
3757 case t.IsPtrShaped():
3758 return s.constNil(t)
3760 return s.constBool(false)
3761 case t.IsInterface():
3762 return s.constInterface(t)
3764 return s.constSlice(t)
3767 v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t)
3768 for i := 0; i < n; i++ {
3769 v.AddArg(s.zeroVal(t.FieldType(i)))
3773 switch t.NumElem() {
3775 return s.entryNewValue0(ssa.OpArrayMake0, t)
3777 return s.entryNewValue1(ssa.OpArrayMake1, t, s.zeroVal(t.Elem()))
3780 s.Fatalf("zero for type %v not implemented", t)
3787 callNormal callKind = iota
3794 type sfRtCallDef struct {
3799 var softFloatOps map[ssa.Op]sfRtCallDef
3801 func softfloatInit() {
3802 // Some of these operations get transformed by sfcall: Sub becomes Add with a negated second argument, Less/Leq swap arguments and call the Gt/Ge helpers, and Neq negates the Eq result.
3803 softFloatOps = map[ssa.Op]sfRtCallDef{
3804 ssa.OpAdd32F: {typecheck.LookupRuntimeFunc("fadd32"), types.TFLOAT32},
3805 ssa.OpAdd64F: {typecheck.LookupRuntimeFunc("fadd64"), types.TFLOAT64},
3806 ssa.OpSub32F: {typecheck.LookupRuntimeFunc("fadd32"), types.TFLOAT32},
3807 ssa.OpSub64F: {typecheck.LookupRuntimeFunc("fadd64"), types.TFLOAT64},
3808 ssa.OpMul32F: {typecheck.LookupRuntimeFunc("fmul32"), types.TFLOAT32},
3809 ssa.OpMul64F: {typecheck.LookupRuntimeFunc("fmul64"), types.TFLOAT64},
3810 ssa.OpDiv32F: {typecheck.LookupRuntimeFunc("fdiv32"), types.TFLOAT32},
3811 ssa.OpDiv64F: {typecheck.LookupRuntimeFunc("fdiv64"), types.TFLOAT64},
3813 ssa.OpEq64F: {typecheck.LookupRuntimeFunc("feq64"), types.TBOOL},
3814 ssa.OpEq32F: {typecheck.LookupRuntimeFunc("feq32"), types.TBOOL},
3815 ssa.OpNeq64F: {typecheck.LookupRuntimeFunc("feq64"), types.TBOOL},
3816 ssa.OpNeq32F: {typecheck.LookupRuntimeFunc("feq32"), types.TBOOL},
3817 ssa.OpLess64F: {typecheck.LookupRuntimeFunc("fgt64"), types.TBOOL},
3818 ssa.OpLess32F: {typecheck.LookupRuntimeFunc("fgt32"), types.TBOOL},
3819 ssa.OpLeq64F: {typecheck.LookupRuntimeFunc("fge64"), types.TBOOL},
3820 ssa.OpLeq32F: {typecheck.LookupRuntimeFunc("fge32"), types.TBOOL},
3822 ssa.OpCvt32to32F: {typecheck.LookupRuntimeFunc("fint32to32"), types.TFLOAT32},
3823 ssa.OpCvt32Fto32: {typecheck.LookupRuntimeFunc("f32toint32"), types.TINT32},
3824 ssa.OpCvt64to32F: {typecheck.LookupRuntimeFunc("fint64to32"), types.TFLOAT32},
3825 ssa.OpCvt32Fto64: {typecheck.LookupRuntimeFunc("f32toint64"), types.TINT64},
3826 ssa.OpCvt64Uto32F: {typecheck.LookupRuntimeFunc("fuint64to32"), types.TFLOAT32},
3827 ssa.OpCvt32Fto64U: {typecheck.LookupRuntimeFunc("f32touint64"), types.TUINT64},
3828 ssa.OpCvt32to64F: {typecheck.LookupRuntimeFunc("fint32to64"), types.TFLOAT64},
3829 ssa.OpCvt64Fto32: {typecheck.LookupRuntimeFunc("f64toint32"), types.TINT32},
3830 ssa.OpCvt64to64F: {typecheck.LookupRuntimeFunc("fint64to64"), types.TFLOAT64},
3831 ssa.OpCvt64Fto64: {typecheck.LookupRuntimeFunc("f64toint64"), types.TINT64},
3832 ssa.OpCvt64Uto64F: {typecheck.LookupRuntimeFunc("fuint64to64"), types.TFLOAT64},
3833 ssa.OpCvt64Fto64U: {typecheck.LookupRuntimeFunc("f64touint64"), types.TUINT64},
3834 ssa.OpCvt32Fto64F: {typecheck.LookupRuntimeFunc("f32to64"), types.TFLOAT64},
3835 ssa.OpCvt64Fto32F: {typecheck.LookupRuntimeFunc("f64to32"), types.TFLOAT32},
3839 // TODO: do not emit sfcall if the operation can be optimized to a constant in a later optimization phase.
3841 func (s *state) sfcall(op ssa.Op, args ...*ssa.Value) (*ssa.Value, bool) {
3842 f2i := func(t *types.Type) *types.Type {
3844 case types.TFLOAT32:
3845 return types.Types[types.TUINT32]
3846 case types.TFLOAT64:
3847 return types.Types[types.TUINT64]
3852 if callDef, ok := softFloatOps[op]; ok {
3858 args[0], args[1] = args[1], args[0]
3861 args[1] = s.newValue1(s.ssaOp(ir.ONEG, types.Types[callDef.rtype]), args[1].Type, args[1])
3864 // Runtime functions take uints for floats and return uints.
3865 // Convert to uints so we use the right calling convention.
3866 for i, a := range args {
3867 if a.Type.IsFloat() {
3868 args[i] = s.newValue1(ssa.OpCopy, f2i(a.Type), a)
3872 rt := types.Types[callDef.rtype]
3873 result := s.rtcall(callDef.rtfn, true, []*types.Type{f2i(rt)}, args...)[0]
3875 result = s.newValue1(ssa.OpCopy, rt, result)
3877 if op == ssa.OpNeq32F || op == ssa.OpNeq64F {
3878 result = s.newValue1(ssa.OpNot, result.Type, result)
3885 var intrinsics map[intrinsicKey]intrinsicBuilder
3887 // An intrinsicBuilder converts a call node n into an ssa value that
3888 // implements that call as an intrinsic. args is a list of arguments to the func.
3889 type intrinsicBuilder func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value
3891 type intrinsicKey struct {
3898 intrinsics = map[intrinsicKey]intrinsicBuilder{}
3903 var lwatomics []*sys.Arch
3904 for _, a := range &sys.Archs {
3905 all = append(all, a)
3911 if a.Family != sys.PPC64 {
3912 lwatomics = append(lwatomics, a)
3916 // add adds the intrinsic b for pkg.fn for the given list of architectures.
3917 add := func(pkg, fn string, b intrinsicBuilder, archs ...*sys.Arch) {
3918 for _, a := range archs {
3919 intrinsics[intrinsicKey{a, pkg, fn}] = b
3922 // addF does the same as add but operates on architecture families.
3923 addF := func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily) {
3925 for _, f := range archFamilies {
3927 panic("too many architecture families")
3931 for _, a := range all {
3932 if m>>uint(a.Family)&1 != 0 {
3933 intrinsics[intrinsicKey{a, pkg, fn}] = b
3937 // alias defines pkg.fn = pkg2.fn2 for all architectures in archs for which pkg2.fn2 exists.
3938 alias := func(pkg, fn, pkg2, fn2 string, archs ...*sys.Arch) {
3940 for _, a := range archs {
3941 if b, ok := intrinsics[intrinsicKey{a, pkg2, fn2}]; ok {
3942 intrinsics[intrinsicKey{a, pkg, fn}] = b
3947 panic(fmt.Sprintf("attempted to alias undefined intrinsic: %s.%s", pkg, fn))
3951 /******** runtime ********/
3952 if !base.Flag.Cfg.Instrumenting {
3953 add("runtime", "slicebytetostringtmp",
3954 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3955 // Compiler frontend optimizations emit OBYTES2STRTMP nodes
3956 // for the backend instead of slicebytetostringtmp calls
3957 // when not instrumenting.
3958 return s.newValue2(ssa.OpStringMake, n.Type(), args[0], args[1])
3962 addF("runtime/internal/math", "MulUintptr",
3963 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3964 if s.config.PtrSize == 4 {
3965 return s.newValue2(ssa.OpMul32uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1])
3967 return s.newValue2(ssa.OpMul64uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1])
3969 sys.AMD64, sys.I386, sys.Loong64, sys.MIPS64, sys.RISCV64, sys.ARM64)
3970 alias("runtime", "mulUintptr", "runtime/internal/math", "MulUintptr", all...)
3971 add("runtime", "KeepAlive",
3972 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3973 data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0])
3974 s.vars[memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem())
3978 add("runtime", "getclosureptr",
3979 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3980 return s.newValue0(ssa.OpGetClosurePtr, s.f.Config.Types.Uintptr)
3984 add("runtime", "getcallerpc",
3985 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3986 return s.newValue0(ssa.OpGetCallerPC, s.f.Config.Types.Uintptr)
3990 add("runtime", "getcallersp",
3991 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3992 return s.newValue1(ssa.OpGetCallerSP, s.f.Config.Types.Uintptr, s.mem())
3996 addF("runtime", "publicationBarrier",
3997 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3998 s.vars[memVar] = s.newValue1(ssa.OpPubBarrier, types.TypeMem, s.mem())
4001 sys.ARM64, sys.PPC64)
4003 brev_arch := []sys.ArchFamily{sys.AMD64, sys.I386, sys.ARM64, sys.ARM, sys.S390X}
4004 if buildcfg.GOPPC64 >= 10 {
4005 // Use this only on Power10, as the new byte-reverse instructions that
4006 // Power10 provides make it worthwhile as an intrinsic.
4007 brev_arch = append(brev_arch, sys.PPC64)
4009 /******** runtime/internal/sys ********/
4010 addF("runtime/internal/sys", "Bswap32",
4011 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4012 return s.newValue1(ssa.OpBswap32, types.Types[types.TUINT32], args[0])
4015 addF("runtime/internal/sys", "Bswap64",
4016 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4017 return s.newValue1(ssa.OpBswap64, types.Types[types.TUINT64], args[0])
4021 /****** Prefetch ******/
4022 makePrefetchFunc := func(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4023 return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4024 s.vars[memVar] = s.newValue2(op, types.TypeMem, args[0], s.mem())
4029 // Make Prefetch intrinsics for supported platforms.
4030 // On unsupported platforms the stub function will be eliminated.
4031 addF("runtime/internal/sys", "Prefetch", makePrefetchFunc(ssa.OpPrefetchCache),
4032 sys.AMD64, sys.ARM64, sys.PPC64)
4033 addF("runtime/internal/sys", "PrefetchStreamed", makePrefetchFunc(ssa.OpPrefetchCacheStreamed),
4034 sys.AMD64, sys.ARM64, sys.PPC64)
4036 /******** runtime/internal/atomic ********/
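// Each atomic builder below produces a (value, memory) tuple: Select0
// projects the loaded/returned value and Select1 threads the new memory
// state back into s.vars[memVar].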
4037 addF("runtime/internal/atomic", "Load",
4038 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4039 v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem())
4040 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
4041 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
4043 sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
4044 addF("runtime/internal/atomic", "Load8",
4045 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4046 v := s.newValue2(ssa.OpAtomicLoad8, types.NewTuple(types.Types[types.TUINT8], types.TypeMem), args[0], s.mem())
4047 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
4048 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT8], v)
4050 sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
4051 addF("runtime/internal/atomic", "Load64",
4052 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4053 v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem())
4054 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
4055 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
4057 sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
4058 addF("runtime/internal/atomic", "LoadAcq",
4059 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4060 v := s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem())
4061 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
4062 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
4064 sys.PPC64, sys.S390X)
4065 addF("runtime/internal/atomic", "LoadAcq64",
4066 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4067 v := s.newValue2(ssa.OpAtomicLoadAcq64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem())
4068 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
4069 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
4072 addF("runtime/internal/atomic", "Loadp",
4073 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4074 v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem())
4075 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
4076 return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v)
4078 sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
4080 addF("runtime/internal/atomic", "Store",
4081 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4082 s.vars[memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem())
4085 sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
4086 addF("runtime/internal/atomic", "Store8",
4087 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4088 s.vars[memVar] = s.newValue3(ssa.OpAtomicStore8, types.TypeMem, args[0], args[1], s.mem())
4091 sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
4092 addF("runtime/internal/atomic", "Store64",
4093 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4094 s.vars[memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem())
4097 sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
4098 addF("runtime/internal/atomic", "StorepNoWB",
4099 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4100 s.vars[memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem())
4103 sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.RISCV64, sys.S390X)
4104 addF("runtime/internal/atomic", "StoreRel",
4105 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4106 s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel32, types.TypeMem, args[0], args[1], s.mem())
4109 sys.PPC64, sys.S390X)
4110 addF("runtime/internal/atomic", "StoreRel64",
4111 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4112 s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel64, types.TypeMem, args[0], args[1], s.mem())
4117 addF("runtime/internal/atomic", "Xchg",
4118 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4119 v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem())
4120 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
4121 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
4123 sys.AMD64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
4124 addF("runtime/internal/atomic", "Xchg64",
4125 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4126 v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem())
4127 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
4128 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
4130 sys.AMD64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
4132 type atomicOpEmitter func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind)
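// makeAtomicGuardedIntrinsicARM64 emits, schematically (a sketch; the
// detection flag is the runtime-initialized ir.Syms.ARM64HasATOMICS):
//
//	if arm64HasATOMICS {
//		v = op1(args...) // LSE atomic-instruction variant
//	} else {
//		v = op0(args...) // original (LL/SC-style) sequence
//	}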
4134 makeAtomicGuardedIntrinsicARM64 := func(op0, op1 ssa.Op, typ, rtyp types.Kind, emit atomicOpEmitter) intrinsicBuilder {
4136 return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4137 // Whether the target supports the atomic instruction variants is detected dynamically.
4138 addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), ir.Syms.ARM64HasATOMICS, s.sb)
4139 v := s.load(types.Types[types.TBOOL], addr)
4141 b.Kind = ssa.BlockIf
4143 bTrue := s.f.NewBlock(ssa.BlockPlain)
4144 bFalse := s.f.NewBlock(ssa.BlockPlain)
4145 bEnd := s.f.NewBlock(ssa.BlockPlain)
4148 b.Likely = ssa.BranchLikely
4150 // We have atomic instructions - use them directly.
4152 emit(s, n, args, op1, typ)
4153 s.endBlock().AddEdgeTo(bEnd)
4155 // Use original instruction sequence.
4156 s.startBlock(bFalse)
4157 emit(s, n, args, op0, typ)
4158 s.endBlock().AddEdgeTo(bEnd)
4162 if rtyp == types.TNIL {
4165 return s.variable(n, types.Types[rtyp])
4170 atomicXchgXaddEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
4171 v := s.newValue3(op, types.NewTuple(types.Types[typ], types.TypeMem), args[0], args[1], s.mem())
4172 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
4173 s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
4175 addF("runtime/internal/atomic", "Xchg",
4176 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange32, ssa.OpAtomicExchange32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64),
4178 addF("runtime/internal/atomic", "Xchg64",
4179 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange64, ssa.OpAtomicExchange64Variant, types.TUINT64, types.TUINT64, atomicXchgXaddEmitterARM64),
4182 addF("runtime/internal/atomic", "Xadd",
4183 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4184 v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem())
4185 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
4186 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
4188 sys.AMD64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
4189 addF("runtime/internal/atomic", "Xadd64",
4190 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4191 v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem())
4192 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
4193 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
4195 sys.AMD64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
4197 addF("runtime/internal/atomic", "Xadd",
4198 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd32, ssa.OpAtomicAdd32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64),
4200 addF("runtime/internal/atomic", "Xadd64",
4201 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd64, ssa.OpAtomicAdd64Variant, types.TUINT64, types.TUINT64, atomicXchgXaddEmitterARM64),
4204 addF("runtime/internal/atomic", "Cas",
4205 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4206 v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
4207 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
4208 return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
4210 sys.AMD64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
4211 addF("runtime/internal/atomic", "Cas64",
4212 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4213 v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
4214 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
4215 return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
4217 sys.AMD64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
4218 addF("runtime/internal/atomic", "CasRel",
4219 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4220 v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
4221 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
4222 return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
4226 atomicCasEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
4227 v := s.newValue4(op, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
4228 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
4229 s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
4232 addF("runtime/internal/atomic", "Cas",
4233 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap32, ssa.OpAtomicCompareAndSwap32Variant, types.TUINT32, types.TBOOL, atomicCasEmitterARM64),
4235 addF("runtime/internal/atomic", "Cas64",
4236 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap64, ssa.OpAtomicCompareAndSwap64Variant, types.TUINT64, types.TBOOL, atomicCasEmitterARM64),
4239 addF("runtime/internal/atomic", "And8",
4240 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4241 s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem())
4244 sys.AMD64, sys.MIPS, sys.PPC64, sys.RISCV64, sys.S390X)
4245 addF("runtime/internal/atomic", "And",
4246 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4247 s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd32, types.TypeMem, args[0], args[1], s.mem())
4250 sys.AMD64, sys.MIPS, sys.PPC64, sys.RISCV64, sys.S390X)
4251 addF("runtime/internal/atomic", "Or8",
4252 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4253 s.vars[memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem())
4256 sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.RISCV64, sys.S390X)
4257 addF("runtime/internal/atomic", "Or",
4258 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4259 s.vars[memVar] = s.newValue3(ssa.OpAtomicOr32, types.TypeMem, args[0], args[1], s.mem())
4262 sys.AMD64, sys.MIPS, sys.PPC64, sys.RISCV64, sys.S390X)
4264 atomicAndOrEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
4265 s.vars[memVar] = s.newValue3(op, types.TypeMem, args[0], args[1], s.mem())
4268 addF("runtime/internal/atomic", "And8",
4269 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd8, ssa.OpAtomicAnd8Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
4271 addF("runtime/internal/atomic", "And",
4272 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd32, ssa.OpAtomicAnd32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
4274 addF("runtime/internal/atomic", "Or8",
4275 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr8, ssa.OpAtomicOr8Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
4277 addF("runtime/internal/atomic", "Or",
4278 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr32, ssa.OpAtomicOr32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
4281 // Aliases for atomic load operations
4282 alias("runtime/internal/atomic", "Loadint32", "runtime/internal/atomic", "Load", all...)
4283 alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...)
4284 alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load", p4...)
4285 alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load64", p8...)
4286 alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load", p4...)
4287 alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load64", p8...)
4288 alias("runtime/internal/atomic", "LoadAcq", "runtime/internal/atomic", "Load", lwatomics...)
4289 alias("runtime/internal/atomic", "LoadAcq64", "runtime/internal/atomic", "Load64", lwatomics...)
4290 alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...)
4291 alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...) // linknamed
4292 alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...)
4293 alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...) // linknamed
4295 // Aliases for atomic store operations
4296 alias("runtime/internal/atomic", "Storeint32", "runtime/internal/atomic", "Store", all...)
4297 alias("runtime/internal/atomic", "Storeint64", "runtime/internal/atomic", "Store64", all...)
4298 alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...)
4299 alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...)
4300 alias("runtime/internal/atomic", "StoreRel", "runtime/internal/atomic", "Store", lwatomics...)
4301 alias("runtime/internal/atomic", "StoreRel64", "runtime/internal/atomic", "Store64", lwatomics...)
4302 alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...)
4303 alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...) // linknamed
4304 alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...)
4305 alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...) // linknamed
4307 // Aliases for atomic swap operations
4308 alias("runtime/internal/atomic", "Xchgint32", "runtime/internal/atomic", "Xchg", all...)
4309 alias("runtime/internal/atomic", "Xchgint64", "runtime/internal/atomic", "Xchg64", all...)
4310 alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...)
4311 alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...)
4313 // Aliases for atomic add operations
4314 alias("runtime/internal/atomic", "Xaddint32", "runtime/internal/atomic", "Xadd", all...)
4315 alias("runtime/internal/atomic", "Xaddint64", "runtime/internal/atomic", "Xadd64", all...)
4316 alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...)
4317 alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd64", p8...)
4319 // Aliases for atomic CAS operations
4320 alias("runtime/internal/atomic", "Casint32", "runtime/internal/atomic", "Cas", all...)
4321 alias("runtime/internal/atomic", "Casint64", "runtime/internal/atomic", "Cas64", all...)
4322 alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas", p4...)
4323 alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas64", p8...)
4324 alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas", p4...)
4325 alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas64", p8...)
4326 alias("runtime/internal/atomic", "CasRel", "runtime/internal/atomic", "Cas", lwatomics...)
4328 /******** math ********/
4329 addF("math", "sqrt",
4330 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4331 return s.newValue1(ssa.OpSqrt, types.Types[types.TFLOAT64], args[0])
4333 sys.I386, sys.AMD64, sys.ARM, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm)
4334 addF("math", "Trunc",
4335 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4336 return s.newValue1(ssa.OpTrunc, types.Types[types.TFLOAT64], args[0])
4338 sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
4339 addF("math", "Ceil",
4340 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4341 return s.newValue1(ssa.OpCeil, types.Types[types.TFLOAT64], args[0])
4343 sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
4344 addF("math", "Floor",
4345 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4346 return s.newValue1(ssa.OpFloor, types.Types[types.TFLOAT64], args[0])
4348 sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
4349 addF("math", "Round",
4350 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4351 return s.newValue1(ssa.OpRound, types.Types[types.TFLOAT64], args[0])
4353 sys.ARM64, sys.PPC64, sys.S390X)
4354 addF("math", "RoundToEven",
4355 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4356 return s.newValue1(ssa.OpRoundToEven, types.Types[types.TFLOAT64], args[0])
4358 sys.ARM64, sys.S390X, sys.Wasm)
4360 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4361 return s.newValue1(ssa.OpAbs, types.Types[types.TFLOAT64], args[0])
4363 sys.ARM64, sys.ARM, sys.PPC64, sys.RISCV64, sys.Wasm)
4364 addF("math", "Copysign",
4365 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4366 return s.newValue2(ssa.OpCopysign, types.Types[types.TFLOAT64], args[0], args[1])
4368 sys.PPC64, sys.RISCV64, sys.Wasm)
4370 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4371 return s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
4373 sys.ARM64, sys.PPC64, sys.RISCV64, sys.S390X)
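// The AMD64 builder below guards the instruction behind a CPU-feature
// check unless GOAMD64 >= v3 makes it unconditional. Schematically
// (a sketch):
//
//	if hasFMA { // ssa.OpHasCPUFeature(ir.Syms.X86HasFMA)
//		r = FMA(x, y, z) // hardware instruction
//	} else {
//		r = math.FMA(x, y, z) // normal call to the pure-Go version
//	}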
4375 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4376 if !s.config.UseFMA {
4377 s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
4378 return s.variable(n, types.Types[types.TFLOAT64])
4381 if buildcfg.GOAMD64 >= 3 {
4382 return s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
4385 v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasFMA)
4387 b.Kind = ssa.BlockIf
4389 bTrue := s.f.NewBlock(ssa.BlockPlain)
4390 bFalse := s.f.NewBlock(ssa.BlockPlain)
4391 bEnd := s.f.NewBlock(ssa.BlockPlain)
4394 b.Likely = ssa.BranchLikely // Haswell and newer CPUs are common
4396 // We have the intrinsic - use it directly.
4398 s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
4399 s.endBlock().AddEdgeTo(bEnd)
4401 // Call the pure Go version.
4402 s.startBlock(bFalse)
4403 s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
4404 s.endBlock().AddEdgeTo(bEnd)
4408 return s.variable(n, types.Types[types.TFLOAT64])
4412 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4413 if !s.config.UseFMA {
4414 s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
4415 return s.variable(n, types.Types[types.TFLOAT64])
4417 addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), ir.Syms.ARMHasVFPv4, s.sb)
4418 v := s.load(types.Types[types.TBOOL], addr)
4420 b.Kind = ssa.BlockIf
4422 bTrue := s.f.NewBlock(ssa.BlockPlain)
4423 bFalse := s.f.NewBlock(ssa.BlockPlain)
4424 bEnd := s.f.NewBlock(ssa.BlockPlain)
4427 b.Likely = ssa.BranchLikely
4429 // We have the intrinsic - use it directly.
4431 s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
4432 s.endBlock().AddEdgeTo(bEnd)
4434 // Call the pure Go version.
4435 s.startBlock(bFalse)
4436 s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
4437 s.endBlock().AddEdgeTo(bEnd)
4441 return s.variable(n, types.Types[types.TFLOAT64])
4445 makeRoundAMD64 := func(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4446 return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4447 if buildcfg.GOAMD64 >= 2 {
4448 return s.newValue1(op, types.Types[types.TFLOAT64], args[0])
4451 v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasSSE41)
4453 b.Kind = ssa.BlockIf
4455 bTrue := s.f.NewBlock(ssa.BlockPlain)
4456 bFalse := s.f.NewBlock(ssa.BlockPlain)
4457 bEnd := s.f.NewBlock(ssa.BlockPlain)
4460 b.Likely = ssa.BranchLikely // most machines have SSE4.1 nowadays
4462 // We have the intrinsic - use it directly.
4464 s.vars[n] = s.newValue1(op, types.Types[types.TFLOAT64], args[0])
4465 s.endBlock().AddEdgeTo(bEnd)
4467 // Call the pure Go version.
4468 s.startBlock(bFalse)
4469 s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
4470 s.endBlock().AddEdgeTo(bEnd)
4474 return s.variable(n, types.Types[types.TFLOAT64])
4477 addF("math", "RoundToEven",
4478 makeRoundAMD64(ssa.OpRoundToEven),
4480 addF("math", "Floor",
4481 makeRoundAMD64(ssa.OpFloor),
4483 addF("math", "Ceil",
4484 makeRoundAMD64(ssa.OpCeil),
4486 addF("math", "Trunc",
4487 makeRoundAMD64(ssa.OpTrunc),
4490 /******** math/bits ********/
4491 addF("math/bits", "TrailingZeros64",
4492 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4493 return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], args[0])
4495 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
4496 addF("math/bits", "TrailingZeros32",
4497 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4498 return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], args[0])
4500 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
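// Architectures below without a native 16-bit count-trailing-zeros widen
// the argument and OR in a sentinel bit just above it, capping the result
// at 16 (the 8-bit builders further down use the same trick with 1<<8):
//
//	Ctz32(uint32(x) | 1<<16) // yields 16 when x == 0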
4501 addF("math/bits", "TrailingZeros16",
4502 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4503 x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0])
4504 c := s.constInt32(types.Types[types.TUINT32], 1<<16)
4505 y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c)
4506 return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], y)
4509 addF("math/bits", "TrailingZeros16",
4510 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4511 return s.newValue1(ssa.OpCtz16, types.Types[types.TINT], args[0])
4513 sys.AMD64, sys.I386, sys.ARM, sys.ARM64, sys.Wasm)
4514 addF("math/bits", "TrailingZeros16",
4515 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4516 x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0])
4517 c := s.constInt64(types.Types[types.TUINT64], 1<<16)
4518 y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c)
4519 return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], y)
4521 sys.S390X, sys.PPC64)
4522 addF("math/bits", "TrailingZeros8",
4523 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4524 x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0])
4525 c := s.constInt32(types.Types[types.TUINT32], 1<<8)
4526 y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c)
4527 return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], y)
4530 addF("math/bits", "TrailingZeros8",
4531 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4532 return s.newValue1(ssa.OpCtz8, types.Types[types.TINT], args[0])
4534 sys.AMD64, sys.ARM, sys.ARM64, sys.Wasm)
4535 addF("math/bits", "TrailingZeros8",
4536 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4537 x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0])
4538 c := s.constInt64(types.Types[types.TUINT64], 1<<8)
4539 y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c)
4540 return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], y)
4543 alias("math/bits", "ReverseBytes64", "runtime/internal/sys", "Bswap64", all...)
4544 alias("math/bits", "ReverseBytes32", "runtime/internal/sys", "Bswap32", all...)
4545 // ReverseBytes inlines correctly; no need to intrinsify it.
4546 // Nothing special is needed for targets where ReverseBytes16 lowers to a rotate.
4547 // On Power10, a 16-bit rotate is not available, so use the BRH instruction.
4548 if buildcfg.GOPPC64 >= 10 {
4549 addF("math/bits", "ReverseBytes16",
4550 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4551 return s.newValue1(ssa.OpBswap16, types.Types[types.TUINT], args[0])
4556 addF("math/bits", "Len64",
4557 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4558 return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0])
4560 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
4561 addF("math/bits", "Len32",
4562 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4563 return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
4565 sys.AMD64, sys.ARM64, sys.PPC64)
4566 addF("math/bits", "Len32",
4567 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4568 if s.config.PtrSize == 4 {
4569 return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
4571 x := s.newValue1(ssa.OpZeroExt32to64, types.Types[types.TUINT64], args[0])
4572 return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
4574 sys.ARM, sys.S390X, sys.MIPS, sys.Wasm)
4575 addF("math/bits", "Len16",
4576 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4577 if s.config.PtrSize == 4 {
4578 x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0])
4579 return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x)
4581 x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0])
4582 return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
4584 sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
4585 addF("math/bits", "Len16",
4586 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4587 return s.newValue1(ssa.OpBitLen16, types.Types[types.TINT], args[0])
4590 addF("math/bits", "Len8",
4591 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4592 if s.config.PtrSize == 4 {
4593 x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0])
4594 return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x)
4596 x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0])
4597 return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
4599 sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
4600 addF("math/bits", "Len8",
4601 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4602 return s.newValue1(ssa.OpBitLen8, types.Types[types.TINT], args[0])
4605 addF("math/bits", "Len",
4606 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4607 if s.config.PtrSize == 4 {
4608 return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
4610 return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0])
4612 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
4613 // LeadingZeros is handled because it trivially calls Len.
4614 addF("math/bits", "Reverse64",
4615 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4616 return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0])
4619 addF("math/bits", "Reverse32",
4620 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4621 return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0])
4624 addF("math/bits", "Reverse16",
4625 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4626 return s.newValue1(ssa.OpBitRev16, types.Types[types.TINT], args[0])
4629 addF("math/bits", "Reverse8",
4630 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4631 return s.newValue1(ssa.OpBitRev8, types.Types[types.TINT], args[0])
4634 addF("math/bits", "Reverse",
4635 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4636 return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0])
4639 addF("math/bits", "RotateLeft8",
4640 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4641 return s.newValue2(ssa.OpRotateLeft8, types.Types[types.TUINT8], args[0], args[1])
4644 addF("math/bits", "RotateLeft16",
4645 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4646 return s.newValue2(ssa.OpRotateLeft16, types.Types[types.TUINT16], args[0], args[1])
4649 addF("math/bits", "RotateLeft32",
4650 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4651 return s.newValue2(ssa.OpRotateLeft32, types.Types[types.TUINT32], args[0], args[1])
4653 sys.AMD64, sys.ARM, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm, sys.Loong64)
4654 addF("math/bits", "RotateLeft64",
4655 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4656 return s.newValue2(ssa.OpRotateLeft64, types.Types[types.TUINT64], args[0], args[1])
4658 sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm, sys.Loong64)
4659 alias("math/bits", "RotateLeft", "math/bits", "RotateLeft64", p8...)
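// makeOnesCountAMD64 below follows the same CPU-feature-guard shape as
// the FMA and rounding builders above: unconditional with GOAMD64 >= v2,
// otherwise branch on POPCNT support with a pure-Go fallback call.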
4661 makeOnesCountAMD64 := func(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4662 return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4663 if buildcfg.GOAMD64 >= 2 {
4664 return s.newValue1(op, types.Types[types.TINT], args[0])
4667 v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasPOPCNT)
4669 b.Kind = ssa.BlockIf
4671 bTrue := s.f.NewBlock(ssa.BlockPlain)
4672 bFalse := s.f.NewBlock(ssa.BlockPlain)
4673 bEnd := s.f.NewBlock(ssa.BlockPlain)
4676 b.Likely = ssa.BranchLikely // most machines have POPCNT nowadays
4678 // We have the intrinsic - use it directly.
4680 s.vars[n] = s.newValue1(op, types.Types[types.TINT], args[0])
4681 s.endBlock().AddEdgeTo(bEnd)
4683 // Call the pure Go version.
4684 s.startBlock(bFalse)
4685 s.vars[n] = s.callResult(n, callNormal) // types.Types[TINT]
4686 s.endBlock().AddEdgeTo(bEnd)
4690 return s.variable(n, types.Types[types.TINT])
4693 addF("math/bits", "OnesCount64",
4694 makeOnesCountAMD64(ssa.OpPopCount64),
4696 addF("math/bits", "OnesCount64",
4697 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4698 return s.newValue1(ssa.OpPopCount64, types.Types[types.TINT], args[0])
4700 sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
4701 addF("math/bits", "OnesCount32",
4702 makeOnesCountAMD64(ssa.OpPopCount32),
4704 addF("math/bits", "OnesCount32",
4705 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4706 return s.newValue1(ssa.OpPopCount32, types.Types[types.TINT], args[0])
4708 sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
4709 addF("math/bits", "OnesCount16",
4710 makeOnesCountAMD64(ssa.OpPopCount16),
4712 addF("math/bits", "OnesCount16",
4713 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4714 return s.newValue1(ssa.OpPopCount16, types.Types[types.TINT], args[0])
4716 sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
4717 addF("math/bits", "OnesCount8",
4718 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4719 return s.newValue1(ssa.OpPopCount8, types.Types[types.TINT], args[0])
4721 sys.S390X, sys.PPC64, sys.Wasm)
4722 addF("math/bits", "OnesCount",
4723 makeOnesCountAMD64(ssa.OpPopCount64),
4725 addF("math/bits", "Mul64",
4726 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4727 return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1])
4729 sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.MIPS64, sys.RISCV64, sys.Loong64)
4730 alias("math/bits", "Mul", "math/bits", "Mul64", p8...)
4731 alias("runtime/internal/math", "Mul64", "math/bits", "Mul64", p8...)
4732 addF("math/bits", "Add64",
4733 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4734 return s.newValue3(ssa.OpAdd64carry, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
4736 sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.RISCV64, sys.Loong64)
4737 alias("math/bits", "Add", "math/bits", "Add64", p8...)
4738 addF("math/bits", "Sub64",
4739 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4740 return s.newValue3(ssa.OpSub64borrow, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
4742 sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.RISCV64, sys.Loong64)
4743 alias("math/bits", "Sub", "math/bits", "Sub64", p8...)
4744 addF("math/bits", "Div64",
4745 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4746 // check for divide-by-zero/overflow and panic with appropriate message
4747 cmpZero := s.newValue2(s.ssaOp(ir.ONE, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[2], s.zeroVal(types.Types[types.TUINT64]))
4748 s.check(cmpZero, ir.Syms.Panicdivide)
4749 cmpOverflow := s.newValue2(s.ssaOp(ir.OLT, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[0], args[2])
4750 s.check(cmpOverflow, ir.Syms.Panicoverflow)
4751 return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
4754 alias("math/bits", "Div", "math/bits", "Div64", sys.ArchAMD64)
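// The two checks in the Div64 builder above mirror math/bits.Div64's
// documented preconditions: y == 0 panics with a divide error, and
// hi >= y would overflow the 64-bit quotient, so it panics with overflow.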
4756 alias("runtime/internal/sys", "TrailingZeros8", "math/bits", "TrailingZeros8", all...)
4757 alias("runtime/internal/sys", "TrailingZeros32", "math/bits", "TrailingZeros32", all...)
4758 alias("runtime/internal/sys", "TrailingZeros64", "math/bits", "TrailingZeros64", all...)
4759 alias("runtime/internal/sys", "Len8", "math/bits", "Len8", all...)
4760 alias("runtime/internal/sys", "Len64", "math/bits", "Len64", all...)
4761 alias("runtime/internal/sys", "OnesCount64", "math/bits", "OnesCount64", all...)
4763 /******** sync/atomic ********/
4765 // Note: these are disabled by base.Flag.Race in findIntrinsic below.
4766 alias("sync/atomic", "LoadInt32", "runtime/internal/atomic", "Load", all...)
4767 alias("sync/atomic", "LoadInt64", "runtime/internal/atomic", "Load64", all...)
4768 alias("sync/atomic", "LoadPointer", "runtime/internal/atomic", "Loadp", all...)
4769 alias("sync/atomic", "LoadUint32", "runtime/internal/atomic", "Load", all...)
4770 alias("sync/atomic", "LoadUint64", "runtime/internal/atomic", "Load64", all...)
4771 alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load", p4...)
4772 alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load64", p8...)
4774 alias("sync/atomic", "StoreInt32", "runtime/internal/atomic", "Store", all...)
4775 alias("sync/atomic", "StoreInt64", "runtime/internal/atomic", "Store64", all...)
4776 // Note: not StorePointer, that needs a write barrier. Same below for {CompareAnd}Swap.
4777 alias("sync/atomic", "StoreUint32", "runtime/internal/atomic", "Store", all...)
4778 alias("sync/atomic", "StoreUint64", "runtime/internal/atomic", "Store64", all...)
4779 alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store", p4...)
4780 alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store64", p8...)
4782 alias("sync/atomic", "SwapInt32", "runtime/internal/atomic", "Xchg", all...)
4783 alias("sync/atomic", "SwapInt64", "runtime/internal/atomic", "Xchg64", all...)
4784 alias("sync/atomic", "SwapUint32", "runtime/internal/atomic", "Xchg", all...)
4785 alias("sync/atomic", "SwapUint64", "runtime/internal/atomic", "Xchg64", all...)
4786 alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg", p4...)
4787 alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg64", p8...)
4789 alias("sync/atomic", "CompareAndSwapInt32", "runtime/internal/atomic", "Cas", all...)
4790 alias("sync/atomic", "CompareAndSwapInt64", "runtime/internal/atomic", "Cas64", all...)
4791 alias("sync/atomic", "CompareAndSwapUint32", "runtime/internal/atomic", "Cas", all...)
4792 alias("sync/atomic", "CompareAndSwapUint64", "runtime/internal/atomic", "Cas64", all...)
4793 alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas", p4...)
4794 alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas64", p8...)
4796 alias("sync/atomic", "AddInt32", "runtime/internal/atomic", "Xadd", all...)
4797 alias("sync/atomic", "AddInt64", "runtime/internal/atomic", "Xadd64", all...)
4798 alias("sync/atomic", "AddUint32", "runtime/internal/atomic", "Xadd", all...)
4799 alias("sync/atomic", "AddUint64", "runtime/internal/atomic", "Xadd64", all...)
4800 alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd", p4...)
4801 alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd64", p8...)
4803 /******** math/big ********/
4804 alias("math/big", "mulWW", "math/bits", "Mul64", p8...)
4807 // findIntrinsic returns a function which builds the SSA equivalent of the
4808 // function identified by the symbol sym. If sym is not an intrinsic call, returns nil.
4809 func findIntrinsic(sym *types.Sym) intrinsicBuilder {
4810 if sym == nil || sym.Pkg == nil {
4814 if sym.Pkg == ir.Pkgs.Runtime {
4817 if base.Flag.Race && pkg == "sync/atomic" {
4818 // The race detector needs to be able to intercept these calls.
4819 // We can't intrinsify them.
4822 // Skip intrinsifying math functions (which may contain hard-float
4823 // instructions) when compiling with soft-float.
4824 if Arch.SoftFloat && pkg == "math" {
4829 if ssa.IntrinsicsDisable {
4830 if pkg == "runtime" && (fn == "getcallerpc" || fn == "getcallersp" || fn == "getclosureptr") {
4831 // These runtime functions don't have definitions; they must be intrinsics.
4836 return intrinsics[intrinsicKey{Arch.LinkArch.Arch, pkg, fn}]
4839 func IsIntrinsicCall(n *ir.CallExpr) bool {
4843 name, ok := n.X.(*ir.Name)
4847 return findIntrinsic(name.Sym()) != nil
4850 // intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation.
4851 func (s *state) intrinsicCall(n *ir.CallExpr) *ssa.Value {
4852 v := findIntrinsic(n.X.Sym())(s, n, s.intrinsicArgs(n))
4853 if ssa.IntrinsicsDebug > 0 {
4858 if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 {
4861 base.WarnfAt(n.Pos(), "intrinsic substitution for %v with %s", n.X.Sym().Name, x.LongString())
4866 // intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them.
4867 func (s *state) intrinsicArgs(n *ir.CallExpr) []*ssa.Value {
4868 args := make([]*ssa.Value, len(n.Args))
4869 for i, n := range n.Args {
4875 // openDeferRecord adds code to evaluate and store the function for an open-coded defer
4876 // call, and records info about the defer, so we can generate proper code on the
4877 // exit paths. n is the sub-node of the defer node that is the actual function
4878 // call. We will also record funcdata information on where the function is stored
4879 // (as well as the deferBits variable), and this will enable us to run the proper
4880 // defer calls during panics.
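// For example (sketch): for the i'th open-coded defer in a function, the
// code below saves the deferred func value to a stack slot and then sets
// bit i:
//
//	deferBits |= 1 << i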
4881 func (s *state) openDeferRecord(n *ir.CallExpr) {
4882 if len(n.Args) != 0 || n.Op() != ir.OCALLFUNC || n.X.Type().NumResults() != 0 {
4883 s.Fatalf("defer call with arguments or results: %v", n)
4886 opendefer := &openDeferInfo{
4890 // We must always store the function value in a stack slot for the
4891 // runtime panic code to use. But in the defer exit code, we will
4892 // call the function directly if it is a static function.
4893 closureVal := s.expr(fn)
4894 closure := s.openDeferSave(fn.Type(), closureVal)
4895 opendefer.closureNode = closure.Aux.(*ir.Name)
4896 if !(fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC) {
4897 opendefer.closure = closure
4899 index := len(s.openDefers)
4900 s.openDefers = append(s.openDefers, opendefer)
4902 // Update deferBits only after the function has been successfully
4903 // evaluated and stored to the stack.
4904 bitvalue := s.constInt8(types.Types[types.TUINT8], 1<<uint(index))
4905 newDeferBits := s.newValue2(ssa.OpOr8, types.Types[types.TUINT8], s.variable(deferBitsVar, types.Types[types.TUINT8]), bitvalue)
4906 s.vars[deferBitsVar] = newDeferBits
4907 s.store(types.Types[types.TUINT8], s.deferBitsAddr, newDeferBits)
4910 // openDeferSave generates SSA nodes to store a value (with type t) for an
4911 // open-coded defer at an explicit autotmp location on the stack, so it can be
4912 // reloaded and used for the appropriate call on exit. Type t must be a function type
4913 // (therefore SSAable). val is the value to be stored. The function returns an SSA
4914 // value representing a pointer to the autotmp location.
4915 func (s *state) openDeferSave(t *types.Type, val *ssa.Value) *ssa.Value {
4917 s.Fatalf("openDeferSave of non-SSA-able type %v val=%v", t, val)
4919 if !t.HasPointers() {
4920 s.Fatalf("openDeferSave of pointerless type %v val=%v", t, val)
4923 temp := typecheck.TempAt(pos.WithNotStmt(), s.curfn, t)
4924 temp.SetOpenDeferSlot(true)
4925 var addrTemp *ssa.Value
4926 // Use OpVarLive to make sure the stack slot for the closure is not
4927 // removed by dead-store elimination.
4928 if s.curBlock.ID != s.f.Entry.ID {
4929 // Force the tmp storing this defer function to be declared in the entry
4930 // block, so that it will be live for the defer exit code (which will
4931 // actually access it only if the associated defer call has been activated).
4932 if t.HasPointers() {
4933 s.defvars[s.f.Entry.ID][memVar] = s.f.Entry.NewValue1A(src.NoXPos, ssa.OpVarDef, types.TypeMem, temp, s.defvars[s.f.Entry.ID][memVar])
4935 s.defvars[s.f.Entry.ID][memVar] = s.f.Entry.NewValue1A(src.NoXPos, ssa.OpVarLive, types.TypeMem, temp, s.defvars[s.f.Entry.ID][memVar])
4936 addrTemp = s.f.Entry.NewValue2A(src.NoXPos, ssa.OpLocalAddr, types.NewPtr(temp.Type()), temp, s.sp, s.defvars[s.f.Entry.ID][memVar])
4938 // Special case if we're still in the entry block. We can't use
4939 // the above code, since s.defvars[s.f.Entry.ID] isn't defined
4940 // until we end the entry block with s.endBlock().
4941 if t.HasPointers() {
4942 s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, temp, s.mem(), false)
4944 s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, temp, s.mem(), false)
4945 addrTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(temp.Type()), temp, s.sp, s.mem(), false)
4947 // Since we may use this temp during exit depending on the
4948 // deferBits, we must define it unconditionally on entry.
4949 // Therefore, we must make sure it is zeroed out in the entry
4950 // block if it contains pointers, else the GC may wrongly follow an
4951 // uninitialized pointer value.
4952 temp.SetNeedzero(true)
4953 // We are storing to the stack, hence we can avoid the full checks in
4954 // storeType() (no write barrier) and do a simple store().
4955 s.store(t, addrTemp, val)
4959 // openDeferExit generates SSA for processing all the open-coded defers at exit.
4960 // The code involves loading deferBits, and checking each of the bits to see if
4961 // the corresponding defer statement was executed. For each bit that is turned
4962 // on, the associated defer call is made.
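// Schematically, for open-coded defers d_0..d_n the exit code behaves
// like (a sketch):
//
//	for i := n; i >= 0; i-- {
//		if deferBits&(1<<i) != 0 {
//			deferBits &^= 1 << i // cleared first, in case d_i panics
//			d_i()                // direct call, or closure call
//		}
//	}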
4963 func (s *state) openDeferExit() {
4964 deferExit := s.f.NewBlock(ssa.BlockPlain)
4965 s.endBlock().AddEdgeTo(deferExit)
4966 s.startBlock(deferExit)
4967 s.lastDeferExit = deferExit
4968 s.lastDeferCount = len(s.openDefers)
4969 zeroval := s.constInt8(types.Types[types.TUINT8], 0)
4970 // Test for and run defers in reverse order.
4971 for i := len(s.openDefers) - 1; i >= 0; i-- {
4972 r := s.openDefers[i]
4973 bCond := s.f.NewBlock(ssa.BlockPlain)
4974 bEnd := s.f.NewBlock(ssa.BlockPlain)
4976 deferBits := s.variable(deferBitsVar, types.Types[types.TUINT8])
4977 // Generate code to check if the bit associated with the current defer is set; if so, run the corresponding defer call.
4979 bitval := s.constInt8(types.Types[types.TUINT8], 1<<uint(i))
4980 andval := s.newValue2(ssa.OpAnd8, types.Types[types.TUINT8], deferBits, bitval)
4981 eqVal := s.newValue2(ssa.OpEq8, types.Types[types.TBOOL], andval, zeroval)
4983 b.Kind = ssa.BlockIf
4987 bCond.AddEdgeTo(bEnd)
4990 // Clear this bit in deferBits and force a store back to the stack, so
4991 // we will not try to re-run this defer call if it panics.
4992 nbitval := s.newValue1(ssa.OpCom8, types.Types[types.TUINT8], bitval)
4993 maskedval := s.newValue2(ssa.OpAnd8, types.Types[types.TUINT8], deferBits, nbitval)
4994 s.store(types.Types[types.TUINT8], s.deferBitsAddr, maskedval)
4995 // Use this value for the following tests, so we keep the previously cleared bits.
4997 s.vars[deferBitsVar] = maskedval
4999 // Generate code to call the deferred function, using the closure
5000 // that was stored in an argtmp at the point of the defer statement.
5003 stksize := fn.Type().ArgWidth()
5004 var callArgs []*ssa.Value
5006 if r.closure != nil {
5007 v := s.load(r.closure.Type.Elem(), r.closure)
5008 s.maybeNilCheckClosure(v, callDefer)
5009 codeptr := s.rawLoad(types.Types[types.TUINTPTR], v)
5010 aux := ssa.ClosureAuxCall(s.f.ABIDefault.ABIAnalyzeTypes(nil, nil, nil))
5011 call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, v)
5013 aux := ssa.StaticAuxCall(fn.(*ir.Name).Linksym(), s.f.ABIDefault.ABIAnalyzeTypes(nil, nil, nil))
5014 call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
5016 callArgs = append(callArgs, s.mem())
5017 call.AddArgs(callArgs...)
5018 call.AuxInt = stksize
5019 s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, 0, call)
5020 // Make sure that the stack slots with pointers are kept live
5021 // through the call (which is a preemption point). Also, we will
5022 // use the first call of the last defer exit to compute liveness
5023 // for the deferreturn, so we want all stack slots to be live.
5024 if r.closureNode != nil {
5025 s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode, s.mem(), false)
5033 func (s *state) callResult(n *ir.CallExpr, k callKind) *ssa.Value {
5034 return s.call(n, k, false)
5037 func (s *state) callAddr(n *ir.CallExpr, k callKind) *ssa.Value {
5038 return s.call(n, k, true)
5041 // call calls the function n using the specified call type.
5042 // It returns the first result value or, if returnResultAddr is set, its address (nil if there are no results).
5043 func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Value {
5045 var callee *ir.Name // target function (if static)
5046 var closure *ssa.Value // ptr to closure to run (if dynamic)
5047 var codeptr *ssa.Value // ptr to target code (if dynamic)
5048 var rcvr *ssa.Value // receiver to set
5050 var ACArgs []*types.Type // AuxCall args
5051 var ACResults []*types.Type // AuxCall results
5052 var callArgs []*ssa.Value // For late-expansion, the args themselves (not stored, args to the call instead).
5054 callABI := s.f.ABIDefault
5056 if k != callNormal && k != callTail && (len(n.Args) != 0 || n.Op() == ir.OCALLINTER || n.X.Type().NumResults() != 0) {
5057 s.Fatalf("go/defer call with arguments: %v", n)
5062 if (k == callNormal || k == callTail) && fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC {
5065 if buildcfg.Experiment.RegabiArgs {
5066 // This is a static call, so it may be
5067 // a direct call to a non-ABIInternal
5068 // function. fn.Func may be nil for
5069 // some compiler-generated functions,
5070 // but those are all ABIInternal.
5072 callABI = abiForFunc(fn.Func, s.f.ABI0, s.f.ABI1)
5075 // TODO(register args) remove after register abi is working
5076 inRegistersImported := fn.Pragma()&ir.RegisterParams != 0
5077 inRegistersSamePackage := fn.Func != nil && fn.Func.Pragma&ir.RegisterParams != 0
5078 if inRegistersImported || inRegistersSamePackage {
5084 closure = s.expr(fn)
5085 if k != callDefer && k != callDeferStack {
5086 // A deferred nil function needs to panic when the function is invoked,
5087 // not at the point of the defer statement.
5088 s.maybeNilCheckClosure(closure, k)
5091 if fn.Op() != ir.ODOTINTER {
5092 s.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op())
5094 fn := fn.(*ir.SelectorExpr)
5095 var iclosure *ssa.Value
5096 iclosure, rcvr = s.getClosureAndRcvr(fn)
5097 if k == callNormal {
5098 codeptr = s.load(types.Types[types.TUINTPTR], iclosure)
5104 params := callABI.ABIAnalyze(n.X.Type(), false /* Do not set (register) nNames from caller side -- can cause races. */)
5105 types.CalcSize(fn.Type())
5106 stksize := params.ArgWidth() // includes receiver, args, and results
5108 res := n.X.Type().Results()
5109 if k == callNormal || k == callTail {
5110 for _, p := range params.OutParams() {
5111 ACResults = append(ACResults, p.Type)
5116 if k == callDeferStack {
5117 // Make a defer struct d on the stack.
5119 s.Fatalf("deferprocStack with non-zero stack size %d: %v", stksize, n)
5123 d := typecheck.TempAt(n.Pos(), s.curfn, t)
5125 if t.HasPointers() {
5126 s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, d, s.mem())
5130 // Must match deferstruct() below and src/runtime/runtime2.go:_defer.
5131 // 0: started, set in deferprocStack
5132 // 1: heap, set in deferprocStack
5134 // 3: sp, set in deferprocStack
5135 // 4: pc, set in deferprocStack
5136 // 5: fn
5137 s.store(closure.Type,
5138 s.newValue1I(ssa.OpOffPtr, closure.Type.PtrTo(), t.FieldOff(5), addr),
5140 // 6: panic, set in deferprocStack
5141 // 7: link, set in deferprocStack
5146 // Call runtime.deferprocStack with pointer to _defer record.
5147 ACArgs = append(ACArgs, types.Types[types.TUINTPTR])
5148 aux := ssa.StaticAuxCall(ir.Syms.DeferprocStack, s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults))
5149 callArgs = append(callArgs, addr, s.mem())
5150 call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
5151 call.AddArgs(callArgs...)
5152 call.AuxInt = int64(types.PtrSize) // deferprocStack takes a *_defer arg
5154 // Store arguments to stack, including defer/go arguments and receiver for method calls.
5155 // These are written in SP-offset order.
5156 argStart := base.Ctxt.Arch.FixedFrameSize
5158 if k != callNormal && k != callTail {
5159 // Write closure (arg to newproc/deferproc).
5160 ACArgs = append(ACArgs, types.Types[types.TUINTPTR]) // not argExtra
5161 callArgs = append(callArgs, closure)
5162 stksize += int64(types.PtrSize)
5163 argStart += int64(types.PtrSize)
5166 // Set receiver (for interface calls).
5168 callArgs = append(callArgs, rcvr)
5175 for _, p := range params.InParams() { // includes receiver for interface calls
5176 ACArgs = append(ACArgs, p.Type)
5179 // Split the entry block if there are open defers, because later calls to
5180 // openDeferSave may cause a mismatch between the mem for an OpDereference
5181 // and the call site which uses it. See #49282.
5182 if s.curBlock.ID == s.f.Entry.ID && s.hasOpenDefers {
5184 b.Kind = ssa.BlockPlain
5185 curb := s.f.NewBlock(ssa.BlockPlain)
5190 for i, n := range args {
5191 callArgs = append(callArgs, s.putArg(n, t.Params().Field(i).Type))
5194 callArgs = append(callArgs, s.mem())
5198 case k == callDefer:
5199 aux := ssa.StaticAuxCall(ir.Syms.Deferproc, s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults)) // TODO paramResultInfo for DeferProc
5200 call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
5202 aux := ssa.StaticAuxCall(ir.Syms.Newproc, s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults))
5203 call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) // TODO paramResultInfo for NewProc
5204 case closure != nil:
5205 // rawLoad because loading the code pointer from a
5206 // closure is always safe, but IsSanitizerSafeAddr
5207 // can't always figure that out currently, and it's
5208 // critical that we not clobber any arguments already
5209 // stored onto the stack.
5210 codeptr = s.rawLoad(types.Types[types.TUINTPTR], closure)
5211 aux := ssa.ClosureAuxCall(callABI.ABIAnalyzeTypes(nil, ACArgs, ACResults))
5212 call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, closure)
5213 case codeptr != nil:
5214 // Note that the "receiver" parameter is nil because the actual receiver is the first input parameter.
5215 aux := ssa.InterfaceAuxCall(params)
5216 call = s.newValue1A(ssa.OpInterLECall, aux.LateExpansionResultType(), aux, codeptr)
5218 aux := ssa.StaticAuxCall(callTargetLSym(callee), params)
5219 call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
5221 call.Op = ssa.OpTailLECall
5222 stksize = 0 // Tail call does not use stack. We reuse caller's frame.
5225 s.Fatalf("bad call type %v %v", n.Op(), n)
5227 call.AddArgs(callArgs...)
5228 call.AuxInt = stksize // Call operations carry the argsize of the callee along with them
5231 s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
5232 // Insert VarLive opcodes.
5233 for _, v := range n.KeepAlive {
5235 s.Fatalf("KeepAlive variable %v must have Addrtaken set", v)
5238 case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT:
5240 s.Fatalf("KeepAlive variable %v must be Auto or Arg", v)
5242 s.vars[memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, v, s.mem())
5245 // Finish block for defers
5246 if k == callDefer || k == callDeferStack {
5248 b.Kind = ssa.BlockDefer
5250 bNext := s.f.NewBlock(ssa.BlockPlain)
5252 // Add recover edge to exit code.
5253 r := s.f.NewBlock(ssa.BlockPlain)
5257 b.Likely = ssa.BranchLikely
5261 if res.NumFields() == 0 || k != callNormal {
5262 // call has no return value. Continue with the next statement.
5266 if returnResultAddr {
5267 return s.resultAddrOfCall(call, 0, fp.Type)
5269 return s.newValue1I(ssa.OpSelectN, fp.Type, 0, call)
5272 // maybeNilCheckClosure checks if a nil check of a closure is needed in some
5273 // architecture-dependent situations and, if so, emits the nil check.
5274 func (s *state) maybeNilCheckClosure(closure *ssa.Value, k callKind) {
5275 if Arch.LinkArch.Family == sys.Wasm || (buildcfg.GOOS == "aix" && k != callGo) {
5276 // On AIX, the closure needs to be verified as fn can be nil, except for a go statement; that case is handled by the runtime, which produces the "go of nil func value" error.
5277 // TODO(neelance): On other architectures this should be eliminated by the optimization steps
5282 // getClosureAndRcvr returns values for the appropriate closure and receiver of an
5283 // interface call.
5284 func (s *state) getClosureAndRcvr(fn *ir.SelectorExpr) (*ssa.Value, *ssa.Value) {
5286 itab := s.newValue1(ssa.OpITab, types.Types[types.TUINTPTR], i)
5288 itabidx := fn.Offset() + 2*int64(types.PtrSize) + 8 // offset of fun field in runtime.itab
5289 closure := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab)
5290 rcvr := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, i)
5291 return closure, rcvr
5294 // etypesign returns the signedness of e, for integer/pointer etypes.
5295 // -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer.
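// For example, etypesign(types.TINT32) == -1, etypesign(types.TUINTPTR) == +1,
// and etypesign(types.TFLOAT64) == 0.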
5296 func etypesign(e types.Kind) int8 {
5298 case types.TINT8, types.TINT16, types.TINT32, types.TINT64, types.TINT:
5300 case types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINT, types.TUINTPTR, types.TUNSAFEPTR:
5306 // addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
5307 // The value that the returned Value represents is guaranteed to be non-nil.
5308 func (s *state) addr(n ir.Node) *ssa.Value {
5309 if n.Op() != ir.ONAME {
5315 s.Fatalf("addr of canSSA expression: %+v", n)
5318 t := types.NewPtr(n.Type())
5319 linksymOffset := func(lsym *obj.LSym, offset int64) *ssa.Value {
5320 v := s.entryNewValue1A(ssa.OpAddr, t, lsym, s.sb)
5321 // TODO: Make OpAddr use AuxInt as well as Aux.
5323 v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, offset, v)
5328 case ir.OLINKSYMOFFSET:
5329 no := n.(*ir.LinksymOffsetExpr)
5330 return linksymOffset(no.Linksym, no.Offset_)
5333 if n.Heapaddr != nil {
5334 return s.expr(n.Heapaddr)
5339 return linksymOffset(n.Linksym(), 0)
5346 s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
5349 return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), !ir.IsAutoTmp(n))
5351 case ir.PPARAMOUT: // Same as PAUTO -- cannot generate LEA early.
5352 // ensure that we reuse symbols for out parameters so
5353 // that cse works on their addresses
5354 return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), true)
5356 s.Fatalf("variable address class %v not implemented", n.Class)
5360 // load return from callee
5361 n := n.(*ir.ResultExpr)
5362 return s.resultAddrOfCall(s.prevCall, n.Index, n.Type())
5364 n := n.(*ir.IndexExpr)
5365 if n.X.Type().IsSlice() {
5367 i := s.expr(n.Index)
5368 len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], a)
5369 i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
5370 p := s.newValue1(ssa.OpSlicePtr, t, a)
5371 return s.newValue2(ssa.OpPtrIndex, t, p, i)
5374 i := s.expr(n.Index)
5375 len := s.constInt(types.Types[types.TINT], n.X.Type().NumElem())
5376 i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
5377 return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.X.Type().Elem()), a, i)
5380 n := n.(*ir.StarExpr)
5381 return s.exprPtr(n.X, n.Bounded(), n.Pos())
5383 n := n.(*ir.SelectorExpr)
5385 return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p)
5387 n := n.(*ir.SelectorExpr)
5388 p := s.exprPtr(n.X, n.Bounded(), n.Pos())
5389 return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p)
5391 n := n.(*ir.ConvExpr)
5392 if n.Type() == n.X.Type() {
5396 return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type
5397 case ir.OCALLFUNC, ir.OCALLINTER:
5398 n := n.(*ir.CallExpr)
5399 return s.callAddr(n, callNormal)
5400 case ir.ODOTTYPE, ir.ODYNAMICDOTTYPE:
5402 if n.Op() == ir.ODOTTYPE {
5403 v, _ = s.dottype(n.(*ir.TypeAssertExpr), false)
5405 v, _ = s.dynamicDottype(n.(*ir.DynamicTypeAssertExpr), false)
5407 if v.Op != ssa.OpLoad {
5408 s.Fatalf("dottype of non-load")
5410 if v.Args[1] != s.mem() {
5411 s.Fatalf("memory no longer live from dottype load")
5415 s.Fatalf("unhandled addr %v", n.Op())
5420 // canSSA reports whether n is SSA-able.
5421 // n must be an ONAME (or an ODOT sequence with an ONAME base).
5422 func (s *state) canSSA(n ir.Node) bool {
5423 if base.Flag.N != 0 {
5428 if nn.Op() == ir.ODOT {
5429 nn := nn.(*ir.SelectorExpr)
5433 if nn.Op() == ir.OINDEX {
5434 nn := nn.(*ir.IndexExpr)
5435 if nn.X.Type().IsArray() {
5442 if n.Op() != ir.ONAME {
5445 return s.canSSAName(n.(*ir.Name)) && TypeOK(n.Type())
5448 func (s *state) canSSAName(name *ir.Name) bool {
5449 if name.Addrtaken() || !name.OnStack() {
5455 // TODO: handle this case? Named return values must be
5456 // in memory so that the deferred function can see them.
5457 // Maybe do: if !strings.HasPrefix(n.String(), "~") { return false }
5458 // Or maybe not, see issue 18860. Even unnamed return values
5459 // must be written back so that if a defer recovers, the caller can see them.
5462 if s.cgoUnsafeArgs {
5463 // Cgo effectively takes the address of all result args,
5464 // but the compiler can't see that.
5469 // TODO: try to make more variables SSAable?
5472 // TypeOK reports whether variables of type t are SSA-able.
5473 func TypeOK(t *types.Type) bool {
5475 if t.Size() > int64(4*types.PtrSize) {
5476 // 4*PtrSize is an arbitrary constant. We want it
5477 // to be at least 3*PtrSize so slices can be registerized.
5478 // Too big and we'll introduce too much register pressure.
5483 // We can't do larger arrays because dynamic indexing is
5484 // not supported on SSA variables.
5485 // TODO: allow if all indexes are constant.
5486 if t.NumElem() <= 1 {
5487 return TypeOK(t.Elem())
5491 if t.NumFields() > ssa.MaxStruct {
5494 for _, t1 := range t.Fields().Slice() {
5495 if !TypeOK(t1.Type) {
5505 // exprPtr evaluates n to a pointer and nil-checks it.
5506 func (s *state) exprPtr(n ir.Node, bounded bool, lineno src.XPos) *ssa.Value {
5508 if bounded || n.NonNil() {
5509 if s.f.Frontend().Debug_checknil() && lineno.Line() > 1 {
5510 s.f.Warnl(lineno, "removed nil check")
5518 // nilCheck generates nil pointer checking code.
5519 // Used only for automatically inserted nil checks,
5520 // not for user code like 'x != nil'.
5521 func (s *state) nilCheck(ptr *ssa.Value) {
5522 if base.Debug.DisableNil != 0 || s.curfn.NilCheckDisabled() {
5525 s.newValue2(ssa.OpNilCheck, types.TypeVoid, ptr, s.mem())
5528 // boundsCheck generates bounds checking code. Checks if 0 <= idx <[=] len, branches to exit if not.
5529 // Starts a new block on return.
5530 // On input, len must be converted to full int width and be nonnegative.
5531 // Returns idx converted to full int width.
5532 // If bounded is true then caller guarantees the index is not out of bounds
5533 // (but boundsCheck will still extend the index to full int width).
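// For example, the OINDEX case of addr above lowers a[i] for a slice a
// roughly as:
//
//	i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
//	p = s.newValue2(ssa.OpPtrIndex, t, ptr, i)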
5534 func (s *state) boundsCheck(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value {
5535 idx = s.extendIndex(idx, len, kind, bounded)
5537 if bounded || base.Flag.B != 0 {
5538 // If bounded or bounds checking is flag-disabled, then no check is necessary;
5539 // just return the extended index.
5541 // Here, bounded == true if the compiler generated the index itself,
5542 // such as in the expansion of a slice initializer. These indexes are
5543 // compiler-generated, not Go program variables, so they cannot be
5544 // attacker-controlled, so we can omit Spectre masking as well.
5546 // Note that we do not want to omit Spectre masking in code like:
5548 // if 0 <= i && i < len(x) {
5549 //     use(x[i])
5550 // }
5552 // Lucky for us, bounded==false for that code.
5553 // In that case (handled below), we emit a bound check (and Spectre mask)
5554 // and then the prove pass will remove the bounds check.
5555 // In theory the prove pass could potentially remove certain
5556 // Spectre masks, but it's very delicate and probably better
5557 // to be conservative and leave them all in.
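// As a hedged illustration, on targets that implement these ops the mask
// lowers to branch-free code along the lines of
//
//	if !(0 <= idx && idx < len) { idx = 0 } // a conditional move, not a branch
//
// so a mispredicted bounds branch can never speculatively read from an
// attacker-controlled out-of-bounds index.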
5561 bNext := s.f.NewBlock(ssa.BlockPlain)
5562 bPanic := s.f.NewBlock(ssa.BlockExit)
5564 if !idx.Type.IsSigned() {
5566 case ssa.BoundsIndex:
5567 kind = ssa.BoundsIndexU
5568 case ssa.BoundsSliceAlen:
5569 kind = ssa.BoundsSliceAlenU
5570 case ssa.BoundsSliceAcap:
5571 kind = ssa.BoundsSliceAcapU
5572 case ssa.BoundsSliceB:
5573 kind = ssa.BoundsSliceBU
5574 case ssa.BoundsSlice3Alen:
5575 kind = ssa.BoundsSlice3AlenU
5576 case ssa.BoundsSlice3Acap:
5577 kind = ssa.BoundsSlice3AcapU
5578 case ssa.BoundsSlice3B:
5579 kind = ssa.BoundsSlice3BU
5580 case ssa.BoundsSlice3C:
5581 kind = ssa.BoundsSlice3CU
5586 if kind == ssa.BoundsIndex || kind == ssa.BoundsIndexU {
5587 cmp = s.newValue2(ssa.OpIsInBounds, types.Types[types.TBOOL], idx, len)
5589 cmp = s.newValue2(ssa.OpIsSliceInBounds, types.Types[types.TBOOL], idx, len)
5592 b.Kind = ssa.BlockIf
5594 b.Likely = ssa.BranchLikely
5598 s.startBlock(bPanic)
5599 if Arch.LinkArch.Family == sys.Wasm {
5600 // TODO(khr): figure out how to do "register" based calling convention for bounds checks.
5601 // Should be similar to gcWriteBarrier, but I can't make it work.
5602 s.rtcall(BoundsCheckFunc[kind], false, nil, idx, len)
5604 mem := s.newValue3I(ssa.OpPanicBounds, types.TypeMem, int64(kind), idx, len, s.mem())
5605 s.endBlock().SetControl(mem)
5609 // In Spectre index mode, apply an appropriate mask to avoid speculative out-of-bounds accesses.
5610 if base.Flag.Cfg.SpectreIndex {
5611 op := ssa.OpSpectreIndex
5612 if kind != ssa.BoundsIndex && kind != ssa.BoundsIndexU {
5613 op = ssa.OpSpectreSliceIndex
5615 idx = s.newValue2(op, types.Types[types.TINT], idx, len)
5621 // If cmp (a bool) is false, panic using the given function.
5622 func (s *state) check(cmp *ssa.Value, fn *obj.LSym) {
5624 b.Kind = ssa.BlockIf
5626 b.Likely = ssa.BranchLikely
5627 bNext := s.f.NewBlock(ssa.BlockPlain)
5629 pos := base.Ctxt.PosTable.Pos(line)
5630 fl := funcLine{f: fn, base: pos.Base(), line: pos.Line()}
5631 bPanic := s.panics[fl]
5633 bPanic = s.f.NewBlock(ssa.BlockPlain)
5634 s.panics[fl] = bPanic
5635 s.startBlock(bPanic)
5636 // The panic call takes/returns memory to ensure that the right
5637 // memory state is observed if the panic happens.
5638 s.rtcall(fn, false, nil)
5645 func (s *state) intDivide(n ir.Node, a, b *ssa.Value) *ssa.Value {
5648 case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64:
5654 // do a size-appropriate check for zero
5655 cmp := s.newValue2(s.ssaOp(ir.ONE, n.Type()), types.Types[types.TBOOL], b, s.zeroVal(n.Type()))
5656 s.check(cmp, ir.Syms.Panicdivide)
5658 return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
5661 // rtcall issues a call to the given runtime function fn with the listed args.
5662 // Returns a slice of results of the given result types.
5663 // The call is added to the end of the current block.
5664 // If returns is false, the block is marked as an exit block.
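// For example, check above emits a non-returning panic helper as
//
//	s.rtcall(fn, false, nil)
//
// which marks the current block as an exit block because returns is false.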
5665 func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args ...*ssa.Value) []*ssa.Value {
5667 // Write args to the stack
5668 off := base.Ctxt.Arch.FixedFrameSize
5669 var callArgs []*ssa.Value
5670 var callArgTypes []*types.Type
5672 for _, arg := range args {
5674 off = types.RoundUp(off, t.Alignment())
5676 callArgs = append(callArgs, arg)
5677 callArgTypes = append(callArgTypes, t)
5680 off = types.RoundUp(off, int64(types.RegSize))
5684 aux := ssa.StaticAuxCall(fn, s.f.ABIDefault.ABIAnalyzeTypes(nil, callArgTypes, results))
5685 callArgs = append(callArgs, s.mem())
5686 call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
5687 call.AddArgs(callArgs...)
5688 s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(results)), call)
5693 b.Kind = ssa.BlockExit
5695 call.AuxInt = off - base.Ctxt.Arch.FixedFrameSize
5696 if len(results) > 0 {
5697 s.Fatalf("panic call can't have results")
5703 res := make([]*ssa.Value, len(results))
5704 for i, t := range results {
5705 off = types.RoundUp(off, t.Alignment())
5706 res[i] = s.resultOfCall(call, int64(i), t)
5709 off = types.RoundUp(off, int64(types.PtrSize))
5711 // Remember how much callee stack space we needed.
5717 // do *left = right for type t.
5718 func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask, leftIsStmt bool) {
5719 s.instrument(t, left, instrumentWrite)
5721 if skip == 0 && (!t.HasPointers() || ssa.IsStackAddr(left)) {
5722 // Known to not have write barrier. Store the whole type.
5723 s.vars[memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, left, right, s.mem(), leftIsStmt)
5727 // store scalar fields first, so write barrier stores for
5728 // pointer fields can be grouped together, and scalar values
5729 // don't need to be live across the write barrier call.
5730 // TODO: if the writebarrier pass knows how to reorder stores,
5731 // we can do a single store here as long as skip==0.
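// For a string value, for example, the scalar pass below stores the len
// word and the pointer pass stores the data pointer, so only the pointer
// word is ever covered by a write barrier.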
5732 s.storeTypeScalars(t, left, right, skip)
5733 if skip&skipPtr == 0 && t.HasPointers() {
5734 s.storeTypePtrs(t, left, right)
5738 // do *left = right for all scalar (non-pointer) parts of t.
5739 func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip skipMask) {
5741 case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex():
5742 s.store(t, left, right)
5743 case t.IsPtrShaped():
5744 if t.IsPtr() && t.Elem().NotInHeap() {
5745 s.store(t, left, right) // see issue 42032
5747 // otherwise, no scalar fields.
5749 if skip&skipLen != 0 {
5752 len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], right)
5753 lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
5754 s.store(types.Types[types.TINT], lenAddr, len)
5756 if skip&skipLen == 0 {
5757 len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], right)
5758 lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
5759 s.store(types.Types[types.TINT], lenAddr, len)
5761 if skip&skipCap == 0 {
5762 cap := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], right)
5763 capAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, 2*s.config.PtrSize, left)
5764 s.store(types.Types[types.TINT], capAddr, cap)
5766 case t.IsInterface():
5767 // itab field doesn't need a write barrier (even though it is a pointer).
5768 itab := s.newValue1(ssa.OpITab, s.f.Config.Types.BytePtr, right)
5769 s.store(types.Types[types.TUINTPTR], left, itab)
5772 for i := 0; i < n; i++ {
5773 ft := t.FieldType(i)
5774 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
5775 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
5776 s.storeTypeScalars(ft, addr, val, 0)
5778 case t.IsArray() && t.NumElem() == 0:
5780 case t.IsArray() && t.NumElem() == 1:
5781 s.storeTypeScalars(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right), 0)
5783 s.Fatalf("bad write barrier type %v", t)
5787 // do *left = right for all pointer parts of t.
5788 func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
5790 case t.IsPtrShaped():
5791 if t.IsPtr() && t.Elem().NotInHeap() {
5792 break // see issue 42032
5794 s.store(t, left, right)
5796 ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, right)
5797 s.store(s.f.Config.Types.BytePtr, left, ptr)
5799 elType := types.NewPtr(t.Elem())
5800 ptr := s.newValue1(ssa.OpSlicePtr, elType, right)
5801 s.store(elType, left, ptr)
5802 case t.IsInterface():
5803 // itab field is treated as a scalar.
5804 idata := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, right)
5805 idataAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.BytePtrPtr, s.config.PtrSize, left)
5806 s.store(s.f.Config.Types.BytePtr, idataAddr, idata)
5809 for i := 0; i < n; i++ {
5810 ft := t.FieldType(i)
5811 if !ft.HasPointers() {
5814 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
5815 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
5816 s.storeTypePtrs(ft, addr, val)
5818 case t.IsArray() && t.NumElem() == 0:
5820 case t.IsArray() && t.NumElem() == 1:
5821 s.storeTypePtrs(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right))
5823 s.Fatalf("bad write barrier type %v", t)
5827 // putArg evaluates n for the purpose of passing it as an argument to a function and returns the value for the call.
5828 func (s *state) putArg(n ir.Node, t *types.Type) *ssa.Value {
5831 a = s.newValue2(ssa.OpDereference, t, s.addr(n), s.mem())
5838 func (s *state) storeArgWithBase(n ir.Node, t *types.Type, base *ssa.Value, off int64) {
5839 pt := types.NewPtr(t)
5842 // Use a special routine that avoids allocation on duplicate offsets.
5843 addr = s.constOffPtrSP(pt, off)
5845 addr = s.newValue1I(ssa.OpOffPtr, pt, off, base)
5855 s.storeType(t, addr, a, 0, false)
5858 // slice computes the slice v[i:j:k] and returns ptr, len, and cap of result.
5859 // i, j, k may be nil, in which case they are set to their default value.
5860 // v may be a slice, string or pointer to an array.
5861 func (s *state) slice(v, i, j, k *ssa.Value, bounded bool) (p, l, c *ssa.Value) {
5863 var ptr, len, cap *ssa.Value
5866 ptr = s.newValue1(ssa.OpSlicePtr, types.NewPtr(t.Elem()), v)
5867 len = s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], v)
5868 cap = s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], v)
5870 ptr = s.newValue1(ssa.OpStringPtr, types.NewPtr(types.Types[types.TUINT8]), v)
5871 len = s.newValue1(ssa.OpStringLen, types.Types[types.TINT], v)
5874 if !t.Elem().IsArray() {
5875 s.Fatalf("bad ptr to array in slice %v\n", t)
5878 ptr = s.newValue1(ssa.OpCopy, types.NewPtr(t.Elem().Elem()), v)
5879 len = s.constInt(types.Types[types.TINT], t.Elem().NumElem())
5882 s.Fatalf("bad type in slice %v\n", t)
5885 // Set default values
5887 i = s.constInt(types.Types[types.TINT], 0)
5898 // Panic if slice indices are not in bounds.
5899 // Make sure we check these in reverse order so that we're always
5900 // comparing against a value known to be nonnegative. See issue 28797.
5903 kind := ssa.BoundsSlice3Alen
5905 kind = ssa.BoundsSlice3Acap
5907 k = s.boundsCheck(k, cap, kind, bounded)
5910 j = s.boundsCheck(j, k, ssa.BoundsSlice3B, bounded)
5912 i = s.boundsCheck(i, j, ssa.BoundsSlice3C, bounded)
5915 kind := ssa.BoundsSliceAlen
5917 kind = ssa.BoundsSliceAcap
5919 j = s.boundsCheck(j, k, kind, bounded)
5921 i = s.boundsCheck(i, j, ssa.BoundsSliceB, bounded)
5924 // Word-sized integer operations.
5925 subOp := s.ssaOp(ir.OSUB, types.Types[types.TINT])
5926 mulOp := s.ssaOp(ir.OMUL, types.Types[types.TINT])
5927 andOp := s.ssaOp(ir.OAND, types.Types[types.TINT])
5929 // Calculate the length (rlen) and capacity (rcap) of the new slice.
5930 // For strings the capacity of the result is unimportant. However,
5931 // we use rcap to test if we've generated a zero-length slice, so for
5932 // strings we use the length for that test.
5933 rlen := s.newValue2(subOp, types.Types[types.TINT], j, i)
5935 if j != k && !t.IsString() {
5936 rcap = s.newValue2(subOp, types.Types[types.TINT], k, i)
5939 if (i.Op == ssa.OpConst64 || i.Op == ssa.OpConst32) && i.AuxInt == 0 {
5940 // No pointer arithmetic necessary.
5941 return ptr, rlen, rcap
5944 // Calculate the base pointer (rptr) for the new slice.
5946 // Generate the following code assuming that indexes are in bounds.
5947 // The masking is to make sure that we don't generate a slice
5948 // that points to the next object in memory. We cannot just set
5949 // the pointer to nil because then we would create a nil slice or
5950 // string.
5954 // rptr = ptr + (mask(rcap) & (i * stride))
5956 // Where mask(x) is 0 if x==0 and -1 if x>0 and stride is the width
5957 // of the element type.
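// For example, with 8-byte elements, v[4:4] of a slice with cap(v) == 4 has
// rcap == 0, so mask(0) == 0 and delta = 0 & (4*8) == 0: rptr == ptr stays
// inside v's object instead of pointing one past it. For v[2:4], rcap > 0,
// so mask == -1 and delta = 2*8 == 16.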
5958 stride := s.constInt(types.Types[types.TINT], ptr.Type.Elem().Size())
5960 // The delta is the number of bytes to offset ptr by.
5961 delta := s.newValue2(mulOp, types.Types[types.TINT], i, stride)
5963 // If we're slicing to the point where the capacity is zero,
5964 // zero out the delta.
5965 mask := s.newValue1(ssa.OpSlicemask, types.Types[types.TINT], rcap)
5966 delta = s.newValue2(andOp, types.Types[types.TINT], delta, mask)
5968 // Compute rptr = ptr + delta.
5969 rptr := s.newValue2(ssa.OpAddPtr, ptr.Type, ptr, delta)
5971 return rptr, rlen, rcap
5974 type u642fcvtTab struct {
5975 leq, cvt2F, and, rsh, or, add ssa.Op
5976 one func(*state, *types.Type, int64) *ssa.Value
5979 var u64_f64 = u642fcvtTab{
5981 cvt2F: ssa.OpCvt64to64F,
5983 rsh: ssa.OpRsh64Ux64,
5986 one: (*state).constInt64,
5989 var u64_f32 = u642fcvtTab{
5991 cvt2F: ssa.OpCvt64to32F,
5993 rsh: ssa.OpRsh64Ux64,
5996 one: (*state).constInt64,
5999 func (s *state) uint64Tofloat64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
6000 return s.uint64Tofloat(&u64_f64, n, x, ft, tt)
6003 func (s *state) uint64Tofloat32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
6004 return s.uint64Tofloat(&u64_f32, n, x, ft, tt)
6007 func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
6009 // result = (floatY) x
6011 // y = uintX(x) ; y = x & 1
6012 // z = uintX(x) ; z = z >> 1
6013 // z = z | y
6014 // result = floatY(z)
6015 // result = result + result
6018 // Code borrowed from old code generator.
6019 // What's going on: large 64-bit "unsigned" looks like
6020 // negative number to hardware's integer-to-float
6021 // conversion. However, because the mantissa is only
6022 // 63 bits, we don't need the LSB, so instead we do an
6023 // unsigned right shift (divide by two), convert, and
6024 // double. However, before we do that, we need to be
6025 // sure that we do not lose a "1" if that made the
6026 // difference in the resulting rounding. Therefore, we
6027 // preserve it, and OR (not ADD) it back in. The case
6028 // that matters is when the eleven discarded bits are
6029 // equal to 10000000001; that rounds up, and the 1 cannot
6030 // be lost else it would round down if the LSB of the
6031 // candidate mantissa is 0.
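// A hedged, pure-Go sketch of the same trick (u64toF64 is an illustrative
// helper, not part of this file):
//
//	func u64toF64(x uint64) float64 {
//		if int64(x) >= 0 {
//			return float64(int64(x))
//		}
//		y := x & 1
//		z := x>>1 | y // preserve the LSB by OR-ing it back in
//		return 2 * float64(int64(z))
//	}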
6032 cmp := s.newValue2(cvttab.leq, types.Types[types.TBOOL], s.zeroVal(ft), x)
6034 b.Kind = ssa.BlockIf
6036 b.Likely = ssa.BranchLikely
6038 bThen := s.f.NewBlock(ssa.BlockPlain)
6039 bElse := s.f.NewBlock(ssa.BlockPlain)
6040 bAfter := s.f.NewBlock(ssa.BlockPlain)
6044 a0 := s.newValue1(cvttab.cvt2F, tt, x)
6047 bThen.AddEdgeTo(bAfter)
6051 one := cvttab.one(s, ft, 1)
6052 y := s.newValue2(cvttab.and, ft, x, one)
6053 z := s.newValue2(cvttab.rsh, ft, x, one)
6054 z = s.newValue2(cvttab.or, ft, z, y)
6055 a := s.newValue1(cvttab.cvt2F, tt, z)
6056 a1 := s.newValue2(cvttab.add, tt, a, a)
6059 bElse.AddEdgeTo(bAfter)
6061 s.startBlock(bAfter)
6062 return s.variable(n, n.Type())
6065 type u322fcvtTab struct {
6066 cvtI2F, cvtF2F ssa.Op
6069 var u32_f64 = u322fcvtTab{
6070 cvtI2F: ssa.OpCvt32to64F,
6074 var u32_f32 = u322fcvtTab{
6075 cvtI2F: ssa.OpCvt32to32F,
6076 cvtF2F: ssa.OpCvt64Fto32F,
6079 func (s *state) uint32Tofloat64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
6080 return s.uint32Tofloat(&u32_f64, n, x, ft, tt)
6083 func (s *state) uint32Tofloat32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
6084 return s.uint32Tofloat(&u32_f32, n, x, ft, tt)
6087 func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
6089 // result = floatY(x)
6091 // result = floatY(float64(x) + (1<<32))
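// For example, x = 0xFFFFFFFF is -1 when read as a signed int32; the else
// branch computes -1 + 2^32 = 4294967295, the intended unsigned value.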
6093 cmp := s.newValue2(ssa.OpLeq32, types.Types[types.TBOOL], s.zeroVal(ft), x)
6095 b.Kind = ssa.BlockIf
6097 b.Likely = ssa.BranchLikely
6099 bThen := s.f.NewBlock(ssa.BlockPlain)
6100 bElse := s.f.NewBlock(ssa.BlockPlain)
6101 bAfter := s.f.NewBlock(ssa.BlockPlain)
6105 a0 := s.newValue1(cvttab.cvtI2F, tt, x)
6108 bThen.AddEdgeTo(bAfter)
6112 a1 := s.newValue1(ssa.OpCvt32to64F, types.Types[types.TFLOAT64], x)
6113 twoToThe32 := s.constFloat64(types.Types[types.TFLOAT64], float64(1<<32))
6114 a2 := s.newValue2(ssa.OpAdd64F, types.Types[types.TFLOAT64], a1, twoToThe32)
6115 a3 := s.newValue1(cvttab.cvtF2F, tt, a2)
6119 bElse.AddEdgeTo(bAfter)
6121 s.startBlock(bAfter)
6122 return s.variable(n, n.Type())
6125 // referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
6126 func (s *state) referenceTypeBuiltin(n *ir.UnaryExpr, x *ssa.Value) *ssa.Value {
6127 if !n.X.Type().IsMap() && !n.X.Type().IsChan() {
6128 s.Fatalf("node must be a map or a channel")
6134 // return *((*int)n)
6136 // return *(((*int)n)+1)
6139 nilValue := s.constNil(types.Types[types.TUINTPTR])
6140 cmp := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], x, nilValue)
6142 b.Kind = ssa.BlockIf
6144 b.Likely = ssa.BranchUnlikely
6146 bThen := s.f.NewBlock(ssa.BlockPlain)
6147 bElse := s.f.NewBlock(ssa.BlockPlain)
6148 bAfter := s.f.NewBlock(ssa.BlockPlain)
6150 // length/capacity of a nil map/chan is zero
6153 s.vars[n] = s.zeroVal(lenType)
6155 bThen.AddEdgeTo(bAfter)
6161 // length is stored in the first word for map/chan
6162 s.vars[n] = s.load(lenType, x)
6164 // capacity is stored in the second word for chan
6165 sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Size(), x)
6166 s.vars[n] = s.load(lenType, sw)
6168 s.Fatalf("op must be OLEN or OCAP")
6171 bElse.AddEdgeTo(bAfter)
6173 s.startBlock(bAfter)
6174 return s.variable(n, lenType)
6177 type f2uCvtTab struct {
6178 ltf, cvt2U, subf, or ssa.Op
6179 floatValue func(*state, *types.Type, float64) *ssa.Value
6180 intValue func(*state, *types.Type, int64) *ssa.Value
6184 var f32_u64 = f2uCvtTab{
6186 cvt2U: ssa.OpCvt32Fto64,
6189 floatValue: (*state).constFloat32,
6190 intValue: (*state).constInt64,
6194 var f64_u64 = f2uCvtTab{
6196 cvt2U: ssa.OpCvt64Fto64,
6199 floatValue: (*state).constFloat64,
6200 intValue: (*state).constInt64,
6204 var f32_u32 = f2uCvtTab{
6206 cvt2U: ssa.OpCvt32Fto32,
6209 floatValue: (*state).constFloat32,
6210 intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
6214 var f64_u32 = f2uCvtTab{
6216 cvt2U: ssa.OpCvt64Fto32,
6219 floatValue: (*state).constFloat64,
6220 intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
6224 func (s *state) float32ToUint64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
6225 return s.floatToUint(&f32_u64, n, x, ft, tt)
6227 func (s *state) float64ToUint64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
6228 return s.floatToUint(&f64_u64, n, x, ft, tt)
6231 func (s *state) float32ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
6232 return s.floatToUint(&f32_u32, n, x, ft, tt)
6235 func (s *state) float64ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
6236 return s.floatToUint(&f64_u32, n, x, ft, tt)
6239 func (s *state) floatToUint(cvttab *f2uCvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
6240 // cutoff:=1<<(intY_Size-1)
6241 // if x < floatX(cutoff) {
6242 // result = uintY(x)
6244 // y = x - floatX(cutoff)
6245 // z = uintY(y)
6246 // result = z | -(cutoff)
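// For example, for float64-to-uint64, cutoff = 1<<63. A value such as
// x = 2^63 + 2048 (representable in float64) takes the else branch:
// y = 2048, z = 2048, and z | -(cutoff) sets the top bit back, giving
// 2^63 + 2048.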
6248 cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff))
6249 cmp := s.newValue2(cvttab.ltf, types.Types[types.TBOOL], x, cutoff)
6251 b.Kind = ssa.BlockIf
6253 b.Likely = ssa.BranchLikely
6255 bThen := s.f.NewBlock(ssa.BlockPlain)
6256 bElse := s.f.NewBlock(ssa.BlockPlain)
6257 bAfter := s.f.NewBlock(ssa.BlockPlain)
6261 a0 := s.newValue1(cvttab.cvt2U, tt, x)
6264 bThen.AddEdgeTo(bAfter)
6268 y := s.newValue2(cvttab.subf, ft, x, cutoff)
6269 y = s.newValue1(cvttab.cvt2U, tt, y)
6270 z := cvttab.intValue(s, tt, int64(-cvttab.cutoff))
6271 a1 := s.newValue2(cvttab.or, tt, y, z)
6274 bElse.AddEdgeTo(bAfter)
6276 s.startBlock(bAfter)
6277 return s.variable(n, n.Type())
6280 // dottype generates SSA for a type assertion node.
6281 // commaok indicates whether to panic or return a bool.
6282 // If commaok is false, resok will be nil.
6283 func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Value) {
6284 iface := s.expr(n.X) // input interface
6285 target := s.reflectType(n.Type()) // target type
6286 var targetItab *ssa.Value
6288 targetItab = s.expr(n.ITab)
6290 return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, nil, target, targetItab, commaok)
6293 func (s *state) dynamicDottype(n *ir.DynamicTypeAssertExpr, commaok bool) (res, resok *ssa.Value) {
6294 iface := s.expr(n.X)
6295 var source, target, targetItab *ssa.Value
6296 if n.SrcRType != nil {
6297 source = s.expr(n.SrcRType)
6299 if !n.X.Type().IsEmptyInterface() && !n.Type().IsInterface() {
6300 byteptr := s.f.Config.Types.BytePtr
6301 targetItab = s.expr(n.ITab)
6302 // TODO(mdempsky): Investigate whether compiling n.RType could be
6303 // better than loading itab.typ.
6304 target = s.load(byteptr, s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), targetItab)) // itab.typ
6306 target = s.expr(n.RType)
6308 return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, source, target, targetItab, commaok)
6311 // dottype1 implements an x.(T) operation. iface is the argument (x), dst is the type we're asserting to (T)
6312 // and src is the type we're asserting from.
6313 // source is the *runtime._type of src.
6314 // target is the *runtime._type of dst.
6315 // If src is a nonempty interface and dst is not an interface, targetItab is an itab representing (dst, src). Otherwise it is nil.
6316 // commaok is true if the caller wants a boolean success value. Otherwise, the generated code panics if the conversion fails.
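// As a hedged sketch (itab(x) and idata(x) stand for the interface's type
// and data words), asserting to a concrete pointer-shaped type T without
// commaok compiles to roughly:
//
//	if itab(x) != wantedFirstWord {
//		panicdottype(...)
//	}
//	res = idata(x)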
6317 func (s *state) dottype1(pos src.XPos, src, dst *types.Type, iface, source, target, targetItab *ssa.Value, commaok bool) (res, resok *ssa.Value) {
6318 byteptr := s.f.Config.Types.BytePtr
6319 if dst.IsInterface() {
6320 if dst.IsEmptyInterface() {
6321 // Converting to an empty interface.
6322 // Input could be an empty or nonempty interface.
6323 if base.Debug.TypeAssert > 0 {
6324 base.WarnfAt(pos, "type assertion inlined")
6327 // Get itab/type field from input.
6328 itab := s.newValue1(ssa.OpITab, byteptr, iface)
6329 // Conversion succeeds iff that field is not nil.
6330 cond := s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], itab, s.constNil(byteptr))
6332 if src.IsEmptyInterface() && commaok {
6333 // Converting empty interface to empty interface with ,ok is just a nil check.
6337 // Branch on nilness.
6339 b.Kind = ssa.BlockIf
6341 b.Likely = ssa.BranchLikely
6342 bOk := s.f.NewBlock(ssa.BlockPlain)
6343 bFail := s.f.NewBlock(ssa.BlockPlain)
6348 // On failure, panic by calling panicnildottype.
6350 s.rtcall(ir.Syms.Panicnildottype, false, nil, target)
6352 // On success, return (perhaps modified) input interface.
6354 if src.IsEmptyInterface() {
6355 res = iface // Use input interface unchanged.
6358 // Load type out of itab, build interface with existing idata.
6359 off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab)
6360 typ := s.load(byteptr, off)
6361 idata := s.newValue1(ssa.OpIData, byteptr, iface)
6362 res = s.newValue2(ssa.OpIMake, dst, typ, idata)
6367 // nonempty -> empty
6368 // Need to load type from itab
6369 off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab)
6370 s.vars[typVar] = s.load(byteptr, off)
6373 // itab is nil, might as well use that as the nil result.
6375 s.vars[typVar] = itab
6379 bEnd := s.f.NewBlock(ssa.BlockPlain)
6381 bFail.AddEdgeTo(bEnd)
6383 idata := s.newValue1(ssa.OpIData, byteptr, iface)
6384 res = s.newValue2(ssa.OpIMake, dst, s.variable(typVar, byteptr), idata)
6386 delete(s.vars, typVar)
6389 // converting to a nonempty interface needs a runtime call.
6390 if base.Debug.TypeAssert > 0 {
6391 base.WarnfAt(pos, "type assertion not inlined")
6394 fn := ir.Syms.AssertI2I
6395 if src.IsEmptyInterface() {
6396 fn = ir.Syms.AssertE2I
6398 data := s.newValue1(ssa.OpIData, types.Types[types.TUNSAFEPTR], iface)
6399 tab := s.newValue1(ssa.OpITab, byteptr, iface)
6400 tab = s.rtcall(fn, true, []*types.Type{byteptr}, target, tab)[0]
6401 return s.newValue2(ssa.OpIMake, dst, tab, data), nil
6403 fn := ir.Syms.AssertI2I2
6404 if src.IsEmptyInterface() {
6405 fn = ir.Syms.AssertE2I2
6407 res = s.rtcall(fn, true, []*types.Type{dst}, target, iface)[0]
6408 resok = s.newValue2(ssa.OpNeqInter, types.Types[types.TBOOL], res, s.constInterface(dst))
6412 if base.Debug.TypeAssert > 0 {
6413 base.WarnfAt(pos, "type assertion inlined")
6416 // Converting to a concrete type.
6417 direct := types.IsDirectIface(dst)
6418 itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface
6422 var wantedFirstWord *ssa.Value
6423 if src.IsEmptyInterface() {
6424 // Looking for pointer to target type.
6425 wantedFirstWord = target
6427 // Looking for pointer to itab for target type and source interface.
6428 wantedFirstWord = targetItab
6431 var tmp ir.Node // temporary for use with large types
6432 var addr *ssa.Value // address of tmp
6433 if commaok && !TypeOK(dst) {
6434 // unSSAable type, use temporary.
6435 // TODO: get rid of some of these temporaries.
6436 tmp, addr = s.temp(pos, dst)
6439 cond := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], itab, wantedFirstWord)
6441 b.Kind = ssa.BlockIf
6443 b.Likely = ssa.BranchLikely
6445 bOk := s.f.NewBlock(ssa.BlockPlain)
6446 bFail := s.f.NewBlock(ssa.BlockPlain)
6451 // on failure, panic by calling panicdottype
6455 taddr = s.reflectType(src)
6457 if src.IsEmptyInterface() {
6458 s.rtcall(ir.Syms.PanicdottypeE, false, nil, itab, target, taddr)
6460 s.rtcall(ir.Syms.PanicdottypeI, false, nil, itab, target, taddr)
6463 // on success, return data from interface
6466 return s.newValue1(ssa.OpIData, dst, iface), nil
6468 p := s.newValue1(ssa.OpIData, types.NewPtr(dst), iface)
6469 return s.load(dst, p), nil
6472 // commaok is the more complicated case because we have
6473 // a control flow merge point.
6474 bEnd := s.f.NewBlock(ssa.BlockPlain)
6475 // Note that we need a new valVar each time (unlike okVar where we can
6476 // reuse the variable) because it might have a different type every time.
6477 valVar := ssaMarker("val")
6479 // type assertion succeeded
6483 s.vars[valVar] = s.newValue1(ssa.OpIData, dst, iface)
6485 p := s.newValue1(ssa.OpIData, types.NewPtr(dst), iface)
6486 s.vars[valVar] = s.load(dst, p)
6489 p := s.newValue1(ssa.OpIData, types.NewPtr(dst), iface)
6490 s.move(dst, addr, p)
6492 s.vars[okVar] = s.constBool(true)
6496 // type assertion failed
6499 s.vars[valVar] = s.zeroVal(dst)
6503 s.vars[okVar] = s.constBool(false)
6505 bFail.AddEdgeTo(bEnd)
6510 res = s.variable(valVar, dst)
6511 delete(s.vars, valVar)
6513 res = s.load(dst, addr)
6515 resok = s.variable(okVar, types.Types[types.TBOOL])
6516 delete(s.vars, okVar)
6520 // temp allocates a temp of type t at position pos.
6521 func (s *state) temp(pos src.XPos, t *types.Type) (*ir.Name, *ssa.Value) {
6522 tmp := typecheck.TempAt(pos, s.curfn, t)
6523 if t.HasPointers() {
6524 s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem())
6530 // variable returns the value of a variable at the current location.
6531 func (s *state) variable(n ir.Node, t *types.Type) *ssa.Value {
6541 if s.curBlock == s.f.Entry {
6542 // No variable should be live at entry.
6543 s.f.Fatalf("value %v (%v) incorrectly live at entry", n, v)
6545 // Make a FwdRef, which records a value that's live on block input.
6546 // We'll find the matching definition as part of insertPhis.
6547 v = s.newValue0A(ssa.OpFwdRef, t, fwdRefAux{N: n})
6549 if n.Op() == ir.ONAME {
6550 s.addNamedValue(n.(*ir.Name), v)
6555 func (s *state) mem() *ssa.Value {
6556 return s.variable(memVar, types.TypeMem)
6559 func (s *state) addNamedValue(n *ir.Name, v *ssa.Value) {
6560 if n.Class == ir.Pxxx {
6561 // Don't track our marker nodes (memVar etc.).
6564 if ir.IsAutoTmp(n) {
6565 // Don't track temporary variables.
6568 if n.Class == ir.PPARAMOUT {
6569 // Don't track named output values. This prevents return values
6570 // from being assigned too early. See #14591 and #14762. TODO: allow this.
6573 loc := ssa.LocalSlot{N: n, Type: n.Type(), Off: 0}
6574 values, ok := s.f.NamedValues[loc]
6576 s.f.Names = append(s.f.Names, &loc)
6577 s.f.CanonicalLocalSlots[loc] = &loc
6579 s.f.NamedValues[loc] = append(values, v)
6582 // Branch is an unresolved branch.
6583 type Branch struct {
6584 P *obj.Prog // branch instruction
6585 B *ssa.Block // target
6588 // State contains state needed during Prog generation.
6594 // Branches remembers all the branch instructions we've seen
6595 // and where they would like to go.
6598 // JumpTables remembers all the jump tables we've seen.
6599 JumpTables []*ssa.Block
6601 // bstart remembers where each block starts (indexed by block ID)
6604 maxarg int64 // largest frame size for arguments to calls made by the function
6606 // Map from GC safe points to liveness index, generated by
6607 // liveness analysis.
6608 livenessMap liveness.Map
6610 // partLiveArgs includes arguments that may be partially live, for which we
6611 // need to generate instructions that spill the argument registers.
6612 partLiveArgs map[*ir.Name]bool
6614 // lineRunStart records the beginning of the current run of instructions
6615 // within a single block sharing the same line number.
6616 // Used to move statement marks to the beginning of such runs.
6617 lineRunStart *obj.Prog
6619 // wasm: The number of values on the WebAssembly stack. This is only used as a safeguard.
6620 OnWasmStackSkipped int
6623 func (s *State) FuncInfo() *obj.FuncInfo {
6624 return s.pp.CurFunc.LSym.Func()
6627 // Prog appends a new Prog.
6628 func (s *State) Prog(as obj.As) *obj.Prog {
6630 if objw.LosesStmtMark(as) {
6633 // Float a statement start to the beginning of any same-line run.
6634 // lineRunStart is reset at block boundaries, which appears to work well.
6635 if s.lineRunStart == nil || s.lineRunStart.Pos.Line() != p.Pos.Line() {
6637 } else if p.Pos.IsStmt() == src.PosIsStmt {
6638 s.lineRunStart.Pos = s.lineRunStart.Pos.WithIsStmt()
6639 p.Pos = p.Pos.WithNotStmt()
6644 // Pc returns the current Prog.
6645 func (s *State) Pc() *obj.Prog {
6649 // SetPos sets the current source position.
6650 func (s *State) SetPos(pos src.XPos) {
6654 // Br emits a single branch instruction and returns the instruction.
6655 // Not all architectures need the returned instruction, but otherwise
6656 // the boilerplate is common to all.
6657 func (s *State) Br(op obj.As, target *ssa.Block) *obj.Prog {
6659 p.To.Type = obj.TYPE_BRANCH
6660 s.Branches = append(s.Branches, Branch{P: p, B: target})
6664 // DebugFriendlySetPosFrom adjusts Pos.IsStmt subject to heuristics
6665 // that reduce "jumpy" line number churn when debugging.
6666 // Spill/fill/copy instructions from the register allocator,
6667 // phi functions, and instructions with a no-pos position
6668 // are examples of instructions that can cause churn.
6669 func (s *State) DebugFriendlySetPosFrom(v *ssa.Value) {
6671 case ssa.OpPhi, ssa.OpCopy, ssa.OpLoadReg, ssa.OpStoreReg:
6672 // These are not statements
6673 s.SetPos(v.Pos.WithNotStmt())
6676 if p != src.NoXPos {
6677 // If the position is defined, update the position.
6678 // Also convert default IsStmt to NotStmt; only
6679 // explicit statement boundaries should appear
6680 // in the generated code.
6681 if p.IsStmt() != src.PosIsStmt {
6682 if s.pp.Pos.IsStmt() == src.PosIsStmt && s.pp.Pos.SameFileAndLine(p) {
6683 // If s.pp.Pos already has a statement mark, then it was set here (below) for
6684 // the previous value. If an actual instruction had been emitted for that
6685 // value, then the statement mark would have been reset. Since the statement
6686 // mark of s.pp.Pos was not reset, this position (file/line) still needs a
6687 // statement mark on an instruction. If file and line for this value are
6688 // the same as the previous value, then the first instruction for this
6689 // value will work to take the statement mark. Return early to avoid
6690 // resetting the statement mark.
6692 // The reset of s.pp.Pos occurs in (*Progs).Prog() -- if it emits
6693 // an instruction, and the instruction's statement mark was set,
6694 // and it is not one of the LosesStmtMark instructions,
6695 // then Prog() resets the statement mark on the (*Progs).Pos.
6699 // Calls use the pos attached to v, but copy the statement mark from State
6703 s.SetPos(s.pp.Pos.WithNotStmt())
6708 // emitArgInfo emits argument info (locations on stack) for traceback.
6709 func emitArgInfo(e *ssafn, f *ssa.Func, pp *objw.Progs) {
6710 ft := e.curfn.Type()
6711 if ft.NumRecvs() == 0 && ft.NumParams() == 0 {
6715 x := EmitArgInfo(e.curfn, f.OwnAux.ABIInfo())
6716 x.Set(obj.AttrContentAddressable, true)
6717 e.curfn.LSym.Func().ArgInfo = x
6719 // Emit a funcdata pointing at the arg info data.
6720 p := pp.Prog(obj.AFUNCDATA)
6721 p.From.SetConst(objabi.FUNCDATA_ArgInfo)
6722 p.To.Type = obj.TYPE_MEM
6723 p.To.Name = obj.NAME_EXTERN
6727 // EmitArgInfo generates argument info (locations on stack) of f for traceback and returns the symbol holding it.
6728 func EmitArgInfo(f *ir.Func, abiInfo *abi.ABIParamResultInfo) *obj.LSym {
6729 x := base.Ctxt.Lookup(fmt.Sprintf("%s.arginfo%d", f.LSym.Name, f.ABI))
6730 // NOTE: do not set ContentAddressable here. This may be referenced from
6731 // assembly code by name (in this case f is a declaration).
6732 // Instead, set it in emitArgInfo above.
6734 PtrSize := int64(types.PtrSize)
6735 uintptrTyp := types.Types[types.TUINTPTR]
6737 isAggregate := func(t *types.Type) bool {
6738 return t.IsStruct() || t.IsArray() || t.IsComplex() || t.IsInterface() || t.IsString() || t.IsSlice()
6741 // Populate the data.
6742 // The data is a stream of bytes, which contains the offsets and sizes of the
6743 // non-aggregate arguments or non-aggregate fields/elements of aggregate-typed
6744 // arguments, along with special "operators". Specifically,
6745 // - for each non-aggregate arg/field/element, its offset from FP (1 byte) and
6746 //   its size (1 byte)
6747 // - special operators:
6748 // - 0xff - end of sequence
6749 // - 0xfe - print { (at the start of an aggregate-typed argument)
6750 // - 0xfd - print } (at the end of an aggregate-typed argument)
6751 // - 0xfc - print ... (more args/fields/elements)
6752 // - 0xfb - print _ (offset too large)
6753 // These constants need to be in sync with runtime/traceback.go:printArgs.
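// For example, func f(a int32, b struct{ x, y int8 }) might encode as
// (illustrative FP offsets only):
//
//	00 04  a: offset 0, size 4
//	fe     {  start of b
//	04 01  b.x: offset 4, size 1
//	05 01  b.y: offset 5, size 1
//	fd     }  end of b
//	ff     end of sequence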
6759 _offsetTooLarge = 0xfb
6760 _special = 0xf0 // above this are operators, below this are ordinary offsets
6764 limit = 10 // print no more than 10 args/components
6765 maxDepth = 5 // no more than 5 layers of nesting
6767 // maxLen is a (conservative) upper bound of the byte stream length. For
6768 // each arg/component, it has no more than 2 bytes of data (size, offset),
6769 // and no more than one {, }, ... at each level (it cannot have both the
6770 // data and ... unless it is the last one, just be conservative). Plus 1
6771 // for the terminating 0xff.
6772 maxLen = (maxDepth*3+2)*limit + 1
6777 writebyte := func(o uint8) { wOff = objw.Uint8(x, wOff, o) }
6779 // Write one non-aggregate arg/field/element.
6780 write1 := func(sz, offset int64) {
6781 if offset >= _special {
6782 writebyte(_offsetTooLarge)
6784 writebyte(uint8(offset))
6785 writebyte(uint8(sz))
6790 // Visit t recursively and write it out.
6791 // Returns whether to continue visiting.
6792 var visitType func(baseOffset int64, t *types.Type, depth int) bool
6793 visitType = func(baseOffset int64, t *types.Type, depth int) bool {
6795 writebyte(_dotdotdot)
6798 if !isAggregate(t) {
6799 write1(t.Size(), baseOffset)
6802 writebyte(_startAgg)
6804 if depth >= maxDepth {
6805 writebyte(_dotdotdot)
6811 case t.IsInterface(), t.IsString():
6812 _ = visitType(baseOffset, uintptrTyp, depth) &&
6813 visitType(baseOffset+PtrSize, uintptrTyp, depth)
6815 _ = visitType(baseOffset, uintptrTyp, depth) &&
6816 visitType(baseOffset+PtrSize, uintptrTyp, depth) &&
6817 visitType(baseOffset+PtrSize*2, uintptrTyp, depth)
6819 _ = visitType(baseOffset, types.FloatForComplex(t), depth) &&
6820 visitType(baseOffset+t.Size()/2, types.FloatForComplex(t), depth)
6822 if t.NumElem() == 0 {
6823 n++ // {} counts as a component
6826 for i := int64(0); i < t.NumElem(); i++ {
6827 if !visitType(baseOffset, t.Elem(), depth) {
6830 baseOffset += t.Elem().Size()
6833 if t.NumFields() == 0 {
6834 n++ // {} counts as a component
6837 for _, field := range t.Fields().Slice() {
6838 if !visitType(baseOffset+field.Offset, field.Type, depth) {
6848 if strings.Contains(f.LSym.Name, "[") {
6849 // Skip the dictionary argument - it is implicit and the user doesn't need to see it.
6853 for _, a := range abiInfo.InParams()[start:] {
6854 if !visitType(a.FrameOffset(abiInfo), a.Type, 0) {
6860 base.Fatalf("ArgInfo too large")
6866 // emitWrappedFuncInfo emits the info of the wrapped function for a wrapper.
6867 func emitWrappedFuncInfo(e *ssafn, pp *objw.Progs) {
6868 if base.Ctxt.Flag_linkshared {
6869 // Relative reference (SymPtrOff) to another shared object doesn't work.
6874 wfn := e.curfn.WrappedFunc
6879 wsym := wfn.Linksym()
6880 x := base.Ctxt.LookupInit(fmt.Sprintf("%s.wrapinfo", wsym.Name), func(x *obj.LSym) {
6881 objw.SymPtrOff(x, 0, wsym)
6882 x.Set(obj.AttrContentAddressable, true)
6884 e.curfn.LSym.Func().WrapInfo = x
6886 // Emit a funcdata pointing at the wrap info data.
6887 p := pp.Prog(obj.AFUNCDATA)
6888 p.From.SetConst(objabi.FUNCDATA_WrapInfo)
6889 p.To.Type = obj.TYPE_MEM
6890 p.To.Name = obj.NAME_EXTERN
6894 // genssa appends entries to pp for each instruction in f.
6895 func genssa(f *ssa.Func, pp *objw.Progs) {
6897 s.ABI = f.OwnAux.Fn.ABI()
6899 e := f.Frontend().(*ssafn)
6901 s.livenessMap, s.partLiveArgs = liveness.Compute(e.curfn, f, e.stkptrsize, pp)
6902 emitArgInfo(e, f, pp)
6903 argLiveBlockMap, argLiveValueMap := liveness.ArgLiveness(e.curfn, f, pp)
6905 openDeferInfo := e.curfn.LSym.Func().OpenCodedDeferInfo
6906 if openDeferInfo != nil {
6907 // This function uses open-coded defers -- write out the funcdata
6908 // info that we computed at the end of genssa.
6909 p := pp.Prog(obj.AFUNCDATA)
6910 p.From.SetConst(objabi.FUNCDATA_OpenCodedDeferInfo)
6911 p.To.Type = obj.TYPE_MEM
6912 p.To.Name = obj.NAME_EXTERN
6913 p.To.Sym = openDeferInfo
6916 emitWrappedFuncInfo(e, pp)
6918 // Remember where each block starts.
6919 s.bstart = make([]*obj.Prog, f.NumBlocks())
6921 var progToValue map[*obj.Prog]*ssa.Value
6922 var progToBlock map[*obj.Prog]*ssa.Block
6923 var valueToProgAfter []*obj.Prog // The first Prog following computation of a value v; v is visible at this point.
6924 gatherPrintInfo := f.PrintOrHtmlSSA || ssa.GenssaDump[f.Name]
6925 if gatherPrintInfo {
6926 progToValue = make(map[*obj.Prog]*ssa.Value, f.NumValues())
6927 progToBlock = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
6928 f.Logf("genssa %s\n", f.Name)
6929 progToBlock[s.pp.Next] = f.Blocks[0]
6932 if base.Ctxt.Flag_locationlists {
6933 if cap(f.Cache.ValueToProgAfter) < f.NumValues() {
6934 f.Cache.ValueToProgAfter = make([]*obj.Prog, f.NumValues())
6936 valueToProgAfter = f.Cache.ValueToProgAfter[:f.NumValues()]
6937 for i := range valueToProgAfter {
6938 valueToProgAfter[i] = nil
6942 // If the very first instruction is not tagged as a statement,
6943 // debuggers may attribute it to the previous function in the program.
6944 firstPos := src.NoXPos
6945 for _, v := range f.Entry.Values {
6946 if v.Pos.IsStmt() == src.PosIsStmt && v.Op != ssa.OpArg && v.Op != ssa.OpArgIntReg && v.Op != ssa.OpArgFloatReg && v.Op != ssa.OpLoadReg && v.Op != ssa.OpStoreReg {
6948 v.Pos = firstPos.WithDefaultStmt()
6953 // inlMarks has an entry for each Prog that implements an inline mark.
6954 // It maps from that Prog to the global inlining id of the inlined body
6955 // which should unwind to this Prog's location.
6956 var inlMarks map[*obj.Prog]int32
6957 var inlMarkList []*obj.Prog
6959 // inlMarksByPos maps from a (column 1) source position to the set of
6960 // Progs that are in the set above and have that source position.
6961 var inlMarksByPos map[src.XPos][]*obj.Prog
6963 argLiveIdx := -1 // argument liveness info index
	// Emit basic blocks
	for i, b := range f.Blocks {
		s.bstart[b.ID] = s.pp.Next
		s.lineRunStart = nil
		s.SetPos(s.pp.Pos.WithNotStmt()) // It needs a non-empty Pos, but cannot be a statement boundary (yet).

		// Attach a "default" liveness info. Normally this will be
		// overwritten in the Values loop below for each Value. But
		// for an empty block this will be used for its control
		// instruction. We won't use the actual liveness map on a
		// control instruction. Just mark it something that is
		// preemptible, unless this function is "all unsafe".
		s.pp.NextLive = objw.LivenessIndex{StackMapIndex: -1, IsUnsafePoint: liveness.IsUnsafe(f)}

		if idx, ok := argLiveBlockMap[b.ID]; ok && idx != argLiveIdx {
			argLiveIdx = idx
			p := s.pp.Prog(obj.APCDATA)
			p.From.SetConst(objabi.PCDATA_ArgLiveIndex)
			p.To.SetConst(int64(idx))
		}
		// Emit values in block
		Arch.SSAMarkMoves(&s, b)
		for _, v := range b.Values {
			x := s.pp.Next
			s.DebugFriendlySetPosFrom(v)

			if v.Op.ResultInArg0() && v.ResultReg() != v.Args[0].Reg() {
				v.Fatalf("input[0] and output not in same register %s", v.LongString())
			}

			switch v.Op {
			case ssa.OpInitMem:
				// memory arg needs no code
			case ssa.OpArg:
				// input args need no code
			case ssa.OpSP, ssa.OpSB:
				// nothing to do
			case ssa.OpSelect0, ssa.OpSelect1, ssa.OpSelectN, ssa.OpMakeResult:
				// nothing to do
			case ssa.OpGetG:
				// nothing to do when there's a g register,
				// and checkLower complains if there's not
			case ssa.OpVarDef, ssa.OpVarLive, ssa.OpKeepAlive, ssa.OpWBend:
				// nothing to do; already used by liveness
			case ssa.OpPhi:
				CheckLoweredPhi(v)
			case ssa.OpConvert:
				// nothing to do; no-op conversion for liveness
				if v.Args[0].Reg() != v.Reg() {
					v.Fatalf("OpConvert should be a no-op: %s; %s", v.Args[0].LongString(), v.LongString())
				}
			case ssa.OpInlMark:
				p := Arch.Ginsnop(s.pp)
				if inlMarks == nil {
					inlMarks = map[*obj.Prog]int32{}
					inlMarksByPos = map[src.XPos][]*obj.Prog{}
				}
				inlMarks[p] = v.AuxInt32()
				inlMarkList = append(inlMarkList, p)
				pos := v.Pos.AtColumn1()
				inlMarksByPos[pos] = append(inlMarksByPos[pos], p)
				firstPos = src.NoXPos

			default:
				// Special case for first line in function; move it to the start (which cannot be a register-valued instruction).
				if firstPos != src.NoXPos && v.Op != ssa.OpArgIntReg && v.Op != ssa.OpArgFloatReg && v.Op != ssa.OpLoadReg && v.Op != ssa.OpStoreReg {
					s.SetPos(firstPos)
					firstPos = src.NoXPos
				}
				// Attach this safe point to the next
				// instruction.
				s.pp.NextLive = s.livenessMap.Get(v)

				// let the backend handle it
				Arch.SSAGenValue(&s, v)
			}

			if idx, ok := argLiveValueMap[v.ID]; ok && idx != argLiveIdx {
				argLiveIdx = idx
				p := s.pp.Prog(obj.APCDATA)
				p.From.SetConst(objabi.PCDATA_ArgLiveIndex)
				p.To.SetConst(int64(idx))
			}

			if base.Ctxt.Flag_locationlists {
				valueToProgAfter[v.ID] = s.pp.Next
			}

			if gatherPrintInfo {
				for ; x != s.pp.Next; x = x.Link {
					progToValue[x] = v
				}
			}
		}
		// If this is an empty infinite loop, stick a hardware NOP in there so that debuggers are less confused.
		if s.bstart[b.ID] == s.pp.Next && len(b.Succs) == 1 && b.Succs[0].Block() == b {
			p := Arch.Ginsnop(s.pp)
			p.Pos = p.Pos.WithIsStmt()
			if b.Pos == src.NoXPos {
				b.Pos = p.Pos // It needs a file, otherwise a no-file non-zero line causes confusion. See #35652.
				if b.Pos == src.NoXPos {
					b.Pos = pp.Text.Pos // Sometimes p.Pos is empty. See #35695.
				}
			}
			b.Pos = b.Pos.WithBogusLine() // Debuggers are not good about infinite loops; force a change in line number.
		}
		// Emit control flow instructions for block
		var next *ssa.Block
		if i < len(f.Blocks)-1 && base.Flag.N == 0 {
			// If -N, leave next==nil so every block with successors
			// ends in a JMP (except call blocks - plive doesn't like
			// select{send,recv} followed by a JMP call). Helps keep
			// line numbers for otherwise empty blocks.
			next = f.Blocks[i+1]
		}
		x := s.pp.Next
		s.SetPos(b.Pos)
		Arch.SSAGenBlock(&s, b, next)
		if gatherPrintInfo {
			for ; x != s.pp.Next; x = x.Link {
				progToBlock[x] = b
			}
		}
	}
	if f.Blocks[len(f.Blocks)-1].Kind == ssa.BlockExit {
		// We need the return address of a panic call to
		// still be inside the function in question. So if
		// it ends in a call which doesn't return, add a
		// nop (which will never execute) after the call.
		Arch.Ginsnop(pp)
	}
	if openDeferInfo != nil {
		// When doing open-coded defers, generate a disconnected call to
		// deferreturn and a return. This will be used during panic
		// recovery to unwind the stack and return back to the runtime.
		s.pp.NextLive = s.livenessMap.DeferReturn
		p := pp.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = ir.Syms.Deferreturn

		// Load results into registers, so that when a deferred function
		// recovers a panic, it will return to the caller with the right
		// results. The results are already in memory, because they are not
		// SSA'd when the function has defers (see canSSAName).
		for _, o := range f.OwnAux.ABIInfo().OutParams() {
			n := o.Name.(*ir.Name)
			rts, offs := o.RegisterTypesAndOffsets()
			for i := range o.Registers {
				Arch.LoadRegResult(&s, f, rts[i], ssa.ObjRegForAbiReg(o.Registers[i], f.Config), n, offs[i])
			}
		}

		pp.Prog(obj.ARET)
	}
	if inlMarks != nil {
		hasCall := false

		// We have some inline marks. Try to find other instructions we're
		// going to emit anyway, and use those instructions instead of the
		// inline marks.
		for p := pp.Text; p != nil; p = p.Link {
			if p.As == obj.ANOP || p.As == obj.AFUNCDATA || p.As == obj.APCDATA || p.As == obj.ATEXT || p.As == obj.APCALIGN || Arch.LinkArch.Family == sys.Wasm {
				// Don't use 0-sized instructions as inline marks, because we need
				// to identify inline mark instructions by pc offset.
				// (Some of these instructions are sometimes zero-sized, sometimes not.
				// We must not use anything that even might be zero-sized.)
				// TODO: are there others?
				continue
			}
			if _, ok := inlMarks[p]; ok {
				// Don't use inline marks themselves. We don't know
				// whether they will be zero-sized or not yet.
				continue
			}
			if p.As == obj.ACALL || p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO {
				hasCall = true
			}
			pos := p.Pos.AtColumn1()
			s := inlMarksByPos[pos]
			if len(s) == 0 {
				continue
			}
			for _, m := range s {
				// We found an instruction with the same source position as
				// some of the inline marks.
				// Use this instruction instead.
				p.Pos = p.Pos.WithIsStmt() // promote position to a statement
				pp.CurFunc.LSym.Func().AddInlMark(p, inlMarks[m])
				// Make the inline mark a real nop, so it doesn't generate any code.
				m.As = obj.ANOP
				m.Pos = src.NoXPos
				m.From = obj.Addr{}
				m.To = obj.Addr{}
			}
			delete(inlMarksByPos, pos)
		}
		// Any unmatched inline marks now need to be added to the inlining tree (and will generate a nop instruction).
		for _, p := range inlMarkList {
			if p.As != obj.ANOP {
				pp.CurFunc.LSym.Func().AddInlMark(p, inlMarks[p])
			}
		}
		if e.stksize == 0 && !hasCall {
			// Frameless leaf function. It doesn't need any preamble,
			// so make sure its first instruction isn't from an inlined callee.
			// If it is, add a nop at the start of the function with a position
			// equal to the start of the function.
			// This ensures that runtime.FuncForPC(uintptr(reflect.ValueOf(fn).Pointer())).Name()
			// returns the right answer. See issue 58300.
			for p := pp.Text; p != nil; p = p.Link {
				if p.As == obj.AFUNCDATA || p.As == obj.APCDATA || p.As == obj.ATEXT {
					continue
				}
				if base.Ctxt.PosTable.Pos(p.Pos).Base().InliningIndex() >= 0 {
					// Make a real (not 0-sized) nop.
					nop := Arch.Ginsnop(pp)
					nop.Pos = e.curfn.Pos().WithIsStmt()

					// Unfortunately, Ginsnop puts the instruction at the
					// end of the list. Move it up to just before p.

					// Unlink from the current list.
					for x := pp.Text; x != nil; x = x.Link {
						if x.Link == nop {
							x.Link = nop.Link
							break
						}
					}
					// Splice in right before p.
					for x := pp.Text; x != nil; x = x.Link {
						if x.Link == p {
							nop.Link = p
							x.Link = nop
							break
						}
					}
				}
				break
			}
		}
	}
	if base.Ctxt.Flag_locationlists {
		debugInfo := e.curfn.DebugInfo.(*ssa.FuncDebug)
		if e.curfn.ABI == obj.ABIInternal && base.Flag.N != 0 {
			ssa.BuildFuncDebugNoOptimized(base.Ctxt, f, base.Debug.LocationLists > 1, StackOffset, debugInfo)
		} else {
			ssa.BuildFuncDebug(base.Ctxt, f, base.Debug.LocationLists, StackOffset, debugInfo)
		}
		bstart := s.bstart
		idToIdx := make([]int, f.NumBlocks())
		for i, b := range f.Blocks {
			idToIdx[b.ID] = i
		}
		// Note that at this moment, Prog.Pc is a sequence number; it's
		// not a real PC until after assembly, so this mapping has to
		// be done later.
		debugInfo.GetPC = func(b, v ssa.ID) int64 {
			switch v {
			case ssa.BlockStart.ID:
				if b == f.Entry.ID {
					return 0 // Start at the very beginning, at the assembler-generated prologue.
					// This should only happen for function args (ssa.OpArg).
				}
				return bstart[b].Pc
			case ssa.BlockEnd.ID:
				blk := f.Blocks[idToIdx[b]]
				nv := len(blk.Values)
				return valueToProgAfter[blk.Values[nv-1].ID].Pc
			case ssa.FuncEnd.ID:
				return e.curfn.LSym.Size
			default:
				return valueToProgAfter[v].Pc
			}
		}
	}
	// Resolve branches, and relax DefaultStmt into NotStmt.
	for _, br := range s.Branches {
		br.P.To.SetTarget(s.bstart[br.B.ID])
		if br.P.Pos.IsStmt() != src.PosIsStmt {
			br.P.Pos = br.P.Pos.WithNotStmt()
		} else if v0 := br.B.FirstPossibleStmtValue(); v0 != nil && v0.Pos.Line() == br.P.Pos.Line() && v0.Pos.IsStmt() == src.PosIsStmt {
			br.P.Pos = br.P.Pos.WithNotStmt()
		}
	}
	// Resolve jump table destinations.
	for _, jt := range s.JumpTables {
		// Convert from *Block targets to *Prog targets.
		targets := make([]*obj.Prog, len(jt.Succs))
		for i, e := range jt.Succs {
			targets[i] = s.bstart[e.Block().ID]
		}
		// Add to list of jump tables to be resolved at assembly time.
		// The assembler converts from *Prog entries to absolute addresses
		// once it knows instruction byte offsets.
		fi := pp.CurFunc.LSym.Func()
		fi.JumpTables = append(fi.JumpTables, obj.JumpTable{Sym: jt.Aux.(*obj.LSym), Targets: targets})
	}
	if e.log { // spew to stdout
		filename := ""
		for p := pp.Text; p != nil; p = p.Link {
			if p.Pos.IsKnown() && p.InnermostFilename() != filename {
				filename = p.InnermostFilename()
				f.Logf("# %s\n", filename)
			}

			var s string
			if v, ok := progToValue[p]; ok {
				s = v.String()
			} else if b, ok := progToBlock[p]; ok {
				s = b.String()
			} else {
				s = "   " // most value and branch strings are 2-3 characters long
			}
			f.Logf(" %-6s\t%.5d (%s)\t%s\n", s, p.Pc, p.InnermostLineNumber(), p.InstructionString())
		}
	}
	if f.HTMLWriter != nil { // spew to ssa.html
		var buf strings.Builder
		buf.WriteString("<code>")
		buf.WriteString("<dl class=\"ssa-gen\">")
		filename := ""
		for p := pp.Text; p != nil; p = p.Link {
			// Don't spam every line with the file name, which is often huge.
			// Only print changes, and "unknown" is not a change.
			if p.Pos.IsKnown() && p.InnermostFilename() != filename {
				filename = p.InnermostFilename()
				buf.WriteString("<dt class=\"ssa-prog-src\"></dt><dd class=\"ssa-prog\">")
				buf.WriteString(html.EscapeString("# " + filename))
				buf.WriteString("</dd>")
			}

			buf.WriteString("<dt class=\"ssa-prog-src\">")
			if v, ok := progToValue[p]; ok {
				buf.WriteString(v.HTML())
			} else if b, ok := progToBlock[p]; ok {
				buf.WriteString("<b>" + b.HTML() + "</b>")
			}
			buf.WriteString("</dt>")
			buf.WriteString("<dd class=\"ssa-prog\">")
			fmt.Fprintf(&buf, "%.5d <span class=\"l%v line-number\">(%s)</span> %s", p.Pc, p.InnermostLineNumber(), p.InnermostLineNumberHTML(), html.EscapeString(p.InstructionString()))
			buf.WriteString("</dd>")
		}
		buf.WriteString("</dl>")
		buf.WriteString("</code>")
		f.HTMLWriter.WriteColumn("genssa", "genssa", "ssa-prog", buf.String())
	}
	if ssa.GenssaDump[f.Name] {
		fi := f.DumpFileForPhase("genssa")
		if fi != nil {

			// inliningDiffers if any filename changes or if any line number except the innermost (index 0) changes.
			inliningDiffers := func(a, b []src.Pos) bool {
				if len(a) != len(b) {
					return true
				}
				for i := range a {
					if a[i].Filename() != b[i].Filename() {
						return true
					}
					if i > 0 && a[i].Line() != b[i].Line() {
						return true
					}
				}
				return false
			}

			var allPosOld []src.Pos
			var allPos []src.Pos

			for p := pp.Text; p != nil; p = p.Link {
				if p.Pos.IsKnown() {
					allPos = p.AllPos(allPos)
					if inliningDiffers(allPos, allPosOld) {
						for i := len(allPos) - 1; i >= 0; i-- {
							pos := allPos[i]
							fmt.Fprintf(fi, "# %s:%d\n", pos.Filename(), pos.Line())
						}
						allPos, allPosOld = allPosOld, allPos // swap, not copy, so that they do not share slice storage.
					}
				}

				var s string
				if v, ok := progToValue[p]; ok {
					s = v.String()
				} else if b, ok := progToBlock[p]; ok {
					s = b.String()
				} else {
					s = "   " // most value and branch strings are 2-3 characters long
				}
				fmt.Fprintf(fi, " %-6s\t%.5d %s\t%s\n", s, p.Pc, ssa.StmtString(p.Pos), p.InstructionString())
			}
			fi.Close()
		}
	}

	defframe(&s, e, f)

	f.HTMLWriter.Close()
	f.HTMLWriter = nil
}
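// To see what genssa produced for a particular function, build with
// GOSSAFUNC set to that function's name; the resulting ssa.html gains a
// "genssa" column (written by WriteColumn above) listing each Prog next to
// the SSA value or block it was emitted for. For example (sketch):
//
//	GOSSAFUNC=Foo go build ./pkg   # writes ssa.html during the build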
func defframe(s *State, e *ssafn, f *ssa.Func) {
	pp := s.pp

	s.maxarg = types.RoundUp(s.maxarg, e.stkalign)
	frame := s.maxarg + e.stksize
	if Arch.PadFrame != nil {
		frame = Arch.PadFrame(frame)
	}

	// Fill in argument and frame size.
	pp.Text.To.Type = obj.TYPE_TEXTSIZE
	pp.Text.To.Val = int32(types.RoundUp(f.OwnAux.ArgWidth(), int64(types.RegSize)))
	pp.Text.To.Offset = frame

	p := pp.Text

	// Insert code to spill argument registers if the named slot may be partially
	// live. That is, the named slot is considered live by liveness analysis
	// (because a part of it is live), but we may not spill all parts into the
	// slot. This can only happen with aggregate-typed arguments that are SSA-able
	// and not address-taken (for non-SSA-able or address-taken arguments we always
	// spill upfront).
	// Note: spilling is unnecessary in the -N/no-optimize case, since all values
	// will be considered non-SSAable and spilled up front.
	// TODO(register args) Make liveness more fine-grained so that partial spilling is okay.
	if f.OwnAux.ABIInfo().InRegistersUsed() != 0 && base.Flag.N == 0 {
		// First, see if it is already spilled before it may be live. Look for a spill
		// in the entry block up to the first safepoint.
		type nameOff struct {
			n   *ir.Name
			off int64
		}
		partLiveArgsSpilled := make(map[nameOff]bool)
		for _, v := range f.Entry.Values {
			if v.Op.IsCall() {
				break
			}
			if v.Op != ssa.OpStoreReg || v.Args[0].Op != ssa.OpArgIntReg {
				continue
			}
			n, off := ssa.AutoVar(v)
			if n.Class != ir.PPARAM || n.Addrtaken() || !TypeOK(n.Type()) || !s.partLiveArgs[n] {
				continue
			}
			partLiveArgsSpilled[nameOff{n, off}] = true
		}

		// Then, insert code to spill registers if not already.
		for _, a := range f.OwnAux.ABIInfo().InParams() {
			n, ok := a.Name.(*ir.Name)
			if !ok || n.Addrtaken() || !TypeOK(n.Type()) || !s.partLiveArgs[n] || len(a.Registers) <= 1 {
				continue
			}
			rts, offs := a.RegisterTypesAndOffsets()
			for i := range a.Registers {
				if !rts[i].HasPointers() {
					continue
				}
				if partLiveArgsSpilled[nameOff{n, offs[i]}] {
					continue // already spilled
				}
				reg := ssa.ObjRegForAbiReg(a.Registers[i], f.Config)
				p = Arch.SpillArgReg(pp, p, f, rts[i], reg, n, offs[i])
			}
		}
	}

	// Insert code to zero ambiguously live variables so that the
	// garbage collector only sees initialized values when it
	// looks for pointers.
	var lo, hi int64

	// Opaque state for backend to use. Current backends use it to
	// keep track of which helper registers have been zeroed.
	var state uint32

	// Iterate through declarations. Autos are sorted in decreasing
	// frame offset order.
	for _, n := range e.curfn.Dcl {
		if !n.Needzero() {
			continue
		}
		if n.Class != ir.PAUTO {
			e.Fatalf(n.Pos(), "needzero class %d", n.Class)
		}
		if n.Type().Size()%int64(types.PtrSize) != 0 || n.FrameOffset()%int64(types.PtrSize) != 0 || n.Type().Size() == 0 {
			e.Fatalf(n.Pos(), "var %L has size %d offset %d", n, n.Type().Size(), n.Offset_)
		}

		if lo != hi && n.FrameOffset()+n.Type().Size() >= lo-int64(2*types.RegSize) {
			// Merge with range we already have.
			lo = n.FrameOffset()
			continue
		}

		// Zero old range.
		p = Arch.ZeroRange(pp, p, frame+lo, hi-lo, &state)

		// Set new range.
		lo = n.FrameOffset()
		hi = lo + n.Type().Size()
	}

	// Zero final range.
	Arch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
}
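// A worked example of the zeroing logic above (offsets illustrative): with
// RegSize == 8, suppose two ambiguously live autos occupy frame offsets
// [-16,-8) and [-40,-24). Autos are visited in decreasing offset order, so
// lo/hi first cover [-16,-8). The next auto ends at -24, which is within
// 2*RegSize of lo (-24 >= -16-16), so the ranges merge to [-40,-8) and a
// single ZeroRange call clears the whole span; a wider gap would instead
// flush the old range and start a new one.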
// An IndexJump describes one conditional jump instruction (Jump) and the
// index of the block successor (Index) it targets. Consecutive IndexJumps
// are used to model a branch that needs more than one instruction.
type IndexJump struct {
	Jump  obj.As
	Index int
}

func (s *State) oneJump(b *ssa.Block, jump *IndexJump) {
	p := s.Br(jump.Jump, b.Succs[jump.Index].Block())
	p.Pos = b.Pos
}

// CombJump generates a combination of jump instructions (two at present) for
// a block's branch, so that the behavior of non-standard condition codes can
// be simulated.
func (s *State) CombJump(b, next *ssa.Block, jumps *[2][2]IndexJump) {
	switch next {
	case b.Succs[0].Block():
		s.oneJump(b, &jumps[0][0])
		s.oneJump(b, &jumps[0][1])
	case b.Succs[1].Block():
		s.oneJump(b, &jumps[1][0])
		s.oneJump(b, &jumps[1][1])
	default:
		var q *obj.Prog
		if b.Likely != ssa.BranchUnlikely {
			s.oneJump(b, &jumps[1][0])
			s.oneJump(b, &jumps[1][1])
			q = s.Br(obj.AJMP, b.Succs[1].Block())
		} else {
			s.oneJump(b, &jumps[0][0])
			s.oneJump(b, &jumps[0][1])
			q = s.Br(obj.AJMP, b.Succs[0].Block())
		}
		q.Pos = b.Pos
	}
}
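// A hypothetical use of CombJump (mnemonics and successor indices below are
// illustrative only, not taken from a real port): a backend implementing
// "branch if less than, or unordered" as two conditional instructions
// supplies one row per fallthrough case; when neither successor is next,
// CombJump emits the row chosen by branch likelihood followed by an
// unconditional JMP to the other successor:
//
//	var lessOrUnorderedJumps = [2][2]IndexJump{
//		{{Jump: ABVS, Index: 0}, {Jump: ABGE, Index: 1}}, // used when next == b.Succs[0]
//		{{Jump: ABVS, Index: 0}, {Jump: ABLT, Index: 0}}, // used when next == b.Succs[1]
//	}
//	s.CombJump(b, next, &lessOrUnorderedJumps)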
// AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a.
func AddAux(a *obj.Addr, v *ssa.Value) {
	AddAux2(a, v, v.AuxInt)
}

func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
	if a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR {
		v.Fatalf("bad AddAux addr %v", a)
	}
	// add integer offset
	a.Offset += offset

	// If no additional symbol offset, we're done.
	if v.Aux == nil {
		return
	}
	// Add symbol's offset from its base register.
	switch n := v.Aux.(type) {
	case *ssa.AuxCall:
		a.Name = obj.NAME_EXTERN
		a.Sym = n.Fn
	case *obj.LSym:
		a.Name = obj.NAME_EXTERN
		a.Sym = n
	case *ir.Name:
		if n.Class == ir.PPARAM || (n.Class == ir.PPARAMOUT && !n.IsOutputParamInRegisters()) {
			a.Name = obj.NAME_PARAM
			a.Sym = ir.Orig(n).(*ir.Name).Linksym()
			a.Offset += n.FrameOffset()
			break
		}
		a.Name = obj.NAME_AUTO
		if n.Class == ir.PPARAMOUT {
			a.Sym = ir.Orig(n).(*ir.Name).Linksym()
		} else {
			a.Sym = n.Linksym()
		}
		a.Offset += n.FrameOffset()
	default:
		v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
	}
}
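// AddAux/AddAux2 are called by the architecture backends when they build a
// memory operand for a Value. A typical load looks roughly like this
// (sketch; loadOp stands for whatever MOV-style opcode the port uses):
//
//	p := s.Prog(loadOp)
//	p.From.Type = obj.TYPE_MEM
//	p.From.Reg = v.Args[0].Reg() // base register
//	AddAux(&p.From, v)           // fold v.AuxInt offset and v.Aux symbol into the operand
//	p.To.Type = obj.TYPE_REG
//	p.To.Reg = v.Reg()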
// extendIndex extends idx to a full int width.
// It panics with the given kind if idx does not fit in an int (only on 32-bit archs).
func (s *state) extendIndex(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value {
	size := idx.Type.Size()
	if size == s.config.PtrSize {
		return idx
	}
	if size > s.config.PtrSize {
		// truncate 64-bit indexes on 32-bit pointer archs. Test the
		// high word and branch to out-of-bounds failure if it is not 0.
		var lo *ssa.Value
		if idx.Type.IsSigned() {
			lo = s.newValue1(ssa.OpInt64Lo, types.Types[types.TINT], idx)
		} else {
			lo = s.newValue1(ssa.OpInt64Lo, types.Types[types.TUINT], idx)
		}
		if bounded || base.Flag.B != 0 {
			return lo
		}
		bNext := s.f.NewBlock(ssa.BlockPlain)
		bPanic := s.f.NewBlock(ssa.BlockExit)
		hi := s.newValue1(ssa.OpInt64Hi, types.Types[types.TUINT32], idx)
		cmp := s.newValue2(ssa.OpEq32, types.Types[types.TBOOL], hi, s.constInt32(types.Types[types.TUINT32], 0))
		if !idx.Type.IsSigned() {
			switch kind {
			case ssa.BoundsIndex:
				kind = ssa.BoundsIndexU
			case ssa.BoundsSliceAlen:
				kind = ssa.BoundsSliceAlenU
			case ssa.BoundsSliceAcap:
				kind = ssa.BoundsSliceAcapU
			case ssa.BoundsSliceB:
				kind = ssa.BoundsSliceBU
			case ssa.BoundsSlice3Alen:
				kind = ssa.BoundsSlice3AlenU
			case ssa.BoundsSlice3Acap:
				kind = ssa.BoundsSlice3AcapU
			case ssa.BoundsSlice3B:
				kind = ssa.BoundsSlice3BU
			case ssa.BoundsSlice3C:
				kind = ssa.BoundsSlice3CU
			}
		}
		b := s.endBlock()
		b.Kind = ssa.BlockIf
		b.SetControl(cmp)
		b.Likely = ssa.BranchLikely
		b.AddEdgeTo(bNext)
		b.AddEdgeTo(bPanic)

		s.startBlock(bPanic)
		mem := s.newValue4I(ssa.OpPanicExtend, types.TypeMem, int64(kind), hi, lo, len, s.mem())
		s.endBlock().SetControl(mem)
		s.startBlock(bNext)

		return lo
	}

	// Extend value to the required size.
	var op ssa.Op
	if idx.Type.IsSigned() {
		switch 10*size + s.config.PtrSize {
		case 14:
			op = ssa.OpSignExt8to32
		case 18:
			op = ssa.OpSignExt8to64
		case 24:
			op = ssa.OpSignExt16to32
		case 28:
			op = ssa.OpSignExt16to64
		case 48:
			op = ssa.OpSignExt32to64
		default:
			s.Fatalf("bad signed index extension %s", idx.Type)
		}
	} else {
		switch 10*size + s.config.PtrSize {
		case 14:
			op = ssa.OpZeroExt8to32
		case 18:
			op = ssa.OpZeroExt8to64
		case 24:
			op = ssa.OpZeroExt16to32
		case 28:
			op = ssa.OpZeroExt16to64
		case 48:
			op = ssa.OpZeroExt32to64
		default:
			s.Fatalf("bad unsigned index extension %s", idx.Type)
		}
	}
	return s.newValue1(op, types.Types[types.TINT], idx)
}
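// The 10*size+PtrSize keys above pack the (index size, pointer size) pair
// into one small integer: an int16 index on a 64-bit target gives
// 10*2+8 == 28, selecting OpSignExt16to64, while an int8 index on a 32-bit
// target gives 10*1+4 == 14, selecting OpSignExt8to32 (or the OpZeroExt
// variant when the index is unsigned).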
// CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values.
// Called during ssaGenValue.
func CheckLoweredPhi(v *ssa.Value) {
	if v.Op != ssa.OpPhi {
		v.Fatalf("CheckLoweredPhi called with non-phi value: %v", v.LongString())
	}
	if v.Type.IsMemory() {
		return
	}
	f := v.Block.Func
	loc := f.RegAlloc[v.ID]
	for _, a := range v.Args {
		if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead?
			v.Fatalf("phi arg at different location than phi: %v @ %s, but arg %v @ %s\n%s\n", v, loc, a, aloc, v.Block.Func)
		}
	}
}
// CheckLoweredGetClosurePtr checks that v is the first instruction in the function's entry block,
// except for incoming in-register arguments.
// The output of LoweredGetClosurePtr is generally hardwired to the correct register.
// That register contains the closure pointer on closure entry.
func CheckLoweredGetClosurePtr(v *ssa.Value) {
	entry := v.Block.Func.Entry
	if entry != v.Block {
		base.Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
	}
	for _, w := range entry.Values {
		if w == v {
			break
		}
		switch w.Op {
		case ssa.OpArgIntReg, ssa.OpArgFloatReg:
			// okay
		default:
			base.Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
		}
	}
}
// CheckArgReg ensures that v is in the function's entry block.
func CheckArgReg(v *ssa.Value) {
	entry := v.Block.Func.Entry
	if entry != v.Block {
		base.Fatalf("in %s, badly placed ArgIReg or ArgFReg: %v %v", v.Block.Func.Name, v.Block, v)
	}
}
// AddrAuto populates a with the address of the stack slot (auto or parameter)
// associated with v.
func AddrAuto(a *obj.Addr, v *ssa.Value) {
	n, off := ssa.AutoVar(v)
	a.Type = obj.TYPE_MEM
	a.Sym = n.Linksym()
	a.Reg = int16(Arch.REGSP)
	a.Offset = n.FrameOffset() + off
	if n.Class == ir.PPARAM || (n.Class == ir.PPARAMOUT && !n.IsOutputParamInRegisters()) {
		a.Name = obj.NAME_PARAM
	} else {
		a.Name = obj.NAME_AUTO
	}
}
// Call returns a new CALL instruction for the SSA value v.
// It uses PrepareCall to prepare the call.
func (s *State) Call(v *ssa.Value) *obj.Prog {
	pPosIsStmt := s.pp.Pos.IsStmt() // The statement-ness of the call comes from ssaGenState.
	s.PrepareCall(v)

	p := s.Prog(obj.ACALL)
	if pPosIsStmt == src.PosIsStmt {
		p.Pos = v.Pos.WithIsStmt()
	} else {
		p.Pos = v.Pos.WithNotStmt()
	}
	if sym, ok := v.Aux.(*ssa.AuxCall); ok && sym.Fn != nil {
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = sym.Fn
	} else {
		// TODO(mdempsky): Can these differences be eliminated?
		switch Arch.LinkArch.Family {
		case sys.AMD64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm:
			p.To.Type = obj.TYPE_REG
		case sys.ARM, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64:
			p.To.Type = obj.TYPE_MEM
		default:
			base.Fatalf("unknown indirect call family")
		}
		p.To.Reg = v.Args[0].Reg()
	}
	return p
}
// TailCall returns a new tail call instruction for the SSA value v.
// It is like Call, but for a tail call.
func (s *State) TailCall(v *ssa.Value) *obj.Prog {
	p := s.Call(v)
	p.As = obj.ARET
	return p
}
// PrepareCall prepares to emit a CALL instruction for v and does call-related bookkeeping.
// It must be called immediately before emitting the actual CALL instruction,
// since it emits PCDATA for the stack map at the call (calls are safe points).
func (s *State) PrepareCall(v *ssa.Value) {
	idx := s.livenessMap.Get(v)
	if !idx.StackMapValid() {
		// See Liveness.hasStackMap.
		if sym, ok := v.Aux.(*ssa.AuxCall); !ok || !(sym.Fn == ir.Syms.Typedmemclr || sym.Fn == ir.Syms.Typedmemmove) {
			base.Fatalf("missing stack map index for %v", v.LongString())
		}
	}

	call, ok := v.Aux.(*ssa.AuxCall)

	if ok {
		// Record call graph information for nowritebarrierrec
		// analysis.
		if nowritebarrierrecCheck != nil {
			nowritebarrierrecCheck.recordCall(s.pp.CurFunc, call.Fn, v.Pos)
		}
	}

	if s.maxarg < v.AuxInt {
		s.maxarg = v.AuxInt
	}
}
// UseArgs records the fact that an instruction needs a certain amount of
// callee args space for its use.
func (s *State) UseArgs(n int64) {
	if s.maxarg < n {
		s.maxarg = n
	}
}
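// UseArgs is for instructions that consume callee-argument space without
// being modeled as ordinary calls, e.g. pseudo-ops that a backend lowers to
// a runtime call with a fixed argument block. A sketch of a backend call
// site (the 16 is illustrative):
//
//	s.UseArgs(16) // this op needs 16 bytes in the callee args area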
// fieldIdx finds the index of the field referred to by the ODOT node n.
func fieldIdx(n *ir.SelectorExpr) int {
	t := n.X.Type()
	if !t.IsStruct() {
		panic("ODOT's LHS is not a struct")
	}

	for i, f := range t.Fields().Slice() {
		if f.Sym == n.Sel {
			if f.Offset != n.Offset() {
				panic("field offset doesn't match")
			}
			return i
		}
	}
	panic(fmt.Sprintf("can't find field in expr %v\n", n))

	// TODO: keep the result of this function somewhere in the ODOT Node
	// so we don't have to recompute it each time we need it.
}
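// For example (sketch), given
//
//	type T struct{ a, b int64 }
//
// an ODOT selecting b resolves to index 1, and on a 64-bit target its
// Offset() must equal 8 (b's byte offset within T) or fieldIdx panics.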
// ssafn holds frontend information about a function that the backend is processing.
// It also exports a bunch of compiler services for the ssa backend.
type ssafn struct {
	curfn      *ir.Func
	strings    map[string]*obj.LSym // map from constant string to data symbols
	stksize    int64                // stack size for current frame
	stkptrsize int64                // prefix of stack containing pointers

	// alignment for current frame.
	// NOTE: when stkalign > PtrSize, currently this only ensures the offsets of
	// objects in the stack frame are aligned. The stack pointer is still aligned
	// only to PtrSize.
	stkalign int64

	log bool // print ssa debug to stdout
}
// StringData returns a symbol which
// is the data component of a global string constant containing s.
func (e *ssafn) StringData(s string) *obj.LSym {
	if aux, ok := e.strings[s]; ok {
		return aux
	}
	if e.strings == nil {
		e.strings = make(map[string]*obj.LSym)
	}
	data := staticdata.StringSym(e.curfn.Pos(), s)
	e.strings[s] = data
	return data
}
func (e *ssafn) Auto(pos src.XPos, t *types.Type) *ir.Name {
	return typecheck.TempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list.
}
// SplitSlot returns a slot representing the data of parent starting at offset.
func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
	node := parent.N

	if node.Class != ir.PAUTO || node.Addrtaken() {
		// addressed things and non-autos retain their parents (i.e., cannot truly be split)
		return ssa.LocalSlot{N: node, Type: t, Off: parent.Off + offset}
	}

	s := &types.Sym{Name: node.Sym().Name + suffix, Pkg: types.LocalPkg}
	n := ir.NewNameAt(parent.N.Pos(), s)
	s.Def = n
	ir.AsNode(s.Def).Name().SetUsed(true)
	n.SetType(t)
	n.Class = ir.PAUTO
	n.SetEsc(ir.EscNever)
	n.Curfn = e.curfn
	e.curfn.Dcl = append(e.curfn.Dcl, n)
	types.CalcSize(t)
	return ssa.LocalSlot{N: n, Type: t, Off: 0, SplitOf: parent, SplitOffset: offset}
}
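// For example, the decomposition passes split a string-typed slot into its
// pointer and length parts roughly like this (the suffixes follow the
// existing convention; the types shown are illustrative):
//
//	ptrPart := e.SplitSlot(&slot, ".ptr", 0, ptrType)              // s.ptr
//	lenPart := e.SplitSlot(&slot, ".len", ptrType.Size(), lenType) // s.len
//
// each resulting PAUTO can then be register-allocated independently.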
func (e *ssafn) CanSSA(t *types.Type) bool {
	return TypeOK(t)
}

func (e *ssafn) Line(pos src.XPos) string {
	return base.FmtPos(pos)
}

// Logf logs a message from the compiler.
func (e *ssafn) Logf(msg string, args ...interface{}) {
	if e.log {
		fmt.Printf(msg, args...)
	}
}

func (e *ssafn) Log() bool {
	return e.log
}

// Fatalf reports a compiler error and exits.
func (e *ssafn) Fatalf(pos src.XPos, msg string, args ...interface{}) {
	base.Pos = pos
	nargs := append([]interface{}{ir.FuncName(e.curfn)}, args...)
	base.Fatalf("'%s': "+msg, nargs...)
}

// Warnl reports a "warning", which is usually flag-triggered
// logging output for the benefit of tests.
func (e *ssafn) Warnl(pos src.XPos, fmt_ string, args ...interface{}) {
	base.WarnfAt(pos, fmt_, args...)
}

func (e *ssafn) Debug_checknil() bool {
	return base.Debug.Nil != 0
}

func (e *ssafn) UseWriteBarrier() bool {
	return base.Flag.WB
}

func (e *ssafn) Syslook(name string) *obj.LSym {
	switch name {
	case "goschedguarded":
		return ir.Syms.Goschedguarded
	case "writeBarrier":
		return ir.Syms.WriteBarrier
	case "gcWriteBarrier":
		return ir.Syms.GCWriteBarrier
	case "typedmemmove":
		return ir.Syms.Typedmemmove
	case "typedmemclr":
		return ir.Syms.Typedmemclr
	}
	e.Fatalf(src.NoXPos, "unknown Syslook func %v", name)
	return nil
}

func (e *ssafn) SetWBPos(pos src.XPos) {
	e.curfn.SetWBPos(pos)
}

func (e *ssafn) MyImportPath() string {
	return base.Ctxt.Pkgpath
}

func (e *ssafn) LSym() string {
	return e.curfn.LSym.Name
}
// clobberBase returns the base expression to clobber for n: selecting the
// only field of a struct or the only element of a one-element array covers
// exactly the same memory as the whole variable, so the clobber is widened
// to it.
func clobberBase(n ir.Node) ir.Node {
	if n.Op() == ir.ODOT {
		n := n.(*ir.SelectorExpr)
		if n.X.Type().NumFields() == 1 {
			return clobberBase(n.X)
		}
	}
	if n.Op() == ir.OINDEX {
		n := n.(*ir.IndexExpr)
		if n.X.Type().IsArray() && n.X.Type().NumElem() == 1 {
			return clobberBase(n.X)
		}
	}
	return n
}
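// For example, for
//
//	var x struct{ f int64 }
//
// clobberBase of the ODOT x.f yields x itself: f is the only field, so
// clobbering all of x covers exactly the same memory. The same applies to
// the single element of a [1]T array.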
// callTargetLSym returns the correct LSym to call 'callee' using its ABI.
func callTargetLSym(callee *ir.Name) *obj.LSym {
	if callee.Func == nil {
		// TODO(austin): This happens in a few cases of
		// compiler-generated functions. These are all
		// ABIInternal. It would be better if callee.Func was
		// never nil and we didn't need this case.
		return callee.Linksym()
	}

	return callee.LinksymABI(callee.Func.ABI)
}
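// For example, a direct call to a hand-written ABI0 assembly function
// resolves to its ABI0 symbol, while an ordinary Go function resolves to
// its ABIInternal symbol; wrappers are then inserted wherever the caller's
// and callee's ABIs disagree.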
func min8(a, b int8) int8 {
	if a < b {
		return a
	}
	return b
}

func max8(a, b int8) int8 {
	if a > b {
		return a
	}
	return b
}
// deferstruct makes a runtime._defer structure.
func deferstruct() *types.Type {
	makefield := func(name string, typ *types.Type) *types.Field {
		// Unlike the global makefield function, this one needs to set Pkg
		// because these types might be compared (in SSA CSE sorting).
		// TODO: unify this makefield and the global one above.
		sym := &types.Sym{Name: name, Pkg: types.LocalPkg}
		return types.NewField(src.NoXPos, sym, typ)
	}
	// These fields must match the ones in runtime/runtime2.go:_defer and
	// (*state).call above.
	fields := []*types.Field{
		makefield("started", types.Types[types.TBOOL]),
		makefield("heap", types.Types[types.TBOOL]),
		makefield("openDefer", types.Types[types.TBOOL]),
		makefield("sp", types.Types[types.TUINTPTR]),
		makefield("pc", types.Types[types.TUINTPTR]),
		// Note: the types here don't really matter. Defer structures
		// are always scanned explicitly during stack copying and GC,
		// so we make them uintptr type even though they are real pointers.
		makefield("fn", types.Types[types.TUINTPTR]),
		makefield("_panic", types.Types[types.TUINTPTR]),
		makefield("link", types.Types[types.TUINTPTR]),
		makefield("fd", types.Types[types.TUINTPTR]),
		makefield("varp", types.Types[types.TUINTPTR]),
		makefield("framepc", types.Types[types.TUINTPTR]),
	}

	// Build a struct holding the above fields.
	s := types.NewStruct(fields)
	s.SetNoalg(true)
	types.CalcStructSize(s)
	return s
}
// SpillSlotAddr uses LocalSlot information to initialize an obj.Addr.
// The resulting addr is used in a non-standard context -- in the prologue
// of a function, before the frame has been constructed, so the standard
// addressing for the parameters will be wrong.
func SpillSlotAddr(spill ssa.Spill, baseReg int16, extraOffset int64) obj.Addr {
	return obj.Addr{
		Name:   obj.NAME_NONE,
		Type:   obj.TYPE_MEM,
		Reg:    baseReg,
		Offset: spill.Offset + extraOffset,
	}
}
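// A sketch of how a port might use this while spilling an argument register
// in the prologue (the register and zero extra offset are illustrative):
//
//	p.To = SpillSlotAddr(spill, x86.REG_SP, 0) // address off SP, pre-frame
//
// Because Name is NAME_NONE, no NAME_PARAM/NAME_AUTO offset rewriting is
// applied -- the address is exactly baseReg+Offset.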
var (
	BoundsCheckFunc [ssa.BoundsKindCount]*obj.LSym
	ExtendCheckFunc [ssa.BoundsKindCount]*obj.LSym
)

// GCWriteBarrierReg maps from registers to gcWriteBarrier implementation LSyms.
var GCWriteBarrierReg map[int16]*obj.LSym