// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
10 "cmd/compile/internal/abi"
20 "cmd/compile/internal/base"
21 "cmd/compile/internal/ir"
22 "cmd/compile/internal/liveness"
23 "cmd/compile/internal/objw"
24 "cmd/compile/internal/reflectdata"
25 "cmd/compile/internal/ssa"
26 "cmd/compile/internal/staticdata"
27 "cmd/compile/internal/typecheck"
28 "cmd/compile/internal/types"
30 "cmd/internal/obj/x86"
var ssaConfig *ssa.Config
var ssaCaches []ssa.Cache

var ssaDump string     // early copy of $GOSSAFUNC; the func name to dump output for
var ssaDir string      // optional destination for ssa dump file
var ssaDumpStdout bool // whether to dump to stdout
var ssaDumpCFG string  // generate CFGs for these phases

const ssaDumpFile = "ssa.html"
// ssaDumpInlined holds all inlined functions when ssaDump contains a function name.
var ssaDumpInlined []*ir.Func

// DumpInline records fn as an inlined function if it is the function being dumped.
func DumpInline(fn *ir.Func) {
	if ssaDump != "" && ssaDump == ir.FuncName(fn) {
		ssaDumpInlined = append(ssaDumpInlined, fn)
	}
}
// InitEnv reads the GOSSAFUNC and GOSSADIR environment variables that
// control SSA debug output.
func InitEnv() {
	ssaDump = os.Getenv("GOSSAFUNC")
	ssaDir = os.Getenv("GOSSADIR")
	if ssaDump != "" {
		if strings.HasSuffix(ssaDump, "+") {
			ssaDump = ssaDump[:len(ssaDump)-1]
			ssaDumpStdout = true
		}
		spl := strings.Split(ssaDump, ":")
		if len(spl) > 1 {
			ssaDump = spl[0]
			ssaDumpCFG = spl[1]
		}
	}
}
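
// For reference, typical GOSSAFUNC invocations look like the following.
// The function name "Foo" and phase name "dse" are illustrative; the part
// after ":" is handed to the HTML writer as the set of phases to draw CFGs for.
//
//	GOSSAFUNC=Foo go build       # write ssa.html for Foo
//	GOSSAFUNC=Foo+ go build      # additionally dump the output to stdout
//	GOSSAFUNC=Foo:dse go build   # also generate CFGs for the named phase(s)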
func InitConfig() {
	types_ := ssa.NewTypes()

	if Arch.SoftFloat {
		softfloatInit()
	}

	// Generate a few pointer types that are uncommon in the frontend but common in the backend.
	// Caching is disabled in the backend, so generating these here avoids allocations.
	_ = types.NewPtr(types.Types[types.TINTER])                             // *interface{}
	_ = types.NewPtr(types.NewPtr(types.Types[types.TSTRING]))              // **string
	_ = types.NewPtr(types.NewSlice(types.Types[types.TINTER]))             // *[]interface{}
	_ = types.NewPtr(types.NewPtr(types.ByteType))                          // **byte
	_ = types.NewPtr(types.NewSlice(types.ByteType))                        // *[]byte
	_ = types.NewPtr(types.NewSlice(types.Types[types.TSTRING]))            // *[]string
	_ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[types.TUINT8]))) // ***uint8
	_ = types.NewPtr(types.Types[types.TINT16])                             // *int16
	_ = types.NewPtr(types.Types[types.TINT64])                             // *int64
	_ = types.NewPtr(types.ErrorType)                                       // *error
	types.NewPtrCacheEnabled = false
	ssaConfig = ssa.NewConfig(base.Ctxt.Arch.Name, *types_, base.Ctxt, base.Flag.N == 0)
	ssaConfig.SoftFloat = Arch.SoftFloat
	ssaConfig.Race = base.Flag.Race
	ssaCaches = make([]ssa.Cache, base.Flag.LowerC)
	// Set up some runtime functions we'll need to call.
	ir.Syms.AssertE2I = typecheck.LookupRuntimeFunc("assertE2I")
	ir.Syms.AssertE2I2 = typecheck.LookupRuntimeFunc("assertE2I2")
	ir.Syms.AssertI2I = typecheck.LookupRuntimeFunc("assertI2I")
	ir.Syms.AssertI2I2 = typecheck.LookupRuntimeFunc("assertI2I2")
	ir.Syms.Deferproc = typecheck.LookupRuntimeFunc("deferproc")
	ir.Syms.DeferprocStack = typecheck.LookupRuntimeFunc("deferprocStack")
	ir.Syms.Deferreturn = typecheck.LookupRuntimeFunc("deferreturn")
	ir.Syms.Duffcopy = typecheck.LookupRuntimeFunc("duffcopy")
	ir.Syms.Duffzero = typecheck.LookupRuntimeFunc("duffzero")
	ir.Syms.GCWriteBarrier = typecheck.LookupRuntimeFunc("gcWriteBarrier")
	ir.Syms.Goschedguarded = typecheck.LookupRuntimeFunc("goschedguarded")
	ir.Syms.Growslice = typecheck.LookupRuntimeFunc("growslice")
	ir.Syms.Msanread = typecheck.LookupRuntimeFunc("msanread")
	ir.Syms.Msanwrite = typecheck.LookupRuntimeFunc("msanwrite")
	ir.Syms.Msanmove = typecheck.LookupRuntimeFunc("msanmove")
	ir.Syms.Newobject = typecheck.LookupRuntimeFunc("newobject")
	ir.Syms.Newproc = typecheck.LookupRuntimeFunc("newproc")
	ir.Syms.Panicdivide = typecheck.LookupRuntimeFunc("panicdivide")
	ir.Syms.PanicdottypeE = typecheck.LookupRuntimeFunc("panicdottypeE")
	ir.Syms.PanicdottypeI = typecheck.LookupRuntimeFunc("panicdottypeI")
	ir.Syms.Panicnildottype = typecheck.LookupRuntimeFunc("panicnildottype")
	ir.Syms.Panicoverflow = typecheck.LookupRuntimeFunc("panicoverflow")
	ir.Syms.Panicshift = typecheck.LookupRuntimeFunc("panicshift")
	ir.Syms.Raceread = typecheck.LookupRuntimeFunc("raceread")
	ir.Syms.Racereadrange = typecheck.LookupRuntimeFunc("racereadrange")
	ir.Syms.Racewrite = typecheck.LookupRuntimeFunc("racewrite")
	ir.Syms.Racewriterange = typecheck.LookupRuntimeFunc("racewriterange")
	ir.Syms.X86HasPOPCNT = typecheck.LookupRuntimeVar("x86HasPOPCNT")        // bool
	ir.Syms.X86HasSSE41 = typecheck.LookupRuntimeVar("x86HasSSE41")          // bool
	ir.Syms.X86HasFMA = typecheck.LookupRuntimeVar("x86HasFMA")              // bool
	ir.Syms.ARMHasVFPv4 = typecheck.LookupRuntimeVar("armHasVFPv4")          // bool
	ir.Syms.ARM64HasATOMICS = typecheck.LookupRuntimeVar("arm64HasATOMICS")  // bool
	ir.Syms.Staticuint64s = typecheck.LookupRuntimeVar("staticuint64s")
	ir.Syms.Typedmemclr = typecheck.LookupRuntimeFunc("typedmemclr")
	ir.Syms.Typedmemmove = typecheck.LookupRuntimeFunc("typedmemmove")
	ir.Syms.Udiv = typecheck.LookupRuntimeVar("udiv")                 // asm func with special ABI
	ir.Syms.WriteBarrier = typecheck.LookupRuntimeVar("writeBarrier") // struct { bool; ... }
	ir.Syms.Zerobase = typecheck.LookupRuntimeVar("zerobase")
	// asm funcs with special ABI
	if base.Ctxt.Arch.Name == "amd64" {
		GCWriteBarrierReg = map[int16]*obj.LSym{
			x86.REG_AX: typecheck.LookupRuntimeFunc("gcWriteBarrier"),
			x86.REG_CX: typecheck.LookupRuntimeFunc("gcWriteBarrierCX"),
			x86.REG_DX: typecheck.LookupRuntimeFunc("gcWriteBarrierDX"),
			x86.REG_BX: typecheck.LookupRuntimeFunc("gcWriteBarrierBX"),
			x86.REG_BP: typecheck.LookupRuntimeFunc("gcWriteBarrierBP"),
			x86.REG_SI: typecheck.LookupRuntimeFunc("gcWriteBarrierSI"),
			x86.REG_R8: typecheck.LookupRuntimeFunc("gcWriteBarrierR8"),
			x86.REG_R9: typecheck.LookupRuntimeFunc("gcWriteBarrierR9"),
		}
	}
	if Arch.LinkArch.Family == sys.Wasm {
		BoundsCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeFunc("goPanicIndex")
		BoundsCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeFunc("goPanicIndexU")
		BoundsCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeFunc("goPanicSliceAlen")
		BoundsCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeFunc("goPanicSliceAlenU")
		BoundsCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeFunc("goPanicSliceAcap")
		BoundsCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeFunc("goPanicSliceAcapU")
		BoundsCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeFunc("goPanicSliceB")
		BoundsCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeFunc("goPanicSliceBU")
		BoundsCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeFunc("goPanicSlice3Alen")
		BoundsCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeFunc("goPanicSlice3AlenU")
		BoundsCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeFunc("goPanicSlice3Acap")
		BoundsCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeFunc("goPanicSlice3AcapU")
		BoundsCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeFunc("goPanicSlice3B")
		BoundsCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeFunc("goPanicSlice3BU")
		BoundsCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeFunc("goPanicSlice3C")
		BoundsCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeFunc("goPanicSlice3CU")
		BoundsCheckFunc[ssa.BoundsConvert] = typecheck.LookupRuntimeFunc("goPanicSliceConvert")
	} else {
		BoundsCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeFunc("panicIndex")
		BoundsCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeFunc("panicIndexU")
		BoundsCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeFunc("panicSliceAlen")
		BoundsCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeFunc("panicSliceAlenU")
		BoundsCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeFunc("panicSliceAcap")
		BoundsCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeFunc("panicSliceAcapU")
		BoundsCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeFunc("panicSliceB")
		BoundsCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeFunc("panicSliceBU")
		BoundsCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeFunc("panicSlice3Alen")
		BoundsCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeFunc("panicSlice3AlenU")
		BoundsCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeFunc("panicSlice3Acap")
		BoundsCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeFunc("panicSlice3AcapU")
		BoundsCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeFunc("panicSlice3B")
		BoundsCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeFunc("panicSlice3BU")
		BoundsCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeFunc("panicSlice3C")
		BoundsCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeFunc("panicSlice3CU")
		BoundsCheckFunc[ssa.BoundsConvert] = typecheck.LookupRuntimeFunc("panicSliceConvert")
	}
	if Arch.LinkArch.PtrSize == 4 {
		ExtendCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeVar("panicExtendIndex")
		ExtendCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeVar("panicExtendIndexU")
		ExtendCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeVar("panicExtendSliceAlen")
		ExtendCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeVar("panicExtendSliceAlenU")
		ExtendCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeVar("panicExtendSliceAcap")
		ExtendCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeVar("panicExtendSliceAcapU")
		ExtendCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeVar("panicExtendSliceB")
		ExtendCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeVar("panicExtendSliceBU")
		ExtendCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeVar("panicExtendSlice3Alen")
		ExtendCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeVar("panicExtendSlice3AlenU")
		ExtendCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeVar("panicExtendSlice3Acap")
		ExtendCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeVar("panicExtendSlice3AcapU")
		ExtendCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeVar("panicExtendSlice3B")
		ExtendCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeVar("panicExtendSlice3BU")
		ExtendCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeVar("panicExtendSlice3C")
		ExtendCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeVar("panicExtendSlice3CU")
	}
	// Wasm (all asm funcs with special ABIs)
	ir.Syms.WasmMove = typecheck.LookupRuntimeVar("wasmMove")
	ir.Syms.WasmZero = typecheck.LookupRuntimeVar("wasmZero")
	ir.Syms.WasmDiv = typecheck.LookupRuntimeVar("wasmDiv")
	ir.Syms.WasmTruncS = typecheck.LookupRuntimeVar("wasmTruncS")
	ir.Syms.WasmTruncU = typecheck.LookupRuntimeVar("wasmTruncU")
	ir.Syms.SigPanic = typecheck.LookupRuntimeFunc("sigpanic")
}
// AbiForBodylessFuncStackMap returns the ABI for a bodyless function's stack map.
// This is not necessarily the ABI used to call it.
// Currently (1.17 dev) such a stack map is always ABI0;
// any ABI wrapper that is present is nosplit, hence a precise
// stack map is not needed there (the parameters survive only long
// enough to call the wrapped assembly function).
// This always returns a freshly copied ABI.
func AbiForBodylessFuncStackMap(fn *ir.Func) *abi.ABIConfig {
	return ssaConfig.ABI0.Copy() // No idea what races will result, be safe
}
// These are disabled but remain ready for use in case they are needed for the next regabi port.
// TODO: if they are not needed for 1.18 / the next register ABI port, delete them.
const magicNameDotSuffix = ".*disabled*MagicMethodNameForTestingRegisterABI"
const magicLastTypeName = "*disabled*MagicLastTypeNameForTestingRegisterABI"
// abiForFunc implements ABI policy for a function, but does not return a copy of the ABI.
// Passing a nil function returns the default ABI based on experiment configuration.
func abiForFunc(fn *ir.Func, abi0, abi1 *abi.ABIConfig) *abi.ABIConfig {
	if buildcfg.Experiment.RegabiArgs {
		// Select the ABI based on the function's defining ABI.
		if fn == nil {
			return abi1
		}
		switch fn.ABI {
		case obj.ABI0:
			return abi0
		case obj.ABIInternal:
			// TODO(austin): Clean up the nomenclature here.
			// It's not clear that "abi1" is ABIInternal.
			return abi1
		}
		base.Fatalf("function %v has unknown ABI %v", fn, fn.ABI)
		panic("not reachable")
	}

	a := abi0
	if fn != nil {
		name := ir.FuncName(fn)
		magicName := strings.HasSuffix(name, magicNameDotSuffix)
		if fn.Pragma&ir.RegisterParams != 0 { // TODO(register args) remove after register abi is working
			if strings.Contains(name, ".") {
				base.ErrorfAt(fn.Pos(), "Calls to //go:registerparams method %s won't work, remove the pragma from the declaration.", name)
			}
			a = abi1
		} else if magicName {
			if base.FmtPos(fn.Pos()) == "<autogenerated>:1" {
				// There is no way to put a pragma here, and it will error out
				// in the real source code if they did not do it there.
				a = abi1
			} else {
				base.ErrorfAt(fn.Pos(), "Methods with magic name %s (method %s) must also specify //go:registerparams", magicNameDotSuffix[1:], name)
			}
		}
		if regAbiForFuncType(fn.Type().FuncType()) {
			// fmt.Printf("Saw magic last type name for function %s\n", name)
			a = abi1
		}
	}
	return a
}
// regAbiForFuncType reports whether the type of the last parameter requests
// the register ABI for testing.
func regAbiForFuncType(ft *types.Func) bool {
	np := ft.Params.NumFields()
	return np > 0 && strings.Contains(ft.Params.FieldType(np-1).String(), magicLastTypeName)
}
// dvarint writes a varint v to the funcdata in symbol x and returns the new offset.
func dvarint(x *obj.LSym, off int, v int64) int {
	if v < 0 || v > 1e9 {
		panic(fmt.Sprintf("dvarint: bad offset for funcdata - %v", v))
	}
	if v < 1<<7 {
		return objw.Uint8(x, off, uint8(v))
	}
	off = objw.Uint8(x, off, uint8((v&127)|128))
	if v < 1<<14 {
		return objw.Uint8(x, off, uint8(v>>7))
	}
	off = objw.Uint8(x, off, uint8(((v>>7)&127)|128))
	if v < 1<<21 {
		return objw.Uint8(x, off, uint8(v>>14))
	}
	off = objw.Uint8(x, off, uint8(((v>>14)&127)|128))
	if v < 1<<28 {
		return objw.Uint8(x, off, uint8(v>>21))
	}
	off = objw.Uint8(x, off, uint8(((v>>21)&127)|128))
	return objw.Uint8(x, off, uint8(v>>28))
}
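
// The bytes emitted above form a standard little-endian base-128 varint:
// seven value bits per byte, least-significant group first, high bit set on
// every byte but the last. The sketch below is hypothetical and not used by
// the compiler (the runtime has its own reader); it shows how such a value
// is decoded, equivalent to encoding/binary.Uvarint for this value range.
func exampleReadVarint(data []byte) (v int64, n int) {
	for shift := uint(0); n < len(data); shift += 7 {
		b := data[n]
		n++
		v |= int64(b&127) << shift
		if b&128 == 0 {
			return v, n // high bit clear: this was the final byte
		}
	}
	return 0, 0 // truncated input
}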
// emitOpenDeferInfo emits FUNCDATA information about the defers in a function
// that is using open-coded defers. This funcdata is used to determine the active
// defers in a function and execute those defers during panic processing.
//
// The funcdata is all encoded in varints (since values will almost always be less than
// 128, but stack offsets could potentially be up to 2GB). All "locations" (offsets)
// for stack variables are specified as the number of bytes below varp (pointer to the
// top of the local variables) for their starting address. The format is:
//
//   - Offset of the deferBits variable
//   - Number of defers in the function
//   - Information about each defer call, in reverse order of appearance in the function:
//     - Offset of the closure value to call
func (s *state) emitOpenDeferInfo() {
	x := base.Ctxt.Lookup(s.curfn.LSym.Name + ".opendefer")
	s.curfn.LSym.Func().OpenCodedDeferInfo = x
	off := 0
	off = dvarint(x, off, -s.deferBitsTemp.FrameOffset())
	off = dvarint(x, off, int64(len(s.openDefers)))

	// Write in reverse order, for ease of running in that order at runtime.
	for i := len(s.openDefers) - 1; i >= 0; i-- {
		r := s.openDefers[i]
		off = dvarint(x, off, -r.closureNode.FrameOffset())
	}
}
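
// As a worked example (all offsets are made up for illustration): a function
// with two open-coded defers whose deferBits slot is 8 bytes below varp and
// whose closure slots are 24 and 16 bytes below varp would emit the varint
// sequence 8, 2, 24, 16, one byte each, since every value here is below 128.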
// okOffset validates that offset is a real frame offset and returns it.
func okOffset(offset int64) int64 {
	if offset == types.BOGUS_FUNARG_OFFSET {
		panic(fmt.Errorf("bogus offset %d", offset))
	}
	return offset
}
// buildssa builds an SSA function for fn.
// worker indicates which of the backend workers is doing the processing.
func buildssa(fn *ir.Func, worker int) *ssa.Func {
	name := ir.FuncName(fn)
	printssa := false
	// Match either a simple name, e.g. "(*Reader).Reset", a package.name,
	// e.g. "compress/gzip.(*Reader).Reset", or a subpackage name, e.g.
	// "gzip.(*Reader).Reset".
	if ssaDump != "" {
		pkgDotName := base.Ctxt.Pkgpath + "." + name
		printssa = name == ssaDump ||
			strings.HasSuffix(pkgDotName, ssaDump) && (pkgDotName == ssaDump || strings.HasSuffix(pkgDotName, "/"+ssaDump))
	}
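	// For example (names taken from the comment above): while compiling
	// compress/gzip, GOSSAFUNC="(*Reader).Reset" matches via the simple-name
	// case, GOSSAFUNC="gzip.(*Reader).Reset" matches via the "/"+ssaDump
	// suffix case, and the fully qualified "compress/gzip.(*Reader).Reset"
	// matches pkgDotName exactly.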
	var astBuf *bytes.Buffer
	if printssa {
		astBuf = &bytes.Buffer{}
		ir.FDumpList(astBuf, "buildssa-enter", fn.Enter)
		ir.FDumpList(astBuf, "buildssa-body", fn.Body)
		ir.FDumpList(astBuf, "buildssa-exit", fn.Exit)
		if ssaDumpStdout {
			fmt.Println("generating SSA for", name)
			fmt.Print(astBuf.String())
		}
	}
	var s state
	s.pushLine(fn.Pos())
	defer s.popLine()

	s.hasdefer = fn.HasDefer()
	if fn.Pragma&ir.CgoUnsafeArgs != 0 {
		s.cgoUnsafeArgs = true
	}

	fe := ssafn{
		curfn: fn,
		log:   printssa && ssaDumpStdout,
	}
	s.curfn = fn

	s.f = ssa.NewFunc(&fe)
	s.f.Type = fn.Type()
	s.config = ssaConfig
	s.f.Config = ssaConfig
	s.f.Cache = &ssaCaches[worker]
	s.f.Cache.Reset()
	s.f.Name = name
	s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH")
	s.f.PrintOrHtmlSSA = printssa
	if fn.Pragma&ir.Nosplit != 0 {
		s.f.NoSplit = true
	}
	s.f.ABI0 = ssaConfig.ABI0.Copy() // Make a copy to avoid racy map operations in type-register-width cache.
	s.f.ABI1 = ssaConfig.ABI1.Copy()
	s.f.ABIDefault = abiForFunc(nil, s.f.ABI0, s.f.ABI1)
	s.f.ABISelf = abiForFunc(fn, s.f.ABI0, s.f.ABI1)

	s.panics = map[funcLine]*ssa.Block{}
	s.softFloat = s.config.SoftFloat
	// Allocate starting block
	s.f.Entry = s.f.NewBlock(ssa.BlockPlain)
	s.f.Entry.Pos = fn.Pos()
	if printssa {
		ssaDF := ssaDumpFile
		if ssaDir != "" {
			ssaDF = filepath.Join(ssaDir, base.Ctxt.Pkgpath+"."+name+".html")
			ssaD := filepath.Dir(ssaDF)
			os.MkdirAll(ssaD, 0755)
		}
		s.f.HTMLWriter = ssa.NewHTMLWriter(ssaDF, s.f, ssaDumpCFG)
		// TODO: generate and print a mapping from nodes to values and blocks
		dumpSourcesColumn(s.f.HTMLWriter, fn)
		s.f.HTMLWriter.WriteAST("AST", astBuf)
	}
	// Allocate starting values
	s.labels = map[string]*ssaLabel{}
	s.fwdVars = map[ir.Node]*ssa.Value{}
	s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)

	s.hasOpenDefers = base.Flag.N == 0 && s.hasdefer && !s.curfn.OpenCodedDeferDisallowed()
	switch {
	case base.Debug.NoOpenDefer != 0:
		s.hasOpenDefers = false
	case s.hasOpenDefers && (base.Ctxt.Flag_shared || base.Ctxt.Flag_dynlink) && base.Ctxt.Arch.Name == "386":
		// Don't support open-coded defers for 386 ONLY when using shared
		// libraries, because there is extra code (added by rewriteToUseGot())
		// preceding the deferreturn/ret code that we don't track correctly.
		s.hasOpenDefers = false
	}
	if s.hasOpenDefers && len(s.curfn.Exit) > 0 {
		// Skip doing open defers if there is any extra exit code (likely
		// race detection), since we will not generate that code in the
		// case of the extra deferreturn/ret segment.
		s.hasOpenDefers = false
	}
	if s.hasOpenDefers {
		// Similarly, skip if there are any heap-allocated result
		// parameters that need to be copied back to their stack slots.
		for _, f := range s.curfn.Type().Results().FieldSlice() {
			if !f.Nname.(*ir.Name).OnStack() {
				s.hasOpenDefers = false
				break
			}
		}
	}
	if s.hasOpenDefers &&
		s.curfn.NumReturns*s.curfn.NumDefers > 15 {
		// Since we are generating defer calls at every exit for
		// open-coded defers, skip doing open-coded defers if there are
		// too many returns (especially if there are multiple defers).
		// Open-coded defers are most important for improving performance
		// for smaller functions (which don't have many returns).
		s.hasOpenDefers = false
	}
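	// For example (illustrative counts): a function with 4 returns and 4
	// defers (4*4 = 16 > 15) falls back to runtime-managed defers, while one
	// with 5 returns and a single defer (5*1 = 5) keeps the open-coded path.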
	s.sp = s.entryNewValue0(ssa.OpSP, types.Types[types.TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
	s.sb = s.entryNewValue0(ssa.OpSB, types.Types[types.TUINTPTR])

	s.startBlock(s.f.Entry)
	s.vars[memVar] = s.startmem
	if s.hasOpenDefers {
		// Create the deferBits variable and stack slot. deferBits is a
		// bitmask showing which of the open-coded defers in this function
		// have been activated.
		deferBitsTemp := typecheck.TempAt(src.NoXPos, s.curfn, types.Types[types.TUINT8])
		deferBitsTemp.SetAddrtaken(true)
		s.deferBitsTemp = deferBitsTemp
		// For this value, AuxInt is initialized to zero by default.
		startDeferBits := s.entryNewValue0(ssa.OpConst8, types.Types[types.TUINT8])
		s.vars[deferBitsVar] = startDeferBits
		s.deferBitsAddr = s.addr(deferBitsTemp)
		s.store(types.Types[types.TUINT8], s.deferBitsAddr, startDeferBits)
		// Make sure that the deferBits stack slot is kept alive (for use
		// by panics) and stores to deferBits are not eliminated, even if
		// all checking code on deferBits in the function exit can be
		// eliminated, because the defer statements were all
		// unconditional.
		s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, deferBitsTemp, s.mem(), false)
	}
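	// To illustrate the bitmask (a sketch of the invariant, not code from
	// here): if a function has three open-coded defers and only the first
	// two defer statements have executed when a panic occurs, deferBits
	// holds 0b011, so panic processing runs defer 2, then defer 1, and
	// skips defer 3.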
	var params *abi.ABIParamResultInfo
	params = s.f.ABISelf.ABIAnalyze(fn.Type(), true)
	// Generate addresses of local declarations.
	s.decladdrs = map[*ir.Name]*ssa.Value{}
	for _, n := range fn.Dcl {
		switch n.Class {
		case ir.PPARAM:
			// Be aware that blank and unnamed input parameters will not appear here, but do appear in the type.
			s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem)
		case ir.PPARAMOUT:
			s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem)
		case ir.PAUTO:
			// processed at each use, to prevent Addr coming
			// before the decl.
		default:
			s.Fatalf("local variable with class %v unimplemented", n.Class)
		}
	}
	s.f.OwnAux = ssa.OwnAuxCall(fn.LSym, params)
	// Populate SSAable arguments.
	for _, n := range fn.Dcl {
		if n.Class == ir.PPARAM {
			if s.canSSA(n) {
				v := s.newValue0A(ssa.OpArg, n.Type(), n)
				s.vars[n] = v
				s.addNamedValue(n, v) // This helps with debugging information, not needed for compilation itself.
			} else { // address was taken AND/OR too large for SSA
				paramAssignment := ssa.ParamAssignmentForArgName(s.f, n)
				if len(paramAssignment.Registers) > 0 {
					if TypeOK(n.Type()) { // SSA-able type, so address was taken -- receive value in OpArg, DO NOT bind to var, store immediately to memory.
						v := s.newValue0A(ssa.OpArg, n.Type(), n)
						s.store(n.Type(), s.decladdrs[n], v)
					} else { // Too big for SSA.
						// Brute force, and early, do a bunch of stores from registers.
						// TODO: fix the nasty storeArgOrLoad recursion in ssa/expand_calls.go so this Just Works with store of a big Arg.
						s.storeParameterRegsToStack(s.f.ABISelf, paramAssignment, n, s.decladdrs[n], false)
					}
				}
			}
		}
	}
	// Populate closure variables.
	if fn.Needctxt() {
		clo := s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr)
		offset := int64(types.PtrSize) // PtrSize to skip past function entry PC field
		for _, n := range fn.ClosureVars {
			typ := n.Type()
			if !n.Byval() {
				typ = types.NewPtr(typ)
			}

			offset = types.Rnd(offset, typ.Alignment())
			ptr := s.newValue1I(ssa.OpOffPtr, types.NewPtr(typ), offset, clo)
			offset += typ.Size()

			// If n is a small variable captured by value, promote
			// it to PAUTO so it can be converted to SSA.
			//
			// Note: While we never capture a variable by value if
			// the user took its address, we may have generated
			// runtime calls that did (#43701). Since we don't
			// convert Addrtaken variables to SSA anyway, no point
			// in promoting them either.
			if n.Byval() && !n.Addrtaken() && TypeOK(n.Type()) {
				n.Class = ir.PAUTO
				fn.Dcl = append(fn.Dcl, n)
				s.assign(n, s.load(n.Type(), ptr), false, 0)
				continue
			}

			if !n.Byval() {
				ptr = s.load(typ, ptr)
			}
			s.setHeapaddr(fn.Pos(), n, ptr)
		}
	}
	// Convert the AST-based IR to the SSA-based IR.
	s.stmtList(fn.Enter)
	s.zeroResults()
	s.paramsToHeap()
	s.stmtList(fn.Body)

	// fallthrough to exit
	if s.curBlock != nil {
		s.pushLine(fn.Endlineno)
		s.exit()
		s.popLine()
	}
	for _, b := range s.f.Blocks {
		if b.Pos != src.NoXPos {
			s.updateUnsetPredPos(b)
		}
	}

	s.f.HTMLWriter.WritePhase("before insert phis", "before insert phis")

	s.insertPhis()

	// Main call to ssa package to compile function.
	ssa.Compile(s.f)

	if s.hasOpenDefers {
		s.emitOpenDeferInfo()
	}
	// Record incoming parameter spill information for morestack calls emitted in the assembler.
	// This is done here, using all the parameters (used, partially used, and unused) because
	// it mimics the behavior of the former ABI (everything stored) and because it's not 100%
	// clear if naming conventions are respected in autogenerated code.
	// TODO: figure out exactly what's unused, and don't spill it. Make liveness fine-grained, also.
	for _, p := range params.InParams() {
		typs, offs := p.RegisterTypesAndOffsets()
		for i, t := range typs {
			o := offs[i]                // offset within parameter
			fo := p.FrameOffset(params) // offset of parameter in frame
			reg := ssa.ObjRegForAbiReg(p.Registers[i], s.f.Config)
			s.f.RegArgs = append(s.f.RegArgs, ssa.Spill{Reg: reg, Offset: fo + o, Type: t})
		}
	}

	return s.f
}
// storeParameterRegsToStack stores the registers assigned to parameter n into
// its stack slot at addr. If pointersOnly is set, only the pointer-shaped
// pieces are stored.
func (s *state) storeParameterRegsToStack(abi *abi.ABIConfig, paramAssignment *abi.ABIParamAssignment, n *ir.Name, addr *ssa.Value, pointersOnly bool) {
	typs, offs := paramAssignment.RegisterTypesAndOffsets()
	for i, t := range typs {
		if pointersOnly && !t.IsPtrShaped() {
			continue
		}
		r := paramAssignment.Registers[i]
		o := offs[i]
		op, reg := ssa.ArgOpAndRegisterFor(r, abi)
		aux := &ssa.AuxNameOffset{Name: n, Offset: o}
		v := s.newValue0I(op, t, reg)
		v.Aux = aux
		p := s.newValue1I(ssa.OpOffPtr, types.NewPtr(t), o, addr)
		s.store(t, p, v)
	}
}
// zeroResults zeros the return values at the start of the function.
// We need to do this very early in the function. Defer might stop a
// panic and show the return values as they exist at the time of
// panic. For precise stacks, the garbage collector assumes results
// are always live, so we need to zero them before any allocations,
// even allocations to move params/results to the heap.
func (s *state) zeroResults() {
	for _, f := range s.curfn.Type().Results().FieldSlice() {
		n := f.Nname.(*ir.Name)
		if !n.OnStack() {
			// The local which points to the return value is the
			// thing that needs zeroing. This is already handled
			// by a Needzero annotation in plive.go:(*liveness).epilogue.
			continue
		}
		// Zero the stack location containing f.
		if typ := n.Type(); TypeOK(typ) {
			s.assign(n, s.zeroVal(typ), false, 0)
		} else {
			s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
			s.zero(n.Type(), s.decladdrs[n])
		}
	}
}
// paramsToHeap produces code to allocate memory for heap-escaped parameters
// and to copy non-result parameters' values from the stack.
func (s *state) paramsToHeap() {
	do := func(params *types.Type) {
		for _, f := range params.FieldSlice() {
			if f.Nname == nil {
				continue // anonymous or blank parameter
			}
			n := f.Nname.(*ir.Name)
			if ir.IsBlank(n) || n.OnStack() {
				continue
			}
			s.newHeapaddr(n)
			if n.Class == ir.PPARAM {
				s.move(n.Type(), s.expr(n.Heapaddr), s.decladdrs[n])
			}
		}
	}

	typ := s.curfn.Type()
	do(typ.Recvs())
	do(typ.Params())
	do(typ.Results())
}
// newHeapaddr allocates heap memory for n and sets its heap address.
func (s *state) newHeapaddr(n *ir.Name) {
	s.setHeapaddr(n.Pos(), n, s.newObject(n.Type()))
}
// setHeapaddr allocates a new PAUTO variable to store ptr (which must be non-nil)
// and then sets it as n's heap address.
func (s *state) setHeapaddr(pos src.XPos, n *ir.Name, ptr *ssa.Value) {
	if !ptr.Type.IsPtr() || !types.Identical(n.Type(), ptr.Type.Elem()) {
		base.FatalfAt(n.Pos(), "setHeapaddr %L with type %v", n, ptr.Type)
	}

	// Declare variable to hold address.
	addr := ir.NewNameAt(pos, &types.Sym{Name: "&" + n.Sym().Name, Pkg: types.LocalPkg})
	addr.SetType(types.NewPtr(n.Type()))
	addr.Class = ir.PAUTO
	addr.SetUsed(true)
	addr.Curfn = s.curfn
	s.curfn.Dcl = append(s.curfn.Dcl, addr)
	types.CalcSize(addr.Type())

	if n.Class == ir.PPARAMOUT {
		addr.SetIsOutputParamHeapAddr(true)
	}

	n.Heapaddr = addr
	s.assign(addr, ptr, false, 0)
}
// newObject returns an SSA value denoting new(typ).
func (s *state) newObject(typ *types.Type) *ssa.Value {
	if typ.Size() == 0 {
		return s.newValue1A(ssa.OpAddr, types.NewPtr(typ), ir.Syms.Zerobase, s.sb)
	}
	return s.rtcall(ir.Syms.Newobject, true, []*types.Type{types.NewPtr(typ)}, s.reflectType(typ))[0]
}
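
// For instance (illustrative types), new(struct{}) and new([0]int) have size
// zero, so the fast path above yields the address of runtime.zerobase, the
// shared zero-size allocation, instead of calling runtime.newobject.
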
// reflectType returns an SSA value representing a pointer to typ's
// reflection type descriptor.
func (s *state) reflectType(typ *types.Type) *ssa.Value {
	lsym := reflectdata.TypeLinksym(typ)
	return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(types.Types[types.TUINT8]), lsym, s.sb)
}
func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *ir.Func) {
	// Read sources of target function fn.
	fname := base.Ctxt.PosTable.Pos(fn.Pos()).Filename()
	targetFn, err := readFuncLines(fname, fn.Pos().Line(), fn.Endlineno.Line())
	if err != nil {
		writer.Logf("cannot read sources for function %v: %v", fn, err)
	}

	// Read sources of inlined functions.
	var inlFns []*ssa.FuncLines
	for _, fi := range ssaDumpInlined {
		elno := fi.Endlineno
		fname := base.Ctxt.PosTable.Pos(fi.Pos()).Filename()
		fnLines, err := readFuncLines(fname, fi.Pos().Line(), elno.Line())
		if err != nil {
			writer.Logf("cannot read sources for inlined function %v: %v", fi, err)
			continue
		}
		inlFns = append(inlFns, fnLines)
	}

	sort.Sort(ssa.ByTopo(inlFns))
	if targetFn != nil {
		inlFns = append([]*ssa.FuncLines{targetFn}, inlFns...)
	}

	writer.WriteSources("sources", inlFns)
}
func readFuncLines(file string, start, end uint) (*ssa.FuncLines, error) {
	f, err := os.Open(os.ExpandEnv(file))
	if err != nil {
		return nil, err
	}
	defer f.Close()
	var lines []string
	ln := uint(1)
	scanner := bufio.NewScanner(f)
	for scanner.Scan() && ln <= end {
		if ln >= start {
			lines = append(lines, scanner.Text())
		}
		ln++
	}
	return &ssa.FuncLines{Filename: file, StartLineno: start, Lines: lines}, nil
}
// updateUnsetPredPos propagates the earliest-value position information for b
// towards all of b's predecessors that need a position, and recurses on each
// such predecessor if its position is updated. b should have a non-empty position.
func (s *state) updateUnsetPredPos(b *ssa.Block) {
	if b.Pos == src.NoXPos {
		s.Fatalf("Block %s should have a position", b)
	}
	bestPos := src.NoXPos
	for _, e := range b.Preds {
		p := e.Block()
		if !p.LackingPos() {
			continue
		}
		if bestPos == src.NoXPos {
			bestPos = b.Pos
			for _, v := range b.Values {
				if v.LackingPos() {
					continue
				}
				if v.Pos != src.NoXPos {
					// Assume values are still in roughly textual order;
					// TODO: could also seek minimum position?
					bestPos = v.Pos
					break
				}
			}
		}
		p.Pos = bestPos
		s.updateUnsetPredPos(p) // We do not expect long chains of these, thus recursion is okay.
	}
}
// openDeferInfo holds information about each open-coded defer.
type openDeferInfo struct {
	// The node representing the call of the defer.
	n *ir.CallExpr
	// If the defer call is a closure call, the address of the argtmp where
	// the closure is stored.
	closure *ssa.Value
	// The node representing the argtmp where the closure is stored - used for
	// function, method, or interface call, to store a closure that panic
	// processing can use for this defer.
	closureNode *ir.Name
}
type state struct {
	// configuration (arch) information
	config *ssa.Config

	// function we're building
	f *ssa.Func

	// Node for function
	curfn *ir.Func

	// labels in f
	labels map[string]*ssaLabel

	// unlabeled break and continue statement tracking
	breakTo    *ssa.Block // current target for plain break statement
	continueTo *ssa.Block // current target for plain continue statement

	// current location where we're interpreting the AST
	curBlock *ssa.Block

	// variable assignments in the current block (map from variable symbol to ssa value)
	// *Node is the unique identifier (an ONAME Node) for the variable.
	// TODO: keep a single varnum map, then make all of these maps slices instead?
	vars map[ir.Node]*ssa.Value

	// fwdVars are variables that are used before they are defined in the current block.
	// This map exists just to coalesce multiple references into a single FwdRef op.
	// *Node is the unique identifier (an ONAME Node) for the variable.
	fwdVars map[ir.Node]*ssa.Value

	// all defined variables at the end of each block. Indexed by block ID.
	defvars []map[ir.Node]*ssa.Value

	// addresses of PPARAM and PPARAMOUT variables on the stack.
	decladdrs map[*ir.Name]*ssa.Value

	// starting values. Memory, stack pointer, and globals pointer
	startmem *ssa.Value
	sp       *ssa.Value
	sb       *ssa.Value
	// value representing address of where deferBits autotmp is stored
	deferBitsAddr *ssa.Value
	deferBitsTemp *ir.Name

	// line number stack. The current line number is top of stack
	line []src.XPos
	// the last line number processed; it may have been popped
	lastPos src.XPos

	// list of panic calls by function name and line number.
	// Used to deduplicate panic calls.
	panics map[funcLine]*ssa.Block

	cgoUnsafeArgs bool
	hasdefer      bool // whether the function contains a defer statement
	softFloat     bool
	hasOpenDefers bool // whether we are doing open-coded defers

	// If doing open-coded defers, list of info about the defer calls in
	// scanning order. Hence, at exit we should run these defers in reverse
	// order of this list.
	openDefers []*openDeferInfo
	// For open-coded defers, this is the beginning and end blocks of the last
	// defer exit code that we have generated so far. We use these to share
	// code between exits if the shareDeferExits option (disabled by default)
	// is on.
	lastDeferExit       *ssa.Block // Entry block of last defer exit code we generated
	lastDeferFinalBlock *ssa.Block // Final block of last defer exit code we generated
	lastDeferCount      int        // Number of defers encountered at that point

	prevCall *ssa.Value // the previous call; use this to tie results to the call op.
}
type funcLine struct {
	f    *obj.LSym
	base *src.PosBase
	line uint
}
type ssaLabel struct {
	target         *ssa.Block // block identified by this label
	breakTarget    *ssa.Block // block to break to in control flow node identified by this label
	continueTarget *ssa.Block // block to continue to in control flow node identified by this label
}
// label returns the label associated with sym, creating it if necessary.
func (s *state) label(sym *types.Sym) *ssaLabel {
	lab := s.labels[sym.Name]
	if lab == nil {
		lab = new(ssaLabel)
		s.labels[sym.Name] = lab
	}
	return lab
}
func (s *state) Logf(msg string, args ...interface{}) { s.f.Logf(msg, args...) }
func (s *state) Log() bool                            { return s.f.Log() }
func (s *state) Fatalf(msg string, args ...interface{}) {
	s.f.Frontend().Fatalf(s.peekPos(), msg, args...)
}
func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) }
func (s *state) Debug_checknil() bool                                { return s.f.Frontend().Debug_checknil() }
// ssaMarker returns a synthetic ONAME node used as a marker key in s.vars.
func ssaMarker(name string) *ir.Name {
	return typecheck.NewName(&types.Sym{Name: name})
}
var (
	// marker node for the memory variable
	memVar = ssaMarker("mem")

	// marker nodes for temporary variables
	ptrVar       = ssaMarker("ptr")
	lenVar       = ssaMarker("len")
	newlenVar    = ssaMarker("newlen")
	capVar       = ssaMarker("cap")
	typVar       = ssaMarker("typ")
	okVar        = ssaMarker("ok")
	deferBitsVar = ssaMarker("deferBits")
)
// startBlock sets the current block we're generating code in to b.
func (s *state) startBlock(b *ssa.Block) {
	if s.curBlock != nil {
		s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
	}
	s.curBlock = b
	s.vars = map[ir.Node]*ssa.Value{}
	for n := range s.fwdVars {
		delete(s.fwdVars, n)
	}
}
// endBlock marks the end of generating code for the current block.
// Returns the (former) current block. Returns nil if there is no current
// block, i.e. if no code flows to the current execution point.
func (s *state) endBlock() *ssa.Block {
	b := s.curBlock
	if b == nil {
		return nil
	}
	for len(s.defvars) <= int(b.ID) {
		s.defvars = append(s.defvars, nil)
	}
	s.defvars[b.ID] = s.vars
	s.curBlock = nil
	s.vars = nil
	if b.LackingPos() {
		// Empty plain blocks get the line of their successor (handled after all blocks created),
		// except for increment blocks in For statements (handled in ssa conversion of OFOR),
		// and for blocks ending in GOTO/BREAK/CONTINUE.
		b.Pos = src.NoXPos
	} else {
		b.Pos = s.lastPos
	}
	return b
}
// pushLine pushes a line number on the line number stack.
func (s *state) pushLine(line src.XPos) {
	if !line.IsKnown() {
		// the frontend may emit a node with a missing line number;
		// use the parent line number in this case.
		line = s.peekPos()
		if base.Flag.K != 0 {
			base.Warn("buildssa: unknown position (line 0)")
		}
	} else {
		s.lastPos = line
	}
	s.line = append(s.line, line)
}
// popLine pops the top of the line number stack.
func (s *state) popLine() {
	s.line = s.line[:len(s.line)-1]
}

// peekPos peeks the top of the line number stack.
func (s *state) peekPos() src.XPos {
	return s.line[len(s.line)-1]
}
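
// The stack is used in a push/defer-pop pattern around each converted node,
// e.g. (a sketch of the callers elsewhere in this file):
//
//	s.pushLine(fn.Pos())
//	defer s.popLine()
//
// so that values created while converting that node pick up its position
// via peekPos.
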
// newValue0 adds a new value with no arguments to the current block.
func (s *state) newValue0(op ssa.Op, t *types.Type) *ssa.Value {
	return s.curBlock.NewValue0(s.peekPos(), op, t)
}

// newValue0A adds a new value with no arguments and an aux value to the current block.
func (s *state) newValue0A(op ssa.Op, t *types.Type, aux ssa.Aux) *ssa.Value {
	return s.curBlock.NewValue0A(s.peekPos(), op, t, aux)
}

// newValue0I adds a new value with no arguments and an auxint value to the current block.
func (s *state) newValue0I(op ssa.Op, t *types.Type, auxint int64) *ssa.Value {
	return s.curBlock.NewValue0I(s.peekPos(), op, t, auxint)
}

// newValue1 adds a new value with one argument to the current block.
func (s *state) newValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1(s.peekPos(), op, t, arg)
}

// newValue1A adds a new value with one argument and an aux value to the current block.
func (s *state) newValue1A(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
}
// newValue1Apos adds a new value with one argument and an aux value to the current block.
// isStmt determines whether the created values may be a statement or not
// (i.e., false means never, true means maybe).
func (s *state) newValue1Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value, isStmt bool) *ssa.Value {
	if isStmt {
		return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
	}
	return s.curBlock.NewValue1A(s.peekPos().WithNotStmt(), op, t, aux, arg)
}
// newValue1I adds a new value with one argument and an auxint value to the current block.
func (s *state) newValue1I(op ssa.Op, t *types.Type, aux int64, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1I(s.peekPos(), op, t, aux, arg)
}

// newValue2 adds a new value with two arguments to the current block.
func (s *state) newValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2(s.peekPos(), op, t, arg0, arg1)
}

// newValue2A adds a new value with two arguments and an aux value to the current block.
func (s *state) newValue2A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1)
}
// newValue2Apos adds a new value with two arguments and an aux value to the current block.
// isStmt determines whether the created values may be a statement or not
// (i.e., false means never, true means maybe).
func (s *state) newValue2Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value, isStmt bool) *ssa.Value {
	if isStmt {
		return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1)
	}
	return s.curBlock.NewValue2A(s.peekPos().WithNotStmt(), op, t, aux, arg0, arg1)
}
// newValue2I adds a new value with two arguments and an auxint value to the current block.
func (s *state) newValue2I(op ssa.Op, t *types.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2I(s.peekPos(), op, t, aux, arg0, arg1)
}

// newValue3 adds a new value with three arguments to the current block.
func (s *state) newValue3(op ssa.Op, t *types.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3(s.peekPos(), op, t, arg0, arg1, arg2)
}

// newValue3I adds a new value with three arguments and an auxint value to the current block.
func (s *state) newValue3I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3I(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}

// newValue3A adds a new value with three arguments and an aux value to the current block.
func (s *state) newValue3A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}
// newValue3Apos adds a new value with three arguments and an aux value to the current block.
// isStmt determines whether the created values may be a statement or not
// (i.e., false means never, true means maybe).
func (s *state) newValue3Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1, arg2 *ssa.Value, isStmt bool) *ssa.Value {
	if isStmt {
		return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
	}
	return s.curBlock.NewValue3A(s.peekPos().WithNotStmt(), op, t, aux, arg0, arg1, arg2)
}
// newValue4 adds a new value with four arguments to the current block.
func (s *state) newValue4(op ssa.Op, t *types.Type, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue4(s.peekPos(), op, t, arg0, arg1, arg2, arg3)
}

// newValue4I adds a new value with four arguments and an auxint value to the current block.
func (s *state) newValue4I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue4I(s.peekPos(), op, t, aux, arg0, arg1, arg2, arg3)
}
// entryBlock returns the block to add entry-time values to.
func (s *state) entryBlock() *ssa.Block {
	b := s.f.Entry
	if base.Flag.N > 0 && s.curBlock != nil {
		// If optimizations are off, allocate in current block instead. Since with -N
		// we're not doing the CSE or tighten passes, putting lots of stuff in the
		// entry block leads to O(n^2) entries in the live value map during regalloc.
		b = s.curBlock
	}
	return b
}
// entryNewValue0 adds a new value with no arguments to the entry block.
func (s *state) entryNewValue0(op ssa.Op, t *types.Type) *ssa.Value {
	return s.entryBlock().NewValue0(src.NoXPos, op, t)
}

// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
func (s *state) entryNewValue0A(op ssa.Op, t *types.Type, aux ssa.Aux) *ssa.Value {
	return s.entryBlock().NewValue0A(src.NoXPos, op, t, aux)
}

// entryNewValue1 adds a new value with one argument to the entry block.
func (s *state) entryNewValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
	return s.entryBlock().NewValue1(src.NoXPos, op, t, arg)
}

// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
func (s *state) entryNewValue1I(op ssa.Op, t *types.Type, auxint int64, arg *ssa.Value) *ssa.Value {
	return s.entryBlock().NewValue1I(src.NoXPos, op, t, auxint, arg)
}

// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
func (s *state) entryNewValue1A(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value) *ssa.Value {
	return s.entryBlock().NewValue1A(src.NoXPos, op, t, aux, arg)
}

// entryNewValue2 adds a new value with two arguments to the entry block.
func (s *state) entryNewValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.entryBlock().NewValue2(src.NoXPos, op, t, arg0, arg1)
}

// entryNewValue2A adds a new value with two arguments and an aux value to the entry block.
func (s *state) entryNewValue2A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.entryBlock().NewValue2A(src.NoXPos, op, t, aux, arg0, arg1)
}
// const* routines add a new const value to the entry block.
func (s *state) constSlice(t *types.Type) *ssa.Value {
	return s.f.ConstSlice(t)
}
func (s *state) constInterface(t *types.Type) *ssa.Value {
	return s.f.ConstInterface(t)
}
func (s *state) constNil(t *types.Type) *ssa.Value { return s.f.ConstNil(t) }
func (s *state) constEmptyString(t *types.Type) *ssa.Value {
	return s.f.ConstEmptyString(t)
}
func (s *state) constBool(c bool) *ssa.Value {
	return s.f.ConstBool(types.Types[types.TBOOL], c)
}
func (s *state) constInt8(t *types.Type, c int8) *ssa.Value {
	return s.f.ConstInt8(t, c)
}
func (s *state) constInt16(t *types.Type, c int16) *ssa.Value {
	return s.f.ConstInt16(t, c)
}
func (s *state) constInt32(t *types.Type, c int32) *ssa.Value {
	return s.f.ConstInt32(t, c)
}
func (s *state) constInt64(t *types.Type, c int64) *ssa.Value {
	return s.f.ConstInt64(t, c)
}
func (s *state) constFloat32(t *types.Type, c float64) *ssa.Value {
	return s.f.ConstFloat32(t, c)
}
func (s *state) constFloat64(t *types.Type, c float64) *ssa.Value {
	return s.f.ConstFloat64(t, c)
}
func (s *state) constInt(t *types.Type, c int64) *ssa.Value {
	if s.config.PtrSize == 8 {
		return s.constInt64(t, c)
	}
	if int64(int32(c)) != c {
		s.Fatalf("integer constant too big %d", c)
	}
	return s.constInt32(t, int32(c))
}
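
// For example (illustrative values): on a 4-byte-pointer target,
// s.constInt(types.Types[types.TINT], 1<<40) trips the Fatalf above, since
// the value cannot be represented as an int32 constant, while any value that
// fits in 32 bits is emitted via constInt32.
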
func (s *state) constOffPtrSP(t *types.Type, c int64) *ssa.Value {
	return s.f.ConstOffPtrSP(t, c, s.sp)
}
// newValueOrSfCall* are wrappers around newValue*, which may create a call to a
// soft-float runtime function instead (when emitting soft-float code).
func (s *state) newValueOrSfCall1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
	if s.softFloat {
		if c, ok := s.sfcall(op, arg); ok {
			return c
		}
	}
	return s.newValue1(op, t, arg)
}
func (s *state) newValueOrSfCall2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	if s.softFloat {
		if c, ok := s.sfcall(op, arg0, arg1); ok {
			return c
		}
	}
	return s.newValue2(op, t, arg0, arg1)
}
type instrumentKind uint8

const (
	instrumentRead = iota
	instrumentWrite
	instrumentMove
)

func (s *state) instrument(t *types.Type, addr *ssa.Value, kind instrumentKind) {
	s.instrument2(t, addr, nil, kind)
}
// instrumentFields instruments a read/write operation on addr.
// If it is instrumenting for MSAN and t is a struct type, it instruments
// the operation for each field, instead of for the whole struct.
func (s *state) instrumentFields(t *types.Type, addr *ssa.Value, kind instrumentKind) {
	if !base.Flag.MSan || !t.IsStruct() {
		s.instrument(t, addr, kind)
		return
	}
	for _, f := range t.Fields().Slice() {
		if f.Sym.IsBlank() {
			continue
		}
		offptr := s.newValue1I(ssa.OpOffPtr, types.NewPtr(f.Type), f.Offset, addr)
		s.instrumentFields(f.Type, offptr, kind)
	}
}
func (s *state) instrumentMove(t *types.Type, dst, src *ssa.Value) {
	if base.Flag.MSan {
		s.instrument2(t, dst, src, instrumentMove)
	} else {
		s.instrument(t, src, instrumentRead)
		s.instrument(t, dst, instrumentWrite)
	}
}
func (s *state) instrument2(t *types.Type, addr, addr2 *ssa.Value, kind instrumentKind) {
	if !s.curfn.InstrumentBody() {
		return
	}

	w := t.Size()
	if w == 0 {
		return // can't race on zero-sized things
	}

	if ssa.IsSanitizerSafeAddr(addr) {
		return
	}

	var fn *obj.LSym
	needWidth := false

	if addr2 != nil && kind != instrumentMove {
		panic("instrument2: non-nil addr2 for non-move instrumentation")
	}

	if base.Flag.MSan {
		switch kind {
		case instrumentRead:
			fn = ir.Syms.Msanread
		case instrumentWrite:
			fn = ir.Syms.Msanwrite
		case instrumentMove:
			fn = ir.Syms.Msanmove
		default:
			panic("unreachable")
		}
		needWidth = true
	} else if base.Flag.Race && t.NumComponents(types.CountBlankFields) > 1 {
		// for composite objects we have to write every address
		// because a write might happen to any subobject.
		// composites with only one element don't have subobjects, though.
		switch kind {
		case instrumentRead:
			fn = ir.Syms.Racereadrange
		case instrumentWrite:
			fn = ir.Syms.Racewriterange
		default:
			panic("unreachable")
		}
		needWidth = true
	} else if base.Flag.Race {
		// for non-composite objects we can write just the start
		// address, as any write must write the first byte.
		switch kind {
		case instrumentRead:
			fn = ir.Syms.Raceread
		case instrumentWrite:
			fn = ir.Syms.Racewrite
		default:
			panic("unreachable")
		}
	} else {
		panic("unreachable")
	}

	args := []*ssa.Value{addr}
	if addr2 != nil {
		args = append(args, addr2)
	}
	if needWidth {
		args = append(args, s.constInt(types.Types[types.TUINTPTR], w))
	}
	s.rtcall(fn, true, nil, args...)
}
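
// To make the dispatch above concrete (illustrative cases that follow the
// branches): under -race, a read of a 16-byte multi-field struct becomes a
// call racereadrange(addr, 16), while a read of a plain int64 becomes
// raceread(addr); under -msan the same struct read becomes msanread(addr, 16).
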
func (s *state) load(t *types.Type, src *ssa.Value) *ssa.Value {
	s.instrumentFields(t, src, instrumentRead)
	return s.rawLoad(t, src)
}

func (s *state) rawLoad(t *types.Type, src *ssa.Value) *ssa.Value {
	return s.newValue2(ssa.OpLoad, t, src, s.mem())
}

func (s *state) store(t *types.Type, dst, val *ssa.Value) {
	s.vars[memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, dst, val, s.mem())
}

func (s *state) zero(t *types.Type, dst *ssa.Value) {
	s.instrument(t, dst, instrumentWrite)
	store := s.newValue2I(ssa.OpZero, types.TypeMem, t.Size(), dst, s.mem())
	store.Aux = t
	s.vars[memVar] = store
}

func (s *state) move(t *types.Type, dst, src *ssa.Value) {
	s.instrumentMove(t, dst, src)
	store := s.newValue3I(ssa.OpMove, types.TypeMem, t.Size(), dst, src, s.mem())
	store.Aux = t
	s.vars[memVar] = store
}
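
// These helpers thread the memory state through s.vars[memVar]: each store,
// zero, or move consumes the current memory value (s.mem()) and installs the
// resulting one, so a later load observes all earlier effects. For example
// (a sketch of the invariant, not new behavior), after s.store(t, a, v), a
// following s.load(t, a) creates an OpLoad whose memory argument is the
// store just created.
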
// stmtList converts the statement list l to SSA and adds it to s.
func (s *state) stmtList(l ir.Nodes) {
	for _, n := range l {
		s.stmt(n)
	}
}
1351 func (s *state) stmt(n ir.Node) {
1352 if !(n.Op() == ir.OVARKILL || n.Op() == ir.OVARLIVE || n.Op() == ir.OVARDEF) {
1353 // OVARKILL, OVARLIVE, and OVARDEF are invisible to the programmer, so we don't use their line numbers to avoid confusion in debugging.
1358 // If s.curBlock is nil, and n isn't a label (which might have an associated goto somewhere),
1359 // then this code is dead. Stop here.
1360 if s.curBlock == nil && n.Op() != ir.OLABEL {
1364 s.stmtList(n.Init())
		n := n.(*ir.BlockStmt)
		s.stmtList(n.List)

	// No-ops
	case ir.ODCLCONST, ir.ODCLTYPE, ir.OFALL:
	// Expression statements
	case ir.OCALLFUNC:
		n := n.(*ir.CallExpr)
		if ir.IsIntrinsicCall(n) {
			s.intrinsicCall(n)
			return
		}
		fallthrough

	case ir.OCALLINTER, ir.OCALLMETH:
		n := n.(*ir.CallExpr)
		s.callResult(n, callNormal)
		if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.ONAME && n.X.(*ir.Name).Class == ir.PFUNC {
			if fn := n.X.Sym().Name; base.Flag.CompilingRuntime && fn == "throw" ||
				n.X.Sym().Pkg == ir.Pkgs.Runtime && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap") {
				m := s.mem()
				b := s.endBlock()
				b.Kind = ssa.BlockExit
				b.SetControl(m)
				// TODO: never rewrite OPANIC to OCALLFUNC in the
				// first place. Need to wait until all backends
				// go through SSA.
			}
		}
	case ir.ODEFER:
		n := n.(*ir.GoDeferStmt)
		if base.Debug.Defer > 0 {
			var defertype string
			if s.hasOpenDefers {
				defertype = "open-coded"
			} else if n.Esc() == ir.EscNever {
				defertype = "stack-allocated"
			} else {
				defertype = "heap-allocated"
			}
			base.WarnfAt(n.Pos(), "%s defer", defertype)
		}
		if s.hasOpenDefers {
			s.openDeferRecord(n.Call.(*ir.CallExpr))
		} else {
			d := callDefer
			if n.Esc() == ir.EscNever {
				d = callDeferStack
			}
			s.callResult(n.Call.(*ir.CallExpr), d)
		}
	case ir.OGO:
		n := n.(*ir.GoDeferStmt)
		s.callResult(n.Call.(*ir.CallExpr), callGo)
	case ir.OAS2DOTTYPE:
		n := n.(*ir.AssignListStmt)
		res, resok := s.dottype(n.Rhs[0].(*ir.TypeAssertExpr), true)
		deref := false
		if !TypeOK(n.Rhs[0].Type()) {
			if res.Op != ssa.OpLoad {
				s.Fatalf("dottype of non-load")
			}
			mem := s.mem()
			if mem.Op == ssa.OpVarKill {
				mem = mem.Args[0]
			}
			if res.Args[1] != mem {
				s.Fatalf("memory no longer live from 2-result dottype load")
			}
			deref = true
			res = res.Args[0]
		}
		s.assign(n.Lhs[0], res, deref, 0)
		s.assign(n.Lhs[1], resok, false, 0)
		return
	case ir.OAS2FUNC:
		// We come here only when it is an intrinsic call returning two values.
		n := n.(*ir.AssignListStmt)
		call := n.Rhs[0].(*ir.CallExpr)
		if !ir.IsIntrinsicCall(call) {
			s.Fatalf("non-intrinsic AS2FUNC not expanded %v", call)
		}
		v := s.intrinsicCall(call)
		v1 := s.newValue1(ssa.OpSelect0, n.Lhs[0].Type(), v)
		v2 := s.newValue1(ssa.OpSelect1, n.Lhs[1].Type(), v)
		s.assign(n.Lhs[0], v1, false, 0)
		s.assign(n.Lhs[1], v2, false, 0)
		return
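		// For illustration: an assignment like hi, lo := bits.Mul64(x, y),
		// where the call is intrinsified on the target, reaches this case.
		// The intrinsic produces one tuple-typed value, and the two Select
		// ops above split it into the two results.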
	case ir.ODCL:
		n := n.(*ir.Decl)
		if v := n.X; v.Esc() == ir.EscHeap {
			s.newHeapaddr(v)
		}

	case ir.OLABEL:
		n := n.(*ir.LabelStmt)
		sym := n.Label
		lab := s.label(sym)

		// The label might already have a target block via a goto.
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}

		// Go to that label.
		// (We pretend "label:" is preceded by "goto label", unless the predecessor is unreachable.)
		if s.curBlock != nil {
			b := s.endBlock()
			b.AddEdgeTo(lab.target)
		}
		s.startBlock(lab.target)
	case ir.OGOTO:
		n := n.(*ir.BranchStmt)
		sym := n.Label

		lab := s.label(sym)
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}

		b := s.endBlock()
		b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
		b.AddEdgeTo(lab.target)
	case ir.OAS:
		n := n.(*ir.AssignStmt)
		if n.X == n.Y && n.X.Op() == ir.ONAME {
			// An x=x assignment. No point in doing anything
			// here. In addition, skipping this assignment
			// prevents generating:
			//	VARDEF x
			//	COPY x -> x
			// which is bad because x is incorrectly considered
			// dead before the vardef. See issue #14904.
			return
		}

		// Evaluate RHS.
		rhs := n.Y
		if rhs != nil {
			switch rhs.Op() {
			case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT:
				// All literals with nonzero fields have already been
				// rewritten during walk. Any that remain are just T{}
				// or equivalents. Use the zero value.
				if !ir.IsZero(rhs) {
					s.Fatalf("literal with nonzero value in SSA: %v", rhs)
				}
				rhs = nil
			case ir.OAPPEND:
				rhs := rhs.(*ir.CallExpr)
				// Check whether we're writing the result of an append back to the same slice.
				// If so, we handle it specially to avoid write barriers on the fast
				// (non-growth) path.
				if !ir.SameSafeExpr(n.X, rhs.Args[0]) || base.Flag.N != 0 {
					break
				}
				// If the slice can be SSA'd, it'll be on the stack,
				// so there will be no write barriers,
				// so there's no need to attempt to prevent them.
				if s.canSSA(n.X) {
					if base.Debug.Append > 0 { // replicating old diagnostic message
						base.WarnfAt(n.Pos(), "append: len-only update (in local slice)")
					}
					break
				}
				if base.Debug.Append > 0 {
					base.WarnfAt(n.Pos(), "append: len-only update")
				}
				s.append(rhs, true)
				return
			}
		}

		if ir.IsBlank(n.X) {
			// _ = rhs
			// Just evaluate rhs for side-effects.
			if rhs != nil {
				s.expr(rhs)
			}
			return
		}

		var t *types.Type
		if n.Y != nil {
			t = n.Y.Type()
		} else {
			t = n.X.Type()
		}

		var r *ssa.Value
		deref := !TypeOK(t)
		if deref {
			if rhs == nil {
				r = nil // Signal assign to use OpZero.
			} else {
				r = s.addr(rhs)
			}
		} else {
			if rhs == nil {
				r = s.zeroVal(t)
			} else {
				r = s.expr(rhs)
			}
		}

		var skip skipMask
		if rhs != nil && (rhs.Op() == ir.OSLICE || rhs.Op() == ir.OSLICE3 || rhs.Op() == ir.OSLICESTR) && ir.SameSafeExpr(rhs.(*ir.SliceExpr).X, n.X) {
			// We're assigning a slicing operation back to its source.
			// Don't write back fields we aren't changing. See issue #14855.
			rhs := rhs.(*ir.SliceExpr)
			i, j, k := rhs.Low, rhs.High, rhs.Max
			if i != nil && (i.Op() == ir.OLITERAL && i.Val().Kind() == constant.Int && ir.Int64Val(i) == 0) {
				// [0:...] is the same as [:...]
				i = nil
			}
			// TODO: detect defaults for len/cap also.
			// Currently doesn't really work because (*p)[:len(*p)] appears here as:
			//	tmp = len(*p)
			//	(*p)[:tmp]
			//if j != nil && (j.Op == OLEN && SameSafeExpr(j.Left, n.Left)) {
			//	j = nil
			//}
			//if k != nil && (k.Op == OCAP && SameSafeExpr(k.Left, n.Left)) {
			//	k = nil
			//}
			if i == nil {
				skip |= skipPtr
				if j == nil {
					skip |= skipLen
				}
				if k == nil {
					skip |= skipCap
				}
			}
		}

		s.assign(n.X, r, deref, skip)
	case ir.OIF:
		n := n.(*ir.IfStmt)
		if ir.IsConst(n.Cond, constant.Bool) {
			s.stmtList(n.Cond.Init())
			if ir.BoolVal(n.Cond) {
				s.stmtList(n.Body)
			} else {
				s.stmtList(n.Else)
			}
			return
		}

		bEnd := s.f.NewBlock(ssa.BlockPlain)
		var likely int8
		if n.Likely {
			likely = 1
		}
		var bThen *ssa.Block
		if len(n.Body) != 0 {
			bThen = s.f.NewBlock(ssa.BlockPlain)
		} else {
			bThen = bEnd
		}
		var bElse *ssa.Block
		if len(n.Else) != 0 {
			bElse = s.f.NewBlock(ssa.BlockPlain)
		} else {
			bElse = bEnd
		}
		s.condBranch(n.Cond, bThen, bElse, likely)

		if len(n.Body) != 0 {
			s.startBlock(bThen)
			s.stmtList(n.Body)
			if b := s.endBlock(); b != nil {
				b.AddEdgeTo(bEnd)
			}
		}
		if len(n.Else) != 0 {
			s.startBlock(bElse)
			s.stmtList(n.Else)
			if b := s.endBlock(); b != nil {
				b.AddEdgeTo(bEnd)
			}
		}
		s.startBlock(bEnd)

	case ir.ORETURN:
		n := n.(*ir.ReturnStmt)
		s.stmtList(n.Results)
		b := s.exit()
		b.Pos = s.lastPos.WithIsStmt()
	case ir.OTAILCALL:
		n := n.(*ir.TailCallStmt)
		b := s.exit()
		b.Kind = ssa.BlockRetJmp // override BlockRet
		b.Aux = callTargetLSym(n.Target)
	case ir.OCONTINUE, ir.OBREAK:
		n := n.(*ir.BranchStmt)
		var to *ssa.Block
		if n.Label == nil {
			// plain break/continue
			switch n.Op() {
			case ir.OCONTINUE:
				to = s.continueTo
			case ir.OBREAK:
				to = s.breakTo
			}
		} else {
			// labeled break/continue; look up the target
			sym := n.Label
			lab := s.label(sym)
			switch n.Op() {
			case ir.OCONTINUE:
				to = lab.continueTarget
			case ir.OBREAK:
				to = lab.breakTarget
			}
		}

		b := s.endBlock()
		b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
		b.AddEdgeTo(to)
	case ir.OFOR, ir.OFORUNTIL:
		// OFOR: for Ninit; Left; Right { Nbody }
		// cond (Left); body (Nbody); incr (Right)
		//
		// OFORUNTIL: for Ninit; Left; Right; List { Nbody }
		// => body: { Nbody }; incr: Right; if Left { lateincr: List; goto body }; end:
		n := n.(*ir.ForStmt)
		bCond := s.f.NewBlock(ssa.BlockPlain)
		bBody := s.f.NewBlock(ssa.BlockPlain)
		bIncr := s.f.NewBlock(ssa.BlockPlain)
		bEnd := s.f.NewBlock(ssa.BlockPlain)

		// ensure empty for loops have correct position; issue #30167
		bBody.Pos = n.Pos()

		// first, jump to condition test (OFOR) or body (OFORUNTIL)
		b := s.endBlock()
		if n.Op() == ir.OFOR {
			b.AddEdgeTo(bCond)
			// generate code to test condition
			s.startBlock(bCond)
			if n.Cond != nil {
				s.condBranch(n.Cond, bBody, bEnd, 1)
			} else {
				b := s.endBlock()
				b.Kind = ssa.BlockPlain
				b.AddEdgeTo(bBody)
			}
		} else {
			b.AddEdgeTo(bBody)
		}

		// set up for continue/break in body
		prevContinue := s.continueTo
		prevBreak := s.breakTo
		s.continueTo = bIncr
		s.breakTo = bEnd
		var lab *ssaLabel
		if sym := n.Label; sym != nil {
			// labeled for loop
			lab = s.label(sym)
			lab.continueTarget = bIncr
			lab.breakTarget = bEnd
		}

		// generate body
		s.startBlock(bBody)
		s.stmtList(n.Body)

		// tear down continue/break
		s.continueTo = prevContinue
		s.breakTo = prevBreak
		if lab != nil {
			lab.continueTarget = nil
			lab.breakTarget = nil
		}

		// done with body, goto incr
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bIncr)
		}

		// generate incr (and, for OFORUNTIL, condition)
		s.startBlock(bIncr)
		if n.Post != nil {
			s.stmt(n.Post)
		}
		if n.Op() == ir.OFOR {
			if b := s.endBlock(); b != nil {
				b.AddEdgeTo(bCond)
				// It can happen that bIncr ends in a block containing only VARKILL,
				// and that muddles the debugging experience.
				if b.Pos == src.NoXPos {
					b.Pos = bCond.Pos
				}
			}
		} else {
			// bCond is unused in OFORUNTIL, so repurpose it.
			bLateIncr := bCond
			// test condition
			s.condBranch(n.Cond, bLateIncr, bEnd, 1)
			// generate late increment
			s.startBlock(bLateIncr)
			s.stmtList(n.Late)
			s.endBlock().AddEdgeTo(bBody)
		}

		s.startBlock(bEnd)
	case ir.OSWITCH, ir.OSELECT:
		// These have been mostly rewritten by the front end into their Nbody fields.
		// Our main task is to correctly hook up any break statements.
		bEnd := s.f.NewBlock(ssa.BlockPlain)

		prevBreak := s.breakTo
		s.breakTo = bEnd
		var sym *types.Sym
		var body ir.Nodes
		if n.Op() == ir.OSWITCH {
			n := n.(*ir.SwitchStmt)
			sym = n.Label
			body = n.Compiled
		} else {
			n := n.(*ir.SelectStmt)
			sym = n.Label
			body = n.Compiled
		}

		var lab *ssaLabel
		if sym != nil {
			// labeled
			lab = s.label(sym)
			lab.breakTarget = bEnd
		}

		// generate body code
		s.stmtList(body)

		s.breakTo = prevBreak
		if lab != nil {
			lab.breakTarget = nil
		}

		// walk adds explicit OBREAK nodes to the end of all reachable code paths.
		// If we still have a current block here, then mark it unreachable.
		if s.curBlock != nil {
			m := s.mem()
			b := s.endBlock()
			b.Kind = ssa.BlockExit
			b.SetControl(m)
		}
		s.startBlock(bEnd)
	case ir.OVARDEF:
		n := n.(*ir.UnaryExpr)
		if !s.canSSA(n.X) {
			s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.X.(*ir.Name), s.mem(), false)
		}
	case ir.OVARKILL:
		// Insert a varkill op to record that a variable is no longer live.
		// We only care about liveness info at call sites, so putting the
		// varkill in the store chain is enough to keep it correctly ordered
		// with respect to call ops.
		n := n.(*ir.UnaryExpr)
		if !s.canSSA(n.X) {
			s.vars[memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.X.(*ir.Name), s.mem(), false)
		}

	case ir.OVARLIVE:
		// Insert a varlive op to record that a variable is still live.
		n := n.(*ir.UnaryExpr)
		v := n.X.(*ir.Name)
		if !v.Addrtaken() {
			s.Fatalf("VARLIVE variable %v must have Addrtaken set", v)
		}
		switch v.Class {
		case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT:
		default:
			s.Fatalf("VARLIVE variable %v must be Auto or Arg", v)
		}
		s.vars[memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, v, s.mem())

	case ir.OCHECKNIL:
		n := n.(*ir.UnaryExpr)
		p := s.expr(n.X)
		s.nilCheck(p)

	case ir.OINLMARK:
		n := n.(*ir.InlineMarkStmt)
		s.newValue1I(ssa.OpInlMark, types.TypeVoid, n.Index, s.mem())

	default:
		s.Fatalf("unhandled stmt %v", n.Op())
	}
}
1875 // If true, share as many open-coded defer exits as possible (with the downside of
1876 // worse line-number information)
1877 const shareDeferExits = false
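// For example (a sketch), enabling this would funnel every return of a
// function with open-coded defers into one shared deferreturn/exit block,
// so each return would report that block's position rather than its own.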
// exit processes any code that needs to be generated just before returning.
// It returns a BlockRet block that ends the control flow. Its control value
// will be set to the final memory state.
func (s *state) exit() *ssa.Block {
	if s.hasOpenDefers {
		if shareDeferExits && s.lastDeferExit != nil && len(s.openDefers) == s.lastDeferCount {
			if s.curBlock.Kind != ssa.BlockPlain {
				panic("Block for an exit should be BlockPlain")
			s.curBlock.AddEdgeTo(s.lastDeferExit)
			return s.lastDeferFinalBlock
		s.rtcall(ir.Syms.Deferreturn, true, nil)

	// Do actual return.
	// These currently turn into self-copies (in many cases).
	resultFields := s.curfn.Type().Results().FieldSlice()
	results := make([]*ssa.Value, len(resultFields)+1, len(resultFields)+1)
	m = s.newValue0(ssa.OpMakeResult, s.f.OwnAux.LateExpansionResultType())
	// Store SSAable and heap-escaped PPARAMOUT variables back to stack locations.
	for i, f := range resultFields {
		n := f.Nname.(*ir.Name)
		if s.canSSA(n) { // result is in some SSA variable
			if !n.IsOutputParamInRegisters() {
				// We are about to store to the result slot.
				s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
			results[i] = s.variable(n, n.Type())
		} else if !n.OnStack() { // result is actually heap allocated
			// We are about to copy the in-heap result to the result slot.
			s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
			ha := s.expr(n.Heapaddr)
			s.instrumentFields(n.Type(), ha, instrumentRead)
			results[i] = s.newValue2(ssa.OpDereference, n.Type(), ha, s.mem())
		} else { // result is not SSA-able; not escaped, so not on heap, but too large for SSA.
			// Before the register ABI this ought to be a self-move, home=dest.
			// With the register ABI, it's still a self-move if the parameter is on the stack (i.e., too big or overflowed).
			// No VarDef, as the result slot already holds a live value.
			results[i] = s.newValue2(ssa.OpDereference, n.Type(), s.addr(n), s.mem())

	// Run exit code. Today, this is just racefuncexit, in -race mode.
	// TODO(register args) this seems risky here with a register-ABI, but not clear it is right to do it earlier either.
	// Spills in register allocation might just fix it.
	s.stmtList(s.curfn.Exit)

	results[len(results)-1] = s.mem()
	m.AddArgs(results...)

	b.Kind = ssa.BlockRet
	if s.hasdefer && s.hasOpenDefers {
		s.lastDeferFinalBlock = b
type opAndType struct {
	op    ir.Op
	etype types.Kind
}
var opToSSA = map[opAndType]ssa.Op{
	opAndType{ir.OADD, types.TINT8}: ssa.OpAdd8,
	opAndType{ir.OADD, types.TUINT8}: ssa.OpAdd8,
	opAndType{ir.OADD, types.TINT16}: ssa.OpAdd16,
	opAndType{ir.OADD, types.TUINT16}: ssa.OpAdd16,
	opAndType{ir.OADD, types.TINT32}: ssa.OpAdd32,
	opAndType{ir.OADD, types.TUINT32}: ssa.OpAdd32,
	opAndType{ir.OADD, types.TINT64}: ssa.OpAdd64,
	opAndType{ir.OADD, types.TUINT64}: ssa.OpAdd64,
	opAndType{ir.OADD, types.TFLOAT32}: ssa.OpAdd32F,
	opAndType{ir.OADD, types.TFLOAT64}: ssa.OpAdd64F,

	opAndType{ir.OSUB, types.TINT8}: ssa.OpSub8,
	opAndType{ir.OSUB, types.TUINT8}: ssa.OpSub8,
	opAndType{ir.OSUB, types.TINT16}: ssa.OpSub16,
	opAndType{ir.OSUB, types.TUINT16}: ssa.OpSub16,
	opAndType{ir.OSUB, types.TINT32}: ssa.OpSub32,
	opAndType{ir.OSUB, types.TUINT32}: ssa.OpSub32,
	opAndType{ir.OSUB, types.TINT64}: ssa.OpSub64,
	opAndType{ir.OSUB, types.TUINT64}: ssa.OpSub64,
	opAndType{ir.OSUB, types.TFLOAT32}: ssa.OpSub32F,
	opAndType{ir.OSUB, types.TFLOAT64}: ssa.OpSub64F,

	opAndType{ir.ONOT, types.TBOOL}: ssa.OpNot,

	opAndType{ir.ONEG, types.TINT8}: ssa.OpNeg8,
	opAndType{ir.ONEG, types.TUINT8}: ssa.OpNeg8,
	opAndType{ir.ONEG, types.TINT16}: ssa.OpNeg16,
	opAndType{ir.ONEG, types.TUINT16}: ssa.OpNeg16,
	opAndType{ir.ONEG, types.TINT32}: ssa.OpNeg32,
	opAndType{ir.ONEG, types.TUINT32}: ssa.OpNeg32,
	opAndType{ir.ONEG, types.TINT64}: ssa.OpNeg64,
	opAndType{ir.ONEG, types.TUINT64}: ssa.OpNeg64,
	opAndType{ir.ONEG, types.TFLOAT32}: ssa.OpNeg32F,
	opAndType{ir.ONEG, types.TFLOAT64}: ssa.OpNeg64F,

	opAndType{ir.OBITNOT, types.TINT8}: ssa.OpCom8,
	opAndType{ir.OBITNOT, types.TUINT8}: ssa.OpCom8,
	opAndType{ir.OBITNOT, types.TINT16}: ssa.OpCom16,
	opAndType{ir.OBITNOT, types.TUINT16}: ssa.OpCom16,
	opAndType{ir.OBITNOT, types.TINT32}: ssa.OpCom32,
	opAndType{ir.OBITNOT, types.TUINT32}: ssa.OpCom32,
	opAndType{ir.OBITNOT, types.TINT64}: ssa.OpCom64,
	opAndType{ir.OBITNOT, types.TUINT64}: ssa.OpCom64,

	opAndType{ir.OIMAG, types.TCOMPLEX64}: ssa.OpComplexImag,
	opAndType{ir.OIMAG, types.TCOMPLEX128}: ssa.OpComplexImag,
	opAndType{ir.OREAL, types.TCOMPLEX64}: ssa.OpComplexReal,
	opAndType{ir.OREAL, types.TCOMPLEX128}: ssa.OpComplexReal,

	opAndType{ir.OMUL, types.TINT8}: ssa.OpMul8,
	opAndType{ir.OMUL, types.TUINT8}: ssa.OpMul8,
	opAndType{ir.OMUL, types.TINT16}: ssa.OpMul16,
	opAndType{ir.OMUL, types.TUINT16}: ssa.OpMul16,
	opAndType{ir.OMUL, types.TINT32}: ssa.OpMul32,
	opAndType{ir.OMUL, types.TUINT32}: ssa.OpMul32,
	opAndType{ir.OMUL, types.TINT64}: ssa.OpMul64,
	opAndType{ir.OMUL, types.TUINT64}: ssa.OpMul64,
	opAndType{ir.OMUL, types.TFLOAT32}: ssa.OpMul32F,
	opAndType{ir.OMUL, types.TFLOAT64}: ssa.OpMul64F,

	opAndType{ir.ODIV, types.TFLOAT32}: ssa.OpDiv32F,
	opAndType{ir.ODIV, types.TFLOAT64}: ssa.OpDiv64F,

	opAndType{ir.ODIV, types.TINT8}: ssa.OpDiv8,
	opAndType{ir.ODIV, types.TUINT8}: ssa.OpDiv8u,
	opAndType{ir.ODIV, types.TINT16}: ssa.OpDiv16,
	opAndType{ir.ODIV, types.TUINT16}: ssa.OpDiv16u,
	opAndType{ir.ODIV, types.TINT32}: ssa.OpDiv32,
	opAndType{ir.ODIV, types.TUINT32}: ssa.OpDiv32u,
	opAndType{ir.ODIV, types.TINT64}: ssa.OpDiv64,
	opAndType{ir.ODIV, types.TUINT64}: ssa.OpDiv64u,

	opAndType{ir.OMOD, types.TINT8}: ssa.OpMod8,
	opAndType{ir.OMOD, types.TUINT8}: ssa.OpMod8u,
	opAndType{ir.OMOD, types.TINT16}: ssa.OpMod16,
	opAndType{ir.OMOD, types.TUINT16}: ssa.OpMod16u,
	opAndType{ir.OMOD, types.TINT32}: ssa.OpMod32,
	opAndType{ir.OMOD, types.TUINT32}: ssa.OpMod32u,
	opAndType{ir.OMOD, types.TINT64}: ssa.OpMod64,
	opAndType{ir.OMOD, types.TUINT64}: ssa.OpMod64u,

	opAndType{ir.OAND, types.TINT8}: ssa.OpAnd8,
	opAndType{ir.OAND, types.TUINT8}: ssa.OpAnd8,
	opAndType{ir.OAND, types.TINT16}: ssa.OpAnd16,
	opAndType{ir.OAND, types.TUINT16}: ssa.OpAnd16,
	opAndType{ir.OAND, types.TINT32}: ssa.OpAnd32,
	opAndType{ir.OAND, types.TUINT32}: ssa.OpAnd32,
	opAndType{ir.OAND, types.TINT64}: ssa.OpAnd64,
	opAndType{ir.OAND, types.TUINT64}: ssa.OpAnd64,

	opAndType{ir.OOR, types.TINT8}: ssa.OpOr8,
	opAndType{ir.OOR, types.TUINT8}: ssa.OpOr8,
	opAndType{ir.OOR, types.TINT16}: ssa.OpOr16,
	opAndType{ir.OOR, types.TUINT16}: ssa.OpOr16,
	opAndType{ir.OOR, types.TINT32}: ssa.OpOr32,
	opAndType{ir.OOR, types.TUINT32}: ssa.OpOr32,
	opAndType{ir.OOR, types.TINT64}: ssa.OpOr64,
	opAndType{ir.OOR, types.TUINT64}: ssa.OpOr64,

	opAndType{ir.OXOR, types.TINT8}: ssa.OpXor8,
	opAndType{ir.OXOR, types.TUINT8}: ssa.OpXor8,
	opAndType{ir.OXOR, types.TINT16}: ssa.OpXor16,
	opAndType{ir.OXOR, types.TUINT16}: ssa.OpXor16,
	opAndType{ir.OXOR, types.TINT32}: ssa.OpXor32,
	opAndType{ir.OXOR, types.TUINT32}: ssa.OpXor32,
	opAndType{ir.OXOR, types.TINT64}: ssa.OpXor64,
	opAndType{ir.OXOR, types.TUINT64}: ssa.OpXor64,

	opAndType{ir.OEQ, types.TBOOL}: ssa.OpEqB,
	opAndType{ir.OEQ, types.TINT8}: ssa.OpEq8,
	opAndType{ir.OEQ, types.TUINT8}: ssa.OpEq8,
	opAndType{ir.OEQ, types.TINT16}: ssa.OpEq16,
	opAndType{ir.OEQ, types.TUINT16}: ssa.OpEq16,
	opAndType{ir.OEQ, types.TINT32}: ssa.OpEq32,
	opAndType{ir.OEQ, types.TUINT32}: ssa.OpEq32,
	opAndType{ir.OEQ, types.TINT64}: ssa.OpEq64,
	opAndType{ir.OEQ, types.TUINT64}: ssa.OpEq64,
	opAndType{ir.OEQ, types.TINTER}: ssa.OpEqInter,
	opAndType{ir.OEQ, types.TSLICE}: ssa.OpEqSlice,
	opAndType{ir.OEQ, types.TFUNC}: ssa.OpEqPtr,
	opAndType{ir.OEQ, types.TMAP}: ssa.OpEqPtr,
	opAndType{ir.OEQ, types.TCHAN}: ssa.OpEqPtr,
	opAndType{ir.OEQ, types.TPTR}: ssa.OpEqPtr,
	opAndType{ir.OEQ, types.TUINTPTR}: ssa.OpEqPtr,
	opAndType{ir.OEQ, types.TUNSAFEPTR}: ssa.OpEqPtr,
	opAndType{ir.OEQ, types.TFLOAT64}: ssa.OpEq64F,
	opAndType{ir.OEQ, types.TFLOAT32}: ssa.OpEq32F,

	opAndType{ir.ONE, types.TBOOL}: ssa.OpNeqB,
	opAndType{ir.ONE, types.TINT8}: ssa.OpNeq8,
	opAndType{ir.ONE, types.TUINT8}: ssa.OpNeq8,
	opAndType{ir.ONE, types.TINT16}: ssa.OpNeq16,
	opAndType{ir.ONE, types.TUINT16}: ssa.OpNeq16,
	opAndType{ir.ONE, types.TINT32}: ssa.OpNeq32,
	opAndType{ir.ONE, types.TUINT32}: ssa.OpNeq32,
	opAndType{ir.ONE, types.TINT64}: ssa.OpNeq64,
	opAndType{ir.ONE, types.TUINT64}: ssa.OpNeq64,
	opAndType{ir.ONE, types.TINTER}: ssa.OpNeqInter,
	opAndType{ir.ONE, types.TSLICE}: ssa.OpNeqSlice,
	opAndType{ir.ONE, types.TFUNC}: ssa.OpNeqPtr,
	opAndType{ir.ONE, types.TMAP}: ssa.OpNeqPtr,
	opAndType{ir.ONE, types.TCHAN}: ssa.OpNeqPtr,
	opAndType{ir.ONE, types.TPTR}: ssa.OpNeqPtr,
	opAndType{ir.ONE, types.TUINTPTR}: ssa.OpNeqPtr,
	opAndType{ir.ONE, types.TUNSAFEPTR}: ssa.OpNeqPtr,
	opAndType{ir.ONE, types.TFLOAT64}: ssa.OpNeq64F,
	opAndType{ir.ONE, types.TFLOAT32}: ssa.OpNeq32F,

	opAndType{ir.OLT, types.TINT8}: ssa.OpLess8,
	opAndType{ir.OLT, types.TUINT8}: ssa.OpLess8U,
	opAndType{ir.OLT, types.TINT16}: ssa.OpLess16,
	opAndType{ir.OLT, types.TUINT16}: ssa.OpLess16U,
	opAndType{ir.OLT, types.TINT32}: ssa.OpLess32,
	opAndType{ir.OLT, types.TUINT32}: ssa.OpLess32U,
	opAndType{ir.OLT, types.TINT64}: ssa.OpLess64,
	opAndType{ir.OLT, types.TUINT64}: ssa.OpLess64U,
	opAndType{ir.OLT, types.TFLOAT64}: ssa.OpLess64F,
	opAndType{ir.OLT, types.TFLOAT32}: ssa.OpLess32F,

	opAndType{ir.OLE, types.TINT8}: ssa.OpLeq8,
	opAndType{ir.OLE, types.TUINT8}: ssa.OpLeq8U,
	opAndType{ir.OLE, types.TINT16}: ssa.OpLeq16,
	opAndType{ir.OLE, types.TUINT16}: ssa.OpLeq16U,
	opAndType{ir.OLE, types.TINT32}: ssa.OpLeq32,
	opAndType{ir.OLE, types.TUINT32}: ssa.OpLeq32U,
	opAndType{ir.OLE, types.TINT64}: ssa.OpLeq64,
	opAndType{ir.OLE, types.TUINT64}: ssa.OpLeq64U,
	opAndType{ir.OLE, types.TFLOAT64}: ssa.OpLeq64F,
	opAndType{ir.OLE, types.TFLOAT32}: ssa.OpLeq32F,
}
func (s *state) concreteEtype(t *types.Type) types.Kind {
	switch e := t.Kind(); e {
	default:
		return e
	case types.TINT:
		if s.config.PtrSize == 8 {
			return types.TINT64
		}
		return types.TINT32
	case types.TUINT:
		if s.config.PtrSize == 8 {
			return types.TUINT64
		}
		return types.TUINT32
	case types.TUINTPTR:
		if s.config.PtrSize == 8 {
			return types.TUINT64
		}
		return types.TUINT32
	}
}
func (s *state) ssaOp(op ir.Op, t *types.Type) ssa.Op {
	etype := s.concreteEtype(t)
	x, ok := opToSSA[opAndType{op, etype}]
	if !ok {
		s.Fatalf("unhandled binary op %v %s", op, etype)
	}
	return x
}
type opAndTwoTypes struct {
	op     ir.Op
	etype1 types.Kind
	etype2 types.Kind
}

type twoTypes struct {
	etype1 types.Kind
	etype2 types.Kind
}

type twoOpsAndType struct {
	op1              ssa.Op
	op2              ssa.Op
	intermediateType types.Kind
}
var fpConvOpToSSA = map[twoTypes]twoOpsAndType{
	twoTypes{types.TINT8, types.TFLOAT32}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, types.TINT32},
	twoTypes{types.TINT16, types.TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, types.TINT32},
	twoTypes{types.TINT32, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, types.TINT32},
	twoTypes{types.TINT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, types.TINT64},

	twoTypes{types.TINT8, types.TFLOAT64}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, types.TINT32},
	twoTypes{types.TINT16, types.TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, types.TINT32},
	twoTypes{types.TINT32, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, types.TINT32},
	twoTypes{types.TINT64, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, types.TINT64},

	twoTypes{types.TFLOAT32, types.TINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, types.TINT32},
	twoTypes{types.TFLOAT32, types.TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, types.TINT32},
	twoTypes{types.TFLOAT32, types.TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, types.TINT32},
	twoTypes{types.TFLOAT32, types.TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, types.TINT64},

	twoTypes{types.TFLOAT64, types.TINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, types.TINT32},
	twoTypes{types.TFLOAT64, types.TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, types.TINT32},
	twoTypes{types.TFLOAT64, types.TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, types.TINT32},
	twoTypes{types.TFLOAT64, types.TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, types.TINT64},

	twoTypes{types.TUINT8, types.TFLOAT32}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, types.TINT32},
	twoTypes{types.TUINT16, types.TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, types.TINT32},
	twoTypes{types.TUINT32, types.TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, types.TINT64}, // go wide to dodge unsigned
	twoTypes{types.TUINT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, types.TUINT64}, // Cvt64Uto32F, branchy code expansion instead

	twoTypes{types.TUINT8, types.TFLOAT64}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, types.TINT32},
	twoTypes{types.TUINT16, types.TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, types.TINT32},
	twoTypes{types.TUINT32, types.TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, types.TINT64}, // go wide to dodge unsigned
	twoTypes{types.TUINT64, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, types.TUINT64}, // Cvt64Uto64F, branchy code expansion instead

	twoTypes{types.TFLOAT32, types.TUINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, types.TINT32},
	twoTypes{types.TFLOAT32, types.TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, types.TINT32},
	twoTypes{types.TFLOAT32, types.TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned
	twoTypes{types.TFLOAT32, types.TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt32Fto64U, branchy code expansion instead

	twoTypes{types.TFLOAT64, types.TUINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, types.TINT32},
	twoTypes{types.TFLOAT64, types.TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, types.TINT32},
	twoTypes{types.TFLOAT64, types.TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned
	twoTypes{types.TFLOAT64, types.TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt64Fto64U, branchy code expansion instead

	twoTypes{types.TFLOAT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, types.TFLOAT32},
	twoTypes{types.TFLOAT64, types.TFLOAT64}: twoOpsAndType{ssa.OpRound64F, ssa.OpCopy, types.TFLOAT64},
	twoTypes{types.TFLOAT32, types.TFLOAT32}: twoOpsAndType{ssa.OpRound32F, ssa.OpCopy, types.TFLOAT32},
	twoTypes{types.TFLOAT32, types.TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, types.TFLOAT64},
}
// This map is used only on 32-bit archs, and only includes the cases that
// differ there: on a 32-bit arch, don't use int64<->float conversions for uint32.
var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{
	twoTypes{types.TUINT32, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, types.TUINT32},
	twoTypes{types.TUINT32, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, types.TUINT32},
	twoTypes{types.TFLOAT32, types.TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, types.TUINT32},
	twoTypes{types.TFLOAT64, types.TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, types.TUINT32},
}
// uint64<->float conversions, only on machines that have instructions for that.
var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{
	twoTypes{types.TUINT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto32F, types.TUINT64},
	twoTypes{types.TUINT64, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto64F, types.TUINT64},
	twoTypes{types.TFLOAT32, types.TUINT64}: twoOpsAndType{ssa.OpCvt32Fto64U, ssa.OpCopy, types.TUINT64},
	twoTypes{types.TFLOAT64, types.TUINT64}: twoOpsAndType{ssa.OpCvt64Fto64U, ssa.OpCopy, types.TUINT64},
}
var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
	opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT8}: ssa.OpLsh8x8,
	opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT8}: ssa.OpLsh8x8,
	opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT16}: ssa.OpLsh8x16,
	opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT16}: ssa.OpLsh8x16,
	opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT32}: ssa.OpLsh8x32,
	opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT32}: ssa.OpLsh8x32,
	opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT64}: ssa.OpLsh8x64,
	opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT64}: ssa.OpLsh8x64,

	opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT8}: ssa.OpLsh16x8,
	opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT8}: ssa.OpLsh16x8,
	opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT16}: ssa.OpLsh16x16,
	opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT16}: ssa.OpLsh16x16,
	opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT32}: ssa.OpLsh16x32,
	opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT32}: ssa.OpLsh16x32,
	opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT64}: ssa.OpLsh16x64,
	opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT64}: ssa.OpLsh16x64,

	opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT8}: ssa.OpLsh32x8,
	opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT8}: ssa.OpLsh32x8,
	opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT16}: ssa.OpLsh32x16,
	opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT16}: ssa.OpLsh32x16,
	opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT32}: ssa.OpLsh32x32,
	opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT32}: ssa.OpLsh32x32,
	opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT64}: ssa.OpLsh32x64,
	opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT64}: ssa.OpLsh32x64,

	opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT8}: ssa.OpLsh64x8,
	opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT8}: ssa.OpLsh64x8,
	opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT16}: ssa.OpLsh64x16,
	opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT16}: ssa.OpLsh64x16,
	opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT32}: ssa.OpLsh64x32,
	opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT32}: ssa.OpLsh64x32,
	opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT64}: ssa.OpLsh64x64,
	opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT64}: ssa.OpLsh64x64,

	opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT8}: ssa.OpRsh8x8,
	opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT8}: ssa.OpRsh8Ux8,
	opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT16}: ssa.OpRsh8x16,
	opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT16}: ssa.OpRsh8Ux16,
	opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT32}: ssa.OpRsh8x32,
	opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT32}: ssa.OpRsh8Ux32,
	opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT64}: ssa.OpRsh8x64,
	opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT64}: ssa.OpRsh8Ux64,

	opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT8}: ssa.OpRsh16x8,
	opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT8}: ssa.OpRsh16Ux8,
	opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT16}: ssa.OpRsh16x16,
	opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT16}: ssa.OpRsh16Ux16,
	opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT32}: ssa.OpRsh16x32,
	opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT32}: ssa.OpRsh16Ux32,
	opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT64}: ssa.OpRsh16x64,
	opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT64}: ssa.OpRsh16Ux64,

	opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT8}: ssa.OpRsh32x8,
	opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT8}: ssa.OpRsh32Ux8,
	opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT16}: ssa.OpRsh32x16,
	opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT16}: ssa.OpRsh32Ux16,
	opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT32}: ssa.OpRsh32x32,
	opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT32}: ssa.OpRsh32Ux32,
	opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT64}: ssa.OpRsh32x64,
	opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT64}: ssa.OpRsh32Ux64,

	opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT8}: ssa.OpRsh64x8,
	opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT8}: ssa.OpRsh64Ux8,
	opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT16}: ssa.OpRsh64x16,
	opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT16}: ssa.OpRsh64Ux16,
	opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT32}: ssa.OpRsh64x32,
	opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT32}: ssa.OpRsh64Ux32,
	opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT64}: ssa.OpRsh64x64,
	opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT64}: ssa.OpRsh64Ux64,
}
func (s *state) ssaShiftOp(op ir.Op, t *types.Type, u *types.Type) ssa.Op {
	etype1 := s.concreteEtype(t)
	etype2 := s.concreteEtype(u)
	x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
	if !ok {
		s.Fatalf("unhandled shift op %v etype=%s/%s", op, etype1, etype2)
	}
	return x
}
// expr converts the expression n to ssa, adds it to s and returns the ssa result.
func (s *state) expr(n ir.Node) *ssa.Value {
	if ir.HasUniquePos(n) {
		// ONAMEs and named OLITERALs have the line number
		// of the decl, not the use. See issue 14742.

	s.stmtList(n.Init())

	case ir.OBYTES2STRTMP:
		n := n.(*ir.ConvExpr)
		slice := s.expr(n.X)
		ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
		len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice)
		return s.newValue2(ssa.OpStringMake, n.Type(), ptr, len)
	case ir.OSTR2BYTESTMP:
		n := n.(*ir.ConvExpr)
		ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str)
		len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], str)
		return s.newValue3(ssa.OpSliceMake, n.Type(), ptr, len, len)

		n := n.(*ir.UnaryExpr)
		aux := n.X.(*ir.Name).Linksym()
		// OCFUNC is used to build function values, which must
		// always reference ABIInternal entry points.
		if aux.ABI() != obj.ABIInternal {
			s.Fatalf("expected ABIInternal: %v", aux.ABI())
		return s.entryNewValue1A(ssa.OpAddr, n.Type(), aux, s.sb)

		if n.Class == ir.PFUNC {
			// "value" of a function is the address of the function's closure
			sym := staticdata.FuncLinksym(n)
			return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type()), sym, s.sb)
		return s.variable(n, n.Type())
		return s.load(n.Type(), s.addr(n))
	case ir.OLINKSYMOFFSET:
		n := n.(*ir.LinksymOffsetExpr)
		return s.load(n.Type(), s.addr(n))

		n := n.(*ir.NilExpr)
			return s.constSlice(t)
		case t.IsInterface():
			return s.constInterface(t)
			return s.constNil(t)

		switch u := n.Val(); u.Kind() {
			i := ir.IntVal(n.Type(), u)
			switch n.Type().Size() {
				return s.constInt8(n.Type(), int8(i))
				return s.constInt16(n.Type(), int16(i))
				return s.constInt32(n.Type(), int32(i))
				return s.constInt64(n.Type(), i)
				s.Fatalf("bad integer size %d", n.Type().Size())
		case constant.String:
			i := constant.StringVal(u)
				return s.constEmptyString(n.Type())
			return s.entryNewValue0A(ssa.OpConstString, n.Type(), ssa.StringToAux(i))
			return s.constBool(constant.BoolVal(u))
		case constant.Float:
			f, _ := constant.Float64Val(u)
			switch n.Type().Size() {
				return s.constFloat32(n.Type(), f)
				return s.constFloat64(n.Type(), f)
				s.Fatalf("bad float size %d", n.Type().Size())
		case constant.Complex:
			re, _ := constant.Float64Val(constant.Real(u))
			im, _ := constant.Float64Val(constant.Imag(u))
			switch n.Type().Size() {
				pt := types.Types[types.TFLOAT32]
				return s.newValue2(ssa.OpComplexMake, n.Type(),
					s.constFloat32(pt, re),
					s.constFloat32(pt, im))
				pt := types.Types[types.TFLOAT64]
				return s.newValue2(ssa.OpComplexMake, n.Type(),
					s.constFloat64(pt, re),
					s.constFloat64(pt, im))
				s.Fatalf("bad complex size %d", n.Type().Size())
			s.Fatalf("unhandled OLITERAL %v", u.Kind())

		n := n.(*ir.ConvExpr)

		// Assume everything will work out, so set up our return value.
		// Anything interesting that happens from here is a fatal.

		// Special case for not confusing GC and liveness.
		// We don't want pointers accidentally classified
		// as not-pointers or vice-versa because of copy
		if to.IsPtrShaped() != from.IsPtrShaped() {
			return s.newValue2(ssa.OpConvert, to, x, s.mem())

		v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type

		if to.Kind() == types.TFUNC && from.IsPtrShaped() {

		// named <--> unnamed type or typed <--> untyped const
		if from.Kind() == to.Kind() {
		// unsafe.Pointer <--> *T
		if to.IsUnsafePtr() && from.IsPtrShaped() || from.IsUnsafePtr() && to.IsPtrShaped() {

		if to.Kind() == types.TMAP && from.IsPtr() &&
			to.MapType().Hmap == from.Elem() {

		types.CalcSize(from)
		if from.Width != to.Width {
			s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width)
		if etypesign(from.Kind()) != etypesign(to.Kind()) {
			s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Kind(), to, to.Kind())

		if base.Flag.Cfg.Instrumenting {
			// These appear to be fine, but they fail the
			// integer constraint below, so okay them here.
			// Sample non-integer conversion: map[string]string -> *uint8

		if etypesign(from.Kind()) == 0 {
			s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)

		// integer, same width, same sign

		n := n.(*ir.ConvExpr)
		ft := n.X.Type() // from type
		tt := n.Type()   // to type
		if ft.IsBoolean() && tt.IsKind(types.TUINT8) {
			// Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
			return s.newValue1(ssa.OpCopy, n.Type(), x)
		if ft.IsInteger() && tt.IsInteger() {
			if tt.Size() == ft.Size() {
			} else if tt.Size() < ft.Size() {
				switch 10*ft.Size() + tt.Size() {
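				// The switch key packs both byte sizes into one
				// integer: 10*fromSize + toSize. E.g. an int32 ->
				// int8 truncation is 10*4 + 1 = 41, selecting
				// OpTrunc32to8.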
					op = ssa.OpTrunc16to8
					op = ssa.OpTrunc32to8
					op = ssa.OpTrunc32to16
					op = ssa.OpTrunc64to8
					op = ssa.OpTrunc64to16
					op = ssa.OpTrunc64to32
					s.Fatalf("weird integer truncation %v -> %v", ft, tt)
			} else if ft.IsSigned() {
				switch 10*ft.Size() + tt.Size() {
					op = ssa.OpSignExt8to16
					op = ssa.OpSignExt8to32
					op = ssa.OpSignExt8to64
					op = ssa.OpSignExt16to32
					op = ssa.OpSignExt16to64
					op = ssa.OpSignExt32to64
					s.Fatalf("bad integer sign extension %v -> %v", ft, tt)
				switch 10*ft.Size() + tt.Size() {
					op = ssa.OpZeroExt8to16
					op = ssa.OpZeroExt8to32
					op = ssa.OpZeroExt8to64
					op = ssa.OpZeroExt16to32
					op = ssa.OpZeroExt16to64
					op = ssa.OpZeroExt32to64
					s.Fatalf("weird integer zero extension %v -> %v", ft, tt)
			return s.newValue1(op, n.Type(), x)

		if ft.IsFloat() || tt.IsFloat() {
			conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
			if s.config.RegSize == 4 && Arch.LinkArch.Family != sys.MIPS && !s.softFloat {
				if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
			if Arch.LinkArch.Family == sys.ARM64 || Arch.LinkArch.Family == sys.Wasm || Arch.LinkArch.Family == sys.S390X || s.softFloat {
				if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {

			if Arch.LinkArch.Family == sys.MIPS && !s.softFloat {
				if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {
					// tt is float32 or float64, and ft is also unsigned
						return s.uint32Tofloat32(n, x, ft, tt)
						return s.uint32Tofloat64(n, x, ft, tt)
				} else if tt.Size() == 4 && tt.IsInteger() && !tt.IsSigned() {
					// ft is float32 or float64, and tt is unsigned integer
						return s.float32ToUint32(n, x, ft, tt)
						return s.float64ToUint32(n, x, ft, tt)

				s.Fatalf("weird float conversion %v -> %v", ft, tt)
			op1, op2, it := conv.op1, conv.op2, conv.intermediateType

			if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid {
				// normal case, not tripping over unsigned 64
				if op1 == ssa.OpCopy {
					if op2 == ssa.OpCopy {
					return s.newValueOrSfCall1(op2, n.Type(), x)
				if op2 == ssa.OpCopy {
					return s.newValueOrSfCall1(op1, n.Type(), x)
				return s.newValueOrSfCall1(op2, n.Type(), s.newValueOrSfCall1(op1, types.Types[it], x))

			// Tricky 64-bit unsigned cases.
				// tt is float32 or float64, and ft is also unsigned
					return s.uint64Tofloat32(n, x, ft, tt)
					return s.uint64Tofloat64(n, x, ft, tt)
				s.Fatalf("weird unsigned integer to float conversion %v -> %v", ft, tt)
			// ft is float32 or float64, and tt is unsigned integer
				return s.float32ToUint64(n, x, ft, tt)
				return s.float64ToUint64(n, x, ft, tt)
			s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt)

		if ft.IsComplex() && tt.IsComplex() {
			if ft.Size() == tt.Size() {
					s.Fatalf("weird complex conversion %v -> %v", ft, tt)
			} else if ft.Size() == 8 && tt.Size() == 16 {
				op = ssa.OpCvt32Fto64F
			} else if ft.Size() == 16 && tt.Size() == 8 {
				op = ssa.OpCvt64Fto32F
				s.Fatalf("weird complex conversion %v -> %v", ft, tt)
			ftp := types.FloatForComplex(ft)
			ttp := types.FloatForComplex(tt)
			return s.newValue2(ssa.OpComplexMake, tt,
				s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)),
				s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x)))

		s.Fatalf("unhandled OCONV %s -> %s", n.X.Type().Kind(), n.Type().Kind())

		n := n.(*ir.TypeAssertExpr)
		res, _ := s.dottype(n, false)

	case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
		n := n.(*ir.BinaryExpr)
		if n.X.Type().IsComplex() {
			pt := types.FloatForComplex(n.X.Type())
			op := s.ssaOp(ir.OEQ, pt)
			r := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
			i := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
			c := s.newValue2(ssa.OpAndB, types.Types[types.TBOOL], r, i)
				return s.newValue1(ssa.OpNot, types.Types[types.TBOOL], c)
				s.Fatalf("ordered complex compare %v", n.Op())

		// Convert OGE and OGT into OLE and OLT.
			op, a, b = ir.OLE, b, a
			op, a, b = ir.OLT, b, a
		if n.X.Type().IsFloat() {
			return s.newValueOrSfCall2(s.ssaOp(op, n.X.Type()), types.Types[types.TBOOL], a, b)
		// integer comparison
		return s.newValue2(s.ssaOp(op, n.X.Type()), types.Types[types.TBOOL], a, b)

		n := n.(*ir.BinaryExpr)
		if n.Type().IsComplex() {
			mulop := ssa.OpMul64F
			addop := ssa.OpAdd64F
			subop := ssa.OpSub64F
			pt := types.FloatForComplex(n.Type()) // Could be Float32 or Float64
			wt := types.Types[types.TFLOAT64]     // Compute in Float64 to minimize cancellation error

			areal := s.newValue1(ssa.OpComplexReal, pt, a)
			breal := s.newValue1(ssa.OpComplexReal, pt, b)
			aimag := s.newValue1(ssa.OpComplexImag, pt, a)
			bimag := s.newValue1(ssa.OpComplexImag, pt, b)

			if pt != wt { // Widen for calculation
				areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal)
				breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal)
				aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag)
				bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag)

			xreal := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag))
			ximag := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, bimag), s.newValueOrSfCall2(mulop, wt, aimag, breal))

			if pt != wt { // Narrow to store back
				xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
				ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)

			return s.newValue2(ssa.OpComplexMake, n.Type(), xreal, ximag)

		if n.Type().IsFloat() {
			return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
		return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)

		n := n.(*ir.BinaryExpr)
		if n.Type().IsComplex() {
			// TODO this is not executed because the front-end substitutes a runtime call.
			// That probably ought to change; with modest optimization the widen/narrow
			// conversions could all be elided in larger expression trees.
			mulop := ssa.OpMul64F
			addop := ssa.OpAdd64F
			subop := ssa.OpSub64F
			divop := ssa.OpDiv64F
			pt := types.FloatForComplex(n.Type()) // Could be Float32 or Float64
			wt := types.Types[types.TFLOAT64]     // Compute in Float64 to minimize cancellation error

			areal := s.newValue1(ssa.OpComplexReal, pt, a)
			breal := s.newValue1(ssa.OpComplexReal, pt, b)
			aimag := s.newValue1(ssa.OpComplexImag, pt, a)
			bimag := s.newValue1(ssa.OpComplexImag, pt, b)

			if pt != wt { // Widen for calculation
				areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal)
				breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal)
				aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag)
				bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag)

			denom := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, breal, breal), s.newValueOrSfCall2(mulop, wt, bimag, bimag))
			xreal := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag))
			ximag := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, aimag, breal), s.newValueOrSfCall2(mulop, wt, areal, bimag))

			// TODO not sure if this is best done in wide precision or narrow
			// Double-rounding might be an issue.
			// Note that the pre-SSA implementation does the entire calculation
			// in wide format, so wide is compatible.
			xreal = s.newValueOrSfCall2(divop, wt, xreal, denom)
			ximag = s.newValueOrSfCall2(divop, wt, ximag, denom)

			if pt != wt { // Narrow to store back
				xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
				ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
			return s.newValue2(ssa.OpComplexMake, n.Type(), xreal, ximag)

		if n.Type().IsFloat() {
			return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
		return s.intDivide(n, a, b)

		n := n.(*ir.BinaryExpr)
		return s.intDivide(n, a, b)

	case ir.OADD, ir.OSUB:
		n := n.(*ir.BinaryExpr)
		if n.Type().IsComplex() {
			pt := types.FloatForComplex(n.Type())
			op := s.ssaOp(n.Op(), pt)
			return s.newValue2(ssa.OpComplexMake, n.Type(),
				s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)),
				s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)))
		if n.Type().IsFloat() {
			return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
		return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)

	case ir.OAND, ir.OOR, ir.OXOR:
		n := n.(*ir.BinaryExpr)
		return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)

		n := n.(*ir.BinaryExpr)
		b = s.newValue1(s.ssaOp(ir.OBITNOT, b.Type), b.Type, b)
		return s.newValue2(s.ssaOp(ir.OAND, n.Type()), a.Type, a, b)

	case ir.OLSH, ir.ORSH:
		n := n.(*ir.BinaryExpr)
			cmp := s.newValue2(s.ssaOp(ir.OLE, bt), types.Types[types.TBOOL], s.zeroVal(bt), b)
			s.check(cmp, ir.Syms.Panicshift)
			bt = bt.ToUnsigned()
		return s.newValue2(s.ssaShiftOp(n.Op(), n.Type(), bt), a.Type, a, b)

	case ir.OANDAND, ir.OOROR:
		// To implement OANDAND (and OOROR), we introduce a
		// new temporary variable to hold the result. The
		// variable is associated with the OANDAND node in the
		// s.vars table (normally variables are only
		// associated with ONAME nodes). We convert
		// Using var in the subsequent block introduces the
		// necessary phi variable.
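		//
		// For example (a sketch), "x := a && b" lowers to roughly:
		//
		//	vars[n] = a
		//	if a { goto rhs } else { goto done }
		//	rhs:  vars[n] = b; goto done
		//	done: x = phi(a, b)
		//
		// s.variable(n, ...) in bResult below materializes the phi.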
		n := n.(*ir.LogicalExpr)
		b.Kind = ssa.BlockIf
		// In theory, we should set b.Likely here based on context.
		// However, gc only gives us likeliness hints
		// in a single place, for plain OIF statements,
		// and passing around context is finicky, so don't bother for now.
		bRight := s.f.NewBlock(ssa.BlockPlain)
		bResult := s.f.NewBlock(ssa.BlockPlain)
		if n.Op() == ir.OANDAND {
			b.AddEdgeTo(bResult)
		} else if n.Op() == ir.OOROR {
			b.AddEdgeTo(bResult)

		s.startBlock(bRight)
		b.AddEdgeTo(bResult)

		s.startBlock(bResult)
		return s.variable(n, types.Types[types.TBOOL])

		n := n.(*ir.BinaryExpr)
		return s.newValue2(ssa.OpComplexMake, n.Type(), r, i)

		n := n.(*ir.UnaryExpr)
		if n.Type().IsComplex() {
			tp := types.FloatForComplex(n.Type())
			negop := s.ssaOp(n.Op(), tp)
			return s.newValue2(ssa.OpComplexMake, n.Type(),
				s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)),
				s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a)))
		return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a)

	case ir.ONOT, ir.OBITNOT:
		n := n.(*ir.UnaryExpr)
		return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a)
	case ir.OIMAG, ir.OREAL:
		n := n.(*ir.UnaryExpr)
		return s.newValue1(s.ssaOp(n.Op(), n.X.Type()), n.Type(), a)

		n := n.(*ir.UnaryExpr)

		n := n.(*ir.AddrExpr)

		n := n.(*ir.ResultExpr)
		if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall {
			panic("Expected to see a previous call")
			panic(fmt.Errorf("ORESULT %v does not match call %s", n, s.prevCall))
		return s.resultOfCall(s.prevCall, which, n.Type())

		n := n.(*ir.StarExpr)
		p := s.exprPtr(n.X, n.Bounded(), n.Pos())
		return s.load(n.Type(), p)

		n := n.(*ir.SelectorExpr)
		if n.X.Op() == ir.OSTRUCTLIT {
			// All literals with nonzero fields have already been
			// rewritten during walk. Any that remain are just T{}
			// or equivalents. Use the zero value.
			if !ir.IsZero(n.X) {
				s.Fatalf("literal with nonzero value in SSA: %v", n.X)
			return s.zeroVal(n.Type())
		// If n is addressable and can't be represented in
		// SSA, then load just the selected field. This
		// prevents false memory dependencies in race/msan
		if ir.IsAddressable(n) && !s.canSSA(n) {
			return s.load(n.Type(), p)
		return s.newValue1I(ssa.OpStructSelect, n.Type(), int64(fieldIdx(n)), v)

		n := n.(*ir.SelectorExpr)
		p := s.exprPtr(n.X, n.Bounded(), n.Pos())
		p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type()), n.Offset(), p)
		return s.load(n.Type(), p)

		n := n.(*ir.IndexExpr)
		case n.X.Type().IsString():
			if n.Bounded() && ir.IsConst(n.X, constant.String) && ir.IsConst(n.Index, constant.Int) {
				// Replace "abc"[1] with 'b'.
				// Delayed until now because "abc"[1] is not an ideal constant.
				// See test/fixedbugs/issue11370.go.
				return s.newValue0I(ssa.OpConst8, types.Types[types.TUINT8], int64(int8(ir.StringVal(n.X)[ir.Int64Val(n.Index)])))
			i := s.expr(n.Index)
			len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], a)
			i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
			ptrtyp := s.f.Config.Types.BytePtr
			ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
			if ir.IsConst(n.Index, constant.Int) {
				ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, ir.Int64Val(n.Index), ptr)
				ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
			return s.load(types.Types[types.TUINT8], ptr)
		case n.X.Type().IsSlice():
			return s.load(n.X.Type().Elem(), p)
		case n.X.Type().IsArray():
			if TypeOK(n.X.Type()) {
				// SSA can handle arrays of length at most 1.
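				// E.g., for "var a [1]T", "a[i]" compiles to a bounds
				// check that i == 0 followed by an OpArraySelect of
				// element 0; longer arrays are not SSA-able and take
				// the load path below instead.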
				bound := n.X.Type().NumElem()
				i := s.expr(n.Index)
					// Bounds check will never succeed. Might as well
					// use constants for the bounds check.
					z := s.constInt(types.Types[types.TINT], 0)
					s.boundsCheck(z, z, ssa.BoundsIndex, false)
					// The return value won't be live, return junk.
					return s.newValue0(ssa.OpUnknown, n.Type())
				len := s.constInt(types.Types[types.TINT], bound)
				s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded()) // checks i == 0
				return s.newValue1I(ssa.OpArraySelect, n.Type(), 0, a)
			return s.load(n.X.Type().Elem(), p)
			s.Fatalf("bad type for index %v", n.X.Type())

	case ir.OLEN, ir.OCAP:
		n := n.(*ir.UnaryExpr)
		case n.X.Type().IsSlice():
			op := ssa.OpSliceLen
			if n.Op() == ir.OCAP {
				op = ssa.OpSliceCap
			}
			return s.newValue1(op, types.Types[types.TINT], s.expr(n.X))
		case n.X.Type().IsString(): // string; not reachable for OCAP
			return s.newValue1(ssa.OpStringLen, types.Types[types.TINT], s.expr(n.X))
		case n.X.Type().IsMap(), n.X.Type().IsChan():
			return s.referenceTypeBuiltin(n, s.expr(n.X))
			return s.constInt(types.Types[types.TINT], n.X.Type().NumElem())

		n := n.(*ir.UnaryExpr)
		if n.X.Type().IsSlice() {
			return s.newValue1(ssa.OpSlicePtr, n.Type(), a)
			return s.newValue1(ssa.OpStringPtr, n.Type(), a)

		n := n.(*ir.UnaryExpr)
		return s.newValue1(ssa.OpITab, n.Type(), a)

		n := n.(*ir.UnaryExpr)
		return s.newValue1(ssa.OpIData, n.Type(), a)

		n := n.(*ir.BinaryExpr)
		return s.newValue2(ssa.OpIMake, n.Type(), tab, data)

	case ir.OSLICEHEADER:
		n := n.(*ir.SliceHeaderExpr)
		return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c)

	case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR:
		n := n.(*ir.SliceExpr)
		var i, j, k *ssa.Value
		p, l, c := s.slice(v, i, j, k, n.Bounded())
		return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c)

		n := n.(*ir.SliceExpr)
		p, l, _ := s.slice(v, i, j, nil, n.Bounded())
		return s.newValue2(ssa.OpStringMake, n.Type(), p, l)

	case ir.OSLICE2ARRPTR:
		// if arrlen > slice.len {
		n := n.(*ir.ConvExpr)
		arrlen := s.constInt(types.Types[types.TINT], n.Type().Elem().NumElem())
		cap := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], v)
		s.boundsCheck(arrlen, cap, ssa.BoundsConvert, false)
		return s.newValue1(ssa.OpSlicePtrUnchecked, n.Type(), v)

		n := n.(*ir.CallExpr)
		if ir.IsIntrinsicCall(n) {
			return s.intrinsicCall(n)

		n := n.(*ir.CallExpr)
		return s.callResult(n, callNormal)

		n := n.(*ir.CallExpr)
		return s.newValue1(ssa.OpGetG, n.Type(), s.mem())

	case ir.OGETCALLERPC:
		n := n.(*ir.CallExpr)
		return s.newValue0(ssa.OpGetCallerPC, n.Type())

	case ir.OGETCALLERSP:
		n := n.(*ir.CallExpr)
		return s.newValue0(ssa.OpGetCallerSP, n.Type())

		return s.append(n.(*ir.CallExpr), false)

	case ir.OSTRUCTLIT, ir.OARRAYLIT:
		// All literals with nonzero fields have already been
		// rewritten during walk. Any that remain are just T{}
		// or equivalents. Use the zero value.
		n := n.(*ir.CompLitExpr)
			s.Fatalf("literal with nonzero value in SSA: %v", n)
		return s.zeroVal(n.Type())

		n := n.(*ir.UnaryExpr)
		return s.newObject(n.Type().Elem())

		n := n.(*ir.BinaryExpr)
		return s.newValue2(ssa.OpAddPtr, n.Type(), ptr, len)

		s.Fatalf("unhandled expr %v", n.Op())

func (s *state) resultOfCall(c *ssa.Value, which int64, t *types.Type) *ssa.Value {
	aux := c.Aux.(*ssa.AuxCall)
	pa := aux.ParamAssignmentForResult(which)
	// TODO(register args) determine if in-memory TypeOK is better loaded early from SelectNAddr or later when SelectN is expanded.
	// SelectN is better for pattern-matching and possible call-aware analysis we might want to do in the future.
	if len(pa.Registers) == 0 && !TypeOK(t) {
		addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), which, c)
		return s.rawLoad(t, addr)
	return s.newValue1I(ssa.OpSelectN, t, which, c)

func (s *state) resultAddrOfCall(c *ssa.Value, which int64, t *types.Type) *ssa.Value {
	aux := c.Aux.(*ssa.AuxCall)
	pa := aux.ParamAssignmentForResult(which)
	if len(pa.Registers) == 0 {
		return s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), which, c)
	_, addr := s.temp(c.Pos, t)
	rval := s.newValue1I(ssa.OpSelectN, t, which, c)
	s.vars[memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, addr, rval, s.mem(), false)
// append converts an OAPPEND node to SSA.
// If inplace is false, it converts the OAPPEND expression n to an ssa.Value,
// adds it to s, and returns the Value.
// If inplace is true, it writes the result of the OAPPEND expression n
// back to the slice being appended to, and returns nil.
// inplace MUST be set to false if the slice can be SSA'd.
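//
// For example (a sketch), "v := append(s, e)" with an SSA-able s takes the
// expression form below and yields a new slice value, while
// "s = append(s, e)" with an addressable s takes the in-place form and
// writes the new ptr/len/cap words back through s's address.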
func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value {
	// If inplace is false, process as expression "append(s, e1, e2, e3)":
	//
	//	ptr, len, cap := s
	//	newlen := len + 3
	//	if newlen > cap {
	//		ptr, len, cap = growslice(s, newlen)
	//		newlen = len + 3 // recalculate to avoid a spill
	//	// with write barriers, if needed:
	//	*(ptr+len+1) = e2
	//	*(ptr+len+2) = e3
	//	return makeslice(ptr, newlen, cap)
	//
	// If inplace is true, process as statement "s = append(s, e1, e2, e3)":
	//
	//	ptr, len, cap := s
	//	newlen := len + 3
	//	if uint(newlen) > uint(cap) {
	//		newptr, len, newcap = growslice(ptr, len, cap, newlen)
	//		vardef(a)       // if necessary, advise liveness we are writing a new a
	//		*a.cap = newcap // write before ptr to avoid a spill
	//		*a.ptr = newptr // with write barrier
	//	newlen = len + 3 // recalculate to avoid a spill
	//	// with write barriers, if needed:
	//	*(ptr+len+1) = e2
	//	*(ptr+len+2) = e3
	et := n.Type().Elem()
	pt := types.NewPtr(et)

	sn := n.Args[0] // the slice node is the first in the list
	var slice, addr *ssa.Value
		slice = s.load(n.Type(), addr)

	// Allocate new blocks
	grow := s.f.NewBlock(ssa.BlockPlain)
	assign := s.f.NewBlock(ssa.BlockPlain)

	// Decide if we need to grow
	nargs := int64(len(n.Args) - 1)
	p := s.newValue1(ssa.OpSlicePtr, pt, slice)
	l := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice)
	c := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], slice)
	nl := s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], l, s.constInt(types.Types[types.TINT], nargs))
	cmp := s.newValue2(s.ssaOp(ir.OLT, types.Types[types.TUINT]), types.Types[types.TBOOL], c, nl)
		s.vars[newlenVar] = nl
	b.Kind = ssa.BlockIf
	b.Likely = ssa.BranchUnlikely

	taddr := s.expr(n.X)
	r := s.rtcall(ir.Syms.Growslice, true, []*types.Type{pt, types.Types[types.TINT], types.Types[types.TINT]}, taddr, p, l, c, nl)

		if sn.Op() == ir.ONAME {
			if sn.Class != ir.PEXTERN {
				// Tell liveness we're about to build a new slice
				s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem())
		capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, types.SliceCapOffset, addr)
		s.store(types.Types[types.TINT], capaddr, r[2])
		s.store(pt, addr, r[0])
		// load the value we just stored to avoid having to spill it
		s.vars[ptrVar] = s.load(pt, addr)
		s.vars[lenVar] = r[1] // avoid a spill in the fast path
		s.vars[ptrVar] = r[0]
		s.vars[newlenVar] = s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], r[1], s.constInt(types.Types[types.TINT], nargs))
		s.vars[capVar] = r[2]

	// assign new elements to slots
	s.startBlock(assign)

		l = s.variable(lenVar, types.Types[types.TINT]) // generates phi for len
		nl = s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], l, s.constInt(types.Types[types.TINT], nargs))
		lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, types.SliceLenOffset, addr)
		s.store(types.Types[types.TINT], lenaddr, nl)
	type argRec struct {
		// if store is true, we're appending the value v. If false, we're appending the
		// value at *v.
		v     *ssa.Value
		store bool
	}
	args := make([]argRec, 0, nargs)
	for _, n := range n.Args[1:] {
		if TypeOK(n.Type()) {
			args = append(args, argRec{v: s.expr(n), store: true})
			args = append(args, argRec{v: v})

	p = s.variable(ptrVar, pt)                          // generates phi for ptr
	nl = s.variable(newlenVar, types.Types[types.TINT]) // generates phi for nl
	c = s.variable(capVar, types.Types[types.TINT])     // generates phi for cap
	p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l)
	for i, arg := range args {
		addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(types.Types[types.TINT], int64(i)))
			s.storeType(et, addr, arg.v, 0, true)
			s.move(et, addr, arg.v)

	delete(s.vars, ptrVar)
	delete(s.vars, lenVar)
	delete(s.vars, newlenVar)
	delete(s.vars, capVar)
	return s.newValue3(ssa.OpSliceMake, n.Type(), p, nl, c)
// condBranch evaluates the boolean expression cond and branches to yes
// if cond is true and no if cond is false.
// This function is intended to handle && and || better than just calling
// s.expr(cond) and branching on the result.
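//
// For example (a sketch), condBranch on "a && b" emits
// condBranch(a, mid, no, ...) and then, in mid, condBranch(b, yes, no, likely),
// so a false "a" never evaluates "b".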
func (s *state) condBranch(cond ir.Node, yes, no *ssa.Block, likely int8) {
		cond := cond.(*ir.LogicalExpr)
		mid := s.f.NewBlock(ssa.BlockPlain)
		s.stmtList(cond.Init())
		s.condBranch(cond.X, mid, no, max8(likely, 0))
		s.condBranch(cond.Y, yes, no, likely)
		// Note: if likely==1, then both recursive calls pass 1.
		// If likely==-1, then we don't have enough information to decide
		// whether the first branch is likely or not. So we pass 0 for
		// the likeliness of the first branch.
		// TODO: have the frontend give us branch prediction hints for
		// OANDAND and OOROR nodes (if it ever has such info).

		cond := cond.(*ir.LogicalExpr)
		mid := s.f.NewBlock(ssa.BlockPlain)
		s.stmtList(cond.Init())
		s.condBranch(cond.X, yes, mid, min8(likely, 0))
		s.condBranch(cond.Y, yes, no, likely)
		// Note: if likely==-1, then both recursive calls pass -1.
		// If likely==1, then we don't have enough info to decide
		// the likelihood of the first branch.

		cond := cond.(*ir.UnaryExpr)
		s.stmtList(cond.Init())
		s.condBranch(cond.X, no, yes, -likely)

		cond := cond.(*ir.ConvExpr)
		s.stmtList(cond.Init())
		s.condBranch(cond.X, yes, no, likely)

	b.Kind = ssa.BlockIf
	b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness
	skipPtr skipMask = 1 << iota

// assign does left = right.
// Right has already been evaluated to ssa, left has not.
// If deref is true, then we do left = *right instead (and right has already been nil-checked).
// If deref is true and right == nil, just do left = 0.
// skip indicates assignments (at the top level) that can be avoided.
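//
// For example (a sketch), a mem->mem copy like "*p = *q" reaches here with
// deref=true and right holding q's address, while "x.b = v" on an SSA-able
// struct is handled below by rebuilding the whole struct value.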
func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask) {
	if left.Op() == ir.ONAME && ir.IsBlank(left) {
			s.Fatalf("can SSA LHS %v but not RHS %s", left, right)
		if left.Op() == ir.ODOT {
			// We're assigning to a field of an ssa-able value.
			// We need to build a new structure with the new value for the
			// field we're assigning and the old values for the other fields.
			// For instance:
			//
			//	type T struct {a, b, c int}
			//
			// For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c}

			// Grab information about the structure type.
			left := left.(*ir.SelectorExpr)
			idx := fieldIdx(left)

			// Grab old value of structure.
			old := s.expr(left.X)

			// Make new structure.
			new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t)

			// Add fields as args.
			for i := 0; i < nf; i++ {
					new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old))

			// Recursively assign the new value we've made to the base of the dot op.
			s.assign(left.X, new, false, 0)
			// TODO: do we need to update named values here?

		if left.Op() == ir.OINDEX && left.(*ir.IndexExpr).X.Type().IsArray() {
			left := left.(*ir.IndexExpr)
			s.pushLine(left.Pos())
			// We're assigning to an element of an ssa-able array.
			i := s.expr(left.Index) // index
				// The bounds check must fail. Might as well
				// ignore the actual index and just use zeros.
				z := s.constInt(types.Types[types.TINT], 0)
				s.boundsCheck(z, z, ssa.BoundsIndex, false)
				s.Fatalf("assigning to non-1-length array")
			// Rewrite to a = [1]{v}
			len := s.constInt(types.Types[types.TINT], 1)
			s.boundsCheck(i, len, ssa.BoundsIndex, false) // checks i == 0
			v := s.newValue1(ssa.OpArrayMake1, t, right)
			s.assign(left.X, v, false, 0)

		left := left.(*ir.Name)
		// Update variable assignment.
		s.vars[left] = right
		s.addNamedValue(left, right)

	// If this assignment clobbers an entire local variable, then emit
	// OpVarDef so liveness analysis knows the variable is redefined.
	if base, ok := clobberBase(left).(*ir.Name); ok && base.OnStack() && skip == 0 {
		s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base, s.mem(), !ir.IsAutoTmp(base))

	// Left is not ssa-able. Compute its address.
	addr := s.addr(left)
	if ir.IsReflectHeaderDataField(left) {
		// Package unsafe's documentation says storing pointers into
		// reflect.SliceHeader and reflect.StringHeader's Data fields
		// is valid, even though they have type uintptr (#19168).
		// Mark it pointer type to signal the writebarrier pass to
		// insert a write barrier.
		t = types.Types[types.TUNSAFEPTR]

		// Treat as a mem->mem move.
		s.move(t, addr, right)

	// Treat as a store.
	s.storeType(t, addr, right, skip, !ir.IsAutoTmp(left))
// zeroVal returns the zero value for type t.
func (s *state) zeroVal(t *types.Type) *ssa.Value {
			return s.constInt8(t, 0)
			return s.constInt16(t, 0)
			return s.constInt32(t, 0)
			return s.constInt64(t, 0)
			s.Fatalf("bad sized integer type %v", t)
			return s.constFloat32(t, 0)
			return s.constFloat64(t, 0)
			s.Fatalf("bad sized float type %v", t)
			z := s.constFloat32(types.Types[types.TFLOAT32], 0)
			return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
			z := s.constFloat64(types.Types[types.TFLOAT64], 0)
			return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
			s.Fatalf("bad sized complex type %v", t)
		return s.constEmptyString(t)
	case t.IsPtrShaped():
		return s.constNil(t)
		return s.constBool(false)
	case t.IsInterface():
		return s.constInterface(t)
		return s.constSlice(t)
		v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t)
		for i := 0; i < n; i++ {
			v.AddArg(s.zeroVal(t.FieldType(i)))
		switch t.NumElem() {
			return s.entryNewValue0(ssa.OpArrayMake0, t)
			return s.entryNewValue1(ssa.OpArrayMake1, t, s.zeroVal(t.Elem()))
	s.Fatalf("zero for type %v not implemented", t)
	callNormal callKind = iota
type sfRtCallDef struct {
	rtfn  *obj.LSym
	rtype types.Kind
}
var softFloatOps map[ssa.Op]sfRtCallDef

func softfloatInit() {
	// Some of these operations get transformed by sfcall.
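	// For example, there is no separate runtime fsub32/fsub64: OpSub32F and
	// OpSub64F are lowered through fadd32/fadd64 with the second argument
	// negated, OpNeq*F through feq* with the result inverted, and
	// OpLess*F/OpLeq*F through fgt*/fge* with the arguments swapped (see
	// sfcall below).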
	softFloatOps = map[ssa.Op]sfRtCallDef{
		ssa.OpAdd32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd32"), types.TFLOAT32},
		ssa.OpAdd64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd64"), types.TFLOAT64},
		ssa.OpSub32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd32"), types.TFLOAT32},
		ssa.OpSub64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd64"), types.TFLOAT64},
		ssa.OpMul32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fmul32"), types.TFLOAT32},
		ssa.OpMul64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fmul64"), types.TFLOAT64},
		ssa.OpDiv32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fdiv32"), types.TFLOAT32},
		ssa.OpDiv64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fdiv64"), types.TFLOAT64},

		ssa.OpEq64F: sfRtCallDef{typecheck.LookupRuntimeFunc("feq64"), types.TBOOL},
		ssa.OpEq32F: sfRtCallDef{typecheck.LookupRuntimeFunc("feq32"), types.TBOOL},
		ssa.OpNeq64F: sfRtCallDef{typecheck.LookupRuntimeFunc("feq64"), types.TBOOL},
		ssa.OpNeq32F: sfRtCallDef{typecheck.LookupRuntimeFunc("feq32"), types.TBOOL},
		ssa.OpLess64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fgt64"), types.TBOOL},
		ssa.OpLess32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fgt32"), types.TBOOL},
		ssa.OpLeq64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fge64"), types.TBOOL},
		ssa.OpLeq32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fge32"), types.TBOOL},

		ssa.OpCvt32to32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fint32to32"), types.TFLOAT32},
		ssa.OpCvt32Fto32: sfRtCallDef{typecheck.LookupRuntimeFunc("f32toint32"), types.TINT32},
		ssa.OpCvt64to32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fint64to32"), types.TFLOAT32},
		ssa.OpCvt32Fto64: sfRtCallDef{typecheck.LookupRuntimeFunc("f32toint64"), types.TINT64},
		ssa.OpCvt64Uto32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fuint64to32"), types.TFLOAT32},
		ssa.OpCvt32Fto64U: sfRtCallDef{typecheck.LookupRuntimeFunc("f32touint64"), types.TUINT64},
		ssa.OpCvt32to64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fint32to64"), types.TFLOAT64},
		ssa.OpCvt64Fto32: sfRtCallDef{typecheck.LookupRuntimeFunc("f64toint32"), types.TINT32},
		ssa.OpCvt64to64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fint64to64"), types.TFLOAT64},
		ssa.OpCvt64Fto64: sfRtCallDef{typecheck.LookupRuntimeFunc("f64toint64"), types.TINT64},
		ssa.OpCvt64Uto64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fuint64to64"), types.TFLOAT64},
		ssa.OpCvt64Fto64U: sfRtCallDef{typecheck.LookupRuntimeFunc("f64touint64"), types.TUINT64},
		ssa.OpCvt32Fto64F: sfRtCallDef{typecheck.LookupRuntimeFunc("f32to64"), types.TFLOAT64},
		ssa.OpCvt64Fto32F: sfRtCallDef{typecheck.LookupRuntimeFunc("f64to32"), types.TFLOAT32},
	}
3643 // TODO: do not emit sfcall if the operation can be optimized to a constant in later opt phases.
3645 func (s *state) sfcall(op ssa.Op, args ...*ssa.Value) (*ssa.Value, bool) {
3646 if callDef, ok := softFloatOps[op]; ok {
3652 args[0], args[1] = args[1], args[0]
3655 args[1] = s.newValue1(s.ssaOp(ir.ONEG, types.Types[callDef.rtype]), args[1].Type, args[1])
3658 result := s.rtcall(callDef.rtfn, true, []*types.Type{types.Types[callDef.rtype]}, args...)[0]
3659 if op == ssa.OpNeq32F || op == ssa.OpNeq64F {
3660 result = s.newValue1(ssa.OpNot, result.Type, result)
3667 var intrinsics map[intrinsicKey]intrinsicBuilder
3669 // An intrinsicBuilder converts a call node n into an ssa value that
3670 // implements that call as an intrinsic. args is a list of arguments to the func.
3671 type intrinsicBuilder func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value
3673 type intrinsicKey struct {
3680 intrinsics = map[intrinsicKey]intrinsicBuilder{}
3685 var lwatomics []*sys.Arch
3686 for _, a := range &sys.Archs {
3687 all = append(all, a)
3693 if a.Family != sys.PPC64 {
3694 lwatomics = append(lwatomics, a)
3698 // add adds the intrinsic b for pkg.fn for the given list of architectures.
3699 add := func(pkg, fn string, b intrinsicBuilder, archs ...*sys.Arch) {
3700 for _, a := range archs {
3701 intrinsics[intrinsicKey{a, pkg, fn}] = b
3704 // addF does the same as add but operates on architecture families.
3705 addF := func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily) {
3707 for _, f := range archFamilies {
3709 panic("too many architecture families")
3713 for _, a := range all {
3714 if m>>uint(a.Family)&1 != 0 {
3715 intrinsics[intrinsicKey{a, pkg, fn}] = b
3719 // alias defines pkg.fn = pkg2.fn2 for all architectures in archs for which pkg2.fn2 exists.
3720 alias := func(pkg, fn, pkg2, fn2 string, archs ...*sys.Arch) {
3722 for _, a := range archs {
3723 if b, ok := intrinsics[intrinsicKey{a, pkg2, fn2}]; ok {
3724 intrinsics[intrinsicKey{a, pkg, fn}] = b
3729 panic(fmt.Sprintf("attempted to alias undefined intrinsic: %s.%s", pkg, fn))
3733 /******** runtime ********/
3734 if !base.Flag.Cfg.Instrumenting {
3735 add("runtime", "slicebytetostringtmp",
3736 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3737 // Compiler frontend optimizations emit OBYTES2STRTMP nodes
3738 // for the backend instead of slicebytetostringtmp calls
3739 // when not instrumenting.
3740 return s.newValue2(ssa.OpStringMake, n.Type(), args[0], args[1])
3744 addF("runtime/internal/math", "MulUintptr",
3745 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3746 if s.config.PtrSize == 4 {
3747 return s.newValue2(ssa.OpMul32uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1])
3749 return s.newValue2(ssa.OpMul64uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1])
3751 sys.AMD64, sys.I386, sys.MIPS64)
3752 add("runtime", "KeepAlive",
3753 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3754 data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0])
3755 s.vars[memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem())
3759 add("runtime", "getclosureptr",
3760 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3761 return s.newValue0(ssa.OpGetClosurePtr, s.f.Config.Types.Uintptr)
3765 add("runtime", "getcallerpc",
3766 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3767 return s.newValue0(ssa.OpGetCallerPC, s.f.Config.Types.Uintptr)
3771 add("runtime", "getcallersp",
3772 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3773 return s.newValue0(ssa.OpGetCallerSP, s.f.Config.Types.Uintptr)
3777 /******** runtime/internal/sys ********/
3778 addF("runtime/internal/sys", "Ctz32",
3779 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3780 return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], args[0])
3782 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
3783 addF("runtime/internal/sys", "Ctz64",
3784 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3785 return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], args[0])
3787 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
3788 addF("runtime/internal/sys", "Bswap32",
3789 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3790 return s.newValue1(ssa.OpBswap32, types.Types[types.TUINT32], args[0])
3792 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
3793 addF("runtime/internal/sys", "Bswap64",
3794 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3795 return s.newValue1(ssa.OpBswap64, types.Types[types.TUINT64], args[0])
3797 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
3799 /******** runtime/internal/atomic ********/
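// The atomic intrinsics below all share one shape: the atomic SSA op
// yields a (value, memory) tuple; Select1 threads the new memory state
// through s.vars[memVar], and Select0 extracts the result.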
3800 addF("runtime/internal/atomic", "Load",
3801 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3802 v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem())
3803 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
3804 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
3806 sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
3807 addF("runtime/internal/atomic", "Load8",
3808 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3809 v := s.newValue2(ssa.OpAtomicLoad8, types.NewTuple(types.Types[types.TUINT8], types.TypeMem), args[0], s.mem())
3810 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
3811 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT8], v)
3813 sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
3814 addF("runtime/internal/atomic", "Load64",
3815 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3816 v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem())
3817 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
3818 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
3820 sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
3821 addF("runtime/internal/atomic", "LoadAcq",
3822 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3823 v := s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem())
3824 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
3825 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
3827 sys.PPC64, sys.S390X)
3828 addF("runtime/internal/atomic", "LoadAcq64",
3829 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3830 v := s.newValue2(ssa.OpAtomicLoadAcq64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem())
3831 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
3832 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
3835 addF("runtime/internal/atomic", "Loadp",
3836 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3837 v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem())
3838 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
3839 return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v)
3841 sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
3843 addF("runtime/internal/atomic", "Store",
3844 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3845 s.vars[memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem())
3848 sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
3849 addF("runtime/internal/atomic", "Store8",
3850 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3851 s.vars[memVar] = s.newValue3(ssa.OpAtomicStore8, types.TypeMem, args[0], args[1], s.mem())
3854 sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
3855 addF("runtime/internal/atomic", "Store64",
3856 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3857 s.vars[memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem())
3860 sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
3861 addF("runtime/internal/atomic", "StorepNoWB",
3862 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3863 s.vars[memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem())
3866 sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.RISCV64, sys.S390X)
3867 addF("runtime/internal/atomic", "StoreRel",
3868 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3869 s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel32, types.TypeMem, args[0], args[1], s.mem())
3872 sys.PPC64, sys.S390X)
3873 addF("runtime/internal/atomic", "StoreRel64",
3874 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3875 s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel64, types.TypeMem, args[0], args[1], s.mem())
3880 addF("runtime/internal/atomic", "Xchg",
3881 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3882 v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem())
3883 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
3884 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
3886 sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
3887 addF("runtime/internal/atomic", "Xchg64",
3888 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3889 v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem())
3890 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
3891 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
3893 sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
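// On ARM64, improved atomic instructions are available only on some
// CPUs, so makeAtomicGuardedIntrinsicARM64 below emits a runtime
// branch on the detected feature, roughly (a sketch):
//
//	if arm64HasATOMICS { // ir.Syms.ARM64HasATOMICS
//		<emit op1, the "Variant" form of the op>
//	} else {
//		<emit op0, the original form>
//	}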
3895 type atomicOpEmitter func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind)
3897 makeAtomicGuardedIntrinsicARM64 := func(op0, op1 ssa.Op, typ, rtyp types.Kind, emit atomicOpEmitter) intrinsicBuilder {
3899 return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3900 // The target's atomics CPU feature is detected dynamically at run time.
3901 addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), ir.Syms.ARM64HasATOMICS, s.sb)
3902 v := s.load(types.Types[types.TBOOL], addr)
3904 b.Kind = ssa.BlockIf
3906 bTrue := s.f.NewBlock(ssa.BlockPlain)
3907 bFalse := s.f.NewBlock(ssa.BlockPlain)
3908 bEnd := s.f.NewBlock(ssa.BlockPlain)
3911 b.Likely = ssa.BranchLikely
3913 // We have atomic instructions - use them directly.
3915 emit(s, n, args, op1, typ)
3916 s.endBlock().AddEdgeTo(bEnd)
3918 // Use original instruction sequence.
3919 s.startBlock(bFalse)
3920 emit(s, n, args, op0, typ)
3921 s.endBlock().AddEdgeTo(bEnd)
3925 if rtyp == types.TNIL {
3928 return s.variable(n, types.Types[rtyp])
3933 atomicXchgXaddEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
3934 v := s.newValue3(op, types.NewTuple(types.Types[typ], types.TypeMem), args[0], args[1], s.mem())
3935 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
3936 s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
3938 addF("runtime/internal/atomic", "Xchg",
3939 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange32, ssa.OpAtomicExchange32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64),
3941 addF("runtime/internal/atomic", "Xchg64",
3942 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange64, ssa.OpAtomicExchange64Variant, types.TUINT64, types.TUINT64, atomicXchgXaddEmitterARM64),
3945 addF("runtime/internal/atomic", "Xadd",
3946 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3947 v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem())
3948 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
3949 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
3951 sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
3952 addF("runtime/internal/atomic", "Xadd64",
3953 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3954 v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem())
3955 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
3956 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
3958 sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
3960 addF("runtime/internal/atomic", "Xadd",
3961 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd32, ssa.OpAtomicAdd32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64),
3963 addF("runtime/internal/atomic", "Xadd64",
3964 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd64, ssa.OpAtomicAdd64Variant, types.TUINT64, types.TUINT64, atomicXchgXaddEmitterARM64),
3967 addF("runtime/internal/atomic", "Cas",
3968 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3969 v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
3970 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
3971 return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
3973 sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
3974 addF("runtime/internal/atomic", "Cas64",
3975 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3976 v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
3977 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
3978 return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
3980 sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
3981 addF("runtime/internal/atomic", "CasRel",
3982 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3983 v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
3984 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
3985 return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
3989 atomicCasEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
3990 v := s.newValue4(op, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
3991 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
3992 s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
3995 addF("runtime/internal/atomic", "Cas",
3996 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap32, ssa.OpAtomicCompareAndSwap32Variant, types.TUINT32, types.TBOOL, atomicCasEmitterARM64),
3998 addF("runtime/internal/atomic", "Cas64",
3999 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap64, ssa.OpAtomicCompareAndSwap64Variant, types.TUINT64, types.TBOOL, atomicCasEmitterARM64),
4002 addF("runtime/internal/atomic", "And8",
4003 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4004 s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem())
4007 sys.AMD64, sys.MIPS, sys.PPC64, sys.RISCV64, sys.S390X)
4008 addF("runtime/internal/atomic", "And",
4009 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4010 s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd32, types.TypeMem, args[0], args[1], s.mem())
4013 sys.AMD64, sys.MIPS, sys.PPC64, sys.RISCV64, sys.S390X)
4014 addF("runtime/internal/atomic", "Or8",
4015 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4016 s.vars[memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem())
4019 sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.RISCV64, sys.S390X)
4020 addF("runtime/internal/atomic", "Or",
4021 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4022 s.vars[memVar] = s.newValue3(ssa.OpAtomicOr32, types.TypeMem, args[0], args[1], s.mem())
4025 sys.AMD64, sys.MIPS, sys.PPC64, sys.RISCV64, sys.S390X)
4027 atomicAndOrEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
4028 s.vars[memVar] = s.newValue3(op, types.TypeMem, args[0], args[1], s.mem())
4031 addF("runtime/internal/atomic", "And8",
4032 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd8, ssa.OpAtomicAnd8Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
4034 addF("runtime/internal/atomic", "And",
4035 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd32, ssa.OpAtomicAnd32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
4037 addF("runtime/internal/atomic", "Or8",
4038 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr8, ssa.OpAtomicOr8Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
4040 addF("runtime/internal/atomic", "Or",
4041 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr32, ssa.OpAtomicOr32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
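// The aliases below map width-generic names onto the sized
// implementations registered above: p4 holds the architectures with
// 4-byte pointers and p8 those with 8-byte pointers, so, e.g.,
// Loaduintptr resolves to Load on p4 and to Load64 on p8.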
4044 // Aliases for atomic load operations
4045 alias("runtime/internal/atomic", "Loadint32", "runtime/internal/atomic", "Load", all...)
4046 alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...)
4047 alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load", p4...)
4048 alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load64", p8...)
4049 alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load", p4...)
4050 alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load64", p8...)
4051 alias("runtime/internal/atomic", "LoadAcq", "runtime/internal/atomic", "Load", lwatomics...)
4052 alias("runtime/internal/atomic", "LoadAcq64", "runtime/internal/atomic", "Load64", lwatomics...)
4053 alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...)
4054 alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...) // linknamed
4055 alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...)
4056 alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...) // linknamed
4058 // Aliases for atomic store operations
4059 alias("runtime/internal/atomic", "Storeint32", "runtime/internal/atomic", "Store", all...)
4060 alias("runtime/internal/atomic", "Storeint64", "runtime/internal/atomic", "Store64", all...)
4061 alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...)
4062 alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...)
4063 alias("runtime/internal/atomic", "StoreRel", "runtime/internal/atomic", "Store", lwatomics...)
4064 alias("runtime/internal/atomic", "StoreRel64", "runtime/internal/atomic", "Store64", lwatomics...)
4065 alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...)
4066 alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...) // linknamed
4067 alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...)
4068 alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...) // linknamed
4070 // Aliases for atomic swap operations
4071 alias("runtime/internal/atomic", "Xchgint32", "runtime/internal/atomic", "Xchg", all...)
4072 alias("runtime/internal/atomic", "Xchgint64", "runtime/internal/atomic", "Xchg64", all...)
4073 alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...)
4074 alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...)
4076 // Aliases for atomic add operations
4077 alias("runtime/internal/atomic", "Xaddint32", "runtime/internal/atomic", "Xadd", all...)
4078 alias("runtime/internal/atomic", "Xaddint64", "runtime/internal/atomic", "Xadd64", all...)
4079 alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...)
4080 alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd64", p8...)
4082 // Aliases for atomic CAS operations
4083 alias("runtime/internal/atomic", "Casint32", "runtime/internal/atomic", "Cas", all...)
4084 alias("runtime/internal/atomic", "Casint64", "runtime/internal/atomic", "Cas64", all...)
4085 alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas", p4...)
4086 alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas64", p8...)
4087 alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas", p4...)
4088 alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas64", p8...)
4089 alias("runtime/internal/atomic", "CasRel", "runtime/internal/atomic", "Cas", lwatomics...)
4091 /******** math ********/
4092 addF("math", "Sqrt",
4093 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4094 return s.newValue1(ssa.OpSqrt, types.Types[types.TFLOAT64], args[0])
4096 sys.I386, sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm)
4097 addF("math", "Trunc",
4098 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4099 return s.newValue1(ssa.OpTrunc, types.Types[types.TFLOAT64], args[0])
4101 sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
4102 addF("math", "Ceil",
4103 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4104 return s.newValue1(ssa.OpCeil, types.Types[types.TFLOAT64], args[0])
4106 sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
4107 addF("math", "Floor",
4108 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4109 return s.newValue1(ssa.OpFloor, types.Types[types.TFLOAT64], args[0])
4111 sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
4112 addF("math", "Round",
4113 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4114 return s.newValue1(ssa.OpRound, types.Types[types.TFLOAT64], args[0])
4116 sys.ARM64, sys.PPC64, sys.S390X)
4117 addF("math", "RoundToEven",
4118 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4119 return s.newValue1(ssa.OpRoundToEven, types.Types[types.TFLOAT64], args[0])
4121 sys.ARM64, sys.S390X, sys.Wasm)
4123 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4124 return s.newValue1(ssa.OpAbs, types.Types[types.TFLOAT64], args[0])
4126 sys.ARM64, sys.ARM, sys.PPC64, sys.Wasm)
4127 addF("math", "Copysign",
4128 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4129 return s.newValue2(ssa.OpCopysign, types.Types[types.TFLOAT64], args[0], args[1])
4131 sys.PPC64, sys.Wasm)
4133 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4134 return s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
4136 sys.ARM64, sys.PPC64, sys.S390X)
4138 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4139 if !s.config.UseFMA {
4140 s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
4141 return s.variable(n, types.Types[types.TFLOAT64])
4143 v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasFMA)
4145 b.Kind = ssa.BlockIf
4147 bTrue := s.f.NewBlock(ssa.BlockPlain)
4148 bFalse := s.f.NewBlock(ssa.BlockPlain)
4149 bEnd := s.f.NewBlock(ssa.BlockPlain)
4152 b.Likely = ssa.BranchLikely // Haswell and newer CPUs are common
4154 // We have the intrinsic - use it directly.
4156 s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
4157 s.endBlock().AddEdgeTo(bEnd)
4159 // Call the pure Go version.
4160 s.startBlock(bFalse)
4161 s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
4162 s.endBlock().AddEdgeTo(bEnd)
4166 return s.variable(n, types.Types[types.TFLOAT64])
4170 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4171 if !s.config.UseFMA {
4172 s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
4173 return s.variable(n, types.Types[types.TFLOAT64])
4175 addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), ir.Syms.ARMHasVFPv4, s.sb)
4176 v := s.load(types.Types[types.TBOOL], addr)
4178 b.Kind = ssa.BlockIf
4180 bTrue := s.f.NewBlock(ssa.BlockPlain)
4181 bFalse := s.f.NewBlock(ssa.BlockPlain)
4182 bEnd := s.f.NewBlock(ssa.BlockPlain)
4185 b.Likely = ssa.BranchLikely
4187 // We have the intrinsic - use it directly.
4189 s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
4190 s.endBlock().AddEdgeTo(bEnd)
4192 // Call the pure Go version.
4193 s.startBlock(bFalse)
4194 s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
4195 s.endBlock().AddEdgeTo(bEnd)
4199 return s.variable(n, types.Types[types.TFLOAT64])
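// makeRoundAMD64 applies the same guard pattern as the FMA intrinsics
// above: branch on a CPU feature (here X86HasSSE41), use the hardware
// op when present, and otherwise fall back to a call to the pure Go
// implementation.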
4203 makeRoundAMD64 := func(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4204 return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4205 v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasSSE41)
4207 b.Kind = ssa.BlockIf
4209 bTrue := s.f.NewBlock(ssa.BlockPlain)
4210 bFalse := s.f.NewBlock(ssa.BlockPlain)
4211 bEnd := s.f.NewBlock(ssa.BlockPlain)
4214 b.Likely = ssa.BranchLikely // most machines have SSE4.1 nowadays
4216 // We have the intrinsic - use it directly.
4218 s.vars[n] = s.newValue1(op, types.Types[types.TFLOAT64], args[0])
4219 s.endBlock().AddEdgeTo(bEnd)
4221 // Call the pure Go version.
4222 s.startBlock(bFalse)
4223 s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
4224 s.endBlock().AddEdgeTo(bEnd)
4228 return s.variable(n, types.Types[types.TFLOAT64])
4231 addF("math", "RoundToEven",
4232 makeRoundAMD64(ssa.OpRoundToEven),
4234 addF("math", "Floor",
4235 makeRoundAMD64(ssa.OpFloor),
4237 addF("math", "Ceil",
4238 makeRoundAMD64(ssa.OpCeil),
4240 addF("math", "Trunc",
4241 makeRoundAMD64(ssa.OpTrunc),
4244 /******** math/bits ********/
4245 addF("math/bits", "TrailingZeros64",
4246 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4247 return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], args[0])
4249 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
4250 addF("math/bits", "TrailingZeros32",
4251 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4252 return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], args[0])
4254 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
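// For targets without a dedicated 16-bit count-trailing-zeros op, the
// input is zero-extended and OR-ed with 1<<16 before the wider op:
// the planted bit makes a zero input yield 16 rather than 32 (or 64),
// matching bits.TrailingZeros16(0) == 16. The TrailingZeros8 variants
// below use the same trick with 1<<8.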
4255 addF("math/bits", "TrailingZeros16",
4256 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4257 x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0])
4258 c := s.constInt32(types.Types[types.TUINT32], 1<<16)
4259 y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c)
4260 return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], y)
4263 addF("math/bits", "TrailingZeros16",
4264 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4265 return s.newValue1(ssa.OpCtz16, types.Types[types.TINT], args[0])
4267 sys.AMD64, sys.I386, sys.ARM, sys.ARM64, sys.Wasm)
4268 addF("math/bits", "TrailingZeros16",
4269 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4270 x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0])
4271 c := s.constInt64(types.Types[types.TUINT64], 1<<16)
4272 y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c)
4273 return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], y)
4275 sys.S390X, sys.PPC64)
4276 addF("math/bits", "TrailingZeros8",
4277 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4278 x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0])
4279 c := s.constInt32(types.Types[types.TUINT32], 1<<8)
4280 y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c)
4281 return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], y)
4284 addF("math/bits", "TrailingZeros8",
4285 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4286 return s.newValue1(ssa.OpCtz8, types.Types[types.TINT], args[0])
4288 sys.AMD64, sys.ARM, sys.ARM64, sys.Wasm)
4289 addF("math/bits", "TrailingZeros8",
4290 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4291 x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0])
4292 c := s.constInt64(types.Types[types.TUINT64], 1<<8)
4293 y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c)
4294 return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], y)
4297 alias("math/bits", "ReverseBytes64", "runtime/internal/sys", "Bswap64", all...)
4298 alias("math/bits", "ReverseBytes32", "runtime/internal/sys", "Bswap32", all...)
4299 // ReverseBytes inlines correctly, no need to intrinsify it.
4300 // ReverseBytes16 lowers to a rotate, no need for anything special here.
4301 addF("math/bits", "Len64",
4302 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4303 return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0])
4305 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
4306 addF("math/bits", "Len32",
4307 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4308 return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
4310 sys.AMD64, sys.ARM64)
4311 addF("math/bits", "Len32",
4312 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4313 if s.config.PtrSize == 4 {
4314 return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
4316 x := s.newValue1(ssa.OpZeroExt32to64, types.Types[types.TUINT64], args[0])
4317 return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
4319 sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
4320 addF("math/bits", "Len16",
4321 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4322 if s.config.PtrSize == 4 {
4323 x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0])
4324 return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x)
4326 x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0])
4327 return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
4329 sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
4330 addF("math/bits", "Len16",
4331 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4332 return s.newValue1(ssa.OpBitLen16, types.Types[types.TINT], args[0])
4335 addF("math/bits", "Len8",
4336 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4337 if s.config.PtrSize == 4 {
4338 x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0])
4339 return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x)
4341 x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0])
4342 return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
4344 sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
4345 addF("math/bits", "Len8",
4346 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4347 return s.newValue1(ssa.OpBitLen8, types.Types[types.TINT], args[0])
4350 addF("math/bits", "Len",
4351 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4352 if s.config.PtrSize == 4 {
4353 return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
4355 return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0])
4357 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
4358 // LeadingZeros is handled because it trivially calls Len.
4359 addF("math/bits", "Reverse64",
4360 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4361 return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0])
4364 addF("math/bits", "Reverse32",
4365 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4366 return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0])
4369 addF("math/bits", "Reverse16",
4370 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4371 return s.newValue1(ssa.OpBitRev16, types.Types[types.TINT], args[0])
4374 addF("math/bits", "Reverse8",
4375 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4376 return s.newValue1(ssa.OpBitRev8, types.Types[types.TINT], args[0])
4379 addF("math/bits", "Reverse",
4380 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4381 if s.config.PtrSize == 4 {
4382 return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0])
4384 return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0])
4387 addF("math/bits", "RotateLeft8",
4388 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4389 return s.newValue2(ssa.OpRotateLeft8, types.Types[types.TUINT8], args[0], args[1])
4392 addF("math/bits", "RotateLeft16",
4393 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4394 return s.newValue2(ssa.OpRotateLeft16, types.Types[types.TUINT16], args[0], args[1])
4397 addF("math/bits", "RotateLeft32",
4398 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4399 return s.newValue2(ssa.OpRotateLeft32, types.Types[types.TUINT32], args[0], args[1])
4401 sys.AMD64, sys.ARM, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
4402 addF("math/bits", "RotateLeft64",
4403 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4404 return s.newValue2(ssa.OpRotateLeft64, types.Types[types.TUINT64], args[0], args[1])
4406 sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
4407 alias("math/bits", "RotateLeft", "math/bits", "RotateLeft64", p8...)
4409 makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4410 return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4411 v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasPOPCNT)
4413 b.Kind = ssa.BlockIf
4415 bTrue := s.f.NewBlock(ssa.BlockPlain)
4416 bFalse := s.f.NewBlock(ssa.BlockPlain)
4417 bEnd := s.f.NewBlock(ssa.BlockPlain)
4420 b.Likely = ssa.BranchLikely // most machines have POPCNT nowadays
4422 // We have the intrinsic - use it directly.
4425 if s.config.PtrSize == 4 {
4428 s.vars[n] = s.newValue1(op, types.Types[types.TINT], args[0])
4429 s.endBlock().AddEdgeTo(bEnd)
4431 // Call the pure Go version.
4432 s.startBlock(bFalse)
4433 s.vars[n] = s.callResult(n, callNormal) // types.Types[TINT]
4434 s.endBlock().AddEdgeTo(bEnd)
4438 return s.variable(n, types.Types[types.TINT])
4441 addF("math/bits", "OnesCount64",
4442 makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount64),
4444 addF("math/bits", "OnesCount64",
4445 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4446 return s.newValue1(ssa.OpPopCount64, types.Types[types.TINT], args[0])
4448 sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
4449 addF("math/bits", "OnesCount32",
4450 makeOnesCountAMD64(ssa.OpPopCount32, ssa.OpPopCount32),
4452 addF("math/bits", "OnesCount32",
4453 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4454 return s.newValue1(ssa.OpPopCount32, types.Types[types.TINT], args[0])
4456 sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
4457 addF("math/bits", "OnesCount16",
4458 makeOnesCountAMD64(ssa.OpPopCount16, ssa.OpPopCount16),
4460 addF("math/bits", "OnesCount16",
4461 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4462 return s.newValue1(ssa.OpPopCount16, types.Types[types.TINT], args[0])
4464 sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
4465 addF("math/bits", "OnesCount8",
4466 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4467 return s.newValue1(ssa.OpPopCount8, types.Types[types.TINT], args[0])
4469 sys.S390X, sys.PPC64, sys.Wasm)
4470 addF("math/bits", "OnesCount",
4471 makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount32),
4473 addF("math/bits", "Mul64",
4474 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4475 return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1])
4477 sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.MIPS64)
4478 alias("math/bits", "Mul", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchPPC64LE, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE)
4479 alias("runtime/internal/math", "Mul64", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchPPC64LE, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE)
4480 addF("math/bits", "Add64",
4481 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4482 return s.newValue3(ssa.OpAdd64carry, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
4484 sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X)
4485 alias("math/bits", "Add", "math/bits", "Add64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchPPC64LE, sys.ArchS390X)
4486 addF("math/bits", "Sub64",
4487 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4488 return s.newValue3(ssa.OpSub64borrow, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
4490 sys.AMD64, sys.ARM64, sys.S390X)
4491 alias("math/bits", "Sub", "math/bits", "Sub64", sys.ArchAMD64, sys.ArchARM64, sys.ArchS390X)
4492 addF("math/bits", "Div64",
4493 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4494 // Check for divide-by-zero (y == 0) and quotient overflow (hi >= y), panicking with the appropriate runtime message, per the bits.Div64 contract.
4495 cmpZero := s.newValue2(s.ssaOp(ir.ONE, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[2], s.zeroVal(types.Types[types.TUINT64]))
4496 s.check(cmpZero, ir.Syms.Panicdivide)
4497 cmpOverflow := s.newValue2(s.ssaOp(ir.OLT, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[0], args[2])
4498 s.check(cmpOverflow, ir.Syms.Panicoverflow)
4499 return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
4502 alias("math/bits", "Div", "math/bits", "Div64", sys.ArchAMD64)
4504 alias("runtime/internal/sys", "Ctz8", "math/bits", "TrailingZeros8", all...)
4505 alias("runtime/internal/sys", "TrailingZeros8", "math/bits", "TrailingZeros8", all...)
4506 alias("runtime/internal/sys", "TrailingZeros64", "math/bits", "TrailingZeros64", all...)
4507 alias("runtime/internal/sys", "Len8", "math/bits", "Len8", all...)
4508 alias("runtime/internal/sys", "Len64", "math/bits", "Len64", all...)
4509 alias("runtime/internal/sys", "OnesCount64", "math/bits", "OnesCount64", all...)
4511 /******** sync/atomic ********/
4513 // Note: these are disabled when the race detector is in use (base.Flag.Race); see findIntrinsic below.
4514 alias("sync/atomic", "LoadInt32", "runtime/internal/atomic", "Load", all...)
4515 alias("sync/atomic", "LoadInt64", "runtime/internal/atomic", "Load64", all...)
4516 alias("sync/atomic", "LoadPointer", "runtime/internal/atomic", "Loadp", all...)
4517 alias("sync/atomic", "LoadUint32", "runtime/internal/atomic", "Load", all...)
4518 alias("sync/atomic", "LoadUint64", "runtime/internal/atomic", "Load64", all...)
4519 alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load", p4...)
4520 alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load64", p8...)
4522 alias("sync/atomic", "StoreInt32", "runtime/internal/atomic", "Store", all...)
4523 alias("sync/atomic", "StoreInt64", "runtime/internal/atomic", "Store64", all...)
4524 // Note: not StorePointer, that needs a write barrier. Same below for {CompareAnd}Swap.
4525 alias("sync/atomic", "StoreUint32", "runtime/internal/atomic", "Store", all...)
4526 alias("sync/atomic", "StoreUint64", "runtime/internal/atomic", "Store64", all...)
4527 alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store", p4...)
4528 alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store64", p8...)
4530 alias("sync/atomic", "SwapInt32", "runtime/internal/atomic", "Xchg", all...)
4531 alias("sync/atomic", "SwapInt64", "runtime/internal/atomic", "Xchg64", all...)
4532 alias("sync/atomic", "SwapUint32", "runtime/internal/atomic", "Xchg", all...)
4533 alias("sync/atomic", "SwapUint64", "runtime/internal/atomic", "Xchg64", all...)
4534 alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg", p4...)
4535 alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg64", p8...)
4537 alias("sync/atomic", "CompareAndSwapInt32", "runtime/internal/atomic", "Cas", all...)
4538 alias("sync/atomic", "CompareAndSwapInt64", "runtime/internal/atomic", "Cas64", all...)
4539 alias("sync/atomic", "CompareAndSwapUint32", "runtime/internal/atomic", "Cas", all...)
4540 alias("sync/atomic", "CompareAndSwapUint64", "runtime/internal/atomic", "Cas64", all...)
4541 alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas", p4...)
4542 alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas64", p8...)
4544 alias("sync/atomic", "AddInt32", "runtime/internal/atomic", "Xadd", all...)
4545 alias("sync/atomic", "AddInt64", "runtime/internal/atomic", "Xadd64", all...)
4546 alias("sync/atomic", "AddUint32", "runtime/internal/atomic", "Xadd", all...)
4547 alias("sync/atomic", "AddUint64", "runtime/internal/atomic", "Xadd64", all...)
4548 alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd", p4...)
4549 alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd64", p8...)
4551 /******** math/big ********/
4552 add("math/big", "mulWW",
4553 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4554 return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1])
4556 sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64LE, sys.ArchPPC64, sys.ArchS390X)
4559 // findIntrinsic returns a function which builds the SSA equivalent of the
4560 // function identified by the symbol sym. If sym is not an intrinsic call, returns nil.
4561 func findIntrinsic(sym *types.Sym) intrinsicBuilder {
4562 if sym == nil || sym.Pkg == nil {
4566 if sym.Pkg == types.LocalPkg {
4567 pkg = base.Ctxt.Pkgpath
4569 if sym.Pkg == ir.Pkgs.Runtime {
4572 if base.Flag.Race && pkg == "sync/atomic" {
4573 // The race detector needs to be able to intercept these calls.
4574 // We can't intrinsify them.
4577 // Skip intrinsifying math functions (which may contain hard-float
4578 // instructions) when soft-float mode is in use.
4579 if Arch.SoftFloat && pkg == "math" {
4584 if ssa.IntrinsicsDisable {
4585 if pkg == "runtime" && (fn == "getcallerpc" || fn == "getcallersp" || fn == "getclosureptr") {
4586 // These runtime functions don't have definitions; they must be intrinsics.
4591 return intrinsics[intrinsicKey{Arch.LinkArch.Arch, pkg, fn}]
4594 func IsIntrinsicCall(n *ir.CallExpr) bool {
4598 name, ok := n.X.(*ir.Name)
4602 return findIntrinsic(name.Sym()) != nil
4605 // intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation.
4606 func (s *state) intrinsicCall(n *ir.CallExpr) *ssa.Value {
4607 v := findIntrinsic(n.X.Sym())(s, n, s.intrinsicArgs(n))
4608 if ssa.IntrinsicsDebug > 0 {
4613 if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 {
4616 base.WarnfAt(n.Pos(), "intrinsic substitution for %v with %s", n.X.Sym().Name, x.LongString())
4621 // intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them.
4622 func (s *state) intrinsicArgs(n *ir.CallExpr) []*ssa.Value {
4623 args := make([]*ssa.Value, len(n.Args))
4624 for i, n := range n.Args {
4630 // openDeferRecord adds code to evaluate and store the function for an open-coded defer
4631 // call, and records info about the defer, so we can generate proper code on the
4632 // exit paths. n is the sub-node of the defer node that is the actual function
4633 // call. We will also record funcdata information on where the function is stored
4634 // (as well as the deferBits variable), and this will enable us to run the proper
4635 // defer calls during panics.
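// For example (a sketch):
//
//	func f() {
//		defer g() // records bit 0 of deferBits
//		defer h() // records bit 1 of deferBits
//	}
//
// Each defer statement ORs its bit into deferBits as it executes, so
// the exit path calls only the deferred functions whose bits are set.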
4636 func (s *state) openDeferRecord(n *ir.CallExpr) {
4637 if len(n.Args) != 0 || n.Op() != ir.OCALLFUNC || n.X.Type().NumResults() != 0 {
4638 s.Fatalf("defer call with arguments or results: %v", n)
4641 opendefer := &openDeferInfo{
4645 // We must always store the function value in a stack slot for the
4646 // runtime panic code to use. But in the defer exit code, we will
4647 // call the function directly if it is a static function.
4648 closureVal := s.expr(fn)
4649 closure := s.openDeferSave(fn.Type(), closureVal)
4650 opendefer.closureNode = closure.Aux.(*ir.Name)
4651 if !(fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC) {
4652 opendefer.closure = closure
4654 index := len(s.openDefers)
4655 s.openDefers = append(s.openDefers, opendefer)
4657 // Update deferBits only after the function has been successfully
4658 // evaluated and stored to the stack.
4659 bitvalue := s.constInt8(types.Types[types.TUINT8], 1<<uint(index))
4660 newDeferBits := s.newValue2(ssa.OpOr8, types.Types[types.TUINT8], s.variable(deferBitsVar, types.Types[types.TUINT8]), bitvalue)
4661 s.vars[deferBitsVar] = newDeferBits
4662 s.store(types.Types[types.TUINT8], s.deferBitsAddr, newDeferBits)
4665 // openDeferSave generates SSA nodes to store a value (with type t) for an
4666 // open-coded defer at an explicit autotmp location on the stack, so it can be
4667 // reloaded and used for the appropriate call on exit. Type t must be a function type
4668 // (therefore SSAable). val is the value to be stored. The function returns an SSA
4669 // value representing a pointer to the autotmp location.
4670 func (s *state) openDeferSave(t *types.Type, val *ssa.Value) *ssa.Value {
4672 s.Fatalf("openDeferSave of non-SSA-able type %v val=%v", t, val)
4674 if !t.HasPointers() {
4675 s.Fatalf("openDeferSave of pointerless type %v val=%v", t, val)
4678 temp := typecheck.TempAt(pos.WithNotStmt(), s.curfn, t)
4679 temp.SetOpenDeferSlot(true)
4680 var addrTemp *ssa.Value
4681 // Use OpVarLive to make sure the stack slot for the closure is not
4682 // removed by dead-store elimination.
4683 if s.curBlock.ID != s.f.Entry.ID {
4684 // Force the tmp storing this defer function to be declared in the entry
4685 // block, so that it will be live for the defer exit code (which will
4686 // actually access it only if the associated defer call has been activated).
4687 s.defvars[s.f.Entry.ID][memVar] = s.f.Entry.NewValue1A(src.NoXPos, ssa.OpVarDef, types.TypeMem, temp, s.defvars[s.f.Entry.ID][memVar])
4688 s.defvars[s.f.Entry.ID][memVar] = s.f.Entry.NewValue1A(src.NoXPos, ssa.OpVarLive, types.TypeMem, temp, s.defvars[s.f.Entry.ID][memVar])
4689 addrTemp = s.f.Entry.NewValue2A(src.NoXPos, ssa.OpLocalAddr, types.NewPtr(temp.Type()), temp, s.sp, s.defvars[s.f.Entry.ID][memVar])
4691 // Special case if we're still in the entry block. We can't use
4692 // the above code, since s.defvars[s.f.Entry.ID] isn't defined
4693 // until we end the entry block with s.endBlock().
4694 s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, temp, s.mem(), false)
4695 s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, temp, s.mem(), false)
4696 addrTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(temp.Type()), temp, s.sp, s.mem(), false)
4698 // Since we may use this temp during exit depending on the
4699 // deferBits, we must define it unconditionally on entry.
4700 // Therefore, we must make sure it is zeroed out in the entry
4701 // block if it contains pointers, else GC may wrongly follow an
4702 // uninitialized pointer value.
4703 temp.SetNeedzero(true)
4704 // We are storing to the stack, hence we can avoid the full checks in
4705 // storeType() (no write barrier) and do a simple store().
4706 s.store(t, addrTemp, val)
4710 // openDeferExit generates SSA for processing all the open coded defers at exit.
4711 // The code involves loading deferBits, and checking each of the bits to see if
4712 // the corresponding defer statement was executed. For each bit that is turned
4713 // on, the associated defer call is made.
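// In pseudo-Go, for each open-coded defer i, last to first:
//
//	if deferBits&(1<<i) != 0 {
//		deferBits &^= 1 << i // cleared first, in case the call panics
//		<call the closure saved for defer i>
//	}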
4714 func (s *state) openDeferExit() {
4715 deferExit := s.f.NewBlock(ssa.BlockPlain)
4716 s.endBlock().AddEdgeTo(deferExit)
4717 s.startBlock(deferExit)
4718 s.lastDeferExit = deferExit
4719 s.lastDeferCount = len(s.openDefers)
4720 zeroval := s.constInt8(types.Types[types.TUINT8], 0)
4721 // Test for and run defers in reverse order
4722 for i := len(s.openDefers) - 1; i >= 0; i-- {
4723 r := s.openDefers[i]
4724 bCond := s.f.NewBlock(ssa.BlockPlain)
4725 bEnd := s.f.NewBlock(ssa.BlockPlain)
4727 deferBits := s.variable(deferBitsVar, types.Types[types.TUINT8])
4728 // Generate code to check if the bit associated with the current
4729 // defer is set.
4730 bitval := s.constInt8(types.Types[types.TUINT8], 1<<uint(i))
4731 andval := s.newValue2(ssa.OpAnd8, types.Types[types.TUINT8], deferBits, bitval)
4732 eqVal := s.newValue2(ssa.OpEq8, types.Types[types.TBOOL], andval, zeroval)
4734 b.Kind = ssa.BlockIf
4738 bCond.AddEdgeTo(bEnd)
4741 // Clear this bit in deferBits and force store back to stack, so
4742 // we will not try to re-run this defer call if this defer call panics.
4743 nbitval := s.newValue1(ssa.OpCom8, types.Types[types.TUINT8], bitval)
4744 maskedval := s.newValue2(ssa.OpAnd8, types.Types[types.TUINT8], deferBits, nbitval)
4745 s.store(types.Types[types.TUINT8], s.deferBitsAddr, maskedval)
4746 // Use this value for following tests, so we keep the previous
4747 // mask.
4748 s.vars[deferBitsVar] = maskedval
4750 // Generate code to call the deferred function, using the
4751 // closure that was stored in an argtmp at the point of the defer
4752 // statement.
4754 stksize := fn.Type().ArgWidth()
4755 var callArgs []*ssa.Value
4757 if r.closure != nil {
4758 v := s.load(r.closure.Type.Elem(), r.closure)
4759 s.maybeNilCheckClosure(v, callDefer)
4760 codeptr := s.rawLoad(types.Types[types.TUINTPTR], v)
4761 aux := ssa.ClosureAuxCall(s.f.ABIDefault.ABIAnalyzeTypes(nil, nil, nil))
4762 call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, v)
4764 aux := ssa.StaticAuxCall(fn.(*ir.Name).Linksym(), s.f.ABIDefault.ABIAnalyzeTypes(nil, nil, nil))
4765 call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
4767 callArgs = append(callArgs, s.mem())
4768 call.AddArgs(callArgs...)
4769 call.AuxInt = stksize
4770 s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, 0, call)
4771 // Make sure that the stack slots with pointers are kept live
4772 // through the call (which is a preemption point). Also, we will
4773 // use the first call of the last defer exit to compute liveness
4774 // for the deferreturn, so we want all stack slots to be live.
4775 if r.closureNode != nil {
4776 s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode, s.mem(), false)
4784 func (s *state) callResult(n *ir.CallExpr, k callKind) *ssa.Value {
4785 return s.call(n, k, false)
4788 func (s *state) callAddr(n *ir.CallExpr, k callKind) *ssa.Value {
4789 return s.call(n, k, true)
4792 // Calls the function n using the specified call type.
4793 // Returns the address of the return value (or nil if none).
4794 func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Value {
4796 var callee *ir.Name // target function (if static)
4797 var closure *ssa.Value // ptr to closure to run (if dynamic)
4798 var codeptr *ssa.Value // ptr to target code (if dynamic)
4799 var rcvr *ssa.Value // receiver to set
4801 var ACArgs []*types.Type // AuxCall args
4802 var ACResults []*types.Type // AuxCall results
4803 var callArgs []*ssa.Value // For late-expansion, the args themselves (not stored, args to the call instead).
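// With late call expansion, the call is built as a single SSA value
// (OpStaticLECall, OpClosureLECall, or OpInterLECall) that carries its
// arguments directly; the argument stores and result loads are
// materialized later, in the expand_calls pass.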
4805 callABI := s.f.ABIDefault
4807 if !buildcfg.Experiment.RegabiArgs {
4808 var magicFnNameSym *types.Sym
4809 if fn.Name() != nil {
4810 magicFnNameSym = fn.Name().Sym()
4811 ss := magicFnNameSym.Name
4812 if strings.HasSuffix(ss, magicNameDotSuffix) {
4816 if magicFnNameSym == nil && n.Op() == ir.OCALLINTER {
4817 magicFnNameSym = fn.(*ir.SelectorExpr).Sym()
4818 ss := magicFnNameSym.Name
4819 if strings.HasSuffix(ss, magicNameDotSuffix[1:]) {
4825 if k != callNormal && (len(n.Args) != 0 || n.Op() == ir.OCALLINTER || n.X.Type().NumResults() != 0) {
4826 s.Fatalf("go/defer call with arguments: %v", n)
4831 if k == callNormal && fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC {
4834 if buildcfg.Experiment.RegabiArgs {
4835 // This is a static call, so it may be
4836 // a direct call to a non-ABIInternal
4837 // function. fn.Func may be nil for
4838 // some compiler-generated functions,
4839 // but those are all ABIInternal.
4841 callABI = abiForFunc(fn.Func, s.f.ABI0, s.f.ABI1)
4844 // TODO(register args) remove after register abi is working
4845 inRegistersImported := fn.Pragma()&ir.RegisterParams != 0
4846 inRegistersSamePackage := fn.Func != nil && fn.Func.Pragma&ir.RegisterParams != 0
4847 if inRegistersImported || inRegistersSamePackage {
4853 closure = s.expr(fn)
4854 if k != callDefer && k != callDeferStack {
4855 // A deferred nil function needs to panic when the function is invoked,
4856 // not at the point of the defer statement.
4857 s.maybeNilCheckClosure(closure, k)
4860 if fn.Op() != ir.ODOTINTER {
4861 s.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op())
4863 fn := fn.(*ir.SelectorExpr)
4864 var iclosure *ssa.Value
4865 iclosure, rcvr = s.getClosureAndRcvr(fn)
4866 if k == callNormal {
4867 codeptr = s.load(types.Types[types.TUINTPTR], iclosure)
4873 if !buildcfg.Experiment.RegabiArgs {
4874 if regAbiForFuncType(n.X.Type().FuncType()) {
4875 // Magic last type in input args to call
4880 params := callABI.ABIAnalyze(n.X.Type(), false /* Do not set (register) nNames from caller side -- can cause races. */)
4881 types.CalcSize(fn.Type())
4882 stksize := params.ArgWidth() // includes receiver, args, and results
4884 res := n.X.Type().Results()
4885 if k == callNormal {
4886 for _, p := range params.OutParams() {
4887 ACResults = append(ACResults, p.Type)
4892 if k == callDeferStack {
4893 // Make a defer struct d on the stack.
4895 s.Fatalf("deferprocStack with non-zero stack size %d: %v", stksize, n)
4899 d := typecheck.TempAt(n.Pos(), s.curfn, t)
4901 s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, d, s.mem())
4904 // Must match deferstruct() below and src/runtime/runtime2.go:_defer.
4905 // 0: started, set in deferprocStack
4906 // 1: heap, set in deferprocStack
4908 // 3: sp, set in deferprocStack
4909 // 4: pc, set in deferprocStack
4911 s.store(closure.Type,
4912 s.newValue1I(ssa.OpOffPtr, closure.Type.PtrTo(), t.FieldOff(5), addr),
4914 // 6: panic, set in deferprocStack
4915 // 7: link, set in deferprocStack
4920 // Call runtime.deferprocStack with pointer to _defer record.
4921 ACArgs = append(ACArgs, types.Types[types.TUINTPTR])
4922 aux := ssa.StaticAuxCall(ir.Syms.DeferprocStack, s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults))
4923 callArgs = append(callArgs, addr, s.mem())
4924 call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
4925 call.AddArgs(callArgs...)
4926 call.AuxInt = int64(types.PtrSize) // deferprocStack takes a *_defer arg
4928 // Store arguments to stack, including defer/go arguments and receiver for method calls.
4929 // These are written in SP-offset order.
4930 argStart := base.Ctxt.FixedFrameSize()
4932 if k != callNormal {
4933 // Write closure (arg to newproc/deferproc).
4934 ACArgs = append(ACArgs, types.Types[types.TUINTPTR]) // not argExtra
4935 callArgs = append(callArgs, closure)
4936 stksize += int64(types.PtrSize)
4937 argStart += int64(types.PtrSize)
4940 // Set receiver (for interface calls).
4942 callArgs = append(callArgs, rcvr)
4949 for _, p := range params.InParams() { // includes receiver for interface calls
4950 ACArgs = append(ACArgs, p.Type)
4952 for i, n := range args {
4953 callArgs = append(callArgs, s.putArg(n, t.Params().Field(i).Type))
4956 callArgs = append(callArgs, s.mem())
4960 case k == callDefer:
4961 aux := ssa.StaticAuxCall(ir.Syms.Deferproc, s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults)) // TODO paramResultInfo for DeferProc
4962 call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
4964 aux := ssa.StaticAuxCall(ir.Syms.Newproc, s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults))
4965 call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) // TODO paramResultInfo for NewProc
4966 case closure != nil:
4967 // rawLoad because loading the code pointer from a
4968 // closure is always safe, but IsSanitizerSafeAddr
4969 // can't always figure that out currently, and it's
4970 // critical that we not clobber any arguments already
4971 // stored onto the stack.
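// (Illustrative note: the code pointer is the first word of the closure
// record, which is why a plain load at offset 0 suffices here.)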
4972 codeptr = s.rawLoad(types.Types[types.TUINTPTR], closure)
4973 aux := ssa.ClosureAuxCall(callABI.ABIAnalyzeTypes(nil, ACArgs, ACResults))
4974 call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, closure)
4975 case codeptr != nil:
4976 // Note that the "receiver" parameter is nil because the actual receiver is the first input parameter.
4977 aux := ssa.InterfaceAuxCall(params)
4978 call = s.newValue1A(ssa.OpInterLECall, aux.LateExpansionResultType(), aux, codeptr)
4980 aux := ssa.StaticAuxCall(callTargetLSym(callee), params)
4981 call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
4983 s.Fatalf("bad call type %v %v", n.Op(), n)
4985 call.AddArgs(callArgs...)
4986 call.AuxInt = stksize // Call operations carry the argsize of the callee along with them
4989 s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
4990 // Insert OVARLIVE nodes
4991 for _, name := range n.KeepAlive {
4992 s.stmt(ir.NewUnaryExpr(n.Pos(), ir.OVARLIVE, name))
4995 // Finish block for defers
4996 if k == callDefer || k == callDeferStack {
4998 b.Kind = ssa.BlockDefer
5000 bNext := s.f.NewBlock(ssa.BlockPlain)
5002 // Add recover edge to exit code.
5003 r := s.f.NewBlock(ssa.BlockPlain)
5007 b.Likely = ssa.BranchLikely
5011 if res.NumFields() == 0 || k != callNormal {
5012 // call has no return value. Continue with the next statement.
5016 if returnResultAddr {
5017 return s.resultAddrOfCall(call, 0, fp.Type)
5019 return s.newValue1I(ssa.OpSelectN, fp.Type, 0, call)
5022 // maybeNilCheckClosure checks if a nil check of a closure is needed in some
5023 // architecture-dependent situations and, if so, emits the nil check.
5024 func (s *state) maybeNilCheckClosure(closure *ssa.Value, k callKind) {
5025 if Arch.LinkArch.Family == sys.Wasm || buildcfg.GOOS == "aix" && k != callGo {
5026 // On AIX, the closure must be nil-checked because fn can be nil; go calls are the exception, since the runtime handles them itself so it can report the "go of nil func value" error.
5027 // TODO(neelance): On other architectures this should be eliminated by the optimization steps
5032 // getClosureAndRcvr returns values for the appropriate closure and receiver of an interface call.
5034 func (s *state) getClosureAndRcvr(fn *ir.SelectorExpr) (*ssa.Value, *ssa.Value) {
5036 itab := s.newValue1(ssa.OpITab, types.Types[types.TUINTPTR], i)
5038 itabidx := fn.Offset() + 2*int64(types.PtrSize) + 8 // offset of fun field in runtime.itab
5039 closure := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab)
5040 rcvr := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, i)
5041 return closure, rcvr
5044 // etypesign returns the signed-ness of e, for integer/pointer etypes.
5045 // -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer.
5046 func etypesign(e types.Kind) int8 {
5048 case types.TINT8, types.TINT16, types.TINT32, types.TINT64, types.TINT:
5050 case types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINT, types.TUINTPTR, types.TUNSAFEPTR:
5056 // addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
5057 // The value that the returned Value represents is guaranteed to be non-nil.
5058 func (s *state) addr(n ir.Node) *ssa.Value {
5059 if n.Op() != ir.ONAME {
5065 s.Fatalf("addr of canSSA expression: %+v", n)
5068 t := types.NewPtr(n.Type())
5069 linksymOffset := func(lsym *obj.LSym, offset int64) *ssa.Value {
5070 v := s.entryNewValue1A(ssa.OpAddr, t, lsym, s.sb)
5071 // TODO: Make OpAddr use AuxInt as well as Aux.
5073 v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, offset, v)
5078 case ir.OLINKSYMOFFSET:
5079 no := n.(*ir.LinksymOffsetExpr)
5080 return linksymOffset(no.Linksym, no.Offset_)
5083 if n.Heapaddr != nil {
5084 return s.expr(n.Heapaddr)
5089 return linksymOffset(n.Linksym(), 0)
5096 s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
5099 return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), !ir.IsAutoTmp(n))
5101 case ir.PPARAMOUT: // Same as PAUTO -- cannot generate LEA early.
5102 // ensure that we reuse symbols for out parameters so
5103 // that cse works on their addresses
5104 return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), true)
5106 s.Fatalf("variable address class %v not implemented", n.Class)
5110 // load return from callee
5111 n := n.(*ir.ResultExpr)
5112 return s.resultAddrOfCall(s.prevCall, n.Index, n.Type())
5114 n := n.(*ir.IndexExpr)
5115 if n.X.Type().IsSlice() {
5117 i := s.expr(n.Index)
5118 len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], a)
5119 i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
5120 p := s.newValue1(ssa.OpSlicePtr, t, a)
5121 return s.newValue2(ssa.OpPtrIndex, t, p, i)
5124 i := s.expr(n.Index)
5125 len := s.constInt(types.Types[types.TINT], n.X.Type().NumElem())
5126 i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
5127 return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.X.Type().Elem()), a, i)
5130 n := n.(*ir.StarExpr)
5131 return s.exprPtr(n.X, n.Bounded(), n.Pos())
5133 n := n.(*ir.SelectorExpr)
5135 return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p)
5137 n := n.(*ir.SelectorExpr)
5138 p := s.exprPtr(n.X, n.Bounded(), n.Pos())
5139 return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p)
5141 n := n.(*ir.ConvExpr)
5142 if n.Type() == n.X.Type() {
5146 return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type
5147 case ir.OCALLFUNC, ir.OCALLINTER:
5148 n := n.(*ir.CallExpr)
5149 return s.callAddr(n, callNormal)
5151 n := n.(*ir.TypeAssertExpr)
5152 v, _ := s.dottype(n, false)
5153 if v.Op != ssa.OpLoad {
5154 s.Fatalf("dottype of non-load")
5156 if v.Args[1] != s.mem() {
5157 s.Fatalf("memory no longer live from dottype load")
5161 s.Fatalf("unhandled addr %v", n.Op())
5166 // canSSA reports whether n is SSA-able.
5167 // n must be an ONAME (or an ODOT sequence with an ONAME base).
5168 func (s *state) canSSA(n ir.Node) bool {
5169 if base.Flag.N != 0 {
5174 if nn.Op() == ir.ODOT {
5175 nn := nn.(*ir.SelectorExpr)
5179 if nn.Op() == ir.OINDEX {
5180 nn := nn.(*ir.IndexExpr)
5181 if nn.X.Type().IsArray() {
5188 if n.Op() != ir.ONAME {
5191 return s.canSSAName(n.(*ir.Name)) && TypeOK(n.Type())
5194 func (s *state) canSSAName(name *ir.Name) bool {
5195 if name.Addrtaken() || !name.OnStack() {
5201 // TODO: handle this case? Named return values must be
5202 // in memory so that the deferred function can see them.
5203 // Maybe do: if !strings.HasPrefix(n.String(), "~") { return false }
5204 // Or maybe not, see issue 18860. Even unnamed return values
5205 // must be written back so if a defer recovers, the caller can see them.
5208 if s.cgoUnsafeArgs {
5209 // Cgo effectively takes the address of all result args,
5210 // but the compiler can't see that.
5214 if name.Class == ir.PPARAM && name.Sym() != nil && name.Sym().Name == ".this" {
5215 // wrappers generated by genwrapper need to update
5216 // the .this pointer in place.
5217 // TODO: treat as a PPARAMOUT?
5221 // TODO: try to make more variables SSAable?
5224 // TypeOK reports whether variables of type t are SSA-able.
5225 func TypeOK(t *types.Type) bool {
5227 if t.Width > int64(4*types.PtrSize) {
5228 // 4*PtrSize is an arbitrary constant. We want it
5229 // to be at least 3*PtrSize so slices can be registerized.
5230 // Too big and we'll introduce too much register pressure.
5235 // We can't do larger arrays because dynamic indexing is
5236 // not supported on SSA variables.
5237 // TODO: allow if all indexes are constant.
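// For example (illustrative): [1]T is SSA-able exactly when T is, while
// even a small [2]int64 stays in memory because it may be indexed
// dynamically.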
5238 if t.NumElem() <= 1 {
5239 return TypeOK(t.Elem())
5243 if t.NumFields() > ssa.MaxStruct {
5246 for _, t1 := range t.Fields().Slice() {
5247 if !TypeOK(t1.Type) {
5257 // exprPtr evaluates n to a pointer and nil-checks it.
5258 func (s *state) exprPtr(n ir.Node, bounded bool, lineno src.XPos) *ssa.Value {
5260 if bounded || n.NonNil() {
5261 if s.f.Frontend().Debug_checknil() && lineno.Line() > 1 {
5262 s.f.Warnl(lineno, "removed nil check")
5270 // nilCheck generates nil pointer checking code.
5271 // Used only for automatically inserted nil checks,
5272 // not for user code like 'x != nil'.
5273 func (s *state) nilCheck(ptr *ssa.Value) {
5274 if base.Debug.DisableNil != 0 || s.curfn.NilCheckDisabled() {
5277 s.newValue2(ssa.OpNilCheck, types.TypeVoid, ptr, s.mem())
5280 // boundsCheck generates bounds checking code. Checks if 0 <= idx <[=] len, branches to exit if not.
5281 // Starts a new block on return.
5282 // On input, len must be converted to full int width and be nonnegative.
5283 // Returns idx converted to full int width.
5284 // If bounded is true then caller guarantees the index is not out of bounds
5285 // (but boundsCheck will still extend the index to full int width).
5286 func (s *state) boundsCheck(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value {
5287 idx = s.extendIndex(idx, len, kind, bounded)
5289 if bounded || base.Flag.B != 0 {
5290 // If bounded or bounds checking is flag-disabled, then no check necessary,
5291 // just return the extended index.
5293 // Here, bounded == true if the compiler generated the index itself,
5294 // such as in the expansion of a slice initializer. These indexes are
5295 // compiler-generated, not Go program variables, so they cannot be
5296 // attacker-controlled, so we can omit Spectre masking as well.
5298 // Note that we do not want to omit Spectre masking in code like:
5300 // if 0 <= i && i < len(x) {
5304 // Lucky for us, bounded==false for that code.
5305 // In that case (handled below), we emit a bound check (and Spectre mask)
5306 // and then the prove pass will remove the bounds check.
5307 // In theory the prove pass could potentially remove certain
5308 // Spectre masks, but it's very delicate and probably better
5309 // to be conservative and leave them all in.
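//
// A branch-free sketch of what the masking computes for the index case,
// using a hypothetical helper name (illustrative only, not the exact
// lowering on any architecture):
//
//	func spectreIndexMask(idx, length int64) int64 {
//		// requires 0 <= idx and 0 <= length
//		return idx & ((idx - length) >> 63) // keep idx if idx < length, else 0
//	}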
5313 bNext := s.f.NewBlock(ssa.BlockPlain)
5314 bPanic := s.f.NewBlock(ssa.BlockExit)
5316 if !idx.Type.IsSigned() {
5318 case ssa.BoundsIndex:
5319 kind = ssa.BoundsIndexU
5320 case ssa.BoundsSliceAlen:
5321 kind = ssa.BoundsSliceAlenU
5322 case ssa.BoundsSliceAcap:
5323 kind = ssa.BoundsSliceAcapU
5324 case ssa.BoundsSliceB:
5325 kind = ssa.BoundsSliceBU
5326 case ssa.BoundsSlice3Alen:
5327 kind = ssa.BoundsSlice3AlenU
5328 case ssa.BoundsSlice3Acap:
5329 kind = ssa.BoundsSlice3AcapU
5330 case ssa.BoundsSlice3B:
5331 kind = ssa.BoundsSlice3BU
5332 case ssa.BoundsSlice3C:
5333 kind = ssa.BoundsSlice3CU
5338 if kind == ssa.BoundsIndex || kind == ssa.BoundsIndexU {
5339 cmp = s.newValue2(ssa.OpIsInBounds, types.Types[types.TBOOL], idx, len)
5341 cmp = s.newValue2(ssa.OpIsSliceInBounds, types.Types[types.TBOOL], idx, len)
5344 b.Kind = ssa.BlockIf
5346 b.Likely = ssa.BranchLikely
5350 s.startBlock(bPanic)
5351 if Arch.LinkArch.Family == sys.Wasm {
5352 // TODO(khr): figure out how to do "register" based calling convention for bounds checks.
5353 // Should be similar to gcWriteBarrier, but I can't make it work.
5354 s.rtcall(BoundsCheckFunc[kind], false, nil, idx, len)
5356 mem := s.newValue3I(ssa.OpPanicBounds, types.TypeMem, int64(kind), idx, len, s.mem())
5357 s.endBlock().SetControl(mem)
5361 // In Spectre index mode, apply an appropriate mask to avoid speculative out-of-bounds accesses.
5362 if base.Flag.Cfg.SpectreIndex {
5363 op := ssa.OpSpectreIndex
5364 if kind != ssa.BoundsIndex && kind != ssa.BoundsIndexU {
5365 op = ssa.OpSpectreSliceIndex
5367 idx = s.newValue2(op, types.Types[types.TINT], idx, len)
5373 // If cmp (a bool) is false, panic using the given function.
5374 func (s *state) check(cmp *ssa.Value, fn *obj.LSym) {
5376 b.Kind = ssa.BlockIf
5378 b.Likely = ssa.BranchLikely
5379 bNext := s.f.NewBlock(ssa.BlockPlain)
5381 pos := base.Ctxt.PosTable.Pos(line)
5382 fl := funcLine{f: fn, base: pos.Base(), line: pos.Line()}
5383 bPanic := s.panics[fl]
5385 bPanic = s.f.NewBlock(ssa.BlockPlain)
5386 s.panics[fl] = bPanic
5387 s.startBlock(bPanic)
5388 // The panic call takes/returns memory to ensure that the right
5389 // memory state is observed if the panic happens.
5390 s.rtcall(fn, false, nil)
5397 func (s *state) intDivide(n ir.Node, a, b *ssa.Value) *ssa.Value {
5400 case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64:
5406 // do a size-appropriate check for zero
5407 cmp := s.newValue2(s.ssaOp(ir.ONE, n.Type()), types.Types[types.TBOOL], b, s.zeroVal(n.Type()))
5408 s.check(cmp, ir.Syms.Panicdivide)
5410 return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
5413 // rtcall issues a call to the given runtime function fn with the listed args.
5414 // Returns a slice of results of the given result types.
5415 // The call is added to the end of the current block.
5416 // If returns is false, the block is marked as an exit block.
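// For example (as used by dottype below):
//
//	tab = s.rtcall(fn, true, []*types.Type{byteptr}, target, tab)[0]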
5417 func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args ...*ssa.Value) []*ssa.Value {
5419 // Write args to the stack
5420 off := base.Ctxt.FixedFrameSize()
5421 var callArgs []*ssa.Value
5422 var callArgTypes []*types.Type
5424 for _, arg := range args {
5426 off = types.Rnd(off, t.Alignment())
5428 callArgs = append(callArgs, arg)
5429 callArgTypes = append(callArgTypes, t)
5432 off = types.Rnd(off, int64(types.RegSize))
5434 // Accumulate result types and offsets
5436 for _, t := range results {
5437 offR = types.Rnd(offR, t.Alignment())
5443 aux := ssa.StaticAuxCall(fn, s.f.ABIDefault.ABIAnalyzeTypes(nil, callArgTypes, results))
5444 callArgs = append(callArgs, s.mem())
5445 call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
5446 call.AddArgs(callArgs...)
5447 s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(results)), call)
5452 b.Kind = ssa.BlockExit
5454 call.AuxInt = off - base.Ctxt.FixedFrameSize()
5455 if len(results) > 0 {
5456 s.Fatalf("panic call can't have results")
5462 res := make([]*ssa.Value, len(results))
5463 for i, t := range results {
5464 off = types.Rnd(off, t.Alignment())
5465 res[i] = s.resultOfCall(call, int64(i), t)
5468 off = types.Rnd(off, int64(types.PtrSize))
5470 // Remember how much callee stack space we needed.
5476 // do *left = right for type t.
5477 func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask, leftIsStmt bool) {
5478 s.instrument(t, left, instrumentWrite)
5480 if skip == 0 && (!t.HasPointers() || ssa.IsStackAddr(left)) {
5481 // Known to not have write barrier. Store the whole type.
5482 s.vars[memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, left, right, s.mem(), leftIsStmt)
5486 // store scalar fields first, so write barrier stores for
5487 // pointer fields can be grouped together, and scalar values
5488 // don't need to be live across the write barrier call.
5489 // TODO: if the writebarrier pass knows how to reorder stores,
5490 // we can do a single store here as long as skip==0.
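// For example (illustrative): assigning a string stores its length word
// via storeTypeScalars first, then its data pointer via storeTypePtrs,
// so only the pointer store is a candidate for a write barrier.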
5491 s.storeTypeScalars(t, left, right, skip)
5492 if skip&skipPtr == 0 && t.HasPointers() {
5493 s.storeTypePtrs(t, left, right)
5497 // do *left = right for all scalar (non-pointer) parts of t.
5498 func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip skipMask) {
5500 case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex():
5501 s.store(t, left, right)
5502 case t.IsPtrShaped():
5503 if t.IsPtr() && t.Elem().NotInHeap() {
5504 s.store(t, left, right) // see issue 42032
5506 // otherwise, no scalar fields.
5508 if skip&skipLen != 0 {
5511 len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], right)
5512 lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
5513 s.store(types.Types[types.TINT], lenAddr, len)
5515 if skip&skipLen == 0 {
5516 len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], right)
5517 lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
5518 s.store(types.Types[types.TINT], lenAddr, len)
5520 if skip&skipCap == 0 {
5521 cap := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], right)
5522 capAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, 2*s.config.PtrSize, left)
5523 s.store(types.Types[types.TINT], capAddr, cap)
5525 case t.IsInterface():
5526 // itab field doesn't need a write barrier (even though it is a pointer).
5527 itab := s.newValue1(ssa.OpITab, s.f.Config.Types.BytePtr, right)
5528 s.store(types.Types[types.TUINTPTR], left, itab)
5531 for i := 0; i < n; i++ {
5532 ft := t.FieldType(i)
5533 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
5534 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
5535 s.storeTypeScalars(ft, addr, val, 0)
5537 case t.IsArray() && t.NumElem() == 0:
5539 case t.IsArray() && t.NumElem() == 1:
5540 s.storeTypeScalars(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right), 0)
5542 s.Fatalf("bad write barrier type %v", t)
5546 // do *left = right for all pointer parts of t.
5547 func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
5549 case t.IsPtrShaped():
5550 if t.IsPtr() && t.Elem().NotInHeap() {
5551 break // see issue 42032
5553 s.store(t, left, right)
5555 ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, right)
5556 s.store(s.f.Config.Types.BytePtr, left, ptr)
5558 elType := types.NewPtr(t.Elem())
5559 ptr := s.newValue1(ssa.OpSlicePtr, elType, right)
5560 s.store(elType, left, ptr)
5561 case t.IsInterface():
5562 // itab field is treated as a scalar.
5563 idata := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, right)
5564 idataAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.BytePtrPtr, s.config.PtrSize, left)
5565 s.store(s.f.Config.Types.BytePtr, idataAddr, idata)
5568 for i := 0; i < n; i++ {
5569 ft := t.FieldType(i)
5570 if !ft.HasPointers() {
5573 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
5574 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
5575 s.storeTypePtrs(ft, addr, val)
5577 case t.IsArray() && t.NumElem() == 0:
5579 case t.IsArray() && t.NumElem() == 1:
5580 s.storeTypePtrs(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right))
5582 s.Fatalf("bad write barrier type %v", t)
5586 // putArg evaluates n for the purpose of passing it as an argument to a function and returns the value for the call.
5587 func (s *state) putArg(n ir.Node, t *types.Type) *ssa.Value {
5590 a = s.newValue2(ssa.OpDereference, t, s.addr(n), s.mem())
5597 func (s *state) storeArgWithBase(n ir.Node, t *types.Type, base *ssa.Value, off int64) {
5598 pt := types.NewPtr(t)
5601 // Use special routine that avoids allocation on duplicate offsets.
5602 addr = s.constOffPtrSP(pt, off)
5604 addr = s.newValue1I(ssa.OpOffPtr, pt, off, base)
5614 s.storeType(t, addr, a, 0, false)
5617 // slice computes the slice v[i:j:k] and returns ptr, len, and cap of result.
5618 // i,j,k may be nil, in which case they are set to their default value.
5619 // v may be a slice, string or pointer to an array.
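// For example, v[i:] reaches here with j and k nil, and v[:] with i, j,
// and k all nil.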
5620 func (s *state) slice(v, i, j, k *ssa.Value, bounded bool) (p, l, c *ssa.Value) {
5622 var ptr, len, cap *ssa.Value
5625 ptr = s.newValue1(ssa.OpSlicePtr, types.NewPtr(t.Elem()), v)
5626 len = s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], v)
5627 cap = s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], v)
5629 ptr = s.newValue1(ssa.OpStringPtr, types.NewPtr(types.Types[types.TUINT8]), v)
5630 len = s.newValue1(ssa.OpStringLen, types.Types[types.TINT], v)
5633 if !t.Elem().IsArray() {
5634 s.Fatalf("bad ptr to array in slice %v\n", t)
5637 ptr = s.newValue1(ssa.OpCopy, types.NewPtr(t.Elem().Elem()), v)
5638 len = s.constInt(types.Types[types.TINT], t.Elem().NumElem())
5641 s.Fatalf("bad type in slice %v\n", t)
5644 // Set default values
5646 i = s.constInt(types.Types[types.TINT], 0)
5657 // Panic if slice indices are not in bounds.
5658 // Make sure we check these in reverse order so that we're always
5659 // comparing against a value known to be nonnegative. See issue 28797.
5662 kind := ssa.BoundsSlice3Alen
5664 kind = ssa.BoundsSlice3Acap
5666 k = s.boundsCheck(k, cap, kind, bounded)
5669 j = s.boundsCheck(j, k, ssa.BoundsSlice3B, bounded)
5671 i = s.boundsCheck(i, j, ssa.BoundsSlice3C, bounded)
5674 kind := ssa.BoundsSliceAlen
5676 kind = ssa.BoundsSliceAcap
5678 j = s.boundsCheck(j, k, kind, bounded)
5680 i = s.boundsCheck(i, j, ssa.BoundsSliceB, bounded)
5683 // Word-sized integer operations.
5684 subOp := s.ssaOp(ir.OSUB, types.Types[types.TINT])
5685 mulOp := s.ssaOp(ir.OMUL, types.Types[types.TINT])
5686 andOp := s.ssaOp(ir.OAND, types.Types[types.TINT])
5688 // Calculate the length (rlen) and capacity (rcap) of the new slice.
5689 // For strings the capacity of the result is unimportant. However,
5690 // we use rcap to test whether we've generated a zero-length slice,
5691 // so for strings we use the length as rcap.
5692 rlen := s.newValue2(subOp, types.Types[types.TINT], j, i)
5694 if j != k && !t.IsString() {
5695 rcap = s.newValue2(subOp, types.Types[types.TINT], k, i)
5698 if (i.Op == ssa.OpConst64 || i.Op == ssa.OpConst32) && i.AuxInt == 0 {
5699 // No pointer arithmetic necessary.
5700 return ptr, rlen, rcap
5703 // Calculate the base pointer (rptr) for the new slice.
5705 // Generate the following code assuming that indexes are in bounds.
5706 // The masking is to make sure that we don't generate a slice
5707 // that points to the next object in memory. We cannot just set
5708 // the pointer to nil because then we would create a nil slice or
5713 // rptr = ptr + (mask(rcap) & (i * stride))
5715 // Where mask(x) is 0 if x==0 and -1 if x>0 and stride is the width
5716 // of the element type.
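//
// Worked example (illustrative): slicing v[3:] of a []int64 with
// len(v) == cap(v) == 3 gives rcap == 0, so mask(rcap) == 0 zeroes out
// the 3*8 == 24 byte delta and rptr stays equal to ptr rather than
// pointing just past the backing array.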
5717 stride := s.constInt(types.Types[types.TINT], ptr.Type.Elem().Width)
5719 // The delta is the number of bytes to offset ptr by.
5720 delta := s.newValue2(mulOp, types.Types[types.TINT], i, stride)
5722 // If we're slicing to the point where the capacity is zero,
5723 // zero out the delta.
5724 mask := s.newValue1(ssa.OpSlicemask, types.Types[types.TINT], rcap)
5725 delta = s.newValue2(andOp, types.Types[types.TINT], delta, mask)
5727 // Compute rptr = ptr + delta.
5728 rptr := s.newValue2(ssa.OpAddPtr, ptr.Type, ptr, delta)
5730 return rptr, rlen, rcap
5733 type u642fcvtTab struct {
5734 leq, cvt2F, and, rsh, or, add ssa.Op
5735 one func(*state, *types.Type, int64) *ssa.Value
5738 var u64_f64 = u642fcvtTab{
5740 cvt2F: ssa.OpCvt64to64F,
5742 rsh: ssa.OpRsh64Ux64,
5745 one: (*state).constInt64,
5748 var u64_f32 = u642fcvtTab{
5750 cvt2F: ssa.OpCvt64to32F,
5752 rsh: ssa.OpRsh64Ux64,
5755 one: (*state).constInt64,
5758 func (s *state) uint64Tofloat64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
5759 return s.uint64Tofloat(&u64_f64, n, x, ft, tt)
5762 func (s *state) uint64Tofloat32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
5763 return s.uint64Tofloat(&u64_f32, n, x, ft, tt)
5766 func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
5768 // result = (floatY) x
5770 // y = uintX(x) ; y = x & 1
5771 // z = uintX(x) ; z = z >> 1
5774 // result = floatY(z)
5775 // result = result + result
5778 // Code borrowed from old code generator.
5779 // What's going on: large 64-bit "unsigned" looks like
5780 // negative number to hardware's integer-to-float
5781 // conversion. However, the signed conversion can only
5782 // handle 63 value bits, so we can drop the LSB: do an
5783 // unsigned right shift (divide by two), convert, and
5784 // double. However, before we do that, we need to be
5785 // sure that we do not lose a "1" if that made the
5786 // difference in the resulting rounding. Therefore, we
5787 // preserve it, and OR (not ADD) it back in. The case
5788 // that matters is when the eleven discarded bits are
5789 // equal to 10000000001; that rounds up, and the 1 cannot
5790 // be lost else it would round down if the LSB of the
5791 // candidate mantissa is 0.
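//
// Worked example (illustrative): x = 1<<63 + 1025 should round to
// float64(1<<63 + 2048). With the OR, z = x>>1 | 1 = 1<<62 + 513, which
// converts (rounding up) to 1<<62 + 1024, and doubling gives
// 1<<63 + 2048. With a plain shift, z = 1<<62 + 512 would round to even
// down to 1<<62, and doubling would lose the 2048.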
5792 cmp := s.newValue2(cvttab.leq, types.Types[types.TBOOL], s.zeroVal(ft), x)
5794 b.Kind = ssa.BlockIf
5796 b.Likely = ssa.BranchLikely
5798 bThen := s.f.NewBlock(ssa.BlockPlain)
5799 bElse := s.f.NewBlock(ssa.BlockPlain)
5800 bAfter := s.f.NewBlock(ssa.BlockPlain)
5804 a0 := s.newValue1(cvttab.cvt2F, tt, x)
5807 bThen.AddEdgeTo(bAfter)
5811 one := cvttab.one(s, ft, 1)
5812 y := s.newValue2(cvttab.and, ft, x, one)
5813 z := s.newValue2(cvttab.rsh, ft, x, one)
5814 z = s.newValue2(cvttab.or, ft, z, y)
5815 a := s.newValue1(cvttab.cvt2F, tt, z)
5816 a1 := s.newValue2(cvttab.add, tt, a, a)
5819 bElse.AddEdgeTo(bAfter)
5821 s.startBlock(bAfter)
5822 return s.variable(n, n.Type())
5825 type u322fcvtTab struct {
5826 cvtI2F, cvtF2F ssa.Op
5829 var u32_f64 = u322fcvtTab{
5830 cvtI2F: ssa.OpCvt32to64F,
5834 var u32_f32 = u322fcvtTab{
5835 cvtI2F: ssa.OpCvt32to32F,
5836 cvtF2F: ssa.OpCvt64Fto32F,
5839 func (s *state) uint32Tofloat64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
5840 return s.uint32Tofloat(&u32_f64, n, x, ft, tt)
5843 func (s *state) uint32Tofloat32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
5844 return s.uint32Tofloat(&u32_f32, n, x, ft, tt)
5847 func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
5849 // result = floatY(x)
5851 // result = floatY(float64(x) + (1<<32))
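//
// For example (illustrative): x = 0xFFFFFFFF reads as -1 when treated as
// a signed 32-bit value, and -1 + 1<<32 == 4294967295 recovers the
// unsigned value exactly (float64 represents every 33-bit integer).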
5853 cmp := s.newValue2(ssa.OpLeq32, types.Types[types.TBOOL], s.zeroVal(ft), x)
5855 b.Kind = ssa.BlockIf
5857 b.Likely = ssa.BranchLikely
5859 bThen := s.f.NewBlock(ssa.BlockPlain)
5860 bElse := s.f.NewBlock(ssa.BlockPlain)
5861 bAfter := s.f.NewBlock(ssa.BlockPlain)
5865 a0 := s.newValue1(cvttab.cvtI2F, tt, x)
5868 bThen.AddEdgeTo(bAfter)
5872 a1 := s.newValue1(ssa.OpCvt32to64F, types.Types[types.TFLOAT64], x)
5873 twoToThe32 := s.constFloat64(types.Types[types.TFLOAT64], float64(1<<32))
5874 a2 := s.newValue2(ssa.OpAdd64F, types.Types[types.TFLOAT64], a1, twoToThe32)
5875 a3 := s.newValue1(cvttab.cvtF2F, tt, a2)
5879 bElse.AddEdgeTo(bAfter)
5881 s.startBlock(bAfter)
5882 return s.variable(n, n.Type())
5885 // referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
5886 func (s *state) referenceTypeBuiltin(n *ir.UnaryExpr, x *ssa.Value) *ssa.Value {
5887 if !n.X.Type().IsMap() && !n.X.Type().IsChan() {
5888 s.Fatalf("node must be a map or a channel")
5894 // return *((*int)n)
5896 // return *(((*int)n)+1)
5899 nilValue := s.constNil(types.Types[types.TUINTPTR])
5900 cmp := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], x, nilValue)
5902 b.Kind = ssa.BlockIf
5904 b.Likely = ssa.BranchUnlikely
5906 bThen := s.f.NewBlock(ssa.BlockPlain)
5907 bElse := s.f.NewBlock(ssa.BlockPlain)
5908 bAfter := s.f.NewBlock(ssa.BlockPlain)
5910 // length/capacity of a nil map/chan is zero
5913 s.vars[n] = s.zeroVal(lenType)
5915 bThen.AddEdgeTo(bAfter)
5921 // length is stored in the first word for map/chan
5922 s.vars[n] = s.load(lenType, x)
5924 // capacity is stored in the second word for chan
5925 sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x)
5926 s.vars[n] = s.load(lenType, sw)
5928 s.Fatalf("op must be OLEN or OCAP")
5931 bElse.AddEdgeTo(bAfter)
5933 s.startBlock(bAfter)
5934 return s.variable(n, lenType)
5937 type f2uCvtTab struct {
5938 ltf, cvt2U, subf, or ssa.Op
5939 floatValue func(*state, *types.Type, float64) *ssa.Value
5940 intValue func(*state, *types.Type, int64) *ssa.Value
5944 var f32_u64 = f2uCvtTab{
5946 cvt2U: ssa.OpCvt32Fto64,
5949 floatValue: (*state).constFloat32,
5950 intValue: (*state).constInt64,
5954 var f64_u64 = f2uCvtTab{
5956 cvt2U: ssa.OpCvt64Fto64,
5959 floatValue: (*state).constFloat64,
5960 intValue: (*state).constInt64,
5964 var f32_u32 = f2uCvtTab{
5966 cvt2U: ssa.OpCvt32Fto32,
5969 floatValue: (*state).constFloat32,
5970 intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
5974 var f64_u32 = f2uCvtTab{
5976 cvt2U: ssa.OpCvt64Fto32,
5979 floatValue: (*state).constFloat64,
5980 intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
5984 func (s *state) float32ToUint64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
5985 return s.floatToUint(&f32_u64, n, x, ft, tt)
5987 func (s *state) float64ToUint64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
5988 return s.floatToUint(&f64_u64, n, x, ft, tt)
5991 func (s *state) float32ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
5992 return s.floatToUint(&f32_u32, n, x, ft, tt)
5995 func (s *state) float64ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
5996 return s.floatToUint(&f64_u32, n, x, ft, tt)
5999 func (s *state) floatToUint(cvttab *f2uCvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
6000 // cutoff:=1<<(intY_Size-1)
6001 // if x < floatX(cutoff) {
6002 // result = uintY(x)
6004 // y = x - floatX(cutoff)
6006 // result = z | -(cutoff)
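//
// Worked example (illustrative), for float64 to uint64: x = 1<<63 + 4096
// is >= cutoff (1<<63), so y = x - cutoff = 4096, z = uint64(y) = 4096,
// and OR-ing in -(cutoff), whose bit pattern is 1<<63, restores the high
// bit, giving 1<<63 + 4096.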
6008 cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff))
6009 cmp := s.newValue2(cvttab.ltf, types.Types[types.TBOOL], x, cutoff)
6011 b.Kind = ssa.BlockIf
6013 b.Likely = ssa.BranchLikely
6015 bThen := s.f.NewBlock(ssa.BlockPlain)
6016 bElse := s.f.NewBlock(ssa.BlockPlain)
6017 bAfter := s.f.NewBlock(ssa.BlockPlain)
6021 a0 := s.newValue1(cvttab.cvt2U, tt, x)
6024 bThen.AddEdgeTo(bAfter)
6028 y := s.newValue2(cvttab.subf, ft, x, cutoff)
6029 y = s.newValue1(cvttab.cvt2U, tt, y)
6030 z := cvttab.intValue(s, tt, int64(-cvttab.cutoff))
6031 a1 := s.newValue2(cvttab.or, tt, y, z)
6034 bElse.AddEdgeTo(bAfter)
6036 s.startBlock(bAfter)
6037 return s.variable(n, n.Type())
6040 // dottype generates SSA for a type assertion node.
6041 // commaok indicates whether to panic or return a bool.
6042 // If commaok is false, resok will be nil.
6043 func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Value) {
6044 iface := s.expr(n.X) // input interface
6045 target := s.reflectType(n.Type()) // target type
6046 byteptr := s.f.Config.Types.BytePtr
6048 if n.Type().IsInterface() {
6049 if n.Type().IsEmptyInterface() {
6050 // Converting to an empty interface.
6051 // Input could be an empty or nonempty interface.
6052 if base.Debug.TypeAssert > 0 {
6053 base.WarnfAt(n.Pos(), "type assertion inlined")
6056 // Get itab/type field from input.
6057 itab := s.newValue1(ssa.OpITab, byteptr, iface)
6058 // Conversion succeeds iff that field is not nil.
6059 cond := s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], itab, s.constNil(byteptr))
6061 if n.X.Type().IsEmptyInterface() && commaok {
6062 // Converting empty interface to empty interface with ,ok is just a nil check.
6066 // Branch on nilness.
6068 b.Kind = ssa.BlockIf
6070 b.Likely = ssa.BranchLikely
6071 bOk := s.f.NewBlock(ssa.BlockPlain)
6072 bFail := s.f.NewBlock(ssa.BlockPlain)
6077 // On failure, panic by calling panicnildottype.
6079 s.rtcall(ir.Syms.Panicnildottype, false, nil, target)
6081 // On success, return (perhaps modified) input interface.
6083 if n.X.Type().IsEmptyInterface() {
6084 res = iface // Use input interface unchanged.
6087 // Load type out of itab, build interface with existing idata.
6088 off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab)
6089 typ := s.load(byteptr, off)
6090 idata := s.newValue1(ssa.OpIData, byteptr, iface)
6091 res = s.newValue2(ssa.OpIMake, n.Type(), typ, idata)
6096 // nonempty -> empty
6097 // Need to load type from itab
6098 off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab)
6099 s.vars[typVar] = s.load(byteptr, off)
6102 // itab is nil, might as well use that as the nil result.
6104 s.vars[typVar] = itab
6108 bEnd := s.f.NewBlock(ssa.BlockPlain)
6110 bFail.AddEdgeTo(bEnd)
6112 idata := s.newValue1(ssa.OpIData, byteptr, iface)
6113 res = s.newValue2(ssa.OpIMake, n.Type(), s.variable(typVar, byteptr), idata)
6115 delete(s.vars, typVar)
6118 // converting to a nonempty interface needs a runtime call.
6119 if base.Debug.TypeAssert > 0 {
6120 base.WarnfAt(n.Pos(), "type assertion not inlined")
6123 fn := ir.Syms.AssertI2I
6124 if n.X.Type().IsEmptyInterface() {
6125 fn = ir.Syms.AssertE2I
6127 data := s.newValue1(ssa.OpIData, types.Types[types.TUNSAFEPTR], iface)
6128 tab := s.newValue1(ssa.OpITab, byteptr, iface)
6129 tab = s.rtcall(fn, true, []*types.Type{byteptr}, target, tab)[0]
6130 return s.newValue2(ssa.OpIMake, n.Type(), tab, data), nil
6132 fn := ir.Syms.AssertI2I2
6133 if n.X.Type().IsEmptyInterface() {
6134 fn = ir.Syms.AssertE2I2
6136 res = s.rtcall(fn, true, []*types.Type{n.Type()}, target, iface)[0]
6137 resok = s.newValue2(ssa.OpNeqInter, types.Types[types.TBOOL], res, s.constInterface(n.Type()))
6145 // Converting to a concrete type.
6146 direct := types.IsDirectIface(n.Type())
6147 itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface
6148 if base.Debug.TypeAssert > 0 {
6149 base.WarnfAt(n.Pos(), "type assertion inlined")
6151 var targetITab *ssa.Value
6152 if n.X.Type().IsEmptyInterface() {
6153 // Looking for pointer to target type.
6156 // Looking for pointer to itab for target type and source interface.
6157 targetITab = s.expr(n.Itab)
6160 var tmp ir.Node // temporary for use with large types
6161 var addr *ssa.Value // address of tmp
6162 if commaok && !TypeOK(n.Type()) {
6163 // unSSAable type, use temporary.
6164 // TODO: get rid of some of these temporaries.
6165 tmp, addr = s.temp(n.Pos(), n.Type())
6168 cond := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], itab, targetITab)
6170 b.Kind = ssa.BlockIf
6172 b.Likely = ssa.BranchLikely
6174 bOk := s.f.NewBlock(ssa.BlockPlain)
6175 bFail := s.f.NewBlock(ssa.BlockPlain)
6180 // on failure, panic by calling panicdottype
6182 taddr := s.reflectType(n.X.Type())
6183 if n.X.Type().IsEmptyInterface() {
6184 s.rtcall(ir.Syms.PanicdottypeE, false, nil, itab, target, taddr)
6186 s.rtcall(ir.Syms.PanicdottypeI, false, nil, itab, target, taddr)
6189 // on success, return data from interface
6192 return s.newValue1(ssa.OpIData, n.Type(), iface), nil
6194 p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type()), iface)
6195 return s.load(n.Type(), p), nil
6198 // commaok is the more complicated case because we have
6199 // a control flow merge point.
6200 bEnd := s.f.NewBlock(ssa.BlockPlain)
6201 // Note that we need a new valVar each time (unlike okVar where we can
6202 // reuse the variable) because it might have a different type every time.
6203 valVar := ssaMarker("val")
6205 // type assertion succeeded
6209 s.vars[valVar] = s.newValue1(ssa.OpIData, n.Type(), iface)
6211 p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type()), iface)
6212 s.vars[valVar] = s.load(n.Type(), p)
6215 p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type()), iface)
6216 s.move(n.Type(), addr, p)
6218 s.vars[okVar] = s.constBool(true)
6222 // type assertion failed
6225 s.vars[valVar] = s.zeroVal(n.Type())
6227 s.zero(n.Type(), addr)
6229 s.vars[okVar] = s.constBool(false)
6231 bFail.AddEdgeTo(bEnd)
6236 res = s.variable(valVar, n.Type())
6237 delete(s.vars, valVar)
6239 res = s.load(n.Type(), addr)
6240 s.vars[memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp.(*ir.Name), s.mem())
6242 resok = s.variable(okVar, types.Types[types.TBOOL])
6243 delete(s.vars, okVar)
6247 // temp allocates a temp of type t at position pos
6248 func (s *state) temp(pos src.XPos, t *types.Type) (*ir.Name, *ssa.Value) {
6249 tmp := typecheck.TempAt(pos, s.curfn, t)
6250 s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem())
6255 // variable returns the value of a variable at the current location.
6256 func (s *state) variable(n ir.Node, t *types.Type) *ssa.Value {
6266 if s.curBlock == s.f.Entry {
6267 // No variable should be live at entry.
6268 s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, n, v)
6270 // Make a FwdRef, which records a value that's live on block input.
6271 // We'll find the matching definition as part of insertPhis.
6272 v = s.newValue0A(ssa.OpFwdRef, t, fwdRefAux{N: n})
6274 if n.Op() == ir.ONAME {
6275 s.addNamedValue(n.(*ir.Name), v)
6280 func (s *state) mem() *ssa.Value {
6281 return s.variable(memVar, types.TypeMem)
6284 func (s *state) addNamedValue(n *ir.Name, v *ssa.Value) {
6285 if n.Class == ir.Pxxx {
6286 // Don't track our marker nodes (memVar etc.).
6289 if ir.IsAutoTmp(n) {
6290 // Don't track temporary variables.
6293 if n.Class == ir.PPARAMOUT {
6294 // Don't track named output values. This prevents return values
6295 // from being assigned too early. See #14591 and #14762. TODO: allow this.
6298 loc := ssa.LocalSlot{N: n, Type: n.Type(), Off: 0}
6299 values, ok := s.f.NamedValues[loc]
6301 s.f.Names = append(s.f.Names, &loc)
6302 s.f.CanonicalLocalSlots[loc] = &loc
6304 s.f.NamedValues[loc] = append(values, v)
6307 // Branch is an unresolved branch.
6308 type Branch struct {
6309 P *obj.Prog // branch instruction
6310 B *ssa.Block // target
6313 // State contains state needed during Prog generation.
6319 // Branches remembers all the branch instructions we've seen
6320 // and where they would like to go.
6323 // bstart remembers where each block starts (indexed by block ID)
6326 maxarg int64 // largest frame size for arguments to calls made by the function
6328 // Map from GC safe points to liveness index, generated by
6329 // liveness analysis.
6330 livenessMap liveness.Map
6332 // partLiveArgs includes arguments that may be partially live, for which we
6333 // need to generate instructions that spill the argument registers.
6334 partLiveArgs map[*ir.Name]bool
6336 // lineRunStart records the beginning of the current run of instructions
6337 // within a single block sharing the same line number.
6338 // Used to move statement marks to the beginning of such runs.
6339 lineRunStart *obj.Prog
6341 // wasm: The number of values on the WebAssembly stack. This is only used as a safeguard.
6342 OnWasmStackSkipped int
6345 func (s *State) FuncInfo() *obj.FuncInfo {
6346 return s.pp.CurFunc.LSym.Func()
6349 // Prog appends a new Prog.
6350 func (s *State) Prog(as obj.As) *obj.Prog {
6352 if objw.LosesStmtMark(as) {
6355 // Float a statement start to the beginning of any same-line run.
6356 // lineRunStart is reset at block boundaries, which appears to work well.
6357 if s.lineRunStart == nil || s.lineRunStart.Pos.Line() != p.Pos.Line() {
6359 } else if p.Pos.IsStmt() == src.PosIsStmt {
6360 s.lineRunStart.Pos = s.lineRunStart.Pos.WithIsStmt()
6361 p.Pos = p.Pos.WithNotStmt()
6366 // Pc returns the current Prog.
6367 func (s *State) Pc() *obj.Prog {
6371 // SetPos sets the current source position.
6372 func (s *State) SetPos(pos src.XPos) {
6376 // Br emits a single branch instruction and returns the instruction.
6377 // Not all architectures need the returned instruction, but otherwise
6378 // the boilerplate is common to all.
6379 func (s *State) Br(op obj.As, target *ssa.Block) *obj.Prog {
6381 p.To.Type = obj.TYPE_BRANCH
6382 s.Branches = append(s.Branches, Branch{P: p, B: target})
6386 // DebugFriendlySetPosFrom adjusts Pos.IsStmt subject to heuristics
6387 // that reduce "jumpy" line number churn when debugging.
6388 // Spill/fill/copy instructions from the register allocator,
6389 // phi functions, and instructions with a no-pos position
6390 // are examples of instructions that can cause churn.
6391 func (s *State) DebugFriendlySetPosFrom(v *ssa.Value) {
6393 case ssa.OpPhi, ssa.OpCopy, ssa.OpLoadReg, ssa.OpStoreReg:
6394 // These are not statements
6395 s.SetPos(v.Pos.WithNotStmt())
6398 if p != src.NoXPos {
6399 // If the position is defined, update the position.
6400 // Also convert default IsStmt to NotStmt; only
6401 // explicit statement boundaries should appear
6402 // in the generated code.
6403 if p.IsStmt() != src.PosIsStmt {
6405 // Calls use the pos attached to v, but copy the statement mark from State
6409 s.SetPos(s.pp.Pos.WithNotStmt())
6414 // emit argument info (locations on stack) for traceback.
6415 func emitArgInfo(e *ssafn, f *ssa.Func, pp *objw.Progs) {
6416 ft := e.curfn.Type()
6417 if ft.NumRecvs() == 0 && ft.NumParams() == 0 {
6421 x := EmitArgInfo(e.curfn, f.OwnAux.ABIInfo())
6422 e.curfn.LSym.Func().ArgInfo = x
6424 // Emit a funcdata pointing at the arg info data.
6425 p := pp.Prog(obj.AFUNCDATA)
6426 p.From.SetConst(objabi.FUNCDATA_ArgInfo)
6427 p.To.Type = obj.TYPE_MEM
6428 p.To.Name = obj.NAME_EXTERN
6432 // emit argument info (locations on stack) of f for traceback.
6433 func EmitArgInfo(f *ir.Func, abiInfo *abi.ABIParamResultInfo) *obj.LSym {
6434 x := base.Ctxt.Lookup(fmt.Sprintf("%s.arginfo%d", f.LSym.Name, f.ABI))
6436 PtrSize := int64(types.PtrSize)
6437 uintptrTyp := types.Types[types.TUINTPTR]
6439 isAggregate := func(t *types.Type) bool {
6440 return t.IsStruct() || t.IsArray() || t.IsComplex() || t.IsInterface() || t.IsString() || t.IsSlice()
6443 // Populate the data.
6444 // The data is a stream of bytes, which contains the offsets and sizes of the
6445 // non-aggregate arguments or non-aggregate fields/elements of aggregate-typed
6446 // arguments, along with special "operators". Specifically,
6447 // - for each non-aggregate arg/field/element, its offset from FP (1 byte) and
6449 // - special operators:
6450 // - 0xff - end of sequence
6451 // - 0xfe - print { (at the start of an aggregate-typed argument)
6452 // - 0xfd - print } (at the end of an aggregate-typed argument)
6453 // - 0xfc - print ... (more args/fields/elements)
6454 // - 0xfb - print _ (offset too large)
6455 // These constants need to be in sync with runtime.traceback.go:printArgs.
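//
// For example (illustrative): a single argument of type
// struct{ a uint8; b uint16 } at frame offset 8 could encode as
//	0xfe, 0x08, 0x01, 0x0a, 0x02, 0xfd, 0xff
// that is: "{", (offset 8, size 1), (offset 10, size 2), "}", end of
// sequence.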
6461 _offsetTooLarge = 0xfb
6462 _special = 0xf0 // above this are operators, below this are ordinary offsets
6466 limit = 10 // print no more than 10 args/components
6467 maxDepth = 5 // no more than 5 layers of nesting
6469 // maxLen is a (conservative) upper bound of the byte stream length. For
6470 // each arg/component, it has no more than 2 bytes of data (offset, size),
6471 // and no more than one {, }, ... at each level (it cannot have both the
6472 // data and ... unless it is the last one, just be conservative). Plus 1
6474 maxLen = (maxDepth*3+2)*limit + 1
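// With maxDepth == 5 and limit == 10 above, this is (5*3+2)*10 + 1 == 171.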
6479 writebyte := func(o uint8) { wOff = objw.Uint8(x, wOff, o) }
6481 // Write one non-aggregate arg/field/element.
6482 write1 := func(sz, offset int64) {
6483 if offset >= _special {
6484 writebyte(_offsetTooLarge)
6486 writebyte(uint8(offset))
6487 writebyte(uint8(sz))
6492 // Visit t recursively and write it out.
6493 // Returns whether to continue visiting.
6494 var visitType func(baseOffset int64, t *types.Type, depth int) bool
6495 visitType = func(baseOffset int64, t *types.Type, depth int) bool {
6497 writebyte(_dotdotdot)
6500 if !isAggregate(t) {
6501 write1(t.Size(), baseOffset)
6504 writebyte(_startAgg)
6506 if depth >= maxDepth {
6507 writebyte(_dotdotdot)
6513 case t.IsInterface(), t.IsString():
6514 _ = visitType(baseOffset, uintptrTyp, depth) &&
6515 visitType(baseOffset+PtrSize, uintptrTyp, depth)
6517 _ = visitType(baseOffset, uintptrTyp, depth) &&
6518 visitType(baseOffset+PtrSize, uintptrTyp, depth) &&
6519 visitType(baseOffset+PtrSize*2, uintptrTyp, depth)
6521 _ = visitType(baseOffset, types.FloatForComplex(t), depth) &&
6522 visitType(baseOffset+t.Size()/2, types.FloatForComplex(t), depth)
6524 if t.NumElem() == 0 {
6525 n++ // {} counts as a component
6528 for i := int64(0); i < t.NumElem(); i++ {
6529 if !visitType(baseOffset, t.Elem(), depth) {
6532 baseOffset += t.Elem().Size()
6535 if t.NumFields() == 0 {
6536 n++ // {} counts as a component
6539 for _, field := range t.Fields().Slice() {
6540 if !visitType(baseOffset+field.Offset, field.Type, depth) {
6549 for _, a := range abiInfo.InParams() {
6550 if !visitType(a.FrameOffset(abiInfo), a.Type, 0) {
6556 base.Fatalf("ArgInfo too large")
6562 // genssa appends entries to pp for each instruction in f.
6563 func genssa(f *ssa.Func, pp *objw.Progs) {
6565 s.ABI = f.OwnAux.Fn.ABI()
6567 e := f.Frontend().(*ssafn)
6569 s.livenessMap, s.partLiveArgs = liveness.Compute(e.curfn, f, e.stkptrsize, pp)
6570 emitArgInfo(e, f, pp)
6572 openDeferInfo := e.curfn.LSym.Func().OpenCodedDeferInfo
6573 if openDeferInfo != nil {
6574 // This function uses open-coded defers -- write out the funcdata
6575 // info that we computed at the end of genssa.
6576 p := pp.Prog(obj.AFUNCDATA)
6577 p.From.SetConst(objabi.FUNCDATA_OpenCodedDeferInfo)
6578 p.To.Type = obj.TYPE_MEM
6579 p.To.Name = obj.NAME_EXTERN
6580 p.To.Sym = openDeferInfo
6583 // Remember where each block starts.
6584 s.bstart = make([]*obj.Prog, f.NumBlocks())
6586 var progToValue map[*obj.Prog]*ssa.Value
6587 var progToBlock map[*obj.Prog]*ssa.Block
6588 var valueToProgAfter []*obj.Prog // The first Prog following computation of a value v; v is visible at this point.
6589 if f.PrintOrHtmlSSA {
6590 progToValue = make(map[*obj.Prog]*ssa.Value, f.NumValues())
6591 progToBlock = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
6592 f.Logf("genssa %s\n", f.Name)
6593 progToBlock[s.pp.Next] = f.Blocks[0]
6596 if base.Ctxt.Flag_locationlists {
6597 if cap(f.Cache.ValueToProgAfter) < f.NumValues() {
6598 f.Cache.ValueToProgAfter = make([]*obj.Prog, f.NumValues())
6600 valueToProgAfter = f.Cache.ValueToProgAfter[:f.NumValues()]
6601 for i := range valueToProgAfter {
6602 valueToProgAfter[i] = nil
6606 // If the very first instruction is not tagged as a statement,
6607 // debuggers may attribute it to the previous function in the program.
6608 firstPos := src.NoXPos
6609 for _, v := range f.Entry.Values {
6610 if v.Pos.IsStmt() == src.PosIsStmt {
6612 v.Pos = firstPos.WithDefaultStmt()
6617 // inlMarks has an entry for each Prog that implements an inline mark.
6618 // It maps from that Prog to the global inlining id of the inlined body
6619 // which should unwind to this Prog's location.
6620 var inlMarks map[*obj.Prog]int32
6621 var inlMarkList []*obj.Prog
6623 // inlMarksByPos maps from a (column 1) source position to the set of
6624 // Progs that are in the set above and have that source position.
6625 var inlMarksByPos map[src.XPos][]*obj.Prog
6627 // Emit basic blocks
6628 for i, b := range f.Blocks {
6629 s.bstart[b.ID] = s.pp.Next
6630 s.lineRunStart = nil
6632 // Attach a "default" liveness info. Normally this will be
6633 // overwritten in the Values loop below for each Value. But
6634 // for an empty block this will be used for its control
6635 // instruction. We won't use the actual liveness map on a
6636 // control instruction. Just mark it something that is
6637 // preemptible, unless this function is "all unsafe".
6638 s.pp.NextLive = objw.LivenessIndex{StackMapIndex: -1, IsUnsafePoint: liveness.IsUnsafe(f)}
6640 // Emit values in block
6641 Arch.SSAMarkMoves(&s, b)
6642 for _, v := range b.Values {
6644 s.DebugFriendlySetPosFrom(v)
6646 if v.Op.ResultInArg0() && v.ResultReg() != v.Args[0].Reg() {
6647 v.Fatalf("input[0] and output not in same register %s", v.LongString())
6652 // memory arg needs no code
6654 // input args need no code
6655 case ssa.OpSP, ssa.OpSB:
6657 case ssa.OpSelect0, ssa.OpSelect1, ssa.OpSelectN, ssa.OpMakeResult:
6660 // nothing to do when there's a g register,
6661 // and checkLower complains if there's not
6662 case ssa.OpVarDef, ssa.OpVarLive, ssa.OpKeepAlive, ssa.OpVarKill:
6663 // nothing to do; already used by liveness
6667 // nothing to do; no-op conversion for liveness
6668 if v.Args[0].Reg() != v.Reg() {
6669 v.Fatalf("OpConvert should be a no-op: %s; %s", v.Args[0].LongString(), v.LongString())
6672 p := Arch.Ginsnop(s.pp)
6673 if inlMarks == nil {
6674 inlMarks = map[*obj.Prog]int32{}
6675 inlMarksByPos = map[src.XPos][]*obj.Prog{}
6677 inlMarks[p] = v.AuxInt32()
6678 inlMarkList = append(inlMarkList, p)
6679 pos := v.Pos.AtColumn1()
6680 inlMarksByPos[pos] = append(inlMarksByPos[pos], p)
6683 // Special case for first line in function; move it to the start (which cannot be a register-valued instruction)
6684 if firstPos != src.NoXPos && v.Op != ssa.OpArgIntReg && v.Op != ssa.OpArgFloatReg && v.Op != ssa.OpLoadReg && v.Op != ssa.OpStoreReg {
6686 firstPos = src.NoXPos
6688 // Attach this safe point to the next instruction.
6690 s.pp.NextLive = s.livenessMap.Get(v)
6692 // let the backend handle it
6693 Arch.SSAGenValue(&s, v)
6696 if base.Ctxt.Flag_locationlists {
6697 valueToProgAfter[v.ID] = s.pp.Next
6700 if f.PrintOrHtmlSSA {
6701 for ; x != s.pp.Next; x = x.Link {
6706 // If this is an empty infinite loop, stick a hardware NOP in there so that debuggers are less confused.
6707 if s.bstart[b.ID] == s.pp.Next && len(b.Succs) == 1 && b.Succs[0].Block() == b {
6708 p := Arch.Ginsnop(s.pp)
6709 p.Pos = p.Pos.WithIsStmt()
6710 if b.Pos == src.NoXPos {
6711 b.Pos = p.Pos // It needs a file, otherwise a no-file non-zero line causes confusion. See #35652.
6712 if b.Pos == src.NoXPos {
6713 b.Pos = pp.Text.Pos // Sometimes p.Pos is empty. See #35695.
6716 b.Pos = b.Pos.WithBogusLine() // Debuggers are not good about infinite loops, force a change in line number
6718 // Emit control flow instructions for block
6720 if i < len(f.Blocks)-1 && base.Flag.N == 0 {
6721 // If -N, leave next==nil so every block with successors
6722 // ends in a JMP (except call blocks - plive doesn't like
6723 // select{send,recv} followed by a JMP call). Helps keep
6724 // line numbers for otherwise empty blocks.
6725 next = f.Blocks[i+1]
6729 Arch.SSAGenBlock(&s, b, next)
6730 if f.PrintOrHtmlSSA {
6731 for ; x != s.pp.Next; x = x.Link {
6736 if f.Blocks[len(f.Blocks)-1].Kind == ssa.BlockExit {
6737 // We need the return address of a panic call to
6738 // still be inside the function in question. So if
6739 // it ends in a call which doesn't return, add a
6740 // nop (which will never execute) after the call.
6743 if openDeferInfo != nil {
6744 // When doing open-coded defers, generate a disconnected call to
6745 // deferreturn and a return. This will be used during panic
6746 // recovery to unwind the stack and return to the runtime.
6747 s.pp.NextLive = s.livenessMap.DeferReturn
6748 p := pp.Prog(obj.ACALL)
6749 p.To.Type = obj.TYPE_MEM
6750 p.To.Name = obj.NAME_EXTERN
6751 p.To.Sym = ir.Syms.Deferreturn
6753 // Load results into registers, so that when a deferred function
6754 // recovers a panic, it will return to the caller with the right results.
6755 // The results are already in memory, because they are not SSA'd
6756 // when the function has defers (see canSSAName).
6757 for _, o := range f.OwnAux.ABIInfo().OutParams() {
6758 n := o.Name.(*ir.Name)
6759 rts, offs := o.RegisterTypesAndOffsets()
6760 for i := range o.Registers {
6761 Arch.LoadRegResult(&s, f, rts[i], ssa.ObjRegForAbiReg(o.Registers[i], f.Config), n, offs[i])
6768 if inlMarks != nil {
6769 // We have some inline marks. Try to find other instructions we're
6770 // going to emit anyway, and use those instructions instead of the inline marks.
6772 for p := pp.Text; p != nil; p = p.Link {
6773 if p.As == obj.ANOP || p.As == obj.AFUNCDATA || p.As == obj.APCDATA || p.As == obj.ATEXT || p.As == obj.APCALIGN || Arch.LinkArch.Family == sys.Wasm {
6774 // Don't use 0-sized instructions as inline marks, because we need
6775 // to identify inline mark instructions by pc offset.
6776 // (Some of these instructions are sometimes zero-sized, sometimes not.
6777 // We must not use anything that even might be zero-sized.)
6778 // TODO: are there others?
6781 if _, ok := inlMarks[p]; ok {
6782 // Don't use inline marks themselves. We don't know
6783 // whether they will be zero-sized or not yet.
6786 pos := p.Pos.AtColumn1()
6787 s := inlMarksByPos[pos]
6791 for _, m := range s {
6792 // We found an instruction with the same source position as
6793 // some of the inline marks.
6794 // Use this instruction instead.
6795 p.Pos = p.Pos.WithIsStmt() // promote position to a statement
6796 pp.CurFunc.LSym.Func().AddInlMark(p, inlMarks[m])
6797 // Make the inline mark a real nop, so it doesn't generate any code.
6803 delete(inlMarksByPos, pos)
6805 // Any unmatched inline marks now need to be added to the inlining tree (and will generate a nop instruction).
6806 for _, p := range inlMarkList {
6807 if p.As != obj.ANOP {
6808 pp.CurFunc.LSym.Func().AddInlMark(p, inlMarks[p])
6813 if base.Ctxt.Flag_locationlists {
6814 var debugInfo *ssa.FuncDebug
6815 if e.curfn.ABI == obj.ABIInternal && base.Flag.N != 0 {
6816 debugInfo = ssa.BuildFuncDebugNoOptimized(base.Ctxt, f, base.Debug.LocationLists > 1, StackOffset)
6818 debugInfo = ssa.BuildFuncDebug(base.Ctxt, f, base.Debug.LocationLists > 1, StackOffset)
6820 e.curfn.DebugInfo = debugInfo
6822 idToIdx := make([]int, f.NumBlocks())
6823 for i, b := range f.Blocks {
6826 // Note that at this moment, Prog.Pc is a sequence number; it's
6827 // not a real PC until after assembly, so this mapping has to be done later.
6829 debugInfo.GetPC = func(b, v ssa.ID) int64 {
6831 case ssa.BlockStart.ID:
6832 if b == f.Entry.ID {
6833 return 0 // Start at the very beginning, at the assembler-generated prologue.
6834 // this should only happen for function args (ssa.OpArg)
6837 case ssa.BlockEnd.ID:
6838 blk := f.Blocks[idToIdx[b]]
6839 nv := len(blk.Values)
6840 return valueToProgAfter[blk.Values[nv-1].ID].Pc
6841 case ssa.FuncEnd.ID:
6842 return e.curfn.LSym.Size
6844 return valueToProgAfter[v].Pc
6849 // Resolve branches, and relax DefaultStmt into NotStmt
6850 for _, br := range s.Branches {
6851 br.P.To.SetTarget(s.bstart[br.B.ID])
6852 if br.P.Pos.IsStmt() != src.PosIsStmt {
6853 br.P.Pos = br.P.Pos.WithNotStmt()
6854 } else if v0 := br.B.FirstPossibleStmtValue(); v0 != nil && v0.Pos.Line() == br.P.Pos.Line() && v0.Pos.IsStmt() == src.PosIsStmt {
6855 br.P.Pos = br.P.Pos.WithNotStmt()
6860 if e.log { // spew to stdout
6862 for p := pp.Text; p != nil; p = p.Link {
6863 if p.Pos.IsKnown() && p.InnermostFilename() != filename {
6864 filename = p.InnermostFilename()
6865 f.Logf("# %s\n", filename)
6869 if v, ok := progToValue[p]; ok {
6871 } else if b, ok := progToBlock[p]; ok {
6874 s = " " // most value and branch strings are 2-3 characters long
6876 f.Logf(" %-6s\t%.5d (%s)\t%s\n", s, p.Pc, p.InnermostLineNumber(), p.InstructionString())
6879 if f.HTMLWriter != nil { // spew to ssa.html
6880 var buf bytes.Buffer
6881 buf.WriteString("<code>")
6882 buf.WriteString("<dl class=\"ssa-gen\">")
6884 for p := pp.Text; p != nil; p = p.Link {
6885 // Don't spam every line with the file name, which is often huge.
6886 // Only print changes, and "unknown" is not a change.
6887 if p.Pos.IsKnown() && p.InnermostFilename() != filename {
6888 filename = p.InnermostFilename()
6889 buf.WriteString("<dt class=\"ssa-prog-src\"></dt><dd class=\"ssa-prog\">")
6890 buf.WriteString(html.EscapeString("# " + filename))
6891 buf.WriteString("</dd>")
6894 buf.WriteString("<dt class=\"ssa-prog-src\">")
6895 if v, ok := progToValue[p]; ok {
6896 buf.WriteString(v.HTML())
6897 } else if b, ok := progToBlock[p]; ok {
6898 buf.WriteString("<b>" + b.HTML() + "</b>")
6900 buf.WriteString("</dt>")
6901 buf.WriteString("<dd class=\"ssa-prog\">")
6902 buf.WriteString(fmt.Sprintf("%.5d <span class=\"l%v line-number\">(%s)</span> %s", p.Pc, p.InnermostLineNumber(), p.InnermostLineNumberHTML(), html.EscapeString(p.InstructionString())))
6903 buf.WriteString("</dd>")
6905 buf.WriteString("</dl>")
6906 buf.WriteString("</code>")
6907 f.HTMLWriter.WriteColumn("genssa", "genssa", "ssa-prog", buf.String())
6912 f.HTMLWriter.Close()
6916 func defframe(s *State, e *ssafn, f *ssa.Func) {
	pp := s.pp
	frame := types.Rnd(s.maxarg+e.stksize, int64(types.RegSize))
6920 if Arch.PadFrame != nil {
6921 frame = Arch.PadFrame(frame)
6924 // Fill in argument and frame size.
6925 pp.Text.To.Type = obj.TYPE_TEXTSIZE
6926 pp.Text.To.Val = int32(types.Rnd(f.OwnAux.ArgWidth(), int64(types.RegSize)))
6927 pp.Text.To.Offset = frame
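	// The filled-in header corresponds to the assembly form (illustrative):
	//
	//	TEXT ·f(SB), ..., $framesize-argsize
	//
	// e.g. $24-16 for a 24-byte padded frame with 16 bytes of rounded arguments.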
	// Insert code to spill argument registers if the named slot may be partially
	// live. That is, the named slot is considered live by liveness analysis
	// (because a part of it is live), but we may not spill all parts into the
	// slot. This can only happen with aggregate-typed arguments that are SSA-able
	// and not address-taken (for non-SSA-able or address-taken arguments we always
	// spill upfront).
	// Note: spilling is unnecessary in the -N/no-optimize case, since all values
	// will be considered non-SSAable and spilled up front.
	// TODO(register args) Make liveness more fine-grained so that partial spilling is okay.
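	// For example (illustrative only):
	//
	//	type pair struct {
	//		p *int
	//		n int
	//	}
	//	func f(a pair) // a arrives in two registers
	//
	// If a is live at a safepoint but only a.p's register part was stored back,
	// the stack map still describes the whole slot for a, so the pointer word
	// must be spilled even though a.n's register need not be.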
6940 if f.OwnAux.ABIInfo().InRegistersUsed() != 0 && base.Flag.N == 0 {
6941 // First, see if it is already spilled before it may be live. Look for a spill
6942 // in the entry block up to the first safepoint.
		type nameOff struct {
			n   *ir.Name
			off int64
		}
6947 partLiveArgsSpilled := make(map[nameOff]bool)
6948 for _, v := range f.Entry.Values {
			if v.Op != ssa.OpStoreReg || v.Args[0].Op != ssa.OpArgIntReg {
				continue
			}
			n, off := ssa.AutoVar(v)
			if n.Class != ir.PPARAM || n.Addrtaken() || !TypeOK(n.Type()) || !s.partLiveArgs[n] {
				continue
			}
			partLiveArgsSpilled[nameOff{n, off}] = true
6962 // Then, insert code to spill registers if not already.
6963 for _, a := range f.OwnAux.ABIInfo().InParams() {
6964 n, ok := a.Name.(*ir.Name)
			if !ok || n.Addrtaken() || !TypeOK(n.Type()) || !s.partLiveArgs[n] || len(a.Registers) <= 1 {
				continue
			}
6968 rts, offs := a.RegisterTypesAndOffsets()
6969 for i := range a.Registers {
				if !rts[i].HasPointers() {
					continue // non-pointer parts don't need to be spilled for the GC
				}
6973 if partLiveArgsSpilled[nameOff{n, offs[i]}] {
6974 continue // already spilled
6976 reg := ssa.ObjRegForAbiReg(a.Registers[i], f.Config)
6977 p = Arch.SpillArgReg(pp, p, f, rts[i], reg, n, offs[i])
6982 // Insert code to zero ambiguously live variables so that the
6983 // garbage collector only sees initialized values when it
6984 // looks for pointers.
	p := pp.Text
	var lo, hi int64

	// Opaque state for backend to use. Current backends use it to
	// keep track of which helper registers have been zeroed.
	var state uint32
6991 // Iterate through declarations. Autos are sorted in decreasing
6992 // frame offset order.
	for _, n := range e.curfn.Dcl {
		if !n.Needzero() {
			continue
		}
		if n.Class != ir.PAUTO {
6998 e.Fatalf(n.Pos(), "needzero class %d", n.Class)
7000 if n.Type().Size()%int64(types.PtrSize) != 0 || n.FrameOffset()%int64(types.PtrSize) != 0 || n.Type().Size() == 0 {
7001 e.Fatalf(n.Pos(), "var %L has size %d offset %d", n, n.Type().Size(), n.Offset_)
		if lo != hi && n.FrameOffset()+n.Type().Size() >= lo-int64(2*types.RegSize) {
			// Merge with range we already have.
			lo = n.FrameOffset()
			continue
		}
		// Zero old range.
		p = Arch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
		// Set new range.
		lo = n.FrameOffset()
		hi = lo + n.Type().Size()
7018 // Zero final range.
7019 Arch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
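	// Worked example (assumed layout, RegSize 8): scanning autos in decreasing
	// offset order, a needzero auto at [32,40) sets lo=32, hi=40; a second at
	// [8,24) ends at 24 >= 32-16, so it merges to lo=8 and a single ZeroRange
	// covers [8,40), gap included. A gap wider than 2*RegSize would instead
	// flush the old range and start a new one.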
// IndexJump is used for generating consecutive jump instructions to model a
// specific branching: Jump is the instruction to emit, Index the successor it targets.
type IndexJump struct {
	Jump  obj.As
	Index int
}
7028 func (s *State) oneJump(b *ssa.Block, jump *IndexJump) {
	p := s.Br(jump.Jump, b.Succs[jump.Index].Block())
	p.Pos = p.Pos.WithNotStmt()
}
// CombJump generates consecutive jump instructions (2 at present) for a block
// jump, so that the behavior of a non-standard condition code can be simulated
// by combining standard ones.
func (s *State) CombJump(b, next *ssa.Block, jumps *[2][2]IndexJump) {
	switch next {
	case b.Succs[0].Block():
7038 s.oneJump(b, &jumps[0][0])
7039 s.oneJump(b, &jumps[0][1])
7040 case b.Succs[1].Block():
7041 s.oneJump(b, &jumps[1][0])
7042 s.oneJump(b, &jumps[1][1])
	default:
		var q *obj.Prog
		if b.Likely != ssa.BranchUnlikely {
7046 s.oneJump(b, &jumps[1][0])
7047 s.oneJump(b, &jumps[1][1])
7048 q = s.Br(obj.AJMP, b.Succs[1].Block())
		} else {
			s.oneJump(b, &jumps[0][0])
7051 s.oneJump(b, &jumps[0][1])
			q = s.Br(obj.AJMP, b.Succs[0].Block())
		}
		q.Pos = b.Pos
	}
}
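// For example (hedged, mirroring the amd64 back end): x86 has no single jump
// for "equal and ordered", so a floating-point equality block is lowered with
// a jump table like
//
//	var eqfJumps = [2][2]IndexJump{
//		{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPS, Index: 1}}, // next == b.Succs[0]
//		{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPC, Index: 0}}, // next == b.Succs[1]
//	}
//
// and emitted via s.CombJump(b, next, &eqfJumps).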
7058 // AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a.
7059 func AddAux(a *obj.Addr, v *ssa.Value) {
7060 AddAux2(a, v, v.AuxInt)
7062 func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
7063 if a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR {
7064 v.Fatalf("bad AddAux addr %v", a)
	// add integer offset
	a.Offset += offset

	// If no additional symbol offset, we're done.
	if v.Aux == nil {
		return
	}

	// Add symbol's offset from its base register.
	switch n := v.Aux.(type) {
	case *ssa.AuxCall:
		a.Name = obj.NAME_EXTERN
		a.Sym = n.Fn
	case *obj.LSym:
		a.Name = obj.NAME_EXTERN
		a.Sym = n
	case *ir.Name:
		if n.Class == ir.PPARAM || (n.Class == ir.PPARAMOUT && !n.IsOutputParamInRegisters()) {
			a.Name = obj.NAME_PARAM
			a.Sym = ir.Orig(n).(*ir.Name).Linksym()
			a.Offset += n.FrameOffset()
			break
		}
		a.Name = obj.NAME_AUTO
		if n.Class == ir.PPARAMOUT {
			a.Sym = ir.Orig(n).(*ir.Name).Linksym()
		} else {
			a.Sym = n.Linksym()
		}
		a.Offset += n.FrameOffset()
	default:
		v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
	}
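	// For instance (illustrative): addressing byte 8 of a local auto x whose
	// frame offset is 16 yields a NAME_AUTO operand with Sym x and Offset 24
	// (the AuxInt 8 plus x's frame offset).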
// extendIndex extends idx to a full int width.
// It panics with the given kind if idx does not fit in an int (possible only on 32-bit archs).
7102 func (s *state) extendIndex(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value {
7103 size := idx.Type.Size()
	if size == s.config.PtrSize {
		return idx
	}
	if size > s.config.PtrSize {
		// truncate 64-bit indexes on 32-bit pointer archs. Test the
		// high word and branch to out-of-bounds failure if it is not 0.
		var lo *ssa.Value
		if idx.Type.IsSigned() {
			lo = s.newValue1(ssa.OpInt64Lo, types.Types[types.TINT], idx)
		} else {
			lo = s.newValue1(ssa.OpInt64Lo, types.Types[types.TUINT], idx)
		}
		if bounded || base.Flag.B != 0 {
			return lo
		}
		bNext := s.f.NewBlock(ssa.BlockPlain)
		bPanic := s.f.NewBlock(ssa.BlockExit)
		hi := s.newValue1(ssa.OpInt64Hi, types.Types[types.TUINT32], idx)
		cmp := s.newValue2(ssa.OpEq32, types.Types[types.TBOOL], hi, s.constInt32(types.Types[types.TUINT32], 0))
		if !idx.Type.IsSigned() {
			switch kind {
			case ssa.BoundsIndex:
				kind = ssa.BoundsIndexU
			case ssa.BoundsSliceAlen:
				kind = ssa.BoundsSliceAlenU
			case ssa.BoundsSliceAcap:
				kind = ssa.BoundsSliceAcapU
			case ssa.BoundsSliceB:
				kind = ssa.BoundsSliceBU
			case ssa.BoundsSlice3Alen:
				kind = ssa.BoundsSlice3AlenU
			case ssa.BoundsSlice3Acap:
				kind = ssa.BoundsSlice3AcapU
			case ssa.BoundsSlice3B:
				kind = ssa.BoundsSlice3BU
			case ssa.BoundsSlice3C:
				kind = ssa.BoundsSlice3CU
			}
		}
		b := s.endBlock()
		b.Kind = ssa.BlockIf
		b.SetControl(cmp)
		b.Likely = ssa.BranchLikely
		b.AddEdgeTo(bNext)
		b.AddEdgeTo(bPanic)

		s.startBlock(bPanic)
		mem := s.newValue4I(ssa.OpPanicExtend, types.TypeMem, int64(kind), hi, lo, len, s.mem())
		s.endBlock().SetControl(mem)
		s.startBlock(bNext)

		return lo
	}

	// Extend value to the required size.
	var op ssa.Op
	if idx.Type.IsSigned() {
		switch 10*size + s.config.PtrSize {
		case 14:
			op = ssa.OpSignExt8to32
		case 18:
			op = ssa.OpSignExt8to64
		case 24:
			op = ssa.OpSignExt16to32
		case 28:
			op = ssa.OpSignExt16to64
		case 48:
			op = ssa.OpSignExt32to64
		default:
			s.Fatalf("bad signed index extension %s", idx.Type)
		}
	} else {
		switch 10*size + s.config.PtrSize {
		case 14:
			op = ssa.OpZeroExt8to32
		case 18:
			op = ssa.OpZeroExt8to64
		case 24:
			op = ssa.OpZeroExt16to32
		case 28:
			op = ssa.OpZeroExt16to64
		case 48:
			op = ssa.OpZeroExt32to64
		default:
			s.Fatalf("bad unsigned index extension %s", idx.Type)
		}
	}
	return s.newValue1(op, types.Types[types.TINT], idx)
}
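// Worked example (given the case encodings above): an int16 index on a 64-bit
// arch has size 2 and PtrSize 8, so 10*size+PtrSize = 28, selecting
// OpSignExt16to64 (OpZeroExt16to64 if unsigned); the same index on a 32-bit
// arch gives 24 and OpSignExt16to32.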
7194 // CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values.
7195 // Called during ssaGenValue.
7196 func CheckLoweredPhi(v *ssa.Value) {
7197 if v.Op != ssa.OpPhi {
7198 v.Fatalf("CheckLoweredPhi called with non-phi value: %v", v.LongString())
	if v.Type.IsMemory() {
		return // memory phis are not allocated to registers
	}
	f := v.Block.Func
	loc := f.RegAlloc[v.ID]
7205 for _, a := range v.Args {
7206 if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead?
7207 v.Fatalf("phi arg at different location than phi: %v @ %s, but arg %v @ %s\n%s\n", v, loc, a, aloc, v.Block.Func)
7212 // CheckLoweredGetClosurePtr checks that v is the first instruction in the function's entry block,
7213 // except for incoming in-register arguments.
7214 // The output of LoweredGetClosurePtr is generally hardwired to the correct register.
7215 // That register contains the closure pointer on closure entry.
7216 func CheckLoweredGetClosurePtr(v *ssa.Value) {
7217 entry := v.Block.Func.Entry
7218 if entry != v.Block {
7219 base.Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
	for _, w := range entry.Values {
		if w == v {
			break
		}
		switch w.Op {
		case ssa.OpArgIntReg, ssa.OpArgFloatReg:
			// okay
		default:
			base.Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
		}
	}
7234 // CheckArgReg ensures that v is in the function's entry block.
7235 func CheckArgReg(v *ssa.Value) {
7236 entry := v.Block.Func.Entry
7237 if entry != v.Block {
7238 base.Fatalf("in %s, badly placed ArgIReg or ArgFReg: %v %v", v.Block.Func.Name, v.Block, v)
7242 func AddrAuto(a *obj.Addr, v *ssa.Value) {
7243 n, off := ssa.AutoVar(v)
	a.Type = obj.TYPE_MEM
	a.Sym = n.Linksym()
	a.Reg = int16(Arch.REGSP)
	a.Offset = n.FrameOffset() + off
7248 if n.Class == ir.PPARAM || (n.Class == ir.PPARAMOUT && !n.IsOutputParamInRegisters()) {
7249 a.Name = obj.NAME_PARAM
7251 a.Name = obj.NAME_AUTO
7255 // Call returns a new CALL instruction for the SSA value v.
7256 // It uses PrepareCall to prepare the call.
7257 func (s *State) Call(v *ssa.Value) *obj.Prog {
	pPosIsStmt := s.pp.Pos.IsStmt() // The statement-ness of the call comes from ssaGenState
7261 p := s.Prog(obj.ACALL)
7262 if pPosIsStmt == src.PosIsStmt {
7263 p.Pos = v.Pos.WithIsStmt()
	} else {
		p.Pos = v.Pos.WithNotStmt()
	}
7267 if sym, ok := v.Aux.(*ssa.AuxCall); ok && sym.Fn != nil {
7268 p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = sym.Fn
	} else {
		// TODO(mdempsky): Can these differences be eliminated?
		switch Arch.LinkArch.Family {
7274 case sys.AMD64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm:
7275 p.To.Type = obj.TYPE_REG
7276 case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64:
7277 p.To.Type = obj.TYPE_MEM
		default:
			base.Fatalf("unknown indirect call family")
		}
		p.To.Reg = v.Args[0].Reg()
7286 // PrepareCall prepares to emit a CALL instruction for v and does call-related bookkeeping.
7287 // It must be called immediately before emitting the actual CALL instruction,
7288 // since it emits PCDATA for the stack map at the call (calls are safe points).
7289 func (s *State) PrepareCall(v *ssa.Value) {
7290 idx := s.livenessMap.Get(v)
7291 if !idx.StackMapValid() {
7292 // See Liveness.hasStackMap.
7293 if sym, ok := v.Aux.(*ssa.AuxCall); !ok || !(sym.Fn == ir.Syms.Typedmemclr || sym.Fn == ir.Syms.Typedmemmove) {
7294 base.Fatalf("missing stack map index for %v", v.LongString())
7298 call, ok := v.Aux.(*ssa.AuxCall)
7300 if ok && call.Fn == ir.Syms.Deferreturn {
7301 // Deferred calls will appear to be returning to
7302 // the CALL deferreturn(SB) that we are about to emit.
7303 // However, the stack trace code will show the line
7304 // of the instruction byte before the return PC.
7305 // To avoid that being an unrelated instruction,
7306 // insert an actual hardware NOP that will have the right line number.
7307 // This is different from obj.ANOP, which is a virtual no-op
7308 // that doesn't make it into the instruction stream.
7309 Arch.Ginsnopdefer(s.pp)
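		// Illustrative (hedged): tracebacks look up the line for retPC-1, the
		// byte just before the return address; without this NOP that byte
		// belongs to whatever instruction happened to be emitted last, so a
		// panic in a deferred function could be reported against an unrelated
		// source line.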
	// Record call graph information for nowritebarrierrec analysis.
7315 if nowritebarrierrecCheck != nil {
7316 nowritebarrierrecCheck.recordCall(s.pp.CurFunc, call.Fn, v.Pos)
	if s.maxarg < v.AuxInt {
		s.maxarg = v.AuxInt
	}
7325 // UseArgs records the fact that an instruction needs a certain amount of
7326 // callee args space for its use.
func (s *State) UseArgs(n int64) {
	if s.maxarg < n {
		s.maxarg = n
	}
}
7333 // fieldIdx finds the index of the field referred to by the ODOT node n.
func fieldIdx(n *ir.SelectorExpr) int {
	t := n.X.Type()
	if !t.IsStruct() {
		panic("ODOT's LHS is not a struct")
	}
	for i, f := range t.Fields().Slice() {
		if f.Sym == n.Sel {
			if f.Offset != n.Offset() {
				panic("field offset doesn't match")
			}
			return i
		}
	}
	panic(fmt.Sprintf("can't find field in expr %v\n", n))
7350 // TODO: keep the result of this function somewhere in the ODOT Node
7351 // so we don't have to recompute it each time we need it.
7354 // ssafn holds frontend information about a function that the backend is processing.
7355 // It also exports a bunch of compiler services for the ssa backend.
type ssafn struct {
	curfn      *ir.Func
	strings    map[string]*obj.LSym // map from constant string to data symbols
7359 stksize int64 // stack size for current frame
7360 stkptrsize int64 // prefix of stack containing pointers
	log        bool                 // print ssa debug to stdout
7364 // StringData returns a symbol which
7365 // is the data component of a global string constant containing s.
7366 func (e *ssafn) StringData(s string) *obj.LSym {
	if aux, ok := e.strings[s]; ok {
		return aux
	}
	if e.strings == nil {
		e.strings = make(map[string]*obj.LSym)
	}
	data := staticdata.StringSym(e.curfn.Pos(), s)
	e.strings[s] = data
	return data
7378 func (e *ssafn) Auto(pos src.XPos, t *types.Type) *ir.Name {
7379 return typecheck.TempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list
7382 // SplitSlot returns a slot representing the data of parent starting at offset.
func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
	node := parent.N
	if node.Class != ir.PAUTO || node.Addrtaken() {
7387 // addressed things and non-autos retain their parents (i.e., cannot truly be split)
7388 return ssa.LocalSlot{N: node, Type: t, Off: parent.Off + offset}
	s := &types.Sym{Name: node.Sym().Name + suffix, Pkg: types.LocalPkg}
	n := ir.NewNameAt(parent.N.Pos(), s)
	s.Def = n
	ir.AsNode(s.Def).Name().SetUsed(true)
	n.SetType(t)
	n.Class = ir.PAUTO
	n.SetEsc(ir.EscNever)
	n.Curfn = e.curfn
	e.curfn.Dcl = append(e.curfn.Dcl, n)
	types.CalcSize(t)
7401 return ssa.LocalSlot{N: n, Type: t, Off: 0, SplitOf: parent, SplitOffset: offset}
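// For example (hedged, mirroring how the ssa package decomposes values): a
// string auto s can be split into its pointer and length halves via
//
//	ptr := e.SplitSlot(&slot, ".ptr", 0, types.NewPtr(types.Types[types.TUINT8]))
//	len := e.SplitSlot(&slot, ".len", int64(types.PtrSize), types.Types[types.TINT])
//
// producing autos named "s.ptr" and "s.len" that are tracked and spilled
// independently.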
func (e *ssafn) CanSSA(t *types.Type) bool {
	return TypeOK(t)
}
7408 func (e *ssafn) Line(pos src.XPos) string {
7409 return base.FmtPos(pos)
// Logf logs a message from the compiler.
func (e *ssafn) Logf(msg string, args ...interface{}) {
	if e.log {
		fmt.Printf(msg, args...)
	}
}
func (e *ssafn) Log() bool {
	return e.log
}
7423 // Fatal reports a compiler error and exits.
7424 func (e *ssafn) Fatalf(pos src.XPos, msg string, args ...interface{}) {
7426 nargs := append([]interface{}{ir.FuncName(e.curfn)}, args...)
7427 base.Fatalf("'%s': "+msg, nargs...)
7430 // Warnl reports a "warning", which is usually flag-triggered
7431 // logging output for the benefit of tests.
7432 func (e *ssafn) Warnl(pos src.XPos, fmt_ string, args ...interface{}) {
7433 base.WarnfAt(pos, fmt_, args...)
7436 func (e *ssafn) Debug_checknil() bool {
7437 return base.Debug.Nil != 0
func (e *ssafn) UseWriteBarrier() bool {
	return base.Flag.WB
}
func (e *ssafn) Syslook(name string) *obj.LSym {
	switch name {
	case "goschedguarded":
7447 return ir.Syms.Goschedguarded
7448 case "writeBarrier":
7449 return ir.Syms.WriteBarrier
7450 case "gcWriteBarrier":
7451 return ir.Syms.GCWriteBarrier
7452 case "typedmemmove":
7453 return ir.Syms.Typedmemmove
	case "typedmemclr":
		return ir.Syms.Typedmemclr
	}
	e.Fatalf(src.NoXPos, "unknown Syslook func %v", name)
	return nil
}
7461 func (e *ssafn) SetWBPos(pos src.XPos) {
7462 e.curfn.SetWBPos(pos)
7465 func (e *ssafn) MyImportPath() string {
7466 return base.Ctxt.Pkgpath
7469 func clobberBase(n ir.Node) ir.Node {
7470 if n.Op() == ir.ODOT {
7471 n := n.(*ir.SelectorExpr)
7472 if n.X.Type().NumFields() == 1 {
			return clobberBase(n.X)
		}
	}
	if n.Op() == ir.OINDEX {
7477 n := n.(*ir.IndexExpr)
7478 if n.X.Type().IsArray() && n.X.Type().NumElem() == 1 {
			return clobberBase(n.X)
		}
	}
	return n
}
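// For example (illustrative): given
//
//	type S struct{ a [1]int }
//	var s S
//
// a store to s.a[0] overwrites all of s, so clobberBase returns the node for
// s itself and the clobber covers the whole variable.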
7485 // callTargetLSym returns the correct LSym to call 'callee' using its ABI.
7486 func callTargetLSym(callee *ir.Name) *obj.LSym {
7487 if callee.Func == nil {
7488 // TODO(austin): This happens in a few cases of
7489 // compiler-generated functions. These are all
7490 // ABIInternal. It would be better if callee.Func was
7491 // never nil and we didn't need this case.
7492 return callee.Linksym()
	}

	return callee.LinksymABI(callee.Func.ABI)
}
func min8(a, b int8) int8 {
	if a < b {
		return a
	}
	return b
}

func max8(a, b int8) int8 {
	if a < b {
		return b
	}
	return a
}
7512 // deferstruct makes a runtime._defer structure.
7513 func deferstruct() *types.Type {
7514 makefield := func(name string, typ *types.Type) *types.Field {
7515 // Unlike the global makefield function, this one needs to set Pkg
7516 // because these types might be compared (in SSA CSE sorting).
7517 // TODO: unify this makefield and the global one above.
7518 sym := &types.Sym{Name: name, Pkg: types.LocalPkg}
7519 return types.NewField(src.NoXPos, sym, typ)
7521 // These fields must match the ones in runtime/runtime2.go:_defer and
7522 // (*state).call above.
7523 fields := []*types.Field{
7524 makefield("started", types.Types[types.TBOOL]),
7525 makefield("heap", types.Types[types.TBOOL]),
7526 makefield("openDefer", types.Types[types.TBOOL]),
7527 makefield("sp", types.Types[types.TUINTPTR]),
7528 makefield("pc", types.Types[types.TUINTPTR]),
7529 // Note: the types here don't really matter. Defer structures
7530 // are always scanned explicitly during stack copying and GC,
7531 // so we make them uintptr type even though they are real pointers.
7532 makefield("fn", types.Types[types.TUINTPTR]),
7533 makefield("_panic", types.Types[types.TUINTPTR]),
7534 makefield("link", types.Types[types.TUINTPTR]),
7535 makefield("fd", types.Types[types.TUINTPTR]),
7536 makefield("varp", types.Types[types.TUINTPTR]),
7537 makefield("framepc", types.Types[types.TUINTPTR]),
	}

	// build struct holding the above fields
	s := types.NewStruct(types.NoPkg, fields)
	types.CalcStructSize(s)
	return s
}
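// Stack-allocated defer records of this shape are created on the
// deferprocStack path in (*state).call above, which stores into these fields
// directly before the runtime links the record into the defer chain.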
// SpillSlotAddr uses LocalSlot information to initialize an obj.Addr.
// The resulting addr is used in a non-standard context -- in the prologue
// of a function, before the frame has been constructed, so the standard
// addressing for the parameters will be wrong.
func SpillSlotAddr(spill ssa.Spill, baseReg int16, extraOffset int64) obj.Addr {
	return obj.Addr{
		Name:   obj.NAME_NONE,
		Type:   obj.TYPE_MEM,
		Reg:    baseReg,
		Offset: spill.Offset + extraOffset,
	}
}
var (
	BoundsCheckFunc [ssa.BoundsKindCount]*obj.LSym
	ExtendCheckFunc [ssa.BoundsKindCount]*obj.LSym
)
7565 // GCWriteBarrierReg maps from registers to gcWriteBarrier implementation LSyms.
7566 var GCWriteBarrierReg map[int16]*obj.LSym