1 // Copyright 2015 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
10 "cmd/compile/internal/abi"
20 "cmd/compile/internal/base"
21 "cmd/compile/internal/ir"
22 "cmd/compile/internal/liveness"
23 "cmd/compile/internal/objw"
24 "cmd/compile/internal/reflectdata"
25 "cmd/compile/internal/ssa"
26 "cmd/compile/internal/staticdata"
27 "cmd/compile/internal/typecheck"
28 "cmd/compile/internal/types"
30 "cmd/internal/obj/x86"
36 var ssaConfig *ssa.Config
37 var ssaCaches []ssa.Cache
39 var ssaDump string // early copy of $GOSSAFUNC; the func name to dump output for
40 var ssaDir string // optional destination for ssa dump file
41 var ssaDumpStdout bool // whether to dump to stdout
42 var ssaDumpCFG string // generate CFGs for these phases
43 const ssaDumpFile = "ssa.html"
45 // ssaDumpInlined holds all inlined functions when ssaDump contains a function name.
46 var ssaDumpInlined []*ir.Func
48 func DumpInline(fn *ir.Func) {
49 if ssaDump != "" && ssaDump == ir.FuncName(fn) {
50 ssaDumpInlined = append(ssaDumpInlined, fn)
55 ssaDump = os.Getenv("GOSSAFUNC")
56 ssaDir = os.Getenv("GOSSADIR")
58 if strings.HasSuffix(ssaDump, "+") {
59 ssaDump = ssaDump[:len(ssaDump)-1]
62 spl := strings.Split(ssaDump, ":")
71 types_ := ssa.NewTypes()
77 // Generate a few pointer types that are uncommon in the frontend but common in the backend.
78 // Caching is disabled in the backend, so generating these here avoids allocations.
79 _ = types.NewPtr(types.Types[types.TINTER]) // *interface{}
80 _ = types.NewPtr(types.NewPtr(types.Types[types.TSTRING])) // **string
81 _ = types.NewPtr(types.NewSlice(types.Types[types.TINTER])) // *[]interface{}
82 _ = types.NewPtr(types.NewPtr(types.ByteType)) // **byte
83 _ = types.NewPtr(types.NewSlice(types.ByteType)) // *[]byte
84 _ = types.NewPtr(types.NewSlice(types.Types[types.TSTRING])) // *[]string
85 _ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[types.TUINT8]))) // ***uint8
86 _ = types.NewPtr(types.Types[types.TINT16]) // *int16
87 _ = types.NewPtr(types.Types[types.TINT64]) // *int64
88 _ = types.NewPtr(types.ErrorType) // *error
89 types.NewPtrCacheEnabled = false
90 ssaConfig = ssa.NewConfig(base.Ctxt.Arch.Name, *types_, base.Ctxt, base.Flag.N == 0)
91 ssaConfig.SoftFloat = Arch.SoftFloat
92 ssaConfig.Race = base.Flag.Race
93 ssaCaches = make([]ssa.Cache, base.Flag.LowerC)
95 // Set up some runtime functions we'll need to call.
96 ir.Syms.AssertE2I = typecheck.LookupRuntimeFunc("assertE2I")
97 ir.Syms.AssertE2I2 = typecheck.LookupRuntimeFunc("assertE2I2")
98 ir.Syms.AssertI2I = typecheck.LookupRuntimeFunc("assertI2I")
99 ir.Syms.AssertI2I2 = typecheck.LookupRuntimeFunc("assertI2I2")
100 ir.Syms.Deferproc = typecheck.LookupRuntimeFunc("deferproc")
101 ir.Syms.DeferprocStack = typecheck.LookupRuntimeFunc("deferprocStack")
102 ir.Syms.Deferreturn = typecheck.LookupRuntimeFunc("deferreturn")
103 ir.Syms.Duffcopy = typecheck.LookupRuntimeFunc("duffcopy")
104 ir.Syms.Duffzero = typecheck.LookupRuntimeFunc("duffzero")
105 ir.Syms.GCWriteBarrier = typecheck.LookupRuntimeFunc("gcWriteBarrier")
106 ir.Syms.Goschedguarded = typecheck.LookupRuntimeFunc("goschedguarded")
107 ir.Syms.Growslice = typecheck.LookupRuntimeFunc("growslice")
108 ir.Syms.Msanread = typecheck.LookupRuntimeFunc("msanread")
109 ir.Syms.Msanwrite = typecheck.LookupRuntimeFunc("msanwrite")
110 ir.Syms.Msanmove = typecheck.LookupRuntimeFunc("msanmove")
111 ir.Syms.Newobject = typecheck.LookupRuntimeFunc("newobject")
112 ir.Syms.Newproc = typecheck.LookupRuntimeFunc("newproc")
113 ir.Syms.Panicdivide = typecheck.LookupRuntimeFunc("panicdivide")
114 ir.Syms.PanicdottypeE = typecheck.LookupRuntimeFunc("panicdottypeE")
115 ir.Syms.PanicdottypeI = typecheck.LookupRuntimeFunc("panicdottypeI")
116 ir.Syms.Panicnildottype = typecheck.LookupRuntimeFunc("panicnildottype")
117 ir.Syms.Panicoverflow = typecheck.LookupRuntimeFunc("panicoverflow")
118 ir.Syms.Panicshift = typecheck.LookupRuntimeFunc("panicshift")
119 ir.Syms.Raceread = typecheck.LookupRuntimeFunc("raceread")
120 ir.Syms.Racereadrange = typecheck.LookupRuntimeFunc("racereadrange")
121 ir.Syms.Racewrite = typecheck.LookupRuntimeFunc("racewrite")
122 ir.Syms.Racewriterange = typecheck.LookupRuntimeFunc("racewriterange")
123 ir.Syms.X86HasPOPCNT = typecheck.LookupRuntimeVar("x86HasPOPCNT") // bool
124 ir.Syms.X86HasSSE41 = typecheck.LookupRuntimeVar("x86HasSSE41") // bool
125 ir.Syms.X86HasFMA = typecheck.LookupRuntimeVar("x86HasFMA") // bool
126 ir.Syms.ARMHasVFPv4 = typecheck.LookupRuntimeVar("armHasVFPv4") // bool
127 ir.Syms.ARM64HasATOMICS = typecheck.LookupRuntimeVar("arm64HasATOMICS") // bool
128 ir.Syms.Staticuint64s = typecheck.LookupRuntimeVar("staticuint64s")
129 ir.Syms.Typedmemclr = typecheck.LookupRuntimeFunc("typedmemclr")
130 ir.Syms.Typedmemmove = typecheck.LookupRuntimeFunc("typedmemmove")
131 ir.Syms.Udiv = typecheck.LookupRuntimeVar("udiv") // asm func with special ABI
132 ir.Syms.WriteBarrier = typecheck.LookupRuntimeVar("writeBarrier") // struct { bool; ... }
133 ir.Syms.Zerobase = typecheck.LookupRuntimeVar("zerobase")
135 // asm funcs with special ABI
136 if base.Ctxt.Arch.Name == "amd64" {
137 GCWriteBarrierReg = map[int16]*obj.LSym{
138 x86.REG_AX: typecheck.LookupRuntimeFunc("gcWriteBarrier"),
139 x86.REG_CX: typecheck.LookupRuntimeFunc("gcWriteBarrierCX"),
140 x86.REG_DX: typecheck.LookupRuntimeFunc("gcWriteBarrierDX"),
141 x86.REG_BX: typecheck.LookupRuntimeFunc("gcWriteBarrierBX"),
142 x86.REG_BP: typecheck.LookupRuntimeFunc("gcWriteBarrierBP"),
143 x86.REG_SI: typecheck.LookupRuntimeFunc("gcWriteBarrierSI"),
144 x86.REG_R8: typecheck.LookupRuntimeFunc("gcWriteBarrierR8"),
145 x86.REG_R9: typecheck.LookupRuntimeFunc("gcWriteBarrierR9"),
149 if Arch.LinkArch.Family == sys.Wasm {
150 BoundsCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeFunc("goPanicIndex")
151 BoundsCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeFunc("goPanicIndexU")
152 BoundsCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeFunc("goPanicSliceAlen")
153 BoundsCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeFunc("goPanicSliceAlenU")
154 BoundsCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeFunc("goPanicSliceAcap")
155 BoundsCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeFunc("goPanicSliceAcapU")
156 BoundsCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeFunc("goPanicSliceB")
157 BoundsCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeFunc("goPanicSliceBU")
158 BoundsCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeFunc("goPanicSlice3Alen")
159 BoundsCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeFunc("goPanicSlice3AlenU")
160 BoundsCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeFunc("goPanicSlice3Acap")
161 BoundsCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeFunc("goPanicSlice3AcapU")
162 BoundsCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeFunc("goPanicSlice3B")
163 BoundsCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeFunc("goPanicSlice3BU")
164 BoundsCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeFunc("goPanicSlice3C")
165 BoundsCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeFunc("goPanicSlice3CU")
167 BoundsCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeFunc("panicIndex")
168 BoundsCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeFunc("panicIndexU")
169 BoundsCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeFunc("panicSliceAlen")
170 BoundsCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeFunc("panicSliceAlenU")
171 BoundsCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeFunc("panicSliceAcap")
172 BoundsCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeFunc("panicSliceAcapU")
173 BoundsCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeFunc("panicSliceB")
174 BoundsCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeFunc("panicSliceBU")
175 BoundsCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeFunc("panicSlice3Alen")
176 BoundsCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeFunc("panicSlice3AlenU")
177 BoundsCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeFunc("panicSlice3Acap")
178 BoundsCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeFunc("panicSlice3AcapU")
179 BoundsCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeFunc("panicSlice3B")
180 BoundsCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeFunc("panicSlice3BU")
181 BoundsCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeFunc("panicSlice3C")
182 BoundsCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeFunc("panicSlice3CU")
184 if Arch.LinkArch.PtrSize == 4 {
185 ExtendCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeVar("panicExtendIndex")
186 ExtendCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeVar("panicExtendIndexU")
187 ExtendCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeVar("panicExtendSliceAlen")
188 ExtendCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeVar("panicExtendSliceAlenU")
189 ExtendCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeVar("panicExtendSliceAcap")
190 ExtendCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeVar("panicExtendSliceAcapU")
191 ExtendCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeVar("panicExtendSliceB")
192 ExtendCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeVar("panicExtendSliceBU")
193 ExtendCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeVar("panicExtendSlice3Alen")
194 ExtendCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeVar("panicExtendSlice3AlenU")
195 ExtendCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeVar("panicExtendSlice3Acap")
196 ExtendCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeVar("panicExtendSlice3AcapU")
197 ExtendCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeVar("panicExtendSlice3B")
198 ExtendCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeVar("panicExtendSlice3BU")
199 ExtendCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeVar("panicExtendSlice3C")
200 ExtendCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeVar("panicExtendSlice3CU")
203 // Wasm (all asm funcs with special ABIs)
204 ir.Syms.WasmMove = typecheck.LookupRuntimeVar("wasmMove")
205 ir.Syms.WasmZero = typecheck.LookupRuntimeVar("wasmZero")
206 ir.Syms.WasmDiv = typecheck.LookupRuntimeVar("wasmDiv")
207 ir.Syms.WasmTruncS = typecheck.LookupRuntimeVar("wasmTruncS")
208 ir.Syms.WasmTruncU = typecheck.LookupRuntimeVar("wasmTruncU")
209 ir.Syms.SigPanic = typecheck.LookupRuntimeFunc("sigpanic")
212 // AbiForBodylessFuncStackMap returns the ABI for a bodyless function's stack map.
213 // This is not necessarily the ABI used to call it.
214 // Currently (1.17 dev) such a stack map is always ABI0;
215 // any ABI wrapper that is present is nosplit, hence a precise
216 // stack map is not needed there (the parameters survive only long
217 // enough to call the wrapped assembly function).
218 // This always returns a freshly copied ABI.
219 func AbiForBodylessFuncStackMap(fn *ir.Func) *abi.ABIConfig {
220 return ssaConfig.ABI0.Copy() // No idea what races will result, be safe
223 // TODO (NLT 2021-04-15) This must be changed to a name that cannot match; it may be helpful to other register ABI work to keep the trigger-logic
224 const magicNameDotSuffix = ".MagicMethodNameForTestingRegisterABI"
225 const magicLastTypeName = "MagicLastTypeNameForTestingRegisterABI"
227 // abiForFunc implements ABI policy for a function, but does not return a copy of the ABI.
228 // Passing a nil function returns the default ABI based on experiment configuration.
229 func abiForFunc(fn *ir.Func, abi0, abi1 *abi.ABIConfig) *abi.ABIConfig {
230 if objabi.Experiment.RegabiArgs {
231 // Select the ABI based on the function's defining ABI.
238 case obj.ABIInternal:
239 // TODO(austin): Clean up the nomenclature here.
240 // It's not clear that "abi1" is ABIInternal.
243 base.Fatalf("function %v has unknown ABI %v", fn, fn.ABI)
244 panic("not reachable")
249 name := ir.FuncName(fn)
250 magicName := strings.HasSuffix(name, magicNameDotSuffix)
251 if fn.Pragma&ir.RegisterParams != 0 { // TODO(register args) remove after register abi is working
252 if strings.Contains(name, ".") {
254 base.ErrorfAt(fn.Pos(), "Calls to //go:registerparams method %s won't work, remove the pragma from the declaration.", name)
258 } else if magicName {
259 if base.FmtPos(fn.Pos()) == "<autogenerated>:1" {
260 // no way to put a pragma here, and it will error out in the real source code if they did not do it there.
263 base.ErrorfAt(fn.Pos(), "Methods with magic name %s (method %s) must also specify //go:registerparams", magicNameDotSuffix[1:], name)
266 if regAbiForFuncType(fn.Type().FuncType()) {
267 // fmt.Printf("Saw magic last type name for function %s\n", name)
274 func regAbiForFuncType(ft *types.Func) bool {
275 np := ft.Params.NumFields()
276 return np > 0 && strings.Contains(ft.Params.FieldType(np-1).String(), magicLastTypeName)
279 // getParam returns the Field of ith param of node n (which is a
280 // function/method/interface call), where the receiver of a method call is
281 // considered as the 0th parameter. This does not include the receiver of an
283 func getParam(n *ir.CallExpr, i int) *types.Field {
285 if n.Op() == ir.OCALLMETH {
286 base.Fatalf("OCALLMETH missed by walkCall")
288 return t.Params().Field(i)
291 // dvarint writes a varint v to the funcdata in symbol x and returns the new offset
292 func dvarint(x *obj.LSym, off int, v int64) int {
293 if v < 0 || v > 1e9 {
294 panic(fmt.Sprintf("dvarint: bad offset for funcdata - %v", v))
297 return objw.Uint8(x, off, uint8(v))
299 off = objw.Uint8(x, off, uint8((v&127)|128))
301 return objw.Uint8(x, off, uint8(v>>7))
303 off = objw.Uint8(x, off, uint8(((v>>7)&127)|128))
305 return objw.Uint8(x, off, uint8(v>>14))
307 off = objw.Uint8(x, off, uint8(((v>>14)&127)|128))
309 return objw.Uint8(x, off, uint8(v>>21))
311 off = objw.Uint8(x, off, uint8(((v>>21)&127)|128))
312 return objw.Uint8(x, off, uint8(v>>28))
315 // emitOpenDeferInfo emits FUNCDATA information about the defers in a function
316 // that is using open-coded defers. This funcdata is used to determine the active
317 // defers in a function and execute those defers during panic processing.
319 // The funcdata is all encoded in varints (since values will almost always be less than
320 // 128, but stack offsets could potentially be up to 2Gbyte). All "locations" (offsets)
321 // for stack variables are specified as the number of bytes below varp (pointer to the
322 // top of the local variables) for their starting address. The format is:
324 // - Max total argument size among all the defers
325 // - Offset of the deferBits variable
326 // - Number of defers in the function
327 // - Information about each defer call, in reverse order of appearance in the function:
328 // - Total argument size of the call
329 // - Offset of the closure value to call
330 // - Number of arguments (including interface receiver or method receiver as first arg)
331 // - Information about each argument
332 // - Offset of the stored defer argument in this function's frame
333 // - Size of the argument
334 // - Offset of where argument should be placed in the args frame when making call
335 func (s *state) emitOpenDeferInfo() {
336 x := base.Ctxt.Lookup(s.curfn.LSym.Name + ".opendefer")
337 s.curfn.LSym.Func().OpenCodedDeferInfo = x
340 // Compute maxargsize (max size of arguments for all defers)
341 // first, so we can output it first to the funcdata
343 for i := len(s.openDefers) - 1; i >= 0; i-- {
345 argsize := r.n.X.Type().ArgWidth() // TODO register args: but maybe use of abi0 will make this easy
346 if argsize > maxargsize {
350 off = dvarint(x, off, maxargsize)
351 off = dvarint(x, off, -s.deferBitsTemp.FrameOffset())
352 off = dvarint(x, off, int64(len(s.openDefers)))
354 // Write in reverse-order, for ease of running in that order at runtime
355 for i := len(s.openDefers) - 1; i >= 0; i-- {
357 off = dvarint(x, off, r.n.X.Type().ArgWidth())
358 off = dvarint(x, off, -r.closureNode.FrameOffset())
359 numArgs := len(r.argNodes)
360 if r.rcvrNode != nil {
361 // If there's an interface receiver, treat/place it as the first
362 // arg. (If there is a method receiver, it's already included as
363 // first arg in r.argNodes.)
366 off = dvarint(x, off, int64(numArgs))
367 argAdjust := 0 // presence of receiver offsets the parameter count.
368 if r.rcvrNode != nil {
369 off = dvarint(x, off, -okOffset(r.rcvrNode.FrameOffset()))
370 off = dvarint(x, off, s.config.PtrSize)
371 off = dvarint(x, off, 0) // This is okay because defer records use ABI0 (for now)
375 // TODO(register args) assume abi0 for this?
377 pri := ab.ABIAnalyzeFuncType(r.n.X.Type().FuncType())
378 for j, arg := range r.argNodes {
379 f := getParam(r.n, j)
380 off = dvarint(x, off, -okOffset(arg.FrameOffset()))
381 off = dvarint(x, off, f.Type.Size())
382 off = dvarint(x, off, okOffset(pri.InParam(j+argAdjust).FrameOffset(pri)))
387 func okOffset(offset int64) int64 {
388 if offset == types.BOGUS_FUNARG_OFFSET {
389 panic(fmt.Errorf("Bogus offset %d", offset))
394 // buildssa builds an SSA function for fn.
395 // worker indicates which of the backend workers is doing the processing.
396 func buildssa(fn *ir.Func, worker int) *ssa.Func {
397 name := ir.FuncName(fn)
399 if ssaDump != "" { // match either a simple name e.g. "(*Reader).Reset", or a package.name e.g. "compress/gzip.(*Reader).Reset"
400 printssa = name == ssaDump || base.Ctxt.Pkgpath+"."+name == ssaDump
402 var astBuf *bytes.Buffer
404 astBuf = &bytes.Buffer{}
405 ir.FDumpList(astBuf, "buildssa-enter", fn.Enter)
406 ir.FDumpList(astBuf, "buildssa-body", fn.Body)
407 ir.FDumpList(astBuf, "buildssa-exit", fn.Exit)
409 fmt.Println("generating SSA for", name)
410 fmt.Print(astBuf.String())
418 s.hasdefer = fn.HasDefer()
419 if fn.Pragma&ir.CgoUnsafeArgs != 0 {
420 s.cgoUnsafeArgs = true
425 log: printssa && ssaDumpStdout,
429 s.f = ssa.NewFunc(&fe)
432 s.f.Config = ssaConfig
433 s.f.Cache = &ssaCaches[worker]
436 s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH")
437 s.f.PrintOrHtmlSSA = printssa
438 if fn.Pragma&ir.Nosplit != 0 {
441 s.f.ABI0 = ssaConfig.ABI0.Copy() // Make a copy to avoid racy map operations in type-register-width cache.
442 s.f.ABI1 = ssaConfig.ABI1.Copy()
443 s.f.ABIDefault = abiForFunc(nil, s.f.ABI0, s.f.ABI1)
444 s.f.ABISelf = abiForFunc(fn, s.f.ABI0, s.f.ABI1)
446 s.panics = map[funcLine]*ssa.Block{}
447 s.softFloat = s.config.SoftFloat
449 // Allocate starting block
450 s.f.Entry = s.f.NewBlock(ssa.BlockPlain)
451 s.f.Entry.Pos = fn.Pos()
456 ssaDF = filepath.Join(ssaDir, base.Ctxt.Pkgpath+"."+name+".html")
457 ssaD := filepath.Dir(ssaDF)
458 os.MkdirAll(ssaD, 0755)
460 s.f.HTMLWriter = ssa.NewHTMLWriter(ssaDF, s.f, ssaDumpCFG)
461 // TODO: generate and print a mapping from nodes to values and blocks
462 dumpSourcesColumn(s.f.HTMLWriter, fn)
463 s.f.HTMLWriter.WriteAST("AST", astBuf)
466 // Allocate starting values
467 s.labels = map[string]*ssaLabel{}
468 s.fwdVars = map[ir.Node]*ssa.Value{}
469 s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)
471 s.hasOpenDefers = base.Flag.N == 0 && s.hasdefer && !s.curfn.OpenCodedDeferDisallowed()
473 case base.Debug.NoOpenDefer != 0:
474 s.hasOpenDefers = false
475 case s.hasOpenDefers && (base.Ctxt.Flag_shared || base.Ctxt.Flag_dynlink) && base.Ctxt.Arch.Name == "386":
476 // Don't support open-coded defers for 386 ONLY when using shared
477 // libraries, because there is extra code (added by rewriteToUseGot())
478 // preceding the deferreturn/ret code that we don't track correctly.
479 s.hasOpenDefers = false
481 if s.hasOpenDefers && len(s.curfn.Exit) > 0 {
482 // Skip doing open defers if there is any extra exit code (likely
483 // race detection), since we will not generate that code in the
484 // case of the extra deferreturn/ret segment.
485 s.hasOpenDefers = false
488 // Similarly, skip if there are any heap-allocated result
489 // parameters that need to be copied back to their stack slots.
490 for _, f := range s.curfn.Type().Results().FieldSlice() {
491 if !f.Nname.(*ir.Name).OnStack() {
492 s.hasOpenDefers = false
497 if s.hasOpenDefers &&
498 s.curfn.NumReturns*s.curfn.NumDefers > 15 {
499 // Since we are generating defer calls at every exit for
500 // open-coded defers, skip doing open-coded defers if there are
501 // too many returns (especially if there are multiple defers).
502 // Open-coded defers are most important for improving performance
503 // for smaller functions (which don't have many returns).
504 s.hasOpenDefers = false
507 s.sp = s.entryNewValue0(ssa.OpSP, types.Types[types.TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
508 s.sb = s.entryNewValue0(ssa.OpSB, types.Types[types.TUINTPTR])
510 s.startBlock(s.f.Entry)
511 s.vars[memVar] = s.startmem
513 // Create the deferBits variable and stack slot. deferBits is a
514 // bitmask showing which of the open-coded defers in this function
515 // have been activated.
516 deferBitsTemp := typecheck.TempAt(src.NoXPos, s.curfn, types.Types[types.TUINT8])
517 deferBitsTemp.SetAddrtaken(true)
518 s.deferBitsTemp = deferBitsTemp
519 // For this value, AuxInt is initialized to zero by default
520 startDeferBits := s.entryNewValue0(ssa.OpConst8, types.Types[types.TUINT8])
521 s.vars[deferBitsVar] = startDeferBits
522 s.deferBitsAddr = s.addr(deferBitsTemp)
523 s.store(types.Types[types.TUINT8], s.deferBitsAddr, startDeferBits)
524 // Make sure that the deferBits stack slot is kept alive (for use
525 // by panics) and stores to deferBits are not eliminated, even if
526 // all checking code on deferBits in the function exit can be
527 // eliminated, because the defer statements were all
529 s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, deferBitsTemp, s.mem(), false)
532 var params *abi.ABIParamResultInfo
533 params = s.f.ABISelf.ABIAnalyze(fn.Type(), true)
535 // Generate addresses of local declarations
536 s.decladdrs = map[*ir.Name]*ssa.Value{}
537 for _, n := range fn.Dcl {
540 // Be aware that blank and unnamed input parameters will not appear here, but do appear in the type
541 s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem)
543 s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem)
545 // processed at each use, to prevent Addr coming
548 s.Fatalf("local variable with class %v unimplemented", n.Class)
552 s.f.OwnAux = ssa.OwnAuxCall(fn.LSym, params)
554 // Populate SSAable arguments.
555 for _, n := range fn.Dcl {
556 if n.Class == ir.PPARAM {
558 v := s.newValue0A(ssa.OpArg, n.Type(), n)
560 s.addNamedValue(n, v) // This helps with debugging information, not needed for compilation itself.
561 } else { // address was taken AND/OR too large for SSA
562 paramAssignment := ssa.ParamAssignmentForArgName(s.f, n)
563 if len(paramAssignment.Registers) > 0 {
564 if TypeOK(n.Type()) { // SSA-able type, so address was taken -- receive value in OpArg, DO NOT bind to var, store immediately to memory.
565 v := s.newValue0A(ssa.OpArg, n.Type(), n)
566 s.store(n.Type(), s.decladdrs[n], v)
567 } else { // Too big for SSA.
568 // Brute force, and early, do a bunch of stores from registers
569 // TODO fix the nasty storeArgOrLoad recursion in ssa/expand_calls.go so this Just Works with store of a big Arg.
570 s.storeParameterRegsToStack(s.f.ABISelf, paramAssignment, n, s.decladdrs[n], false)
577 // Populate closure variables.
578 if !fn.ClosureCalled() {
579 clo := s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr)
580 offset := int64(types.PtrSize) // PtrSize to skip past function entry PC field
581 for _, n := range fn.ClosureVars {
584 typ = types.NewPtr(typ)
587 offset = types.Rnd(offset, typ.Alignment())
588 ptr := s.newValue1I(ssa.OpOffPtr, types.NewPtr(typ), offset, clo)
591 // If n is a small variable captured by value, promote
592 // it to PAUTO so it can be converted to SSA.
594 // Note: While we never capture a variable by value if
595 // the user took its address, we may have generated
596 // runtime calls that did (#43701). Since we don't
597 // convert Addrtaken variables to SSA anyway, no point
598 // in promoting them either.
599 if n.Byval() && !n.Addrtaken() && TypeOK(n.Type()) {
601 fn.Dcl = append(fn.Dcl, n)
602 s.assign(n, s.load(n.Type(), ptr), false, 0)
607 ptr = s.load(typ, ptr)
609 s.setHeapaddr(fn.Pos(), n, ptr)
613 // Convert the AST-based IR to the SSA-based IR
619 // fallthrough to exit
620 if s.curBlock != nil {
621 s.pushLine(fn.Endlineno)
626 for _, b := range s.f.Blocks {
627 if b.Pos != src.NoXPos {
628 s.updateUnsetPredPos(b)
632 s.f.HTMLWriter.WritePhase("before insert phis", "before insert phis")
636 // Main call to ssa package to compile function
640 s.emitOpenDeferInfo()
646 func (s *state) storeParameterRegsToStack(abi *abi.ABIConfig, paramAssignment *abi.ABIParamAssignment, n *ir.Name, addr *ssa.Value, pointersOnly bool) {
647 typs, offs := paramAssignment.RegisterTypesAndOffsets()
648 for i, t := range typs {
649 if pointersOnly && !t.IsPtrShaped() {
652 r := paramAssignment.Registers[i]
654 op, reg := ssa.ArgOpAndRegisterFor(r, abi)
655 aux := &ssa.AuxNameOffset{Name: n, Offset: o}
656 v := s.newValue0I(op, t, reg)
658 p := s.newValue1I(ssa.OpOffPtr, types.NewPtr(t), o, addr)
663 // zeroResults zeros the return values at the start of the function.
664 // We need to do this very early in the function. Defer might stop a
665 // panic and show the return values as they exist at the time of
666 // panic. For precise stacks, the garbage collector assumes results
667 // are always live, so we need to zero them before any allocations,
668 // even allocations to move params/results to the heap.
669 func (s *state) zeroResults() {
670 for _, f := range s.curfn.Type().Results().FieldSlice() {
671 n := f.Nname.(*ir.Name)
673 // The local which points to the return value is the
674 // thing that needs zeroing. This is already handled
675 // by a Needzero annotation in plive.go:(*liveness).epilogue.
678 // Zero the stack location containing f.
679 if typ := n.Type(); TypeOK(typ) {
680 s.assign(n, s.zeroVal(typ), false, 0)
682 s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
683 s.zero(n.Type(), s.decladdrs[n])
688 // paramsToHeap produces code to allocate memory for heap-escaped parameters
689 // and to copy non-result parameters' values from the stack.
690 func (s *state) paramsToHeap() {
691 do := func(params *types.Type) {
692 for _, f := range params.FieldSlice() {
694 continue // anonymous or blank parameter
696 n := f.Nname.(*ir.Name)
697 if ir.IsBlank(n) || n.OnStack() {
701 if n.Class == ir.PPARAM {
702 s.move(n.Type(), s.expr(n.Heapaddr), s.decladdrs[n])
707 typ := s.curfn.Type()
713 // newHeapaddr allocates heap memory for n and sets its heap address.
714 func (s *state) newHeapaddr(n *ir.Name) {
715 s.setHeapaddr(n.Pos(), n, s.newObject(n.Type()))
718 // setHeapaddr allocates a new PAUTO variable to store ptr (which must be non-nil)
719 // and then sets it as n's heap address.
720 func (s *state) setHeapaddr(pos src.XPos, n *ir.Name, ptr *ssa.Value) {
721 if !ptr.Type.IsPtr() || !types.Identical(n.Type(), ptr.Type.Elem()) {
722 base.FatalfAt(n.Pos(), "setHeapaddr %L with type %v", n, ptr.Type)
725 // Declare variable to hold address.
726 addr := ir.NewNameAt(pos, &types.Sym{Name: "&" + n.Sym().Name, Pkg: types.LocalPkg})
727 addr.SetType(types.NewPtr(n.Type()))
728 addr.Class = ir.PAUTO
731 s.curfn.Dcl = append(s.curfn.Dcl, addr)
732 types.CalcSize(addr.Type())
734 if n.Class == ir.PPARAMOUT {
735 addr.SetIsOutputParamHeapAddr(true)
739 s.assign(addr, ptr, false, 0)
742 // newObject returns an SSA value denoting new(typ).
743 func (s *state) newObject(typ *types.Type) *ssa.Value {
745 return s.newValue1A(ssa.OpAddr, types.NewPtr(typ), ir.Syms.Zerobase, s.sb)
747 return s.rtcall(ir.Syms.Newobject, true, []*types.Type{types.NewPtr(typ)}, s.reflectType(typ))[0]
750 // reflectType returns an SSA value representing a pointer to typ's
751 // reflection type descriptor.
752 func (s *state) reflectType(typ *types.Type) *ssa.Value {
753 lsym := reflectdata.TypeLinksym(typ)
754 return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(types.Types[types.TUINT8]), lsym, s.sb)
757 func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *ir.Func) {
758 // Read sources of target function fn.
759 fname := base.Ctxt.PosTable.Pos(fn.Pos()).Filename()
760 targetFn, err := readFuncLines(fname, fn.Pos().Line(), fn.Endlineno.Line())
762 writer.Logf("cannot read sources for function %v: %v", fn, err)
765 // Read sources of inlined functions.
766 var inlFns []*ssa.FuncLines
767 for _, fi := range ssaDumpInlined {
769 fname := base.Ctxt.PosTable.Pos(fi.Pos()).Filename()
770 fnLines, err := readFuncLines(fname, fi.Pos().Line(), elno.Line())
772 writer.Logf("cannot read sources for inlined function %v: %v", fi, err)
775 inlFns = append(inlFns, fnLines)
778 sort.Sort(ssa.ByTopo(inlFns))
780 inlFns = append([]*ssa.FuncLines{targetFn}, inlFns...)
783 writer.WriteSources("sources", inlFns)
786 func readFuncLines(file string, start, end uint) (*ssa.FuncLines, error) {
787 f, err := os.Open(os.ExpandEnv(file))
794 scanner := bufio.NewScanner(f)
795 for scanner.Scan() && ln <= end {
797 lines = append(lines, scanner.Text())
801 return &ssa.FuncLines{Filename: file, StartLineno: start, Lines: lines}, nil
804 // updateUnsetPredPos propagates the earliest-value position information for b
805 // towards all of b's predecessors that need a position, and recurs on that
806 // predecessor if its position is updated. B should have a non-empty position.
807 func (s *state) updateUnsetPredPos(b *ssa.Block) {
808 if b.Pos == src.NoXPos {
809 s.Fatalf("Block %s should have a position", b)
811 bestPos := src.NoXPos
812 for _, e := range b.Preds {
817 if bestPos == src.NoXPos {
819 for _, v := range b.Values {
823 if v.Pos != src.NoXPos {
824 // Assume values are still in roughly textual order;
825 // TODO: could also seek minimum position?
832 s.updateUnsetPredPos(p) // We do not expect long chains of these, thus recursion is okay.
836 // Information about each open-coded defer.
837 type openDeferInfo struct {
838 // The node representing the call of the defer
840 // If defer call is closure call, the address of the argtmp where the
841 // closure is stored.
843 // The node representing the argtmp where the closure is stored - used for
844 // function, method, or interface call, to store a closure that panic
845 // processing can use for this defer.
847 // If defer call is interface call, the address of the argtmp where the
848 // receiver is stored
850 // The node representing the argtmp where the receiver is stored
852 // The addresses of the argtmps where the evaluated arguments of the defer
853 // function call are stored.
855 // The nodes representing the argtmps where the args of the defer are stored
860 // configuration (arch) information
863 // function we're building
870 labels map[string]*ssaLabel
872 // unlabeled break and continue statement tracking
873 breakTo *ssa.Block // current target for plain break statement
874 continueTo *ssa.Block // current target for plain continue statement
876 // current location where we're interpreting the AST
879 // variable assignments in the current block (map from variable symbol to ssa value)
880 // *Node is the unique identifier (an ONAME Node) for the variable.
881 // TODO: keep a single varnum map, then make all of these maps slices instead?
882 vars map[ir.Node]*ssa.Value
884 // fwdVars are variables that are used before they are defined in the current block.
885 // This map exists just to coalesce multiple references into a single FwdRef op.
886 // *Node is the unique identifier (an ONAME Node) for the variable.
887 fwdVars map[ir.Node]*ssa.Value
889 // all defined variables at the end of each block. Indexed by block ID.
890 defvars []map[ir.Node]*ssa.Value
892 // addresses of PPARAM and PPARAMOUT variables on the stack.
893 decladdrs map[*ir.Name]*ssa.Value
895 // starting values. Memory, stack pointer, and globals pointer
899 // value representing address of where deferBits autotmp is stored
900 deferBitsAddr *ssa.Value
901 deferBitsTemp *ir.Name
903 // line number stack. The current line number is top of stack
905 // the last line number processed; it may have been popped
908 // list of panic calls by function name and line number.
909 // Used to deduplicate panic calls.
910 panics map[funcLine]*ssa.Block
913 hasdefer bool // whether the function contains a defer statement
915 hasOpenDefers bool // whether we are doing open-coded defers
917 // If doing open-coded defers, list of info about the defer calls in
918 // scanning order. Hence, at exit we should run these defers in reverse
919 // order of this list
920 openDefers []*openDeferInfo
921 // For open-coded defers, this is the beginning and end blocks of the last
922 // defer exit code that we have generated so far. We use these to share
923 // code between exits if the shareDeferExits option (disabled by default)
925 lastDeferExit *ssa.Block // Entry block of last defer exit code we generated
926 lastDeferFinalBlock *ssa.Block // Final block of last defer exit code we generated
927 lastDeferCount int // Number of defers encountered at that point
929 prevCall *ssa.Value // the previous call; use this to tie results to the call op.
932 type funcLine struct {
938 type ssaLabel struct {
939 target *ssa.Block // block identified by this label
940 breakTarget *ssa.Block // block to break to in control flow node identified by this label
941 continueTarget *ssa.Block // block to continue to in control flow node identified by this label
944 // label returns the label associated with sym, creating it if necessary.
945 func (s *state) label(sym *types.Sym) *ssaLabel {
946 lab := s.labels[sym.Name]
949 s.labels[sym.Name] = lab
954 func (s *state) Logf(msg string, args ...interface{}) { s.f.Logf(msg, args...) }
955 func (s *state) Log() bool { return s.f.Log() }
956 func (s *state) Fatalf(msg string, args ...interface{}) {
957 s.f.Frontend().Fatalf(s.peekPos(), msg, args...)
959 func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) }
960 func (s *state) Debug_checknil() bool { return s.f.Frontend().Debug_checknil() }
962 func ssaMarker(name string) *ir.Name {
963 return typecheck.NewName(&types.Sym{Name: name})
967 // marker node for the memory variable
968 memVar = ssaMarker("mem")
970 // marker nodes for temporary variables
971 ptrVar = ssaMarker("ptr")
972 lenVar = ssaMarker("len")
973 newlenVar = ssaMarker("newlen")
974 capVar = ssaMarker("cap")
975 typVar = ssaMarker("typ")
976 okVar = ssaMarker("ok")
977 deferBitsVar = ssaMarker("deferBits")
980 // startBlock sets the current block we're generating code in to b.
981 func (s *state) startBlock(b *ssa.Block) {
982 if s.curBlock != nil {
983 s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
986 s.vars = map[ir.Node]*ssa.Value{}
987 for n := range s.fwdVars {
992 // endBlock marks the end of generating code for the current block.
993 // Returns the (former) current block. Returns nil if there is no current
994 // block, i.e. if no code flows to the current execution point.
995 func (s *state) endBlock() *ssa.Block {
1000 for len(s.defvars) <= int(b.ID) {
1001 s.defvars = append(s.defvars, nil)
1003 s.defvars[b.ID] = s.vars
1007 // Empty plain blocks get the line of their successor (handled after all blocks created),
1008 // except for increment blocks in For statements (handled in ssa conversion of OFOR),
1009 // and for blocks ending in GOTO/BREAK/CONTINUE.
1017 // pushLine pushes a line number on the line number stack.
1018 func (s *state) pushLine(line src.XPos) {
1019 if !line.IsKnown() {
1020 // the frontend may emit node with line number missing,
1021 // use the parent line number in this case.
1023 if base.Flag.K != 0 {
1024 base.Warn("buildssa: unknown position (line 0)")
1030 s.line = append(s.line, line)
1033 // popLine pops the top of the line number stack.
1034 func (s *state) popLine() {
1035 s.line = s.line[:len(s.line)-1]
1038 // peekPos peeks the top of the line number stack.
1039 func (s *state) peekPos() src.XPos {
1040 return s.line[len(s.line)-1]
1043 // newValue0 adds a new value with no arguments to the current block.
1044 func (s *state) newValue0(op ssa.Op, t *types.Type) *ssa.Value {
1045 return s.curBlock.NewValue0(s.peekPos(), op, t)
1048 // newValue0A adds a new value with no arguments and an aux value to the current block.
1049 func (s *state) newValue0A(op ssa.Op, t *types.Type, aux ssa.Aux) *ssa.Value {
1050 return s.curBlock.NewValue0A(s.peekPos(), op, t, aux)
1053 // newValue0I adds a new value with no arguments and an auxint value to the current block.
1054 func (s *state) newValue0I(op ssa.Op, t *types.Type, auxint int64) *ssa.Value {
1055 return s.curBlock.NewValue0I(s.peekPos(), op, t, auxint)
1058 // newValue1 adds a new value with one argument to the current block.
1059 func (s *state) newValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
1060 return s.curBlock.NewValue1(s.peekPos(), op, t, arg)
1063 // newValue1A adds a new value with one argument and an aux value to the current block.
1064 func (s *state) newValue1A(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value) *ssa.Value {
1065 return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
1068 // newValue1Apos adds a new value with one argument and an aux value to the current block.
1069 // isStmt determines whether the created values may be a statement or not
1070 // (i.e., false means never, yes means maybe).
1071 func (s *state) newValue1Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value, isStmt bool) *ssa.Value {
1073 return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
1075 return s.curBlock.NewValue1A(s.peekPos().WithNotStmt(), op, t, aux, arg)
1078 // newValue1I adds a new value with one argument and an auxint value to the current block.
1079 func (s *state) newValue1I(op ssa.Op, t *types.Type, aux int64, arg *ssa.Value) *ssa.Value {
1080 return s.curBlock.NewValue1I(s.peekPos(), op, t, aux, arg)
1083 // newValue2 adds a new value with two arguments to the current block.
1084 func (s *state) newValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
1085 return s.curBlock.NewValue2(s.peekPos(), op, t, arg0, arg1)
1088 // newValue2A adds a new value with two arguments and an aux value to the current block.
1089 func (s *state) newValue2A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value) *ssa.Value {
1090 return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1)
1093 // newValue2Apos adds a new value with two arguments and an aux value to the current block.
1094 // isStmt determines whether the created values may be a statement or not
1095 // (i.e., false means never, yes means maybe).
1096 func (s *state) newValue2Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value, isStmt bool) *ssa.Value {
1098 return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1)
1100 return s.curBlock.NewValue2A(s.peekPos().WithNotStmt(), op, t, aux, arg0, arg1)
1103 // newValue2I adds a new value with two arguments and an auxint value to the current block.
1104 func (s *state) newValue2I(op ssa.Op, t *types.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
1105 return s.curBlock.NewValue2I(s.peekPos(), op, t, aux, arg0, arg1)
1108 // newValue3 adds a new value with three arguments to the current block.
1109 func (s *state) newValue3(op ssa.Op, t *types.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
1110 return s.curBlock.NewValue3(s.peekPos(), op, t, arg0, arg1, arg2)
1113 // newValue3I adds a new value with three arguments and an auxint value to the current block.
1114 func (s *state) newValue3I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
1115 return s.curBlock.NewValue3I(s.peekPos(), op, t, aux, arg0, arg1, arg2)
1118 // newValue3A adds a new value with three arguments and an aux value to the current block.
1119 func (s *state) newValue3A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
1120 return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
1123 // newValue3Apos adds a new value with three arguments and an aux value to the current block.
1124 // isStmt determines whether the created values may be a statement or not
1125 // (i.e., false means never, yes means maybe).
1126 func (s *state) newValue3Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1, arg2 *ssa.Value, isStmt bool) *ssa.Value {
1128 return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
1130 return s.curBlock.NewValue3A(s.peekPos().WithNotStmt(), op, t, aux, arg0, arg1, arg2)
1133 // newValue4 adds a new value with four arguments to the current block.
1134 func (s *state) newValue4(op ssa.Op, t *types.Type, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
1135 return s.curBlock.NewValue4(s.peekPos(), op, t, arg0, arg1, arg2, arg3)
1138 // newValue4 adds a new value with four arguments and an auxint value to the current block.
1139 func (s *state) newValue4I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
1140 return s.curBlock.NewValue4I(s.peekPos(), op, t, aux, arg0, arg1, arg2, arg3)
1143 // entryNewValue0 adds a new value with no arguments to the entry block.
1144 func (s *state) entryNewValue0(op ssa.Op, t *types.Type) *ssa.Value {
1145 return s.f.Entry.NewValue0(src.NoXPos, op, t)
1148 // entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
1149 func (s *state) entryNewValue0A(op ssa.Op, t *types.Type, aux ssa.Aux) *ssa.Value {
1150 return s.f.Entry.NewValue0A(src.NoXPos, op, t, aux)
1153 // entryNewValue1 adds a new value with one argument to the entry block.
1154 func (s *state) entryNewValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
1155 return s.f.Entry.NewValue1(src.NoXPos, op, t, arg)
1158 // entryNewValue1 adds a new value with one argument and an auxint value to the entry block.
1159 func (s *state) entryNewValue1I(op ssa.Op, t *types.Type, auxint int64, arg *ssa.Value) *ssa.Value {
1160 return s.f.Entry.NewValue1I(src.NoXPos, op, t, auxint, arg)
1163 // entryNewValue1A adds a new value with one argument and an aux value to the entry block.
1164 func (s *state) entryNewValue1A(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value) *ssa.Value {
1165 return s.f.Entry.NewValue1A(src.NoXPos, op, t, aux, arg)
1168 // entryNewValue2 adds a new value with two arguments to the entry block.
1169 func (s *state) entryNewValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
1170 return s.f.Entry.NewValue2(src.NoXPos, op, t, arg0, arg1)
1173 // entryNewValue2A adds a new value with two arguments and an aux value to the entry block.
1174 func (s *state) entryNewValue2A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value) *ssa.Value {
1175 return s.f.Entry.NewValue2A(src.NoXPos, op, t, aux, arg0, arg1)
1178 // const* routines add a new const value to the entry block.
1179 func (s *state) constSlice(t *types.Type) *ssa.Value {
1180 return s.f.ConstSlice(t)
1182 func (s *state) constInterface(t *types.Type) *ssa.Value {
1183 return s.f.ConstInterface(t)
1185 func (s *state) constNil(t *types.Type) *ssa.Value { return s.f.ConstNil(t) }
1186 func (s *state) constEmptyString(t *types.Type) *ssa.Value {
1187 return s.f.ConstEmptyString(t)
1189 func (s *state) constBool(c bool) *ssa.Value {
1190 return s.f.ConstBool(types.Types[types.TBOOL], c)
1192 func (s *state) constInt8(t *types.Type, c int8) *ssa.Value {
1193 return s.f.ConstInt8(t, c)
1195 func (s *state) constInt16(t *types.Type, c int16) *ssa.Value {
1196 return s.f.ConstInt16(t, c)
1198 func (s *state) constInt32(t *types.Type, c int32) *ssa.Value {
1199 return s.f.ConstInt32(t, c)
1201 func (s *state) constInt64(t *types.Type, c int64) *ssa.Value {
1202 return s.f.ConstInt64(t, c)
1204 func (s *state) constFloat32(t *types.Type, c float64) *ssa.Value {
1205 return s.f.ConstFloat32(t, c)
1207 func (s *state) constFloat64(t *types.Type, c float64) *ssa.Value {
1208 return s.f.ConstFloat64(t, c)
1210 func (s *state) constInt(t *types.Type, c int64) *ssa.Value {
1211 if s.config.PtrSize == 8 {
1212 return s.constInt64(t, c)
1214 if int64(int32(c)) != c {
1215 s.Fatalf("integer constant too big %d", c)
1217 return s.constInt32(t, int32(c))
1219 func (s *state) constOffPtrSP(t *types.Type, c int64) *ssa.Value {
1220 return s.f.ConstOffPtrSP(t, c, s.sp)
1223 // newValueOrSfCall* are wrappers around newValue*, which may create a call to a
1224 // soft-float runtime function instead (when emitting soft-float code).
1225 func (s *state) newValueOrSfCall1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
1227 if c, ok := s.sfcall(op, arg); ok {
1231 return s.newValue1(op, t, arg)
1233 func (s *state) newValueOrSfCall2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
1235 if c, ok := s.sfcall(op, arg0, arg1); ok {
1239 return s.newValue2(op, t, arg0, arg1)
1242 type instrumentKind uint8
1245 instrumentRead = iota
1250 func (s *state) instrument(t *types.Type, addr *ssa.Value, kind instrumentKind) {
1251 s.instrument2(t, addr, nil, kind)
1254 // instrumentFields instruments a read/write operation on addr.
1255 // If it is instrumenting for MSAN and t is a struct type, it instruments
1256 // operation for each field, instead of for the whole struct.
1257 func (s *state) instrumentFields(t *types.Type, addr *ssa.Value, kind instrumentKind) {
1258 if !base.Flag.MSan || !t.IsStruct() {
1259 s.instrument(t, addr, kind)
1262 for _, f := range t.Fields().Slice() {
1263 if f.Sym.IsBlank() {
1266 offptr := s.newValue1I(ssa.OpOffPtr, types.NewPtr(f.Type), abi.FieldOffsetOf(f), addr)
1267 s.instrumentFields(f.Type, offptr, kind)
1271 func (s *state) instrumentMove(t *types.Type, dst, src *ssa.Value) {
1273 s.instrument2(t, dst, src, instrumentMove)
1275 s.instrument(t, src, instrumentRead)
1276 s.instrument(t, dst, instrumentWrite)
1280 func (s *state) instrument2(t *types.Type, addr, addr2 *ssa.Value, kind instrumentKind) {
1281 if !s.curfn.InstrumentBody() {
1287 return // can't race on zero-sized things
1290 if ssa.IsSanitizerSafeAddr(addr) {
1297 if addr2 != nil && kind != instrumentMove {
1298 panic("instrument2: non-nil addr2 for non-move instrumentation")
1303 case instrumentRead:
1304 fn = ir.Syms.Msanread
1305 case instrumentWrite:
1306 fn = ir.Syms.Msanwrite
1307 case instrumentMove:
1308 fn = ir.Syms.Msanmove
1310 panic("unreachable")
1313 } else if base.Flag.Race && t.NumComponents(types.CountBlankFields) > 1 {
1314 // for composite objects we have to write every address
1315 // because a write might happen to any subobject.
1316 // composites with only one element don't have subobjects, though.
1318 case instrumentRead:
1319 fn = ir.Syms.Racereadrange
1320 case instrumentWrite:
1321 fn = ir.Syms.Racewriterange
1323 panic("unreachable")
1326 } else if base.Flag.Race {
1327 // for non-composite objects we can write just the start
1328 // address, as any write must write the first byte.
1330 case instrumentRead:
1331 fn = ir.Syms.Raceread
1332 case instrumentWrite:
1333 fn = ir.Syms.Racewrite
1335 panic("unreachable")
1338 panic("unreachable")
1341 args := []*ssa.Value{addr}
1343 args = append(args, addr2)
1346 args = append(args, s.constInt(types.Types[types.TUINTPTR], w))
1348 s.rtcall(fn, true, nil, args...)
1351 func (s *state) load(t *types.Type, src *ssa.Value) *ssa.Value {
1352 s.instrumentFields(t, src, instrumentRead)
1353 return s.rawLoad(t, src)
1356 func (s *state) rawLoad(t *types.Type, src *ssa.Value) *ssa.Value {
1357 return s.newValue2(ssa.OpLoad, t, src, s.mem())
1360 func (s *state) store(t *types.Type, dst, val *ssa.Value) {
1361 s.vars[memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, dst, val, s.mem())
1364 func (s *state) zero(t *types.Type, dst *ssa.Value) {
1365 s.instrument(t, dst, instrumentWrite)
1366 store := s.newValue2I(ssa.OpZero, types.TypeMem, t.Size(), dst, s.mem())
1368 s.vars[memVar] = store
1371 func (s *state) move(t *types.Type, dst, src *ssa.Value) {
1372 s.instrumentMove(t, dst, src)
1373 store := s.newValue3I(ssa.OpMove, types.TypeMem, t.Size(), dst, src, s.mem())
1375 s.vars[memVar] = store
1378 // stmtList converts the statement list n to SSA and adds it to s.
1379 func (s *state) stmtList(l ir.Nodes) {
1380 for _, n := range l {
1385 // stmt converts the statement n to SSA and adds it to s.
1386 func (s *state) stmt(n ir.Node) {
1387 if !(n.Op() == ir.OVARKILL || n.Op() == ir.OVARLIVE || n.Op() == ir.OVARDEF) {
1388 // OVARKILL, OVARLIVE, and OVARDEF are invisible to the programmer, so we don't use their line numbers to avoid confusion in debugging.
1393 // If s.curBlock is nil, and n isn't a label (which might have an associated goto somewhere),
1394 // then this code is dead. Stop here.
1395 if s.curBlock == nil && n.Op() != ir.OLABEL {
1399 s.stmtList(n.Init())
1403 n := n.(*ir.BlockStmt)
1407 case ir.ODCLCONST, ir.ODCLTYPE, ir.OFALL:
1409 // Expression statements
1411 n := n.(*ir.CallExpr)
1412 if ir.IsIntrinsicCall(n) {
1419 n := n.(*ir.CallExpr)
1420 s.callResult(n, callNormal)
1421 if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.ONAME && n.X.(*ir.Name).Class == ir.PFUNC {
1422 if fn := n.X.Sym().Name; base.Flag.CompilingRuntime && fn == "throw" ||
1423 n.X.Sym().Pkg == ir.Pkgs.Runtime && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap") {
1426 b.Kind = ssa.BlockExit
1428 // TODO: never rewrite OPANIC to OCALLFUNC in the
1429 // first place. Need to wait until all backends
1434 n := n.(*ir.GoDeferStmt)
1435 if base.Debug.Defer > 0 {
1436 var defertype string
1437 if s.hasOpenDefers {
1438 defertype = "open-coded"
1439 } else if n.Esc() == ir.EscNever {
1440 defertype = "stack-allocated"
1442 defertype = "heap-allocated"
1444 base.WarnfAt(n.Pos(), "%s defer", defertype)
1446 if s.hasOpenDefers {
1447 s.openDeferRecord(n.Call.(*ir.CallExpr))
1450 if n.Esc() == ir.EscNever {
1453 s.callResult(n.Call.(*ir.CallExpr), d)
1456 n := n.(*ir.GoDeferStmt)
1457 s.callResult(n.Call.(*ir.CallExpr), callGo)
1459 case ir.OAS2DOTTYPE:
1460 n := n.(*ir.AssignListStmt)
1461 res, resok := s.dottype(n.Rhs[0].(*ir.TypeAssertExpr), true)
1463 if !TypeOK(n.Rhs[0].Type()) {
1464 if res.Op != ssa.OpLoad {
1465 s.Fatalf("dottype of non-load")
1468 if mem.Op == ssa.OpVarKill {
1471 if res.Args[1] != mem {
1472 s.Fatalf("memory no longer live from 2-result dottype load")
1477 s.assign(n.Lhs[0], res, deref, 0)
1478 s.assign(n.Lhs[1], resok, false, 0)
1482 // We come here only when it is an intrinsic call returning two values.
1483 n := n.(*ir.AssignListStmt)
1484 call := n.Rhs[0].(*ir.CallExpr)
1485 if !ir.IsIntrinsicCall(call) {
1486 s.Fatalf("non-intrinsic AS2FUNC not expanded %v", call)
1488 v := s.intrinsicCall(call)
1489 v1 := s.newValue1(ssa.OpSelect0, n.Lhs[0].Type(), v)
1490 v2 := s.newValue1(ssa.OpSelect1, n.Lhs[1].Type(), v)
1491 s.assign(n.Lhs[0], v1, false, 0)
1492 s.assign(n.Lhs[1], v2, false, 0)
1497 if v := n.X; v.Esc() == ir.EscHeap {
1502 n := n.(*ir.LabelStmt)
1506 // The label might already have a target block via a goto.
1507 if lab.target == nil {
1508 lab.target = s.f.NewBlock(ssa.BlockPlain)
1511 // Go to that label.
1512 // (We pretend "label:" is preceded by "goto label", unless the predecessor is unreachable.)
1513 if s.curBlock != nil {
1515 b.AddEdgeTo(lab.target)
1517 s.startBlock(lab.target)
1520 n := n.(*ir.BranchStmt)
1524 if lab.target == nil {
1525 lab.target = s.f.NewBlock(ssa.BlockPlain)
1529 b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
1530 b.AddEdgeTo(lab.target)
1533 n := n.(*ir.AssignStmt)
1534 if n.X == n.Y && n.X.Op() == ir.ONAME {
1535 // An x=x assignment. No point in doing anything
1536 // here. In addition, skipping this assignment
1537 // prevents generating:
1540 // which is bad because x is incorrectly considered
1541 // dead before the vardef. See issue #14904.
1549 case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT:
1550 // All literals with nonzero fields have already been
1551 // rewritten during walk. Any that remain are just T{}
1552 // or equivalents. Use the zero value.
1553 if !ir.IsZero(rhs) {
1554 s.Fatalf("literal with nonzero value in SSA: %v", rhs)
1558 rhs := rhs.(*ir.CallExpr)
1559 // Check whether we're writing the result of an append back to the same slice.
1560 // If so, we handle it specially to avoid write barriers on the fast
1561 // (non-growth) path.
1562 if !ir.SameSafeExpr(n.X, rhs.Args[0]) || base.Flag.N != 0 {
1565 // If the slice can be SSA'd, it'll be on the stack,
1566 // so there will be no write barriers,
1567 // so there's no need to attempt to prevent them.
1569 if base.Debug.Append > 0 { // replicating old diagnostic message
1570 base.WarnfAt(n.Pos(), "append: len-only update (in local slice)")
1574 if base.Debug.Append > 0 {
1575 base.WarnfAt(n.Pos(), "append: len-only update")
1582 if ir.IsBlank(n.X) {
1584 // Just evaluate rhs for side-effects.
1602 r = nil // Signal assign to use OpZero.
1615 if rhs != nil && (rhs.Op() == ir.OSLICE || rhs.Op() == ir.OSLICE3 || rhs.Op() == ir.OSLICESTR) && ir.SameSafeExpr(rhs.(*ir.SliceExpr).X, n.X) {
1616 // We're assigning a slicing operation back to its source.
1617 // Don't write back fields we aren't changing. See issue #14855.
1618 rhs := rhs.(*ir.SliceExpr)
1619 i, j, k := rhs.Low, rhs.High, rhs.Max
1620 if i != nil && (i.Op() == ir.OLITERAL && i.Val().Kind() == constant.Int && ir.Int64Val(i) == 0) {
1621 // [0:...] is the same as [:...]
1624 // TODO: detect defaults for len/cap also.
1625 // Currently doesn't really work because (*p)[:len(*p)] appears here as:
1628 //if j != nil && (j.Op == OLEN && SameSafeExpr(j.Left, n.Left)) {
1631 //if k != nil && (k.Op == OCAP && SameSafeExpr(k.Left, n.Left)) {
1645 s.assign(n.X, r, deref, skip)
1649 if ir.IsConst(n.Cond, constant.Bool) {
1650 s.stmtList(n.Cond.Init())
1651 if ir.BoolVal(n.Cond) {
1659 bEnd := s.f.NewBlock(ssa.BlockPlain)
1664 var bThen *ssa.Block
1665 if len(n.Body) != 0 {
1666 bThen = s.f.NewBlock(ssa.BlockPlain)
1670 var bElse *ssa.Block
1671 if len(n.Else) != 0 {
1672 bElse = s.f.NewBlock(ssa.BlockPlain)
1676 s.condBranch(n.Cond, bThen, bElse, likely)
1678 if len(n.Body) != 0 {
1681 if b := s.endBlock(); b != nil {
1685 if len(n.Else) != 0 {
1688 if b := s.endBlock(); b != nil {
1695 n := n.(*ir.ReturnStmt)
1696 s.stmtList(n.Results)
1698 b.Pos = s.lastPos.WithIsStmt()
1701 n := n.(*ir.TailCallStmt)
1703 b.Kind = ssa.BlockRetJmp // override BlockRet
1704 b.Aux = callTargetLSym(n.Target)
1706 case ir.OCONTINUE, ir.OBREAK:
1707 n := n.(*ir.BranchStmt)
1710 // plain break/continue
1718 // labeled break/continue; look up the target
1723 to = lab.continueTarget
1725 to = lab.breakTarget
1730 b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
1733 case ir.OFOR, ir.OFORUNTIL:
1734 // OFOR: for Ninit; Left; Right { Nbody }
1735 // cond (Left); body (Nbody); incr (Right)
1737 // OFORUNTIL: for Ninit; Left; Right; List { Nbody }
1738 // => body: { Nbody }; incr: Right; if Left { lateincr: List; goto body }; end:
1739 n := n.(*ir.ForStmt)
1740 bCond := s.f.NewBlock(ssa.BlockPlain)
1741 bBody := s.f.NewBlock(ssa.BlockPlain)
1742 bIncr := s.f.NewBlock(ssa.BlockPlain)
1743 bEnd := s.f.NewBlock(ssa.BlockPlain)
1745 // ensure empty for loops have correct position; issue #30167
1748 // first, jump to condition test (OFOR) or body (OFORUNTIL)
1750 if n.Op() == ir.OFOR {
1752 // generate code to test condition
1755 s.condBranch(n.Cond, bBody, bEnd, 1)
1758 b.Kind = ssa.BlockPlain
1766 // set up for continue/break in body
1767 prevContinue := s.continueTo
1768 prevBreak := s.breakTo
1769 s.continueTo = bIncr
1772 if sym := n.Label; sym != nil {
1775 lab.continueTarget = bIncr
1776 lab.breakTarget = bEnd
1783 // tear down continue/break
1784 s.continueTo = prevContinue
1785 s.breakTo = prevBreak
1787 lab.continueTarget = nil
1788 lab.breakTarget = nil
1791 // done with body, goto incr
1792 if b := s.endBlock(); b != nil {
1796 // generate incr (and, for OFORUNTIL, condition)
1801 if n.Op() == ir.OFOR {
1802 if b := s.endBlock(); b != nil {
1804 // It can happen that bIncr ends in a block containing only VARKILL,
1805 // and that muddles the debugging experience.
1806 if b.Pos == src.NoXPos {
1811 // bCond is unused in OFORUNTIL, so repurpose it.
1814 s.condBranch(n.Cond, bLateIncr, bEnd, 1)
1815 // generate late increment
1816 s.startBlock(bLateIncr)
1818 s.endBlock().AddEdgeTo(bBody)
1823 case ir.OSWITCH, ir.OSELECT:
1824 // These have been mostly rewritten by the front end into their Nbody fields.
1825 // Our main task is to correctly hook up any break statements.
1826 bEnd := s.f.NewBlock(ssa.BlockPlain)
1828 prevBreak := s.breakTo
1832 if n.Op() == ir.OSWITCH {
1833 n := n.(*ir.SwitchStmt)
1837 n := n.(*ir.SelectStmt)
1846 lab.breakTarget = bEnd
1849 // generate body code
1852 s.breakTo = prevBreak
1854 lab.breakTarget = nil
1857 // walk adds explicit OBREAK nodes to the end of all reachable code paths.
1858 // If we still have a current block here, then mark it unreachable.
1859 if s.curBlock != nil {
1862 b.Kind = ssa.BlockExit
1868 n := n.(*ir.UnaryExpr)
1870 s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.X.(*ir.Name), s.mem(), false)
1873 // Insert a varkill op to record that a variable is no longer live.
1874 // We only care about liveness info at call sites, so putting the
1875 // varkill in the store chain is enough to keep it correctly ordered
1876 // with respect to call ops.
1877 n := n.(*ir.UnaryExpr)
1879 s.vars[memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.X.(*ir.Name), s.mem(), false)
1883 // Insert a varlive op to record that a variable is still live.
1884 n := n.(*ir.UnaryExpr)
1887 s.Fatalf("VARLIVE variable %v must have Addrtaken set", v)
1890 case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT:
1892 s.Fatalf("VARLIVE variable %v must be Auto or Arg", v)
1894 s.vars[memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, v, s.mem())
1897 n := n.(*ir.UnaryExpr)
1902 n := n.(*ir.InlineMarkStmt)
1903 s.newValue1I(ssa.OpInlMark, types.TypeVoid, n.Index, s.mem())
1906 s.Fatalf("unhandled stmt %v", n.Op())
1910 // If true, share as many open-coded defer exits as possible (with the downside of
1911 // worse line-number information)
1912 const shareDeferExits = false
1914 // exit processes any code that needs to be generated just before returning.
1915 // It returns a BlockRet block that ends the control flow. Its control value
1916 // will be set to the final memory state.
1917 func (s *state) exit() *ssa.Block {
1919 if s.hasOpenDefers {
1920 if shareDeferExits && s.lastDeferExit != nil && len(s.openDefers) == s.lastDeferCount {
1921 if s.curBlock.Kind != ssa.BlockPlain {
1922 panic("Block for an exit should be BlockPlain")
1924 s.curBlock.AddEdgeTo(s.lastDeferExit)
1926 return s.lastDeferFinalBlock
1930 s.rtcall(ir.Syms.Deferreturn, true, nil)
1936 // Do actual return.
1937 // These currently turn into self-copies (in many cases).
1938 resultFields := s.curfn.Type().Results().FieldSlice()
1939 results := make([]*ssa.Value, len(resultFields)+1, len(resultFields)+1)
1940 m = s.newValue0(ssa.OpMakeResult, s.f.OwnAux.LateExpansionResultType())
1941 // Store SSAable and heap-escaped PPARAMOUT variables back to stack locations.
1942 for i, f := range resultFields {
1943 n := f.Nname.(*ir.Name)
1944 if s.canSSA(n) { // result is in some SSA variable
1945 if !n.IsOutputParamInRegisters() {
1946 // We are about to store to the result slot.
1947 s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
1949 results[i] = s.variable(n, n.Type())
1950 } else if !n.OnStack() { // result is actually heap allocated
1951 // We are about to copy the in-heap result to the result slot.
1952 s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
1953 ha := s.expr(n.Heapaddr)
1954 s.instrumentFields(n.Type(), ha, instrumentRead)
1955 results[i] = s.newValue2(ssa.OpDereference, n.Type(), ha, s.mem())
1956 } else { // result is not SSA-able; not escaped, so not on heap, but too large for SSA.
1957 // Before register ABI this ought to be a self-move, home=dest,
1958 // With register ABI, it's still a self-move if parameter is on stack (i.e., too big or overflowed)
1959 // No VarDef, as the result slot is already holding live value.
1960 results[i] = s.newValue2(ssa.OpDereference, n.Type(), s.addr(n), s.mem())
1964 // Run exit code. Today, this is just racefuncexit, in -race mode.
1965 // TODO(register args) this seems risky here with a register-ABI, but not clear it is right to do it earlier either.
1966 // Spills in register allocation might just fix it.
1967 s.stmtList(s.curfn.Exit)
1969 results[len(results)-1] = s.mem()
1970 m.AddArgs(results...)
1973 b.Kind = ssa.BlockRet
1975 if s.hasdefer && s.hasOpenDefers {
1976 s.lastDeferFinalBlock = b
1981 type opAndType struct {
1986 var opToSSA = map[opAndType]ssa.Op{
1987 opAndType{ir.OADD, types.TINT8}: ssa.OpAdd8,
1988 opAndType{ir.OADD, types.TUINT8}: ssa.OpAdd8,
1989 opAndType{ir.OADD, types.TINT16}: ssa.OpAdd16,
1990 opAndType{ir.OADD, types.TUINT16}: ssa.OpAdd16,
1991 opAndType{ir.OADD, types.TINT32}: ssa.OpAdd32,
1992 opAndType{ir.OADD, types.TUINT32}: ssa.OpAdd32,
1993 opAndType{ir.OADD, types.TINT64}: ssa.OpAdd64,
1994 opAndType{ir.OADD, types.TUINT64}: ssa.OpAdd64,
1995 opAndType{ir.OADD, types.TFLOAT32}: ssa.OpAdd32F,
1996 opAndType{ir.OADD, types.TFLOAT64}: ssa.OpAdd64F,
1998 opAndType{ir.OSUB, types.TINT8}: ssa.OpSub8,
1999 opAndType{ir.OSUB, types.TUINT8}: ssa.OpSub8,
2000 opAndType{ir.OSUB, types.TINT16}: ssa.OpSub16,
2001 opAndType{ir.OSUB, types.TUINT16}: ssa.OpSub16,
2002 opAndType{ir.OSUB, types.TINT32}: ssa.OpSub32,
2003 opAndType{ir.OSUB, types.TUINT32}: ssa.OpSub32,
2004 opAndType{ir.OSUB, types.TINT64}: ssa.OpSub64,
2005 opAndType{ir.OSUB, types.TUINT64}: ssa.OpSub64,
2006 opAndType{ir.OSUB, types.TFLOAT32}: ssa.OpSub32F,
2007 opAndType{ir.OSUB, types.TFLOAT64}: ssa.OpSub64F,
2009 opAndType{ir.ONOT, types.TBOOL}: ssa.OpNot,
2011 opAndType{ir.ONEG, types.TINT8}: ssa.OpNeg8,
2012 opAndType{ir.ONEG, types.TUINT8}: ssa.OpNeg8,
2013 opAndType{ir.ONEG, types.TINT16}: ssa.OpNeg16,
2014 opAndType{ir.ONEG, types.TUINT16}: ssa.OpNeg16,
2015 opAndType{ir.ONEG, types.TINT32}: ssa.OpNeg32,
2016 opAndType{ir.ONEG, types.TUINT32}: ssa.OpNeg32,
2017 opAndType{ir.ONEG, types.TINT64}: ssa.OpNeg64,
2018 opAndType{ir.ONEG, types.TUINT64}: ssa.OpNeg64,
2019 opAndType{ir.ONEG, types.TFLOAT32}: ssa.OpNeg32F,
2020 opAndType{ir.ONEG, types.TFLOAT64}: ssa.OpNeg64F,
2022 opAndType{ir.OBITNOT, types.TINT8}: ssa.OpCom8,
2023 opAndType{ir.OBITNOT, types.TUINT8}: ssa.OpCom8,
2024 opAndType{ir.OBITNOT, types.TINT16}: ssa.OpCom16,
2025 opAndType{ir.OBITNOT, types.TUINT16}: ssa.OpCom16,
2026 opAndType{ir.OBITNOT, types.TINT32}: ssa.OpCom32,
2027 opAndType{ir.OBITNOT, types.TUINT32}: ssa.OpCom32,
2028 opAndType{ir.OBITNOT, types.TINT64}: ssa.OpCom64,
2029 opAndType{ir.OBITNOT, types.TUINT64}: ssa.OpCom64,
2031 opAndType{ir.OIMAG, types.TCOMPLEX64}: ssa.OpComplexImag,
2032 opAndType{ir.OIMAG, types.TCOMPLEX128}: ssa.OpComplexImag,
2033 opAndType{ir.OREAL, types.TCOMPLEX64}: ssa.OpComplexReal,
2034 opAndType{ir.OREAL, types.TCOMPLEX128}: ssa.OpComplexReal,
2036 opAndType{ir.OMUL, types.TINT8}: ssa.OpMul8,
2037 opAndType{ir.OMUL, types.TUINT8}: ssa.OpMul8,
2038 opAndType{ir.OMUL, types.TINT16}: ssa.OpMul16,
2039 opAndType{ir.OMUL, types.TUINT16}: ssa.OpMul16,
2040 opAndType{ir.OMUL, types.TINT32}: ssa.OpMul32,
2041 opAndType{ir.OMUL, types.TUINT32}: ssa.OpMul32,
2042 opAndType{ir.OMUL, types.TINT64}: ssa.OpMul64,
2043 opAndType{ir.OMUL, types.TUINT64}: ssa.OpMul64,
2044 opAndType{ir.OMUL, types.TFLOAT32}: ssa.OpMul32F,
2045 opAndType{ir.OMUL, types.TFLOAT64}: ssa.OpMul64F,
2047 opAndType{ir.ODIV, types.TFLOAT32}: ssa.OpDiv32F,
2048 opAndType{ir.ODIV, types.TFLOAT64}: ssa.OpDiv64F,
2050 opAndType{ir.ODIV, types.TINT8}: ssa.OpDiv8,
2051 opAndType{ir.ODIV, types.TUINT8}: ssa.OpDiv8u,
2052 opAndType{ir.ODIV, types.TINT16}: ssa.OpDiv16,
2053 opAndType{ir.ODIV, types.TUINT16}: ssa.OpDiv16u,
2054 opAndType{ir.ODIV, types.TINT32}: ssa.OpDiv32,
2055 opAndType{ir.ODIV, types.TUINT32}: ssa.OpDiv32u,
2056 opAndType{ir.ODIV, types.TINT64}: ssa.OpDiv64,
2057 opAndType{ir.ODIV, types.TUINT64}: ssa.OpDiv64u,
2059 opAndType{ir.OMOD, types.TINT8}: ssa.OpMod8,
2060 opAndType{ir.OMOD, types.TUINT8}: ssa.OpMod8u,
2061 opAndType{ir.OMOD, types.TINT16}: ssa.OpMod16,
2062 opAndType{ir.OMOD, types.TUINT16}: ssa.OpMod16u,
2063 opAndType{ir.OMOD, types.TINT32}: ssa.OpMod32,
2064 opAndType{ir.OMOD, types.TUINT32}: ssa.OpMod32u,
2065 opAndType{ir.OMOD, types.TINT64}: ssa.OpMod64,
2066 opAndType{ir.OMOD, types.TUINT64}: ssa.OpMod64u,
2068 opAndType{ir.OAND, types.TINT8}: ssa.OpAnd8,
2069 opAndType{ir.OAND, types.TUINT8}: ssa.OpAnd8,
2070 opAndType{ir.OAND, types.TINT16}: ssa.OpAnd16,
2071 opAndType{ir.OAND, types.TUINT16}: ssa.OpAnd16,
2072 opAndType{ir.OAND, types.TINT32}: ssa.OpAnd32,
2073 opAndType{ir.OAND, types.TUINT32}: ssa.OpAnd32,
2074 opAndType{ir.OAND, types.TINT64}: ssa.OpAnd64,
2075 opAndType{ir.OAND, types.TUINT64}: ssa.OpAnd64,
2077 opAndType{ir.OOR, types.TINT8}: ssa.OpOr8,
2078 opAndType{ir.OOR, types.TUINT8}: ssa.OpOr8,
2079 opAndType{ir.OOR, types.TINT16}: ssa.OpOr16,
2080 opAndType{ir.OOR, types.TUINT16}: ssa.OpOr16,
2081 opAndType{ir.OOR, types.TINT32}: ssa.OpOr32,
2082 opAndType{ir.OOR, types.TUINT32}: ssa.OpOr32,
2083 opAndType{ir.OOR, types.TINT64}: ssa.OpOr64,
2084 opAndType{ir.OOR, types.TUINT64}: ssa.OpOr64,
2086 opAndType{ir.OXOR, types.TINT8}: ssa.OpXor8,
2087 opAndType{ir.OXOR, types.TUINT8}: ssa.OpXor8,
2088 opAndType{ir.OXOR, types.TINT16}: ssa.OpXor16,
2089 opAndType{ir.OXOR, types.TUINT16}: ssa.OpXor16,
2090 opAndType{ir.OXOR, types.TINT32}: ssa.OpXor32,
2091 opAndType{ir.OXOR, types.TUINT32}: ssa.OpXor32,
2092 opAndType{ir.OXOR, types.TINT64}: ssa.OpXor64,
2093 opAndType{ir.OXOR, types.TUINT64}: ssa.OpXor64,
2095 opAndType{ir.OEQ, types.TBOOL}: ssa.OpEqB,
2096 opAndType{ir.OEQ, types.TINT8}: ssa.OpEq8,
2097 opAndType{ir.OEQ, types.TUINT8}: ssa.OpEq8,
2098 opAndType{ir.OEQ, types.TINT16}: ssa.OpEq16,
2099 opAndType{ir.OEQ, types.TUINT16}: ssa.OpEq16,
2100 opAndType{ir.OEQ, types.TINT32}: ssa.OpEq32,
2101 opAndType{ir.OEQ, types.TUINT32}: ssa.OpEq32,
2102 opAndType{ir.OEQ, types.TINT64}: ssa.OpEq64,
2103 opAndType{ir.OEQ, types.TUINT64}: ssa.OpEq64,
2104 opAndType{ir.OEQ, types.TINTER}: ssa.OpEqInter,
2105 opAndType{ir.OEQ, types.TSLICE}: ssa.OpEqSlice,
2106 opAndType{ir.OEQ, types.TFUNC}: ssa.OpEqPtr,
2107 opAndType{ir.OEQ, types.TMAP}: ssa.OpEqPtr,
2108 opAndType{ir.OEQ, types.TCHAN}: ssa.OpEqPtr,
2109 opAndType{ir.OEQ, types.TPTR}: ssa.OpEqPtr,
2110 opAndType{ir.OEQ, types.TUINTPTR}: ssa.OpEqPtr,
2111 opAndType{ir.OEQ, types.TUNSAFEPTR}: ssa.OpEqPtr,
2112 opAndType{ir.OEQ, types.TFLOAT64}: ssa.OpEq64F,
2113 opAndType{ir.OEQ, types.TFLOAT32}: ssa.OpEq32F,
2115 opAndType{ir.ONE, types.TBOOL}: ssa.OpNeqB,
2116 opAndType{ir.ONE, types.TINT8}: ssa.OpNeq8,
2117 opAndType{ir.ONE, types.TUINT8}: ssa.OpNeq8,
2118 opAndType{ir.ONE, types.TINT16}: ssa.OpNeq16,
2119 opAndType{ir.ONE, types.TUINT16}: ssa.OpNeq16,
2120 opAndType{ir.ONE, types.TINT32}: ssa.OpNeq32,
2121 opAndType{ir.ONE, types.TUINT32}: ssa.OpNeq32,
2122 opAndType{ir.ONE, types.TINT64}: ssa.OpNeq64,
2123 opAndType{ir.ONE, types.TUINT64}: ssa.OpNeq64,
2124 opAndType{ir.ONE, types.TINTER}: ssa.OpNeqInter,
2125 opAndType{ir.ONE, types.TSLICE}: ssa.OpNeqSlice,
2126 opAndType{ir.ONE, types.TFUNC}: ssa.OpNeqPtr,
2127 opAndType{ir.ONE, types.TMAP}: ssa.OpNeqPtr,
2128 opAndType{ir.ONE, types.TCHAN}: ssa.OpNeqPtr,
2129 opAndType{ir.ONE, types.TPTR}: ssa.OpNeqPtr,
2130 opAndType{ir.ONE, types.TUINTPTR}: ssa.OpNeqPtr,
2131 opAndType{ir.ONE, types.TUNSAFEPTR}: ssa.OpNeqPtr,
2132 opAndType{ir.ONE, types.TFLOAT64}: ssa.OpNeq64F,
2133 opAndType{ir.ONE, types.TFLOAT32}: ssa.OpNeq32F,
2135 opAndType{ir.OLT, types.TINT8}: ssa.OpLess8,
2136 opAndType{ir.OLT, types.TUINT8}: ssa.OpLess8U,
2137 opAndType{ir.OLT, types.TINT16}: ssa.OpLess16,
2138 opAndType{ir.OLT, types.TUINT16}: ssa.OpLess16U,
2139 opAndType{ir.OLT, types.TINT32}: ssa.OpLess32,
2140 opAndType{ir.OLT, types.TUINT32}: ssa.OpLess32U,
2141 opAndType{ir.OLT, types.TINT64}: ssa.OpLess64,
2142 opAndType{ir.OLT, types.TUINT64}: ssa.OpLess64U,
2143 opAndType{ir.OLT, types.TFLOAT64}: ssa.OpLess64F,
2144 opAndType{ir.OLT, types.TFLOAT32}: ssa.OpLess32F,
2146 opAndType{ir.OLE, types.TINT8}: ssa.OpLeq8,
2147 opAndType{ir.OLE, types.TUINT8}: ssa.OpLeq8U,
2148 opAndType{ir.OLE, types.TINT16}: ssa.OpLeq16,
2149 opAndType{ir.OLE, types.TUINT16}: ssa.OpLeq16U,
2150 opAndType{ir.OLE, types.TINT32}: ssa.OpLeq32,
2151 opAndType{ir.OLE, types.TUINT32}: ssa.OpLeq32U,
2152 opAndType{ir.OLE, types.TINT64}: ssa.OpLeq64,
2153 opAndType{ir.OLE, types.TUINT64}: ssa.OpLeq64U,
2154 opAndType{ir.OLE, types.TFLOAT64}: ssa.OpLeq64F,
2155 opAndType{ir.OLE, types.TFLOAT32}: ssa.OpLeq32F,
2158 func (s *state) concreteEtype(t *types.Type) types.Kind {
2164 if s.config.PtrSize == 8 {
2169 if s.config.PtrSize == 8 {
2170 return types.TUINT64
2172 return types.TUINT32
2173 case types.TUINTPTR:
2174 if s.config.PtrSize == 8 {
2175 return types.TUINT64
2177 return types.TUINT32
2181 func (s *state) ssaOp(op ir.Op, t *types.Type) ssa.Op {
2182 etype := s.concreteEtype(t)
2183 x, ok := opToSSA[opAndType{op, etype}]
2185 s.Fatalf("unhandled binary op %v %s", op, etype)
2190 type opAndTwoTypes struct {
2196 type twoTypes struct {
2201 type twoOpsAndType struct {
2204 intermediateType types.Kind
2207 var fpConvOpToSSA = map[twoTypes]twoOpsAndType{
2209 twoTypes{types.TINT8, types.TFLOAT32}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, types.TINT32},
2210 twoTypes{types.TINT16, types.TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, types.TINT32},
2211 twoTypes{types.TINT32, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, types.TINT32},
2212 twoTypes{types.TINT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, types.TINT64},
2214 twoTypes{types.TINT8, types.TFLOAT64}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, types.TINT32},
2215 twoTypes{types.TINT16, types.TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, types.TINT32},
2216 twoTypes{types.TINT32, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, types.TINT32},
2217 twoTypes{types.TINT64, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, types.TINT64},
2219 twoTypes{types.TFLOAT32, types.TINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, types.TINT32},
2220 twoTypes{types.TFLOAT32, types.TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, types.TINT32},
2221 twoTypes{types.TFLOAT32, types.TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, types.TINT32},
2222 twoTypes{types.TFLOAT32, types.TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, types.TINT64},
2224 twoTypes{types.TFLOAT64, types.TINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, types.TINT32},
2225 twoTypes{types.TFLOAT64, types.TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, types.TINT32},
2226 twoTypes{types.TFLOAT64, types.TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, types.TINT32},
2227 twoTypes{types.TFLOAT64, types.TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, types.TINT64},
2229 twoTypes{types.TUINT8, types.TFLOAT32}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, types.TINT32},
2230 twoTypes{types.TUINT16, types.TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, types.TINT32},
2231 twoTypes{types.TUINT32, types.TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, types.TINT64}, // go wide to dodge unsigned
2232 twoTypes{types.TUINT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, types.TUINT64}, // Cvt64Uto32F, branchy code expansion instead
2234 twoTypes{types.TUINT8, types.TFLOAT64}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, types.TINT32},
2235 twoTypes{types.TUINT16, types.TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, types.TINT32},
2236 twoTypes{types.TUINT32, types.TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, types.TINT64}, // go wide to dodge unsigned
2237 twoTypes{types.TUINT64, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, types.TUINT64}, // Cvt64Uto64F, branchy code expansion instead
2239 twoTypes{types.TFLOAT32, types.TUINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, types.TINT32},
2240 twoTypes{types.TFLOAT32, types.TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, types.TINT32},
2241 twoTypes{types.TFLOAT32, types.TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned
2242 twoTypes{types.TFLOAT32, types.TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt32Fto64U, branchy code expansion instead
2244 twoTypes{types.TFLOAT64, types.TUINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, types.TINT32},
2245 twoTypes{types.TFLOAT64, types.TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, types.TINT32},
2246 twoTypes{types.TFLOAT64, types.TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned
2247 twoTypes{types.TFLOAT64, types.TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt64Fto64U, branchy code expansion instead
2250 twoTypes{types.TFLOAT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, types.TFLOAT32},
2251 twoTypes{types.TFLOAT64, types.TFLOAT64}: twoOpsAndType{ssa.OpRound64F, ssa.OpCopy, types.TFLOAT64},
2252 twoTypes{types.TFLOAT32, types.TFLOAT32}: twoOpsAndType{ssa.OpRound32F, ssa.OpCopy, types.TFLOAT32},
2253 twoTypes{types.TFLOAT32, types.TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, types.TFLOAT64},
2256 // this map is used only for 32-bit arch, and only includes the difference
2257 // on 32-bit arch, don't use int64<->float conversion for uint32
2258 var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{
2259 twoTypes{types.TUINT32, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, types.TUINT32},
2260 twoTypes{types.TUINT32, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, types.TUINT32},
2261 twoTypes{types.TFLOAT32, types.TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, types.TUINT32},
2262 twoTypes{types.TFLOAT64, types.TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, types.TUINT32},
2265 // uint64<->float conversions, only on machines that have instructions for that
2266 var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{
2267 twoTypes{types.TUINT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto32F, types.TUINT64},
2268 twoTypes{types.TUINT64, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto64F, types.TUINT64},
2269 twoTypes{types.TFLOAT32, types.TUINT64}: twoOpsAndType{ssa.OpCvt32Fto64U, ssa.OpCopy, types.TUINT64},
2270 twoTypes{types.TFLOAT64, types.TUINT64}: twoOpsAndType{ssa.OpCvt64Fto64U, ssa.OpCopy, types.TUINT64},
2273 var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
2274 opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT8}: ssa.OpLsh8x8,
2275 opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT8}: ssa.OpLsh8x8,
2276 opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT16}: ssa.OpLsh8x16,
2277 opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT16}: ssa.OpLsh8x16,
2278 opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT32}: ssa.OpLsh8x32,
2279 opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT32}: ssa.OpLsh8x32,
2280 opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT64}: ssa.OpLsh8x64,
2281 opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT64}: ssa.OpLsh8x64,
2283 opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT8}: ssa.OpLsh16x8,
2284 opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT8}: ssa.OpLsh16x8,
2285 opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT16}: ssa.OpLsh16x16,
2286 opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT16}: ssa.OpLsh16x16,
2287 opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT32}: ssa.OpLsh16x32,
2288 opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT32}: ssa.OpLsh16x32,
2289 opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT64}: ssa.OpLsh16x64,
2290 opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT64}: ssa.OpLsh16x64,
2292 opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT8}: ssa.OpLsh32x8,
2293 opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT8}: ssa.OpLsh32x8,
2294 opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT16}: ssa.OpLsh32x16,
2295 opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT16}: ssa.OpLsh32x16,
2296 opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT32}: ssa.OpLsh32x32,
2297 opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT32}: ssa.OpLsh32x32,
2298 opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT64}: ssa.OpLsh32x64,
2299 opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT64}: ssa.OpLsh32x64,
2301 opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT8}: ssa.OpLsh64x8,
2302 opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT8}: ssa.OpLsh64x8,
2303 opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT16}: ssa.OpLsh64x16,
2304 opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT16}: ssa.OpLsh64x16,
2305 opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT32}: ssa.OpLsh64x32,
2306 opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT32}: ssa.OpLsh64x32,
2307 opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT64}: ssa.OpLsh64x64,
2308 opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT64}: ssa.OpLsh64x64,
2310 opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT8}: ssa.OpRsh8x8,
2311 opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT8}: ssa.OpRsh8Ux8,
2312 opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT16}: ssa.OpRsh8x16,
2313 opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT16}: ssa.OpRsh8Ux16,
2314 opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT32}: ssa.OpRsh8x32,
2315 opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT32}: ssa.OpRsh8Ux32,
2316 opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT64}: ssa.OpRsh8x64,
2317 opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT64}: ssa.OpRsh8Ux64,
2319 opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT8}: ssa.OpRsh16x8,
2320 opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT8}: ssa.OpRsh16Ux8,
2321 opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT16}: ssa.OpRsh16x16,
2322 opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT16}: ssa.OpRsh16Ux16,
2323 opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT32}: ssa.OpRsh16x32,
2324 opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT32}: ssa.OpRsh16Ux32,
2325 opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT64}: ssa.OpRsh16x64,
2326 opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT64}: ssa.OpRsh16Ux64,
2328 opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT8}: ssa.OpRsh32x8,
2329 opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT8}: ssa.OpRsh32Ux8,
2330 opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT16}: ssa.OpRsh32x16,
2331 opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT16}: ssa.OpRsh32Ux16,
2332 opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT32}: ssa.OpRsh32x32,
2333 opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT32}: ssa.OpRsh32Ux32,
2334 opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT64}: ssa.OpRsh32x64,
2335 opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT64}: ssa.OpRsh32Ux64,
2337 opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT8}: ssa.OpRsh64x8,
2338 opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT8}: ssa.OpRsh64Ux8,
2339 opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT16}: ssa.OpRsh64x16,
2340 opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT16}: ssa.OpRsh64Ux16,
2341 opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT32}: ssa.OpRsh64x32,
2342 opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT32}: ssa.OpRsh64Ux32,
2343 opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT64}: ssa.OpRsh64x64,
2344 opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT64}: ssa.OpRsh64Ux64,
2347 func (s *state) ssaShiftOp(op ir.Op, t *types.Type, u *types.Type) ssa.Op {
2348 etype1 := s.concreteEtype(t)
2349 etype2 := s.concreteEtype(u)
2350 x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
2352 s.Fatalf("unhandled shift op %v etype=%s/%s", op, etype1, etype2)
2357 // expr converts the expression n to ssa, adds it to s and returns the ssa result.
2358 func (s *state) expr(n ir.Node) *ssa.Value {
2359 if ir.HasUniquePos(n) {
2360 // ONAMEs and named OLITERALs have the line number
2361 // of the decl, not the use. See issue 14742.
2366 s.stmtList(n.Init())
2368 case ir.OBYTES2STRTMP:
2369 n := n.(*ir.ConvExpr)
2370 slice := s.expr(n.X)
2371 ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
2372 len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice)
2373 return s.newValue2(ssa.OpStringMake, n.Type(), ptr, len)
2374 case ir.OSTR2BYTESTMP:
2375 n := n.(*ir.ConvExpr)
2377 ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str)
2378 len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], str)
2379 return s.newValue3(ssa.OpSliceMake, n.Type(), ptr, len, len)
2381 n := n.(*ir.UnaryExpr)
2382 aux := n.X.(*ir.Name).Linksym()
2383 // OCFUNC is used to build function values, which must
2384 // always reference ABIInternal entry points.
2385 if aux.ABI() != obj.ABIInternal {
2386 s.Fatalf("expected ABIInternal: %v", aux.ABI())
2388 return s.entryNewValue1A(ssa.OpAddr, n.Type(), aux, s.sb)
2391 if n.Class == ir.PFUNC {
2392 // "value" of a function is the address of the function's closure
2393 sym := staticdata.FuncLinksym(n)
2394 return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type()), sym, s.sb)
2397 return s.variable(n, n.Type())
2399 return s.load(n.Type(), s.addr(n))
2400 case ir.OLINKSYMOFFSET:
2401 n := n.(*ir.LinksymOffsetExpr)
2402 return s.load(n.Type(), s.addr(n))
2404 n := n.(*ir.NilExpr)
2408 return s.constSlice(t)
2409 case t.IsInterface():
2410 return s.constInterface(t)
2412 return s.constNil(t)
2415 switch u := n.Val(); u.Kind() {
2417 i := ir.IntVal(n.Type(), u)
2418 switch n.Type().Size() {
2420 return s.constInt8(n.Type(), int8(i))
2422 return s.constInt16(n.Type(), int16(i))
2424 return s.constInt32(n.Type(), int32(i))
2426 return s.constInt64(n.Type(), i)
2428 s.Fatalf("bad integer size %d", n.Type().Size())
2431 case constant.String:
2432 i := constant.StringVal(u)
2434 return s.constEmptyString(n.Type())
2436 return s.entryNewValue0A(ssa.OpConstString, n.Type(), ssa.StringToAux(i))
2438 return s.constBool(constant.BoolVal(u))
2439 case constant.Float:
2440 f, _ := constant.Float64Val(u)
2441 switch n.Type().Size() {
2443 return s.constFloat32(n.Type(), f)
2445 return s.constFloat64(n.Type(), f)
2447 s.Fatalf("bad float size %d", n.Type().Size())
2450 case constant.Complex:
2451 re, _ := constant.Float64Val(constant.Real(u))
2452 im, _ := constant.Float64Val(constant.Imag(u))
2453 switch n.Type().Size() {
2455 pt := types.Types[types.TFLOAT32]
2456 return s.newValue2(ssa.OpComplexMake, n.Type(),
2457 s.constFloat32(pt, re),
2458 s.constFloat32(pt, im))
2460 pt := types.Types[types.TFLOAT64]
2461 return s.newValue2(ssa.OpComplexMake, n.Type(),
2462 s.constFloat64(pt, re),
2463 s.constFloat64(pt, im))
2465 s.Fatalf("bad complex size %d", n.Type().Size())
2469 s.Fatalf("unhandled OLITERAL %v", u.Kind())
2473 n := n.(*ir.ConvExpr)
2477 // Assume everything will work out, so set up our return value.
2478 // Anything interesting that happens from here is a fatal.
2484 // Special case for not confusing GC and liveness.
2485 // We don't want pointers accidentally classified
2486 // as not-pointers or vice-versa because of copy
2488 if to.IsPtrShaped() != from.IsPtrShaped() {
2489 return s.newValue2(ssa.OpConvert, to, x, s.mem())
2492 v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type
2495 if to.Kind() == types.TFUNC && from.IsPtrShaped() {
2499 // named <--> unnamed type or typed <--> untyped const
2500 if from.Kind() == to.Kind() {
2504 // unsafe.Pointer <--> *T
2505 if to.IsUnsafePtr() && from.IsPtrShaped() || from.IsUnsafePtr() && to.IsPtrShaped() {
2510 if to.Kind() == types.TMAP && from.IsPtr() &&
2511 to.MapType().Hmap == from.Elem() {
2515 types.CalcSize(from)
2517 if from.Width != to.Width {
2518 s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width)
2521 if etypesign(from.Kind()) != etypesign(to.Kind()) {
2522 s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Kind(), to, to.Kind())
2526 if base.Flag.Cfg.Instrumenting {
2527 // These appear to be fine, but they fail the
2528 // integer constraint below, so okay them here.
2529 // Sample non-integer conversion: map[string]string -> *uint8
2533 if etypesign(from.Kind()) == 0 {
2534 s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
2538 // integer, same width, same sign
2542 n := n.(*ir.ConvExpr)
2544 ft := n.X.Type() // from type
2545 tt := n.Type() // to type
2546 if ft.IsBoolean() && tt.IsKind(types.TUINT8) {
2547 // Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
2548 return s.newValue1(ssa.OpCopy, n.Type(), x)
2550 if ft.IsInteger() && tt.IsInteger() {
2552 if tt.Size() == ft.Size() {
2554 } else if tt.Size() < ft.Size() {
2556 switch 10*ft.Size() + tt.Size() {
2558 op = ssa.OpTrunc16to8
2560 op = ssa.OpTrunc32to8
2562 op = ssa.OpTrunc32to16
2564 op = ssa.OpTrunc64to8
2566 op = ssa.OpTrunc64to16
2568 op = ssa.OpTrunc64to32
2570 s.Fatalf("weird integer truncation %v -> %v", ft, tt)
2572 } else if ft.IsSigned() {
2574 switch 10*ft.Size() + tt.Size() {
2576 op = ssa.OpSignExt8to16
2578 op = ssa.OpSignExt8to32
2580 op = ssa.OpSignExt8to64
2582 op = ssa.OpSignExt16to32
2584 op = ssa.OpSignExt16to64
2586 op = ssa.OpSignExt32to64
2588 s.Fatalf("bad integer sign extension %v -> %v", ft, tt)
2592 switch 10*ft.Size() + tt.Size() {
2594 op = ssa.OpZeroExt8to16
2596 op = ssa.OpZeroExt8to32
2598 op = ssa.OpZeroExt8to64
2600 op = ssa.OpZeroExt16to32
2602 op = ssa.OpZeroExt16to64
2604 op = ssa.OpZeroExt32to64
2606 s.Fatalf("weird integer sign extension %v -> %v", ft, tt)
2609 return s.newValue1(op, n.Type(), x)
2612 if ft.IsFloat() || tt.IsFloat() {
2613 conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
2614 if s.config.RegSize == 4 && Arch.LinkArch.Family != sys.MIPS && !s.softFloat {
2615 if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
2619 if Arch.LinkArch.Family == sys.ARM64 || Arch.LinkArch.Family == sys.Wasm || Arch.LinkArch.Family == sys.S390X || s.softFloat {
2620 if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
2625 if Arch.LinkArch.Family == sys.MIPS && !s.softFloat {
2626 if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {
2627 // tt is float32 or float64, and ft is also unsigned
2629 return s.uint32Tofloat32(n, x, ft, tt)
2632 return s.uint32Tofloat64(n, x, ft, tt)
2634 } else if tt.Size() == 4 && tt.IsInteger() && !tt.IsSigned() {
2635 // ft is float32 or float64, and tt is unsigned integer
2637 return s.float32ToUint32(n, x, ft, tt)
2640 return s.float64ToUint32(n, x, ft, tt)
2646 s.Fatalf("weird float conversion %v -> %v", ft, tt)
2648 op1, op2, it := conv.op1, conv.op2, conv.intermediateType
2650 if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid {
2651 // normal case, not tripping over unsigned 64
2652 if op1 == ssa.OpCopy {
2653 if op2 == ssa.OpCopy {
2656 return s.newValueOrSfCall1(op2, n.Type(), x)
2658 if op2 == ssa.OpCopy {
2659 return s.newValueOrSfCall1(op1, n.Type(), x)
2661 return s.newValueOrSfCall1(op2, n.Type(), s.newValueOrSfCall1(op1, types.Types[it], x))
2663 // Tricky 64-bit unsigned cases.
2665 // tt is float32 or float64, and ft is also unsigned
2667 return s.uint64Tofloat32(n, x, ft, tt)
2670 return s.uint64Tofloat64(n, x, ft, tt)
2672 s.Fatalf("weird unsigned integer to float conversion %v -> %v", ft, tt)
2674 // ft is float32 or float64, and tt is unsigned integer
2676 return s.float32ToUint64(n, x, ft, tt)
2679 return s.float64ToUint64(n, x, ft, tt)
2681 s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt)
2685 if ft.IsComplex() && tt.IsComplex() {
2687 if ft.Size() == tt.Size() {
2694 s.Fatalf("weird complex conversion %v -> %v", ft, tt)
2696 } else if ft.Size() == 8 && tt.Size() == 16 {
2697 op = ssa.OpCvt32Fto64F
2698 } else if ft.Size() == 16 && tt.Size() == 8 {
2699 op = ssa.OpCvt64Fto32F
2701 s.Fatalf("weird complex conversion %v -> %v", ft, tt)
2703 ftp := types.FloatForComplex(ft)
2704 ttp := types.FloatForComplex(tt)
2705 return s.newValue2(ssa.OpComplexMake, tt,
2706 s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)),
2707 s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x)))
2710 s.Fatalf("unhandled OCONV %s -> %s", n.X.Type().Kind(), n.Type().Kind())
2714 n := n.(*ir.TypeAssertExpr)
2715 res, _ := s.dottype(n, false)
2719 case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
2720 n := n.(*ir.BinaryExpr)
2723 if n.X.Type().IsComplex() {
2724 pt := types.FloatForComplex(n.X.Type())
2725 op := s.ssaOp(ir.OEQ, pt)
2726 r := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
2727 i := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
2728 c := s.newValue2(ssa.OpAndB, types.Types[types.TBOOL], r, i)
2733 return s.newValue1(ssa.OpNot, types.Types[types.TBOOL], c)
2735 s.Fatalf("ordered complex compare %v", n.Op())
2739 // Convert OGE and OGT into OLE and OLT.
2743 op, a, b = ir.OLE, b, a
2745 op, a, b = ir.OLT, b, a
2747 if n.X.Type().IsFloat() {
2749 return s.newValueOrSfCall2(s.ssaOp(op, n.X.Type()), types.Types[types.TBOOL], a, b)
2751 // integer comparison
2752 return s.newValue2(s.ssaOp(op, n.X.Type()), types.Types[types.TBOOL], a, b)
2754 n := n.(*ir.BinaryExpr)
2757 if n.Type().IsComplex() {
2758 mulop := ssa.OpMul64F
2759 addop := ssa.OpAdd64F
2760 subop := ssa.OpSub64F
2761 pt := types.FloatForComplex(n.Type()) // Could be Float32 or Float64
2762 wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error
2764 areal := s.newValue1(ssa.OpComplexReal, pt, a)
2765 breal := s.newValue1(ssa.OpComplexReal, pt, b)
2766 aimag := s.newValue1(ssa.OpComplexImag, pt, a)
2767 bimag := s.newValue1(ssa.OpComplexImag, pt, b)
2769 if pt != wt { // Widen for calculation
2770 areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal)
2771 breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal)
2772 aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag)
2773 bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag)
2776 xreal := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag))
2777 ximag := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, bimag), s.newValueOrSfCall2(mulop, wt, aimag, breal))
2779 if pt != wt { // Narrow to store back
2780 xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
2781 ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
2784 return s.newValue2(ssa.OpComplexMake, n.Type(), xreal, ximag)
2787 if n.Type().IsFloat() {
2788 return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
2791 return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
2794 n := n.(*ir.BinaryExpr)
2797 if n.Type().IsComplex() {
2798 // TODO this is not executed because the front-end substitutes a runtime call.
2799 // That probably ought to change; with modest optimization the widen/narrow
2800 // conversions could all be elided in larger expression trees.
2801 mulop := ssa.OpMul64F
2802 addop := ssa.OpAdd64F
2803 subop := ssa.OpSub64F
2804 divop := ssa.OpDiv64F
2805 pt := types.FloatForComplex(n.Type()) // Could be Float32 or Float64
2806 wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error
2808 areal := s.newValue1(ssa.OpComplexReal, pt, a)
2809 breal := s.newValue1(ssa.OpComplexReal, pt, b)
2810 aimag := s.newValue1(ssa.OpComplexImag, pt, a)
2811 bimag := s.newValue1(ssa.OpComplexImag, pt, b)
2813 if pt != wt { // Widen for calculation
2814 areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal)
2815 breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal)
2816 aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag)
2817 bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag)
2820 denom := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, breal, breal), s.newValueOrSfCall2(mulop, wt, bimag, bimag))
2821 xreal := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag))
2822 ximag := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, aimag, breal), s.newValueOrSfCall2(mulop, wt, areal, bimag))
2824 // TODO not sure if this is best done in wide precision or narrow
2825 // Double-rounding might be an issue.
2826 // Note that the pre-SSA implementation does the entire calculation
2827 // in wide format, so wide is compatible.
2828 xreal = s.newValueOrSfCall2(divop, wt, xreal, denom)
2829 ximag = s.newValueOrSfCall2(divop, wt, ximag, denom)
2831 if pt != wt { // Narrow to store back
2832 xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
2833 ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
2835 return s.newValue2(ssa.OpComplexMake, n.Type(), xreal, ximag)
2837 if n.Type().IsFloat() {
2838 return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
2840 return s.intDivide(n, a, b)
2842 n := n.(*ir.BinaryExpr)
2845 return s.intDivide(n, a, b)
2846 case ir.OADD, ir.OSUB:
2847 n := n.(*ir.BinaryExpr)
2850 if n.Type().IsComplex() {
2851 pt := types.FloatForComplex(n.Type())
2852 op := s.ssaOp(n.Op(), pt)
2853 return s.newValue2(ssa.OpComplexMake, n.Type(),
2854 s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)),
2855 s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)))
2857 if n.Type().IsFloat() {
2858 return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
2860 return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
2861 case ir.OAND, ir.OOR, ir.OXOR:
2862 n := n.(*ir.BinaryExpr)
2865 return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
2867 n := n.(*ir.BinaryExpr)
2870 b = s.newValue1(s.ssaOp(ir.OBITNOT, b.Type), b.Type, b)
2871 return s.newValue2(s.ssaOp(ir.OAND, n.Type()), a.Type, a, b)
2872 case ir.OLSH, ir.ORSH:
2873 n := n.(*ir.BinaryExpr)
2878 cmp := s.newValue2(s.ssaOp(ir.OLE, bt), types.Types[types.TBOOL], s.zeroVal(bt), b)
2879 s.check(cmp, ir.Syms.Panicshift)
2880 bt = bt.ToUnsigned()
2882 return s.newValue2(s.ssaShiftOp(n.Op(), n.Type(), bt), a.Type, a, b)
2883 case ir.OANDAND, ir.OOROR:
2884 // To implement OANDAND (and OOROR), we introduce a
2885 // new temporary variable to hold the result. The
2886 // variable is associated with the OANDAND node in the
2887 // s.vars table (normally variables are only
2888 // associated with ONAME nodes). We convert
2895 // Using var in the subsequent block introduces the
2896 // necessary phi variable.
2897 n := n.(*ir.LogicalExpr)
2902 b.Kind = ssa.BlockIf
2904 // In theory, we should set b.Likely here based on context.
2905 // However, gc only gives us likeliness hints
2906 // in a single place, for plain OIF statements,
2907 // and passing around context is finnicky, so don't bother for now.
2909 bRight := s.f.NewBlock(ssa.BlockPlain)
2910 bResult := s.f.NewBlock(ssa.BlockPlain)
2911 if n.Op() == ir.OANDAND {
2913 b.AddEdgeTo(bResult)
2914 } else if n.Op() == ir.OOROR {
2915 b.AddEdgeTo(bResult)
2919 s.startBlock(bRight)
2924 b.AddEdgeTo(bResult)
2926 s.startBlock(bResult)
2927 return s.variable(n, types.Types[types.TBOOL])
2929 n := n.(*ir.BinaryExpr)
2932 return s.newValue2(ssa.OpComplexMake, n.Type(), r, i)
2936 n := n.(*ir.UnaryExpr)
2938 if n.Type().IsComplex() {
2939 tp := types.FloatForComplex(n.Type())
2940 negop := s.ssaOp(n.Op(), tp)
2941 return s.newValue2(ssa.OpComplexMake, n.Type(),
2942 s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)),
2943 s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a)))
2945 return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a)
2946 case ir.ONOT, ir.OBITNOT:
2947 n := n.(*ir.UnaryExpr)
2949 return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a)
2950 case ir.OIMAG, ir.OREAL:
2951 n := n.(*ir.UnaryExpr)
2953 return s.newValue1(s.ssaOp(n.Op(), n.X.Type()), n.Type(), a)
2955 n := n.(*ir.UnaryExpr)
2959 n := n.(*ir.AddrExpr)
2963 n := n.(*ir.ResultExpr)
2964 if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall {
2965 panic("Expected to see a previous call")
2969 panic(fmt.Errorf("ORESULT %v does not match call %s", n, s.prevCall))
2971 return s.resultOfCall(s.prevCall, which, n.Type())
2974 n := n.(*ir.StarExpr)
2975 p := s.exprPtr(n.X, n.Bounded(), n.Pos())
2976 return s.load(n.Type(), p)
2979 n := n.(*ir.SelectorExpr)
2980 if n.X.Op() == ir.OSTRUCTLIT {
2981 // All literals with nonzero fields have already been
2982 // rewritten during walk. Any that remain are just T{}
2983 // or equivalents. Use the zero value.
2984 if !ir.IsZero(n.X) {
2985 s.Fatalf("literal with nonzero value in SSA: %v", n.X)
2987 return s.zeroVal(n.Type())
2989 // If n is addressable and can't be represented in
2990 // SSA, then load just the selected field. This
2991 // prevents false memory dependencies in race/msan
2993 if ir.IsAddressable(n) && !s.canSSA(n) {
2995 return s.load(n.Type(), p)
2998 return s.newValue1I(ssa.OpStructSelect, n.Type(), int64(fieldIdx(n)), v)
3001 n := n.(*ir.SelectorExpr)
3002 p := s.exprPtr(n.X, n.Bounded(), n.Pos())
3003 p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type()), n.Offset(), p)
3004 return s.load(n.Type(), p)
3007 n := n.(*ir.IndexExpr)
3009 case n.X.Type().IsString():
3010 if n.Bounded() && ir.IsConst(n.X, constant.String) && ir.IsConst(n.Index, constant.Int) {
3011 // Replace "abc"[1] with 'b'.
3012 // Delayed until now because "abc"[1] is not an ideal constant.
3013 // See test/fixedbugs/issue11370.go.
3014 return s.newValue0I(ssa.OpConst8, types.Types[types.TUINT8], int64(int8(ir.StringVal(n.X)[ir.Int64Val(n.Index)])))
3017 i := s.expr(n.Index)
3018 len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], a)
3019 i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
3020 ptrtyp := s.f.Config.Types.BytePtr
3021 ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
3022 if ir.IsConst(n.Index, constant.Int) {
3023 ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, ir.Int64Val(n.Index), ptr)
3025 ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
3027 return s.load(types.Types[types.TUINT8], ptr)
3028 case n.X.Type().IsSlice():
3030 return s.load(n.X.Type().Elem(), p)
3031 case n.X.Type().IsArray():
3032 if TypeOK(n.X.Type()) {
3033 // SSA can handle arrays of length at most 1.
3034 bound := n.X.Type().NumElem()
3036 i := s.expr(n.Index)
3038 // Bounds check will never succeed. Might as well
3039 // use constants for the bounds check.
3040 z := s.constInt(types.Types[types.TINT], 0)
3041 s.boundsCheck(z, z, ssa.BoundsIndex, false)
3042 // The return value won't be live, return junk.
3043 return s.newValue0(ssa.OpUnknown, n.Type())
3045 len := s.constInt(types.Types[types.TINT], bound)
3046 s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded()) // checks i == 0
3047 return s.newValue1I(ssa.OpArraySelect, n.Type(), 0, a)
3050 return s.load(n.X.Type().Elem(), p)
3052 s.Fatalf("bad type for index %v", n.X.Type())
3056 case ir.OLEN, ir.OCAP:
3057 n := n.(*ir.UnaryExpr)
3059 case n.X.Type().IsSlice():
3060 op := ssa.OpSliceLen
3061 if n.Op() == ir.OCAP {
3064 return s.newValue1(op, types.Types[types.TINT], s.expr(n.X))
3065 case n.X.Type().IsString(): // string; not reachable for OCAP
3066 return s.newValue1(ssa.OpStringLen, types.Types[types.TINT], s.expr(n.X))
3067 case n.X.Type().IsMap(), n.X.Type().IsChan():
3068 return s.referenceTypeBuiltin(n, s.expr(n.X))
3070 return s.constInt(types.Types[types.TINT], n.X.Type().NumElem())
3074 n := n.(*ir.UnaryExpr)
3076 if n.X.Type().IsSlice() {
3077 return s.newValue1(ssa.OpSlicePtr, n.Type(), a)
3079 return s.newValue1(ssa.OpStringPtr, n.Type(), a)
3083 n := n.(*ir.UnaryExpr)
3085 return s.newValue1(ssa.OpITab, n.Type(), a)
3088 n := n.(*ir.UnaryExpr)
3090 return s.newValue1(ssa.OpIData, n.Type(), a)
3093 n := n.(*ir.BinaryExpr)
3096 return s.newValue2(ssa.OpIMake, n.Type(), tab, data)
3098 case ir.OSLICEHEADER:
3099 n := n.(*ir.SliceHeaderExpr)
3103 return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c)
3105 case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR:
3106 n := n.(*ir.SliceExpr)
3108 var i, j, k *ssa.Value
3118 p, l, c := s.slice(v, i, j, k, n.Bounded())
3119 return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c)
3122 n := n.(*ir.SliceExpr)
3131 p, l, _ := s.slice(v, i, j, nil, n.Bounded())
3132 return s.newValue2(ssa.OpStringMake, n.Type(), p, l)
3135 n := n.(*ir.CallExpr)
3136 if ir.IsIntrinsicCall(n) {
3137 return s.intrinsicCall(n)
3141 case ir.OCALLINTER, ir.OCALLMETH:
3142 n := n.(*ir.CallExpr)
3143 return s.callResult(n, callNormal)
3146 n := n.(*ir.CallExpr)
3147 return s.newValue1(ssa.OpGetG, n.Type(), s.mem())
3150 return s.append(n.(*ir.CallExpr), false)
3152 case ir.OSTRUCTLIT, ir.OARRAYLIT:
3153 // All literals with nonzero fields have already been
3154 // rewritten during walk. Any that remain are just T{}
3155 // or equivalents. Use the zero value.
3156 n := n.(*ir.CompLitExpr)
3158 s.Fatalf("literal with nonzero value in SSA: %v", n)
3160 return s.zeroVal(n.Type())
3163 n := n.(*ir.UnaryExpr)
3164 return s.newObject(n.Type().Elem())
3167 s.Fatalf("unhandled expr %v", n.Op())
3172 func (s *state) resultOfCall(c *ssa.Value, which int64, t *types.Type) *ssa.Value {
3173 aux := c.Aux.(*ssa.AuxCall)
3174 pa := aux.ParamAssignmentForResult(which)
3175 // TODO(register args) determine if in-memory TypeOK is better loaded early from SelectNAddr or later when SelectN is expanded.
3176 // SelectN is better for pattern-matching and possible call-aware analysis we might want to do in the future.
3177 if len(pa.Registers) == 0 && !TypeOK(t) {
3178 addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), which, c)
3179 return s.rawLoad(t, addr)
3181 return s.newValue1I(ssa.OpSelectN, t, which, c)
3184 func (s *state) resultAddrOfCall(c *ssa.Value, which int64, t *types.Type) *ssa.Value {
3185 aux := c.Aux.(*ssa.AuxCall)
3186 pa := aux.ParamAssignmentForResult(which)
3187 if len(pa.Registers) == 0 {
3188 return s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), which, c)
3190 _, addr := s.temp(c.Pos, t)
3191 rval := s.newValue1I(ssa.OpSelectN, t, which, c)
3192 s.vars[memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, addr, rval, s.mem(), false)
3196 // append converts an OAPPEND node to SSA.
3197 // If inplace is false, it converts the OAPPEND expression n to an ssa.Value,
3198 // adds it to s, and returns the Value.
3199 // If inplace is true, it writes the result of the OAPPEND expression n
3200 // back to the slice being appended to, and returns nil.
3201 // inplace MUST be set to false if the slice can be SSA'd.
3202 func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value {
3203 // If inplace is false, process as expression "append(s, e1, e2, e3)":
3205 // ptr, len, cap := s
3206 // newlen := len + 3
3207 // if newlen > cap {
3208 // ptr, len, cap = growslice(s, newlen)
3209 // newlen = len + 3 // recalculate to avoid a spill
3211 // // with write barriers, if needed:
3213 // *(ptr+len+1) = e2
3214 // *(ptr+len+2) = e3
3215 // return makeslice(ptr, newlen, cap)
3218 // If inplace is true, process as statement "s = append(s, e1, e2, e3)":
3221 // ptr, len, cap := s
3222 // newlen := len + 3
3223 // if uint(newlen) > uint(cap) {
3224 // newptr, len, newcap = growslice(ptr, len, cap, newlen)
3225 // vardef(a) // if necessary, advise liveness we are writing a new a
3226 // *a.cap = newcap // write before ptr to avoid a spill
3227 // *a.ptr = newptr // with write barrier
3229 // newlen = len + 3 // recalculate to avoid a spill
3231 // // with write barriers, if needed:
3233 // *(ptr+len+1) = e2
3234 // *(ptr+len+2) = e3
3236 et := n.Type().Elem()
3237 pt := types.NewPtr(et)
3240 sn := n.Args[0] // the slice node is the first in the list
3242 var slice, addr *ssa.Value
3245 slice = s.load(n.Type(), addr)
3250 // Allocate new blocks
3251 grow := s.f.NewBlock(ssa.BlockPlain)
3252 assign := s.f.NewBlock(ssa.BlockPlain)
3254 // Decide if we need to grow
3255 nargs := int64(len(n.Args) - 1)
3256 p := s.newValue1(ssa.OpSlicePtr, pt, slice)
3257 l := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice)
3258 c := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], slice)
3259 nl := s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], l, s.constInt(types.Types[types.TINT], nargs))
3261 cmp := s.newValue2(s.ssaOp(ir.OLT, types.Types[types.TUINT]), types.Types[types.TBOOL], c, nl)
3265 s.vars[newlenVar] = nl
3272 b.Kind = ssa.BlockIf
3273 b.Likely = ssa.BranchUnlikely
3280 taddr := s.expr(n.X)
3281 r := s.rtcall(ir.Syms.Growslice, true, []*types.Type{pt, types.Types[types.TINT], types.Types[types.TINT]}, taddr, p, l, c, nl)
3284 if sn.Op() == ir.ONAME {
3286 if sn.Class != ir.PEXTERN {
3287 // Tell liveness we're about to build a new slice
3288 s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem())
3291 capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, types.SliceCapOffset, addr)
3292 s.store(types.Types[types.TINT], capaddr, r[2])
3293 s.store(pt, addr, r[0])
3294 // load the value we just stored to avoid having to spill it
3295 s.vars[ptrVar] = s.load(pt, addr)
3296 s.vars[lenVar] = r[1] // avoid a spill in the fast path
3298 s.vars[ptrVar] = r[0]
3299 s.vars[newlenVar] = s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], r[1], s.constInt(types.Types[types.TINT], nargs))
3300 s.vars[capVar] = r[2]
3306 // assign new elements to slots
3307 s.startBlock(assign)
3310 l = s.variable(lenVar, types.Types[types.TINT]) // generates phi for len
3311 nl = s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], l, s.constInt(types.Types[types.TINT], nargs))
3312 lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, types.SliceLenOffset, addr)
3313 s.store(types.Types[types.TINT], lenaddr, nl)
3317 type argRec struct {
3318 // if store is true, we're appending the value v. If false, we're appending the
3323 args := make([]argRec, 0, nargs)
3324 for _, n := range n.Args[1:] {
3325 if TypeOK(n.Type()) {
3326 args = append(args, argRec{v: s.expr(n), store: true})
3329 args = append(args, argRec{v: v})
3333 p = s.variable(ptrVar, pt) // generates phi for ptr
3335 nl = s.variable(newlenVar, types.Types[types.TINT]) // generates phi for nl
3336 c = s.variable(capVar, types.Types[types.TINT]) // generates phi for cap
3338 p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l)
3339 for i, arg := range args {
3340 addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(types.Types[types.TINT], int64(i)))
3342 s.storeType(et, addr, arg.v, 0, true)
3344 s.move(et, addr, arg.v)
3348 delete(s.vars, ptrVar)
3350 delete(s.vars, lenVar)
3353 delete(s.vars, newlenVar)
3354 delete(s.vars, capVar)
3356 return s.newValue3(ssa.OpSliceMake, n.Type(), p, nl, c)
3359 // condBranch evaluates the boolean expression cond and branches to yes
3360 // if cond is true and no if cond is false.
3361 // This function is intended to handle && and || better than just calling
3362 // s.expr(cond) and branching on the result.
3363 func (s *state) condBranch(cond ir.Node, yes, no *ssa.Block, likely int8) {
3366 cond := cond.(*ir.LogicalExpr)
3367 mid := s.f.NewBlock(ssa.BlockPlain)
3368 s.stmtList(cond.Init())
3369 s.condBranch(cond.X, mid, no, max8(likely, 0))
3371 s.condBranch(cond.Y, yes, no, likely)
3373 // Note: if likely==1, then both recursive calls pass 1.
3374 // If likely==-1, then we don't have enough information to decide
3375 // whether the first branch is likely or not. So we pass 0 for
3376 // the likeliness of the first branch.
3377 // TODO: have the frontend give us branch prediction hints for
3378 // OANDAND and OOROR nodes (if it ever has such info).
3380 cond := cond.(*ir.LogicalExpr)
3381 mid := s.f.NewBlock(ssa.BlockPlain)
3382 s.stmtList(cond.Init())
3383 s.condBranch(cond.X, yes, mid, min8(likely, 0))
3385 s.condBranch(cond.Y, yes, no, likely)
3387 // Note: if likely==-1, then both recursive calls pass -1.
3388 // If likely==1, then we don't have enough info to decide
3389 // the likelihood of the first branch.
3391 cond := cond.(*ir.UnaryExpr)
3392 s.stmtList(cond.Init())
3393 s.condBranch(cond.X, no, yes, -likely)
3396 cond := cond.(*ir.ConvExpr)
3397 s.stmtList(cond.Init())
3398 s.condBranch(cond.X, yes, no, likely)
3403 b.Kind = ssa.BlockIf
3405 b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness
3413 skipPtr skipMask = 1 << iota
3418 // assign does left = right.
3419 // Right has already been evaluated to ssa, left has not.
3420 // If deref is true, then we do left = *right instead (and right has already been nil-checked).
3421 // If deref is true and right == nil, just do left = 0.
3422 // skip indicates assignments (at the top level) that can be avoided.
3423 func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask) {
3424 if left.Op() == ir.ONAME && ir.IsBlank(left) {
3431 s.Fatalf("can SSA LHS %v but not RHS %s", left, right)
3433 if left.Op() == ir.ODOT {
3434 // We're assigning to a field of an ssa-able value.
3435 // We need to build a new structure with the new value for the
3436 // field we're assigning and the old values for the other fields.
3438 // type T struct {a, b, c int}
3441 // For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c}
3443 // Grab information about the structure type.
3444 left := left.(*ir.SelectorExpr)
3447 idx := fieldIdx(left)
3449 // Grab old value of structure.
3450 old := s.expr(left.X)
3452 // Make new structure.
3453 new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t)
3455 // Add fields as args.
3456 for i := 0; i < nf; i++ {
3460 new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old))
3464 // Recursively assign the new value we've made to the base of the dot op.
3465 s.assign(left.X, new, false, 0)
3466 // TODO: do we need to update named values here?
3469 if left.Op() == ir.OINDEX && left.(*ir.IndexExpr).X.Type().IsArray() {
3470 left := left.(*ir.IndexExpr)
3471 s.pushLine(left.Pos())
3473 // We're assigning to an element of an ssa-able array.
3478 i := s.expr(left.Index) // index
3480 // The bounds check must fail. Might as well
3481 // ignore the actual index and just use zeros.
3482 z := s.constInt(types.Types[types.TINT], 0)
3483 s.boundsCheck(z, z, ssa.BoundsIndex, false)
3487 s.Fatalf("assigning to non-1-length array")
3489 // Rewrite to a = [1]{v}
3490 len := s.constInt(types.Types[types.TINT], 1)
3491 s.boundsCheck(i, len, ssa.BoundsIndex, false) // checks i == 0
3492 v := s.newValue1(ssa.OpArrayMake1, t, right)
3493 s.assign(left.X, v, false, 0)
3496 left := left.(*ir.Name)
3497 // Update variable assignment.
3498 s.vars[left] = right
3499 s.addNamedValue(left, right)
3503 // If this assignment clobbers an entire local variable, then emit
3504 // OpVarDef so liveness analysis knows the variable is redefined.
3505 if base, ok := clobberBase(left).(*ir.Name); ok && base.OnStack() && skip == 0 {
3506 s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base, s.mem(), !ir.IsAutoTmp(base))
3509 // Left is not ssa-able. Compute its address.
3510 addr := s.addr(left)
3511 if ir.IsReflectHeaderDataField(left) {
3512 // Package unsafe's documentation says storing pointers into
3513 // reflect.SliceHeader and reflect.StringHeader's Data fields
3514 // is valid, even though they have type uintptr (#19168).
3515 // Mark it pointer type to signal the writebarrier pass to
3516 // insert a write barrier.
3517 t = types.Types[types.TUNSAFEPTR]
3520 // Treat as a mem->mem move.
3524 s.move(t, addr, right)
3528 // Treat as a store.
3529 s.storeType(t, addr, right, skip, !ir.IsAutoTmp(left))
3532 // zeroVal returns the zero value for type t.
3533 func (s *state) zeroVal(t *types.Type) *ssa.Value {
3538 return s.constInt8(t, 0)
3540 return s.constInt16(t, 0)
3542 return s.constInt32(t, 0)
3544 return s.constInt64(t, 0)
3546 s.Fatalf("bad sized integer type %v", t)
3551 return s.constFloat32(t, 0)
3553 return s.constFloat64(t, 0)
3555 s.Fatalf("bad sized float type %v", t)
3560 z := s.constFloat32(types.Types[types.TFLOAT32], 0)
3561 return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
3563 z := s.constFloat64(types.Types[types.TFLOAT64], 0)
3564 return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
3566 s.Fatalf("bad sized complex type %v", t)
3570 return s.constEmptyString(t)
3571 case t.IsPtrShaped():
3572 return s.constNil(t)
3574 return s.constBool(false)
3575 case t.IsInterface():
3576 return s.constInterface(t)
3578 return s.constSlice(t)
3581 v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t)
3582 for i := 0; i < n; i++ {
3583 v.AddArg(s.zeroVal(t.FieldType(i)))
3587 switch t.NumElem() {
3589 return s.entryNewValue0(ssa.OpArrayMake0, t)
3591 return s.entryNewValue1(ssa.OpArrayMake1, t, s.zeroVal(t.Elem()))
3594 s.Fatalf("zero for type %v not implemented", t)
3601 callNormal callKind = iota
3607 type sfRtCallDef struct {
3612 var softFloatOps map[ssa.Op]sfRtCallDef
3614 func softfloatInit() {
3615 // Some of these operations get transformed by sfcall.
3616 softFloatOps = map[ssa.Op]sfRtCallDef{
3617 ssa.OpAdd32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd32"), types.TFLOAT32},
3618 ssa.OpAdd64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd64"), types.TFLOAT64},
3619 ssa.OpSub32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd32"), types.TFLOAT32},
3620 ssa.OpSub64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd64"), types.TFLOAT64},
3621 ssa.OpMul32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fmul32"), types.TFLOAT32},
3622 ssa.OpMul64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fmul64"), types.TFLOAT64},
3623 ssa.OpDiv32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fdiv32"), types.TFLOAT32},
3624 ssa.OpDiv64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fdiv64"), types.TFLOAT64},
3626 ssa.OpEq64F: sfRtCallDef{typecheck.LookupRuntimeFunc("feq64"), types.TBOOL},
3627 ssa.OpEq32F: sfRtCallDef{typecheck.LookupRuntimeFunc("feq32"), types.TBOOL},
3628 ssa.OpNeq64F: sfRtCallDef{typecheck.LookupRuntimeFunc("feq64"), types.TBOOL},
3629 ssa.OpNeq32F: sfRtCallDef{typecheck.LookupRuntimeFunc("feq32"), types.TBOOL},
3630 ssa.OpLess64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fgt64"), types.TBOOL},
3631 ssa.OpLess32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fgt32"), types.TBOOL},
3632 ssa.OpLeq64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fge64"), types.TBOOL},
3633 ssa.OpLeq32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fge32"), types.TBOOL},
3635 ssa.OpCvt32to32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fint32to32"), types.TFLOAT32},
3636 ssa.OpCvt32Fto32: sfRtCallDef{typecheck.LookupRuntimeFunc("f32toint32"), types.TINT32},
3637 ssa.OpCvt64to32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fint64to32"), types.TFLOAT32},
3638 ssa.OpCvt32Fto64: sfRtCallDef{typecheck.LookupRuntimeFunc("f32toint64"), types.TINT64},
3639 ssa.OpCvt64Uto32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fuint64to32"), types.TFLOAT32},
3640 ssa.OpCvt32Fto64U: sfRtCallDef{typecheck.LookupRuntimeFunc("f32touint64"), types.TUINT64},
3641 ssa.OpCvt32to64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fint32to64"), types.TFLOAT64},
3642 ssa.OpCvt64Fto32: sfRtCallDef{typecheck.LookupRuntimeFunc("f64toint32"), types.TINT32},
3643 ssa.OpCvt64to64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fint64to64"), types.TFLOAT64},
3644 ssa.OpCvt64Fto64: sfRtCallDef{typecheck.LookupRuntimeFunc("f64toint64"), types.TINT64},
3645 ssa.OpCvt64Uto64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fuint64to64"), types.TFLOAT64},
3646 ssa.OpCvt64Fto64U: sfRtCallDef{typecheck.LookupRuntimeFunc("f64touint64"), types.TUINT64},
3647 ssa.OpCvt32Fto64F: sfRtCallDef{typecheck.LookupRuntimeFunc("f32to64"), types.TFLOAT64},
3648 ssa.OpCvt64Fto32F: sfRtCallDef{typecheck.LookupRuntimeFunc("f64to32"), types.TFLOAT32},
3652 // TODO: do not emit sfcall if operation can be optimized to constant in later
3654 func (s *state) sfcall(op ssa.Op, args ...*ssa.Value) (*ssa.Value, bool) {
3655 if callDef, ok := softFloatOps[op]; ok {
3661 args[0], args[1] = args[1], args[0]
3664 args[1] = s.newValue1(s.ssaOp(ir.ONEG, types.Types[callDef.rtype]), args[1].Type, args[1])
3667 result := s.rtcall(callDef.rtfn, true, []*types.Type{types.Types[callDef.rtype]}, args...)[0]
3668 if op == ssa.OpNeq32F || op == ssa.OpNeq64F {
3669 result = s.newValue1(ssa.OpNot, result.Type, result)
3676 var intrinsics map[intrinsicKey]intrinsicBuilder
3678 // An intrinsicBuilder converts a call node n into an ssa value that
3679 // implements that call as an intrinsic. args is a list of arguments to the func.
3680 type intrinsicBuilder func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value
3682 type intrinsicKey struct {
3689 intrinsics = map[intrinsicKey]intrinsicBuilder{}
3694 var lwatomics []*sys.Arch
3695 for _, a := range &sys.Archs {
3696 all = append(all, a)
3702 if a.Family != sys.PPC64 {
3703 lwatomics = append(lwatomics, a)
3707 // add adds the intrinsic b for pkg.fn for the given list of architectures.
3708 add := func(pkg, fn string, b intrinsicBuilder, archs ...*sys.Arch) {
3709 for _, a := range archs {
3710 intrinsics[intrinsicKey{a, pkg, fn}] = b
3713 // addF does the same as add but operates on architecture families.
3714 addF := func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily) {
3716 for _, f := range archFamilies {
3718 panic("too many architecture families")
3722 for _, a := range all {
3723 if m>>uint(a.Family)&1 != 0 {
3724 intrinsics[intrinsicKey{a, pkg, fn}] = b
3728 // alias defines pkg.fn = pkg2.fn2 for all architectures in archs for which pkg2.fn2 exists.
3729 alias := func(pkg, fn, pkg2, fn2 string, archs ...*sys.Arch) {
3731 for _, a := range archs {
3732 if b, ok := intrinsics[intrinsicKey{a, pkg2, fn2}]; ok {
3733 intrinsics[intrinsicKey{a, pkg, fn}] = b
3738 panic(fmt.Sprintf("attempted to alias undefined intrinsic: %s.%s", pkg, fn))
3742 /******** runtime ********/
3743 if !base.Flag.Cfg.Instrumenting {
3744 add("runtime", "slicebytetostringtmp",
3745 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3746 // Compiler frontend optimizations emit OBYTES2STRTMP nodes
3747 // for the backend instead of slicebytetostringtmp calls
3748 // when not instrumenting.
3749 return s.newValue2(ssa.OpStringMake, n.Type(), args[0], args[1])
3753 addF("runtime/internal/math", "MulUintptr",
3754 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3755 if s.config.PtrSize == 4 {
3756 return s.newValue2(ssa.OpMul32uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1])
3758 return s.newValue2(ssa.OpMul64uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1])
3760 sys.AMD64, sys.I386, sys.MIPS64)
3761 add("runtime", "KeepAlive",
3762 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3763 data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0])
3764 s.vars[memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem())
3768 add("runtime", "getclosureptr",
3769 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3770 return s.newValue0(ssa.OpGetClosurePtr, s.f.Config.Types.Uintptr)
3774 add("runtime", "getcallerpc",
3775 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3776 return s.newValue0(ssa.OpGetCallerPC, s.f.Config.Types.Uintptr)
3780 add("runtime", "getcallersp",
3781 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3782 return s.newValue0(ssa.OpGetCallerSP, s.f.Config.Types.Uintptr)
3786 /******** runtime/internal/sys ********/
3787 addF("runtime/internal/sys", "Ctz32",
3788 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3789 return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], args[0])
3791 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
3792 addF("runtime/internal/sys", "Ctz64",
3793 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3794 return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], args[0])
3796 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
3797 addF("runtime/internal/sys", "Bswap32",
3798 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3799 return s.newValue1(ssa.OpBswap32, types.Types[types.TUINT32], args[0])
3801 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
3802 addF("runtime/internal/sys", "Bswap64",
3803 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3804 return s.newValue1(ssa.OpBswap64, types.Types[types.TUINT64], args[0])
3806 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
3808 /******** runtime/internal/atomic ********/
3809 addF("runtime/internal/atomic", "Load",
3810 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3811 v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem())
3812 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
3813 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
3815 sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
3816 addF("runtime/internal/atomic", "Load8",
3817 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3818 v := s.newValue2(ssa.OpAtomicLoad8, types.NewTuple(types.Types[types.TUINT8], types.TypeMem), args[0], s.mem())
3819 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
3820 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT8], v)
3822 sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
3823 addF("runtime/internal/atomic", "Load64",
3824 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3825 v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem())
3826 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
3827 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
3829 sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
3830 addF("runtime/internal/atomic", "LoadAcq",
3831 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3832 v := s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem())
3833 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
3834 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
3836 sys.PPC64, sys.S390X)
3837 addF("runtime/internal/atomic", "LoadAcq64",
3838 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3839 v := s.newValue2(ssa.OpAtomicLoadAcq64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem())
3840 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
3841 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
3844 addF("runtime/internal/atomic", "Loadp",
3845 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3846 v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem())
3847 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
3848 return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v)
3850 sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
3852 addF("runtime/internal/atomic", "Store",
3853 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3854 s.vars[memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem())
3857 sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
3858 addF("runtime/internal/atomic", "Store8",
3859 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3860 s.vars[memVar] = s.newValue3(ssa.OpAtomicStore8, types.TypeMem, args[0], args[1], s.mem())
3863 sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
3864 addF("runtime/internal/atomic", "Store64",
3865 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3866 s.vars[memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem())
3869 sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
3870 addF("runtime/internal/atomic", "StorepNoWB",
3871 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3872 s.vars[memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem())
3875 sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.RISCV64, sys.S390X)
3876 addF("runtime/internal/atomic", "StoreRel",
3877 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3878 s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel32, types.TypeMem, args[0], args[1], s.mem())
3881 sys.PPC64, sys.S390X)
3882 addF("runtime/internal/atomic", "StoreRel64",
3883 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3884 s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel64, types.TypeMem, args[0], args[1], s.mem())
3889 addF("runtime/internal/atomic", "Xchg",
3890 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3891 v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem())
3892 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
3893 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
3895 sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
3896 addF("runtime/internal/atomic", "Xchg64",
3897 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3898 v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem())
3899 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
3900 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
3902 sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
3904 type atomicOpEmitter func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind)
3906 makeAtomicGuardedIntrinsicARM64 := func(op0, op1 ssa.Op, typ, rtyp types.Kind, emit atomicOpEmitter) intrinsicBuilder {
3908 return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3909 // Target Atomic feature is identified by dynamic detection
3910 addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), ir.Syms.ARM64HasATOMICS, s.sb)
3911 v := s.load(types.Types[types.TBOOL], addr)
3913 b.Kind = ssa.BlockIf
3915 bTrue := s.f.NewBlock(ssa.BlockPlain)
3916 bFalse := s.f.NewBlock(ssa.BlockPlain)
3917 bEnd := s.f.NewBlock(ssa.BlockPlain)
3920 b.Likely = ssa.BranchLikely
3922 // We have atomic instructions - use it directly.
3924 emit(s, n, args, op1, typ)
3925 s.endBlock().AddEdgeTo(bEnd)
3927 // Use original instruction sequence.
3928 s.startBlock(bFalse)
3929 emit(s, n, args, op0, typ)
3930 s.endBlock().AddEdgeTo(bEnd)
3934 if rtyp == types.TNIL {
3937 return s.variable(n, types.Types[rtyp])
3942 atomicXchgXaddEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
3943 v := s.newValue3(op, types.NewTuple(types.Types[typ], types.TypeMem), args[0], args[1], s.mem())
3944 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
3945 s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
3947 addF("runtime/internal/atomic", "Xchg",
3948 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange32, ssa.OpAtomicExchange32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64),
3950 addF("runtime/internal/atomic", "Xchg64",
3951 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange64, ssa.OpAtomicExchange64Variant, types.TUINT64, types.TUINT64, atomicXchgXaddEmitterARM64),
3954 addF("runtime/internal/atomic", "Xadd",
3955 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3956 v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem())
3957 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
3958 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
3960 sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
3961 addF("runtime/internal/atomic", "Xadd64",
3962 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3963 v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem())
3964 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
3965 return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
3967 sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
3969 addF("runtime/internal/atomic", "Xadd",
3970 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd32, ssa.OpAtomicAdd32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64),
3972 addF("runtime/internal/atomic", "Xadd64",
3973 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd64, ssa.OpAtomicAdd64Variant, types.TUINT64, types.TUINT64, atomicXchgXaddEmitterARM64),
3976 addF("runtime/internal/atomic", "Cas",
3977 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3978 v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
3979 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
3980 return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
3982 sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
3983 addF("runtime/internal/atomic", "Cas64",
3984 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3985 v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
3986 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
3987 return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
3989 sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
3990 addF("runtime/internal/atomic", "CasRel",
3991 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
3992 v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
3993 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
3994 return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
3998 atomicCasEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
3999 v := s.newValue4(op, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
4000 s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
4001 s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
4004 addF("runtime/internal/atomic", "Cas",
4005 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap32, ssa.OpAtomicCompareAndSwap32Variant, types.TUINT32, types.TBOOL, atomicCasEmitterARM64),
4007 addF("runtime/internal/atomic", "Cas64",
4008 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap64, ssa.OpAtomicCompareAndSwap64Variant, types.TUINT64, types.TBOOL, atomicCasEmitterARM64),
4011 addF("runtime/internal/atomic", "And8",
4012 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4013 s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem())
4016 sys.AMD64, sys.MIPS, sys.PPC64, sys.RISCV64, sys.S390X)
4017 addF("runtime/internal/atomic", "And",
4018 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4019 s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd32, types.TypeMem, args[0], args[1], s.mem())
4022 sys.AMD64, sys.MIPS, sys.PPC64, sys.RISCV64, sys.S390X)
4023 addF("runtime/internal/atomic", "Or8",
4024 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4025 s.vars[memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem())
4028 sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.RISCV64, sys.S390X)
4029 addF("runtime/internal/atomic", "Or",
4030 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4031 s.vars[memVar] = s.newValue3(ssa.OpAtomicOr32, types.TypeMem, args[0], args[1], s.mem())
4034 sys.AMD64, sys.MIPS, sys.PPC64, sys.RISCV64, sys.S390X)
4036 atomicAndOrEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
4037 s.vars[memVar] = s.newValue3(op, types.TypeMem, args[0], args[1], s.mem())
4040 addF("runtime/internal/atomic", "And8",
4041 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd8, ssa.OpAtomicAnd8Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
4043 addF("runtime/internal/atomic", "And",
4044 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd32, ssa.OpAtomicAnd32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
4046 addF("runtime/internal/atomic", "Or8",
4047 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr8, ssa.OpAtomicOr8Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
4049 addF("runtime/internal/atomic", "Or",
4050 makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr32, ssa.OpAtomicOr32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
4053 // Aliases for atomic load operations
4054 alias("runtime/internal/atomic", "Loadint32", "runtime/internal/atomic", "Load", all...)
4055 alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...)
4056 alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load", p4...)
4057 alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load64", p8...)
4058 alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load", p4...)
4059 alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load64", p8...)
4060 alias("runtime/internal/atomic", "LoadAcq", "runtime/internal/atomic", "Load", lwatomics...)
4061 alias("runtime/internal/atomic", "LoadAcq64", "runtime/internal/atomic", "Load64", lwatomics...)
4062 alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...)
4063 alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...) // linknamed
4064 alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...)
4065 alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...) // linknamed
4067 // Aliases for atomic store operations
4068 alias("runtime/internal/atomic", "Storeint32", "runtime/internal/atomic", "Store", all...)
4069 alias("runtime/internal/atomic", "Storeint64", "runtime/internal/atomic", "Store64", all...)
4070 alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...)
4071 alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...)
4072 alias("runtime/internal/atomic", "StoreRel", "runtime/internal/atomic", "Store", lwatomics...)
4073 alias("runtime/internal/atomic", "StoreRel64", "runtime/internal/atomic", "Store64", lwatomics...)
4074 alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...)
4075 alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...) // linknamed
4076 alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...)
4077 alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...) // linknamed
4079 // Aliases for atomic swap operations
4080 alias("runtime/internal/atomic", "Xchgint32", "runtime/internal/atomic", "Xchg", all...)
4081 alias("runtime/internal/atomic", "Xchgint64", "runtime/internal/atomic", "Xchg64", all...)
4082 alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...)
4083 alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...)
4085 // Aliases for atomic add operations
4086 alias("runtime/internal/atomic", "Xaddint32", "runtime/internal/atomic", "Xadd", all...)
4087 alias("runtime/internal/atomic", "Xaddint64", "runtime/internal/atomic", "Xadd64", all...)
4088 alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...)
4089 alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd64", p8...)
4091 // Aliases for atomic CAS operations
4092 alias("runtime/internal/atomic", "Casint32", "runtime/internal/atomic", "Cas", all...)
4093 alias("runtime/internal/atomic", "Casint64", "runtime/internal/atomic", "Cas64", all...)
4094 alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas", p4...)
4095 alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas64", p8...)
4096 alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas", p4...)
4097 alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas64", p8...)
4098 alias("runtime/internal/atomic", "CasRel", "runtime/internal/atomic", "Cas", lwatomics...)
4100 /******** math ********/
4101 addF("math", "Sqrt",
4102 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4103 return s.newValue1(ssa.OpSqrt, types.Types[types.TFLOAT64], args[0])
4105 sys.I386, sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm)
4106 addF("math", "Trunc",
4107 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4108 return s.newValue1(ssa.OpTrunc, types.Types[types.TFLOAT64], args[0])
4110 sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
4111 addF("math", "Ceil",
4112 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4113 return s.newValue1(ssa.OpCeil, types.Types[types.TFLOAT64], args[0])
4115 sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
4116 addF("math", "Floor",
4117 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4118 return s.newValue1(ssa.OpFloor, types.Types[types.TFLOAT64], args[0])
4120 sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
4121 addF("math", "Round",
4122 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4123 return s.newValue1(ssa.OpRound, types.Types[types.TFLOAT64], args[0])
4125 sys.ARM64, sys.PPC64, sys.S390X)
4126 addF("math", "RoundToEven",
4127 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4128 return s.newValue1(ssa.OpRoundToEven, types.Types[types.TFLOAT64], args[0])
4130 sys.ARM64, sys.S390X, sys.Wasm)
4132 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4133 return s.newValue1(ssa.OpAbs, types.Types[types.TFLOAT64], args[0])
4135 sys.ARM64, sys.ARM, sys.PPC64, sys.Wasm)
4136 addF("math", "Copysign",
4137 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4138 return s.newValue2(ssa.OpCopysign, types.Types[types.TFLOAT64], args[0], args[1])
4140 sys.PPC64, sys.Wasm)
4142 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4143 return s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
4145 sys.ARM64, sys.PPC64, sys.S390X)
4147 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4148 if !s.config.UseFMA {
4149 s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
4150 return s.variable(n, types.Types[types.TFLOAT64])
4152 v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasFMA)
4154 b.Kind = ssa.BlockIf
4156 bTrue := s.f.NewBlock(ssa.BlockPlain)
4157 bFalse := s.f.NewBlock(ssa.BlockPlain)
4158 bEnd := s.f.NewBlock(ssa.BlockPlain)
4161 b.Likely = ssa.BranchLikely // >= haswell cpus are common
4163 // We have the intrinsic - use it directly.
4165 s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
4166 s.endBlock().AddEdgeTo(bEnd)
4168 // Call the pure Go version.
4169 s.startBlock(bFalse)
4170 s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
4171 s.endBlock().AddEdgeTo(bEnd)
4175 return s.variable(n, types.Types[types.TFLOAT64])
4179 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4180 if !s.config.UseFMA {
4181 s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
4182 return s.variable(n, types.Types[types.TFLOAT64])
4184 addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), ir.Syms.ARMHasVFPv4, s.sb)
4185 v := s.load(types.Types[types.TBOOL], addr)
4187 b.Kind = ssa.BlockIf
4189 bTrue := s.f.NewBlock(ssa.BlockPlain)
4190 bFalse := s.f.NewBlock(ssa.BlockPlain)
4191 bEnd := s.f.NewBlock(ssa.BlockPlain)
4194 b.Likely = ssa.BranchLikely
4196 // We have the intrinsic - use it directly.
4198 s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
4199 s.endBlock().AddEdgeTo(bEnd)
4201 // Call the pure Go version.
4202 s.startBlock(bFalse)
4203 s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
4204 s.endBlock().AddEdgeTo(bEnd)
4208 return s.variable(n, types.Types[types.TFLOAT64])
4212 makeRoundAMD64 := func(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4213 return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4214 v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasSSE41)
4216 b.Kind = ssa.BlockIf
4218 bTrue := s.f.NewBlock(ssa.BlockPlain)
4219 bFalse := s.f.NewBlock(ssa.BlockPlain)
4220 bEnd := s.f.NewBlock(ssa.BlockPlain)
4223 b.Likely = ssa.BranchLikely // most machines have sse4.1 nowadays
4225 // We have the intrinsic - use it directly.
4227 s.vars[n] = s.newValue1(op, types.Types[types.TFLOAT64], args[0])
4228 s.endBlock().AddEdgeTo(bEnd)
4230 // Call the pure Go version.
4231 s.startBlock(bFalse)
4232 s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
4233 s.endBlock().AddEdgeTo(bEnd)
4237 return s.variable(n, types.Types[types.TFLOAT64])
4240 addF("math", "RoundToEven",
4241 makeRoundAMD64(ssa.OpRoundToEven),
4243 addF("math", "Floor",
4244 makeRoundAMD64(ssa.OpFloor),
4246 addF("math", "Ceil",
4247 makeRoundAMD64(ssa.OpCeil),
4249 addF("math", "Trunc",
4250 makeRoundAMD64(ssa.OpTrunc),
4253 /******** math/bits ********/
4254 addF("math/bits", "TrailingZeros64",
4255 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4256 return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], args[0])
4258 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
4259 addF("math/bits", "TrailingZeros32",
4260 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4261 return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], args[0])
4263 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
4264 addF("math/bits", "TrailingZeros16",
4265 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4266 x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0])
4267 c := s.constInt32(types.Types[types.TUINT32], 1<<16)
4268 y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c)
4269 return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], y)
4272 addF("math/bits", "TrailingZeros16",
4273 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4274 return s.newValue1(ssa.OpCtz16, types.Types[types.TINT], args[0])
4276 sys.AMD64, sys.I386, sys.ARM, sys.ARM64, sys.Wasm)
4277 addF("math/bits", "TrailingZeros16",
4278 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4279 x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0])
4280 c := s.constInt64(types.Types[types.TUINT64], 1<<16)
4281 y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c)
4282 return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], y)
4284 sys.S390X, sys.PPC64)
4285 addF("math/bits", "TrailingZeros8",
4286 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4287 x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0])
4288 c := s.constInt32(types.Types[types.TUINT32], 1<<8)
4289 y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c)
4290 return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], y)
4293 addF("math/bits", "TrailingZeros8",
4294 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4295 return s.newValue1(ssa.OpCtz8, types.Types[types.TINT], args[0])
4297 sys.AMD64, sys.ARM, sys.ARM64, sys.Wasm)
4298 addF("math/bits", "TrailingZeros8",
4299 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4300 x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0])
4301 c := s.constInt64(types.Types[types.TUINT64], 1<<8)
4302 y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c)
4303 return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], y)
4306 alias("math/bits", "ReverseBytes64", "runtime/internal/sys", "Bswap64", all...)
4307 alias("math/bits", "ReverseBytes32", "runtime/internal/sys", "Bswap32", all...)
4308 // ReverseBytes inlines correctly, no need to intrinsify it.
4309 // ReverseBytes16 lowers to a rotate, no need for anything special here.
4310 addF("math/bits", "Len64",
4311 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4312 return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0])
4314 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
4315 addF("math/bits", "Len32",
4316 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4317 return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
4319 sys.AMD64, sys.ARM64)
4320 addF("math/bits", "Len32",
4321 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4322 if s.config.PtrSize == 4 {
4323 return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
4325 x := s.newValue1(ssa.OpZeroExt32to64, types.Types[types.TUINT64], args[0])
4326 return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
4328 sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
4329 addF("math/bits", "Len16",
4330 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4331 if s.config.PtrSize == 4 {
4332 x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0])
4333 return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x)
4335 x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0])
4336 return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
4338 sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
4339 addF("math/bits", "Len16",
4340 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4341 return s.newValue1(ssa.OpBitLen16, types.Types[types.TINT], args[0])
4344 addF("math/bits", "Len8",
4345 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4346 if s.config.PtrSize == 4 {
4347 x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0])
4348 return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x)
4350 x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0])
4351 return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
4353 sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
4354 addF("math/bits", "Len8",
4355 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4356 return s.newValue1(ssa.OpBitLen8, types.Types[types.TINT], args[0])
4359 addF("math/bits", "Len",
4360 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4361 if s.config.PtrSize == 4 {
4362 return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
4364 return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0])
4366 sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
4367 // LeadingZeros is handled because it trivially calls Len.
4368 addF("math/bits", "Reverse64",
4369 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4370 return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0])
4373 addF("math/bits", "Reverse32",
4374 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4375 return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0])
4378 addF("math/bits", "Reverse16",
4379 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4380 return s.newValue1(ssa.OpBitRev16, types.Types[types.TINT], args[0])
4383 addF("math/bits", "Reverse8",
4384 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4385 return s.newValue1(ssa.OpBitRev8, types.Types[types.TINT], args[0])
4388 addF("math/bits", "Reverse",
4389 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4390 if s.config.PtrSize == 4 {
4391 return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0])
4393 return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0])
4396 addF("math/bits", "RotateLeft8",
4397 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4398 return s.newValue2(ssa.OpRotateLeft8, types.Types[types.TUINT8], args[0], args[1])
4401 addF("math/bits", "RotateLeft16",
4402 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4403 return s.newValue2(ssa.OpRotateLeft16, types.Types[types.TUINT16], args[0], args[1])
4406 addF("math/bits", "RotateLeft32",
4407 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4408 return s.newValue2(ssa.OpRotateLeft32, types.Types[types.TUINT32], args[0], args[1])
4410 sys.AMD64, sys.ARM, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
4411 addF("math/bits", "RotateLeft64",
4412 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4413 return s.newValue2(ssa.OpRotateLeft64, types.Types[types.TUINT64], args[0], args[1])
4415 sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
4416 alias("math/bits", "RotateLeft", "math/bits", "RotateLeft64", p8...)
4418 makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4419 return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4420 v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasPOPCNT)
4422 b.Kind = ssa.BlockIf
4424 bTrue := s.f.NewBlock(ssa.BlockPlain)
4425 bFalse := s.f.NewBlock(ssa.BlockPlain)
4426 bEnd := s.f.NewBlock(ssa.BlockPlain)
4429 b.Likely = ssa.BranchLikely // most machines have popcnt nowadays
4431 // We have the intrinsic - use it directly.
4434 if s.config.PtrSize == 4 {
4437 s.vars[n] = s.newValue1(op, types.Types[types.TINT], args[0])
4438 s.endBlock().AddEdgeTo(bEnd)
4440 // Call the pure Go version.
4441 s.startBlock(bFalse)
4442 s.vars[n] = s.callResult(n, callNormal) // types.Types[TINT]
4443 s.endBlock().AddEdgeTo(bEnd)
4447 return s.variable(n, types.Types[types.TINT])
4450 addF("math/bits", "OnesCount64",
4451 makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount64),
4453 addF("math/bits", "OnesCount64",
4454 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4455 return s.newValue1(ssa.OpPopCount64, types.Types[types.TINT], args[0])
4457 sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
4458 addF("math/bits", "OnesCount32",
4459 makeOnesCountAMD64(ssa.OpPopCount32, ssa.OpPopCount32),
4461 addF("math/bits", "OnesCount32",
4462 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4463 return s.newValue1(ssa.OpPopCount32, types.Types[types.TINT], args[0])
4465 sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
4466 addF("math/bits", "OnesCount16",
4467 makeOnesCountAMD64(ssa.OpPopCount16, ssa.OpPopCount16),
4469 addF("math/bits", "OnesCount16",
4470 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4471 return s.newValue1(ssa.OpPopCount16, types.Types[types.TINT], args[0])
4473 sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
4474 addF("math/bits", "OnesCount8",
4475 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4476 return s.newValue1(ssa.OpPopCount8, types.Types[types.TINT], args[0])
4478 sys.S390X, sys.PPC64, sys.Wasm)
4479 addF("math/bits", "OnesCount",
4480 makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount32),
4482 addF("math/bits", "Mul64",
4483 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4484 return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1])
4486 sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.MIPS64)
4487 alias("math/bits", "Mul", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE)
4488 alias("runtime/internal/math", "Mul64", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE)
4489 addF("math/bits", "Add64",
4490 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4491 return s.newValue3(ssa.OpAdd64carry, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
4493 sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X)
4494 alias("math/bits", "Add", "math/bits", "Add64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchS390X)
4495 addF("math/bits", "Sub64",
4496 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4497 return s.newValue3(ssa.OpSub64borrow, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
4499 sys.AMD64, sys.ARM64, sys.S390X)
4500 alias("math/bits", "Sub", "math/bits", "Sub64", sys.ArchAMD64, sys.ArchARM64, sys.ArchS390X)
4501 addF("math/bits", "Div64",
4502 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4503 // check for divide-by-zero/overflow and panic with appropriate message
4504 cmpZero := s.newValue2(s.ssaOp(ir.ONE, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[2], s.zeroVal(types.Types[types.TUINT64]))
4505 s.check(cmpZero, ir.Syms.Panicdivide)
4506 cmpOverflow := s.newValue2(s.ssaOp(ir.OLT, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[0], args[2])
4507 s.check(cmpOverflow, ir.Syms.Panicoverflow)
4508 return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
4511 alias("math/bits", "Div", "math/bits", "Div64", sys.ArchAMD64)
4513 alias("runtime/internal/sys", "Ctz8", "math/bits", "TrailingZeros8", all...)
4514 alias("runtime/internal/sys", "TrailingZeros8", "math/bits", "TrailingZeros8", all...)
4515 alias("runtime/internal/sys", "TrailingZeros64", "math/bits", "TrailingZeros64", all...)
4516 alias("runtime/internal/sys", "Len8", "math/bits", "Len8", all...)
4517 alias("runtime/internal/sys", "Len64", "math/bits", "Len64", all...)
4518 alias("runtime/internal/sys", "OnesCount64", "math/bits", "OnesCount64", all...)
4520 /******** sync/atomic ********/
4522 // Note: these are disabled by flag_race in findIntrinsic below.
4523 alias("sync/atomic", "LoadInt32", "runtime/internal/atomic", "Load", all...)
4524 alias("sync/atomic", "LoadInt64", "runtime/internal/atomic", "Load64", all...)
4525 alias("sync/atomic", "LoadPointer", "runtime/internal/atomic", "Loadp", all...)
4526 alias("sync/atomic", "LoadUint32", "runtime/internal/atomic", "Load", all...)
4527 alias("sync/atomic", "LoadUint64", "runtime/internal/atomic", "Load64", all...)
4528 alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load", p4...)
4529 alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load64", p8...)
4531 alias("sync/atomic", "StoreInt32", "runtime/internal/atomic", "Store", all...)
4532 alias("sync/atomic", "StoreInt64", "runtime/internal/atomic", "Store64", all...)
4533 // Note: not StorePointer, that needs a write barrier. Same below for {CompareAnd}Swap.
4534 alias("sync/atomic", "StoreUint32", "runtime/internal/atomic", "Store", all...)
4535 alias("sync/atomic", "StoreUint64", "runtime/internal/atomic", "Store64", all...)
4536 alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store", p4...)
4537 alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store64", p8...)
4539 alias("sync/atomic", "SwapInt32", "runtime/internal/atomic", "Xchg", all...)
4540 alias("sync/atomic", "SwapInt64", "runtime/internal/atomic", "Xchg64", all...)
4541 alias("sync/atomic", "SwapUint32", "runtime/internal/atomic", "Xchg", all...)
4542 alias("sync/atomic", "SwapUint64", "runtime/internal/atomic", "Xchg64", all...)
4543 alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg", p4...)
4544 alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg64", p8...)
4546 alias("sync/atomic", "CompareAndSwapInt32", "runtime/internal/atomic", "Cas", all...)
4547 alias("sync/atomic", "CompareAndSwapInt64", "runtime/internal/atomic", "Cas64", all...)
4548 alias("sync/atomic", "CompareAndSwapUint32", "runtime/internal/atomic", "Cas", all...)
4549 alias("sync/atomic", "CompareAndSwapUint64", "runtime/internal/atomic", "Cas64", all...)
4550 alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas", p4...)
4551 alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas64", p8...)
4553 alias("sync/atomic", "AddInt32", "runtime/internal/atomic", "Xadd", all...)
4554 alias("sync/atomic", "AddInt64", "runtime/internal/atomic", "Xadd64", all...)
4555 alias("sync/atomic", "AddUint32", "runtime/internal/atomic", "Xadd", all...)
4556 alias("sync/atomic", "AddUint64", "runtime/internal/atomic", "Xadd64", all...)
4557 alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd", p4...)
4558 alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd64", p8...)
4560 /******** math/big ********/
4561 add("math/big", "mulWW",
4562 func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
4563 return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1])
4565 sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64LE, sys.ArchPPC64, sys.ArchS390X)
4568 // findIntrinsic returns a function which builds the SSA equivalent of the
4569 // function identified by the symbol sym. If sym is not an intrinsic call, returns nil.
4570 func findIntrinsic(sym *types.Sym) intrinsicBuilder {
4571 if sym == nil || sym.Pkg == nil {
4575 if sym.Pkg == types.LocalPkg {
4576 pkg = base.Ctxt.Pkgpath
4578 if sym.Pkg == ir.Pkgs.Runtime {
4581 if base.Flag.Race && pkg == "sync/atomic" {
4582 // The race detector needs to be able to intercept these calls.
4583 // We can't intrinsify them.
4586 // Skip intrinsifying math functions (which may contain hard-float
4587 // instructions) when soft-float
4588 if Arch.SoftFloat && pkg == "math" {
4593 if ssa.IntrinsicsDisable {
4594 if pkg == "runtime" && (fn == "getcallerpc" || fn == "getcallersp" || fn == "getclosureptr") {
4595 // These runtime functions don't have definitions, must be intrinsics.
4600 return intrinsics[intrinsicKey{Arch.LinkArch.Arch, pkg, fn}]
4603 func IsIntrinsicCall(n *ir.CallExpr) bool {
4607 name, ok := n.X.(*ir.Name)
4611 return findIntrinsic(name.Sym()) != nil
4614 // intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation.
4615 func (s *state) intrinsicCall(n *ir.CallExpr) *ssa.Value {
4616 v := findIntrinsic(n.X.Sym())(s, n, s.intrinsicArgs(n))
4617 if ssa.IntrinsicsDebug > 0 {
4622 if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 {
4625 base.WarnfAt(n.Pos(), "intrinsic substitution for %v with %s", n.X.Sym().Name, x.LongString())
4630 // intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them.
4631 func (s *state) intrinsicArgs(n *ir.CallExpr) []*ssa.Value {
4632 args := make([]*ssa.Value, len(n.Args))
4633 for i, n := range n.Args {
4639 // openDeferRecord adds code to evaluate and store the args for an open-code defer
4640 // call, and records info about the defer, so we can generate proper code on the
4641 // exit paths. n is the sub-node of the defer node that is the actual function
4642 // call. We will also record funcdata information on where the args are stored
4643 // (as well as the deferBits variable), and this will enable us to run the proper
4644 // defer calls during panics.
4645 func (s *state) openDeferRecord(n *ir.CallExpr) {
4646 var args []*ssa.Value
4647 var argNodes []*ir.Name
4649 if objabi.Experiment.RegabiDefer && (len(n.Args) != 0 || n.Op() == ir.OCALLINTER || n.X.Type().NumResults() != 0) {
4650 s.Fatalf("defer call with arguments or results: %v", n)
4653 opendefer := &openDeferInfo{
4657 if n.Op() == ir.OCALLFUNC {
4658 // We must always store the function value in a stack slot for the
4659 // runtime panic code to use. But in the defer exit code, we will
4660 // call the function directly if it is a static function.
4661 closureVal := s.expr(fn)
4662 closure := s.openDeferSave(nil, fn.Type(), closureVal)
4663 opendefer.closureNode = closure.Aux.(*ir.Name)
4664 if !(fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC) {
4665 opendefer.closure = closure
4667 } else if n.Op() == ir.OCALLMETH {
4668 base.Fatalf("OCALLMETH missed by walkCall")
4670 if fn.Op() != ir.ODOTINTER {
4671 base.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op())
4673 fn := fn.(*ir.SelectorExpr)
4674 closure, rcvr := s.getClosureAndRcvr(fn)
4675 opendefer.closure = s.openDeferSave(nil, closure.Type, closure)
4676 // Important to get the receiver type correct, so it is recognized
4677 // as a pointer for GC purposes.
4678 opendefer.rcvr = s.openDeferSave(nil, fn.Type().Recv().Type, rcvr)
4679 opendefer.closureNode = opendefer.closure.Aux.(*ir.Name)
4680 opendefer.rcvrNode = opendefer.rcvr.Aux.(*ir.Name)
4682 for _, argn := range n.Args {
4684 if TypeOK(argn.Type()) {
4685 v = s.openDeferSave(nil, argn.Type(), s.expr(argn))
4687 v = s.openDeferSave(argn, argn.Type(), nil)
4689 args = append(args, v)
4690 argNodes = append(argNodes, v.Aux.(*ir.Name))
4692 opendefer.argVals = args
4693 opendefer.argNodes = argNodes
4694 index := len(s.openDefers)
4695 s.openDefers = append(s.openDefers, opendefer)
4697 // Update deferBits only after evaluation and storage to stack of
4698 // args/receiver/interface is successful.
4699 bitvalue := s.constInt8(types.Types[types.TUINT8], 1<<uint(index))
4700 newDeferBits := s.newValue2(ssa.OpOr8, types.Types[types.TUINT8], s.variable(deferBitsVar, types.Types[types.TUINT8]), bitvalue)
4701 s.vars[deferBitsVar] = newDeferBits
4702 s.store(types.Types[types.TUINT8], s.deferBitsAddr, newDeferBits)
4705 // openDeferSave generates SSA nodes to store a value (with type t) for an
4706 // open-coded defer at an explicit autotmp location on the stack, so it can be
4707 // reloaded and used for the appropriate call on exit. If type t is SSAable, then
4708 // val must be non-nil (and n should be nil) and val is the value to be stored. If
4709 // type t is non-SSAable, then n must be non-nil (and val should be nil) and n is
4710 // evaluated (via s.addr() below) to get the value that is to be stored. The
4711 // function returns an SSA value representing a pointer to the autotmp location.
4712 func (s *state) openDeferSave(n ir.Node, t *types.Type, val *ssa.Value) *ssa.Value {
4720 argTemp := typecheck.TempAt(pos.WithNotStmt(), s.curfn, t)
4721 argTemp.SetOpenDeferSlot(true)
4722 var addrArgTemp *ssa.Value
4723 // Use OpVarLive to make sure stack slots for the args, etc. are not
4724 // removed by dead-store elimination
4725 if s.curBlock.ID != s.f.Entry.ID {
4726 // Force the argtmp storing this defer function/receiver/arg to be
4727 // declared in the entry block, so that it will be live for the
4728 // defer exit code (which will actually access it only if the
4729 // associated defer call has been activated).
4730 s.defvars[s.f.Entry.ID][memVar] = s.entryNewValue1A(ssa.OpVarDef, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][memVar])
4731 s.defvars[s.f.Entry.ID][memVar] = s.entryNewValue1A(ssa.OpVarLive, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][memVar])
4732 addrArgTemp = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(argTemp.Type()), argTemp, s.sp, s.defvars[s.f.Entry.ID][memVar])
4734 // Special case if we're still in the entry block. We can't use
4735 // the above code, since s.defvars[s.f.Entry.ID] isn't defined
4736 // until we end the entry block with s.endBlock().
4737 s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, argTemp, s.mem(), false)
4738 s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argTemp, s.mem(), false)
4739 addrArgTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(argTemp.Type()), argTemp, s.sp, s.mem(), false)
4741 if t.HasPointers() {
4742 // Since we may use this argTemp during exit depending on the
4743 // deferBits, we must define it unconditionally on entry.
4744 // Therefore, we must make sure it is zeroed out in the entry
4745 // block if it contains pointers, else GC may wrongly follow an
4746 // uninitialized pointer value.
4747 argTemp.SetNeedzero(true)
4751 s.move(t, addrArgTemp, a)
4754 // We are storing to the stack, hence we can avoid the full checks in
4755 // storeType() (no write barrier) and do a simple store().
4756 s.store(t, addrArgTemp, val)
4760 // openDeferExit generates SSA for processing all the open coded defers at exit.
4761 // The code involves loading deferBits, and checking each of the bits to see if
4762 // the corresponding defer statement was executed. For each bit that is turned
4763 // on, the associated defer call is made.
4764 func (s *state) openDeferExit() {
4765 deferExit := s.f.NewBlock(ssa.BlockPlain)
4766 s.endBlock().AddEdgeTo(deferExit)
4767 s.startBlock(deferExit)
4768 s.lastDeferExit = deferExit
4769 s.lastDeferCount = len(s.openDefers)
4770 zeroval := s.constInt8(types.Types[types.TUINT8], 0)
4771 // Test for and run defers in reverse order
4772 for i := len(s.openDefers) - 1; i >= 0; i-- {
4773 r := s.openDefers[i]
4774 bCond := s.f.NewBlock(ssa.BlockPlain)
4775 bEnd := s.f.NewBlock(ssa.BlockPlain)
4777 deferBits := s.variable(deferBitsVar, types.Types[types.TUINT8])
4778 // Generate code to check if the bit associated with the current
4780 bitval := s.constInt8(types.Types[types.TUINT8], 1<<uint(i))
4781 andval := s.newValue2(ssa.OpAnd8, types.Types[types.TUINT8], deferBits, bitval)
4782 eqVal := s.newValue2(ssa.OpEq8, types.Types[types.TBOOL], andval, zeroval)
4784 b.Kind = ssa.BlockIf
4788 bCond.AddEdgeTo(bEnd)
4791 // Clear this bit in deferBits and force store back to stack, so
4792 // we will not try to re-run this defer call if this defer call panics.
4793 nbitval := s.newValue1(ssa.OpCom8, types.Types[types.TUINT8], bitval)
4794 maskedval := s.newValue2(ssa.OpAnd8, types.Types[types.TUINT8], deferBits, nbitval)
4795 s.store(types.Types[types.TUINT8], s.deferBitsAddr, maskedval)
4796 // Use this value for following tests, so we keep previous
4798 s.vars[deferBitsVar] = maskedval
4800 // Generate code to call the function call of the defer, using the
4801 // closure/receiver/args that were stored in argtmps at the point
4802 // of the defer statement.
4804 stksize := fn.Type().ArgWidth()
4805 var ACArgs []*types.Type
4806 var ACResults []*types.Type
4807 var callArgs []*ssa.Value
4809 // rcvr in case of OCALLINTER
4810 v := s.load(r.rcvr.Type.Elem(), r.rcvr)
4811 ACArgs = append(ACArgs, types.Types[types.TUINTPTR])
4812 callArgs = append(callArgs, v)
4814 for j, argAddrVal := range r.argVals {
4815 f := getParam(r.n, j)
4816 ACArgs = append(ACArgs, f.Type)
4818 if !TypeOK(f.Type) {
4819 a = s.newValue2(ssa.OpDereference, f.Type, argAddrVal, s.mem())
4821 a = s.load(f.Type, argAddrVal)
4823 callArgs = append(callArgs, a)
4826 if r.closure != nil {
4827 v := s.load(r.closure.Type.Elem(), r.closure)
4828 s.maybeNilCheckClosure(v, callDefer)
4829 codeptr := s.rawLoad(types.Types[types.TUINTPTR], v)
4830 aux := ssa.ClosureAuxCall(s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults))
4831 call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, v)
4833 aux := ssa.StaticAuxCall(fn.(*ir.Name).Linksym(), s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults))
4834 call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
4836 callArgs = append(callArgs, s.mem())
4837 call.AddArgs(callArgs...)
4838 call.AuxInt = stksize
4839 s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
4840 // Make sure that the stack slots with pointers are kept live
4841 // through the call (which is a pre-emption point). Also, we will
4842 // use the first call of the last defer exit to compute liveness
4843 // for the deferreturn, so we want all stack slots to be live.
4844 if r.closureNode != nil {
4845 s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode, s.mem(), false)
4847 if r.rcvrNode != nil {
4848 if r.rcvrNode.Type().HasPointers() {
4849 s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.rcvrNode, s.mem(), false)
4852 for _, argNode := range r.argNodes {
4853 if argNode.Type().HasPointers() {
4854 s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argNode, s.mem(), false)
4863 func (s *state) callResult(n *ir.CallExpr, k callKind) *ssa.Value {
4864 return s.call(n, k, false)
4867 func (s *state) callAddr(n *ir.CallExpr, k callKind) *ssa.Value {
4868 return s.call(n, k, true)
4871 // Calls the function n using the specified call type.
4872 // Returns the address of the return value (or nil if none).
4873 func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Value {
4875 var callee *ir.Name // target function (if static)
4876 var closure *ssa.Value // ptr to closure to run (if dynamic)
4877 var codeptr *ssa.Value // ptr to target code (if dynamic)
4878 var rcvr *ssa.Value // receiver to set
4880 var ACArgs []*types.Type // AuxCall args
4881 var ACResults []*types.Type // AuxCall results
4882 var callArgs []*ssa.Value // For late-expansion, the args themselves (not stored, args to the call instead).
4884 callABI := s.f.ABIDefault
4886 if !objabi.Experiment.RegabiArgs {
4887 var magicFnNameSym *types.Sym
4888 if fn.Name() != nil {
4889 magicFnNameSym = fn.Name().Sym()
4890 ss := magicFnNameSym.Name
4891 if strings.HasSuffix(ss, magicNameDotSuffix) {
4895 if magicFnNameSym == nil && n.Op() == ir.OCALLINTER {
4896 magicFnNameSym = fn.(*ir.SelectorExpr).Sym()
4897 ss := magicFnNameSym.Name
4898 if strings.HasSuffix(ss, magicNameDotSuffix[1:]) {
4904 if objabi.Experiment.RegabiDefer && k != callNormal && (len(n.Args) != 0 || n.Op() == ir.OCALLINTER || n.X.Type().NumResults() != 0) {
4905 s.Fatalf("go/defer call with arguments: %v", n)
4910 if k == callNormal && fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC {
4913 if objabi.Experiment.RegabiArgs {
4914 // This is a static call, so it may be
4915 // a direct call to a non-ABIInternal
4916 // function. fn.Func may be nil for
4917 // some compiler-generated functions,
4918 // but those are all ABIInternal.
4920 callABI = abiForFunc(fn.Func, s.f.ABI0, s.f.ABI1)
4923 // TODO(register args) remove after register abi is working
4924 inRegistersImported := fn.Pragma()&ir.RegisterParams != 0
4925 inRegistersSamePackage := fn.Func != nil && fn.Func.Pragma&ir.RegisterParams != 0
4926 if inRegistersImported || inRegistersSamePackage {
4932 closure = s.expr(fn)
4933 if k != callDefer && k != callDeferStack {
4934 // Deferred nil function needs to panic when the function is invoked,
4935 // not the point of defer statement.
4936 s.maybeNilCheckClosure(closure, k)
4939 base.Fatalf("OCALLMETH missed by walkCall")
4941 if fn.Op() != ir.ODOTINTER {
4942 s.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op())
4944 fn := fn.(*ir.SelectorExpr)
4945 var iclosure *ssa.Value
4946 iclosure, rcvr = s.getClosureAndRcvr(fn)
4947 if k == callNormal {
4948 codeptr = s.load(types.Types[types.TUINTPTR], iclosure)
4954 if !objabi.Experiment.RegabiArgs {
4955 if regAbiForFuncType(n.X.Type().FuncType()) {
4956 // Magic last type in input args to call
4961 params := callABI.ABIAnalyze(n.X.Type(), false /* Do not set (register) nNames from caller side -- can cause races. */)
4962 types.CalcSize(fn.Type())
4963 stksize := params.ArgWidth() // includes receiver, args, and results
4965 res := n.X.Type().Results()
4966 if k == callNormal {
4967 for _, p := range params.OutParams() {
4968 ACResults = append(ACResults, p.Type)
4973 if k == callDeferStack {
4974 // Make a defer struct d on the stack.
4975 t := deferstruct(stksize)
4976 d := typecheck.TempAt(n.Pos(), s.curfn, t)
4978 s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, d, s.mem())
4981 // Must match reflect.go:deferstruct and src/runtime/runtime2.go:_defer.
4983 s.store(types.Types[types.TUINT32],
4984 s.newValue1I(ssa.OpOffPtr, types.Types[types.TUINT32].PtrTo(), t.FieldOff(0), addr),
4985 s.constInt32(types.Types[types.TUINT32], int32(stksize)))
4986 // 1: started, set in deferprocStack
4987 // 2: heap, set in deferprocStack
4989 // 4: sp, set in deferprocStack
4990 // 5: pc, set in deferprocStack
4992 s.store(closure.Type,
4993 s.newValue1I(ssa.OpOffPtr, closure.Type.PtrTo(), t.FieldOff(6), addr),
4995 // 7: panic, set in deferprocStack
4996 // 8: link, set in deferprocStack
5001 // Then, store all the arguments of the defer call.
5003 off := t.FieldOff(12) // TODO register args: be sure this isn't a hardcoded param stack offset.
5006 // Set receiver (for interface calls). Always a pointer.
5008 p := s.newValue1I(ssa.OpOffPtr, ft.Recv().Type.PtrTo(), off, addr)
5009 s.store(types.Types[types.TUINTPTR], p, rcvr)
5011 // Set receiver (for method calls).
5012 if n.Op() == ir.OCALLMETH {
5013 base.Fatalf("OCALLMETH missed by walkCall")
5016 for _, f := range ft.Params().Fields().Slice() {
5017 s.storeArgWithBase(args[0], f.Type, addr, off+abi.FieldOffsetOf(f))
5021 // Call runtime.deferprocStack with pointer to _defer record.
5022 ACArgs = append(ACArgs, types.Types[types.TUINTPTR])
5023 aux := ssa.StaticAuxCall(ir.Syms.DeferprocStack, s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults))
5024 callArgs = append(callArgs, addr, s.mem())
5025 call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
5026 call.AddArgs(callArgs...)
5027 if stksize < int64(types.PtrSize) {
5028 // We need room for both the call to deferprocStack and the call to
5029 // the deferred function.
5030 // TODO(register args) Revisit this if/when we pass args in registers.
5031 stksize = int64(types.PtrSize)
5033 call.AuxInt = stksize
5035 // Store arguments to stack, including defer/go arguments and receiver for method calls.
5036 // These are written in SP-offset order.
5037 argStart := base.Ctxt.FixedFrameSize()
5039 if k != callNormal {
5040 // Write argsize and closure (args to newproc/deferproc).
5041 argsize := s.constInt32(types.Types[types.TUINT32], int32(stksize))
5042 ACArgs = append(ACArgs, types.Types[types.TUINT32]) // not argExtra
5043 callArgs = append(callArgs, argsize)
5044 ACArgs = append(ACArgs, types.Types[types.TUINTPTR])
5045 callArgs = append(callArgs, closure)
5046 stksize += 2 * int64(types.PtrSize)
5047 argStart += 2 * int64(types.PtrSize)
5050 // Set receiver (for interface calls).
5052 callArgs = append(callArgs, rcvr)
5058 if n.Op() == ir.OCALLMETH {
5059 base.Fatalf("OCALLMETH missed by walkCall")
5062 for _, p := range params.InParams() { // includes receiver for interface calls
5063 ACArgs = append(ACArgs, p.Type)
5065 for i, n := range args {
5066 callArgs = append(callArgs, s.putArg(n, t.Params().Field(i).Type))
5069 callArgs = append(callArgs, s.mem())
5073 case k == callDefer:
5074 aux := ssa.StaticAuxCall(ir.Syms.Deferproc, s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults)) // TODO paramResultInfo for DeferProc
5075 call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
5077 aux := ssa.StaticAuxCall(ir.Syms.Newproc, s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults))
5078 call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) // TODO paramResultInfo for NewProc
5079 case closure != nil:
5080 // rawLoad because loading the code pointer from a
5081 // closure is always safe, but IsSanitizerSafeAddr
5082 // can't always figure that out currently, and it's
5083 // critical that we not clobber any arguments already
5084 // stored onto the stack.
5085 codeptr = s.rawLoad(types.Types[types.TUINTPTR], closure)
5086 aux := ssa.ClosureAuxCall(callABI.ABIAnalyzeTypes(nil, ACArgs, ACResults))
5087 call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, closure)
5088 case codeptr != nil:
5089 // Note that the "receiver" parameter is nil because the actual receiver is the first input parameter.
5090 aux := ssa.InterfaceAuxCall(params)
5091 call = s.newValue1A(ssa.OpInterLECall, aux.LateExpansionResultType(), aux, codeptr)
5093 aux := ssa.StaticAuxCall(callTargetLSym(callee), params)
5094 call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
5096 s.Fatalf("bad call type %v %v", n.Op(), n)
5098 call.AddArgs(callArgs...)
5099 call.AuxInt = stksize // Call operations carry the argsize of the callee along with them
5102 s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
5103 // Insert OVARLIVE nodes
5104 for _, name := range n.KeepAlive {
5105 s.stmt(ir.NewUnaryExpr(n.Pos(), ir.OVARLIVE, name))
5108 // Finish block for defers
5109 if k == callDefer || k == callDeferStack {
5111 b.Kind = ssa.BlockDefer
5113 bNext := s.f.NewBlock(ssa.BlockPlain)
5115 // Add recover edge to exit code.
5116 r := s.f.NewBlock(ssa.BlockPlain)
5120 b.Likely = ssa.BranchLikely
5124 if res.NumFields() == 0 || k != callNormal {
5125 // call has no return value. Continue with the next statement.
5129 if returnResultAddr {
5130 return s.resultAddrOfCall(call, 0, fp.Type)
5132 return s.newValue1I(ssa.OpSelectN, fp.Type, 0, call)
5135 // maybeNilCheckClosure checks if a nil check of a closure is needed in some
5136 // architecture-dependent situations and, if so, emits the nil check.
5137 func (s *state) maybeNilCheckClosure(closure *ssa.Value, k callKind) {
5138 if Arch.LinkArch.Family == sys.Wasm || objabi.GOOS == "aix" && k != callGo {
5139 // On AIX, the closure needs to be verified as fn can be nil, except if it's a call go. This needs to be handled by the runtime to have the "go of nil func value" error.
5140 // TODO(neelance): On other architectures this should be eliminated by the optimization steps
5145 // getClosureAndRcvr returns values for the appropriate closure and receiver of an
5147 func (s *state) getClosureAndRcvr(fn *ir.SelectorExpr) (*ssa.Value, *ssa.Value) {
5149 itab := s.newValue1(ssa.OpITab, types.Types[types.TUINTPTR], i)
5151 itabidx := fn.Offset() + 2*int64(types.PtrSize) + 8 // offset of fun field in runtime.itab
5152 closure := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab)
5153 rcvr := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, i)
5154 return closure, rcvr
5157 // etypesign returns the signed-ness of e, for integer/pointer etypes.
5158 // -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer.
5159 func etypesign(e types.Kind) int8 {
5161 case types.TINT8, types.TINT16, types.TINT32, types.TINT64, types.TINT:
5163 case types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINT, types.TUINTPTR, types.TUNSAFEPTR:
5169 // addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
5170 // The value that the returned Value represents is guaranteed to be non-nil.
5171 func (s *state) addr(n ir.Node) *ssa.Value {
5172 if n.Op() != ir.ONAME {
5178 s.Fatalf("addr of canSSA expression: %+v", n)
5181 t := types.NewPtr(n.Type())
5182 linksymOffset := func(lsym *obj.LSym, offset int64) *ssa.Value {
5183 v := s.entryNewValue1A(ssa.OpAddr, t, lsym, s.sb)
5184 // TODO: Make OpAddr use AuxInt as well as Aux.
5186 v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, offset, v)
5191 case ir.OLINKSYMOFFSET:
5192 no := n.(*ir.LinksymOffsetExpr)
5193 return linksymOffset(no.Linksym, no.Offset_)
5196 if n.Heapaddr != nil {
5197 return s.expr(n.Heapaddr)
5202 return linksymOffset(n.Linksym(), 0)
5209 s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
5212 return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), !ir.IsAutoTmp(n))
5214 case ir.PPARAMOUT: // Same as PAUTO -- cannot generate LEA early.
5215 // ensure that we reuse symbols for out parameters so
5216 // that cse works on their addresses
5217 return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), true)
5219 s.Fatalf("variable address class %v not implemented", n.Class)
5223 // load return from callee
5224 n := n.(*ir.ResultExpr)
5225 return s.resultAddrOfCall(s.prevCall, n.Index, n.Type())
5227 n := n.(*ir.IndexExpr)
5228 if n.X.Type().IsSlice() {
5230 i := s.expr(n.Index)
5231 len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], a)
5232 i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
5233 p := s.newValue1(ssa.OpSlicePtr, t, a)
5234 return s.newValue2(ssa.OpPtrIndex, t, p, i)
5237 i := s.expr(n.Index)
5238 len := s.constInt(types.Types[types.TINT], n.X.Type().NumElem())
5239 i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
5240 return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.X.Type().Elem()), a, i)
5243 n := n.(*ir.StarExpr)
5244 return s.exprPtr(n.X, n.Bounded(), n.Pos())
5246 n := n.(*ir.SelectorExpr)
5248 return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p)
5250 n := n.(*ir.SelectorExpr)
5251 p := s.exprPtr(n.X, n.Bounded(), n.Pos())
5252 return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p)
5254 n := n.(*ir.ConvExpr)
5255 if n.Type() == n.X.Type() {
5259 return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type
5260 case ir.OCALLFUNC, ir.OCALLINTER:
5261 n := n.(*ir.CallExpr)
5262 return s.callAddr(n, callNormal)
5264 n := n.(*ir.TypeAssertExpr)
5265 v, _ := s.dottype(n, false)
5266 if v.Op != ssa.OpLoad {
5267 s.Fatalf("dottype of non-load")
5269 if v.Args[1] != s.mem() {
5270 s.Fatalf("memory no longer live from dottype load")
5274 s.Fatalf("unhandled addr %v", n.Op())
5279 // canSSA reports whether n is SSA-able.
5280 // n must be an ONAME (or an ODOT sequence with an ONAME base).
5281 func (s *state) canSSA(n ir.Node) bool {
5282 if base.Flag.N != 0 {
5287 if nn.Op() == ir.ODOT {
5288 nn := nn.(*ir.SelectorExpr)
5292 if nn.Op() == ir.OINDEX {
5293 nn := nn.(*ir.IndexExpr)
5294 if nn.X.Type().IsArray() {
5301 if n.Op() != ir.ONAME {
5304 return s.canSSAName(n.(*ir.Name)) && TypeOK(n.Type())
5307 func (s *state) canSSAName(name *ir.Name) bool {
5308 if name.Addrtaken() || !name.OnStack() {
5314 // TODO: handle this case? Named return values must be
5315 // in memory so that the deferred function can see them.
5316 // Maybe do: if !strings.HasPrefix(n.String(), "~") { return false }
5317 // Or maybe not, see issue 18860. Even unnamed return values
5318 // must be written back so if a defer recovers, the caller can see them.
5321 if s.cgoUnsafeArgs {
5322 // Cgo effectively takes the address of all result args,
5323 // but the compiler can't see that.
5327 if name.Class == ir.PPARAM && name.Sym() != nil && name.Sym().Name == ".this" {
5328 // wrappers generated by genwrapper need to update
5329 // the .this pointer in place.
5330 // TODO: treat as a PPARAMOUT?
5334 // TODO: try to make more variables SSAable?
5337 // TypeOK reports whether variables of type t are SSA-able.
5338 func TypeOK(t *types.Type) bool {
5340 if t.Width > int64(4*types.PtrSize) {
5341 // 4*Widthptr is an arbitrary constant. We want it
5342 // to be at least 3*Widthptr so slices can be registerized.
5343 // Too big and we'll introduce too much register pressure.
5348 // We can't do larger arrays because dynamic indexing is
5349 // not supported on SSA variables.
5350 // TODO: allow if all indexes are constant.
5351 if t.NumElem() <= 1 {
5352 return TypeOK(t.Elem())
5356 if t.NumFields() > ssa.MaxStruct {
5359 for _, t1 := range t.Fields().Slice() {
5360 if !TypeOK(t1.Type) {
5370 // exprPtr evaluates n to a pointer and nil-checks it.
5371 func (s *state) exprPtr(n ir.Node, bounded bool, lineno src.XPos) *ssa.Value {
5373 if bounded || n.NonNil() {
5374 if s.f.Frontend().Debug_checknil() && lineno.Line() > 1 {
5375 s.f.Warnl(lineno, "removed nil check")
5383 // nilCheck generates nil pointer checking code.
5384 // Used only for automatically inserted nil checks,
5385 // not for user code like 'x != nil'.
5386 func (s *state) nilCheck(ptr *ssa.Value) {
5387 if base.Debug.DisableNil != 0 || s.curfn.NilCheckDisabled() {
5390 s.newValue2(ssa.OpNilCheck, types.TypeVoid, ptr, s.mem())
5393 // boundsCheck generates bounds checking code. Checks if 0 <= idx <[=] len, branches to exit if not.
5394 // Starts a new block on return.
5395 // On input, len must be converted to full int width and be nonnegative.
5396 // Returns idx converted to full int width.
5397 // If bounded is true then caller guarantees the index is not out of bounds
5398 // (but boundsCheck will still extend the index to full int width).
5399 func (s *state) boundsCheck(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value {
5400 idx = s.extendIndex(idx, len, kind, bounded)
5402 if bounded || base.Flag.B != 0 {
5403 // If bounded or bounds checking is flag-disabled, then no check necessary,
5404 // just return the extended index.
5406 // Here, bounded == true if the compiler generated the index itself,
5407 // such as in the expansion of a slice initializer. These indexes are
5408 // compiler-generated, not Go program variables, so they cannot be
5409 // attacker-controlled, so we can omit Spectre masking as well.
5411 // Note that we do not want to omit Spectre masking in code like:
5413 // if 0 <= i && i < len(x) {
5417 // Lucky for us, bounded==false for that code.
5418 // In that case (handled below), we emit a bound check (and Spectre mask)
5419 // and then the prove pass will remove the bounds check.
5420 // In theory the prove pass could potentially remove certain
5421 // Spectre masks, but it's very delicate and probably better
5422 // to be conservative and leave them all in.
5426 bNext := s.f.NewBlock(ssa.BlockPlain)
5427 bPanic := s.f.NewBlock(ssa.BlockExit)
5429 if !idx.Type.IsSigned() {
5431 case ssa.BoundsIndex:
5432 kind = ssa.BoundsIndexU
5433 case ssa.BoundsSliceAlen:
5434 kind = ssa.BoundsSliceAlenU
5435 case ssa.BoundsSliceAcap:
5436 kind = ssa.BoundsSliceAcapU
5437 case ssa.BoundsSliceB:
5438 kind = ssa.BoundsSliceBU
5439 case ssa.BoundsSlice3Alen:
5440 kind = ssa.BoundsSlice3AlenU
5441 case ssa.BoundsSlice3Acap:
5442 kind = ssa.BoundsSlice3AcapU
5443 case ssa.BoundsSlice3B:
5444 kind = ssa.BoundsSlice3BU
5445 case ssa.BoundsSlice3C:
5446 kind = ssa.BoundsSlice3CU
5451 if kind == ssa.BoundsIndex || kind == ssa.BoundsIndexU {
5452 cmp = s.newValue2(ssa.OpIsInBounds, types.Types[types.TBOOL], idx, len)
5454 cmp = s.newValue2(ssa.OpIsSliceInBounds, types.Types[types.TBOOL], idx, len)
5457 b.Kind = ssa.BlockIf
5459 b.Likely = ssa.BranchLikely
5463 s.startBlock(bPanic)
5464 if Arch.LinkArch.Family == sys.Wasm {
5465 // TODO(khr): figure out how to do "register" based calling convention for bounds checks.
5466 // Should be similar to gcWriteBarrier, but I can't make it work.
5467 s.rtcall(BoundsCheckFunc[kind], false, nil, idx, len)
5469 mem := s.newValue3I(ssa.OpPanicBounds, types.TypeMem, int64(kind), idx, len, s.mem())
5470 s.endBlock().SetControl(mem)
5474 // In Spectre index mode, apply an appropriate mask to avoid speculative out-of-bounds accesses.
5475 if base.Flag.Cfg.SpectreIndex {
5476 op := ssa.OpSpectreIndex
5477 if kind != ssa.BoundsIndex && kind != ssa.BoundsIndexU {
5478 op = ssa.OpSpectreSliceIndex
5480 idx = s.newValue2(op, types.Types[types.TINT], idx, len)
5486 // If cmp (a bool) is false, panic using the given function.
5487 func (s *state) check(cmp *ssa.Value, fn *obj.LSym) {
5489 b.Kind = ssa.BlockIf
5491 b.Likely = ssa.BranchLikely
5492 bNext := s.f.NewBlock(ssa.BlockPlain)
5494 pos := base.Ctxt.PosTable.Pos(line)
5495 fl := funcLine{f: fn, base: pos.Base(), line: pos.Line()}
5496 bPanic := s.panics[fl]
5498 bPanic = s.f.NewBlock(ssa.BlockPlain)
5499 s.panics[fl] = bPanic
5500 s.startBlock(bPanic)
5501 // The panic call takes/returns memory to ensure that the right
5502 // memory state is observed if the panic happens.
5503 s.rtcall(fn, false, nil)
5510 func (s *state) intDivide(n ir.Node, a, b *ssa.Value) *ssa.Value {
5513 case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64:
5519 // do a size-appropriate check for zero
5520 cmp := s.newValue2(s.ssaOp(ir.ONE, n.Type()), types.Types[types.TBOOL], b, s.zeroVal(n.Type()))
5521 s.check(cmp, ir.Syms.Panicdivide)
5523 return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
5526 // rtcall issues a call to the given runtime function fn with the listed args.
5527 // Returns a slice of results of the given result types.
5528 // The call is added to the end of the current block.
5529 // If returns is false, the block is marked as an exit block.
5530 func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args ...*ssa.Value) []*ssa.Value {
5532 // Write args to the stack
5533 off := base.Ctxt.FixedFrameSize()
5534 var callArgs []*ssa.Value
5535 var callArgTypes []*types.Type
5537 for _, arg := range args {
5539 off = types.Rnd(off, t.Alignment())
5541 callArgs = append(callArgs, arg)
5542 callArgTypes = append(callArgTypes, t)
5545 off = types.Rnd(off, int64(types.RegSize))
5547 // Accumulate results types and offsets
5549 for _, t := range results {
5550 offR = types.Rnd(offR, t.Alignment())
5556 aux := ssa.StaticAuxCall(fn, s.f.ABIDefault.ABIAnalyzeTypes(nil, callArgTypes, results))
5557 callArgs = append(callArgs, s.mem())
5558 call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
5559 call.AddArgs(callArgs...)
5560 s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(results)), call)
5565 b.Kind = ssa.BlockExit
5567 call.AuxInt = off - base.Ctxt.FixedFrameSize()
5568 if len(results) > 0 {
5569 s.Fatalf("panic call can't have results")
5575 res := make([]*ssa.Value, len(results))
5576 for i, t := range results {
5577 off = types.Rnd(off, t.Alignment())
5578 res[i] = s.resultOfCall(call, int64(i), t)
5581 off = types.Rnd(off, int64(types.PtrSize))
5583 // Remember how much callee stack space we needed.
5589 // do *left = right for type t.
5590 func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask, leftIsStmt bool) {
5591 s.instrument(t, left, instrumentWrite)
5593 if skip == 0 && (!t.HasPointers() || ssa.IsStackAddr(left)) {
5594 // Known to not have write barrier. Store the whole type.
5595 s.vars[memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, left, right, s.mem(), leftIsStmt)
5599 // store scalar fields first, so write barrier stores for
5600 // pointer fields can be grouped together, and scalar values
5601 // don't need to be live across the write barrier call.
5602 // TODO: if the writebarrier pass knows how to reorder stores,
5603 // we can do a single store here as long as skip==0.
5604 s.storeTypeScalars(t, left, right, skip)
5605 if skip&skipPtr == 0 && t.HasPointers() {
5606 s.storeTypePtrs(t, left, right)
5610 // do *left = right for all scalar (non-pointer) parts of t.
5611 func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip skipMask) {
5613 case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex():
5614 s.store(t, left, right)
5615 case t.IsPtrShaped():
5616 if t.IsPtr() && t.Elem().NotInHeap() {
5617 s.store(t, left, right) // see issue 42032
5619 // otherwise, no scalar fields.
5621 if skip&skipLen != 0 {
5624 len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], right)
5625 lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
5626 s.store(types.Types[types.TINT], lenAddr, len)
5628 if skip&skipLen == 0 {
5629 len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], right)
5630 lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
5631 s.store(types.Types[types.TINT], lenAddr, len)
5633 if skip&skipCap == 0 {
5634 cap := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], right)
5635 capAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, 2*s.config.PtrSize, left)
5636 s.store(types.Types[types.TINT], capAddr, cap)
5638 case t.IsInterface():
5639 // itab field doesn't need a write barrier (even though it is a pointer).
5640 itab := s.newValue1(ssa.OpITab, s.f.Config.Types.BytePtr, right)
5641 s.store(types.Types[types.TUINTPTR], left, itab)
5644 for i := 0; i < n; i++ {
5645 ft := t.FieldType(i)
5646 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
5647 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
5648 s.storeTypeScalars(ft, addr, val, 0)
5650 case t.IsArray() && t.NumElem() == 0:
5652 case t.IsArray() && t.NumElem() == 1:
5653 s.storeTypeScalars(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right), 0)
5655 s.Fatalf("bad write barrier type %v", t)
5659 // do *left = right for all pointer parts of t.
5660 func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
5662 case t.IsPtrShaped():
5663 if t.IsPtr() && t.Elem().NotInHeap() {
5664 break // see issue 42032
5666 s.store(t, left, right)
5668 ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, right)
5669 s.store(s.f.Config.Types.BytePtr, left, ptr)
5671 elType := types.NewPtr(t.Elem())
5672 ptr := s.newValue1(ssa.OpSlicePtr, elType, right)
5673 s.store(elType, left, ptr)
5674 case t.IsInterface():
5675 // itab field is treated as a scalar.
5676 idata := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, right)
5677 idataAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.BytePtrPtr, s.config.PtrSize, left)
5678 s.store(s.f.Config.Types.BytePtr, idataAddr, idata)
5681 for i := 0; i < n; i++ {
5682 ft := t.FieldType(i)
5683 if !ft.HasPointers() {
5686 addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
5687 val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
5688 s.storeTypePtrs(ft, addr, val)
5690 case t.IsArray() && t.NumElem() == 0:
5692 case t.IsArray() && t.NumElem() == 1:
5693 s.storeTypePtrs(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right))
5695 s.Fatalf("bad write barrier type %v", t)
5699 // putArg evaluates n for the purpose of passing it as an argument to a function and returns the value for the call.
5700 func (s *state) putArg(n ir.Node, t *types.Type) *ssa.Value {
5703 a = s.newValue2(ssa.OpDereference, t, s.addr(n), s.mem())
5710 func (s *state) storeArgWithBase(n ir.Node, t *types.Type, base *ssa.Value, off int64) {
5711 pt := types.NewPtr(t)
5714 // Use special routine that avoids allocation on duplicate offsets.
5715 addr = s.constOffPtrSP(pt, off)
5717 addr = s.newValue1I(ssa.OpOffPtr, pt, off, base)
5727 s.storeType(t, addr, a, 0, false)
5730 // slice computes the slice v[i:j:k] and returns ptr, len, and cap of result.
5731 // i,j,k may be nil, in which case they are set to their default value.
5732 // v may be a slice, string or pointer to an array.
5733 func (s *state) slice(v, i, j, k *ssa.Value, bounded bool) (p, l, c *ssa.Value) {
5735 var ptr, len, cap *ssa.Value
5738 ptr = s.newValue1(ssa.OpSlicePtr, types.NewPtr(t.Elem()), v)
5739 len = s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], v)
5740 cap = s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], v)
5742 ptr = s.newValue1(ssa.OpStringPtr, types.NewPtr(types.Types[types.TUINT8]), v)
5743 len = s.newValue1(ssa.OpStringLen, types.Types[types.TINT], v)
5746 if !t.Elem().IsArray() {
5747 s.Fatalf("bad ptr to array in slice %v\n", t)
5750 ptr = s.newValue1(ssa.OpCopy, types.NewPtr(t.Elem().Elem()), v)
5751 len = s.constInt(types.Types[types.TINT], t.Elem().NumElem())
5754 s.Fatalf("bad type in slice %v\n", t)
5757 // Set default values
5759 i = s.constInt(types.Types[types.TINT], 0)
5770 // Panic if slice indices are not in bounds.
5771 // Make sure we check these in reverse order so that we're always
5772 // comparing against a value known to be nonnegative. See issue 28797.
5775 kind := ssa.BoundsSlice3Alen
5777 kind = ssa.BoundsSlice3Acap
5779 k = s.boundsCheck(k, cap, kind, bounded)
5782 j = s.boundsCheck(j, k, ssa.BoundsSlice3B, bounded)
5784 i = s.boundsCheck(i, j, ssa.BoundsSlice3C, bounded)
5787 kind := ssa.BoundsSliceAlen
5789 kind = ssa.BoundsSliceAcap
5791 j = s.boundsCheck(j, k, kind, bounded)
5793 i = s.boundsCheck(i, j, ssa.BoundsSliceB, bounded)
5796 // Word-sized integer operations.
5797 subOp := s.ssaOp(ir.OSUB, types.Types[types.TINT])
5798 mulOp := s.ssaOp(ir.OMUL, types.Types[types.TINT])
5799 andOp := s.ssaOp(ir.OAND, types.Types[types.TINT])
5801 // Calculate the length (rlen) and capacity (rcap) of the new slice.
5802 // For strings the capacity of the result is unimportant. However,
5803 // we use rcap to test if we've generated a zero-length slice.
5804 // Use length of strings for that.
5805 rlen := s.newValue2(subOp, types.Types[types.TINT], j, i)
5807 if j != k && !t.IsString() {
5808 rcap = s.newValue2(subOp, types.Types[types.TINT], k, i)
5811 if (i.Op == ssa.OpConst64 || i.Op == ssa.OpConst32) && i.AuxInt == 0 {
5812 // No pointer arithmetic necessary.
5813 return ptr, rlen, rcap
5816 // Calculate the base pointer (rptr) for the new slice.
5818 // Generate the following code assuming that indexes are in bounds.
5819 // The masking is to make sure that we don't generate a slice
5820 // that points to the next object in memory. We cannot just set
5821 // the pointer to nil because then we would create a nil slice or
5826 // rptr = ptr + (mask(rcap) & (i * stride))
5828 // Where mask(x) is 0 if x==0 and -1 if x>0 and stride is the width
5829 // of the element type.
5830 stride := s.constInt(types.Types[types.TINT], ptr.Type.Elem().Width)
5832 // The delta is the number of bytes to offset ptr by.
5833 delta := s.newValue2(mulOp, types.Types[types.TINT], i, stride)
5835 // If we're slicing to the point where the capacity is zero,
5836 // zero out the delta.
5837 mask := s.newValue1(ssa.OpSlicemask, types.Types[types.TINT], rcap)
5838 delta = s.newValue2(andOp, types.Types[types.TINT], delta, mask)
5840 // Compute rptr = ptr + delta.
5841 rptr := s.newValue2(ssa.OpAddPtr, ptr.Type, ptr, delta)
5843 return rptr, rlen, rcap
5846 type u642fcvtTab struct {
5847 leq, cvt2F, and, rsh, or, add ssa.Op
5848 one func(*state, *types.Type, int64) *ssa.Value
5851 var u64_f64 = u642fcvtTab{
5853 cvt2F: ssa.OpCvt64to64F,
5855 rsh: ssa.OpRsh64Ux64,
5858 one: (*state).constInt64,
5861 var u64_f32 = u642fcvtTab{
5863 cvt2F: ssa.OpCvt64to32F,
5865 rsh: ssa.OpRsh64Ux64,
5868 one: (*state).constInt64,
5871 func (s *state) uint64Tofloat64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
5872 return s.uint64Tofloat(&u64_f64, n, x, ft, tt)
5875 func (s *state) uint64Tofloat32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
5876 return s.uint64Tofloat(&u64_f32, n, x, ft, tt)
5879 func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
5881 // result = (floatY) x
5883 // y = uintX(x) ; y = x & 1
5884 // z = uintX(x) ; z = z >> 1
5887 // result = floatY(z)
5888 // result = result + result
5891 // Code borrowed from old code generator.
5892 // What's going on: large 64-bit "unsigned" looks like
5893 // negative number to hardware's integer-to-float
5894 // conversion. However, because the mantissa is only
5895 // 63 bits, we don't need the LSB, so instead we do an
5896 // unsigned right shift (divide by two), convert, and
5897 // double. However, before we do that, we need to be
5898 // sure that we do not lose a "1" if that made the
5899 // difference in the resulting rounding. Therefore, we
5900 // preserve it, and OR (not ADD) it back in. The case
5901 // that matters is when the eleven discarded bits are
5902 // equal to 10000000001; that rounds up, and the 1 cannot
5903 // be lost else it would round down if the LSB of the
5904 // candidate mantissa is 0.
5905 cmp := s.newValue2(cvttab.leq, types.Types[types.TBOOL], s.zeroVal(ft), x)
5907 b.Kind = ssa.BlockIf
5909 b.Likely = ssa.BranchLikely
5911 bThen := s.f.NewBlock(ssa.BlockPlain)
5912 bElse := s.f.NewBlock(ssa.BlockPlain)
5913 bAfter := s.f.NewBlock(ssa.BlockPlain)
5917 a0 := s.newValue1(cvttab.cvt2F, tt, x)
5920 bThen.AddEdgeTo(bAfter)
5924 one := cvttab.one(s, ft, 1)
5925 y := s.newValue2(cvttab.and, ft, x, one)
5926 z := s.newValue2(cvttab.rsh, ft, x, one)
5927 z = s.newValue2(cvttab.or, ft, z, y)
5928 a := s.newValue1(cvttab.cvt2F, tt, z)
5929 a1 := s.newValue2(cvttab.add, tt, a, a)
5932 bElse.AddEdgeTo(bAfter)
5934 s.startBlock(bAfter)
5935 return s.variable(n, n.Type())
5938 type u322fcvtTab struct {
5939 cvtI2F, cvtF2F ssa.Op
5942 var u32_f64 = u322fcvtTab{
5943 cvtI2F: ssa.OpCvt32to64F,
5947 var u32_f32 = u322fcvtTab{
5948 cvtI2F: ssa.OpCvt32to32F,
5949 cvtF2F: ssa.OpCvt64Fto32F,
5952 func (s *state) uint32Tofloat64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
5953 return s.uint32Tofloat(&u32_f64, n, x, ft, tt)
5956 func (s *state) uint32Tofloat32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
5957 return s.uint32Tofloat(&u32_f32, n, x, ft, tt)
5960 func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
5962 // result = floatY(x)
5964 // result = floatY(float64(x) + (1<<32))
5966 cmp := s.newValue2(ssa.OpLeq32, types.Types[types.TBOOL], s.zeroVal(ft), x)
5968 b.Kind = ssa.BlockIf
5970 b.Likely = ssa.BranchLikely
5972 bThen := s.f.NewBlock(ssa.BlockPlain)
5973 bElse := s.f.NewBlock(ssa.BlockPlain)
5974 bAfter := s.f.NewBlock(ssa.BlockPlain)
5978 a0 := s.newValue1(cvttab.cvtI2F, tt, x)
5981 bThen.AddEdgeTo(bAfter)
5985 a1 := s.newValue1(ssa.OpCvt32to64F, types.Types[types.TFLOAT64], x)
5986 twoToThe32 := s.constFloat64(types.Types[types.TFLOAT64], float64(1<<32))
5987 a2 := s.newValue2(ssa.OpAdd64F, types.Types[types.TFLOAT64], a1, twoToThe32)
5988 a3 := s.newValue1(cvttab.cvtF2F, tt, a2)
5992 bElse.AddEdgeTo(bAfter)
5994 s.startBlock(bAfter)
5995 return s.variable(n, n.Type())
5998 // referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
5999 func (s *state) referenceTypeBuiltin(n *ir.UnaryExpr, x *ssa.Value) *ssa.Value {
6000 if !n.X.Type().IsMap() && !n.X.Type().IsChan() {
6001 s.Fatalf("node must be a map or a channel")
6007 // return *((*int)n)
6009 // return *(((*int)n)+1)
6012 nilValue := s.constNil(types.Types[types.TUINTPTR])
6013 cmp := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], x, nilValue)
6015 b.Kind = ssa.BlockIf
6017 b.Likely = ssa.BranchUnlikely
6019 bThen := s.f.NewBlock(ssa.BlockPlain)
6020 bElse := s.f.NewBlock(ssa.BlockPlain)
6021 bAfter := s.f.NewBlock(ssa.BlockPlain)
6023 // length/capacity of a nil map/chan is zero
6026 s.vars[n] = s.zeroVal(lenType)
6028 bThen.AddEdgeTo(bAfter)
6034 // length is stored in the first word for map/chan
6035 s.vars[n] = s.load(lenType, x)
6037 // capacity is stored in the second word for chan
6038 sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x)
6039 s.vars[n] = s.load(lenType, sw)
6041 s.Fatalf("op must be OLEN or OCAP")
6044 bElse.AddEdgeTo(bAfter)
6046 s.startBlock(bAfter)
6047 return s.variable(n, lenType)
6050 type f2uCvtTab struct {
6051 ltf, cvt2U, subf, or ssa.Op
6052 floatValue func(*state, *types.Type, float64) *ssa.Value
6053 intValue func(*state, *types.Type, int64) *ssa.Value
6057 var f32_u64 = f2uCvtTab{
6059 cvt2U: ssa.OpCvt32Fto64,
6062 floatValue: (*state).constFloat32,
6063 intValue: (*state).constInt64,
6067 var f64_u64 = f2uCvtTab{
6069 cvt2U: ssa.OpCvt64Fto64,
6072 floatValue: (*state).constFloat64,
6073 intValue: (*state).constInt64,
6077 var f32_u32 = f2uCvtTab{
6079 cvt2U: ssa.OpCvt32Fto32,
6082 floatValue: (*state).constFloat32,
6083 intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
6087 var f64_u32 = f2uCvtTab{
6089 cvt2U: ssa.OpCvt64Fto32,
6092 floatValue: (*state).constFloat64,
6093 intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
6097 func (s *state) float32ToUint64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
6098 return s.floatToUint(&f32_u64, n, x, ft, tt)
6100 func (s *state) float64ToUint64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
6101 return s.floatToUint(&f64_u64, n, x, ft, tt)
6104 func (s *state) float32ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
6105 return s.floatToUint(&f32_u32, n, x, ft, tt)
6108 func (s *state) float64ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
6109 return s.floatToUint(&f64_u32, n, x, ft, tt)
6112 func (s *state) floatToUint(cvttab *f2uCvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
6113 // cutoff:=1<<(intY_Size-1)
6114 // if x < floatX(cutoff) {
6115 // result = uintY(x)
6117 // y = x - floatX(cutoff)
6119 // result = z | -(cutoff)
6121 cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff))
6122 cmp := s.newValue2(cvttab.ltf, types.Types[types.TBOOL], x, cutoff)
6124 b.Kind = ssa.BlockIf
6126 b.Likely = ssa.BranchLikely
6128 bThen := s.f.NewBlock(ssa.BlockPlain)
6129 bElse := s.f.NewBlock(ssa.BlockPlain)
6130 bAfter := s.f.NewBlock(ssa.BlockPlain)
6134 a0 := s.newValue1(cvttab.cvt2U, tt, x)
6137 bThen.AddEdgeTo(bAfter)
6141 y := s.newValue2(cvttab.subf, ft, x, cutoff)
6142 y = s.newValue1(cvttab.cvt2U, tt, y)
6143 z := cvttab.intValue(s, tt, int64(-cvttab.cutoff))
6144 a1 := s.newValue2(cvttab.or, tt, y, z)
6147 bElse.AddEdgeTo(bAfter)
6149 s.startBlock(bAfter)
6150 return s.variable(n, n.Type())
6153 // dottype generates SSA for a type assertion node.
6154 // commaok indicates whether to panic or return a bool.
6155 // If commaok is false, resok will be nil.
6156 func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Value) {
6157 iface := s.expr(n.X) // input interface
6158 target := s.reflectType(n.Type()) // target type
6159 byteptr := s.f.Config.Types.BytePtr
6161 if n.Type().IsInterface() {
6162 if n.Type().IsEmptyInterface() {
6163 // Converting to an empty interface.
6164 // Input could be an empty or nonempty interface.
6165 if base.Debug.TypeAssert > 0 {
6166 base.WarnfAt(n.Pos(), "type assertion inlined")
6169 // Get itab/type field from input.
6170 itab := s.newValue1(ssa.OpITab, byteptr, iface)
6171 // Conversion succeeds iff that field is not nil.
6172 cond := s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], itab, s.constNil(byteptr))
6174 if n.X.Type().IsEmptyInterface() && commaok {
6175 // Converting empty interface to empty interface with ,ok is just a nil check.
6179 // Branch on nilness.
6181 b.Kind = ssa.BlockIf
6183 b.Likely = ssa.BranchLikely
6184 bOk := s.f.NewBlock(ssa.BlockPlain)
6185 bFail := s.f.NewBlock(ssa.BlockPlain)
6190 // On failure, panic by calling panicnildottype.
6192 s.rtcall(ir.Syms.Panicnildottype, false, nil, target)
6194 // On success, return (perhaps modified) input interface.
6196 if n.X.Type().IsEmptyInterface() {
6197 res = iface // Use input interface unchanged.
6200 // Load type out of itab, build interface with existing idata.
6201 off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab)
6202 typ := s.load(byteptr, off)
6203 idata := s.newValue1(ssa.OpIData, byteptr, iface)
6204 res = s.newValue2(ssa.OpIMake, n.Type(), typ, idata)
6209 // nonempty -> empty
6210 // Need to load type from itab
6211 off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab)
6212 s.vars[typVar] = s.load(byteptr, off)
6215 // itab is nil, might as well use that as the nil result.
6217 s.vars[typVar] = itab
6221 bEnd := s.f.NewBlock(ssa.BlockPlain)
6223 bFail.AddEdgeTo(bEnd)
6225 idata := s.newValue1(ssa.OpIData, byteptr, iface)
6226 res = s.newValue2(ssa.OpIMake, n.Type(), s.variable(typVar, byteptr), idata)
6228 delete(s.vars, typVar)
6231 // converting to a nonempty interface needs a runtime call.
6232 if base.Debug.TypeAssert > 0 {
6233 base.WarnfAt(n.Pos(), "type assertion not inlined")
6236 fn := ir.Syms.AssertI2I
6237 if n.X.Type().IsEmptyInterface() {
6238 fn = ir.Syms.AssertE2I
6240 data := s.newValue1(ssa.OpIData, types.Types[types.TUNSAFEPTR], iface)
6241 tab := s.newValue1(ssa.OpITab, byteptr, iface)
6242 tab = s.rtcall(fn, true, []*types.Type{byteptr}, target, tab)[0]
6243 return s.newValue2(ssa.OpIMake, n.Type(), tab, data), nil
6245 fn := ir.Syms.AssertI2I2
6246 if n.X.Type().IsEmptyInterface() {
6247 fn = ir.Syms.AssertE2I2
6249 res = s.rtcall(fn, true, []*types.Type{n.Type()}, target, iface)[0]
6250 resok = s.newValue2(ssa.OpNeqInter, types.Types[types.TBOOL], res, s.constInterface(n.Type()))
6254 if base.Debug.TypeAssert > 0 {
6255 base.WarnfAt(n.Pos(), "type assertion inlined")
6258 // Converting to a concrete type.
6259 direct := types.IsDirectIface(n.Type())
6260 itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface
6261 if base.Debug.TypeAssert > 0 {
6262 base.WarnfAt(n.Pos(), "type assertion inlined")
6264 var targetITab *ssa.Value
6265 if n.X.Type().IsEmptyInterface() {
6266 // Looking for pointer to target type.
6269 // Looking for pointer to itab for target type and source interface.
6270 targetITab = s.expr(n.Itab)
6273 var tmp ir.Node // temporary for use with large types
6274 var addr *ssa.Value // address of tmp
6275 if commaok && !TypeOK(n.Type()) {
6276 // unSSAable type, use temporary.
6277 // TODO: get rid of some of these temporaries.
6278 tmp, addr = s.temp(n.Pos(), n.Type())
6281 cond := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], itab, targetITab)
6283 b.Kind = ssa.BlockIf
6285 b.Likely = ssa.BranchLikely
6287 bOk := s.f.NewBlock(ssa.BlockPlain)
6288 bFail := s.f.NewBlock(ssa.BlockPlain)
6293 // on failure, panic by calling panicdottype
6295 taddr := s.reflectType(n.X.Type())
6296 if n.X.Type().IsEmptyInterface() {
6297 s.rtcall(ir.Syms.PanicdottypeE, false, nil, itab, target, taddr)
6299 s.rtcall(ir.Syms.PanicdottypeI, false, nil, itab, target, taddr)
6302 // on success, return data from interface
6305 return s.newValue1(ssa.OpIData, n.Type(), iface), nil
6307 p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type()), iface)
6308 return s.load(n.Type(), p), nil
6311 // commaok is the more complicated case because we have
6312 // a control flow merge point.
6313 bEnd := s.f.NewBlock(ssa.BlockPlain)
6314 // Note that we need a new valVar each time (unlike okVar where we can
6315 // reuse the variable) because it might have a different type every time.
6316 valVar := ssaMarker("val")
6318 // type assertion succeeded
6322 s.vars[valVar] = s.newValue1(ssa.OpIData, n.Type(), iface)
6324 p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type()), iface)
6325 s.vars[valVar] = s.load(n.Type(), p)
6328 p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type()), iface)
6329 s.move(n.Type(), addr, p)
6331 s.vars[okVar] = s.constBool(true)
6335 // type assertion failed
6338 s.vars[valVar] = s.zeroVal(n.Type())
6340 s.zero(n.Type(), addr)
6342 s.vars[okVar] = s.constBool(false)
6344 bFail.AddEdgeTo(bEnd)
6349 res = s.variable(valVar, n.Type())
6350 delete(s.vars, valVar)
6352 res = s.load(n.Type(), addr)
6353 s.vars[memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp.(*ir.Name), s.mem())
6355 resok = s.variable(okVar, types.Types[types.TBOOL])
6356 delete(s.vars, okVar)
6360 // temp allocates a temp of type t at position pos
6361 func (s *state) temp(pos src.XPos, t *types.Type) (*ir.Name, *ssa.Value) {
6362 tmp := typecheck.TempAt(pos, s.curfn, t)
6363 s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem())
6368 // variable returns the value of a variable at the current location.
6369 func (s *state) variable(n ir.Node, t *types.Type) *ssa.Value {
6379 if s.curBlock == s.f.Entry {
6380 // No variable should be live at entry.
6381 s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, n, v)
6383 // Make a FwdRef, which records a value that's live on block input.
6384 // We'll find the matching definition as part of insertPhis.
6385 v = s.newValue0A(ssa.OpFwdRef, t, fwdRefAux{N: n})
6387 if n.Op() == ir.ONAME {
6388 s.addNamedValue(n.(*ir.Name), v)
6393 func (s *state) mem() *ssa.Value {
6394 return s.variable(memVar, types.TypeMem)
6397 func (s *state) addNamedValue(n *ir.Name, v *ssa.Value) {
6398 if n.Class == ir.Pxxx {
6399 // Don't track our marker nodes (memVar etc.).
6402 if ir.IsAutoTmp(n) {
6403 // Don't track temporary variables.
6406 if n.Class == ir.PPARAMOUT {
6407 // Don't track named output values. This prevents return values
6408 // from being assigned too early. See #14591 and #14762. TODO: allow this.
6411 loc := ssa.LocalSlot{N: n, Type: n.Type(), Off: 0}
6412 values, ok := s.f.NamedValues[loc]
6414 s.f.Names = append(s.f.Names, loc)
6416 s.f.NamedValues[loc] = append(values, v)
6419 // Branch is an unresolved branch.
6420 type Branch struct {
6421 P *obj.Prog // branch instruction
6422 B *ssa.Block // target
6425 // State contains state needed during Prog generation.
6431 // Branches remembers all the branch instructions we've seen
6432 // and where they would like to go.
6435 // bstart remembers where each block starts (indexed by block ID)
6438 maxarg int64 // largest frame size for arguments to calls made by the function
6440 // Map from GC safe points to liveness index, generated by
6441 // liveness analysis.
6442 livenessMap liveness.Map
6444 // partLiveArgs includes arguments that may be partially live, for which we
6445 // need to generate instructions that spill the argument registers.
6446 partLiveArgs map[*ir.Name]bool
6448 // lineRunStart records the beginning of the current run of instructions
6449 // within a single block sharing the same line number
6450 // Used to move statement marks to the beginning of such runs.
6451 lineRunStart *obj.Prog
6453 // wasm: The number of values on the WebAssembly stack. This is only used as a safeguard.
6454 OnWasmStackSkipped int
6457 func (s *State) FuncInfo() *obj.FuncInfo {
6458 return s.pp.CurFunc.LSym.Func()
6461 // Prog appends a new Prog.
6462 func (s *State) Prog(as obj.As) *obj.Prog {
6464 if objw.LosesStmtMark(as) {
6467 // Float a statement start to the beginning of any same-line run.
6468 // lineRunStart is reset at block boundaries, which appears to work well.
6469 if s.lineRunStart == nil || s.lineRunStart.Pos.Line() != p.Pos.Line() {
6471 } else if p.Pos.IsStmt() == src.PosIsStmt {
6472 s.lineRunStart.Pos = s.lineRunStart.Pos.WithIsStmt()
6473 p.Pos = p.Pos.WithNotStmt()
6478 // Pc returns the current Prog.
6479 func (s *State) Pc() *obj.Prog {
6483 // SetPos sets the current source position.
6484 func (s *State) SetPos(pos src.XPos) {
6488 // Br emits a single branch instruction and returns the instruction.
6489 // Not all architectures need the returned instruction, but otherwise
6490 // the boilerplate is common to all.
6491 func (s *State) Br(op obj.As, target *ssa.Block) *obj.Prog {
6493 p.To.Type = obj.TYPE_BRANCH
6494 s.Branches = append(s.Branches, Branch{P: p, B: target})
6498 // DebugFriendlySetPosFrom adjusts Pos.IsStmt subject to heuristics
6499 // that reduce "jumpy" line number churn when debugging.
6500 // Spill/fill/copy instructions from the register allocator,
6501 // phi functions, and instructions with a no-pos position
6502 // are examples of instructions that can cause churn.
6503 func (s *State) DebugFriendlySetPosFrom(v *ssa.Value) {
6505 case ssa.OpPhi, ssa.OpCopy, ssa.OpLoadReg, ssa.OpStoreReg:
6506 // These are not statements
6507 s.SetPos(v.Pos.WithNotStmt())
6510 if p != src.NoXPos {
6511 // If the position is defined, update the position.
6512 // Also convert default IsStmt to NotStmt; only
6513 // explicit statement boundaries should appear
6514 // in the generated code.
6515 if p.IsStmt() != src.PosIsStmt {
6517 // Calls use the pos attached to v, but copy the statement mark from State
6521 s.SetPos(s.pp.Pos.WithNotStmt())
6526 // genssa appends entries to pp for each instruction in f.
6527 func genssa(f *ssa.Func, pp *objw.Progs) {
6529 s.ABI = f.OwnAux.Fn.ABI()
6531 e := f.Frontend().(*ssafn)
6533 s.livenessMap, s.partLiveArgs = liveness.Compute(e.curfn, f, e.stkptrsize, pp)
6535 openDeferInfo := e.curfn.LSym.Func().OpenCodedDeferInfo
6536 if openDeferInfo != nil {
6537 // This function uses open-coded defers -- write out the funcdata
6538 // info that we computed at the end of genssa.
6539 p := pp.Prog(obj.AFUNCDATA)
6540 p.From.SetConst(objabi.FUNCDATA_OpenCodedDeferInfo)
6541 p.To.Type = obj.TYPE_MEM
6542 p.To.Name = obj.NAME_EXTERN
6543 p.To.Sym = openDeferInfo
6546 // Remember where each block starts.
6547 s.bstart = make([]*obj.Prog, f.NumBlocks())
6549 var progToValue map[*obj.Prog]*ssa.Value
6550 var progToBlock map[*obj.Prog]*ssa.Block
6551 var valueToProgAfter []*obj.Prog // The first Prog following computation of a value v; v is visible at this point.
6552 if f.PrintOrHtmlSSA {
6553 progToValue = make(map[*obj.Prog]*ssa.Value, f.NumValues())
6554 progToBlock = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
6555 f.Logf("genssa %s\n", f.Name)
6556 progToBlock[s.pp.Next] = f.Blocks[0]
6559 if base.Ctxt.Flag_locationlists {
6560 if cap(f.Cache.ValueToProgAfter) < f.NumValues() {
6561 f.Cache.ValueToProgAfter = make([]*obj.Prog, f.NumValues())
6563 valueToProgAfter = f.Cache.ValueToProgAfter[:f.NumValues()]
6564 for i := range valueToProgAfter {
6565 valueToProgAfter[i] = nil
6569 // If the very first instruction is not tagged as a statement,
6570 // debuggers may attribute it to previous function in program.
6571 firstPos := src.NoXPos
6572 for _, v := range f.Entry.Values {
6573 if v.Pos.IsStmt() == src.PosIsStmt {
6575 v.Pos = firstPos.WithDefaultStmt()
6580 // inlMarks has an entry for each Prog that implements an inline mark.
6581 // It maps from that Prog to the global inlining id of the inlined body
6582 // which should unwind to this Prog's location.
6583 var inlMarks map[*obj.Prog]int32
6584 var inlMarkList []*obj.Prog
6586 // inlMarksByPos maps from a (column 1) source position to the set of
6587 // Progs that are in the set above and have that source position.
6588 var inlMarksByPos map[src.XPos][]*obj.Prog
6590 // Emit basic blocks
6591 for i, b := range f.Blocks {
6592 s.bstart[b.ID] = s.pp.Next
6593 s.lineRunStart = nil
6595 // Attach a "default" liveness info. Normally this will be
6596 // overwritten in the Values loop below for each Value. But
6597 // for an empty block this will be used for its control
6598 // instruction. We won't use the actual liveness map on a
6599 // control instruction. Just mark it something that is
6600 // preemptible, unless this function is "all unsafe".
6601 s.pp.NextLive = objw.LivenessIndex{StackMapIndex: -1, IsUnsafePoint: liveness.IsUnsafe(f)}
6603 // Emit values in block
6604 Arch.SSAMarkMoves(&s, b)
6605 for _, v := range b.Values {
6607 s.DebugFriendlySetPosFrom(v)
6609 if v.Op.ResultInArg0() && v.ResultReg() != v.Args[0].Reg() {
6610 v.Fatalf("input[0] and output not in same register %s", v.LongString())
6615 // memory arg needs no code
6617 // input args need no code
6618 case ssa.OpSP, ssa.OpSB:
6620 case ssa.OpSelect0, ssa.OpSelect1, ssa.OpSelectN, ssa.OpMakeResult:
6623 // nothing to do when there's a g register,
6624 // and checkLower complains if there's not
6625 case ssa.OpVarDef, ssa.OpVarLive, ssa.OpKeepAlive, ssa.OpVarKill:
6626 // nothing to do; already used by liveness
6630 // nothing to do; no-op conversion for liveness
6631 if v.Args[0].Reg() != v.Reg() {
6632 v.Fatalf("OpConvert should be a no-op: %s; %s", v.Args[0].LongString(), v.LongString())
6635 p := Arch.Ginsnop(s.pp)
6636 if inlMarks == nil {
6637 inlMarks = map[*obj.Prog]int32{}
6638 inlMarksByPos = map[src.XPos][]*obj.Prog{}
6640 inlMarks[p] = v.AuxInt32()
6641 inlMarkList = append(inlMarkList, p)
6642 pos := v.Pos.AtColumn1()
6643 inlMarksByPos[pos] = append(inlMarksByPos[pos], p)
6646 // Special case for first line in function; move it to the start (which cannot be a register-valued instruction)
6647 if firstPos != src.NoXPos && v.Op != ssa.OpArgIntReg && v.Op != ssa.OpArgFloatReg && v.Op != ssa.OpLoadReg && v.Op != ssa.OpStoreReg {
6649 firstPos = src.NoXPos
6651 // Attach this safe point to the next
6653 s.pp.NextLive = s.livenessMap.Get(v)
6655 // let the backend handle it
6656 Arch.SSAGenValue(&s, v)
6659 if base.Ctxt.Flag_locationlists {
6660 valueToProgAfter[v.ID] = s.pp.Next
6663 if f.PrintOrHtmlSSA {
6664 for ; x != s.pp.Next; x = x.Link {
6669 // If this is an empty infinite loop, stick a hardware NOP in there so that debuggers are less confused.
6670 if s.bstart[b.ID] == s.pp.Next && len(b.Succs) == 1 && b.Succs[0].Block() == b {
6671 p := Arch.Ginsnop(s.pp)
6672 p.Pos = p.Pos.WithIsStmt()
6673 if b.Pos == src.NoXPos {
6674 b.Pos = p.Pos // It needs a file, otherwise a no-file non-zero line causes confusion. See #35652.
6675 if b.Pos == src.NoXPos {
6676 b.Pos = pp.Text.Pos // Sometimes p.Pos is empty. See #35695.
6679 b.Pos = b.Pos.WithBogusLine() // Debuggers are not good about infinite loops, force a change in line number
6681 // Emit control flow instructions for block
6683 if i < len(f.Blocks)-1 && base.Flag.N == 0 {
6684 // If -N, leave next==nil so every block with successors
6685 // ends in a JMP (except call blocks - plive doesn't like
6686 // select{send,recv} followed by a JMP call). Helps keep
6687 // line numbers for otherwise empty blocks.
6688 next = f.Blocks[i+1]
6692 Arch.SSAGenBlock(&s, b, next)
6693 if f.PrintOrHtmlSSA {
6694 for ; x != s.pp.Next; x = x.Link {
6699 if f.Blocks[len(f.Blocks)-1].Kind == ssa.BlockExit {
6700 // We need the return address of a panic call to
6701 // still be inside the function in question. So if
6702 // it ends in a call which doesn't return, add a
6703 // nop (which will never execute) after the call.
6706 if openDeferInfo != nil {
6707 // When doing open-coded defers, generate a disconnected call to
6708 // deferreturn and a return. This will be used to during panic
6709 // recovery to unwind the stack and return back to the runtime.
6710 s.pp.NextLive = s.livenessMap.DeferReturn
6711 p := pp.Prog(obj.ACALL)
6712 p.To.Type = obj.TYPE_MEM
6713 p.To.Name = obj.NAME_EXTERN
6714 p.To.Sym = ir.Syms.Deferreturn
6716 // Load results into registers. So when a deferred function
6717 // recovers a panic, it will return to caller with right results.
6718 // The results are already in memory, because they are not SSA'd
6719 // when the function has defers (see canSSAName).
6720 if f.OwnAux.ABIInfo().OutRegistersUsed() != 0 {
6721 Arch.LoadRegResults(&s, f)
6727 if inlMarks != nil {
6728 // We have some inline marks. Try to find other instructions we're
6729 // going to emit anyway, and use those instructions instead of the
6731 for p := pp.Text; p != nil; p = p.Link {
6732 if p.As == obj.ANOP || p.As == obj.AFUNCDATA || p.As == obj.APCDATA || p.As == obj.ATEXT || p.As == obj.APCALIGN || Arch.LinkArch.Family == sys.Wasm {
6733 // Don't use 0-sized instructions as inline marks, because we need
6734 // to identify inline mark instructions by pc offset.
6735 // (Some of these instructions are sometimes zero-sized, sometimes not.
6736 // We must not use anything that even might be zero-sized.)
6737 // TODO: are there others?
6740 if _, ok := inlMarks[p]; ok {
6741 // Don't use inline marks themselves. We don't know
6742 // whether they will be zero-sized or not yet.
6745 pos := p.Pos.AtColumn1()
6746 s := inlMarksByPos[pos]
6750 for _, m := range s {
6751 // We found an instruction with the same source position as
6752 // some of the inline marks.
6753 // Use this instruction instead.
6754 p.Pos = p.Pos.WithIsStmt() // promote position to a statement
6755 pp.CurFunc.LSym.Func().AddInlMark(p, inlMarks[m])
6756 // Make the inline mark a real nop, so it doesn't generate any code.
6762 delete(inlMarksByPos, pos)
6764 // Any unmatched inline marks now need to be added to the inlining tree (and will generate a nop instruction).
6765 for _, p := range inlMarkList {
6766 if p.As != obj.ANOP {
6767 pp.CurFunc.LSym.Func().AddInlMark(p, inlMarks[p])
6772 if base.Ctxt.Flag_locationlists {
6773 debugInfo := ssa.BuildFuncDebug(base.Ctxt, f, base.Debug.LocationLists > 1, StackOffset)
6774 e.curfn.DebugInfo = debugInfo
6776 // Note that at this moment, Prog.Pc is a sequence number; it's
6777 // not a real PC until after assembly, so this mapping has to
6779 debugInfo.GetPC = func(b, v ssa.ID) int64 {
6781 case ssa.BlockStart.ID:
6782 if b == f.Entry.ID {
6783 return 0 // Start at the very beginning, at the assembler-generated prologue.
6784 // this should only happen for function args (ssa.OpArg)
6787 case ssa.BlockEnd.ID:
6788 return e.curfn.LSym.Size
6790 return valueToProgAfter[v].Pc
6795 // Resolve branches, and relax DefaultStmt into NotStmt
6796 for _, br := range s.Branches {
6797 br.P.To.SetTarget(s.bstart[br.B.ID])
6798 if br.P.Pos.IsStmt() != src.PosIsStmt {
6799 br.P.Pos = br.P.Pos.WithNotStmt()
6800 } else if v0 := br.B.FirstPossibleStmtValue(); v0 != nil && v0.Pos.Line() == br.P.Pos.Line() && v0.Pos.IsStmt() == src.PosIsStmt {
6801 br.P.Pos = br.P.Pos.WithNotStmt()
6806 if e.log { // spew to stdout
6808 for p := pp.Text; p != nil; p = p.Link {
6809 if p.Pos.IsKnown() && p.InnermostFilename() != filename {
6810 filename = p.InnermostFilename()
6811 f.Logf("# %s\n", filename)
6815 if v, ok := progToValue[p]; ok {
6817 } else if b, ok := progToBlock[p]; ok {
6820 s = " " // most value and branch strings are 2-3 characters long
6822 f.Logf(" %-6s\t%.5d (%s)\t%s\n", s, p.Pc, p.InnermostLineNumber(), p.InstructionString())
6825 if f.HTMLWriter != nil { // spew to ssa.html
6826 var buf bytes.Buffer
6827 buf.WriteString("<code>")
6828 buf.WriteString("<dl class=\"ssa-gen\">")
6830 for p := pp.Text; p != nil; p = p.Link {
6831 // Don't spam every line with the file name, which is often huge.
6832 // Only print changes, and "unknown" is not a change.
6833 if p.Pos.IsKnown() && p.InnermostFilename() != filename {
6834 filename = p.InnermostFilename()
6835 buf.WriteString("<dt class=\"ssa-prog-src\"></dt><dd class=\"ssa-prog\">")
6836 buf.WriteString(html.EscapeString("# " + filename))
6837 buf.WriteString("</dd>")
6840 buf.WriteString("<dt class=\"ssa-prog-src\">")
6841 if v, ok := progToValue[p]; ok {
6842 buf.WriteString(v.HTML())
6843 } else if b, ok := progToBlock[p]; ok {
6844 buf.WriteString("<b>" + b.HTML() + "</b>")
6846 buf.WriteString("</dt>")
6847 buf.WriteString("<dd class=\"ssa-prog\">")
6848 buf.WriteString(fmt.Sprintf("%.5d <span class=\"l%v line-number\">(%s)</span> %s", p.Pc, p.InnermostLineNumber(), p.InnermostLineNumberHTML(), html.EscapeString(p.InstructionString())))
6849 buf.WriteString("</dd>")
6851 buf.WriteString("</dl>")
6852 buf.WriteString("</code>")
6853 f.HTMLWriter.WriteColumn("genssa", "genssa", "ssa-prog", buf.String())
6858 f.HTMLWriter.Close()
6862 func defframe(s *State, e *ssafn, f *ssa.Func) {
6865 frame := types.Rnd(s.maxarg+e.stksize, int64(types.RegSize))
6866 if Arch.PadFrame != nil {
6867 frame = Arch.PadFrame(frame)
6870 // Fill in argument and frame size.
6871 pp.Text.To.Type = obj.TYPE_TEXTSIZE
6872 pp.Text.To.Val = int32(types.Rnd(f.OwnAux.ArgWidth(), int64(types.RegSize)))
6873 pp.Text.To.Offset = frame
6877 // Insert code to spill argument registers if the named slot may be partially
6878 // live. That is, the named slot is considered live by liveness analysis,
6879 // (because a part of it is live), but we may not spill all parts into the
6880 // slot. This can only happen with aggregate-typed arguments that are SSA-able
6881 // and not address-taken (for non-SSA-able or address-taken arguments we always
6883 // TODO(register args) Make liveness more fine-grained to that partial spilling is okay.
6884 if objabi.Experiment.RegabiArgs {
6885 // First, see if it is already spilled before it may be live. Look for a spill
6886 // in the entry block up to the first safepoint.
6887 type nameOff struct {
6891 partLiveArgsSpilled := make(map[nameOff]bool)
6892 for _, v := range f.Entry.Values {
6896 if v.Op != ssa.OpStoreReg || v.Args[0].Op != ssa.OpArgIntReg {
6899 n, off := ssa.AutoVar(v)
6900 if n.Class != ir.PPARAM || n.Addrtaken() || !TypeOK(n.Type()) || !s.partLiveArgs[n] {
6903 partLiveArgsSpilled[nameOff{n, off}] = true
6906 // Then, insert code to spill registers if not already.
6907 for _, a := range f.OwnAux.ABIInfo().InParams() {
6908 n, ok := a.Name.(*ir.Name)
6909 if !ok || n.Addrtaken() || !TypeOK(n.Type()) || !s.partLiveArgs[n] || len(a.Registers) <= 1 {
6912 rts, offs := a.RegisterTypesAndOffsets()
6913 for i := range a.Registers {
6914 if !rts[i].HasPointers() {
6917 if partLiveArgsSpilled[nameOff{n, offs[i]}] {
6918 continue // already spilled
6920 reg := ssa.ObjRegForAbiReg(a.Registers[i], f.Config)
6921 p = Arch.SpillArgReg(pp, p, f, rts[i], reg, n, offs[i])
6926 // Insert code to zero ambiguously live variables so that the
6927 // garbage collector only sees initialized values when it
6928 // looks for pointers.
6931 // Opaque state for backend to use. Current backends use it to
6932 // keep track of which helper registers have been zeroed.
6935 // Iterate through declarations. They are sorted in decreasing Xoffset order.
6936 for _, n := range e.curfn.Dcl {
6940 if n.Class != ir.PAUTO {
6941 e.Fatalf(n.Pos(), "needzero class %d", n.Class)
6943 if n.Type().Size()%int64(types.PtrSize) != 0 || n.FrameOffset()%int64(types.PtrSize) != 0 || n.Type().Size() == 0 {
6944 e.Fatalf(n.Pos(), "var %L has size %d offset %d", n, n.Type().Size(), n.Offset_)
6947 if lo != hi && n.FrameOffset()+n.Type().Size() >= lo-int64(2*types.RegSize) {
6948 // Merge with range we already have.
6949 lo = n.FrameOffset()
6954 p = Arch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
6957 lo = n.FrameOffset()
6958 hi = lo + n.Type().Size()
6961 // Zero final range.
6962 Arch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
6965 // For generating consecutive jump instructions to model a specific branching
6966 type IndexJump struct {
6971 func (s *State) oneJump(b *ssa.Block, jump *IndexJump) {
6972 p := s.Br(jump.Jump, b.Succs[jump.Index].Block())
6976 // CombJump generates combinational instructions (2 at present) for a block jump,
6977 // thereby the behaviour of non-standard condition codes could be simulated
6978 func (s *State) CombJump(b, next *ssa.Block, jumps *[2][2]IndexJump) {
6980 case b.Succs[0].Block():
6981 s.oneJump(b, &jumps[0][0])
6982 s.oneJump(b, &jumps[0][1])
6983 case b.Succs[1].Block():
6984 s.oneJump(b, &jumps[1][0])
6985 s.oneJump(b, &jumps[1][1])
6988 if b.Likely != ssa.BranchUnlikely {
6989 s.oneJump(b, &jumps[1][0])
6990 s.oneJump(b, &jumps[1][1])
6991 q = s.Br(obj.AJMP, b.Succs[1].Block())
6993 s.oneJump(b, &jumps[0][0])
6994 s.oneJump(b, &jumps[0][1])
6995 q = s.Br(obj.AJMP, b.Succs[0].Block())
7001 // AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a.
7002 func AddAux(a *obj.Addr, v *ssa.Value) {
7003 AddAux2(a, v, v.AuxInt)
7005 func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
7006 if a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR {
7007 v.Fatalf("bad AddAux addr %v", a)
7009 // add integer offset
7012 // If no additional symbol offset, we're done.
7016 // Add symbol's offset from its base register.
7017 switch n := v.Aux.(type) {
7019 a.Name = obj.NAME_EXTERN
7022 a.Name = obj.NAME_EXTERN
7025 if n.Class == ir.PPARAM || (n.Class == ir.PPARAMOUT && !n.IsOutputParamInRegisters()) {
7026 a.Name = obj.NAME_PARAM
7027 a.Sym = ir.Orig(n).(*ir.Name).Linksym()
7028 a.Offset += n.FrameOffset()
7031 a.Name = obj.NAME_AUTO
7032 if n.Class == ir.PPARAMOUT {
7033 a.Sym = ir.Orig(n).(*ir.Name).Linksym()
7037 a.Offset += n.FrameOffset()
7039 v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
7043 // extendIndex extends v to a full int width.
7044 // panic with the given kind if v does not fit in an int (only on 32-bit archs).
7045 func (s *state) extendIndex(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value {
7046 size := idx.Type.Size()
7047 if size == s.config.PtrSize {
7050 if size > s.config.PtrSize {
7051 // truncate 64-bit indexes on 32-bit pointer archs. Test the
7052 // high word and branch to out-of-bounds failure if it is not 0.
7054 if idx.Type.IsSigned() {
7055 lo = s.newValue1(ssa.OpInt64Lo, types.Types[types.TINT], idx)
7057 lo = s.newValue1(ssa.OpInt64Lo, types.Types[types.TUINT], idx)
7059 if bounded || base.Flag.B != 0 {
7062 bNext := s.f.NewBlock(ssa.BlockPlain)
7063 bPanic := s.f.NewBlock(ssa.BlockExit)
7064 hi := s.newValue1(ssa.OpInt64Hi, types.Types[types.TUINT32], idx)
7065 cmp := s.newValue2(ssa.OpEq32, types.Types[types.TBOOL], hi, s.constInt32(types.Types[types.TUINT32], 0))
7066 if !idx.Type.IsSigned() {
7068 case ssa.BoundsIndex:
7069 kind = ssa.BoundsIndexU
7070 case ssa.BoundsSliceAlen:
7071 kind = ssa.BoundsSliceAlenU
7072 case ssa.BoundsSliceAcap:
7073 kind = ssa.BoundsSliceAcapU
7074 case ssa.BoundsSliceB:
7075 kind = ssa.BoundsSliceBU
7076 case ssa.BoundsSlice3Alen:
7077 kind = ssa.BoundsSlice3AlenU
7078 case ssa.BoundsSlice3Acap:
7079 kind = ssa.BoundsSlice3AcapU
7080 case ssa.BoundsSlice3B:
7081 kind = ssa.BoundsSlice3BU
7082 case ssa.BoundsSlice3C:
7083 kind = ssa.BoundsSlice3CU
7087 b.Kind = ssa.BlockIf
7089 b.Likely = ssa.BranchLikely
7093 s.startBlock(bPanic)
7094 mem := s.newValue4I(ssa.OpPanicExtend, types.TypeMem, int64(kind), hi, lo, len, s.mem())
7095 s.endBlock().SetControl(mem)
7101 // Extend value to the required size
7103 if idx.Type.IsSigned() {
7104 switch 10*size + s.config.PtrSize {
7106 op = ssa.OpSignExt8to32
7108 op = ssa.OpSignExt8to64
7110 op = ssa.OpSignExt16to32
7112 op = ssa.OpSignExt16to64
7114 op = ssa.OpSignExt32to64
7116 s.Fatalf("bad signed index extension %s", idx.Type)
7119 switch 10*size + s.config.PtrSize {
7121 op = ssa.OpZeroExt8to32
7123 op = ssa.OpZeroExt8to64
7125 op = ssa.OpZeroExt16to32
7127 op = ssa.OpZeroExt16to64
7129 op = ssa.OpZeroExt32to64
7131 s.Fatalf("bad unsigned index extension %s", idx.Type)
7134 return s.newValue1(op, types.Types[types.TINT], idx)
7137 // CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values.
7138 // Called during ssaGenValue.
7139 func CheckLoweredPhi(v *ssa.Value) {
7140 if v.Op != ssa.OpPhi {
7141 v.Fatalf("CheckLoweredPhi called with non-phi value: %v", v.LongString())
7143 if v.Type.IsMemory() {
7147 loc := f.RegAlloc[v.ID]
7148 for _, a := range v.Args {
7149 if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead?
7150 v.Fatalf("phi arg at different location than phi: %v @ %s, but arg %v @ %s\n%s\n", v, loc, a, aloc, v.Block.Func)
7155 // CheckLoweredGetClosurePtr checks that v is the first instruction in the function's entry block,
7156 // except for incoming in-register arguments.
7157 // The output of LoweredGetClosurePtr is generally hardwired to the correct register.
7158 // That register contains the closure pointer on closure entry.
7159 func CheckLoweredGetClosurePtr(v *ssa.Value) {
7160 entry := v.Block.Func.Entry
7161 if entry != v.Block {
7162 base.Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
7164 for _, w := range entry.Values {
7169 case ssa.OpArgIntReg, ssa.OpArgFloatReg:
7172 base.Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
7177 // CheckArgReg ensures that v is in the function's entry block.
7178 func CheckArgReg(v *ssa.Value) {
7179 entry := v.Block.Func.Entry
7180 if entry != v.Block {
7181 base.Fatalf("in %s, badly placed ArgIReg or ArgFReg: %v %v", v.Block.Func.Name, v.Block, v)
7185 func AddrAuto(a *obj.Addr, v *ssa.Value) {
7186 n, off := ssa.AutoVar(v)
7187 a.Type = obj.TYPE_MEM
7189 a.Reg = int16(Arch.REGSP)
7190 a.Offset = n.FrameOffset() + off
7191 if n.Class == ir.PPARAM || (n.Class == ir.PPARAMOUT && !n.IsOutputParamInRegisters()) {
7192 a.Name = obj.NAME_PARAM
7194 a.Name = obj.NAME_AUTO
7198 // Call returns a new CALL instruction for the SSA value v.
7199 // It uses PrepareCall to prepare the call.
7200 func (s *State) Call(v *ssa.Value) *obj.Prog {
7201 pPosIsStmt := s.pp.Pos.IsStmt() // The statement-ness fo the call comes from ssaGenState
7204 p := s.Prog(obj.ACALL)
7205 if pPosIsStmt == src.PosIsStmt {
7206 p.Pos = v.Pos.WithIsStmt()
7208 p.Pos = v.Pos.WithNotStmt()
7210 if sym, ok := v.Aux.(*ssa.AuxCall); ok && sym.Fn != nil {
7211 p.To.Type = obj.TYPE_MEM
7212 p.To.Name = obj.NAME_EXTERN
7215 // TODO(mdempsky): Can these differences be eliminated?
7216 switch Arch.LinkArch.Family {
7217 case sys.AMD64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm:
7218 p.To.Type = obj.TYPE_REG
7219 case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64:
7220 p.To.Type = obj.TYPE_MEM
7222 base.Fatalf("unknown indirect call family")
7224 p.To.Reg = v.Args[0].Reg()
7229 // PrepareCall prepares to emit a CALL instruction for v and does call-related bookkeeping.
7230 // It must be called immediately before emitting the actual CALL instruction,
7231 // since it emits PCDATA for the stack map at the call (calls are safe points).
7232 func (s *State) PrepareCall(v *ssa.Value) {
7233 idx := s.livenessMap.Get(v)
7234 if !idx.StackMapValid() {
7235 // See Liveness.hasStackMap.
7236 if sym, ok := v.Aux.(*ssa.AuxCall); !ok || !(sym.Fn == ir.Syms.Typedmemclr || sym.Fn == ir.Syms.Typedmemmove) {
7237 base.Fatalf("missing stack map index for %v", v.LongString())
7241 call, ok := v.Aux.(*ssa.AuxCall)
7243 if ok && call.Fn == ir.Syms.Deferreturn {
7244 // Deferred calls will appear to be returning to
7245 // the CALL deferreturn(SB) that we are about to emit.
7246 // However, the stack trace code will show the line
7247 // of the instruction byte before the return PC.
7248 // To avoid that being an unrelated instruction,
7249 // insert an actual hardware NOP that will have the right line number.
7250 // This is different from obj.ANOP, which is a virtual no-op
7251 // that doesn't make it into the instruction stream.
7252 Arch.Ginsnopdefer(s.pp)
7256 // Record call graph information for nowritebarrierrec
7258 if nowritebarrierrecCheck != nil {
7259 nowritebarrierrecCheck.recordCall(s.pp.CurFunc, call.Fn, v.Pos)
7263 if s.maxarg < v.AuxInt {
7268 // UseArgs records the fact that an instruction needs a certain amount of
7269 // callee args space for its use.
7270 func (s *State) UseArgs(n int64) {
7276 // fieldIdx finds the index of the field referred to by the ODOT node n.
7277 func fieldIdx(n *ir.SelectorExpr) int {
7280 panic("ODOT's LHS is not a struct")
7283 for i, f := range t.Fields().Slice() {
7285 if f.Offset != n.Offset() {
7286 panic("field offset doesn't match")
7291 panic(fmt.Sprintf("can't find field in expr %v\n", n))
7293 // TODO: keep the result of this function somewhere in the ODOT Node
7294 // so we don't have to recompute it each time we need it.
7297 // ssafn holds frontend information about a function that the backend is processing.
7298 // It also exports a bunch of compiler services for the ssa backend.
7301 strings map[string]*obj.LSym // map from constant string to data symbols
7302 stksize int64 // stack size for current frame
7303 stkptrsize int64 // prefix of stack containing pointers
7304 log bool // print ssa debug to the stdout
7307 // StringData returns a symbol which
7308 // is the data component of a global string constant containing s.
7309 func (e *ssafn) StringData(s string) *obj.LSym {
7310 if aux, ok := e.strings[s]; ok {
7313 if e.strings == nil {
7314 e.strings = make(map[string]*obj.LSym)
7316 data := staticdata.StringSym(e.curfn.Pos(), s)
7321 func (e *ssafn) Auto(pos src.XPos, t *types.Type) *ir.Name {
7322 return typecheck.TempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list
7325 func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
7326 ptrType := types.NewPtr(types.Types[types.TUINT8])
7327 lenType := types.Types[types.TINT]
7328 // Split this string up into two separate variables.
7329 p := e.SplitSlot(&name, ".ptr", 0, ptrType)
7330 l := e.SplitSlot(&name, ".len", ptrType.Size(), lenType)
7334 func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
7336 u := types.Types[types.TUINTPTR]
7337 t := types.NewPtr(types.Types[types.TUINT8])
7338 // Split this interface up into two separate variables.
7340 if n.Type().IsEmptyInterface() {
7343 c := e.SplitSlot(&name, f, 0, u) // see comment in typebits.Set
7344 d := e.SplitSlot(&name, ".data", u.Size(), t)
7348 func (e *ssafn) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) {
7349 ptrType := types.NewPtr(name.Type.Elem())
7350 lenType := types.Types[types.TINT]
7351 p := e.SplitSlot(&name, ".ptr", 0, ptrType)
7352 l := e.SplitSlot(&name, ".len", ptrType.Size(), lenType)
7353 c := e.SplitSlot(&name, ".cap", ptrType.Size()+lenType.Size(), lenType)
7357 func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
7358 s := name.Type.Size() / 2
7361 t = types.Types[types.TFLOAT64]
7363 t = types.Types[types.TFLOAT32]
7365 r := e.SplitSlot(&name, ".real", 0, t)
7366 i := e.SplitSlot(&name, ".imag", t.Size(), t)
7370 func (e *ssafn) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
7372 if name.Type.IsSigned() {
7373 t = types.Types[types.TINT32]
7375 t = types.Types[types.TUINT32]
7377 if Arch.LinkArch.ByteOrder == binary.BigEndian {
7378 return e.SplitSlot(&name, ".hi", 0, t), e.SplitSlot(&name, ".lo", t.Size(), types.Types[types.TUINT32])
7380 return e.SplitSlot(&name, ".hi", t.Size(), t), e.SplitSlot(&name, ".lo", 0, types.Types[types.TUINT32])
7383 func (e *ssafn) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
7385 // Note: the _ field may appear several times. But
7386 // have no fear, identically-named but distinct Autos are
7387 // ok, albeit maybe confusing for a debugger.
7388 return e.SplitSlot(&name, "."+st.FieldName(i), st.FieldOff(i), st.FieldType(i))
7391 func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot {
7394 if at.NumElem() != 1 {
7395 e.Fatalf(n.Pos(), "bad array size")
7398 return e.SplitSlot(&name, "[0]", 0, et)
7401 func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym {
7402 return reflectdata.ITabSym(it, offset)
7405 // SplitSlot returns a slot representing the data of parent starting at offset.
7406 func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
7409 if node.Class != ir.PAUTO || node.Addrtaken() {
7410 // addressed things and non-autos retain their parents (i.e., cannot truly be split)
7411 return ssa.LocalSlot{N: node, Type: t, Off: parent.Off + offset}
7414 s := &types.Sym{Name: node.Sym().Name + suffix, Pkg: types.LocalPkg}
7415 n := ir.NewNameAt(parent.N.Pos(), s)
7417 ir.AsNode(s.Def).Name().SetUsed(true)
7420 n.SetEsc(ir.EscNever)
7422 e.curfn.Dcl = append(e.curfn.Dcl, n)
7424 return ssa.LocalSlot{N: n, Type: t, Off: 0, SplitOf: parent, SplitOffset: offset}
7427 func (e *ssafn) CanSSA(t *types.Type) bool {
7431 func (e *ssafn) Line(pos src.XPos) string {
7432 return base.FmtPos(pos)
7435 // Log logs a message from the compiler.
7436 func (e *ssafn) Logf(msg string, args ...interface{}) {
7438 fmt.Printf(msg, args...)
7442 func (e *ssafn) Log() bool {
7446 // Fatal reports a compiler error and exits.
7447 func (e *ssafn) Fatalf(pos src.XPos, msg string, args ...interface{}) {
7449 nargs := append([]interface{}{ir.FuncName(e.curfn)}, args...)
7450 base.Fatalf("'%s': "+msg, nargs...)
7453 // Warnl reports a "warning", which is usually flag-triggered
7454 // logging output for the benefit of tests.
7455 func (e *ssafn) Warnl(pos src.XPos, fmt_ string, args ...interface{}) {
7456 base.WarnfAt(pos, fmt_, args...)
7459 func (e *ssafn) Debug_checknil() bool {
7460 return base.Debug.Nil != 0
7463 func (e *ssafn) UseWriteBarrier() bool {
7467 func (e *ssafn) Syslook(name string) *obj.LSym {
7469 case "goschedguarded":
7470 return ir.Syms.Goschedguarded
7471 case "writeBarrier":
7472 return ir.Syms.WriteBarrier
7473 case "gcWriteBarrier":
7474 return ir.Syms.GCWriteBarrier
7475 case "typedmemmove":
7476 return ir.Syms.Typedmemmove
7478 return ir.Syms.Typedmemclr
7480 e.Fatalf(src.NoXPos, "unknown Syslook func %v", name)
7484 func (e *ssafn) SetWBPos(pos src.XPos) {
7485 e.curfn.SetWBPos(pos)
7488 func (e *ssafn) MyImportPath() string {
7489 return base.Ctxt.Pkgpath
7492 func clobberBase(n ir.Node) ir.Node {
7493 if n.Op() == ir.ODOT {
7494 n := n.(*ir.SelectorExpr)
7495 if n.X.Type().NumFields() == 1 {
7496 return clobberBase(n.X)
7499 if n.Op() == ir.OINDEX {
7500 n := n.(*ir.IndexExpr)
7501 if n.X.Type().IsArray() && n.X.Type().NumElem() == 1 {
7502 return clobberBase(n.X)
7508 // callTargetLSym returns the correct LSym to call 'callee' using its ABI.
7509 func callTargetLSym(callee *ir.Name) *obj.LSym {
7510 if callee.Func == nil {
7511 // TODO(austin): This happens in a few cases of
7512 // compiler-generated functions. These are all
7513 // ABIInternal. It would be better if callee.Func was
7514 // never nil and we didn't need this case.
7515 return callee.Linksym()
7518 return callee.LinksymABI(callee.Func.ABI)
7521 func min8(a, b int8) int8 {
7528 func max8(a, b int8) int8 {
7535 // deferstruct makes a runtime._defer structure, with additional space for
7536 // stksize bytes of args.
7537 func deferstruct(stksize int64) *types.Type {
7538 makefield := func(name string, typ *types.Type) *types.Field {
7539 // Unlike the global makefield function, this one needs to set Pkg
7540 // because these types might be compared (in SSA CSE sorting).
7541 // TODO: unify this makefield and the global one above.
7542 sym := &types.Sym{Name: name, Pkg: types.LocalPkg}
7543 return types.NewField(src.NoXPos, sym, typ)
7545 argtype := types.NewArray(types.Types[types.TUINT8], stksize)
7546 argtype.Width = stksize
7548 // These fields must match the ones in runtime/runtime2.go:_defer and
7549 // cmd/compile/internal/gc/ssa.go:(*state).call.
7550 fields := []*types.Field{
7551 makefield("siz", types.Types[types.TUINT32]),
7552 makefield("started", types.Types[types.TBOOL]),
7553 makefield("heap", types.Types[types.TBOOL]),
7554 makefield("openDefer", types.Types[types.TBOOL]),
7555 makefield("sp", types.Types[types.TUINTPTR]),
7556 makefield("pc", types.Types[types.TUINTPTR]),
7557 // Note: the types here don't really matter. Defer structures
7558 // are always scanned explicitly during stack copying and GC,
7559 // so we make them uintptr type even though they are real pointers.
7560 makefield("fn", types.Types[types.TUINTPTR]),
7561 makefield("_panic", types.Types[types.TUINTPTR]),
7562 makefield("link", types.Types[types.TUINTPTR]),
7563 makefield("framepc", types.Types[types.TUINTPTR]),
7564 makefield("varp", types.Types[types.TUINTPTR]),
7565 makefield("fd", types.Types[types.TUINTPTR]),
7566 makefield("args", argtype),
7569 // build struct holding the above fields
7570 s := types.NewStruct(types.NoPkg, fields)
7572 types.CalcStructSize(s)
7576 // SlotAddr uses LocalSlot information to initialize an obj.Addr
7577 // The resulting addr is used in a non-standard context -- in the prologue
7578 // of a function, before the frame has been constructed, so the standard
7579 // addressing for the parameters will be wrong.
7580 func SpillSlotAddr(slot *ssa.LocalSlot, baseReg int16, extraOffset int64) obj.Addr {
7581 n, off := slot.N, slot.Off
7582 if n.Class != ir.PPARAM && n.Class != ir.PPARAMOUT {
7583 panic("Only expected to see param and returns here")
7586 Name: obj.NAME_NONE,
7589 Offset: off + extraOffset + n.FrameOffset(),
7593 // AddrForParamSlot fills in an Addr appropriately for a Spill,
7594 // Restore, or VARLIVE.
7595 func AddrForParamSlot(slot *ssa.LocalSlot, addr *obj.Addr) {
7596 // TODO replace this boilerplate in a couple of places.
7597 n, off := slot.N, slot.Off
7598 addr.Type = obj.TYPE_MEM
7599 addr.Sym = n.Linksym()
7601 if n.Class == ir.PPARAM || (n.Class == ir.PPARAMOUT && !n.IsOutputParamInRegisters()) {
7602 addr.Name = obj.NAME_PARAM
7603 addr.Offset += n.FrameOffset()
7604 } else { // out parameters in registers allocate stack slots like autos.
7605 addr.Name = obj.NAME_AUTO
7610 BoundsCheckFunc [ssa.BoundsKindCount]*obj.LSym
7611 ExtendCheckFunc [ssa.BoundsKindCount]*obj.LSym
7614 // GCWriteBarrierReg maps from registers to gcWriteBarrier implementation LSyms.
7615 var GCWriteBarrierReg map[int16]*obj.LSym