1 // Copyright 2016 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
6 (Add(Ptr|64|32|16|8) ...) => (ADD ...)
7 (Add(64|32)F ...) => (FADD(D|S) ...)
9 (Sub(Ptr|64|32|16|8) ...) => (SUB ...)
10 (Sub(64|32)F ...) => (FSUB(D|S) ...)
12 (Mul64 ...) => (MUL ...)
13 (Mul64uhilo ...) => (LoweredMuluhilo ...)
14 (Mul64uover ...) => (LoweredMuluover ...)
15 (Mul32 ...) => (MULW ...)
16 (Mul16 x y) => (MULW (SignExt16to32 x) (SignExt16to32 y))
17 (Mul8 x y) => (MULW (SignExt8to32 x) (SignExt8to32 y))
18 (Mul(64|32)F ...) => (FMUL(D|S) ...)
20 (Div(64|32)F ...) => (FDIV(D|S) ...)
22 (Div64 x y [false]) => (DIV x y)
23 (Div64u ...) => (DIVU ...)
24 (Div32 x y [false]) => (DIVW x y)
25 (Div32u ...) => (DIVUW ...)
26 (Div16 x y [false]) => (DIVW (SignExt16to32 x) (SignExt16to32 y))
27 (Div16u x y) => (DIVUW (ZeroExt16to32 x) (ZeroExt16to32 y))
28 (Div8 x y) => (DIVW (SignExt8to32 x) (SignExt8to32 y))
29 (Div8u x y) => (DIVUW (ZeroExt8to32 x) (ZeroExt8to32 y))
31 (Hmul64 ...) => (MULH ...)
32 (Hmul64u ...) => (MULHU ...)
33 (Hmul32 x y) => (SRAI [32] (MUL (SignExt32to64 x) (SignExt32to64 y)))
34 (Hmul32u x y) => (SRLI [32] (MUL (ZeroExt32to64 x) (ZeroExt32to64 y)))
36 (Select0 (Add64carry x y c)) => (ADD (ADD <typ.UInt64> x y) c)
37 (Select1 (Add64carry x y c)) =>
38 (OR (SLTU <typ.UInt64> s:(ADD <typ.UInt64> x y) x) (SLTU <typ.UInt64> (ADD <typ.UInt64> s c) s))
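// The carry out of x+y+c is 1 if x+y wraps (s < x) or s+c wraps (s+c < s);
// at most one of these can happen, so ORing the two comparisons yields 0 or 1.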
40 (Select0 (Sub64borrow x y c)) => (SUB (SUB <typ.UInt64> x y) c)
41 (Select1 (Sub64borrow x y c)) =>
42 (OR (SLTU <typ.UInt64> x s:(SUB <typ.UInt64> x y)) (SLTU <typ.UInt64> s (SUB <typ.UInt64> s c)))
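// Likewise, the borrow out of x-y-c is 1 if x-y wraps (x < s) or s-c wraps
// (s < s-c); again at most one of these can happen, so the OR yields 0 or 1.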
44 // (x + y) / 2 => (x / 2) + (y / 2) + (x & y & 1)
45 (Avg64u <t> x y) => (ADD (ADD <t> (SRLI <t> [1] x) (SRLI <t> [1] y)) (ANDI <t> [1] (AND <t> x y)))
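// For example, with x = y = 2^64-1 the direct sum x+y would overflow, but
// (x>>1) + (y>>1) + (x&y&1) still produces the correct average 2^64-1.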
47 (Mod64 x y [false]) => (REM x y)
48 (Mod64u ...) => (REMU ...)
49 (Mod32 x y [false]) => (REMW x y)
50 (Mod32u ...) => (REMUW ...)
51 (Mod16 x y [false]) => (REMW (SignExt16to32 x) (SignExt16to32 y))
52 (Mod16u x y) => (REMUW (ZeroExt16to32 x) (ZeroExt16to32 y))
53 (Mod8 x y) => (REMW (SignExt8to32 x) (SignExt8to32 y))
54 (Mod8u x y) => (REMUW (ZeroExt8to32 x) (ZeroExt8to32 y))
56 (And(64|32|16|8) ...) => (AND ...)
57 (Or(64|32|16|8) ...) => (OR ...)
58 (Xor(64|32|16|8) ...) => (XOR ...)
60 (Neg(64|32|16|8) ...) => (NEG ...)
61 (Neg(64|32)F ...) => (FNEG(D|S) ...)
63 (Com(64|32|16|8) ...) => (NOT ...)
66 (Sqrt ...) => (FSQRTD ...)
67 (Sqrt32 ...) => (FSQRTS ...)
69 (Copysign ...) => (FSGNJD ...)
71 (Abs ...) => (FABSD ...)
73 (FMA ...) => (FMADDD ...)
75 // Sign and zero extension.
77 (SignExt8to16 ...) => (MOVBreg ...)
78 (SignExt8to32 ...) => (MOVBreg ...)
79 (SignExt8to64 ...) => (MOVBreg ...)
80 (SignExt16to32 ...) => (MOVHreg ...)
81 (SignExt16to64 ...) => (MOVHreg ...)
82 (SignExt32to64 ...) => (MOVWreg ...)
84 (ZeroExt8to16 ...) => (MOVBUreg ...)
85 (ZeroExt8to32 ...) => (MOVBUreg ...)
86 (ZeroExt8to64 ...) => (MOVBUreg ...)
87 (ZeroExt16to32 ...) => (MOVHUreg ...)
88 (ZeroExt16to64 ...) => (MOVHUreg ...)
89 (ZeroExt32to64 ...) => (MOVWUreg ...)
91 (Cvt32to32F ...) => (FCVTSW ...)
92 (Cvt32to64F ...) => (FCVTDW ...)
93 (Cvt64to32F ...) => (FCVTSL ...)
94 (Cvt64to64F ...) => (FCVTDL ...)
96 (Cvt32Fto32 ...) => (FCVTWS ...)
97 (Cvt32Fto64 ...) => (FCVTLS ...)
98 (Cvt64Fto32 ...) => (FCVTWD ...)
99 (Cvt64Fto64 ...) => (FCVTLD ...)
101 (Cvt32Fto64F ...) => (FCVTDS ...)
102 (Cvt64Fto32F ...) => (FCVTSD ...)
104 (CvtBoolToUint8 ...) => (Copy ...)
106 (Round(32|64)F ...) => (LoweredRound(32|64)F ...)
108 (Slicemask <t> x) => (SRAI [63] (NEG <t> x))
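// Slicemask is all ones for a non-zero length and zero otherwise: NEG of a
// positive x is negative, and the arithmetic shift by 63 broadcasts its sign
// bit to all 64 bits.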
111 // We ignore the unused high parts of registers, so truncates are just copies.
112 (Trunc16to8 ...) => (Copy ...)
113 (Trunc32to8 ...) => (Copy ...)
114 (Trunc32to16 ...) => (Copy ...)
115 (Trunc64to8 ...) => (Copy ...)
116 (Trunc64to16 ...) => (Copy ...)
117 (Trunc64to32 ...) => (Copy ...)
// SLL only considers the bottom 6 bits of y. If y >= 64, the result should
// always be 0.
124 // Breaking down the operation:
126 // (SLL x y) generates x << (y & 63).
128 // If y < 64, this is the value we want. Otherwise, we want zero.
130 // So, we AND with -1 * uint64(y < 64), which is 0xfffff... if y < 64 and 0 otherwise.
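// For example, with y = 70 the SLL computes x << 6 (70 & 63), but SLTIU [64]
// reports 0, so the negated mask is 0 and the AND yields the required zero.
// For y = 3 the mask is all ones and x << 3 passes through unchanged.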
131 (Lsh8x8 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
132 (Lsh8x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
133 (Lsh8x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
134 (Lsh8x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] y)))
135 (Lsh16x8 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
136 (Lsh16x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
137 (Lsh16x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
138 (Lsh16x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] y)))
139 (Lsh32x8 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
140 (Lsh32x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
141 (Lsh32x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
142 (Lsh32x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] y)))
143 (Lsh64x8 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
144 (Lsh64x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
145 (Lsh64x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
146 (Lsh64x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
148 (Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
149 (Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
150 (Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
151 (Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
// SRL only considers the bottom 6 bits of y. If y >= 64, the result should
154 // always be 0. See Lsh above for a detailed description.
155 (Rsh8Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
156 (Rsh8Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
157 (Rsh8Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
158 (Rsh8Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] y)))
159 (Rsh16Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
160 (Rsh16Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
161 (Rsh16Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
162 (Rsh16Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] y)))
163 (Rsh32Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
164 (Rsh32Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
165 (Rsh32Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
166 (Rsh32Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] y)))
167 (Rsh64Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
168 (Rsh64Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
169 (Rsh64Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
170 (Rsh64Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
172 (Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt8to64 x) y)
173 (Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt16to64 x) y)
174 (Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt32to64 x) y)
175 (Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL x y)
// SRA only considers the bottom 6 bits of y. If y >= 64, the result should
178 // be either 0 or -1 based on the sign bit.
// We implement this by forcing the shift amount to -1 (63 after masking) when y >= 64.
182 // We OR (uint64(y < 64) - 1) into y before passing it to SRA. This leaves
183 // us with -1 (0xffff...) if y >= 64.
185 // We don't need to sign-extend the OR result, as it will be at minimum 8 bits,
186 // more than the 6 bits SRA cares about.
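// For example, with y = 70: SLTIU [64] gives 0, ADDI [-1] gives -1, and the OR
// forces the shift amount to all ones (63 after masking), so SRA produces 0 or
// -1 according to the sign of x. For y < 64 the ADDI result is 0 and y is unchanged.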
187 (Rsh8x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
188 (Rsh8x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
189 (Rsh8x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
190 (Rsh8x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
191 (Rsh16x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
192 (Rsh16x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
193 (Rsh16x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
194 (Rsh16x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
195 (Rsh32x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
196 (Rsh32x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
197 (Rsh32x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
198 (Rsh32x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
199 (Rsh64x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
200 (Rsh64x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
201 (Rsh64x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
202 (Rsh64x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
204 (Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt8to64 x) y)
205 (Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt16to64 x) y)
206 (Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt32to64 x) y)
207 (Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA x y)
210 (RotateLeft8 <t> x (MOVDconst [c])) => (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
211 (RotateLeft16 <t> x (MOVDconst [c])) => (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
212 (RotateLeft32 <t> x (MOVDconst [c])) => (Or32 (Lsh32x64 <t> x (MOVDconst [c&31])) (Rsh32Ux64 <t> x (MOVDconst [-c&31])))
213 (RotateLeft64 <t> x (MOVDconst [c])) => (Or64 (Lsh64x64 <t> x (MOVDconst [c&63])) (Rsh64Ux64 <t> x (MOVDconst [-c&63])))
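// For example, rotating an 8-bit value left by a constant 3 becomes
// (x << 3) | (x >> 5), since -3 & 7 = 5.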
215 (Less64 ...) => (SLT ...)
216 (Less32 x y) => (SLT (SignExt32to64 x) (SignExt32to64 y))
217 (Less16 x y) => (SLT (SignExt16to64 x) (SignExt16to64 y))
218 (Less8 x y) => (SLT (SignExt8to64 x) (SignExt8to64 y))
219 (Less64U ...) => (SLTU ...)
220 (Less32U x y) => (SLTU (ZeroExt32to64 x) (ZeroExt32to64 y))
221 (Less16U x y) => (SLTU (ZeroExt16to64 x) (ZeroExt16to64 y))
222 (Less8U x y) => (SLTU (ZeroExt8to64 x) (ZeroExt8to64 y))
223 (Less(64|32)F ...) => (FLT(D|S) ...)
// Convert x <= y to !(y < x).
226 (Leq(64|32|16|8) x y) => (Not (Less(64|32|16|8) y x))
227 (Leq(64|32|16|8)U x y) => (Not (Less(64|32|16|8)U y x))
228 (Leq(64|32)F ...) => (FLE(D|S) ...)
230 (EqPtr x y) => (SEQZ (SUB <typ.Uintptr> x y))
231 (Eq64 x y) => (SEQZ (SUB <x.Type> x y))
232 (Eq32 x y) && x.Type.IsSigned() => (SEQZ (SUB <x.Type> (SignExt32to64 x) (SignExt32to64 y)))
233 (Eq32 x y) && !x.Type.IsSigned() => (SEQZ (SUB <x.Type> (ZeroExt32to64 x) (ZeroExt32to64 y)))
234 (Eq16 x y) => (SEQZ (SUB <x.Type> (ZeroExt16to64 x) (ZeroExt16to64 y)))
235 (Eq8 x y) => (SEQZ (SUB <x.Type> (ZeroExt8to64 x) (ZeroExt8to64 y)))
236 (Eq(64|32)F ...) => (FEQ(D|S) ...)
238 (NeqPtr x y) => (Not (EqPtr x y))
239 (Neq64 x y) => (Not (Eq64 x y))
240 (Neq32 x y) => (Not (Eq32 x y))
241 (Neq16 x y) => (Not (Eq16 x y))
242 (Neq8 x y) => (Not (Eq8 x y))
243 (Neq(64|32)F ...) => (FNE(D|S) ...)
246 (Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
247 (Load <t> ptr mem) && ( is8BitInt(t) && t.IsSigned()) => (MOVBload ptr mem)
248 (Load <t> ptr mem) && ( is8BitInt(t) && !t.IsSigned()) => (MOVBUload ptr mem)
249 (Load <t> ptr mem) && (is16BitInt(t) && t.IsSigned()) => (MOVHload ptr mem)
250 (Load <t> ptr mem) && (is16BitInt(t) && !t.IsSigned()) => (MOVHUload ptr mem)
251 (Load <t> ptr mem) && (is32BitInt(t) && t.IsSigned()) => (MOVWload ptr mem)
252 (Load <t> ptr mem) && (is32BitInt(t) && !t.IsSigned()) => (MOVWUload ptr mem)
253 (Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
254 (Load <t> ptr mem) && is32BitFloat(t) => (FMOVWload ptr mem)
255 (Load <t> ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem)
258 (Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
259 (Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
260 (Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem)
261 (Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVDstore ptr val mem)
262 (Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (FMOVWstore ptr val mem)
263 (Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (FMOVDstore ptr val mem)
// We need to fold MOVaddr into the load and store ops so that the live variable
// analysis knows which variables are being read and written by the ops.
267 (MOVBUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
268 (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
269 (MOVBload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
270 (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
271 (MOVHUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
272 (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
273 (MOVHload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
274 (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem)
275 (MOVWUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
276 (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
277 (MOVWload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
278 (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
279 (MOVDload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
280 (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
282 (MOVBstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
283 (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
284 (MOVHstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
285 (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
286 (MOVWstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
287 (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
288 (MOVDstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
289 (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
290 (MOVBstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
291 (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
292 (MOVHstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
293 (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
294 (MOVWstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
295 (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
296 (MOVDstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
297 (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
299 (MOVBUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
300 (MOVBUload [off1+int32(off2)] {sym} base mem)
301 (MOVBload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
302 (MOVBload [off1+int32(off2)] {sym} base mem)
303 (MOVHUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
304 (MOVHUload [off1+int32(off2)] {sym} base mem)
305 (MOVHload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
306 (MOVHload [off1+int32(off2)] {sym} base mem)
307 (MOVWUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
308 (MOVWUload [off1+int32(off2)] {sym} base mem)
309 (MOVWload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
310 (MOVWload [off1+int32(off2)] {sym} base mem)
311 (MOVDload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
312 (MOVDload [off1+int32(off2)] {sym} base mem)
314 (MOVBstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
315 (MOVBstore [off1+int32(off2)] {sym} base val mem)
316 (MOVHstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
317 (MOVHstore [off1+int32(off2)] {sym} base val mem)
318 (MOVWstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
319 (MOVWstore [off1+int32(off2)] {sym} base val mem)
320 (MOVDstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
321 (MOVDstore [off1+int32(off2)] {sym} base val mem)
322 (MOVBstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
323 (MOVHstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
324 (MOVWstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
325 (MOVDstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVDstorezero [off1+int32(off2)] {sym} ptr mem)
327 // Similarly, fold ADDI into MOVaddr to avoid confusing live variable analysis
328 // with OffPtr -> ADDI.
329 (ADDI [c] (MOVaddr [d] {s} x)) && is32Bit(c+int64(d)) => (MOVaddr [int32(c)+d] {s} x)
332 (Zero [0] _ mem) => mem
333 (Zero [1] ptr mem) => (MOVBstore ptr (MOVDconst [0]) mem)
334 (Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 =>
335 (MOVHstore ptr (MOVDconst [0]) mem)
336 (Zero [2] ptr mem) =>
337 (MOVBstore [1] ptr (MOVDconst [0])
338 (MOVBstore ptr (MOVDconst [0]) mem))
339 (Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 =>
340 (MOVWstore ptr (MOVDconst [0]) mem)
341 (Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 =>
342 (MOVHstore [2] ptr (MOVDconst [0])
343 (MOVHstore ptr (MOVDconst [0]) mem))
344 (Zero [4] ptr mem) =>
345 (MOVBstore [3] ptr (MOVDconst [0])
346 (MOVBstore [2] ptr (MOVDconst [0])
347 (MOVBstore [1] ptr (MOVDconst [0])
348 (MOVBstore ptr (MOVDconst [0]) mem))))
349 (Zero [8] {t} ptr mem) && t.Alignment()%8 == 0 =>
350 (MOVDstore ptr (MOVDconst [0]) mem)
351 (Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 =>
352 (MOVWstore [4] ptr (MOVDconst [0])
353 (MOVWstore ptr (MOVDconst [0]) mem))
354 (Zero [8] {t} ptr mem) && t.Alignment()%2 == 0 =>
355 (MOVHstore [6] ptr (MOVDconst [0])
356 (MOVHstore [4] ptr (MOVDconst [0])
357 (MOVHstore [2] ptr (MOVDconst [0])
358 (MOVHstore ptr (MOVDconst [0]) mem))))
360 (Zero [3] ptr mem) =>
361 (MOVBstore [2] ptr (MOVDconst [0])
362 (MOVBstore [1] ptr (MOVDconst [0])
363 (MOVBstore ptr (MOVDconst [0]) mem)))
364 (Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 =>
365 (MOVHstore [4] ptr (MOVDconst [0])
366 (MOVHstore [2] ptr (MOVDconst [0])
367 (MOVHstore ptr (MOVDconst [0]) mem)))
368 (Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 =>
369 (MOVWstore [8] ptr (MOVDconst [0])
370 (MOVWstore [4] ptr (MOVDconst [0])
371 (MOVWstore ptr (MOVDconst [0]) mem)))
372 (Zero [16] {t} ptr mem) && t.Alignment()%8 == 0 =>
373 (MOVDstore [8] ptr (MOVDconst [0])
374 (MOVDstore ptr (MOVDconst [0]) mem))
375 (Zero [24] {t} ptr mem) && t.Alignment()%8 == 0 =>
376 (MOVDstore [16] ptr (MOVDconst [0])
377 (MOVDstore [8] ptr (MOVDconst [0])
378 (MOVDstore ptr (MOVDconst [0]) mem)))
379 (Zero [32] {t} ptr mem) && t.Alignment()%8 == 0 =>
380 (MOVDstore [24] ptr (MOVDconst [0])
381 (MOVDstore [16] ptr (MOVDconst [0])
382 (MOVDstore [8] ptr (MOVDconst [0])
383 (MOVDstore ptr (MOVDconst [0]) mem))))
385 // Medium 8-aligned zeroing uses a Duff's device
386 // 8 and 128 are magic constants, see runtime/mkduff.go
387 (Zero [s] {t} ptr mem)
388 && s%8 == 0 && s <= 8*128
389 && t.Alignment()%8 == 0 && !config.noDuffDevice =>
390 (DUFFZERO [8 * (128 - s/8)] ptr mem)
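// For example, s = 1024 (the maximum) enters duffzero at offset 0 and runs all
// 128 eight-byte stores, while s = 16 enters at offset 8*(128-2) = 1008 and
// runs only the final two.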
392 // Generic zeroing uses a loop
393 (Zero [s] {t} ptr mem) =>
(LoweredZero [t.Alignment()]
	ptr
	(ADD <ptr.Type> ptr (MOVDconst [s-moveSize(t.Alignment(), config)]))
	mem)
399 (Convert ...) => (MOVconvert ...)
402 (IsNonNil ...) => (SNEZ ...)
403 (IsInBounds ...) => (Less64U ...)
404 (IsSliceInBounds ...) => (Leq64U ...)
407 (NilCheck ...) => (LoweredNilCheck ...)
408 (GetClosurePtr ...) => (LoweredGetClosurePtr ...)
409 (GetCallerSP ...) => (LoweredGetCallerSP ...)
410 (GetCallerPC ...) => (LoweredGetCallerPC ...)
413 (WB ...) => (LoweredWB ...)
415 (PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
416 (PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
417 (PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
420 (Move [0] _ _ mem) => mem
421 (Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem)
422 (Move [2] {t} dst src mem) && t.Alignment()%2 == 0 =>
423 (MOVHstore dst (MOVHload src mem) mem)
424 (Move [2] dst src mem) =>
425 (MOVBstore [1] dst (MOVBload [1] src mem)
426 (MOVBstore dst (MOVBload src mem) mem))
427 (Move [4] {t} dst src mem) && t.Alignment()%4 == 0 =>
428 (MOVWstore dst (MOVWload src mem) mem)
429 (Move [4] {t} dst src mem) && t.Alignment()%2 == 0 =>
430 (MOVHstore [2] dst (MOVHload [2] src mem)
431 (MOVHstore dst (MOVHload src mem) mem))
432 (Move [4] dst src mem) =>
433 (MOVBstore [3] dst (MOVBload [3] src mem)
434 (MOVBstore [2] dst (MOVBload [2] src mem)
435 (MOVBstore [1] dst (MOVBload [1] src mem)
436 (MOVBstore dst (MOVBload src mem) mem))))
437 (Move [8] {t} dst src mem) && t.Alignment()%8 == 0 =>
438 (MOVDstore dst (MOVDload src mem) mem)
439 (Move [8] {t} dst src mem) && t.Alignment()%4 == 0 =>
440 (MOVWstore [4] dst (MOVWload [4] src mem)
441 (MOVWstore dst (MOVWload src mem) mem))
442 (Move [8] {t} dst src mem) && t.Alignment()%2 == 0 =>
443 (MOVHstore [6] dst (MOVHload [6] src mem)
444 (MOVHstore [4] dst (MOVHload [4] src mem)
445 (MOVHstore [2] dst (MOVHload [2] src mem)
446 (MOVHstore dst (MOVHload src mem) mem))))
448 (Move [3] dst src mem) =>
449 (MOVBstore [2] dst (MOVBload [2] src mem)
450 (MOVBstore [1] dst (MOVBload [1] src mem)
451 (MOVBstore dst (MOVBload src mem) mem)))
452 (Move [6] {t} dst src mem) && t.Alignment()%2 == 0 =>
453 (MOVHstore [4] dst (MOVHload [4] src mem)
454 (MOVHstore [2] dst (MOVHload [2] src mem)
455 (MOVHstore dst (MOVHload src mem) mem)))
456 (Move [12] {t} dst src mem) && t.Alignment()%4 == 0 =>
457 (MOVWstore [8] dst (MOVWload [8] src mem)
458 (MOVWstore [4] dst (MOVWload [4] src mem)
459 (MOVWstore dst (MOVWload src mem) mem)))
460 (Move [16] {t} dst src mem) && t.Alignment()%8 == 0 =>
461 (MOVDstore [8] dst (MOVDload [8] src mem)
462 (MOVDstore dst (MOVDload src mem) mem))
463 (Move [24] {t} dst src mem) && t.Alignment()%8 == 0 =>
464 (MOVDstore [16] dst (MOVDload [16] src mem)
465 (MOVDstore [8] dst (MOVDload [8] src mem)
466 (MOVDstore dst (MOVDload src mem) mem)))
467 (Move [32] {t} dst src mem) && t.Alignment()%8 == 0 =>
468 (MOVDstore [24] dst (MOVDload [24] src mem)
469 (MOVDstore [16] dst (MOVDload [16] src mem)
470 (MOVDstore [8] dst (MOVDload [8] src mem)
471 (MOVDstore dst (MOVDload src mem) mem))))
473 // Medium 8-aligned move uses a Duff's device
474 // 16 and 128 are magic constants, see runtime/mkduff.go
475 (Move [s] {t} dst src mem)
476 && s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0
477 && !config.noDuffDevice && logLargeCopy(v, s) =>
478 (DUFFCOPY [16 * (128 - s/8)] dst src mem)
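// For example, s = 1024 enters duffcopy at offset 0 and runs all 128 eight-byte
// copies, while s = 16 enters at offset 16*(128-2) = 2016 and runs only the
// final two.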
480 // Generic move uses a loop
481 (Move [s] {t} dst src mem) && (s <= 16 || logLargeCopy(v, s)) =>
(LoweredMove [t.Alignment()]
	dst
	src
	(ADDI <src.Type> [s-moveSize(t.Alignment(), config)] src)
	mem)
488 // Boolean ops; 0=false, 1=true
489 (AndB ...) => (AND ...)
490 (OrB ...) => (OR ...)
491 (EqB x y) => (SEQZ (SUB <typ.Bool> x y))
492 (NeqB x y) => (SNEZ (SUB <typ.Bool> x y))
493 (Not ...) => (SEQZ ...)
495 // Lowering pointer arithmetic
496 // TODO: Special handling for SP offsets, like ARM
497 (OffPtr [off] ptr:(SP)) && is32Bit(off) => (MOVaddr [int32(off)] ptr)
498 (OffPtr [off] ptr) && is32Bit(off) => (ADDI [off] ptr)
499 (OffPtr [off] ptr) => (ADD (MOVDconst [off]) ptr)
501 (Const(64|32|16|8) [val]) => (MOVDconst [int64(val)])
502 (Const32F [val]) => (FMVSX (MOVDconst [int64(math.Float32bits(val))]))
503 (Const64F [val]) => (FMVDX (MOVDconst [int64(math.Float64bits(val))]))
504 (ConstNil) => (MOVDconst [0])
505 (ConstBool [val]) => (MOVDconst [int64(b2i(val))])
507 (Addr {sym} base) => (MOVaddr {sym} [0] base)
508 (LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (MOVaddr {sym} (SPanchored base mem))
509 (LocalAddr <t> {sym} base _) && !t.Elem().HasPointers() => (MOVaddr {sym} base)
512 (StaticCall ...) => (CALLstatic ...)
513 (ClosureCall ...) => (CALLclosure ...)
514 (InterCall ...) => (CALLinter ...)
515 (TailCall ...) => (CALLtail ...)
518 (AtomicLoad(Ptr|64|32|8) ...) => (LoweredAtomicLoad(64|64|32|8) ...)
519 (AtomicStore(PtrNoWB|64|32|8) ...) => (LoweredAtomicStore(64|64|32|8) ...)
520 (AtomicAdd(64|32) ...) => (LoweredAtomicAdd(64|32) ...)
522 // AtomicAnd8(ptr,val) => LoweredAtomicAnd32(ptr&^3, ^((uint8(val) ^ 0xff) << ((ptr & 3) * 8)))
523 (AtomicAnd8 ptr val mem) =>
524 (LoweredAtomicAnd32 (ANDI <typ.Uintptr> [^3] ptr)
525 (NOT <typ.UInt32> (SLL <typ.UInt32> (XORI <typ.UInt32> [0xff] (ZeroExt8to32 val))
526 (SLLI <typ.UInt64> [3] (ANDI <typ.UInt64> [3] ptr)))) mem)
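// For example, with val = 0xf0 and ptr&3 == 1 the byte mask val^0xff is 0x0f;
// shifted left by 8 and inverted it becomes 0xffff_f0ff, so the 32-bit AND
// clears bits only within the addressed byte.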
528 (AtomicAnd32 ...) => (LoweredAtomicAnd32 ...)
530 (AtomicCompareAndSwap32 ptr old new mem) => (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem)
531 (AtomicCompareAndSwap64 ...) => (LoweredAtomicCas64 ...)
533 (AtomicExchange(64|32) ...) => (LoweredAtomicExchange(64|32) ...)
535 // AtomicOr8(ptr,val) => LoweredAtomicOr32(ptr&^3, uint32(val)<<((ptr&3)*8))
536 (AtomicOr8 ptr val mem) =>
537 (LoweredAtomicOr32 (ANDI <typ.Uintptr> [^3] ptr)
538 (SLL <typ.UInt32> (ZeroExt8to32 val)
539 (SLLI <typ.UInt64> [3] (ANDI <typ.UInt64> [3] ptr))) mem)
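// For example, with val = 0x0f and ptr&3 == 2 the mask is 0x0f << 16 = 0x000f_0000,
// so the 32-bit OR sets bits only within the addressed byte.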
541 (AtomicOr32 ...) => (LoweredAtomicOr32 ...)
543 // Conditional branches
544 (If cond yes no) => (BNEZ (MOVBUreg <typ.UInt64> cond) yes no)
548 // Absorb SEQZ/SNEZ into branch.
549 (BEQZ (SEQZ x) yes no) => (BNEZ x yes no)
550 (BEQZ (SNEZ x) yes no) => (BEQZ x yes no)
551 (BNEZ (SEQZ x) yes no) => (BEQZ x yes no)
552 (BNEZ (SNEZ x) yes no) => (BNEZ x yes no)
554 // Remove redundant NEG from BEQZ/BNEZ.
555 (BEQZ (NEG x) yes no) => (BEQZ x yes no)
556 (BNEZ (NEG x) yes no) => (BNEZ x yes no)
558 // Negate comparison with FNES/FNED.
559 (BEQZ (FNES <t> x y) yes no) => (BNEZ (FEQS <t> x y) yes no)
560 (BNEZ (FNES <t> x y) yes no) => (BEQZ (FEQS <t> x y) yes no)
561 (BEQZ (FNED <t> x y) yes no) => (BNEZ (FEQD <t> x y) yes no)
562 (BNEZ (FNED <t> x y) yes no) => (BEQZ (FEQD <t> x y) yes no)
564 // Convert BEQZ/BNEZ into more optimal branch conditions.
565 (BEQZ (SUB x y) yes no) => (BEQ x y yes no)
566 (BNEZ (SUB x y) yes no) => (BNE x y yes no)
567 (BEQZ (SLT x y) yes no) => (BGE x y yes no)
568 (BNEZ (SLT x y) yes no) => (BLT x y yes no)
569 (BEQZ (SLTU x y) yes no) => (BGEU x y yes no)
570 (BNEZ (SLTU x y) yes no) => (BLTU x y yes no)
571 (BEQZ (SLTI [x] y) yes no) => (BGE y (MOVDconst [x]) yes no)
572 (BNEZ (SLTI [x] y) yes no) => (BLT y (MOVDconst [x]) yes no)
573 (BEQZ (SLTIU [x] y) yes no) => (BGEU y (MOVDconst [x]) yes no)
574 (BNEZ (SLTIU [x] y) yes no) => (BLTU y (MOVDconst [x]) yes no)
// Convert branches against a constant zero into the more optimal branch-on-zero forms.
577 (BEQ (MOVDconst [0]) cond yes no) => (BEQZ cond yes no)
578 (BEQ cond (MOVDconst [0]) yes no) => (BEQZ cond yes no)
579 (BNE (MOVDconst [0]) cond yes no) => (BNEZ cond yes no)
580 (BNE cond (MOVDconst [0]) yes no) => (BNEZ cond yes no)
581 (BLT (MOVDconst [0]) cond yes no) => (BGTZ cond yes no)
582 (BLT cond (MOVDconst [0]) yes no) => (BLTZ cond yes no)
583 (BGE (MOVDconst [0]) cond yes no) => (BLEZ cond yes no)
584 (BGE cond (MOVDconst [0]) yes no) => (BGEZ cond yes no)
586 // Remove redundant NEG from SEQZ/SNEZ.
587 (SEQZ (NEG x)) => (SEQZ x)
588 (SNEZ (NEG x)) => (SNEZ x)
590 // Remove redundant SEQZ/SNEZ.
591 (SEQZ (SEQZ x)) => (SNEZ x)
592 (SEQZ (SNEZ x)) => (SEQZ x)
593 (SNEZ (SEQZ x)) => (SEQZ x)
594 (SNEZ (SNEZ x)) => (SNEZ x)
597 (MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
598 (MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
599 (MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
600 (MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVDstorezero [off] {sym} ptr mem)
602 // Boolean ops are already extended.
603 (MOVBUreg x:((FLES|FLTS|FEQS|FNES) _ _)) => x
604 (MOVBUreg x:((FLED|FLTD|FEQD|FNED) _ _)) => x
605 (MOVBUreg x:((SEQZ|SNEZ) _)) => x
606 (MOVBUreg x:((SLT|SLTU) _ _)) => x
608 // Avoid extending when already sufficiently masked.
609 (MOVBreg x:(ANDI [c] y)) && c >= 0 && int64(int8(c)) == c => x
610 (MOVHreg x:(ANDI [c] y)) && c >= 0 && int64(int16(c)) == c => x
611 (MOVWreg x:(ANDI [c] y)) && c >= 0 && int64(int32(c)) == c => x
612 (MOVBUreg x:(ANDI [c] y)) && c >= 0 && int64(uint8(c)) == c => x
613 (MOVHUreg x:(ANDI [c] y)) && c >= 0 && int64(uint16(c)) == c => x
614 (MOVWUreg x:(ANDI [c] y)) && c >= 0 && int64(uint32(c)) == c => x
616 // Combine masking and zero extension.
617 (MOVBUreg (ANDI [c] x)) && c < 0 => (ANDI [int64(uint8(c))] x)
618 (MOVHUreg (ANDI [c] x)) && c < 0 => (ANDI [int64(uint16(c))] x)
619 (MOVWUreg (ANDI [c] x)) && c < 0 => (AND (MOVDconst [int64(uint32(c))]) x)
621 // Avoid sign/zero extension for consts.
622 (MOVBreg (MOVDconst [c])) => (MOVDconst [int64(int8(c))])
623 (MOVHreg (MOVDconst [c])) => (MOVDconst [int64(int16(c))])
624 (MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))])
625 (MOVBUreg (MOVDconst [c])) => (MOVDconst [int64(uint8(c))])
626 (MOVHUreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))])
627 (MOVWUreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))])
629 // Avoid sign/zero extension after properly typed load.
630 (MOVBreg x:(MOVBload _ _)) => (MOVDreg x)
631 (MOVHreg x:(MOVBload _ _)) => (MOVDreg x)
632 (MOVHreg x:(MOVBUload _ _)) => (MOVDreg x)
633 (MOVHreg x:(MOVHload _ _)) => (MOVDreg x)
634 (MOVWreg x:(MOVBload _ _)) => (MOVDreg x)
635 (MOVWreg x:(MOVBUload _ _)) => (MOVDreg x)
636 (MOVWreg x:(MOVHload _ _)) => (MOVDreg x)
637 (MOVWreg x:(MOVHUload _ _)) => (MOVDreg x)
638 (MOVWreg x:(MOVWload _ _)) => (MOVDreg x)
639 (MOVBUreg x:(MOVBUload _ _)) => (MOVDreg x)
640 (MOVHUreg x:(MOVBUload _ _)) => (MOVDreg x)
641 (MOVHUreg x:(MOVHUload _ _)) => (MOVDreg x)
642 (MOVWUreg x:(MOVBUload _ _)) => (MOVDreg x)
643 (MOVWUreg x:(MOVHUload _ _)) => (MOVDreg x)
644 (MOVWUreg x:(MOVWUload _ _)) => (MOVDreg x)
646 // Avoid zero extension after properly typed atomic operation.
647 (MOVBUreg x:(Select0 (LoweredAtomicLoad8 _ _))) => (MOVDreg x)
648 (MOVBUreg x:(Select0 (LoweredAtomicCas32 _ _ _ _))) => (MOVDreg x)
649 (MOVBUreg x:(Select0 (LoweredAtomicCas64 _ _ _ _))) => (MOVDreg x)
651 // Avoid sign extension after word arithmetic.
652 (MOVWreg x:(ADDIW _)) => (MOVDreg x)
653 (MOVWreg x:(SUBW _ _)) => (MOVDreg x)
654 (MOVWreg x:(NEGW _)) => (MOVDreg x)
655 (MOVWreg x:(MULW _ _)) => (MOVDreg x)
656 (MOVWreg x:(DIVW _ _)) => (MOVDreg x)
657 (MOVWreg x:(DIVUW _ _)) => (MOVDreg x)
658 (MOVWreg x:(REMW _ _)) => (MOVDreg x)
659 (MOVWreg x:(REMUW _ _)) => (MOVDreg x)
661 // Fold double extensions.
662 (MOVBreg x:(MOVBreg _)) => (MOVDreg x)
663 (MOVHreg x:(MOVBreg _)) => (MOVDreg x)
664 (MOVHreg x:(MOVBUreg _)) => (MOVDreg x)
665 (MOVHreg x:(MOVHreg _)) => (MOVDreg x)
666 (MOVWreg x:(MOVBreg _)) => (MOVDreg x)
667 (MOVWreg x:(MOVBUreg _)) => (MOVDreg x)
668 (MOVWreg x:(MOVHreg _)) => (MOVDreg x)
669 (MOVWreg x:(MOVWreg _)) => (MOVDreg x)
670 (MOVBUreg x:(MOVBUreg _)) => (MOVDreg x)
671 (MOVHUreg x:(MOVBUreg _)) => (MOVDreg x)
672 (MOVHUreg x:(MOVHUreg _)) => (MOVDreg x)
673 (MOVWUreg x:(MOVBUreg _)) => (MOVDreg x)
674 (MOVWUreg x:(MOVHUreg _)) => (MOVDreg x)
675 (MOVWUreg x:(MOVWUreg _)) => (MOVDreg x)
677 // Do not extend before store.
678 (MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
679 (MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
680 (MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
681 (MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
682 (MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
683 (MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
684 (MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
685 (MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
686 (MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
687 (MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
688 (MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
689 (MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
691 // Replace extend after load with alternate load where possible.
692 (MOVBreg <t> x:(MOVBUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <t> [off] {sym} ptr mem)
693 (MOVHreg <t> x:(MOVHUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHload <t> [off] {sym} ptr mem)
694 (MOVWreg <t> x:(MOVWUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <t> [off] {sym} ptr mem)
695 (MOVBUreg <t> x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBUload <t> [off] {sym} ptr mem)
696 (MOVHUreg <t> x:(MOVHload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHUload <t> [off] {sym} ptr mem)
697 (MOVWUreg <t> x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWUload <t> [off] {sym} ptr mem)
// If a register move has only 1 use, just use the same register without emitting an instruction.
// MOVDnop does not emit an instruction; it exists only to ensure the type.
701 (MOVDreg x) && x.Uses == 1 => (MOVDnop x)
// TODO: we should be able to get rid of MOVDnop altogether.
704 // But for now, this is enough to get rid of lots of them.
705 (MOVDnop (MOVDconst [c])) => (MOVDconst [c])
707 // Fold constant into immediate instructions where possible.
708 (ADD (MOVDconst <t> [val]) x) && is32Bit(val) && !t.IsPtr() => (ADDI [val] x)
709 (AND (MOVDconst [val]) x) && is32Bit(val) => (ANDI [val] x)
710 (OR (MOVDconst [val]) x) && is32Bit(val) => (ORI [val] x)
711 (XOR (MOVDconst [val]) x) && is32Bit(val) => (XORI [val] x)
712 (SLL x (MOVDconst [val])) => (SLLI [int64(val&63)] x)
713 (SRL x (MOVDconst [val])) => (SRLI [int64(val&63)] x)
714 (SRA x (MOVDconst [val])) => (SRAI [int64(val&63)] x)
715 (SLT x (MOVDconst [val])) && val >= -2048 && val <= 2047 => (SLTI [val] x)
716 (SLTU x (MOVDconst [val])) && val >= -2048 && val <= 2047 => (SLTIU [val] x)
718 // Convert const subtraction into ADDI with negative immediate, where possible.
719 (SUB x (MOVDconst [val])) && is32Bit(-val) => (ADDI [-val] x)
720 (SUB <t> (MOVDconst [val]) y) && is32Bit(-val) => (NEG (ADDI <t> [-val] y))
722 // Subtraction of zero.
723 (SUB x (MOVDconst [0])) => x
724 (SUBW x (MOVDconst [0])) => (ADDIW [0] x)
726 // Subtraction from zero.
727 (SUB (MOVDconst [0]) x) => (NEG x)
728 (SUBW (MOVDconst [0]) x) => (NEGW x)
730 // Fold negation into subtraction.
731 (NEG (SUB x y)) => (SUB y x)
732 (NEG <t> s:(ADDI [val] (SUB x y))) && s.Uses == 1 && is32Bit(-val) => (ADDI [-val] (SUB <t> y x))
737 // Addition of zero or two constants.
(ADDI [0] x) => x
(ADDI [x] (MOVDconst [y])) && is32Bit(x + y) => (MOVDconst [x + y])
741 // ANDI with all zeros, all ones or two constants.
742 (ANDI [0] x) => (MOVDconst [0])
(ANDI [-1] x) => x
(ANDI [x] (MOVDconst [y])) => (MOVDconst [x & y])
// ORI with all zeros, all ones or two constants.
(ORI [0] x) => x
(ORI [-1] x) => (MOVDconst [-1])
749 (ORI [x] (MOVDconst [y])) => (MOVDconst [x | y])
751 // Combine operations with immediate.
752 (ADDI [x] (ADDI [y] z)) && is32Bit(x + y) => (ADDI [x + y] z)
753 (ANDI [x] (ANDI [y] z)) => (ANDI [x & y] z)
754 (ORI [x] (ORI [y] z)) => (ORI [x | y] z)
756 // Negation of a constant.
757 (NEG (MOVDconst [x])) => (MOVDconst [-x])
758 (NEGW (MOVDconst [x])) => (MOVDconst [int64(int32(-x))])
760 // Shift of a constant.
761 (SLLI [x] (MOVDconst [y])) && is32Bit(y << uint32(x)) => (MOVDconst [y << uint32(x)])
762 (SRLI [x] (MOVDconst [y])) => (MOVDconst [int64(uint64(y) >> uint32(x))])
763 (SRAI [x] (MOVDconst [y])) => (MOVDconst [int64(y) >> uint32(x)])
765 // SLTI/SLTIU with constants.
766 (SLTI [x] (MOVDconst [y])) => (MOVDconst [b2i(int64(y) < int64(x))])
767 (SLTIU [x] (MOVDconst [y])) => (MOVDconst [b2i(uint64(y) < uint64(x))])
769 // SLTI/SLTIU with known outcomes.
770 (SLTI [x] (ANDI [y] _)) && y >= 0 && int64(y) < int64(x) => (MOVDconst [1])
771 (SLTIU [x] (ANDI [y] _)) && y >= 0 && uint64(y) < uint64(x) => (MOVDconst [1])
772 (SLTI [x] (ORI [y] _)) && y >= 0 && int64(y) >= int64(x) => (MOVDconst [0])
773 (SLTIU [x] (ORI [y] _)) && y >= 0 && uint64(y) >= uint64(x) => (MOVDconst [0])
775 // SLT/SLTU with known outcomes.
776 (SLT x x) => (MOVDconst [0])
777 (SLTU x x) => (MOVDconst [0])
779 // Deadcode for LoweredMuluhilo
780 (Select0 m:(LoweredMuluhilo x y)) && m.Uses == 1 => (MULHU x y)
781 (Select1 m:(LoweredMuluhilo x y)) && m.Uses == 1 => (MUL x y)
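// If only one half of the full 128-bit product is used, compute just that half:
// Select0 (the high 64 bits) becomes MULHU and Select1 (the low 64 bits) becomes MUL.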
783 (FADDD a (FMULD x y)) && a.Block.Func.useFMA(v) => (FMADDD x y a)
784 (FSUBD a (FMULD x y)) && a.Block.Func.useFMA(v) => (FNMSUBD x y a)
785 (FSUBD (FMULD x y) a) && a.Block.Func.useFMA(v) => (FMSUBD x y a)
// Merge negation into fused multiply-add and multiply-subtract.
//
// The operations follow the naming scheme [+ -](x * y [+ -] z):
// FMADDD is x*y + z, FMSUBD is x*y - z, FNMADDD is -(x*y + z) and
// FNMSUBD is -(x*y - z).
//
// Note: multiplication commutativity handled by rule generator.
796 (F(MADD|NMADD|MSUB|NMSUB)D neg:(FNEGD x) y z) && neg.Uses == 1 => (F(NMSUB|MSUB|NMADD|MADD)D x y z)
797 (F(MADD|NMADD|MSUB|NMSUB)D x y neg:(FNEGD z)) && neg.Uses == 1 => (F(MSUB|NMSUB|MADD|NMADD)D x y z)