]> Cypherpunks.ru repositories - gostls13.git/blob - src/cmd/compile/internal/ssa/rewriteAMD64.go
1f56b70816ff67379f6272ba1576c49505f14262
[gostls13.git] / src / cmd / compile / internal / ssa / rewriteAMD64.go
1 // Code generated from gen/AMD64.rules; DO NOT EDIT.
2 // generated with: cd gen; go run *.go
3
4 package ssa
5
6 import "math"
7 import "cmd/internal/obj"
8 import "cmd/internal/objabi"
9 import "cmd/compile/internal/types"
10
11 func rewriteValueAMD64(v *Value) bool {
12         switch v.Op {
13         case OpAMD64ADCQ:
14                 return rewriteValueAMD64_OpAMD64ADCQ(v)
15         case OpAMD64ADCQconst:
16                 return rewriteValueAMD64_OpAMD64ADCQconst(v)
17         case OpAMD64ADDL:
18                 return rewriteValueAMD64_OpAMD64ADDL(v)
19         case OpAMD64ADDLconst:
20                 return rewriteValueAMD64_OpAMD64ADDLconst(v)
21         case OpAMD64ADDLconstmodify:
22                 return rewriteValueAMD64_OpAMD64ADDLconstmodify(v)
23         case OpAMD64ADDLload:
24                 return rewriteValueAMD64_OpAMD64ADDLload(v)
25         case OpAMD64ADDLmodify:
26                 return rewriteValueAMD64_OpAMD64ADDLmodify(v)
27         case OpAMD64ADDQ:
28                 return rewriteValueAMD64_OpAMD64ADDQ(v)
29         case OpAMD64ADDQcarry:
30                 return rewriteValueAMD64_OpAMD64ADDQcarry(v)
31         case OpAMD64ADDQconst:
32                 return rewriteValueAMD64_OpAMD64ADDQconst(v)
33         case OpAMD64ADDQconstmodify:
34                 return rewriteValueAMD64_OpAMD64ADDQconstmodify(v)
35         case OpAMD64ADDQload:
36                 return rewriteValueAMD64_OpAMD64ADDQload(v)
37         case OpAMD64ADDQmodify:
38                 return rewriteValueAMD64_OpAMD64ADDQmodify(v)
39         case OpAMD64ADDSD:
40                 return rewriteValueAMD64_OpAMD64ADDSD(v)
41         case OpAMD64ADDSDload:
42                 return rewriteValueAMD64_OpAMD64ADDSDload(v)
43         case OpAMD64ADDSS:
44                 return rewriteValueAMD64_OpAMD64ADDSS(v)
45         case OpAMD64ADDSSload:
46                 return rewriteValueAMD64_OpAMD64ADDSSload(v)
47         case OpAMD64ANDL:
48                 return rewriteValueAMD64_OpAMD64ANDL(v)
49         case OpAMD64ANDLconst:
50                 return rewriteValueAMD64_OpAMD64ANDLconst(v)
51         case OpAMD64ANDLconstmodify:
52                 return rewriteValueAMD64_OpAMD64ANDLconstmodify(v)
53         case OpAMD64ANDLload:
54                 return rewriteValueAMD64_OpAMD64ANDLload(v)
55         case OpAMD64ANDLmodify:
56                 return rewriteValueAMD64_OpAMD64ANDLmodify(v)
57         case OpAMD64ANDQ:
58                 return rewriteValueAMD64_OpAMD64ANDQ(v)
59         case OpAMD64ANDQconst:
60                 return rewriteValueAMD64_OpAMD64ANDQconst(v)
61         case OpAMD64ANDQconstmodify:
62                 return rewriteValueAMD64_OpAMD64ANDQconstmodify(v)
63         case OpAMD64ANDQload:
64                 return rewriteValueAMD64_OpAMD64ANDQload(v)
65         case OpAMD64ANDQmodify:
66                 return rewriteValueAMD64_OpAMD64ANDQmodify(v)
67         case OpAMD64BSFQ:
68                 return rewriteValueAMD64_OpAMD64BSFQ(v)
69         case OpAMD64BTCLconst:
70                 return rewriteValueAMD64_OpAMD64BTCLconst(v)
71         case OpAMD64BTCLconstmodify:
72                 return rewriteValueAMD64_OpAMD64BTCLconstmodify(v)
73         case OpAMD64BTCLmodify:
74                 return rewriteValueAMD64_OpAMD64BTCLmodify(v)
75         case OpAMD64BTCQconst:
76                 return rewriteValueAMD64_OpAMD64BTCQconst(v)
77         case OpAMD64BTCQconstmodify:
78                 return rewriteValueAMD64_OpAMD64BTCQconstmodify(v)
79         case OpAMD64BTCQmodify:
80                 return rewriteValueAMD64_OpAMD64BTCQmodify(v)
81         case OpAMD64BTLconst:
82                 return rewriteValueAMD64_OpAMD64BTLconst(v)
83         case OpAMD64BTQconst:
84                 return rewriteValueAMD64_OpAMD64BTQconst(v)
85         case OpAMD64BTRLconst:
86                 return rewriteValueAMD64_OpAMD64BTRLconst(v)
87         case OpAMD64BTRLconstmodify:
88                 return rewriteValueAMD64_OpAMD64BTRLconstmodify(v)
89         case OpAMD64BTRLmodify:
90                 return rewriteValueAMD64_OpAMD64BTRLmodify(v)
91         case OpAMD64BTRQconst:
92                 return rewriteValueAMD64_OpAMD64BTRQconst(v)
93         case OpAMD64BTRQconstmodify:
94                 return rewriteValueAMD64_OpAMD64BTRQconstmodify(v)
95         case OpAMD64BTRQmodify:
96                 return rewriteValueAMD64_OpAMD64BTRQmodify(v)
97         case OpAMD64BTSLconst:
98                 return rewriteValueAMD64_OpAMD64BTSLconst(v)
99         case OpAMD64BTSLconstmodify:
100                 return rewriteValueAMD64_OpAMD64BTSLconstmodify(v)
101         case OpAMD64BTSLmodify:
102                 return rewriteValueAMD64_OpAMD64BTSLmodify(v)
103         case OpAMD64BTSQconst:
104                 return rewriteValueAMD64_OpAMD64BTSQconst(v)
105         case OpAMD64BTSQconstmodify:
106                 return rewriteValueAMD64_OpAMD64BTSQconstmodify(v)
107         case OpAMD64BTSQmodify:
108                 return rewriteValueAMD64_OpAMD64BTSQmodify(v)
109         case OpAMD64CMOVLCC:
110                 return rewriteValueAMD64_OpAMD64CMOVLCC(v)
111         case OpAMD64CMOVLCS:
112                 return rewriteValueAMD64_OpAMD64CMOVLCS(v)
113         case OpAMD64CMOVLEQ:
114                 return rewriteValueAMD64_OpAMD64CMOVLEQ(v)
115         case OpAMD64CMOVLGE:
116                 return rewriteValueAMD64_OpAMD64CMOVLGE(v)
117         case OpAMD64CMOVLGT:
118                 return rewriteValueAMD64_OpAMD64CMOVLGT(v)
119         case OpAMD64CMOVLHI:
120                 return rewriteValueAMD64_OpAMD64CMOVLHI(v)
121         case OpAMD64CMOVLLE:
122                 return rewriteValueAMD64_OpAMD64CMOVLLE(v)
123         case OpAMD64CMOVLLS:
124                 return rewriteValueAMD64_OpAMD64CMOVLLS(v)
125         case OpAMD64CMOVLLT:
126                 return rewriteValueAMD64_OpAMD64CMOVLLT(v)
127         case OpAMD64CMOVLNE:
128                 return rewriteValueAMD64_OpAMD64CMOVLNE(v)
129         case OpAMD64CMOVQCC:
130                 return rewriteValueAMD64_OpAMD64CMOVQCC(v)
131         case OpAMD64CMOVQCS:
132                 return rewriteValueAMD64_OpAMD64CMOVQCS(v)
133         case OpAMD64CMOVQEQ:
134                 return rewriteValueAMD64_OpAMD64CMOVQEQ(v)
135         case OpAMD64CMOVQGE:
136                 return rewriteValueAMD64_OpAMD64CMOVQGE(v)
137         case OpAMD64CMOVQGT:
138                 return rewriteValueAMD64_OpAMD64CMOVQGT(v)
139         case OpAMD64CMOVQHI:
140                 return rewriteValueAMD64_OpAMD64CMOVQHI(v)
141         case OpAMD64CMOVQLE:
142                 return rewriteValueAMD64_OpAMD64CMOVQLE(v)
143         case OpAMD64CMOVQLS:
144                 return rewriteValueAMD64_OpAMD64CMOVQLS(v)
145         case OpAMD64CMOVQLT:
146                 return rewriteValueAMD64_OpAMD64CMOVQLT(v)
147         case OpAMD64CMOVQNE:
148                 return rewriteValueAMD64_OpAMD64CMOVQNE(v)
149         case OpAMD64CMOVWCC:
150                 return rewriteValueAMD64_OpAMD64CMOVWCC(v)
151         case OpAMD64CMOVWCS:
152                 return rewriteValueAMD64_OpAMD64CMOVWCS(v)
153         case OpAMD64CMOVWEQ:
154                 return rewriteValueAMD64_OpAMD64CMOVWEQ(v)
155         case OpAMD64CMOVWGE:
156                 return rewriteValueAMD64_OpAMD64CMOVWGE(v)
157         case OpAMD64CMOVWGT:
158                 return rewriteValueAMD64_OpAMD64CMOVWGT(v)
159         case OpAMD64CMOVWHI:
160                 return rewriteValueAMD64_OpAMD64CMOVWHI(v)
161         case OpAMD64CMOVWLE:
162                 return rewriteValueAMD64_OpAMD64CMOVWLE(v)
163         case OpAMD64CMOVWLS:
164                 return rewriteValueAMD64_OpAMD64CMOVWLS(v)
165         case OpAMD64CMOVWLT:
166                 return rewriteValueAMD64_OpAMD64CMOVWLT(v)
167         case OpAMD64CMOVWNE:
168                 return rewriteValueAMD64_OpAMD64CMOVWNE(v)
169         case OpAMD64CMPB:
170                 return rewriteValueAMD64_OpAMD64CMPB(v)
171         case OpAMD64CMPBconst:
172                 return rewriteValueAMD64_OpAMD64CMPBconst(v)
173         case OpAMD64CMPBconstload:
174                 return rewriteValueAMD64_OpAMD64CMPBconstload(v)
175         case OpAMD64CMPBload:
176                 return rewriteValueAMD64_OpAMD64CMPBload(v)
177         case OpAMD64CMPL:
178                 return rewriteValueAMD64_OpAMD64CMPL(v)
179         case OpAMD64CMPLconst:
180                 return rewriteValueAMD64_OpAMD64CMPLconst(v)
181         case OpAMD64CMPLconstload:
182                 return rewriteValueAMD64_OpAMD64CMPLconstload(v)
183         case OpAMD64CMPLload:
184                 return rewriteValueAMD64_OpAMD64CMPLload(v)
185         case OpAMD64CMPQ:
186                 return rewriteValueAMD64_OpAMD64CMPQ(v)
187         case OpAMD64CMPQconst:
188                 return rewriteValueAMD64_OpAMD64CMPQconst(v)
189         case OpAMD64CMPQconstload:
190                 return rewriteValueAMD64_OpAMD64CMPQconstload(v)
191         case OpAMD64CMPQload:
192                 return rewriteValueAMD64_OpAMD64CMPQload(v)
193         case OpAMD64CMPW:
194                 return rewriteValueAMD64_OpAMD64CMPW(v)
195         case OpAMD64CMPWconst:
196                 return rewriteValueAMD64_OpAMD64CMPWconst(v)
197         case OpAMD64CMPWconstload:
198                 return rewriteValueAMD64_OpAMD64CMPWconstload(v)
199         case OpAMD64CMPWload:
200                 return rewriteValueAMD64_OpAMD64CMPWload(v)
201         case OpAMD64CMPXCHGLlock:
202                 return rewriteValueAMD64_OpAMD64CMPXCHGLlock(v)
203         case OpAMD64CMPXCHGQlock:
204                 return rewriteValueAMD64_OpAMD64CMPXCHGQlock(v)
205         case OpAMD64DIVSD:
206                 return rewriteValueAMD64_OpAMD64DIVSD(v)
207         case OpAMD64DIVSDload:
208                 return rewriteValueAMD64_OpAMD64DIVSDload(v)
209         case OpAMD64DIVSS:
210                 return rewriteValueAMD64_OpAMD64DIVSS(v)
211         case OpAMD64DIVSSload:
212                 return rewriteValueAMD64_OpAMD64DIVSSload(v)
213         case OpAMD64HMULL:
214                 return rewriteValueAMD64_OpAMD64HMULL(v)
215         case OpAMD64HMULLU:
216                 return rewriteValueAMD64_OpAMD64HMULLU(v)
217         case OpAMD64HMULQ:
218                 return rewriteValueAMD64_OpAMD64HMULQ(v)
219         case OpAMD64HMULQU:
220                 return rewriteValueAMD64_OpAMD64HMULQU(v)
221         case OpAMD64LEAL:
222                 return rewriteValueAMD64_OpAMD64LEAL(v)
223         case OpAMD64LEAL1:
224                 return rewriteValueAMD64_OpAMD64LEAL1(v)
225         case OpAMD64LEAL2:
226                 return rewriteValueAMD64_OpAMD64LEAL2(v)
227         case OpAMD64LEAL4:
228                 return rewriteValueAMD64_OpAMD64LEAL4(v)
229         case OpAMD64LEAL8:
230                 return rewriteValueAMD64_OpAMD64LEAL8(v)
231         case OpAMD64LEAQ:
232                 return rewriteValueAMD64_OpAMD64LEAQ(v)
233         case OpAMD64LEAQ1:
234                 return rewriteValueAMD64_OpAMD64LEAQ1(v)
235         case OpAMD64LEAQ2:
236                 return rewriteValueAMD64_OpAMD64LEAQ2(v)
237         case OpAMD64LEAQ4:
238                 return rewriteValueAMD64_OpAMD64LEAQ4(v)
239         case OpAMD64LEAQ8:
240                 return rewriteValueAMD64_OpAMD64LEAQ8(v)
241         case OpAMD64MOVBQSX:
242                 return rewriteValueAMD64_OpAMD64MOVBQSX(v)
243         case OpAMD64MOVBQSXload:
244                 return rewriteValueAMD64_OpAMD64MOVBQSXload(v)
245         case OpAMD64MOVBQZX:
246                 return rewriteValueAMD64_OpAMD64MOVBQZX(v)
247         case OpAMD64MOVBatomicload:
248                 return rewriteValueAMD64_OpAMD64MOVBatomicload(v)
249         case OpAMD64MOVBload:
250                 return rewriteValueAMD64_OpAMD64MOVBload(v)
251         case OpAMD64MOVBstore:
252                 return rewriteValueAMD64_OpAMD64MOVBstore(v)
253         case OpAMD64MOVBstoreconst:
254                 return rewriteValueAMD64_OpAMD64MOVBstoreconst(v)
255         case OpAMD64MOVLQSX:
256                 return rewriteValueAMD64_OpAMD64MOVLQSX(v)
257         case OpAMD64MOVLQSXload:
258                 return rewriteValueAMD64_OpAMD64MOVLQSXload(v)
259         case OpAMD64MOVLQZX:
260                 return rewriteValueAMD64_OpAMD64MOVLQZX(v)
261         case OpAMD64MOVLatomicload:
262                 return rewriteValueAMD64_OpAMD64MOVLatomicload(v)
263         case OpAMD64MOVLf2i:
264                 return rewriteValueAMD64_OpAMD64MOVLf2i(v)
265         case OpAMD64MOVLi2f:
266                 return rewriteValueAMD64_OpAMD64MOVLi2f(v)
267         case OpAMD64MOVLload:
268                 return rewriteValueAMD64_OpAMD64MOVLload(v)
269         case OpAMD64MOVLstore:
270                 return rewriteValueAMD64_OpAMD64MOVLstore(v)
271         case OpAMD64MOVLstoreconst:
272                 return rewriteValueAMD64_OpAMD64MOVLstoreconst(v)
273         case OpAMD64MOVOload:
274                 return rewriteValueAMD64_OpAMD64MOVOload(v)
275         case OpAMD64MOVOstore:
276                 return rewriteValueAMD64_OpAMD64MOVOstore(v)
277         case OpAMD64MOVQatomicload:
278                 return rewriteValueAMD64_OpAMD64MOVQatomicload(v)
279         case OpAMD64MOVQf2i:
280                 return rewriteValueAMD64_OpAMD64MOVQf2i(v)
281         case OpAMD64MOVQi2f:
282                 return rewriteValueAMD64_OpAMD64MOVQi2f(v)
283         case OpAMD64MOVQload:
284                 return rewriteValueAMD64_OpAMD64MOVQload(v)
285         case OpAMD64MOVQstore:
286                 return rewriteValueAMD64_OpAMD64MOVQstore(v)
287         case OpAMD64MOVQstoreconst:
288                 return rewriteValueAMD64_OpAMD64MOVQstoreconst(v)
289         case OpAMD64MOVSDload:
290                 return rewriteValueAMD64_OpAMD64MOVSDload(v)
291         case OpAMD64MOVSDstore:
292                 return rewriteValueAMD64_OpAMD64MOVSDstore(v)
293         case OpAMD64MOVSSload:
294                 return rewriteValueAMD64_OpAMD64MOVSSload(v)
295         case OpAMD64MOVSSstore:
296                 return rewriteValueAMD64_OpAMD64MOVSSstore(v)
297         case OpAMD64MOVWQSX:
298                 return rewriteValueAMD64_OpAMD64MOVWQSX(v)
299         case OpAMD64MOVWQSXload:
300                 return rewriteValueAMD64_OpAMD64MOVWQSXload(v)
301         case OpAMD64MOVWQZX:
302                 return rewriteValueAMD64_OpAMD64MOVWQZX(v)
303         case OpAMD64MOVWload:
304                 return rewriteValueAMD64_OpAMD64MOVWload(v)
305         case OpAMD64MOVWstore:
306                 return rewriteValueAMD64_OpAMD64MOVWstore(v)
307         case OpAMD64MOVWstoreconst:
308                 return rewriteValueAMD64_OpAMD64MOVWstoreconst(v)
309         case OpAMD64MULL:
310                 return rewriteValueAMD64_OpAMD64MULL(v)
311         case OpAMD64MULLconst:
312                 return rewriteValueAMD64_OpAMD64MULLconst(v)
313         case OpAMD64MULQ:
314                 return rewriteValueAMD64_OpAMD64MULQ(v)
315         case OpAMD64MULQconst:
316                 return rewriteValueAMD64_OpAMD64MULQconst(v)
317         case OpAMD64MULSD:
318                 return rewriteValueAMD64_OpAMD64MULSD(v)
319         case OpAMD64MULSDload:
320                 return rewriteValueAMD64_OpAMD64MULSDload(v)
321         case OpAMD64MULSS:
322                 return rewriteValueAMD64_OpAMD64MULSS(v)
323         case OpAMD64MULSSload:
324                 return rewriteValueAMD64_OpAMD64MULSSload(v)
325         case OpAMD64NEGL:
326                 return rewriteValueAMD64_OpAMD64NEGL(v)
327         case OpAMD64NEGQ:
328                 return rewriteValueAMD64_OpAMD64NEGQ(v)
329         case OpAMD64NOTL:
330                 return rewriteValueAMD64_OpAMD64NOTL(v)
331         case OpAMD64NOTQ:
332                 return rewriteValueAMD64_OpAMD64NOTQ(v)
333         case OpAMD64ORL:
334                 return rewriteValueAMD64_OpAMD64ORL(v)
335         case OpAMD64ORLconst:
336                 return rewriteValueAMD64_OpAMD64ORLconst(v)
337         case OpAMD64ORLconstmodify:
338                 return rewriteValueAMD64_OpAMD64ORLconstmodify(v)
339         case OpAMD64ORLload:
340                 return rewriteValueAMD64_OpAMD64ORLload(v)
341         case OpAMD64ORLmodify:
342                 return rewriteValueAMD64_OpAMD64ORLmodify(v)
343         case OpAMD64ORQ:
344                 return rewriteValueAMD64_OpAMD64ORQ(v)
345         case OpAMD64ORQconst:
346                 return rewriteValueAMD64_OpAMD64ORQconst(v)
347         case OpAMD64ORQconstmodify:
348                 return rewriteValueAMD64_OpAMD64ORQconstmodify(v)
349         case OpAMD64ORQload:
350                 return rewriteValueAMD64_OpAMD64ORQload(v)
351         case OpAMD64ORQmodify:
352                 return rewriteValueAMD64_OpAMD64ORQmodify(v)
353         case OpAMD64ROLB:
354                 return rewriteValueAMD64_OpAMD64ROLB(v)
355         case OpAMD64ROLBconst:
356                 return rewriteValueAMD64_OpAMD64ROLBconst(v)
357         case OpAMD64ROLL:
358                 return rewriteValueAMD64_OpAMD64ROLL(v)
359         case OpAMD64ROLLconst:
360                 return rewriteValueAMD64_OpAMD64ROLLconst(v)
361         case OpAMD64ROLQ:
362                 return rewriteValueAMD64_OpAMD64ROLQ(v)
363         case OpAMD64ROLQconst:
364                 return rewriteValueAMD64_OpAMD64ROLQconst(v)
365         case OpAMD64ROLW:
366                 return rewriteValueAMD64_OpAMD64ROLW(v)
367         case OpAMD64ROLWconst:
368                 return rewriteValueAMD64_OpAMD64ROLWconst(v)
369         case OpAMD64RORB:
370                 return rewriteValueAMD64_OpAMD64RORB(v)
371         case OpAMD64RORL:
372                 return rewriteValueAMD64_OpAMD64RORL(v)
373         case OpAMD64RORQ:
374                 return rewriteValueAMD64_OpAMD64RORQ(v)
375         case OpAMD64RORW:
376                 return rewriteValueAMD64_OpAMD64RORW(v)
377         case OpAMD64SARB:
378                 return rewriteValueAMD64_OpAMD64SARB(v)
379         case OpAMD64SARBconst:
380                 return rewriteValueAMD64_OpAMD64SARBconst(v)
381         case OpAMD64SARL:
382                 return rewriteValueAMD64_OpAMD64SARL(v)
383         case OpAMD64SARLconst:
384                 return rewriteValueAMD64_OpAMD64SARLconst(v)
385         case OpAMD64SARQ:
386                 return rewriteValueAMD64_OpAMD64SARQ(v)
387         case OpAMD64SARQconst:
388                 return rewriteValueAMD64_OpAMD64SARQconst(v)
389         case OpAMD64SARW:
390                 return rewriteValueAMD64_OpAMD64SARW(v)
391         case OpAMD64SARWconst:
392                 return rewriteValueAMD64_OpAMD64SARWconst(v)
393         case OpAMD64SBBLcarrymask:
394                 return rewriteValueAMD64_OpAMD64SBBLcarrymask(v)
395         case OpAMD64SBBQ:
396                 return rewriteValueAMD64_OpAMD64SBBQ(v)
397         case OpAMD64SBBQcarrymask:
398                 return rewriteValueAMD64_OpAMD64SBBQcarrymask(v)
399         case OpAMD64SBBQconst:
400                 return rewriteValueAMD64_OpAMD64SBBQconst(v)
401         case OpAMD64SETA:
402                 return rewriteValueAMD64_OpAMD64SETA(v)
403         case OpAMD64SETAE:
404                 return rewriteValueAMD64_OpAMD64SETAE(v)
405         case OpAMD64SETAEstore:
406                 return rewriteValueAMD64_OpAMD64SETAEstore(v)
407         case OpAMD64SETAstore:
408                 return rewriteValueAMD64_OpAMD64SETAstore(v)
409         case OpAMD64SETB:
410                 return rewriteValueAMD64_OpAMD64SETB(v)
411         case OpAMD64SETBE:
412                 return rewriteValueAMD64_OpAMD64SETBE(v)
413         case OpAMD64SETBEstore:
414                 return rewriteValueAMD64_OpAMD64SETBEstore(v)
415         case OpAMD64SETBstore:
416                 return rewriteValueAMD64_OpAMD64SETBstore(v)
417         case OpAMD64SETEQ:
418                 return rewriteValueAMD64_OpAMD64SETEQ(v)
419         case OpAMD64SETEQstore:
420                 return rewriteValueAMD64_OpAMD64SETEQstore(v)
421         case OpAMD64SETG:
422                 return rewriteValueAMD64_OpAMD64SETG(v)
423         case OpAMD64SETGE:
424                 return rewriteValueAMD64_OpAMD64SETGE(v)
425         case OpAMD64SETGEstore:
426                 return rewriteValueAMD64_OpAMD64SETGEstore(v)
427         case OpAMD64SETGstore:
428                 return rewriteValueAMD64_OpAMD64SETGstore(v)
429         case OpAMD64SETL:
430                 return rewriteValueAMD64_OpAMD64SETL(v)
431         case OpAMD64SETLE:
432                 return rewriteValueAMD64_OpAMD64SETLE(v)
433         case OpAMD64SETLEstore:
434                 return rewriteValueAMD64_OpAMD64SETLEstore(v)
435         case OpAMD64SETLstore:
436                 return rewriteValueAMD64_OpAMD64SETLstore(v)
437         case OpAMD64SETNE:
438                 return rewriteValueAMD64_OpAMD64SETNE(v)
439         case OpAMD64SETNEstore:
440                 return rewriteValueAMD64_OpAMD64SETNEstore(v)
441         case OpAMD64SHLL:
442                 return rewriteValueAMD64_OpAMD64SHLL(v)
443         case OpAMD64SHLLconst:
444                 return rewriteValueAMD64_OpAMD64SHLLconst(v)
445         case OpAMD64SHLQ:
446                 return rewriteValueAMD64_OpAMD64SHLQ(v)
447         case OpAMD64SHLQconst:
448                 return rewriteValueAMD64_OpAMD64SHLQconst(v)
449         case OpAMD64SHRB:
450                 return rewriteValueAMD64_OpAMD64SHRB(v)
451         case OpAMD64SHRBconst:
452                 return rewriteValueAMD64_OpAMD64SHRBconst(v)
453         case OpAMD64SHRL:
454                 return rewriteValueAMD64_OpAMD64SHRL(v)
455         case OpAMD64SHRLconst:
456                 return rewriteValueAMD64_OpAMD64SHRLconst(v)
457         case OpAMD64SHRQ:
458                 return rewriteValueAMD64_OpAMD64SHRQ(v)
459         case OpAMD64SHRQconst:
460                 return rewriteValueAMD64_OpAMD64SHRQconst(v)
461         case OpAMD64SHRW:
462                 return rewriteValueAMD64_OpAMD64SHRW(v)
463         case OpAMD64SHRWconst:
464                 return rewriteValueAMD64_OpAMD64SHRWconst(v)
465         case OpAMD64SUBL:
466                 return rewriteValueAMD64_OpAMD64SUBL(v)
467         case OpAMD64SUBLconst:
468                 return rewriteValueAMD64_OpAMD64SUBLconst(v)
469         case OpAMD64SUBLload:
470                 return rewriteValueAMD64_OpAMD64SUBLload(v)
471         case OpAMD64SUBLmodify:
472                 return rewriteValueAMD64_OpAMD64SUBLmodify(v)
473         case OpAMD64SUBQ:
474                 return rewriteValueAMD64_OpAMD64SUBQ(v)
475         case OpAMD64SUBQborrow:
476                 return rewriteValueAMD64_OpAMD64SUBQborrow(v)
477         case OpAMD64SUBQconst:
478                 return rewriteValueAMD64_OpAMD64SUBQconst(v)
479         case OpAMD64SUBQload:
480                 return rewriteValueAMD64_OpAMD64SUBQload(v)
481         case OpAMD64SUBQmodify:
482                 return rewriteValueAMD64_OpAMD64SUBQmodify(v)
483         case OpAMD64SUBSD:
484                 return rewriteValueAMD64_OpAMD64SUBSD(v)
485         case OpAMD64SUBSDload:
486                 return rewriteValueAMD64_OpAMD64SUBSDload(v)
487         case OpAMD64SUBSS:
488                 return rewriteValueAMD64_OpAMD64SUBSS(v)
489         case OpAMD64SUBSSload:
490                 return rewriteValueAMD64_OpAMD64SUBSSload(v)
491         case OpAMD64TESTB:
492                 return rewriteValueAMD64_OpAMD64TESTB(v)
493         case OpAMD64TESTBconst:
494                 return rewriteValueAMD64_OpAMD64TESTBconst(v)
495         case OpAMD64TESTL:
496                 return rewriteValueAMD64_OpAMD64TESTL(v)
497         case OpAMD64TESTLconst:
498                 return rewriteValueAMD64_OpAMD64TESTLconst(v)
499         case OpAMD64TESTQ:
500                 return rewriteValueAMD64_OpAMD64TESTQ(v)
501         case OpAMD64TESTQconst:
502                 return rewriteValueAMD64_OpAMD64TESTQconst(v)
503         case OpAMD64TESTW:
504                 return rewriteValueAMD64_OpAMD64TESTW(v)
505         case OpAMD64TESTWconst:
506                 return rewriteValueAMD64_OpAMD64TESTWconst(v)
507         case OpAMD64XADDLlock:
508                 return rewriteValueAMD64_OpAMD64XADDLlock(v)
509         case OpAMD64XADDQlock:
510                 return rewriteValueAMD64_OpAMD64XADDQlock(v)
511         case OpAMD64XCHGL:
512                 return rewriteValueAMD64_OpAMD64XCHGL(v)
513         case OpAMD64XCHGQ:
514                 return rewriteValueAMD64_OpAMD64XCHGQ(v)
515         case OpAMD64XORL:
516                 return rewriteValueAMD64_OpAMD64XORL(v)
517         case OpAMD64XORLconst:
518                 return rewriteValueAMD64_OpAMD64XORLconst(v)
519         case OpAMD64XORLconstmodify:
520                 return rewriteValueAMD64_OpAMD64XORLconstmodify(v)
521         case OpAMD64XORLload:
522                 return rewriteValueAMD64_OpAMD64XORLload(v)
523         case OpAMD64XORLmodify:
524                 return rewriteValueAMD64_OpAMD64XORLmodify(v)
525         case OpAMD64XORQ:
526                 return rewriteValueAMD64_OpAMD64XORQ(v)
527         case OpAMD64XORQconst:
528                 return rewriteValueAMD64_OpAMD64XORQconst(v)
529         case OpAMD64XORQconstmodify:
530                 return rewriteValueAMD64_OpAMD64XORQconstmodify(v)
531         case OpAMD64XORQload:
532                 return rewriteValueAMD64_OpAMD64XORQload(v)
533         case OpAMD64XORQmodify:
534                 return rewriteValueAMD64_OpAMD64XORQmodify(v)
535         case OpAdd16:
536                 v.Op = OpAMD64ADDL
537                 return true
538         case OpAdd32:
539                 v.Op = OpAMD64ADDL
540                 return true
541         case OpAdd32F:
542                 v.Op = OpAMD64ADDSS
543                 return true
544         case OpAdd64:
545                 v.Op = OpAMD64ADDQ
546                 return true
547         case OpAdd64F:
548                 v.Op = OpAMD64ADDSD
549                 return true
550         case OpAdd8:
551                 v.Op = OpAMD64ADDL
552                 return true
553         case OpAddPtr:
554                 v.Op = OpAMD64ADDQ
555                 return true
556         case OpAddr:
557                 return rewriteValueAMD64_OpAddr(v)
558         case OpAnd16:
559                 v.Op = OpAMD64ANDL
560                 return true
561         case OpAnd32:
562                 v.Op = OpAMD64ANDL
563                 return true
564         case OpAnd64:
565                 v.Op = OpAMD64ANDQ
566                 return true
567         case OpAnd8:
568                 v.Op = OpAMD64ANDL
569                 return true
570         case OpAndB:
571                 v.Op = OpAMD64ANDL
572                 return true
573         case OpAtomicAdd32:
574                 return rewriteValueAMD64_OpAtomicAdd32(v)
575         case OpAtomicAdd64:
576                 return rewriteValueAMD64_OpAtomicAdd64(v)
577         case OpAtomicAnd32:
578                 return rewriteValueAMD64_OpAtomicAnd32(v)
579         case OpAtomicAnd8:
580                 return rewriteValueAMD64_OpAtomicAnd8(v)
581         case OpAtomicCompareAndSwap32:
582                 return rewriteValueAMD64_OpAtomicCompareAndSwap32(v)
583         case OpAtomicCompareAndSwap64:
584                 return rewriteValueAMD64_OpAtomicCompareAndSwap64(v)
585         case OpAtomicExchange32:
586                 return rewriteValueAMD64_OpAtomicExchange32(v)
587         case OpAtomicExchange64:
588                 return rewriteValueAMD64_OpAtomicExchange64(v)
589         case OpAtomicLoad32:
590                 return rewriteValueAMD64_OpAtomicLoad32(v)
591         case OpAtomicLoad64:
592                 return rewriteValueAMD64_OpAtomicLoad64(v)
593         case OpAtomicLoad8:
594                 return rewriteValueAMD64_OpAtomicLoad8(v)
595         case OpAtomicLoadPtr:
596                 return rewriteValueAMD64_OpAtomicLoadPtr(v)
597         case OpAtomicOr32:
598                 return rewriteValueAMD64_OpAtomicOr32(v)
599         case OpAtomicOr8:
600                 return rewriteValueAMD64_OpAtomicOr8(v)
601         case OpAtomicStore32:
602                 return rewriteValueAMD64_OpAtomicStore32(v)
603         case OpAtomicStore64:
604                 return rewriteValueAMD64_OpAtomicStore64(v)
605         case OpAtomicStore8:
606                 return rewriteValueAMD64_OpAtomicStore8(v)
607         case OpAtomicStorePtrNoWB:
608                 return rewriteValueAMD64_OpAtomicStorePtrNoWB(v)
609         case OpAvg64u:
610                 v.Op = OpAMD64AVGQU
611                 return true
612         case OpBitLen16:
613                 return rewriteValueAMD64_OpBitLen16(v)
614         case OpBitLen32:
615                 return rewriteValueAMD64_OpBitLen32(v)
616         case OpBitLen64:
617                 return rewriteValueAMD64_OpBitLen64(v)
618         case OpBitLen8:
619                 return rewriteValueAMD64_OpBitLen8(v)
620         case OpBswap32:
621                 v.Op = OpAMD64BSWAPL
622                 return true
623         case OpBswap64:
624                 v.Op = OpAMD64BSWAPQ
625                 return true
626         case OpCeil:
627                 return rewriteValueAMD64_OpCeil(v)
628         case OpClosureCall:
629                 v.Op = OpAMD64CALLclosure
630                 return true
631         case OpCom16:
632                 v.Op = OpAMD64NOTL
633                 return true
634         case OpCom32:
635                 v.Op = OpAMD64NOTL
636                 return true
637         case OpCom64:
638                 v.Op = OpAMD64NOTQ
639                 return true
640         case OpCom8:
641                 v.Op = OpAMD64NOTL
642                 return true
643         case OpCondSelect:
644                 return rewriteValueAMD64_OpCondSelect(v)
645         case OpConst16:
646                 return rewriteValueAMD64_OpConst16(v)
647         case OpConst32:
648                 v.Op = OpAMD64MOVLconst
649                 return true
650         case OpConst32F:
651                 v.Op = OpAMD64MOVSSconst
652                 return true
653         case OpConst64:
654                 v.Op = OpAMD64MOVQconst
655                 return true
656         case OpConst64F:
657                 v.Op = OpAMD64MOVSDconst
658                 return true
659         case OpConst8:
660                 return rewriteValueAMD64_OpConst8(v)
661         case OpConstBool:
662                 return rewriteValueAMD64_OpConstBool(v)
663         case OpConstNil:
664                 return rewriteValueAMD64_OpConstNil(v)
665         case OpCtz16:
666                 return rewriteValueAMD64_OpCtz16(v)
667         case OpCtz16NonZero:
668                 v.Op = OpAMD64BSFL
669                 return true
670         case OpCtz32:
671                 return rewriteValueAMD64_OpCtz32(v)
672         case OpCtz32NonZero:
673                 v.Op = OpAMD64BSFL
674                 return true
675         case OpCtz64:
676                 return rewriteValueAMD64_OpCtz64(v)
677         case OpCtz64NonZero:
678                 return rewriteValueAMD64_OpCtz64NonZero(v)
679         case OpCtz8:
680                 return rewriteValueAMD64_OpCtz8(v)
681         case OpCtz8NonZero:
682                 v.Op = OpAMD64BSFL
683                 return true
684         case OpCvt32Fto32:
685                 v.Op = OpAMD64CVTTSS2SL
686                 return true
687         case OpCvt32Fto64:
688                 v.Op = OpAMD64CVTTSS2SQ
689                 return true
690         case OpCvt32Fto64F:
691                 v.Op = OpAMD64CVTSS2SD
692                 return true
693         case OpCvt32to32F:
694                 v.Op = OpAMD64CVTSL2SS
695                 return true
696         case OpCvt32to64F:
697                 v.Op = OpAMD64CVTSL2SD
698                 return true
699         case OpCvt64Fto32:
700                 v.Op = OpAMD64CVTTSD2SL
701                 return true
702         case OpCvt64Fto32F:
703                 v.Op = OpAMD64CVTSD2SS
704                 return true
705         case OpCvt64Fto64:
706                 v.Op = OpAMD64CVTTSD2SQ
707                 return true
708         case OpCvt64to32F:
709                 v.Op = OpAMD64CVTSQ2SS
710                 return true
711         case OpCvt64to64F:
712                 v.Op = OpAMD64CVTSQ2SD
713                 return true
714         case OpCvtBoolToUint8:
715                 v.Op = OpCopy
716                 return true
717         case OpDiv128u:
718                 v.Op = OpAMD64DIVQU2
719                 return true
720         case OpDiv16:
721                 return rewriteValueAMD64_OpDiv16(v)
722         case OpDiv16u:
723                 return rewriteValueAMD64_OpDiv16u(v)
724         case OpDiv32:
725                 return rewriteValueAMD64_OpDiv32(v)
726         case OpDiv32F:
727                 v.Op = OpAMD64DIVSS
728                 return true
729         case OpDiv32u:
730                 return rewriteValueAMD64_OpDiv32u(v)
731         case OpDiv64:
732                 return rewriteValueAMD64_OpDiv64(v)
733         case OpDiv64F:
734                 v.Op = OpAMD64DIVSD
735                 return true
736         case OpDiv64u:
737                 return rewriteValueAMD64_OpDiv64u(v)
738         case OpDiv8:
739                 return rewriteValueAMD64_OpDiv8(v)
740         case OpDiv8u:
741                 return rewriteValueAMD64_OpDiv8u(v)
742         case OpEq16:
743                 return rewriteValueAMD64_OpEq16(v)
744         case OpEq32:
745                 return rewriteValueAMD64_OpEq32(v)
746         case OpEq32F:
747                 return rewriteValueAMD64_OpEq32F(v)
748         case OpEq64:
749                 return rewriteValueAMD64_OpEq64(v)
750         case OpEq64F:
751                 return rewriteValueAMD64_OpEq64F(v)
752         case OpEq8:
753                 return rewriteValueAMD64_OpEq8(v)
754         case OpEqB:
755                 return rewriteValueAMD64_OpEqB(v)
756         case OpEqPtr:
757                 return rewriteValueAMD64_OpEqPtr(v)
758         case OpFMA:
759                 return rewriteValueAMD64_OpFMA(v)
760         case OpFloor:
761                 return rewriteValueAMD64_OpFloor(v)
762         case OpGetCallerPC:
763                 v.Op = OpAMD64LoweredGetCallerPC
764                 return true
765         case OpGetCallerSP:
766                 v.Op = OpAMD64LoweredGetCallerSP
767                 return true
768         case OpGetClosurePtr:
769                 v.Op = OpAMD64LoweredGetClosurePtr
770                 return true
771         case OpGetG:
772                 return rewriteValueAMD64_OpGetG(v)
773         case OpHasCPUFeature:
774                 return rewriteValueAMD64_OpHasCPUFeature(v)
775         case OpHmul32:
776                 v.Op = OpAMD64HMULL
777                 return true
778         case OpHmul32u:
779                 v.Op = OpAMD64HMULLU
780                 return true
781         case OpHmul64:
782                 v.Op = OpAMD64HMULQ
783                 return true
784         case OpHmul64u:
785                 v.Op = OpAMD64HMULQU
786                 return true
787         case OpInterCall:
788                 v.Op = OpAMD64CALLinter
789                 return true
790         case OpIsInBounds:
791                 return rewriteValueAMD64_OpIsInBounds(v)
792         case OpIsNonNil:
793                 return rewriteValueAMD64_OpIsNonNil(v)
794         case OpIsSliceInBounds:
795                 return rewriteValueAMD64_OpIsSliceInBounds(v)
796         case OpLeq16:
797                 return rewriteValueAMD64_OpLeq16(v)
798         case OpLeq16U:
799                 return rewriteValueAMD64_OpLeq16U(v)
800         case OpLeq32:
801                 return rewriteValueAMD64_OpLeq32(v)
802         case OpLeq32F:
803                 return rewriteValueAMD64_OpLeq32F(v)
804         case OpLeq32U:
805                 return rewriteValueAMD64_OpLeq32U(v)
806         case OpLeq64:
807                 return rewriteValueAMD64_OpLeq64(v)
808         case OpLeq64F:
809                 return rewriteValueAMD64_OpLeq64F(v)
810         case OpLeq64U:
811                 return rewriteValueAMD64_OpLeq64U(v)
812         case OpLeq8:
813                 return rewriteValueAMD64_OpLeq8(v)
814         case OpLeq8U:
815                 return rewriteValueAMD64_OpLeq8U(v)
816         case OpLess16:
817                 return rewriteValueAMD64_OpLess16(v)
818         case OpLess16U:
819                 return rewriteValueAMD64_OpLess16U(v)
820         case OpLess32:
821                 return rewriteValueAMD64_OpLess32(v)
822         case OpLess32F:
823                 return rewriteValueAMD64_OpLess32F(v)
824         case OpLess32U:
825                 return rewriteValueAMD64_OpLess32U(v)
826         case OpLess64:
827                 return rewriteValueAMD64_OpLess64(v)
828         case OpLess64F:
829                 return rewriteValueAMD64_OpLess64F(v)
830         case OpLess64U:
831                 return rewriteValueAMD64_OpLess64U(v)
832         case OpLess8:
833                 return rewriteValueAMD64_OpLess8(v)
834         case OpLess8U:
835                 return rewriteValueAMD64_OpLess8U(v)
836         case OpLoad:
837                 return rewriteValueAMD64_OpLoad(v)
838         case OpLocalAddr:
839                 return rewriteValueAMD64_OpLocalAddr(v)
840         case OpLsh16x16:
841                 return rewriteValueAMD64_OpLsh16x16(v)
842         case OpLsh16x32:
843                 return rewriteValueAMD64_OpLsh16x32(v)
844         case OpLsh16x64:
845                 return rewriteValueAMD64_OpLsh16x64(v)
846         case OpLsh16x8:
847                 return rewriteValueAMD64_OpLsh16x8(v)
848         case OpLsh32x16:
849                 return rewriteValueAMD64_OpLsh32x16(v)
850         case OpLsh32x32:
851                 return rewriteValueAMD64_OpLsh32x32(v)
852         case OpLsh32x64:
853                 return rewriteValueAMD64_OpLsh32x64(v)
854         case OpLsh32x8:
855                 return rewriteValueAMD64_OpLsh32x8(v)
856         case OpLsh64x16:
857                 return rewriteValueAMD64_OpLsh64x16(v)
858         case OpLsh64x32:
859                 return rewriteValueAMD64_OpLsh64x32(v)
860         case OpLsh64x64:
861                 return rewriteValueAMD64_OpLsh64x64(v)
862         case OpLsh64x8:
863                 return rewriteValueAMD64_OpLsh64x8(v)
864         case OpLsh8x16:
865                 return rewriteValueAMD64_OpLsh8x16(v)
866         case OpLsh8x32:
867                 return rewriteValueAMD64_OpLsh8x32(v)
868         case OpLsh8x64:
869                 return rewriteValueAMD64_OpLsh8x64(v)
870         case OpLsh8x8:
871                 return rewriteValueAMD64_OpLsh8x8(v)
872         case OpMod16:
873                 return rewriteValueAMD64_OpMod16(v)
874         case OpMod16u:
875                 return rewriteValueAMD64_OpMod16u(v)
876         case OpMod32:
877                 return rewriteValueAMD64_OpMod32(v)
878         case OpMod32u:
879                 return rewriteValueAMD64_OpMod32u(v)
880         case OpMod64:
881                 return rewriteValueAMD64_OpMod64(v)
882         case OpMod64u:
883                 return rewriteValueAMD64_OpMod64u(v)
884         case OpMod8:
885                 return rewriteValueAMD64_OpMod8(v)
886         case OpMod8u:
887                 return rewriteValueAMD64_OpMod8u(v)
888         case OpMove:
889                 return rewriteValueAMD64_OpMove(v)
890         case OpMul16:
891                 v.Op = OpAMD64MULL
892                 return true
893         case OpMul32:
894                 v.Op = OpAMD64MULL
895                 return true
896         case OpMul32F:
897                 v.Op = OpAMD64MULSS
898                 return true
899         case OpMul64:
900                 v.Op = OpAMD64MULQ
901                 return true
902         case OpMul64F:
903                 v.Op = OpAMD64MULSD
904                 return true
905         case OpMul64uhilo:
906                 v.Op = OpAMD64MULQU2
907                 return true
908         case OpMul8:
909                 v.Op = OpAMD64MULL
910                 return true
911         case OpNeg16:
912                 v.Op = OpAMD64NEGL
913                 return true
914         case OpNeg32:
915                 v.Op = OpAMD64NEGL
916                 return true
917         case OpNeg32F:
918                 return rewriteValueAMD64_OpNeg32F(v)
919         case OpNeg64:
920                 v.Op = OpAMD64NEGQ
921                 return true
922         case OpNeg64F:
923                 return rewriteValueAMD64_OpNeg64F(v)
924         case OpNeg8:
925                 v.Op = OpAMD64NEGL
926                 return true
927         case OpNeq16:
928                 return rewriteValueAMD64_OpNeq16(v)
929         case OpNeq32:
930                 return rewriteValueAMD64_OpNeq32(v)
931         case OpNeq32F:
932                 return rewriteValueAMD64_OpNeq32F(v)
933         case OpNeq64:
934                 return rewriteValueAMD64_OpNeq64(v)
935         case OpNeq64F:
936                 return rewriteValueAMD64_OpNeq64F(v)
937         case OpNeq8:
938                 return rewriteValueAMD64_OpNeq8(v)
939         case OpNeqB:
940                 return rewriteValueAMD64_OpNeqB(v)
941         case OpNeqPtr:
942                 return rewriteValueAMD64_OpNeqPtr(v)
943         case OpNilCheck:
944                 v.Op = OpAMD64LoweredNilCheck
945                 return true
946         case OpNot:
947                 return rewriteValueAMD64_OpNot(v)
948         case OpOffPtr:
949                 return rewriteValueAMD64_OpOffPtr(v)
950         case OpOr16:
951                 v.Op = OpAMD64ORL
952                 return true
953         case OpOr32:
954                 v.Op = OpAMD64ORL
955                 return true
956         case OpOr64:
957                 v.Op = OpAMD64ORQ
958                 return true
959         case OpOr8:
960                 v.Op = OpAMD64ORL
961                 return true
962         case OpOrB:
963                 v.Op = OpAMD64ORL
964                 return true
965         case OpPanicBounds:
966                 return rewriteValueAMD64_OpPanicBounds(v)
967         case OpPopCount16:
968                 return rewriteValueAMD64_OpPopCount16(v)
969         case OpPopCount32:
970                 v.Op = OpAMD64POPCNTL
971                 return true
972         case OpPopCount64:
973                 v.Op = OpAMD64POPCNTQ
974                 return true
975         case OpPopCount8:
976                 return rewriteValueAMD64_OpPopCount8(v)
977         case OpRotateLeft16:
978                 v.Op = OpAMD64ROLW
979                 return true
980         case OpRotateLeft32:
981                 v.Op = OpAMD64ROLL
982                 return true
983         case OpRotateLeft64:
984                 v.Op = OpAMD64ROLQ
985                 return true
986         case OpRotateLeft8:
987                 v.Op = OpAMD64ROLB
988                 return true
989         case OpRound32F:
990                 v.Op = OpCopy
991                 return true
992         case OpRound64F:
993                 v.Op = OpCopy
994                 return true
995         case OpRoundToEven:
996                 return rewriteValueAMD64_OpRoundToEven(v)
997         case OpRsh16Ux16:
998                 return rewriteValueAMD64_OpRsh16Ux16(v)
999         case OpRsh16Ux32:
1000                 return rewriteValueAMD64_OpRsh16Ux32(v)
1001         case OpRsh16Ux64:
1002                 return rewriteValueAMD64_OpRsh16Ux64(v)
1003         case OpRsh16Ux8:
1004                 return rewriteValueAMD64_OpRsh16Ux8(v)
1005         case OpRsh16x16:
1006                 return rewriteValueAMD64_OpRsh16x16(v)
1007         case OpRsh16x32:
1008                 return rewriteValueAMD64_OpRsh16x32(v)
1009         case OpRsh16x64:
1010                 return rewriteValueAMD64_OpRsh16x64(v)
1011         case OpRsh16x8:
1012                 return rewriteValueAMD64_OpRsh16x8(v)
1013         case OpRsh32Ux16:
1014                 return rewriteValueAMD64_OpRsh32Ux16(v)
1015         case OpRsh32Ux32:
1016                 return rewriteValueAMD64_OpRsh32Ux32(v)
1017         case OpRsh32Ux64:
1018                 return rewriteValueAMD64_OpRsh32Ux64(v)
1019         case OpRsh32Ux8:
1020                 return rewriteValueAMD64_OpRsh32Ux8(v)
1021         case OpRsh32x16:
1022                 return rewriteValueAMD64_OpRsh32x16(v)
1023         case OpRsh32x32:
1024                 return rewriteValueAMD64_OpRsh32x32(v)
1025         case OpRsh32x64:
1026                 return rewriteValueAMD64_OpRsh32x64(v)
1027         case OpRsh32x8:
1028                 return rewriteValueAMD64_OpRsh32x8(v)
1029         case OpRsh64Ux16:
1030                 return rewriteValueAMD64_OpRsh64Ux16(v)
1031         case OpRsh64Ux32:
1032                 return rewriteValueAMD64_OpRsh64Ux32(v)
1033         case OpRsh64Ux64:
1034                 return rewriteValueAMD64_OpRsh64Ux64(v)
1035         case OpRsh64Ux8:
1036                 return rewriteValueAMD64_OpRsh64Ux8(v)
1037         case OpRsh64x16:
1038                 return rewriteValueAMD64_OpRsh64x16(v)
1039         case OpRsh64x32:
1040                 return rewriteValueAMD64_OpRsh64x32(v)
1041         case OpRsh64x64:
1042                 return rewriteValueAMD64_OpRsh64x64(v)
1043         case OpRsh64x8:
1044                 return rewriteValueAMD64_OpRsh64x8(v)
1045         case OpRsh8Ux16:
1046                 return rewriteValueAMD64_OpRsh8Ux16(v)
1047         case OpRsh8Ux32:
1048                 return rewriteValueAMD64_OpRsh8Ux32(v)
1049         case OpRsh8Ux64:
1050                 return rewriteValueAMD64_OpRsh8Ux64(v)
1051         case OpRsh8Ux8:
1052                 return rewriteValueAMD64_OpRsh8Ux8(v)
1053         case OpRsh8x16:
1054                 return rewriteValueAMD64_OpRsh8x16(v)
1055         case OpRsh8x32:
1056                 return rewriteValueAMD64_OpRsh8x32(v)
1057         case OpRsh8x64:
1058                 return rewriteValueAMD64_OpRsh8x64(v)
1059         case OpRsh8x8:
1060                 return rewriteValueAMD64_OpRsh8x8(v)
1061         case OpSelect0:
1062                 return rewriteValueAMD64_OpSelect0(v)
1063         case OpSelect1:
1064                 return rewriteValueAMD64_OpSelect1(v)
1065         case OpSignExt16to32:
1066                 v.Op = OpAMD64MOVWQSX
1067                 return true
1068         case OpSignExt16to64:
1069                 v.Op = OpAMD64MOVWQSX
1070                 return true
1071         case OpSignExt32to64:
1072                 v.Op = OpAMD64MOVLQSX
1073                 return true
1074         case OpSignExt8to16:
1075                 v.Op = OpAMD64MOVBQSX
1076                 return true
1077         case OpSignExt8to32:
1078                 v.Op = OpAMD64MOVBQSX
1079                 return true
1080         case OpSignExt8to64:
1081                 v.Op = OpAMD64MOVBQSX
1082                 return true
1083         case OpSlicemask:
1084                 return rewriteValueAMD64_OpSlicemask(v)
1085         case OpSpectreIndex:
1086                 return rewriteValueAMD64_OpSpectreIndex(v)
1087         case OpSpectreSliceIndex:
1088                 return rewriteValueAMD64_OpSpectreSliceIndex(v)
1089         case OpSqrt:
1090                 v.Op = OpAMD64SQRTSD
1091                 return true
1092         case OpSqrt32:
1093                 v.Op = OpAMD64SQRTSS
1094                 return true
1095         case OpStaticCall:
1096                 v.Op = OpAMD64CALLstatic
1097                 return true
1098         case OpStore:
1099                 return rewriteValueAMD64_OpStore(v)
1100         case OpSub16:
1101                 v.Op = OpAMD64SUBL
1102                 return true
1103         case OpSub32:
1104                 v.Op = OpAMD64SUBL
1105                 return true
1106         case OpSub32F:
1107                 v.Op = OpAMD64SUBSS
1108                 return true
1109         case OpSub64:
1110                 v.Op = OpAMD64SUBQ
1111                 return true
1112         case OpSub64F:
1113                 v.Op = OpAMD64SUBSD
1114                 return true
1115         case OpSub8:
1116                 v.Op = OpAMD64SUBL
1117                 return true
1118         case OpSubPtr:
1119                 v.Op = OpAMD64SUBQ
1120                 return true
1121         case OpTrunc:
1122                 return rewriteValueAMD64_OpTrunc(v)
1123         case OpTrunc16to8:
1124                 v.Op = OpCopy
1125                 return true
1126         case OpTrunc32to16:
1127                 v.Op = OpCopy
1128                 return true
1129         case OpTrunc32to8:
1130                 v.Op = OpCopy
1131                 return true
1132         case OpTrunc64to16:
1133                 v.Op = OpCopy
1134                 return true
1135         case OpTrunc64to32:
1136                 v.Op = OpCopy
1137                 return true
1138         case OpTrunc64to8:
1139                 v.Op = OpCopy
1140                 return true
1141         case OpWB:
1142                 v.Op = OpAMD64LoweredWB
1143                 return true
1144         case OpXor16:
1145                 v.Op = OpAMD64XORL
1146                 return true
1147         case OpXor32:
1148                 v.Op = OpAMD64XORL
1149                 return true
1150         case OpXor64:
1151                 v.Op = OpAMD64XORQ
1152                 return true
1153         case OpXor8:
1154                 v.Op = OpAMD64XORL
1155                 return true
1156         case OpZero:
1157                 return rewriteValueAMD64_OpZero(v)
1158         case OpZeroExt16to32:
1159                 v.Op = OpAMD64MOVWQZX
1160                 return true
1161         case OpZeroExt16to64:
1162                 v.Op = OpAMD64MOVWQZX
1163                 return true
1164         case OpZeroExt32to64:
1165                 v.Op = OpAMD64MOVLQZX
1166                 return true
1167         case OpZeroExt8to16:
1168                 v.Op = OpAMD64MOVBQZX
1169                 return true
1170         case OpZeroExt8to32:
1171                 v.Op = OpAMD64MOVBQZX
1172                 return true
1173         case OpZeroExt8to64:
1174                 v.Op = OpAMD64MOVBQZX
1175                 return true
1176         }
1177         return false
1178 }
// rewriteValueAMD64_OpAMD64ADCQ applies the machine-generated rewrite
// rules for the AMD64 ADCQ op (64-bit add with carry-in, three args:
// x, y, carry). It rewrites v in place and reports whether a rule fired.
// Rule order matters: rules are tried top to bottom, first match wins.
func rewriteValueAMD64_OpAMD64ADCQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADCQ x (MOVQconst [c]) carry)
	// cond: is32Bit(c)
	// result: (ADCQconst x [int32(c)] carry)
	for {
		// The first two operands are commutative; this loop retries the
		// match with v_0 and v_1 swapped on the second iteration.
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			carry := v_2
			// Only constants that fit in a sign-extended 32-bit
			// immediate can be encoded in ADCQconst.
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ADCQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg2(x, carry)
			return true
		}
		break
	}
	// match: (ADCQ x y (FlagEQ))
	// result: (ADDQcarry x y)
	for {
		x := v_0
		y := v_1
		// Carry-in is a known constant flags value (FlagEQ), so the
		// add-with-carry can be replaced by a plain carry-producing add.
		// NOTE(review): this presumes FlagEQ implies carry clear — the
		// authoritative statement is the rule in gen/AMD64.rules.
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64ADDQcarry)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADCQconst applies the machine-generated
// rewrite rules for the AMD64 ADCQconst op (64-bit add-with-carry of a
// 32-bit immediate). It rewrites v in place and reports whether a rule
// fired.
func rewriteValueAMD64_OpAMD64ADCQconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADCQconst x [c] (FlagEQ))
	// result: (ADDQconstcarry x [c])
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		// Carry-in is the constant flags value FlagEQ, so the
		// carry-consuming form can be dropped in favor of a plain
		// carry-producing constant add.
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64ADDQconstcarry)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDL applies the machine-generated rewrite
// rules for the AMD64 ADDL op (32-bit add). The rules, tried in order:
// fold a constant operand into ADDLconst; recognize shift-pair rotate
// idioms (ROLLconst/ROLWconst/ROLBconst); turn shift/add shapes into
// LEA scaled-index forms (LEAL8/LEAL4/LEAL2/LEAL1); turn add-of-NEGL
// into SUBL; and fold a single-use memory load into the add (ADDLload).
// It rewrites v in place and reports whether a rule fired.
func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDL x (MOVLconst [c]))
	// result: (ADDLconst [c] x)
	for {
		// ADDL is commutative; the loop retries with v_0/v_1 swapped.
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ADDLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLLconst {
				continue
			}
			c := auxIntToInt8(v_0.AuxInt)
			x := v_0.Args[0]
			if v_1.Op != OpAMD64SHRLconst {
				continue
			}
			d := auxIntToInt8(v_1.AuxInt)
			// (x<<c) + (x>>(32-c)) of the same x is a 32-bit
			// rotate-left by c.
			if x != v_1.Args[0] || !(d == 32-c) {
				continue
			}
			v.reset(OpAMD64ROLLconst)
			v.AuxInt = int8ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLLconst {
				continue
			}
			c := auxIntToInt8(v_0.AuxInt)
			x := v_0.Args[0]
			if v_1.Op != OpAMD64SHRWconst {
				continue
			}
			d := auxIntToInt8(v_1.AuxInt)
			// 16-bit rotate idiom; only valid when the result type is
			// actually 2 bytes wide.
			if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) {
				continue
			}
			v.reset(OpAMD64ROLWconst)
			v.AuxInt = int8ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLLconst {
				continue
			}
			c := auxIntToInt8(v_0.AuxInt)
			x := v_0.Args[0]
			if v_1.Op != OpAMD64SHRBconst {
				continue
			}
			d := auxIntToInt8(v_1.AuxInt)
			// 8-bit rotate idiom; only valid when the result type is
			// actually 1 byte wide.
			if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) {
				continue
			}
			v.reset(OpAMD64ROLBconst)
			v.AuxInt = int8ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDL x (SHLLconst [3] y))
	// result: (LEAL8 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
				continue
			}
			y := v_1.Args[0]
			// x + (y<<3) matches LEA's base + index*8 addressing form.
			v.reset(OpAMD64LEAL8)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (SHLLconst [2] y))
	// result: (LEAL4 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
				continue
			}
			y := v_1.Args[0]
			// x + (y<<2) matches LEA's base + index*4 addressing form.
			v.reset(OpAMD64LEAL4)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (SHLLconst [1] y))
	// result: (LEAL2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
				continue
			}
			y := v_1.Args[0]
			// x + (y<<1) matches LEA's base + index*2 addressing form.
			v.reset(OpAMD64LEAL2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (ADDL y y))
	// result: (LEAL2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDL {
				continue
			}
			// y+y is y*2, so x + (y+y) is LEA base + index*2.
			y := v_1.Args[1]
			if y != v_1.Args[0] {
				continue
			}
			v.reset(OpAMD64LEAL2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (ADDL x y))
	// result: (LEAL2 y x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDL {
				continue
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			// The inner ADDL is commutative too; a second swap loop
			// tries both operand orders of the nested add.
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if x != v_1_0 {
					continue
				}
				y := v_1_1
				// x + (x+y) = y + x*2: LEA base y, index x, scale 2.
				v.reset(OpAMD64LEAL2)
				v.AddArg2(y, x)
				return true
			}
		}
		break
	}
	// match: (ADDL (ADDLconst [c] x) y)
	// result: (LEAL1 [c] x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			y := v_1
			// (x+c) + y folds into a single LEA with displacement c.
			v.reset(OpAMD64LEAL1)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (LEAL [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAL1 [c] {s} x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAL {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			s := auxToSym(v_1.Aux)
			y := v_1.Args[0]
			// NOTE(review): SB (the static base pseudo-register) is
			// excluded from both operand slots — presumably because it
			// cannot appear as an index; confirm against gen/AMD64.rules.
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAL1)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (NEGL y))
	// result: (SUBL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGL {
				continue
			}
			y := v_1.Args[0]
			// x + (-y) = x - y.
			v.reset(OpAMD64SUBL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDLload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			// Fold the load into the add. canMergeLoadClobber gates the
			// transform (presumably: the load is only used here and the
			// merge is legal — see its definition); clobber(l) marks the
			// old load value for removal.
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDLload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDLconst applies the ADDLconst rewrite rules from
// gen/AMD64.rules: fold the constant into ADDL/SHLLconst/LEAL* operands to form
// LEAL1..LEAL8, merge with MOVLconst/ADDLconst, elide a zero add, and turn an
// SP-relative add into a LEAL. Each `for { ... }` loop is one rule, tried in
// order; the first rule that matches rewrites v in place and returns true.
// Returns false when no rule applies.
// NOTE(review): generated code — edit gen/AMD64.rules and regenerate; do not
// modify this function by hand.
1497 func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool {
1498         v_0 := v.Args[0]
1499         // match: (ADDLconst [c] (ADDL x y))
1500         // result: (LEAL1 [c] x y)
1501         for {
1502                 c := auxIntToInt32(v.AuxInt)
1503                 if v_0.Op != OpAMD64ADDL {
1504                         break
1505                 }
1506                 y := v_0.Args[1]
1507                 x := v_0.Args[0]
1508                 v.reset(OpAMD64LEAL1)
1509                 v.AuxInt = int32ToAuxInt(c)
1510                 v.AddArg2(x, y)
1511                 return true
1512         }
1513         // match: (ADDLconst [c] (SHLLconst [1] x))
1514         // result: (LEAL1 [c] x x)
1515         for {
1516                 c := auxIntToInt32(v.AuxInt)
1517                 if v_0.Op != OpAMD64SHLLconst || auxIntToInt8(v_0.AuxInt) != 1 {
1518                         break
1519                 }
1520                 x := v_0.Args[0]
1521                 v.reset(OpAMD64LEAL1)
1522                 v.AuxInt = int32ToAuxInt(c)
1523                 v.AddArg2(x, x)
1524                 return true
1525         }
1526         // match: (ADDLconst [c] (LEAL [d] {s} x))
1527         // cond: is32Bit(int64(c)+int64(d))
1528         // result: (LEAL [c+d] {s} x)
1529         for {
1530                 c := auxIntToInt32(v.AuxInt)
1531                 if v_0.Op != OpAMD64LEAL {
1532                         break
1533                 }
1534                 d := auxIntToInt32(v_0.AuxInt)
1535                 s := auxToSym(v_0.Aux)
1536                 x := v_0.Args[0]
1537                 if !(is32Bit(int64(c) + int64(d))) {
1538                         break
1539                 }
1540                 v.reset(OpAMD64LEAL)
1541                 v.AuxInt = int32ToAuxInt(c + d)
1542                 v.Aux = symToAux(s)
1543                 v.AddArg(x)
1544                 return true
1545         }
1546         // match: (ADDLconst [c] (LEAL1 [d] {s} x y))
1547         // cond: is32Bit(int64(c)+int64(d))
1548         // result: (LEAL1 [c+d] {s} x y)
1549         for {
1550                 c := auxIntToInt32(v.AuxInt)
1551                 if v_0.Op != OpAMD64LEAL1 {
1552                         break
1553                 }
1554                 d := auxIntToInt32(v_0.AuxInt)
1555                 s := auxToSym(v_0.Aux)
1556                 y := v_0.Args[1]
1557                 x := v_0.Args[0]
1558                 if !(is32Bit(int64(c) + int64(d))) {
1559                         break
1560                 }
1561                 v.reset(OpAMD64LEAL1)
1562                 v.AuxInt = int32ToAuxInt(c + d)
1563                 v.Aux = symToAux(s)
1564                 v.AddArg2(x, y)
1565                 return true
1566         }
1567         // match: (ADDLconst [c] (LEAL2 [d] {s} x y))
1568         // cond: is32Bit(int64(c)+int64(d))
1569         // result: (LEAL2 [c+d] {s} x y)
1570         for {
1571                 c := auxIntToInt32(v.AuxInt)
1572                 if v_0.Op != OpAMD64LEAL2 {
1573                         break
1574                 }
1575                 d := auxIntToInt32(v_0.AuxInt)
1576                 s := auxToSym(v_0.Aux)
1577                 y := v_0.Args[1]
1578                 x := v_0.Args[0]
1579                 if !(is32Bit(int64(c) + int64(d))) {
1580                         break
1581                 }
1582                 v.reset(OpAMD64LEAL2)
1583                 v.AuxInt = int32ToAuxInt(c + d)
1584                 v.Aux = symToAux(s)
1585                 v.AddArg2(x, y)
1586                 return true
1587         }
1588         // match: (ADDLconst [c] (LEAL4 [d] {s} x y))
1589         // cond: is32Bit(int64(c)+int64(d))
1590         // result: (LEAL4 [c+d] {s} x y)
1591         for {
1592                 c := auxIntToInt32(v.AuxInt)
1593                 if v_0.Op != OpAMD64LEAL4 {
1594                         break
1595                 }
1596                 d := auxIntToInt32(v_0.AuxInt)
1597                 s := auxToSym(v_0.Aux)
1598                 y := v_0.Args[1]
1599                 x := v_0.Args[0]
1600                 if !(is32Bit(int64(c) + int64(d))) {
1601                         break
1602                 }
1603                 v.reset(OpAMD64LEAL4)
1604                 v.AuxInt = int32ToAuxInt(c + d)
1605                 v.Aux = symToAux(s)
1606                 v.AddArg2(x, y)
1607                 return true
1608         }
1609         // match: (ADDLconst [c] (LEAL8 [d] {s} x y))
1610         // cond: is32Bit(int64(c)+int64(d))
1611         // result: (LEAL8 [c+d] {s} x y)
1612         for {
1613                 c := auxIntToInt32(v.AuxInt)
1614                 if v_0.Op != OpAMD64LEAL8 {
1615                         break
1616                 }
1617                 d := auxIntToInt32(v_0.AuxInt)
1618                 s := auxToSym(v_0.Aux)
1619                 y := v_0.Args[1]
1620                 x := v_0.Args[0]
1621                 if !(is32Bit(int64(c) + int64(d))) {
1622                         break
1623                 }
1624                 v.reset(OpAMD64LEAL8)
1625                 v.AuxInt = int32ToAuxInt(c + d)
1626                 v.Aux = symToAux(s)
1627                 v.AddArg2(x, y)
1628                 return true
1629         }
1630         // match: (ADDLconst [c] x)
1631         // cond: c==0
1632         // result: x
1633         for {
1634                 c := auxIntToInt32(v.AuxInt)
1635                 x := v_0
1636                 if !(c == 0) {
1637                         break
1638                 }
1639                 v.copyOf(x)
1640                 return true
1641         }
1642         // match: (ADDLconst [c] (MOVLconst [d]))
1643         // result: (MOVLconst [c+d])
1644         for {
1645                 c := auxIntToInt32(v.AuxInt)
1646                 if v_0.Op != OpAMD64MOVLconst {
1647                         break
1648                 }
1649                 d := auxIntToInt32(v_0.AuxInt)
1650                 v.reset(OpAMD64MOVLconst)
1651                 v.AuxInt = int32ToAuxInt(c + d)
1652                 return true
1653         }
1654         // match: (ADDLconst [c] (ADDLconst [d] x))
1655         // result: (ADDLconst [c+d] x)
1656         for {
1657                 c := auxIntToInt32(v.AuxInt)
1658                 if v_0.Op != OpAMD64ADDLconst {
1659                         break
1660                 }
1661                 d := auxIntToInt32(v_0.AuxInt)
1662                 x := v_0.Args[0]
1663                 v.reset(OpAMD64ADDLconst)
1664                 v.AuxInt = int32ToAuxInt(c + d)
1665                 v.AddArg(x)
1666                 return true
1667         }
1668         // match: (ADDLconst [off] x:(SP))
1669         // result: (LEAL [off] x)
1670         for {
1671                 off := auxIntToInt32(v.AuxInt)
1672                 x := v_0
1673                 if x.Op != OpSP {
1674                         break
1675                 }
1676                 v.reset(OpAMD64LEAL)
1677                 v.AuxInt = int32ToAuxInt(off)
1678                 v.AddArg(x)
1679                 return true
1680         }
1681         return false
1682 }
// rewriteValueAMD64_OpAMD64ADDLconstmodify applies the ADDLconstmodify rules
// from gen/AMD64.rules: absorb an ADDQconst or LEAQ address computation in the
// base operand into this op's ValAndOff offset (merging symbols for the LEAQ
// case), provided the combined offset still fits (canAdd32/canMergeSym).
// Returns true if v was rewritten, false otherwise.
// NOTE(review): generated code — edit gen/AMD64.rules and regenerate; do not
// modify this function by hand.
1683 func rewriteValueAMD64_OpAMD64ADDLconstmodify(v *Value) bool {
1684         v_1 := v.Args[1]
1685         v_0 := v.Args[0]
1686         // match: (ADDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
1687         // cond: ValAndOff(valoff1).canAdd32(off2)
1688         // result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
1689         for {
1690                 valoff1 := auxIntToValAndOff(v.AuxInt)
1691                 sym := auxToSym(v.Aux)
1692                 if v_0.Op != OpAMD64ADDQconst {
1693                         break
1694                 }
1695                 off2 := auxIntToInt32(v_0.AuxInt)
1696                 base := v_0.Args[0]
1697                 mem := v_1
1698                 if !(ValAndOff(valoff1).canAdd32(off2)) {
1699                         break
1700                 }
1701                 v.reset(OpAMD64ADDLconstmodify)
1702                 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
1703                 v.Aux = symToAux(sym)
1704                 v.AddArg2(base, mem)
1705                 return true
1706         }
1707         // match: (ADDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
1708         // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
1709         // result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
1710         for {
1711                 valoff1 := auxIntToValAndOff(v.AuxInt)
1712                 sym1 := auxToSym(v.Aux)
1713                 if v_0.Op != OpAMD64LEAQ {
1714                         break
1715                 }
1716                 off2 := auxIntToInt32(v_0.AuxInt)
1717                 sym2 := auxToSym(v_0.Aux)
1718                 base := v_0.Args[0]
1719                 mem := v_1
1720                 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
1721                         break
1722                 }
1723                 v.reset(OpAMD64ADDLconstmodify)
1724                 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
1725                 v.Aux = symToAux(mergeSym(sym1, sym2))
1726                 v.AddArg2(base, mem)
1727                 return true
1728         }
1729         return false
1730 }
// rewriteValueAMD64_OpAMD64ADDLload applies the ADDLload rules from
// gen/AMD64.rules: fold an ADDQconst/LEAQ address computation on the pointer
// operand into this op's offset/symbol (when the combined 32-bit offset and
// merged symbol are valid), and forward a value just stored by MOVSSstore at
// the same address as (ADDL x (MOVLf2i y)) instead of reloading it from
// memory. Returns true if v was rewritten, false otherwise.
// NOTE(review): generated code — edit gen/AMD64.rules and regenerate; do not
// modify this function by hand.
1731 func rewriteValueAMD64_OpAMD64ADDLload(v *Value) bool {
1732         v_2 := v.Args[2]
1733         v_1 := v.Args[1]
1734         v_0 := v.Args[0]
1735         b := v.Block
1736         typ := &b.Func.Config.Types
1737         // match: (ADDLload [off1] {sym} val (ADDQconst [off2] base) mem)
1738         // cond: is32Bit(int64(off1)+int64(off2))
1739         // result: (ADDLload [off1+off2] {sym} val base mem)
1740         for {
1741                 off1 := auxIntToInt32(v.AuxInt)
1742                 sym := auxToSym(v.Aux)
1743                 val := v_0
1744                 if v_1.Op != OpAMD64ADDQconst {
1745                         break
1746                 }
1747                 off2 := auxIntToInt32(v_1.AuxInt)
1748                 base := v_1.Args[0]
1749                 mem := v_2
1750                 if !(is32Bit(int64(off1) + int64(off2))) {
1751                         break
1752                 }
1753                 v.reset(OpAMD64ADDLload)
1754                 v.AuxInt = int32ToAuxInt(off1 + off2)
1755                 v.Aux = symToAux(sym)
1756                 v.AddArg3(val, base, mem)
1757                 return true
1758         }
1759         // match: (ADDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
1760         // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
1761         // result: (ADDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
1762         for {
1763                 off1 := auxIntToInt32(v.AuxInt)
1764                 sym1 := auxToSym(v.Aux)
1765                 val := v_0
1766                 if v_1.Op != OpAMD64LEAQ {
1767                         break
1768                 }
1769                 off2 := auxIntToInt32(v_1.AuxInt)
1770                 sym2 := auxToSym(v_1.Aux)
1771                 base := v_1.Args[0]
1772                 mem := v_2
1773                 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
1774                         break
1775                 }
1776                 v.reset(OpAMD64ADDLload)
1777                 v.AuxInt = int32ToAuxInt(off1 + off2)
1778                 v.Aux = symToAux(mergeSym(sym1, sym2))
1779                 v.AddArg3(val, base, mem)
1780                 return true
1781         }
1782         // match: (ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
1783         // result: (ADDL x (MOVLf2i y))
1784         for {
1785                 off := auxIntToInt32(v.AuxInt)
1786                 sym := auxToSym(v.Aux)
1787                 x := v_0
1788                 ptr := v_1
1789                 if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
1790                         break
1791                 }
1792                 y := v_2.Args[1]
1793                 if ptr != v_2.Args[0] {
1794                         break
1795                 }
1796                 v.reset(OpAMD64ADDL)
1797                 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
1798                 v0.AddArg(y)
1799                 v.AddArg2(x, v0)
1800                 return true
1801         }
1802         return false
1803 }
// rewriteValueAMD64_OpAMD64ADDLmodify applies the ADDLmodify rules from
// gen/AMD64.rules: fold an ADDQconst or LEAQ address computation on the base
// operand into this op's offset/symbol, provided the combined offset stays
// 32-bit (and the symbols merge, for the LEAQ case). Returns true if v was
// rewritten, false otherwise.
// NOTE(review): generated code — edit gen/AMD64.rules and regenerate; do not
// modify this function by hand.
1804 func rewriteValueAMD64_OpAMD64ADDLmodify(v *Value) bool {
1805         v_2 := v.Args[2]
1806         v_1 := v.Args[1]
1807         v_0 := v.Args[0]
1808         // match: (ADDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
1809         // cond: is32Bit(int64(off1)+int64(off2))
1810         // result: (ADDLmodify [off1+off2] {sym} base val mem)
1811         for {
1812                 off1 := auxIntToInt32(v.AuxInt)
1813                 sym := auxToSym(v.Aux)
1814                 if v_0.Op != OpAMD64ADDQconst {
1815                         break
1816                 }
1817                 off2 := auxIntToInt32(v_0.AuxInt)
1818                 base := v_0.Args[0]
1819                 val := v_1
1820                 mem := v_2
1821                 if !(is32Bit(int64(off1) + int64(off2))) {
1822                         break
1823                 }
1824                 v.reset(OpAMD64ADDLmodify)
1825                 v.AuxInt = int32ToAuxInt(off1 + off2)
1826                 v.Aux = symToAux(sym)
1827                 v.AddArg3(base, val, mem)
1828                 return true
1829         }
1830         // match: (ADDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
1831         // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
1832         // result: (ADDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
1833         for {
1834                 off1 := auxIntToInt32(v.AuxInt)
1835                 sym1 := auxToSym(v.Aux)
1836                 if v_0.Op != OpAMD64LEAQ {
1837                         break
1838                 }
1839                 off2 := auxIntToInt32(v_0.AuxInt)
1840                 sym2 := auxToSym(v_0.Aux)
1841                 base := v_0.Args[0]
1842                 val := v_1
1843                 mem := v_2
1844                 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
1845                         break
1846                 }
1847                 v.reset(OpAMD64ADDLmodify)
1848                 v.AuxInt = int32ToAuxInt(off1 + off2)
1849                 v.Aux = symToAux(mergeSym(sym1, sym2))
1850                 v.AddArg3(base, val, mem)
1851                 return true
1852         }
1853         return false
1854 }
// rewriteValueAMD64_OpAMD64ADDQ applies the ADDQ rules from gen/AMD64.rules:
// fold constants into ADDQconst, recognize SHL/SHR pairs as ROLQconst, turn
// shifted/added operands into the LEAQ1/2/4/8 addressing forms, convert
// x+(-y) into SUBQ, and merge a clobberable MOVQload operand into ADDQload.
// The inner `for _i0 ...` loops try both operand orders of the commutative
// ADDQ. Rules run in order; the first match rewrites v in place and returns
// true; returns false when none apply.
// NOTE(review): generated code — edit gen/AMD64.rules and regenerate; do not
// modify this function by hand.
1855 func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool {
1856         v_1 := v.Args[1]
1857         v_0 := v.Args[0]
1858         // match: (ADDQ x (MOVQconst [c]))
1859         // cond: is32Bit(c)
1860         // result: (ADDQconst [int32(c)] x)
1861         for {
1862                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1863                         x := v_0
1864                         if v_1.Op != OpAMD64MOVQconst {
1865                                 continue
1866                         }
1867                         c := auxIntToInt64(v_1.AuxInt)
1868                         if !(is32Bit(c)) {
1869                                 continue
1870                         }
1871                         v.reset(OpAMD64ADDQconst)
1872                         v.AuxInt = int32ToAuxInt(int32(c))
1873                         v.AddArg(x)
1874                         return true
1875                 }
1876                 break
1877         }
1878         // match: (ADDQ x (MOVLconst [c]))
1879         // result: (ADDQconst [c] x)
1880         for {
1881                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1882                         x := v_0
1883                         if v_1.Op != OpAMD64MOVLconst {
1884                                 continue
1885                         }
1886                         c := auxIntToInt32(v_1.AuxInt)
1887                         v.reset(OpAMD64ADDQconst)
1888                         v.AuxInt = int32ToAuxInt(c)
1889                         v.AddArg(x)
1890                         return true
1891                 }
1892                 break
1893         }
1894         // match: (ADDQ (SHLQconst x [c]) (SHRQconst x [d]))
1895         // cond: d==64-c
1896         // result: (ROLQconst x [c])
1897         for {
1898                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1899                         if v_0.Op != OpAMD64SHLQconst {
1900                                 continue
1901                         }
1902                         c := auxIntToInt8(v_0.AuxInt)
1903                         x := v_0.Args[0]
1904                         if v_1.Op != OpAMD64SHRQconst {
1905                                 continue
1906                         }
1907                         d := auxIntToInt8(v_1.AuxInt)
1908                         if x != v_1.Args[0] || !(d == 64-c) {
1909                                 continue
1910                         }
1911                         v.reset(OpAMD64ROLQconst)
1912                         v.AuxInt = int8ToAuxInt(c)
1913                         v.AddArg(x)
1914                         return true
1915                 }
1916                 break
1917         }
1918         // match: (ADDQ x (SHLQconst [3] y))
1919         // result: (LEAQ8 x y)
1920         for {
1921                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1922                         x := v_0
1923                         if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
1924                                 continue
1925                         }
1926                         y := v_1.Args[0]
1927                         v.reset(OpAMD64LEAQ8)
1928                         v.AddArg2(x, y)
1929                         return true
1930                 }
1931                 break
1932         }
1933         // match: (ADDQ x (SHLQconst [2] y))
1934         // result: (LEAQ4 x y)
1935         for {
1936                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1937                         x := v_0
1938                         if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
1939                                 continue
1940                         }
1941                         y := v_1.Args[0]
1942                         v.reset(OpAMD64LEAQ4)
1943                         v.AddArg2(x, y)
1944                         return true
1945                 }
1946                 break
1947         }
1948         // match: (ADDQ x (SHLQconst [1] y))
1949         // result: (LEAQ2 x y)
1950         for {
1951                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1952                         x := v_0
1953                         if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
1954                                 continue
1955                         }
1956                         y := v_1.Args[0]
1957                         v.reset(OpAMD64LEAQ2)
1958                         v.AddArg2(x, y)
1959                         return true
1960                 }
1961                 break
1962         }
1963         // match: (ADDQ x (ADDQ y y))
1964         // result: (LEAQ2 x y)
1965         for {
1966                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1967                         x := v_0
1968                         if v_1.Op != OpAMD64ADDQ {
1969                                 continue
1970                         }
1971                         y := v_1.Args[1]
1972                         if y != v_1.Args[0] {
1973                                 continue
1974                         }
1975                         v.reset(OpAMD64LEAQ2)
1976                         v.AddArg2(x, y)
1977                         return true
1978                 }
1979                 break
1980         }
1981         // match: (ADDQ x (ADDQ x y))
1982         // result: (LEAQ2 y x)
1983         for {
1984                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1985                         x := v_0
1986                         if v_1.Op != OpAMD64ADDQ {
1987                                 continue
1988                         }
1989                         _ = v_1.Args[1]
1990                         v_1_0 := v_1.Args[0]
1991                         v_1_1 := v_1.Args[1]
1992                         for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
1993                                 if x != v_1_0 {
1994                                         continue
1995                                 }
1996                                 y := v_1_1
1997                                 v.reset(OpAMD64LEAQ2)
1998                                 v.AddArg2(y, x)
1999                                 return true
2000                         }
2001                 }
2002                 break
2003         }
2004         // match: (ADDQ (ADDQconst [c] x) y)
2005         // result: (LEAQ1 [c] x y)
2006         for {
2007                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2008                         if v_0.Op != OpAMD64ADDQconst {
2009                                 continue
2010                         }
2011                         c := auxIntToInt32(v_0.AuxInt)
2012                         x := v_0.Args[0]
2013                         y := v_1
2014                         v.reset(OpAMD64LEAQ1)
2015                         v.AuxInt = int32ToAuxInt(c)
2016                         v.AddArg2(x, y)
2017                         return true
2018                 }
2019                 break
2020         }
2021         // match: (ADDQ x (LEAQ [c] {s} y))
2022         // cond: x.Op != OpSB && y.Op != OpSB
2023         // result: (LEAQ1 [c] {s} x y)
2024         for {
2025                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2026                         x := v_0
2027                         if v_1.Op != OpAMD64LEAQ {
2028                                 continue
2029                         }
2030                         c := auxIntToInt32(v_1.AuxInt)
2031                         s := auxToSym(v_1.Aux)
2032                         y := v_1.Args[0]
2033                         if !(x.Op != OpSB && y.Op != OpSB) {
2034                                 continue
2035                         }
2036                         v.reset(OpAMD64LEAQ1)
2037                         v.AuxInt = int32ToAuxInt(c)
2038                         v.Aux = symToAux(s)
2039                         v.AddArg2(x, y)
2040                         return true
2041                 }
2042                 break
2043         }
2044         // match: (ADDQ x (NEGQ y))
2045         // result: (SUBQ x y)
2046         for {
2047                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2048                         x := v_0
2049                         if v_1.Op != OpAMD64NEGQ {
2050                                 continue
2051                         }
2052                         y := v_1.Args[0]
2053                         v.reset(OpAMD64SUBQ)
2054                         v.AddArg2(x, y)
2055                         return true
2056                 }
2057                 break
2058         }
2059         // match: (ADDQ x l:(MOVQload [off] {sym} ptr mem))
2060         // cond: canMergeLoadClobber(v, l, x) && clobber(l)
2061         // result: (ADDQload x [off] {sym} ptr mem)
2062         for {
2063                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2064                         x := v_0
2065                         l := v_1
2066                         if l.Op != OpAMD64MOVQload {
2067                                 continue
2068                         }
2069                         off := auxIntToInt32(l.AuxInt)
2070                         sym := auxToSym(l.Aux)
2071                         mem := l.Args[1]
2072                         ptr := l.Args[0]
2073                         if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
2074                                 continue
2075                         }
2076                         v.reset(OpAMD64ADDQload)
2077                         v.AuxInt = int32ToAuxInt(off)
2078                         v.Aux = symToAux(sym)
2079                         v.AddArg3(x, ptr, mem)
2080                         return true
2081                 }
2082                 break
2083         }
2084         return false
2085 }
// rewriteValueAMD64_OpAMD64ADDQcarry applies the single ADDQcarry rule from
// gen/AMD64.rules: when either operand is a MOVQconst whose value fits in 32
// bits, fold it into an ADDQconstcarry immediate (both operand orders are
// tried since the op is commutative). Returns true if v was rewritten.
// NOTE(review): generated code — edit gen/AMD64.rules and regenerate; do not
// modify this function by hand.
2086 func rewriteValueAMD64_OpAMD64ADDQcarry(v *Value) bool {
2087         v_1 := v.Args[1]
2088         v_0 := v.Args[0]
2089         // match: (ADDQcarry x (MOVQconst [c]))
2090         // cond: is32Bit(c)
2091         // result: (ADDQconstcarry x [int32(c)])
2092         for {
2093                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2094                         x := v_0
2095                         if v_1.Op != OpAMD64MOVQconst {
2096                                 continue
2097                         }
2098                         c := auxIntToInt64(v_1.AuxInt)
2099                         if !(is32Bit(c)) {
2100                                 continue
2101                         }
2102                         v.reset(OpAMD64ADDQconstcarry)
2103                         v.AuxInt = int32ToAuxInt(int32(c))
2104                         v.AddArg(x)
2105                         return true
2106                 }
2107                 break
2108         }
2109         return false
2110 }
// rewriteValueAMD64_OpAMD64ADDQconst applies the ADDQconst rules from
// gen/AMD64.rules: fold the constant into ADDQ/SHLQconst/LEAQ* operands to
// form LEAQ1..LEAQ8, merge with MOVQconst/ADDQconst (guarding 32-bit offset
// overflow via is32Bit), elide a zero add, and turn an SP-relative add into a
// LEAQ. Rules run in order; the first match rewrites v in place and returns
// true; returns false when none apply.
// NOTE(review): generated code — edit gen/AMD64.rules and regenerate; do not
// modify this function by hand.
2111 func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool {
2112         v_0 := v.Args[0]
2113         // match: (ADDQconst [c] (ADDQ x y))
2114         // result: (LEAQ1 [c] x y)
2115         for {
2116                 c := auxIntToInt32(v.AuxInt)
2117                 if v_0.Op != OpAMD64ADDQ {
2118                         break
2119                 }
2120                 y := v_0.Args[1]
2121                 x := v_0.Args[0]
2122                 v.reset(OpAMD64LEAQ1)
2123                 v.AuxInt = int32ToAuxInt(c)
2124                 v.AddArg2(x, y)
2125                 return true
2126         }
2127         // match: (ADDQconst [c] (SHLQconst [1] x))
2128         // result: (LEAQ1 [c] x x)
2129         for {
2130                 c := auxIntToInt32(v.AuxInt)
2131                 if v_0.Op != OpAMD64SHLQconst || auxIntToInt8(v_0.AuxInt) != 1 {
2132                         break
2133                 }
2134                 x := v_0.Args[0]
2135                 v.reset(OpAMD64LEAQ1)
2136                 v.AuxInt = int32ToAuxInt(c)
2137                 v.AddArg2(x, x)
2138                 return true
2139         }
2140         // match: (ADDQconst [c] (LEAQ [d] {s} x))
2141         // cond: is32Bit(int64(c)+int64(d))
2142         // result: (LEAQ [c+d] {s} x)
2143         for {
2144                 c := auxIntToInt32(v.AuxInt)
2145                 if v_0.Op != OpAMD64LEAQ {
2146                         break
2147                 }
2148                 d := auxIntToInt32(v_0.AuxInt)
2149                 s := auxToSym(v_0.Aux)
2150                 x := v_0.Args[0]
2151                 if !(is32Bit(int64(c) + int64(d))) {
2152                         break
2153                 }
2154                 v.reset(OpAMD64LEAQ)
2155                 v.AuxInt = int32ToAuxInt(c + d)
2156                 v.Aux = symToAux(s)
2157                 v.AddArg(x)
2158                 return true
2159         }
2160         // match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
2161         // cond: is32Bit(int64(c)+int64(d))
2162         // result: (LEAQ1 [c+d] {s} x y)
2163         for {
2164                 c := auxIntToInt32(v.AuxInt)
2165                 if v_0.Op != OpAMD64LEAQ1 {
2166                         break
2167                 }
2168                 d := auxIntToInt32(v_0.AuxInt)
2169                 s := auxToSym(v_0.Aux)
2170                 y := v_0.Args[1]
2171                 x := v_0.Args[0]
2172                 if !(is32Bit(int64(c) + int64(d))) {
2173                         break
2174                 }
2175                 v.reset(OpAMD64LEAQ1)
2176                 v.AuxInt = int32ToAuxInt(c + d)
2177                 v.Aux = symToAux(s)
2178                 v.AddArg2(x, y)
2179                 return true
2180         }
2181         // match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
2182         // cond: is32Bit(int64(c)+int64(d))
2183         // result: (LEAQ2 [c+d] {s} x y)
2184         for {
2185                 c := auxIntToInt32(v.AuxInt)
2186                 if v_0.Op != OpAMD64LEAQ2 {
2187                         break
2188                 }
2189                 d := auxIntToInt32(v_0.AuxInt)
2190                 s := auxToSym(v_0.Aux)
2191                 y := v_0.Args[1]
2192                 x := v_0.Args[0]
2193                 if !(is32Bit(int64(c) + int64(d))) {
2194                         break
2195                 }
2196                 v.reset(OpAMD64LEAQ2)
2197                 v.AuxInt = int32ToAuxInt(c + d)
2198                 v.Aux = symToAux(s)
2199                 v.AddArg2(x, y)
2200                 return true
2201         }
2202         // match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
2203         // cond: is32Bit(int64(c)+int64(d))
2204         // result: (LEAQ4 [c+d] {s} x y)
2205         for {
2206                 c := auxIntToInt32(v.AuxInt)
2207                 if v_0.Op != OpAMD64LEAQ4 {
2208                         break
2209                 }
2210                 d := auxIntToInt32(v_0.AuxInt)
2211                 s := auxToSym(v_0.Aux)
2212                 y := v_0.Args[1]
2213                 x := v_0.Args[0]
2214                 if !(is32Bit(int64(c) + int64(d))) {
2215                         break
2216                 }
2217                 v.reset(OpAMD64LEAQ4)
2218                 v.AuxInt = int32ToAuxInt(c + d)
2219                 v.Aux = symToAux(s)
2220                 v.AddArg2(x, y)
2221                 return true
2222         }
2223         // match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
2224         // cond: is32Bit(int64(c)+int64(d))
2225         // result: (LEAQ8 [c+d] {s} x y)
2226         for {
2227                 c := auxIntToInt32(v.AuxInt)
2228                 if v_0.Op != OpAMD64LEAQ8 {
2229                         break
2230                 }
2231                 d := auxIntToInt32(v_0.AuxInt)
2232                 s := auxToSym(v_0.Aux)
2233                 y := v_0.Args[1]
2234                 x := v_0.Args[0]
2235                 if !(is32Bit(int64(c) + int64(d))) {
2236                         break
2237                 }
2238                 v.reset(OpAMD64LEAQ8)
2239                 v.AuxInt = int32ToAuxInt(c + d)
2240                 v.Aux = symToAux(s)
2241                 v.AddArg2(x, y)
2242                 return true
2243         }
2244         // match: (ADDQconst [0] x)
2245         // result: x
2246         for {
2247                 if auxIntToInt32(v.AuxInt) != 0 {
2248                         break
2249                 }
2250                 x := v_0
2251                 v.copyOf(x)
2252                 return true
2253         }
2254         // match: (ADDQconst [c] (MOVQconst [d]))
2255         // result: (MOVQconst [int64(c)+d])
2256         for {
2257                 c := auxIntToInt32(v.AuxInt)
2258                 if v_0.Op != OpAMD64MOVQconst {
2259                         break
2260                 }
2261                 d := auxIntToInt64(v_0.AuxInt)
2262                 v.reset(OpAMD64MOVQconst)
2263                 v.AuxInt = int64ToAuxInt(int64(c) + d)
2264                 return true
2265         }
2266         // match: (ADDQconst [c] (ADDQconst [d] x))
2267         // cond: is32Bit(int64(c)+int64(d))
2268         // result: (ADDQconst [c+d] x)
2269         for {
2270                 c := auxIntToInt32(v.AuxInt)
2271                 if v_0.Op != OpAMD64ADDQconst {
2272                         break
2273                 }
2274                 d := auxIntToInt32(v_0.AuxInt)
2275                 x := v_0.Args[0]
2276                 if !(is32Bit(int64(c) + int64(d))) {
2277                         break
2278                 }
2279                 v.reset(OpAMD64ADDQconst)
2280                 v.AuxInt = int32ToAuxInt(c + d)
2281                 v.AddArg(x)
2282                 return true
2283         }
2284         // match: (ADDQconst [off] x:(SP))
2285         // result: (LEAQ [off] x)
2286         for {
2287                 off := auxIntToInt32(v.AuxInt)
2288                 x := v_0
2289                 if x.Op != OpSP {
2290                         break
2291                 }
2292                 v.reset(OpAMD64LEAQ)
2293                 v.AuxInt = int32ToAuxInt(off)
2294                 v.AddArg(x)
2295                 return true
2296         }
2297         return false
2298 }
// rewriteValueAMD64_OpAMD64ADDQconstmodify applies the generated rewrite rules
// for OpAMD64ADDQconstmodify (add an immediate to a 64-bit memory operand).
// Both rules fold an address computation appearing in the pointer argument
// into this op's offset. Reports whether v was rewritten in place.
//
// NOTE: generated from gen/AMD64.rules — edit the rules, not this function.
func rewriteValueAMD64_OpAMD64ADDQconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// Each rule below is wrapped in a one-iteration `for` loop so a failed
	// match can `break` to the next rule.
	// match: (ADDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		// canAdd32 guards against offset overflow before merging.
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ADDQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ADDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDQload applies the generated rewrite rules for
// OpAMD64ADDQload (64-bit add with a memory source operand): the first two
// rules fold address arithmetic into the load's offset; the last forwards a
// just-stored SSE value through a bit-preserving move instead of reloading it
// from memory. Reports whether v was rewritten in place.
//
// NOTE: generated from gen/AMD64.rules — edit the rules, not this function.
func rewriteValueAMD64_OpAMD64ADDQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDQload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		// Merged offset must still fit in a signed 32-bit displacement.
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (ADDQ x (MOVQf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		// The load must hit exactly the slot the MOVSDstore wrote:
		// same offset, same symbol, same pointer.
		if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDQ)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDQmodify applies the generated rewrite rules for
// OpAMD64ADDQmodify (add a register to a 64-bit memory operand). Both rules
// fold address arithmetic in the pointer argument into this op's offset.
// Reports whether v was rewritten in place.
//
// NOTE: generated from gen/AMD64.rules — edit the rules, not this function.
func rewriteValueAMD64_OpAMD64ADDQmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		// Merged offset must still fit in a signed 32-bit displacement.
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ADDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDSD applies the generated rewrite rule for
// OpAMD64ADDSD (scalar float64 add): when one operand is a clobberable
// MOVSDload, merge the load into the add as ADDSDload. Reports whether v was
// rewritten in place.
//
// NOTE: generated from gen/AMD64.rules — edit the rules, not this function.
func rewriteValueAMD64_OpAMD64ADDSD(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDSDload x [off] {sym} ptr mem)
	for {
		// ADDSD is commutative: the inner loop swaps v_0/v_1 to try both
		// operand orders before giving up.
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVSDload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDSDload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDSDload applies the generated rewrite rules for
// OpAMD64ADDSDload (float64 add with a memory source operand): the first two
// rules fold address arithmetic into the load's offset; the last forwards a
// just-stored integer value through a bit-preserving move instead of
// reloading it from memory. Reports whether v was rewritten in place.
//
// NOTE: generated from gen/AMD64.rules — edit the rules, not this function.
func rewriteValueAMD64_OpAMD64ADDSDload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDSDload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		// Merged offset must still fit in a signed 32-bit displacement.
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
	// result: (ADDSD x (MOVQi2f y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		// The load must hit exactly the slot the MOVQstore wrote:
		// same offset, same symbol, same pointer.
		if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDSD)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDSS applies the generated rewrite rule for
// OpAMD64ADDSS (scalar float32 add): when one operand is a clobberable
// MOVSSload, merge the load into the add as ADDSSload. Reports whether v was
// rewritten in place.
//
// NOTE: generated from gen/AMD64.rules — edit the rules, not this function.
func rewriteValueAMD64_OpAMD64ADDSS(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDSSload x [off] {sym} ptr mem)
	for {
		// ADDSS is commutative: the inner loop swaps v_0/v_1 to try both
		// operand orders before giving up.
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVSSload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDSSload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDSSload applies the generated rewrite rules for
// OpAMD64ADDSSload (float32 add with a memory source operand): the first two
// rules fold address arithmetic into the load's offset; the last forwards a
// just-stored integer value through a bit-preserving move instead of
// reloading it from memory. Reports whether v was rewritten in place.
//
// NOTE: generated from gen/AMD64.rules — edit the rules, not this function.
func rewriteValueAMD64_OpAMD64ADDSSload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDSSload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDSSload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		// Merged offset must still fit in a signed 32-bit displacement.
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
	// result: (ADDSS x (MOVLi2f y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		// The load must hit exactly the slot the MOVLstore wrote:
		// same offset, same symbol, same pointer.
		if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDSS)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDL applies the generated rewrite rules for
// OpAMD64ANDL (32-bit bitwise AND): recognize bit-clear patterns (BTRL /
// BTRLconst), fold constant operands into ANDLconst, simplify x&x, and merge
// a clobberable MOVLload operand into ANDLload. Reports whether v was
// rewritten in place.
//
// NOTE: generated from gen/AMD64.rules — edit the rules, not this function.
func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDL (NOTL (SHLL (MOVLconst [1]) y)) x)
	// result: (BTRL x y)
	for {
		// ANDL is commutative: the inner loop swaps v_0/v_1 to try both
		// operand orders before giving up (same idiom in each rule below).
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64NOTL {
				continue
			}
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SHLL {
				continue
			}
			y := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
				continue
			}
			x := v_1
			v.reset(OpAMD64BTRL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ANDL (MOVLconst [c]) x)
	// cond: isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
	// result: (BTRLconst [int8(log32(^c))] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_1
			// ^c a power of two means c clears exactly one bit; the >= 128
			// threshold keeps small masks as ANDLconst.
			if !(isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128) {
				continue
			}
			v.reset(OpAMD64BTRLconst)
			v.AuxInt = int8ToAuxInt(int8(log32(^c)))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDL x (MOVLconst [c]))
	// result: (ANDLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ANDLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDL x x)
	// result: x
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ANDLload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ANDLload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDLconst applies the generated rewrite rules for
// OpAMD64ANDLconst (32-bit AND with an immediate): convert single-bit-clear
// masks to BTRLconst, collapse stacked constant ANDs, turn 0xFF/0xFFFF masks
// into zero-extending moves, and constant-fold the trivial 0, -1, and
// const&const cases. Reports whether v was rewritten in place.
//
// NOTE: generated from gen/AMD64.rules — edit the rules, not this function.
func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ANDLconst [c] x)
	// cond: isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
	// result: (BTRLconst [int8(log32(^c))] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		// ^c a power of two means c clears exactly one bit; the >= 128
		// threshold keeps small masks as ANDLconst.
		if !(isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128) {
			break
		}
		v.reset(OpAMD64BTRLconst)
		v.AuxInt = int8ToAuxInt(int8(log32(^c)))
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] (ANDLconst [d] x))
	// result: (ANDLconst [c & d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & d)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] (BTRLconst [d] x))
	// result: (ANDLconst [c &^ (1<<uint32(d))] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64BTRLconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		// BTRLconst [d] clears bit d, so fold it into the mask.
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c &^ (1 << uint32(d)))
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [ 0xFF] x)
	// result: (MOVBQZX x)
	for {
		if auxIntToInt32(v.AuxInt) != 0xFF {
			break
		}
		x := v_0
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFFFF] x)
	// result: (MOVWQZX x)
	for {
		if auxIntToInt32(v.AuxInt) != 0xFFFF {
			break
		}
		x := v_0
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] _)
	// cond: c==0
	// result: (MOVLconst [0])
	for {
		c := auxIntToInt32(v.AuxInt)
		if !(c == 0) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (ANDLconst [c] x)
	// cond: c==-1
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c == -1) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ANDLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [c&d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c & d)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDLconstmodify applies the generated rewrite
// rules for OpAMD64ANDLconstmodify (AND an immediate into a 32-bit memory
// operand). Both rules fold an address computation appearing in the pointer
// argument into this op's offset. Reports whether v was rewritten in place.
//
// NOTE: generated from gen/AMD64.rules — edit the rules, not this function.
func rewriteValueAMD64_OpAMD64ANDLconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		// canAdd32 guards against offset overflow before merging.
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ANDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ANDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDLload applies the generated rewrite rules for
// OpAMD64ANDLload (32-bit AND with a memory source operand): the first two
// rules fold address arithmetic into the load's offset; the last forwards a
// just-stored SSE value through a bit-preserving move instead of reloading it
// from memory. Reports whether v was rewritten in place.
//
// NOTE: generated from gen/AMD64.rules — edit the rules, not this function.
func rewriteValueAMD64_OpAMD64ANDLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ANDLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ANDLload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		// Merged offset must still fit in a signed 32-bit displacement.
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ANDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ANDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (ANDL x (MOVLf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		// The load must hit exactly the slot the MOVSSstore wrote:
		// same offset, same symbol, same pointer.
		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDLmodify applies the generated rewrite rules for
// ANDLmodify values. Each for-loop below attempts one rule; rules are tried
// strictly in order, and the first match rewrites v in place and returns true.
// (Generated code: the loops never iterate; "break" means "rule did not match".)
func rewriteValueAMD64_OpAMD64ANDLmodify(v *Value) bool {
	v_2 := v.Args[2] // mem operand
	v_1 := v.Args[1] // value operand
	v_0 := v.Args[0] // pointer/base operand
	b := v.Block
	// match: (ANDLmodify [off] {sym} ptr (NOTL s:(SHLL (MOVLconst [1]) <t> x)) mem)
	// result: (BTRLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64NOTL {
			break
		}
		s := v_1.Args[0]
		if s.Op != OpAMD64SHLL {
			break
		}
		t := s.Type
		x := s.Args[1]
		s_0 := s.Args[0]
		if s_0.Op != OpAMD64MOVLconst || auxIntToInt32(s_0.AuxInt) != 1 {
			break
		}
		mem := v_2
		v.reset(OpAMD64BTRLmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		// Reduce the bit index modulo 32 (ANDLconst [31]) before feeding it to BTR.
		v0 := b.NewValue0(v.Pos, OpAMD64ANDLconst, t)
		v0.AuxInt = int32ToAuxInt(31)
		v0.AddArg(x)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (ANDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ANDLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		// Offsets are summed in 64-bit so int32 overflow is detected before folding.
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ANDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ANDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDQ applies the generated rewrite rules for ANDQ
// values: turning AND-with-inverted-single-bit into BTRQ, folding constants
// into ANDQconst/BTRQconst, eliminating x&x, and merging a load operand into
// an ANDQload. Rules are tried in order; the first match wins.
func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDQ (NOTQ (SHLQ (MOVQconst [1]) y)) x)
	// result: (BTRQ x y)
	for {
		// ANDQ is commutative: the inner loop tries both argument orders by
		// swapping v_0 and v_1 on the second iteration.
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64NOTQ {
				continue
			}
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SHLQ {
				continue
			}
			y := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
				continue
			}
			x := v_1
			v.reset(OpAMD64BTRQ)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ANDQ (MOVQconst [c]) x)
	// cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128
	// result: (BTRQconst [int8(log64(^c))] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0.AuxInt)
			x := v_1
			// Only masks clearing a single bit at position >= 7 are turned into
			// BTRQconst; smaller masks are left for the ANDQconst rule below.
			if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128) {
				continue
			}
			v.reset(OpAMD64BTRQconst)
			v.AuxInt = int8ToAuxInt(int8(log64(^c)))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ANDQconst [int32(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			// ANDQconst holds a 32-bit immediate, so c must fit in int32.
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ANDQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDQ x x)
	// result: x
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ANDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ANDQload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVQload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ANDQload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDQconst applies the generated rewrite rules for
// ANDQconst values: single-bit-clear masks become BTRQconst, nested constant
// ANDs are folded, the masks 0xFF/0xFFFF become byte/word zero-extensions,
// and the trivial masks 0 and -1 collapse. Rules are tried in order.
func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ANDQconst [c] x)
	// cond: isUint64PowerOfTwo(int64(^c)) && uint64(^c) >= 128
	// result: (BTRQconst [int8(log32(^c))] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isUint64PowerOfTwo(int64(^c)) && uint64(^c) >= 128) {
			break
		}
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = int8ToAuxInt(int8(log32(^c)))
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [c] (ANDQconst [d] x))
	// result: (ANDQconst [c & d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = int32ToAuxInt(c & d)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [c] (BTRQconst [d] x))
	// cond: is32Bit(int64(c) &^ (1<<uint32(d)))
	// result: (ANDQconst [c &^ (1<<uint32(d))] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64BTRQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		// The combined mask must still fit the 32-bit immediate of ANDQconst.
		if !(is32Bit(int64(c) &^ (1 << uint32(d)))) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = int32ToAuxInt(c &^ (1 << uint32(d)))
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [ 0xFF] x)
	// result: (MOVBQZX x)
	for {
		if auxIntToInt32(v.AuxInt) != 0xFF {
			break
		}
		x := v_0
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFF] x)
	// result: (MOVWQZX x)
	for {
		if auxIntToInt32(v.AuxInt) != 0xFFFF {
			break
		}
		x := v_0
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0] _)
	// result: (MOVQconst [0])
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (ANDQconst [-1] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != -1 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (ANDQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(c)&d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) & d)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDQconstmodify applies the generated rewrite rules
// for ANDQconstmodify values: both rules fold an address computation
// (ADDQconst or LEAQ) on the base pointer into the value's ValAndOff aux.
func rewriteValueAMD64_OpAMD64ANDQconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		// canAdd32 guards against overflowing the offset field of the ValAndOff.
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ANDQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ANDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDQload applies the generated rewrite rules for
// ANDQload values: the first two rules fold an address computation into the
// load's offset/symbol aux; the last forwards a just-stored SSE value
// (MOVSDstore at the same address) directly into the AND via MOVQf2i,
// avoiding the round trip through memory.
func rewriteValueAMD64_OpAMD64ANDQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ANDQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ANDQload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		// Offsets are summed in 64-bit so int32 overflow is detected before folding.
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ANDQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ANDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ANDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (ANDQ x (MOVQf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		// Store must target the exact same offset, symbol, and pointer as the load.
		if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ANDQ)
		// MOVQf2i reinterprets the stored float bits as an integer register value.
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDQmodify applies the generated rewrite rules for
// ANDQmodify values: AND-with-inverted-single-bit becomes BTRQmodify, and the
// remaining two rules fold an address computation into the offset/symbol aux.
func rewriteValueAMD64_OpAMD64ANDQmodify(v *Value) bool {
	v_2 := v.Args[2] // mem operand
	v_1 := v.Args[1] // value operand
	v_0 := v.Args[0] // pointer/base operand
	b := v.Block
	// match: (ANDQmodify [off] {sym} ptr (NOTQ s:(SHLQ (MOVQconst [1]) <t> x)) mem)
	// result: (BTRQmodify [off] {sym} ptr (ANDQconst <t> [63] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64NOTQ {
			break
		}
		s := v_1.Args[0]
		if s.Op != OpAMD64SHLQ {
			break
		}
		t := s.Type
		x := s.Args[1]
		s_0 := s.Args[0]
		if s_0.Op != OpAMD64MOVQconst || auxIntToInt64(s_0.AuxInt) != 1 {
			break
		}
		mem := v_2
		v.reset(OpAMD64BTRQmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		// Reduce the bit index modulo 64 (ANDQconst [63]) before feeding it to BTR.
		v0 := b.NewValue0(v.Pos, OpAMD64ANDQconst, t)
		v0.AuxInt = int32ToAuxInt(63)
		v0.AddArg(x)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (ANDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ANDQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ANDQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ANDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ANDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BSFQ applies the generated rewrite rules for BSFQ
// values. Both rules drop a zero-extension (MOVBQZX/MOVWQZX) underneath an
// ORQconst that forces a sentinel bit (1<<8 or 1<<16) set: with that bit
// always set, the lowest set bit found by BSFQ can never come from the bits
// the extension would have cleared, so the extension is redundant.
func rewriteValueAMD64_OpAMD64BSFQ(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x)))
	// result: (BSFQ (ORQconst <t> [1<<8] x))
	for {
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if auxIntToInt32(v_0.AuxInt) != 1<<8 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = int32ToAuxInt(1 << 8)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x)))
	// result: (BSFQ (ORQconst <t> [1<<16] x))
	for {
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if auxIntToInt32(v_0.AuxInt) != 1<<16 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = int32ToAuxInt(1 << 16)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BTCLconst applies the generated rewrite rules for
// BTCLconst (toggle bit c) values, folding the toggle into an XOR constant
// or into a constant operand. Rules are tried in order.
func rewriteValueAMD64_OpAMD64BTCLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTCLconst [c] (XORLconst [d] x))
	// result: (XORLconst [d ^ 1<<uint32(c)] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64XORLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = int32ToAuxInt(d ^ 1<<uint32(c))
		v.AddArg(x)
		return true
	}
	// match: (BTCLconst [c] (BTCLconst [d] x))
	// result: (XORLconst [1<<uint32(c) | 1<<uint32(d)] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTCLconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		// Two single-bit toggles combine into one XOR of the two-bit mask.
		v.reset(OpAMD64XORLconst)
		v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
		v.AddArg(x)
		return true
	}
	// match: (BTCLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [d^(1<<uint32(c))])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(d ^ (1 << uint32(c)))
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BTCLconstmodify applies the generated rewrite rules
// for BTCLconstmodify values: both rules fold an address computation
// (ADDQconst or LEAQ) on the base pointer into the value's ValAndOff aux.
func rewriteValueAMD64_OpAMD64BTCLconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (BTCLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (BTCLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		// canAdd32 guards against overflowing the offset field of the ValAndOff.
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64BTCLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (BTCLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (BTCLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64BTCLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BTCLmodify applies the generated rewrite rules for
// BTCLmodify values: both rules fold an address computation (ADDQconst or
// LEAQ) on the base pointer into the value's offset/symbol aux.
func rewriteValueAMD64_OpAMD64BTCLmodify(v *Value) bool {
	v_2 := v.Args[2] // mem operand
	v_1 := v.Args[1] // value operand
	v_0 := v.Args[0] // pointer/base operand
	// match: (BTCLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (BTCLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		// Offsets are summed in 64-bit so int32 overflow is detected before folding.
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64BTCLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (BTCLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (BTCLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64BTCLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BTCQconst applies the generated rewrite rules for
// BTCQconst (toggle bit c) values, folding the toggle into an XOR constant or
// a constant operand. Unlike the 32-bit variant, every fold into XORQconst is
// guarded by is32Bit because XORQconst carries only a 32-bit immediate.
func rewriteValueAMD64_OpAMD64BTCQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTCQconst [c] (XORQconst [d] x))
	// cond: is32Bit(int64(d) ^ 1<<uint32(c))
	// result: (XORQconst [d ^ 1<<uint32(c)] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64XORQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(d) ^ 1<<uint32(c))) {
			break
		}
		v.reset(OpAMD64XORQconst)
		v.AuxInt = int32ToAuxInt(d ^ 1<<uint32(c))
		v.AddArg(x)
		return true
	}
	// match: (BTCQconst [c] (BTCQconst [d] x))
	// cond: is32Bit(1<<uint32(c) ^ 1<<uint32(d))
	// result: (XORQconst [1<<uint32(c) ^ 1<<uint32(d)] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTCQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		// Two single-bit toggles combine into one XOR of the two-bit mask.
		if !(is32Bit(1<<uint32(c) ^ 1<<uint32(d))) {
			break
		}
		v.reset(OpAMD64XORQconst)
		v.AuxInt = int32ToAuxInt(1<<uint32(c) ^ 1<<uint32(d))
		v.AddArg(x)
		return true
	}
	// match: (BTCQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [d^(1<<uint32(c))])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(d ^ (1 << uint32(c)))
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BTCQconstmodify applies the generated rewrite rules
// for BTCQconstmodify values: both rules fold an address computation
// (ADDQconst or LEAQ) on the base pointer into the value's ValAndOff aux.
func rewriteValueAMD64_OpAMD64BTCQconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (BTCQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (BTCQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		// canAdd32 guards against overflowing the offset field of the ValAndOff.
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64BTCQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (BTCQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (BTCQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64BTCQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BTCQmodify attempts the machine-generated rewrite
// rules for OpAMD64BTCQmodify and reports whether v was rewritten in place.
// Both rules fold a constant address offset into the op's int32 AuxInt
// offset: rule 1 absorbs an ADDQconst, rule 2 absorbs a LEAQ (also merging
// symbols). The is32Bit check rejects folds whose combined offset would not
// fit a signed 32-bit displacement. (Generated code — change
// gen/AMD64.rules, not this file.)
func rewriteValueAMD64_OpAMD64BTCQmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (BTCQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (BTCQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64BTCQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (BTCQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (BTCQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64BTCQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BTLconst attempts the machine-generated rewrite
// rules for OpAMD64BTLconst (32-bit bit test against a constant bit index)
// and reports whether v was rewritten in place. The rules fold a constant
// shift of the tested operand into the bit index itself (testing bit c of
// x>>d is testing bit c+d of x; testing bit c of x<<d is testing bit c-d of
// x), and turn a test of bit 0 of a variable right shift into a
// variable-index BTL/BTQ. The (c+d)<64 / (c+d)<32 conditions keep the
// adjusted index within the operand width. (Generated code — change
// gen/AMD64.rules, not this file.)
func rewriteValueAMD64_OpAMD64BTLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTLconst [c] (SHRQconst [d] x))
	// cond: (c+d)<64
	// result: (BTQconst [c+d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !((c + d) < 64) {
			break
		}
		// Result widens to a 64-bit test because the shift source is 64-bit.
		v.reset(OpAMD64BTQconst)
		v.AuxInt = int8ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [c] (SHLQconst [d] x))
	// cond: c>d
	// result: (BTLconst [c-d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c > d) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = int8ToAuxInt(c - d)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [0] s:(SHRQ x y))
	// result: (BTQ y x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		s := v_0
		if s.Op != OpAMD64SHRQ {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		// Bit 0 of x>>y is bit y of x: test it directly with BTQ.
		v.reset(OpAMD64BTQ)
		v.AddArg2(y, x)
		return true
	}
	// match: (BTLconst [c] (SHRLconst [d] x))
	// cond: (c+d)<32
	// result: (BTLconst [c+d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !((c + d) < 32) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = int8ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [c] (SHLLconst [d] x))
	// cond: c>d
	// result: (BTLconst [c-d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c > d) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = int8ToAuxInt(c - d)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [0] s:(SHRL x y))
	// result: (BTL y x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		s := v_0
		if s.Op != OpAMD64SHRL {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		v.reset(OpAMD64BTL)
		v.AddArg2(y, x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BTQconst attempts the machine-generated rewrite
// rules for OpAMD64BTQconst (64-bit bit test against a constant bit index)
// and reports whether v was rewritten in place. As with BTLconst, constant
// shifts of the tested operand are folded into the bit index, and a test of
// bit 0 of a variable right shift becomes a variable-index BTQ. (Generated
// code — change gen/AMD64.rules, not this file.)
func rewriteValueAMD64_OpAMD64BTQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTQconst [c] (SHRQconst [d] x))
	// cond: (c+d)<64
	// result: (BTQconst [c+d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !((c + d) < 64) {
			break
		}
		v.reset(OpAMD64BTQconst)
		v.AuxInt = int8ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (BTQconst [c] (SHLQconst [d] x))
	// cond: c>d
	// result: (BTQconst [c-d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c > d) {
			break
		}
		v.reset(OpAMD64BTQconst)
		v.AuxInt = int8ToAuxInt(c - d)
		v.AddArg(x)
		return true
	}
	// match: (BTQconst [0] s:(SHRQ x y))
	// result: (BTQ y x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		s := v_0
		if s.Op != OpAMD64SHRQ {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		v.reset(OpAMD64BTQ)
		v.AddArg2(y, x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BTRLconst attempts the machine-generated rewrite
// rules for OpAMD64BTRLconst (reset bit c of a 32-bit value) and reports
// whether v was rewritten in place. The rules: resetting a bit right after
// setting (BTS) or complementing (BTC) the same bit is just the reset;
// resetting a bit of an AND-with-constant folds into the AND mask; two
// constant bit resets collapse into one ANDLconst with both bits cleared;
// and resetting a bit of a constant is computed at compile time. The &^
// (AND NOT) operator clears bit c in the 32-bit mask. (Generated code —
// change gen/AMD64.rules, not this file.)
func rewriteValueAMD64_OpAMD64BTRLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTRLconst [c] (BTSLconst [c] x))
	// result: (BTRLconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTSLconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRLconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTRLconst [c] (BTCLconst [c] x))
	// result: (BTRLconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTCLconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRLconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTRLconst [c] (ANDLconst [d] x))
	// result: (ANDLconst [d &^ (1<<uint32(c))] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
		v.AddArg(x)
		return true
	}
	// match: (BTRLconst [c] (BTRLconst [d] x))
	// result: (ANDLconst [^(1<<uint32(c) | 1<<uint32(d))] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTRLconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(^(1<<uint32(c) | 1<<uint32(d)))
		v.AddArg(x)
		return true
	}
	// match: (BTRLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [d&^(1<<uint32(c))])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BTRLconstmodify attempts the machine-generated
// rewrite rules for OpAMD64BTRLconstmodify and reports whether v was
// rewritten in place. Both rules fold a constant address offset from the
// address operand into the op's ValAndOff aux: rule 1 absorbs an ADDQconst
// offset, rule 2 absorbs a LEAQ offset and merges its symbol with the op's.
// (Generated code — change gen/AMD64.rules, not this file.)
func rewriteValueAMD64_OpAMD64BTRLconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (BTRLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (BTRLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64BTRLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (BTRLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (BTRLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64BTRLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BTRLmodify attempts the machine-generated rewrite
// rules for OpAMD64BTRLmodify and reports whether v was rewritten in place.
// Both rules fold a constant address offset into the op's int32 AuxInt
// offset: rule 1 absorbs an ADDQconst, rule 2 absorbs a LEAQ (also merging
// symbols), each guarded by is32Bit on the combined offset. (Generated code
// — change gen/AMD64.rules, not this file.)
func rewriteValueAMD64_OpAMD64BTRLmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (BTRLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (BTRLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64BTRLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (BTRLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (BTRLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64BTRLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BTRQconst attempts the machine-generated rewrite
// rules for OpAMD64BTRQconst (reset bit c of a 64-bit value) and reports
// whether v was rewritten in place. Mirrors BTRLconst, but the folds into
// ANDQconst carry an extra is32Bit condition because ANDQconst's immediate
// is limited to a sign-extended 32-bit constant; the MOVQconst fold is
// evaluated fully at compile time. (Generated code — change
// gen/AMD64.rules, not this file.)
func rewriteValueAMD64_OpAMD64BTRQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTRQconst [c] (BTSQconst [c] x))
	// result: (BTRQconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTSQconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTRQconst [c] (BTCQconst [c] x))
	// result: (BTRQconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTRQconst [c] (ANDQconst [d] x))
	// cond: is32Bit(int64(d) &^ (1<<uint32(c)))
	// result: (ANDQconst [d &^ (1<<uint32(c))] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(d) &^ (1 << uint32(c)))) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
		v.AddArg(x)
		return true
	}
	// match: (BTRQconst [c] (BTRQconst [d] x))
	// cond: is32Bit(^(1<<uint32(c) | 1<<uint32(d)))
	// result: (ANDQconst [^(1<<uint32(c) | 1<<uint32(d))] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTRQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(^(1<<uint32(c) | 1<<uint32(d)))) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = int32ToAuxInt(^(1<<uint32(c) | 1<<uint32(d)))
		v.AddArg(x)
		return true
	}
	// match: (BTRQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [d&^(1<<uint32(c))])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(d &^ (1 << uint32(c)))
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BTRQconstmodify attempts the machine-generated
// rewrite rules for OpAMD64BTRQconstmodify and reports whether v was
// rewritten in place. Both rules fold a constant address offset from the
// address operand into the op's ValAndOff aux: rule 1 absorbs an ADDQconst
// offset, rule 2 absorbs a LEAQ offset and merges its symbol with the op's.
// (Generated code — change gen/AMD64.rules, not this file.)
func rewriteValueAMD64_OpAMD64BTRQconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (BTRQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (BTRQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64BTRQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (BTRQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (BTRQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64BTRQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BTRQmodify attempts the machine-generated rewrite
// rules for OpAMD64BTRQmodify and reports whether v was rewritten in place.
// Both rules fold a constant address offset into the op's int32 AuxInt
// offset: rule 1 absorbs an ADDQconst, rule 2 absorbs a LEAQ (also merging
// symbols), each guarded by is32Bit on the combined offset. (Generated code
// — change gen/AMD64.rules, not this file.)
func rewriteValueAMD64_OpAMD64BTRQmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (BTRQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (BTRQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64BTRQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (BTRQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (BTRQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64BTRQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BTSLconst attempts the machine-generated rewrite
// rules for OpAMD64BTSLconst (set bit c of a 32-bit value) and reports
// whether v was rewritten in place. The rules: setting a bit right after
// resetting (BTR) or complementing (BTC) the same bit is just the set;
// setting a bit of an OR-with-constant folds into the OR mask; two constant
// bit sets collapse into one ORLconst; and setting a bit of a constant is
// computed at compile time. (Generated code — change gen/AMD64.rules, not
// this file.)
func rewriteValueAMD64_OpAMD64BTSLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTSLconst [c] (BTRLconst [c] x))
	// result: (BTSLconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTRLconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTSLconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTSLconst [c] (BTCLconst [c] x))
	// result: (BTSLconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTCLconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTSLconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTSLconst [c] (ORLconst [d] x))
	// result: (ORLconst [d | 1<<uint32(c)] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64ORLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ORLconst)
		v.AuxInt = int32ToAuxInt(d | 1<<uint32(c))
		v.AddArg(x)
		return true
	}
	// match: (BTSLconst [c] (BTSLconst [d] x))
	// result: (ORLconst [1<<uint32(c) | 1<<uint32(d)] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTSLconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ORLconst)
		v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
		v.AddArg(x)
		return true
	}
	// match: (BTSLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [d|(1<<uint32(c))])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(d | (1 << uint32(c)))
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BTSLconstmodify attempts the machine-generated
// rewrite rules for OpAMD64BTSLconstmodify and reports whether v was
// rewritten in place. Both rules fold a constant address offset from the
// address operand into the op's ValAndOff aux: rule 1 absorbs an ADDQconst
// offset, rule 2 absorbs a LEAQ offset and merges its symbol with the op's.
// (Generated code — change gen/AMD64.rules, not this file.)
func rewriteValueAMD64_OpAMD64BTSLconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (BTSLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (BTSLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64BTSLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (BTSLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (BTSLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64BTSLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BTSLmodify attempts the machine-generated rewrite
// rules for OpAMD64BTSLmodify and reports whether v was rewritten in place.
// Both rules fold a constant address offset into the op's int32 AuxInt
// offset: rule 1 absorbs an ADDQconst, rule 2 absorbs a LEAQ (also merging
// symbols), each guarded by is32Bit on the combined offset. (Generated code
// — change gen/AMD64.rules, not this file.)
func rewriteValueAMD64_OpAMD64BTSLmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (BTSLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (BTSLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64BTSLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (BTSLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (BTSLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64BTSLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BTSQconst applies the generated rewrite rules for
// BTSQconst (set bit c of a 64-bit value), folding it with adjacent constant
// bit operations. Returns true if v was rewritten.
// Generated from gen/AMD64.rules; regenerate rather than hand-editing.
func rewriteValueAMD64_OpAMD64BTSQconst(v *Value) bool {
	v_0 := v.Args[0]
	// A clear (BTRQconst) of the same bit just before the set is dead.
	// match: (BTSQconst [c] (BTRQconst [c] x))
	// result: (BTSQconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTRQconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTSQconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// Likewise a toggle (BTCQconst) of the same bit is overwritten by the set.
	// match: (BTSQconst [c] (BTCQconst [c] x))
	// result: (BTSQconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTSQconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// Merge the set bit into an existing OR immediate, but only when the
	// combined mask still fits a sign-extended 32-bit immediate (is32Bit).
	// match: (BTSQconst [c] (ORQconst [d] x))
	// cond: is32Bit(int64(d) | 1<<uint32(c))
	// result: (ORQconst [d | 1<<uint32(c)] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(d) | 1<<uint32(c))) {
			break
		}
		v.reset(OpAMD64ORQconst)
		v.AuxInt = int32ToAuxInt(d | 1<<uint32(c))
		v.AddArg(x)
		return true
	}
	// Two constant bit-sets combine into a single OR immediate when the
	// two-bit mask fits a 32-bit immediate.
	// match: (BTSQconst [c] (BTSQconst [d] x))
	// cond: is32Bit(1<<uint32(c) | 1<<uint32(d))
	// result: (ORQconst [1<<uint32(c) | 1<<uint32(d)] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTSQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(1<<uint32(c) | 1<<uint32(d))) {
			break
		}
		v.reset(OpAMD64ORQconst)
		v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
		v.AddArg(x)
		return true
	}
	// Constant fold: setting bit c of a known 64-bit constant.
	// match: (BTSQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [d|(1<<uint32(c))])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(d | (1 << uint32(c)))
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BTSQconstmodify applies the generated rules for
// BTSQconstmodify (read-modify-write bit set at a memory address): constant
// address arithmetic (ADDQconst) and LEAQ-computed addresses are folded into
// the instruction's offset/symbol, guarded against 32-bit offset overflow.
// Generated from gen/AMD64.rules; regenerate rather than hand-editing.
func rewriteValueAMD64_OpAMD64BTSQconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (BTSQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (BTSQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64BTSQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// Folding a LEAQ additionally requires that its symbol can be merged
	// with the instruction's existing symbol (canMergeSym).
	// match: (BTSQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (BTSQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64BTSQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BTSQmodify applies the generated rules for
// BTSQmodify (bit set at memory with a register bit index): ADDQconst and
// LEAQ address computations are absorbed into the instruction's offset and
// symbol, with is32Bit guarding against offset overflow.
// Generated from gen/AMD64.rules; regenerate rather than hand-editing.
func rewriteValueAMD64_OpAMD64BTSQmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (BTSQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (BTSQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64BTSQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// Folding a LEAQ additionally requires mergeable symbols (canMergeSym).
	// match: (BTSQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (BTSQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64BTSQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVLCC applies the generated rules for CMOVLCC:
// an InvertFlags operand (comparison with swapped operands) turns it into the
// operand-swapped condition CMOVLLS, and statically known flag values
// (FlagEQ, FlagGT_*, FlagLT_*) collapse the conditional move to whichever
// data operand the condition selects.
// Generated from gen/AMD64.rules; regenerate rather than hand-editing.
func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLCC x y (InvertFlags cond))
	// result: (CMOVLLS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLLS)
		v.AddArg3(x, y, cond)
		return true
	}
	// The remaining rules resolve the move when the flags are a constant.
	// match: (CMOVLCC _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLCC _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLCC y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLCC y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLCC _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVLCS applies the generated rules for CMOVLCS:
// an InvertFlags operand (comparison with swapped operands) turns it into the
// operand-swapped condition CMOVLHI, and statically known flag values collapse
// the conditional move to the data operand the condition selects.
// Generated from gen/AMD64.rules; regenerate rather than hand-editing.
func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLCS x y (InvertFlags cond))
	// result: (CMOVLHI x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLHI)
		v.AddArg3(x, y, cond)
		return true
	}
	// The remaining rules resolve the move when the flags are a constant.
	// match: (CMOVLCS y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLCS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLCS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLCS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLCS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVLEQ applies the generated rules for CMOVLEQ.
// Equality is symmetric under operand swap, so InvertFlags is simply peeled
// off (the op stays CMOVLEQ); statically known flag values collapse the
// conditional move to the operand the condition selects.
// Generated from gen/AMD64.rules; regenerate rather than hand-editing.
func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLEQ x y (InvertFlags cond))
	// result: (CMOVLEQ x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLEQ)
		v.AddArg3(x, y, cond)
		return true
	}
	// The remaining rules resolve the move when the flags are a constant.
	// match: (CMOVLEQ _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLEQ y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLEQ y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLEQ y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLEQ y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVLGE applies the generated rules for CMOVLGE:
// an InvertFlags operand (comparison with swapped operands) turns it into the
// operand-swapped condition CMOVLLE, and statically known flag values collapse
// the conditional move to the data operand the condition selects.
// Generated from gen/AMD64.rules; regenerate rather than hand-editing.
func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLGE x y (InvertFlags cond))
	// result: (CMOVLLE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLLE)
		v.AddArg3(x, y, cond)
		return true
	}
	// The remaining rules resolve the move when the flags are a constant.
	// match: (CMOVLGE _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLGE _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLGE _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLGE y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLGE y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVLGT applies the generated rules for CMOVLGT:
// an InvertFlags operand (comparison with swapped operands) turns it into the
// operand-swapped condition CMOVLLT, and statically known flag values collapse
// the conditional move to the data operand the condition selects.
// Generated from gen/AMD64.rules; regenerate rather than hand-editing.
func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLGT x y (InvertFlags cond))
	// result: (CMOVLLT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLLT)
		v.AddArg3(x, y, cond)
		return true
	}
	// The remaining rules resolve the move when the flags are a constant.
	// match: (CMOVLGT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLGT _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLGT _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLGT y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLGT y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVLHI applies the generated rules for CMOVLHI:
// an InvertFlags operand (comparison with swapped operands) turns it into the
// operand-swapped condition CMOVLCS, and statically known flag values collapse
// the conditional move to the data operand the condition selects.
// Generated from gen/AMD64.rules; regenerate rather than hand-editing.
func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLHI x y (InvertFlags cond))
	// result: (CMOVLCS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLCS)
		v.AddArg3(x, y, cond)
		return true
	}
	// The remaining rules resolve the move when the flags are a constant.
	// match: (CMOVLHI y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLHI _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLHI y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLHI y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLHI _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVLLE applies the generated rules for CMOVLLE:
// an InvertFlags operand (comparison with swapped operands) turns it into the
// operand-swapped condition CMOVLGE, and statically known flag values collapse
// the conditional move to the data operand the condition selects.
// Generated from gen/AMD64.rules; regenerate rather than hand-editing.
func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLLE x y (InvertFlags cond))
	// result: (CMOVLGE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLGE)
		v.AddArg3(x, y, cond)
		return true
	}
	// The remaining rules resolve the move when the flags are a constant.
	// match: (CMOVLLE _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLE y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLE y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLE _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLE _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVLLS applies the generated rules for CMOVLLS:
// an InvertFlags operand (comparison with swapped operands) turns it into the
// operand-swapped condition CMOVLCC, and statically known flag values collapse
// the conditional move to the data operand the condition selects.
// Generated from gen/AMD64.rules; regenerate rather than hand-editing.
func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLLS x y (InvertFlags cond))
	// result: (CMOVLCC x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLCC)
		v.AddArg3(x, y, cond)
		return true
	}
	// The remaining rules resolve the move when the flags are a constant.
	// match: (CMOVLLS _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVLLT applies the generated rules for CMOVLLT:
// an InvertFlags operand (comparison with swapped operands) turns it into the
// operand-swapped condition CMOVLGT, and statically known flag values collapse
// the conditional move to the data operand the condition selects.
// Generated from gen/AMD64.rules; regenerate rather than hand-editing.
func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLLT x y (InvertFlags cond))
	// result: (CMOVLGT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLGT)
		v.AddArg3(x, y, cond)
		return true
	}
	// The remaining rules resolve the move when the flags are a constant.
	// match: (CMOVLLT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLT y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLT y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLT _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLT _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVLNE applies the generated rules for CMOVLNE.
// Inequality is symmetric under operand swap, so InvertFlags is simply peeled
// off (the op stays CMOVLNE); statically known flag values collapse the
// conditional move to the operand the condition selects.
// Generated from gen/AMD64.rules; regenerate rather than hand-editing.
func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLNE x y (InvertFlags cond))
	// result: (CMOVLNE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLNE)
		v.AddArg3(x, y, cond)
		return true
	}
	// The remaining rules resolve the move when the flags are a constant.
	// match: (CMOVLNE y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLNE _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLNE _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLNE _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLNE _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVQCC applies the generated rules for CMOVQCC
// (64-bit variant): an InvertFlags operand (comparison with swapped operands)
// turns it into the operand-swapped condition CMOVQLS, and statically known
// flag values collapse the conditional move to the data operand the condition
// selects.
// Generated from gen/AMD64.rules; regenerate rather than hand-editing.
func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQCC x y (InvertFlags cond))
	// result: (CMOVQLS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQLS)
		v.AddArg3(x, y, cond)
		return true
	}
	// The remaining rules resolve the move when the flags are a constant.
	// match: (CMOVQCC _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQCC _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQCC y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQCC y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQCC _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVQCS applies the generated AMD64.rules rewrites
// for CMOVQCS (64-bit conditional move if carry set, i.e. unsigned <).
// Rules are tried in order; each `for { ... break }` block is a single-attempt
// pattern match. On a match v is rewritten in place and true is returned;
// if no rule fires, false is returned.
func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool {
	v_2 := v.Args[2] // flags operand
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// InvertFlags swaps the comparison's operands, so CS becomes its
	// operand-swapped dual HI and the wrapper is absorbed.
	// match: (CMOVQCS x y (InvertFlags cond))
	// result: (CMOVQHI x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQHI)
		v.AddArg3(x, y, cond)
		return true
	}
	// The remaining rules fold away a CMOV whose flags are compile-time
	// constants: the condition's truth is known, so one operand is selected.
	// match: (CMOVQCS y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQCS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQCS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQCS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQCS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVQEQ applies the generated AMD64.rules rewrites
// for CMOVQEQ (64-bit conditional move if equal). Rules are tried in order;
// the first match rewrites v in place and returns true, otherwise false.
func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool {
	v_2 := v.Args[2] // flags operand
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// EQ is symmetric under operand swap, so InvertFlags is simply peeled off.
	// match: (CMOVQEQ x y (InvertFlags cond))
	// result: (CMOVQEQ x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQEQ)
		v.AddArg3(x, y, cond)
		return true
	}
	// Constant-flags folds: the condition is statically known.
	// match: (CMOVQEQ _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQEQ y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQEQ y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQEQ y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQEQ y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// ORQconst with c != 0 makes the BSFQ input provably nonzero, so the
	// flags from BSFQ can never indicate "equal (zero)" and the move never
	// fires — keep the first operand unconditionally.
	// match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _))))
	// cond: c != 0
	// result: x
	for {
		x := v_0
		if v_2.Op != OpSelect1 {
			break
		}
		v_2_0 := v_2.Args[0]
		if v_2_0.Op != OpAMD64BSFQ {
			break
		}
		v_2_0_0 := v_2_0.Args[0]
		if v_2_0_0.Op != OpAMD64ORQconst {
			break
		}
		c := auxIntToInt32(v_2_0_0.AuxInt)
		if !(c != 0) {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVQGE applies the generated AMD64.rules rewrites
// for CMOVQGE (64-bit conditional move if signed >=). Rules are tried in
// order; the first match rewrites v in place and returns true, otherwise false.
func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool {
	v_2 := v.Args[2] // flags operand
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// InvertFlags swaps the comparison's operands: GE becomes its dual LE.
	// match: (CMOVQGE x y (InvertFlags cond))
	// result: (CMOVQLE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQLE)
		v.AddArg3(x, y, cond)
		return true
	}
	// Constant-flags folds: the condition is statically known.
	// match: (CMOVQGE _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQGE _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQGE _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQGE y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQGE y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVQGT applies the generated AMD64.rules rewrites
// for CMOVQGT (64-bit conditional move if signed >). Rules are tried in
// order; the first match rewrites v in place and returns true, otherwise false.
func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool {
	v_2 := v.Args[2] // flags operand
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// InvertFlags swaps the comparison's operands: GT becomes its dual LT.
	// match: (CMOVQGT x y (InvertFlags cond))
	// result: (CMOVQLT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQLT)
		v.AddArg3(x, y, cond)
		return true
	}
	// Constant-flags folds: the condition is statically known.
	// match: (CMOVQGT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQGT _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQGT _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQGT y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQGT y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVQHI applies the generated AMD64.rules rewrites
// for CMOVQHI (64-bit conditional move if unsigned >). Rules are tried in
// order; the first match rewrites v in place and returns true, otherwise false.
func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool {
	v_2 := v.Args[2] // flags operand
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// InvertFlags swaps the comparison's operands: HI becomes its dual CS.
	// match: (CMOVQHI x y (InvertFlags cond))
	// result: (CMOVQCS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQCS)
		v.AddArg3(x, y, cond)
		return true
	}
	// Constant-flags folds: the condition is statically known.
	// match: (CMOVQHI y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQHI _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQHI y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQHI y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQHI _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVQLE applies the generated AMD64.rules rewrites
// for CMOVQLE (64-bit conditional move if signed <=). Rules are tried in
// order; the first match rewrites v in place and returns true, otherwise false.
func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool {
	v_2 := v.Args[2] // flags operand
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// InvertFlags swaps the comparison's operands: LE becomes its dual GE.
	// match: (CMOVQLE x y (InvertFlags cond))
	// result: (CMOVQGE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQGE)
		v.AddArg3(x, y, cond)
		return true
	}
	// Constant-flags folds: the condition is statically known.
	// match: (CMOVQLE _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLE y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLE y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLE _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLE _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVQLS applies the generated AMD64.rules rewrites
// for CMOVQLS (64-bit conditional move if unsigned <=). Rules are tried in
// order; the first match rewrites v in place and returns true, otherwise false.
func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool {
	v_2 := v.Args[2] // flags operand
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// InvertFlags swaps the comparison's operands: LS becomes its dual CC.
	// match: (CMOVQLS x y (InvertFlags cond))
	// result: (CMOVQCC x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQCC)
		v.AddArg3(x, y, cond)
		return true
	}
	// Constant-flags folds: the condition is statically known.
	// match: (CMOVQLS _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVQLT applies the generated AMD64.rules rewrites
// for CMOVQLT (64-bit conditional move if signed <). Rules are tried in
// order; the first match rewrites v in place and returns true, otherwise false.
func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool {
	v_2 := v.Args[2] // flags operand
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// InvertFlags swaps the comparison's operands: LT becomes its dual GT.
	// match: (CMOVQLT x y (InvertFlags cond))
	// result: (CMOVQGT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQGT)
		v.AddArg3(x, y, cond)
		return true
	}
	// Constant-flags folds: the condition is statically known.
	// match: (CMOVQLT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLT y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLT y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLT _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLT _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVQNE applies the generated AMD64.rules rewrites
// for CMOVQNE (64-bit conditional move if not equal). Rules are tried in
// order; the first match rewrites v in place and returns true, otherwise false.
func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool {
	v_2 := v.Args[2] // flags operand
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// NE is symmetric under operand swap, so InvertFlags is simply peeled off.
	// match: (CMOVQNE x y (InvertFlags cond))
	// result: (CMOVQNE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQNE)
		v.AddArg3(x, y, cond)
		return true
	}
	// Constant-flags folds: the condition is statically known.
	// match: (CMOVQNE y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQNE _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQNE _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQNE _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQNE _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVWCC applies the generated AMD64.rules rewrites
// for CMOVWCC (16-bit conditional move if carry clear, i.e. unsigned >=).
// Rules are tried in order; the first match rewrites v in place and returns
// true, otherwise false.
func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool {
	v_2 := v.Args[2] // flags operand
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// InvertFlags swaps the comparison's operands: CC becomes its dual LS.
	// match: (CMOVWCC x y (InvertFlags cond))
	// result: (CMOVWLS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWLS)
		v.AddArg3(x, y, cond)
		return true
	}
	// Constant-flags folds: the condition is statically known.
	// match: (CMOVWCC _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWCC _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWCC y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWCC y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWCC _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVWCS applies the generated AMD64.rules rewrites
// for CMOVWCS (16-bit conditional move if carry set, i.e. unsigned <).
// Rules are tried in order; the first match rewrites v in place and returns
// true, otherwise false.
func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool {
	v_2 := v.Args[2] // flags operand
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// InvertFlags swaps the comparison's operands: CS becomes its dual HI.
	// match: (CMOVWCS x y (InvertFlags cond))
	// result: (CMOVWHI x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWHI)
		v.AddArg3(x, y, cond)
		return true
	}
	// Constant-flags folds: the condition is statically known.
	// match: (CMOVWCS y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWCS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWCS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWCS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWCS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVWEQ applies the generated AMD64.rules rewrites
// for CMOVWEQ (16-bit conditional move if equal). Rules are tried in order;
// the first match rewrites v in place and returns true, otherwise false.
func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool {
	v_2 := v.Args[2] // flags operand
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// EQ is symmetric under operand swap, so InvertFlags is simply peeled off.
	// match: (CMOVWEQ x y (InvertFlags cond))
	// result: (CMOVWEQ x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWEQ)
		v.AddArg3(x, y, cond)
		return true
	}
	// Constant-flags folds: the condition is statically known.
	// match: (CMOVWEQ _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWEQ y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWEQ y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWEQ y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWEQ y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVWGE applies the generated AMD64.rules rewrites
// for CMOVWGE (16-bit conditional move if signed >=). Rules are tried in
// order; the first match rewrites v in place and returns true, otherwise false.
func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool {
	v_2 := v.Args[2] // flags operand
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// InvertFlags swaps the comparison's operands: GE becomes its dual LE.
	// match: (CMOVWGE x y (InvertFlags cond))
	// result: (CMOVWLE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWLE)
		v.AddArg3(x, y, cond)
		return true
	}
	// Constant-flags folds: the condition is statically known.
	// match: (CMOVWGE _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWGE _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWGE _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWGE y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWGE y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVWGT applies the generated AMD64.rules rewrites
// for CMOVWGT (16-bit conditional move if signed >). Rules are tried in
// order; the first match rewrites v in place and returns true, otherwise false.
func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool {
	v_2 := v.Args[2] // flags operand
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// InvertFlags swaps the comparison's operands: GT becomes its dual LT.
	// match: (CMOVWGT x y (InvertFlags cond))
	// result: (CMOVWLT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWLT)
		v.AddArg3(x, y, cond)
		return true
	}
	// Constant-flags folds: the condition is statically known.
	// match: (CMOVWGT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWGT _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWGT _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWGT y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWGT y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
6438 func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool {
6439         v_2 := v.Args[2]
6440         v_1 := v.Args[1]
6441         v_0 := v.Args[0]
6442         // match: (CMOVWHI x y (InvertFlags cond))
6443         // result: (CMOVWCS x y cond)
6444         for {
6445                 x := v_0
6446                 y := v_1
6447                 if v_2.Op != OpAMD64InvertFlags {
6448                         break
6449                 }
6450                 cond := v_2.Args[0]
6451                 v.reset(OpAMD64CMOVWCS)
6452                 v.AddArg3(x, y, cond)
6453                 return true
6454         }
6455         // match: (CMOVWHI y _ (FlagEQ))
6456         // result: y
6457         for {
6458                 y := v_0
6459                 if v_2.Op != OpAMD64FlagEQ {
6460                         break
6461                 }
6462                 v.copyOf(y)
6463                 return true
6464         }
6465         // match: (CMOVWHI _ x (FlagGT_UGT))
6466         // result: x
6467         for {
6468                 x := v_1
6469                 if v_2.Op != OpAMD64FlagGT_UGT {
6470                         break
6471                 }
6472                 v.copyOf(x)
6473                 return true
6474         }
6475         // match: (CMOVWHI y _ (FlagGT_ULT))
6476         // result: y
6477         for {
6478                 y := v_0
6479                 if v_2.Op != OpAMD64FlagGT_ULT {
6480                         break
6481                 }
6482                 v.copyOf(y)
6483                 return true
6484         }
6485         // match: (CMOVWHI y _ (FlagLT_ULT))
6486         // result: y
6487         for {
6488                 y := v_0
6489                 if v_2.Op != OpAMD64FlagLT_ULT {
6490                         break
6491                 }
6492                 v.copyOf(y)
6493                 return true
6494         }
6495         // match: (CMOVWHI _ x (FlagLT_UGT))
6496         // result: x
6497         for {
6498                 x := v_1
6499                 if v_2.Op != OpAMD64FlagLT_UGT {
6500                         break
6501                 }
6502                 v.copyOf(x)
6503                 return true
6504         }
6505         return false
6506 }
6507 func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool {
6508         v_2 := v.Args[2]
6509         v_1 := v.Args[1]
6510         v_0 := v.Args[0]
6511         // match: (CMOVWLE x y (InvertFlags cond))
6512         // result: (CMOVWGE x y cond)
6513         for {
6514                 x := v_0
6515                 y := v_1
6516                 if v_2.Op != OpAMD64InvertFlags {
6517                         break
6518                 }
6519                 cond := v_2.Args[0]
6520                 v.reset(OpAMD64CMOVWGE)
6521                 v.AddArg3(x, y, cond)
6522                 return true
6523         }
6524         // match: (CMOVWLE _ x (FlagEQ))
6525         // result: x
6526         for {
6527                 x := v_1
6528                 if v_2.Op != OpAMD64FlagEQ {
6529                         break
6530                 }
6531                 v.copyOf(x)
6532                 return true
6533         }
6534         // match: (CMOVWLE y _ (FlagGT_UGT))
6535         // result: y
6536         for {
6537                 y := v_0
6538                 if v_2.Op != OpAMD64FlagGT_UGT {
6539                         break
6540                 }
6541                 v.copyOf(y)
6542                 return true
6543         }
6544         // match: (CMOVWLE y _ (FlagGT_ULT))
6545         // result: y
6546         for {
6547                 y := v_0
6548                 if v_2.Op != OpAMD64FlagGT_ULT {
6549                         break
6550                 }
6551                 v.copyOf(y)
6552                 return true
6553         }
6554         // match: (CMOVWLE _ x (FlagLT_ULT))
6555         // result: x
6556         for {
6557                 x := v_1
6558                 if v_2.Op != OpAMD64FlagLT_ULT {
6559                         break
6560                 }
6561                 v.copyOf(x)
6562                 return true
6563         }
6564         // match: (CMOVWLE _ x (FlagLT_UGT))
6565         // result: x
6566         for {
6567                 x := v_1
6568                 if v_2.Op != OpAMD64FlagLT_UGT {
6569                         break
6570                 }
6571                 v.copyOf(x)
6572                 return true
6573         }
6574         return false
6575 }
6576 func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool {
6577         v_2 := v.Args[2]
6578         v_1 := v.Args[1]
6579         v_0 := v.Args[0]
6580         // match: (CMOVWLS x y (InvertFlags cond))
6581         // result: (CMOVWCC x y cond)
6582         for {
6583                 x := v_0
6584                 y := v_1
6585                 if v_2.Op != OpAMD64InvertFlags {
6586                         break
6587                 }
6588                 cond := v_2.Args[0]
6589                 v.reset(OpAMD64CMOVWCC)
6590                 v.AddArg3(x, y, cond)
6591                 return true
6592         }
6593         // match: (CMOVWLS _ x (FlagEQ))
6594         // result: x
6595         for {
6596                 x := v_1
6597                 if v_2.Op != OpAMD64FlagEQ {
6598                         break
6599                 }
6600                 v.copyOf(x)
6601                 return true
6602         }
6603         // match: (CMOVWLS y _ (FlagGT_UGT))
6604         // result: y
6605         for {
6606                 y := v_0
6607                 if v_2.Op != OpAMD64FlagGT_UGT {
6608                         break
6609                 }
6610                 v.copyOf(y)
6611                 return true
6612         }
6613         // match: (CMOVWLS _ x (FlagGT_ULT))
6614         // result: x
6615         for {
6616                 x := v_1
6617                 if v_2.Op != OpAMD64FlagGT_ULT {
6618                         break
6619                 }
6620                 v.copyOf(x)
6621                 return true
6622         }
6623         // match: (CMOVWLS _ x (FlagLT_ULT))
6624         // result: x
6625         for {
6626                 x := v_1
6627                 if v_2.Op != OpAMD64FlagLT_ULT {
6628                         break
6629                 }
6630                 v.copyOf(x)
6631                 return true
6632         }
6633         // match: (CMOVWLS y _ (FlagLT_UGT))
6634         // result: y
6635         for {
6636                 y := v_0
6637                 if v_2.Op != OpAMD64FlagLT_UGT {
6638                         break
6639                 }
6640                 v.copyOf(y)
6641                 return true
6642         }
6643         return false
6644 }
6645 func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool {
6646         v_2 := v.Args[2]
6647         v_1 := v.Args[1]
6648         v_0 := v.Args[0]
6649         // match: (CMOVWLT x y (InvertFlags cond))
6650         // result: (CMOVWGT x y cond)
6651         for {
6652                 x := v_0
6653                 y := v_1
6654                 if v_2.Op != OpAMD64InvertFlags {
6655                         break
6656                 }
6657                 cond := v_2.Args[0]
6658                 v.reset(OpAMD64CMOVWGT)
6659                 v.AddArg3(x, y, cond)
6660                 return true
6661         }
6662         // match: (CMOVWLT y _ (FlagEQ))
6663         // result: y
6664         for {
6665                 y := v_0
6666                 if v_2.Op != OpAMD64FlagEQ {
6667                         break
6668                 }
6669                 v.copyOf(y)
6670                 return true
6671         }
6672         // match: (CMOVWLT y _ (FlagGT_UGT))
6673         // result: y
6674         for {
6675                 y := v_0
6676                 if v_2.Op != OpAMD64FlagGT_UGT {
6677                         break
6678                 }
6679                 v.copyOf(y)
6680                 return true
6681         }
6682         // match: (CMOVWLT y _ (FlagGT_ULT))
6683         // result: y
6684         for {
6685                 y := v_0
6686                 if v_2.Op != OpAMD64FlagGT_ULT {
6687                         break
6688                 }
6689                 v.copyOf(y)
6690                 return true
6691         }
6692         // match: (CMOVWLT _ x (FlagLT_ULT))
6693         // result: x
6694         for {
6695                 x := v_1
6696                 if v_2.Op != OpAMD64FlagLT_ULT {
6697                         break
6698                 }
6699                 v.copyOf(x)
6700                 return true
6701         }
6702         // match: (CMOVWLT _ x (FlagLT_UGT))
6703         // result: x
6704         for {
6705                 x := v_1
6706                 if v_2.Op != OpAMD64FlagLT_UGT {
6707                         break
6708                 }
6709                 v.copyOf(x)
6710                 return true
6711         }
6712         return false
6713 }
6714 func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool {
6715         v_2 := v.Args[2]
6716         v_1 := v.Args[1]
6717         v_0 := v.Args[0]
6718         // match: (CMOVWNE x y (InvertFlags cond))
6719         // result: (CMOVWNE x y cond)
6720         for {
6721                 x := v_0
6722                 y := v_1
6723                 if v_2.Op != OpAMD64InvertFlags {
6724                         break
6725                 }
6726                 cond := v_2.Args[0]
6727                 v.reset(OpAMD64CMOVWNE)
6728                 v.AddArg3(x, y, cond)
6729                 return true
6730         }
6731         // match: (CMOVWNE y _ (FlagEQ))
6732         // result: y
6733         for {
6734                 y := v_0
6735                 if v_2.Op != OpAMD64FlagEQ {
6736                         break
6737                 }
6738                 v.copyOf(y)
6739                 return true
6740         }
6741         // match: (CMOVWNE _ x (FlagGT_UGT))
6742         // result: x
6743         for {
6744                 x := v_1
6745                 if v_2.Op != OpAMD64FlagGT_UGT {
6746                         break
6747                 }
6748                 v.copyOf(x)
6749                 return true
6750         }
6751         // match: (CMOVWNE _ x (FlagGT_ULT))
6752         // result: x
6753         for {
6754                 x := v_1
6755                 if v_2.Op != OpAMD64FlagGT_ULT {
6756                         break
6757                 }
6758                 v.copyOf(x)
6759                 return true
6760         }
6761         // match: (CMOVWNE _ x (FlagLT_ULT))
6762         // result: x
6763         for {
6764                 x := v_1
6765                 if v_2.Op != OpAMD64FlagLT_ULT {
6766                         break
6767                 }
6768                 v.copyOf(x)
6769                 return true
6770         }
6771         // match: (CMOVWNE _ x (FlagLT_UGT))
6772         // result: x
6773         for {
6774                 x := v_1
6775                 if v_2.Op != OpAMD64FlagLT_UGT {
6776                         break
6777                 }
6778                 v.copyOf(x)
6779                 return true
6780         }
6781         return false
6782 }
// rewriteValueAMD64_OpAMD64CMPB applies the AMD64 rewrite rules for CMPB:
// folding a constant operand into CMPBconst, canonicalizing operand order,
// and merging a byte load into the compare (CMPBload).
// NOTE: generated code — each `for { ... break }` block below is one
// attempted rule; the loop never iterates, `break` means "rule failed".
// Rules are tried in order; the first match wins.
func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPB x (MOVLconst [c]))
	// result: (CMPBconst x [int8(c)])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64CMPBconst)
		// Only the low byte of the constant is compared.
		v.AuxInt = int8ToAuxInt(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPB (MOVLconst [c]) x)
	// result: (InvertFlags (CMPBconst x [int8(c)]))
	for {
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_1
		// The operands are swapped to put the constant on the right, so
		// the result is wrapped in InvertFlags to keep the same meaning.
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPB x y)
	// cond: canonLessThan(x,y)
	// result: (InvertFlags (CMPB y x))
	for {
		x := v_0
		y := v_1
		if !(canonLessThan(x, y)) {
			break
		}
		// Canonical operand ordering; InvertFlags compensates the swap.
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPB l:(MOVBload {sym} [off] ptr mem) x)
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (CMPBload {sym} [off] ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVBload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		// clobber(l) marks the load dead once merged into the compare.
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64CMPBload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (CMPB x l:(MOVBload {sym} [off] ptr mem))
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (InvertFlags (CMPBload {sym} [off] ptr x mem))
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVBload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		// Load was on the right; merging puts it on the left, so wrap
		// the merged compare in InvertFlags.
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(l.Pos, OpAMD64CMPBload, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg3(ptr, x, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPBconst applies the AMD64 rewrite rules for
// CMPBconst: constant-folding the comparison into a Flag* value, turning
// AND-based compares into TESTB forms, and folding a byte load into a
// CMPBconstload.
// NOTE: generated code — each `for { ... break }` block below is one
// attempted rule; the loop never iterates. Rule order matters: the
// constant-folding rules must run before the TESTB fallbacks.
func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)==y
	// result: (FlagEQ)
	for {
		y := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int8(x) == y) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)<y && uint8(x)<uint8(y)
	// result: (FlagLT_ULT)
	for {
		y := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		// The Flag* result encodes both the signed and the unsigned
		// outcome of the comparison.
		if !(int8(x) < y && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)<y && uint8(x)>uint8(y)
	// result: (FlagLT_UGT)
	for {
		y := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int8(x) < y && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)>y && uint8(x)<uint8(y)
	// result: (FlagGT_ULT)
	for {
		y := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int8(x) > y && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)>y && uint8(x)>uint8(y)
	// result: (FlagGT_UGT)
	for {
		y := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int8(x) > y && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPBconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int8(m) && int8(m) < n
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := auxIntToInt32(v_0.AuxInt)
		// A value masked by m is at most m, so the compare outcome is
		// known statically when 0 <= m < n.
		if !(0 <= int8(m) && int8(m) < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst a:(ANDL x y) [0])
	// cond: a.Uses == 1
	// result: (TESTB x y)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDL {
			break
		}
		y := a.Args[1]
		x := a.Args[0]
		// Only fold when the AND has no other users.
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTB)
		v.AddArg2(x, y)
		return true
	}
	// match: (CMPBconst a:(ANDLconst [c] x) [0])
	// cond: a.Uses == 1
	// result: (TESTBconst [int8(c)] x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTBconst)
		v.AuxInt = int8ToAuxInt(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPBconst x [0])
	// result: (TESTB x x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		// Compare against zero becomes a self-test.
		v.reset(OpAMD64TESTB)
		v.AddArg2(x, x)
		return true
	}
	// match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c])
	// cond: l.Uses == 1 && clobber(l)
	// result: @l.Block (CMPBconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
	for {
		c := auxIntToInt8(v.AuxInt)
		l := v_0
		if l.Op != OpAMD64MOVBload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(l.Uses == 1 && clobber(l)) {
			break
		}
		// @l.Block form: the replacement value is built in the load's
		// block, then v is made a copy of it.
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
		v.copyOf(v0)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	return false
}
7049 func rewriteValueAMD64_OpAMD64CMPBconstload(v *Value) bool {
7050         v_1 := v.Args[1]
7051         v_0 := v.Args[0]
7052         // match: (CMPBconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
7053         // cond: ValAndOff(valoff1).canAdd32(off2)
7054         // result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
7055         for {
7056                 valoff1 := auxIntToValAndOff(v.AuxInt)
7057                 sym := auxToSym(v.Aux)
7058                 if v_0.Op != OpAMD64ADDQconst {
7059                         break
7060                 }
7061                 off2 := auxIntToInt32(v_0.AuxInt)
7062                 base := v_0.Args[0]
7063                 mem := v_1
7064                 if !(ValAndOff(valoff1).canAdd32(off2)) {
7065                         break
7066                 }
7067                 v.reset(OpAMD64CMPBconstload)
7068                 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7069                 v.Aux = symToAux(sym)
7070                 v.AddArg2(base, mem)
7071                 return true
7072         }
7073         // match: (CMPBconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
7074         // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
7075         // result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
7076         for {
7077                 valoff1 := auxIntToValAndOff(v.AuxInt)
7078                 sym1 := auxToSym(v.Aux)
7079                 if v_0.Op != OpAMD64LEAQ {
7080                         break
7081                 }
7082                 off2 := auxIntToInt32(v_0.AuxInt)
7083                 sym2 := auxToSym(v_0.Aux)
7084                 base := v_0.Args[0]
7085                 mem := v_1
7086                 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
7087                         break
7088                 }
7089                 v.reset(OpAMD64CMPBconstload)
7090                 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7091                 v.Aux = symToAux(mergeSym(sym1, sym2))
7092                 v.AddArg2(base, mem)
7093                 return true
7094         }
7095         return false
7096 }
7097 func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool {
7098         v_2 := v.Args[2]
7099         v_1 := v.Args[1]
7100         v_0 := v.Args[0]
7101         // match: (CMPBload [off1] {sym} (ADDQconst [off2] base) val mem)
7102         // cond: is32Bit(int64(off1)+int64(off2))
7103         // result: (CMPBload [off1+off2] {sym} base val mem)
7104         for {
7105                 off1 := auxIntToInt32(v.AuxInt)
7106                 sym := auxToSym(v.Aux)
7107                 if v_0.Op != OpAMD64ADDQconst {
7108                         break
7109                 }
7110                 off2 := auxIntToInt32(v_0.AuxInt)
7111                 base := v_0.Args[0]
7112                 val := v_1
7113                 mem := v_2
7114                 if !(is32Bit(int64(off1) + int64(off2))) {
7115                         break
7116                 }
7117                 v.reset(OpAMD64CMPBload)
7118                 v.AuxInt = int32ToAuxInt(off1 + off2)
7119                 v.Aux = symToAux(sym)
7120                 v.AddArg3(base, val, mem)
7121                 return true
7122         }
7123         // match: (CMPBload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
7124         // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
7125         // result: (CMPBload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
7126         for {
7127                 off1 := auxIntToInt32(v.AuxInt)
7128                 sym1 := auxToSym(v.Aux)
7129                 if v_0.Op != OpAMD64LEAQ {
7130                         break
7131                 }
7132                 off2 := auxIntToInt32(v_0.AuxInt)
7133                 sym2 := auxToSym(v_0.Aux)
7134                 base := v_0.Args[0]
7135                 val := v_1
7136                 mem := v_2
7137                 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
7138                         break
7139                 }
7140                 v.reset(OpAMD64CMPBload)
7141                 v.AuxInt = int32ToAuxInt(off1 + off2)
7142                 v.Aux = symToAux(mergeSym(sym1, sym2))
7143                 v.AddArg3(base, val, mem)
7144                 return true
7145         }
7146         // match: (CMPBload {sym} [off] ptr (MOVLconst [c]) mem)
7147         // result: (CMPBconstload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem)
7148         for {
7149                 off := auxIntToInt32(v.AuxInt)
7150                 sym := auxToSym(v.Aux)
7151                 ptr := v_0
7152                 if v_1.Op != OpAMD64MOVLconst {
7153                         break
7154                 }
7155                 c := auxIntToInt32(v_1.AuxInt)
7156                 mem := v_2
7157                 v.reset(OpAMD64CMPBconstload)
7158                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
7159                 v.Aux = symToAux(sym)
7160                 v.AddArg2(ptr, mem)
7161                 return true
7162         }
7163         return false
7164 }
// rewriteValueAMD64_OpAMD64CMPL applies the AMD64 rewrite rules for CMPL:
// folding a constant operand into CMPLconst, canonicalizing operand order,
// and merging a 32-bit load into the compare (CMPLload).
// NOTE: generated code — each `for { ... break }` block below is one
// attempted rule; the loop never iterates, `break` means "rule failed".
// Rules are tried in order; the first match wins.
func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPL x (MOVLconst [c]))
	// result: (CMPLconst x [c])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64CMPLconst)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (CMPL (MOVLconst [c]) x)
	// result: (InvertFlags (CMPLconst x [c]))
	for {
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_1
		// The operands are swapped to put the constant on the right, so
		// the result is wrapped in InvertFlags to keep the same meaning.
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPL x y)
	// cond: canonLessThan(x,y)
	// result: (InvertFlags (CMPL y x))
	for {
		x := v_0
		y := v_1
		if !(canonLessThan(x, y)) {
			break
		}
		// Canonical operand ordering; InvertFlags compensates the swap.
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPL l:(MOVLload {sym} [off] ptr mem) x)
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (CMPLload {sym} [off] ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		// clobber(l) marks the load dead once merged into the compare.
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64CMPLload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (CMPL x l:(MOVLload {sym} [off] ptr mem))
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (InvertFlags (CMPLload {sym} [off] ptr x mem))
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		// Load was on the right; merging puts it on the left, so wrap
		// the merged compare in InvertFlags.
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(l.Pos, OpAMD64CMPLload, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg3(ptr, x, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPLconst applies the generated rewrite rules for
// CMPLconst, trying each rule in order and reporting whether v was rewritten.
// Rules fold compares of constants into flag constants, turn compares against
// zero into TEST forms, and merge a single-use load into a compare-with-load.
// NOTE(review): this file is generated from gen/AMD64.rules (see file header);
// change the rules there, not here.
func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: x==y
	// result: (FlagEQ)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(x == y) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: x<y && uint32(x)<uint32(y)
	// result: (FlagLT_ULT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(x < y && uint32(x) < uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: x<y && uint32(x)>uint32(y)
	// result: (FlagLT_UGT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(x < y && uint32(x) > uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: x>y && uint32(x)<uint32(y)
	// result: (FlagGT_ULT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(x > y && uint32(x) < uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: x>y && uint32(x)>uint32(y)
	// result: (FlagGT_UGT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(x > y && uint32(x) > uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPLconst (SHRLconst _ [c]) [n])
	// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		c := auxIntToInt8(v_0.AuxInt)
		if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (ANDLconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := auxIntToInt32(v_0.AuxInt)
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst a:(ANDL x y) [0])
	// cond: a.Uses == 1
	// result: (TESTL x y)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDL {
			break
		}
		y := a.Args[1]
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTL)
		v.AddArg2(x, y)
		return true
	}
	// match: (CMPLconst a:(ANDLconst [c] x) [0])
	// cond: a.Uses == 1
	// result: (TESTLconst [c] x)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTLconst)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (CMPLconst x [0])
	// result: (TESTL x x)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.reset(OpAMD64TESTL)
		v.AddArg2(x, x)
		return true
	}
	// match: (CMPLconst l:(MOVLload {sym} [off] ptr mem) [c])
	// cond: l.Uses == 1 && clobber(l)
	// result: @l.Block (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		c := auxIntToInt32(v.AuxInt)
		l := v_0
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(l.Uses == 1 && clobber(l)) {
			break
		}
		// Build the replacement in the load's block (@l.Block) and make v a
		// copy of it.
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
		v.copyOf(v0)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPLconstload applies the generated rewrite rules
// for CMPLconstload, folding an ADDQconst or LEAQ address computation into the
// instruction's offset (and symbol) when the combined offset still fits.
// Reports whether v was rewritten.
// NOTE(review): generated from gen/AMD64.rules; change the rules, not this file.
func rewriteValueAMD64_OpAMD64CMPLconstload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPLconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (CMPLconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64CMPLconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (CMPLconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (CMPLconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPLconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPLload applies the generated rewrite rules for
// CMPLload: fold ADDQconst/LEAQ address arithmetic into the offset/symbol,
// and turn a compare-with-load against a MOVLconst into CMPLconstload.
// Reports whether v was rewritten.
// NOTE(review): generated from gen/AMD64.rules; change the rules, not this file.
func rewriteValueAMD64_OpAMD64CMPLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPLload [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (CMPLload [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPLload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (CMPLload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPLload {sym} [off] ptr (MOVLconst [c]) mem)
	// result: (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64CMPLconstload)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPQ applies the generated rewrite rules for CMPQ,
// in order: strength-reduce against a 32-bit-representable MOVQconst (using
// InvertFlags when the constant is the left operand), canonicalize operand
// order via canonLessThan, fold constant/constant compares into flag values,
// and merge a single-use MOVQload operand into CMPQload. Reports whether v
// was rewritten.
// NOTE(review): generated from gen/AMD64.rules; change the rules, not this file.
func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (CMPQconst x [int32(c)])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64CMPQconst)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (InvertFlags (CMPQconst x [int32(c)]))
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		x := v_1
		if !(is32Bit(c)) {
			break
		}
		// CMPQconst wants the constant on the right; swapping operands
		// requires inverting the resulting flags.
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(int32(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPQ x y)
	// cond: canonLessThan(x,y)
	// result: (InvertFlags (CMPQ y x))
	for {
		x := v_0
		y := v_1
		if !(canonLessThan(x, y)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
	// cond: x==y
	// result: (FlagEQ)
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		y := auxIntToInt64(v_1.AuxInt)
		if !(x == y) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
	// cond: x<y && uint64(x)<uint64(y)
	// result: (FlagLT_ULT)
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		y := auxIntToInt64(v_1.AuxInt)
		if !(x < y && uint64(x) < uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
	// cond: x<y && uint64(x)>uint64(y)
	// result: (FlagLT_UGT)
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		y := auxIntToInt64(v_1.AuxInt)
		if !(x < y && uint64(x) > uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
	// cond: x>y && uint64(x)<uint64(y)
	// result: (FlagGT_ULT)
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		y := auxIntToInt64(v_1.AuxInt)
		if !(x > y && uint64(x) < uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
	// cond: x>y && uint64(x)>uint64(y)
	// result: (FlagGT_UGT)
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		y := auxIntToInt64(v_1.AuxInt)
		if !(x > y && uint64(x) > uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPQ l:(MOVQload {sym} [off] ptr mem) x)
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (CMPQload {sym} [off] ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64CMPQload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (CMPQ x l:(MOVQload {sym} [off] ptr mem))
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (InvertFlags (CMPQload {sym} [off] ptr x mem))
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		// Merging the right-hand load swaps the compare's operand order, so
		// the flags must be inverted.
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(l.Pos, OpAMD64CMPQload, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg3(ptr, x, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPQconst applies the generated rewrite rules for
// CMPQconst, trying each rule in order and reporting whether v was rewritten.
// Rules derive flag constants from value-range facts (NEGQ/ADDQconst/ANDQconst
// shapes, zero-extended loads, shifted or masked operands), fold compares of
// constants, turn compares against zero into TEST forms, and merge a
// single-use load into CMPQconstload.
// NOTE(review): generated from gen/AMD64.rules; change the rules, not this file.
func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32])
	// result: (FlagLT_ULT)
	for {
		if auxIntToInt32(v.AuxInt) != 32 || v_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_0_0.AuxInt) != -16 {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_0_0.AuxInt) != 15 {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst [7] _))) [32])
	// result: (FlagLT_ULT)
	for {
		if auxIntToInt32(v.AuxInt) != 32 || v_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_0_0.AuxInt) != -8 {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_0_0.AuxInt) != 7 {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x==int64(y)
	// result: (FlagEQ)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if !(x == int64(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x<int64(y) && uint64(x)<uint64(int64(y))
	// result: (FlagLT_ULT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if !(x < int64(y) && uint64(x) < uint64(int64(y))) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x<int64(y) && uint64(x)>uint64(int64(y))
	// result: (FlagLT_UGT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if !(x < int64(y) && uint64(x) > uint64(int64(y))) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x>int64(y) && uint64(x)<uint64(int64(y))
	// result: (FlagGT_ULT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if !(x > int64(y) && uint64(x) < uint64(int64(y))) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x>int64(y) && uint64(x)>uint64(int64(y))
	// result: (FlagGT_UGT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if !(x > int64(y) && uint64(x) > uint64(int64(y))) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPQconst (MOVBQZX _) [c])
	// cond: 0xFF < c
	// result: (FlagLT_ULT)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVBQZX || !(0xFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVWQZX _) [c])
	// cond: 0xFFFF < c
	// result: (FlagLT_ULT)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVWQZX || !(0xFFFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (SHRQconst _ [c]) [n])
	// cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		c := auxIntToInt8(v_0.AuxInt)
		if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDQconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		m := auxIntToInt32(v_0.AuxInt)
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDLconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := auxIntToInt32(v_0.AuxInt)
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst a:(ANDQ x y) [0])
	// cond: a.Uses == 1
	// result: (TESTQ x y)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDQ {
			break
		}
		y := a.Args[1]
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (CMPQconst a:(ANDQconst [c] x) [0])
	// cond: a.Uses == 1
	// result: (TESTQconst [c] x)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTQconst)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (CMPQconst x [0])
	// result: (TESTQ x x)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.reset(OpAMD64TESTQ)
		v.AddArg2(x, x)
		return true
	}
	// match: (CMPQconst l:(MOVQload {sym} [off] ptr mem) [c])
	// cond: l.Uses == 1 && clobber(l)
	// result: @l.Block (CMPQconstload {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		c := auxIntToInt32(v.AuxInt)
		l := v_0
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(l.Uses == 1 && clobber(l)) {
			break
		}
		// Build the replacement in the load's block (@l.Block) and make v a
		// copy of it.
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
		v.copyOf(v0)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPQconstload applies the generated rewrite rules
// for CMPQconstload, folding an ADDQconst or LEAQ address computation into the
// instruction's offset (and symbol) when the combined offset still fits.
// Reports whether v was rewritten.
// NOTE(review): generated from gen/AMD64.rules; change the rules, not this file.
func rewriteValueAMD64_OpAMD64CMPQconstload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPQconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (CMPQconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64CMPQconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (CMPQconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (CMPQconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPQconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPQload applies the generated rewrite rules for
// CMPQload (compare a 64-bit register against a loaded quadword). Rules are
// tried in order; the first match rewrites v in place and returns true.
// They fold constant address offsets (ADDQconst) and static symbols (LEAQ)
// into the instruction's AuxInt/Aux, and fold a constant comparand into a
// CMPQconstload.
func rewriteValueAMD64_OpAMD64CMPQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPQload [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (CMPQload [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		// Combined displacement must still fit in 32 bits.
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPQload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (CMPQload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPQload {sym} [off] ptr (MOVQconst [c]) mem)
	// cond: validVal(c)
	// result: (CMPQconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		// validVal guards that c fits in the immediate field.
		if !(validVal(c)) {
			break
		}
		v.reset(OpAMD64CMPQconstload)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPW applies the generated rewrite rules for
// 16-bit register-register compares. Rules are tried in order; the first
// match rewrites v and returns true. They turn a constant operand into
// CMPWconst (wrapping in InvertFlags when the constant is on the left,
// since the operand order of the compare is reversed), canonicalize operand
// order, and merge a one-use MOVWload operand into a CMPWload.
func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPW x (MOVLconst [c]))
	// result: (CMPWconst x [int16(c)])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64CMPWconst)
		v.AuxInt = int16ToAuxInt(int16(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPW (MOVLconst [c]) x)
	// result: (InvertFlags (CMPWconst x [int16(c)]))
	for {
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_1
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v0.AuxInt = int16ToAuxInt(int16(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPW x y)
	// cond: canonLessThan(x,y)
	// result: (InvertFlags (CMPW y x))
	for {
		x := v_0
		y := v_1
		if !(canonLessThan(x, y)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPW l:(MOVWload {sym} [off] ptr mem) x)
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (CMPWload {sym} [off] ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64CMPWload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (CMPW x l:(MOVWload {sym} [off] ptr mem))
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (InvertFlags (CMPWload {sym} [off] ptr x mem))
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		// New value uses the load's position so debug info points at the load.
		v0 := b.NewValue0(l.Pos, OpAMD64CMPWload, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg3(ptr, x, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPWconst applies the generated rewrite rules for
// a 16-bit compare against a constant. Rules are tried in order; the first
// match rewrites v and returns true. Constant-vs-constant compares fold to
// the appropriate Flag* pseudo-result (encoding both the signed and unsigned
// outcome); compares with 0 become TESTW forms; and a compare of a one-use
// load becomes a CMPWconstload placed in the load's block.
func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)==y
	// result: (FlagEQ)
	for {
		y := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int16(x) == y) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)<y && uint16(x)<uint16(y)
	// result: (FlagLT_ULT)
	for {
		y := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int16(x) < y && uint16(x) < uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)<y && uint16(x)>uint16(y)
	// result: (FlagLT_UGT)
	for {
		y := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int16(x) < y && uint16(x) > uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)>y && uint16(x)<uint16(y)
	// result: (FlagGT_ULT)
	for {
		y := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int16(x) > y && uint16(x) < uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)>y && uint16(x)>uint16(y)
	// result: (FlagGT_UGT)
	for {
		y := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int16(x) > y && uint16(x) > uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPWconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int16(m) && int16(m) < n
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := auxIntToInt32(v_0.AuxInt)
		// The masked value is bounded by m, so the compare result is known.
		if !(0 <= int16(m) && int16(m) < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPWconst a:(ANDL x y) [0])
	// cond: a.Uses == 1
	// result: (TESTW x y)
	for {
		if auxIntToInt16(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDL {
			break
		}
		y := a.Args[1]
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTW)
		v.AddArg2(x, y)
		return true
	}
	// match: (CMPWconst a:(ANDLconst [c] x) [0])
	// cond: a.Uses == 1
	// result: (TESTWconst [int16(c)] x)
	for {
		if auxIntToInt16(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTWconst)
		v.AuxInt = int16ToAuxInt(int16(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPWconst x [0])
	// result: (TESTW x x)
	for {
		if auxIntToInt16(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.reset(OpAMD64TESTW)
		v.AddArg2(x, x)
		return true
	}
	// match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c])
	// cond: l.Uses == 1 && clobber(l)
	// result: @l.Block (CMPWconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
	for {
		c := auxIntToInt16(v.AuxInt)
		l := v_0
		if l.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(l.Uses == 1 && clobber(l)) {
			break
		}
		// "@l.Block": build the replacement in the load's block and make v
		// a copy of it, so the fused compare-load executes where the load did.
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
		v.copyOf(v0)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPWconstload applies the generated rewrite rules
// for CMPWconstload (compare a loaded 16-bit value against a constant held
// in the ValAndOff aux). Rules fold constant offsets (ADDQconst) and symbols
// (LEAQ) from the address operand into the aux, when the offset sum still
// fits. Returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64CMPWconstload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPWconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64CMPWconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (CMPWconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPWconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPWload applies the generated rewrite rules for
// CMPWload (compare a 16-bit register against a loaded word). Rules fold
// constant address offsets (ADDQconst) and symbols (LEAQ) into AuxInt/Aux,
// and fold a constant comparand into a CMPWconstload, truncating the
// constant to 16 bits. Returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPWload [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (CMPWload [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPWload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPWload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (CMPWload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPWload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem)
	// result: (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64CMPWconstload)
		// int16 truncation keeps only the low 16 bits actually compared.
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPXCHGLlock applies the generated rewrite rule
// for the locked 32-bit compare-and-swap: fold a constant offset from an
// ADDQconst address into the instruction's displacement when the sum still
// fits in 32 bits. Returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value) bool {
	v_3 := v.Args[3]
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		old := v_1
		new_ := v_2
		mem := v_3
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPXCHGLlock)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg4(ptr, old, new_, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPXCHGQlock applies the generated rewrite rule
// for the locked 64-bit compare-and-swap: fold a constant offset from an
// ADDQconst address into the instruction's displacement when the sum still
// fits in 32 bits. Returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value) bool {
	v_3 := v.Args[3]
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		old := v_1
		new_ := v_2
		mem := v_3
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPXCHGQlock)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg4(ptr, old, new_, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64DIVSD applies the generated rewrite rule for
// scalar double-precision division: merge a one-use MOVSDload divisor into
// a DIVSDload. Only the second operand is merged — division is not
// commutative, so a load feeding the dividend cannot be fused this way.
// Returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64DIVSD(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (DIVSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (DIVSDload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64DIVSDload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64DIVSDload applies the generated rewrite rules for
// DIVSDload (divide by a loaded float64). Rules fold constant address
// offsets (ADDQconst) and symbols (LEAQ) of the divisor address into
// AuxInt/Aux. Returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64DIVSDload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (DIVSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (DIVSDload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64DIVSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (DIVSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (DIVSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64DIVSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64DIVSS applies the generated rewrite rule for
// scalar single-precision division: merge a one-use MOVSSload divisor into
// a DIVSSload. Only the second operand is merged — division is not
// commutative, so a load feeding the dividend cannot be fused this way.
// Returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64DIVSS(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (DIVSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (DIVSSload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64DIVSSload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64DIVSSload applies the generated rewrite rules for
// DIVSSload (divide by a loaded float32). Rules fold constant address
// offsets (ADDQconst) and symbols (LEAQ) of the divisor address into
// AuxInt/Aux. Returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64DIVSSload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (DIVSSload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (DIVSSload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64DIVSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (DIVSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (DIVSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64DIVSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64HMULL applies the generated rewrite rule for the
// signed 32-bit high-multiply: swap the (commutative) operands so a
// rematerializeable value comes first — presumably to help the register
// allocator place it in the fixed input register (NOTE(review): confirm
// against the regalloc constraints for HMULL). Returns true if v was
// rewritten.
func rewriteValueAMD64_OpAMD64HMULL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (HMULL x y)
	// cond: !x.rematerializeable() && y.rematerializeable()
	// result: (HMULL y x)
	for {
		x := v_0
		y := v_1
		if !(!x.rematerializeable() && y.rematerializeable()) {
			break
		}
		v.reset(OpAMD64HMULL)
		v.AddArg2(y, x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64HMULLU applies the generated rewrite rule for the
// unsigned 32-bit high-multiply: swap the (commutative) operands so a
// rematerializeable value comes first — presumably a register-allocation
// hint (NOTE(review): confirm against the regalloc constraints for HMULLU).
// Returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64HMULLU(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (HMULLU x y)
	// cond: !x.rematerializeable() && y.rematerializeable()
	// result: (HMULLU y x)
	for {
		x := v_0
		y := v_1
		if !(!x.rematerializeable() && y.rematerializeable()) {
			break
		}
		v.reset(OpAMD64HMULLU)
		v.AddArg2(y, x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64HMULQ applies the generated rewrite rule for the
// signed 64-bit high-multiply: swap the (commutative) operands so a
// rematerializeable value comes first — presumably a register-allocation
// hint (NOTE(review): confirm against the regalloc constraints for HMULQ).
// Returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64HMULQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (HMULQ x y)
	// cond: !x.rematerializeable() && y.rematerializeable()
	// result: (HMULQ y x)
	for {
		x := v_0
		y := v_1
		if !(!x.rematerializeable() && y.rematerializeable()) {
			break
		}
		v.reset(OpAMD64HMULQ)
		v.AddArg2(y, x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64HMULQU applies the generated rewrite rule for the
// unsigned 64-bit high-multiply: swap the (commutative) operands so a
// rematerializeable value comes first — presumably a register-allocation
// hint (NOTE(review): confirm against the regalloc constraints for HMULQU).
// Returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64HMULQU(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (HMULQU x y)
	// cond: !x.rematerializeable() && y.rematerializeable()
	// result: (HMULQU y x)
	for {
		x := v_0
		y := v_1
		if !(!x.rematerializeable() && y.rematerializeable()) {
			break
		}
		v.reset(OpAMD64HMULQU)
		v.AddArg2(y, x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64LEAL applies the generated rewrite rules for the
// 32-bit load-effective-address: fold a constant add into the displacement,
// and turn LEAL of a two-register ADDL into the two-operand LEAL1 form
// (neither operand may be SB, since SB is a pseudo-register usable only as
// a base). Returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64LEAL(v *Value) bool {
	v_0 := v.Args[0]
	// match: (LEAL [c] {s} (ADDLconst [d] x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL [c+d] {s} x)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg(x)
		return true
	}
	// match: (LEAL [c] {s} (ADDL x y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAL1 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		// ADDL is commutative: try both operand orders before giving up.
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			x := v_0_0
			y := v_0_1
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAL1)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64LEAL1 applies the generated rewrite rules for the
// AMD64 LEAL1 op (base + 1*index + const). Each "match/cond/result" comment
// below documents one rule from gen/AMD64.rules. Reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAL1 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAL1 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		// LEAL1 is commutative: the swap in the loop header tries both
		// argument orders before giving up on this rule.
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDLconst {
				continue
			}
			d := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			y := v_1
			if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAL1)
			v.AuxInt = int32ToAuxInt(c + d)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAL1 [c] {s} x (SHLLconst [1] y))
	// result: (LEAL2 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL2)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAL1 [c] {s} x (SHLLconst [2] y))
	// result: (LEAL4 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL4)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAL1 [c] {s} x (SHLLconst [3] y))
	// result: (LEAL8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL8)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64LEAL2 applies the generated rewrite rules for the
// AMD64 LEAL2 op (base + 2*index + const). Unlike LEAL1, the two arguments are
// not interchangeable (only the second is scaled), so no swap loop is used.
// Reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64LEAL2(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAL2 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAL2 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL2 [c] {s} x (ADDLconst [d] y))
	// cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB
	// result: (LEAL2 [c+2*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		// The index operand is scaled by 2, so the folded constant is 2*d.
		if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AuxInt = int32ToAuxInt(c + 2*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL2 [c] {s} x (SHLLconst [1] y))
	// result: (LEAL4 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL4)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL2 [c] {s} x (SHLLconst [2] y))
	// result: (LEAL8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64LEAL4 applies the generated rewrite rules for the
// AMD64 LEAL4 op (base + 4*index + const). Reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64LEAL4(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAL4 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAL4 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL4 [c] {s} x (ADDLconst [d] y))
	// cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB
	// result: (LEAL4 [c+4*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		// The index operand is scaled by 4, so the folded constant is 4*d.
		if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = int32ToAuxInt(c + 4*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL4 [c] {s} x (SHLLconst [1] y))
	// result: (LEAL8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64LEAL8 applies the generated rewrite rules for the
// AMD64 LEAL8 op (base + 8*index + const). Reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64LEAL8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAL8 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAL8 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL8 [c] {s} x (ADDLconst [d] y))
	// cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB
	// result: (LEAL8 [c+8*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		// The index operand is scaled by 8, so the folded constant is 8*d.
		if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c + 8*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64LEAQ applies the generated rewrite rules for the
// AMD64 LEAQ op (base + const): constant folding into the displacement,
// absorbing an ADDQ into a two-operand LEAQ1, and collapsing nested LEAQ
// forms while merging their symbols. Reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool {
	v_0 := v.Args[0]
	// match: (LEAQ [c] {s} (ADDQconst [d] x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ [c+d] {s} x)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [c] {s} (ADDQ x y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		// ADDQ is commutative: the swap in the loop header tries both
		// operand orders.
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			x := v_0_0
			y := v_0_1
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64LEAQ1 applies the generated rewrite rules for the
// AMD64 LEAQ1 op (base + 1*index + const): constant folding, upgrading a
// shifted index into a scaled LEAQ2/4/8, folding a nested LEAQ/LEAQ1, and
// degrading a bare offset-0, symbol-free LEAQ1 into a plain ADDQ.
// Reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		// LEAQ1 is commutative: the swap in the loop header tries both
		// argument orders before giving up on this rule.
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDQconst {
				continue
			}
			d := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			y := v_1
			if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c + d)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [1] y))
	// result: (LEAQ2 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ2)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [2] y))
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ4)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [3] y))
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ8)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64LEAQ {
				continue
			}
			off2 := auxIntToInt32(v_0.AuxInt)
			sym2 := auxToSym(v_0.Aux)
			x := v_0.Args[0]
			y := v_1
			if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(off1 + off2)
			v.Aux = symToAux(mergeSym(sym1, sym2))
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAQ1 {
				continue
			}
			off2 := auxIntToInt32(v_1.AuxInt)
			sym2 := auxToSym(v_1.Aux)
			y := v_1.Args[1]
			// Both args of the inner LEAQ1 must be the same value y,
			// so the inner value is y+y = 2*y and becomes the scaled index.
			if y != v_1.Args[0] || !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
				continue
			}
			v.reset(OpAMD64LEAQ2)
			v.AuxInt = int32ToAuxInt(off1 + off2)
			v.Aux = symToAux(mergeSym(sym1, sym2))
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} y x)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAQ1 {
				continue
			}
			off2 := auxIntToInt32(v_1.AuxInt)
			sym2 := auxToSym(v_1.Aux)
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			// Inner loop tries both operand orders of the inner
			// (commutative) LEAQ1 when looking for the shared x.
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if x != v_1_0 {
					continue
				}
				y := v_1_1
				if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
					continue
				}
				// x + (x + y) = y + 2*x, hence (LEAQ2 ... y x).
				v.reset(OpAMD64LEAQ2)
				v.AuxInt = int32ToAuxInt(off1 + off2)
				v.Aux = symToAux(mergeSym(sym1, sym2))
				v.AddArg2(y, x)
				return true
			}
		}
		break
	}
	// match: (LEAQ1 [0] x y)
	// cond: v.Aux == nil
	// result: (ADDQ x y)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		y := v_1
		if !(v.Aux == nil) {
			break
		}
		v.reset(OpAMD64ADDQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64LEAQ2 applies the generated rewrite rules for the
// AMD64 LEAQ2 op (base + 2*index + const). The two arguments are not
// interchangeable (only the second is scaled), so no swap loop is used.
// Reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB
	// result: (LEAQ2 [c+2*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		// The index operand is scaled by 2, so the folded constant is 2*d.
		if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(c + 2*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (SHLQconst [1] y))
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
	// cond: is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil
	// result: (LEAQ4 [off1+2*off2] {sym1} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		y := v_1.Args[1]
		// Both args of the inner LEAQ1 must be the same value y, so the
		// index is 2*(y+y+off2) = 4*y+2*off2, hence LEAQ4 with 2*off2 folded.
		if y != v_1.Args[0] || !(is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(off1 + 2*off2)
		v.Aux = symToAux(sym1)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [off] {sym} x (MOVQconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*2)
	// result: (LEAQ [off+int32(scale)*2] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		scale := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*2)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ2 [off] {sym} x (MOVLconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*2)
	// result: (LEAQ [off+int32(scale)*2] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		scale := auxIntToInt32(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*2)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64LEAQ4 applies the generated rewrite rules for
// (LEAQ4 x y) values — address computations of the form x + 4*y + const.
// The rules visible below: fold an ADDQconst on either operand into the
// offset (scaled by 4 when it comes from the index operand), merge an inner
// LEAQ base into the offset/symbol, strength-reduce a doubled index
// ((SHLQconst [1] y) or (LEAQ1 y y)) into a LEAQ8, and collapse a constant
// index (MOVQconst/MOVLconst) into a plain LEAQ. Each rule fires only when
// the combined offset still fits in 32 bits (is32Bit) and, where relevant,
// the rewritten base is not the SB pseudo-register. Reports whether v was
// rewritten.
func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool {
	// Cache v's arguments locally; the rules below match on their Ops.
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB
	// result: (LEAQ4 [c+4*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(c + 4*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (SHLQconst [1] y))
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
	// cond: is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil
	// result: (LEAQ8 [off1+4*off2] {sym1} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		y := v_1.Args[1]
		// (LEAQ1 y y) computes 2*y+off2, so 4*(2*y+off2) becomes 8*y+4*off2.
		if y != v_1.Args[0] || !(is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(off1 + 4*off2)
		v.Aux = symToAux(sym1)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [off] {sym} x (MOVQconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*4)
	// result: (LEAQ [off+int32(scale)*4] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		scale := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*4)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ4 [off] {sym} x (MOVLconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*4)
	// result: (LEAQ [off+int32(scale)*4] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		scale := auxIntToInt32(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*4)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64LEAQ8 applies the generated rewrite rules for
// (LEAQ8 x y) values — address computations of the form x + 8*y + const.
// The rules visible below: fold an ADDQconst on either operand into the
// offset (scaled by 8 when it comes from the index operand), merge an inner
// LEAQ base into the offset/symbol, and collapse a constant index
// (MOVQconst/MOVLconst) into a plain LEAQ. Each rule requires the combined
// offset to fit in 32 bits (is32Bit) and, where relevant, that the rewritten
// base is not the SB pseudo-register. Reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool {
	// Cache v's arguments locally; the rules below match on their Ops.
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB
	// result: (LEAQ8 [c+8*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c + 8*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ8 [off] {sym} x (MOVQconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*8)
	// result: (LEAQ [off+int32(scale)*8] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		scale := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*8)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ8 [off] {sym} x (MOVLconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*8)
	// result: (LEAQ [off+int32(scale)*8] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		scale := auxIntToInt32(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*8)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVBQSX applies the generated rewrite rules for
// (MOVBQSX x) — sign-extension of a byte to 64 bits. The rules visible
// below: fuse a single-use memory load (MOVBload/MOVWload/MOVLload/MOVQload)
// into a MOVBQSXload emitted in the load's own block; narrow an ANDLconst
// whose mask leaves bit 7 clear (so the sign bit is known zero and the
// extension is a no-op on the low byte); and collapse a repeated MOVBQSX.
// Reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		// Build the replacement in the load's block (@x.Block), not v's.
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQSX (ANDLconst [c] x))
	// cond: c & 0x80 == 0
	// result: (ANDLconst [c & 0x7f] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c&0x80 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0x7f)
		v.AddArg(x)
		return true
	}
	// match: (MOVBQSX (MOVBQSX x))
	// result: (MOVBQSX x)
	for {
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVBQSXload applies the generated rewrite rules
// for (MOVBQSXload ptr mem) — a sign-extending byte load. The rules visible
// below: forward a just-stored byte (same symbol, offset, and pointer per
// isSamePtr) straight to (MOVBQSX x), skipping memory; and fold a LEAQ
// address into the load's offset/symbol when the combined offset fits in
// 32 bits and the symbols can merge. Reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value) bool {
	// Cache v's arguments locally; the rules below match on their Ops.
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVBQSX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVBstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBQSXload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVBQZX applies the generated rewrite rules for
// (MOVBQZX x) — zero-extension of a byte to 64 bits. The rules visible
// below: fuse a single-use memory load (MOVBload/MOVWload/MOVLload/MOVQload)
// into a plain MOVBload emitted in the load's own block (a byte load already
// zero-extends on amd64); drop the extension entirely when zeroUpper56Bits
// proves the upper bits are already zero; narrow an ANDLconst mask to the
// low byte; and collapse a repeated MOVBQZX. Reports whether v was
// rewritten.
func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		// Build the replacement in the load's block (@x.Block), not v's.
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQZX x)
	// cond: zeroUpper56Bits(x,3)
	// result: x
	for {
		x := v_0
		if !(zeroUpper56Bits(x, 3)) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (MOVBQZX (ANDLconst [c] x))
	// result: (ANDLconst [c & 0xff] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0xff)
		v.AddArg(x)
		return true
	}
	// match: (MOVBQZX (MOVBQZX x))
	// result: (MOVBQZX x)
	for {
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVBatomicload applies the generated rewrite
// rules for (MOVBatomicload ptr mem) — an atomic byte load. The rules
// visible below fold address arithmetic into the load's aux fields: an
// ADDQconst pointer folds its constant into the offset, and a LEAQ pointer
// folds both its offset and symbol (via mergeSym) when the combined offset
// fits in 32 bits. Reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64MOVBatomicload(v *Value) bool {
	// Cache v's arguments locally; the rules below match on their Ops.
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVBatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVBatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVBatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVBload applies the generated rewrite rules for
// (MOVBload ptr mem) — an ordinary byte load. The rules visible below:
// forward a just-stored byte (same symbol, offset, and pointer per
// isSamePtr) to (MOVBQZX x), skipping memory; fold ADDQconst, LEAQ, LEAL,
// and ADDLconst address arithmetic into the load's offset/symbol when the
// combined offset fits in 32 bits; and constant-fold a load from a
// read-only symbol (symIsRO) at a fixed SB-relative offset into a MOVLconst
// using the bytes read at compile time by read8. Reports whether v was
// rewritten.
func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool {
	// Cache v's arguments locally; the rules below match on their Ops.
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVBQZX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVBstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVBload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVBload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVLconst [int32(read8(sym, int64(off)))])
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpSB || !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(int32(read8(sym, int64(off))))
		return true
	}
	return false
}
10368 func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
10369         v_2 := v.Args[2]
10370         v_1 := v.Args[1]
10371         v_0 := v.Args[0]
10372         b := v.Block
10373         typ := &b.Func.Config.Types
10374         // match: (MOVBstore [off] {sym} ptr y:(SETL x) mem)
10375         // cond: y.Uses == 1
10376         // result: (SETLstore [off] {sym} ptr x mem)
10377         for {
10378                 off := auxIntToInt32(v.AuxInt)
10379                 sym := auxToSym(v.Aux)
10380                 ptr := v_0
10381                 y := v_1
10382                 if y.Op != OpAMD64SETL {
10383                         break
10384                 }
10385                 x := y.Args[0]
10386                 mem := v_2
10387                 if !(y.Uses == 1) {
10388                         break
10389                 }
10390                 v.reset(OpAMD64SETLstore)
10391                 v.AuxInt = int32ToAuxInt(off)
10392                 v.Aux = symToAux(sym)
10393                 v.AddArg3(ptr, x, mem)
10394                 return true
10395         }
10396         // match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem)
10397         // cond: y.Uses == 1
10398         // result: (SETLEstore [off] {sym} ptr x mem)
10399         for {
10400                 off := auxIntToInt32(v.AuxInt)
10401                 sym := auxToSym(v.Aux)
10402                 ptr := v_0
10403                 y := v_1
10404                 if y.Op != OpAMD64SETLE {
10405                         break
10406                 }
10407                 x := y.Args[0]
10408                 mem := v_2
10409                 if !(y.Uses == 1) {
10410                         break
10411                 }
10412                 v.reset(OpAMD64SETLEstore)
10413                 v.AuxInt = int32ToAuxInt(off)
10414                 v.Aux = symToAux(sym)
10415                 v.AddArg3(ptr, x, mem)
10416                 return true
10417         }
10418         // match: (MOVBstore [off] {sym} ptr y:(SETG x) mem)
10419         // cond: y.Uses == 1
10420         // result: (SETGstore [off] {sym} ptr x mem)
10421         for {
10422                 off := auxIntToInt32(v.AuxInt)
10423                 sym := auxToSym(v.Aux)
10424                 ptr := v_0
10425                 y := v_1
10426                 if y.Op != OpAMD64SETG {
10427                         break
10428                 }
10429                 x := y.Args[0]
10430                 mem := v_2
10431                 if !(y.Uses == 1) {
10432                         break
10433                 }
10434                 v.reset(OpAMD64SETGstore)
10435                 v.AuxInt = int32ToAuxInt(off)
10436                 v.Aux = symToAux(sym)
10437                 v.AddArg3(ptr, x, mem)
10438                 return true
10439         }
10440         // match: (MOVBstore [off] {sym} ptr y:(SETGE x) mem)
10441         // cond: y.Uses == 1
10442         // result: (SETGEstore [off] {sym} ptr x mem)
10443         for {
10444                 off := auxIntToInt32(v.AuxInt)
10445                 sym := auxToSym(v.Aux)
10446                 ptr := v_0
10447                 y := v_1
10448                 if y.Op != OpAMD64SETGE {
10449                         break
10450                 }
10451                 x := y.Args[0]
10452                 mem := v_2
10453                 if !(y.Uses == 1) {
10454                         break
10455                 }
10456                 v.reset(OpAMD64SETGEstore)
10457                 v.AuxInt = int32ToAuxInt(off)
10458                 v.Aux = symToAux(sym)
10459                 v.AddArg3(ptr, x, mem)
10460                 return true
10461         }
10462         // match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem)
10463         // cond: y.Uses == 1
10464         // result: (SETEQstore [off] {sym} ptr x mem)
10465         for {
10466                 off := auxIntToInt32(v.AuxInt)
10467                 sym := auxToSym(v.Aux)
10468                 ptr := v_0
10469                 y := v_1
10470                 if y.Op != OpAMD64SETEQ {
10471                         break
10472                 }
10473                 x := y.Args[0]
10474                 mem := v_2
10475                 if !(y.Uses == 1) {
10476                         break
10477                 }
10478                 v.reset(OpAMD64SETEQstore)
10479                 v.AuxInt = int32ToAuxInt(off)
10480                 v.Aux = symToAux(sym)
10481                 v.AddArg3(ptr, x, mem)
10482                 return true
10483         }
10484         // match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem)
10485         // cond: y.Uses == 1
10486         // result: (SETNEstore [off] {sym} ptr x mem)
10487         for {
10488                 off := auxIntToInt32(v.AuxInt)
10489                 sym := auxToSym(v.Aux)
10490                 ptr := v_0
10491                 y := v_1
10492                 if y.Op != OpAMD64SETNE {
10493                         break
10494                 }
10495                 x := y.Args[0]
10496                 mem := v_2
10497                 if !(y.Uses == 1) {
10498                         break
10499                 }
10500                 v.reset(OpAMD64SETNEstore)
10501                 v.AuxInt = int32ToAuxInt(off)
10502                 v.Aux = symToAux(sym)
10503                 v.AddArg3(ptr, x, mem)
10504                 return true
10505         }
10506         // match: (MOVBstore [off] {sym} ptr y:(SETB x) mem)
10507         // cond: y.Uses == 1
10508         // result: (SETBstore [off] {sym} ptr x mem)
10509         for {
10510                 off := auxIntToInt32(v.AuxInt)
10511                 sym := auxToSym(v.Aux)
10512                 ptr := v_0
10513                 y := v_1
10514                 if y.Op != OpAMD64SETB {
10515                         break
10516                 }
10517                 x := y.Args[0]
10518                 mem := v_2
10519                 if !(y.Uses == 1) {
10520                         break
10521                 }
10522                 v.reset(OpAMD64SETBstore)
10523                 v.AuxInt = int32ToAuxInt(off)
10524                 v.Aux = symToAux(sym)
10525                 v.AddArg3(ptr, x, mem)
10526                 return true
10527         }
10528         // match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem)
10529         // cond: y.Uses == 1
10530         // result: (SETBEstore [off] {sym} ptr x mem)
10531         for {
10532                 off := auxIntToInt32(v.AuxInt)
10533                 sym := auxToSym(v.Aux)
10534                 ptr := v_0
10535                 y := v_1
10536                 if y.Op != OpAMD64SETBE {
10537                         break
10538                 }
10539                 x := y.Args[0]
10540                 mem := v_2
10541                 if !(y.Uses == 1) {
10542                         break
10543                 }
10544                 v.reset(OpAMD64SETBEstore)
10545                 v.AuxInt = int32ToAuxInt(off)
10546                 v.Aux = symToAux(sym)
10547                 v.AddArg3(ptr, x, mem)
10548                 return true
10549         }
10550         // match: (MOVBstore [off] {sym} ptr y:(SETA x) mem)
10551         // cond: y.Uses == 1
10552         // result: (SETAstore [off] {sym} ptr x mem)
10553         for {
10554                 off := auxIntToInt32(v.AuxInt)
10555                 sym := auxToSym(v.Aux)
10556                 ptr := v_0
10557                 y := v_1
10558                 if y.Op != OpAMD64SETA {
10559                         break
10560                 }
10561                 x := y.Args[0]
10562                 mem := v_2
10563                 if !(y.Uses == 1) {
10564                         break
10565                 }
10566                 v.reset(OpAMD64SETAstore)
10567                 v.AuxInt = int32ToAuxInt(off)
10568                 v.Aux = symToAux(sym)
10569                 v.AddArg3(ptr, x, mem)
10570                 return true
10571         }
10572         // match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem)
10573         // cond: y.Uses == 1
10574         // result: (SETAEstore [off] {sym} ptr x mem)
10575         for {
10576                 off := auxIntToInt32(v.AuxInt)
10577                 sym := auxToSym(v.Aux)
10578                 ptr := v_0
10579                 y := v_1
10580                 if y.Op != OpAMD64SETAE {
10581                         break
10582                 }
10583                 x := y.Args[0]
10584                 mem := v_2
10585                 if !(y.Uses == 1) {
10586                         break
10587                 }
10588                 v.reset(OpAMD64SETAEstore)
10589                 v.AuxInt = int32ToAuxInt(off)
10590                 v.Aux = symToAux(sym)
10591                 v.AddArg3(ptr, x, mem)
10592                 return true
10593         }
10594         // match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem)
10595         // result: (MOVBstore [off] {sym} ptr x mem)
10596         for {
10597                 off := auxIntToInt32(v.AuxInt)
10598                 sym := auxToSym(v.Aux)
10599                 ptr := v_0
10600                 if v_1.Op != OpAMD64MOVBQSX {
10601                         break
10602                 }
10603                 x := v_1.Args[0]
10604                 mem := v_2
10605                 v.reset(OpAMD64MOVBstore)
10606                 v.AuxInt = int32ToAuxInt(off)
10607                 v.Aux = symToAux(sym)
10608                 v.AddArg3(ptr, x, mem)
10609                 return true
10610         }
10611         // match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem)
10612         // result: (MOVBstore [off] {sym} ptr x mem)
10613         for {
10614                 off := auxIntToInt32(v.AuxInt)
10615                 sym := auxToSym(v.Aux)
10616                 ptr := v_0
10617                 if v_1.Op != OpAMD64MOVBQZX {
10618                         break
10619                 }
10620                 x := v_1.Args[0]
10621                 mem := v_2
10622                 v.reset(OpAMD64MOVBstore)
10623                 v.AuxInt = int32ToAuxInt(off)
10624                 v.Aux = symToAux(sym)
10625                 v.AddArg3(ptr, x, mem)
10626                 return true
10627         }
10628         // match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
10629         // cond: is32Bit(int64(off1)+int64(off2))
10630         // result: (MOVBstore [off1+off2] {sym} ptr val mem)
10631         for {
10632                 off1 := auxIntToInt32(v.AuxInt)
10633                 sym := auxToSym(v.Aux)
10634                 if v_0.Op != OpAMD64ADDQconst {
10635                         break
10636                 }
10637                 off2 := auxIntToInt32(v_0.AuxInt)
10638                 ptr := v_0.Args[0]
10639                 val := v_1
10640                 mem := v_2
10641                 if !(is32Bit(int64(off1) + int64(off2))) {
10642                         break
10643                 }
10644                 v.reset(OpAMD64MOVBstore)
10645                 v.AuxInt = int32ToAuxInt(off1 + off2)
10646                 v.Aux = symToAux(sym)
10647                 v.AddArg3(ptr, val, mem)
10648                 return true
10649         }
10650         // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
10651         // result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
10652         for {
10653                 off := auxIntToInt32(v.AuxInt)
10654                 sym := auxToSym(v.Aux)
10655                 ptr := v_0
10656                 if v_1.Op != OpAMD64MOVLconst {
10657                         break
10658                 }
10659                 c := auxIntToInt32(v_1.AuxInt)
10660                 mem := v_2
10661                 v.reset(OpAMD64MOVBstoreconst)
10662                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
10663                 v.Aux = symToAux(sym)
10664                 v.AddArg2(ptr, mem)
10665                 return true
10666         }
10667         // match: (MOVBstore [off] {sym} ptr (MOVQconst [c]) mem)
10668         // result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
10669         for {
10670                 off := auxIntToInt32(v.AuxInt)
10671                 sym := auxToSym(v.Aux)
10672                 ptr := v_0
10673                 if v_1.Op != OpAMD64MOVQconst {
10674                         break
10675                 }
10676                 c := auxIntToInt64(v_1.AuxInt)
10677                 mem := v_2
10678                 v.reset(OpAMD64MOVBstoreconst)
10679                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
10680                 v.Aux = symToAux(sym)
10681                 v.AddArg2(ptr, mem)
10682                 return true
10683         }
10684         // match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
10685         // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
10686         // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
10687         for {
10688                 off1 := auxIntToInt32(v.AuxInt)
10689                 sym1 := auxToSym(v.Aux)
10690                 if v_0.Op != OpAMD64LEAQ {
10691                         break
10692                 }
10693                 off2 := auxIntToInt32(v_0.AuxInt)
10694                 sym2 := auxToSym(v_0.Aux)
10695                 base := v_0.Args[0]
10696                 val := v_1
10697                 mem := v_2
10698                 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
10699                         break
10700                 }
10701                 v.reset(OpAMD64MOVBstore)
10702                 v.AuxInt = int32ToAuxInt(off1 + off2)
10703                 v.Aux = symToAux(mergeSym(sym1, sym2))
10704                 v.AddArg3(base, val, mem)
10705                 return true
10706         }
10707         // match: (MOVBstore [i] {s} p w x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem))
10708         // cond: x0.Uses == 1 && clobber(x0)
10709         // result: (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem)
10710         for {
10711                 i := auxIntToInt32(v.AuxInt)
10712                 s := auxToSym(v.Aux)
10713                 p := v_0
10714                 w := v_1
10715                 x0 := v_2
10716                 if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-1 || auxToSym(x0.Aux) != s {
10717                         break
10718                 }
10719                 mem := x0.Args[2]
10720                 if p != x0.Args[0] {
10721                         break
10722                 }
10723                 x0_1 := x0.Args[1]
10724                 if x0_1.Op != OpAMD64SHRWconst || auxIntToInt8(x0_1.AuxInt) != 8 || w != x0_1.Args[0] || !(x0.Uses == 1 && clobber(x0)) {
10725                         break
10726                 }
10727                 v.reset(OpAMD64MOVWstore)
10728                 v.AuxInt = int32ToAuxInt(i - 1)
10729                 v.Aux = symToAux(s)
10730                 v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, w.Type)
10731                 v0.AuxInt = int8ToAuxInt(8)
10732                 v0.AddArg(w)
10733                 v.AddArg3(p, v0, mem)
10734                 return true
10735         }
10736         // match: (MOVBstore [i] {s} p1 w x0:(MOVBstore [i] {s} p0 (SHRWconst [8] w) mem))
10737         // cond: x0.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x0)
10738         // result: (MOVWstore [i] {s} p0 (ROLWconst <w.Type> [8] w) mem)
10739         for {
10740                 i := auxIntToInt32(v.AuxInt)
10741                 s := auxToSym(v.Aux)
10742                 p1 := v_0
10743                 w := v_1
10744                 x0 := v_2
10745                 if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
10746                         break
10747                 }
10748                 mem := x0.Args[2]
10749                 p0 := x0.Args[0]
10750                 x0_1 := x0.Args[1]
10751                 if x0_1.Op != OpAMD64SHRWconst || auxIntToInt8(x0_1.AuxInt) != 8 || w != x0_1.Args[0] || !(x0.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x0)) {
10752                         break
10753                 }
10754                 v.reset(OpAMD64MOVWstore)
10755                 v.AuxInt = int32ToAuxInt(i)
10756                 v.Aux = symToAux(s)
10757                 v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, w.Type)
10758                 v0.AuxInt = int8ToAuxInt(8)
10759                 v0.AddArg(w)
10760                 v.AddArg3(p0, v0, mem)
10761                 return true
10762         }
10763         // match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem))))
10764         // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)
10765         // result: (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem)
10766         for {
10767                 i := auxIntToInt32(v.AuxInt)
10768                 s := auxToSym(v.Aux)
10769                 p := v_0
10770                 w := v_1
10771                 x2 := v_2
10772                 if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i-1 || auxToSym(x2.Aux) != s {
10773                         break
10774                 }
10775                 _ = x2.Args[2]
10776                 if p != x2.Args[0] {
10777                         break
10778                 }
10779                 x2_1 := x2.Args[1]
10780                 if x2_1.Op != OpAMD64SHRLconst || auxIntToInt8(x2_1.AuxInt) != 8 || w != x2_1.Args[0] {
10781                         break
10782                 }
10783                 x1 := x2.Args[2]
10784                 if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i-2 || auxToSym(x1.Aux) != s {
10785                         break
10786                 }
10787                 _ = x1.Args[2]
10788                 if p != x1.Args[0] {
10789                         break
10790                 }
10791                 x1_1 := x1.Args[1]
10792                 if x1_1.Op != OpAMD64SHRLconst || auxIntToInt8(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
10793                         break
10794                 }
10795                 x0 := x1.Args[2]
10796                 if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-3 || auxToSym(x0.Aux) != s {
10797                         break
10798                 }
10799                 mem := x0.Args[2]
10800                 if p != x0.Args[0] {
10801                         break
10802                 }
10803                 x0_1 := x0.Args[1]
10804                 if x0_1.Op != OpAMD64SHRLconst || auxIntToInt8(x0_1.AuxInt) != 24 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
10805                         break
10806                 }
10807                 v.reset(OpAMD64MOVLstore)
10808                 v.AuxInt = int32ToAuxInt(i - 3)
10809                 v.Aux = symToAux(s)
10810                 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, w.Type)
10811                 v0.AddArg(w)
10812                 v.AddArg3(p, v0, mem)
10813                 return true
10814         }
10815         // match: (MOVBstore [i] {s} p3 w x2:(MOVBstore [i] {s} p2 (SHRLconst [8] w) x1:(MOVBstore [i] {s} p1 (SHRLconst [16] w) x0:(MOVBstore [i] {s} p0 (SHRLconst [24] w) mem))))
10816         // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && clobber(x0, x1, x2)
10817         // result: (MOVLstore [i] {s} p0 (BSWAPL <w.Type> w) mem)
10818         for {
10819                 i := auxIntToInt32(v.AuxInt)
10820                 s := auxToSym(v.Aux)
10821                 p3 := v_0
10822                 w := v_1
10823                 x2 := v_2
10824                 if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i || auxToSym(x2.Aux) != s {
10825                         break
10826                 }
10827                 _ = x2.Args[2]
10828                 p2 := x2.Args[0]
10829                 x2_1 := x2.Args[1]
10830                 if x2_1.Op != OpAMD64SHRLconst || auxIntToInt8(x2_1.AuxInt) != 8 || w != x2_1.Args[0] {
10831                         break
10832                 }
10833                 x1 := x2.Args[2]
10834                 if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
10835                         break
10836                 }
10837                 _ = x1.Args[2]
10838                 p1 := x1.Args[0]
10839                 x1_1 := x1.Args[1]
10840                 if x1_1.Op != OpAMD64SHRLconst || auxIntToInt8(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
10841                         break
10842                 }
10843                 x0 := x1.Args[2]
10844                 if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
10845                         break
10846                 }
10847                 mem := x0.Args[2]
10848                 p0 := x0.Args[0]
10849                 x0_1 := x0.Args[1]
10850                 if x0_1.Op != OpAMD64SHRLconst || auxIntToInt8(x0_1.AuxInt) != 24 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && clobber(x0, x1, x2)) {
10851                         break
10852                 }
10853                 v.reset(OpAMD64MOVLstore)
10854                 v.AuxInt = int32ToAuxInt(i)
10855                 v.Aux = symToAux(s)
10856                 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, w.Type)
10857                 v0.AddArg(w)
10858                 v.AddArg3(p0, v0, mem)
10859                 return true
10860         }
10861         // match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem))))))))
10862         // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)
10863         // result: (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem)
10864         for {
10865                 i := auxIntToInt32(v.AuxInt)
10866                 s := auxToSym(v.Aux)
10867                 p := v_0
10868                 w := v_1
10869                 x6 := v_2
10870                 if x6.Op != OpAMD64MOVBstore || auxIntToInt32(x6.AuxInt) != i-1 || auxToSym(x6.Aux) != s {
10871                         break
10872                 }
10873                 _ = x6.Args[2]
10874                 if p != x6.Args[0] {
10875                         break
10876                 }
10877                 x6_1 := x6.Args[1]
10878                 if x6_1.Op != OpAMD64SHRQconst || auxIntToInt8(x6_1.AuxInt) != 8 || w != x6_1.Args[0] {
10879                         break
10880                 }
10881                 x5 := x6.Args[2]
10882                 if x5.Op != OpAMD64MOVBstore || auxIntToInt32(x5.AuxInt) != i-2 || auxToSym(x5.Aux) != s {
10883                         break
10884                 }
10885                 _ = x5.Args[2]
10886                 if p != x5.Args[0] {
10887                         break
10888                 }
10889                 x5_1 := x5.Args[1]
10890                 if x5_1.Op != OpAMD64SHRQconst || auxIntToInt8(x5_1.AuxInt) != 16 || w != x5_1.Args[0] {
10891                         break
10892                 }
10893                 x4 := x5.Args[2]
10894                 if x4.Op != OpAMD64MOVBstore || auxIntToInt32(x4.AuxInt) != i-3 || auxToSym(x4.Aux) != s {
10895                         break
10896                 }
10897                 _ = x4.Args[2]
10898                 if p != x4.Args[0] {
10899                         break
10900                 }
10901                 x4_1 := x4.Args[1]
10902                 if x4_1.Op != OpAMD64SHRQconst || auxIntToInt8(x4_1.AuxInt) != 24 || w != x4_1.Args[0] {
10903                         break
10904                 }
10905                 x3 := x4.Args[2]
10906                 if x3.Op != OpAMD64MOVBstore || auxIntToInt32(x3.AuxInt) != i-4 || auxToSym(x3.Aux) != s {
10907                         break
10908                 }
10909                 _ = x3.Args[2]
10910                 if p != x3.Args[0] {
10911                         break
10912                 }
10913                 x3_1 := x3.Args[1]
10914                 if x3_1.Op != OpAMD64SHRQconst || auxIntToInt8(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
10915                         break
10916                 }
10917                 x2 := x3.Args[2]
10918                 if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i-5 || auxToSym(x2.Aux) != s {
10919                         break
10920                 }
10921                 _ = x2.Args[2]
10922                 if p != x2.Args[0] {
10923                         break
10924                 }
10925                 x2_1 := x2.Args[1]
10926                 if x2_1.Op != OpAMD64SHRQconst || auxIntToInt8(x2_1.AuxInt) != 40 || w != x2_1.Args[0] {
10927                         break
10928                 }
10929                 x1 := x2.Args[2]
10930                 if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i-6 || auxToSym(x1.Aux) != s {
10931                         break
10932                 }
10933                 _ = x1.Args[2]
10934                 if p != x1.Args[0] {
10935                         break
10936                 }
10937                 x1_1 := x1.Args[1]
10938                 if x1_1.Op != OpAMD64SHRQconst || auxIntToInt8(x1_1.AuxInt) != 48 || w != x1_1.Args[0] {
10939                         break
10940                 }
10941                 x0 := x1.Args[2]
10942                 if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-7 || auxToSym(x0.Aux) != s {
10943                         break
10944                 }
10945                 mem := x0.Args[2]
10946                 if p != x0.Args[0] {
10947                         break
10948                 }
10949                 x0_1 := x0.Args[1]
10950                 if x0_1.Op != OpAMD64SHRQconst || auxIntToInt8(x0_1.AuxInt) != 56 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)) {
10951                         break
10952                 }
10953                 v.reset(OpAMD64MOVQstore)
10954                 v.AuxInt = int32ToAuxInt(i - 7)
10955                 v.Aux = symToAux(s)
10956                 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, w.Type)
10957                 v0.AddArg(w)
10958                 v.AddArg3(p, v0, mem)
10959                 return true
10960         }
10961         // match: (MOVBstore [i] {s} p7 w x6:(MOVBstore [i] {s} p6 (SHRQconst [8] w) x5:(MOVBstore [i] {s} p5 (SHRQconst [16] w) x4:(MOVBstore [i] {s} p4 (SHRQconst [24] w) x3:(MOVBstore [i] {s} p3 (SHRQconst [32] w) x2:(MOVBstore [i] {s} p2 (SHRQconst [40] w) x1:(MOVBstore [i] {s} p1 (SHRQconst [48] w) x0:(MOVBstore [i] {s} p0 (SHRQconst [56] w) mem))))))))
10962         // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && sequentialAddresses(p3, p4, 1) && sequentialAddresses(p4, p5, 1) && sequentialAddresses(p5, p6, 1) && sequentialAddresses(p6, p7, 1) && clobber(x0, x1, x2, x3, x4, x5, x6)
10963         // result: (MOVQstore [i] {s} p0 (BSWAPQ <w.Type> w) mem)
10964         for {
10965                 i := auxIntToInt32(v.AuxInt)
10966                 s := auxToSym(v.Aux)
10967                 p7 := v_0
10968                 w := v_1
10969                 x6 := v_2
10970                 if x6.Op != OpAMD64MOVBstore || auxIntToInt32(x6.AuxInt) != i || auxToSym(x6.Aux) != s {
10971                         break
10972                 }
10973                 _ = x6.Args[2]
10974                 p6 := x6.Args[0]
10975                 x6_1 := x6.Args[1]
10976                 if x6_1.Op != OpAMD64SHRQconst || auxIntToInt8(x6_1.AuxInt) != 8 || w != x6_1.Args[0] {
10977                         break
10978                 }
10979                 x5 := x6.Args[2]
10980                 if x5.Op != OpAMD64MOVBstore || auxIntToInt32(x5.AuxInt) != i || auxToSym(x5.Aux) != s {
10981                         break
10982                 }
10983                 _ = x5.Args[2]
10984                 p5 := x5.Args[0]
10985                 x5_1 := x5.Args[1]
10986                 if x5_1.Op != OpAMD64SHRQconst || auxIntToInt8(x5_1.AuxInt) != 16 || w != x5_1.Args[0] {
10987                         break
10988                 }
10989                 x4 := x5.Args[2]
10990                 if x4.Op != OpAMD64MOVBstore || auxIntToInt32(x4.AuxInt) != i || auxToSym(x4.Aux) != s {
10991                         break
10992                 }
10993                 _ = x4.Args[2]
10994                 p4 := x4.Args[0]
10995                 x4_1 := x4.Args[1]
10996                 if x4_1.Op != OpAMD64SHRQconst || auxIntToInt8(x4_1.AuxInt) != 24 || w != x4_1.Args[0] {
10997                         break
10998                 }
10999                 x3 := x4.Args[2]
11000                 if x3.Op != OpAMD64MOVBstore || auxIntToInt32(x3.AuxInt) != i || auxToSym(x3.Aux) != s {
11001                         break
11002                 }
11003                 _ = x3.Args[2]
11004                 p3 := x3.Args[0]
11005                 x3_1 := x3.Args[1]
11006                 if x3_1.Op != OpAMD64SHRQconst || auxIntToInt8(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
11007                         break
11008                 }
11009                 x2 := x3.Args[2]
11010                 if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i || auxToSym(x2.Aux) != s {
11011                         break
11012                 }
11013                 _ = x2.Args[2]
11014                 p2 := x2.Args[0]
11015                 x2_1 := x2.Args[1]
11016                 if x2_1.Op != OpAMD64SHRQconst || auxIntToInt8(x2_1.AuxInt) != 40 || w != x2_1.Args[0] {
11017                         break
11018                 }
11019                 x1 := x2.Args[2]
11020                 if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
11021                         break
11022                 }
11023                 _ = x1.Args[2]
11024                 p1 := x1.Args[0]
11025                 x1_1 := x1.Args[1]
11026                 if x1_1.Op != OpAMD64SHRQconst || auxIntToInt8(x1_1.AuxInt) != 48 || w != x1_1.Args[0] {
11027                         break
11028                 }
11029                 x0 := x1.Args[2]
11030                 if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
11031                         break
11032                 }
11033                 mem := x0.Args[2]
11034                 p0 := x0.Args[0]
11035                 x0_1 := x0.Args[1]
11036                 if x0_1.Op != OpAMD64SHRQconst || auxIntToInt8(x0_1.AuxInt) != 56 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && sequentialAddresses(p3, p4, 1) && sequentialAddresses(p4, p5, 1) && sequentialAddresses(p5, p6, 1) && sequentialAddresses(p6, p7, 1) && clobber(x0, x1, x2, x3, x4, x5, x6)) {
11037                         break
11038                 }
11039                 v.reset(OpAMD64MOVQstore)
11040                 v.AuxInt = int32ToAuxInt(i)
11041                 v.Aux = symToAux(s)
11042                 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, w.Type)
11043                 v0.AddArg(w)
11044                 v.AddArg3(p0, v0, mem)
11045                 return true
11046         }
11047         // match: (MOVBstore [i] {s} p (SHRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
11048         // cond: x.Uses == 1 && clobber(x)
11049         // result: (MOVWstore [i-1] {s} p w mem)
11050         for {
11051                 i := auxIntToInt32(v.AuxInt)
11052                 s := auxToSym(v.Aux)
11053                 p := v_0
11054                 if v_1.Op != OpAMD64SHRWconst || auxIntToInt8(v_1.AuxInt) != 8 {
11055                         break
11056                 }
11057                 w := v_1.Args[0]
11058                 x := v_2
11059                 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
11060                         break
11061                 }
11062                 mem := x.Args[2]
11063                 if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
11064                         break
11065                 }
11066                 v.reset(OpAMD64MOVWstore)
11067                 v.AuxInt = int32ToAuxInt(i - 1)
11068                 v.Aux = symToAux(s)
11069                 v.AddArg3(p, w, mem)
11070                 return true
11071         }
11072         // match: (MOVBstore [i] {s} p (SHRLconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
11073         // cond: x.Uses == 1 && clobber(x)
11074         // result: (MOVWstore [i-1] {s} p w mem)
11075         for {
11076                 i := auxIntToInt32(v.AuxInt)
11077                 s := auxToSym(v.Aux)
11078                 p := v_0
11079                 if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 8 {
11080                         break
11081                 }
11082                 w := v_1.Args[0]
11083                 x := v_2
11084                 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
11085                         break
11086                 }
11087                 mem := x.Args[2]
11088                 if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
11089                         break
11090                 }
11091                 v.reset(OpAMD64MOVWstore)
11092                 v.AuxInt = int32ToAuxInt(i - 1)
11093                 v.Aux = symToAux(s)
11094                 v.AddArg3(p, w, mem)
11095                 return true
11096         }
11097         // match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
11098         // cond: x.Uses == 1 && clobber(x)
11099         // result: (MOVWstore [i-1] {s} p w mem)
11100         for {
11101                 i := auxIntToInt32(v.AuxInt)
11102                 s := auxToSym(v.Aux)
11103                 p := v_0
11104                 if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 8 {
11105                         break
11106                 }
11107                 w := v_1.Args[0]
11108                 x := v_2
11109                 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
11110                         break
11111                 }
11112                 mem := x.Args[2]
11113                 if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
11114                         break
11115                 }
11116                 v.reset(OpAMD64MOVWstore)
11117                 v.AuxInt = int32ToAuxInt(i - 1)
11118                 v.Aux = symToAux(s)
11119                 v.AddArg3(p, w, mem)
11120                 return true
11121         }
11122         // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRWconst [8] w) mem))
11123         // cond: x.Uses == 1 && clobber(x)
11124         // result: (MOVWstore [i] {s} p w mem)
11125         for {
11126                 i := auxIntToInt32(v.AuxInt)
11127                 s := auxToSym(v.Aux)
11128                 p := v_0
11129                 w := v_1
11130                 x := v_2
11131                 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
11132                         break
11133                 }
11134                 mem := x.Args[2]
11135                 if p != x.Args[0] {
11136                         break
11137                 }
11138                 x_1 := x.Args[1]
11139                 if x_1.Op != OpAMD64SHRWconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
11140                         break
11141                 }
11142                 v.reset(OpAMD64MOVWstore)
11143                 v.AuxInt = int32ToAuxInt(i)
11144                 v.Aux = symToAux(s)
11145                 v.AddArg3(p, w, mem)
11146                 return true
11147         }
11148         // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRLconst [8] w) mem))
11149         // cond: x.Uses == 1 && clobber(x)
11150         // result: (MOVWstore [i] {s} p w mem)
11151         for {
11152                 i := auxIntToInt32(v.AuxInt)
11153                 s := auxToSym(v.Aux)
11154                 p := v_0
11155                 w := v_1
11156                 x := v_2
11157                 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
11158                         break
11159                 }
11160                 mem := x.Args[2]
11161                 if p != x.Args[0] {
11162                         break
11163                 }
11164                 x_1 := x.Args[1]
11165                 if x_1.Op != OpAMD64SHRLconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
11166                         break
11167                 }
11168                 v.reset(OpAMD64MOVWstore)
11169                 v.AuxInt = int32ToAuxInt(i)
11170                 v.Aux = symToAux(s)
11171                 v.AddArg3(p, w, mem)
11172                 return true
11173         }
11174         // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRQconst [8] w) mem))
11175         // cond: x.Uses == 1 && clobber(x)
11176         // result: (MOVWstore [i] {s} p w mem)
11177         for {
11178                 i := auxIntToInt32(v.AuxInt)
11179                 s := auxToSym(v.Aux)
11180                 p := v_0
11181                 w := v_1
11182                 x := v_2
11183                 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
11184                         break
11185                 }
11186                 mem := x.Args[2]
11187                 if p != x.Args[0] {
11188                         break
11189                 }
11190                 x_1 := x.Args[1]
11191                 if x_1.Op != OpAMD64SHRQconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
11192                         break
11193                 }
11194                 v.reset(OpAMD64MOVWstore)
11195                 v.AuxInt = int32ToAuxInt(i)
11196                 v.Aux = symToAux(s)
11197                 v.AddArg3(p, w, mem)
11198                 return true
11199         }
11200         // match: (MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem))
11201         // cond: x.Uses == 1 && clobber(x)
11202         // result: (MOVWstore [i-1] {s} p w0 mem)
11203         for {
11204                 i := auxIntToInt32(v.AuxInt)
11205                 s := auxToSym(v.Aux)
11206                 p := v_0
11207                 if v_1.Op != OpAMD64SHRLconst {
11208                         break
11209                 }
11210                 j := auxIntToInt8(v_1.AuxInt)
11211                 w := v_1.Args[0]
11212                 x := v_2
11213                 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
11214                         break
11215                 }
11216                 mem := x.Args[2]
11217                 if p != x.Args[0] {
11218                         break
11219                 }
11220                 w0 := x.Args[1]
11221                 if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
11222                         break
11223                 }
11224                 v.reset(OpAMD64MOVWstore)
11225                 v.AuxInt = int32ToAuxInt(i - 1)
11226                 v.Aux = symToAux(s)
11227                 v.AddArg3(p, w0, mem)
11228                 return true
11229         }
11230         // match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem))
11231         // cond: x.Uses == 1 && clobber(x)
11232         // result: (MOVWstore [i-1] {s} p w0 mem)
11233         for {
11234                 i := auxIntToInt32(v.AuxInt)
11235                 s := auxToSym(v.Aux)
11236                 p := v_0
11237                 if v_1.Op != OpAMD64SHRQconst {
11238                         break
11239                 }
11240                 j := auxIntToInt8(v_1.AuxInt)
11241                 w := v_1.Args[0]
11242                 x := v_2
11243                 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
11244                         break
11245                 }
11246                 mem := x.Args[2]
11247                 if p != x.Args[0] {
11248                         break
11249                 }
11250                 w0 := x.Args[1]
11251                 if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
11252                         break
11253                 }
11254                 v.reset(OpAMD64MOVWstore)
11255                 v.AuxInt = int32ToAuxInt(i - 1)
11256                 v.Aux = symToAux(s)
11257                 v.AddArg3(p, w0, mem)
11258                 return true
11259         }
11260         // match: (MOVBstore [i] {s} p1 (SHRWconst [8] w) x:(MOVBstore [i] {s} p0 w mem))
11261         // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
11262         // result: (MOVWstore [i] {s} p0 w mem)
11263         for {
11264                 i := auxIntToInt32(v.AuxInt)
11265                 s := auxToSym(v.Aux)
11266                 p1 := v_0
11267                 if v_1.Op != OpAMD64SHRWconst || auxIntToInt8(v_1.AuxInt) != 8 {
11268                         break
11269                 }
11270                 w := v_1.Args[0]
11271                 x := v_2
11272                 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
11273                         break
11274                 }
11275                 mem := x.Args[2]
11276                 p0 := x.Args[0]
11277                 if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
11278                         break
11279                 }
11280                 v.reset(OpAMD64MOVWstore)
11281                 v.AuxInt = int32ToAuxInt(i)
11282                 v.Aux = symToAux(s)
11283                 v.AddArg3(p0, w, mem)
11284                 return true
11285         }
11286         // match: (MOVBstore [i] {s} p1 (SHRLconst [8] w) x:(MOVBstore [i] {s} p0 w mem))
11287         // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
11288         // result: (MOVWstore [i] {s} p0 w mem)
11289         for {
11290                 i := auxIntToInt32(v.AuxInt)
11291                 s := auxToSym(v.Aux)
11292                 p1 := v_0
11293                 if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 8 {
11294                         break
11295                 }
11296                 w := v_1.Args[0]
11297                 x := v_2
11298                 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
11299                         break
11300                 }
11301                 mem := x.Args[2]
11302                 p0 := x.Args[0]
11303                 if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
11304                         break
11305                 }
11306                 v.reset(OpAMD64MOVWstore)
11307                 v.AuxInt = int32ToAuxInt(i)
11308                 v.Aux = symToAux(s)
11309                 v.AddArg3(p0, w, mem)
11310                 return true
11311         }
11312         // match: (MOVBstore [i] {s} p1 (SHRQconst [8] w) x:(MOVBstore [i] {s} p0 w mem))
11313         // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
11314         // result: (MOVWstore [i] {s} p0 w mem)
11315         for {
11316                 i := auxIntToInt32(v.AuxInt)
11317                 s := auxToSym(v.Aux)
11318                 p1 := v_0
11319                 if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 8 {
11320                         break
11321                 }
11322                 w := v_1.Args[0]
11323                 x := v_2
11324                 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
11325                         break
11326                 }
11327                 mem := x.Args[2]
11328                 p0 := x.Args[0]
11329                 if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
11330                         break
11331                 }
11332                 v.reset(OpAMD64MOVWstore)
11333                 v.AuxInt = int32ToAuxInt(i)
11334                 v.Aux = symToAux(s)
11335                 v.AddArg3(p0, w, mem)
11336                 return true
11337         }
11338         // match: (MOVBstore [i] {s} p0 w x:(MOVBstore [i] {s} p1 (SHRWconst [8] w) mem))
11339         // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
11340         // result: (MOVWstore [i] {s} p0 w mem)
11341         for {
11342                 i := auxIntToInt32(v.AuxInt)
11343                 s := auxToSym(v.Aux)
11344                 p0 := v_0
11345                 w := v_1
11346                 x := v_2
11347                 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
11348                         break
11349                 }
11350                 mem := x.Args[2]
11351                 p1 := x.Args[0]
11352                 x_1 := x.Args[1]
11353                 if x_1.Op != OpAMD64SHRWconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
11354                         break
11355                 }
11356                 v.reset(OpAMD64MOVWstore)
11357                 v.AuxInt = int32ToAuxInt(i)
11358                 v.Aux = symToAux(s)
11359                 v.AddArg3(p0, w, mem)
11360                 return true
11361         }
11362         // match: (MOVBstore [i] {s} p0 w x:(MOVBstore [i] {s} p1 (SHRLconst [8] w) mem))
11363         // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
11364         // result: (MOVWstore [i] {s} p0 w mem)
11365         for {
11366                 i := auxIntToInt32(v.AuxInt)
11367                 s := auxToSym(v.Aux)
11368                 p0 := v_0
11369                 w := v_1
11370                 x := v_2
11371                 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
11372                         break
11373                 }
11374                 mem := x.Args[2]
11375                 p1 := x.Args[0]
11376                 x_1 := x.Args[1]
11377                 if x_1.Op != OpAMD64SHRLconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
11378                         break
11379                 }
11380                 v.reset(OpAMD64MOVWstore)
11381                 v.AuxInt = int32ToAuxInt(i)
11382                 v.Aux = symToAux(s)
11383                 v.AddArg3(p0, w, mem)
11384                 return true
11385         }
11386         // match: (MOVBstore [i] {s} p0 w x:(MOVBstore [i] {s} p1 (SHRQconst [8] w) mem))
11387         // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
11388         // result: (MOVWstore [i] {s} p0 w mem)
11389         for {
11390                 i := auxIntToInt32(v.AuxInt)
11391                 s := auxToSym(v.Aux)
11392                 p0 := v_0
11393                 w := v_1
11394                 x := v_2
11395                 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
11396                         break
11397                 }
11398                 mem := x.Args[2]
11399                 p1 := x.Args[0]
11400                 x_1 := x.Args[1]
11401                 if x_1.Op != OpAMD64SHRQconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
11402                         break
11403                 }
11404                 v.reset(OpAMD64MOVWstore)
11405                 v.AuxInt = int32ToAuxInt(i)
11406                 v.Aux = symToAux(s)
11407                 v.AddArg3(p0, w, mem)
11408                 return true
11409         }
11410         // match: (MOVBstore [i] {s} p1 (SHRLconst [j] w) x:(MOVBstore [i] {s} p0 w0:(SHRLconst [j-8] w) mem))
11411         // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
11412         // result: (MOVWstore [i] {s} p0 w0 mem)
11413         for {
11414                 i := auxIntToInt32(v.AuxInt)
11415                 s := auxToSym(v.Aux)
11416                 p1 := v_0
11417                 if v_1.Op != OpAMD64SHRLconst {
11418                         break
11419                 }
11420                 j := auxIntToInt8(v_1.AuxInt)
11421                 w := v_1.Args[0]
11422                 x := v_2
11423                 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
11424                         break
11425                 }
11426                 mem := x.Args[2]
11427                 p0 := x.Args[0]
11428                 w0 := x.Args[1]
11429                 if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
11430                         break
11431                 }
11432                 v.reset(OpAMD64MOVWstore)
11433                 v.AuxInt = int32ToAuxInt(i)
11434                 v.Aux = symToAux(s)
11435                 v.AddArg3(p0, w0, mem)
11436                 return true
11437         }
11438         // match: (MOVBstore [i] {s} p1 (SHRQconst [j] w) x:(MOVBstore [i] {s} p0 w0:(SHRQconst [j-8] w) mem))
11439         // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
11440         // result: (MOVWstore [i] {s} p0 w0 mem)
11441         for {
11442                 i := auxIntToInt32(v.AuxInt)
11443                 s := auxToSym(v.Aux)
11444                 p1 := v_0
11445                 if v_1.Op != OpAMD64SHRQconst {
11446                         break
11447                 }
11448                 j := auxIntToInt8(v_1.AuxInt)
11449                 w := v_1.Args[0]
11450                 x := v_2
11451                 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
11452                         break
11453                 }
11454                 mem := x.Args[2]
11455                 p0 := x.Args[0]
11456                 w0 := x.Args[1]
11457                 if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
11458                         break
11459                 }
11460                 v.reset(OpAMD64MOVWstore)
11461                 v.AuxInt = int32ToAuxInt(i)
11462                 v.Aux = symToAux(s)
11463                 v.AddArg3(p0, w0, mem)
11464                 return true
11465         }
11466         // match: (MOVBstore [7] {s} p1 (SHRQconst [56] w) x1:(MOVWstore [5] {s} p1 (SHRQconst [40] w) x2:(MOVLstore [1] {s} p1 (SHRQconst [8] w) x3:(MOVBstore [0] {s} p1 w mem))))
11467         // cond: x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && clobber(x1, x2, x3)
11468         // result: (MOVQstore {s} p1 w mem)
11469         for {
11470                 if auxIntToInt32(v.AuxInt) != 7 {
11471                         break
11472                 }
11473                 s := auxToSym(v.Aux)
11474                 p1 := v_0
11475                 if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 56 {
11476                         break
11477                 }
11478                 w := v_1.Args[0]
11479                 x1 := v_2
11480                 if x1.Op != OpAMD64MOVWstore || auxIntToInt32(x1.AuxInt) != 5 || auxToSym(x1.Aux) != s {
11481                         break
11482                 }
11483                 _ = x1.Args[2]
11484                 if p1 != x1.Args[0] {
11485                         break
11486                 }
11487                 x1_1 := x1.Args[1]
11488                 if x1_1.Op != OpAMD64SHRQconst || auxIntToInt8(x1_1.AuxInt) != 40 || w != x1_1.Args[0] {
11489                         break
11490                 }
11491                 x2 := x1.Args[2]
11492                 if x2.Op != OpAMD64MOVLstore || auxIntToInt32(x2.AuxInt) != 1 || auxToSym(x2.Aux) != s {
11493                         break
11494                 }
11495                 _ = x2.Args[2]
11496                 if p1 != x2.Args[0] {
11497                         break
11498                 }
11499                 x2_1 := x2.Args[1]
11500                 if x2_1.Op != OpAMD64SHRQconst || auxIntToInt8(x2_1.AuxInt) != 8 || w != x2_1.Args[0] {
11501                         break
11502                 }
11503                 x3 := x2.Args[2]
11504                 if x3.Op != OpAMD64MOVBstore || auxIntToInt32(x3.AuxInt) != 0 || auxToSym(x3.Aux) != s {
11505                         break
11506                 }
11507                 mem := x3.Args[2]
11508                 if p1 != x3.Args[0] || w != x3.Args[1] || !(x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && clobber(x1, x2, x3)) {
11509                         break
11510                 }
11511                 v.reset(OpAMD64MOVQstore)
11512                 v.Aux = symToAux(s)
11513                 v.AddArg3(p1, w, mem)
11514                 return true
11515         }
11516         // match: (MOVBstore [i] {s} p x1:(MOVBload [j] {s2} p2 mem) mem2:(MOVBstore [i-1] {s} p x2:(MOVBload [j-1] {s2} p2 mem) mem))
11517         // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)
11518         // result: (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem)
11519         for {
11520                 i := auxIntToInt32(v.AuxInt)
11521                 s := auxToSym(v.Aux)
11522                 p := v_0
11523                 x1 := v_1
11524                 if x1.Op != OpAMD64MOVBload {
11525                         break
11526                 }
11527                 j := auxIntToInt32(x1.AuxInt)
11528                 s2 := auxToSym(x1.Aux)
11529                 mem := x1.Args[1]
11530                 p2 := x1.Args[0]
11531                 mem2 := v_2
11532                 if mem2.Op != OpAMD64MOVBstore || auxIntToInt32(mem2.AuxInt) != i-1 || auxToSym(mem2.Aux) != s {
11533                         break
11534                 }
11535                 _ = mem2.Args[2]
11536                 if p != mem2.Args[0] {
11537                         break
11538                 }
11539                 x2 := mem2.Args[1]
11540                 if x2.Op != OpAMD64MOVBload || auxIntToInt32(x2.AuxInt) != j-1 || auxToSym(x2.Aux) != s2 {
11541                         break
11542                 }
11543                 _ = x2.Args[1]
11544                 if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)) {
11545                         break
11546                 }
11547                 v.reset(OpAMD64MOVWstore)
11548                 v.AuxInt = int32ToAuxInt(i - 1)
11549                 v.Aux = symToAux(s)
11550                 v0 := b.NewValue0(x2.Pos, OpAMD64MOVWload, typ.UInt16)
11551                 v0.AuxInt = int32ToAuxInt(j - 1)
11552                 v0.Aux = symToAux(s2)
11553                 v0.AddArg2(p2, mem)
11554                 v.AddArg3(p, v0, mem)
11555                 return true
11556         }
11557         // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
11558         // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
11559         // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
11560         for {
11561                 off1 := auxIntToInt32(v.AuxInt)
11562                 sym1 := auxToSym(v.Aux)
11563                 if v_0.Op != OpAMD64LEAL {
11564                         break
11565                 }
11566                 off2 := auxIntToInt32(v_0.AuxInt)
11567                 sym2 := auxToSym(v_0.Aux)
11568                 base := v_0.Args[0]
11569                 val := v_1
11570                 mem := v_2
11571                 if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
11572                         break
11573                 }
11574                 v.reset(OpAMD64MOVBstore)
11575                 v.AuxInt = int32ToAuxInt(off1 + off2)
11576                 v.Aux = symToAux(mergeSym(sym1, sym2))
11577                 v.AddArg3(base, val, mem)
11578                 return true
11579         }
11580         // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
11581         // cond: is32Bit(int64(off1)+int64(off2))
11582         // result: (MOVBstore [off1+off2] {sym} ptr val mem)
11583         for {
11584                 off1 := auxIntToInt32(v.AuxInt)
11585                 sym := auxToSym(v.Aux)
11586                 if v_0.Op != OpAMD64ADDLconst {
11587                         break
11588                 }
11589                 off2 := auxIntToInt32(v_0.AuxInt)
11590                 ptr := v_0.Args[0]
11591                 val := v_1
11592                 mem := v_2
11593                 if !(is32Bit(int64(off1) + int64(off2))) {
11594                         break
11595                 }
11596                 v.reset(OpAMD64MOVBstore)
11597                 v.AuxInt = int32ToAuxInt(off1 + off2)
11598                 v.Aux = symToAux(sym)
11599                 v.AddArg3(ptr, val, mem)
11600                 return true
11601         }
11602         return false
11603 }
11604 func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool {
11605         v_1 := v.Args[1]
11606         v_0 := v.Args[0]
11607         // match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
11608         // cond: ValAndOff(sc).canAdd32(off)
11609         // result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
11610         for {
11611                 sc := auxIntToValAndOff(v.AuxInt)
11612                 s := auxToSym(v.Aux)
11613                 if v_0.Op != OpAMD64ADDQconst {
11614                         break
11615                 }
11616                 off := auxIntToInt32(v_0.AuxInt)
11617                 ptr := v_0.Args[0]
11618                 mem := v_1
11619                 if !(ValAndOff(sc).canAdd32(off)) {
11620                         break
11621                 }
11622                 v.reset(OpAMD64MOVBstoreconst)
11623                 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
11624                 v.Aux = symToAux(s)
11625                 v.AddArg2(ptr, mem)
11626                 return true
11627         }
11628         // match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
11629         // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
11630         // result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
11631         for {
11632                 sc := auxIntToValAndOff(v.AuxInt)
11633                 sym1 := auxToSym(v.Aux)
11634                 if v_0.Op != OpAMD64LEAQ {
11635                         break
11636                 }
11637                 off := auxIntToInt32(v_0.AuxInt)
11638                 sym2 := auxToSym(v_0.Aux)
11639                 ptr := v_0.Args[0]
11640                 mem := v_1
11641                 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
11642                         break
11643                 }
11644                 v.reset(OpAMD64MOVBstoreconst)
11645                 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
11646                 v.Aux = symToAux(mergeSym(sym1, sym2))
11647                 v.AddArg2(ptr, mem)
11648                 return true
11649         }
11650         // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
11651         // cond: x.Uses == 1 && a.Off() + 1 == c.Off() && clobber(x)
11652         // result: (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem)
11653         for {
11654                 c := auxIntToValAndOff(v.AuxInt)
11655                 s := auxToSym(v.Aux)
11656                 p := v_0
11657                 x := v_1
11658                 if x.Op != OpAMD64MOVBstoreconst {
11659                         break
11660                 }
11661                 a := auxIntToValAndOff(x.AuxInt)
11662                 if auxToSym(x.Aux) != s {
11663                         break
11664                 }
11665                 mem := x.Args[1]
11666                 if p != x.Args[0] || !(x.Uses == 1 && a.Off()+1 == c.Off() && clobber(x)) {
11667                         break
11668                 }
11669                 v.reset(OpAMD64MOVWstoreconst)
11670                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xff|c.Val()<<8, a.Off()))
11671                 v.Aux = symToAux(s)
11672                 v.AddArg2(p, mem)
11673                 return true
11674         }
11675         // match: (MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem))
11676         // cond: x.Uses == 1 && a.Off() + 1 == c.Off() && clobber(x)
11677         // result: (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem)
11678         for {
11679                 a := auxIntToValAndOff(v.AuxInt)
11680                 s := auxToSym(v.Aux)
11681                 p := v_0
11682                 x := v_1
11683                 if x.Op != OpAMD64MOVBstoreconst {
11684                         break
11685                 }
11686                 c := auxIntToValAndOff(x.AuxInt)
11687                 if auxToSym(x.Aux) != s {
11688                         break
11689                 }
11690                 mem := x.Args[1]
11691                 if p != x.Args[0] || !(x.Uses == 1 && a.Off()+1 == c.Off() && clobber(x)) {
11692                         break
11693                 }
11694                 v.reset(OpAMD64MOVWstoreconst)
11695                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xff|c.Val()<<8, a.Off()))
11696                 v.Aux = symToAux(s)
11697                 v.AddArg2(p, mem)
11698                 return true
11699         }
11700         // match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
11701         // cond: canMergeSym(sym1, sym2) && sc.canAdd32(off)
11702         // result: (MOVBstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
11703         for {
11704                 sc := auxIntToValAndOff(v.AuxInt)
11705                 sym1 := auxToSym(v.Aux)
11706                 if v_0.Op != OpAMD64LEAL {
11707                         break
11708                 }
11709                 off := auxIntToInt32(v_0.AuxInt)
11710                 sym2 := auxToSym(v_0.Aux)
11711                 ptr := v_0.Args[0]
11712                 mem := v_1
11713                 if !(canMergeSym(sym1, sym2) && sc.canAdd32(off)) {
11714                         break
11715                 }
11716                 v.reset(OpAMD64MOVBstoreconst)
11717                 v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
11718                 v.Aux = symToAux(mergeSym(sym1, sym2))
11719                 v.AddArg2(ptr, mem)
11720                 return true
11721         }
11722         // match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
11723         // cond: sc.canAdd32(off)
11724         // result: (MOVBstoreconst [sc.addOffset32(off)] {s} ptr mem)
11725         for {
11726                 sc := auxIntToValAndOff(v.AuxInt)
11727                 s := auxToSym(v.Aux)
11728                 if v_0.Op != OpAMD64ADDLconst {
11729                         break
11730                 }
11731                 off := auxIntToInt32(v_0.AuxInt)
11732                 ptr := v_0.Args[0]
11733                 mem := v_1
11734                 if !(sc.canAdd32(off)) {
11735                         break
11736                 }
11737                 v.reset(OpAMD64MOVBstoreconst)
11738                 v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
11739                 v.Aux = symToAux(s)
11740                 v.AddArg2(ptr, mem)
11741                 return true
11742         }
11743         return false
11744 }
// rewriteValueAMD64_OpAMD64MOVLQSX applies the generated AMD64 rewrite rules
// for MOVLQSX (sign-extend 32 bits to 64). Rules are tried in source order;
// the first one that matches mutates v in place and returns true. If no rule
// fires, v is left unchanged and false is returned.
//
// NOTE(review): clobber(x) is called inside match conditions and presumably
// marks x for removal as a side effect — confirm against rewrite.go helpers.
func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		// Fuse the load with the sign extension; the replacement value is
		// built in the load's own block (@x.Block), not v's.
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		// A 64-bit load followed by a 32-bit sign extension only needs the
		// low 32 bits, so it is replaced by a narrower extending load.
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQSX (ANDLconst [c] x))
	// cond: uint32(c) & 0x80000000 == 0
	// result: (ANDLconst [c & 0x7fffffff] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		// Only when the mask clears bit 31: the AND result then has a zero
		// sign bit, so the sign extension is a no-op.
		if !(uint32(c)&0x80000000 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0x7fffffff)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVLQSX x))
	// result: (MOVLQSX x)
	for {
		if v_0.Op != OpAMD64MOVLQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVWQSX x))
	// result: (MOVWQSX x)
	for {
		if v_0.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVBQSX x))
	// result: (MOVBQSX x)
	for {
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
11846 func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value) bool {
11847         v_1 := v.Args[1]
11848         v_0 := v.Args[0]
11849         // match: (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
11850         // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
11851         // result: (MOVLQSX x)
11852         for {
11853                 off := auxIntToInt32(v.AuxInt)
11854                 sym := auxToSym(v.Aux)
11855                 ptr := v_0
11856                 if v_1.Op != OpAMD64MOVLstore {
11857                         break
11858                 }
11859                 off2 := auxIntToInt32(v_1.AuxInt)
11860                 sym2 := auxToSym(v_1.Aux)
11861                 x := v_1.Args[1]
11862                 ptr2 := v_1.Args[0]
11863                 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
11864                         break
11865                 }
11866                 v.reset(OpAMD64MOVLQSX)
11867                 v.AddArg(x)
11868                 return true
11869         }
11870         // match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
11871         // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
11872         // result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
11873         for {
11874                 off1 := auxIntToInt32(v.AuxInt)
11875                 sym1 := auxToSym(v.Aux)
11876                 if v_0.Op != OpAMD64LEAQ {
11877                         break
11878                 }
11879                 off2 := auxIntToInt32(v_0.AuxInt)
11880                 sym2 := auxToSym(v_0.Aux)
11881                 base := v_0.Args[0]
11882                 mem := v_1
11883                 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
11884                         break
11885                 }
11886                 v.reset(OpAMD64MOVLQSXload)
11887                 v.AuxInt = int32ToAuxInt(off1 + off2)
11888                 v.Aux = symToAux(mergeSym(sym1, sym2))
11889                 v.AddArg2(base, mem)
11890                 return true
11891         }
11892         return false
11893 }
// rewriteValueAMD64_OpAMD64MOVLQZX applies the generated AMD64 rewrite rules
// for MOVLQZX (zero-extend 32 bits to 64). Rules are tried in source order;
// the first one that matches mutates v in place and returns true. If no rule
// fires, v is left unchanged and false is returned.
//
// NOTE(review): clobber(x) is invoked for its side effect inside match
// conditions — confirm semantics against rewrite.go helpers.
func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		// Re-issue the 32-bit load with v's (64-bit) type in the load's own
		// block; the explicit zero extension disappears.
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		// Only the low 32 bits of the 64-bit load survive the extension, so
		// a narrower load suffices.
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQZX x)
	// cond: zeroUpper32Bits(x,3)
	// result: x
	for {
		x := v_0
		// If x provably already has zero upper 32 bits, the extension is a
		// no-op and v becomes a copy of x.
		if !(zeroUpper32Bits(x, 3)) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (MOVLQZX (ANDLconst [c] x))
	// result: (ANDLconst [c] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVLQZX x))
	// result: (MOVLQZX x)
	for {
		if v_0.Op != OpAMD64MOVLQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVWQZX x))
	// result: (MOVWQZX x)
	for {
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVBQZX x))
	// result: (MOVBQZX x)
	for {
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
12002 func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value) bool {
12003         v_1 := v.Args[1]
12004         v_0 := v.Args[0]
12005         // match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
12006         // cond: is32Bit(int64(off1)+int64(off2))
12007         // result: (MOVLatomicload [off1+off2] {sym} ptr mem)
12008         for {
12009                 off1 := auxIntToInt32(v.AuxInt)
12010                 sym := auxToSym(v.Aux)
12011                 if v_0.Op != OpAMD64ADDQconst {
12012                         break
12013                 }
12014                 off2 := auxIntToInt32(v_0.AuxInt)
12015                 ptr := v_0.Args[0]
12016                 mem := v_1
12017                 if !(is32Bit(int64(off1) + int64(off2))) {
12018                         break
12019                 }
12020                 v.reset(OpAMD64MOVLatomicload)
12021                 v.AuxInt = int32ToAuxInt(off1 + off2)
12022                 v.Aux = symToAux(sym)
12023                 v.AddArg2(ptr, mem)
12024                 return true
12025         }
12026         // match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
12027         // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
12028         // result: (MOVLatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
12029         for {
12030                 off1 := auxIntToInt32(v.AuxInt)
12031                 sym1 := auxToSym(v.Aux)
12032                 if v_0.Op != OpAMD64LEAQ {
12033                         break
12034                 }
12035                 off2 := auxIntToInt32(v_0.AuxInt)
12036                 sym2 := auxToSym(v_0.Aux)
12037                 ptr := v_0.Args[0]
12038                 mem := v_1
12039                 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
12040                         break
12041                 }
12042                 v.reset(OpAMD64MOVLatomicload)
12043                 v.AuxInt = int32ToAuxInt(off1 + off2)
12044                 v.Aux = symToAux(mergeSym(sym1, sym2))
12045                 v.AddArg2(ptr, mem)
12046                 return true
12047         }
12048         return false
12049 }
12050 func rewriteValueAMD64_OpAMD64MOVLf2i(v *Value) bool {
12051         v_0 := v.Args[0]
12052         b := v.Block
12053         // match: (MOVLf2i <t> (Arg <u> [off] {sym}))
12054         // cond: t.Size() == u.Size()
12055         // result: @b.Func.Entry (Arg <t> [off] {sym})
12056         for {
12057                 t := v.Type
12058                 if v_0.Op != OpArg {
12059                         break
12060                 }
12061                 u := v_0.Type
12062                 off := auxIntToInt32(v_0.AuxInt)
12063                 sym := auxToSym(v_0.Aux)
12064                 if !(t.Size() == u.Size()) {
12065                         break
12066                 }
12067                 b = b.Func.Entry
12068                 v0 := b.NewValue0(v.Pos, OpArg, t)
12069                 v.copyOf(v0)
12070                 v0.AuxInt = int32ToAuxInt(off)
12071                 v0.Aux = symToAux(sym)
12072                 return true
12073         }
12074         return false
12075 }
12076 func rewriteValueAMD64_OpAMD64MOVLi2f(v *Value) bool {
12077         v_0 := v.Args[0]
12078         b := v.Block
12079         // match: (MOVLi2f <t> (Arg <u> [off] {sym}))
12080         // cond: t.Size() == u.Size()
12081         // result: @b.Func.Entry (Arg <t> [off] {sym})
12082         for {
12083                 t := v.Type
12084                 if v_0.Op != OpArg {
12085                         break
12086                 }
12087                 u := v_0.Type
12088                 off := auxIntToInt32(v_0.AuxInt)
12089                 sym := auxToSym(v_0.Aux)
12090                 if !(t.Size() == u.Size()) {
12091                         break
12092                 }
12093                 b = b.Func.Entry
12094                 v0 := b.NewValue0(v.Pos, OpArg, t)
12095                 v.copyOf(v0)
12096                 v0.AuxInt = int32ToAuxInt(off)
12097                 v0.Aux = symToAux(sym)
12098                 return true
12099         }
12100         return false
12101 }
// rewriteValueAMD64_OpAMD64MOVLload applies the generated AMD64 rewrite
// rules for MOVLload (32-bit load). Rules are tried in source order; the
// first match mutates v in place and returns true, otherwise false is
// returned with v untouched.
func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVLQZX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		// Store-to-load forwarding: reuse the just-stored value (zero
		// extended, since the load produced the upper bits as zero).
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVLload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		// Fold the constant add into the load's displacement (must stay a
		// valid signed 32-bit offset).
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		// Fold the LEAQ's offset and symbol into the load.
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
	// result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		// Same fold as above for the 32-bit LEAL form.
		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVLload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _))
	// result: (MOVLf2i val)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		// Integer load of a location just written by a float store: forward
		// the float value through a register-reinterpret op instead of
		// going through memory.
		if v_1.Op != OpAMD64MOVSSstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
			break
		}
		val := v_1.Args[1]
		if ptr != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64MOVLf2i)
		v.AddArg(val)
		return true
	}
	// match: (MOVLload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVQconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		// Load from a read-only global: fold to a constant read at compile
		// time using the target's byte order.
		if v_0.Op != OpSB || !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
		return true
	}
	return false
}
12246 func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
12247         v_2 := v.Args[2]
12248         v_1 := v.Args[1]
12249         v_0 := v.Args[0]
12250         b := v.Block
12251         typ := &b.Func.Config.Types
12252         // match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem)
12253         // result: (MOVLstore [off] {sym} ptr x mem)
12254         for {
12255                 off := auxIntToInt32(v.AuxInt)
12256                 sym := auxToSym(v.Aux)
12257                 ptr := v_0
12258                 if v_1.Op != OpAMD64MOVLQSX {
12259                         break
12260                 }
12261                 x := v_1.Args[0]
12262                 mem := v_2
12263                 v.reset(OpAMD64MOVLstore)
12264                 v.AuxInt = int32ToAuxInt(off)
12265                 v.Aux = symToAux(sym)
12266                 v.AddArg3(ptr, x, mem)
12267                 return true
12268         }
12269         // match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem)
12270         // result: (MOVLstore [off] {sym} ptr x mem)
12271         for {
12272                 off := auxIntToInt32(v.AuxInt)
12273                 sym := auxToSym(v.Aux)
12274                 ptr := v_0
12275                 if v_1.Op != OpAMD64MOVLQZX {
12276                         break
12277                 }
12278                 x := v_1.Args[0]
12279                 mem := v_2
12280                 v.reset(OpAMD64MOVLstore)
12281                 v.AuxInt = int32ToAuxInt(off)
12282                 v.Aux = symToAux(sym)
12283                 v.AddArg3(ptr, x, mem)
12284                 return true
12285         }
12286         // match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
12287         // cond: is32Bit(int64(off1)+int64(off2))
12288         // result: (MOVLstore [off1+off2] {sym} ptr val mem)
12289         for {
12290                 off1 := auxIntToInt32(v.AuxInt)
12291                 sym := auxToSym(v.Aux)
12292                 if v_0.Op != OpAMD64ADDQconst {
12293                         break
12294                 }
12295                 off2 := auxIntToInt32(v_0.AuxInt)
12296                 ptr := v_0.Args[0]
12297                 val := v_1
12298                 mem := v_2
12299                 if !(is32Bit(int64(off1) + int64(off2))) {
12300                         break
12301                 }
12302                 v.reset(OpAMD64MOVLstore)
12303                 v.AuxInt = int32ToAuxInt(off1 + off2)
12304                 v.Aux = symToAux(sym)
12305                 v.AddArg3(ptr, val, mem)
12306                 return true
12307         }
12308         // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
12309         // result: (MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
12310         for {
12311                 off := auxIntToInt32(v.AuxInt)
12312                 sym := auxToSym(v.Aux)
12313                 ptr := v_0
12314                 if v_1.Op != OpAMD64MOVLconst {
12315                         break
12316                 }
12317                 c := auxIntToInt32(v_1.AuxInt)
12318                 mem := v_2
12319                 v.reset(OpAMD64MOVLstoreconst)
12320                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12321                 v.Aux = symToAux(sym)
12322                 v.AddArg2(ptr, mem)
12323                 return true
12324         }
12325         // match: (MOVLstore [off] {sym} ptr (MOVQconst [c]) mem)
12326         // result: (MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
12327         for {
12328                 off := auxIntToInt32(v.AuxInt)
12329                 sym := auxToSym(v.Aux)
12330                 ptr := v_0
12331                 if v_1.Op != OpAMD64MOVQconst {
12332                         break
12333                 }
12334                 c := auxIntToInt64(v_1.AuxInt)
12335                 mem := v_2
12336                 v.reset(OpAMD64MOVLstoreconst)
12337                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12338                 v.Aux = symToAux(sym)
12339                 v.AddArg2(ptr, mem)
12340                 return true
12341         }
12342         // match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
12343         // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
12344         // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
12345         for {
12346                 off1 := auxIntToInt32(v.AuxInt)
12347                 sym1 := auxToSym(v.Aux)
12348                 if v_0.Op != OpAMD64LEAQ {
12349                         break
12350                 }
12351                 off2 := auxIntToInt32(v_0.AuxInt)
12352                 sym2 := auxToSym(v_0.Aux)
12353                 base := v_0.Args[0]
12354                 val := v_1
12355                 mem := v_2
12356                 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
12357                         break
12358                 }
12359                 v.reset(OpAMD64MOVLstore)
12360                 v.AuxInt = int32ToAuxInt(off1 + off2)
12361                 v.Aux = symToAux(mergeSym(sym1, sym2))
12362                 v.AddArg3(base, val, mem)
12363                 return true
12364         }
12365         // match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
12366         // cond: x.Uses == 1 && clobber(x)
12367         // result: (MOVQstore [i-4] {s} p w mem)
12368         for {
12369                 i := auxIntToInt32(v.AuxInt)
12370                 s := auxToSym(v.Aux)
12371                 p := v_0
12372                 if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 32 {
12373                         break
12374                 }
12375                 w := v_1.Args[0]
12376                 x := v_2
12377                 if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
12378                         break
12379                 }
12380                 mem := x.Args[2]
12381                 if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
12382                         break
12383                 }
12384                 v.reset(OpAMD64MOVQstore)
12385                 v.AuxInt = int32ToAuxInt(i - 4)
12386                 v.Aux = symToAux(s)
12387                 v.AddArg3(p, w, mem)
12388                 return true
12389         }
12390         // match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
12391         // cond: x.Uses == 1 && clobber(x)
12392         // result: (MOVQstore [i-4] {s} p w0 mem)
12393         for {
12394                 i := auxIntToInt32(v.AuxInt)
12395                 s := auxToSym(v.Aux)
12396                 p := v_0
12397                 if v_1.Op != OpAMD64SHRQconst {
12398                         break
12399                 }
12400                 j := auxIntToInt8(v_1.AuxInt)
12401                 w := v_1.Args[0]
12402                 x := v_2
12403                 if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
12404                         break
12405                 }
12406                 mem := x.Args[2]
12407                 if p != x.Args[0] {
12408                         break
12409                 }
12410                 w0 := x.Args[1]
12411                 if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
12412                         break
12413                 }
12414                 v.reset(OpAMD64MOVQstore)
12415                 v.AuxInt = int32ToAuxInt(i - 4)
12416                 v.Aux = symToAux(s)
12417                 v.AddArg3(p, w0, mem)
12418                 return true
12419         }
12420         // match: (MOVLstore [i] {s} p1 (SHRQconst [32] w) x:(MOVLstore [i] {s} p0 w mem))
12421         // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)
12422         // result: (MOVQstore [i] {s} p0 w mem)
12423         for {
12424                 i := auxIntToInt32(v.AuxInt)
12425                 s := auxToSym(v.Aux)
12426                 p1 := v_0
12427                 if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 32 {
12428                         break
12429                 }
12430                 w := v_1.Args[0]
12431                 x := v_2
12432                 if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
12433                         break
12434                 }
12435                 mem := x.Args[2]
12436                 p0 := x.Args[0]
12437                 if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)) {
12438                         break
12439                 }
12440                 v.reset(OpAMD64MOVQstore)
12441                 v.AuxInt = int32ToAuxInt(i)
12442                 v.Aux = symToAux(s)
12443                 v.AddArg3(p0, w, mem)
12444                 return true
12445         }
12446         // match: (MOVLstore [i] {s} p1 (SHRQconst [j] w) x:(MOVLstore [i] {s} p0 w0:(SHRQconst [j-32] w) mem))
12447         // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)
12448         // result: (MOVQstore [i] {s} p0 w0 mem)
12449         for {
12450                 i := auxIntToInt32(v.AuxInt)
12451                 s := auxToSym(v.Aux)
12452                 p1 := v_0
12453                 if v_1.Op != OpAMD64SHRQconst {
12454                         break
12455                 }
12456                 j := auxIntToInt8(v_1.AuxInt)
12457                 w := v_1.Args[0]
12458                 x := v_2
12459                 if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
12460                         break
12461                 }
12462                 mem := x.Args[2]
12463                 p0 := x.Args[0]
12464                 w0 := x.Args[1]
12465                 if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)) {
12466                         break
12467                 }
12468                 v.reset(OpAMD64MOVQstore)
12469                 v.AuxInt = int32ToAuxInt(i)
12470                 v.Aux = symToAux(s)
12471                 v.AddArg3(p0, w0, mem)
12472                 return true
12473         }
12474         // match: (MOVLstore [i] {s} p x1:(MOVLload [j] {s2} p2 mem) mem2:(MOVLstore [i-4] {s} p x2:(MOVLload [j-4] {s2} p2 mem) mem))
12475         // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)
12476         // result: (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem)
12477         for {
12478                 i := auxIntToInt32(v.AuxInt)
12479                 s := auxToSym(v.Aux)
12480                 p := v_0
12481                 x1 := v_1
12482                 if x1.Op != OpAMD64MOVLload {
12483                         break
12484                 }
12485                 j := auxIntToInt32(x1.AuxInt)
12486                 s2 := auxToSym(x1.Aux)
12487                 mem := x1.Args[1]
12488                 p2 := x1.Args[0]
12489                 mem2 := v_2
12490                 if mem2.Op != OpAMD64MOVLstore || auxIntToInt32(mem2.AuxInt) != i-4 || auxToSym(mem2.Aux) != s {
12491                         break
12492                 }
12493                 _ = mem2.Args[2]
12494                 if p != mem2.Args[0] {
12495                         break
12496                 }
12497                 x2 := mem2.Args[1]
12498                 if x2.Op != OpAMD64MOVLload || auxIntToInt32(x2.AuxInt) != j-4 || auxToSym(x2.Aux) != s2 {
12499                         break
12500                 }
12501                 _ = x2.Args[1]
12502                 if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)) {
12503                         break
12504                 }
12505                 v.reset(OpAMD64MOVQstore)
12506                 v.AuxInt = int32ToAuxInt(i - 4)
12507                 v.Aux = symToAux(s)
12508                 v0 := b.NewValue0(x2.Pos, OpAMD64MOVQload, typ.UInt64)
12509                 v0.AuxInt = int32ToAuxInt(j - 4)
12510                 v0.Aux = symToAux(s2)
12511                 v0.AddArg2(p2, mem)
12512                 v.AddArg3(p, v0, mem)
12513                 return true
12514         }
12515         // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
12516         // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
12517         // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
12518         for {
12519                 off1 := auxIntToInt32(v.AuxInt)
12520                 sym1 := auxToSym(v.Aux)
12521                 if v_0.Op != OpAMD64LEAL {
12522                         break
12523                 }
12524                 off2 := auxIntToInt32(v_0.AuxInt)
12525                 sym2 := auxToSym(v_0.Aux)
12526                 base := v_0.Args[0]
12527                 val := v_1
12528                 mem := v_2
12529                 if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
12530                         break
12531                 }
12532                 v.reset(OpAMD64MOVLstore)
12533                 v.AuxInt = int32ToAuxInt(off1 + off2)
12534                 v.Aux = symToAux(mergeSym(sym1, sym2))
12535                 v.AddArg3(base, val, mem)
12536                 return true
12537         }
12538         // match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
12539         // cond: is32Bit(int64(off1)+int64(off2))
12540         // result: (MOVLstore [off1+off2] {sym} ptr val mem)
12541         for {
12542                 off1 := auxIntToInt32(v.AuxInt)
12543                 sym := auxToSym(v.Aux)
12544                 if v_0.Op != OpAMD64ADDLconst {
12545                         break
12546                 }
12547                 off2 := auxIntToInt32(v_0.AuxInt)
12548                 ptr := v_0.Args[0]
12549                 val := v_1
12550                 mem := v_2
12551                 if !(is32Bit(int64(off1) + int64(off2))) {
12552                         break
12553                 }
12554                 v.reset(OpAMD64MOVLstore)
12555                 v.AuxInt = int32ToAuxInt(off1 + off2)
12556                 v.Aux = symToAux(sym)
12557                 v.AddArg3(ptr, val, mem)
12558                 return true
12559         }
12560         // match: (MOVLstore {sym} [off] ptr y:(ADDLload x [off] {sym} ptr mem) mem)
12561         // cond: y.Uses==1 && clobber(y)
12562         // result: (ADDLmodify [off] {sym} ptr x mem)
12563         for {
12564                 off := auxIntToInt32(v.AuxInt)
12565                 sym := auxToSym(v.Aux)
12566                 ptr := v_0
12567                 y := v_1
12568                 if y.Op != OpAMD64ADDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
12569                         break
12570                 }
12571                 mem := y.Args[2]
12572                 x := y.Args[0]
12573                 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
12574                         break
12575                 }
12576                 v.reset(OpAMD64ADDLmodify)
12577                 v.AuxInt = int32ToAuxInt(off)
12578                 v.Aux = symToAux(sym)
12579                 v.AddArg3(ptr, x, mem)
12580                 return true
12581         }
12582         // match: (MOVLstore {sym} [off] ptr y:(ANDLload x [off] {sym} ptr mem) mem)
12583         // cond: y.Uses==1 && clobber(y)
12584         // result: (ANDLmodify [off] {sym} ptr x mem)
12585         for {
12586                 off := auxIntToInt32(v.AuxInt)
12587                 sym := auxToSym(v.Aux)
12588                 ptr := v_0
12589                 y := v_1
12590                 if y.Op != OpAMD64ANDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
12591                         break
12592                 }
12593                 mem := y.Args[2]
12594                 x := y.Args[0]
12595                 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
12596                         break
12597                 }
12598                 v.reset(OpAMD64ANDLmodify)
12599                 v.AuxInt = int32ToAuxInt(off)
12600                 v.Aux = symToAux(sym)
12601                 v.AddArg3(ptr, x, mem)
12602                 return true
12603         }
12604         // match: (MOVLstore {sym} [off] ptr y:(ORLload x [off] {sym} ptr mem) mem)
12605         // cond: y.Uses==1 && clobber(y)
12606         // result: (ORLmodify [off] {sym} ptr x mem)
12607         for {
12608                 off := auxIntToInt32(v.AuxInt)
12609                 sym := auxToSym(v.Aux)
12610                 ptr := v_0
12611                 y := v_1
12612                 if y.Op != OpAMD64ORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
12613                         break
12614                 }
12615                 mem := y.Args[2]
12616                 x := y.Args[0]
12617                 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
12618                         break
12619                 }
12620                 v.reset(OpAMD64ORLmodify)
12621                 v.AuxInt = int32ToAuxInt(off)
12622                 v.Aux = symToAux(sym)
12623                 v.AddArg3(ptr, x, mem)
12624                 return true
12625         }
12626         // match: (MOVLstore {sym} [off] ptr y:(XORLload x [off] {sym} ptr mem) mem)
12627         // cond: y.Uses==1 && clobber(y)
12628         // result: (XORLmodify [off] {sym} ptr x mem)
12629         for {
12630                 off := auxIntToInt32(v.AuxInt)
12631                 sym := auxToSym(v.Aux)
12632                 ptr := v_0
12633                 y := v_1
12634                 if y.Op != OpAMD64XORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
12635                         break
12636                 }
12637                 mem := y.Args[2]
12638                 x := y.Args[0]
12639                 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
12640                         break
12641                 }
12642                 v.reset(OpAMD64XORLmodify)
12643                 v.AuxInt = int32ToAuxInt(off)
12644                 v.Aux = symToAux(sym)
12645                 v.AddArg3(ptr, x, mem)
12646                 return true
12647         }
12648         // match: (MOVLstore {sym} [off] ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem)
12649         // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
12650         // result: (ADDLmodify [off] {sym} ptr x mem)
12651         for {
12652                 off := auxIntToInt32(v.AuxInt)
12653                 sym := auxToSym(v.Aux)
12654                 ptr := v_0
12655                 y := v_1
12656                 if y.Op != OpAMD64ADDL {
12657                         break
12658                 }
12659                 _ = y.Args[1]
12660                 y_0 := y.Args[0]
12661                 y_1 := y.Args[1]
12662                 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
12663                         l := y_0
12664                         if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12665                                 continue
12666                         }
12667                         mem := l.Args[1]
12668                         if ptr != l.Args[0] {
12669                                 continue
12670                         }
12671                         x := y_1
12672                         if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12673                                 continue
12674                         }
12675                         v.reset(OpAMD64ADDLmodify)
12676                         v.AuxInt = int32ToAuxInt(off)
12677                         v.Aux = symToAux(sym)
12678                         v.AddArg3(ptr, x, mem)
12679                         return true
12680                 }
12681                 break
12682         }
12683         // match: (MOVLstore {sym} [off] ptr y:(SUBL l:(MOVLload [off] {sym} ptr mem) x) mem)
12684         // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
12685         // result: (SUBLmodify [off] {sym} ptr x mem)
12686         for {
12687                 off := auxIntToInt32(v.AuxInt)
12688                 sym := auxToSym(v.Aux)
12689                 ptr := v_0
12690                 y := v_1
12691                 if y.Op != OpAMD64SUBL {
12692                         break
12693                 }
12694                 x := y.Args[1]
12695                 l := y.Args[0]
12696                 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12697                         break
12698                 }
12699                 mem := l.Args[1]
12700                 if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12701                         break
12702                 }
12703                 v.reset(OpAMD64SUBLmodify)
12704                 v.AuxInt = int32ToAuxInt(off)
12705                 v.Aux = symToAux(sym)
12706                 v.AddArg3(ptr, x, mem)
12707                 return true
12708         }
12709         // match: (MOVLstore {sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem)
12710         // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
12711         // result: (ANDLmodify [off] {sym} ptr x mem)
12712         for {
12713                 off := auxIntToInt32(v.AuxInt)
12714                 sym := auxToSym(v.Aux)
12715                 ptr := v_0
12716                 y := v_1
12717                 if y.Op != OpAMD64ANDL {
12718                         break
12719                 }
12720                 _ = y.Args[1]
12721                 y_0 := y.Args[0]
12722                 y_1 := y.Args[1]
12723                 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
12724                         l := y_0
12725                         if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12726                                 continue
12727                         }
12728                         mem := l.Args[1]
12729                         if ptr != l.Args[0] {
12730                                 continue
12731                         }
12732                         x := y_1
12733                         if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12734                                 continue
12735                         }
12736                         v.reset(OpAMD64ANDLmodify)
12737                         v.AuxInt = int32ToAuxInt(off)
12738                         v.Aux = symToAux(sym)
12739                         v.AddArg3(ptr, x, mem)
12740                         return true
12741                 }
12742                 break
12743         }
12744         // match: (MOVLstore {sym} [off] ptr y:(ORL l:(MOVLload [off] {sym} ptr mem) x) mem)
12745         // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
12746         // result: (ORLmodify [off] {sym} ptr x mem)
12747         for {
12748                 off := auxIntToInt32(v.AuxInt)
12749                 sym := auxToSym(v.Aux)
12750                 ptr := v_0
12751                 y := v_1
12752                 if y.Op != OpAMD64ORL {
12753                         break
12754                 }
12755                 _ = y.Args[1]
12756                 y_0 := y.Args[0]
12757                 y_1 := y.Args[1]
12758                 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
12759                         l := y_0
12760                         if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12761                                 continue
12762                         }
12763                         mem := l.Args[1]
12764                         if ptr != l.Args[0] {
12765                                 continue
12766                         }
12767                         x := y_1
12768                         if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12769                                 continue
12770                         }
12771                         v.reset(OpAMD64ORLmodify)
12772                         v.AuxInt = int32ToAuxInt(off)
12773                         v.Aux = symToAux(sym)
12774                         v.AddArg3(ptr, x, mem)
12775                         return true
12776                 }
12777                 break
12778         }
12779         // match: (MOVLstore {sym} [off] ptr y:(XORL l:(MOVLload [off] {sym} ptr mem) x) mem)
12780         // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
12781         // result: (XORLmodify [off] {sym} ptr x mem)
12782         for {
12783                 off := auxIntToInt32(v.AuxInt)
12784                 sym := auxToSym(v.Aux)
12785                 ptr := v_0
12786                 y := v_1
12787                 if y.Op != OpAMD64XORL {
12788                         break
12789                 }
12790                 _ = y.Args[1]
12791                 y_0 := y.Args[0]
12792                 y_1 := y.Args[1]
12793                 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
12794                         l := y_0
12795                         if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12796                                 continue
12797                         }
12798                         mem := l.Args[1]
12799                         if ptr != l.Args[0] {
12800                                 continue
12801                         }
12802                         x := y_1
12803                         if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12804                                 continue
12805                         }
12806                         v.reset(OpAMD64XORLmodify)
12807                         v.AuxInt = int32ToAuxInt(off)
12808                         v.Aux = symToAux(sym)
12809                         v.AddArg3(ptr, x, mem)
12810                         return true
12811                 }
12812                 break
12813         }
12814         // match: (MOVLstore {sym} [off] ptr y:(BTCL l:(MOVLload [off] {sym} ptr mem) <t> x) mem)
12815         // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
12816         // result: (BTCLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
12817         for {
12818                 off := auxIntToInt32(v.AuxInt)
12819                 sym := auxToSym(v.Aux)
12820                 ptr := v_0
12821                 y := v_1
12822                 if y.Op != OpAMD64BTCL {
12823                         break
12824                 }
12825                 t := y.Type
12826                 x := y.Args[1]
12827                 l := y.Args[0]
12828                 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12829                         break
12830                 }
12831                 mem := l.Args[1]
12832                 if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12833                         break
12834                 }
12835                 v.reset(OpAMD64BTCLmodify)
12836                 v.AuxInt = int32ToAuxInt(off)
12837                 v.Aux = symToAux(sym)
12838                 v0 := b.NewValue0(l.Pos, OpAMD64ANDLconst, t)
12839                 v0.AuxInt = int32ToAuxInt(31)
12840                 v0.AddArg(x)
12841                 v.AddArg3(ptr, v0, mem)
12842                 return true
12843         }
12844         // match: (MOVLstore {sym} [off] ptr y:(BTRL l:(MOVLload [off] {sym} ptr mem) <t> x) mem)
12845         // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
12846         // result: (BTRLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
12847         for {
12848                 off := auxIntToInt32(v.AuxInt)
12849                 sym := auxToSym(v.Aux)
12850                 ptr := v_0
12851                 y := v_1
12852                 if y.Op != OpAMD64BTRL {
12853                         break
12854                 }
12855                 t := y.Type
12856                 x := y.Args[1]
12857                 l := y.Args[0]
12858                 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12859                         break
12860                 }
12861                 mem := l.Args[1]
12862                 if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12863                         break
12864                 }
12865                 v.reset(OpAMD64BTRLmodify)
12866                 v.AuxInt = int32ToAuxInt(off)
12867                 v.Aux = symToAux(sym)
12868                 v0 := b.NewValue0(l.Pos, OpAMD64ANDLconst, t)
12869                 v0.AuxInt = int32ToAuxInt(31)
12870                 v0.AddArg(x)
12871                 v.AddArg3(ptr, v0, mem)
12872                 return true
12873         }
12874         // match: (MOVLstore {sym} [off] ptr y:(BTSL l:(MOVLload [off] {sym} ptr mem) <t> x) mem)
12875         // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
12876         // result: (BTSLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
12877         for {
12878                 off := auxIntToInt32(v.AuxInt)
12879                 sym := auxToSym(v.Aux)
12880                 ptr := v_0
12881                 y := v_1
12882                 if y.Op != OpAMD64BTSL {
12883                         break
12884                 }
12885                 t := y.Type
12886                 x := y.Args[1]
12887                 l := y.Args[0]
12888                 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12889                         break
12890                 }
12891                 mem := l.Args[1]
12892                 if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12893                         break
12894                 }
12895                 v.reset(OpAMD64BTSLmodify)
12896                 v.AuxInt = int32ToAuxInt(off)
12897                 v.Aux = symToAux(sym)
12898                 v0 := b.NewValue0(l.Pos, OpAMD64ANDLconst, t)
12899                 v0.AuxInt = int32ToAuxInt(31)
12900                 v0.AddArg(x)
12901                 v.AddArg3(ptr, v0, mem)
12902                 return true
12903         }
12904         // match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
12905         // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
12906         // result: (ADDLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
12907         for {
12908                 off := auxIntToInt32(v.AuxInt)
12909                 sym := auxToSym(v.Aux)
12910                 ptr := v_0
12911                 a := v_1
12912                 if a.Op != OpAMD64ADDLconst {
12913                         break
12914                 }
12915                 c := auxIntToInt32(a.AuxInt)
12916                 l := a.Args[0]
12917                 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12918                         break
12919                 }
12920                 mem := l.Args[1]
12921                 ptr2 := l.Args[0]
12922                 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
12923                         break
12924                 }
12925                 v.reset(OpAMD64ADDLconstmodify)
12926                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12927                 v.Aux = symToAux(sym)
12928                 v.AddArg2(ptr, mem)
12929                 return true
12930         }
12931         // match: (MOVLstore [off] {sym} ptr a:(ANDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
12932         // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
12933         // result: (ANDLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
12934         for {
12935                 off := auxIntToInt32(v.AuxInt)
12936                 sym := auxToSym(v.Aux)
12937                 ptr := v_0
12938                 a := v_1
12939                 if a.Op != OpAMD64ANDLconst {
12940                         break
12941                 }
12942                 c := auxIntToInt32(a.AuxInt)
12943                 l := a.Args[0]
12944                 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12945                         break
12946                 }
12947                 mem := l.Args[1]
12948                 ptr2 := l.Args[0]
12949                 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
12950                         break
12951                 }
12952                 v.reset(OpAMD64ANDLconstmodify)
12953                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12954                 v.Aux = symToAux(sym)
12955                 v.AddArg2(ptr, mem)
12956                 return true
12957         }
12958         // match: (MOVLstore [off] {sym} ptr a:(ORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
12959         // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
12960         // result: (ORLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
12961         for {
12962                 off := auxIntToInt32(v.AuxInt)
12963                 sym := auxToSym(v.Aux)
12964                 ptr := v_0
12965                 a := v_1
12966                 if a.Op != OpAMD64ORLconst {
12967                         break
12968                 }
12969                 c := auxIntToInt32(a.AuxInt)
12970                 l := a.Args[0]
12971                 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12972                         break
12973                 }
12974                 mem := l.Args[1]
12975                 ptr2 := l.Args[0]
12976                 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
12977                         break
12978                 }
12979                 v.reset(OpAMD64ORLconstmodify)
12980                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12981                 v.Aux = symToAux(sym)
12982                 v.AddArg2(ptr, mem)
12983                 return true
12984         }
12985         // match: (MOVLstore [off] {sym} ptr a:(XORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
12986         // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
12987         // result: (XORLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
12988         for {
12989                 off := auxIntToInt32(v.AuxInt)
12990                 sym := auxToSym(v.Aux)
12991                 ptr := v_0
12992                 a := v_1
12993                 if a.Op != OpAMD64XORLconst {
12994                         break
12995                 }
12996                 c := auxIntToInt32(a.AuxInt)
12997                 l := a.Args[0]
12998                 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12999                         break
13000                 }
13001                 mem := l.Args[1]
13002                 ptr2 := l.Args[0]
13003                 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
13004                         break
13005                 }
13006                 v.reset(OpAMD64XORLconstmodify)
13007                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
13008                 v.Aux = symToAux(sym)
13009                 v.AddArg2(ptr, mem)
13010                 return true
13011         }
13012         // match: (MOVLstore [off] {sym} ptr a:(BTCLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
13013         // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
13014         // result: (BTCLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
13015         for {
13016                 off := auxIntToInt32(v.AuxInt)
13017                 sym := auxToSym(v.Aux)
13018                 ptr := v_0
13019                 a := v_1
13020                 if a.Op != OpAMD64BTCLconst {
13021                         break
13022                 }
13023                 c := auxIntToInt8(a.AuxInt)
13024                 l := a.Args[0]
13025                 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
13026                         break
13027                 }
13028                 mem := l.Args[1]
13029                 ptr2 := l.Args[0]
13030                 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
13031                         break
13032                 }
13033                 v.reset(OpAMD64BTCLconstmodify)
13034                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
13035                 v.Aux = symToAux(sym)
13036                 v.AddArg2(ptr, mem)
13037                 return true
13038         }
13039         // match: (MOVLstore [off] {sym} ptr a:(BTRLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
13040         // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
13041         // result: (BTRLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
13042         for {
13043                 off := auxIntToInt32(v.AuxInt)
13044                 sym := auxToSym(v.Aux)
13045                 ptr := v_0
13046                 a := v_1
13047                 if a.Op != OpAMD64BTRLconst {
13048                         break
13049                 }
13050                 c := auxIntToInt8(a.AuxInt)
13051                 l := a.Args[0]
13052                 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
13053                         break
13054                 }
13055                 mem := l.Args[1]
13056                 ptr2 := l.Args[0]
13057                 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
13058                         break
13059                 }
13060                 v.reset(OpAMD64BTRLconstmodify)
13061                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
13062                 v.Aux = symToAux(sym)
13063                 v.AddArg2(ptr, mem)
13064                 return true
13065         }
13066         // match: (MOVLstore [off] {sym} ptr a:(BTSLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
13067         // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
13068         // result: (BTSLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
13069         for {
13070                 off := auxIntToInt32(v.AuxInt)
13071                 sym := auxToSym(v.Aux)
13072                 ptr := v_0
13073                 a := v_1
13074                 if a.Op != OpAMD64BTSLconst {
13075                         break
13076                 }
13077                 c := auxIntToInt8(a.AuxInt)
13078                 l := a.Args[0]
13079                 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
13080                         break
13081                 }
13082                 mem := l.Args[1]
13083                 ptr2 := l.Args[0]
13084                 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
13085                         break
13086                 }
13087                 v.reset(OpAMD64BTSLconstmodify)
13088                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
13089                 v.Aux = symToAux(sym)
13090                 v.AddArg2(ptr, mem)
13091                 return true
13092         }
13093         // match: (MOVLstore [off] {sym} ptr (MOVLf2i val) mem)
13094         // result: (MOVSSstore [off] {sym} ptr val mem)
13095         for {
13096                 off := auxIntToInt32(v.AuxInt)
13097                 sym := auxToSym(v.Aux)
13098                 ptr := v_0
13099                 if v_1.Op != OpAMD64MOVLf2i {
13100                         break
13101                 }
13102                 val := v_1.Args[0]
13103                 mem := v_2
13104                 v.reset(OpAMD64MOVSSstore)
13105                 v.AuxInt = int32ToAuxInt(off)
13106                 v.Aux = symToAux(sym)
13107                 v.AddArg3(ptr, val, mem)
13108                 return true
13109         }
13110         return false
13111 }
// rewriteValueAMD64_OpAMD64MOVLstoreconst applies the generated rewrite rules
// for MOVLstoreconst (store of a 32-bit constant). The rules fold constant
// address arithmetic (ADDQconst/ADDLconst) and symbol+offset addressing
// (LEAQ/LEAL) into the store's ValAndOff aux, and fuse two adjacent 4-byte
// constant stores into one 8-byte MOVQstore. Reports whether a rule fired.
func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd32(off)
	// result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		// canAdd32 guards against offset overflow in the combined aux.
		if !(ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(s)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
	// result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	// The next two rules are the two operand orders of the same fusion:
	// two 4-byte constant stores at offsets a.Off() and a.Off()+4 to the
	// same pointer become a single 8-byte MOVQstore of the combined value.
	// match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
	// cond: x.Uses == 1 && a.Off() + 4 == c.Off() && clobber(x)
	// result: (MOVQstore [a.Off()] {s} p (MOVQconst [a.Val64()&0xffffffff | c.Val64()<<32]) mem)
	for {
		c := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64MOVLstoreconst {
			break
		}
		a := auxIntToValAndOff(x.AuxInt)
		if auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[1]
		if p != x.Args[0] || !(x.Uses == 1 && a.Off()+4 == c.Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(a.Off())
		v.Aux = symToAux(s)
		v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64)
		// Low 32 bits come from the lower-addressed store (little-endian).
		v0.AuxInt = int64ToAuxInt(a.Val64()&0xffffffff | c.Val64()<<32)
		v.AddArg3(p, v0, mem)
		return true
	}
	// match: (MOVLstoreconst [a] {s} p x:(MOVLstoreconst [c] {s} p mem))
	// cond: x.Uses == 1 && a.Off() + 4 == c.Off() && clobber(x)
	// result: (MOVQstore [a.Off()] {s} p (MOVQconst [a.Val64()&0xffffffff | c.Val64()<<32]) mem)
	for {
		a := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64MOVLstoreconst {
			break
		}
		c := auxIntToValAndOff(x.AuxInt)
		if auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[1]
		if p != x.Args[0] || !(x.Uses == 1 && a.Off()+4 == c.Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(a.Off())
		v.Aux = symToAux(s)
		v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(a.Val64()&0xffffffff | c.Val64()<<32)
		v.AddArg3(p, v0, mem)
		return true
	}
	// match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && sc.canAdd32(off)
	// result: (MOVLstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && sc.canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: sc.canAdd32(off)
	// result: (MOVLstoreconst [sc.addOffset32(off)] {s} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(sc.canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
		v.Aux = symToAux(s)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVOload applies the generated rewrite rules for
// MOVOload (128-bit load): folding an ADDQconst base into the load offset and
// merging a LEAQ's symbol+offset into the load's aux. Reports whether a rule
// fired.
func rewriteValueAMD64_OpAMD64MOVOload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVOload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		// Combined displacement must still fit in a signed 32-bit field.
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVOload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVOload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVOstore applies the generated rewrite rules for
// MOVOstore (128-bit store): folding ADDQconst/LEAQ addressing into the store
// aux, and replacing a copy from a read-only global (MOVOload from SB) with
// two MOVQstore-of-constant operations whose values are read from the symbol
// at compile time. Reports whether a rule fired.
func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	typ := &b.Func.Config.Types
	// match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVOstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem)
	// cond: symIsRO(srcSym)
	// result: (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))]) (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))]) mem))
	for {
		dstOff := auxIntToInt32(v.AuxInt)
		dstSym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVOload {
			break
		}
		srcOff := auxIntToInt32(v_1.AuxInt)
		srcSym := auxToSym(v_1.Aux)
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpSB {
			break
		}
		mem := v_2
		// Only safe when the source symbol's data is read-only and hence
		// known at compile time.
		if !(symIsRO(srcSym)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(dstOff + 8)
		v.Aux = symToAux(dstSym)
		// v0: constant for the high 8 bytes; v1: store of the low 8 bytes
		// (with constant v2), threaded through memory before v's store.
		v0 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder)))
		v1 := b.NewValue0(v_1.Pos, OpAMD64MOVQstore, types.TypeMem)
		v1.AuxInt = int32ToAuxInt(dstOff)
		v1.Aux = symToAux(dstSym)
		v2 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
		v2.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder)))
		v1.AddArg3(ptr, v2, mem)
		v.AddArg3(ptr, v0, v1)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVQatomicload applies the generated rewrite rules
// for MOVQatomicload: folding an ADDQconst base into the offset and merging a
// LEAQ's symbol+offset into the aux. Only addressing is rewritten; the atomic
// load operation itself is unchanged. Reports whether a rule fired.
func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVQatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVQatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVQatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVQf2i applies the generated rewrite rule for
// MOVQf2i (reinterpret a 64-bit float register as an integer): when the
// operand is a function Arg of the same size, the conversion is replaced by a
// same-offset Arg of the target type, placed in the entry block. Reports
// whether the rule fired.
func rewriteValueAMD64_OpAMD64MOVQf2i(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVQf2i <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := auxIntToInt32(v_0.AuxInt)
		sym := auxToSym(v_0.Aux)
		if !(t.Size() == u.Size()) {
			break
		}
		// New Arg values live in the function's entry block.
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVQi2f applies the generated rewrite rule for
// MOVQi2f (reinterpret a 64-bit integer register as a float): the mirror of
// MOVQf2i — a same-size function Arg operand is replaced by a same-offset Arg
// of the target type in the entry block. Reports whether the rule fired.
func rewriteValueAMD64_OpAMD64MOVQi2f(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVQi2f <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := auxIntToInt32(v_0.AuxInt)
		sym := auxToSym(v_0.Aux)
		if !(t.Size() == u.Size()) {
			break
		}
		// New Arg values live in the function's entry block.
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVQload applies the generated rewrite rules for
// MOVQload (64-bit load): store-to-load forwarding, folding of constant
// address arithmetic (ADDQconst/ADDLconst) and symbol+offset addressing
// (LEAQ/LEAL), reinterpreting a just-stored float (MOVSDstore) via MOVQf2i,
// and constant-folding loads from read-only globals. Reports whether a rule
// fired.
func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// Store-to-load forwarding: a load that reads exactly what the
	// immediately preceding store wrote is replaced by the stored value.
	// match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: x
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVQload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
	// result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVQload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// Integer load of a just-stored float value: forward it through a
	// register-reinterpret instead of going through memory.
	// match: (MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _))
	// result: (MOVQf2i val)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVSDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
			break
		}
		val := v_1.Args[1]
		if ptr != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64MOVQf2i)
		v.AddArg(val)
		return true
	}
	// Load from a read-only global (addressed off SB): fold the data read
	// at compile time into a MOVQconst.
	// match: (MOVQload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVQconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpSB || !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder)))
		return true
	}
	return false
}
13638 func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
13639         v_2 := v.Args[2]
13640         v_1 := v.Args[1]
13641         v_0 := v.Args[0]
13642         b := v.Block
13643         // match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
13644         // cond: is32Bit(int64(off1)+int64(off2))
13645         // result: (MOVQstore [off1+off2] {sym} ptr val mem)
13646         for {
13647                 off1 := auxIntToInt32(v.AuxInt)
13648                 sym := auxToSym(v.Aux)
13649                 if v_0.Op != OpAMD64ADDQconst {
13650                         break
13651                 }
13652                 off2 := auxIntToInt32(v_0.AuxInt)
13653                 ptr := v_0.Args[0]
13654                 val := v_1
13655                 mem := v_2
13656                 if !(is32Bit(int64(off1) + int64(off2))) {
13657                         break
13658                 }
13659                 v.reset(OpAMD64MOVQstore)
13660                 v.AuxInt = int32ToAuxInt(off1 + off2)
13661                 v.Aux = symToAux(sym)
13662                 v.AddArg3(ptr, val, mem)
13663                 return true
13664         }
13665         // match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem)
13666         // cond: validVal(c)
13667         // result: (MOVQstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
13668         for {
13669                 off := auxIntToInt32(v.AuxInt)
13670                 sym := auxToSym(v.Aux)
13671                 ptr := v_0
13672                 if v_1.Op != OpAMD64MOVQconst {
13673                         break
13674                 }
13675                 c := auxIntToInt64(v_1.AuxInt)
13676                 mem := v_2
13677                 if !(validVal(c)) {
13678                         break
13679                 }
13680                 v.reset(OpAMD64MOVQstoreconst)
13681                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
13682                 v.Aux = symToAux(sym)
13683                 v.AddArg2(ptr, mem)
13684                 return true
13685         }
13686         // match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
13687         // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
13688         // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
13689         for {
13690                 off1 := auxIntToInt32(v.AuxInt)
13691                 sym1 := auxToSym(v.Aux)
13692                 if v_0.Op != OpAMD64LEAQ {
13693                         break
13694                 }
13695                 off2 := auxIntToInt32(v_0.AuxInt)
13696                 sym2 := auxToSym(v_0.Aux)
13697                 base := v_0.Args[0]
13698                 val := v_1
13699                 mem := v_2
13700                 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
13701                         break
13702                 }
13703                 v.reset(OpAMD64MOVQstore)
13704                 v.AuxInt = int32ToAuxInt(off1 + off2)
13705                 v.Aux = symToAux(mergeSym(sym1, sym2))
13706                 v.AddArg3(base, val, mem)
13707                 return true
13708         }
13709         // match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
13710         // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
13711         // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
13712         for {
13713                 off1 := auxIntToInt32(v.AuxInt)
13714                 sym1 := auxToSym(v.Aux)
13715                 if v_0.Op != OpAMD64LEAL {
13716                         break
13717                 }
13718                 off2 := auxIntToInt32(v_0.AuxInt)
13719                 sym2 := auxToSym(v_0.Aux)
13720                 base := v_0.Args[0]
13721                 val := v_1
13722                 mem := v_2
13723                 if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
13724                         break
13725                 }
13726                 v.reset(OpAMD64MOVQstore)
13727                 v.AuxInt = int32ToAuxInt(off1 + off2)
13728                 v.Aux = symToAux(mergeSym(sym1, sym2))
13729                 v.AddArg3(base, val, mem)
13730                 return true
13731         }
13732         // match: (MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
13733         // cond: is32Bit(int64(off1)+int64(off2))
13734         // result: (MOVQstore [off1+off2] {sym} ptr val mem)
13735         for {
13736                 off1 := auxIntToInt32(v.AuxInt)
13737                 sym := auxToSym(v.Aux)
13738                 if v_0.Op != OpAMD64ADDLconst {
13739                         break
13740                 }
13741                 off2 := auxIntToInt32(v_0.AuxInt)
13742                 ptr := v_0.Args[0]
13743                 val := v_1
13744                 mem := v_2
13745                 if !(is32Bit(int64(off1) + int64(off2))) {
13746                         break
13747                 }
13748                 v.reset(OpAMD64MOVQstore)
13749                 v.AuxInt = int32ToAuxInt(off1 + off2)
13750                 v.Aux = symToAux(sym)
13751                 v.AddArg3(ptr, val, mem)
13752                 return true
13753         }
13754         // match: (MOVQstore {sym} [off] ptr y:(ADDQload x [off] {sym} ptr mem) mem)
13755         // cond: y.Uses==1 && clobber(y)
13756         // result: (ADDQmodify [off] {sym} ptr x mem)
13757         for {
13758                 off := auxIntToInt32(v.AuxInt)
13759                 sym := auxToSym(v.Aux)
13760                 ptr := v_0
13761                 y := v_1
13762                 if y.Op != OpAMD64ADDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
13763                         break
13764                 }
13765                 mem := y.Args[2]
13766                 x := y.Args[0]
13767                 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
13768                         break
13769                 }
13770                 v.reset(OpAMD64ADDQmodify)
13771                 v.AuxInt = int32ToAuxInt(off)
13772                 v.Aux = symToAux(sym)
13773                 v.AddArg3(ptr, x, mem)
13774                 return true
13775         }
13776         // match: (MOVQstore {sym} [off] ptr y:(ANDQload x [off] {sym} ptr mem) mem)
13777         // cond: y.Uses==1 && clobber(y)
13778         // result: (ANDQmodify [off] {sym} ptr x mem)
13779         for {
13780                 off := auxIntToInt32(v.AuxInt)
13781                 sym := auxToSym(v.Aux)
13782                 ptr := v_0
13783                 y := v_1
13784                 if y.Op != OpAMD64ANDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
13785                         break
13786                 }
13787                 mem := y.Args[2]
13788                 x := y.Args[0]
13789                 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
13790                         break
13791                 }
13792                 v.reset(OpAMD64ANDQmodify)
13793                 v.AuxInt = int32ToAuxInt(off)
13794                 v.Aux = symToAux(sym)
13795                 v.AddArg3(ptr, x, mem)
13796                 return true
13797         }
13798         // match: (MOVQstore {sym} [off] ptr y:(ORQload x [off] {sym} ptr mem) mem)
13799         // cond: y.Uses==1 && clobber(y)
13800         // result: (ORQmodify [off] {sym} ptr x mem)
13801         for {
13802                 off := auxIntToInt32(v.AuxInt)
13803                 sym := auxToSym(v.Aux)
13804                 ptr := v_0
13805                 y := v_1
13806                 if y.Op != OpAMD64ORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
13807                         break
13808                 }
13809                 mem := y.Args[2]
13810                 x := y.Args[0]
13811                 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
13812                         break
13813                 }
13814                 v.reset(OpAMD64ORQmodify)
13815                 v.AuxInt = int32ToAuxInt(off)
13816                 v.Aux = symToAux(sym)
13817                 v.AddArg3(ptr, x, mem)
13818                 return true
13819         }
13820         // match: (MOVQstore {sym} [off] ptr y:(XORQload x [off] {sym} ptr mem) mem)
13821         // cond: y.Uses==1 && clobber(y)
13822         // result: (XORQmodify [off] {sym} ptr x mem)
13823         for {
13824                 off := auxIntToInt32(v.AuxInt)
13825                 sym := auxToSym(v.Aux)
13826                 ptr := v_0
13827                 y := v_1
13828                 if y.Op != OpAMD64XORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
13829                         break
13830                 }
13831                 mem := y.Args[2]
13832                 x := y.Args[0]
13833                 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
13834                         break
13835                 }
13836                 v.reset(OpAMD64XORQmodify)
13837                 v.AuxInt = int32ToAuxInt(off)
13838                 v.Aux = symToAux(sym)
13839                 v.AddArg3(ptr, x, mem)
13840                 return true
13841         }
13842         // match: (MOVQstore {sym} [off] ptr y:(ADDQ l:(MOVQload [off] {sym} ptr mem) x) mem)
13843         // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
13844         // result: (ADDQmodify [off] {sym} ptr x mem)
13845         for {
13846                 off := auxIntToInt32(v.AuxInt)
13847                 sym := auxToSym(v.Aux)
13848                 ptr := v_0
13849                 y := v_1
13850                 if y.Op != OpAMD64ADDQ {
13851                         break
13852                 }
13853                 _ = y.Args[1]
13854                 y_0 := y.Args[0]
13855                 y_1 := y.Args[1]
13856                 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
13857                         l := y_0
13858                         if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
13859                                 continue
13860                         }
13861                         mem := l.Args[1]
13862                         if ptr != l.Args[0] {
13863                                 continue
13864                         }
13865                         x := y_1
13866                         if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
13867                                 continue
13868                         }
13869                         v.reset(OpAMD64ADDQmodify)
13870                         v.AuxInt = int32ToAuxInt(off)
13871                         v.Aux = symToAux(sym)
13872                         v.AddArg3(ptr, x, mem)
13873                         return true
13874                 }
13875                 break
13876         }
13877         // match: (MOVQstore {sym} [off] ptr y:(SUBQ l:(MOVQload [off] {sym} ptr mem) x) mem)
13878         // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
13879         // result: (SUBQmodify [off] {sym} ptr x mem)
13880         for {
13881                 off := auxIntToInt32(v.AuxInt)
13882                 sym := auxToSym(v.Aux)
13883                 ptr := v_0
13884                 y := v_1
13885                 if y.Op != OpAMD64SUBQ {
13886                         break
13887                 }
13888                 x := y.Args[1]
13889                 l := y.Args[0]
13890                 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
13891                         break
13892                 }
13893                 mem := l.Args[1]
13894                 if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
13895                         break
13896                 }
13897                 v.reset(OpAMD64SUBQmodify)
13898                 v.AuxInt = int32ToAuxInt(off)
13899                 v.Aux = symToAux(sym)
13900                 v.AddArg3(ptr, x, mem)
13901                 return true
13902         }
13903         // match: (MOVQstore {sym} [off] ptr y:(ANDQ l:(MOVQload [off] {sym} ptr mem) x) mem)
13904         // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
13905         // result: (ANDQmodify [off] {sym} ptr x mem)
13906         for {
13907                 off := auxIntToInt32(v.AuxInt)
13908                 sym := auxToSym(v.Aux)
13909                 ptr := v_0
13910                 y := v_1
13911                 if y.Op != OpAMD64ANDQ {
13912                         break
13913                 }
13914                 _ = y.Args[1]
13915                 y_0 := y.Args[0]
13916                 y_1 := y.Args[1]
13917                 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
13918                         l := y_0
13919                         if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
13920                                 continue
13921                         }
13922                         mem := l.Args[1]
13923                         if ptr != l.Args[0] {
13924                                 continue
13925                         }
13926                         x := y_1
13927                         if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
13928                                 continue
13929                         }
13930                         v.reset(OpAMD64ANDQmodify)
13931                         v.AuxInt = int32ToAuxInt(off)
13932                         v.Aux = symToAux(sym)
13933                         v.AddArg3(ptr, x, mem)
13934                         return true
13935                 }
13936                 break
13937         }
13938         // match: (MOVQstore {sym} [off] ptr y:(ORQ l:(MOVQload [off] {sym} ptr mem) x) mem)
13939         // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
13940         // result: (ORQmodify [off] {sym} ptr x mem)
13941         for {
13942                 off := auxIntToInt32(v.AuxInt)
13943                 sym := auxToSym(v.Aux)
13944                 ptr := v_0
13945                 y := v_1
13946                 if y.Op != OpAMD64ORQ {
13947                         break
13948                 }
13949                 _ = y.Args[1]
13950                 y_0 := y.Args[0]
13951                 y_1 := y.Args[1]
13952                 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
13953                         l := y_0
13954                         if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
13955                                 continue
13956                         }
13957                         mem := l.Args[1]
13958                         if ptr != l.Args[0] {
13959                                 continue
13960                         }
13961                         x := y_1
13962                         if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
13963                                 continue
13964                         }
13965                         v.reset(OpAMD64ORQmodify)
13966                         v.AuxInt = int32ToAuxInt(off)
13967                         v.Aux = symToAux(sym)
13968                         v.AddArg3(ptr, x, mem)
13969                         return true
13970                 }
13971                 break
13972         }
13973         // match: (MOVQstore {sym} [off] ptr y:(XORQ l:(MOVQload [off] {sym} ptr mem) x) mem)
13974         // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
13975         // result: (XORQmodify [off] {sym} ptr x mem)
13976         for {
13977                 off := auxIntToInt32(v.AuxInt)
13978                 sym := auxToSym(v.Aux)
13979                 ptr := v_0
13980                 y := v_1
13981                 if y.Op != OpAMD64XORQ {
13982                         break
13983                 }
13984                 _ = y.Args[1]
13985                 y_0 := y.Args[0]
13986                 y_1 := y.Args[1]
13987                 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
13988                         l := y_0
13989                         if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
13990                                 continue
13991                         }
13992                         mem := l.Args[1]
13993                         if ptr != l.Args[0] {
13994                                 continue
13995                         }
13996                         x := y_1
13997                         if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
13998                                 continue
13999                         }
14000                         v.reset(OpAMD64XORQmodify)
14001                         v.AuxInt = int32ToAuxInt(off)
14002                         v.Aux = symToAux(sym)
14003                         v.AddArg3(ptr, x, mem)
14004                         return true
14005                 }
14006                 break
14007         }
14008         // match: (MOVQstore {sym} [off] ptr y:(BTCQ l:(MOVQload [off] {sym} ptr mem) <t> x) mem)
14009         // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
14010         // result: (BTCQmodify [off] {sym} ptr (ANDQconst <t> [63] x) mem)
14011         for {
14012                 off := auxIntToInt32(v.AuxInt)
14013                 sym := auxToSym(v.Aux)
14014                 ptr := v_0
14015                 y := v_1
14016                 if y.Op != OpAMD64BTCQ {
14017                         break
14018                 }
14019                 t := y.Type
14020                 x := y.Args[1]
14021                 l := y.Args[0]
14022                 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
14023                         break
14024                 }
14025                 mem := l.Args[1]
14026                 if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
14027                         break
14028                 }
14029                 v.reset(OpAMD64BTCQmodify)
14030                 v.AuxInt = int32ToAuxInt(off)
14031                 v.Aux = symToAux(sym)
14032                 v0 := b.NewValue0(l.Pos, OpAMD64ANDQconst, t)
14033                 v0.AuxInt = int32ToAuxInt(63)
14034                 v0.AddArg(x)
14035                 v.AddArg3(ptr, v0, mem)
14036                 return true
14037         }
14038         // match: (MOVQstore {sym} [off] ptr y:(BTRQ l:(MOVQload [off] {sym} ptr mem) <t> x) mem)
14039         // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
14040         // result: (BTRQmodify [off] {sym} ptr (ANDQconst <t> [63] x) mem)
14041         for {
14042                 off := auxIntToInt32(v.AuxInt)
14043                 sym := auxToSym(v.Aux)
14044                 ptr := v_0
14045                 y := v_1
14046                 if y.Op != OpAMD64BTRQ {
14047                         break
14048                 }
14049                 t := y.Type
14050                 x := y.Args[1]
14051                 l := y.Args[0]
14052                 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
14053                         break
14054                 }
14055                 mem := l.Args[1]
14056                 if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
14057                         break
14058                 }
14059                 v.reset(OpAMD64BTRQmodify)
14060                 v.AuxInt = int32ToAuxInt(off)
14061                 v.Aux = symToAux(sym)
14062                 v0 := b.NewValue0(l.Pos, OpAMD64ANDQconst, t)
14063                 v0.AuxInt = int32ToAuxInt(63)
14064                 v0.AddArg(x)
14065                 v.AddArg3(ptr, v0, mem)
14066                 return true
14067         }
14068         // match: (MOVQstore {sym} [off] ptr y:(BTSQ l:(MOVQload [off] {sym} ptr mem) <t> x) mem)
14069         // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
14070         // result: (BTSQmodify [off] {sym} ptr (ANDQconst <t> [63] x) mem)
14071         for {
14072                 off := auxIntToInt32(v.AuxInt)
14073                 sym := auxToSym(v.Aux)
14074                 ptr := v_0
14075                 y := v_1
14076                 if y.Op != OpAMD64BTSQ {
14077                         break
14078                 }
14079                 t := y.Type
14080                 x := y.Args[1]
14081                 l := y.Args[0]
14082                 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
14083                         break
14084                 }
14085                 mem := l.Args[1]
14086                 if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
14087                         break
14088                 }
14089                 v.reset(OpAMD64BTSQmodify)
14090                 v.AuxInt = int32ToAuxInt(off)
14091                 v.Aux = symToAux(sym)
14092                 v0 := b.NewValue0(l.Pos, OpAMD64ANDQconst, t)
14093                 v0.AuxInt = int32ToAuxInt(63)
14094                 v0.AddArg(x)
14095                 v.AddArg3(ptr, v0, mem)
14096                 return true
14097         }
14098         // match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
14099         // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
14100         // result: (ADDQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
14101         for {
14102                 off := auxIntToInt32(v.AuxInt)
14103                 sym := auxToSym(v.Aux)
14104                 ptr := v_0
14105                 a := v_1
14106                 if a.Op != OpAMD64ADDQconst {
14107                         break
14108                 }
14109                 c := auxIntToInt32(a.AuxInt)
14110                 l := a.Args[0]
14111                 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
14112                         break
14113                 }
14114                 mem := l.Args[1]
14115                 ptr2 := l.Args[0]
14116                 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
14117                         break
14118                 }
14119                 v.reset(OpAMD64ADDQconstmodify)
14120                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
14121                 v.Aux = symToAux(sym)
14122                 v.AddArg2(ptr, mem)
14123                 return true
14124         }
14125         // match: (MOVQstore [off] {sym} ptr a:(ANDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
14126         // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
14127         // result: (ANDQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
14128         for {
14129                 off := auxIntToInt32(v.AuxInt)
14130                 sym := auxToSym(v.Aux)
14131                 ptr := v_0
14132                 a := v_1
14133                 if a.Op != OpAMD64ANDQconst {
14134                         break
14135                 }
14136                 c := auxIntToInt32(a.AuxInt)
14137                 l := a.Args[0]
14138                 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
14139                         break
14140                 }
14141                 mem := l.Args[1]
14142                 ptr2 := l.Args[0]
14143                 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
14144                         break
14145                 }
14146                 v.reset(OpAMD64ANDQconstmodify)
14147                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
14148                 v.Aux = symToAux(sym)
14149                 v.AddArg2(ptr, mem)
14150                 return true
14151         }
14152         // match: (MOVQstore [off] {sym} ptr a:(ORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
14153         // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
14154         // result: (ORQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
14155         for {
14156                 off := auxIntToInt32(v.AuxInt)
14157                 sym := auxToSym(v.Aux)
14158                 ptr := v_0
14159                 a := v_1
14160                 if a.Op != OpAMD64ORQconst {
14161                         break
14162                 }
14163                 c := auxIntToInt32(a.AuxInt)
14164                 l := a.Args[0]
14165                 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
14166                         break
14167                 }
14168                 mem := l.Args[1]
14169                 ptr2 := l.Args[0]
14170                 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
14171                         break
14172                 }
14173                 v.reset(OpAMD64ORQconstmodify)
14174                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
14175                 v.Aux = symToAux(sym)
14176                 v.AddArg2(ptr, mem)
14177                 return true
14178         }
14179         // match: (MOVQstore [off] {sym} ptr a:(XORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
14180         // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
14181         // result: (XORQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
14182         for {
14183                 off := auxIntToInt32(v.AuxInt)
14184                 sym := auxToSym(v.Aux)
14185                 ptr := v_0
14186                 a := v_1
14187                 if a.Op != OpAMD64XORQconst {
14188                         break
14189                 }
14190                 c := auxIntToInt32(a.AuxInt)
14191                 l := a.Args[0]
14192                 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
14193                         break
14194                 }
14195                 mem := l.Args[1]
14196                 ptr2 := l.Args[0]
14197                 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
14198                         break
14199                 }
14200                 v.reset(OpAMD64XORQconstmodify)
14201                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
14202                 v.Aux = symToAux(sym)
14203                 v.AddArg2(ptr, mem)
14204                 return true
14205         }
14206         // match: (MOVQstore [off] {sym} ptr a:(BTCQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
14207         // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
14208         // result: (BTCQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
14209         for {
14210                 off := auxIntToInt32(v.AuxInt)
14211                 sym := auxToSym(v.Aux)
14212                 ptr := v_0
14213                 a := v_1
14214                 if a.Op != OpAMD64BTCQconst {
14215                         break
14216                 }
14217                 c := auxIntToInt8(a.AuxInt)
14218                 l := a.Args[0]
14219                 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
14220                         break
14221                 }
14222                 mem := l.Args[1]
14223                 ptr2 := l.Args[0]
14224                 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
14225                         break
14226                 }
14227                 v.reset(OpAMD64BTCQconstmodify)
14228                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
14229                 v.Aux = symToAux(sym)
14230                 v.AddArg2(ptr, mem)
14231                 return true
14232         }
14233         // match: (MOVQstore [off] {sym} ptr a:(BTRQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
14234         // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
14235         // result: (BTRQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
14236         for {
14237                 off := auxIntToInt32(v.AuxInt)
14238                 sym := auxToSym(v.Aux)
14239                 ptr := v_0
14240                 a := v_1
14241                 if a.Op != OpAMD64BTRQconst {
14242                         break
14243                 }
14244                 c := auxIntToInt8(a.AuxInt)
14245                 l := a.Args[0]
14246                 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
14247                         break
14248                 }
14249                 mem := l.Args[1]
14250                 ptr2 := l.Args[0]
14251                 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
14252                         break
14253                 }
14254                 v.reset(OpAMD64BTRQconstmodify)
14255                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
14256                 v.Aux = symToAux(sym)
14257                 v.AddArg2(ptr, mem)
14258                 return true
14259         }
14260         // match: (MOVQstore [off] {sym} ptr a:(BTSQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
14261         // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
14262         // result: (BTSQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
14263         for {
14264                 off := auxIntToInt32(v.AuxInt)
14265                 sym := auxToSym(v.Aux)
14266                 ptr := v_0
14267                 a := v_1
14268                 if a.Op != OpAMD64BTSQconst {
14269                         break
14270                 }
14271                 c := auxIntToInt8(a.AuxInt)
14272                 l := a.Args[0]
14273                 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
14274                         break
14275                 }
14276                 mem := l.Args[1]
14277                 ptr2 := l.Args[0]
14278                 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
14279                         break
14280                 }
14281                 v.reset(OpAMD64BTSQconstmodify)
14282                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
14283                 v.Aux = symToAux(sym)
14284                 v.AddArg2(ptr, mem)
14285                 return true
14286         }
14287         // match: (MOVQstore [off] {sym} ptr (MOVQf2i val) mem)
14288         // result: (MOVSDstore [off] {sym} ptr val mem)
14289         for {
14290                 off := auxIntToInt32(v.AuxInt)
14291                 sym := auxToSym(v.Aux)
14292                 ptr := v_0
14293                 if v_1.Op != OpAMD64MOVQf2i {
14294                         break
14295                 }
14296                 val := v_1.Args[0]
14297                 mem := v_2
14298                 v.reset(OpAMD64MOVSDstore)
14299                 v.AuxInt = int32ToAuxInt(off)
14300                 v.Aux = symToAux(sym)
14301                 v.AddArg3(ptr, val, mem)
14302                 return true
14303         }
14304         return false
14305 }
// rewriteValueAMD64_OpAMD64MOVQstoreconst applies the generated rewrite rules
// for OpAMD64MOVQstoreconst (store of a constant quadword) to v, mutating it
// in place. It reports whether any rule fired. Rules are tried in order; the
// first match wins.
//
// NOTE(review): this function is generated from gen/AMD64.rules (see the file
// header) — change the rules and regenerate rather than editing here.
func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd32(off)
	// result: (MOVQstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		// Fold the pointer addition into the store's displacement, provided
		// the combined offset still fits (canAdd32 guards the addition).
		if !(ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(s)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
	// result: (MOVQstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		// Fold an LEAQ address computation into the store: merge the symbols
		// and accumulate the offset into the ValAndOff aux.
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem))
	// cond: config.useSSE && x.Uses == 1 && c2.Off() + 8 == c.Off() && c.Val() == 0 && c2.Val() == 0 && clobber(x)
	// result: (MOVOstorezero [c2.Off()] {s} p mem)
	for {
		c := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64MOVQstoreconst {
			break
		}
		c2 := auxIntToValAndOff(x.AuxInt)
		if auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[1]
		// Combine two adjacent 8-byte zero stores (x at c2.Off(), v at
		// c2.Off()+8) into one 16-byte MOVOstorezero. Requires SSE and that
		// the lower store has no other uses, so it can be clobbered.
		if p != x.Args[0] || !(config.useSSE && x.Uses == 1 && c2.Off()+8 == c.Off() && c.Val() == 0 && c2.Val() == 0 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVOstorezero)
		v.AuxInt = int32ToAuxInt(c2.Off())
		v.Aux = symToAux(s)
		v.AddArg2(p, mem)
		return true
	}
	// match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && sc.canAdd32(off)
	// result: (MOVQstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && sc.canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: sc.canAdd32(off)
	// result: (MOVQstoreconst [sc.addOffset32(off)] {s} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(sc.canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
		v.Aux = symToAux(s)
		v.AddArg2(ptr, mem)
		return true
	}
	// No rule matched; leave v unchanged.
	return false
}
// rewriteValueAMD64_OpAMD64MOVSDload applies the generated rewrite rules for
// OpAMD64MOVSDload (load of a 64-bit float) to v, mutating it in place. It
// reports whether any rule fired. Rules are tried in order; the first match
// wins.
//
// NOTE(review): this function is generated from gen/AMD64.rules (see the file
// header) — change the rules and regenerate rather than editing here.
func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVSDload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		// Fold the constant pointer add into the load's displacement; the
		// sum is computed in int64 to check it still fits in 32 bits.
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		// Fold an LEAQ address computation into the load: merge symbols and
		// accumulate offsets.
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _))
	// result: (MOVQi2f val)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
			break
		}
		val := v_1.Args[1]
		if ptr != v_1.Args[0] {
			break
		}
		// Store-to-load forwarding: reading back a just-stored quadword from
		// the same ptr/off/sym becomes a direct GP->XMM move of the stored
		// value, skipping memory entirely.
		v.reset(OpAMD64MOVQi2f)
		v.AddArg(val)
		return true
	}
	// No rule matched; leave v unchanged.
	return false
}
// rewriteValueAMD64_OpAMD64MOVSDstore applies the generated AMD64.rules
// rewrites for MOVSDstore (store of a 64-bit float).
// Each for-loop below attempts one rule; a "break" means the rule did not
// match and the next rule is tried. It reports whether v was rewritten
// in place.
func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool {
	v_2 := v.Args[2] // mem
	v_1 := v.Args[1] // val
	v_0 := v.Args[0] // ptr
	// match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVSDstore [off1+off2] {sym} ptr val mem)
	// Fold a constant pointer increment into the store's displacement,
	// provided the combined offset still fits in 32 bits.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	// Fold a LEAQ address computation (offset and symbol) into the store.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (MOVSDstore [off] {sym} ptr (MOVQi2f val) mem)
	// result: (MOVQstore [off] {sym} ptr val mem)
	// Storing an int-to-float bitcast: skip the register move and store
	// the integer value directly with MOVQstore.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQi2f {
			break
		}
		val := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVSSload applies the generated AMD64.rules
// rewrites for MOVSSload (load of a 32-bit float).
// It reports whether v was rewritten in place.
func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool {
	v_1 := v.Args[1] // mem
	v_0 := v.Args[0] // ptr
	// match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVSSload [off1+off2] {sym} ptr mem)
	// Fold a constant pointer increment into the load's displacement.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	// Fold a LEAQ address computation (offset and symbol) into the load.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _))
	// result: (MOVLi2f val)
	// Store-to-load forwarding: a float load from the exact address just
	// written by an int store becomes an int-to-float register bitcast,
	// avoiding the memory round trip.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
			break
		}
		val := v_1.Args[1]
		if ptr != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64MOVLi2f)
		v.AddArg(val)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVSSstore applies the generated AMD64.rules
// rewrites for MOVSSstore (store of a 32-bit float).
// It reports whether v was rewritten in place.
func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool {
	v_2 := v.Args[2] // mem
	v_1 := v.Args[1] // val
	v_0 := v.Args[0] // ptr
	// match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVSSstore [off1+off2] {sym} ptr val mem)
	// Fold a constant pointer increment into the store's displacement.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	// Fold a LEAQ address computation (offset and symbol) into the store.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (MOVSSstore [off] {sym} ptr (MOVLi2f val) mem)
	// result: (MOVLstore [off] {sym} ptr val mem)
	// Storing an int-to-float bitcast: skip the register move and store
	// the integer value directly with MOVLstore.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLi2f {
			break
		}
		val := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWQSX applies the generated AMD64.rules
// rewrites for MOVWQSX (sign-extend 16-bit value to 64 bits).
// It reports whether v was rewritten in place.
func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	// Fuse a sign extension with its single-use load into one
	// sign-extending load, placed in the load's original block.
	for {
		x := v_0
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	// Same fusion for a 32-bit load: only the low 16 bits are needed.
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	// Same fusion for a 64-bit load.
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQSX (ANDLconst [c] x))
	// cond: c & 0x8000 == 0
	// result: (ANDLconst [c & 0x7fff] x)
	// If the mask clears bit 15, the 16-bit sign bit is already zero, so
	// sign extension is a no-op; keep just the (narrowed) mask.
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c&0x8000 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0x7fff)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSX (MOVWQSX x))
	// result: (MOVWQSX x)
	// Sign-extending twice from the same width is idempotent.
	for {
		if v_0.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSX (MOVBQSX x))
	// result: (MOVBQSX x)
	// A value already sign-extended from 8 bits is unchanged by a wider
	// 16-bit sign extension.
	for {
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWQSXload applies the generated AMD64.rules
// rewrites for MOVWQSXload (sign-extending 16-bit load).
// It reports whether v was rewritten in place.
func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value) bool {
	v_1 := v.Args[1] // mem
	v_0 := v.Args[0] // ptr
	// match: (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVWQSX x)
	// Store-to-load forwarding: loading back the value just stored at the
	// same address reduces to sign-extending the stored value in registers.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVWstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	// Fold a LEAQ address computation (offset and symbol) into the load.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWQSXload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWQZX applies the generated AMD64.rules
// rewrites for MOVWQZX (zero-extend 16-bit value to 64 bits).
// It reports whether v was rewritten in place.
func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	// A 16-bit load already zero-extends on amd64, so the explicit
	// extension of a single-use load can be dropped; the load is re-issued
	// in its original block with the extension's result type.
	for {
		x := v_0
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	// Narrow a single-use 32-bit load to a zero-extending 16-bit load.
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	// Narrow a single-use 64-bit load the same way.
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQZX x)
	// cond: zeroUpper48Bits(x,3)
	// result: x
	// If x provably has its upper 48 bits already zero, the extension is
	// a no-op and v becomes a copy of x.
	for {
		x := v_0
		if !(zeroUpper48Bits(x, 3)) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (MOVWQZX (ANDLconst [c] x))
	// result: (ANDLconst [c & 0xffff] x)
	// Combine the zero extension into the mask constant.
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0xffff)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQZX (MOVWQZX x))
	// result: (MOVWQZX x)
	// Zero-extending twice from the same width is idempotent.
	for {
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQZX (MOVBQZX x))
	// result: (MOVBQZX x)
	// A value already zero-extended from 8 bits is unchanged by a wider
	// 16-bit zero extension.
	for {
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWload applies the generated AMD64.rules
// rewrites for MOVWload (zero-extending 16-bit load).
// It reports whether v was rewritten in place.
func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool {
	v_1 := v.Args[1] // mem
	v_0 := v.Args[0] // ptr
	b := v.Block
	config := b.Func.Config
	// match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVWQZX x)
	// Store-to-load forwarding: loading back the value just stored at the
	// same address reduces to zero-extending the stored value in registers.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVWstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVWload [off1+off2] {sym} ptr mem)
	// Fold a constant 64-bit pointer increment into the displacement.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	// Fold a LEAQ address computation (offset and symbol) into the load.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
	// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	// 32-bit (LEAL) variant of the address fold above.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVWload [off1+off2] {sym} ptr mem)
	// 32-bit (ADDLconst) variant of the constant-offset fold.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
	// A load from a read-only global (SB-relative) is resolved at compile
	// time by reading the 16-bit value straight out of the symbol's data.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpSB || !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
		return true
	}
	return false
}
15098 func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool {
15099         v_2 := v.Args[2]
15100         v_1 := v.Args[1]
15101         v_0 := v.Args[0]
15102         b := v.Block
15103         typ := &b.Func.Config.Types
15104         // match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem)
15105         // result: (MOVWstore [off] {sym} ptr x mem)
15106         for {
15107                 off := auxIntToInt32(v.AuxInt)
15108                 sym := auxToSym(v.Aux)
15109                 ptr := v_0
15110                 if v_1.Op != OpAMD64MOVWQSX {
15111                         break
15112                 }
15113                 x := v_1.Args[0]
15114                 mem := v_2
15115                 v.reset(OpAMD64MOVWstore)
15116                 v.AuxInt = int32ToAuxInt(off)
15117                 v.Aux = symToAux(sym)
15118                 v.AddArg3(ptr, x, mem)
15119                 return true
15120         }
15121         // match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem)
15122         // result: (MOVWstore [off] {sym} ptr x mem)
15123         for {
15124                 off := auxIntToInt32(v.AuxInt)
15125                 sym := auxToSym(v.Aux)
15126                 ptr := v_0
15127                 if v_1.Op != OpAMD64MOVWQZX {
15128                         break
15129                 }
15130                 x := v_1.Args[0]
15131                 mem := v_2
15132                 v.reset(OpAMD64MOVWstore)
15133                 v.AuxInt = int32ToAuxInt(off)
15134                 v.Aux = symToAux(sym)
15135                 v.AddArg3(ptr, x, mem)
15136                 return true
15137         }
15138         // match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
15139         // cond: is32Bit(int64(off1)+int64(off2))
15140         // result: (MOVWstore [off1+off2] {sym} ptr val mem)
15141         for {
15142                 off1 := auxIntToInt32(v.AuxInt)
15143                 sym := auxToSym(v.Aux)
15144                 if v_0.Op != OpAMD64ADDQconst {
15145                         break
15146                 }
15147                 off2 := auxIntToInt32(v_0.AuxInt)
15148                 ptr := v_0.Args[0]
15149                 val := v_1
15150                 mem := v_2
15151                 if !(is32Bit(int64(off1) + int64(off2))) {
15152                         break
15153                 }
15154                 v.reset(OpAMD64MOVWstore)
15155                 v.AuxInt = int32ToAuxInt(off1 + off2)
15156                 v.Aux = symToAux(sym)
15157                 v.AddArg3(ptr, val, mem)
15158                 return true
15159         }
15160         // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
15161         // result: (MOVWstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem)
15162         for {
15163                 off := auxIntToInt32(v.AuxInt)
15164                 sym := auxToSym(v.Aux)
15165                 ptr := v_0
15166                 if v_1.Op != OpAMD64MOVLconst {
15167                         break
15168                 }
15169                 c := auxIntToInt32(v_1.AuxInt)
15170                 mem := v_2
15171                 v.reset(OpAMD64MOVWstoreconst)
15172                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
15173                 v.Aux = symToAux(sym)
15174                 v.AddArg2(ptr, mem)
15175                 return true
15176         }
15177         // match: (MOVWstore [off] {sym} ptr (MOVQconst [c]) mem)
15178         // result: (MOVWstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem)
15179         for {
15180                 off := auxIntToInt32(v.AuxInt)
15181                 sym := auxToSym(v.Aux)
15182                 ptr := v_0
15183                 if v_1.Op != OpAMD64MOVQconst {
15184                         break
15185                 }
15186                 c := auxIntToInt64(v_1.AuxInt)
15187                 mem := v_2
15188                 v.reset(OpAMD64MOVWstoreconst)
15189                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
15190                 v.Aux = symToAux(sym)
15191                 v.AddArg2(ptr, mem)
15192                 return true
15193         }
15194         // match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
15195         // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
15196         // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
15197         for {
15198                 off1 := auxIntToInt32(v.AuxInt)
15199                 sym1 := auxToSym(v.Aux)
15200                 if v_0.Op != OpAMD64LEAQ {
15201                         break
15202                 }
15203                 off2 := auxIntToInt32(v_0.AuxInt)
15204                 sym2 := auxToSym(v_0.Aux)
15205                 base := v_0.Args[0]
15206                 val := v_1
15207                 mem := v_2
15208                 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
15209                         break
15210                 }
15211                 v.reset(OpAMD64MOVWstore)
15212                 v.AuxInt = int32ToAuxInt(off1 + off2)
15213                 v.Aux = symToAux(mergeSym(sym1, sym2))
15214                 v.AddArg3(base, val, mem)
15215                 return true
15216         }
15217         // match: (MOVWstore [i] {s} p (SHRLconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
15218         // cond: x.Uses == 1 && clobber(x)
15219         // result: (MOVLstore [i-2] {s} p w mem)
15220         for {
15221                 i := auxIntToInt32(v.AuxInt)
15222                 s := auxToSym(v.Aux)
15223                 p := v_0
15224                 if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 16 {
15225                         break
15226                 }
15227                 w := v_1.Args[0]
15228                 x := v_2
15229                 if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
15230                         break
15231                 }
15232                 mem := x.Args[2]
15233                 if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
15234                         break
15235                 }
15236                 v.reset(OpAMD64MOVLstore)
15237                 v.AuxInt = int32ToAuxInt(i - 2)
15238                 v.Aux = symToAux(s)
15239                 v.AddArg3(p, w, mem)
15240                 return true
15241         }
15242         // match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
15243         // cond: x.Uses == 1 && clobber(x)
15244         // result: (MOVLstore [i-2] {s} p w mem)
15245         for {
15246                 i := auxIntToInt32(v.AuxInt)
15247                 s := auxToSym(v.Aux)
15248                 p := v_0
15249                 if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 16 {
15250                         break
15251                 }
15252                 w := v_1.Args[0]
15253                 x := v_2
15254                 if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
15255                         break
15256                 }
15257                 mem := x.Args[2]
15258                 if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
15259                         break
15260                 }
15261                 v.reset(OpAMD64MOVLstore)
15262                 v.AuxInt = int32ToAuxInt(i - 2)
15263                 v.Aux = symToAux(s)
15264                 v.AddArg3(p, w, mem)
15265                 return true
15266         }
15267         // match: (MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem))
15268         // cond: x.Uses == 1 && clobber(x)
15269         // result: (MOVLstore [i-2] {s} p w0 mem)
15270         for {
15271                 i := auxIntToInt32(v.AuxInt)
15272                 s := auxToSym(v.Aux)
15273                 p := v_0
15274                 if v_1.Op != OpAMD64SHRLconst {
15275                         break
15276                 }
15277                 j := auxIntToInt8(v_1.AuxInt)
15278                 w := v_1.Args[0]
15279                 x := v_2
15280                 if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
15281                         break
15282                 }
15283                 mem := x.Args[2]
15284                 if p != x.Args[0] {
15285                         break
15286                 }
15287                 w0 := x.Args[1]
15288                 if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
15289                         break
15290                 }
15291                 v.reset(OpAMD64MOVLstore)
15292                 v.AuxInt = int32ToAuxInt(i - 2)
15293                 v.Aux = symToAux(s)
15294                 v.AddArg3(p, w0, mem)
15295                 return true
15296         }
15297         // match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem))
15298         // cond: x.Uses == 1 && clobber(x)
15299         // result: (MOVLstore [i-2] {s} p w0 mem)
15300         for {
15301                 i := auxIntToInt32(v.AuxInt)
15302                 s := auxToSym(v.Aux)
15303                 p := v_0
15304                 if v_1.Op != OpAMD64SHRQconst {
15305                         break
15306                 }
15307                 j := auxIntToInt8(v_1.AuxInt)
15308                 w := v_1.Args[0]
15309                 x := v_2
15310                 if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
15311                         break
15312                 }
15313                 mem := x.Args[2]
15314                 if p != x.Args[0] {
15315                         break
15316                 }
15317                 w0 := x.Args[1]
15318                 if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
15319                         break
15320                 }
15321                 v.reset(OpAMD64MOVLstore)
15322                 v.AuxInt = int32ToAuxInt(i - 2)
15323                 v.Aux = symToAux(s)
15324                 v.AddArg3(p, w0, mem)
15325                 return true
15326         }
15327         // match: (MOVWstore [i] {s} p1 (SHRLconst [16] w) x:(MOVWstore [i] {s} p0 w mem))
15328         // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)
15329         // result: (MOVLstore [i] {s} p0 w mem)
15330         for {
15331                 i := auxIntToInt32(v.AuxInt)
15332                 s := auxToSym(v.Aux)
15333                 p1 := v_0
15334                 if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 16 {
15335                         break
15336                 }
15337                 w := v_1.Args[0]
15338                 x := v_2
15339                 if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
15340                         break
15341                 }
15342                 mem := x.Args[2]
15343                 p0 := x.Args[0]
15344                 if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
15345                         break
15346                 }
15347                 v.reset(OpAMD64MOVLstore)
15348                 v.AuxInt = int32ToAuxInt(i)
15349                 v.Aux = symToAux(s)
15350                 v.AddArg3(p0, w, mem)
15351                 return true
15352         }
15353         // match: (MOVWstore [i] {s} p1 (SHRQconst [16] w) x:(MOVWstore [i] {s} p0 w mem))
15354         // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)
15355         // result: (MOVLstore [i] {s} p0 w mem)
15356         for {
15357                 i := auxIntToInt32(v.AuxInt)
15358                 s := auxToSym(v.Aux)
15359                 p1 := v_0
15360                 if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 16 {
15361                         break
15362                 }
15363                 w := v_1.Args[0]
15364                 x := v_2
15365                 if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
15366                         break
15367                 }
15368                 mem := x.Args[2]
15369                 p0 := x.Args[0]
15370                 if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
15371                         break
15372                 }
15373                 v.reset(OpAMD64MOVLstore)
15374                 v.AuxInt = int32ToAuxInt(i)
15375                 v.Aux = symToAux(s)
15376                 v.AddArg3(p0, w, mem)
15377                 return true
15378         }
15379         // match: (MOVWstore [i] {s} p1 (SHRLconst [j] w) x:(MOVWstore [i] {s} p0 w0:(SHRLconst [j-16] w) mem))
15380         // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)
15381         // result: (MOVLstore [i] {s} p0 w0 mem)
15382         for {
15383                 i := auxIntToInt32(v.AuxInt)
15384                 s := auxToSym(v.Aux)
15385                 p1 := v_0
15386                 if v_1.Op != OpAMD64SHRLconst {
15387                         break
15388                 }
15389                 j := auxIntToInt8(v_1.AuxInt)
15390                 w := v_1.Args[0]
15391                 x := v_2
15392                 if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
15393                         break
15394                 }
15395                 mem := x.Args[2]
15396                 p0 := x.Args[0]
15397                 w0 := x.Args[1]
15398                 if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
15399                         break
15400                 }
15401                 v.reset(OpAMD64MOVLstore)
15402                 v.AuxInt = int32ToAuxInt(i)
15403                 v.Aux = symToAux(s)
15404                 v.AddArg3(p0, w0, mem)
15405                 return true
15406         }
15407         // match: (MOVWstore [i] {s} p1 (SHRQconst [j] w) x:(MOVWstore [i] {s} p0 w0:(SHRQconst [j-16] w) mem))
15408         // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)
15409         // result: (MOVLstore [i] {s} p0 w0 mem)
15410         for {
15411                 i := auxIntToInt32(v.AuxInt)
15412                 s := auxToSym(v.Aux)
15413                 p1 := v_0
15414                 if v_1.Op != OpAMD64SHRQconst {
15415                         break
15416                 }
15417                 j := auxIntToInt8(v_1.AuxInt)
15418                 w := v_1.Args[0]
15419                 x := v_2
15420                 if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
15421                         break
15422                 }
15423                 mem := x.Args[2]
15424                 p0 := x.Args[0]
15425                 w0 := x.Args[1]
15426                 if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
15427                         break
15428                 }
15429                 v.reset(OpAMD64MOVLstore)
15430                 v.AuxInt = int32ToAuxInt(i)
15431                 v.Aux = symToAux(s)
15432                 v.AddArg3(p0, w0, mem)
15433                 return true
15434         }
15435         // match: (MOVWstore [i] {s} p x1:(MOVWload [j] {s2} p2 mem) mem2:(MOVWstore [i-2] {s} p x2:(MOVWload [j-2] {s2} p2 mem) mem))
15436         // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)
15437         // result: (MOVLstore [i-2] {s} p (MOVLload [j-2] {s2} p2 mem) mem)
15438         for {
15439                 i := auxIntToInt32(v.AuxInt)
15440                 s := auxToSym(v.Aux)
15441                 p := v_0
15442                 x1 := v_1
15443                 if x1.Op != OpAMD64MOVWload {
15444                         break
15445                 }
15446                 j := auxIntToInt32(x1.AuxInt)
15447                 s2 := auxToSym(x1.Aux)
15448                 mem := x1.Args[1]
15449                 p2 := x1.Args[0]
15450                 mem2 := v_2
15451                 if mem2.Op != OpAMD64MOVWstore || auxIntToInt32(mem2.AuxInt) != i-2 || auxToSym(mem2.Aux) != s {
15452                         break
15453                 }
15454                 _ = mem2.Args[2]
15455                 if p != mem2.Args[0] {
15456                         break
15457                 }
15458                 x2 := mem2.Args[1]
15459                 if x2.Op != OpAMD64MOVWload || auxIntToInt32(x2.AuxInt) != j-2 || auxToSym(x2.Aux) != s2 {
15460                         break
15461                 }
15462                 _ = x2.Args[1]
15463                 if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)) {
15464                         break
15465                 }
15466                 v.reset(OpAMD64MOVLstore)
15467                 v.AuxInt = int32ToAuxInt(i - 2)
15468                 v.Aux = symToAux(s)
15469                 v0 := b.NewValue0(x2.Pos, OpAMD64MOVLload, typ.UInt32)
15470                 v0.AuxInt = int32ToAuxInt(j - 2)
15471                 v0.Aux = symToAux(s2)
15472                 v0.AddArg2(p2, mem)
15473                 v.AddArg3(p, v0, mem)
15474                 return true
15475         }
15476         // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
15477         // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
15478         // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
15479         for {
15480                 off1 := auxIntToInt32(v.AuxInt)
15481                 sym1 := auxToSym(v.Aux)
15482                 if v_0.Op != OpAMD64LEAL {
15483                         break
15484                 }
15485                 off2 := auxIntToInt32(v_0.AuxInt)
15486                 sym2 := auxToSym(v_0.Aux)
15487                 base := v_0.Args[0]
15488                 val := v_1
15489                 mem := v_2
15490                 if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
15491                         break
15492                 }
15493                 v.reset(OpAMD64MOVWstore)
15494                 v.AuxInt = int32ToAuxInt(off1 + off2)
15495                 v.Aux = symToAux(mergeSym(sym1, sym2))
15496                 v.AddArg3(base, val, mem)
15497                 return true
15498         }
15499         // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
15500         // cond: is32Bit(int64(off1)+int64(off2))
15501         // result: (MOVWstore [off1+off2] {sym} ptr val mem)
15502         for {
15503                 off1 := auxIntToInt32(v.AuxInt)
15504                 sym := auxToSym(v.Aux)
15505                 if v_0.Op != OpAMD64ADDLconst {
15506                         break
15507                 }
15508                 off2 := auxIntToInt32(v_0.AuxInt)
15509                 ptr := v_0.Args[0]
15510                 val := v_1
15511                 mem := v_2
15512                 if !(is32Bit(int64(off1) + int64(off2))) {
15513                         break
15514                 }
15515                 v.reset(OpAMD64MOVWstore)
15516                 v.AuxInt = int32ToAuxInt(off1 + off2)
15517                 v.Aux = symToAux(sym)
15518                 v.AddArg3(ptr, val, mem)
15519                 return true
15520         }
15521         return false
15522 }
// rewriteValueAMD64_OpAMD64MOVWstoreconst applies the AMD64 rewrite rules
// for MOVWstoreconst (a 2-byte store of an immediate constant). It mutates
// v in place via v.reset(...) when a rule matches and returns true; it
// returns false when no rule applies.
//
// NOTE(review): generated from gen/AMD64.rules — change the rules and
// regenerate rather than editing this function by hand.
func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool {
	v_1 := v.Args[1] // mem
	v_0 := v.Args[0] // ptr
	// Fold an ADDQconst on the address into the store's ValAndOff offset,
	// guarded by canAdd32 so the combined offset still fits.
	// match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd32(off)
	// result: (MOVWstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(s)
		v.AddArg2(ptr, mem)
		return true
	}
	// Fold a LEAQ (symbol + offset) into the store, merging symbols when
	// canMergeSym allows it.
	// match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
	// result: (MOVWstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	// Merge two adjacent 2-byte constant stores (this one at offset
	// a.Off()+2, the matched one at a.Off()) into one 4-byte store whose
	// constant packs both halves. The clobber(x) in the condition marks x
	// dead as a side effect, so it only runs after the other checks pass.
	// match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
	// cond: x.Uses == 1 && a.Off() + 2 == c.Off() && clobber(x)
	// result: (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem)
	for {
		c := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64MOVWstoreconst {
			break
		}
		a := auxIntToValAndOff(x.AuxInt)
		if auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[1]
		if p != x.Args[0] || !(x.Uses == 1 && a.Off()+2 == c.Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xffff|c.Val()<<16, a.Off()))
		v.Aux = symToAux(s)
		v.AddArg2(p, mem)
		return true
	}
	// Same store-pair merge with the roles of the two offsets swapped:
	// here v is the lower store [a] and x is the upper store [c].
	// match: (MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem))
	// cond: x.Uses == 1 && a.Off() + 2 == c.Off() && clobber(x)
	// result: (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem)
	for {
		a := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64MOVWstoreconst {
			break
		}
		c := auxIntToValAndOff(x.AuxInt)
		if auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[1]
		if p != x.Args[0] || !(x.Uses == 1 && a.Off()+2 == c.Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xffff|c.Val()<<16, a.Off()))
		v.Aux = symToAux(s)
		v.AddArg2(p, mem)
		return true
	}
	// 32-bit (LEAL/ADDLconst) variants of the address-folding rules above.
	// match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && sc.canAdd32(off)
	// result: (MOVWstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && sc.canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: sc.canAdd32(off)
	// result: (MOVWstoreconst [sc.addOffset32(off)] {s} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(sc.canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
		v.Aux = symToAux(s)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MULL applies the AMD64 rewrite rules for MULL
// (32-bit multiply). It rewrites v in place and returns true when a rule
// matches, false otherwise.
//
// NOTE(review): generated from gen/AMD64.rules — change the rules and
// regenerate rather than editing this function by hand.
func rewriteValueAMD64_OpAMD64MULL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// Strength-reduce a multiply by a constant into MULLconst.
	// match: (MULL x (MOVLconst [c]))
	// result: (MULLconst [c] x)
	for {
		// MULL is commutative: the inner loop tries both operand orders
		// by swapping v_0 and v_1 on its second iteration.
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64MULLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	return false
}
15685 func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
15686         v_0 := v.Args[0]
15687         b := v.Block
15688         // match: (MULLconst [c] (MULLconst [d] x))
15689         // result: (MULLconst [c * d] x)
15690         for {
15691                 c := auxIntToInt32(v.AuxInt)
15692                 if v_0.Op != OpAMD64MULLconst {
15693                         break
15694                 }
15695                 d := auxIntToInt32(v_0.AuxInt)
15696                 x := v_0.Args[0]
15697                 v.reset(OpAMD64MULLconst)
15698                 v.AuxInt = int32ToAuxInt(c * d)
15699                 v.AddArg(x)
15700                 return true
15701         }
15702         // match: (MULLconst [-9] x)
15703         // result: (NEGL (LEAL8 <v.Type> x x))
15704         for {
15705                 if auxIntToInt32(v.AuxInt) != -9 {
15706                         break
15707                 }
15708                 x := v_0
15709                 v.reset(OpAMD64NEGL)
15710                 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
15711                 v0.AddArg2(x, x)
15712                 v.AddArg(v0)
15713                 return true
15714         }
15715         // match: (MULLconst [-5] x)
15716         // result: (NEGL (LEAL4 <v.Type> x x))
15717         for {
15718                 if auxIntToInt32(v.AuxInt) != -5 {
15719                         break
15720                 }
15721                 x := v_0
15722                 v.reset(OpAMD64NEGL)
15723                 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
15724                 v0.AddArg2(x, x)
15725                 v.AddArg(v0)
15726                 return true
15727         }
15728         // match: (MULLconst [-3] x)
15729         // result: (NEGL (LEAL2 <v.Type> x x))
15730         for {
15731                 if auxIntToInt32(v.AuxInt) != -3 {
15732                         break
15733                 }
15734                 x := v_0
15735                 v.reset(OpAMD64NEGL)
15736                 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
15737                 v0.AddArg2(x, x)
15738                 v.AddArg(v0)
15739                 return true
15740         }
15741         // match: (MULLconst [-1] x)
15742         // result: (NEGL x)
15743         for {
15744                 if auxIntToInt32(v.AuxInt) != -1 {
15745                         break
15746                 }
15747                 x := v_0
15748                 v.reset(OpAMD64NEGL)
15749                 v.AddArg(x)
15750                 return true
15751         }
15752         // match: (MULLconst [ 0] _)
15753         // result: (MOVLconst [0])
15754         for {
15755                 if auxIntToInt32(v.AuxInt) != 0 {
15756                         break
15757                 }
15758                 v.reset(OpAMD64MOVLconst)
15759                 v.AuxInt = int32ToAuxInt(0)
15760                 return true
15761         }
15762         // match: (MULLconst [ 1] x)
15763         // result: x
15764         for {
15765                 if auxIntToInt32(v.AuxInt) != 1 {
15766                         break
15767                 }
15768                 x := v_0
15769                 v.copyOf(x)
15770                 return true
15771         }
15772         // match: (MULLconst [ 3] x)
15773         // result: (LEAL2 x x)
15774         for {
15775                 if auxIntToInt32(v.AuxInt) != 3 {
15776                         break
15777                 }
15778                 x := v_0
15779                 v.reset(OpAMD64LEAL2)
15780                 v.AddArg2(x, x)
15781                 return true
15782         }
15783         // match: (MULLconst [ 5] x)
15784         // result: (LEAL4 x x)
15785         for {
15786                 if auxIntToInt32(v.AuxInt) != 5 {
15787                         break
15788                 }
15789                 x := v_0
15790                 v.reset(OpAMD64LEAL4)
15791                 v.AddArg2(x, x)
15792                 return true
15793         }
15794         // match: (MULLconst [ 7] x)
15795         // result: (LEAL2 x (LEAL2 <v.Type> x x))
15796         for {
15797                 if auxIntToInt32(v.AuxInt) != 7 {
15798                         break
15799                 }
15800                 x := v_0
15801                 v.reset(OpAMD64LEAL2)
15802                 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
15803                 v0.AddArg2(x, x)
15804                 v.AddArg2(x, v0)
15805                 return true
15806         }
15807         // match: (MULLconst [ 9] x)
15808         // result: (LEAL8 x x)
15809         for {
15810                 if auxIntToInt32(v.AuxInt) != 9 {
15811                         break
15812                 }
15813                 x := v_0
15814                 v.reset(OpAMD64LEAL8)
15815                 v.AddArg2(x, x)
15816                 return true
15817         }
15818         // match: (MULLconst [11] x)
15819         // result: (LEAL2 x (LEAL4 <v.Type> x x))
15820         for {
15821                 if auxIntToInt32(v.AuxInt) != 11 {
15822                         break
15823                 }
15824                 x := v_0
15825                 v.reset(OpAMD64LEAL2)
15826                 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
15827                 v0.AddArg2(x, x)
15828                 v.AddArg2(x, v0)
15829                 return true
15830         }
15831         // match: (MULLconst [13] x)
15832         // result: (LEAL4 x (LEAL2 <v.Type> x x))
15833         for {
15834                 if auxIntToInt32(v.AuxInt) != 13 {
15835                         break
15836                 }
15837                 x := v_0
15838                 v.reset(OpAMD64LEAL4)
15839                 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
15840                 v0.AddArg2(x, x)
15841                 v.AddArg2(x, v0)
15842                 return true
15843         }
15844         // match: (MULLconst [19] x)
15845         // result: (LEAL2 x (LEAL8 <v.Type> x x))
15846         for {
15847                 if auxIntToInt32(v.AuxInt) != 19 {
15848                         break
15849                 }
15850                 x := v_0
15851                 v.reset(OpAMD64LEAL2)
15852                 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
15853                 v0.AddArg2(x, x)
15854                 v.AddArg2(x, v0)
15855                 return true
15856         }
15857         // match: (MULLconst [21] x)
15858         // result: (LEAL4 x (LEAL4 <v.Type> x x))
15859         for {
15860                 if auxIntToInt32(v.AuxInt) != 21 {
15861                         break
15862                 }
15863                 x := v_0
15864                 v.reset(OpAMD64LEAL4)
15865                 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
15866                 v0.AddArg2(x, x)
15867                 v.AddArg2(x, v0)
15868                 return true
15869         }
15870         // match: (MULLconst [25] x)
15871         // result: (LEAL8 x (LEAL2 <v.Type> x x))
15872         for {
15873                 if auxIntToInt32(v.AuxInt) != 25 {
15874                         break
15875                 }
15876                 x := v_0
15877                 v.reset(OpAMD64LEAL8)
15878                 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
15879                 v0.AddArg2(x, x)
15880                 v.AddArg2(x, v0)
15881                 return true
15882         }
15883         // match: (MULLconst [27] x)
15884         // result: (LEAL8 (LEAL2 <v.Type> x x) (LEAL2 <v.Type> x x))
15885         for {
15886                 if auxIntToInt32(v.AuxInt) != 27 {
15887                         break
15888                 }
15889                 x := v_0
15890                 v.reset(OpAMD64LEAL8)
15891                 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
15892                 v0.AddArg2(x, x)
15893                 v.AddArg2(v0, v0)
15894                 return true
15895         }
15896         // match: (MULLconst [37] x)
15897         // result: (LEAL4 x (LEAL8 <v.Type> x x))
15898         for {
15899                 if auxIntToInt32(v.AuxInt) != 37 {
15900                         break
15901                 }
15902                 x := v_0
15903                 v.reset(OpAMD64LEAL4)
15904                 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
15905                 v0.AddArg2(x, x)
15906                 v.AddArg2(x, v0)
15907                 return true
15908         }
15909         // match: (MULLconst [41] x)
15910         // result: (LEAL8 x (LEAL4 <v.Type> x x))
15911         for {
15912                 if auxIntToInt32(v.AuxInt) != 41 {
15913                         break
15914                 }
15915                 x := v_0
15916                 v.reset(OpAMD64LEAL8)
15917                 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
15918                 v0.AddArg2(x, x)
15919                 v.AddArg2(x, v0)
15920                 return true
15921         }
15922         // match: (MULLconst [45] x)
15923         // result: (LEAL8 (LEAL4 <v.Type> x x) (LEAL4 <v.Type> x x))
15924         for {
15925                 if auxIntToInt32(v.AuxInt) != 45 {
15926                         break
15927                 }
15928                 x := v_0
15929                 v.reset(OpAMD64LEAL8)
15930                 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
15931                 v0.AddArg2(x, x)
15932                 v.AddArg2(v0, v0)
15933                 return true
15934         }
15935         // match: (MULLconst [73] x)
15936         // result: (LEAL8 x (LEAL8 <v.Type> x x))
15937         for {
15938                 if auxIntToInt32(v.AuxInt) != 73 {
15939                         break
15940                 }
15941                 x := v_0
15942                 v.reset(OpAMD64LEAL8)
15943                 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
15944                 v0.AddArg2(x, x)
15945                 v.AddArg2(x, v0)
15946                 return true
15947         }
15948         // match: (MULLconst [81] x)
15949         // result: (LEAL8 (LEAL8 <v.Type> x x) (LEAL8 <v.Type> x x))
15950         for {
15951                 if auxIntToInt32(v.AuxInt) != 81 {
15952                         break
15953                 }
15954                 x := v_0
15955                 v.reset(OpAMD64LEAL8)
15956                 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
15957                 v0.AddArg2(x, x)
15958                 v.AddArg2(v0, v0)
15959                 return true
15960         }
15961         // match: (MULLconst [c] x)
15962         // cond: isPowerOfTwo64(int64(c)+1) && c >= 15
15963         // result: (SUBL (SHLLconst <v.Type> [int8(log64(int64(c)+1))] x) x)
15964         for {
15965                 c := auxIntToInt32(v.AuxInt)
15966                 x := v_0
15967                 if !(isPowerOfTwo64(int64(c)+1) && c >= 15) {
15968                         break
15969                 }
15970                 v.reset(OpAMD64SUBL)
15971                 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
15972                 v0.AuxInt = int8ToAuxInt(int8(log64(int64(c) + 1)))
15973                 v0.AddArg(x)
15974                 v.AddArg2(v0, x)
15975                 return true
15976         }
15977         // match: (MULLconst [c] x)
15978         // cond: isPowerOfTwo32(c-1) && c >= 17
15979         // result: (LEAL1 (SHLLconst <v.Type> [int8(log32(c-1))] x) x)
15980         for {
15981                 c := auxIntToInt32(v.AuxInt)
15982                 x := v_0
15983                 if !(isPowerOfTwo32(c-1) && c >= 17) {
15984                         break
15985                 }
15986                 v.reset(OpAMD64LEAL1)
15987                 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
15988                 v0.AuxInt = int8ToAuxInt(int8(log32(c - 1)))
15989                 v0.AddArg(x)
15990                 v.AddArg2(v0, x)
15991                 return true
15992         }
15993         // match: (MULLconst [c] x)
15994         // cond: isPowerOfTwo32(c-2) && c >= 34
15995         // result: (LEAL2 (SHLLconst <v.Type> [int8(log32(c-2))] x) x)
15996         for {
15997                 c := auxIntToInt32(v.AuxInt)
15998                 x := v_0
15999                 if !(isPowerOfTwo32(c-2) && c >= 34) {
16000                         break
16001                 }
16002                 v.reset(OpAMD64LEAL2)
16003                 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
16004                 v0.AuxInt = int8ToAuxInt(int8(log32(c - 2)))
16005                 v0.AddArg(x)
16006                 v.AddArg2(v0, x)
16007                 return true
16008         }
16009         // match: (MULLconst [c] x)
16010         // cond: isPowerOfTwo32(c-4) && c >= 68
16011         // result: (LEAL4 (SHLLconst <v.Type> [int8(log32(c-4))] x) x)
16012         for {
16013                 c := auxIntToInt32(v.AuxInt)
16014                 x := v_0
16015                 if !(isPowerOfTwo32(c-4) && c >= 68) {
16016                         break
16017                 }
16018                 v.reset(OpAMD64LEAL4)
16019                 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
16020                 v0.AuxInt = int8ToAuxInt(int8(log32(c - 4)))
16021                 v0.AddArg(x)
16022                 v.AddArg2(v0, x)
16023                 return true
16024         }
16025         // match: (MULLconst [c] x)
16026         // cond: isPowerOfTwo32(c-8) && c >= 136
16027         // result: (LEAL8 (SHLLconst <v.Type> [int8(log32(c-8))] x) x)
16028         for {
16029                 c := auxIntToInt32(v.AuxInt)
16030                 x := v_0
16031                 if !(isPowerOfTwo32(c-8) && c >= 136) {
16032                         break
16033                 }
16034                 v.reset(OpAMD64LEAL8)
16035                 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
16036                 v0.AuxInt = int8ToAuxInt(int8(log32(c - 8)))
16037                 v0.AddArg(x)
16038                 v.AddArg2(v0, x)
16039                 return true
16040         }
16041         // match: (MULLconst [c] x)
16042         // cond: c%3 == 0 && isPowerOfTwo32(c/3)
16043         // result: (SHLLconst [int8(log32(c/3))] (LEAL2 <v.Type> x x))
16044         for {
16045                 c := auxIntToInt32(v.AuxInt)
16046                 x := v_0
16047                 if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
16048                         break
16049                 }
16050                 v.reset(OpAMD64SHLLconst)
16051                 v.AuxInt = int8ToAuxInt(int8(log32(c / 3)))
16052                 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
16053                 v0.AddArg2(x, x)
16054                 v.AddArg(v0)
16055                 return true
16056         }
16057         // match: (MULLconst [c] x)
16058         // cond: c%5 == 0 && isPowerOfTwo32(c/5)
16059         // result: (SHLLconst [int8(log32(c/5))] (LEAL4 <v.Type> x x))
16060         for {
16061                 c := auxIntToInt32(v.AuxInt)
16062                 x := v_0
16063                 if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
16064                         break
16065                 }
16066                 v.reset(OpAMD64SHLLconst)
16067                 v.AuxInt = int8ToAuxInt(int8(log32(c / 5)))
16068                 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
16069                 v0.AddArg2(x, x)
16070                 v.AddArg(v0)
16071                 return true
16072         }
16073         // match: (MULLconst [c] x)
16074         // cond: c%9 == 0 && isPowerOfTwo32(c/9)
16075         // result: (SHLLconst [int8(log32(c/9))] (LEAL8 <v.Type> x x))
16076         for {
16077                 c := auxIntToInt32(v.AuxInt)
16078                 x := v_0
16079                 if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
16080                         break
16081                 }
16082                 v.reset(OpAMD64SHLLconst)
16083                 v.AuxInt = int8ToAuxInt(int8(log32(c / 9)))
16084                 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
16085                 v0.AddArg2(x, x)
16086                 v.AddArg(v0)
16087                 return true
16088         }
16089         // match: (MULLconst [c] (MOVLconst [d]))
16090         // result: (MOVLconst [c*d])
16091         for {
16092                 c := auxIntToInt32(v.AuxInt)
16093                 if v_0.Op != OpAMD64MOVLconst {
16094                         break
16095                 }
16096                 d := auxIntToInt32(v_0.AuxInt)
16097                 v.reset(OpAMD64MOVLconst)
16098                 v.AuxInt = int32ToAuxInt(c * d)
16099                 return true
16100         }
16101         return false
16102 }
// rewriteValueAMD64_OpAMD64MULQ applies the generated rewrite rules for
// the MULQ op (64-bit integer multiply). It reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64MULQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MULQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (MULQconst [int32(c)] x)
	for {
		// MULQ is commutative: the inner loop tries both argument orders
		// by swapping v_0 and v_1 on the second iteration.
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			// MULQconst carries its constant in a 32-bit AuxInt, so the
			// rewrite only fires when c fits in a sign-extended int32.
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64MULQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64MULQconst applies the generated rewrite rules for
// MULQconst (64-bit multiply by a 32-bit immediate). Rules are tried in
// order and the first match wins; it reports whether v was rewritten.
// Most rules strength-reduce the multiply into cheaper NEGQ/LEAQ/SHLQ
// combinations for specific constants.
func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MULQconst [c] (MULQconst [d] x))
	// cond: is32Bit(int64(c)*int64(d))
	// result: (MULQconst [c * d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MULQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		// Fold nested constant multiplies only when the combined
		// constant still fits in the 32-bit immediate.
		if !(is32Bit(int64(c) * int64(d))) {
			break
		}
		v.reset(OpAMD64MULQconst)
		v.AuxInt = int32ToAuxInt(c * d)
		v.AddArg(x)
		return true
	}
	// Small negative constants: multiply by -k becomes NEGQ of the LEA
	// form for k (LEAQ8 x x == x*9, LEAQ4 x x == x*5, LEAQ2 x x == x*3).
	// match: (MULQconst [-9] x)
	// result: (NEGQ (LEAQ8 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != -9 {
			break
		}
		x := v_0
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [-5] x)
	// result: (NEGQ (LEAQ4 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != -5 {
			break
		}
		x := v_0
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [-3] x)
	// result: (NEGQ (LEAQ2 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != -3 {
			break
		}
		x := v_0
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [-1] x)
	// result: (NEGQ x)
	for {
		if auxIntToInt32(v.AuxInt) != -1 {
			break
		}
		x := v_0
		v.reset(OpAMD64NEGQ)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [ 0] _)
	// result: (MOVQconst [0])
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (MULQconst [ 1] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != 1 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// Small positive odd constants: build the product from one or two
	// LEAQ{2,4,8} ops (x + x*scale), which avoids a full IMUL.
	// match: (MULQconst [ 3] x)
	// result: (LEAQ2 x x)
	for {
		if auxIntToInt32(v.AuxInt) != 3 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ2)
		v.AddArg2(x, x)
		return true
	}
	// match: (MULQconst [ 5] x)
	// result: (LEAQ4 x x)
	for {
		if auxIntToInt32(v.AuxInt) != 5 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ4)
		v.AddArg2(x, x)
		return true
	}
	// match: (MULQconst [ 7] x)
	// result: (LEAQ2 x (LEAQ2 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 7 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ2)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [ 9] x)
	// result: (LEAQ8 x x)
	for {
		if auxIntToInt32(v.AuxInt) != 9 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v.AddArg2(x, x)
		return true
	}
	// match: (MULQconst [11] x)
	// result: (LEAQ2 x (LEAQ4 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 11 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ2)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [13] x)
	// result: (LEAQ4 x (LEAQ2 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 13 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ4)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [19] x)
	// result: (LEAQ2 x (LEAQ8 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 19 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ2)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [21] x)
	// result: (LEAQ4 x (LEAQ4 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 21 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ4)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [25] x)
	// result: (LEAQ8 x (LEAQ2 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 25 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [27] x)
	// result: (LEAQ8 (LEAQ2 <v.Type> x x) (LEAQ2 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 27 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg2(x, x)
		// 27 = 3*9: both operands of the LEAQ8 are the same x*3 value.
		v.AddArg2(v0, v0)
		return true
	}
	// match: (MULQconst [37] x)
	// result: (LEAQ4 x (LEAQ8 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 37 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ4)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [41] x)
	// result: (LEAQ8 x (LEAQ4 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 41 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [45] x)
	// result: (LEAQ8 (LEAQ4 <v.Type> x x) (LEAQ4 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 45 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(v0, v0)
		return true
	}
	// match: (MULQconst [73] x)
	// result: (LEAQ8 x (LEAQ8 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 73 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [81] x)
	// result: (LEAQ8 (LEAQ8 <v.Type> x x) (LEAQ8 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 81 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(v0, v0)
		return true
	}
	// Constants near a power of two: c == 2^k-1 becomes shift-and-subtract;
	// c == 2^k+{1,2,4,8} becomes shift plus LEAQ{1,2,4,8}. The lower bounds
	// (c >= 15, 17, ...) keep these from shadowing the cheaper rules above.
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo64(int64(c)+1) && c >= 15
	// result: (SUBQ (SHLQconst <v.Type> [int8(log64(int64(c)+1))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo64(int64(c)+1) && c >= 15) {
			break
		}
		v.reset(OpAMD64SUBQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log64(int64(c) + 1)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo32(c-1) && c >= 17
	// result: (LEAQ1 (SHLQconst <v.Type> [int8(log32(c-1))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo32(c-1) && c >= 17) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log32(c - 1)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo32(c-2) && c >= 34
	// result: (LEAQ2 (SHLQconst <v.Type> [int8(log32(c-2))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo32(c-2) && c >= 34) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log32(c - 2)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo32(c-4) && c >= 68
	// result: (LEAQ4 (SHLQconst <v.Type> [int8(log32(c-4))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo32(c-4) && c >= 68) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log32(c - 4)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo32(c-8) && c >= 136
	// result: (LEAQ8 (SHLQconst <v.Type> [int8(log32(c-8))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo32(c-8) && c >= 136) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log32(c - 8)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// Constants of the form {3,5,9}*2^k: LEAQ{2,4,8} followed by a shift.
	// match: (MULQconst [c] x)
	// cond: c%3 == 0 && isPowerOfTwo32(c/3)
	// result: (SHLQconst [int8(log32(c/3))] (LEAQ2 <v.Type> x x))
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(log32(c / 3)))
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%5 == 0 && isPowerOfTwo32(c/5)
	// result: (SHLQconst [int8(log32(c/5))] (LEAQ4 <v.Type> x x))
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(log32(c / 5)))
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%9 == 0 && isPowerOfTwo32(c/9)
	// result: (SHLQconst [int8(log32(c/9))] (LEAQ8 <v.Type> x x))
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(log32(c / 9)))
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// Constant folding: multiply of a known constant.
	// match: (MULQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(c)*d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) * d)
		return true
	}
	// match: (MULQconst [c] (NEGQ x))
	// cond: c != -(1<<31)
	// result: (MULQconst [-c] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		x := v_0.Args[0]
		// Guard against int32 min: -(-(1<<31)) overflows back to itself.
		if !(c != -(1 << 31)) {
			break
		}
		v.reset(OpAMD64MULQconst)
		v.AuxInt = int32ToAuxInt(-c)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MULSD applies the generated rewrite rules for
// MULSD (float64 multiply). It reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64MULSD(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// Fold a float64 load directly into the multiply when the load has
	// no other uses and the merge is legal (canMergeLoadClobber).
	// match: (MULSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (MULSDload x [off] {sym} ptr mem)
	for {
		// MULSD is commutative: try both argument orders.
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVSDload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64MULSDload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64MULSDload applies the generated rewrite rules for
// MULSDload (float64 multiply with a memory operand). It reports whether v
// was rewritten.
func rewriteValueAMD64_OpAMD64MULSDload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// Fold an ADDQconst on the address into the load's offset.
	// match: (MULSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MULSDload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		// The combined displacement must still fit in a 32-bit offset.
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MULSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// Fold a LEAQ (offset + symbol) into the load's offset/symbol.
	// match: (MULSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MULSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MULSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// Store-to-load forwarding: the load reads exactly what the pending
	// MOVQstore wrote (same ptr/off/sym), so multiply by the stored value
	// reinterpreted as a float64 instead of going through memory.
	// match: (MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
	// result: (MULSD x (MOVQi2f y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64MULSD)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MULSS applies the generated rewrite rules for
// MULSS (float32 multiply). It reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64MULSS(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// Fold a float32 load directly into the multiply when the load has
	// no other uses and the merge is legal (canMergeLoadClobber).
	// match: (MULSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (MULSSload x [off] {sym} ptr mem)
	for {
		// MULSS is commutative: try both argument orders.
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVSSload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64MULSSload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64MULSSload applies the generated rewrite rules for
// MULSSload (float32 multiply with a memory operand). It reports whether v
// was rewritten.
func rewriteValueAMD64_OpAMD64MULSSload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// Fold an ADDQconst on the address into the load's offset.
	// match: (MULSSload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MULSSload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		// The combined displacement must still fit in a 32-bit offset.
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MULSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// Fold a LEAQ (offset + symbol) into the load's offset/symbol.
	// match: (MULSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MULSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MULSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// Store-to-load forwarding: the load reads exactly what the pending
	// MOVLstore wrote (same ptr/off/sym), so multiply by the stored value
	// reinterpreted as a float32 instead of going through memory.
	// match: (MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
	// result: (MULSS x (MOVLi2f y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64MULSS)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
16773 func rewriteValueAMD64_OpAMD64NEGL(v *Value) bool {
16774         v_0 := v.Args[0]
16775         // match: (NEGL (NEGL x))
16776         // result: x
16777         for {
16778                 if v_0.Op != OpAMD64NEGL {
16779                         break
16780                 }
16781                 x := v_0.Args[0]
16782                 v.copyOf(x)
16783                 return true
16784         }
16785         // match: (NEGL s:(SUBL x y))
16786         // cond: s.Uses == 1
16787         // result: (SUBL y x)
16788         for {
16789                 s := v_0
16790                 if s.Op != OpAMD64SUBL {
16791                         break
16792                 }
16793                 y := s.Args[1]
16794                 x := s.Args[0]
16795                 if !(s.Uses == 1) {
16796                         break
16797                 }
16798                 v.reset(OpAMD64SUBL)
16799                 v.AddArg2(y, x)
16800                 return true
16801         }
16802         // match: (NEGL (MOVLconst [c]))
16803         // result: (MOVLconst [-c])
16804         for {
16805                 if v_0.Op != OpAMD64MOVLconst {
16806                         break
16807                 }
16808                 c := auxIntToInt32(v_0.AuxInt)
16809                 v.reset(OpAMD64MOVLconst)
16810                 v.AuxInt = int32ToAuxInt(-c)
16811                 return true
16812         }
16813         return false
16814 }
16815 func rewriteValueAMD64_OpAMD64NEGQ(v *Value) bool {
16816         v_0 := v.Args[0]
16817         // match: (NEGQ (NEGQ x))
16818         // result: x
16819         for {
16820                 if v_0.Op != OpAMD64NEGQ {
16821                         break
16822                 }
16823                 x := v_0.Args[0]
16824                 v.copyOf(x)
16825                 return true
16826         }
16827         // match: (NEGQ s:(SUBQ x y))
16828         // cond: s.Uses == 1
16829         // result: (SUBQ y x)
16830         for {
16831                 s := v_0
16832                 if s.Op != OpAMD64SUBQ {
16833                         break
16834                 }
16835                 y := s.Args[1]
16836                 x := s.Args[0]
16837                 if !(s.Uses == 1) {
16838                         break
16839                 }
16840                 v.reset(OpAMD64SUBQ)
16841                 v.AddArg2(y, x)
16842                 return true
16843         }
16844         // match: (NEGQ (MOVQconst [c]))
16845         // result: (MOVQconst [-c])
16846         for {
16847                 if v_0.Op != OpAMD64MOVQconst {
16848                         break
16849                 }
16850                 c := auxIntToInt64(v_0.AuxInt)
16851                 v.reset(OpAMD64MOVQconst)
16852                 v.AuxInt = int64ToAuxInt(-c)
16853                 return true
16854         }
16855         // match: (NEGQ (ADDQconst [c] (NEGQ x)))
16856         // cond: c != -(1<<31)
16857         // result: (ADDQconst [-c] x)
16858         for {
16859                 if v_0.Op != OpAMD64ADDQconst {
16860                         break
16861                 }
16862                 c := auxIntToInt32(v_0.AuxInt)
16863                 v_0_0 := v_0.Args[0]
16864                 if v_0_0.Op != OpAMD64NEGQ {
16865                         break
16866                 }
16867                 x := v_0_0.Args[0]
16868                 if !(c != -(1 << 31)) {
16869                         break
16870                 }
16871                 v.reset(OpAMD64ADDQconst)
16872                 v.AuxInt = int32ToAuxInt(-c)
16873                 v.AddArg(x)
16874                 return true
16875         }
16876         return false
16877 }
16878 func rewriteValueAMD64_OpAMD64NOTL(v *Value) bool {
16879         v_0 := v.Args[0]
16880         // match: (NOTL (MOVLconst [c]))
16881         // result: (MOVLconst [^c])
16882         for {
16883                 if v_0.Op != OpAMD64MOVLconst {
16884                         break
16885                 }
16886                 c := auxIntToInt32(v_0.AuxInt)
16887                 v.reset(OpAMD64MOVLconst)
16888                 v.AuxInt = int32ToAuxInt(^c)
16889                 return true
16890         }
16891         return false
16892 }
16893 func rewriteValueAMD64_OpAMD64NOTQ(v *Value) bool {
16894         v_0 := v.Args[0]
16895         // match: (NOTQ (MOVQconst [c]))
16896         // result: (MOVQconst [^c])
16897         for {
16898                 if v_0.Op != OpAMD64MOVQconst {
16899                         break
16900                 }
16901                 c := auxIntToInt64(v_0.AuxInt)
16902                 v.reset(OpAMD64MOVQconst)
16903                 v.AuxInt = int64ToAuxInt(^c)
16904                 return true
16905         }
16906         return false
16907 }
16908 func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
16909         v_1 := v.Args[1]
16910         v_0 := v.Args[0]
16911         b := v.Block
16912         typ := &b.Func.Config.Types
16913         // match: (ORL (SHLL (MOVLconst [1]) y) x)
16914         // result: (BTSL x y)
16915         for {
16916                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
16917                         if v_0.Op != OpAMD64SHLL {
16918                                 continue
16919                         }
16920                         y := v_0.Args[1]
16921                         v_0_0 := v_0.Args[0]
16922                         if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
16923                                 continue
16924                         }
16925                         x := v_1
16926                         v.reset(OpAMD64BTSL)
16927                         v.AddArg2(x, y)
16928                         return true
16929                 }
16930                 break
16931         }
16932         // match: (ORL (MOVLconst [c]) x)
16933         // cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
16934         // result: (BTSLconst [int8(log32(c))] x)
16935         for {
16936                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
16937                         if v_0.Op != OpAMD64MOVLconst {
16938                                 continue
16939                         }
16940                         c := auxIntToInt32(v_0.AuxInt)
16941                         x := v_1
16942                         if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) {
16943                                 continue
16944                         }
16945                         v.reset(OpAMD64BTSLconst)
16946                         v.AuxInt = int8ToAuxInt(int8(log32(c)))
16947                         v.AddArg(x)
16948                         return true
16949                 }
16950                 break
16951         }
16952         // match: (ORL x (MOVLconst [c]))
16953         // result: (ORLconst [c] x)
16954         for {
16955                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
16956                         x := v_0
16957                         if v_1.Op != OpAMD64MOVLconst {
16958                                 continue
16959                         }
16960                         c := auxIntToInt32(v_1.AuxInt)
16961                         v.reset(OpAMD64ORLconst)
16962                         v.AuxInt = int32ToAuxInt(c)
16963                         v.AddArg(x)
16964                         return true
16965                 }
16966                 break
16967         }
16968         // match: (ORL (SHLLconst x [c]) (SHRLconst x [d]))
16969         // cond: d==32-c
16970         // result: (ROLLconst x [c])
16971         for {
16972                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
16973                         if v_0.Op != OpAMD64SHLLconst {
16974                                 continue
16975                         }
16976                         c := auxIntToInt8(v_0.AuxInt)
16977                         x := v_0.Args[0]
16978                         if v_1.Op != OpAMD64SHRLconst {
16979                                 continue
16980                         }
16981                         d := auxIntToInt8(v_1.AuxInt)
16982                         if x != v_1.Args[0] || !(d == 32-c) {
16983                                 continue
16984                         }
16985                         v.reset(OpAMD64ROLLconst)
16986                         v.AuxInt = int8ToAuxInt(c)
16987                         v.AddArg(x)
16988                         return true
16989                 }
16990                 break
16991         }
16992         // match: (ORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
16993         // cond: d==16-c && c < 16 && t.Size() == 2
16994         // result: (ROLWconst x [c])
16995         for {
16996                 t := v.Type
16997                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
16998                         if v_0.Op != OpAMD64SHLLconst {
16999                                 continue
17000                         }
17001                         c := auxIntToInt8(v_0.AuxInt)
17002                         x := v_0.Args[0]
17003                         if v_1.Op != OpAMD64SHRWconst {
17004                                 continue
17005                         }
17006                         d := auxIntToInt8(v_1.AuxInt)
17007                         if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) {
17008                                 continue
17009                         }
17010                         v.reset(OpAMD64ROLWconst)
17011                         v.AuxInt = int8ToAuxInt(c)
17012                         v.AddArg(x)
17013                         return true
17014                 }
17015                 break
17016         }
17017         // match: (ORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
17018         // cond: d==8-c && c < 8 && t.Size() == 1
17019         // result: (ROLBconst x [c])
17020         for {
17021                 t := v.Type
17022                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17023                         if v_0.Op != OpAMD64SHLLconst {
17024                                 continue
17025                         }
17026                         c := auxIntToInt8(v_0.AuxInt)
17027                         x := v_0.Args[0]
17028                         if v_1.Op != OpAMD64SHRBconst {
17029                                 continue
17030                         }
17031                         d := auxIntToInt8(v_1.AuxInt)
17032                         if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) {
17033                                 continue
17034                         }
17035                         v.reset(OpAMD64ROLBconst)
17036                         v.AuxInt = int8ToAuxInt(c)
17037                         v.AddArg(x)
17038                         return true
17039                 }
17040                 break
17041         }
17042         // match: (ORL (SHLL x y) (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
17043         // result: (ROLL x y)
17044         for {
17045                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17046                         if v_0.Op != OpAMD64SHLL {
17047                                 continue
17048                         }
17049                         y := v_0.Args[1]
17050                         x := v_0.Args[0]
17051                         if v_1.Op != OpAMD64ANDL {
17052                                 continue
17053                         }
17054                         _ = v_1.Args[1]
17055                         v_1_0 := v_1.Args[0]
17056                         v_1_1 := v_1.Args[1]
17057                         for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
17058                                 if v_1_0.Op != OpAMD64SHRL {
17059                                         continue
17060                                 }
17061                                 _ = v_1_0.Args[1]
17062                                 if x != v_1_0.Args[0] {
17063                                         continue
17064                                 }
17065                                 v_1_0_1 := v_1_0.Args[1]
17066                                 if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
17067                                         continue
17068                                 }
17069                                 v_1_1_0 := v_1_1.Args[0]
17070                                 if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
17071                                         continue
17072                                 }
17073                                 v_1_1_0_0 := v_1_1_0.Args[0]
17074                                 if v_1_1_0_0.Op != OpAMD64NEGQ {
17075                                         continue
17076                                 }
17077                                 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
17078                                 if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
17079                                         continue
17080                                 }
17081                                 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
17082                                 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
17083                                         continue
17084                                 }
17085                                 v.reset(OpAMD64ROLL)
17086                                 v.AddArg2(x, y)
17087                                 return true
17088                         }
17089                 }
17090                 break
17091         }
17092         // match: (ORL (SHLL x y) (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
17093         // result: (ROLL x y)
17094         for {
17095                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17096                         if v_0.Op != OpAMD64SHLL {
17097                                 continue
17098                         }
17099                         y := v_0.Args[1]
17100                         x := v_0.Args[0]
17101                         if v_1.Op != OpAMD64ANDL {
17102                                 continue
17103                         }
17104                         _ = v_1.Args[1]
17105                         v_1_0 := v_1.Args[0]
17106                         v_1_1 := v_1.Args[1]
17107                         for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
17108                                 if v_1_0.Op != OpAMD64SHRL {
17109                                         continue
17110                                 }
17111                                 _ = v_1_0.Args[1]
17112                                 if x != v_1_0.Args[0] {
17113                                         continue
17114                                 }
17115                                 v_1_0_1 := v_1_0.Args[1]
17116                                 if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
17117                                         continue
17118                                 }
17119                                 v_1_1_0 := v_1_1.Args[0]
17120                                 if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
17121                                         continue
17122                                 }
17123                                 v_1_1_0_0 := v_1_1_0.Args[0]
17124                                 if v_1_1_0_0.Op != OpAMD64NEGL {
17125                                         continue
17126                                 }
17127                                 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
17128                                 if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
17129                                         continue
17130                                 }
17131                                 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
17132                                 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
17133                                         continue
17134                                 }
17135                                 v.reset(OpAMD64ROLL)
17136                                 v.AddArg2(x, y)
17137                                 return true
17138                         }
17139                 }
17140                 break
17141         }
17142         // match: (ORL (SHRL x y) (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
17143         // result: (RORL x y)
17144         for {
17145                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17146                         if v_0.Op != OpAMD64SHRL {
17147                                 continue
17148                         }
17149                         y := v_0.Args[1]
17150                         x := v_0.Args[0]
17151                         if v_1.Op != OpAMD64ANDL {
17152                                 continue
17153                         }
17154                         _ = v_1.Args[1]
17155                         v_1_0 := v_1.Args[0]
17156                         v_1_1 := v_1.Args[1]
17157                         for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
17158                                 if v_1_0.Op != OpAMD64SHLL {
17159                                         continue
17160                                 }
17161                                 _ = v_1_0.Args[1]
17162                                 if x != v_1_0.Args[0] {
17163                                         continue
17164                                 }
17165                                 v_1_0_1 := v_1_0.Args[1]
17166                                 if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
17167                                         continue
17168                                 }
17169                                 v_1_1_0 := v_1_1.Args[0]
17170                                 if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
17171                                         continue
17172                                 }
17173                                 v_1_1_0_0 := v_1_1_0.Args[0]
17174                                 if v_1_1_0_0.Op != OpAMD64NEGQ {
17175                                         continue
17176                                 }
17177                                 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
17178                                 if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
17179                                         continue
17180                                 }
17181                                 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
17182                                 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
17183                                         continue
17184                                 }
17185                                 v.reset(OpAMD64RORL)
17186                                 v.AddArg2(x, y)
17187                                 return true
17188                         }
17189                 }
17190                 break
17191         }
17192         // match: (ORL (SHRL x y) (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
17193         // result: (RORL x y)
17194         for {
17195                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17196                         if v_0.Op != OpAMD64SHRL {
17197                                 continue
17198                         }
17199                         y := v_0.Args[1]
17200                         x := v_0.Args[0]
17201                         if v_1.Op != OpAMD64ANDL {
17202                                 continue
17203                         }
17204                         _ = v_1.Args[1]
17205                         v_1_0 := v_1.Args[0]
17206                         v_1_1 := v_1.Args[1]
17207                         for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
17208                                 if v_1_0.Op != OpAMD64SHLL {
17209                                         continue
17210                                 }
17211                                 _ = v_1_0.Args[1]
17212                                 if x != v_1_0.Args[0] {
17213                                         continue
17214                                 }
17215                                 v_1_0_1 := v_1_0.Args[1]
17216                                 if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
17217                                         continue
17218                                 }
17219                                 v_1_1_0 := v_1_1.Args[0]
17220                                 if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
17221                                         continue
17222                                 }
17223                                 v_1_1_0_0 := v_1_1_0.Args[0]
17224                                 if v_1_1_0_0.Op != OpAMD64NEGL {
17225                                         continue
17226                                 }
17227                                 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
17228                                 if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
17229                                         continue
17230                                 }
17231                                 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
17232                                 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
17233                                         continue
17234                                 }
17235                                 v.reset(OpAMD64RORL)
17236                                 v.AddArg2(x, y)
17237                                 return true
17238                         }
17239                 }
17240                 break
17241         }
17242         // match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))))
17243         // cond: v.Type.Size() == 2
17244         // result: (ROLW x y)
17245         for {
17246                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17247                         if v_0.Op != OpAMD64SHLL {
17248                                 continue
17249                         }
17250                         _ = v_0.Args[1]
17251                         x := v_0.Args[0]
17252                         v_0_1 := v_0.Args[1]
17253                         if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
17254                                 continue
17255                         }
17256                         y := v_0_1.Args[0]
17257                         if v_1.Op != OpAMD64ANDL {
17258                                 continue
17259                         }
17260                         _ = v_1.Args[1]
17261                         v_1_0 := v_1.Args[0]
17262                         v_1_1 := v_1.Args[1]
17263                         for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
17264                                 if v_1_0.Op != OpAMD64SHRW {
17265                                         continue
17266                                 }
17267                                 _ = v_1_0.Args[1]
17268                                 if x != v_1_0.Args[0] {
17269                                         continue
17270                                 }
17271                                 v_1_0_1 := v_1_0.Args[1]
17272                                 if v_1_0_1.Op != OpAMD64NEGQ {
17273                                         continue
17274                                 }
17275                                 v_1_0_1_0 := v_1_0_1.Args[0]
17276                                 if v_1_0_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -16 {
17277                                         continue
17278                                 }
17279                                 v_1_0_1_0_0 := v_1_0_1_0.Args[0]
17280                                 if v_1_0_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 15 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
17281                                         continue
17282                                 }
17283                                 v_1_1_0 := v_1_1.Args[0]
17284                                 if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 16 {
17285                                         continue
17286                                 }
17287                                 v_1_1_0_0 := v_1_1_0.Args[0]
17288                                 if v_1_1_0_0.Op != OpAMD64NEGQ {
17289                                         continue
17290                                 }
17291                                 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
17292                                 if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -16 {
17293                                         continue
17294                                 }
17295                                 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
17296                                 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 15 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 2) {
17297                                         continue
17298                                 }
17299                                 v.reset(OpAMD64ROLW)
17300                                 v.AddArg2(x, y)
17301                                 return true
17302                         }
17303                 }
17304                 break
17305         }
17306         // match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))))
17307         // cond: v.Type.Size() == 2
17308         // result: (ROLW x y)
17309         for {
17310                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17311                         if v_0.Op != OpAMD64SHLL {
17312                                 continue
17313                         }
17314                         _ = v_0.Args[1]
17315                         x := v_0.Args[0]
17316                         v_0_1 := v_0.Args[1]
17317                         if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
17318                                 continue
17319                         }
17320                         y := v_0_1.Args[0]
17321                         if v_1.Op != OpAMD64ANDL {
17322                                 continue
17323                         }
17324                         _ = v_1.Args[1]
17325                         v_1_0 := v_1.Args[0]
17326                         v_1_1 := v_1.Args[1]
17327                         for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
17328                                 if v_1_0.Op != OpAMD64SHRW {
17329                                         continue
17330                                 }
17331                                 _ = v_1_0.Args[1]
17332                                 if x != v_1_0.Args[0] {
17333                                         continue
17334                                 }
17335                                 v_1_0_1 := v_1_0.Args[1]
17336                                 if v_1_0_1.Op != OpAMD64NEGL {
17337                                         continue
17338                                 }
17339                                 v_1_0_1_0 := v_1_0_1.Args[0]
17340                                 if v_1_0_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -16 {
17341                                         continue
17342                                 }
17343                                 v_1_0_1_0_0 := v_1_0_1_0.Args[0]
17344                                 if v_1_0_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 15 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
17345                                         continue
17346                                 }
17347                                 v_1_1_0 := v_1_1.Args[0]
17348                                 if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 16 {
17349                                         continue
17350                                 }
17351                                 v_1_1_0_0 := v_1_1_0.Args[0]
17352                                 if v_1_1_0_0.Op != OpAMD64NEGL {
17353                                         continue
17354                                 }
17355                                 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
17356                                 if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -16 {
17357                                         continue
17358                                 }
17359                                 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
17360                                 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 15 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 2) {
17361                                         continue
17362                                 }
17363                                 v.reset(OpAMD64ROLW)
17364                                 v.AddArg2(x, y)
17365                                 return true
17366                         }
17367                 }
17368                 break
17369         }
17370         // match: (ORL (SHRW x (ANDQconst y [15])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))))
17371         // cond: v.Type.Size() == 2
17372         // result: (RORW x y)
17373         for {
17374                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17375                         if v_0.Op != OpAMD64SHRW {
17376                                 continue
17377                         }
17378                         _ = v_0.Args[1]
17379                         x := v_0.Args[0]
17380                         v_0_1 := v_0.Args[1]
17381                         if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
17382                                 continue
17383                         }
17384                         y := v_0_1.Args[0]
17385                         if v_1.Op != OpAMD64SHLL {
17386                                 continue
17387                         }
17388                         _ = v_1.Args[1]
17389                         if x != v_1.Args[0] {
17390                                 continue
17391                         }
17392                         v_1_1 := v_1.Args[1]
17393                         if v_1_1.Op != OpAMD64NEGQ {
17394                                 continue
17395                         }
17396                         v_1_1_0 := v_1_1.Args[0]
17397                         if v_1_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0.AuxInt) != -16 {
17398                                 continue
17399                         }
17400                         v_1_1_0_0 := v_1_1_0.Args[0]
17401                         if v_1_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 15 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 2) {
17402                                 continue
17403                         }
17404                         v.reset(OpAMD64RORW)
17405                         v.AddArg2(x, y)
17406                         return true
17407                 }
17408                 break
17409         }
17410         // match: (ORL (SHRW x (ANDLconst y [15])) (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))))
17411         // cond: v.Type.Size() == 2
17412         // result: (RORW x y)
17413         for {
17414                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17415                         if v_0.Op != OpAMD64SHRW {
17416                                 continue
17417                         }
17418                         _ = v_0.Args[1]
17419                         x := v_0.Args[0]
17420                         v_0_1 := v_0.Args[1]
17421                         if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
17422                                 continue
17423                         }
17424                         y := v_0_1.Args[0]
17425                         if v_1.Op != OpAMD64SHLL {
17426                                 continue
17427                         }
17428                         _ = v_1.Args[1]
17429                         if x != v_1.Args[0] {
17430                                 continue
17431                         }
17432                         v_1_1 := v_1.Args[1]
17433                         if v_1_1.Op != OpAMD64NEGL {
17434                                 continue
17435                         }
17436                         v_1_1_0 := v_1_1.Args[0]
17437                         if v_1_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0.AuxInt) != -16 {
17438                                 continue
17439                         }
17440                         v_1_1_0_0 := v_1_1_0.Args[0]
17441                         if v_1_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 15 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 2) {
17442                                 continue
17443                         }
17444                         v.reset(OpAMD64RORW)
17445                         v.AddArg2(x, y)
17446                         return true
17447                 }
17448                 break
17449         }
17450         // match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))))
17451         // cond: v.Type.Size() == 1
17452         // result: (ROLB x y)
17453         for {
17454                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17455                         if v_0.Op != OpAMD64SHLL {
17456                                 continue
17457                         }
17458                         _ = v_0.Args[1]
17459                         x := v_0.Args[0]
17460                         v_0_1 := v_0.Args[1]
17461                         if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
17462                                 continue
17463                         }
17464                         y := v_0_1.Args[0]
17465                         if v_1.Op != OpAMD64ANDL {
17466                                 continue
17467                         }
17468                         _ = v_1.Args[1]
17469                         v_1_0 := v_1.Args[0]
17470                         v_1_1 := v_1.Args[1]
17471                         for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
17472                                 if v_1_0.Op != OpAMD64SHRB {
17473                                         continue
17474                                 }
17475                                 _ = v_1_0.Args[1]
17476                                 if x != v_1_0.Args[0] {
17477                                         continue
17478                                 }
17479                                 v_1_0_1 := v_1_0.Args[1]
17480                                 if v_1_0_1.Op != OpAMD64NEGQ {
17481                                         continue
17482                                 }
17483                                 v_1_0_1_0 := v_1_0_1.Args[0]
17484                                 if v_1_0_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -8 {
17485                                         continue
17486                                 }
17487                                 v_1_0_1_0_0 := v_1_0_1_0.Args[0]
17488                                 if v_1_0_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 7 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
17489                                         continue
17490                                 }
17491                                 v_1_1_0 := v_1_1.Args[0]
17492                                 if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 8 {
17493                                         continue
17494                                 }
17495                                 v_1_1_0_0 := v_1_1_0.Args[0]
17496                                 if v_1_1_0_0.Op != OpAMD64NEGQ {
17497                                         continue
17498                                 }
17499                                 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
17500                                 if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -8 {
17501                                         continue
17502                                 }
17503                                 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
17504                                 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 7 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 1) {
17505                                         continue
17506                                 }
17507                                 v.reset(OpAMD64ROLB)
17508                                 v.AddArg2(x, y)
17509                                 return true
17510                         }
17511                 }
17512                 break
17513         }
17514         // match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))))
17515         // cond: v.Type.Size() == 1
17516         // result: (ROLB x y)
17517         for {
17518                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17519                         if v_0.Op != OpAMD64SHLL {
17520                                 continue
17521                         }
17522                         _ = v_0.Args[1]
17523                         x := v_0.Args[0]
17524                         v_0_1 := v_0.Args[1]
17525                         if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
17526                                 continue
17527                         }
17528                         y := v_0_1.Args[0]
17529                         if v_1.Op != OpAMD64ANDL {
17530                                 continue
17531                         }
17532                         _ = v_1.Args[1]
17533                         v_1_0 := v_1.Args[0]
17534                         v_1_1 := v_1.Args[1]
17535                         for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
17536                                 if v_1_0.Op != OpAMD64SHRB {
17537                                         continue
17538                                 }
17539                                 _ = v_1_0.Args[1]
17540                                 if x != v_1_0.Args[0] {
17541                                         continue
17542                                 }
17543                                 v_1_0_1 := v_1_0.Args[1]
17544                                 if v_1_0_1.Op != OpAMD64NEGL {
17545                                         continue
17546                                 }
17547                                 v_1_0_1_0 := v_1_0_1.Args[0]
17548                                 if v_1_0_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -8 {
17549                                         continue
17550                                 }
17551                                 v_1_0_1_0_0 := v_1_0_1_0.Args[0]
17552                                 if v_1_0_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 7 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
17553                                         continue
17554                                 }
17555                                 v_1_1_0 := v_1_1.Args[0]
17556                                 if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 8 {
17557                                         continue
17558                                 }
17559                                 v_1_1_0_0 := v_1_1_0.Args[0]
17560                                 if v_1_1_0_0.Op != OpAMD64NEGL {
17561                                         continue
17562                                 }
17563                                 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
17564                                 if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -8 {
17565                                         continue
17566                                 }
17567                                 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
17568                                 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 7 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 1) {
17569                                         continue
17570                                 }
17571                                 v.reset(OpAMD64ROLB)
17572                                 v.AddArg2(x, y)
17573                                 return true
17574                         }
17575                 }
17576                 break
17577         }
17578         // match: (ORL (SHRB x (ANDQconst y [ 7])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))))
17579         // cond: v.Type.Size() == 1
17580         // result: (RORB x y)
17581         for {
17582                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17583                         if v_0.Op != OpAMD64SHRB {
17584                                 continue
17585                         }
17586                         _ = v_0.Args[1]
17587                         x := v_0.Args[0]
17588                         v_0_1 := v_0.Args[1]
17589                         if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
17590                                 continue
17591                         }
17592                         y := v_0_1.Args[0]
17593                         if v_1.Op != OpAMD64SHLL {
17594                                 continue
17595                         }
17596                         _ = v_1.Args[1]
17597                         if x != v_1.Args[0] {
17598                                 continue
17599                         }
17600                         v_1_1 := v_1.Args[1]
17601                         if v_1_1.Op != OpAMD64NEGQ {
17602                                 continue
17603                         }
17604                         v_1_1_0 := v_1_1.Args[0]
17605                         if v_1_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0.AuxInt) != -8 {
17606                                 continue
17607                         }
17608                         v_1_1_0_0 := v_1_1_0.Args[0]
17609                         if v_1_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 7 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 1) {
17610                                 continue
17611                         }
17612                         v.reset(OpAMD64RORB)
17613                         v.AddArg2(x, y)
17614                         return true
17615                 }
17616                 break
17617         }
17618         // match: (ORL (SHRB x (ANDLconst y [ 7])) (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))))
17619         // cond: v.Type.Size() == 1
17620         // result: (RORB x y)
17621         for {
17622                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17623                         if v_0.Op != OpAMD64SHRB {
17624                                 continue
17625                         }
17626                         _ = v_0.Args[1]
17627                         x := v_0.Args[0]
17628                         v_0_1 := v_0.Args[1]
17629                         if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
17630                                 continue
17631                         }
17632                         y := v_0_1.Args[0]
17633                         if v_1.Op != OpAMD64SHLL {
17634                                 continue
17635                         }
17636                         _ = v_1.Args[1]
17637                         if x != v_1.Args[0] {
17638                                 continue
17639                         }
17640                         v_1_1 := v_1.Args[1]
17641                         if v_1_1.Op != OpAMD64NEGL {
17642                                 continue
17643                         }
17644                         v_1_1_0 := v_1_1.Args[0]
17645                         if v_1_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0.AuxInt) != -8 {
17646                                 continue
17647                         }
17648                         v_1_1_0_0 := v_1_1_0.Args[0]
17649                         if v_1_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 7 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 1) {
17650                                 continue
17651                         }
17652                         v.reset(OpAMD64RORB)
17653                         v.AddArg2(x, y)
17654                         return true
17655                 }
17656                 break
17657         }
17658         // match: (ORL x x)
17659         // result: x
17660         for {
17661                 x := v_0
17662                 if x != v_1 {
17663                         break
17664                 }
17665                 v.copyOf(x)
17666                 return true
17667         }
17668         // match: (ORL x0:(MOVBload [i0] {s} p mem) sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)))
17669         // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
17670         // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
17671         for {
17672                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17673                         x0 := v_0
17674                         if x0.Op != OpAMD64MOVBload {
17675                                 continue
17676                         }
17677                         i0 := auxIntToInt32(x0.AuxInt)
17678                         s := auxToSym(x0.Aux)
17679                         mem := x0.Args[1]
17680                         p := x0.Args[0]
17681                         sh := v_1
17682                         if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 {
17683                                 continue
17684                         }
17685                         x1 := sh.Args[0]
17686                         if x1.Op != OpAMD64MOVBload {
17687                                 continue
17688                         }
17689                         i1 := auxIntToInt32(x1.AuxInt)
17690                         if auxToSym(x1.Aux) != s {
17691                                 continue
17692                         }
17693                         _ = x1.Args[1]
17694                         if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
17695                                 continue
17696                         }
17697                         b = mergePoint(b, x0, x1)
17698                         v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
17699                         v.copyOf(v0)
17700                         v0.AuxInt = int32ToAuxInt(i0)
17701                         v0.Aux = symToAux(s)
17702                         v0.AddArg2(p, mem)
17703                         return true
17704                 }
17705                 break
17706         }
17707         // match: (ORL x0:(MOVBload [i] {s} p0 mem) sh:(SHLLconst [8] x1:(MOVBload [i] {s} p1 mem)))
17708         // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
17709         // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p0 mem)
17710         for {
17711                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17712                         x0 := v_0
17713                         if x0.Op != OpAMD64MOVBload {
17714                                 continue
17715                         }
17716                         i := auxIntToInt32(x0.AuxInt)
17717                         s := auxToSym(x0.Aux)
17718                         mem := x0.Args[1]
17719                         p0 := x0.Args[0]
17720                         sh := v_1
17721                         if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 {
17722                                 continue
17723                         }
17724                         x1 := sh.Args[0]
17725                         if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
17726                                 continue
17727                         }
17728                         _ = x1.Args[1]
17729                         p1 := x1.Args[0]
17730                         if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
17731                                 continue
17732                         }
17733                         b = mergePoint(b, x0, x1)
17734                         v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
17735                         v.copyOf(v0)
17736                         v0.AuxInt = int32ToAuxInt(i)
17737                         v0.Aux = symToAux(s)
17738                         v0.AddArg2(p0, mem)
17739                         return true
17740                 }
17741                 break
17742         }
17743         // match: (ORL x0:(MOVWload [i0] {s} p mem) sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)))
17744         // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
17745         // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
17746         for {
17747                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17748                         x0 := v_0
17749                         if x0.Op != OpAMD64MOVWload {
17750                                 continue
17751                         }
17752                         i0 := auxIntToInt32(x0.AuxInt)
17753                         s := auxToSym(x0.Aux)
17754                         mem := x0.Args[1]
17755                         p := x0.Args[0]
17756                         sh := v_1
17757                         if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 {
17758                                 continue
17759                         }
17760                         x1 := sh.Args[0]
17761                         if x1.Op != OpAMD64MOVWload {
17762                                 continue
17763                         }
17764                         i1 := auxIntToInt32(x1.AuxInt)
17765                         if auxToSym(x1.Aux) != s {
17766                                 continue
17767                         }
17768                         _ = x1.Args[1]
17769                         if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
17770                                 continue
17771                         }
17772                         b = mergePoint(b, x0, x1)
17773                         v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
17774                         v.copyOf(v0)
17775                         v0.AuxInt = int32ToAuxInt(i0)
17776                         v0.Aux = symToAux(s)
17777                         v0.AddArg2(p, mem)
17778                         return true
17779                 }
17780                 break
17781         }
17782         // match: (ORL x0:(MOVWload [i] {s} p0 mem) sh:(SHLLconst [16] x1:(MOVWload [i] {s} p1 mem)))
17783         // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
17784         // result: @mergePoint(b,x0,x1) (MOVLload [i] {s} p0 mem)
17785         for {
17786                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17787                         x0 := v_0
17788                         if x0.Op != OpAMD64MOVWload {
17789                                 continue
17790                         }
17791                         i := auxIntToInt32(x0.AuxInt)
17792                         s := auxToSym(x0.Aux)
17793                         mem := x0.Args[1]
17794                         p0 := x0.Args[0]
17795                         sh := v_1
17796                         if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 {
17797                                 continue
17798                         }
17799                         x1 := sh.Args[0]
17800                         if x1.Op != OpAMD64MOVWload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
17801                                 continue
17802                         }
17803                         _ = x1.Args[1]
17804                         p1 := x1.Args[0]
17805                         if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
17806                                 continue
17807                         }
17808                         b = mergePoint(b, x0, x1)
17809                         v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
17810                         v.copyOf(v0)
17811                         v0.AuxInt = int32ToAuxInt(i)
17812                         v0.Aux = symToAux(s)
17813                         v0.AddArg2(p0, mem)
17814                         return true
17815                 }
17816                 break
17817         }
17818         // match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y))
17819         // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
17820         // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
17821         for {
17822                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17823                         s1 := v_0
17824                         if s1.Op != OpAMD64SHLLconst {
17825                                 continue
17826                         }
17827                         j1 := auxIntToInt8(s1.AuxInt)
17828                         x1 := s1.Args[0]
17829                         if x1.Op != OpAMD64MOVBload {
17830                                 continue
17831                         }
17832                         i1 := auxIntToInt32(x1.AuxInt)
17833                         s := auxToSym(x1.Aux)
17834                         mem := x1.Args[1]
17835                         p := x1.Args[0]
17836                         or := v_1
17837                         if or.Op != OpAMD64ORL {
17838                                 continue
17839                         }
17840                         _ = or.Args[1]
17841                         or_0 := or.Args[0]
17842                         or_1 := or.Args[1]
17843                         for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
17844                                 s0 := or_0
17845                                 if s0.Op != OpAMD64SHLLconst {
17846                                         continue
17847                                 }
17848                                 j0 := auxIntToInt8(s0.AuxInt)
17849                                 x0 := s0.Args[0]
17850                                 if x0.Op != OpAMD64MOVBload {
17851                                         continue
17852                                 }
17853                                 i0 := auxIntToInt32(x0.AuxInt)
17854                                 if auxToSym(x0.Aux) != s {
17855                                         continue
17856                                 }
17857                                 _ = x0.Args[1]
17858                                 if p != x0.Args[0] || mem != x0.Args[1] {
17859                                         continue
17860                                 }
17861                                 y := or_1
17862                                 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
17863                                         continue
17864                                 }
17865                                 b = mergePoint(b, x0, x1, y)
17866                                 v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type)
17867                                 v.copyOf(v0)
17868                                 v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type)
17869                                 v1.AuxInt = int8ToAuxInt(j0)
17870                                 v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
17871                                 v2.AuxInt = int32ToAuxInt(i0)
17872                                 v2.Aux = symToAux(s)
17873                                 v2.AddArg2(p, mem)
17874                                 v1.AddArg(v2)
17875                                 v0.AddArg2(v1, y)
17876                                 return true
17877                         }
17878                 }
17879                 break
17880         }
17881         // match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i] {s} p1 mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i] {s} p0 mem)) y))
17882         // cond: j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
17883         // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i] {s} p0 mem)) y)
17884         for {
17885                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17886                         s1 := v_0
17887                         if s1.Op != OpAMD64SHLLconst {
17888                                 continue
17889                         }
17890                         j1 := auxIntToInt8(s1.AuxInt)
17891                         x1 := s1.Args[0]
17892                         if x1.Op != OpAMD64MOVBload {
17893                                 continue
17894                         }
17895                         i := auxIntToInt32(x1.AuxInt)
17896                         s := auxToSym(x1.Aux)
17897                         mem := x1.Args[1]
17898                         p1 := x1.Args[0]
17899                         or := v_1
17900                         if or.Op != OpAMD64ORL {
17901                                 continue
17902                         }
17903                         _ = or.Args[1]
17904                         or_0 := or.Args[0]
17905                         or_1 := or.Args[1]
17906                         for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
17907                                 s0 := or_0
17908                                 if s0.Op != OpAMD64SHLLconst {
17909                                         continue
17910                                 }
17911                                 j0 := auxIntToInt8(s0.AuxInt)
17912                                 x0 := s0.Args[0]
17913                                 if x0.Op != OpAMD64MOVBload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
17914                                         continue
17915                                 }
17916                                 _ = x0.Args[1]
17917                                 p0 := x0.Args[0]
17918                                 if mem != x0.Args[1] {
17919                                         continue
17920                                 }
17921                                 y := or_1
17922                                 if !(j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
17923                                         continue
17924                                 }
17925                                 b = mergePoint(b, x0, x1, y)
17926                                 v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type)
17927                                 v.copyOf(v0)
17928                                 v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type)
17929                                 v1.AuxInt = int8ToAuxInt(j0)
17930                                 v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
17931                                 v2.AuxInt = int32ToAuxInt(i)
17932                                 v2.Aux = symToAux(s)
17933                                 v2.AddArg2(p0, mem)
17934                                 v1.AddArg(v2)
17935                                 v0.AddArg2(v1, y)
17936                                 return true
17937                         }
17938                 }
17939                 break
17940         }
17941         // match: (ORL x1:(MOVBload [i1] {s} p mem) sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem)))
17942         // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
17943         // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
17944         for {
17945                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17946                         x1 := v_0
17947                         if x1.Op != OpAMD64MOVBload {
17948                                 continue
17949                         }
17950                         i1 := auxIntToInt32(x1.AuxInt)
17951                         s := auxToSym(x1.Aux)
17952                         mem := x1.Args[1]
17953                         p := x1.Args[0]
17954                         sh := v_1
17955                         if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 {
17956                                 continue
17957                         }
17958                         x0 := sh.Args[0]
17959                         if x0.Op != OpAMD64MOVBload {
17960                                 continue
17961                         }
17962                         i0 := auxIntToInt32(x0.AuxInt)
17963                         if auxToSym(x0.Aux) != s {
17964                                 continue
17965                         }
17966                         _ = x0.Args[1]
17967                         if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
17968                                 continue
17969                         }
17970                         b = mergePoint(b, x0, x1)
17971                         v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type)
17972                         v.copyOf(v0)
17973                         v0.AuxInt = int8ToAuxInt(8)
17974                         v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
17975                         v1.AuxInt = int32ToAuxInt(i0)
17976                         v1.Aux = symToAux(s)
17977                         v1.AddArg2(p, mem)
17978                         v0.AddArg(v1)
17979                         return true
17980                 }
17981                 break
17982         }
17983         // match: (ORL x1:(MOVBload [i] {s} p1 mem) sh:(SHLLconst [8] x0:(MOVBload [i] {s} p0 mem)))
17984         // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
17985         // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i] {s} p0 mem))
17986         for {
17987                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17988                         x1 := v_0
17989                         if x1.Op != OpAMD64MOVBload {
17990                                 continue
17991                         }
17992                         i := auxIntToInt32(x1.AuxInt)
17993                         s := auxToSym(x1.Aux)
17994                         mem := x1.Args[1]
17995                         p1 := x1.Args[0]
17996                         sh := v_1
17997                         if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 {
17998                                 continue
17999                         }
18000                         x0 := sh.Args[0]
18001                         if x0.Op != OpAMD64MOVBload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
18002                                 continue
18003                         }
18004                         _ = x0.Args[1]
18005                         p0 := x0.Args[0]
18006                         if mem != x0.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
18007                                 continue
18008                         }
18009                         b = mergePoint(b, x0, x1)
18010                         v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type)
18011                         v.copyOf(v0)
18012                         v0.AuxInt = int8ToAuxInt(8)
18013                         v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
18014                         v1.AuxInt = int32ToAuxInt(i)
18015                         v1.Aux = symToAux(s)
18016                         v1.AddArg2(p0, mem)
18017                         v0.AddArg(v1)
18018                         return true
18019                 }
18020                 break
18021         }
18022         // match: (ORL r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
18023         // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh)
18024         // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
18025         for {
18026                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18027                         r1 := v_0
18028                         if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
18029                                 continue
18030                         }
18031                         x1 := r1.Args[0]
18032                         if x1.Op != OpAMD64MOVWload {
18033                                 continue
18034                         }
18035                         i1 := auxIntToInt32(x1.AuxInt)
18036                         s := auxToSym(x1.Aux)
18037                         mem := x1.Args[1]
18038                         p := x1.Args[0]
18039                         sh := v_1
18040                         if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 {
18041                                 continue
18042                         }
18043                         r0 := sh.Args[0]
18044                         if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
18045                                 continue
18046                         }
18047                         x0 := r0.Args[0]
18048                         if x0.Op != OpAMD64MOVWload {
18049                                 continue
18050                         }
18051                         i0 := auxIntToInt32(x0.AuxInt)
18052                         if auxToSym(x0.Aux) != s {
18053                                 continue
18054                         }
18055                         _ = x0.Args[1]
18056                         if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
18057                                 continue
18058                         }
18059                         b = mergePoint(b, x0, x1)
18060                         v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type)
18061                         v.copyOf(v0)
18062                         v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
18063                         v1.AuxInt = int32ToAuxInt(i0)
18064                         v1.Aux = symToAux(s)
18065                         v1.AddArg2(p, mem)
18066                         v0.AddArg(v1)
18067                         return true
18068                 }
18069                 break
18070         }
18071         // match: (ORL r1:(ROLWconst [8] x1:(MOVWload [i] {s} p1 mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i] {s} p0 mem))))
18072         // cond: x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh)
18073         // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i] {s} p0 mem))
18074         for {
18075                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18076                         r1 := v_0
18077                         if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
18078                                 continue
18079                         }
18080                         x1 := r1.Args[0]
18081                         if x1.Op != OpAMD64MOVWload {
18082                                 continue
18083                         }
18084                         i := auxIntToInt32(x1.AuxInt)
18085                         s := auxToSym(x1.Aux)
18086                         mem := x1.Args[1]
18087                         p1 := x1.Args[0]
18088                         sh := v_1
18089                         if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 {
18090                                 continue
18091                         }
18092                         r0 := sh.Args[0]
18093                         if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
18094                                 continue
18095                         }
18096                         x0 := r0.Args[0]
18097                         if x0.Op != OpAMD64MOVWload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
18098                                 continue
18099                         }
18100                         _ = x0.Args[1]
18101                         p0 := x0.Args[0]
18102                         if mem != x0.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
18103                                 continue
18104                         }
18105                         b = mergePoint(b, x0, x1)
18106                         v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type)
18107                         v.copyOf(v0)
18108                         v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
18109                         v1.AuxInt = int32ToAuxInt(i)
18110                         v1.Aux = symToAux(s)
18111                         v1.AddArg2(p0, mem)
18112                         v0.AddArg(v1)
18113                         return true
18114                 }
18115                 break
18116         }
18117         // match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y))
18118         // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
18119         // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
18120         for {
18121                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18122                         s0 := v_0
18123                         if s0.Op != OpAMD64SHLLconst {
18124                                 continue
18125                         }
18126                         j0 := auxIntToInt8(s0.AuxInt)
18127                         x0 := s0.Args[0]
18128                         if x0.Op != OpAMD64MOVBload {
18129                                 continue
18130                         }
18131                         i0 := auxIntToInt32(x0.AuxInt)
18132                         s := auxToSym(x0.Aux)
18133                         mem := x0.Args[1]
18134                         p := x0.Args[0]
18135                         or := v_1
18136                         if or.Op != OpAMD64ORL {
18137                                 continue
18138                         }
18139                         _ = or.Args[1]
18140                         or_0 := or.Args[0]
18141                         or_1 := or.Args[1]
18142                         for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
18143                                 s1 := or_0
18144                                 if s1.Op != OpAMD64SHLLconst {
18145                                         continue
18146                                 }
18147                                 j1 := auxIntToInt8(s1.AuxInt)
18148                                 x1 := s1.Args[0]
18149                                 if x1.Op != OpAMD64MOVBload {
18150                                         continue
18151                                 }
18152                                 i1 := auxIntToInt32(x1.AuxInt)
18153                                 if auxToSym(x1.Aux) != s {
18154                                         continue
18155                                 }
18156                                 _ = x1.Args[1]
18157                                 if p != x1.Args[0] || mem != x1.Args[1] {
18158                                         continue
18159                                 }
18160                                 y := or_1
18161                                 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
18162                                         continue
18163                                 }
18164                                 b = mergePoint(b, x0, x1, y)
18165                                 v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type)
18166                                 v.copyOf(v0)
18167                                 v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type)
18168                                 v1.AuxInt = int8ToAuxInt(j1)
18169                                 v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
18170                                 v2.AuxInt = int8ToAuxInt(8)
18171                                 v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
18172                                 v3.AuxInt = int32ToAuxInt(i0)
18173                                 v3.Aux = symToAux(s)
18174                                 v3.AddArg2(p, mem)
18175                                 v2.AddArg(v3)
18176                                 v1.AddArg(v2)
18177                                 v0.AddArg2(v1, y)
18178                                 return true
18179                         }
18180                 }
18181                 break
18182         }
18183         // match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i] {s} p0 mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i] {s} p1 mem)) y))
18184         // cond: j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
18185         // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i] {s} p0 mem))) y)
18186         for {
18187                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18188                         s0 := v_0
18189                         if s0.Op != OpAMD64SHLLconst {
18190                                 continue
18191                         }
18192                         j0 := auxIntToInt8(s0.AuxInt)
18193                         x0 := s0.Args[0]
18194                         if x0.Op != OpAMD64MOVBload {
18195                                 continue
18196                         }
18197                         i := auxIntToInt32(x0.AuxInt)
18198                         s := auxToSym(x0.Aux)
18199                         mem := x0.Args[1]
18200                         p0 := x0.Args[0]
18201                         or := v_1
18202                         if or.Op != OpAMD64ORL {
18203                                 continue
18204                         }
18205                         _ = or.Args[1]
18206                         or_0 := or.Args[0]
18207                         or_1 := or.Args[1]
18208                         for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
18209                                 s1 := or_0
18210                                 if s1.Op != OpAMD64SHLLconst {
18211                                         continue
18212                                 }
18213                                 j1 := auxIntToInt8(s1.AuxInt)
18214                                 x1 := s1.Args[0]
18215                                 if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
18216                                         continue
18217                                 }
18218                                 _ = x1.Args[1]
18219                                 p1 := x1.Args[0]
18220                                 if mem != x1.Args[1] {
18221                                         continue
18222                                 }
18223                                 y := or_1
18224                                 if !(j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
18225                                         continue
18226                                 }
18227                                 b = mergePoint(b, x0, x1, y)
18228                                 v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type)
18229                                 v.copyOf(v0)
18230                                 v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type)
18231                                 v1.AuxInt = int8ToAuxInt(j1)
18232                                 v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
18233                                 v2.AuxInt = int8ToAuxInt(8)
18234                                 v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
18235                                 v3.AuxInt = int32ToAuxInt(i)
18236                                 v3.Aux = symToAux(s)
18237                                 v3.AddArg2(p0, mem)
18238                                 v2.AddArg(v3)
18239                                 v1.AddArg(v2)
18240                                 v0.AddArg2(v1, y)
18241                                 return true
18242                         }
18243                 }
18244                 break
18245         }
18246         // match: (ORL x l:(MOVLload [off] {sym} ptr mem))
18247         // cond: canMergeLoadClobber(v, l, x) && clobber(l)
18248         // result: (ORLload x [off] {sym} ptr mem)
18249         for {
18250                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18251                         x := v_0
18252                         l := v_1
18253                         if l.Op != OpAMD64MOVLload {
18254                                 continue
18255                         }
18256                         off := auxIntToInt32(l.AuxInt)
18257                         sym := auxToSym(l.Aux)
18258                         mem := l.Args[1]
18259                         ptr := l.Args[0]
18260                         if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
18261                                 continue
18262                         }
18263                         v.reset(OpAMD64ORLload)
18264                         v.AuxInt = int32ToAuxInt(off)
18265                         v.Aux = symToAux(sym)
18266                         v.AddArg3(x, ptr, mem)
18267                         return true
18268                 }
18269                 break
18270         }
18271         return false
18272 }
18273 func rewriteValueAMD64_OpAMD64ORLconst(v *Value) bool {
18274         v_0 := v.Args[0]
18275         // match: (ORLconst [c] x)
18276         // cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
18277         // result: (BTSLconst [int8(log32(c))] x)
18278         for {
18279                 c := auxIntToInt32(v.AuxInt)
18280                 x := v_0
18281                 if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) {
18282                         break
18283                 }
18284                 v.reset(OpAMD64BTSLconst)
18285                 v.AuxInt = int8ToAuxInt(int8(log32(c)))
18286                 v.AddArg(x)
18287                 return true
18288         }
18289         // match: (ORLconst [c] (ORLconst [d] x))
18290         // result: (ORLconst [c | d] x)
18291         for {
18292                 c := auxIntToInt32(v.AuxInt)
18293                 if v_0.Op != OpAMD64ORLconst {
18294                         break
18295                 }
18296                 d := auxIntToInt32(v_0.AuxInt)
18297                 x := v_0.Args[0]
18298                 v.reset(OpAMD64ORLconst)
18299                 v.AuxInt = int32ToAuxInt(c | d)
18300                 v.AddArg(x)
18301                 return true
18302         }
18303         // match: (ORLconst [c] (BTSLconst [d] x))
18304         // result: (ORLconst [c | 1<<uint32(d)] x)
18305         for {
18306                 c := auxIntToInt32(v.AuxInt)
18307                 if v_0.Op != OpAMD64BTSLconst {
18308                         break
18309                 }
18310                 d := auxIntToInt8(v_0.AuxInt)
18311                 x := v_0.Args[0]
18312                 v.reset(OpAMD64ORLconst)
18313                 v.AuxInt = int32ToAuxInt(c | 1<<uint32(d))
18314                 v.AddArg(x)
18315                 return true
18316         }
18317         // match: (ORLconst [c] x)
18318         // cond: c==0
18319         // result: x
18320         for {
18321                 c := auxIntToInt32(v.AuxInt)
18322                 x := v_0
18323                 if !(c == 0) {
18324                         break
18325                 }
18326                 v.copyOf(x)
18327                 return true
18328         }
18329         // match: (ORLconst [c] _)
18330         // cond: c==-1
18331         // result: (MOVLconst [-1])
18332         for {
18333                 c := auxIntToInt32(v.AuxInt)
18334                 if !(c == -1) {
18335                         break
18336                 }
18337                 v.reset(OpAMD64MOVLconst)
18338                 v.AuxInt = int32ToAuxInt(-1)
18339                 return true
18340         }
18341         // match: (ORLconst [c] (MOVLconst [d]))
18342         // result: (MOVLconst [c|d])
18343         for {
18344                 c := auxIntToInt32(v.AuxInt)
18345                 if v_0.Op != OpAMD64MOVLconst {
18346                         break
18347                 }
18348                 d := auxIntToInt32(v_0.AuxInt)
18349                 v.reset(OpAMD64MOVLconst)
18350                 v.AuxInt = int32ToAuxInt(c | d)
18351                 return true
18352         }
18353         return false
18354 }
18355 func rewriteValueAMD64_OpAMD64ORLconstmodify(v *Value) bool {
18356         v_1 := v.Args[1]
18357         v_0 := v.Args[0]
18358         // match: (ORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
18359         // cond: ValAndOff(valoff1).canAdd32(off2)
18360         // result: (ORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
18361         for {
18362                 valoff1 := auxIntToValAndOff(v.AuxInt)
18363                 sym := auxToSym(v.Aux)
18364                 if v_0.Op != OpAMD64ADDQconst {
18365                         break
18366                 }
18367                 off2 := auxIntToInt32(v_0.AuxInt)
18368                 base := v_0.Args[0]
18369                 mem := v_1
18370                 if !(ValAndOff(valoff1).canAdd32(off2)) {
18371                         break
18372                 }
18373                 v.reset(OpAMD64ORLconstmodify)
18374                 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
18375                 v.Aux = symToAux(sym)
18376                 v.AddArg2(base, mem)
18377                 return true
18378         }
18379         // match: (ORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
18380         // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
18381         // result: (ORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
18382         for {
18383                 valoff1 := auxIntToValAndOff(v.AuxInt)
18384                 sym1 := auxToSym(v.Aux)
18385                 if v_0.Op != OpAMD64LEAQ {
18386                         break
18387                 }
18388                 off2 := auxIntToInt32(v_0.AuxInt)
18389                 sym2 := auxToSym(v_0.Aux)
18390                 base := v_0.Args[0]
18391                 mem := v_1
18392                 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
18393                         break
18394                 }
18395                 v.reset(OpAMD64ORLconstmodify)
18396                 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
18397                 v.Aux = symToAux(mergeSym(sym1, sym2))
18398                 v.AddArg2(base, mem)
18399                 return true
18400         }
18401         return false
18402 }
// rewriteValueAMD64_OpAMD64ORLload simplifies ORLload (32-bit OR with a
// value loaded from memory): it folds constant address arithmetic into
// the load's offset/symbol, and forwards a same-address SSE store so the
// value never round-trips through memory. Returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64ORLload(v *Value) bool {
	v_2 := v.Args[2] // mem
	v_1 := v.Args[1] // load address
	v_0 := v.Args[0] // val operand of the OR
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ORLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ORLload [off1+off2] {sym} val base mem)
	// Fold the ADDQconst into the load offset when the sum fits in 32 bits.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ORLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	// Same folding for LEAQ, additionally merging the two symbol auxes.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (ORL x (MOVLf2i y))
	// The memory operand is a MOVSSstore to the exact same [off]{sym} ptr
	// address, so the loaded value is y itself: reinterpret the float bits
	// as an integer (MOVLf2i) and OR directly, skipping the memory access.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ORL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ORLmodify simplifies ORLmodify (read-modify-write
// OR of a 32-bit memory location): it turns OR-ing a single dynamic bit into
// a BTSLmodify, and folds constant address arithmetic into the offset/symbol.
// Returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64ORLmodify(v *Value) bool {
	v_2 := v.Args[2] // mem
	v_1 := v.Args[1] // value to OR in
	v_0 := v.Args[0] // address
	b := v.Block
	// match: (ORLmodify [off] {sym} ptr s:(SHLL (MOVLconst [1]) <t> x) mem)
	// result: (BTSLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
	// OR-ing (1 << x) sets one bit, which BTS does directly. The AND keeps
	// the bit index in [0,31], mirroring SHLL's use of only the low 5 bits
	// of the shift count.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		s := v_1
		if s.Op != OpAMD64SHLL {
			break
		}
		t := s.Type
		x := s.Args[1] // shift count
		s_0 := s.Args[0]
		if s_0.Op != OpAMD64MOVLconst || auxIntToInt32(s_0.AuxInt) != 1 {
			break
		}
		mem := v_2
		v.reset(OpAMD64BTSLmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64ANDLconst, t)
		v0.AuxInt = int32ToAuxInt(31)
		v0.AddArg(x)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (ORLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ORLmodify [off1+off2] {sym} base val mem)
	// Fold the ADDQconst into the addressing offset when the sum fits.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ORLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	// Same folding for LEAQ, additionally merging the symbol auxes.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
18554 func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
18555         v_1 := v.Args[1]
18556         v_0 := v.Args[0]
18557         b := v.Block
18558         typ := &b.Func.Config.Types
18559         // match: (ORQ (SHLQ (MOVQconst [1]) y) x)
18560         // result: (BTSQ x y)
18561         for {
18562                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18563                         if v_0.Op != OpAMD64SHLQ {
18564                                 continue
18565                         }
18566                         y := v_0.Args[1]
18567                         v_0_0 := v_0.Args[0]
18568                         if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
18569                                 continue
18570                         }
18571                         x := v_1
18572                         v.reset(OpAMD64BTSQ)
18573                         v.AddArg2(x, y)
18574                         return true
18575                 }
18576                 break
18577         }
18578         // match: (ORQ (MOVQconst [c]) x)
18579         // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128
18580         // result: (BTSQconst [int8(log64(c))] x)
18581         for {
18582                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18583                         if v_0.Op != OpAMD64MOVQconst {
18584                                 continue
18585                         }
18586                         c := auxIntToInt64(v_0.AuxInt)
18587                         x := v_1
18588                         if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) {
18589                                 continue
18590                         }
18591                         v.reset(OpAMD64BTSQconst)
18592                         v.AuxInt = int8ToAuxInt(int8(log64(c)))
18593                         v.AddArg(x)
18594                         return true
18595                 }
18596                 break
18597         }
18598         // match: (ORQ x (MOVQconst [c]))
18599         // cond: is32Bit(c)
18600         // result: (ORQconst [int32(c)] x)
18601         for {
18602                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18603                         x := v_0
18604                         if v_1.Op != OpAMD64MOVQconst {
18605                                 continue
18606                         }
18607                         c := auxIntToInt64(v_1.AuxInt)
18608                         if !(is32Bit(c)) {
18609                                 continue
18610                         }
18611                         v.reset(OpAMD64ORQconst)
18612                         v.AuxInt = int32ToAuxInt(int32(c))
18613                         v.AddArg(x)
18614                         return true
18615                 }
18616                 break
18617         }
18618         // match: (ORQ x (MOVLconst [c]))
18619         // result: (ORQconst [c] x)
18620         for {
18621                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18622                         x := v_0
18623                         if v_1.Op != OpAMD64MOVLconst {
18624                                 continue
18625                         }
18626                         c := auxIntToInt32(v_1.AuxInt)
18627                         v.reset(OpAMD64ORQconst)
18628                         v.AuxInt = int32ToAuxInt(c)
18629                         v.AddArg(x)
18630                         return true
18631                 }
18632                 break
18633         }
18634         // match: (ORQ (SHLQconst x [c]) (SHRQconst x [d]))
18635         // cond: d==64-c
18636         // result: (ROLQconst x [c])
18637         for {
18638                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18639                         if v_0.Op != OpAMD64SHLQconst {
18640                                 continue
18641                         }
18642                         c := auxIntToInt8(v_0.AuxInt)
18643                         x := v_0.Args[0]
18644                         if v_1.Op != OpAMD64SHRQconst {
18645                                 continue
18646                         }
18647                         d := auxIntToInt8(v_1.AuxInt)
18648                         if x != v_1.Args[0] || !(d == 64-c) {
18649                                 continue
18650                         }
18651                         v.reset(OpAMD64ROLQconst)
18652                         v.AuxInt = int8ToAuxInt(c)
18653                         v.AddArg(x)
18654                         return true
18655                 }
18656                 break
18657         }
18658         // match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))))
18659         // result: (ROLQ x y)
18660         for {
18661                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18662                         if v_0.Op != OpAMD64SHLQ {
18663                                 continue
18664                         }
18665                         y := v_0.Args[1]
18666                         x := v_0.Args[0]
18667                         if v_1.Op != OpAMD64ANDQ {
18668                                 continue
18669                         }
18670                         _ = v_1.Args[1]
18671                         v_1_0 := v_1.Args[0]
18672                         v_1_1 := v_1.Args[1]
18673                         for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
18674                                 if v_1_0.Op != OpAMD64SHRQ {
18675                                         continue
18676                                 }
18677                                 _ = v_1_0.Args[1]
18678                                 if x != v_1_0.Args[0] {
18679                                         continue
18680                                 }
18681                                 v_1_0_1 := v_1_0.Args[1]
18682                                 if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
18683                                         continue
18684                                 }
18685                                 v_1_1_0 := v_1_1.Args[0]
18686                                 if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
18687                                         continue
18688                                 }
18689                                 v_1_1_0_0 := v_1_1_0.Args[0]
18690                                 if v_1_1_0_0.Op != OpAMD64NEGQ {
18691                                         continue
18692                                 }
18693                                 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
18694                                 if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
18695                                         continue
18696                                 }
18697                                 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
18698                                 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
18699                                         continue
18700                                 }
18701                                 v.reset(OpAMD64ROLQ)
18702                                 v.AddArg2(x, y)
18703                                 return true
18704                         }
18705                 }
18706                 break
18707         }
18708         // match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))))
18709         // result: (ROLQ x y)
18710         for {
18711                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18712                         if v_0.Op != OpAMD64SHLQ {
18713                                 continue
18714                         }
18715                         y := v_0.Args[1]
18716                         x := v_0.Args[0]
18717                         if v_1.Op != OpAMD64ANDQ {
18718                                 continue
18719                         }
18720                         _ = v_1.Args[1]
18721                         v_1_0 := v_1.Args[0]
18722                         v_1_1 := v_1.Args[1]
18723                         for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
18724                                 if v_1_0.Op != OpAMD64SHRQ {
18725                                         continue
18726                                 }
18727                                 _ = v_1_0.Args[1]
18728                                 if x != v_1_0.Args[0] {
18729                                         continue
18730                                 }
18731                                 v_1_0_1 := v_1_0.Args[1]
18732                                 if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
18733                                         continue
18734                                 }
18735                                 v_1_1_0 := v_1_1.Args[0]
18736                                 if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
18737                                         continue
18738                                 }
18739                                 v_1_1_0_0 := v_1_1_0.Args[0]
18740                                 if v_1_1_0_0.Op != OpAMD64NEGL {
18741                                         continue
18742                                 }
18743                                 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
18744                                 if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
18745                                         continue
18746                                 }
18747                                 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
18748                                 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
18749                                         continue
18750                                 }
18751                                 v.reset(OpAMD64ROLQ)
18752                                 v.AddArg2(x, y)
18753                                 return true
18754                         }
18755                 }
18756                 break
18757         }
18758         // match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))))
18759         // result: (RORQ x y)
18760         for {
18761                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18762                         if v_0.Op != OpAMD64SHRQ {
18763                                 continue
18764                         }
18765                         y := v_0.Args[1]
18766                         x := v_0.Args[0]
18767                         if v_1.Op != OpAMD64ANDQ {
18768                                 continue
18769                         }
18770                         _ = v_1.Args[1]
18771                         v_1_0 := v_1.Args[0]
18772                         v_1_1 := v_1.Args[1]
18773                         for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
18774                                 if v_1_0.Op != OpAMD64SHLQ {
18775                                         continue
18776                                 }
18777                                 _ = v_1_0.Args[1]
18778                                 if x != v_1_0.Args[0] {
18779                                         continue
18780                                 }
18781                                 v_1_0_1 := v_1_0.Args[1]
18782                                 if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
18783                                         continue
18784                                 }
18785                                 v_1_1_0 := v_1_1.Args[0]
18786                                 if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
18787                                         continue
18788                                 }
18789                                 v_1_1_0_0 := v_1_1_0.Args[0]
18790                                 if v_1_1_0_0.Op != OpAMD64NEGQ {
18791                                         continue
18792                                 }
18793                                 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
18794                                 if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
18795                                         continue
18796                                 }
18797                                 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
18798                                 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
18799                                         continue
18800                                 }
18801                                 v.reset(OpAMD64RORQ)
18802                                 v.AddArg2(x, y)
18803                                 return true
18804                         }
18805                 }
18806                 break
18807         }
18808         // match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))))
18809         // result: (RORQ x y)
18810         for {
18811                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18812                         if v_0.Op != OpAMD64SHRQ {
18813                                 continue
18814                         }
18815                         y := v_0.Args[1]
18816                         x := v_0.Args[0]
18817                         if v_1.Op != OpAMD64ANDQ {
18818                                 continue
18819                         }
18820                         _ = v_1.Args[1]
18821                         v_1_0 := v_1.Args[0]
18822                         v_1_1 := v_1.Args[1]
18823                         for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
18824                                 if v_1_0.Op != OpAMD64SHLQ {
18825                                         continue
18826                                 }
18827                                 _ = v_1_0.Args[1]
18828                                 if x != v_1_0.Args[0] {
18829                                         continue
18830                                 }
18831                                 v_1_0_1 := v_1_0.Args[1]
18832                                 if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
18833                                         continue
18834                                 }
18835                                 v_1_1_0 := v_1_1.Args[0]
18836                                 if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
18837                                         continue
18838                                 }
18839                                 v_1_1_0_0 := v_1_1_0.Args[0]
18840                                 if v_1_1_0_0.Op != OpAMD64NEGL {
18841                                         continue
18842                                 }
18843                                 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
18844                                 if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
18845                                         continue
18846                                 }
18847                                 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
18848                                 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
18849                                         continue
18850                                 }
18851                                 v.reset(OpAMD64RORQ)
18852                                 v.AddArg2(x, y)
18853                                 return true
18854                         }
18855                 }
18856                 break
18857         }
18858         // match: (ORQ (SHRQ lo bits) (SHLQ hi (NEGQ bits)))
18859         // result: (SHRDQ lo hi bits)
18860         for {
18861                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18862                         if v_0.Op != OpAMD64SHRQ {
18863                                 continue
18864                         }
18865                         bits := v_0.Args[1]
18866                         lo := v_0.Args[0]
18867                         if v_1.Op != OpAMD64SHLQ {
18868                                 continue
18869                         }
18870                         _ = v_1.Args[1]
18871                         hi := v_1.Args[0]
18872                         v_1_1 := v_1.Args[1]
18873                         if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
18874                                 continue
18875                         }
18876                         v.reset(OpAMD64SHRDQ)
18877                         v.AddArg3(lo, hi, bits)
18878                         return true
18879                 }
18880                 break
18881         }
18882         // match: (ORQ (SHLQ lo bits) (SHRQ hi (NEGQ bits)))
18883         // result: (SHLDQ lo hi bits)
18884         for {
18885                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18886                         if v_0.Op != OpAMD64SHLQ {
18887                                 continue
18888                         }
18889                         bits := v_0.Args[1]
18890                         lo := v_0.Args[0]
18891                         if v_1.Op != OpAMD64SHRQ {
18892                                 continue
18893                         }
18894                         _ = v_1.Args[1]
18895                         hi := v_1.Args[0]
18896                         v_1_1 := v_1.Args[1]
18897                         if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
18898                                 continue
18899                         }
18900                         v.reset(OpAMD64SHLDQ)
18901                         v.AddArg3(lo, hi, bits)
18902                         return true
18903                 }
18904                 break
18905         }
18906         // match: (ORQ (MOVQconst [c]) (MOVQconst [d]))
18907         // result: (MOVQconst [c|d])
18908         for {
18909                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18910                         if v_0.Op != OpAMD64MOVQconst {
18911                                 continue
18912                         }
18913                         c := auxIntToInt64(v_0.AuxInt)
18914                         if v_1.Op != OpAMD64MOVQconst {
18915                                 continue
18916                         }
18917                         d := auxIntToInt64(v_1.AuxInt)
18918                         v.reset(OpAMD64MOVQconst)
18919                         v.AuxInt = int64ToAuxInt(c | d)
18920                         return true
18921                 }
18922                 break
18923         }
18924         // match: (ORQ x x)
18925         // result: x
18926         for {
18927                 x := v_0
18928                 if x != v_1 {
18929                         break
18930                 }
18931                 v.copyOf(x)
18932                 return true
18933         }
18934         // match: (ORQ x0:(MOVBload [i0] {s} p mem) sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)))
18935         // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
18936         // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
18937         for {
18938                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18939                         x0 := v_0
18940                         if x0.Op != OpAMD64MOVBload {
18941                                 continue
18942                         }
18943                         i0 := auxIntToInt32(x0.AuxInt)
18944                         s := auxToSym(x0.Aux)
18945                         mem := x0.Args[1]
18946                         p := x0.Args[0]
18947                         sh := v_1
18948                         if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 8 {
18949                                 continue
18950                         }
18951                         x1 := sh.Args[0]
18952                         if x1.Op != OpAMD64MOVBload {
18953                                 continue
18954                         }
18955                         i1 := auxIntToInt32(x1.AuxInt)
18956                         if auxToSym(x1.Aux) != s {
18957                                 continue
18958                         }
18959                         _ = x1.Args[1]
18960                         if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
18961                                 continue
18962                         }
18963                         b = mergePoint(b, x0, x1)
18964                         v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
18965                         v.copyOf(v0)
18966                         v0.AuxInt = int32ToAuxInt(i0)
18967                         v0.Aux = symToAux(s)
18968                         v0.AddArg2(p, mem)
18969                         return true
18970                 }
18971                 break
18972         }
18973         // match: (ORQ x0:(MOVBload [i] {s} p0 mem) sh:(SHLQconst [8] x1:(MOVBload [i] {s} p1 mem)))
18974         // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
18975         // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p0 mem)
18976         for {
18977                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18978                         x0 := v_0
18979                         if x0.Op != OpAMD64MOVBload {
18980                                 continue
18981                         }
18982                         i := auxIntToInt32(x0.AuxInt)
18983                         s := auxToSym(x0.Aux)
18984                         mem := x0.Args[1]
18985                         p0 := x0.Args[0]
18986                         sh := v_1
18987                         if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 8 {
18988                                 continue
18989                         }
18990                         x1 := sh.Args[0]
18991                         if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
18992                                 continue
18993                         }
18994                         _ = x1.Args[1]
18995                         p1 := x1.Args[0]
18996                         if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
18997                                 continue
18998                         }
18999                         b = mergePoint(b, x0, x1)
19000                         v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
19001                         v.copyOf(v0)
19002                         v0.AuxInt = int32ToAuxInt(i)
19003                         v0.Aux = symToAux(s)
19004                         v0.AddArg2(p0, mem)
19005                         return true
19006                 }
19007                 break
19008         }
19009         // match: (ORQ x0:(MOVWload [i0] {s} p mem) sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem)))
19010         // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
19011         // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
19012         for {
19013                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19014                         x0 := v_0
19015                         if x0.Op != OpAMD64MOVWload {
19016                                 continue
19017                         }
19018                         i0 := auxIntToInt32(x0.AuxInt)
19019                         s := auxToSym(x0.Aux)
19020                         mem := x0.Args[1]
19021                         p := x0.Args[0]
19022                         sh := v_1
19023                         if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 16 {
19024                                 continue
19025                         }
19026                         x1 := sh.Args[0]
19027                         if x1.Op != OpAMD64MOVWload {
19028                                 continue
19029                         }
19030                         i1 := auxIntToInt32(x1.AuxInt)
19031                         if auxToSym(x1.Aux) != s {
19032                                 continue
19033                         }
19034                         _ = x1.Args[1]
19035                         if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
19036                                 continue
19037                         }
19038                         b = mergePoint(b, x0, x1)
19039                         v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
19040                         v.copyOf(v0)
19041                         v0.AuxInt = int32ToAuxInt(i0)
19042                         v0.Aux = symToAux(s)
19043                         v0.AddArg2(p, mem)
19044                         return true
19045                 }
19046                 break
19047         }
19048         // match: (ORQ x0:(MOVWload [i] {s} p0 mem) sh:(SHLQconst [16] x1:(MOVWload [i] {s} p1 mem)))
19049         // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
19050         // result: @mergePoint(b,x0,x1) (MOVLload [i] {s} p0 mem)
19051         for {
19052                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19053                         x0 := v_0
19054                         if x0.Op != OpAMD64MOVWload {
19055                                 continue
19056                         }
19057                         i := auxIntToInt32(x0.AuxInt)
19058                         s := auxToSym(x0.Aux)
19059                         mem := x0.Args[1]
19060                         p0 := x0.Args[0]
19061                         sh := v_1
19062                         if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 16 {
19063                                 continue
19064                         }
19065                         x1 := sh.Args[0]
19066                         if x1.Op != OpAMD64MOVWload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
19067                                 continue
19068                         }
19069                         _ = x1.Args[1]
19070                         p1 := x1.Args[0]
19071                         if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
19072                                 continue
19073                         }
19074                         b = mergePoint(b, x0, x1)
19075                         v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
19076                         v.copyOf(v0)
19077                         v0.AuxInt = int32ToAuxInt(i)
19078                         v0.Aux = symToAux(s)
19079                         v0.AddArg2(p0, mem)
19080                         return true
19081                 }
19082                 break
19083         }
19084         // match: (ORQ x0:(MOVLload [i0] {s} p mem) sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)))
19085         // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
19086         // result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem)
19087         for {
19088                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19089                         x0 := v_0
19090                         if x0.Op != OpAMD64MOVLload {
19091                                 continue
19092                         }
19093                         i0 := auxIntToInt32(x0.AuxInt)
19094                         s := auxToSym(x0.Aux)
19095                         mem := x0.Args[1]
19096                         p := x0.Args[0]
19097                         sh := v_1
19098                         if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 {
19099                                 continue
19100                         }
19101                         x1 := sh.Args[0]
19102                         if x1.Op != OpAMD64MOVLload {
19103                                 continue
19104                         }
19105                         i1 := auxIntToInt32(x1.AuxInt)
19106                         if auxToSym(x1.Aux) != s {
19107                                 continue
19108                         }
19109                         _ = x1.Args[1]
19110                         if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
19111                                 continue
19112                         }
19113                         b = mergePoint(b, x0, x1)
19114                         v0 := b.NewValue0(x1.Pos, OpAMD64MOVQload, typ.UInt64)
19115                         v.copyOf(v0)
19116                         v0.AuxInt = int32ToAuxInt(i0)
19117                         v0.Aux = symToAux(s)
19118                         v0.AddArg2(p, mem)
19119                         return true
19120                 }
19121                 break
19122         }
19123         // match: (ORQ x0:(MOVLload [i] {s} p0 mem) sh:(SHLQconst [32] x1:(MOVLload [i] {s} p1 mem)))
19124         // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 4) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
19125         // result: @mergePoint(b,x0,x1) (MOVQload [i] {s} p0 mem)
19126         for {
19127                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19128                         x0 := v_0
19129                         if x0.Op != OpAMD64MOVLload {
19130                                 continue
19131                         }
19132                         i := auxIntToInt32(x0.AuxInt)
19133                         s := auxToSym(x0.Aux)
19134                         mem := x0.Args[1]
19135                         p0 := x0.Args[0]
19136                         sh := v_1
19137                         if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 {
19138                                 continue
19139                         }
19140                         x1 := sh.Args[0]
19141                         if x1.Op != OpAMD64MOVLload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
19142                                 continue
19143                         }
19144                         _ = x1.Args[1]
19145                         p1 := x1.Args[0]
19146                         if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 4) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
19147                                 continue
19148                         }
19149                         b = mergePoint(b, x0, x1)
19150                         v0 := b.NewValue0(x1.Pos, OpAMD64MOVQload, typ.UInt64)
19151                         v.copyOf(v0)
19152                         v0.AuxInt = int32ToAuxInt(i)
19153                         v0.Aux = symToAux(s)
19154                         v0.AddArg2(p0, mem)
19155                         return true
19156                 }
19157                 break
19158         }
19159         // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y))
19160         // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
19161         // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
19162         for {
19163                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19164                         s1 := v_0
19165                         if s1.Op != OpAMD64SHLQconst {
19166                                 continue
19167                         }
19168                         j1 := auxIntToInt8(s1.AuxInt)
19169                         x1 := s1.Args[0]
19170                         if x1.Op != OpAMD64MOVBload {
19171                                 continue
19172                         }
19173                         i1 := auxIntToInt32(x1.AuxInt)
19174                         s := auxToSym(x1.Aux)
19175                         mem := x1.Args[1]
19176                         p := x1.Args[0]
19177                         or := v_1
19178                         if or.Op != OpAMD64ORQ {
19179                                 continue
19180                         }
19181                         _ = or.Args[1]
19182                         or_0 := or.Args[0]
19183                         or_1 := or.Args[1]
19184                         for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
19185                                 s0 := or_0
19186                                 if s0.Op != OpAMD64SHLQconst {
19187                                         continue
19188                                 }
19189                                 j0 := auxIntToInt8(s0.AuxInt)
19190                                 x0 := s0.Args[0]
19191                                 if x0.Op != OpAMD64MOVBload {
19192                                         continue
19193                                 }
19194                                 i0 := auxIntToInt32(x0.AuxInt)
19195                                 if auxToSym(x0.Aux) != s {
19196                                         continue
19197                                 }
19198                                 _ = x0.Args[1]
19199                                 if p != x0.Args[0] || mem != x0.Args[1] {
19200                                         continue
19201                                 }
19202                                 y := or_1
19203                                 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
19204                                         continue
19205                                 }
19206                                 b = mergePoint(b, x0, x1, y)
19207                                 v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
19208                                 v.copyOf(v0)
19209                                 v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
19210                                 v1.AuxInt = int8ToAuxInt(j0)
19211                                 v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
19212                                 v2.AuxInt = int32ToAuxInt(i0)
19213                                 v2.Aux = symToAux(s)
19214                                 v2.AddArg2(p, mem)
19215                                 v1.AddArg(v2)
19216                                 v0.AddArg2(v1, y)
19217                                 return true
19218                         }
19219                 }
19220                 break
19221         }
19222         // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i] {s} p1 mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i] {s} p0 mem)) y))
19223         // cond: j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
19224         // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i] {s} p0 mem)) y)
19225         for {
19226                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19227                         s1 := v_0
19228                         if s1.Op != OpAMD64SHLQconst {
19229                                 continue
19230                         }
19231                         j1 := auxIntToInt8(s1.AuxInt)
19232                         x1 := s1.Args[0]
19233                         if x1.Op != OpAMD64MOVBload {
19234                                 continue
19235                         }
19236                         i := auxIntToInt32(x1.AuxInt)
19237                         s := auxToSym(x1.Aux)
19238                         mem := x1.Args[1]
19239                         p1 := x1.Args[0]
19240                         or := v_1
19241                         if or.Op != OpAMD64ORQ {
19242                                 continue
19243                         }
19244                         _ = or.Args[1]
19245                         or_0 := or.Args[0]
19246                         or_1 := or.Args[1]
19247                         for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
19248                                 s0 := or_0
19249                                 if s0.Op != OpAMD64SHLQconst {
19250                                         continue
19251                                 }
19252                                 j0 := auxIntToInt8(s0.AuxInt)
19253                                 x0 := s0.Args[0]
19254                                 if x0.Op != OpAMD64MOVBload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
19255                                         continue
19256                                 }
19257                                 _ = x0.Args[1]
19258                                 p0 := x0.Args[0]
19259                                 if mem != x0.Args[1] {
19260                                         continue
19261                                 }
19262                                 y := or_1
19263                                 if !(j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
19264                                         continue
19265                                 }
19266                                 b = mergePoint(b, x0, x1, y)
19267                                 v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
19268                                 v.copyOf(v0)
19269                                 v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
19270                                 v1.AuxInt = int8ToAuxInt(j0)
19271                                 v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
19272                                 v2.AuxInt = int32ToAuxInt(i)
19273                                 v2.Aux = symToAux(s)
19274                                 v2.AddArg2(p0, mem)
19275                                 v1.AddArg(v2)
19276                                 v0.AddArg2(v1, y)
19277                                 return true
19278                         }
19279                 }
19280                 break
19281         }
19282         // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y))
19283         // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
19284         // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
19285         for {
19286                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19287                         s1 := v_0
19288                         if s1.Op != OpAMD64SHLQconst {
19289                                 continue
19290                         }
19291                         j1 := auxIntToInt8(s1.AuxInt)
19292                         x1 := s1.Args[0]
19293                         if x1.Op != OpAMD64MOVWload {
19294                                 continue
19295                         }
19296                         i1 := auxIntToInt32(x1.AuxInt)
19297                         s := auxToSym(x1.Aux)
19298                         mem := x1.Args[1]
19299                         p := x1.Args[0]
19300                         or := v_1
19301                         if or.Op != OpAMD64ORQ {
19302                                 continue
19303                         }
19304                         _ = or.Args[1]
19305                         or_0 := or.Args[0]
19306                         or_1 := or.Args[1]
19307                         for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
19308                                 s0 := or_0
19309                                 if s0.Op != OpAMD64SHLQconst {
19310                                         continue
19311                                 }
19312                                 j0 := auxIntToInt8(s0.AuxInt)
19313                                 x0 := s0.Args[0]
19314                                 if x0.Op != OpAMD64MOVWload {
19315                                         continue
19316                                 }
19317                                 i0 := auxIntToInt32(x0.AuxInt)
19318                                 if auxToSym(x0.Aux) != s {
19319                                         continue
19320                                 }
19321                                 _ = x0.Args[1]
19322                                 if p != x0.Args[0] || mem != x0.Args[1] {
19323                                         continue
19324                                 }
19325                                 y := or_1
19326                                 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
19327                                         continue
19328                                 }
19329                                 b = mergePoint(b, x0, x1, y)
19330                                 v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
19331                                 v.copyOf(v0)
19332                                 v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
19333                                 v1.AuxInt = int8ToAuxInt(j0)
19334                                 v2 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
19335                                 v2.AuxInt = int32ToAuxInt(i0)
19336                                 v2.Aux = symToAux(s)
19337                                 v2.AddArg2(p, mem)
19338                                 v1.AddArg(v2)
19339                                 v0.AddArg2(v1, y)
19340                                 return true
19341                         }
19342                 }
19343                 break
19344         }
19345         // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i] {s} p1 mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i] {s} p0 mem)) y))
19346         // cond: j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
19347         // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i] {s} p0 mem)) y)
19348         for {
19349                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19350                         s1 := v_0
19351                         if s1.Op != OpAMD64SHLQconst {
19352                                 continue
19353                         }
19354                         j1 := auxIntToInt8(s1.AuxInt)
19355                         x1 := s1.Args[0]
19356                         if x1.Op != OpAMD64MOVWload {
19357                                 continue
19358                         }
19359                         i := auxIntToInt32(x1.AuxInt)
19360                         s := auxToSym(x1.Aux)
19361                         mem := x1.Args[1]
19362                         p1 := x1.Args[0]
19363                         or := v_1
19364                         if or.Op != OpAMD64ORQ {
19365                                 continue
19366                         }
19367                         _ = or.Args[1]
19368                         or_0 := or.Args[0]
19369                         or_1 := or.Args[1]
19370                         for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
19371                                 s0 := or_0
19372                                 if s0.Op != OpAMD64SHLQconst {
19373                                         continue
19374                                 }
19375                                 j0 := auxIntToInt8(s0.AuxInt)
19376                                 x0 := s0.Args[0]
19377                                 if x0.Op != OpAMD64MOVWload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
19378                                         continue
19379                                 }
19380                                 _ = x0.Args[1]
19381                                 p0 := x0.Args[0]
19382                                 if mem != x0.Args[1] {
19383                                         continue
19384                                 }
19385                                 y := or_1
19386                                 if !(j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
19387                                         continue
19388                                 }
19389                                 b = mergePoint(b, x0, x1, y)
19390                                 v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
19391                                 v.copyOf(v0)
19392                                 v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
19393                                 v1.AuxInt = int8ToAuxInt(j0)
19394                                 v2 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
19395                                 v2.AuxInt = int32ToAuxInt(i)
19396                                 v2.Aux = symToAux(s)
19397                                 v2.AddArg2(p0, mem)
19398                                 v1.AddArg(v2)
19399                                 v0.AddArg2(v1, y)
19400                                 return true
19401                         }
19402                 }
19403                 break
19404         }
19405         // match: (ORQ x1:(MOVBload [i1] {s} p mem) sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem)))
19406         // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
19407         // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
19408         for {
19409                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19410                         x1 := v_0
19411                         if x1.Op != OpAMD64MOVBload {
19412                                 continue
19413                         }
19414                         i1 := auxIntToInt32(x1.AuxInt)
19415                         s := auxToSym(x1.Aux)
19416                         mem := x1.Args[1]
19417                         p := x1.Args[0]
19418                         sh := v_1
19419                         if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 8 {
19420                                 continue
19421                         }
19422                         x0 := sh.Args[0]
19423                         if x0.Op != OpAMD64MOVBload {
19424                                 continue
19425                         }
19426                         i0 := auxIntToInt32(x0.AuxInt)
19427                         if auxToSym(x0.Aux) != s {
19428                                 continue
19429                         }
19430                         _ = x0.Args[1]
19431                         if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
19432                                 continue
19433                         }
19434                         b = mergePoint(b, x0, x1)
19435                         v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type)
19436                         v.copyOf(v0)
19437                         v0.AuxInt = int8ToAuxInt(8)
19438                         v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
19439                         v1.AuxInt = int32ToAuxInt(i0)
19440                         v1.Aux = symToAux(s)
19441                         v1.AddArg2(p, mem)
19442                         v0.AddArg(v1)
19443                         return true
19444                 }
19445                 break
19446         }
19447         // match: (ORQ x1:(MOVBload [i] {s} p1 mem) sh:(SHLQconst [8] x0:(MOVBload [i] {s} p0 mem)))
19448         // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
19449         // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i] {s} p0 mem))
19450         for {
19451                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19452                         x1 := v_0
19453                         if x1.Op != OpAMD64MOVBload {
19454                                 continue
19455                         }
19456                         i := auxIntToInt32(x1.AuxInt)
19457                         s := auxToSym(x1.Aux)
19458                         mem := x1.Args[1]
19459                         p1 := x1.Args[0]
19460                         sh := v_1
19461                         if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 8 {
19462                                 continue
19463                         }
19464                         x0 := sh.Args[0]
19465                         if x0.Op != OpAMD64MOVBload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
19466                                 continue
19467                         }
19468                         _ = x0.Args[1]
19469                         p0 := x0.Args[0]
19470                         if mem != x0.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
19471                                 continue
19472                         }
19473                         b = mergePoint(b, x0, x1)
19474                         v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type)
19475                         v.copyOf(v0)
19476                         v0.AuxInt = int8ToAuxInt(8)
19477                         v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
19478                         v1.AuxInt = int32ToAuxInt(i)
19479                         v1.Aux = symToAux(s)
19480                         v1.AddArg2(p0, mem)
19481                         v0.AddArg(v1)
19482                         return true
19483                 }
19484                 break
19485         }
19486         // match: (ORQ r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
19487         // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh)
19488         // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
19489         for {
19490                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19491                         r1 := v_0
19492                         if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
19493                                 continue
19494                         }
19495                         x1 := r1.Args[0]
19496                         if x1.Op != OpAMD64MOVWload {
19497                                 continue
19498                         }
19499                         i1 := auxIntToInt32(x1.AuxInt)
19500                         s := auxToSym(x1.Aux)
19501                         mem := x1.Args[1]
19502                         p := x1.Args[0]
19503                         sh := v_1
19504                         if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 16 {
19505                                 continue
19506                         }
19507                         r0 := sh.Args[0]
19508                         if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
19509                                 continue
19510                         }
19511                         x0 := r0.Args[0]
19512                         if x0.Op != OpAMD64MOVWload {
19513                                 continue
19514                         }
19515                         i0 := auxIntToInt32(x0.AuxInt)
19516                         if auxToSym(x0.Aux) != s {
19517                                 continue
19518                         }
19519                         _ = x0.Args[1]
19520                         if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
19521                                 continue
19522                         }
19523                         b = mergePoint(b, x0, x1)
19524                         v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type)
19525                         v.copyOf(v0)
19526                         v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
19527                         v1.AuxInt = int32ToAuxInt(i0)
19528                         v1.Aux = symToAux(s)
19529                         v1.AddArg2(p, mem)
19530                         v0.AddArg(v1)
19531                         return true
19532                 }
19533                 break
19534         }
19535         // match: (ORQ r1:(ROLWconst [8] x1:(MOVWload [i] {s} p1 mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i] {s} p0 mem))))
19536         // cond: x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh)
19537         // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i] {s} p0 mem))
19538         for {
19539                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19540                         r1 := v_0
19541                         if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
19542                                 continue
19543                         }
19544                         x1 := r1.Args[0]
19545                         if x1.Op != OpAMD64MOVWload {
19546                                 continue
19547                         }
19548                         i := auxIntToInt32(x1.AuxInt)
19549                         s := auxToSym(x1.Aux)
19550                         mem := x1.Args[1]
19551                         p1 := x1.Args[0]
19552                         sh := v_1
19553                         if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 16 {
19554                                 continue
19555                         }
19556                         r0 := sh.Args[0]
19557                         if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
19558                                 continue
19559                         }
19560                         x0 := r0.Args[0]
19561                         if x0.Op != OpAMD64MOVWload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
19562                                 continue
19563                         }
19564                         _ = x0.Args[1]
19565                         p0 := x0.Args[0]
19566                         if mem != x0.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
19567                                 continue
19568                         }
19569                         b = mergePoint(b, x0, x1)
19570                         v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type)
19571                         v.copyOf(v0)
19572                         v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
19573                         v1.AuxInt = int32ToAuxInt(i)
19574                         v1.Aux = symToAux(s)
19575                         v1.AddArg2(p0, mem)
19576                         v0.AddArg(v1)
19577                         return true
19578                 }
19579                 break
19580         }
19581         // match: (ORQ r1:(BSWAPL x1:(MOVLload [i1] {s} p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem))))
19582         // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh)
19583         // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem))
19584         for {
19585                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19586                         r1 := v_0
19587                         if r1.Op != OpAMD64BSWAPL {
19588                                 continue
19589                         }
19590                         x1 := r1.Args[0]
19591                         if x1.Op != OpAMD64MOVLload {
19592                                 continue
19593                         }
19594                         i1 := auxIntToInt32(x1.AuxInt)
19595                         s := auxToSym(x1.Aux)
19596                         mem := x1.Args[1]
19597                         p := x1.Args[0]
19598                         sh := v_1
19599                         if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 {
19600                                 continue
19601                         }
19602                         r0 := sh.Args[0]
19603                         if r0.Op != OpAMD64BSWAPL {
19604                                 continue
19605                         }
19606                         x0 := r0.Args[0]
19607                         if x0.Op != OpAMD64MOVLload {
19608                                 continue
19609                         }
19610                         i0 := auxIntToInt32(x0.AuxInt)
19611                         if auxToSym(x0.Aux) != s {
19612                                 continue
19613                         }
19614                         _ = x0.Args[1]
19615                         if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
19616                                 continue
19617                         }
19618                         b = mergePoint(b, x0, x1)
19619                         v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, v.Type)
19620                         v.copyOf(v0)
19621                         v1 := b.NewValue0(x0.Pos, OpAMD64MOVQload, typ.UInt64)
19622                         v1.AuxInt = int32ToAuxInt(i0)
19623                         v1.Aux = symToAux(s)
19624                         v1.AddArg2(p, mem)
19625                         v0.AddArg(v1)
19626                         return true
19627                 }
19628                 break
19629         }
19630         // match: (ORQ r1:(BSWAPL x1:(MOVLload [i] {s} p1 mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i] {s} p0 mem))))
19631         // cond: x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 4) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh)
19632         // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i] {s} p0 mem))
19633         for {
19634                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19635                         r1 := v_0
19636                         if r1.Op != OpAMD64BSWAPL {
19637                                 continue
19638                         }
19639                         x1 := r1.Args[0]
19640                         if x1.Op != OpAMD64MOVLload {
19641                                 continue
19642                         }
19643                         i := auxIntToInt32(x1.AuxInt)
19644                         s := auxToSym(x1.Aux)
19645                         mem := x1.Args[1]
19646                         p1 := x1.Args[0]
19647                         sh := v_1
19648                         if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 {
19649                                 continue
19650                         }
19651                         r0 := sh.Args[0]
19652                         if r0.Op != OpAMD64BSWAPL {
19653                                 continue
19654                         }
19655                         x0 := r0.Args[0]
19656                         if x0.Op != OpAMD64MOVLload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
19657                                 continue
19658                         }
19659                         _ = x0.Args[1]
19660                         p0 := x0.Args[0]
19661                         if mem != x0.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 4) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
19662                                 continue
19663                         }
19664                         b = mergePoint(b, x0, x1)
19665                         v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, v.Type)
19666                         v.copyOf(v0)
19667                         v1 := b.NewValue0(x0.Pos, OpAMD64MOVQload, typ.UInt64)
19668                         v1.AuxInt = int32ToAuxInt(i)
19669                         v1.Aux = symToAux(s)
19670                         v1.AddArg2(p0, mem)
19671                         v0.AddArg(v1)
19672                         return true
19673                 }
19674                 break
19675         }
19676         // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y))
19677         // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
19678         // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
19679         for {
19680                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19681                         s0 := v_0
19682                         if s0.Op != OpAMD64SHLQconst {
19683                                 continue
19684                         }
19685                         j0 := auxIntToInt8(s0.AuxInt)
19686                         x0 := s0.Args[0]
19687                         if x0.Op != OpAMD64MOVBload {
19688                                 continue
19689                         }
19690                         i0 := auxIntToInt32(x0.AuxInt)
19691                         s := auxToSym(x0.Aux)
19692                         mem := x0.Args[1]
19693                         p := x0.Args[0]
19694                         or := v_1
19695                         if or.Op != OpAMD64ORQ {
19696                                 continue
19697                         }
19698                         _ = or.Args[1]
19699                         or_0 := or.Args[0]
19700                         or_1 := or.Args[1]
19701                         for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
19702                                 s1 := or_0
19703                                 if s1.Op != OpAMD64SHLQconst {
19704                                         continue
19705                                 }
19706                                 j1 := auxIntToInt8(s1.AuxInt)
19707                                 x1 := s1.Args[0]
19708                                 if x1.Op != OpAMD64MOVBload {
19709                                         continue
19710                                 }
19711                                 i1 := auxIntToInt32(x1.AuxInt)
19712                                 if auxToSym(x1.Aux) != s {
19713                                         continue
19714                                 }
19715                                 _ = x1.Args[1]
19716                                 if p != x1.Args[0] || mem != x1.Args[1] {
19717                                         continue
19718                                 }
19719                                 y := or_1
19720                                 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
19721                                         continue
19722                                 }
19723                                 b = mergePoint(b, x0, x1, y)
19724                                 v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
19725                                 v.copyOf(v0)
19726                                 v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
19727                                 v1.AuxInt = int8ToAuxInt(j1)
19728                                 v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
19729                                 v2.AuxInt = int8ToAuxInt(8)
19730                                 v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
19731                                 v3.AuxInt = int32ToAuxInt(i0)
19732                                 v3.Aux = symToAux(s)
19733                                 v3.AddArg2(p, mem)
19734                                 v2.AddArg(v3)
19735                                 v1.AddArg(v2)
19736                                 v0.AddArg2(v1, y)
19737                                 return true
19738                         }
19739                 }
19740                 break
19741         }
19742         // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i] {s} p0 mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i] {s} p1 mem)) y))
19743         // cond: j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
19744         // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i] {s} p0 mem))) y)
19745         for {
19746                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19747                         s0 := v_0
19748                         if s0.Op != OpAMD64SHLQconst {
19749                                 continue
19750                         }
19751                         j0 := auxIntToInt8(s0.AuxInt)
19752                         x0 := s0.Args[0]
19753                         if x0.Op != OpAMD64MOVBload {
19754                                 continue
19755                         }
19756                         i := auxIntToInt32(x0.AuxInt)
19757                         s := auxToSym(x0.Aux)
19758                         mem := x0.Args[1]
19759                         p0 := x0.Args[0]
19760                         or := v_1
19761                         if or.Op != OpAMD64ORQ {
19762                                 continue
19763                         }
19764                         _ = or.Args[1]
19765                         or_0 := or.Args[0]
19766                         or_1 := or.Args[1]
19767                         for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
19768                                 s1 := or_0
19769                                 if s1.Op != OpAMD64SHLQconst {
19770                                         continue
19771                                 }
19772                                 j1 := auxIntToInt8(s1.AuxInt)
19773                                 x1 := s1.Args[0]
19774                                 if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
19775                                         continue
19776                                 }
19777                                 _ = x1.Args[1]
19778                                 p1 := x1.Args[0]
19779                                 if mem != x1.Args[1] {
19780                                         continue
19781                                 }
19782                                 y := or_1
19783                                 if !(j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
19784                                         continue
19785                                 }
19786                                 b = mergePoint(b, x0, x1, y)
19787                                 v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
19788                                 v.copyOf(v0)
19789                                 v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
19790                                 v1.AuxInt = int8ToAuxInt(j1)
19791                                 v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
19792                                 v2.AuxInt = int8ToAuxInt(8)
19793                                 v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
19794                                 v3.AuxInt = int32ToAuxInt(i)
19795                                 v3.Aux = symToAux(s)
19796                                 v3.AddArg2(p0, mem)
19797                                 v2.AddArg(v3)
19798                                 v1.AddArg(v2)
19799                                 v0.AddArg2(v1, y)
19800                                 return true
19801                         }
19802                 }
19803                 break
19804         }
19805         // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y))
19806         // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, r0, r1, s0, s1, or)
19807         // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
19808         for {
19809                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19810                         s0 := v_0
19811                         if s0.Op != OpAMD64SHLQconst {
19812                                 continue
19813                         }
19814                         j0 := auxIntToInt8(s0.AuxInt)
19815                         r0 := s0.Args[0]
19816                         if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
19817                                 continue
19818                         }
19819                         x0 := r0.Args[0]
19820                         if x0.Op != OpAMD64MOVWload {
19821                                 continue
19822                         }
19823                         i0 := auxIntToInt32(x0.AuxInt)
19824                         s := auxToSym(x0.Aux)
19825                         mem := x0.Args[1]
19826                         p := x0.Args[0]
19827                         or := v_1
19828                         if or.Op != OpAMD64ORQ {
19829                                 continue
19830                         }
19831                         _ = or.Args[1]
19832                         or_0 := or.Args[0]
19833                         or_1 := or.Args[1]
19834                         for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
19835                                 s1 := or_0
19836                                 if s1.Op != OpAMD64SHLQconst {
19837                                         continue
19838                                 }
19839                                 j1 := auxIntToInt8(s1.AuxInt)
19840                                 r1 := s1.Args[0]
19841                                 if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
19842                                         continue
19843                                 }
19844                                 x1 := r1.Args[0]
19845                                 if x1.Op != OpAMD64MOVWload {
19846                                         continue
19847                                 }
19848                                 i1 := auxIntToInt32(x1.AuxInt)
19849                                 if auxToSym(x1.Aux) != s {
19850                                         continue
19851                                 }
19852                                 _ = x1.Args[1]
19853                                 if p != x1.Args[0] || mem != x1.Args[1] {
19854                                         continue
19855                                 }
19856                                 y := or_1
19857                                 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, r0, r1, s0, s1, or)) {
19858                                         continue
19859                                 }
19860                                 b = mergePoint(b, x0, x1, y)
19861                                 v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
19862                                 v.copyOf(v0)
19863                                 v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
19864                                 v1.AuxInt = int8ToAuxInt(j1)
19865                                 v2 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, typ.UInt32)
19866                                 v3 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
19867                                 v3.AuxInt = int32ToAuxInt(i0)
19868                                 v3.Aux = symToAux(s)
19869                                 v3.AddArg2(p, mem)
19870                                 v2.AddArg(v3)
19871                                 v1.AddArg(v2)
19872                                 v0.AddArg2(v1, y)
19873                                 return true
19874                         }
19875                 }
19876                 break
19877         }
19878         // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i] {s} p0 mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i] {s} p1 mem))) y))
19879         // cond: j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, r0, r1, s0, s1, or)
19880         // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i] {s} p0 mem))) y)
19881         for {
19882                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19883                         s0 := v_0
19884                         if s0.Op != OpAMD64SHLQconst {
19885                                 continue
19886                         }
19887                         j0 := auxIntToInt8(s0.AuxInt)
19888                         r0 := s0.Args[0]
19889                         if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
19890                                 continue
19891                         }
19892                         x0 := r0.Args[0]
19893                         if x0.Op != OpAMD64MOVWload {
19894                                 continue
19895                         }
19896                         i := auxIntToInt32(x0.AuxInt)
19897                         s := auxToSym(x0.Aux)
19898                         mem := x0.Args[1]
19899                         p0 := x0.Args[0]
19900                         or := v_1
19901                         if or.Op != OpAMD64ORQ {
19902                                 continue
19903                         }
19904                         _ = or.Args[1]
19905                         or_0 := or.Args[0]
19906                         or_1 := or.Args[1]
19907                         for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
19908                                 s1 := or_0
19909                                 if s1.Op != OpAMD64SHLQconst {
19910                                         continue
19911                                 }
19912                                 j1 := auxIntToInt8(s1.AuxInt)
19913                                 r1 := s1.Args[0]
19914                                 if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
19915                                         continue
19916                                 }
19917                                 x1 := r1.Args[0]
19918                                 if x1.Op != OpAMD64MOVWload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
19919                                         continue
19920                                 }
19921                                 _ = x1.Args[1]
19922                                 p1 := x1.Args[0]
19923                                 if mem != x1.Args[1] {
19924                                         continue
19925                                 }
19926                                 y := or_1
19927                                 if !(j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, r0, r1, s0, s1, or)) {
19928                                         continue
19929                                 }
19930                                 b = mergePoint(b, x0, x1, y)
19931                                 v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
19932                                 v.copyOf(v0)
19933                                 v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
19934                                 v1.AuxInt = int8ToAuxInt(j1)
19935                                 v2 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, typ.UInt32)
19936                                 v3 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
19937                                 v3.AuxInt = int32ToAuxInt(i)
19938                                 v3.Aux = symToAux(s)
19939                                 v3.AddArg2(p0, mem)
19940                                 v2.AddArg(v3)
19941                                 v1.AddArg(v2)
19942                                 v0.AddArg2(v1, y)
19943                                 return true
19944                         }
19945                 }
19946                 break
19947         }
19948         // match: (ORQ x l:(MOVQload [off] {sym} ptr mem))
19949         // cond: canMergeLoadClobber(v, l, x) && clobber(l)
19950         // result: (ORQload x [off] {sym} ptr mem)
19951         for {
19952                 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19953                         x := v_0
19954                         l := v_1
19955                         if l.Op != OpAMD64MOVQload {
19956                                 continue
19957                         }
19958                         off := auxIntToInt32(l.AuxInt)
19959                         sym := auxToSym(l.Aux)
19960                         mem := l.Args[1]
19961                         ptr := l.Args[0]
19962                         if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
19963                                 continue
19964                         }
19965                         v.reset(OpAMD64ORQload)
19966                         v.AuxInt = int32ToAuxInt(off)
19967                         v.Aux = symToAux(sym)
19968                         v.AddArg3(x, ptr, mem)
19969                         return true
19970                 }
19971                 break
19972         }
19973         return false
19974 }
19975 func rewriteValueAMD64_OpAMD64ORQconst(v *Value) bool {
19976         v_0 := v.Args[0]
19977         // match: (ORQconst [c] x)
19978         // cond: isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128
19979         // result: (BTSQconst [int8(log32(c))] x)
19980         for {
19981                 c := auxIntToInt32(v.AuxInt)
19982                 x := v_0
19983                 if !(isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128) {
19984                         break
19985                 }
19986                 v.reset(OpAMD64BTSQconst)
19987                 v.AuxInt = int8ToAuxInt(int8(log32(c)))
19988                 v.AddArg(x)
19989                 return true
19990         }
19991         // match: (ORQconst [c] (ORQconst [d] x))
19992         // result: (ORQconst [c | d] x)
19993         for {
19994                 c := auxIntToInt32(v.AuxInt)
19995                 if v_0.Op != OpAMD64ORQconst {
19996                         break
19997                 }
19998                 d := auxIntToInt32(v_0.AuxInt)
19999                 x := v_0.Args[0]
20000                 v.reset(OpAMD64ORQconst)
20001                 v.AuxInt = int32ToAuxInt(c | d)
20002                 v.AddArg(x)
20003                 return true
20004         }
20005         // match: (ORQconst [c] (BTSQconst [d] x))
20006         // cond: is32Bit(int64(c) | 1<<uint32(d))
20007         // result: (ORQconst [c | 1<<uint32(d)] x)
20008         for {
20009                 c := auxIntToInt32(v.AuxInt)
20010                 if v_0.Op != OpAMD64BTSQconst {
20011                         break
20012                 }
20013                 d := auxIntToInt8(v_0.AuxInt)
20014                 x := v_0.Args[0]
20015                 if !(is32Bit(int64(c) | 1<<uint32(d))) {
20016                         break
20017                 }
20018                 v.reset(OpAMD64ORQconst)
20019                 v.AuxInt = int32ToAuxInt(c | 1<<uint32(d))
20020                 v.AddArg(x)
20021                 return true
20022         }
20023         // match: (ORQconst [0] x)
20024         // result: x
20025         for {
20026                 if auxIntToInt32(v.AuxInt) != 0 {
20027                         break
20028                 }
20029                 x := v_0
20030                 v.copyOf(x)
20031                 return true
20032         }
20033         // match: (ORQconst [-1] _)
20034         // result: (MOVQconst [-1])
20035         for {
20036                 if auxIntToInt32(v.AuxInt) != -1 {
20037                         break
20038                 }
20039                 v.reset(OpAMD64MOVQconst)
20040                 v.AuxInt = int64ToAuxInt(-1)
20041                 return true
20042         }
20043         // match: (ORQconst [c] (MOVQconst [d]))
20044         // result: (MOVQconst [int64(c)|d])
20045         for {
20046                 c := auxIntToInt32(v.AuxInt)
20047                 if v_0.Op != OpAMD64MOVQconst {
20048                         break
20049                 }
20050                 d := auxIntToInt64(v_0.AuxInt)
20051                 v.reset(OpAMD64MOVQconst)
20052                 v.AuxInt = int64ToAuxInt(int64(c) | d)
20053                 return true
20054         }
20055         return false
20056 }
// rewriteValueAMD64_OpAMD64ORQconstmodify applies rewrite rules to
// ORQconstmodify (OR a constant into memory), folding address
// arithmetic from the address operand into the op's ValAndOff/sym aux
// fields. Rules are tried in order; the first match rewrites v in place
// and returns true. Returns false if no rule applies.
func rewriteValueAMD64_OpAMD64ORQconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		// Fold only when the combined offset still fits the ValAndOff encoding.
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ORQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		// Both the offset must fit and the two symbols must be mergeable
		// (at most one of them may be non-nil).
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ORQload applies rewrite rules to ORQload
// (OR with a memory operand): it folds address arithmetic into the
// load's offset/sym aux fields, and forwards a just-stored SSE value to
// avoid the memory round trip. Rules are tried in order; the first
// match rewrites v in place and returns true.
func rewriteValueAMD64_OpAMD64ORQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ORQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ORQload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		// The summed displacement must still fit in a signed 32-bit field.
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ORQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (ORQ x (MOVQf2i y))
	// The load reads back exactly what the MOVSDstore just wrote to the
	// same address, so forward the stored value directly, reinterpreting
	// the float bits as an integer via MOVQf2i.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ORQ)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ORQmodify applies rewrite rules to ORQmodify
// (OR a register value into memory): it recognizes a single-bit OR and
// turns it into BTSQmodify, and folds address arithmetic into the op's
// offset/sym aux fields. Rules are tried in order; the first match
// rewrites v in place and returns true.
func rewriteValueAMD64_OpAMD64ORQmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (ORQmodify [off] {sym} ptr s:(SHLQ (MOVQconst [1]) <t> x) mem)
	// result: (BTSQmodify [off] {sym} ptr (ANDQconst <t> [63] x) mem)
	// ORing 1<<x into memory is a bit-set; the ANDQconst [63] makes the
	// shift-count masking of SHLQ explicit for BTS.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		s := v_1
		if s.Op != OpAMD64SHLQ {
			break
		}
		t := s.Type
		x := s.Args[1]
		s_0 := s.Args[0]
		if s_0.Op != OpAMD64MOVQconst || auxIntToInt64(s_0.AuxInt) != 1 {
			break
		}
		mem := v_2
		v.reset(OpAMD64BTSQmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64ANDQconst, t)
		v0.AuxInt = int32ToAuxInt(63)
		v0.AddArg(x)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (ORQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ORQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		// The summed displacement must still fit in a signed 32-bit field.
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ORQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
20256 func rewriteValueAMD64_OpAMD64ROLB(v *Value) bool {
20257         v_1 := v.Args[1]
20258         v_0 := v.Args[0]
20259         // match: (ROLB x (NEGQ y))
20260         // result: (RORB x y)
20261         for {
20262                 x := v_0
20263                 if v_1.Op != OpAMD64NEGQ {
20264                         break
20265                 }
20266                 y := v_1.Args[0]
20267                 v.reset(OpAMD64RORB)
20268                 v.AddArg2(x, y)
20269                 return true
20270         }
20271         // match: (ROLB x (NEGL y))
20272         // result: (RORB x y)
20273         for {
20274                 x := v_0
20275                 if v_1.Op != OpAMD64NEGL {
20276                         break
20277                 }
20278                 y := v_1.Args[0]
20279                 v.reset(OpAMD64RORB)
20280                 v.AddArg2(x, y)
20281                 return true
20282         }
20283         // match: (ROLB x (MOVQconst [c]))
20284         // result: (ROLBconst [int8(c&7) ] x)
20285         for {
20286                 x := v_0
20287                 if v_1.Op != OpAMD64MOVQconst {
20288                         break
20289                 }
20290                 c := auxIntToInt64(v_1.AuxInt)
20291                 v.reset(OpAMD64ROLBconst)
20292                 v.AuxInt = int8ToAuxInt(int8(c & 7))
20293                 v.AddArg(x)
20294                 return true
20295         }
20296         // match: (ROLB x (MOVLconst [c]))
20297         // result: (ROLBconst [int8(c&7) ] x)
20298         for {
20299                 x := v_0
20300                 if v_1.Op != OpAMD64MOVLconst {
20301                         break
20302                 }
20303                 c := auxIntToInt32(v_1.AuxInt)
20304                 v.reset(OpAMD64ROLBconst)
20305                 v.AuxInt = int8ToAuxInt(int8(c & 7))
20306                 v.AddArg(x)
20307                 return true
20308         }
20309         return false
20310 }
20311 func rewriteValueAMD64_OpAMD64ROLBconst(v *Value) bool {
20312         v_0 := v.Args[0]
20313         // match: (ROLBconst [c] (ROLBconst [d] x))
20314         // result: (ROLBconst [(c+d)& 7] x)
20315         for {
20316                 c := auxIntToInt8(v.AuxInt)
20317                 if v_0.Op != OpAMD64ROLBconst {
20318                         break
20319                 }
20320                 d := auxIntToInt8(v_0.AuxInt)
20321                 x := v_0.Args[0]
20322                 v.reset(OpAMD64ROLBconst)
20323                 v.AuxInt = int8ToAuxInt((c + d) & 7)
20324                 v.AddArg(x)
20325                 return true
20326         }
20327         // match: (ROLBconst x [0])
20328         // result: x
20329         for {
20330                 if auxIntToInt8(v.AuxInt) != 0 {
20331                         break
20332                 }
20333                 x := v_0
20334                 v.copyOf(x)
20335                 return true
20336         }
20337         return false
20338 }
20339 func rewriteValueAMD64_OpAMD64ROLL(v *Value) bool {
20340         v_1 := v.Args[1]
20341         v_0 := v.Args[0]
20342         // match: (ROLL x (NEGQ y))
20343         // result: (RORL x y)
20344         for {
20345                 x := v_0
20346                 if v_1.Op != OpAMD64NEGQ {
20347                         break
20348                 }
20349                 y := v_1.Args[0]
20350                 v.reset(OpAMD64RORL)
20351                 v.AddArg2(x, y)
20352                 return true
20353         }
20354         // match: (ROLL x (NEGL y))
20355         // result: (RORL x y)
20356         for {
20357                 x := v_0
20358                 if v_1.Op != OpAMD64NEGL {
20359                         break
20360                 }
20361                 y := v_1.Args[0]
20362                 v.reset(OpAMD64RORL)
20363                 v.AddArg2(x, y)
20364                 return true
20365         }
20366         // match: (ROLL x (MOVQconst [c]))
20367         // result: (ROLLconst [int8(c&31)] x)
20368         for {
20369                 x := v_0
20370                 if v_1.Op != OpAMD64MOVQconst {
20371                         break
20372                 }
20373                 c := auxIntToInt64(v_1.AuxInt)
20374                 v.reset(OpAMD64ROLLconst)
20375                 v.AuxInt = int8ToAuxInt(int8(c & 31))
20376                 v.AddArg(x)
20377                 return true
20378         }
20379         // match: (ROLL x (MOVLconst [c]))
20380         // result: (ROLLconst [int8(c&31)] x)
20381         for {
20382                 x := v_0
20383                 if v_1.Op != OpAMD64MOVLconst {
20384                         break
20385                 }
20386                 c := auxIntToInt32(v_1.AuxInt)
20387                 v.reset(OpAMD64ROLLconst)
20388                 v.AuxInt = int8ToAuxInt(int8(c & 31))
20389                 v.AddArg(x)
20390                 return true
20391         }
20392         return false
20393 }
20394 func rewriteValueAMD64_OpAMD64ROLLconst(v *Value) bool {
20395         v_0 := v.Args[0]
20396         // match: (ROLLconst [c] (ROLLconst [d] x))
20397         // result: (ROLLconst [(c+d)&31] x)
20398         for {
20399                 c := auxIntToInt8(v.AuxInt)
20400                 if v_0.Op != OpAMD64ROLLconst {
20401                         break
20402                 }
20403                 d := auxIntToInt8(v_0.AuxInt)
20404                 x := v_0.Args[0]
20405                 v.reset(OpAMD64ROLLconst)
20406                 v.AuxInt = int8ToAuxInt((c + d) & 31)
20407                 v.AddArg(x)
20408                 return true
20409         }
20410         // match: (ROLLconst x [0])
20411         // result: x
20412         for {
20413                 if auxIntToInt8(v.AuxInt) != 0 {
20414                         break
20415                 }
20416                 x := v_0
20417                 v.copyOf(x)
20418                 return true
20419         }
20420         return false
20421 }
20422 func rewriteValueAMD64_OpAMD64ROLQ(v *Value) bool {
20423         v_1 := v.Args[1]
20424         v_0 := v.Args[0]
20425         // match: (ROLQ x (NEGQ y))
20426         // result: (RORQ x y)
20427         for {
20428                 x := v_0
20429                 if v_1.Op != OpAMD64NEGQ {
20430                         break
20431                 }
20432                 y := v_1.Args[0]
20433                 v.reset(OpAMD64RORQ)
20434                 v.AddArg2(x, y)
20435                 return true
20436         }
20437         // match: (ROLQ x (NEGL y))
20438         // result: (RORQ x y)
20439         for {
20440                 x := v_0
20441                 if v_1.Op != OpAMD64NEGL {
20442                         break
20443                 }
20444                 y := v_1.Args[0]
20445                 v.reset(OpAMD64RORQ)
20446                 v.AddArg2(x, y)
20447                 return true
20448         }
20449         // match: (ROLQ x (MOVQconst [c]))
20450         // result: (ROLQconst [int8(c&63)] x)
20451         for {
20452                 x := v_0
20453                 if v_1.Op != OpAMD64MOVQconst {
20454                         break
20455                 }
20456                 c := auxIntToInt64(v_1.AuxInt)
20457                 v.reset(OpAMD64ROLQconst)
20458                 v.AuxInt = int8ToAuxInt(int8(c & 63))
20459                 v.AddArg(x)
20460                 return true
20461         }
20462         // match: (ROLQ x (MOVLconst [c]))
20463         // result: (ROLQconst [int8(c&63)] x)
20464         for {
20465                 x := v_0
20466                 if v_1.Op != OpAMD64MOVLconst {
20467                         break
20468                 }
20469                 c := auxIntToInt32(v_1.AuxInt)
20470                 v.reset(OpAMD64ROLQconst)
20471                 v.AuxInt = int8ToAuxInt(int8(c & 63))
20472                 v.AddArg(x)
20473                 return true
20474         }
20475         return false
20476 }
20477 func rewriteValueAMD64_OpAMD64ROLQconst(v *Value) bool {
20478         v_0 := v.Args[0]
20479         // match: (ROLQconst [c] (ROLQconst [d] x))
20480         // result: (ROLQconst [(c+d)&63] x)
20481         for {
20482                 c := auxIntToInt8(v.AuxInt)
20483                 if v_0.Op != OpAMD64ROLQconst {
20484                         break
20485                 }
20486                 d := auxIntToInt8(v_0.AuxInt)
20487                 x := v_0.Args[0]
20488                 v.reset(OpAMD64ROLQconst)
20489                 v.AuxInt = int8ToAuxInt((c + d) & 63)
20490                 v.AddArg(x)
20491                 return true
20492         }
20493         // match: (ROLQconst x [0])
20494         // result: x
20495         for {
20496                 if auxIntToInt8(v.AuxInt) != 0 {
20497                         break
20498                 }
20499                 x := v_0
20500                 v.copyOf(x)
20501                 return true
20502         }
20503         return false
20504 }
20505 func rewriteValueAMD64_OpAMD64ROLW(v *Value) bool {
20506         v_1 := v.Args[1]
20507         v_0 := v.Args[0]
20508         // match: (ROLW x (NEGQ y))
20509         // result: (RORW x y)
20510         for {
20511                 x := v_0
20512                 if v_1.Op != OpAMD64NEGQ {
20513                         break
20514                 }
20515                 y := v_1.Args[0]
20516                 v.reset(OpAMD64RORW)
20517                 v.AddArg2(x, y)
20518                 return true
20519         }
20520         // match: (ROLW x (NEGL y))
20521         // result: (RORW x y)
20522         for {
20523                 x := v_0
20524                 if v_1.Op != OpAMD64NEGL {
20525                         break
20526                 }
20527                 y := v_1.Args[0]
20528                 v.reset(OpAMD64RORW)
20529                 v.AddArg2(x, y)
20530                 return true
20531         }
20532         // match: (ROLW x (MOVQconst [c]))
20533         // result: (ROLWconst [int8(c&15)] x)
20534         for {
20535                 x := v_0
20536                 if v_1.Op != OpAMD64MOVQconst {
20537                         break
20538                 }
20539                 c := auxIntToInt64(v_1.AuxInt)
20540                 v.reset(OpAMD64ROLWconst)
20541                 v.AuxInt = int8ToAuxInt(int8(c & 15))
20542                 v.AddArg(x)
20543                 return true
20544         }
20545         // match: (ROLW x (MOVLconst [c]))
20546         // result: (ROLWconst [int8(c&15)] x)
20547         for {
20548                 x := v_0
20549                 if v_1.Op != OpAMD64MOVLconst {
20550                         break
20551                 }
20552                 c := auxIntToInt32(v_1.AuxInt)
20553                 v.reset(OpAMD64ROLWconst)
20554                 v.AuxInt = int8ToAuxInt(int8(c & 15))
20555                 v.AddArg(x)
20556                 return true
20557         }
20558         return false
20559 }
20560 func rewriteValueAMD64_OpAMD64ROLWconst(v *Value) bool {
20561         v_0 := v.Args[0]
20562         // match: (ROLWconst [c] (ROLWconst [d] x))
20563         // result: (ROLWconst [(c+d)&15] x)
20564         for {
20565                 c := auxIntToInt8(v.AuxInt)
20566                 if v_0.Op != OpAMD64ROLWconst {
20567                         break
20568                 }
20569                 d := auxIntToInt8(v_0.AuxInt)
20570                 x := v_0.Args[0]
20571                 v.reset(OpAMD64ROLWconst)
20572                 v.AuxInt = int8ToAuxInt((c + d) & 15)
20573                 v.AddArg(x)
20574                 return true
20575         }
20576         // match: (ROLWconst x [0])
20577         // result: x
20578         for {
20579                 if auxIntToInt8(v.AuxInt) != 0 {
20580                         break
20581                 }
20582                 x := v_0
20583                 v.copyOf(x)
20584                 return true
20585         }
20586         return false
20587 }
20588 func rewriteValueAMD64_OpAMD64RORB(v *Value) bool {
20589         v_1 := v.Args[1]
20590         v_0 := v.Args[0]
20591         // match: (RORB x (NEGQ y))
20592         // result: (ROLB x y)
20593         for {
20594                 x := v_0
20595                 if v_1.Op != OpAMD64NEGQ {
20596                         break
20597                 }
20598                 y := v_1.Args[0]
20599                 v.reset(OpAMD64ROLB)
20600                 v.AddArg2(x, y)
20601                 return true
20602         }
20603         // match: (RORB x (NEGL y))
20604         // result: (ROLB x y)
20605         for {
20606                 x := v_0
20607                 if v_1.Op != OpAMD64NEGL {
20608                         break
20609                 }
20610                 y := v_1.Args[0]
20611                 v.reset(OpAMD64ROLB)
20612                 v.AddArg2(x, y)
20613                 return true
20614         }
20615         // match: (RORB x (MOVQconst [c]))
20616         // result: (ROLBconst [int8((-c)&7) ] x)
20617         for {
20618                 x := v_0
20619                 if v_1.Op != OpAMD64MOVQconst {
20620                         break
20621                 }
20622                 c := auxIntToInt64(v_1.AuxInt)
20623                 v.reset(OpAMD64ROLBconst)
20624                 v.AuxInt = int8ToAuxInt(int8((-c) & 7))
20625                 v.AddArg(x)
20626                 return true
20627         }
20628         // match: (RORB x (MOVLconst [c]))
20629         // result: (ROLBconst [int8((-c)&7) ] x)
20630         for {
20631                 x := v_0
20632                 if v_1.Op != OpAMD64MOVLconst {
20633                         break
20634                 }
20635                 c := auxIntToInt32(v_1.AuxInt)
20636                 v.reset(OpAMD64ROLBconst)
20637                 v.AuxInt = int8ToAuxInt(int8((-c) & 7))
20638                 v.AddArg(x)
20639                 return true
20640         }
20641         return false
20642 }
20643 func rewriteValueAMD64_OpAMD64RORL(v *Value) bool {
20644         v_1 := v.Args[1]
20645         v_0 := v.Args[0]
20646         // match: (RORL x (NEGQ y))
20647         // result: (ROLL x y)
20648         for {
20649                 x := v_0
20650                 if v_1.Op != OpAMD64NEGQ {
20651                         break
20652                 }
20653                 y := v_1.Args[0]
20654                 v.reset(OpAMD64ROLL)
20655                 v.AddArg2(x, y)
20656                 return true
20657         }
20658         // match: (RORL x (NEGL y))
20659         // result: (ROLL x y)
20660         for {
20661                 x := v_0
20662                 if v_1.Op != OpAMD64NEGL {
20663                         break
20664                 }
20665                 y := v_1.Args[0]
20666                 v.reset(OpAMD64ROLL)
20667                 v.AddArg2(x, y)
20668                 return true
20669         }
20670         // match: (RORL x (MOVQconst [c]))
20671         // result: (ROLLconst [int8((-c)&31)] x)
20672         for {
20673                 x := v_0
20674                 if v_1.Op != OpAMD64MOVQconst {
20675                         break
20676                 }
20677                 c := auxIntToInt64(v_1.AuxInt)
20678                 v.reset(OpAMD64ROLLconst)
20679                 v.AuxInt = int8ToAuxInt(int8((-c) & 31))
20680                 v.AddArg(x)
20681                 return true
20682         }
20683         // match: (RORL x (MOVLconst [c]))
20684         // result: (ROLLconst [int8((-c)&31)] x)
20685         for {
20686                 x := v_0
20687                 if v_1.Op != OpAMD64MOVLconst {
20688                         break
20689                 }
20690                 c := auxIntToInt32(v_1.AuxInt)
20691                 v.reset(OpAMD64ROLLconst)
20692                 v.AuxInt = int8ToAuxInt(int8((-c) & 31))
20693                 v.AddArg(x)
20694                 return true
20695         }
20696         return false
20697 }
20698 func rewriteValueAMD64_OpAMD64RORQ(v *Value) bool {
20699         v_1 := v.Args[1]
20700         v_0 := v.Args[0]
20701         // match: (RORQ x (NEGQ y))
20702         // result: (ROLQ x y)
20703         for {
20704                 x := v_0
20705                 if v_1.Op != OpAMD64NEGQ {
20706                         break
20707                 }
20708                 y := v_1.Args[0]
20709                 v.reset(OpAMD64ROLQ)
20710                 v.AddArg2(x, y)
20711                 return true
20712         }
20713         // match: (RORQ x (NEGL y))
20714         // result: (ROLQ x y)
20715         for {
20716                 x := v_0
20717                 if v_1.Op != OpAMD64NEGL {
20718                         break
20719                 }
20720                 y := v_1.Args[0]
20721                 v.reset(OpAMD64ROLQ)
20722                 v.AddArg2(x, y)
20723                 return true
20724         }
20725         // match: (RORQ x (MOVQconst [c]))
20726         // result: (ROLQconst [int8((-c)&63)] x)
20727         for {
20728                 x := v_0
20729                 if v_1.Op != OpAMD64MOVQconst {
20730                         break
20731                 }
20732                 c := auxIntToInt64(v_1.AuxInt)
20733                 v.reset(OpAMD64ROLQconst)
20734                 v.AuxInt = int8ToAuxInt(int8((-c) & 63))
20735                 v.AddArg(x)
20736                 return true
20737         }
20738         // match: (RORQ x (MOVLconst [c]))
20739         // result: (ROLQconst [int8((-c)&63)] x)
20740         for {
20741                 x := v_0
20742                 if v_1.Op != OpAMD64MOVLconst {
20743                         break
20744                 }
20745                 c := auxIntToInt32(v_1.AuxInt)
20746                 v.reset(OpAMD64ROLQconst)
20747                 v.AuxInt = int8ToAuxInt(int8((-c) & 63))
20748                 v.AddArg(x)
20749                 return true
20750         }
20751         return false
20752 }
20753 func rewriteValueAMD64_OpAMD64RORW(v *Value) bool {
20754         v_1 := v.Args[1]
20755         v_0 := v.Args[0]
20756         // match: (RORW x (NEGQ y))
20757         // result: (ROLW x y)
20758         for {
20759                 x := v_0
20760                 if v_1.Op != OpAMD64NEGQ {
20761                         break
20762                 }
20763                 y := v_1.Args[0]
20764                 v.reset(OpAMD64ROLW)
20765                 v.AddArg2(x, y)
20766                 return true
20767         }
20768         // match: (RORW x (NEGL y))
20769         // result: (ROLW x y)
20770         for {
20771                 x := v_0
20772                 if v_1.Op != OpAMD64NEGL {
20773                         break
20774                 }
20775                 y := v_1.Args[0]
20776                 v.reset(OpAMD64ROLW)
20777                 v.AddArg2(x, y)
20778                 return true
20779         }
20780         // match: (RORW x (MOVQconst [c]))
20781         // result: (ROLWconst [int8((-c)&15)] x)
20782         for {
20783                 x := v_0
20784                 if v_1.Op != OpAMD64MOVQconst {
20785                         break
20786                 }
20787                 c := auxIntToInt64(v_1.AuxInt)
20788                 v.reset(OpAMD64ROLWconst)
20789                 v.AuxInt = int8ToAuxInt(int8((-c) & 15))
20790                 v.AddArg(x)
20791                 return true
20792         }
20793         // match: (RORW x (MOVLconst [c]))
20794         // result: (ROLWconst [int8((-c)&15)] x)
20795         for {
20796                 x := v_0
20797                 if v_1.Op != OpAMD64MOVLconst {
20798                         break
20799                 }
20800                 c := auxIntToInt32(v_1.AuxInt)
20801                 v.reset(OpAMD64ROLWconst)
20802                 v.AuxInt = int8ToAuxInt(int8((-c) & 15))
20803                 v.AddArg(x)
20804                 return true
20805         }
20806         return false
20807 }
20808 func rewriteValueAMD64_OpAMD64SARB(v *Value) bool {
20809         v_1 := v.Args[1]
20810         v_0 := v.Args[0]
20811         // match: (SARB x (MOVQconst [c]))
20812         // result: (SARBconst [int8(min(int64(c)&31,7))] x)
20813         for {
20814                 x := v_0
20815                 if v_1.Op != OpAMD64MOVQconst {
20816                         break
20817                 }
20818                 c := auxIntToInt64(v_1.AuxInt)
20819                 v.reset(OpAMD64SARBconst)
20820                 v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 7)))
20821                 v.AddArg(x)
20822                 return true
20823         }
20824         // match: (SARB x (MOVLconst [c]))
20825         // result: (SARBconst [int8(min(int64(c)&31,7))] x)
20826         for {
20827                 x := v_0
20828                 if v_1.Op != OpAMD64MOVLconst {
20829                         break
20830                 }
20831                 c := auxIntToInt32(v_1.AuxInt)
20832                 v.reset(OpAMD64SARBconst)
20833                 v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 7)))
20834                 v.AddArg(x)
20835                 return true
20836         }
20837         return false
20838 }
20839 func rewriteValueAMD64_OpAMD64SARBconst(v *Value) bool {
20840         v_0 := v.Args[0]
20841         // match: (SARBconst x [0])
20842         // result: x
20843         for {
20844                 if auxIntToInt8(v.AuxInt) != 0 {
20845                         break
20846                 }
20847                 x := v_0
20848                 v.copyOf(x)
20849                 return true
20850         }
20851         // match: (SARBconst [c] (MOVQconst [d]))
20852         // result: (MOVQconst [int64(int8(d))>>uint64(c)])
20853         for {
20854                 c := auxIntToInt8(v.AuxInt)
20855                 if v_0.Op != OpAMD64MOVQconst {
20856                         break
20857                 }
20858                 d := auxIntToInt64(v_0.AuxInt)
20859                 v.reset(OpAMD64MOVQconst)
20860                 v.AuxInt = int64ToAuxInt(int64(int8(d)) >> uint64(c))
20861                 return true
20862         }
20863         return false
20864 }
20865 func rewriteValueAMD64_OpAMD64SARL(v *Value) bool {
20866         v_1 := v.Args[1]
20867         v_0 := v.Args[0]
20868         b := v.Block
20869         // match: (SARL x (MOVQconst [c]))
20870         // result: (SARLconst [int8(c&31)] x)
20871         for {
20872                 x := v_0
20873                 if v_1.Op != OpAMD64MOVQconst {
20874                         break
20875                 }
20876                 c := auxIntToInt64(v_1.AuxInt)
20877                 v.reset(OpAMD64SARLconst)
20878                 v.AuxInt = int8ToAuxInt(int8(c & 31))
20879                 v.AddArg(x)
20880                 return true
20881         }
20882         // match: (SARL x (MOVLconst [c]))
20883         // result: (SARLconst [int8(c&31)] x)
20884         for {
20885                 x := v_0
20886                 if v_1.Op != OpAMD64MOVLconst {
20887                         break
20888                 }
20889                 c := auxIntToInt32(v_1.AuxInt)
20890                 v.reset(OpAMD64SARLconst)
20891                 v.AuxInt = int8ToAuxInt(int8(c & 31))
20892                 v.AddArg(x)
20893                 return true
20894         }
20895         // match: (SARL x (ADDQconst [c] y))
20896         // cond: c & 31 == 0
20897         // result: (SARL x y)
20898         for {
20899                 x := v_0
20900                 if v_1.Op != OpAMD64ADDQconst {
20901                         break
20902                 }
20903                 c := auxIntToInt32(v_1.AuxInt)
20904                 y := v_1.Args[0]
20905                 if !(c&31 == 0) {
20906                         break
20907                 }
20908                 v.reset(OpAMD64SARL)
20909                 v.AddArg2(x, y)
20910                 return true
20911         }
20912         // match: (SARL x (NEGQ <t> (ADDQconst [c] y)))
20913         // cond: c & 31 == 0
20914         // result: (SARL x (NEGQ <t> y))
20915         for {
20916                 x := v_0
20917                 if v_1.Op != OpAMD64NEGQ {
20918                         break
20919                 }
20920                 t := v_1.Type
20921                 v_1_0 := v_1.Args[0]
20922                 if v_1_0.Op != OpAMD64ADDQconst {
20923                         break
20924                 }
20925                 c := auxIntToInt32(v_1_0.AuxInt)
20926                 y := v_1_0.Args[0]
20927                 if !(c&31 == 0) {
20928                         break
20929                 }
20930                 v.reset(OpAMD64SARL)
20931                 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
20932                 v0.AddArg(y)
20933                 v.AddArg2(x, v0)
20934                 return true
20935         }
20936         // match: (SARL x (ANDQconst [c] y))
20937         // cond: c & 31 == 31
20938         // result: (SARL x y)
20939         for {
20940                 x := v_0
20941                 if v_1.Op != OpAMD64ANDQconst {
20942                         break
20943                 }
20944                 c := auxIntToInt32(v_1.AuxInt)
20945                 y := v_1.Args[0]
20946                 if !(c&31 == 31) {
20947                         break
20948                 }
20949                 v.reset(OpAMD64SARL)
20950                 v.AddArg2(x, y)
20951                 return true
20952         }
20953         // match: (SARL x (NEGQ <t> (ANDQconst [c] y)))
20954         // cond: c & 31 == 31
20955         // result: (SARL x (NEGQ <t> y))
20956         for {
20957                 x := v_0
20958                 if v_1.Op != OpAMD64NEGQ {
20959                         break
20960                 }
20961                 t := v_1.Type
20962                 v_1_0 := v_1.Args[0]
20963                 if v_1_0.Op != OpAMD64ANDQconst {
20964                         break
20965                 }
20966                 c := auxIntToInt32(v_1_0.AuxInt)
20967                 y := v_1_0.Args[0]
20968                 if !(c&31 == 31) {
20969                         break
20970                 }
20971                 v.reset(OpAMD64SARL)
20972                 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
20973                 v0.AddArg(y)
20974                 v.AddArg2(x, v0)
20975                 return true
20976         }
20977         // match: (SARL x (ADDLconst [c] y))
20978         // cond: c & 31 == 0
20979         // result: (SARL x y)
20980         for {
20981                 x := v_0
20982                 if v_1.Op != OpAMD64ADDLconst {
20983                         break
20984                 }
20985                 c := auxIntToInt32(v_1.AuxInt)
20986                 y := v_1.Args[0]
20987                 if !(c&31 == 0) {
20988                         break
20989                 }
20990                 v.reset(OpAMD64SARL)
20991                 v.AddArg2(x, y)
20992                 return true
20993         }
20994         // match: (SARL x (NEGL <t> (ADDLconst [c] y)))
20995         // cond: c & 31 == 0
20996         // result: (SARL x (NEGL <t> y))
20997         for {
20998                 x := v_0
20999                 if v_1.Op != OpAMD64NEGL {
21000                         break
21001                 }
21002                 t := v_1.Type
21003                 v_1_0 := v_1.Args[0]
21004                 if v_1_0.Op != OpAMD64ADDLconst {
21005                         break
21006                 }
21007                 c := auxIntToInt32(v_1_0.AuxInt)
21008                 y := v_1_0.Args[0]
21009                 if !(c&31 == 0) {
21010                         break
21011                 }
21012                 v.reset(OpAMD64SARL)
21013                 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
21014                 v0.AddArg(y)
21015                 v.AddArg2(x, v0)
21016                 return true
21017         }
21018         // match: (SARL x (ANDLconst [c] y))
21019         // cond: c & 31 == 31
21020         // result: (SARL x y)
21021         for {
21022                 x := v_0
21023                 if v_1.Op != OpAMD64ANDLconst {
21024                         break
21025                 }
21026                 c := auxIntToInt32(v_1.AuxInt)
21027                 y := v_1.Args[0]
21028                 if !(c&31 == 31) {
21029                         break
21030                 }
21031                 v.reset(OpAMD64SARL)
21032                 v.AddArg2(x, y)
21033                 return true
21034         }
21035         // match: (SARL x (NEGL <t> (ANDLconst [c] y)))
21036         // cond: c & 31 == 31
21037         // result: (SARL x (NEGL <t> y))
21038         for {
21039                 x := v_0
21040                 if v_1.Op != OpAMD64NEGL {
21041                         break
21042                 }
21043                 t := v_1.Type
21044                 v_1_0 := v_1.Args[0]
21045                 if v_1_0.Op != OpAMD64ANDLconst {
21046                         break
21047                 }
21048                 c := auxIntToInt32(v_1_0.AuxInt)
21049                 y := v_1_0.Args[0]
21050                 if !(c&31 == 31) {
21051                         break
21052                 }
21053                 v.reset(OpAMD64SARL)
21054                 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
21055                 v0.AddArg(y)
21056                 v.AddArg2(x, v0)
21057                 return true
21058         }
21059         return false
21060 }
21061 func rewriteValueAMD64_OpAMD64SARLconst(v *Value) bool {
21062         v_0 := v.Args[0]
21063         // match: (SARLconst x [0])
21064         // result: x
21065         for {
21066                 if auxIntToInt8(v.AuxInt) != 0 {
21067                         break
21068                 }
21069                 x := v_0
21070                 v.copyOf(x)
21071                 return true
21072         }
21073         // match: (SARLconst [c] (MOVQconst [d]))
21074         // result: (MOVQconst [int64(int32(d))>>uint64(c)])
21075         for {
21076                 c := auxIntToInt8(v.AuxInt)
21077                 if v_0.Op != OpAMD64MOVQconst {
21078                         break
21079                 }
21080                 d := auxIntToInt64(v_0.AuxInt)
21081                 v.reset(OpAMD64MOVQconst)
21082                 v.AuxInt = int64ToAuxInt(int64(int32(d)) >> uint64(c))
21083                 return true
21084         }
21085         return false
21086 }
21087 func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool {
21088         v_1 := v.Args[1]
21089         v_0 := v.Args[0]
21090         b := v.Block
21091         // match: (SARQ x (MOVQconst [c]))
21092         // result: (SARQconst [int8(c&63)] x)
21093         for {
21094                 x := v_0
21095                 if v_1.Op != OpAMD64MOVQconst {
21096                         break
21097                 }
21098                 c := auxIntToInt64(v_1.AuxInt)
21099                 v.reset(OpAMD64SARQconst)
21100                 v.AuxInt = int8ToAuxInt(int8(c & 63))
21101                 v.AddArg(x)
21102                 return true
21103         }
21104         // match: (SARQ x (MOVLconst [c]))
21105         // result: (SARQconst [int8(c&63)] x)
21106         for {
21107                 x := v_0
21108                 if v_1.Op != OpAMD64MOVLconst {
21109                         break
21110                 }
21111                 c := auxIntToInt32(v_1.AuxInt)
21112                 v.reset(OpAMD64SARQconst)
21113                 v.AuxInt = int8ToAuxInt(int8(c & 63))
21114                 v.AddArg(x)
21115                 return true
21116         }
21117         // match: (SARQ x (ADDQconst [c] y))
21118         // cond: c & 63 == 0
21119         // result: (SARQ x y)
21120         for {
21121                 x := v_0
21122                 if v_1.Op != OpAMD64ADDQconst {
21123                         break
21124                 }
21125                 c := auxIntToInt32(v_1.AuxInt)
21126                 y := v_1.Args[0]
21127                 if !(c&63 == 0) {
21128                         break
21129                 }
21130                 v.reset(OpAMD64SARQ)
21131                 v.AddArg2(x, y)
21132                 return true
21133         }
21134         // match: (SARQ x (NEGQ <t> (ADDQconst [c] y)))
21135         // cond: c & 63 == 0
21136         // result: (SARQ x (NEGQ <t> y))
21137         for {
21138                 x := v_0
21139                 if v_1.Op != OpAMD64NEGQ {
21140                         break
21141                 }
21142                 t := v_1.Type
21143                 v_1_0 := v_1.Args[0]
21144                 if v_1_0.Op != OpAMD64ADDQconst {
21145                         break
21146                 }
21147                 c := auxIntToInt32(v_1_0.AuxInt)
21148                 y := v_1_0.Args[0]
21149                 if !(c&63 == 0) {
21150                         break
21151                 }
21152                 v.reset(OpAMD64SARQ)
21153                 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
21154                 v0.AddArg(y)
21155                 v.AddArg2(x, v0)
21156                 return true
21157         }
21158         // match: (SARQ x (ANDQconst [c] y))
21159         // cond: c & 63 == 63
21160         // result: (SARQ x y)
21161         for {
21162                 x := v_0
21163                 if v_1.Op != OpAMD64ANDQconst {
21164                         break
21165                 }
21166                 c := auxIntToInt32(v_1.AuxInt)
21167                 y := v_1.Args[0]
21168                 if !(c&63 == 63) {
21169                         break
21170                 }
21171                 v.reset(OpAMD64SARQ)
21172                 v.AddArg2(x, y)
21173                 return true
21174         }
21175         // match: (SARQ x (NEGQ <t> (ANDQconst [c] y)))
21176         // cond: c & 63 == 63
21177         // result: (SARQ x (NEGQ <t> y))
21178         for {
21179                 x := v_0
21180                 if v_1.Op != OpAMD64NEGQ {
21181                         break
21182                 }
21183                 t := v_1.Type
21184                 v_1_0 := v_1.Args[0]
21185                 if v_1_0.Op != OpAMD64ANDQconst {
21186                         break
21187                 }
21188                 c := auxIntToInt32(v_1_0.AuxInt)
21189                 y := v_1_0.Args[0]
21190                 if !(c&63 == 63) {
21191                         break
21192                 }
21193                 v.reset(OpAMD64SARQ)
21194                 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
21195                 v0.AddArg(y)
21196                 v.AddArg2(x, v0)
21197                 return true
21198         }
21199         // match: (SARQ x (ADDLconst [c] y))
21200         // cond: c & 63 == 0
21201         // result: (SARQ x y)
21202         for {
21203                 x := v_0
21204                 if v_1.Op != OpAMD64ADDLconst {
21205                         break
21206                 }
21207                 c := auxIntToInt32(v_1.AuxInt)
21208                 y := v_1.Args[0]
21209                 if !(c&63 == 0) {
21210                         break
21211                 }
21212                 v.reset(OpAMD64SARQ)
21213                 v.AddArg2(x, y)
21214                 return true
21215         }
21216         // match: (SARQ x (NEGL <t> (ADDLconst [c] y)))
21217         // cond: c & 63 == 0
21218         // result: (SARQ x (NEGL <t> y))
21219         for {
21220                 x := v_0
21221                 if v_1.Op != OpAMD64NEGL {
21222                         break
21223                 }
21224                 t := v_1.Type
21225                 v_1_0 := v_1.Args[0]
21226                 if v_1_0.Op != OpAMD64ADDLconst {
21227                         break
21228                 }
21229                 c := auxIntToInt32(v_1_0.AuxInt)
21230                 y := v_1_0.Args[0]
21231                 if !(c&63 == 0) {
21232                         break
21233                 }
21234                 v.reset(OpAMD64SARQ)
21235                 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
21236                 v0.AddArg(y)
21237                 v.AddArg2(x, v0)
21238                 return true
21239         }
21240         // match: (SARQ x (ANDLconst [c] y))
21241         // cond: c & 63 == 63
21242         // result: (SARQ x y)
21243         for {
21244                 x := v_0
21245                 if v_1.Op != OpAMD64ANDLconst {
21246                         break
21247                 }
21248                 c := auxIntToInt32(v_1.AuxInt)
21249                 y := v_1.Args[0]
21250                 if !(c&63 == 63) {
21251                         break
21252                 }
21253                 v.reset(OpAMD64SARQ)
21254                 v.AddArg2(x, y)
21255                 return true
21256         }
21257         // match: (SARQ x (NEGL <t> (ANDLconst [c] y)))
21258         // cond: c & 63 == 63
21259         // result: (SARQ x (NEGL <t> y))
21260         for {
21261                 x := v_0
21262                 if v_1.Op != OpAMD64NEGL {
21263                         break
21264                 }
21265                 t := v_1.Type
21266                 v_1_0 := v_1.Args[0]
21267                 if v_1_0.Op != OpAMD64ANDLconst {
21268                         break
21269                 }
21270                 c := auxIntToInt32(v_1_0.AuxInt)
21271                 y := v_1_0.Args[0]
21272                 if !(c&63 == 63) {
21273                         break
21274                 }
21275                 v.reset(OpAMD64SARQ)
21276                 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
21277                 v0.AddArg(y)
21278                 v.AddArg2(x, v0)
21279                 return true
21280         }
21281         return false
21282 }
21283 func rewriteValueAMD64_OpAMD64SARQconst(v *Value) bool {
21284         v_0 := v.Args[0]
21285         // match: (SARQconst x [0])
21286         // result: x
21287         for {
21288                 if auxIntToInt8(v.AuxInt) != 0 {
21289                         break
21290                 }
21291                 x := v_0
21292                 v.copyOf(x)
21293                 return true
21294         }
21295         // match: (SARQconst [c] (MOVQconst [d]))
21296         // result: (MOVQconst [d>>uint64(c)])
21297         for {
21298                 c := auxIntToInt8(v.AuxInt)
21299                 if v_0.Op != OpAMD64MOVQconst {
21300                         break
21301                 }
21302                 d := auxIntToInt64(v_0.AuxInt)
21303                 v.reset(OpAMD64MOVQconst)
21304                 v.AuxInt = int64ToAuxInt(d >> uint64(c))
21305                 return true
21306         }
21307         return false
21308 }
21309 func rewriteValueAMD64_OpAMD64SARW(v *Value) bool {
21310         v_1 := v.Args[1]
21311         v_0 := v.Args[0]
21312         // match: (SARW x (MOVQconst [c]))
21313         // result: (SARWconst [int8(min(int64(c)&31,15))] x)
21314         for {
21315                 x := v_0
21316                 if v_1.Op != OpAMD64MOVQconst {
21317                         break
21318                 }
21319                 c := auxIntToInt64(v_1.AuxInt)
21320                 v.reset(OpAMD64SARWconst)
21321                 v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 15)))
21322                 v.AddArg(x)
21323                 return true
21324         }
21325         // match: (SARW x (MOVLconst [c]))
21326         // result: (SARWconst [int8(min(int64(c)&31,15))] x)
21327         for {
21328                 x := v_0
21329                 if v_1.Op != OpAMD64MOVLconst {
21330                         break
21331                 }
21332                 c := auxIntToInt32(v_1.AuxInt)
21333                 v.reset(OpAMD64SARWconst)
21334                 v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 15)))
21335                 v.AddArg(x)
21336                 return true
21337         }
21338         return false
21339 }
21340 func rewriteValueAMD64_OpAMD64SARWconst(v *Value) bool {
21341         v_0 := v.Args[0]
21342         // match: (SARWconst x [0])
21343         // result: x
21344         for {
21345                 if auxIntToInt8(v.AuxInt) != 0 {
21346                         break
21347                 }
21348                 x := v_0
21349                 v.copyOf(x)
21350                 return true
21351         }
21352         // match: (SARWconst [c] (MOVQconst [d]))
21353         // result: (MOVQconst [int64(int16(d))>>uint64(c)])
21354         for {
21355                 c := auxIntToInt8(v.AuxInt)
21356                 if v_0.Op != OpAMD64MOVQconst {
21357                         break
21358                 }
21359                 d := auxIntToInt64(v_0.AuxInt)
21360                 v.reset(OpAMD64MOVQconst)
21361                 v.AuxInt = int64ToAuxInt(int64(int16(d)) >> uint64(c))
21362                 return true
21363         }
21364         return false
21365 }
21366 func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value) bool {
21367         v_0 := v.Args[0]
21368         // match: (SBBLcarrymask (FlagEQ))
21369         // result: (MOVLconst [0])
21370         for {
21371                 if v_0.Op != OpAMD64FlagEQ {
21372                         break
21373                 }
21374                 v.reset(OpAMD64MOVLconst)
21375                 v.AuxInt = int32ToAuxInt(0)
21376                 return true
21377         }
21378         // match: (SBBLcarrymask (FlagLT_ULT))
21379         // result: (MOVLconst [-1])
21380         for {
21381                 if v_0.Op != OpAMD64FlagLT_ULT {
21382                         break
21383                 }
21384                 v.reset(OpAMD64MOVLconst)
21385                 v.AuxInt = int32ToAuxInt(-1)
21386                 return true
21387         }
21388         // match: (SBBLcarrymask (FlagLT_UGT))
21389         // result: (MOVLconst [0])
21390         for {
21391                 if v_0.Op != OpAMD64FlagLT_UGT {
21392                         break
21393                 }
21394                 v.reset(OpAMD64MOVLconst)
21395                 v.AuxInt = int32ToAuxInt(0)
21396                 return true
21397         }
21398         // match: (SBBLcarrymask (FlagGT_ULT))
21399         // result: (MOVLconst [-1])
21400         for {
21401                 if v_0.Op != OpAMD64FlagGT_ULT {
21402                         break
21403                 }
21404                 v.reset(OpAMD64MOVLconst)
21405                 v.AuxInt = int32ToAuxInt(-1)
21406                 return true
21407         }
21408         // match: (SBBLcarrymask (FlagGT_UGT))
21409         // result: (MOVLconst [0])
21410         for {
21411                 if v_0.Op != OpAMD64FlagGT_UGT {
21412                         break
21413                 }
21414                 v.reset(OpAMD64MOVLconst)
21415                 v.AuxInt = int32ToAuxInt(0)
21416                 return true
21417         }
21418         return false
21419 }
21420 func rewriteValueAMD64_OpAMD64SBBQ(v *Value) bool {
21421         v_2 := v.Args[2]
21422         v_1 := v.Args[1]
21423         v_0 := v.Args[0]
21424         // match: (SBBQ x (MOVQconst [c]) borrow)
21425         // cond: is32Bit(c)
21426         // result: (SBBQconst x [int32(c)] borrow)
21427         for {
21428                 x := v_0
21429                 if v_1.Op != OpAMD64MOVQconst {
21430                         break
21431                 }
21432                 c := auxIntToInt64(v_1.AuxInt)
21433                 borrow := v_2
21434                 if !(is32Bit(c)) {
21435                         break
21436                 }
21437                 v.reset(OpAMD64SBBQconst)
21438                 v.AuxInt = int32ToAuxInt(int32(c))
21439                 v.AddArg2(x, borrow)
21440                 return true
21441         }
21442         // match: (SBBQ x y (FlagEQ))
21443         // result: (SUBQborrow x y)
21444         for {
21445                 x := v_0
21446                 y := v_1
21447                 if v_2.Op != OpAMD64FlagEQ {
21448                         break
21449                 }
21450                 v.reset(OpAMD64SUBQborrow)
21451                 v.AddArg2(x, y)
21452                 return true
21453         }
21454         return false
21455 }
21456 func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value) bool {
21457         v_0 := v.Args[0]
21458         // match: (SBBQcarrymask (FlagEQ))
21459         // result: (MOVQconst [0])
21460         for {
21461                 if v_0.Op != OpAMD64FlagEQ {
21462                         break
21463                 }
21464                 v.reset(OpAMD64MOVQconst)
21465                 v.AuxInt = int64ToAuxInt(0)
21466                 return true
21467         }
21468         // match: (SBBQcarrymask (FlagLT_ULT))
21469         // result: (MOVQconst [-1])
21470         for {
21471                 if v_0.Op != OpAMD64FlagLT_ULT {
21472                         break
21473                 }
21474                 v.reset(OpAMD64MOVQconst)
21475                 v.AuxInt = int64ToAuxInt(-1)
21476                 return true
21477         }
21478         // match: (SBBQcarrymask (FlagLT_UGT))
21479         // result: (MOVQconst [0])
21480         for {
21481                 if v_0.Op != OpAMD64FlagLT_UGT {
21482                         break
21483                 }
21484                 v.reset(OpAMD64MOVQconst)
21485                 v.AuxInt = int64ToAuxInt(0)
21486                 return true
21487         }
21488         // match: (SBBQcarrymask (FlagGT_ULT))
21489         // result: (MOVQconst [-1])
21490         for {
21491                 if v_0.Op != OpAMD64FlagGT_ULT {
21492                         break
21493                 }
21494                 v.reset(OpAMD64MOVQconst)
21495                 v.AuxInt = int64ToAuxInt(-1)
21496                 return true
21497         }
21498         // match: (SBBQcarrymask (FlagGT_UGT))
21499         // result: (MOVQconst [0])
21500         for {
21501                 if v_0.Op != OpAMD64FlagGT_UGT {
21502                         break
21503                 }
21504                 v.reset(OpAMD64MOVQconst)
21505                 v.AuxInt = int64ToAuxInt(0)
21506                 return true
21507         }
21508         return false
21509 }
21510 func rewriteValueAMD64_OpAMD64SBBQconst(v *Value) bool {
21511         v_1 := v.Args[1]
21512         v_0 := v.Args[0]
21513         // match: (SBBQconst x [c] (FlagEQ))
21514         // result: (SUBQconstborrow x [c])
21515         for {
21516                 c := auxIntToInt32(v.AuxInt)
21517                 x := v_0
21518                 if v_1.Op != OpAMD64FlagEQ {
21519                         break
21520                 }
21521                 v.reset(OpAMD64SUBQconstborrow)
21522                 v.AuxInt = int32ToAuxInt(c)
21523                 v.AddArg(x)
21524                 return true
21525         }
21526         return false
21527 }
21528 func rewriteValueAMD64_OpAMD64SETA(v *Value) bool {
21529         v_0 := v.Args[0]
21530         // match: (SETA (InvertFlags x))
21531         // result: (SETB x)
21532         for {
21533                 if v_0.Op != OpAMD64InvertFlags {
21534                         break
21535                 }
21536                 x := v_0.Args[0]
21537                 v.reset(OpAMD64SETB)
21538                 v.AddArg(x)
21539                 return true
21540         }
21541         // match: (SETA (FlagEQ))
21542         // result: (MOVLconst [0])
21543         for {
21544                 if v_0.Op != OpAMD64FlagEQ {
21545                         break
21546                 }
21547                 v.reset(OpAMD64MOVLconst)
21548                 v.AuxInt = int32ToAuxInt(0)
21549                 return true
21550         }
21551         // match: (SETA (FlagLT_ULT))
21552         // result: (MOVLconst [0])
21553         for {
21554                 if v_0.Op != OpAMD64FlagLT_ULT {
21555                         break
21556                 }
21557                 v.reset(OpAMD64MOVLconst)
21558                 v.AuxInt = int32ToAuxInt(0)
21559                 return true
21560         }
21561         // match: (SETA (FlagLT_UGT))
21562         // result: (MOVLconst [1])
21563         for {
21564                 if v_0.Op != OpAMD64FlagLT_UGT {
21565                         break
21566                 }
21567                 v.reset(OpAMD64MOVLconst)
21568                 v.AuxInt = int32ToAuxInt(1)
21569                 return true
21570         }
21571         // match: (SETA (FlagGT_ULT))
21572         // result: (MOVLconst [0])
21573         for {
21574                 if v_0.Op != OpAMD64FlagGT_ULT {
21575                         break
21576                 }
21577                 v.reset(OpAMD64MOVLconst)
21578                 v.AuxInt = int32ToAuxInt(0)
21579                 return true
21580         }
21581         // match: (SETA (FlagGT_UGT))
21582         // result: (MOVLconst [1])
21583         for {
21584                 if v_0.Op != OpAMD64FlagGT_UGT {
21585                         break
21586                 }
21587                 v.reset(OpAMD64MOVLconst)
21588                 v.AuxInt = int32ToAuxInt(1)
21589                 return true
21590         }
21591         return false
21592 }
21593 func rewriteValueAMD64_OpAMD64SETAE(v *Value) bool {
21594         v_0 := v.Args[0]
21595         // match: (SETAE (TESTQ x x))
21596         // result: (ConstBool [true])
21597         for {
21598                 if v_0.Op != OpAMD64TESTQ {
21599                         break
21600                 }
21601                 x := v_0.Args[1]
21602                 if x != v_0.Args[0] {
21603                         break
21604                 }
21605                 v.reset(OpConstBool)
21606                 v.AuxInt = boolToAuxInt(true)
21607                 return true
21608         }
21609         // match: (SETAE (TESTL x x))
21610         // result: (ConstBool [true])
21611         for {
21612                 if v_0.Op != OpAMD64TESTL {
21613                         break
21614                 }
21615                 x := v_0.Args[1]
21616                 if x != v_0.Args[0] {
21617                         break
21618                 }
21619                 v.reset(OpConstBool)
21620                 v.AuxInt = boolToAuxInt(true)
21621                 return true
21622         }
21623         // match: (SETAE (TESTW x x))
21624         // result: (ConstBool [true])
21625         for {
21626                 if v_0.Op != OpAMD64TESTW {
21627                         break
21628                 }
21629                 x := v_0.Args[1]
21630                 if x != v_0.Args[0] {
21631                         break
21632                 }
21633                 v.reset(OpConstBool)
21634                 v.AuxInt = boolToAuxInt(true)
21635                 return true
21636         }
21637         // match: (SETAE (TESTB x x))
21638         // result: (ConstBool [true])
21639         for {
21640                 if v_0.Op != OpAMD64TESTB {
21641                         break
21642                 }
21643                 x := v_0.Args[1]
21644                 if x != v_0.Args[0] {
21645                         break
21646                 }
21647                 v.reset(OpConstBool)
21648                 v.AuxInt = boolToAuxInt(true)
21649                 return true
21650         }
21651         // match: (SETAE (InvertFlags x))
21652         // result: (SETBE x)
21653         for {
21654                 if v_0.Op != OpAMD64InvertFlags {
21655                         break
21656                 }
21657                 x := v_0.Args[0]
21658                 v.reset(OpAMD64SETBE)
21659                 v.AddArg(x)
21660                 return true
21661         }
21662         // match: (SETAE (FlagEQ))
21663         // result: (MOVLconst [1])
21664         for {
21665                 if v_0.Op != OpAMD64FlagEQ {
21666                         break
21667                 }
21668                 v.reset(OpAMD64MOVLconst)
21669                 v.AuxInt = int32ToAuxInt(1)
21670                 return true
21671         }
21672         // match: (SETAE (FlagLT_ULT))
21673         // result: (MOVLconst [0])
21674         for {
21675                 if v_0.Op != OpAMD64FlagLT_ULT {
21676                         break
21677                 }
21678                 v.reset(OpAMD64MOVLconst)
21679                 v.AuxInt = int32ToAuxInt(0)
21680                 return true
21681         }
21682         // match: (SETAE (FlagLT_UGT))
21683         // result: (MOVLconst [1])
21684         for {
21685                 if v_0.Op != OpAMD64FlagLT_UGT {
21686                         break
21687                 }
21688                 v.reset(OpAMD64MOVLconst)
21689                 v.AuxInt = int32ToAuxInt(1)
21690                 return true
21691         }
21692         // match: (SETAE (FlagGT_ULT))
21693         // result: (MOVLconst [0])
21694         for {
21695                 if v_0.Op != OpAMD64FlagGT_ULT {
21696                         break
21697                 }
21698                 v.reset(OpAMD64MOVLconst)
21699                 v.AuxInt = int32ToAuxInt(0)
21700                 return true
21701         }
21702         // match: (SETAE (FlagGT_UGT))
21703         // result: (MOVLconst [1])
21704         for {
21705                 if v_0.Op != OpAMD64FlagGT_UGT {
21706                         break
21707                 }
21708                 v.reset(OpAMD64MOVLconst)
21709                 v.AuxInt = int32ToAuxInt(1)
21710                 return true
21711         }
21712         return false
21713 }
21714 func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool {
21715         v_2 := v.Args[2]
21716         v_1 := v.Args[1]
21717         v_0 := v.Args[0]
21718         b := v.Block
21719         typ := &b.Func.Config.Types
21720         // match: (SETAEstore [off] {sym} ptr (InvertFlags x) mem)
21721         // result: (SETBEstore [off] {sym} ptr x mem)
21722         for {
21723                 off := auxIntToInt32(v.AuxInt)
21724                 sym := auxToSym(v.Aux)
21725                 ptr := v_0
21726                 if v_1.Op != OpAMD64InvertFlags {
21727                         break
21728                 }
21729                 x := v_1.Args[0]
21730                 mem := v_2
21731                 v.reset(OpAMD64SETBEstore)
21732                 v.AuxInt = int32ToAuxInt(off)
21733                 v.Aux = symToAux(sym)
21734                 v.AddArg3(ptr, x, mem)
21735                 return true
21736         }
21737         // match: (SETAEstore [off1] {sym} (ADDQconst [off2] base) val mem)
21738         // cond: is32Bit(int64(off1)+int64(off2))
21739         // result: (SETAEstore [off1+off2] {sym} base val mem)
21740         for {
21741                 off1 := auxIntToInt32(v.AuxInt)
21742                 sym := auxToSym(v.Aux)
21743                 if v_0.Op != OpAMD64ADDQconst {
21744                         break
21745                 }
21746                 off2 := auxIntToInt32(v_0.AuxInt)
21747                 base := v_0.Args[0]
21748                 val := v_1
21749                 mem := v_2
21750                 if !(is32Bit(int64(off1) + int64(off2))) {
21751                         break
21752                 }
21753                 v.reset(OpAMD64SETAEstore)
21754                 v.AuxInt = int32ToAuxInt(off1 + off2)
21755                 v.Aux = symToAux(sym)
21756                 v.AddArg3(base, val, mem)
21757                 return true
21758         }
21759         // match: (SETAEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
21760         // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
21761         // result: (SETAEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
21762         for {
21763                 off1 := auxIntToInt32(v.AuxInt)
21764                 sym1 := auxToSym(v.Aux)
21765                 if v_0.Op != OpAMD64LEAQ {
21766                         break
21767                 }
21768                 off2 := auxIntToInt32(v_0.AuxInt)
21769                 sym2 := auxToSym(v_0.Aux)
21770                 base := v_0.Args[0]
21771                 val := v_1
21772                 mem := v_2
21773                 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
21774                         break
21775                 }
21776                 v.reset(OpAMD64SETAEstore)
21777                 v.AuxInt = int32ToAuxInt(off1 + off2)
21778                 v.Aux = symToAux(mergeSym(sym1, sym2))
21779                 v.AddArg3(base, val, mem)
21780                 return true
21781         }
21782         // match: (SETAEstore [off] {sym} ptr (FlagEQ) mem)
21783         // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
21784         for {
21785                 off := auxIntToInt32(v.AuxInt)
21786                 sym := auxToSym(v.Aux)
21787                 ptr := v_0
21788                 if v_1.Op != OpAMD64FlagEQ {
21789                         break
21790                 }
21791                 mem := v_2
21792                 v.reset(OpAMD64MOVBstore)
21793                 v.AuxInt = int32ToAuxInt(off)
21794                 v.Aux = symToAux(sym)
21795                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
21796                 v0.AuxInt = int32ToAuxInt(1)
21797                 v.AddArg3(ptr, v0, mem)
21798                 return true
21799         }
21800         // match: (SETAEstore [off] {sym} ptr (FlagLT_ULT) mem)
21801         // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
21802         for {
21803                 off := auxIntToInt32(v.AuxInt)
21804                 sym := auxToSym(v.Aux)
21805                 ptr := v_0
21806                 if v_1.Op != OpAMD64FlagLT_ULT {
21807                         break
21808                 }
21809                 mem := v_2
21810                 v.reset(OpAMD64MOVBstore)
21811                 v.AuxInt = int32ToAuxInt(off)
21812                 v.Aux = symToAux(sym)
21813                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
21814                 v0.AuxInt = int32ToAuxInt(0)
21815                 v.AddArg3(ptr, v0, mem)
21816                 return true
21817         }
21818         // match: (SETAEstore [off] {sym} ptr (FlagLT_UGT) mem)
21819         // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
21820         for {
21821                 off := auxIntToInt32(v.AuxInt)
21822                 sym := auxToSym(v.Aux)
21823                 ptr := v_0
21824                 if v_1.Op != OpAMD64FlagLT_UGT {
21825                         break
21826                 }
21827                 mem := v_2
21828                 v.reset(OpAMD64MOVBstore)
21829                 v.AuxInt = int32ToAuxInt(off)
21830                 v.Aux = symToAux(sym)
21831                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
21832                 v0.AuxInt = int32ToAuxInt(1)
21833                 v.AddArg3(ptr, v0, mem)
21834                 return true
21835         }
21836         // match: (SETAEstore [off] {sym} ptr (FlagGT_ULT) mem)
21837         // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
21838         for {
21839                 off := auxIntToInt32(v.AuxInt)
21840                 sym := auxToSym(v.Aux)
21841                 ptr := v_0
21842                 if v_1.Op != OpAMD64FlagGT_ULT {
21843                         break
21844                 }
21845                 mem := v_2
21846                 v.reset(OpAMD64MOVBstore)
21847                 v.AuxInt = int32ToAuxInt(off)
21848                 v.Aux = symToAux(sym)
21849                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
21850                 v0.AuxInt = int32ToAuxInt(0)
21851                 v.AddArg3(ptr, v0, mem)
21852                 return true
21853         }
21854         // match: (SETAEstore [off] {sym} ptr (FlagGT_UGT) mem)
21855         // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
21856         for {
21857                 off := auxIntToInt32(v.AuxInt)
21858                 sym := auxToSym(v.Aux)
21859                 ptr := v_0
21860                 if v_1.Op != OpAMD64FlagGT_UGT {
21861                         break
21862                 }
21863                 mem := v_2
21864                 v.reset(OpAMD64MOVBstore)
21865                 v.AuxInt = int32ToAuxInt(off)
21866                 v.Aux = symToAux(sym)
21867                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
21868                 v0.AuxInt = int32ToAuxInt(1)
21869                 v.AddArg3(ptr, v0, mem)
21870                 return true
21871         }
21872         return false
21873 }
21874 func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool {
21875         v_2 := v.Args[2]
21876         v_1 := v.Args[1]
21877         v_0 := v.Args[0]
21878         b := v.Block
21879         typ := &b.Func.Config.Types
21880         // match: (SETAstore [off] {sym} ptr (InvertFlags x) mem)
21881         // result: (SETBstore [off] {sym} ptr x mem)
21882         for {
21883                 off := auxIntToInt32(v.AuxInt)
21884                 sym := auxToSym(v.Aux)
21885                 ptr := v_0
21886                 if v_1.Op != OpAMD64InvertFlags {
21887                         break
21888                 }
21889                 x := v_1.Args[0]
21890                 mem := v_2
21891                 v.reset(OpAMD64SETBstore)
21892                 v.AuxInt = int32ToAuxInt(off)
21893                 v.Aux = symToAux(sym)
21894                 v.AddArg3(ptr, x, mem)
21895                 return true
21896         }
21897         // match: (SETAstore [off1] {sym} (ADDQconst [off2] base) val mem)
21898         // cond: is32Bit(int64(off1)+int64(off2))
21899         // result: (SETAstore [off1+off2] {sym} base val mem)
21900         for {
21901                 off1 := auxIntToInt32(v.AuxInt)
21902                 sym := auxToSym(v.Aux)
21903                 if v_0.Op != OpAMD64ADDQconst {
21904                         break
21905                 }
21906                 off2 := auxIntToInt32(v_0.AuxInt)
21907                 base := v_0.Args[0]
21908                 val := v_1
21909                 mem := v_2
21910                 if !(is32Bit(int64(off1) + int64(off2))) {
21911                         break
21912                 }
21913                 v.reset(OpAMD64SETAstore)
21914                 v.AuxInt = int32ToAuxInt(off1 + off2)
21915                 v.Aux = symToAux(sym)
21916                 v.AddArg3(base, val, mem)
21917                 return true
21918         }
21919         // match: (SETAstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
21920         // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
21921         // result: (SETAstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
21922         for {
21923                 off1 := auxIntToInt32(v.AuxInt)
21924                 sym1 := auxToSym(v.Aux)
21925                 if v_0.Op != OpAMD64LEAQ {
21926                         break
21927                 }
21928                 off2 := auxIntToInt32(v_0.AuxInt)
21929                 sym2 := auxToSym(v_0.Aux)
21930                 base := v_0.Args[0]
21931                 val := v_1
21932                 mem := v_2
21933                 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
21934                         break
21935                 }
21936                 v.reset(OpAMD64SETAstore)
21937                 v.AuxInt = int32ToAuxInt(off1 + off2)
21938                 v.Aux = symToAux(mergeSym(sym1, sym2))
21939                 v.AddArg3(base, val, mem)
21940                 return true
21941         }
21942         // match: (SETAstore [off] {sym} ptr (FlagEQ) mem)
21943         // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
21944         for {
21945                 off := auxIntToInt32(v.AuxInt)
21946                 sym := auxToSym(v.Aux)
21947                 ptr := v_0
21948                 if v_1.Op != OpAMD64FlagEQ {
21949                         break
21950                 }
21951                 mem := v_2
21952                 v.reset(OpAMD64MOVBstore)
21953                 v.AuxInt = int32ToAuxInt(off)
21954                 v.Aux = symToAux(sym)
21955                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
21956                 v0.AuxInt = int32ToAuxInt(0)
21957                 v.AddArg3(ptr, v0, mem)
21958                 return true
21959         }
21960         // match: (SETAstore [off] {sym} ptr (FlagLT_ULT) mem)
21961         // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
21962         for {
21963                 off := auxIntToInt32(v.AuxInt)
21964                 sym := auxToSym(v.Aux)
21965                 ptr := v_0
21966                 if v_1.Op != OpAMD64FlagLT_ULT {
21967                         break
21968                 }
21969                 mem := v_2
21970                 v.reset(OpAMD64MOVBstore)
21971                 v.AuxInt = int32ToAuxInt(off)
21972                 v.Aux = symToAux(sym)
21973                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
21974                 v0.AuxInt = int32ToAuxInt(0)
21975                 v.AddArg3(ptr, v0, mem)
21976                 return true
21977         }
21978         // match: (SETAstore [off] {sym} ptr (FlagLT_UGT) mem)
21979         // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
21980         for {
21981                 off := auxIntToInt32(v.AuxInt)
21982                 sym := auxToSym(v.Aux)
21983                 ptr := v_0
21984                 if v_1.Op != OpAMD64FlagLT_UGT {
21985                         break
21986                 }
21987                 mem := v_2
21988                 v.reset(OpAMD64MOVBstore)
21989                 v.AuxInt = int32ToAuxInt(off)
21990                 v.Aux = symToAux(sym)
21991                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
21992                 v0.AuxInt = int32ToAuxInt(1)
21993                 v.AddArg3(ptr, v0, mem)
21994                 return true
21995         }
21996         // match: (SETAstore [off] {sym} ptr (FlagGT_ULT) mem)
21997         // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
21998         for {
21999                 off := auxIntToInt32(v.AuxInt)
22000                 sym := auxToSym(v.Aux)
22001                 ptr := v_0
22002                 if v_1.Op != OpAMD64FlagGT_ULT {
22003                         break
22004                 }
22005                 mem := v_2
22006                 v.reset(OpAMD64MOVBstore)
22007                 v.AuxInt = int32ToAuxInt(off)
22008                 v.Aux = symToAux(sym)
22009                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
22010                 v0.AuxInt = int32ToAuxInt(0)
22011                 v.AddArg3(ptr, v0, mem)
22012                 return true
22013         }
22014         // match: (SETAstore [off] {sym} ptr (FlagGT_UGT) mem)
22015         // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
22016         for {
22017                 off := auxIntToInt32(v.AuxInt)
22018                 sym := auxToSym(v.Aux)
22019                 ptr := v_0
22020                 if v_1.Op != OpAMD64FlagGT_UGT {
22021                         break
22022                 }
22023                 mem := v_2
22024                 v.reset(OpAMD64MOVBstore)
22025                 v.AuxInt = int32ToAuxInt(off)
22026                 v.Aux = symToAux(sym)
22027                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
22028                 v0.AuxInt = int32ToAuxInt(1)
22029                 v.AddArg3(ptr, v0, mem)
22030                 return true
22031         }
22032         return false
22033 }
22034 func rewriteValueAMD64_OpAMD64SETB(v *Value) bool {
22035         v_0 := v.Args[0]
22036         // match: (SETB (TESTQ x x))
22037         // result: (ConstBool [false])
22038         for {
22039                 if v_0.Op != OpAMD64TESTQ {
22040                         break
22041                 }
22042                 x := v_0.Args[1]
22043                 if x != v_0.Args[0] {
22044                         break
22045                 }
22046                 v.reset(OpConstBool)
22047                 v.AuxInt = boolToAuxInt(false)
22048                 return true
22049         }
22050         // match: (SETB (TESTL x x))
22051         // result: (ConstBool [false])
22052         for {
22053                 if v_0.Op != OpAMD64TESTL {
22054                         break
22055                 }
22056                 x := v_0.Args[1]
22057                 if x != v_0.Args[0] {
22058                         break
22059                 }
22060                 v.reset(OpConstBool)
22061                 v.AuxInt = boolToAuxInt(false)
22062                 return true
22063         }
22064         // match: (SETB (TESTW x x))
22065         // result: (ConstBool [false])
22066         for {
22067                 if v_0.Op != OpAMD64TESTW {
22068                         break
22069                 }
22070                 x := v_0.Args[1]
22071                 if x != v_0.Args[0] {
22072                         break
22073                 }
22074                 v.reset(OpConstBool)
22075                 v.AuxInt = boolToAuxInt(false)
22076                 return true
22077         }
22078         // match: (SETB (TESTB x x))
22079         // result: (ConstBool [false])
22080         for {
22081                 if v_0.Op != OpAMD64TESTB {
22082                         break
22083                 }
22084                 x := v_0.Args[1]
22085                 if x != v_0.Args[0] {
22086                         break
22087                 }
22088                 v.reset(OpConstBool)
22089                 v.AuxInt = boolToAuxInt(false)
22090                 return true
22091         }
22092         // match: (SETB (BTLconst [0] x))
22093         // result: (ANDLconst [1] x)
22094         for {
22095                 if v_0.Op != OpAMD64BTLconst || auxIntToInt8(v_0.AuxInt) != 0 {
22096                         break
22097                 }
22098                 x := v_0.Args[0]
22099                 v.reset(OpAMD64ANDLconst)
22100                 v.AuxInt = int32ToAuxInt(1)
22101                 v.AddArg(x)
22102                 return true
22103         }
22104         // match: (SETB (BTQconst [0] x))
22105         // result: (ANDQconst [1] x)
22106         for {
22107                 if v_0.Op != OpAMD64BTQconst || auxIntToInt8(v_0.AuxInt) != 0 {
22108                         break
22109                 }
22110                 x := v_0.Args[0]
22111                 v.reset(OpAMD64ANDQconst)
22112                 v.AuxInt = int32ToAuxInt(1)
22113                 v.AddArg(x)
22114                 return true
22115         }
22116         // match: (SETB (InvertFlags x))
22117         // result: (SETA x)
22118         for {
22119                 if v_0.Op != OpAMD64InvertFlags {
22120                         break
22121                 }
22122                 x := v_0.Args[0]
22123                 v.reset(OpAMD64SETA)
22124                 v.AddArg(x)
22125                 return true
22126         }
22127         // match: (SETB (FlagEQ))
22128         // result: (MOVLconst [0])
22129         for {
22130                 if v_0.Op != OpAMD64FlagEQ {
22131                         break
22132                 }
22133                 v.reset(OpAMD64MOVLconst)
22134                 v.AuxInt = int32ToAuxInt(0)
22135                 return true
22136         }
22137         // match: (SETB (FlagLT_ULT))
22138         // result: (MOVLconst [1])
22139         for {
22140                 if v_0.Op != OpAMD64FlagLT_ULT {
22141                         break
22142                 }
22143                 v.reset(OpAMD64MOVLconst)
22144                 v.AuxInt = int32ToAuxInt(1)
22145                 return true
22146         }
22147         // match: (SETB (FlagLT_UGT))
22148         // result: (MOVLconst [0])
22149         for {
22150                 if v_0.Op != OpAMD64FlagLT_UGT {
22151                         break
22152                 }
22153                 v.reset(OpAMD64MOVLconst)
22154                 v.AuxInt = int32ToAuxInt(0)
22155                 return true
22156         }
22157         // match: (SETB (FlagGT_ULT))
22158         // result: (MOVLconst [1])
22159         for {
22160                 if v_0.Op != OpAMD64FlagGT_ULT {
22161                         break
22162                 }
22163                 v.reset(OpAMD64MOVLconst)
22164                 v.AuxInt = int32ToAuxInt(1)
22165                 return true
22166         }
22167         // match: (SETB (FlagGT_UGT))
22168         // result: (MOVLconst [0])
22169         for {
22170                 if v_0.Op != OpAMD64FlagGT_UGT {
22171                         break
22172                 }
22173                 v.reset(OpAMD64MOVLconst)
22174                 v.AuxInt = int32ToAuxInt(0)
22175                 return true
22176         }
22177         return false
22178 }
22179 func rewriteValueAMD64_OpAMD64SETBE(v *Value) bool {
22180         v_0 := v.Args[0]
22181         // match: (SETBE (InvertFlags x))
22182         // result: (SETAE x)
22183         for {
22184                 if v_0.Op != OpAMD64InvertFlags {
22185                         break
22186                 }
22187                 x := v_0.Args[0]
22188                 v.reset(OpAMD64SETAE)
22189                 v.AddArg(x)
22190                 return true
22191         }
22192         // match: (SETBE (FlagEQ))
22193         // result: (MOVLconst [1])
22194         for {
22195                 if v_0.Op != OpAMD64FlagEQ {
22196                         break
22197                 }
22198                 v.reset(OpAMD64MOVLconst)
22199                 v.AuxInt = int32ToAuxInt(1)
22200                 return true
22201         }
22202         // match: (SETBE (FlagLT_ULT))
22203         // result: (MOVLconst [1])
22204         for {
22205                 if v_0.Op != OpAMD64FlagLT_ULT {
22206                         break
22207                 }
22208                 v.reset(OpAMD64MOVLconst)
22209                 v.AuxInt = int32ToAuxInt(1)
22210                 return true
22211         }
22212         // match: (SETBE (FlagLT_UGT))
22213         // result: (MOVLconst [0])
22214         for {
22215                 if v_0.Op != OpAMD64FlagLT_UGT {
22216                         break
22217                 }
22218                 v.reset(OpAMD64MOVLconst)
22219                 v.AuxInt = int32ToAuxInt(0)
22220                 return true
22221         }
22222         // match: (SETBE (FlagGT_ULT))
22223         // result: (MOVLconst [1])
22224         for {
22225                 if v_0.Op != OpAMD64FlagGT_ULT {
22226                         break
22227                 }
22228                 v.reset(OpAMD64MOVLconst)
22229                 v.AuxInt = int32ToAuxInt(1)
22230                 return true
22231         }
22232         // match: (SETBE (FlagGT_UGT))
22233         // result: (MOVLconst [0])
22234         for {
22235                 if v_0.Op != OpAMD64FlagGT_UGT {
22236                         break
22237                 }
22238                 v.reset(OpAMD64MOVLconst)
22239                 v.AuxInt = int32ToAuxInt(0)
22240                 return true
22241         }
22242         return false
22243 }
// rewriteValueAMD64_OpAMD64SETBEstore rewrites (SETBEstore [off] {sym} ptr flags mem),
// the store form of SETBE (unsigned less-or-equal). It flips through InvertFlags
// to SETAEstore, folds ADDQconst/LEAQ address arithmetic into the offset/symbol,
// and, when the flags operand is a known constant flag value, replaces the whole
// thing with a MOVBstore of the decided 0/1 byte. Reports whether a rewrite fired.
// NOTE(review): rule order matters — the InvertFlags rewrite is tried before the
// address folding, so do not reorder the match loops.
func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool {
	v_2 := v.Args[2] // mem
	v_1 := v.Args[1] // flags value being stored as a byte
	v_0 := v.Args[0] // ptr
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETBEstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETAEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETBEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETBEstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		// Guard: the summed displacement must still fit in 32 bits.
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETBEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETBEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETBEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		// Guard: offsets must fit and at most one of the symbols may be set.
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETBEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// Constant-flag cases below: unsigned "<=" holds for EQ/ULT (store 1),
	// fails for UGT (store 0).
	// match: (SETBEstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETBstore rewrites (SETBstore [off] {sym} ptr flags mem),
// the store form of SETB (unsigned less-than). It flips through InvertFlags to
// SETAstore, folds ADDQconst/LEAQ address arithmetic into the offset/symbol, and,
// when the flags operand is a known constant flag value, replaces the whole thing
// with a MOVBstore of the decided 0/1 byte. Reports whether a rewrite fired.
// NOTE(review): rule order matters — the InvertFlags rewrite is tried before the
// address folding, so do not reorder the match loops.
func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool {
	v_2 := v.Args[2] // mem
	v_1 := v.Args[1] // flags value being stored as a byte
	v_0 := v.Args[0] // ptr
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETBstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETAstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETAstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETBstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETBstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		// Guard: the summed displacement must still fit in 32 bits.
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		// Guard: offsets must fit and at most one of the symbols may be set.
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// Constant-flag cases below: unsigned "<" holds only for ULT (store 1);
	// EQ and UGT store 0.
	// match: (SETBstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
22564 func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool {
22565         v_0 := v.Args[0]
22566         b := v.Block
22567         // match: (SETEQ (TESTL (SHLL (MOVLconst [1]) x) y))
22568         // result: (SETAE (BTL x y))
22569         for {
22570                 if v_0.Op != OpAMD64TESTL {
22571                         break
22572                 }
22573                 _ = v_0.Args[1]
22574                 v_0_0 := v_0.Args[0]
22575                 v_0_1 := v_0.Args[1]
22576                 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
22577                         if v_0_0.Op != OpAMD64SHLL {
22578                                 continue
22579                         }
22580                         x := v_0_0.Args[1]
22581                         v_0_0_0 := v_0_0.Args[0]
22582                         if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
22583                                 continue
22584                         }
22585                         y := v_0_1
22586                         v.reset(OpAMD64SETAE)
22587                         v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
22588                         v0.AddArg2(x, y)
22589                         v.AddArg(v0)
22590                         return true
22591                 }
22592                 break
22593         }
22594         // match: (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
22595         // result: (SETAE (BTQ x y))
22596         for {
22597                 if v_0.Op != OpAMD64TESTQ {
22598                         break
22599                 }
22600                 _ = v_0.Args[1]
22601                 v_0_0 := v_0.Args[0]
22602                 v_0_1 := v_0.Args[1]
22603                 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
22604                         if v_0_0.Op != OpAMD64SHLQ {
22605                                 continue
22606                         }
22607                         x := v_0_0.Args[1]
22608                         v_0_0_0 := v_0_0.Args[0]
22609                         if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
22610                                 continue
22611                         }
22612                         y := v_0_1
22613                         v.reset(OpAMD64SETAE)
22614                         v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
22615                         v0.AddArg2(x, y)
22616                         v.AddArg(v0)
22617                         return true
22618                 }
22619                 break
22620         }
22621         // match: (SETEQ (TESTLconst [c] x))
22622         // cond: isUint32PowerOfTwo(int64(c))
22623         // result: (SETAE (BTLconst [int8(log32(c))] x))
22624         for {
22625                 if v_0.Op != OpAMD64TESTLconst {
22626                         break
22627                 }
22628                 c := auxIntToInt32(v_0.AuxInt)
22629                 x := v_0.Args[0]
22630                 if !(isUint32PowerOfTwo(int64(c))) {
22631                         break
22632                 }
22633                 v.reset(OpAMD64SETAE)
22634                 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
22635                 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
22636                 v0.AddArg(x)
22637                 v.AddArg(v0)
22638                 return true
22639         }
22640         // match: (SETEQ (TESTQconst [c] x))
22641         // cond: isUint64PowerOfTwo(int64(c))
22642         // result: (SETAE (BTQconst [int8(log32(c))] x))
22643         for {
22644                 if v_0.Op != OpAMD64TESTQconst {
22645                         break
22646                 }
22647                 c := auxIntToInt32(v_0.AuxInt)
22648                 x := v_0.Args[0]
22649                 if !(isUint64PowerOfTwo(int64(c))) {
22650                         break
22651                 }
22652                 v.reset(OpAMD64SETAE)
22653                 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
22654                 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
22655                 v0.AddArg(x)
22656                 v.AddArg(v0)
22657                 return true
22658         }
22659         // match: (SETEQ (TESTQ (MOVQconst [c]) x))
22660         // cond: isUint64PowerOfTwo(c)
22661         // result: (SETAE (BTQconst [int8(log64(c))] x))
22662         for {
22663                 if v_0.Op != OpAMD64TESTQ {
22664                         break
22665                 }
22666                 _ = v_0.Args[1]
22667                 v_0_0 := v_0.Args[0]
22668                 v_0_1 := v_0.Args[1]
22669                 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
22670                         if v_0_0.Op != OpAMD64MOVQconst {
22671                                 continue
22672                         }
22673                         c := auxIntToInt64(v_0_0.AuxInt)
22674                         x := v_0_1
22675                         if !(isUint64PowerOfTwo(c)) {
22676                                 continue
22677                         }
22678                         v.reset(OpAMD64SETAE)
22679                         v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
22680                         v0.AuxInt = int8ToAuxInt(int8(log64(c)))
22681                         v0.AddArg(x)
22682                         v.AddArg(v0)
22683                         return true
22684                 }
22685                 break
22686         }
22687         // match: (SETEQ (CMPLconst [1] s:(ANDLconst [1] _)))
22688         // result: (SETNE (CMPLconst [0] s))
22689         for {
22690                 if v_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_0.AuxInt) != 1 {
22691                         break
22692                 }
22693                 s := v_0.Args[0]
22694                 if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
22695                         break
22696                 }
22697                 v.reset(OpAMD64SETNE)
22698                 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
22699                 v0.AuxInt = int32ToAuxInt(0)
22700                 v0.AddArg(s)
22701                 v.AddArg(v0)
22702                 return true
22703         }
22704         // match: (SETEQ (CMPQconst [1] s:(ANDQconst [1] _)))
22705         // result: (SETNE (CMPQconst [0] s))
22706         for {
22707                 if v_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_0.AuxInt) != 1 {
22708                         break
22709                 }
22710                 s := v_0.Args[0]
22711                 if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
22712                         break
22713                 }
22714                 v.reset(OpAMD64SETNE)
22715                 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
22716                 v0.AuxInt = int32ToAuxInt(0)
22717                 v0.AddArg(s)
22718                 v.AddArg(v0)
22719                 return true
22720         }
22721         // match: (SETEQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
22722         // cond: z1==z2
22723         // result: (SETAE (BTQconst [63] x))
22724         for {
22725                 if v_0.Op != OpAMD64TESTQ {
22726                         break
22727                 }
22728                 _ = v_0.Args[1]
22729                 v_0_0 := v_0.Args[0]
22730                 v_0_1 := v_0.Args[1]
22731                 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
22732                         z1 := v_0_0
22733                         if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
22734                                 continue
22735                         }
22736                         z1_0 := z1.Args[0]
22737                         if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
22738                                 continue
22739                         }
22740                         x := z1_0.Args[0]
22741                         z2 := v_0_1
22742                         if !(z1 == z2) {
22743                                 continue
22744                         }
22745                         v.reset(OpAMD64SETAE)
22746                         v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
22747                         v0.AuxInt = int8ToAuxInt(63)
22748                         v0.AddArg(x)
22749                         v.AddArg(v0)
22750                         return true
22751                 }
22752                 break
22753         }
22754         // match: (SETEQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
22755         // cond: z1==z2
22756         // result: (SETAE (BTQconst [31] x))
22757         for {
22758                 if v_0.Op != OpAMD64TESTL {
22759                         break
22760                 }
22761                 _ = v_0.Args[1]
22762                 v_0_0 := v_0.Args[0]
22763                 v_0_1 := v_0.Args[1]
22764                 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
22765                         z1 := v_0_0
22766                         if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
22767                                 continue
22768                         }
22769                         z1_0 := z1.Args[0]
22770                         if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
22771                                 continue
22772                         }
22773                         x := z1_0.Args[0]
22774                         z2 := v_0_1
22775                         if !(z1 == z2) {
22776                                 continue
22777                         }
22778                         v.reset(OpAMD64SETAE)
22779                         v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
22780                         v0.AuxInt = int8ToAuxInt(31)
22781                         v0.AddArg(x)
22782                         v.AddArg(v0)
22783                         return true
22784                 }
22785                 break
22786         }
22787         // match: (SETEQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
22788         // cond: z1==z2
22789         // result: (SETAE (BTQconst [0] x))
22790         for {
22791                 if v_0.Op != OpAMD64TESTQ {
22792                         break
22793                 }
22794                 _ = v_0.Args[1]
22795                 v_0_0 := v_0.Args[0]
22796                 v_0_1 := v_0.Args[1]
22797                 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
22798                         z1 := v_0_0
22799                         if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
22800                                 continue
22801                         }
22802                         z1_0 := z1.Args[0]
22803                         if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
22804                                 continue
22805                         }
22806                         x := z1_0.Args[0]
22807                         z2 := v_0_1
22808                         if !(z1 == z2) {
22809                                 continue
22810                         }
22811                         v.reset(OpAMD64SETAE)
22812                         v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
22813                         v0.AuxInt = int8ToAuxInt(0)
22814                         v0.AddArg(x)
22815                         v.AddArg(v0)
22816                         return true
22817                 }
22818                 break
22819         }
22820         // match: (SETEQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
22821         // cond: z1==z2
22822         // result: (SETAE (BTLconst [0] x))
22823         for {
22824                 if v_0.Op != OpAMD64TESTL {
22825                         break
22826                 }
22827                 _ = v_0.Args[1]
22828                 v_0_0 := v_0.Args[0]
22829                 v_0_1 := v_0.Args[1]
22830                 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
22831                         z1 := v_0_0
22832                         if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
22833                                 continue
22834                         }
22835                         z1_0 := z1.Args[0]
22836                         if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
22837                                 continue
22838                         }
22839                         x := z1_0.Args[0]
22840                         z2 := v_0_1
22841                         if !(z1 == z2) {
22842                                 continue
22843                         }
22844                         v.reset(OpAMD64SETAE)
22845                         v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
22846                         v0.AuxInt = int8ToAuxInt(0)
22847                         v0.AddArg(x)
22848                         v.AddArg(v0)
22849                         return true
22850                 }
22851                 break
22852         }
22853         // match: (SETEQ (TESTQ z1:(SHRQconst [63] x) z2))
22854         // cond: z1==z2
22855         // result: (SETAE (BTQconst [63] x))
22856         for {
22857                 if v_0.Op != OpAMD64TESTQ {
22858                         break
22859                 }
22860                 _ = v_0.Args[1]
22861                 v_0_0 := v_0.Args[0]
22862                 v_0_1 := v_0.Args[1]
22863                 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
22864                         z1 := v_0_0
22865                         if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
22866                                 continue
22867                         }
22868                         x := z1.Args[0]
22869                         z2 := v_0_1
22870                         if !(z1 == z2) {
22871                                 continue
22872                         }
22873                         v.reset(OpAMD64SETAE)
22874                         v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
22875                         v0.AuxInt = int8ToAuxInt(63)
22876                         v0.AddArg(x)
22877                         v.AddArg(v0)
22878                         return true
22879                 }
22880                 break
22881         }
22882         // match: (SETEQ (TESTL z1:(SHRLconst [31] x) z2))
22883         // cond: z1==z2
22884         // result: (SETAE (BTLconst [31] x))
22885         for {
22886                 if v_0.Op != OpAMD64TESTL {
22887                         break
22888                 }
22889                 _ = v_0.Args[1]
22890                 v_0_0 := v_0.Args[0]
22891                 v_0_1 := v_0.Args[1]
22892                 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
22893                         z1 := v_0_0
22894                         if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
22895                                 continue
22896                         }
22897                         x := z1.Args[0]
22898                         z2 := v_0_1
22899                         if !(z1 == z2) {
22900                                 continue
22901                         }
22902                         v.reset(OpAMD64SETAE)
22903                         v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
22904                         v0.AuxInt = int8ToAuxInt(31)
22905                         v0.AddArg(x)
22906                         v.AddArg(v0)
22907                         return true
22908                 }
22909                 break
22910         }
22911         // match: (SETEQ (InvertFlags x))
22912         // result: (SETEQ x)
22913         for {
22914                 if v_0.Op != OpAMD64InvertFlags {
22915                         break
22916                 }
22917                 x := v_0.Args[0]
22918                 v.reset(OpAMD64SETEQ)
22919                 v.AddArg(x)
22920                 return true
22921         }
22922         // match: (SETEQ (FlagEQ))
22923         // result: (MOVLconst [1])
22924         for {
22925                 if v_0.Op != OpAMD64FlagEQ {
22926                         break
22927                 }
22928                 v.reset(OpAMD64MOVLconst)
22929                 v.AuxInt = int32ToAuxInt(1)
22930                 return true
22931         }
22932         // match: (SETEQ (FlagLT_ULT))
22933         // result: (MOVLconst [0])
22934         for {
22935                 if v_0.Op != OpAMD64FlagLT_ULT {
22936                         break
22937                 }
22938                 v.reset(OpAMD64MOVLconst)
22939                 v.AuxInt = int32ToAuxInt(0)
22940                 return true
22941         }
22942         // match: (SETEQ (FlagLT_UGT))
22943         // result: (MOVLconst [0])
22944         for {
22945                 if v_0.Op != OpAMD64FlagLT_UGT {
22946                         break
22947                 }
22948                 v.reset(OpAMD64MOVLconst)
22949                 v.AuxInt = int32ToAuxInt(0)
22950                 return true
22951         }
22952         // match: (SETEQ (FlagGT_ULT))
22953         // result: (MOVLconst [0])
22954         for {
22955                 if v_0.Op != OpAMD64FlagGT_ULT {
22956                         break
22957                 }
22958                 v.reset(OpAMD64MOVLconst)
22959                 v.AuxInt = int32ToAuxInt(0)
22960                 return true
22961         }
22962         // match: (SETEQ (FlagGT_UGT))
22963         // result: (MOVLconst [0])
22964         for {
22965                 if v_0.Op != OpAMD64FlagGT_UGT {
22966                         break
22967                 }
22968                 v.reset(OpAMD64MOVLconst)
22969                 v.AuxInt = int32ToAuxInt(0)
22970                 return true
22971         }
22972         return false
22973 }
22974 func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
22975         v_2 := v.Args[2]
22976         v_1 := v.Args[1]
22977         v_0 := v.Args[0]
22978         b := v.Block
22979         typ := &b.Func.Config.Types
22980         // match: (SETEQstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
22981         // result: (SETAEstore [off] {sym} ptr (BTL x y) mem)
22982         for {
22983                 off := auxIntToInt32(v.AuxInt)
22984                 sym := auxToSym(v.Aux)
22985                 ptr := v_0
22986                 if v_1.Op != OpAMD64TESTL {
22987                         break
22988                 }
22989                 _ = v_1.Args[1]
22990                 v_1_0 := v_1.Args[0]
22991                 v_1_1 := v_1.Args[1]
22992                 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
22993                         if v_1_0.Op != OpAMD64SHLL {
22994                                 continue
22995                         }
22996                         x := v_1_0.Args[1]
22997                         v_1_0_0 := v_1_0.Args[0]
22998                         if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 {
22999                                 continue
23000                         }
23001                         y := v_1_1
23002                         mem := v_2
23003                         v.reset(OpAMD64SETAEstore)
23004                         v.AuxInt = int32ToAuxInt(off)
23005                         v.Aux = symToAux(sym)
23006                         v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
23007                         v0.AddArg2(x, y)
23008                         v.AddArg3(ptr, v0, mem)
23009                         return true
23010                 }
23011                 break
23012         }
23013         // match: (SETEQstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
23014         // result: (SETAEstore [off] {sym} ptr (BTQ x y) mem)
23015         for {
23016                 off := auxIntToInt32(v.AuxInt)
23017                 sym := auxToSym(v.Aux)
23018                 ptr := v_0
23019                 if v_1.Op != OpAMD64TESTQ {
23020                         break
23021                 }
23022                 _ = v_1.Args[1]
23023                 v_1_0 := v_1.Args[0]
23024                 v_1_1 := v_1.Args[1]
23025                 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
23026                         if v_1_0.Op != OpAMD64SHLQ {
23027                                 continue
23028                         }
23029                         x := v_1_0.Args[1]
23030                         v_1_0_0 := v_1_0.Args[0]
23031                         if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 {
23032                                 continue
23033                         }
23034                         y := v_1_1
23035                         mem := v_2
23036                         v.reset(OpAMD64SETAEstore)
23037                         v.AuxInt = int32ToAuxInt(off)
23038                         v.Aux = symToAux(sym)
23039                         v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
23040                         v0.AddArg2(x, y)
23041                         v.AddArg3(ptr, v0, mem)
23042                         return true
23043                 }
23044                 break
23045         }
23046         // match: (SETEQstore [off] {sym} ptr (TESTLconst [c] x) mem)
23047         // cond: isUint32PowerOfTwo(int64(c))
23048         // result: (SETAEstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
23049         for {
23050                 off := auxIntToInt32(v.AuxInt)
23051                 sym := auxToSym(v.Aux)
23052                 ptr := v_0
23053                 if v_1.Op != OpAMD64TESTLconst {
23054                         break
23055                 }
23056                 c := auxIntToInt32(v_1.AuxInt)
23057                 x := v_1.Args[0]
23058                 mem := v_2
23059                 if !(isUint32PowerOfTwo(int64(c))) {
23060                         break
23061                 }
23062                 v.reset(OpAMD64SETAEstore)
23063                 v.AuxInt = int32ToAuxInt(off)
23064                 v.Aux = symToAux(sym)
23065                 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
23066                 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
23067                 v0.AddArg(x)
23068                 v.AddArg3(ptr, v0, mem)
23069                 return true
23070         }
23071         // match: (SETEQstore [off] {sym} ptr (TESTQconst [c] x) mem)
23072         // cond: isUint64PowerOfTwo(int64(c))
23073         // result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem)
23074         for {
23075                 off := auxIntToInt32(v.AuxInt)
23076                 sym := auxToSym(v.Aux)
23077                 ptr := v_0
23078                 if v_1.Op != OpAMD64TESTQconst {
23079                         break
23080                 }
23081                 c := auxIntToInt32(v_1.AuxInt)
23082                 x := v_1.Args[0]
23083                 mem := v_2
23084                 if !(isUint64PowerOfTwo(int64(c))) {
23085                         break
23086                 }
23087                 v.reset(OpAMD64SETAEstore)
23088                 v.AuxInt = int32ToAuxInt(off)
23089                 v.Aux = symToAux(sym)
23090                 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
23091                 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
23092                 v0.AddArg(x)
23093                 v.AddArg3(ptr, v0, mem)
23094                 return true
23095         }
23096         // match: (SETEQstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
23097         // cond: isUint64PowerOfTwo(c)
23098         // result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
23099         for {
23100                 off := auxIntToInt32(v.AuxInt)
23101                 sym := auxToSym(v.Aux)
23102                 ptr := v_0
23103                 if v_1.Op != OpAMD64TESTQ {
23104                         break
23105                 }
23106                 _ = v_1.Args[1]
23107                 v_1_0 := v_1.Args[0]
23108                 v_1_1 := v_1.Args[1]
23109                 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
23110                         if v_1_0.Op != OpAMD64MOVQconst {
23111                                 continue
23112                         }
23113                         c := auxIntToInt64(v_1_0.AuxInt)
23114                         x := v_1_1
23115                         mem := v_2
23116                         if !(isUint64PowerOfTwo(c)) {
23117                                 continue
23118                         }
23119                         v.reset(OpAMD64SETAEstore)
23120                         v.AuxInt = int32ToAuxInt(off)
23121                         v.Aux = symToAux(sym)
23122                         v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
23123                         v0.AuxInt = int8ToAuxInt(int8(log64(c)))
23124                         v0.AddArg(x)
23125                         v.AddArg3(ptr, v0, mem)
23126                         return true
23127                 }
23128                 break
23129         }
23130         // match: (SETEQstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem)
23131         // result: (SETNEstore [off] {sym} ptr (CMPLconst [0] s) mem)
23132         for {
23133                 off := auxIntToInt32(v.AuxInt)
23134                 sym := auxToSym(v.Aux)
23135                 ptr := v_0
23136                 if v_1.Op != OpAMD64CMPLconst || auxIntToInt32(v_1.AuxInt) != 1 {
23137                         break
23138                 }
23139                 s := v_1.Args[0]
23140                 if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
23141                         break
23142                 }
23143                 mem := v_2
23144                 v.reset(OpAMD64SETNEstore)
23145                 v.AuxInt = int32ToAuxInt(off)
23146                 v.Aux = symToAux(sym)
23147                 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
23148                 v0.AuxInt = int32ToAuxInt(0)
23149                 v0.AddArg(s)
23150                 v.AddArg3(ptr, v0, mem)
23151                 return true
23152         }
23153         // match: (SETEQstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem)
23154         // result: (SETNEstore [off] {sym} ptr (CMPQconst [0] s) mem)
23155         for {
23156                 off := auxIntToInt32(v.AuxInt)
23157                 sym := auxToSym(v.Aux)
23158                 ptr := v_0
23159                 if v_1.Op != OpAMD64CMPQconst || auxIntToInt32(v_1.AuxInt) != 1 {
23160                         break
23161                 }
23162                 s := v_1.Args[0]
23163                 if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
23164                         break
23165                 }
23166                 mem := v_2
23167                 v.reset(OpAMD64SETNEstore)
23168                 v.AuxInt = int32ToAuxInt(off)
23169                 v.Aux = symToAux(sym)
23170                 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
23171                 v0.AuxInt = int32ToAuxInt(0)
23172                 v0.AddArg(s)
23173                 v.AddArg3(ptr, v0, mem)
23174                 return true
23175         }
23176         // match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
23177         // cond: z1==z2
23178         // result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
23179         for {
23180                 off := auxIntToInt32(v.AuxInt)
23181                 sym := auxToSym(v.Aux)
23182                 ptr := v_0
23183                 if v_1.Op != OpAMD64TESTQ {
23184                         break
23185                 }
23186                 _ = v_1.Args[1]
23187                 v_1_0 := v_1.Args[0]
23188                 v_1_1 := v_1.Args[1]
23189                 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
23190                         z1 := v_1_0
23191                         if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
23192                                 continue
23193                         }
23194                         z1_0 := z1.Args[0]
23195                         if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
23196                                 continue
23197                         }
23198                         x := z1_0.Args[0]
23199                         z2 := v_1_1
23200                         mem := v_2
23201                         if !(z1 == z2) {
23202                                 continue
23203                         }
23204                         v.reset(OpAMD64SETAEstore)
23205                         v.AuxInt = int32ToAuxInt(off)
23206                         v.Aux = symToAux(sym)
23207                         v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
23208                         v0.AuxInt = int8ToAuxInt(63)
23209                         v0.AddArg(x)
23210                         v.AddArg3(ptr, v0, mem)
23211                         return true
23212                 }
23213                 break
23214         }
23215         // match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem)
23216         // cond: z1==z2
23217         // result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
23218         for {
23219                 off := auxIntToInt32(v.AuxInt)
23220                 sym := auxToSym(v.Aux)
23221                 ptr := v_0
23222                 if v_1.Op != OpAMD64TESTL {
23223                         break
23224                 }
23225                 _ = v_1.Args[1]
23226                 v_1_0 := v_1.Args[0]
23227                 v_1_1 := v_1.Args[1]
23228                 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
23229                         z1 := v_1_0
23230                         if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
23231                                 continue
23232                         }
23233                         z1_0 := z1.Args[0]
23234                         if z1_0.Op != OpAMD64SHRLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
23235                                 continue
23236                         }
23237                         x := z1_0.Args[0]
23238                         z2 := v_1_1
23239                         mem := v_2
23240                         if !(z1 == z2) {
23241                                 continue
23242                         }
23243                         v.reset(OpAMD64SETAEstore)
23244                         v.AuxInt = int32ToAuxInt(off)
23245                         v.Aux = symToAux(sym)
23246                         v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
23247                         v0.AuxInt = int8ToAuxInt(31)
23248                         v0.AddArg(x)
23249                         v.AddArg3(ptr, v0, mem)
23250                         return true
23251                 }
23252                 break
23253         }
23254         // match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem)
23255         // cond: z1==z2
23256         // result: (SETAEstore [off] {sym} ptr (BTQconst [0] x) mem)
23257         for {
23258                 off := auxIntToInt32(v.AuxInt)
23259                 sym := auxToSym(v.Aux)
23260                 ptr := v_0
23261                 if v_1.Op != OpAMD64TESTQ {
23262                         break
23263                 }
23264                 _ = v_1.Args[1]
23265                 v_1_0 := v_1.Args[0]
23266                 v_1_1 := v_1.Args[1]
23267                 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
23268                         z1 := v_1_0
23269                         if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
23270                                 continue
23271                         }
23272                         z1_0 := z1.Args[0]
23273                         if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
23274                                 continue
23275                         }
23276                         x := z1_0.Args[0]
23277                         z2 := v_1_1
23278                         mem := v_2
23279                         if !(z1 == z2) {
23280                                 continue
23281                         }
23282                         v.reset(OpAMD64SETAEstore)
23283                         v.AuxInt = int32ToAuxInt(off)
23284                         v.Aux = symToAux(sym)
23285                         v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
23286                         v0.AuxInt = int8ToAuxInt(0)
23287                         v0.AddArg(x)
23288                         v.AddArg3(ptr, v0, mem)
23289                         return true
23290                 }
23291                 break
23292         }
23293         // match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem)
23294         // cond: z1==z2
23295         // result: (SETAEstore [off] {sym} ptr (BTLconst [0] x) mem)
23296         for {
23297                 off := auxIntToInt32(v.AuxInt)
23298                 sym := auxToSym(v.Aux)
23299                 ptr := v_0
23300                 if v_1.Op != OpAMD64TESTL {
23301                         break
23302                 }
23303                 _ = v_1.Args[1]
23304                 v_1_0 := v_1.Args[0]
23305                 v_1_1 := v_1.Args[1]
23306                 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
23307                         z1 := v_1_0
23308                         if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
23309                                 continue
23310                         }
23311                         z1_0 := z1.Args[0]
23312                         if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
23313                                 continue
23314                         }
23315                         x := z1_0.Args[0]
23316                         z2 := v_1_1
23317                         mem := v_2
23318                         if !(z1 == z2) {
23319                                 continue
23320                         }
23321                         v.reset(OpAMD64SETAEstore)
23322                         v.AuxInt = int32ToAuxInt(off)
23323                         v.Aux = symToAux(sym)
23324                         v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
23325                         v0.AuxInt = int8ToAuxInt(0)
23326                         v0.AddArg(x)
23327                         v.AddArg3(ptr, v0, mem)
23328                         return true
23329                 }
23330                 break
23331         }
23332         // match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem)
23333         // cond: z1==z2
23334         // result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
23335         for {
23336                 off := auxIntToInt32(v.AuxInt)
23337                 sym := auxToSym(v.Aux)
23338                 ptr := v_0
23339                 if v_1.Op != OpAMD64TESTQ {
23340                         break
23341                 }
23342                 _ = v_1.Args[1]
23343                 v_1_0 := v_1.Args[0]
23344                 v_1_1 := v_1.Args[1]
23345                 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
23346                         z1 := v_1_0
23347                         if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
23348                                 continue
23349                         }
23350                         x := z1.Args[0]
23351                         z2 := v_1_1
23352                         mem := v_2
23353                         if !(z1 == z2) {
23354                                 continue
23355                         }
23356                         v.reset(OpAMD64SETAEstore)
23357                         v.AuxInt = int32ToAuxInt(off)
23358                         v.Aux = symToAux(sym)
23359                         v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
23360                         v0.AuxInt = int8ToAuxInt(63)
23361                         v0.AddArg(x)
23362                         v.AddArg3(ptr, v0, mem)
23363                         return true
23364                 }
23365                 break
23366         }
23367         // match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem)
23368         // cond: z1==z2
23369         // result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
23370         for {
23371                 off := auxIntToInt32(v.AuxInt)
23372                 sym := auxToSym(v.Aux)
23373                 ptr := v_0
23374                 if v_1.Op != OpAMD64TESTL {
23375                         break
23376                 }
23377                 _ = v_1.Args[1]
23378                 v_1_0 := v_1.Args[0]
23379                 v_1_1 := v_1.Args[1]
23380                 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
23381                         z1 := v_1_0
23382                         if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
23383                                 continue
23384                         }
23385                         x := z1.Args[0]
23386                         z2 := v_1_1
23387                         mem := v_2
23388                         if !(z1 == z2) {
23389                                 continue
23390                         }
23391                         v.reset(OpAMD64SETAEstore)
23392                         v.AuxInt = int32ToAuxInt(off)
23393                         v.Aux = symToAux(sym)
23394                         v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
23395                         v0.AuxInt = int8ToAuxInt(31)
23396                         v0.AddArg(x)
23397                         v.AddArg3(ptr, v0, mem)
23398                         return true
23399                 }
23400                 break
23401         }
23402         // match: (SETEQstore [off] {sym} ptr (InvertFlags x) mem)
23403         // result: (SETEQstore [off] {sym} ptr x mem)
23404         for {
23405                 off := auxIntToInt32(v.AuxInt)
23406                 sym := auxToSym(v.Aux)
23407                 ptr := v_0
23408                 if v_1.Op != OpAMD64InvertFlags {
23409                         break
23410                 }
23411                 x := v_1.Args[0]
23412                 mem := v_2
23413                 v.reset(OpAMD64SETEQstore)
23414                 v.AuxInt = int32ToAuxInt(off)
23415                 v.Aux = symToAux(sym)
23416                 v.AddArg3(ptr, x, mem)
23417                 return true
23418         }
23419         // match: (SETEQstore [off1] {sym} (ADDQconst [off2] base) val mem)
23420         // cond: is32Bit(int64(off1)+int64(off2))
23421         // result: (SETEQstore [off1+off2] {sym} base val mem)
23422         for {
23423                 off1 := auxIntToInt32(v.AuxInt)
23424                 sym := auxToSym(v.Aux)
23425                 if v_0.Op != OpAMD64ADDQconst {
23426                         break
23427                 }
23428                 off2 := auxIntToInt32(v_0.AuxInt)
23429                 base := v_0.Args[0]
23430                 val := v_1
23431                 mem := v_2
23432                 if !(is32Bit(int64(off1) + int64(off2))) {
23433                         break
23434                 }
23435                 v.reset(OpAMD64SETEQstore)
23436                 v.AuxInt = int32ToAuxInt(off1 + off2)
23437                 v.Aux = symToAux(sym)
23438                 v.AddArg3(base, val, mem)
23439                 return true
23440         }
23441         // match: (SETEQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
23442         // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
23443         // result: (SETEQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
23444         for {
23445                 off1 := auxIntToInt32(v.AuxInt)
23446                 sym1 := auxToSym(v.Aux)
23447                 if v_0.Op != OpAMD64LEAQ {
23448                         break
23449                 }
23450                 off2 := auxIntToInt32(v_0.AuxInt)
23451                 sym2 := auxToSym(v_0.Aux)
23452                 base := v_0.Args[0]
23453                 val := v_1
23454                 mem := v_2
23455                 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
23456                         break
23457                 }
23458                 v.reset(OpAMD64SETEQstore)
23459                 v.AuxInt = int32ToAuxInt(off1 + off2)
23460                 v.Aux = symToAux(mergeSym(sym1, sym2))
23461                 v.AddArg3(base, val, mem)
23462                 return true
23463         }
23464         // match: (SETEQstore [off] {sym} ptr (FlagEQ) mem)
23465         // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
23466         for {
23467                 off := auxIntToInt32(v.AuxInt)
23468                 sym := auxToSym(v.Aux)
23469                 ptr := v_0
23470                 if v_1.Op != OpAMD64FlagEQ {
23471                         break
23472                 }
23473                 mem := v_2
23474                 v.reset(OpAMD64MOVBstore)
23475                 v.AuxInt = int32ToAuxInt(off)
23476                 v.Aux = symToAux(sym)
23477                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
23478                 v0.AuxInt = int32ToAuxInt(1)
23479                 v.AddArg3(ptr, v0, mem)
23480                 return true
23481         }
23482         // match: (SETEQstore [off] {sym} ptr (FlagLT_ULT) mem)
23483         // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
23484         for {
23485                 off := auxIntToInt32(v.AuxInt)
23486                 sym := auxToSym(v.Aux)
23487                 ptr := v_0
23488                 if v_1.Op != OpAMD64FlagLT_ULT {
23489                         break
23490                 }
23491                 mem := v_2
23492                 v.reset(OpAMD64MOVBstore)
23493                 v.AuxInt = int32ToAuxInt(off)
23494                 v.Aux = symToAux(sym)
23495                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
23496                 v0.AuxInt = int32ToAuxInt(0)
23497                 v.AddArg3(ptr, v0, mem)
23498                 return true
23499         }
23500         // match: (SETEQstore [off] {sym} ptr (FlagLT_UGT) mem)
23501         // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
23502         for {
23503                 off := auxIntToInt32(v.AuxInt)
23504                 sym := auxToSym(v.Aux)
23505                 ptr := v_0
23506                 if v_1.Op != OpAMD64FlagLT_UGT {
23507                         break
23508                 }
23509                 mem := v_2
23510                 v.reset(OpAMD64MOVBstore)
23511                 v.AuxInt = int32ToAuxInt(off)
23512                 v.Aux = symToAux(sym)
23513                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
23514                 v0.AuxInt = int32ToAuxInt(0)
23515                 v.AddArg3(ptr, v0, mem)
23516                 return true
23517         }
23518         // match: (SETEQstore [off] {sym} ptr (FlagGT_ULT) mem)
23519         // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
23520         for {
23521                 off := auxIntToInt32(v.AuxInt)
23522                 sym := auxToSym(v.Aux)
23523                 ptr := v_0
23524                 if v_1.Op != OpAMD64FlagGT_ULT {
23525                         break
23526                 }
23527                 mem := v_2
23528                 v.reset(OpAMD64MOVBstore)
23529                 v.AuxInt = int32ToAuxInt(off)
23530                 v.Aux = symToAux(sym)
23531                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
23532                 v0.AuxInt = int32ToAuxInt(0)
23533                 v.AddArg3(ptr, v0, mem)
23534                 return true
23535         }
23536         // match: (SETEQstore [off] {sym} ptr (FlagGT_UGT) mem)
23537         // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
23538         for {
23539                 off := auxIntToInt32(v.AuxInt)
23540                 sym := auxToSym(v.Aux)
23541                 ptr := v_0
23542                 if v_1.Op != OpAMD64FlagGT_UGT {
23543                         break
23544                 }
23545                 mem := v_2
23546                 v.reset(OpAMD64MOVBstore)
23547                 v.AuxInt = int32ToAuxInt(off)
23548                 v.Aux = symToAux(sym)
23549                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
23550                 v0.AuxInt = int32ToAuxInt(0)
23551                 v.AddArg3(ptr, v0, mem)
23552                 return true
23553         }
23554         return false
23555 }
23556 func rewriteValueAMD64_OpAMD64SETG(v *Value) bool {
23557         v_0 := v.Args[0]
23558         // match: (SETG (InvertFlags x))
23559         // result: (SETL x)
23560         for {
23561                 if v_0.Op != OpAMD64InvertFlags {
23562                         break
23563                 }
23564                 x := v_0.Args[0]
23565                 v.reset(OpAMD64SETL)
23566                 v.AddArg(x)
23567                 return true
23568         }
23569         // match: (SETG (FlagEQ))
23570         // result: (MOVLconst [0])
23571         for {
23572                 if v_0.Op != OpAMD64FlagEQ {
23573                         break
23574                 }
23575                 v.reset(OpAMD64MOVLconst)
23576                 v.AuxInt = int32ToAuxInt(0)
23577                 return true
23578         }
23579         // match: (SETG (FlagLT_ULT))
23580         // result: (MOVLconst [0])
23581         for {
23582                 if v_0.Op != OpAMD64FlagLT_ULT {
23583                         break
23584                 }
23585                 v.reset(OpAMD64MOVLconst)
23586                 v.AuxInt = int32ToAuxInt(0)
23587                 return true
23588         }
23589         // match: (SETG (FlagLT_UGT))
23590         // result: (MOVLconst [0])
23591         for {
23592                 if v_0.Op != OpAMD64FlagLT_UGT {
23593                         break
23594                 }
23595                 v.reset(OpAMD64MOVLconst)
23596                 v.AuxInt = int32ToAuxInt(0)
23597                 return true
23598         }
23599         // match: (SETG (FlagGT_ULT))
23600         // result: (MOVLconst [1])
23601         for {
23602                 if v_0.Op != OpAMD64FlagGT_ULT {
23603                         break
23604                 }
23605                 v.reset(OpAMD64MOVLconst)
23606                 v.AuxInt = int32ToAuxInt(1)
23607                 return true
23608         }
23609         // match: (SETG (FlagGT_UGT))
23610         // result: (MOVLconst [1])
23611         for {
23612                 if v_0.Op != OpAMD64FlagGT_UGT {
23613                         break
23614                 }
23615                 v.reset(OpAMD64MOVLconst)
23616                 v.AuxInt = int32ToAuxInt(1)
23617                 return true
23618         }
23619         return false
23620 }
23621 func rewriteValueAMD64_OpAMD64SETGE(v *Value) bool {
23622         v_0 := v.Args[0]
23623         // match: (SETGE (InvertFlags x))
23624         // result: (SETLE x)
23625         for {
23626                 if v_0.Op != OpAMD64InvertFlags {
23627                         break
23628                 }
23629                 x := v_0.Args[0]
23630                 v.reset(OpAMD64SETLE)
23631                 v.AddArg(x)
23632                 return true
23633         }
23634         // match: (SETGE (FlagEQ))
23635         // result: (MOVLconst [1])
23636         for {
23637                 if v_0.Op != OpAMD64FlagEQ {
23638                         break
23639                 }
23640                 v.reset(OpAMD64MOVLconst)
23641                 v.AuxInt = int32ToAuxInt(1)
23642                 return true
23643         }
23644         // match: (SETGE (FlagLT_ULT))
23645         // result: (MOVLconst [0])
23646         for {
23647                 if v_0.Op != OpAMD64FlagLT_ULT {
23648                         break
23649                 }
23650                 v.reset(OpAMD64MOVLconst)
23651                 v.AuxInt = int32ToAuxInt(0)
23652                 return true
23653         }
23654         // match: (SETGE (FlagLT_UGT))
23655         // result: (MOVLconst [0])
23656         for {
23657                 if v_0.Op != OpAMD64FlagLT_UGT {
23658                         break
23659                 }
23660                 v.reset(OpAMD64MOVLconst)
23661                 v.AuxInt = int32ToAuxInt(0)
23662                 return true
23663         }
23664         // match: (SETGE (FlagGT_ULT))
23665         // result: (MOVLconst [1])
23666         for {
23667                 if v_0.Op != OpAMD64FlagGT_ULT {
23668                         break
23669                 }
23670                 v.reset(OpAMD64MOVLconst)
23671                 v.AuxInt = int32ToAuxInt(1)
23672                 return true
23673         }
23674         // match: (SETGE (FlagGT_UGT))
23675         // result: (MOVLconst [1])
23676         for {
23677                 if v_0.Op != OpAMD64FlagGT_UGT {
23678                         break
23679                 }
23680                 v.reset(OpAMD64MOVLconst)
23681                 v.AuxInt = int32ToAuxInt(1)
23682                 return true
23683         }
23684         return false
23685 }
23686 func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool {
23687         v_2 := v.Args[2]
23688         v_1 := v.Args[1]
23689         v_0 := v.Args[0]
23690         b := v.Block
23691         typ := &b.Func.Config.Types
23692         // match: (SETGEstore [off] {sym} ptr (InvertFlags x) mem)
23693         // result: (SETLEstore [off] {sym} ptr x mem)
23694         for {
23695                 off := auxIntToInt32(v.AuxInt)
23696                 sym := auxToSym(v.Aux)
23697                 ptr := v_0
23698                 if v_1.Op != OpAMD64InvertFlags {
23699                         break
23700                 }
23701                 x := v_1.Args[0]
23702                 mem := v_2
23703                 v.reset(OpAMD64SETLEstore)
23704                 v.AuxInt = int32ToAuxInt(off)
23705                 v.Aux = symToAux(sym)
23706                 v.AddArg3(ptr, x, mem)
23707                 return true
23708         }
23709         // match: (SETGEstore [off1] {sym} (ADDQconst [off2] base) val mem)
23710         // cond: is32Bit(int64(off1)+int64(off2))
23711         // result: (SETGEstore [off1+off2] {sym} base val mem)
23712         for {
23713                 off1 := auxIntToInt32(v.AuxInt)
23714                 sym := auxToSym(v.Aux)
23715                 if v_0.Op != OpAMD64ADDQconst {
23716                         break
23717                 }
23718                 off2 := auxIntToInt32(v_0.AuxInt)
23719                 base := v_0.Args[0]
23720                 val := v_1
23721                 mem := v_2
23722                 if !(is32Bit(int64(off1) + int64(off2))) {
23723                         break
23724                 }
23725                 v.reset(OpAMD64SETGEstore)
23726                 v.AuxInt = int32ToAuxInt(off1 + off2)
23727                 v.Aux = symToAux(sym)
23728                 v.AddArg3(base, val, mem)
23729                 return true
23730         }
23731         // match: (SETGEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
23732         // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
23733         // result: (SETGEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
23734         for {
23735                 off1 := auxIntToInt32(v.AuxInt)
23736                 sym1 := auxToSym(v.Aux)
23737                 if v_0.Op != OpAMD64LEAQ {
23738                         break
23739                 }
23740                 off2 := auxIntToInt32(v_0.AuxInt)
23741                 sym2 := auxToSym(v_0.Aux)
23742                 base := v_0.Args[0]
23743                 val := v_1
23744                 mem := v_2
23745                 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
23746                         break
23747                 }
23748                 v.reset(OpAMD64SETGEstore)
23749                 v.AuxInt = int32ToAuxInt(off1 + off2)
23750                 v.Aux = symToAux(mergeSym(sym1, sym2))
23751                 v.AddArg3(base, val, mem)
23752                 return true
23753         }
23754         // match: (SETGEstore [off] {sym} ptr (FlagEQ) mem)
23755         // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
23756         for {
23757                 off := auxIntToInt32(v.AuxInt)
23758                 sym := auxToSym(v.Aux)
23759                 ptr := v_0
23760                 if v_1.Op != OpAMD64FlagEQ {
23761                         break
23762                 }
23763                 mem := v_2
23764                 v.reset(OpAMD64MOVBstore)
23765                 v.AuxInt = int32ToAuxInt(off)
23766                 v.Aux = symToAux(sym)
23767                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
23768                 v0.AuxInt = int32ToAuxInt(1)
23769                 v.AddArg3(ptr, v0, mem)
23770                 return true
23771         }
23772         // match: (SETGEstore [off] {sym} ptr (FlagLT_ULT) mem)
23773         // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
23774         for {
23775                 off := auxIntToInt32(v.AuxInt)
23776                 sym := auxToSym(v.Aux)
23777                 ptr := v_0
23778                 if v_1.Op != OpAMD64FlagLT_ULT {
23779                         break
23780                 }
23781                 mem := v_2
23782                 v.reset(OpAMD64MOVBstore)
23783                 v.AuxInt = int32ToAuxInt(off)
23784                 v.Aux = symToAux(sym)
23785                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
23786                 v0.AuxInt = int32ToAuxInt(0)
23787                 v.AddArg3(ptr, v0, mem)
23788                 return true
23789         }
23790         // match: (SETGEstore [off] {sym} ptr (FlagLT_UGT) mem)
23791         // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
23792         for {
23793                 off := auxIntToInt32(v.AuxInt)
23794                 sym := auxToSym(v.Aux)
23795                 ptr := v_0
23796                 if v_1.Op != OpAMD64FlagLT_UGT {
23797                         break
23798                 }
23799                 mem := v_2
23800                 v.reset(OpAMD64MOVBstore)
23801                 v.AuxInt = int32ToAuxInt(off)
23802                 v.Aux = symToAux(sym)
23803                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
23804                 v0.AuxInt = int32ToAuxInt(0)
23805                 v.AddArg3(ptr, v0, mem)
23806                 return true
23807         }
23808         // match: (SETGEstore [off] {sym} ptr (FlagGT_ULT) mem)
23809         // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
23810         for {
23811                 off := auxIntToInt32(v.AuxInt)
23812                 sym := auxToSym(v.Aux)
23813                 ptr := v_0
23814                 if v_1.Op != OpAMD64FlagGT_ULT {
23815                         break
23816                 }
23817                 mem := v_2
23818                 v.reset(OpAMD64MOVBstore)
23819                 v.AuxInt = int32ToAuxInt(off)
23820                 v.Aux = symToAux(sym)
23821                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
23822                 v0.AuxInt = int32ToAuxInt(1)
23823                 v.AddArg3(ptr, v0, mem)
23824                 return true
23825         }
23826         // match: (SETGEstore [off] {sym} ptr (FlagGT_UGT) mem)
23827         // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
23828         for {
23829                 off := auxIntToInt32(v.AuxInt)
23830                 sym := auxToSym(v.Aux)
23831                 ptr := v_0
23832                 if v_1.Op != OpAMD64FlagGT_UGT {
23833                         break
23834                 }
23835                 mem := v_2
23836                 v.reset(OpAMD64MOVBstore)
23837                 v.AuxInt = int32ToAuxInt(off)
23838                 v.Aux = symToAux(sym)
23839                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
23840                 v0.AuxInt = int32ToAuxInt(1)
23841                 v.AddArg3(ptr, v0, mem)
23842                 return true
23843         }
23844         return false
23845 }
23846 func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool {
23847         v_2 := v.Args[2]
23848         v_1 := v.Args[1]
23849         v_0 := v.Args[0]
23850         b := v.Block
23851         typ := &b.Func.Config.Types
23852         // match: (SETGstore [off] {sym} ptr (InvertFlags x) mem)
23853         // result: (SETLstore [off] {sym} ptr x mem)
23854         for {
23855                 off := auxIntToInt32(v.AuxInt)
23856                 sym := auxToSym(v.Aux)
23857                 ptr := v_0
23858                 if v_1.Op != OpAMD64InvertFlags {
23859                         break
23860                 }
23861                 x := v_1.Args[0]
23862                 mem := v_2
23863                 v.reset(OpAMD64SETLstore)
23864                 v.AuxInt = int32ToAuxInt(off)
23865                 v.Aux = symToAux(sym)
23866                 v.AddArg3(ptr, x, mem)
23867                 return true
23868         }
23869         // match: (SETGstore [off1] {sym} (ADDQconst [off2] base) val mem)
23870         // cond: is32Bit(int64(off1)+int64(off2))
23871         // result: (SETGstore [off1+off2] {sym} base val mem)
23872         for {
23873                 off1 := auxIntToInt32(v.AuxInt)
23874                 sym := auxToSym(v.Aux)
23875                 if v_0.Op != OpAMD64ADDQconst {
23876                         break
23877                 }
23878                 off2 := auxIntToInt32(v_0.AuxInt)
23879                 base := v_0.Args[0]
23880                 val := v_1
23881                 mem := v_2
23882                 if !(is32Bit(int64(off1) + int64(off2))) {
23883                         break
23884                 }
23885                 v.reset(OpAMD64SETGstore)
23886                 v.AuxInt = int32ToAuxInt(off1 + off2)
23887                 v.Aux = symToAux(sym)
23888                 v.AddArg3(base, val, mem)
23889                 return true
23890         }
23891         // match: (SETGstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
23892         // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
23893         // result: (SETGstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
23894         for {
23895                 off1 := auxIntToInt32(v.AuxInt)
23896                 sym1 := auxToSym(v.Aux)
23897                 if v_0.Op != OpAMD64LEAQ {
23898                         break
23899                 }
23900                 off2 := auxIntToInt32(v_0.AuxInt)
23901                 sym2 := auxToSym(v_0.Aux)
23902                 base := v_0.Args[0]
23903                 val := v_1
23904                 mem := v_2
23905                 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
23906                         break
23907                 }
23908                 v.reset(OpAMD64SETGstore)
23909                 v.AuxInt = int32ToAuxInt(off1 + off2)
23910                 v.Aux = symToAux(mergeSym(sym1, sym2))
23911                 v.AddArg3(base, val, mem)
23912                 return true
23913         }
23914         // match: (SETGstore [off] {sym} ptr (FlagEQ) mem)
23915         // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
23916         for {
23917                 off := auxIntToInt32(v.AuxInt)
23918                 sym := auxToSym(v.Aux)
23919                 ptr := v_0
23920                 if v_1.Op != OpAMD64FlagEQ {
23921                         break
23922                 }
23923                 mem := v_2
23924                 v.reset(OpAMD64MOVBstore)
23925                 v.AuxInt = int32ToAuxInt(off)
23926                 v.Aux = symToAux(sym)
23927                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
23928                 v0.AuxInt = int32ToAuxInt(0)
23929                 v.AddArg3(ptr, v0, mem)
23930                 return true
23931         }
23932         // match: (SETGstore [off] {sym} ptr (FlagLT_ULT) mem)
23933         // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
23934         for {
23935                 off := auxIntToInt32(v.AuxInt)
23936                 sym := auxToSym(v.Aux)
23937                 ptr := v_0
23938                 if v_1.Op != OpAMD64FlagLT_ULT {
23939                         break
23940                 }
23941                 mem := v_2
23942                 v.reset(OpAMD64MOVBstore)
23943                 v.AuxInt = int32ToAuxInt(off)
23944                 v.Aux = symToAux(sym)
23945                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
23946                 v0.AuxInt = int32ToAuxInt(0)
23947                 v.AddArg3(ptr, v0, mem)
23948                 return true
23949         }
23950         // match: (SETGstore [off] {sym} ptr (FlagLT_UGT) mem)
23951         // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
23952         for {
23953                 off := auxIntToInt32(v.AuxInt)
23954                 sym := auxToSym(v.Aux)
23955                 ptr := v_0
23956                 if v_1.Op != OpAMD64FlagLT_UGT {
23957                         break
23958                 }
23959                 mem := v_2
23960                 v.reset(OpAMD64MOVBstore)
23961                 v.AuxInt = int32ToAuxInt(off)
23962                 v.Aux = symToAux(sym)
23963                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
23964                 v0.AuxInt = int32ToAuxInt(0)
23965                 v.AddArg3(ptr, v0, mem)
23966                 return true
23967         }
23968         // match: (SETGstore [off] {sym} ptr (FlagGT_ULT) mem)
23969         // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
23970         for {
23971                 off := auxIntToInt32(v.AuxInt)
23972                 sym := auxToSym(v.Aux)
23973                 ptr := v_0
23974                 if v_1.Op != OpAMD64FlagGT_ULT {
23975                         break
23976                 }
23977                 mem := v_2
23978                 v.reset(OpAMD64MOVBstore)
23979                 v.AuxInt = int32ToAuxInt(off)
23980                 v.Aux = symToAux(sym)
23981                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
23982                 v0.AuxInt = int32ToAuxInt(1)
23983                 v.AddArg3(ptr, v0, mem)
23984                 return true
23985         }
23986         // match: (SETGstore [off] {sym} ptr (FlagGT_UGT) mem)
23987         // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
23988         for {
23989                 off := auxIntToInt32(v.AuxInt)
23990                 sym := auxToSym(v.Aux)
23991                 ptr := v_0
23992                 if v_1.Op != OpAMD64FlagGT_UGT {
23993                         break
23994                 }
23995                 mem := v_2
23996                 v.reset(OpAMD64MOVBstore)
23997                 v.AuxInt = int32ToAuxInt(off)
23998                 v.Aux = symToAux(sym)
23999                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
24000                 v0.AuxInt = int32ToAuxInt(1)
24001                 v.AddArg3(ptr, v0, mem)
24002                 return true
24003         }
24004         return false
24005 }
24006 func rewriteValueAMD64_OpAMD64SETL(v *Value) bool {
24007         v_0 := v.Args[0]
24008         // match: (SETL (InvertFlags x))
24009         // result: (SETG x)
24010         for {
24011                 if v_0.Op != OpAMD64InvertFlags {
24012                         break
24013                 }
24014                 x := v_0.Args[0]
24015                 v.reset(OpAMD64SETG)
24016                 v.AddArg(x)
24017                 return true
24018         }
24019         // match: (SETL (FlagEQ))
24020         // result: (MOVLconst [0])
24021         for {
24022                 if v_0.Op != OpAMD64FlagEQ {
24023                         break
24024                 }
24025                 v.reset(OpAMD64MOVLconst)
24026                 v.AuxInt = int32ToAuxInt(0)
24027                 return true
24028         }
24029         // match: (SETL (FlagLT_ULT))
24030         // result: (MOVLconst [1])
24031         for {
24032                 if v_0.Op != OpAMD64FlagLT_ULT {
24033                         break
24034                 }
24035                 v.reset(OpAMD64MOVLconst)
24036                 v.AuxInt = int32ToAuxInt(1)
24037                 return true
24038         }
24039         // match: (SETL (FlagLT_UGT))
24040         // result: (MOVLconst [1])
24041         for {
24042                 if v_0.Op != OpAMD64FlagLT_UGT {
24043                         break
24044                 }
24045                 v.reset(OpAMD64MOVLconst)
24046                 v.AuxInt = int32ToAuxInt(1)
24047                 return true
24048         }
24049         // match: (SETL (FlagGT_ULT))
24050         // result: (MOVLconst [0])
24051         for {
24052                 if v_0.Op != OpAMD64FlagGT_ULT {
24053                         break
24054                 }
24055                 v.reset(OpAMD64MOVLconst)
24056                 v.AuxInt = int32ToAuxInt(0)
24057                 return true
24058         }
24059         // match: (SETL (FlagGT_UGT))
24060         // result: (MOVLconst [0])
24061         for {
24062                 if v_0.Op != OpAMD64FlagGT_UGT {
24063                         break
24064                 }
24065                 v.reset(OpAMD64MOVLconst)
24066                 v.AuxInt = int32ToAuxInt(0)
24067                 return true
24068         }
24069         return false
24070 }
24071 func rewriteValueAMD64_OpAMD64SETLE(v *Value) bool {
24072         v_0 := v.Args[0]
24073         // match: (SETLE (InvertFlags x))
24074         // result: (SETGE x)
24075         for {
24076                 if v_0.Op != OpAMD64InvertFlags {
24077                         break
24078                 }
24079                 x := v_0.Args[0]
24080                 v.reset(OpAMD64SETGE)
24081                 v.AddArg(x)
24082                 return true
24083         }
24084         // match: (SETLE (FlagEQ))
24085         // result: (MOVLconst [1])
24086         for {
24087                 if v_0.Op != OpAMD64FlagEQ {
24088                         break
24089                 }
24090                 v.reset(OpAMD64MOVLconst)
24091                 v.AuxInt = int32ToAuxInt(1)
24092                 return true
24093         }
24094         // match: (SETLE (FlagLT_ULT))
24095         // result: (MOVLconst [1])
24096         for {
24097                 if v_0.Op != OpAMD64FlagLT_ULT {
24098                         break
24099                 }
24100                 v.reset(OpAMD64MOVLconst)
24101                 v.AuxInt = int32ToAuxInt(1)
24102                 return true
24103         }
24104         // match: (SETLE (FlagLT_UGT))
24105         // result: (MOVLconst [1])
24106         for {
24107                 if v_0.Op != OpAMD64FlagLT_UGT {
24108                         break
24109                 }
24110                 v.reset(OpAMD64MOVLconst)
24111                 v.AuxInt = int32ToAuxInt(1)
24112                 return true
24113         }
24114         // match: (SETLE (FlagGT_ULT))
24115         // result: (MOVLconst [0])
24116         for {
24117                 if v_0.Op != OpAMD64FlagGT_ULT {
24118                         break
24119                 }
24120                 v.reset(OpAMD64MOVLconst)
24121                 v.AuxInt = int32ToAuxInt(0)
24122                 return true
24123         }
24124         // match: (SETLE (FlagGT_UGT))
24125         // result: (MOVLconst [0])
24126         for {
24127                 if v_0.Op != OpAMD64FlagGT_UGT {
24128                         break
24129                 }
24130                 v.reset(OpAMD64MOVLconst)
24131                 v.AuxInt = int32ToAuxInt(0)
24132                 return true
24133         }
24134         return false
24135 }
24136 func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool {
24137         v_2 := v.Args[2]
24138         v_1 := v.Args[1]
24139         v_0 := v.Args[0]
24140         b := v.Block
24141         typ := &b.Func.Config.Types
24142         // match: (SETLEstore [off] {sym} ptr (InvertFlags x) mem)
24143         // result: (SETGEstore [off] {sym} ptr x mem)
24144         for {
24145                 off := auxIntToInt32(v.AuxInt)
24146                 sym := auxToSym(v.Aux)
24147                 ptr := v_0
24148                 if v_1.Op != OpAMD64InvertFlags {
24149                         break
24150                 }
24151                 x := v_1.Args[0]
24152                 mem := v_2
24153                 v.reset(OpAMD64SETGEstore)
24154                 v.AuxInt = int32ToAuxInt(off)
24155                 v.Aux = symToAux(sym)
24156                 v.AddArg3(ptr, x, mem)
24157                 return true
24158         }
24159         // match: (SETLEstore [off1] {sym} (ADDQconst [off2] base) val mem)
24160         // cond: is32Bit(int64(off1)+int64(off2))
24161         // result: (SETLEstore [off1+off2] {sym} base val mem)
24162         for {
24163                 off1 := auxIntToInt32(v.AuxInt)
24164                 sym := auxToSym(v.Aux)
24165                 if v_0.Op != OpAMD64ADDQconst {
24166                         break
24167                 }
24168                 off2 := auxIntToInt32(v_0.AuxInt)
24169                 base := v_0.Args[0]
24170                 val := v_1
24171                 mem := v_2
24172                 if !(is32Bit(int64(off1) + int64(off2))) {
24173                         break
24174                 }
24175                 v.reset(OpAMD64SETLEstore)
24176                 v.AuxInt = int32ToAuxInt(off1 + off2)
24177                 v.Aux = symToAux(sym)
24178                 v.AddArg3(base, val, mem)
24179                 return true
24180         }
24181         // match: (SETLEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
24182         // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
24183         // result: (SETLEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
24184         for {
24185                 off1 := auxIntToInt32(v.AuxInt)
24186                 sym1 := auxToSym(v.Aux)
24187                 if v_0.Op != OpAMD64LEAQ {
24188                         break
24189                 }
24190                 off2 := auxIntToInt32(v_0.AuxInt)
24191                 sym2 := auxToSym(v_0.Aux)
24192                 base := v_0.Args[0]
24193                 val := v_1
24194                 mem := v_2
24195                 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
24196                         break
24197                 }
24198                 v.reset(OpAMD64SETLEstore)
24199                 v.AuxInt = int32ToAuxInt(off1 + off2)
24200                 v.Aux = symToAux(mergeSym(sym1, sym2))
24201                 v.AddArg3(base, val, mem)
24202                 return true
24203         }
24204         // match: (SETLEstore [off] {sym} ptr (FlagEQ) mem)
24205         // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
24206         for {
24207                 off := auxIntToInt32(v.AuxInt)
24208                 sym := auxToSym(v.Aux)
24209                 ptr := v_0
24210                 if v_1.Op != OpAMD64FlagEQ {
24211                         break
24212                 }
24213                 mem := v_2
24214                 v.reset(OpAMD64MOVBstore)
24215                 v.AuxInt = int32ToAuxInt(off)
24216                 v.Aux = symToAux(sym)
24217                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
24218                 v0.AuxInt = int32ToAuxInt(1)
24219                 v.AddArg3(ptr, v0, mem)
24220                 return true
24221         }
24222         // match: (SETLEstore [off] {sym} ptr (FlagLT_ULT) mem)
24223         // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
24224         for {
24225                 off := auxIntToInt32(v.AuxInt)
24226                 sym := auxToSym(v.Aux)
24227                 ptr := v_0
24228                 if v_1.Op != OpAMD64FlagLT_ULT {
24229                         break
24230                 }
24231                 mem := v_2
24232                 v.reset(OpAMD64MOVBstore)
24233                 v.AuxInt = int32ToAuxInt(off)
24234                 v.Aux = symToAux(sym)
24235                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
24236                 v0.AuxInt = int32ToAuxInt(1)
24237                 v.AddArg3(ptr, v0, mem)
24238                 return true
24239         }
24240         // match: (SETLEstore [off] {sym} ptr (FlagLT_UGT) mem)
24241         // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
24242         for {
24243                 off := auxIntToInt32(v.AuxInt)
24244                 sym := auxToSym(v.Aux)
24245                 ptr := v_0
24246                 if v_1.Op != OpAMD64FlagLT_UGT {
24247                         break
24248                 }
24249                 mem := v_2
24250                 v.reset(OpAMD64MOVBstore)
24251                 v.AuxInt = int32ToAuxInt(off)
24252                 v.Aux = symToAux(sym)
24253                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
24254                 v0.AuxInt = int32ToAuxInt(1)
24255                 v.AddArg3(ptr, v0, mem)
24256                 return true
24257         }
24258         // match: (SETLEstore [off] {sym} ptr (FlagGT_ULT) mem)
24259         // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
24260         for {
24261                 off := auxIntToInt32(v.AuxInt)
24262                 sym := auxToSym(v.Aux)
24263                 ptr := v_0
24264                 if v_1.Op != OpAMD64FlagGT_ULT {
24265                         break
24266                 }
24267                 mem := v_2
24268                 v.reset(OpAMD64MOVBstore)
24269                 v.AuxInt = int32ToAuxInt(off)
24270                 v.Aux = symToAux(sym)
24271                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
24272                 v0.AuxInt = int32ToAuxInt(0)
24273                 v.AddArg3(ptr, v0, mem)
24274                 return true
24275         }
24276         // match: (SETLEstore [off] {sym} ptr (FlagGT_UGT) mem)
24277         // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
24278         for {
24279                 off := auxIntToInt32(v.AuxInt)
24280                 sym := auxToSym(v.Aux)
24281                 ptr := v_0
24282                 if v_1.Op != OpAMD64FlagGT_UGT {
24283                         break
24284                 }
24285                 mem := v_2
24286                 v.reset(OpAMD64MOVBstore)
24287                 v.AuxInt = int32ToAuxInt(off)
24288                 v.Aux = symToAux(sym)
24289                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
24290                 v0.AuxInt = int32ToAuxInt(0)
24291                 v.AddArg3(ptr, v0, mem)
24292                 return true
24293         }
24294         return false
24295 }
// rewriteValueAMD64_OpAMD64SETLstore applies the generated rewrite rules for
// SETLstore (store the "signed less-than" condition of a flags value as a
// 0/1 byte to memory). Each rule is tried in order; the first match rewrites
// v in place and returns true. Returns false when no rule applies.
//
// Code generated from gen/AMD64.rules; do not hand-edit the rule bodies.
func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// InvertFlags swaps comparison operands, so "less than" becomes
	// "greater than" of the uninverted flags.
	// match: (SETLstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETGstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETGstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// Fold a constant offset from the address computation into the store's
	// offset, provided the combined offset still fits in 32 bits.
	// match: (SETLstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETLstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETLstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// Fold a LEAQ's offset and symbol into the store when the offsets fit
	// and the symbols can be merged.
	// match: (SETLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETLstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// The remaining rules evaluate SETL against a known constant flags
	// value: FlagEQ and the FlagGT_* states are "not less" (store 0);
	// the FlagLT_* states are "less" (store 1).
	// match: (SETLstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETLstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETLstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETLstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETLstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETNE applies the generated rewrite rules for
// SETNE (materialize the "not equal" condition of a flags value as a 0/1
// result). Rules are tried in order; the first match rewrites v in place and
// returns true. Returns false when no rule applies.
//
// Code generated from gen/AMD64.rules; do not hand-edit the rule bodies.
func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// Testing the low bit and asking "nonzero?" is just the low bit itself.
	// match: (SETNE (TESTBconst [1] x))
	// result: (ANDLconst [1] x)
	for {
		if v_0.Op != OpAMD64TESTBconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(1)
		v.AddArg(x)
		return true
	}
	// match: (SETNE (TESTWconst [1] x))
	// result: (ANDLconst [1] x)
	for {
		if v_0.Op != OpAMD64TESTWconst || auxIntToInt16(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(1)
		v.AddArg(x)
		return true
	}
	// Recognize TEST against a single shifted-in bit and use a bit-test
	// instruction instead. The inner _i0 loop tries both operand orders
	// since TESTL is commutative.
	// match: (SETNE (TESTL (SHLL (MOVLconst [1]) x) y))
	// result: (SETB (BTL x y))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			if v_0_0.Op != OpAMD64SHLL {
				continue
			}
			x := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
				continue
			}
			y := v_0_1
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y))
	// result: (SETB (BTQ x y))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			if v_0_0.Op != OpAMD64SHLQ {
				continue
			}
			x := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
				continue
			}
			y := v_0_1
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg(v0)
			return true
		}
		break
	}
	// A TEST against a power-of-two constant is a test of one bit:
	// use BTconst with the bit index log2(c).
	// match: (SETNE (TESTLconst [c] x))
	// cond: isUint32PowerOfTwo(int64(c))
	// result: (SETB (BTLconst [int8(log32(c))] x))
	for {
		if v_0.Op != OpAMD64TESTLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(isUint32PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQconst [c] x))
	// cond: isUint64PowerOfTwo(int64(c))
	// result: (SETB (BTQconst [int8(log32(c))] x))
	for {
		if v_0.Op != OpAMD64TESTQconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(isUint64PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQ (MOVQconst [c]) x))
	// cond: isUint64PowerOfTwo(c)
	// result: (SETB (BTQconst [int8(log64(c))] x))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			if v_0_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0_0.AuxInt)
			x := v_0_1
			if !(isUint64PowerOfTwo(c)) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(int8(log64(c)))
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// s is known to be 0 or 1 (it is masked with ANDconst [1]), so
	// "s != 1" is equivalent to "s == 0".
	// match: (SETNE (CMPLconst [1] s:(ANDLconst [1] _)))
	// result: (SETEQ (CMPLconst [0] s))
	for {
		if v_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_0.AuxInt) != 1 {
			break
		}
		s := v_0.Args[0]
		if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (CMPQconst [1] s:(ANDQconst [1] _)))
	// result: (SETEQ (CMPQconst [0] s))
	for {
		if v_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_0.AuxInt) != 1 {
			break
		}
		s := v_0.Args[0]
		if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg(v0)
		return true
	}
	// The shift pairs below isolate a single bit of x (the sign bit or
	// bit 0); testing that value against itself (z1==z2) is a test of
	// that one bit, so rewrite to a constant bit-test.
	// match: (SETNE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
	// cond: z1==z2
	// result: (SETB (BTQconst [63] x))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETNE (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
	// cond: z1==z2
	// result: (SETB (BTQconst [31] x))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETNE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
	// cond: z1==z2
	// result: (SETB (BTQconst [0] x))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETNE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
	// cond: z1==z2
	// result: (SETB (BTLconst [0] x))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// A logical right shift by 63 (31) leaves only the sign bit, so the
	// self-test is a test of bit 63 (31) of x.
	// match: (SETNE (TESTQ z1:(SHRQconst [63] x) z2))
	// cond: z1==z2
	// result: (SETB (BTQconst [63] x))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			x := z1.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETNE (TESTL z1:(SHRLconst [31] x) z2))
	// cond: z1==z2
	// result: (SETB (BTLconst [31] x))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			x := z1.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// "not equal" is symmetric, so inverting the comparison operands does
	// not change the result: drop the InvertFlags.
	// match: (SETNE (InvertFlags x))
	// result: (SETNE x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETNE)
		v.AddArg(x)
		return true
	}
	// Constant-fold SETNE of a known flags value: equal yields 0, every
	// non-equal flag state yields 1.
	// match: (SETNE (FlagEQ))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETNE (FlagLT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETNE (FlagLT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETNE (FlagGT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETNE (FlagGT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETNEstore applies the machine-generated rewrite
// rules for SETNEstore (store the "not equal" flag condition as a byte to
// memory). Each rule below is a for-loop that either rewrites v in place and
// returns true, or breaks out to try the next rule. Returns false when no
// rule matched.
//
// NOTE: this function is generated from gen/AMD64.rules — change the rules
// file, not this code. The // match: and // result: comments above each rule
// are emitted by the generator and describe the SSA pattern being rewritten.
func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETNEstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
	// result: (SETBstore [off] {sym} ptr (BTL x y) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		// TESTL is commutative: this loop tries both argument orders by
		// swapping v_1_0/v_1_1 on the second iteration. The same idiom
		// recurs in every commutative match below.
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			if v_1_0.Op != OpAMD64SHLL {
				continue
			}
			x := v_1_0.Args[1]
			v_1_0_0 := v_1_0.Args[0]
			if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 {
				continue
			}
			y := v_1_1
			mem := v_2
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
	// result: (SETBstore [off] {sym} ptr (BTQ x y) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			if v_1_0.Op != OpAMD64SHLQ {
				continue
			}
			x := v_1_0.Args[1]
			v_1_0_0 := v_1_0.Args[0]
			if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 {
				continue
			}
			y := v_1_1
			mem := v_2
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (TESTLconst [c] x) mem)
	// cond: isUint32PowerOfTwo(int64(c))
	// result: (SETBstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		x := v_1.Args[0]
		mem := v_2
		if !(isUint32PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		// The mask is a power of two, so testing it reduces to testing
		// the single bit at position log32(c).
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQconst [c] x) mem)
	// cond: isUint64PowerOfTwo(int64(c))
	// result: (SETBstore [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		x := v_1.Args[0]
		mem := v_2
		if !(isUint64PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
	// cond: isUint64PowerOfTwo(c)
	// result: (SETBstore [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			if v_1_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1_0.AuxInt)
			x := v_1_1
			mem := v_2
			if !(isUint64PowerOfTwo(c)) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(int8(log64(c)))
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem)
	// result: (SETEQstore [off] {sym} ptr (CMPLconst [0] s) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64CMPLconst || auxIntToInt32(v_1.AuxInt) != 1 {
			break
		}
		s := v_1.Args[0]
		if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		mem := v_2
		// s is 0 or 1, so (s != 1) is equivalent to (s == 0).
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem)
	// result: (SETEQstore [off] {sym} ptr (CMPQconst [0] s) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64CMPQconst || auxIntToInt32(v_1.AuxInt) != 1 {
			break
		}
		s := v_1.Args[0]
		if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		mem := v_2
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
	// cond: z1==z2
	// result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			// z1==z2 means this is TEST of a value against itself, i.e. a
			// test of the isolated top bit of x.
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem)
	// cond: z1==z2
	// result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem)
	// cond: z1==z2
	// result: (SETBstore [off] {sym} ptr (BTQconst [0] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem)
	// cond: z1==z2
	// result: (SETBstore [off] {sym} ptr (BTLconst [0] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem)
	// cond: z1==z2
	// result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			x := z1.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem)
	// cond: z1==z2
	// result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			x := z1.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETNEstore [off] {sym} ptr x mem)
	// NE is unaffected by swapping comparison operands, so InvertFlags
	// can simply be dropped.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETNEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETNEstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		// Fold the constant add into the store offset, provided the sum
		// still fits in a signed 32-bit displacement.
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETNEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETNEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// The remaining rules fold known flag values: NE of FlagEQ is
	// constant 0; NE of any of the four inequality flag states is
	// constant 1. Each becomes a plain byte store of that constant.
	// match: (SETNEstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHLL applies the machine-generated rewrite rules
// for SHLL (32-bit shift left with a variable count). Returns true if v was
// rewritten, false otherwise.
//
// The rules rely on the x86 behavior that 32-bit shifts use only the low
// 5 bits of the count register: constant counts are masked with &31, and
// adds/ands on the count that cannot change those 5 bits are stripped.
//
// NOTE: this function is generated from gen/AMD64.rules — change the rules
// file, not this code.
func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SHLL x (MOVQconst [c]))
	// result: (SHLLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHLL x (MOVLconst [c]))
	// result: (SHLLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHLL x (ADDQconst [c] y))
	// cond: c & 31 == 0
	// result: (SHLL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		// Adding a multiple of 32 leaves the low 5 bits of the count
		// unchanged, so the add can be dropped.
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHLL x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SHLL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		// A mask whose low 5 bits are all set cannot change the
		// effective shift count, so the AND can be dropped.
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHLL x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SHLL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHLL x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SHLL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHLL x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHLLconst applies the gen/AMD64.rules rewrites for
// SHLLconst (32-bit left shift by an immediate). Rules are tried in order and
// the first match rewrites v in place and returns true; false means no rule
// fired. NOTE(review): machine-generated — change gen/AMD64.rules and
// regenerate rather than editing this function.
func rewriteValueAMD64_OpAMD64SHLLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHLLconst [1] (SHRLconst [1] x))
	// result: (BTRLconst [0] x)
	// (x>>1)<<1 clears only bit 0, which is exactly BTR with index 0.
	for {
		if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHRLconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRLconst)
		v.AuxInt = int8ToAuxInt(0)
		v.AddArg(x)
		return true
	}
	// match: (SHLLconst x [0])
	// result: x
	// A shift by zero is a no-op; forward the operand.
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (SHLLconst [d] (MOVLconst [c]))
	// result: (MOVLconst [c << uint64(d)])
	// Constant-fold a shift of a 32-bit constant.
	for {
		d := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c << uint64(d))
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHLQ applies the gen/AMD64.rules rewrites for SHLQ
// (64-bit left shift by a variable count). Only the low 6 bits of the count
// matter (the rules encode counts as c&63), so the rewrites below:
//   - fold a constant count (MOVQconst/MOVLconst) into SHLQconst;
//   - drop ADD*const adjustments of the count when c&63 == 0 (no-op mod 64);
//   - drop AND*const masks of the count when c&63 == 63 (redundant mask),
//     including both under a NEGQ/NEGL wrapper as produced by rotate lowering.
// Rules are tried in order; the first match rewrites v and returns true.
// NOTE(review): machine-generated — edit gen/AMD64.rules, not this function.
func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SHLQ x (MOVQconst [c]))
	// result: (SHLQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	// match: (SHLQ x (MOVLconst [c]))
	// result: (SHLQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	// match: (SHLQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SHLQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHLQ x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SHLQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHLQ x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SHLQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHLQ x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SHLQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHLQ x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHLQconst applies the gen/AMD64.rules rewrites for
// SHLQconst (64-bit left shift by an immediate): (x>>1)<<1 becomes BTRQ 0
// (clear low bit), shift by 0 is dropped, and shifts of MOVQconst/MOVLconst
// operands are constant-folded. First matching rule wins; returns true iff a
// rewrite fired. NOTE(review): machine-generated — edit gen/AMD64.rules.
func rewriteValueAMD64_OpAMD64SHLQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHLQconst [1] (SHRQconst [1] x))
	// result: (BTRQconst [0] x)
	for {
		if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHRQconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = int8ToAuxInt(0)
		v.AddArg(x)
		return true
	}
	// match: (SHLQconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (SHLQconst [d] (MOVQconst [c]))
	// result: (MOVQconst [c << uint64(d)])
	for {
		d := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(c << uint64(d))
		return true
	}
	// match: (SHLQconst [d] (MOVLconst [c]))
	// result: (MOVQconst [int64(c) << uint64(d)])
	// The 32-bit constant is sign-extended to 64 bits before folding.
	for {
		d := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) << uint64(d))
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHRB applies the gen/AMD64.rules rewrites for SHRB
// (8-bit logical right shift by a variable count). The hardware count is
// taken as c&31; a masked count below 8 folds into SHRBconst, while a masked
// count of 8 or more shifts out every bit of the byte operand, so the result
// is the constant 0. First matching rule wins; returns true iff a rewrite
// fired. NOTE(review): machine-generated — edit gen/AMD64.rules.
func rewriteValueAMD64_OpAMD64SHRB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SHRB x (MOVQconst [c]))
	// cond: c&31 < 8
	// result: (SHRBconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(c&31 < 8) {
			break
		}
		v.reset(OpAMD64SHRBconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRB x (MOVLconst [c]))
	// cond: c&31 < 8
	// result: (SHRBconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		if !(c&31 < 8) {
			break
		}
		v.reset(OpAMD64SHRBconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRB _ (MOVQconst [c]))
	// cond: c&31 >= 8
	// result: (MOVLconst [0])
	for {
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(c&31 >= 8) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SHRB _ (MOVLconst [c]))
	// cond: c&31 >= 8
	// result: (MOVLconst [0])
	for {
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		if !(c&31 >= 8) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHRBconst applies the single gen/AMD64.rules
// rewrite for SHRBconst: a shift by zero is a no-op and is replaced by its
// operand. Returns true iff the rewrite fired. NOTE(review):
// machine-generated — edit gen/AMD64.rules, not this function.
func rewriteValueAMD64_OpAMD64SHRBconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHRBconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHRL applies the gen/AMD64.rules rewrites for SHRL
// (32-bit logical right shift by a variable count). Only the low 5 bits of
// the count matter (the rules encode counts as c&31), so the rewrites below:
//   - fold a constant count (MOVQconst/MOVLconst) into SHRLconst;
//   - drop ADD*const adjustments of the count when c&31 == 0 (no-op mod 32);
//   - drop AND*const masks of the count when c&31 == 31 (redundant mask),
//     including both under a NEGQ/NEGL wrapper as produced by rotate lowering.
// Rules are tried in order; the first match rewrites v and returns true.
// NOTE(review): machine-generated — edit gen/AMD64.rules, not this function.
func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SHRL x (MOVQconst [c]))
	// result: (SHRLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SHRLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRL x (MOVLconst [c]))
	// result: (SHRLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SHRLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRL x (ADDQconst [c] y))
	// cond: c & 31 == 0
	// result: (SHRL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHRL x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SHRL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHRL x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SHRL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHRL x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SHRL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHRL x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHRLconst applies the gen/AMD64.rules rewrites for
// SHRLconst (32-bit logical right shift by an immediate): (x<<1)>>1 clears
// only the top bit and becomes BTRL 31, and a shift by zero is dropped. First
// matching rule wins; returns true iff a rewrite fired. NOTE(review):
// machine-generated — edit gen/AMD64.rules, not this function.
func rewriteValueAMD64_OpAMD64SHRLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHRLconst [1] (SHLLconst [1] x))
	// result: (BTRLconst [31] x)
	for {
		if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHLLconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRLconst)
		v.AuxInt = int8ToAuxInt(31)
		v.AddArg(x)
		return true
	}
	// match: (SHRLconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHRQ applies the gen/AMD64.rules rewrites for SHRQ
// (64-bit logical right shift by a variable count). Only the low 6 bits of
// the count matter (the rules encode counts as c&63), so the rewrites below:
//   - fold a constant count (MOVQconst/MOVLconst) into SHRQconst;
//   - drop ADD*const adjustments of the count when c&63 == 0 (no-op mod 64);
//   - drop AND*const masks of the count when c&63 == 63 (redundant mask),
//     including both under a NEGQ/NEGL wrapper as produced by rotate lowering.
// Rules are tried in order; the first match rewrites v and returns true.
// NOTE(review): machine-generated — edit gen/AMD64.rules, not this function.
func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SHRQ x (MOVQconst [c]))
	// result: (SHRQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	// match: (SHRQ x (MOVLconst [c]))
	// result: (SHRQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	// match: (SHRQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SHRQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHRQ x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SHRQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHRQ x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SHRQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHRQ x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SHRQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHRQ x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHRQconst applies the gen/AMD64.rules rewrites for
// SHRQconst (64-bit logical right shift by an immediate): (x<<1)>>1 clears
// only the top bit and becomes BTRQ 63, and a shift by zero is dropped. First
// matching rule wins; returns true iff a rewrite fired. NOTE(review):
// machine-generated — edit gen/AMD64.rules, not this function.
func rewriteValueAMD64_OpAMD64SHRQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHRQconst [1] (SHLQconst [1] x))
	// result: (BTRQconst [63] x)
	for {
		if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHLQconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = int8ToAuxInt(63)
		v.AddArg(x)
		return true
	}
	// match: (SHRQconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHRW applies the generated rewrite rules for SHRW
// (16-bit logical shift right), mutating v in place when a rule matches.
// It reports whether a rewrite was performed. A shift amount (mod 32) of 16
// or more shifts out every bit of the 16-bit operand, yielding constant 0.
func rewriteValueAMD64_OpAMD64SHRW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SHRW x (MOVQconst [c]))
	// cond: c&31 < 16
	// result: (SHRWconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(c&31 < 16) {
			break
		}
		v.reset(OpAMD64SHRWconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRW x (MOVLconst [c]))
	// cond: c&31 < 16
	// result: (SHRWconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		if !(c&31 < 16) {
			break
		}
		v.reset(OpAMD64SHRWconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRW _ (MOVQconst [c]))
	// cond: c&31 >= 16
	// result: (MOVLconst [0])
	for {
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(c&31 >= 16) {
			break
		}
		// Entire 16-bit value is shifted out; result is the constant 0.
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SHRW _ (MOVLconst [c]))
	// cond: c&31 >= 16
	// result: (MOVLconst [0])
	for {
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		if !(c&31 >= 16) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHRWconst applies the generated rewrite rules for
// SHRWconst, mutating v in place when a rule matches. It reports whether a
// rewrite was performed.
func rewriteValueAMD64_OpAMD64SHRWconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHRWconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		// Shift by zero is the identity; replace v with x.
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SUBL applies the generated rewrite rules for SUBL
// (32-bit subtract), mutating v in place when a rule matches. It reports
// whether a rewrite was performed: constant operands are folded into
// SUBLconst/NEGL forms, x-x becomes 0, and a load feeding the second operand
// is merged into SUBLload when safe.
func rewriteValueAMD64_OpAMD64SUBL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SUBL x (MOVLconst [c]))
	// result: (SUBLconst x [c])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SUBLconst)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (SUBL (MOVLconst [c]) x)
	// result: (NEGL (SUBLconst <v.Type> x [c]))
	for {
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_1
		// c-x is rewritten as -(x-c) so the constant lands in a SUBLconst.
		v.reset(OpAMD64NEGL)
		v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type)
		v0.AuxInt = int32ToAuxInt(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SUBL x x)
	// result: (MOVLconst [0])
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SUBL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (SUBLload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		// NOTE: clobber(l) has side effects (it marks l dead), so the
		// order and short-circuiting of this condition matter.
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBLload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SUBLconst applies the generated rewrite rules for
// SUBLconst, mutating v in place. The final rule is unconditional (SUBLconst
// is always canonicalized to ADDLconst of the negated constant), so this
// function always rewrites and has no trailing `return false`.
func rewriteValueAMD64_OpAMD64SUBLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SUBLconst [c] x)
	// cond: c==0
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c == 0) {
			break
		}
		// Subtracting zero is the identity; replace v with x.
		v.copyOf(x)
		return true
	}
	// match: (SUBLconst [c] x)
	// result: (ADDLconst [-c] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		// 32-bit negation wraps, so -c is correct even for c == math.MinInt32.
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int32ToAuxInt(-c)
		v.AddArg(x)
		return true
	}
}
// rewriteValueAMD64_OpAMD64SUBLload applies the generated rewrite rules for
// SUBLload (subtract from a loaded 32-bit value), mutating v in place when a
// rule matches. It folds address arithmetic (ADDQconst/LEAQ) into the
// load's offset/symbol, and forwards a just-stored float value through
// a matching MOVSSstore via MOVLf2i. It reports whether a rewrite occurred.
func rewriteValueAMD64_OpAMD64SUBLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SUBLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBLload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		// The combined offset must still fit in a signed 32-bit displacement.
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (SUBL x (MOVLf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		// Load would read back the value just stored at the same address:
		// skip memory and reinterpret the float bits as an integer instead.
		v.reset(OpAMD64SUBL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SUBLmodify applies the generated rewrite rules for
// SUBLmodify (read-modify-write subtract at a memory location), folding
// ADDQconst/LEAQ address arithmetic into the instruction's offset/symbol.
// It mutates v in place and reports whether a rewrite was performed.
func rewriteValueAMD64_OpAMD64SUBLmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SUBLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		// The combined offset must still fit in a signed 32-bit displacement.
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SUBLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SUBQ applies the generated rewrite rules for SUBQ
// (64-bit subtract), mutating v in place when a rule matches. It reports
// whether a rewrite was performed: 32-bit-representable constants are folded
// into SUBQconst/NEGQ forms, x-x becomes 0, and a load feeding the second
// operand is merged into SUBQload when safe.
func rewriteValueAMD64_OpAMD64SUBQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SUBQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (SUBQconst x [int32(c)])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		// Immediates on AMD64 are sign-extended 32-bit; larger constants
		// cannot be folded.
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64SUBQconst)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg(x)
		return true
	}
	// match: (SUBQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (NEGQ (SUBQconst <v.Type> x [int32(c)]))
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		x := v_1
		if !(is32Bit(c)) {
			break
		}
		// c-x is rewritten as -(x-c) so the constant lands in a SUBQconst.
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type)
		v0.AuxInt = int32ToAuxInt(int32(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SUBQ x x)
	// result: (MOVQconst [0])
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (SUBQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (SUBQload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		// NOTE: clobber(l) has side effects (it marks l dead), so the
		// order and short-circuiting of this condition matter.
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBQload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SUBQborrow applies the generated rewrite rules for
// SUBQborrow (64-bit subtract producing a borrow flag), folding a
// 32-bit-representable constant operand into SUBQconstborrow. It mutates v
// in place and reports whether a rewrite was performed.
func rewriteValueAMD64_OpAMD64SUBQborrow(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SUBQborrow x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (SUBQconstborrow x [int32(c)])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		// Immediates on AMD64 are sign-extended 32-bit; larger constants
		// cannot be folded.
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64SUBQconstborrow)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SUBQconst applies the generated rewrite rules for
// SUBQconst, mutating v in place when a rule matches: subtract-zero is
// elided, the subtraction is canonicalized to ADDQconst where the negated
// constant is representable, and constant/chained forms are folded.
// It reports whether a rewrite was performed.
func rewriteValueAMD64_OpAMD64SUBQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SUBQconst [0] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		// Subtracting zero is the identity; replace v with x.
		v.copyOf(x)
		return true
	}
	// match: (SUBQconst [c] x)
	// cond: c != -(1<<31)
	// result: (ADDQconst [-c] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		// -math.MinInt32 does not fit in an int32 immediate, so that one
		// case must stay a SUBQconst.
		if !(c != -(1 << 31)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = int32ToAuxInt(-c)
		v.AddArg(x)
		return true
	}
	// match: (SUBQconst (MOVQconst [d]) [c])
	// result: (MOVQconst [d-int64(c)])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(d - int64(c))
		return true
	}
	// match: (SUBQconst (SUBQconst x [d]) [c])
	// cond: is32Bit(int64(-c)-int64(d))
	// result: (ADDQconst [-c-d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64SUBQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		// Combined constant must fit in a signed 32-bit immediate.
		if !(is32Bit(int64(-c) - int64(d))) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = int32ToAuxInt(-c - d)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SUBQload applies the generated rewrite rules for
// SUBQload (subtract from a loaded 64-bit value), mutating v in place when a
// rule matches. It folds address arithmetic (ADDQconst/LEAQ) into the
// load's offset/symbol, and forwards a just-stored float value through
// a matching MOVSDstore via MOVQf2i. It reports whether a rewrite occurred.
func rewriteValueAMD64_OpAMD64SUBQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SUBQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBQload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		// The combined offset must still fit in a signed 32-bit displacement.
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (SUBQ x (MOVQf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		// Load would read back the value just stored at the same address:
		// skip memory and reinterpret the float bits as an integer instead.
		v.reset(OpAMD64SUBQ)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SUBQmodify applies the generated rewrite rules for
// SUBQmodify (read-modify-write subtract at a memory location), folding
// ADDQconst/LEAQ address arithmetic into the instruction's offset/symbol.
// It mutates v in place and reports whether a rewrite was performed.
func rewriteValueAMD64_OpAMD64SUBQmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SUBQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		// The combined offset must still fit in a signed 32-bit displacement.
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SUBQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SUBSD applies the generated rewrite rule for SUBSD
// (scalar float64 subtract): a MOVSDload feeding the second operand is merged
// into SUBSDload when safe. It mutates v in place and reports whether a
// rewrite was performed.
func rewriteValueAMD64_OpAMD64SUBSD(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (SUBSDload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		// NOTE: clobber(l) has side effects (it marks l dead), so the
		// order and short-circuiting of this condition matter.
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBSDload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SUBSDload applies the generated rewrite rules for
// SUBSDload (float64 subtract from a loaded value), mutating v in place when
// a rule matches. It folds address arithmetic (ADDQconst/LEAQ) into the
// load's offset/symbol, and forwards a just-stored integer value through a
// matching MOVQstore via MOVQi2f. It reports whether a rewrite occurred.
func rewriteValueAMD64_OpAMD64SUBSDload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SUBSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBSDload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		// The combined offset must still fit in a signed 32-bit displacement.
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
	// result: (SUBSD x (MOVQi2f y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		// Load would read back the value just stored at the same address:
		// skip memory and reinterpret the integer bits as a float instead.
		v.reset(OpAMD64SUBSD)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SUBSS applies the generated rewrite rule for SUBSS
// (scalar float32 subtract): a MOVSSload feeding the second operand is merged
// into SUBSSload when safe. It mutates v in place and reports whether a
// rewrite was performed.
func rewriteValueAMD64_OpAMD64SUBSS(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (SUBSSload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		// NOTE: clobber(l) has side effects (it marks l dead), so the
		// order and short-circuiting of this condition matter.
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBSSload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SUBSSload applies the generated rewrite rules for
// SUBSSload (float32 subtract from a loaded value), mutating v in place when
// a rule matches. It folds address arithmetic (ADDQconst/LEAQ) into the
// load's offset/symbol, and forwards a just-stored integer value through a
// matching MOVLstore via MOVLi2f. It reports whether a rewrite occurred.
func rewriteValueAMD64_OpAMD64SUBSSload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SUBSSload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBSSload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		// The combined offset must still fit in a signed 32-bit displacement.
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
	// result: (SUBSS x (MOVLi2f y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		// Load would read back the value just stored at the same address:
		// skip memory and reinterpret the integer bits as a float instead.
		v.reset(OpAMD64SUBSS)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64TESTB applies the generated rewrite rules for a
// TESTB value. Each candidate rewrite is preceded by its match/cond/result
// comment from gen/AMD64.rules. Returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64TESTB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (TESTB (MOVLconst [c]) x)
	// result: (TESTBconst [int8(c)] x)
	for {
		// TESTB is commutative: the inner loop retries the match with
		// v_0 and v_1 swapped on the second iteration.
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_1
			v.reset(OpAMD64TESTBconst)
			v.AuxInt = int8ToAuxInt(int8(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (TESTB l:(MOVBload {sym} [off] ptr mem) l2)
	// cond: l == l2 && l.Uses == 2 && clobber(l)
	// result: @l.Block (CMPBconstload {sym} [makeValAndOff(0, off)] ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			l := v_0
			if l.Op != OpAMD64MOVBload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			l2 := v_1
			// Require both operands to be the same load (TESTB l l) with
			// exactly these two uses, so the load dies with the rewrite.
			if !(l == l2 && l.Uses == 2 && clobber(l)) {
				continue
			}
			// Per the @l.Block directive: build the replacement in the
			// load's block, then make v a copy of it.
			b = l.Block
			v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
			v.copyOf(v0)
			v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64TESTBconst applies the generated rewrite rules for
// a TESTBconst value. Returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64TESTBconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (TESTBconst [-1] x)
	// cond: x.Op != OpAMD64MOVLconst
	// result: (TESTB x x)
	for {
		if auxIntToInt8(v.AuxInt) != -1 {
			break
		}
		x := v_0
		// Guard excludes a constant operand; NOTE(review): presumably so
		// this rewrite does not ping-pong with the const-folding rules.
		if !(x.Op != OpAMD64MOVLconst) {
			break
		}
		v.reset(OpAMD64TESTB)
		v.AddArg2(x, x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64TESTL applies the generated rewrite rules for a
// TESTL value. Each candidate rewrite is preceded by its match/cond/result
// comment from gen/AMD64.rules. Returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64TESTL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (TESTL (MOVLconst [c]) x)
	// result: (TESTLconst [c] x)
	for {
		// TESTL is commutative: the inner loop retries the match with
		// v_0 and v_1 swapped on the second iteration.
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_1
			v.reset(OpAMD64TESTLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (TESTL l:(MOVLload {sym} [off] ptr mem) l2)
	// cond: l == l2 && l.Uses == 2 && clobber(l)
	// result: @l.Block (CMPLconstload {sym} [makeValAndOff(0, off)] ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			l := v_0
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			l2 := v_1
			// Both operands must be the same load (TESTL l l) with exactly
			// these two uses, so the load dies with the rewrite.
			if !(l == l2 && l.Uses == 2 && clobber(l)) {
				continue
			}
			// Per the @l.Block directive: build the replacement in the
			// load's block, then make v a copy of it.
			b = l.Block
			v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
			v.copyOf(v0)
			v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			return true
		}
		break
	}
	// match: (TESTL a:(ANDLload [off] {sym} x ptr mem) a)
	// cond: a.Uses == 2 && a.Block == v.Block && clobber(a)
	// result: (TESTL (MOVLload <a.Type> [off] {sym} ptr mem) x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			a := v_0
			if a.Op != OpAMD64ANDLload {
				continue
			}
			off := auxIntToInt32(a.AuxInt)
			sym := auxToSym(a.Aux)
			mem := a.Args[2]
			x := a.Args[0]
			ptr := a.Args[1]
			// Same-block requirement keeps the unfolded load next to its use.
			if a != v_1 || !(a.Uses == 2 && a.Block == v.Block && clobber(a)) {
				continue
			}
			v.reset(OpAMD64TESTL)
			v0 := b.NewValue0(a.Pos, OpAMD64MOVLload, a.Type)
			v0.AuxInt = int32ToAuxInt(off)
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			v.AddArg2(v0, x)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64TESTLconst applies the generated rewrite rules for
// a TESTLconst value. The first three rules fold a test of a constant against
// itself (c&c == c) directly into the appropriate flag-constant op.
// Returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64TESTLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (TESTLconst [c] (MOVLconst [c]))
	// cond: c == 0
	// result: (FlagEQ)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c == 0) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (TESTLconst [c] (MOVLconst [c]))
	// cond: c < 0
	// result: (FlagLT_UGT)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c < 0) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (TESTLconst [c] (MOVLconst [c]))
	// cond: c > 0
	// result: (FlagGT_UGT)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c > 0) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (TESTLconst [-1] x)
	// cond: x.Op != OpAMD64MOVLconst
	// result: (TESTL x x)
	for {
		if auxIntToInt32(v.AuxInt) != -1 {
			break
		}
		x := v_0
		// Guard excludes a constant operand; NOTE(review): presumably so
		// this rewrite does not fight the const-folding rules above.
		if !(x.Op != OpAMD64MOVLconst) {
			break
		}
		v.reset(OpAMD64TESTL)
		v.AddArg2(x, x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64TESTQ applies the generated rewrite rules for a
// TESTQ value. Each candidate rewrite is preceded by its match/cond/result
// comment from gen/AMD64.rules. Returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64TESTQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (TESTQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (TESTQconst [int32(c)] x)
	for {
		// TESTQ is commutative: the inner loop retries the match with
		// v_0 and v_1 swapped on the second iteration.
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0.AuxInt)
			x := v_1
			// TESTQconst carries a 32-bit immediate, so the constant must
			// fit in 32 bits.
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64TESTQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (TESTQ l:(MOVQload {sym} [off] ptr mem) l2)
	// cond: l == l2 && l.Uses == 2 && clobber(l)
	// result: @l.Block (CMPQconstload {sym} [makeValAndOff(0, off)] ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			l := v_0
			if l.Op != OpAMD64MOVQload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			l2 := v_1
			// Both operands must be the same load (TESTQ l l) with exactly
			// these two uses, so the load dies with the rewrite.
			if !(l == l2 && l.Uses == 2 && clobber(l)) {
				continue
			}
			// Per the @l.Block directive: build the replacement in the
			// load's block, then make v a copy of it.
			b = l.Block
			v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
			v.copyOf(v0)
			v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			return true
		}
		break
	}
	// match: (TESTQ a:(ANDQload [off] {sym} x ptr mem) a)
	// cond: a.Uses == 2 && a.Block == v.Block && clobber(a)
	// result: (TESTQ (MOVQload <a.Type> [off] {sym} ptr mem) x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			a := v_0
			if a.Op != OpAMD64ANDQload {
				continue
			}
			off := auxIntToInt32(a.AuxInt)
			sym := auxToSym(a.Aux)
			mem := a.Args[2]
			x := a.Args[0]
			ptr := a.Args[1]
			// Same-block requirement keeps the unfolded load next to its use.
			if a != v_1 || !(a.Uses == 2 && a.Block == v.Block && clobber(a)) {
				continue
			}
			v.reset(OpAMD64TESTQ)
			v0 := b.NewValue0(a.Pos, OpAMD64MOVQload, a.Type)
			v0.AuxInt = int32ToAuxInt(off)
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			v.AddArg2(v0, x)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64TESTQconst applies the generated rewrite rules for
// a TESTQconst value. The first three rules fold a test of a constant against
// the same 64-bit constant (int64(c) == d) into a flag-constant op.
// Returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64TESTQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (TESTQconst [c] (MOVQconst [d]))
	// cond: int64(c) == d && c == 0
	// result: (FlagEQ)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		if !(int64(c) == d && c == 0) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (TESTQconst [c] (MOVQconst [d]))
	// cond: int64(c) == d && c < 0
	// result: (FlagLT_UGT)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		if !(int64(c) == d && c < 0) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (TESTQconst [c] (MOVQconst [d]))
	// cond: int64(c) == d && c > 0
	// result: (FlagGT_UGT)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		if !(int64(c) == d && c > 0) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (TESTQconst [-1] x)
	// cond: x.Op != OpAMD64MOVQconst
	// result: (TESTQ x x)
	for {
		if auxIntToInt32(v.AuxInt) != -1 {
			break
		}
		x := v_0
		// Guard excludes a constant operand; NOTE(review): presumably so
		// this rewrite does not fight the const-folding rules above.
		if !(x.Op != OpAMD64MOVQconst) {
			break
		}
		v.reset(OpAMD64TESTQ)
		v.AddArg2(x, x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64TESTW applies the generated rewrite rules for a
// TESTW value. Each candidate rewrite is preceded by its match/cond/result
// comment from gen/AMD64.rules. Returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64TESTW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (TESTW (MOVLconst [c]) x)
	// result: (TESTWconst [int16(c)] x)
	for {
		// TESTW is commutative: the inner loop retries the match with
		// v_0 and v_1 swapped on the second iteration.
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_1
			v.reset(OpAMD64TESTWconst)
			v.AuxInt = int16ToAuxInt(int16(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (TESTW l:(MOVWload {sym} [off] ptr mem) l2)
	// cond: l == l2 && l.Uses == 2 && clobber(l)
	// result: @l.Block (CMPWconstload {sym} [makeValAndOff(0, off)] ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			l := v_0
			if l.Op != OpAMD64MOVWload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			l2 := v_1
			// Both operands must be the same load (TESTW l l) with exactly
			// these two uses, so the load dies with the rewrite.
			if !(l == l2 && l.Uses == 2 && clobber(l)) {
				continue
			}
			// Per the @l.Block directive: build the replacement in the
			// load's block, then make v a copy of it.
			b = l.Block
			v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
			v.copyOf(v0)
			v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64TESTWconst applies the generated rewrite rules for
// a TESTWconst value. Returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64TESTWconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (TESTWconst [-1] x)
	// cond: x.Op != OpAMD64MOVLconst
	// result: (TESTW x x)
	for {
		if auxIntToInt16(v.AuxInt) != -1 {
			break
		}
		x := v_0
		// Guard excludes a constant operand; NOTE(review): presumably so
		// this rewrite does not fight the constant-folding rules.
		if !(x.Op != OpAMD64MOVLconst) {
			break
		}
		v.reset(OpAMD64TESTW)
		v.AddArg2(x, x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XADDLlock applies the generated rewrite rules for
// an XADDLlock value: it folds an ADDQconst address computation into the
// instruction's offset when the combined offset still fits in 32 bits.
// Returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64XADDLlock(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XADDLlock [off1+off2] {sym} val ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		ptr := v_1.Args[0]
		mem := v_2
		// Sum in int64 to check the merged displacement fits in 32 bits.
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XADDLlock)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XADDQlock applies the generated rewrite rules for
// an XADDQlock value: it folds an ADDQconst address computation into the
// instruction's offset when the combined offset still fits in 32 bits.
// Returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64XADDQlock(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XADDQlock [off1+off2] {sym} val ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		ptr := v_1.Args[0]
		mem := v_2
		// Sum in int64 to check the merged displacement fits in 32 bits.
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XADDQlock)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XCHGL applies the generated rewrite rules for an
// XCHGL value: it folds ADDQconst and LEAQ address computations into the
// instruction's offset/symbol when the result is still representable.
// Returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64XCHGL(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XCHGL [off1+off2] {sym} val ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		ptr := v_1.Args[0]
		mem := v_2
		// Sum in int64 to check the merged displacement fits in 32 bits.
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XCHGL)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, ptr, mem)
		return true
	}
	// match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
	// result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		ptr := v_1.Args[0]
		mem := v_2
		// Also requires the symbols to be mergeable and the base not to be
		// the static base pointer (OpSB).
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64XCHGL)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XCHGQ applies the generated rewrite rules for an
// XCHGQ value: it folds ADDQconst and LEAQ address computations into the
// instruction's offset/symbol when the result is still representable.
// Returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64XCHGQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XCHGQ [off1+off2] {sym} val ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		ptr := v_1.Args[0]
		mem := v_2
		// Sum in int64 to check the merged displacement fits in 32 bits.
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XCHGQ)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, ptr, mem)
		return true
	}
	// match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
	// result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		ptr := v_1.Args[0]
		mem := v_2
		// Also requires the symbols to be mergeable and the base not to be
		// the static base pointer (OpSB).
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64XCHGQ)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORL applies the generated rewrite rules for an
// XORL value: bit-complement (BTCL) recognition, constant folding into
// XORLconst, rotate recognition from shift pairs, x^x elimination, and
// load folding. Returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64XORL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XORL (SHLL (MOVLconst [1]) y) x)
	// result: (BTCL x y)
	for {
		// XORL is commutative: the inner loop retries the match with
		// v_0 and v_1 swapped on the second iteration.
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLL {
				continue
			}
			y := v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
				continue
			}
			x := v_1
			// x ^ (1 << y) is a bit-complement: BTCL x, y.
			v.reset(OpAMD64BTCL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (XORL (MOVLconst [c]) x)
	// cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
	// result: (BTCLconst [int8(log32(c))] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_1
			// Single-bit constants >= 128 become BTCLconst with the bit index.
			if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) {
				continue
			}
			v.reset(OpAMD64BTCLconst)
			v.AuxInt = int8ToAuxInt(int8(log32(c)))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (XORL x (MOVLconst [c]))
	// result: (XORLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64XORLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (XORL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLLconst {
				continue
			}
			c := auxIntToInt8(v_0.AuxInt)
			x := v_0.Args[0]
			if v_1.Op != OpAMD64SHRLconst {
				continue
			}
			d := auxIntToInt8(v_1.AuxInt)
			// (x<<c) ^ (x>>(32-c)) on the same x is a 32-bit rotate left by c.
			if x != v_1.Args[0] || !(d == 32-c) {
				continue
			}
			v.reset(OpAMD64ROLLconst)
			v.AuxInt = int8ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (XORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLLconst {
				continue
			}
			c := auxIntToInt8(v_0.AuxInt)
			x := v_0.Args[0]
			if v_1.Op != OpAMD64SHRWconst {
				continue
			}
			d := auxIntToInt8(v_1.AuxInt)
			// 16-bit rotate: only valid when the result type is 2 bytes wide.
			if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) {
				continue
			}
			v.reset(OpAMD64ROLWconst)
			v.AuxInt = int8ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (XORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLLconst {
				continue
			}
			c := auxIntToInt8(v_0.AuxInt)
			x := v_0.Args[0]
			if v_1.Op != OpAMD64SHRBconst {
				continue
			}
			d := auxIntToInt8(v_1.AuxInt)
			// 8-bit rotate: only valid when the result type is 1 byte wide.
			if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) {
				continue
			}
			v.reset(OpAMD64ROLBconst)
			v.AuxInt = int8ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (XORL x x)
	// result: (MOVLconst [0])
	for {
		x := v_0
		if x != v_1 {
			break
		}
		// x ^ x is always zero.
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (XORL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (XORLload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			// Fold the load into the XOR when it is safe to merge and the
			// load becomes dead.
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64XORLload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORLconst rewrites (XORLconst [c] x) values.
// Generated rules are tried strictly in order and the first match wins:
// a single-bit constant >= 128 becomes BTCLconst, XOR with 1 inverts a
// SET* condition, chained constant XORs fold together, XOR with 0 is the
// identity, and XOR of a constant folds to a constant.
func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (XORLconst [c] x)
	// cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
	// result: (BTCLconst [int8(log32(c))] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) {
			break
		}
		v.reset(OpAMD64BTCLconst)
		v.AuxInt = int8ToAuxInt(int8(log32(c)))
		v.AddArg(x)
		return true
	}
	// XORing a boolean (SET*) result with 1 flips it; rewrite to the
	// opposite condition-code setter instead.
	// match: (XORLconst [1] (SETNE x))
	// result: (SETEQ x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETNE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETEQ)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETEQ x))
	// result: (SETNE x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETEQ {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETNE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETL x))
	// result: (SETGE x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETL {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETGE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETGE x))
	// result: (SETL x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETGE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETL)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETLE x))
	// result: (SETG x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETLE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETG)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETG x))
	// result: (SETLE x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETG {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETLE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETB x))
	// result: (SETAE x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETB {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETAE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETAE x))
	// result: (SETB x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETAE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETB)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETBE x))
	// result: (SETA x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETBE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETA)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETA x))
	// result: (SETBE x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETA {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETBE)
		v.AddArg(x)
		return true
	}
	// Fold two stacked constant XORs into one.
	// match: (XORLconst [c] (XORLconst [d] x))
	// result: (XORLconst [c ^ d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64XORLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = int32ToAuxInt(c ^ d)
		v.AddArg(x)
		return true
	}
	// A bit-toggle (BTCLconst [d]) is XOR with 1<<d; merge it into the constant.
	// match: (XORLconst [c] (BTCLconst [d] x))
	// result: (XORLconst [c ^ 1<<uint32(d)] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64BTCLconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = int32ToAuxInt(c ^ 1<<uint32(d))
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [c] x)
	// cond: c==0
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c == 0) {
			break
		}
		v.copyOf(x)
		return true
	}
	// Constant-fold: XOR of two known constants.
	// match: (XORLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [c^d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c ^ d)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORLconstmodify rewrites XORLconstmodify
// (read-modify-write XOR of a constant into memory) by folding the
// address arithmetic of an ADDQconst or LEAQ base into the instruction's
// own offset/symbol, when the combined offset still fits.
func rewriteValueAMD64_OpAMD64XORLconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (XORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64XORLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (XORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (XORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORLload rewrites XORLload (XOR with a value
// loaded from memory). The first two rules fold ADDQconst/LEAQ address
// arithmetic into the load's offset/symbol; the last forwards a value
// just stored by MOVSSstore at the same address, turning the memory
// round-trip into a register move (MOVLf2i).
func rewriteValueAMD64_OpAMD64XORLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (XORLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XORLload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XORLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (XORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (XORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// Store-to-load forwarding: the load reads exactly what the matched
	// MOVSSstore wrote (same ptr, off, sym), so use the stored float
	// value directly, reinterpreted as an integer.
	// match: (XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (XORL x (MOVLf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64XORL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORLmodify rewrites XORLmodify
// (read-modify-write XOR into memory). XOR with a computed single bit
// (1<<x) becomes a BTCLmodify bit-toggle; the remaining rules fold
// ADDQconst/LEAQ address arithmetic into the instruction's offset/symbol.
func rewriteValueAMD64_OpAMD64XORLmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (XORLmodify [off] {sym} ptr s:(SHLL (MOVLconst [1]) <t> x) mem)
	// result: (BTCLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		s := v_1
		if s.Op != OpAMD64SHLL {
			break
		}
		t := s.Type
		x := s.Args[1]
		s_0 := s.Args[0]
		if s_0.Op != OpAMD64MOVLconst || auxIntToInt32(s_0.AuxInt) != 1 {
			break
		}
		mem := v_2
		v.reset(OpAMD64BTCLmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		// Mask the bit index to 0..31 to match SHLL's shift-count semantics.
		v0 := b.NewValue0(v.Pos, OpAMD64ANDLconst, t)
		v0.AuxInt = int32ToAuxInt(31)
		v0.AddArg(x)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (XORLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XORLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XORLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (XORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (XORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORQ rewrites two-operand XORQ values.
// XORQ is commutative, so most rules use an inner loop that tries both
// argument orders (swapping v_0 and v_1 between iterations). Rules, in
// order: XOR with a computed bit becomes BTCQ; XOR with a large power-of-2
// constant becomes BTCQconst; a 32-bit-representable constant operand
// becomes XORQconst; a matching SHL/SHR pair becomes a rotate; x^x is 0;
// and a one-use load operand is merged into XORQload.
func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XORQ (SHLQ (MOVQconst [1]) y) x)
	// result: (BTCQ x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLQ {
				continue
			}
			y := v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
				continue
			}
			x := v_1
			v.reset(OpAMD64BTCQ)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (XORQ (MOVQconst [c]) x)
	// cond: isUint64PowerOfTwo(c) && uint64(c) >= 128
	// result: (BTCQconst [int8(log64(c))] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0.AuxInt)
			x := v_1
			if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) {
				continue
			}
			v.reset(OpAMD64BTCQconst)
			v.AuxInt = int8ToAuxInt(int8(log64(c)))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (XORQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (XORQconst [int32(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64XORQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// Recognize the rotate idiom: (x<<c) ^ (x>>(64-c)) is a left rotate.
	// match: (XORQ (SHLQconst x [c]) (SHRQconst x [d]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLQconst {
				continue
			}
			c := auxIntToInt8(v_0.AuxInt)
			x := v_0.Args[0]
			if v_1.Op != OpAMD64SHRQconst {
				continue
			}
			d := auxIntToInt8(v_1.AuxInt)
			if x != v_1.Args[0] || !(d == 64-c) {
				continue
			}
			v.reset(OpAMD64ROLQconst)
			v.AuxInt = int8ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (XORQ x x)
	// result: (MOVQconst [0])
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (XORQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (XORQload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVQload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64XORQload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORQconst rewrites (XORQconst [c] x) values:
// a single-bit constant >= 128 becomes BTCQconst, stacked constant XORs
// fold together (including a BTCQconst, which is XOR with 1<<d), XOR with
// 0 is the identity, and XOR of a known constant folds to a constant.
func rewriteValueAMD64_OpAMD64XORQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (XORQconst [c] x)
	// cond: isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128
	// result: (BTCQconst [int8(log32(c))] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128) {
			break
		}
		v.reset(OpAMD64BTCQconst)
		v.AuxInt = int8ToAuxInt(int8(log32(c)))
		v.AddArg(x)
		return true
	}
	// match: (XORQconst [c] (XORQconst [d] x))
	// result: (XORQconst [c ^ d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64XORQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64XORQconst)
		v.AuxInt = int32ToAuxInt(c ^ d)
		v.AddArg(x)
		return true
	}
	// Merge a bit-toggle into the XOR constant, but only when the merged
	// constant still fits in the 32-bit immediate.
	// match: (XORQconst [c] (BTCQconst [d] x))
	// cond: is32Bit(int64(c) ^ 1<<uint32(d))
	// result: (XORQconst [c ^ 1<<uint32(d)] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64BTCQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) ^ 1<<uint32(d))) {
			break
		}
		v.reset(OpAMD64XORQconst)
		v.AuxInt = int32ToAuxInt(c ^ 1<<uint32(d))
		v.AddArg(x)
		return true
	}
	// match: (XORQconst [0] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (XORQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(c)^d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) ^ d)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORQconstmodify rewrites XORQconstmodify
// (read-modify-write XOR of a constant into memory) by folding the
// address arithmetic of an ADDQconst or LEAQ base into the instruction's
// own offset/symbol, when the combined offset still fits.
func rewriteValueAMD64_OpAMD64XORQconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (XORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64XORQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (XORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (XORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORQload rewrites XORQload (XOR with a value
// loaded from memory). The first two rules fold ADDQconst/LEAQ address
// arithmetic into the load's offset/symbol; the last forwards a value
// just stored by MOVSDstore at the same address, turning the memory
// round-trip into a register move (MOVQf2i).
func rewriteValueAMD64_OpAMD64XORQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (XORQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XORQload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XORQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (XORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (XORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// Store-to-load forwarding: the load reads exactly what the matched
	// MOVSDstore wrote (same ptr, off, sym), so use the stored double
	// value directly, reinterpreted as an integer.
	// match: (XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (XORQ x (MOVQf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64XORQ)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORQmodify rewrites XORQmodify
// (read-modify-write XOR into memory). XOR with a computed single bit
// (1<<x) becomes a BTCQmodify bit-toggle; the remaining rules fold
// ADDQconst/LEAQ address arithmetic into the instruction's offset/symbol.
func rewriteValueAMD64_OpAMD64XORQmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (XORQmodify [off] {sym} ptr s:(SHLQ (MOVQconst [1]) <t> x) mem)
	// result: (BTCQmodify [off] {sym} ptr (ANDQconst <t> [63] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		s := v_1
		if s.Op != OpAMD64SHLQ {
			break
		}
		t := s.Type
		x := s.Args[1]
		s_0 := s.Args[0]
		if s_0.Op != OpAMD64MOVQconst || auxIntToInt64(s_0.AuxInt) != 1 {
			break
		}
		mem := v_2
		v.reset(OpAMD64BTCQmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		// Mask the bit index to 0..63 to match SHLQ's shift-count semantics.
		v0 := b.NewValue0(v.Pos, OpAMD64ANDQconst, t)
		v0.AuxInt = int32ToAuxInt(63)
		v0.AddArg(x)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (XORQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XORQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XORQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (XORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (XORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
28767 func rewriteValueAMD64_OpAddr(v *Value) bool {
28768         v_0 := v.Args[0]
28769         // match: (Addr {sym} base)
28770         // result: (LEAQ {sym} base)
28771         for {
28772                 sym := auxToSym(v.Aux)
28773                 base := v_0
28774                 v.reset(OpAMD64LEAQ)
28775                 v.Aux = symToAux(sym)
28776                 v.AddArg(base)
28777                 return true
28778         }
28779 }
28780 func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool {
28781         v_2 := v.Args[2]
28782         v_1 := v.Args[1]
28783         v_0 := v.Args[0]
28784         b := v.Block
28785         typ := &b.Func.Config.Types
28786         // match: (AtomicAdd32 ptr val mem)
28787         // result: (AddTupleFirst32 val (XADDLlock val ptr mem))
28788         for {
28789                 ptr := v_0
28790                 val := v_1
28791                 mem := v_2
28792                 v.reset(OpAMD64AddTupleFirst32)
28793                 v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem))
28794                 v0.AddArg3(val, ptr, mem)
28795                 v.AddArg2(val, v0)
28796                 return true
28797         }
28798 }
28799 func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool {
28800         v_2 := v.Args[2]
28801         v_1 := v.Args[1]
28802         v_0 := v.Args[0]
28803         b := v.Block
28804         typ := &b.Func.Config.Types
28805         // match: (AtomicAdd64 ptr val mem)
28806         // result: (AddTupleFirst64 val (XADDQlock val ptr mem))
28807         for {
28808                 ptr := v_0
28809                 val := v_1
28810                 mem := v_2
28811                 v.reset(OpAMD64AddTupleFirst64)
28812                 v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem))
28813                 v0.AddArg3(val, ptr, mem)
28814                 v.AddArg2(val, v0)
28815                 return true
28816         }
28817 }
28818 func rewriteValueAMD64_OpAtomicAnd32(v *Value) bool {
28819         v_2 := v.Args[2]
28820         v_1 := v.Args[1]
28821         v_0 := v.Args[0]
28822         // match: (AtomicAnd32 ptr val mem)
28823         // result: (ANDLlock ptr val mem)
28824         for {
28825                 ptr := v_0
28826                 val := v_1
28827                 mem := v_2
28828                 v.reset(OpAMD64ANDLlock)
28829                 v.AddArg3(ptr, val, mem)
28830                 return true
28831         }
28832 }
28833 func rewriteValueAMD64_OpAtomicAnd8(v *Value) bool {
28834         v_2 := v.Args[2]
28835         v_1 := v.Args[1]
28836         v_0 := v.Args[0]
28837         // match: (AtomicAnd8 ptr val mem)
28838         // result: (ANDBlock ptr val mem)
28839         for {
28840                 ptr := v_0
28841                 val := v_1
28842                 mem := v_2
28843                 v.reset(OpAMD64ANDBlock)
28844                 v.AddArg3(ptr, val, mem)
28845                 return true
28846         }
28847 }
28848 func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value) bool {
28849         v_3 := v.Args[3]
28850         v_2 := v.Args[2]
28851         v_1 := v.Args[1]
28852         v_0 := v.Args[0]
28853         // match: (AtomicCompareAndSwap32 ptr old new_ mem)
28854         // result: (CMPXCHGLlock ptr old new_ mem)
28855         for {
28856                 ptr := v_0
28857                 old := v_1
28858                 new_ := v_2
28859                 mem := v_3
28860                 v.reset(OpAMD64CMPXCHGLlock)
28861                 v.AddArg4(ptr, old, new_, mem)
28862                 return true
28863         }
28864 }
28865 func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value) bool {
28866         v_3 := v.Args[3]
28867         v_2 := v.Args[2]
28868         v_1 := v.Args[1]
28869         v_0 := v.Args[0]
28870         // match: (AtomicCompareAndSwap64 ptr old new_ mem)
28871         // result: (CMPXCHGQlock ptr old new_ mem)
28872         for {
28873                 ptr := v_0
28874                 old := v_1
28875                 new_ := v_2
28876                 mem := v_3
28877                 v.reset(OpAMD64CMPXCHGQlock)
28878                 v.AddArg4(ptr, old, new_, mem)
28879                 return true
28880         }
28881 }
28882 func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool {
28883         v_2 := v.Args[2]
28884         v_1 := v.Args[1]
28885         v_0 := v.Args[0]
28886         // match: (AtomicExchange32 ptr val mem)
28887         // result: (XCHGL val ptr mem)
28888         for {
28889                 ptr := v_0
28890                 val := v_1
28891                 mem := v_2
28892                 v.reset(OpAMD64XCHGL)
28893                 v.AddArg3(val, ptr, mem)
28894                 return true
28895         }
28896 }
28897 func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool {
28898         v_2 := v.Args[2]
28899         v_1 := v.Args[1]
28900         v_0 := v.Args[0]
28901         // match: (AtomicExchange64 ptr val mem)
28902         // result: (XCHGQ val ptr mem)
28903         for {
28904                 ptr := v_0
28905                 val := v_1
28906                 mem := v_2
28907                 v.reset(OpAMD64XCHGQ)
28908                 v.AddArg3(val, ptr, mem)
28909                 return true
28910         }
28911 }
28912 func rewriteValueAMD64_OpAtomicLoad32(v *Value) bool {
28913         v_1 := v.Args[1]
28914         v_0 := v.Args[0]
28915         // match: (AtomicLoad32 ptr mem)
28916         // result: (MOVLatomicload ptr mem)
28917         for {
28918                 ptr := v_0
28919                 mem := v_1
28920                 v.reset(OpAMD64MOVLatomicload)
28921                 v.AddArg2(ptr, mem)
28922                 return true
28923         }
28924 }
28925 func rewriteValueAMD64_OpAtomicLoad64(v *Value) bool {
28926         v_1 := v.Args[1]
28927         v_0 := v.Args[0]
28928         // match: (AtomicLoad64 ptr mem)
28929         // result: (MOVQatomicload ptr mem)
28930         for {
28931                 ptr := v_0
28932                 mem := v_1
28933                 v.reset(OpAMD64MOVQatomicload)
28934                 v.AddArg2(ptr, mem)
28935                 return true
28936         }
28937 }
28938 func rewriteValueAMD64_OpAtomicLoad8(v *Value) bool {
28939         v_1 := v.Args[1]
28940         v_0 := v.Args[0]
28941         // match: (AtomicLoad8 ptr mem)
28942         // result: (MOVBatomicload ptr mem)
28943         for {
28944                 ptr := v_0
28945                 mem := v_1
28946                 v.reset(OpAMD64MOVBatomicload)
28947                 v.AddArg2(ptr, mem)
28948                 return true
28949         }
28950 }
28951 func rewriteValueAMD64_OpAtomicLoadPtr(v *Value) bool {
28952         v_1 := v.Args[1]
28953         v_0 := v.Args[0]
28954         // match: (AtomicLoadPtr ptr mem)
28955         // result: (MOVQatomicload ptr mem)
28956         for {
28957                 ptr := v_0
28958                 mem := v_1
28959                 v.reset(OpAMD64MOVQatomicload)
28960                 v.AddArg2(ptr, mem)
28961                 return true
28962         }
28963 }
28964 func rewriteValueAMD64_OpAtomicOr32(v *Value) bool {
28965         v_2 := v.Args[2]
28966         v_1 := v.Args[1]
28967         v_0 := v.Args[0]
28968         // match: (AtomicOr32 ptr val mem)
28969         // result: (ORLlock ptr val mem)
28970         for {
28971                 ptr := v_0
28972                 val := v_1
28973                 mem := v_2
28974                 v.reset(OpAMD64ORLlock)
28975                 v.AddArg3(ptr, val, mem)
28976                 return true
28977         }
28978 }
28979 func rewriteValueAMD64_OpAtomicOr8(v *Value) bool {
28980         v_2 := v.Args[2]
28981         v_1 := v.Args[1]
28982         v_0 := v.Args[0]
28983         // match: (AtomicOr8 ptr val mem)
28984         // result: (ORBlock ptr val mem)
28985         for {
28986                 ptr := v_0
28987                 val := v_1
28988                 mem := v_2
28989                 v.reset(OpAMD64ORBlock)
28990                 v.AddArg3(ptr, val, mem)
28991                 return true
28992         }
28993 }
28994 func rewriteValueAMD64_OpAtomicStore32(v *Value) bool {
28995         v_2 := v.Args[2]
28996         v_1 := v.Args[1]
28997         v_0 := v.Args[0]
28998         b := v.Block
28999         typ := &b.Func.Config.Types
29000         // match: (AtomicStore32 ptr val mem)
29001         // result: (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
29002         for {
29003                 ptr := v_0
29004                 val := v_1
29005                 mem := v_2
29006                 v.reset(OpSelect1)
29007                 v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem))
29008                 v0.AddArg3(val, ptr, mem)
29009                 v.AddArg(v0)
29010                 return true
29011         }
29012 }
29013 func rewriteValueAMD64_OpAtomicStore64(v *Value) bool {
29014         v_2 := v.Args[2]
29015         v_1 := v.Args[1]
29016         v_0 := v.Args[0]
29017         b := v.Block
29018         typ := &b.Func.Config.Types
29019         // match: (AtomicStore64 ptr val mem)
29020         // result: (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
29021         for {
29022                 ptr := v_0
29023                 val := v_1
29024                 mem := v_2
29025                 v.reset(OpSelect1)
29026                 v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem))
29027                 v0.AddArg3(val, ptr, mem)
29028                 v.AddArg(v0)
29029                 return true
29030         }
29031 }
29032 func rewriteValueAMD64_OpAtomicStore8(v *Value) bool {
29033         v_2 := v.Args[2]
29034         v_1 := v.Args[1]
29035         v_0 := v.Args[0]
29036         b := v.Block
29037         typ := &b.Func.Config.Types
29038         // match: (AtomicStore8 ptr val mem)
29039         // result: (Select1 (XCHGB <types.NewTuple(typ.UInt8,types.TypeMem)> val ptr mem))
29040         for {
29041                 ptr := v_0
29042                 val := v_1
29043                 mem := v_2
29044                 v.reset(OpSelect1)
29045                 v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem))
29046                 v0.AddArg3(val, ptr, mem)
29047                 v.AddArg(v0)
29048                 return true
29049         }
29050 }
29051 func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool {
29052         v_2 := v.Args[2]
29053         v_1 := v.Args[1]
29054         v_0 := v.Args[0]
29055         b := v.Block
29056         typ := &b.Func.Config.Types
29057         // match: (AtomicStorePtrNoWB ptr val mem)
29058         // result: (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
29059         for {
29060                 ptr := v_0
29061                 val := v_1
29062                 mem := v_2
29063                 v.reset(OpSelect1)
29064                 v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem))
29065                 v0.AddArg3(val, ptr, mem)
29066                 v.AddArg(v0)
29067                 return true
29068         }
29069 }
29070 func rewriteValueAMD64_OpBitLen16(v *Value) bool {
29071         v_0 := v.Args[0]
29072         b := v.Block
29073         typ := &b.Func.Config.Types
29074         // match: (BitLen16 x)
29075         // result: (BSRL (LEAL1 <typ.UInt32> [1] (MOVWQZX <typ.UInt32> x) (MOVWQZX <typ.UInt32> x)))
29076         for {
29077                 x := v_0
29078                 v.reset(OpAMD64BSRL)
29079                 v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
29080                 v0.AuxInt = int32ToAuxInt(1)
29081                 v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
29082                 v1.AddArg(x)
29083                 v0.AddArg2(v1, v1)
29084                 v.AddArg(v0)
29085                 return true
29086         }
29087 }
29088 func rewriteValueAMD64_OpBitLen32(v *Value) bool {
29089         v_0 := v.Args[0]
29090         b := v.Block
29091         typ := &b.Func.Config.Types
29092         // match: (BitLen32 x)
29093         // result: (Select0 (BSRQ (LEAQ1 <typ.UInt64> [1] (MOVLQZX <typ.UInt64> x) (MOVLQZX <typ.UInt64> x))))
29094         for {
29095                 x := v_0
29096                 v.reset(OpSelect0)
29097                 v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
29098                 v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64)
29099                 v1.AuxInt = int32ToAuxInt(1)
29100                 v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
29101                 v2.AddArg(x)
29102                 v1.AddArg2(v2, v2)
29103                 v0.AddArg(v1)
29104                 v.AddArg(v0)
29105                 return true
29106         }
29107 }
29108 func rewriteValueAMD64_OpBitLen64(v *Value) bool {
29109         v_0 := v.Args[0]
29110         b := v.Block
29111         typ := &b.Func.Config.Types
29112         // match: (BitLen64 <t> x)
29113         // result: (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
29114         for {
29115                 t := v.Type
29116                 x := v_0
29117                 v.reset(OpAMD64ADDQconst)
29118                 v.AuxInt = int32ToAuxInt(1)
29119                 v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t)
29120                 v1 := b.NewValue0(v.Pos, OpSelect0, t)
29121                 v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
29122                 v2.AddArg(x)
29123                 v1.AddArg(v2)
29124                 v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
29125                 v3.AuxInt = int64ToAuxInt(-1)
29126                 v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29127                 v4.AddArg(v2)
29128                 v0.AddArg3(v1, v3, v4)
29129                 v.AddArg(v0)
29130                 return true
29131         }
29132 }
29133 func rewriteValueAMD64_OpBitLen8(v *Value) bool {
29134         v_0 := v.Args[0]
29135         b := v.Block
29136         typ := &b.Func.Config.Types
29137         // match: (BitLen8 x)
29138         // result: (BSRL (LEAL1 <typ.UInt32> [1] (MOVBQZX <typ.UInt32> x) (MOVBQZX <typ.UInt32> x)))
29139         for {
29140                 x := v_0
29141                 v.reset(OpAMD64BSRL)
29142                 v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
29143                 v0.AuxInt = int32ToAuxInt(1)
29144                 v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
29145                 v1.AddArg(x)
29146                 v0.AddArg2(v1, v1)
29147                 v.AddArg(v0)
29148                 return true
29149         }
29150 }
29151 func rewriteValueAMD64_OpCeil(v *Value) bool {
29152         v_0 := v.Args[0]
29153         // match: (Ceil x)
29154         // result: (ROUNDSD [2] x)
29155         for {
29156                 x := v_0
29157                 v.reset(OpAMD64ROUNDSD)
29158                 v.AuxInt = int8ToAuxInt(2)
29159                 v.AddArg(x)
29160                 return true
29161         }
29162 }
29163 func rewriteValueAMD64_OpCondSelect(v *Value) bool {
29164         v_2 := v.Args[2]
29165         v_1 := v.Args[1]
29166         v_0 := v.Args[0]
29167         b := v.Block
29168         typ := &b.Func.Config.Types
29169         // match: (CondSelect <t> x y (SETEQ cond))
29170         // cond: (is64BitInt(t) || isPtr(t))
29171         // result: (CMOVQEQ y x cond)
29172         for {
29173                 t := v.Type
29174                 x := v_0
29175                 y := v_1
29176                 if v_2.Op != OpAMD64SETEQ {
29177                         break
29178                 }
29179                 cond := v_2.Args[0]
29180                 if !(is64BitInt(t) || isPtr(t)) {
29181                         break
29182                 }
29183                 v.reset(OpAMD64CMOVQEQ)
29184                 v.AddArg3(y, x, cond)
29185                 return true
29186         }
29187         // match: (CondSelect <t> x y (SETNE cond))
29188         // cond: (is64BitInt(t) || isPtr(t))
29189         // result: (CMOVQNE y x cond)
29190         for {
29191                 t := v.Type
29192                 x := v_0
29193                 y := v_1
29194                 if v_2.Op != OpAMD64SETNE {
29195                         break
29196                 }
29197                 cond := v_2.Args[0]
29198                 if !(is64BitInt(t) || isPtr(t)) {
29199                         break
29200                 }
29201                 v.reset(OpAMD64CMOVQNE)
29202                 v.AddArg3(y, x, cond)
29203                 return true
29204         }
29205         // match: (CondSelect <t> x y (SETL cond))
29206         // cond: (is64BitInt(t) || isPtr(t))
29207         // result: (CMOVQLT y x cond)
29208         for {
29209                 t := v.Type
29210                 x := v_0
29211                 y := v_1
29212                 if v_2.Op != OpAMD64SETL {
29213                         break
29214                 }
29215                 cond := v_2.Args[0]
29216                 if !(is64BitInt(t) || isPtr(t)) {
29217                         break
29218                 }
29219                 v.reset(OpAMD64CMOVQLT)
29220                 v.AddArg3(y, x, cond)
29221                 return true
29222         }
29223         // match: (CondSelect <t> x y (SETG cond))
29224         // cond: (is64BitInt(t) || isPtr(t))
29225         // result: (CMOVQGT y x cond)
29226         for {
29227                 t := v.Type
29228                 x := v_0
29229                 y := v_1
29230                 if v_2.Op != OpAMD64SETG {
29231                         break
29232                 }
29233                 cond := v_2.Args[0]
29234                 if !(is64BitInt(t) || isPtr(t)) {
29235                         break
29236                 }
29237                 v.reset(OpAMD64CMOVQGT)
29238                 v.AddArg3(y, x, cond)
29239                 return true
29240         }
29241         // match: (CondSelect <t> x y (SETLE cond))
29242         // cond: (is64BitInt(t) || isPtr(t))
29243         // result: (CMOVQLE y x cond)
29244         for {
29245                 t := v.Type
29246                 x := v_0
29247                 y := v_1
29248                 if v_2.Op != OpAMD64SETLE {
29249                         break
29250                 }
29251                 cond := v_2.Args[0]
29252                 if !(is64BitInt(t) || isPtr(t)) {
29253                         break
29254                 }
29255                 v.reset(OpAMD64CMOVQLE)
29256                 v.AddArg3(y, x, cond)
29257                 return true
29258         }
29259         // match: (CondSelect <t> x y (SETGE cond))
29260         // cond: (is64BitInt(t) || isPtr(t))
29261         // result: (CMOVQGE y x cond)
29262         for {
29263                 t := v.Type
29264                 x := v_0
29265                 y := v_1
29266                 if v_2.Op != OpAMD64SETGE {
29267                         break
29268                 }
29269                 cond := v_2.Args[0]
29270                 if !(is64BitInt(t) || isPtr(t)) {
29271                         break
29272                 }
29273                 v.reset(OpAMD64CMOVQGE)
29274                 v.AddArg3(y, x, cond)
29275                 return true
29276         }
29277         // match: (CondSelect <t> x y (SETA cond))
29278         // cond: (is64BitInt(t) || isPtr(t))
29279         // result: (CMOVQHI y x cond)
29280         for {
29281                 t := v.Type
29282                 x := v_0
29283                 y := v_1
29284                 if v_2.Op != OpAMD64SETA {
29285                         break
29286                 }
29287                 cond := v_2.Args[0]
29288                 if !(is64BitInt(t) || isPtr(t)) {
29289                         break
29290                 }
29291                 v.reset(OpAMD64CMOVQHI)
29292                 v.AddArg3(y, x, cond)
29293                 return true
29294         }
29295         // match: (CondSelect <t> x y (SETB cond))
29296         // cond: (is64BitInt(t) || isPtr(t))
29297         // result: (CMOVQCS y x cond)
29298         for {
29299                 t := v.Type
29300                 x := v_0
29301                 y := v_1
29302                 if v_2.Op != OpAMD64SETB {
29303                         break
29304                 }
29305                 cond := v_2.Args[0]
29306                 if !(is64BitInt(t) || isPtr(t)) {
29307                         break
29308                 }
29309                 v.reset(OpAMD64CMOVQCS)
29310                 v.AddArg3(y, x, cond)
29311                 return true
29312         }
29313         // match: (CondSelect <t> x y (SETAE cond))
29314         // cond: (is64BitInt(t) || isPtr(t))
29315         // result: (CMOVQCC y x cond)
29316         for {
29317                 t := v.Type
29318                 x := v_0
29319                 y := v_1
29320                 if v_2.Op != OpAMD64SETAE {
29321                         break
29322                 }
29323                 cond := v_2.Args[0]
29324                 if !(is64BitInt(t) || isPtr(t)) {
29325                         break
29326                 }
29327                 v.reset(OpAMD64CMOVQCC)
29328                 v.AddArg3(y, x, cond)
29329                 return true
29330         }
29331         // match: (CondSelect <t> x y (SETBE cond))
29332         // cond: (is64BitInt(t) || isPtr(t))
29333         // result: (CMOVQLS y x cond)
29334         for {
29335                 t := v.Type
29336                 x := v_0
29337                 y := v_1
29338                 if v_2.Op != OpAMD64SETBE {
29339                         break
29340                 }
29341                 cond := v_2.Args[0]
29342                 if !(is64BitInt(t) || isPtr(t)) {
29343                         break
29344                 }
29345                 v.reset(OpAMD64CMOVQLS)
29346                 v.AddArg3(y, x, cond)
29347                 return true
29348         }
29349         // match: (CondSelect <t> x y (SETEQF cond))
29350         // cond: (is64BitInt(t) || isPtr(t))
29351         // result: (CMOVQEQF y x cond)
29352         for {
29353                 t := v.Type
29354                 x := v_0
29355                 y := v_1
29356                 if v_2.Op != OpAMD64SETEQF {
29357                         break
29358                 }
29359                 cond := v_2.Args[0]
29360                 if !(is64BitInt(t) || isPtr(t)) {
29361                         break
29362                 }
29363                 v.reset(OpAMD64CMOVQEQF)
29364                 v.AddArg3(y, x, cond)
29365                 return true
29366         }
29367         // match: (CondSelect <t> x y (SETNEF cond))
29368         // cond: (is64BitInt(t) || isPtr(t))
29369         // result: (CMOVQNEF y x cond)
29370         for {
29371                 t := v.Type
29372                 x := v_0
29373                 y := v_1
29374                 if v_2.Op != OpAMD64SETNEF {
29375                         break
29376                 }
29377                 cond := v_2.Args[0]
29378                 if !(is64BitInt(t) || isPtr(t)) {
29379                         break
29380                 }
29381                 v.reset(OpAMD64CMOVQNEF)
29382                 v.AddArg3(y, x, cond)
29383                 return true
29384         }
29385         // match: (CondSelect <t> x y (SETGF cond))
29386         // cond: (is64BitInt(t) || isPtr(t))
29387         // result: (CMOVQGTF y x cond)
29388         for {
29389                 t := v.Type
29390                 x := v_0
29391                 y := v_1
29392                 if v_2.Op != OpAMD64SETGF {
29393                         break
29394                 }
29395                 cond := v_2.Args[0]
29396                 if !(is64BitInt(t) || isPtr(t)) {
29397                         break
29398                 }
29399                 v.reset(OpAMD64CMOVQGTF)
29400                 v.AddArg3(y, x, cond)
29401                 return true
29402         }
29403         // match: (CondSelect <t> x y (SETGEF cond))
29404         // cond: (is64BitInt(t) || isPtr(t))
29405         // result: (CMOVQGEF y x cond)
29406         for {
29407                 t := v.Type
29408                 x := v_0
29409                 y := v_1
29410                 if v_2.Op != OpAMD64SETGEF {
29411                         break
29412                 }
29413                 cond := v_2.Args[0]
29414                 if !(is64BitInt(t) || isPtr(t)) {
29415                         break
29416                 }
29417                 v.reset(OpAMD64CMOVQGEF)
29418                 v.AddArg3(y, x, cond)
29419                 return true
29420         }
29421         // match: (CondSelect <t> x y (SETEQ cond))
29422         // cond: is32BitInt(t)
29423         // result: (CMOVLEQ y x cond)
29424         for {
29425                 t := v.Type
29426                 x := v_0
29427                 y := v_1
29428                 if v_2.Op != OpAMD64SETEQ {
29429                         break
29430                 }
29431                 cond := v_2.Args[0]
29432                 if !(is32BitInt(t)) {
29433                         break
29434                 }
29435                 v.reset(OpAMD64CMOVLEQ)
29436                 v.AddArg3(y, x, cond)
29437                 return true
29438         }
29439         // match: (CondSelect <t> x y (SETNE cond))
29440         // cond: is32BitInt(t)
29441         // result: (CMOVLNE y x cond)
29442         for {
29443                 t := v.Type
29444                 x := v_0
29445                 y := v_1
29446                 if v_2.Op != OpAMD64SETNE {
29447                         break
29448                 }
29449                 cond := v_2.Args[0]
29450                 if !(is32BitInt(t)) {
29451                         break
29452                 }
29453                 v.reset(OpAMD64CMOVLNE)
29454                 v.AddArg3(y, x, cond)
29455                 return true
29456         }
29457         // match: (CondSelect <t> x y (SETL cond))
29458         // cond: is32BitInt(t)
29459         // result: (CMOVLLT y x cond)
29460         for {
29461                 t := v.Type
29462                 x := v_0
29463                 y := v_1
29464                 if v_2.Op != OpAMD64SETL {
29465                         break
29466                 }
29467                 cond := v_2.Args[0]
29468                 if !(is32BitInt(t)) {
29469                         break
29470                 }
29471                 v.reset(OpAMD64CMOVLLT)
29472                 v.AddArg3(y, x, cond)
29473                 return true
29474         }
29475         // match: (CondSelect <t> x y (SETG cond))
29476         // cond: is32BitInt(t)
29477         // result: (CMOVLGT y x cond)
29478         for {
29479                 t := v.Type
29480                 x := v_0
29481                 y := v_1
29482                 if v_2.Op != OpAMD64SETG {
29483                         break
29484                 }
29485                 cond := v_2.Args[0]
29486                 if !(is32BitInt(t)) {
29487                         break
29488                 }
29489                 v.reset(OpAMD64CMOVLGT)
29490                 v.AddArg3(y, x, cond)
29491                 return true
29492         }
29493         // match: (CondSelect <t> x y (SETLE cond))
29494         // cond: is32BitInt(t)
29495         // result: (CMOVLLE y x cond)
29496         for {
29497                 t := v.Type
29498                 x := v_0
29499                 y := v_1
29500                 if v_2.Op != OpAMD64SETLE {
29501                         break
29502                 }
29503                 cond := v_2.Args[0]
29504                 if !(is32BitInt(t)) {
29505                         break
29506                 }
29507                 v.reset(OpAMD64CMOVLLE)
29508                 v.AddArg3(y, x, cond)
29509                 return true
29510         }
29511         // match: (CondSelect <t> x y (SETGE cond))
29512         // cond: is32BitInt(t)
29513         // result: (CMOVLGE y x cond)
29514         for {
29515                 t := v.Type
29516                 x := v_0
29517                 y := v_1
29518                 if v_2.Op != OpAMD64SETGE {
29519                         break
29520                 }
29521                 cond := v_2.Args[0]
29522                 if !(is32BitInt(t)) {
29523                         break
29524                 }
29525                 v.reset(OpAMD64CMOVLGE)
29526                 v.AddArg3(y, x, cond)
29527                 return true
29528         }
29529         // match: (CondSelect <t> x y (SETA cond))
29530         // cond: is32BitInt(t)
29531         // result: (CMOVLHI y x cond)
29532         for {
29533                 t := v.Type
29534                 x := v_0
29535                 y := v_1
29536                 if v_2.Op != OpAMD64SETA {
29537                         break
29538                 }
29539                 cond := v_2.Args[0]
29540                 if !(is32BitInt(t)) {
29541                         break
29542                 }
29543                 v.reset(OpAMD64CMOVLHI)
29544                 v.AddArg3(y, x, cond)
29545                 return true
29546         }
29547         // match: (CondSelect <t> x y (SETB cond))
29548         // cond: is32BitInt(t)
29549         // result: (CMOVLCS y x cond)
29550         for {
29551                 t := v.Type
29552                 x := v_0
29553                 y := v_1
29554                 if v_2.Op != OpAMD64SETB {
29555                         break
29556                 }
29557                 cond := v_2.Args[0]
29558                 if !(is32BitInt(t)) {
29559                         break
29560                 }
29561                 v.reset(OpAMD64CMOVLCS)
29562                 v.AddArg3(y, x, cond)
29563                 return true
29564         }
29565         // match: (CondSelect <t> x y (SETAE cond))
29566         // cond: is32BitInt(t)
29567         // result: (CMOVLCC y x cond)
29568         for {
29569                 t := v.Type
29570                 x := v_0
29571                 y := v_1
29572                 if v_2.Op != OpAMD64SETAE {
29573                         break
29574                 }
29575                 cond := v_2.Args[0]
29576                 if !(is32BitInt(t)) {
29577                         break
29578                 }
29579                 v.reset(OpAMD64CMOVLCC)
29580                 v.AddArg3(y, x, cond)
29581                 return true
29582         }
29583         // match: (CondSelect <t> x y (SETBE cond))
29584         // cond: is32BitInt(t)
29585         // result: (CMOVLLS y x cond)
29586         for {
29587                 t := v.Type
29588                 x := v_0
29589                 y := v_1
29590                 if v_2.Op != OpAMD64SETBE {
29591                         break
29592                 }
29593                 cond := v_2.Args[0]
29594                 if !(is32BitInt(t)) {
29595                         break
29596                 }
29597                 v.reset(OpAMD64CMOVLLS)
29598                 v.AddArg3(y, x, cond)
29599                 return true
29600         }
29601         // match: (CondSelect <t> x y (SETEQF cond))
29602         // cond: is32BitInt(t)
29603         // result: (CMOVLEQF y x cond)
29604         for {
29605                 t := v.Type
29606                 x := v_0
29607                 y := v_1
29608                 if v_2.Op != OpAMD64SETEQF {
29609                         break
29610                 }
29611                 cond := v_2.Args[0]
29612                 if !(is32BitInt(t)) {
29613                         break
29614                 }
29615                 v.reset(OpAMD64CMOVLEQF)
29616                 v.AddArg3(y, x, cond)
29617                 return true
29618         }
29619         // match: (CondSelect <t> x y (SETNEF cond))
29620         // cond: is32BitInt(t)
29621         // result: (CMOVLNEF y x cond)
29622         for {
29623                 t := v.Type
29624                 x := v_0
29625                 y := v_1
29626                 if v_2.Op != OpAMD64SETNEF {
29627                         break
29628                 }
29629                 cond := v_2.Args[0]
29630                 if !(is32BitInt(t)) {
29631                         break
29632                 }
29633                 v.reset(OpAMD64CMOVLNEF)
29634                 v.AddArg3(y, x, cond)
29635                 return true
29636         }
29637         // match: (CondSelect <t> x y (SETGF cond))
29638         // cond: is32BitInt(t)
29639         // result: (CMOVLGTF y x cond)
29640         for {
29641                 t := v.Type
29642                 x := v_0
29643                 y := v_1
29644                 if v_2.Op != OpAMD64SETGF {
29645                         break
29646                 }
29647                 cond := v_2.Args[0]
29648                 if !(is32BitInt(t)) {
29649                         break
29650                 }
29651                 v.reset(OpAMD64CMOVLGTF)
29652                 v.AddArg3(y, x, cond)
29653                 return true
29654         }
29655         // match: (CondSelect <t> x y (SETGEF cond))
29656         // cond: is32BitInt(t)
29657         // result: (CMOVLGEF y x cond)
29658         for {
29659                 t := v.Type
29660                 x := v_0
29661                 y := v_1
29662                 if v_2.Op != OpAMD64SETGEF {
29663                         break
29664                 }
29665                 cond := v_2.Args[0]
29666                 if !(is32BitInt(t)) {
29667                         break
29668                 }
29669                 v.reset(OpAMD64CMOVLGEF)
29670                 v.AddArg3(y, x, cond)
29671                 return true
29672         }
29673         // match: (CondSelect <t> x y (SETEQ cond))
29674         // cond: is16BitInt(t)
29675         // result: (CMOVWEQ y x cond)
29676         for {
29677                 t := v.Type
29678                 x := v_0
29679                 y := v_1
29680                 if v_2.Op != OpAMD64SETEQ {
29681                         break
29682                 }
29683                 cond := v_2.Args[0]
29684                 if !(is16BitInt(t)) {
29685                         break
29686                 }
29687                 v.reset(OpAMD64CMOVWEQ)
29688                 v.AddArg3(y, x, cond)
29689                 return true
29690         }
29691         // match: (CondSelect <t> x y (SETNE cond))
29692         // cond: is16BitInt(t)
29693         // result: (CMOVWNE y x cond)
29694         for {
29695                 t := v.Type
29696                 x := v_0
29697                 y := v_1
29698                 if v_2.Op != OpAMD64SETNE {
29699                         break
29700                 }
29701                 cond := v_2.Args[0]
29702                 if !(is16BitInt(t)) {
29703                         break
29704                 }
29705                 v.reset(OpAMD64CMOVWNE)
29706                 v.AddArg3(y, x, cond)
29707                 return true
29708         }
29709         // match: (CondSelect <t> x y (SETL cond))
29710         // cond: is16BitInt(t)
29711         // result: (CMOVWLT y x cond)
29712         for {
29713                 t := v.Type
29714                 x := v_0
29715                 y := v_1
29716                 if v_2.Op != OpAMD64SETL {
29717                         break
29718                 }
29719                 cond := v_2.Args[0]
29720                 if !(is16BitInt(t)) {
29721                         break
29722                 }
29723                 v.reset(OpAMD64CMOVWLT)
29724                 v.AddArg3(y, x, cond)
29725                 return true
29726         }
29727         // match: (CondSelect <t> x y (SETG cond))
29728         // cond: is16BitInt(t)
29729         // result: (CMOVWGT y x cond)
29730         for {
29731                 t := v.Type
29732                 x := v_0
29733                 y := v_1
29734                 if v_2.Op != OpAMD64SETG {
29735                         break
29736                 }
29737                 cond := v_2.Args[0]
29738                 if !(is16BitInt(t)) {
29739                         break
29740                 }
29741                 v.reset(OpAMD64CMOVWGT)
29742                 v.AddArg3(y, x, cond)
29743                 return true
29744         }
29745         // match: (CondSelect <t> x y (SETLE cond))
29746         // cond: is16BitInt(t)
29747         // result: (CMOVWLE y x cond)
29748         for {
29749                 t := v.Type
29750                 x := v_0
29751                 y := v_1
29752                 if v_2.Op != OpAMD64SETLE {
29753                         break
29754                 }
29755                 cond := v_2.Args[0]
29756                 if !(is16BitInt(t)) {
29757                         break
29758                 }
29759                 v.reset(OpAMD64CMOVWLE)
29760                 v.AddArg3(y, x, cond)
29761                 return true
29762         }
29763         // match: (CondSelect <t> x y (SETGE cond))
29764         // cond: is16BitInt(t)
29765         // result: (CMOVWGE y x cond)
29766         for {
29767                 t := v.Type
29768                 x := v_0
29769                 y := v_1
29770                 if v_2.Op != OpAMD64SETGE {
29771                         break
29772                 }
29773                 cond := v_2.Args[0]
29774                 if !(is16BitInt(t)) {
29775                         break
29776                 }
29777                 v.reset(OpAMD64CMOVWGE)
29778                 v.AddArg3(y, x, cond)
29779                 return true
29780         }
29781         // match: (CondSelect <t> x y (SETA cond))
29782         // cond: is16BitInt(t)
29783         // result: (CMOVWHI y x cond)
29784         for {
29785                 t := v.Type
29786                 x := v_0
29787                 y := v_1
29788                 if v_2.Op != OpAMD64SETA {
29789                         break
29790                 }
29791                 cond := v_2.Args[0]
29792                 if !(is16BitInt(t)) {
29793                         break
29794                 }
29795                 v.reset(OpAMD64CMOVWHI)
29796                 v.AddArg3(y, x, cond)
29797                 return true
29798         }
29799         // match: (CondSelect <t> x y (SETB cond))
29800         // cond: is16BitInt(t)
29801         // result: (CMOVWCS y x cond)
29802         for {
29803                 t := v.Type
29804                 x := v_0
29805                 y := v_1
29806                 if v_2.Op != OpAMD64SETB {
29807                         break
29808                 }
29809                 cond := v_2.Args[0]
29810                 if !(is16BitInt(t)) {
29811                         break
29812                 }
29813                 v.reset(OpAMD64CMOVWCS)
29814                 v.AddArg3(y, x, cond)
29815                 return true
29816         }
29817         // match: (CondSelect <t> x y (SETAE cond))
29818         // cond: is16BitInt(t)
29819         // result: (CMOVWCC y x cond)
29820         for {
29821                 t := v.Type
29822                 x := v_0
29823                 y := v_1
29824                 if v_2.Op != OpAMD64SETAE {
29825                         break
29826                 }
29827                 cond := v_2.Args[0]
29828                 if !(is16BitInt(t)) {
29829                         break
29830                 }
29831                 v.reset(OpAMD64CMOVWCC)
29832                 v.AddArg3(y, x, cond)
29833                 return true
29834         }
29835         // match: (CondSelect <t> x y (SETBE cond))
29836         // cond: is16BitInt(t)
29837         // result: (CMOVWLS y x cond)
29838         for {
29839                 t := v.Type
29840                 x := v_0
29841                 y := v_1
29842                 if v_2.Op != OpAMD64SETBE {
29843                         break
29844                 }
29845                 cond := v_2.Args[0]
29846                 if !(is16BitInt(t)) {
29847                         break
29848                 }
29849                 v.reset(OpAMD64CMOVWLS)
29850                 v.AddArg3(y, x, cond)
29851                 return true
29852         }
29853         // match: (CondSelect <t> x y (SETEQF cond))
29854         // cond: is16BitInt(t)
29855         // result: (CMOVWEQF y x cond)
29856         for {
29857                 t := v.Type
29858                 x := v_0
29859                 y := v_1
29860                 if v_2.Op != OpAMD64SETEQF {
29861                         break
29862                 }
29863                 cond := v_2.Args[0]
29864                 if !(is16BitInt(t)) {
29865                         break
29866                 }
29867                 v.reset(OpAMD64CMOVWEQF)
29868                 v.AddArg3(y, x, cond)
29869                 return true
29870         }
29871         // match: (CondSelect <t> x y (SETNEF cond))
29872         // cond: is16BitInt(t)
29873         // result: (CMOVWNEF y x cond)
29874         for {
29875                 t := v.Type
29876                 x := v_0
29877                 y := v_1
29878                 if v_2.Op != OpAMD64SETNEF {
29879                         break
29880                 }
29881                 cond := v_2.Args[0]
29882                 if !(is16BitInt(t)) {
29883                         break
29884                 }
29885                 v.reset(OpAMD64CMOVWNEF)
29886                 v.AddArg3(y, x, cond)
29887                 return true
29888         }
29889         // match: (CondSelect <t> x y (SETGF cond))
29890         // cond: is16BitInt(t)
29891         // result: (CMOVWGTF y x cond)
29892         for {
29893                 t := v.Type
29894                 x := v_0
29895                 y := v_1
29896                 if v_2.Op != OpAMD64SETGF {
29897                         break
29898                 }
29899                 cond := v_2.Args[0]
29900                 if !(is16BitInt(t)) {
29901                         break
29902                 }
29903                 v.reset(OpAMD64CMOVWGTF)
29904                 v.AddArg3(y, x, cond)
29905                 return true
29906         }
29907         // match: (CondSelect <t> x y (SETGEF cond))
29908         // cond: is16BitInt(t)
29909         // result: (CMOVWGEF y x cond)
29910         for {
29911                 t := v.Type
29912                 x := v_0
29913                 y := v_1
29914                 if v_2.Op != OpAMD64SETGEF {
29915                         break
29916                 }
29917                 cond := v_2.Args[0]
29918                 if !(is16BitInt(t)) {
29919                         break
29920                 }
29921                 v.reset(OpAMD64CMOVWGEF)
29922                 v.AddArg3(y, x, cond)
29923                 return true
29924         }
29925         // match: (CondSelect <t> x y check)
29926         // cond: !check.Type.IsFlags() && check.Type.Size() == 1
29927         // result: (CondSelect <t> x y (MOVBQZX <typ.UInt64> check))
29928         for {
29929                 t := v.Type
29930                 x := v_0
29931                 y := v_1
29932                 check := v_2
29933                 if !(!check.Type.IsFlags() && check.Type.Size() == 1) {
29934                         break
29935                 }
29936                 v.reset(OpCondSelect)
29937                 v.Type = t
29938                 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64)
29939                 v0.AddArg(check)
29940                 v.AddArg3(x, y, v0)
29941                 return true
29942         }
29943         // match: (CondSelect <t> x y check)
29944         // cond: !check.Type.IsFlags() && check.Type.Size() == 2
29945         // result: (CondSelect <t> x y (MOVWQZX <typ.UInt64> check))
29946         for {
29947                 t := v.Type
29948                 x := v_0
29949                 y := v_1
29950                 check := v_2
29951                 if !(!check.Type.IsFlags() && check.Type.Size() == 2) {
29952                         break
29953                 }
29954                 v.reset(OpCondSelect)
29955                 v.Type = t
29956                 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64)
29957                 v0.AddArg(check)
29958                 v.AddArg3(x, y, v0)
29959                 return true
29960         }
29961         // match: (CondSelect <t> x y check)
29962         // cond: !check.Type.IsFlags() && check.Type.Size() == 4
29963         // result: (CondSelect <t> x y (MOVLQZX <typ.UInt64> check))
29964         for {
29965                 t := v.Type
29966                 x := v_0
29967                 y := v_1
29968                 check := v_2
29969                 if !(!check.Type.IsFlags() && check.Type.Size() == 4) {
29970                         break
29971                 }
29972                 v.reset(OpCondSelect)
29973                 v.Type = t
29974                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
29975                 v0.AddArg(check)
29976                 v.AddArg3(x, y, v0)
29977                 return true
29978         }
29979         // match: (CondSelect <t> x y check)
29980         // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))
29981         // result: (CMOVQNE y x (CMPQconst [0] check))
29982         for {
29983                 t := v.Type
29984                 x := v_0
29985                 y := v_1
29986                 check := v_2
29987                 if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) {
29988                         break
29989                 }
29990                 v.reset(OpAMD64CMOVQNE)
29991                 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
29992                 v0.AuxInt = int32ToAuxInt(0)
29993                 v0.AddArg(check)
29994                 v.AddArg3(y, x, v0)
29995                 return true
29996         }
29997         // match: (CondSelect <t> x y check)
29998         // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)
29999         // result: (CMOVLNE y x (CMPQconst [0] check))
30000         for {
30001                 t := v.Type
30002                 x := v_0
30003                 y := v_1
30004                 check := v_2
30005                 if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) {
30006                         break
30007                 }
30008                 v.reset(OpAMD64CMOVLNE)
30009                 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
30010                 v0.AuxInt = int32ToAuxInt(0)
30011                 v0.AddArg(check)
30012                 v.AddArg3(y, x, v0)
30013                 return true
30014         }
30015         // match: (CondSelect <t> x y check)
30016         // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)
30017         // result: (CMOVWNE y x (CMPQconst [0] check))
30018         for {
30019                 t := v.Type
30020                 x := v_0
30021                 y := v_1
30022                 check := v_2
30023                 if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) {
30024                         break
30025                 }
30026                 v.reset(OpAMD64CMOVWNE)
30027                 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
30028                 v0.AuxInt = int32ToAuxInt(0)
30029                 v0.AddArg(check)
30030                 v.AddArg3(y, x, v0)
30031                 return true
30032         }
30033         return false
30034 }
// rewriteValueAMD64_OpConst16 lowers a generic 16-bit constant to an amd64
// 32-bit constant load; the value is sign-extended from int16 to int32.
// NOTE(review): generated from gen/AMD64.rules — change the rules, not this file.
func rewriteValueAMD64_OpConst16(v *Value) bool {
	// match: (Const16 [c])
	// result: (MOVLconst [int32(c)])
	for {
		c := auxIntToInt16(v.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(int32(c))
		return true
	}
}
// rewriteValueAMD64_OpConst8 lowers a generic 8-bit constant to an amd64
// 32-bit constant load; the value is sign-extended from int8 to int32.
func rewriteValueAMD64_OpConst8(v *Value) bool {
	// match: (Const8 [c])
	// result: (MOVLconst [int32(c)])
	for {
		c := auxIntToInt8(v.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(int32(c))
		return true
	}
}
// rewriteValueAMD64_OpConstBool lowers a boolean constant to a 32-bit
// constant load of 0 or 1 (b2i32 maps false/true to 0/1).
func rewriteValueAMD64_OpConstBool(v *Value) bool {
	// match: (ConstBool [c])
	// result: (MOVLconst [b2i32(c)])
	for {
		c := auxIntToBool(v.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(b2i32(c))
		return true
	}
}
// rewriteValueAMD64_OpConstNil lowers the nil-pointer constant to a 64-bit
// zero constant (pointers are 64 bits on amd64).
func rewriteValueAMD64_OpConstNil(v *Value) bool {
	// match: (ConstNil )
	// result: (MOVQconst [0])
	for {
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
}
// rewriteValueAMD64_OpCtz16 lowers count-trailing-zeros of a 16-bit value.
// Setting bit 16 first (BTSLconst [16]) guarantees BSFL always finds a set
// bit, so the x == 0 case yields 16 without a separate branch.
func rewriteValueAMD64_OpCtz16(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz16 x)
	// result: (BSFL (BTSLconst <typ.UInt32> [16] x))
	for {
		x := v_0
		v.reset(OpAMD64BSFL)
		v0 := b.NewValue0(v.Pos, OpAMD64BTSLconst, typ.UInt32)
		v0.AuxInt = int8ToAuxInt(16)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpCtz32 lowers count-trailing-zeros of a 32-bit value.
// Setting bit 32 in a 64-bit register (BTSQconst [32]) guarantees BSFQ finds
// a set bit, so x == 0 yields 32; Select0 takes the count from the
// (result, flags) tuple produced by BSFQ.
func rewriteValueAMD64_OpCtz32(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz32 x)
	// result: (Select0 (BSFQ (BTSQconst <typ.UInt64> [32] x)))
	for {
		x := v_0
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64)
		v1.AuxInt = int8ToAuxInt(32)
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpCtz64 lowers count-trailing-zeros of a 64-bit value.
// There is no spare bit to pre-set here, so the zero case is handled with a
// conditional move: CMOVQEQ picks the constant 64 when BSFQ's flags say the
// input was zero (BSF leaves its destination undefined for a zero source),
// and the BSFQ result otherwise. Both Select0 and Select1 read the same
// BSFQ value v1, so the instruction is emitted once.
func rewriteValueAMD64_OpCtz64(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz64 <t> x)
	// result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
	for {
		t := v.Type
		x := v_0
		v.reset(OpAMD64CMOVQEQ)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
		v2.AuxInt = int64ToAuxInt(64)
		v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v3.AddArg(v1)
		v.AddArg3(v0, v2, v3)
		return true
	}
}
// rewriteValueAMD64_OpCtz64NonZero lowers count-trailing-zeros when the
// input is known non-zero: a bare BSFQ suffices, since the undefined-result
// case of BSF (zero source) cannot occur. Select0 extracts the count from
// the (result, flags) tuple.
func rewriteValueAMD64_OpCtz64NonZero(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz64NonZero x)
	// result: (Select0 (BSFQ x))
	for {
		x := v_0
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpCtz8 lowers count-trailing-zeros of an 8-bit value.
// Setting bit 8 first (BTSLconst [8]) guarantees BSFL always finds a set
// bit, so the x == 0 case yields 8 without a separate branch.
func rewriteValueAMD64_OpCtz8(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz8 x)
	// result: (BSFL (BTSLconst <typ.UInt32> [ 8] x))
	for {
		x := v_0
		v.reset(OpAMD64BSFL)
		v0 := b.NewValue0(v.Pos, OpAMD64BTSLconst, typ.UInt32)
		v0.AuxInt = int8ToAuxInt(8)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpDiv16 lowers signed 16-bit division to DIVW, which
// produces a (quotient, remainder) tuple; Select0 keeps the quotient.
// The boolean aux 'a' is forwarded unchanged to DIVW.
func rewriteValueAMD64_OpDiv16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div16 [a] x y)
	// result: (Select0 (DIVW [a] x y))
	for {
		a := auxIntToBool(v.AuxInt)
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v0.AuxInt = boolToAuxInt(a)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpDiv16u lowers unsigned 16-bit division to DIVWU,
// which produces a (quotient, remainder) tuple; Select0 keeps the quotient.
func rewriteValueAMD64_OpDiv16u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div16u x y)
	// result: (Select0 (DIVWU x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpDiv32 lowers signed 32-bit division to DIVL, which
// produces a (quotient, remainder) tuple; Select0 keeps the quotient.
// The boolean aux 'a' is forwarded unchanged to DIVL.
func rewriteValueAMD64_OpDiv32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div32 [a] x y)
	// result: (Select0 (DIVL [a] x y))
	for {
		a := auxIntToBool(v.AuxInt)
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
		v0.AuxInt = boolToAuxInt(a)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpDiv32u lowers unsigned 32-bit division to DIVLU,
// which produces a (quotient, remainder) tuple; Select0 keeps the quotient.
func rewriteValueAMD64_OpDiv32u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div32u x y)
	// result: (Select0 (DIVLU x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpDiv64 lowers signed 64-bit division to DIVQ, which
// produces a (quotient, remainder) tuple; Select0 keeps the quotient.
// The boolean aux 'a' is forwarded unchanged to DIVQ.
func rewriteValueAMD64_OpDiv64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div64 [a] x y)
	// result: (Select0 (DIVQ [a] x y))
	for {
		a := auxIntToBool(v.AuxInt)
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
		v0.AuxInt = boolToAuxInt(a)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpDiv64u lowers unsigned 64-bit division to DIVQU,
// which produces a (quotient, remainder) tuple; Select0 keeps the quotient.
func rewriteValueAMD64_OpDiv64u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div64u x y)
	// result: (Select0 (DIVQU x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpDiv8 lowers signed 8-bit division by sign-extending
// both operands to 16 bits and reusing the 16-bit divide (DIVW); there is
// no dedicated 8-bit divide lowering here. Select0 keeps the quotient.
func rewriteValueAMD64_OpDiv8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div8 x y)
	// result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v1.AddArg(x)
		v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v2.AddArg(y)
		v0.AddArg2(v1, v2)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpDiv8u lowers unsigned 8-bit division by zero-extending
// both operands to 16 bits and reusing the unsigned 16-bit divide (DIVWU).
// Select0 keeps the quotient.
func rewriteValueAMD64_OpDiv8u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div8u x y)
	// result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v1.AddArg(x)
		v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v2.AddArg(y)
		v0.AddArg2(v1, v2)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpEq16 lowers 16-bit equality to a word compare (CMPW)
// followed by SETEQ, which materializes the zero flag as a 0/1 byte.
func rewriteValueAMD64_OpEq16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Eq16 x y)
	// result: (SETEQ (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpEq32 lowers 32-bit equality to a long compare (CMPL)
// followed by SETEQ, which materializes the zero flag as a 0/1 byte.
func rewriteValueAMD64_OpEq32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Eq32 x y)
	// result: (SETEQ (CMPL x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpEq32F lowers 32-bit float equality to an unordered
// float compare (UCOMISS) followed by SETEQF; the F-variant setcc accounts
// for the NaN/parity case that a plain SETEQ would get wrong.
func rewriteValueAMD64_OpEq32F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Eq32F x y)
	// result: (SETEQF (UCOMISS x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETEQF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpEq64 lowers 64-bit equality to a quad compare (CMPQ)
// followed by SETEQ, which materializes the zero flag as a 0/1 byte.
func rewriteValueAMD64_OpEq64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Eq64 x y)
	// result: (SETEQ (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpEq64F lowers 64-bit float equality to an unordered
// float compare (UCOMISD) followed by SETEQF; the F-variant setcc accounts
// for the NaN/parity case that a plain SETEQ would get wrong.
func rewriteValueAMD64_OpEq64F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Eq64F x y)
	// result: (SETEQF (UCOMISD x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETEQF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpEq8 lowers 8-bit equality to a byte compare (CMPB)
// followed by SETEQ, which materializes the zero flag as a 0/1 byte.
func rewriteValueAMD64_OpEq8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Eq8 x y)
	// result: (SETEQ (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpEqB lowers boolean equality identically to 8-bit
// equality: a byte compare (CMPB) followed by SETEQ.
func rewriteValueAMD64_OpEqB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (EqB x y)
	// result: (SETEQ (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpEqPtr lowers pointer equality with a 64-bit compare:
// (EqPtr x y) => (SETEQ (CMPQ x y)). Always fires.
func rewriteValueAMD64_OpEqPtr(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (EqPtr x y)
	// result: (SETEQ (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpFMA lowers the generic FMA op to the AMD64 fused
// multiply-add instruction, with the addend z placed as the first argument:
// (FMA x y z) => (VFMADD231SD z x y). Always fires.
func rewriteValueAMD64_OpFMA(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (FMA x y z)
	// result: (VFMADD231SD z x y)
	for {
		x := v_0
		y := v_1
		z := v_2
		v.reset(OpAMD64VFMADD231SD)
		v.AddArg3(z, x, y)
		return true
	}
}
// rewriteValueAMD64_OpFloor lowers Floor to ROUNDSD; the immediate 1 is
// the rounding-mode operand (round toward -inf per the op's name — see
// the ROUNDSD instruction reference). Always fires.
func rewriteValueAMD64_OpFloor(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Floor x)
	// result: (ROUNDSD [1] x)
	for {
		x := v_0
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = int8ToAuxInt(1)
		v.AddArg(x)
		return true
	}
}
// rewriteValueAMD64_OpGetG lowers GetG to LoweredGetG, except when the
// regabi-G experiment is on and the function uses ABIInternal
// (presumably because g is then available without a load — confirm
// against gen/AMD64.rules). Returns false when the condition blocks the
// rewrite.
func rewriteValueAMD64_OpGetG(v *Value) bool {
	v_0 := v.Args[0]
	// match: (GetG mem)
	// cond: !(objabi.Experiment.RegabiG && v.Block.Func.OwnAux.Fn.ABI() == obj.ABIInternal)
	// result: (LoweredGetG mem)
	for {
		mem := v_0
		if !(!(objabi.Experiment.RegabiG && v.Block.Func.OwnAux.Fn.ABI() == obj.ABIInternal)) {
			break
		}
		v.reset(OpAMD64LoweredGetG)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpHasCPUFeature lowers a CPU-feature query to a
// non-zero test of the lowered feature word:
// (HasCPUFeature {s}) => (SETNE (CMPQconst [0] (LoweredHasCPUFeature {s}))).
// The feature symbol s is carried through on the Aux field. Always fires.
func rewriteValueAMD64_OpHasCPUFeature(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (HasCPUFeature {s})
	// result: (SETNE (CMPQconst [0] (LoweredHasCPUFeature {s})))
	for {
		s := auxToSym(v.Aux)
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v1 := b.NewValue0(v.Pos, OpAMD64LoweredHasCPUFeature, typ.UInt64)
		v1.Aux = symToAux(s)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpIsInBounds lowers the bounds check with an
// unsigned below comparison: (IsInBounds idx len) => (SETB (CMPQ idx len)).
// Always fires.
func rewriteValueAMD64_OpIsInBounds(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (IsInBounds idx len)
	// result: (SETB (CMPQ idx len))
	for {
		idx := v_0
		len := v_1
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(idx, len)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpIsNonNil lowers the nil check by testing the
// pointer against itself: (IsNonNil p) => (SETNE (TESTQ p p)).
// Always fires.
func rewriteValueAMD64_OpIsNonNil(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (IsNonNil p)
	// result: (SETNE (TESTQ p p))
	for {
		p := v_0
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags)
		v0.AddArg2(p, p)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpIsSliceInBounds lowers the slice bounds check with
// an unsigned below-or-equal comparison (idx == len is legal for slicing):
// (IsSliceInBounds idx len) => (SETBE (CMPQ idx len)). Always fires.
func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (IsSliceInBounds idx len)
	// result: (SETBE (CMPQ idx len))
	for {
		idx := v_0
		len := v_1
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(idx, len)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpLeq16 lowers signed 16-bit <=:
// (Leq16 x y) => (SETLE (CMPW x y)). Always fires.
func rewriteValueAMD64_OpLeq16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq16 x y)
	// result: (SETLE (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpLeq16U lowers unsigned 16-bit <=:
// (Leq16U x y) => (SETBE (CMPW x y)). Always fires.
func rewriteValueAMD64_OpLeq16U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq16U x y)
	// result: (SETBE (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpLeq32 lowers signed 32-bit <=:
// (Leq32 x y) => (SETLE (CMPL x y)). Always fires.
func rewriteValueAMD64_OpLeq32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq32 x y)
	// result: (SETLE (CMPL x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpLeq32F lowers float32 <=. Note the swapped operand
// order in the generated comparison: x <= y is computed as y >= x,
// i.e. (Leq32F x y) => (SETGEF (UCOMISS y x)). Always fires.
func rewriteValueAMD64_OpLeq32F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq32F x y)
	// result: (SETGEF (UCOMISS y x))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpLeq32U lowers unsigned 32-bit <=:
// (Leq32U x y) => (SETBE (CMPL x y)). Always fires.
func rewriteValueAMD64_OpLeq32U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq32U x y)
	// result: (SETBE (CMPL x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpLeq64 lowers signed 64-bit <=:
// (Leq64 x y) => (SETLE (CMPQ x y)). Always fires.
func rewriteValueAMD64_OpLeq64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq64 x y)
	// result: (SETLE (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpLeq64F lowers float64 <=. As with Leq32F the
// operands are swapped: x <= y is computed as y >= x,
// i.e. (Leq64F x y) => (SETGEF (UCOMISD y x)). Always fires.
func rewriteValueAMD64_OpLeq64F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq64F x y)
	// result: (SETGEF (UCOMISD y x))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpLeq64U lowers unsigned 64-bit <=:
// (Leq64U x y) => (SETBE (CMPQ x y)). Always fires.
func rewriteValueAMD64_OpLeq64U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq64U x y)
	// result: (SETBE (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpLeq8 lowers signed 8-bit <=:
// (Leq8 x y) => (SETLE (CMPB x y)). Always fires.
func rewriteValueAMD64_OpLeq8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq8 x y)
	// result: (SETLE (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpLeq8U lowers unsigned 8-bit <=:
// (Leq8U x y) => (SETBE (CMPB x y)). Always fires.
func rewriteValueAMD64_OpLeq8U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq8U x y)
	// result: (SETBE (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpLess16 lowers signed 16-bit <:
// (Less16 x y) => (SETL (CMPW x y)). Always fires.
func rewriteValueAMD64_OpLess16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less16 x y)
	// result: (SETL (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpLess16U lowers unsigned 16-bit <:
// (Less16U x y) => (SETB (CMPW x y)). Always fires.
func rewriteValueAMD64_OpLess16U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less16U x y)
	// result: (SETB (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpLess32 lowers signed 32-bit <:
// (Less32 x y) => (SETL (CMPL x y)). Always fires.
func rewriteValueAMD64_OpLess32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less32 x y)
	// result: (SETL (CMPL x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpLess32F lowers float32 <. Note the swapped operand
// order in the generated comparison: x < y is computed as y > x,
// i.e. (Less32F x y) => (SETGF (UCOMISS y x)). Always fires.
func rewriteValueAMD64_OpLess32F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less32F x y)
	// result: (SETGF (UCOMISS y x))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpLess32U lowers unsigned 32-bit <:
// (Less32U x y) => (SETB (CMPL x y)). Always fires.
func rewriteValueAMD64_OpLess32U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less32U x y)
	// result: (SETB (CMPL x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpLess64 lowers signed 64-bit <:
// (Less64 x y) => (SETL (CMPQ x y)). Always fires.
func rewriteValueAMD64_OpLess64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less64 x y)
	// result: (SETL (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpLess64F lowers float64 <. As with Less32F the
// operands are swapped: x < y is computed as y > x,
// i.e. (Less64F x y) => (SETGF (UCOMISD y x)). Always fires.
func rewriteValueAMD64_OpLess64F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less64F x y)
	// result: (SETGF (UCOMISD y x))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpLess64U lowers unsigned 64-bit <:
// (Less64U x y) => (SETB (CMPQ x y)). Always fires.
func rewriteValueAMD64_OpLess64U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less64U x y)
	// result: (SETB (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpLess8 lowers signed 8-bit <:
// (Less8 x y) => (SETL (CMPB x y)). Always fires.
func rewriteValueAMD64_OpLess8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less8 x y)
	// result: (SETL (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpLess8U lowers unsigned 8-bit <:
// (Less8U x y) => (SETB (CMPB x y)). Always fires.
func rewriteValueAMD64_OpLess8U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less8U x y)
	// result: (SETB (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpLoad lowers a generic typed Load to the
// size-appropriate AMD64 load: MOVQload for 64-bit ints and pointers,
// MOVLload/MOVWload/MOVBload for 32/16/8-bit ints and booleans, and
// MOVSSload/MOVSDload for 32/64-bit floats. The cases are tried in
// order; returns false for any type none of the conditions cover
// (handled elsewhere, e.g. by decomposition passes — not visible here).
func rewriteValueAMD64_OpLoad(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (Load <t> ptr mem)
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (MOVQload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is32BitInt(t)
	// result: (MOVLload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is16BitInt(t)
	// result: (MOVWload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (t.IsBoolean() || is8BitInt(t))
	// result: (MOVBload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(t.IsBoolean() || is8BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is32BitFloat(t)
	// result: (MOVSSload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(is32BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is64BitFloat(t)
	// result: (MOVSDload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(is64BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpLocalAddr lowers a local-variable address to a
// LEAQ off the base pointer, dropping the memory argument:
// (LocalAddr {sym} base _) => (LEAQ {sym} base). Always fires.
func rewriteValueAMD64_OpLocalAddr(v *Value) bool {
	v_0 := v.Args[0]
	// match: (LocalAddr {sym} base _)
	// result: (LEAQ {sym} base)
	for {
		sym := auxToSym(v.Aux)
		base := v_0
		v.reset(OpAMD64LEAQ)
		v.Aux = symToAux(sym)
		v.AddArg(base)
		return true
	}
}
// rewriteValueAMD64_OpLsh16x16 lowers Lsh16x16. When the shift amount is
// not provably in range (!shiftIsBounded), the SHLL result is ANDed with
// an SBBLcarrymask built from CMPWconst y [32], which per Go shift
// semantics yields 0 for oversized shifts; when bounded, a bare SHLL is
// emitted. Returns false only if neither condition holds (impossible
// here since the conditions are complementary).
func rewriteValueAMD64_OpLsh16x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh16x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh16x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpLsh16x32 lowers Lsh16x32: masked SHLL via
// SBBLcarrymask/CMPLconst y [32] for possibly-out-of-range shifts,
// bare SHLL when shiftIsBounded.
func rewriteValueAMD64_OpLsh16x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh16x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh16x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpLsh16x64 lowers Lsh16x64: masked SHLL via
// SBBLcarrymask/CMPQconst y [32] for possibly-out-of-range shifts,
// bare SHLL when shiftIsBounded.
func rewriteValueAMD64_OpLsh16x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh16x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh16x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpLsh16x8 lowers Lsh16x8: masked SHLL via
// SBBLcarrymask/CMPBconst y [32] for possibly-out-of-range shifts,
// bare SHLL when shiftIsBounded.
func rewriteValueAMD64_OpLsh16x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh16x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh16x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpLsh32x16 lowers Lsh32x16: masked SHLL via
// SBBLcarrymask/CMPWconst y [32] for possibly-out-of-range shifts,
// bare SHLL when shiftIsBounded.
func rewriteValueAMD64_OpLsh32x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh32x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh32x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpLsh32x32 lowers Lsh32x32: masked SHLL via
// SBBLcarrymask/CMPLconst y [32] for possibly-out-of-range shifts,
// bare SHLL when shiftIsBounded.
func rewriteValueAMD64_OpLsh32x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh32x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh32x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpLsh32x64 lowers Lsh32x64: masked SHLL via
// SBBLcarrymask/CMPQconst y [32] for possibly-out-of-range shifts,
// bare SHLL when shiftIsBounded.
func rewriteValueAMD64_OpLsh32x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh32x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh32x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpLsh32x8 lowers Lsh32x8: masked SHLL via
// SBBLcarrymask/CMPBconst y [32] for possibly-out-of-range shifts,
// bare SHLL when shiftIsBounded.
func rewriteValueAMD64_OpLsh32x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh32x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh32x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpLsh64x16 lowers Lsh64x16 using the 64-bit variants
// (ANDQ/SHLQ/SBBQcarrymask) with a mask threshold of 64: masked shift
// for possibly-out-of-range amounts, bare SHLQ when shiftIsBounded.
func rewriteValueAMD64_OpLsh64x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh64x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh64x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpLsh64x32 lowers the generic Lsh64x32 op (64-bit left
// shift by a 32-bit amount) to AMD64 instructions.
// Code generated from gen/AMD64.rules; fixes belong in the rules, not here.
func rewriteValueAMD64_OpLsh64x32(v *Value) bool {
	v_1 := v.Args[1] // y: shift amount
	v_0 := v.Args[0] // x: value being shifted
	b := v.Block
	// match: (Lsh64x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		// Unbounded shift: mask the SHLQ result with SBBQcarrymask,
		// all ones when y < 64 (unsigned), zero otherwise.
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh64x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		// Shift amount is provably in range: a plain SHLQ suffices.
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpLsh64x64 lowers the generic Lsh64x64 op (64-bit left
// shift by a 64-bit amount) to AMD64 instructions.
// Code generated from gen/AMD64.rules; fixes belong in the rules, not here.
func rewriteValueAMD64_OpLsh64x64(v *Value) bool {
	v_1 := v.Args[1] // y: shift amount
	v_0 := v.Args[0] // x: value being shifted
	b := v.Block
	// match: (Lsh64x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		// Unbounded shift: mask the SHLQ result with SBBQcarrymask,
		// all ones when y < 64 (unsigned), zero otherwise.
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		// CMPQconst carries an int32 AuxInt even though it compares
		// 64-bit values; 64 fits trivially.
		v2.AuxInt = int32ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh64x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		// Shift amount is provably in range: a plain SHLQ suffices.
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpLsh64x8 lowers the generic Lsh64x8 op (64-bit left
// shift by an 8-bit amount) to AMD64 instructions.
// Code generated from gen/AMD64.rules; fixes belong in the rules, not here.
func rewriteValueAMD64_OpLsh64x8(v *Value) bool {
	v_1 := v.Args[1] // y: shift amount
	v_0 := v.Args[0] // x: value being shifted
	b := v.Block
	// match: (Lsh64x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		// Unbounded shift: mask the SHLQ result with SBBQcarrymask,
		// all ones when y < 64 (unsigned), zero otherwise.
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh64x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		// Shift amount is provably in range: a plain SHLQ suffices.
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpLsh8x16 lowers the generic Lsh8x16 op (8-bit left
// shift by a 16-bit amount) to AMD64 instructions. The shift itself is
// done with the 32-bit SHLL; the mask threshold is therefore 32.
// Code generated from gen/AMD64.rules; fixes belong in the rules, not here.
func rewriteValueAMD64_OpLsh8x16(v *Value) bool {
	v_1 := v.Args[1] // y: shift amount
	v_0 := v.Args[0] // x: value being shifted
	b := v.Block
	// match: (Lsh8x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		// Unbounded shift: mask out the result (which is undefined
		// hardware-wise for y >= 32) with SBBLcarrymask — all ones
		// when y < 32 (unsigned), zero otherwise.
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh8x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		// Shift amount is provably in range: a plain SHLL suffices.
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpLsh8x32 lowers the generic Lsh8x32 op (8-bit left
// shift by a 32-bit amount) to AMD64 instructions. The shift itself is
// done with the 32-bit SHLL; the mask threshold is therefore 32.
// Code generated from gen/AMD64.rules; fixes belong in the rules, not here.
func rewriteValueAMD64_OpLsh8x32(v *Value) bool {
	v_1 := v.Args[1] // y: shift amount
	v_0 := v.Args[0] // x: value being shifted
	b := v.Block
	// match: (Lsh8x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		// Unbounded shift: mask the SHLL result with SBBLcarrymask,
		// all ones when y < 32 (unsigned), zero otherwise.
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh8x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		// Shift amount is provably in range: a plain SHLL suffices.
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpLsh8x64 lowers the generic Lsh8x64 op (8-bit left
// shift by a 64-bit amount) to AMD64 instructions. The shift itself is
// done with the 32-bit SHLL; the mask threshold is therefore 32.
// Code generated from gen/AMD64.rules; fixes belong in the rules, not here.
func rewriteValueAMD64_OpLsh8x64(v *Value) bool {
	v_1 := v.Args[1] // y: shift amount
	v_0 := v.Args[0] // x: value being shifted
	b := v.Block
	// match: (Lsh8x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		// Unbounded shift: mask the SHLL result with SBBLcarrymask,
		// all ones when y < 32 (unsigned), zero otherwise.
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh8x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		// Shift amount is provably in range: a plain SHLL suffices.
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpLsh8x8 lowers the generic Lsh8x8 op (8-bit left
// shift by an 8-bit amount) to AMD64 instructions. The shift itself is
// done with the 32-bit SHLL; the mask threshold is therefore 32.
// Code generated from gen/AMD64.rules; fixes belong in the rules, not here.
func rewriteValueAMD64_OpLsh8x8(v *Value) bool {
	v_1 := v.Args[1] // y: shift amount
	v_0 := v.Args[0] // x: value being shifted
	b := v.Block
	// match: (Lsh8x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		// Unbounded shift: mask the SHLL result with SBBLcarrymask,
		// all ones when y < 32 (unsigned), zero otherwise.
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh8x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		// Shift amount is provably in range: a plain SHLL suffices.
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpMod16 lowers Mod16 to the remainder half (Select1)
// of the two-result DIVW instruction.
// Code generated from gen/AMD64.rules; fixes belong in the rules, not here.
func rewriteValueAMD64_OpMod16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod16 [a] x y)
	// result: (Select1 (DIVW [a] x y))
	for {
		// The bool AuxInt carried on the generic op is forwarded to
		// DIVW unchanged (presumably the divide-needs-no-fixup hint;
		// see gen/AMD64.rules — TODO confirm).
		a := auxIntToBool(v.AuxInt)
		x := v_0
		y := v_1
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v0.AuxInt = boolToAuxInt(a)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpMod16u lowers the unsigned Mod16u to the remainder
// half (Select1) of the two-result DIVWU instruction.
// Code generated from gen/AMD64.rules; fixes belong in the rules, not here.
func rewriteValueAMD64_OpMod16u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod16u x y)
	// result: (Select1 (DIVWU x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpMod32 lowers Mod32 to the remainder half (Select1)
// of the two-result DIVL instruction.
// Code generated from gen/AMD64.rules; fixes belong in the rules, not here.
func rewriteValueAMD64_OpMod32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod32 [a] x y)
	// result: (Select1 (DIVL [a] x y))
	for {
		// Forward the generic op's bool AuxInt to DIVL unchanged.
		a := auxIntToBool(v.AuxInt)
		x := v_0
		y := v_1
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
		v0.AuxInt = boolToAuxInt(a)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpMod32u lowers the unsigned Mod32u to the remainder
// half (Select1) of the two-result DIVLU instruction.
// Code generated from gen/AMD64.rules; fixes belong in the rules, not here.
func rewriteValueAMD64_OpMod32u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod32u x y)
	// result: (Select1 (DIVLU x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpMod64 lowers Mod64 to the remainder half (Select1)
// of the two-result DIVQ instruction.
// Code generated from gen/AMD64.rules; fixes belong in the rules, not here.
func rewriteValueAMD64_OpMod64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod64 [a] x y)
	// result: (Select1 (DIVQ [a] x y))
	for {
		// Forward the generic op's bool AuxInt to DIVQ unchanged.
		a := auxIntToBool(v.AuxInt)
		x := v_0
		y := v_1
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
		v0.AuxInt = boolToAuxInt(a)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpMod64u lowers the unsigned Mod64u to the remainder
// half (Select1) of the two-result DIVQU instruction.
// Code generated from gen/AMD64.rules; fixes belong in the rules, not here.
func rewriteValueAMD64_OpMod64u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod64u x y)
	// result: (Select1 (DIVQU x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpMod8 lowers Mod8 by sign-extending both operands to
// 16 bits and taking the remainder half (Select1) of a 16-bit DIVW; no
// 8-bit divide instruction is used.
// Code generated from gen/AMD64.rules; fixes belong in the rules, not here.
func rewriteValueAMD64_OpMod8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod8 x y)
	// result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v1.AddArg(x)
		v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v2.AddArg(y)
		v0.AddArg2(v1, v2)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpMod8u lowers the unsigned Mod8u by zero-extending
// both operands to 16 bits and taking the remainder half (Select1) of a
// 16-bit DIVWU; no 8-bit divide instruction is used.
// Code generated from gen/AMD64.rules; fixes belong in the rules, not here.
func rewriteValueAMD64_OpMod8u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod8u x y)
	// result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v1.AddArg(x)
		v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v2.AddArg(y)
		v0.AddArg2(v1, v2)
		v.AddArg(v0)
		return true
	}
}
31757 func rewriteValueAMD64_OpMove(v *Value) bool {
31758         v_2 := v.Args[2]
31759         v_1 := v.Args[1]
31760         v_0 := v.Args[0]
31761         b := v.Block
31762         config := b.Func.Config
31763         typ := &b.Func.Config.Types
31764         // match: (Move [0] _ _ mem)
31765         // result: mem
31766         for {
31767                 if auxIntToInt64(v.AuxInt) != 0 {
31768                         break
31769                 }
31770                 mem := v_2
31771                 v.copyOf(mem)
31772                 return true
31773         }
31774         // match: (Move [1] dst src mem)
31775         // result: (MOVBstore dst (MOVBload src mem) mem)
31776         for {
31777                 if auxIntToInt64(v.AuxInt) != 1 {
31778                         break
31779                 }
31780                 dst := v_0
31781                 src := v_1
31782                 mem := v_2
31783                 v.reset(OpAMD64MOVBstore)
31784                 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
31785                 v0.AddArg2(src, mem)
31786                 v.AddArg3(dst, v0, mem)
31787                 return true
31788         }
31789         // match: (Move [2] dst src mem)
31790         // result: (MOVWstore dst (MOVWload src mem) mem)
31791         for {
31792                 if auxIntToInt64(v.AuxInt) != 2 {
31793                         break
31794                 }
31795                 dst := v_0
31796                 src := v_1
31797                 mem := v_2
31798                 v.reset(OpAMD64MOVWstore)
31799                 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
31800                 v0.AddArg2(src, mem)
31801                 v.AddArg3(dst, v0, mem)
31802                 return true
31803         }
31804         // match: (Move [4] dst src mem)
31805         // result: (MOVLstore dst (MOVLload src mem) mem)
31806         for {
31807                 if auxIntToInt64(v.AuxInt) != 4 {
31808                         break
31809                 }
31810                 dst := v_0
31811                 src := v_1
31812                 mem := v_2
31813                 v.reset(OpAMD64MOVLstore)
31814                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
31815                 v0.AddArg2(src, mem)
31816                 v.AddArg3(dst, v0, mem)
31817                 return true
31818         }
31819         // match: (Move [8] dst src mem)
31820         // result: (MOVQstore dst (MOVQload src mem) mem)
31821         for {
31822                 if auxIntToInt64(v.AuxInt) != 8 {
31823                         break
31824                 }
31825                 dst := v_0
31826                 src := v_1
31827                 mem := v_2
31828                 v.reset(OpAMD64MOVQstore)
31829                 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
31830                 v0.AddArg2(src, mem)
31831                 v.AddArg3(dst, v0, mem)
31832                 return true
31833         }
31834         // match: (Move [16] dst src mem)
31835         // cond: config.useSSE
31836         // result: (MOVOstore dst (MOVOload src mem) mem)
31837         for {
31838                 if auxIntToInt64(v.AuxInt) != 16 {
31839                         break
31840                 }
31841                 dst := v_0
31842                 src := v_1
31843                 mem := v_2
31844                 if !(config.useSSE) {
31845                         break
31846                 }
31847                 v.reset(OpAMD64MOVOstore)
31848                 v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
31849                 v0.AddArg2(src, mem)
31850                 v.AddArg3(dst, v0, mem)
31851                 return true
31852         }
31853         // match: (Move [16] dst src mem)
31854         // cond: !config.useSSE
31855         // result: (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
31856         for {
31857                 if auxIntToInt64(v.AuxInt) != 16 {
31858                         break
31859                 }
31860                 dst := v_0
31861                 src := v_1
31862                 mem := v_2
31863                 if !(!config.useSSE) {
31864                         break
31865                 }
31866                 v.reset(OpAMD64MOVQstore)
31867                 v.AuxInt = int32ToAuxInt(8)
31868                 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
31869                 v0.AuxInt = int32ToAuxInt(8)
31870                 v0.AddArg2(src, mem)
31871                 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
31872                 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
31873                 v2.AddArg2(src, mem)
31874                 v1.AddArg3(dst, v2, mem)
31875                 v.AddArg3(dst, v0, v1)
31876                 return true
31877         }
31878         // match: (Move [32] dst src mem)
31879         // result: (Move [16] (OffPtr <dst.Type> dst [16]) (OffPtr <src.Type> src [16]) (Move [16] dst src mem))
31880         for {
31881                 if auxIntToInt64(v.AuxInt) != 32 {
31882                         break
31883                 }
31884                 dst := v_0
31885                 src := v_1
31886                 mem := v_2
31887                 v.reset(OpMove)
31888                 v.AuxInt = int64ToAuxInt(16)
31889                 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
31890                 v0.AuxInt = int64ToAuxInt(16)
31891                 v0.AddArg(dst)
31892                 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
31893                 v1.AuxInt = int64ToAuxInt(16)
31894                 v1.AddArg(src)
31895                 v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
31896                 v2.AuxInt = int64ToAuxInt(16)
31897                 v2.AddArg3(dst, src, mem)
31898                 v.AddArg3(v0, v1, v2)
31899                 return true
31900         }
31901         // match: (Move [48] dst src mem)
31902         // cond: config.useSSE
31903         // result: (Move [32] (OffPtr <dst.Type> dst [16]) (OffPtr <src.Type> src [16]) (Move [16] dst src mem))
31904         for {
31905                 if auxIntToInt64(v.AuxInt) != 48 {
31906                         break
31907                 }
31908                 dst := v_0
31909                 src := v_1
31910                 mem := v_2
31911                 if !(config.useSSE) {
31912                         break
31913                 }
31914                 v.reset(OpMove)
31915                 v.AuxInt = int64ToAuxInt(32)
31916                 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
31917                 v0.AuxInt = int64ToAuxInt(16)
31918                 v0.AddArg(dst)
31919                 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
31920                 v1.AuxInt = int64ToAuxInt(16)
31921                 v1.AddArg(src)
31922                 v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
31923                 v2.AuxInt = int64ToAuxInt(16)
31924                 v2.AddArg3(dst, src, mem)
31925                 v.AddArg3(v0, v1, v2)
31926                 return true
31927         }
31928         // match: (Move [64] dst src mem)
31929         // cond: config.useSSE
31930         // result: (Move [32] (OffPtr <dst.Type> dst [32]) (OffPtr <src.Type> src [32]) (Move [32] dst src mem))
31931         for {
31932                 if auxIntToInt64(v.AuxInt) != 64 {
31933                         break
31934                 }
31935                 dst := v_0
31936                 src := v_1
31937                 mem := v_2
31938                 if !(config.useSSE) {
31939                         break
31940                 }
31941                 v.reset(OpMove)
31942                 v.AuxInt = int64ToAuxInt(32)
31943                 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
31944                 v0.AuxInt = int64ToAuxInt(32)
31945                 v0.AddArg(dst)
31946                 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
31947                 v1.AuxInt = int64ToAuxInt(32)
31948                 v1.AddArg(src)
31949                 v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
31950                 v2.AuxInt = int64ToAuxInt(32)
31951                 v2.AddArg3(dst, src, mem)
31952                 v.AddArg3(v0, v1, v2)
31953                 return true
31954         }
31955         // match: (Move [3] dst src mem)
31956         // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
31957         for {
31958                 if auxIntToInt64(v.AuxInt) != 3 {
31959                         break
31960                 }
31961                 dst := v_0
31962                 src := v_1
31963                 mem := v_2
31964                 v.reset(OpAMD64MOVBstore)
31965                 v.AuxInt = int32ToAuxInt(2)
31966                 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
31967                 v0.AuxInt = int32ToAuxInt(2)
31968                 v0.AddArg2(src, mem)
31969                 v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem)
31970                 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
31971                 v2.AddArg2(src, mem)
31972                 v1.AddArg3(dst, v2, mem)
31973                 v.AddArg3(dst, v0, v1)
31974                 return true
31975         }
31976         // match: (Move [5] dst src mem)
31977         // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
31978         for {
31979                 if auxIntToInt64(v.AuxInt) != 5 {
31980                         break
31981                 }
31982                 dst := v_0
31983                 src := v_1
31984                 mem := v_2
31985                 v.reset(OpAMD64MOVBstore)
31986                 v.AuxInt = int32ToAuxInt(4)
31987                 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
31988                 v0.AuxInt = int32ToAuxInt(4)
31989                 v0.AddArg2(src, mem)
31990                 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
31991                 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
31992                 v2.AddArg2(src, mem)
31993                 v1.AddArg3(dst, v2, mem)
31994                 v.AddArg3(dst, v0, v1)
31995                 return true
31996         }
31997         // match: (Move [6] dst src mem)
31998         // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
31999         for {
32000                 if auxIntToInt64(v.AuxInt) != 6 {
32001                         break
32002                 }
32003                 dst := v_0
32004                 src := v_1
32005                 mem := v_2
32006                 v.reset(OpAMD64MOVWstore)
32007                 v.AuxInt = int32ToAuxInt(4)
32008                 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
32009                 v0.AuxInt = int32ToAuxInt(4)
32010                 v0.AddArg2(src, mem)
32011                 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
32012                 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
32013                 v2.AddArg2(src, mem)
32014                 v1.AddArg3(dst, v2, mem)
32015                 v.AddArg3(dst, v0, v1)
32016                 return true
32017         }
32018         // match: (Move [7] dst src mem)
32019         // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
32020         for {
32021                 if auxIntToInt64(v.AuxInt) != 7 {
32022                         break
32023                 }
32024                 dst := v_0
32025                 src := v_1
32026                 mem := v_2
32027                 v.reset(OpAMD64MOVLstore)
32028                 v.AuxInt = int32ToAuxInt(3)
32029                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
32030                 v0.AuxInt = int32ToAuxInt(3)
32031                 v0.AddArg2(src, mem)
32032                 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
32033                 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
32034                 v2.AddArg2(src, mem)
32035                 v1.AddArg3(dst, v2, mem)
32036                 v.AddArg3(dst, v0, v1)
32037                 return true
32038         }
32039         // match: (Move [9] dst src mem)
32040         // result: (MOVBstore [8] dst (MOVBload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
32041         for {
32042                 if auxIntToInt64(v.AuxInt) != 9 {
32043                         break
32044                 }
32045                 dst := v_0
32046                 src := v_1
32047                 mem := v_2
32048                 v.reset(OpAMD64MOVBstore)
32049                 v.AuxInt = int32ToAuxInt(8)
32050                 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
32051                 v0.AuxInt = int32ToAuxInt(8)
32052                 v0.AddArg2(src, mem)
32053                 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
32054                 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
32055                 v2.AddArg2(src, mem)
32056                 v1.AddArg3(dst, v2, mem)
32057                 v.AddArg3(dst, v0, v1)
32058                 return true
32059         }
32060         // match: (Move [10] dst src mem)
32061         // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
32062         for {
32063                 if auxIntToInt64(v.AuxInt) != 10 {
32064                         break
32065                 }
32066                 dst := v_0
32067                 src := v_1
32068                 mem := v_2
32069                 v.reset(OpAMD64MOVWstore)
32070                 v.AuxInt = int32ToAuxInt(8)
32071                 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
32072                 v0.AuxInt = int32ToAuxInt(8)
32073                 v0.AddArg2(src, mem)
32074                 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
32075                 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
32076                 v2.AddArg2(src, mem)
32077                 v1.AddArg3(dst, v2, mem)
32078                 v.AddArg3(dst, v0, v1)
32079                 return true
32080         }
32081         // match: (Move [12] dst src mem)
32082         // result: (MOVLstore [8] dst (MOVLload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
32083         for {
32084                 if auxIntToInt64(v.AuxInt) != 12 {
32085                         break
32086                 }
32087                 dst := v_0
32088                 src := v_1
32089                 mem := v_2
32090                 v.reset(OpAMD64MOVLstore)
32091                 v.AuxInt = int32ToAuxInt(8)
32092                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
32093                 v0.AuxInt = int32ToAuxInt(8)
32094                 v0.AddArg2(src, mem)
32095                 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
32096                 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
32097                 v2.AddArg2(src, mem)
32098                 v1.AddArg3(dst, v2, mem)
32099                 v.AddArg3(dst, v0, v1)
32100                 return true
32101         }
32102         // match: (Move [s] dst src mem)
32103         // cond: s == 11 || s >= 13 && s <= 15
32104         // result: (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem) (MOVQstore dst (MOVQload src mem) mem))
32105         for {
32106                 s := auxIntToInt64(v.AuxInt)
32107                 dst := v_0
32108                 src := v_1
32109                 mem := v_2
32110                 if !(s == 11 || s >= 13 && s <= 15) {
32111                         break
32112                 }
32113                 v.reset(OpAMD64MOVQstore)
32114                 v.AuxInt = int32ToAuxInt(int32(s - 8))
32115                 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
32116                 v0.AuxInt = int32ToAuxInt(int32(s - 8))
32117                 v0.AddArg2(src, mem)
32118                 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
32119                 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
32120                 v2.AddArg2(src, mem)
32121                 v1.AddArg3(dst, v2, mem)
32122                 v.AddArg3(dst, v0, v1)
32123                 return true
32124         }
32125         // match: (Move [s] dst src mem)
32126         // cond: s > 16 && s%16 != 0 && s%16 <= 8
32127         // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore dst (MOVQload src mem) mem))
32128         for {
32129                 s := auxIntToInt64(v.AuxInt)
32130                 dst := v_0
32131                 src := v_1
32132                 mem := v_2
32133                 if !(s > 16 && s%16 != 0 && s%16 <= 8) {
32134                         break
32135                 }
32136                 v.reset(OpMove)
32137                 v.AuxInt = int64ToAuxInt(s - s%16)
32138                 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
32139                 v0.AuxInt = int64ToAuxInt(s % 16)
32140                 v0.AddArg(dst)
32141                 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
32142                 v1.AuxInt = int64ToAuxInt(s % 16)
32143                 v1.AddArg(src)
32144                 v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
32145                 v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
32146                 v3.AddArg2(src, mem)
32147                 v2.AddArg3(dst, v3, mem)
32148                 v.AddArg3(v0, v1, v2)
32149                 return true
32150         }
32151         // match: (Move [s] dst src mem)
32152         // cond: s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE
32153         // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVOstore dst (MOVOload src mem) mem))
32154         for {
32155                 s := auxIntToInt64(v.AuxInt)
32156                 dst := v_0
32157                 src := v_1
32158                 mem := v_2
32159                 if !(s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE) {
32160                         break
32161                 }
32162                 v.reset(OpMove)
32163                 v.AuxInt = int64ToAuxInt(s - s%16)
32164                 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
32165                 v0.AuxInt = int64ToAuxInt(s % 16)
32166                 v0.AddArg(dst)
32167                 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
32168                 v1.AuxInt = int64ToAuxInt(s % 16)
32169                 v1.AddArg(src)
32170                 v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
32171                 v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
32172                 v3.AddArg2(src, mem)
32173                 v2.AddArg3(dst, v3, mem)
32174                 v.AddArg3(v0, v1, v2)
32175                 return true
32176         }
32177         // match: (Move [s] dst src mem)
32178         // cond: s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE
32179         // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)))
32180         for {
32181                 s := auxIntToInt64(v.AuxInt)
32182                 dst := v_0
32183                 src := v_1
32184                 mem := v_2
32185                 if !(s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE) {
32186                         break
32187                 }
32188                 v.reset(OpMove)
32189                 v.AuxInt = int64ToAuxInt(s - s%16)
32190                 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
32191                 v0.AuxInt = int64ToAuxInt(s % 16)
32192                 v0.AddArg(dst)
32193                 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
32194                 v1.AuxInt = int64ToAuxInt(s % 16)
32195                 v1.AddArg(src)
32196                 v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
32197                 v2.AuxInt = int32ToAuxInt(8)
32198                 v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
32199                 v3.AuxInt = int32ToAuxInt(8)
32200                 v3.AddArg2(src, mem)
32201                 v4 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
32202                 v5 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
32203                 v5.AddArg2(src, mem)
32204                 v4.AddArg3(dst, v5, mem)
32205                 v2.AddArg3(dst, v3, v4)
32206                 v.AddArg3(v0, v1, v2)
32207                 return true
32208         }
32209         // match: (Move [s] dst src mem)
32210         // cond: s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
32211         // result: (DUFFCOPY [s] dst src mem)
32212         for {
32213                 s := auxIntToInt64(v.AuxInt)
32214                 dst := v_0
32215                 src := v_1
32216                 mem := v_2
32217                 if !(s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
32218                         break
32219                 }
32220                 v.reset(OpAMD64DUFFCOPY)
32221                 v.AuxInt = int64ToAuxInt(s)
32222                 v.AddArg3(dst, src, mem)
32223                 return true
32224         }
32225         // match: (Move [s] dst src mem)
32226         // cond: (s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s)
32227         // result: (REPMOVSQ dst src (MOVQconst [s/8]) mem)
32228         for {
32229                 s := auxIntToInt64(v.AuxInt)
32230                 dst := v_0
32231                 src := v_1
32232                 mem := v_2
32233                 if !((s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s)) {
32234                         break
32235                 }
32236                 v.reset(OpAMD64REPMOVSQ)
32237                 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
32238                 v0.AuxInt = int64ToAuxInt(s / 8)
32239                 v.AddArg4(dst, src, v0, mem)
32240                 return true
32241         }
32242         return false
32243 }
// rewriteValueAMD64_OpNeg32F lowers the generic Neg32F op for AMD64.
// float32 negation is done by XOR-ing in the sign bit: the PXOR operand
// is the constant -0.0 (Copysign(0, -1)), whose only set bit is the sign bit.
func rewriteValueAMD64_OpNeg32F(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Neg32F x)
	// result: (PXOR x (MOVSSconst <typ.Float32> [float32(math.Copysign(0, -1))]))
	for {
		x := v_0
		v.reset(OpAMD64PXOR)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32)
		v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1)))
		v.AddArg2(x, v0)
		return true
	}
}
// rewriteValueAMD64_OpNeg64F lowers the generic Neg64F op for AMD64.
// float64 negation is done by XOR-ing in the sign bit via PXOR with -0.0.
func rewriteValueAMD64_OpNeg64F(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Neg64F x)
	// result: (PXOR x (MOVSDconst <typ.Float64> [math.Copysign(0, -1)]))
	for {
		x := v_0
		v.reset(OpAMD64PXOR)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64)
		v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1))
		v.AddArg2(x, v0)
		return true
	}
}
// rewriteValueAMD64_OpNeq16 lowers the generic 16-bit inequality op:
// compare the operands with a word compare and materialize the ZF-clear
// condition with SETNE.
func rewriteValueAMD64_OpNeq16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Neq16 x y)
	// result: (SETNE (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpNeq32 lowers the generic 32-bit inequality op
// to SETNE over a 32-bit compare (CMPL).
func rewriteValueAMD64_OpNeq32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Neq32 x y)
	// result: (SETNE (CMPL x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpNeq32F lowers the generic float32 inequality op.
// It uses the unordered compare UCOMISS and the SETNEF pseudo-op
// (rather than plain SETNE) so that NaN operands compare as unequal.
func rewriteValueAMD64_OpNeq32F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Neq32F x y)
	// result: (SETNEF (UCOMISS x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpNeq64 lowers the generic 64-bit inequality op
// to SETNE over a 64-bit compare (CMPQ).
func rewriteValueAMD64_OpNeq64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Neq64 x y)
	// result: (SETNE (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpNeq64F lowers the generic float64 inequality op.
// As with Neq32F, the unordered compare UCOMISD plus SETNEF makes NaN
// operands compare as unequal.
func rewriteValueAMD64_OpNeq64F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Neq64F x y)
	// result: (SETNEF (UCOMISD x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpNeq8 lowers the generic 8-bit inequality op
// to SETNE over a byte compare (CMPB).
func rewriteValueAMD64_OpNeq8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Neq8 x y)
	// result: (SETNE (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpNeqB lowers boolean inequality. Booleans are stored
// in a byte, so the lowering is identical to Neq8: SETNE over CMPB.
func rewriteValueAMD64_OpNeqB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (NeqB x y)
	// result: (SETNE (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpNeqPtr lowers pointer inequality. Pointers are
// 64-bit on AMD64, so the lowering is the same as Neq64: SETNE over CMPQ.
func rewriteValueAMD64_OpNeqPtr(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (NeqPtr x y)
	// result: (SETNE (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpNot lowers boolean negation. Booleans are 0 or 1,
// so XOR-ing with the constant 1 flips the value.
func rewriteValueAMD64_OpNot(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Not x)
	// result: (XORLconst [1] x)
	for {
		x := v_0
		v.reset(OpAMD64XORLconst)
		v.AuxInt = int32ToAuxInt(1)
		v.AddArg(x)
		return true
	}
}
// rewriteValueAMD64_OpOffPtr lowers pointer-offset arithmetic.
// When the 64-bit offset fits in a signed 32-bit immediate, a single
// ADDQconst suffices; otherwise the offset is materialized into a
// register with MOVQconst and added with ADDQ.
func rewriteValueAMD64_OpOffPtr(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (OffPtr [off] ptr)
	// cond: is32Bit(off)
	// result: (ADDQconst [int32(off)] ptr)
	for {
		off := auxIntToInt64(v.AuxInt)
		ptr := v_0
		if !(is32Bit(off)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = int32ToAuxInt(int32(off))
		v.AddArg(ptr)
		return true
	}
	// match: (OffPtr [off] ptr)
	// result: (ADDQ (MOVQconst [off]) ptr)
	for {
		off := auxIntToInt64(v.AuxInt)
		ptr := v_0
		v.reset(OpAMD64ADDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(off)
		v.AddArg2(v0, ptr)
		return true
	}
}
// rewriteValueAMD64_OpPanicBounds lowers bounds-check panics. The panic
// kind is mapped through boundsABI to one of three lowered ops (A/B/C),
// which differ only in which register pair carries the index/length
// arguments into the runtime panic helper. Returns false if no case
// matched (boundsABI outside 0..2).
func rewriteValueAMD64_OpPanicBounds(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (PanicBounds [kind] x y mem)
	// cond: boundsABI(kind) == 0
	// result: (LoweredPanicBoundsA [kind] x y mem)
	for {
		kind := auxIntToInt64(v.AuxInt)
		x := v_0
		y := v_1
		mem := v_2
		if !(boundsABI(kind) == 0) {
			break
		}
		v.reset(OpAMD64LoweredPanicBoundsA)
		v.AuxInt = int64ToAuxInt(kind)
		v.AddArg3(x, y, mem)
		return true
	}
	// match: (PanicBounds [kind] x y mem)
	// cond: boundsABI(kind) == 1
	// result: (LoweredPanicBoundsB [kind] x y mem)
	for {
		kind := auxIntToInt64(v.AuxInt)
		x := v_0
		y := v_1
		mem := v_2
		if !(boundsABI(kind) == 1) {
			break
		}
		v.reset(OpAMD64LoweredPanicBoundsB)
		v.AuxInt = int64ToAuxInt(kind)
		v.AddArg3(x, y, mem)
		return true
	}
	// match: (PanicBounds [kind] x y mem)
	// cond: boundsABI(kind) == 2
	// result: (LoweredPanicBoundsC [kind] x y mem)
	for {
		kind := auxIntToInt64(v.AuxInt)
		x := v_0
		y := v_1
		mem := v_2
		if !(boundsABI(kind) == 2) {
			break
		}
		v.reset(OpAMD64LoweredPanicBoundsC)
		v.AuxInt = int64ToAuxInt(kind)
		v.AddArg3(x, y, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpPopCount16 lowers 16-bit population count.
// There is no 16-bit POPCNT form used here; the operand is zero-extended
// to 32 bits (MOVWQZX) and counted with POPCNTL — the high bits are zero,
// so the count is unchanged.
func rewriteValueAMD64_OpPopCount16(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (PopCount16 x)
	// result: (POPCNTL (MOVWQZX <typ.UInt32> x))
	for {
		x := v_0
		v.reset(OpAMD64POPCNTL)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpPopCount8 lowers 8-bit population count, mirroring
// PopCount16: zero-extend the byte (MOVBQZX) and count with POPCNTL.
func rewriteValueAMD64_OpPopCount8(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (PopCount8 x)
	// result: (POPCNTL (MOVBQZX <typ.UInt32> x))
	for {
		x := v_0
		v.reset(OpAMD64POPCNTL)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpRoundToEven lowers RoundToEven to ROUNDSD with
// immediate 0 (SSE4.1 rounding-mode encoding; mode 0 is round-to-nearest,
// ties to even — NOTE(review): mode value per Intel SDM, confirm against
// the ROUNDSD immediate table).
func rewriteValueAMD64_OpRoundToEven(v *Value) bool {
	v_0 := v.Args[0]
	// match: (RoundToEven x)
	// result: (ROUNDSD [0] x)
	for {
		x := v_0
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = int8ToAuxInt(0)
		v.AddArg(x)
		return true
	}
}
// rewriteValueAMD64_OpRsh16Ux16 lowers an unsigned 16-bit right shift by a
// 16-bit amount. When the shift amount is not provably in range
// (!shiftIsBounded), the SHRW result is ANDed with a mask built from
// (CMPWconst y [16]) via SBBLcarrymask, which zeroes the result for
// shift amounts >= 16 to match Go's shift semantics. When the amount is
// known bounded, a bare SHRW suffices.
func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16Ux16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(16)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh16Ux16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh16Ux32 lowers an unsigned 16-bit right shift by a
// 32-bit amount. Same masking scheme as Rsh16Ux16, but the shift-amount
// compare uses CMPLconst to match the 32-bit width of y.
func rewriteValueAMD64_OpRsh16Ux32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16Ux32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(16)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh16Ux32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh16Ux64 lowers an unsigned 16-bit right shift by a
// 64-bit amount. Same masking scheme as Rsh16Ux16, with the shift-amount
// compare widened to CMPQconst.
func rewriteValueAMD64_OpRsh16Ux64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16Ux64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(16)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh16Ux64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh16Ux8 lowers an unsigned 16-bit right shift by an
// 8-bit amount. Same masking scheme as Rsh16Ux16, with the shift-amount
// compare narrowed to CMPBconst.
func rewriteValueAMD64_OpRsh16Ux8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16Ux8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(16)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh16Ux8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh16x16 lowers a signed 16-bit right shift by a
// 16-bit amount. For unbounded shifts, the amount is saturated rather
// than masked: ORL-ing y with NOTL(SBBLcarrymask(CMPWconst y [16]))
// leaves y unchanged when y < 16 and forces an all-ones (out-of-range)
// amount otherwise, so SARW fills with the sign bit for large shifts,
// matching Go's signed-shift semantics.
func rewriteValueAMD64_OpRsh16x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = int16ToAuxInt(16)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh16x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SARW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh16x32 lowers a signed 16-bit right shift by a
// 32-bit amount. Same saturating scheme as Rsh16x16, with the shift-amount
// compare widened to CMPLconst.
func rewriteValueAMD64_OpRsh16x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(16)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh16x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SARW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh16x64 lowers a signed 16-bit right shift by a
// 64-bit amount. Same saturating scheme as Rsh16x16, but the OR/NOT/mask
// chain uses the 64-bit ops (ORQ/NOTQ/SBBQcarrymask) and CMPQconst to
// match the 64-bit width of y.
func rewriteValueAMD64_OpRsh16x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(16)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh16x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SARW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh16x8 lowers a signed 16-bit right shift by an
// 8-bit amount. Same saturating scheme as Rsh16x16, with the shift-amount
// compare narrowed to CMPBconst.
func rewriteValueAMD64_OpRsh16x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = int8ToAuxInt(16)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh16x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SARW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
32872 func rewriteValueAMD64_OpRsh32Ux16(v *Value) bool {
32873         v_1 := v.Args[1]
32874         v_0 := v.Args[0]
32875         b := v.Block
32876         // match: (Rsh32Ux16 <t> x y)
32877         // cond: !shiftIsBounded(v)
32878         // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
32879         for {
32880                 t := v.Type
32881                 x := v_0
32882                 y := v_1
32883                 if !(!shiftIsBounded(v)) {
32884                         break
32885                 }
32886                 v.reset(OpAMD64ANDL)
32887                 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
32888                 v0.AddArg2(x, y)
32889                 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
32890                 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
32891                 v2.AuxInt = int16ToAuxInt(32)
32892                 v2.AddArg(y)
32893                 v1.AddArg(v2)
32894                 v.AddArg2(v0, v1)
32895                 return true
32896         }
32897         // match: (Rsh32Ux16 x y)
32898         // cond: shiftIsBounded(v)
32899         // result: (SHRL x y)
32900         for {
32901                 x := v_0
32902                 y := v_1
32903                 if !(shiftIsBounded(v)) {
32904                         break
32905                 }
32906                 v.reset(OpAMD64SHRL)
32907                 v.AddArg2(x, y)
32908                 return true
32909         }
32910         return false
32911 }
32912 func rewriteValueAMD64_OpRsh32Ux32(v *Value) bool {
32913         v_1 := v.Args[1]
32914         v_0 := v.Args[0]
32915         b := v.Block
32916         // match: (Rsh32Ux32 <t> x y)
32917         // cond: !shiftIsBounded(v)
32918         // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
32919         for {
32920                 t := v.Type
32921                 x := v_0
32922                 y := v_1
32923                 if !(!shiftIsBounded(v)) {
32924                         break
32925                 }
32926                 v.reset(OpAMD64ANDL)
32927                 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
32928                 v0.AddArg2(x, y)
32929                 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
32930                 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
32931                 v2.AuxInt = int32ToAuxInt(32)
32932                 v2.AddArg(y)
32933                 v1.AddArg(v2)
32934                 v.AddArg2(v0, v1)
32935                 return true
32936         }
32937         // match: (Rsh32Ux32 x y)
32938         // cond: shiftIsBounded(v)
32939         // result: (SHRL x y)
32940         for {
32941                 x := v_0
32942                 y := v_1
32943                 if !(shiftIsBounded(v)) {
32944                         break
32945                 }
32946                 v.reset(OpAMD64SHRL)
32947                 v.AddArg2(x, y)
32948                 return true
32949         }
32950         return false
32951 }
32952 func rewriteValueAMD64_OpRsh32Ux64(v *Value) bool {
32953         v_1 := v.Args[1]
32954         v_0 := v.Args[0]
32955         b := v.Block
32956         // match: (Rsh32Ux64 <t> x y)
32957         // cond: !shiftIsBounded(v)
32958         // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
32959         for {
32960                 t := v.Type
32961                 x := v_0
32962                 y := v_1
32963                 if !(!shiftIsBounded(v)) {
32964                         break
32965                 }
32966                 v.reset(OpAMD64ANDL)
32967                 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
32968                 v0.AddArg2(x, y)
32969                 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
32970                 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
32971                 v2.AuxInt = int32ToAuxInt(32)
32972                 v2.AddArg(y)
32973                 v1.AddArg(v2)
32974                 v.AddArg2(v0, v1)
32975                 return true
32976         }
32977         // match: (Rsh32Ux64 x y)
32978         // cond: shiftIsBounded(v)
32979         // result: (SHRL x y)
32980         for {
32981                 x := v_0
32982                 y := v_1
32983                 if !(shiftIsBounded(v)) {
32984                         break
32985                 }
32986                 v.reset(OpAMD64SHRL)
32987                 v.AddArg2(x, y)
32988                 return true
32989         }
32990         return false
32991 }
32992 func rewriteValueAMD64_OpRsh32Ux8(v *Value) bool {
32993         v_1 := v.Args[1]
32994         v_0 := v.Args[0]
32995         b := v.Block
32996         // match: (Rsh32Ux8 <t> x y)
32997         // cond: !shiftIsBounded(v)
32998         // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
32999         for {
33000                 t := v.Type
33001                 x := v_0
33002                 y := v_1
33003                 if !(!shiftIsBounded(v)) {
33004                         break
33005                 }
33006                 v.reset(OpAMD64ANDL)
33007                 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
33008                 v0.AddArg2(x, y)
33009                 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
33010                 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
33011                 v2.AuxInt = int8ToAuxInt(32)
33012                 v2.AddArg(y)
33013                 v1.AddArg(v2)
33014                 v.AddArg2(v0, v1)
33015                 return true
33016         }
33017         // match: (Rsh32Ux8 x y)
33018         // cond: shiftIsBounded(v)
33019         // result: (SHRL x y)
33020         for {
33021                 x := v_0
33022                 y := v_1
33023                 if !(shiftIsBounded(v)) {
33024                         break
33025                 }
33026                 v.reset(OpAMD64SHRL)
33027                 v.AddArg2(x, y)
33028                 return true
33029         }
33030         return false
33031 }
33032 func rewriteValueAMD64_OpRsh32x16(v *Value) bool {
33033         v_1 := v.Args[1]
33034         v_0 := v.Args[0]
33035         b := v.Block
33036         // match: (Rsh32x16 <t> x y)
33037         // cond: !shiftIsBounded(v)
33038         // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
33039         for {
33040                 t := v.Type
33041                 x := v_0
33042                 y := v_1
33043                 if !(!shiftIsBounded(v)) {
33044                         break
33045                 }
33046                 v.reset(OpAMD64SARL)
33047                 v.Type = t
33048                 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
33049                 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
33050                 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
33051                 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
33052                 v3.AuxInt = int16ToAuxInt(32)
33053                 v3.AddArg(y)
33054                 v2.AddArg(v3)
33055                 v1.AddArg(v2)
33056                 v0.AddArg2(y, v1)
33057                 v.AddArg2(x, v0)
33058                 return true
33059         }
33060         // match: (Rsh32x16 x y)
33061         // cond: shiftIsBounded(v)
33062         // result: (SARL x y)
33063         for {
33064                 x := v_0
33065                 y := v_1
33066                 if !(shiftIsBounded(v)) {
33067                         break
33068                 }
33069                 v.reset(OpAMD64SARL)
33070                 v.AddArg2(x, y)
33071                 return true
33072         }
33073         return false
33074 }
33075 func rewriteValueAMD64_OpRsh32x32(v *Value) bool {
33076         v_1 := v.Args[1]
33077         v_0 := v.Args[0]
33078         b := v.Block
33079         // match: (Rsh32x32 <t> x y)
33080         // cond: !shiftIsBounded(v)
33081         // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
33082         for {
33083                 t := v.Type
33084                 x := v_0
33085                 y := v_1
33086                 if !(!shiftIsBounded(v)) {
33087                         break
33088                 }
33089                 v.reset(OpAMD64SARL)
33090                 v.Type = t
33091                 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
33092                 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
33093                 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
33094                 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
33095                 v3.AuxInt = int32ToAuxInt(32)
33096                 v3.AddArg(y)
33097                 v2.AddArg(v3)
33098                 v1.AddArg(v2)
33099                 v0.AddArg2(y, v1)
33100                 v.AddArg2(x, v0)
33101                 return true
33102         }
33103         // match: (Rsh32x32 x y)
33104         // cond: shiftIsBounded(v)
33105         // result: (SARL x y)
33106         for {
33107                 x := v_0
33108                 y := v_1
33109                 if !(shiftIsBounded(v)) {
33110                         break
33111                 }
33112                 v.reset(OpAMD64SARL)
33113                 v.AddArg2(x, y)
33114                 return true
33115         }
33116         return false
33117 }
33118 func rewriteValueAMD64_OpRsh32x64(v *Value) bool {
33119         v_1 := v.Args[1]
33120         v_0 := v.Args[0]
33121         b := v.Block
33122         // match: (Rsh32x64 <t> x y)
33123         // cond: !shiftIsBounded(v)
33124         // result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
33125         for {
33126                 t := v.Type
33127                 x := v_0
33128                 y := v_1
33129                 if !(!shiftIsBounded(v)) {
33130                         break
33131                 }
33132                 v.reset(OpAMD64SARL)
33133                 v.Type = t
33134                 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
33135                 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
33136                 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
33137                 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
33138                 v3.AuxInt = int32ToAuxInt(32)
33139                 v3.AddArg(y)
33140                 v2.AddArg(v3)
33141                 v1.AddArg(v2)
33142                 v0.AddArg2(y, v1)
33143                 v.AddArg2(x, v0)
33144                 return true
33145         }
33146         // match: (Rsh32x64 x y)
33147         // cond: shiftIsBounded(v)
33148         // result: (SARL x y)
33149         for {
33150                 x := v_0
33151                 y := v_1
33152                 if !(shiftIsBounded(v)) {
33153                         break
33154                 }
33155                 v.reset(OpAMD64SARL)
33156                 v.AddArg2(x, y)
33157                 return true
33158         }
33159         return false
33160 }
33161 func rewriteValueAMD64_OpRsh32x8(v *Value) bool {
33162         v_1 := v.Args[1]
33163         v_0 := v.Args[0]
33164         b := v.Block
33165         // match: (Rsh32x8 <t> x y)
33166         // cond: !shiftIsBounded(v)
33167         // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
33168         for {
33169                 t := v.Type
33170                 x := v_0
33171                 y := v_1
33172                 if !(!shiftIsBounded(v)) {
33173                         break
33174                 }
33175                 v.reset(OpAMD64SARL)
33176                 v.Type = t
33177                 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
33178                 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
33179                 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
33180                 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
33181                 v3.AuxInt = int8ToAuxInt(32)
33182                 v3.AddArg(y)
33183                 v2.AddArg(v3)
33184                 v1.AddArg(v2)
33185                 v0.AddArg2(y, v1)
33186                 v.AddArg2(x, v0)
33187                 return true
33188         }
33189         // match: (Rsh32x8 x y)
33190         // cond: shiftIsBounded(v)
33191         // result: (SARL x y)
33192         for {
33193                 x := v_0
33194                 y := v_1
33195                 if !(shiftIsBounded(v)) {
33196                         break
33197                 }
33198                 v.reset(OpAMD64SARL)
33199                 v.AddArg2(x, y)
33200                 return true
33201         }
33202         return false
33203 }
33204 func rewriteValueAMD64_OpRsh64Ux16(v *Value) bool {
33205         v_1 := v.Args[1]
33206         v_0 := v.Args[0]
33207         b := v.Block
33208         // match: (Rsh64Ux16 <t> x y)
33209         // cond: !shiftIsBounded(v)
33210         // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
33211         for {
33212                 t := v.Type
33213                 x := v_0
33214                 y := v_1
33215                 if !(!shiftIsBounded(v)) {
33216                         break
33217                 }
33218                 v.reset(OpAMD64ANDQ)
33219                 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
33220                 v0.AddArg2(x, y)
33221                 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
33222                 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
33223                 v2.AuxInt = int16ToAuxInt(64)
33224                 v2.AddArg(y)
33225                 v1.AddArg(v2)
33226                 v.AddArg2(v0, v1)
33227                 return true
33228         }
33229         // match: (Rsh64Ux16 x y)
33230         // cond: shiftIsBounded(v)
33231         // result: (SHRQ x y)
33232         for {
33233                 x := v_0
33234                 y := v_1
33235                 if !(shiftIsBounded(v)) {
33236                         break
33237                 }
33238                 v.reset(OpAMD64SHRQ)
33239                 v.AddArg2(x, y)
33240                 return true
33241         }
33242         return false
33243 }
33244 func rewriteValueAMD64_OpRsh64Ux32(v *Value) bool {
33245         v_1 := v.Args[1]
33246         v_0 := v.Args[0]
33247         b := v.Block
33248         // match: (Rsh64Ux32 <t> x y)
33249         // cond: !shiftIsBounded(v)
33250         // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
33251         for {
33252                 t := v.Type
33253                 x := v_0
33254                 y := v_1
33255                 if !(!shiftIsBounded(v)) {
33256                         break
33257                 }
33258                 v.reset(OpAMD64ANDQ)
33259                 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
33260                 v0.AddArg2(x, y)
33261                 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
33262                 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
33263                 v2.AuxInt = int32ToAuxInt(64)
33264                 v2.AddArg(y)
33265                 v1.AddArg(v2)
33266                 v.AddArg2(v0, v1)
33267                 return true
33268         }
33269         // match: (Rsh64Ux32 x y)
33270         // cond: shiftIsBounded(v)
33271         // result: (SHRQ x y)
33272         for {
33273                 x := v_0
33274                 y := v_1
33275                 if !(shiftIsBounded(v)) {
33276                         break
33277                 }
33278                 v.reset(OpAMD64SHRQ)
33279                 v.AddArg2(x, y)
33280                 return true
33281         }
33282         return false
33283 }
33284 func rewriteValueAMD64_OpRsh64Ux64(v *Value) bool {
33285         v_1 := v.Args[1]
33286         v_0 := v.Args[0]
33287         b := v.Block
33288         // match: (Rsh64Ux64 <t> x y)
33289         // cond: !shiftIsBounded(v)
33290         // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
33291         for {
33292                 t := v.Type
33293                 x := v_0
33294                 y := v_1
33295                 if !(!shiftIsBounded(v)) {
33296                         break
33297                 }
33298                 v.reset(OpAMD64ANDQ)
33299                 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
33300                 v0.AddArg2(x, y)
33301                 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
33302                 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
33303                 v2.AuxInt = int32ToAuxInt(64)
33304                 v2.AddArg(y)
33305                 v1.AddArg(v2)
33306                 v.AddArg2(v0, v1)
33307                 return true
33308         }
33309         // match: (Rsh64Ux64 x y)
33310         // cond: shiftIsBounded(v)
33311         // result: (SHRQ x y)
33312         for {
33313                 x := v_0
33314                 y := v_1
33315                 if !(shiftIsBounded(v)) {
33316                         break
33317                 }
33318                 v.reset(OpAMD64SHRQ)
33319                 v.AddArg2(x, y)
33320                 return true
33321         }
33322         return false
33323 }
33324 func rewriteValueAMD64_OpRsh64Ux8(v *Value) bool {
33325         v_1 := v.Args[1]
33326         v_0 := v.Args[0]
33327         b := v.Block
33328         // match: (Rsh64Ux8 <t> x y)
33329         // cond: !shiftIsBounded(v)
33330         // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
33331         for {
33332                 t := v.Type
33333                 x := v_0
33334                 y := v_1
33335                 if !(!shiftIsBounded(v)) {
33336                         break
33337                 }
33338                 v.reset(OpAMD64ANDQ)
33339                 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
33340                 v0.AddArg2(x, y)
33341                 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
33342                 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
33343                 v2.AuxInt = int8ToAuxInt(64)
33344                 v2.AddArg(y)
33345                 v1.AddArg(v2)
33346                 v.AddArg2(v0, v1)
33347                 return true
33348         }
33349         // match: (Rsh64Ux8 x y)
33350         // cond: shiftIsBounded(v)
33351         // result: (SHRQ x y)
33352         for {
33353                 x := v_0
33354                 y := v_1
33355                 if !(shiftIsBounded(v)) {
33356                         break
33357                 }
33358                 v.reset(OpAMD64SHRQ)
33359                 v.AddArg2(x, y)
33360                 return true
33361         }
33362         return false
33363 }
33364 func rewriteValueAMD64_OpRsh64x16(v *Value) bool {
33365         v_1 := v.Args[1]
33366         v_0 := v.Args[0]
33367         b := v.Block
33368         // match: (Rsh64x16 <t> x y)
33369         // cond: !shiftIsBounded(v)
33370         // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
33371         for {
33372                 t := v.Type
33373                 x := v_0
33374                 y := v_1
33375                 if !(!shiftIsBounded(v)) {
33376                         break
33377                 }
33378                 v.reset(OpAMD64SARQ)
33379                 v.Type = t
33380                 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
33381                 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
33382                 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
33383                 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
33384                 v3.AuxInt = int16ToAuxInt(64)
33385                 v3.AddArg(y)
33386                 v2.AddArg(v3)
33387                 v1.AddArg(v2)
33388                 v0.AddArg2(y, v1)
33389                 v.AddArg2(x, v0)
33390                 return true
33391         }
33392         // match: (Rsh64x16 x y)
33393         // cond: shiftIsBounded(v)
33394         // result: (SARQ x y)
33395         for {
33396                 x := v_0
33397                 y := v_1
33398                 if !(shiftIsBounded(v)) {
33399                         break
33400                 }
33401                 v.reset(OpAMD64SARQ)
33402                 v.AddArg2(x, y)
33403                 return true
33404         }
33405         return false
33406 }
33407 func rewriteValueAMD64_OpRsh64x32(v *Value) bool {
33408         v_1 := v.Args[1]
33409         v_0 := v.Args[0]
33410         b := v.Block
33411         // match: (Rsh64x32 <t> x y)
33412         // cond: !shiftIsBounded(v)
33413         // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
33414         for {
33415                 t := v.Type
33416                 x := v_0
33417                 y := v_1
33418                 if !(!shiftIsBounded(v)) {
33419                         break
33420                 }
33421                 v.reset(OpAMD64SARQ)
33422                 v.Type = t
33423                 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
33424                 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
33425                 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
33426                 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
33427                 v3.AuxInt = int32ToAuxInt(64)
33428                 v3.AddArg(y)
33429                 v2.AddArg(v3)
33430                 v1.AddArg(v2)
33431                 v0.AddArg2(y, v1)
33432                 v.AddArg2(x, v0)
33433                 return true
33434         }
33435         // match: (Rsh64x32 x y)
33436         // cond: shiftIsBounded(v)
33437         // result: (SARQ x y)
33438         for {
33439                 x := v_0
33440                 y := v_1
33441                 if !(shiftIsBounded(v)) {
33442                         break
33443                 }
33444                 v.reset(OpAMD64SARQ)
33445                 v.AddArg2(x, y)
33446                 return true
33447         }
33448         return false
33449 }
33450 func rewriteValueAMD64_OpRsh64x64(v *Value) bool {
33451         v_1 := v.Args[1]
33452         v_0 := v.Args[0]
33453         b := v.Block
33454         // match: (Rsh64x64 <t> x y)
33455         // cond: !shiftIsBounded(v)
33456         // result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
33457         for {
33458                 t := v.Type
33459                 x := v_0
33460                 y := v_1
33461                 if !(!shiftIsBounded(v)) {
33462                         break
33463                 }
33464                 v.reset(OpAMD64SARQ)
33465                 v.Type = t
33466                 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
33467                 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
33468                 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
33469                 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
33470                 v3.AuxInt = int32ToAuxInt(64)
33471                 v3.AddArg(y)
33472                 v2.AddArg(v3)
33473                 v1.AddArg(v2)
33474                 v0.AddArg2(y, v1)
33475                 v.AddArg2(x, v0)
33476                 return true
33477         }
33478         // match: (Rsh64x64 x y)
33479         // cond: shiftIsBounded(v)
33480         // result: (SARQ x y)
33481         for {
33482                 x := v_0
33483                 y := v_1
33484                 if !(shiftIsBounded(v)) {
33485                         break
33486                 }
33487                 v.reset(OpAMD64SARQ)
33488                 v.AddArg2(x, y)
33489                 return true
33490         }
33491         return false
33492 }
33493 func rewriteValueAMD64_OpRsh64x8(v *Value) bool {
33494         v_1 := v.Args[1]
33495         v_0 := v.Args[0]
33496         b := v.Block
33497         // match: (Rsh64x8 <t> x y)
33498         // cond: !shiftIsBounded(v)
33499         // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
33500         for {
33501                 t := v.Type
33502                 x := v_0
33503                 y := v_1
33504                 if !(!shiftIsBounded(v)) {
33505                         break
33506                 }
33507                 v.reset(OpAMD64SARQ)
33508                 v.Type = t
33509                 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
33510                 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
33511                 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
33512                 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
33513                 v3.AuxInt = int8ToAuxInt(64)
33514                 v3.AddArg(y)
33515                 v2.AddArg(v3)
33516                 v1.AddArg(v2)
33517                 v0.AddArg2(y, v1)
33518                 v.AddArg2(x, v0)
33519                 return true
33520         }
33521         // match: (Rsh64x8 x y)
33522         // cond: shiftIsBounded(v)
33523         // result: (SARQ x y)
33524         for {
33525                 x := v_0
33526                 y := v_1
33527                 if !(shiftIsBounded(v)) {
33528                         break
33529                 }
33530                 v.reset(OpAMD64SARQ)
33531                 v.AddArg2(x, y)
33532                 return true
33533         }
33534         return false
33535 }
33536 func rewriteValueAMD64_OpRsh8Ux16(v *Value) bool {
33537         v_1 := v.Args[1]
33538         v_0 := v.Args[0]
33539         b := v.Block
33540         // match: (Rsh8Ux16 <t> x y)
33541         // cond: !shiftIsBounded(v)
33542         // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
33543         for {
33544                 t := v.Type
33545                 x := v_0
33546                 y := v_1
33547                 if !(!shiftIsBounded(v)) {
33548                         break
33549                 }
33550                 v.reset(OpAMD64ANDL)
33551                 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
33552                 v0.AddArg2(x, y)
33553                 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
33554                 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
33555                 v2.AuxInt = int16ToAuxInt(8)
33556                 v2.AddArg(y)
33557                 v1.AddArg(v2)
33558                 v.AddArg2(v0, v1)
33559                 return true
33560         }
33561         // match: (Rsh8Ux16 x y)
33562         // cond: shiftIsBounded(v)
33563         // result: (SHRB x y)
33564         for {
33565                 x := v_0
33566                 y := v_1
33567                 if !(shiftIsBounded(v)) {
33568                         break
33569                 }
33570                 v.reset(OpAMD64SHRB)
33571                 v.AddArg2(x, y)
33572                 return true
33573         }
33574         return false
33575 }
33576 func rewriteValueAMD64_OpRsh8Ux32(v *Value) bool {
33577         v_1 := v.Args[1]
33578         v_0 := v.Args[0]
33579         b := v.Block
33580         // match: (Rsh8Ux32 <t> x y)
33581         // cond: !shiftIsBounded(v)
33582         // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
33583         for {
33584                 t := v.Type
33585                 x := v_0
33586                 y := v_1
33587                 if !(!shiftIsBounded(v)) {
33588                         break
33589                 }
33590                 v.reset(OpAMD64ANDL)
33591                 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
33592                 v0.AddArg2(x, y)
33593                 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
33594                 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
33595                 v2.AuxInt = int32ToAuxInt(8)
33596                 v2.AddArg(y)
33597                 v1.AddArg(v2)
33598                 v.AddArg2(v0, v1)
33599                 return true
33600         }
33601         // match: (Rsh8Ux32 x y)
33602         // cond: shiftIsBounded(v)
33603         // result: (SHRB x y)
33604         for {
33605                 x := v_0
33606                 y := v_1
33607                 if !(shiftIsBounded(v)) {
33608                         break
33609                 }
33610                 v.reset(OpAMD64SHRB)
33611                 v.AddArg2(x, y)
33612                 return true
33613         }
33614         return false
33615 }
33616 func rewriteValueAMD64_OpRsh8Ux64(v *Value) bool {
33617         v_1 := v.Args[1]
33618         v_0 := v.Args[0]
33619         b := v.Block
33620         // match: (Rsh8Ux64 <t> x y)
33621         // cond: !shiftIsBounded(v)
33622         // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
33623         for {
33624                 t := v.Type
33625                 x := v_0
33626                 y := v_1
33627                 if !(!shiftIsBounded(v)) {
33628                         break
33629                 }
33630                 v.reset(OpAMD64ANDL)
33631                 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
33632                 v0.AddArg2(x, y)
33633                 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
33634                 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
33635                 v2.AuxInt = int32ToAuxInt(8)
33636                 v2.AddArg(y)
33637                 v1.AddArg(v2)
33638                 v.AddArg2(v0, v1)
33639                 return true
33640         }
33641         // match: (Rsh8Ux64 x y)
33642         // cond: shiftIsBounded(v)
33643         // result: (SHRB x y)
33644         for {
33645                 x := v_0
33646                 y := v_1
33647                 if !(shiftIsBounded(v)) {
33648                         break
33649                 }
33650                 v.reset(OpAMD64SHRB)
33651                 v.AddArg2(x, y)
33652                 return true
33653         }
33654         return false
33655 }
// rewriteValueAMD64_OpRsh8Ux8 lowers the generic Rsh8Ux8 op (unsigned right
// shift of an 8-bit value by an 8-bit count) to AMD64 ops. Each for loop
// below is a single-pass rule attempt: break abandons the rule, return true
// commits it. When the shift count may be out of range (!shiftIsBounded),
// the SHRB result is ANDed with an SBBLcarrymask derived from
// (CMPBconst y [8]); when the count is known in range, a bare SHRB is used.
func rewriteValueAMD64_OpRsh8Ux8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8Ux8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(8)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh8Ux8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh8x16 lowers the generic Rsh8x16 op (signed right
// shift of an 8-bit value by a 16-bit count) to AMD64 ops. For a possibly
// out-of-range count the shift amount fed to SARB is built from
// (ORL y (NOTL (SBBLcarrymask (CMPWconst y [8])))); for a bounded count a
// bare SARB is emitted.
func rewriteValueAMD64_OpRsh8x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = int16ToAuxInt(8)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh8x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SARB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh8x32 lowers the generic Rsh8x32 op (signed right
// shift of an 8-bit value by a 32-bit count) to AMD64 ops. Identical in
// shape to Rsh8x16 except the count comparison uses CMPLconst (32-bit).
func rewriteValueAMD64_OpRsh8x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(8)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh8x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SARB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh8x64 lowers the generic Rsh8x64 op (signed right
// shift of an 8-bit value by a 64-bit count) to AMD64 ops. The unbounded
// case uses the 64-bit ORQ/NOTQ/SBBQcarrymask/CMPQconst combination to
// build the shift amount; note CMPQconst carries an int32 AuxInt.
func rewriteValueAMD64_OpRsh8x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(8)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh8x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SARB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh8x8 lowers the generic Rsh8x8 op (signed right
// shift of an 8-bit value by an 8-bit count) to AMD64 ops. Identical in
// shape to Rsh8x16/Rsh8x32 except the count comparison uses CMPBconst.
func rewriteValueAMD64_OpRsh8x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = int8ToAuxInt(8)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh8x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SARB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpSelect0 rewrites Select0 (the first result of a
// multi-valued op). Mul64uover/Mul32uover become a Select0 of the
// tuple-producing MULQU/MULLU; Add64carry/Sub64borrow become a Select0 of
// ADCQ/SBBQ whose carry input is reconstructed via (Select1 (NEGLflags c));
// Select0 of AddTupleFirst32/64 becomes an ADDL/ADDQ of the added value
// and the tuple's own first element. Each for loop is a one-shot rule
// attempt (break = rule does not apply).
func rewriteValueAMD64_OpSelect0(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Select0 (Mul64uover x y))
	// result: (Select0 <typ.UInt64> (MULQU x y))
	for {
		if v_0.Op != OpMul64uover {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpSelect0)
		v.Type = typ.UInt64
		v0 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
	// match: (Select0 (Mul32uover x y))
	// result: (Select0 <typ.UInt32> (MULLU x y))
	for {
		if v_0.Op != OpMul32uover {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpSelect0)
		v.Type = typ.UInt32
		v0 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
	// match: (Select0 (Add64carry x y c))
	// result: (Select0 <typ.UInt64> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
	for {
		if v_0.Op != OpAdd64carry {
			break
		}
		c := v_0.Args[2]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpSelect0)
		v.Type = typ.UInt64
		v0 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
		v2.AddArg(c)
		v1.AddArg(v2)
		v0.AddArg3(x, y, v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select0 (Sub64borrow x y c))
	// result: (Select0 <typ.UInt64> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
	for {
		if v_0.Op != OpSub64borrow {
			break
		}
		c := v_0.Args[2]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpSelect0)
		v.Type = typ.UInt64
		v0 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
		v2.AddArg(c)
		v1.AddArg(v2)
		v0.AddArg3(x, y, v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select0 <t> (AddTupleFirst32 val tuple))
	// result: (ADDL val (Select0 <t> tuple))
	for {
		t := v.Type
		if v_0.Op != OpAMD64AddTupleFirst32 {
			break
		}
		tuple := v_0.Args[1]
		val := v_0.Args[0]
		v.reset(OpAMD64ADDL)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v0.AddArg(tuple)
		v.AddArg2(val, v0)
		return true
	}
	// match: (Select0 <t> (AddTupleFirst64 val tuple))
	// result: (ADDQ val (Select0 <t> tuple))
	for {
		t := v.Type
		if v_0.Op != OpAMD64AddTupleFirst64 {
			break
		}
		tuple := v_0.Args[1]
		val := v_0.Args[0]
		v.reset(OpAMD64ADDQ)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v0.AddArg(tuple)
		v.AddArg2(val, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpSelect1 rewrites Select1 (the second result of a
// multi-valued op). The overflow bit of Mul64uover/Mul32uover becomes a
// SETO of the MULQU/MULLU flags; the carry/borrow output of
// Add64carry/Sub64borrow is materialized as a 0/1 value via
// NEGQ(SBBQcarrymask(flags)); two NEGLflags patterns are simplified
// directly; and Select1 of AddTupleFirst32/64 passes through to the
// underlying tuple (the "first" adjustment only affects Select0).
func rewriteValueAMD64_OpSelect1(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Select1 (Mul64uover x y))
	// result: (SETO (Select1 <types.TypeFlags> (MULQU x y)))
	for {
		if v_0.Op != OpMul64uover {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64SETO)
		v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v1 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1.AddArg2(x, y)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select1 (Mul32uover x y))
	// result: (SETO (Select1 <types.TypeFlags> (MULLU x y)))
	for {
		if v_0.Op != OpMul32uover {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64SETO)
		v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v1 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
		v1.AddArg2(x, y)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select1 (Add64carry x y c))
	// result: (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
	for {
		if v_0.Op != OpAdd64carry {
			break
		}
		c := v_0.Args[2]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64NEGQ)
		v.Type = typ.UInt64
		v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64)
		v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v2 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
		v4.AddArg(c)
		v3.AddArg(v4)
		v2.AddArg3(x, y, v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select1 (Sub64borrow x y c))
	// result: (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
	for {
		if v_0.Op != OpSub64borrow {
			break
		}
		c := v_0.Args[2]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64NEGQ)
		v.Type = typ.UInt64
		v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64)
		v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
		v4.AddArg(c)
		v3.AddArg(v4)
		v2.AddArg3(x, y, v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select1 (NEGLflags (MOVQconst [0])))
	// result: (FlagEQ)
	for {
		if v_0.Op != OpAMD64NEGLflags {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 0 {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (Select1 (NEGLflags (NEGQ (SBBQcarrymask x))))
	// result: x
	for {
		if v_0.Op != OpAMD64NEGLflags {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		x := v_0_0_0.Args[0]
		v.copyOf(x)
		return true
	}
	// match: (Select1 (AddTupleFirst32 _ tuple))
	// result: (Select1 tuple)
	for {
		if v_0.Op != OpAMD64AddTupleFirst32 {
			break
		}
		tuple := v_0.Args[1]
		v.reset(OpSelect1)
		v.AddArg(tuple)
		return true
	}
	// match: (Select1 (AddTupleFirst64 _ tuple))
	// result: (Select1 tuple)
	for {
		if v_0.Op != OpAMD64AddTupleFirst64 {
			break
		}
		tuple := v_0.Args[1]
		v.reset(OpSelect1)
		v.AddArg(tuple)
		return true
	}
	return false
}
34113 func rewriteValueAMD64_OpSlicemask(v *Value) bool {
34114         v_0 := v.Args[0]
34115         b := v.Block
34116         // match: (Slicemask <t> x)
34117         // result: (SARQconst (NEGQ <t> x) [63])
34118         for {
34119                 t := v.Type
34120                 x := v_0
34121                 v.reset(OpAMD64SARQconst)
34122                 v.AuxInt = int8ToAuxInt(63)
34123                 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
34124                 v0.AddArg(x)
34125                 v.AddArg(v0)
34126                 return true
34127         }
34128 }
34129 func rewriteValueAMD64_OpSpectreIndex(v *Value) bool {
34130         v_1 := v.Args[1]
34131         v_0 := v.Args[0]
34132         b := v.Block
34133         typ := &b.Func.Config.Types
34134         // match: (SpectreIndex <t> x y)
34135         // result: (CMOVQCC x (MOVQconst [0]) (CMPQ x y))
34136         for {
34137                 x := v_0
34138                 y := v_1
34139                 v.reset(OpAMD64CMOVQCC)
34140                 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
34141                 v0.AuxInt = int64ToAuxInt(0)
34142                 v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
34143                 v1.AddArg2(x, y)
34144                 v.AddArg3(x, v0, v1)
34145                 return true
34146         }
34147 }
34148 func rewriteValueAMD64_OpSpectreSliceIndex(v *Value) bool {
34149         v_1 := v.Args[1]
34150         v_0 := v.Args[0]
34151         b := v.Block
34152         typ := &b.Func.Config.Types
34153         // match: (SpectreSliceIndex <t> x y)
34154         // result: (CMOVQHI x (MOVQconst [0]) (CMPQ x y))
34155         for {
34156                 x := v_0
34157                 y := v_1
34158                 v.reset(OpAMD64CMOVQHI)
34159                 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
34160                 v0.AuxInt = int64ToAuxInt(0)
34161                 v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
34162                 v1.AddArg2(x, y)
34163                 v.AddArg3(x, v0, v1)
34164                 return true
34165         }
34166 }
// rewriteValueAMD64_OpStore lowers a generic Store {t} to the AMD64 move
// instruction matching the stored type's size: MOVSDstore/MOVSSstore for
// 8/4-byte floating-point values, then MOVQstore/MOVLstore/MOVWstore/
// MOVBstore for sizes 8/4/2/1. The float rules are tried first, so an
// 8-byte float never falls through to MOVQstore. Returns false when no
// size rule matches (t.Size() not in {1,2,4,8}).
func rewriteValueAMD64_OpStore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (Store {t} ptr val mem)
	// cond: t.Size() == 8 && is64BitFloat(val.Type)
	// result: (MOVSDstore ptr val mem)
	for {
		t := auxToType(v.Aux)
		ptr := v_0
		val := v_1
		mem := v_2
		if !(t.Size() == 8 && is64BitFloat(val.Type)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.Size() == 4 && is32BitFloat(val.Type)
	// result: (MOVSSstore ptr val mem)
	for {
		t := auxToType(v.Aux)
		ptr := v_0
		val := v_1
		mem := v_2
		if !(t.Size() == 4 && is32BitFloat(val.Type)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.Size() == 8
	// result: (MOVQstore ptr val mem)
	for {
		t := auxToType(v.Aux)
		ptr := v_0
		val := v_1
		mem := v_2
		if !(t.Size() == 8) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.Size() == 4
	// result: (MOVLstore ptr val mem)
	for {
		t := auxToType(v.Aux)
		ptr := v_0
		val := v_1
		mem := v_2
		if !(t.Size() == 4) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.Size() == 2
	// result: (MOVWstore ptr val mem)
	for {
		t := auxToType(v.Aux)
		ptr := v_0
		val := v_1
		mem := v_2
		if !(t.Size() == 2) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.Size() == 1
	// result: (MOVBstore ptr val mem)
	for {
		t := auxToType(v.Aux)
		ptr := v_0
		val := v_1
		mem := v_2
		if !(t.Size() == 1) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AddArg3(ptr, val, mem)
		return true
	}
	return false
}
34263 func rewriteValueAMD64_OpTrunc(v *Value) bool {
34264         v_0 := v.Args[0]
34265         // match: (Trunc x)
34266         // result: (ROUNDSD [3] x)
34267         for {
34268                 x := v_0
34269                 v.reset(OpAMD64ROUNDSD)
34270                 v.AuxInt = int8ToAuxInt(3)
34271                 v.AddArg(x)
34272                 return true
34273         }
34274 }
34275 func rewriteValueAMD64_OpZero(v *Value) bool {
34276         v_1 := v.Args[1]
34277         v_0 := v.Args[0]
34278         b := v.Block
34279         config := b.Func.Config
34280         typ := &b.Func.Config.Types
34281         // match: (Zero [0] _ mem)
34282         // result: mem
34283         for {
34284                 if auxIntToInt64(v.AuxInt) != 0 {
34285                         break
34286                 }
34287                 mem := v_1
34288                 v.copyOf(mem)
34289                 return true
34290         }
34291         // match: (Zero [1] destptr mem)
34292         // result: (MOVBstoreconst [makeValAndOff(0,0)] destptr mem)
34293         for {
34294                 if auxIntToInt64(v.AuxInt) != 1 {
34295                         break
34296                 }
34297                 destptr := v_0
34298                 mem := v_1
34299                 v.reset(OpAMD64MOVBstoreconst)
34300                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
34301                 v.AddArg2(destptr, mem)
34302                 return true
34303         }
34304         // match: (Zero [2] destptr mem)
34305         // result: (MOVWstoreconst [makeValAndOff(0,0)] destptr mem)
34306         for {
34307                 if auxIntToInt64(v.AuxInt) != 2 {
34308                         break
34309                 }
34310                 destptr := v_0
34311                 mem := v_1
34312                 v.reset(OpAMD64MOVWstoreconst)
34313                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
34314                 v.AddArg2(destptr, mem)
34315                 return true
34316         }
34317         // match: (Zero [4] destptr mem)
34318         // result: (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)
34319         for {
34320                 if auxIntToInt64(v.AuxInt) != 4 {
34321                         break
34322                 }
34323                 destptr := v_0
34324                 mem := v_1
34325                 v.reset(OpAMD64MOVLstoreconst)
34326                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
34327                 v.AddArg2(destptr, mem)
34328                 return true
34329         }
34330         // match: (Zero [8] destptr mem)
34331         // result: (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)
34332         for {
34333                 if auxIntToInt64(v.AuxInt) != 8 {
34334                         break
34335                 }
34336                 destptr := v_0
34337                 mem := v_1
34338                 v.reset(OpAMD64MOVQstoreconst)
34339                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
34340                 v.AddArg2(destptr, mem)
34341                 return true
34342         }
34343         // match: (Zero [3] destptr mem)
34344         // result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [makeValAndOff(0,0)] destptr mem))
34345         for {
34346                 if auxIntToInt64(v.AuxInt) != 3 {
34347                         break
34348                 }
34349                 destptr := v_0
34350                 mem := v_1
34351                 v.reset(OpAMD64MOVBstoreconst)
34352                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 2))
34353                 v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, types.TypeMem)
34354                 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
34355                 v0.AddArg2(destptr, mem)
34356                 v.AddArg2(destptr, v0)
34357                 return true
34358         }
34359         // match: (Zero [5] destptr mem)
34360         // result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
34361         for {
34362                 if auxIntToInt64(v.AuxInt) != 5 {
34363                         break
34364                 }
34365                 destptr := v_0
34366                 mem := v_1
34367                 v.reset(OpAMD64MOVBstoreconst)
34368                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
34369                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
34370                 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
34371                 v0.AddArg2(destptr, mem)
34372                 v.AddArg2(destptr, v0)
34373                 return true
34374         }
34375         // match: (Zero [6] destptr mem)
34376         // result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
34377         for {
34378                 if auxIntToInt64(v.AuxInt) != 6 {
34379                         break
34380                 }
34381                 destptr := v_0
34382                 mem := v_1
34383                 v.reset(OpAMD64MOVWstoreconst)
34384                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
34385                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
34386                 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
34387                 v0.AddArg2(destptr, mem)
34388                 v.AddArg2(destptr, v0)
34389                 return true
34390         }
34391         // match: (Zero [7] destptr mem)
34392         // result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
34393         for {
34394                 if auxIntToInt64(v.AuxInt) != 7 {
34395                         break
34396                 }
34397                 destptr := v_0
34398                 mem := v_1
34399                 v.reset(OpAMD64MOVLstoreconst)
34400                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 3))
34401                 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
34402                 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
34403                 v0.AddArg2(destptr, mem)
34404                 v.AddArg2(destptr, v0)
34405                 return true
34406         }
34407         // match: (Zero [s] destptr mem)
34408         // cond: s%8 != 0 && s > 8 && !config.useSSE
34409         // result: (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8]) (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
34410         for {
34411                 s := auxIntToInt64(v.AuxInt)
34412                 destptr := v_0
34413                 mem := v_1
34414                 if !(s%8 != 0 && s > 8 && !config.useSSE) {
34415                         break
34416                 }
34417                 v.reset(OpZero)
34418                 v.AuxInt = int64ToAuxInt(s - s%8)
34419                 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
34420                 v0.AuxInt = int64ToAuxInt(s % 8)
34421                 v0.AddArg(destptr)
34422                 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
34423                 v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
34424                 v1.AddArg2(destptr, mem)
34425                 v.AddArg2(v0, v1)
34426                 return true
34427         }
34428         // match: (Zero [16] destptr mem)
34429         // cond: !config.useSSE
34430         // result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
34431         for {
34432                 if auxIntToInt64(v.AuxInt) != 16 {
34433                         break
34434                 }
34435                 destptr := v_0
34436                 mem := v_1
34437                 if !(!config.useSSE) {
34438                         break
34439                 }
34440                 v.reset(OpAMD64MOVQstoreconst)
34441                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
34442                 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
34443                 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
34444                 v0.AddArg2(destptr, mem)
34445                 v.AddArg2(destptr, v0)
34446                 return true
34447         }
34448         // match: (Zero [24] destptr mem)
34449         // cond: !config.useSSE
34450         // result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)))
34451         for {
34452                 if auxIntToInt64(v.AuxInt) != 24 {
34453                         break
34454                 }
34455                 destptr := v_0
34456                 mem := v_1
34457                 if !(!config.useSSE) {
34458                         break
34459                 }
34460                 v.reset(OpAMD64MOVQstoreconst)
34461                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
34462                 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
34463                 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
34464                 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
34465                 v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
34466                 v1.AddArg2(destptr, mem)
34467                 v0.AddArg2(destptr, v1)
34468                 v.AddArg2(destptr, v0)
34469                 return true
34470         }
34471         // match: (Zero [32] destptr mem)
34472         // cond: !config.useSSE
34473         // result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))))
34474         for {
34475                 if auxIntToInt64(v.AuxInt) != 32 {
34476                         break
34477                 }
34478                 destptr := v_0
34479                 mem := v_1
34480                 if !(!config.useSSE) {
34481                         break
34482                 }
34483                 v.reset(OpAMD64MOVQstoreconst)
34484                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 24))
34485                 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
34486                 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
34487                 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
34488                 v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
34489                 v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
34490                 v2.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
34491                 v2.AddArg2(destptr, mem)
34492                 v1.AddArg2(destptr, v2)
34493                 v0.AddArg2(destptr, v1)
34494                 v.AddArg2(destptr, v0)
34495                 return true
34496         }
34497         // match: (Zero [s] destptr mem)
34498         // cond: s > 8 && s < 16 && config.useSSE
34499         // result: (MOVQstoreconst [makeValAndOff(0,int32(s-8))] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
34500         for {
34501                 s := auxIntToInt64(v.AuxInt)
34502                 destptr := v_0
34503                 mem := v_1
34504                 if !(s > 8 && s < 16 && config.useSSE) {
34505                         break
34506                 }
34507                 v.reset(OpAMD64MOVQstoreconst)
34508                 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, int32(s-8)))
34509                 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
34510                 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
34511                 v0.AddArg2(destptr, mem)
34512                 v.AddArg2(destptr, v0)
34513                 return true
34514         }
34515         // match: (Zero [s] destptr mem)
34516         // cond: s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE
34517         // result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVOstorezero destptr mem))
34518         for {
34519                 s := auxIntToInt64(v.AuxInt)
34520                 destptr := v_0
34521                 mem := v_1
34522                 if !(s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE) {
34523                         break
34524                 }
34525                 v.reset(OpZero)
34526                 v.AuxInt = int64ToAuxInt(s - s%16)
34527                 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
34528                 v0.AuxInt = int64ToAuxInt(s % 16)
34529                 v0.AddArg(destptr)
34530                 v1 := b.NewValue0(v.Pos, OpAMD64MOVOstorezero, types.TypeMem)
34531                 v1.AddArg2(destptr, mem)
34532                 v.AddArg2(v0, v1)
34533                 return true
34534         }
34535         // match: (Zero [s] destptr mem)
34536         // cond: s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE
34537         // result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
34538         for {
34539                 s := auxIntToInt64(v.AuxInt)
34540                 destptr := v_0
34541                 mem := v_1
34542                 if !(s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE) {
34543                         break
34544                 }
34545                 v.reset(OpZero)
34546                 v.AuxInt = int64ToAuxInt(s - s%16)
34547                 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
34548                 v0.AuxInt = int64ToAuxInt(s % 16)
34549                 v0.AddArg(destptr)
34550                 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
34551                 v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
34552                 v1.AddArg2(destptr, mem)
34553                 v.AddArg2(v0, v1)
34554                 return true
34555         }
34556         // match: (Zero [16] destptr mem)
34557         // cond: config.useSSE
34558         // result: (MOVOstorezero destptr mem)
34559         for {
34560                 if auxIntToInt64(v.AuxInt) != 16 {
34561                         break
34562                 }
34563                 destptr := v_0
34564                 mem := v_1
34565                 if !(config.useSSE) {
34566                         break
34567                 }
34568                 v.reset(OpAMD64MOVOstorezero)
34569                 v.AddArg2(destptr, mem)
34570                 return true
34571         }
34572         // match: (Zero [32] destptr mem)
34573         // cond: config.useSSE
34574         // result: (MOVOstorezero (OffPtr <destptr.Type> destptr [16]) (MOVOstorezero destptr mem))
34575         for {
34576                 if auxIntToInt64(v.AuxInt) != 32 {
34577                         break
34578                 }
34579                 destptr := v_0
34580                 mem := v_1
34581                 if !(config.useSSE) {
34582                         break
34583                 }
34584                 v.reset(OpAMD64MOVOstorezero)
34585                 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
34586                 v0.AuxInt = int64ToAuxInt(16)
34587                 v0.AddArg(destptr)
34588                 v1 := b.NewValue0(v.Pos, OpAMD64MOVOstorezero, types.TypeMem)
34589                 v1.AddArg2(destptr, mem)
34590                 v.AddArg2(v0, v1)
34591                 return true
34592         }
34593         // match: (Zero [48] destptr mem)
34594         // cond: config.useSSE
34595         // result: (MOVOstorezero (OffPtr <destptr.Type> destptr [32]) (MOVOstorezero (OffPtr <destptr.Type> destptr [16]) (MOVOstorezero destptr mem)))
34596         for {
34597                 if auxIntToInt64(v.AuxInt) != 48 {
34598                         break
34599                 }
34600                 destptr := v_0
34601                 mem := v_1
34602                 if !(config.useSSE) {
34603                         break
34604                 }
34605                 v.reset(OpAMD64MOVOstorezero)
34606                 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
34607                 v0.AuxInt = int64ToAuxInt(32)
34608                 v0.AddArg(destptr)
34609                 v1 := b.NewValue0(v.Pos, OpAMD64MOVOstorezero, types.TypeMem)
34610                 v2 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
34611                 v2.AuxInt = int64ToAuxInt(16)
34612                 v2.AddArg(destptr)
34613                 v3 := b.NewValue0(v.Pos, OpAMD64MOVOstorezero, types.TypeMem)
34614                 v3.AddArg2(destptr, mem)
34615                 v1.AddArg2(v2, v3)
34616                 v.AddArg2(v0, v1)
34617                 return true
34618         }
34619         // match: (Zero [64] destptr mem)
34620         // cond: config.useSSE
34621         // result: (MOVOstorezero (OffPtr <destptr.Type> destptr [48]) (MOVOstorezero (OffPtr <destptr.Type> destptr [32]) (MOVOstorezero (OffPtr <destptr.Type> destptr [16]) (MOVOstorezero destptr mem))))
34622         for {
34623                 if auxIntToInt64(v.AuxInt) != 64 {
34624                         break
34625                 }
34626                 destptr := v_0
34627                 mem := v_1
34628                 if !(config.useSSE) {
34629                         break
34630                 }
34631                 v.reset(OpAMD64MOVOstorezero)
34632                 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
34633                 v0.AuxInt = int64ToAuxInt(48)
34634                 v0.AddArg(destptr)
34635                 v1 := b.NewValue0(v.Pos, OpAMD64MOVOstorezero, types.TypeMem)
34636                 v2 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
34637                 v2.AuxInt = int64ToAuxInt(32)
34638                 v2.AddArg(destptr)
34639                 v3 := b.NewValue0(v.Pos, OpAMD64MOVOstorezero, types.TypeMem)
34640                 v4 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
34641                 v4.AuxInt = int64ToAuxInt(16)
34642                 v4.AddArg(destptr)
34643                 v5 := b.NewValue0(v.Pos, OpAMD64MOVOstorezero, types.TypeMem)
34644                 v5.AddArg2(destptr, mem)
34645                 v3.AddArg2(v4, v5)
34646                 v1.AddArg2(v2, v3)
34647                 v.AddArg2(v0, v1)
34648                 return true
34649         }
34650         // match: (Zero [s] destptr mem)
34651         // cond: s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice
34652         // result: (DUFFZERO [s] destptr mem)
34653         for {
34654                 s := auxIntToInt64(v.AuxInt)
34655                 destptr := v_0
34656                 mem := v_1
34657                 if !(s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice) {
34658                         break
34659                 }
34660                 v.reset(OpAMD64DUFFZERO)
34661                 v.AuxInt = int64ToAuxInt(s)
34662                 v.AddArg2(destptr, mem)
34663                 return true
34664         }
34665         // match: (Zero [s] destptr mem)
34666         // cond: (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0
34667         // result: (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)
34668         for {
34669                 s := auxIntToInt64(v.AuxInt)
34670                 destptr := v_0
34671                 mem := v_1
34672                 if !((s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0) {
34673                         break
34674                 }
34675                 v.reset(OpAMD64REPSTOSQ)
34676                 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
34677                 v0.AuxInt = int64ToAuxInt(s / 8)
34678                 v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
34679                 v1.AuxInt = int64ToAuxInt(0)
34680                 v.AddArg4(destptr, v0, v1, mem)
34681                 return true
34682         }
34683         return false
34684 }
34685 func rewriteBlockAMD64(b *Block) bool {
34686         switch b.Kind {
34687         case BlockAMD64EQ:
34688                 // match: (EQ (TESTL (SHLL (MOVLconst [1]) x) y))
34689                 // result: (UGE (BTL x y))
34690                 for b.Controls[0].Op == OpAMD64TESTL {
34691                         v_0 := b.Controls[0]
34692                         _ = v_0.Args[1]
34693                         v_0_0 := v_0.Args[0]
34694                         v_0_1 := v_0.Args[1]
34695                         for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
34696                                 if v_0_0.Op != OpAMD64SHLL {
34697                                         continue
34698                                 }
34699                                 x := v_0_0.Args[1]
34700                                 v_0_0_0 := v_0_0.Args[0]
34701                                 if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
34702                                         continue
34703                                 }
34704                                 y := v_0_1
34705                                 v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
34706                                 v0.AddArg2(x, y)
34707                                 b.resetWithControl(BlockAMD64UGE, v0)
34708                                 return true
34709                         }
34710                         break
34711                 }
34712                 // match: (EQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
34713                 // result: (UGE (BTQ x y))
34714                 for b.Controls[0].Op == OpAMD64TESTQ {
34715                         v_0 := b.Controls[0]
34716                         _ = v_0.Args[1]
34717                         v_0_0 := v_0.Args[0]
34718                         v_0_1 := v_0.Args[1]
34719                         for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
34720                                 if v_0_0.Op != OpAMD64SHLQ {
34721                                         continue
34722                                 }
34723                                 x := v_0_0.Args[1]
34724                                 v_0_0_0 := v_0_0.Args[0]
34725                                 if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
34726                                         continue
34727                                 }
34728                                 y := v_0_1
34729                                 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
34730                                 v0.AddArg2(x, y)
34731                                 b.resetWithControl(BlockAMD64UGE, v0)
34732                                 return true
34733                         }
34734                         break
34735                 }
34736                 // match: (EQ (TESTLconst [c] x))
34737                 // cond: isUint32PowerOfTwo(int64(c))
34738                 // result: (UGE (BTLconst [int8(log32(c))] x))
34739                 for b.Controls[0].Op == OpAMD64TESTLconst {
34740                         v_0 := b.Controls[0]
34741                         c := auxIntToInt32(v_0.AuxInt)
34742                         x := v_0.Args[0]
34743                         if !(isUint32PowerOfTwo(int64(c))) {
34744                                 break
34745                         }
34746                         v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
34747                         v0.AuxInt = int8ToAuxInt(int8(log32(c)))
34748                         v0.AddArg(x)
34749                         b.resetWithControl(BlockAMD64UGE, v0)
34750                         return true
34751                 }
34752                 // match: (EQ (TESTQconst [c] x))
34753                 // cond: isUint64PowerOfTwo(int64(c))
34754                 // result: (UGE (BTQconst [int8(log32(c))] x))
34755                 for b.Controls[0].Op == OpAMD64TESTQconst {
34756                         v_0 := b.Controls[0]
34757                         c := auxIntToInt32(v_0.AuxInt)
34758                         x := v_0.Args[0]
34759                         if !(isUint64PowerOfTwo(int64(c))) {
34760                                 break
34761                         }
34762                         v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
34763                         v0.AuxInt = int8ToAuxInt(int8(log32(c)))
34764                         v0.AddArg(x)
34765                         b.resetWithControl(BlockAMD64UGE, v0)
34766                         return true
34767                 }
34768                 // match: (EQ (TESTQ (MOVQconst [c]) x))
34769                 // cond: isUint64PowerOfTwo(c)
34770                 // result: (UGE (BTQconst [int8(log64(c))] x))
34771                 for b.Controls[0].Op == OpAMD64TESTQ {
34772                         v_0 := b.Controls[0]
34773                         _ = v_0.Args[1]
34774                         v_0_0 := v_0.Args[0]
34775                         v_0_1 := v_0.Args[1]
34776                         for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
34777                                 if v_0_0.Op != OpAMD64MOVQconst {
34778                                         continue
34779                                 }
34780                                 c := auxIntToInt64(v_0_0.AuxInt)
34781                                 x := v_0_1
34782                                 if !(isUint64PowerOfTwo(c)) {
34783                                         continue
34784                                 }
34785                                 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
34786                                 v0.AuxInt = int8ToAuxInt(int8(log64(c)))
34787                                 v0.AddArg(x)
34788                                 b.resetWithControl(BlockAMD64UGE, v0)
34789                                 return true
34790                         }
34791                         break
34792                 }
34793                 // match: (EQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
34794                 // cond: z1==z2
34795                 // result: (UGE (BTQconst [63] x))
34796                 for b.Controls[0].Op == OpAMD64TESTQ {
34797                         v_0 := b.Controls[0]
34798                         _ = v_0.Args[1]
34799                         v_0_0 := v_0.Args[0]
34800                         v_0_1 := v_0.Args[1]
34801                         for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
34802                                 z1 := v_0_0
34803                                 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
34804                                         continue
34805                                 }
34806                                 z1_0 := z1.Args[0]
34807                                 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
34808                                         continue
34809                                 }
34810                                 x := z1_0.Args[0]
34811                                 z2 := v_0_1
34812                                 if !(z1 == z2) {
34813                                         continue
34814                                 }
34815                                 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
34816                                 v0.AuxInt = int8ToAuxInt(63)
34817                                 v0.AddArg(x)
34818                                 b.resetWithControl(BlockAMD64UGE, v0)
34819                                 return true
34820                         }
34821                         break
34822                 }
34823                 // match: (EQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
34824                 // cond: z1==z2
34825                 // result: (UGE (BTQconst [31] x))
34826                 for b.Controls[0].Op == OpAMD64TESTL {
34827                         v_0 := b.Controls[0]
34828                         _ = v_0.Args[1]
34829                         v_0_0 := v_0.Args[0]
34830                         v_0_1 := v_0.Args[1]
34831                         for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
34832                                 z1 := v_0_0
34833                                 if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
34834                                         continue
34835                                 }
34836                                 z1_0 := z1.Args[0]
34837                                 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
34838                                         continue
34839                                 }
34840                                 x := z1_0.Args[0]
34841                                 z2 := v_0_1
34842                                 if !(z1 == z2) {
34843                                         continue
34844                                 }
34845                                 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
34846                                 v0.AuxInt = int8ToAuxInt(31)
34847                                 v0.AddArg(x)
34848                                 b.resetWithControl(BlockAMD64UGE, v0)
34849                                 return true
34850                         }
34851                         break
34852                 }
34853                 // match: (EQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
34854                 // cond: z1==z2
34855                 // result: (UGE (BTQconst [0] x))
34856                 for b.Controls[0].Op == OpAMD64TESTQ {
34857                         v_0 := b.Controls[0]
34858                         _ = v_0.Args[1]
34859                         v_0_0 := v_0.Args[0]
34860                         v_0_1 := v_0.Args[1]
34861                         for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
34862                                 z1 := v_0_0
34863                                 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
34864                                         continue
34865                                 }
34866                                 z1_0 := z1.Args[0]
34867                                 if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
34868                                         continue
34869                                 }
34870                                 x := z1_0.Args[0]
34871                                 z2 := v_0_1
34872                                 if !(z1 == z2) {
34873                                         continue
34874                                 }
34875                                 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
34876                                 v0.AuxInt = int8ToAuxInt(0)
34877                                 v0.AddArg(x)
34878                                 b.resetWithControl(BlockAMD64UGE, v0)
34879                                 return true
34880                         }
34881                         break
34882                 }
34883                 // match: (EQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
34884                 // cond: z1==z2
34885                 // result: (UGE (BTLconst [0] x))
34886                 for b.Controls[0].Op == OpAMD64TESTL {
34887                         v_0 := b.Controls[0]
34888                         _ = v_0.Args[1]
34889                         v_0_0 := v_0.Args[0]
34890                         v_0_1 := v_0.Args[1]
34891                         for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
34892                                 z1 := v_0_0
34893                                 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
34894                                         continue
34895                                 }
34896                                 z1_0 := z1.Args[0]
34897                                 if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
34898                                         continue
34899                                 }
34900                                 x := z1_0.Args[0]
34901                                 z2 := v_0_1
34902                                 if !(z1 == z2) {
34903                                         continue
34904                                 }
34905                                 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
34906                                 v0.AuxInt = int8ToAuxInt(0)
34907                                 v0.AddArg(x)
34908                                 b.resetWithControl(BlockAMD64UGE, v0)
34909                                 return true
34910                         }
34911                         break
34912                 }
34913                 // match: (EQ (TESTQ z1:(SHRQconst [63] x) z2))
34914                 // cond: z1==z2
34915                 // result: (UGE (BTQconst [63] x))
34916                 for b.Controls[0].Op == OpAMD64TESTQ {
34917                         v_0 := b.Controls[0]
34918                         _ = v_0.Args[1]
34919                         v_0_0 := v_0.Args[0]
34920                         v_0_1 := v_0.Args[1]
34921                         for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
34922                                 z1 := v_0_0
34923                                 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
34924                                         continue
34925                                 }
34926                                 x := z1.Args[0]
34927                                 z2 := v_0_1
34928                                 if !(z1 == z2) {
34929                                         continue
34930                                 }
34931                                 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
34932                                 v0.AuxInt = int8ToAuxInt(63)
34933                                 v0.AddArg(x)
34934                                 b.resetWithControl(BlockAMD64UGE, v0)
34935                                 return true
34936                         }
34937                         break
34938                 }
34939                 // match: (EQ (TESTL z1:(SHRLconst [31] x) z2))
34940                 // cond: z1==z2
34941                 // result: (UGE (BTLconst [31] x))
34942                 for b.Controls[0].Op == OpAMD64TESTL {
34943                         v_0 := b.Controls[0]
34944                         _ = v_0.Args[1]
34945                         v_0_0 := v_0.Args[0]
34946                         v_0_1 := v_0.Args[1]
34947                         for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
34948                                 z1 := v_0_0
34949                                 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
34950                                         continue
34951                                 }
34952                                 x := z1.Args[0]
34953                                 z2 := v_0_1
34954                                 if !(z1 == z2) {
34955                                         continue
34956                                 }
34957                                 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
34958                                 v0.AuxInt = int8ToAuxInt(31)
34959                                 v0.AddArg(x)
34960                                 b.resetWithControl(BlockAMD64UGE, v0)
34961                                 return true
34962                         }
34963                         break
34964                 }
34965                 // match: (EQ (InvertFlags cmp) yes no)
34966                 // result: (EQ cmp yes no)
34967                 for b.Controls[0].Op == OpAMD64InvertFlags {
34968                         v_0 := b.Controls[0]
34969                         cmp := v_0.Args[0]
34970                         b.resetWithControl(BlockAMD64EQ, cmp)
34971                         return true
34972                 }
34973                 // match: (EQ (FlagEQ) yes no)
34974                 // result: (First yes no)
34975                 for b.Controls[0].Op == OpAMD64FlagEQ {
34976                         b.Reset(BlockFirst)
34977                         return true
34978                 }
34979                 // match: (EQ (FlagLT_ULT) yes no)
34980                 // result: (First no yes)
34981                 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
34982                         b.Reset(BlockFirst)
34983                         b.swapSuccessors()
34984                         return true
34985                 }
34986                 // match: (EQ (FlagLT_UGT) yes no)
34987                 // result: (First no yes)
34988                 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
34989                         b.Reset(BlockFirst)
34990                         b.swapSuccessors()
34991                         return true
34992                 }
34993                 // match: (EQ (FlagGT_ULT) yes no)
34994                 // result: (First no yes)
34995                 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
34996                         b.Reset(BlockFirst)
34997                         b.swapSuccessors()
34998                         return true
34999                 }
35000                 // match: (EQ (FlagGT_UGT) yes no)
35001                 // result: (First no yes)
35002                 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
35003                         b.Reset(BlockFirst)
35004                         b.swapSuccessors()
35005                         return true
35006                 }
35007         case BlockAMD64GE:
35008                 // match: (GE (InvertFlags cmp) yes no)
35009                 // result: (LE cmp yes no)
35010                 for b.Controls[0].Op == OpAMD64InvertFlags {
35011                         v_0 := b.Controls[0]
35012                         cmp := v_0.Args[0]
35013                         b.resetWithControl(BlockAMD64LE, cmp)
35014                         return true
35015                 }
35016                 // match: (GE (FlagEQ) yes no)
35017                 // result: (First yes no)
35018                 for b.Controls[0].Op == OpAMD64FlagEQ {
35019                         b.Reset(BlockFirst)
35020                         return true
35021                 }
35022                 // match: (GE (FlagLT_ULT) yes no)
35023                 // result: (First no yes)
35024                 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
35025                         b.Reset(BlockFirst)
35026                         b.swapSuccessors()
35027                         return true
35028                 }
35029                 // match: (GE (FlagLT_UGT) yes no)
35030                 // result: (First no yes)
35031                 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
35032                         b.Reset(BlockFirst)
35033                         b.swapSuccessors()
35034                         return true
35035                 }
35036                 // match: (GE (FlagGT_ULT) yes no)
35037                 // result: (First yes no)
35038                 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
35039                         b.Reset(BlockFirst)
35040                         return true
35041                 }
35042                 // match: (GE (FlagGT_UGT) yes no)
35043                 // result: (First yes no)
35044                 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
35045                         b.Reset(BlockFirst)
35046                         return true
35047                 }
35048         case BlockAMD64GT:
35049                 // match: (GT (InvertFlags cmp) yes no)
35050                 // result: (LT cmp yes no)
35051                 for b.Controls[0].Op == OpAMD64InvertFlags {
35052                         v_0 := b.Controls[0]
35053                         cmp := v_0.Args[0]
35054                         b.resetWithControl(BlockAMD64LT, cmp)
35055                         return true
35056                 }
35057                 // match: (GT (FlagEQ) yes no)
35058                 // result: (First no yes)
35059                 for b.Controls[0].Op == OpAMD64FlagEQ {
35060                         b.Reset(BlockFirst)
35061                         b.swapSuccessors()
35062                         return true
35063                 }
35064                 // match: (GT (FlagLT_ULT) yes no)
35065                 // result: (First no yes)
35066                 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
35067                         b.Reset(BlockFirst)
35068                         b.swapSuccessors()
35069                         return true
35070                 }
35071                 // match: (GT (FlagLT_UGT) yes no)
35072                 // result: (First no yes)
35073                 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
35074                         b.Reset(BlockFirst)
35075                         b.swapSuccessors()
35076                         return true
35077                 }
35078                 // match: (GT (FlagGT_ULT) yes no)
35079                 // result: (First yes no)
35080                 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
35081                         b.Reset(BlockFirst)
35082                         return true
35083                 }
35084                 // match: (GT (FlagGT_UGT) yes no)
35085                 // result: (First yes no)
35086                 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
35087                         b.Reset(BlockFirst)
35088                         return true
35089                 }
35090         case BlockIf:
35091                 // match: (If (SETL cmp) yes no)
35092                 // result: (LT cmp yes no)
35093                 for b.Controls[0].Op == OpAMD64SETL {
35094                         v_0 := b.Controls[0]
35095                         cmp := v_0.Args[0]
35096                         b.resetWithControl(BlockAMD64LT, cmp)
35097                         return true
35098                 }
35099                 // match: (If (SETLE cmp) yes no)
35100                 // result: (LE cmp yes no)
35101                 for b.Controls[0].Op == OpAMD64SETLE {
35102                         v_0 := b.Controls[0]
35103                         cmp := v_0.Args[0]
35104                         b.resetWithControl(BlockAMD64LE, cmp)
35105                         return true
35106                 }
35107                 // match: (If (SETG cmp) yes no)
35108                 // result: (GT cmp yes no)
35109                 for b.Controls[0].Op == OpAMD64SETG {
35110                         v_0 := b.Controls[0]
35111                         cmp := v_0.Args[0]
35112                         b.resetWithControl(BlockAMD64GT, cmp)
35113                         return true
35114                 }
35115                 // match: (If (SETGE cmp) yes no)
35116                 // result: (GE cmp yes no)
35117                 for b.Controls[0].Op == OpAMD64SETGE {
35118                         v_0 := b.Controls[0]
35119                         cmp := v_0.Args[0]
35120                         b.resetWithControl(BlockAMD64GE, cmp)
35121                         return true
35122                 }
35123                 // match: (If (SETEQ cmp) yes no)
35124                 // result: (EQ cmp yes no)
35125                 for b.Controls[0].Op == OpAMD64SETEQ {
35126                         v_0 := b.Controls[0]
35127                         cmp := v_0.Args[0]
35128                         b.resetWithControl(BlockAMD64EQ, cmp)
35129                         return true
35130                 }
35131                 // match: (If (SETNE cmp) yes no)
35132                 // result: (NE cmp yes no)
35133                 for b.Controls[0].Op == OpAMD64SETNE {
35134                         v_0 := b.Controls[0]
35135                         cmp := v_0.Args[0]
35136                         b.resetWithControl(BlockAMD64NE, cmp)
35137                         return true
35138                 }
35139                 // match: (If (SETB cmp) yes no)
35140                 // result: (ULT cmp yes no)
35141                 for b.Controls[0].Op == OpAMD64SETB {
35142                         v_0 := b.Controls[0]
35143                         cmp := v_0.Args[0]
35144                         b.resetWithControl(BlockAMD64ULT, cmp)
35145                         return true
35146                 }
35147                 // match: (If (SETBE cmp) yes no)
35148                 // result: (ULE cmp yes no)
35149                 for b.Controls[0].Op == OpAMD64SETBE {
35150                         v_0 := b.Controls[0]
35151                         cmp := v_0.Args[0]
35152                         b.resetWithControl(BlockAMD64ULE, cmp)
35153                         return true
35154                 }
35155                 // match: (If (SETA cmp) yes no)
35156                 // result: (UGT cmp yes no)
35157                 for b.Controls[0].Op == OpAMD64SETA {
35158                         v_0 := b.Controls[0]
35159                         cmp := v_0.Args[0]
35160                         b.resetWithControl(BlockAMD64UGT, cmp)
35161                         return true
35162                 }
35163                 // match: (If (SETAE cmp) yes no)
35164                 // result: (UGE cmp yes no)
35165                 for b.Controls[0].Op == OpAMD64SETAE {
35166                         v_0 := b.Controls[0]
35167                         cmp := v_0.Args[0]
35168                         b.resetWithControl(BlockAMD64UGE, cmp)
35169                         return true
35170                 }
35171                 // match: (If (SETO cmp) yes no)
35172                 // result: (OS cmp yes no)
35173                 for b.Controls[0].Op == OpAMD64SETO {
35174                         v_0 := b.Controls[0]
35175                         cmp := v_0.Args[0]
35176                         b.resetWithControl(BlockAMD64OS, cmp)
35177                         return true
35178                 }
35179                 // match: (If (SETGF cmp) yes no)
35180                 // result: (UGT cmp yes no)
35181                 for b.Controls[0].Op == OpAMD64SETGF {
35182                         v_0 := b.Controls[0]
35183                         cmp := v_0.Args[0]
35184                         b.resetWithControl(BlockAMD64UGT, cmp)
35185                         return true
35186                 }
35187                 // match: (If (SETGEF cmp) yes no)
35188                 // result: (UGE cmp yes no)
35189                 for b.Controls[0].Op == OpAMD64SETGEF {
35190                         v_0 := b.Controls[0]
35191                         cmp := v_0.Args[0]
35192                         b.resetWithControl(BlockAMD64UGE, cmp)
35193                         return true
35194                 }
35195                 // match: (If (SETEQF cmp) yes no)
35196                 // result: (EQF cmp yes no)
35197                 for b.Controls[0].Op == OpAMD64SETEQF {
35198                         v_0 := b.Controls[0]
35199                         cmp := v_0.Args[0]
35200                         b.resetWithControl(BlockAMD64EQF, cmp)
35201                         return true
35202                 }
35203                 // match: (If (SETNEF cmp) yes no)
35204                 // result: (NEF cmp yes no)
35205                 for b.Controls[0].Op == OpAMD64SETNEF {
35206                         v_0 := b.Controls[0]
35207                         cmp := v_0.Args[0]
35208                         b.resetWithControl(BlockAMD64NEF, cmp)
35209                         return true
35210                 }
35211                 // match: (If cond yes no)
35212                 // result: (NE (TESTB cond cond) yes no)
35213                 for {
35214                         cond := b.Controls[0]
35215                         v0 := b.NewValue0(cond.Pos, OpAMD64TESTB, types.TypeFlags)
35216                         v0.AddArg2(cond, cond)
35217                         b.resetWithControl(BlockAMD64NE, v0)
35218                         return true
35219                 }
35220         case BlockAMD64LE:
35221                 // match: (LE (InvertFlags cmp) yes no)
35222                 // result: (GE cmp yes no)
35223                 for b.Controls[0].Op == OpAMD64InvertFlags {
35224                         v_0 := b.Controls[0]
35225                         cmp := v_0.Args[0]
35226                         b.resetWithControl(BlockAMD64GE, cmp)
35227                         return true
35228                 }
35229                 // match: (LE (FlagEQ) yes no)
35230                 // result: (First yes no)
35231                 for b.Controls[0].Op == OpAMD64FlagEQ {
35232                         b.Reset(BlockFirst)
35233                         return true
35234                 }
35235                 // match: (LE (FlagLT_ULT) yes no)
35236                 // result: (First yes no)
35237                 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
35238                         b.Reset(BlockFirst)
35239                         return true
35240                 }
35241                 // match: (LE (FlagLT_UGT) yes no)
35242                 // result: (First yes no)
35243                 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
35244                         b.Reset(BlockFirst)
35245                         return true
35246                 }
35247                 // match: (LE (FlagGT_ULT) yes no)
35248                 // result: (First no yes)
35249                 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
35250                         b.Reset(BlockFirst)
35251                         b.swapSuccessors()
35252                         return true
35253                 }
35254                 // match: (LE (FlagGT_UGT) yes no)
35255                 // result: (First no yes)
35256                 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
35257                         b.Reset(BlockFirst)
35258                         b.swapSuccessors()
35259                         return true
35260                 }
35261         case BlockAMD64LT:
35262                 // match: (LT (InvertFlags cmp) yes no)
35263                 // result: (GT cmp yes no)
35264                 for b.Controls[0].Op == OpAMD64InvertFlags {
35265                         v_0 := b.Controls[0]
35266                         cmp := v_0.Args[0]
35267                         b.resetWithControl(BlockAMD64GT, cmp)
35268                         return true
35269                 }
35270                 // match: (LT (FlagEQ) yes no)
35271                 // result: (First no yes)
35272                 for b.Controls[0].Op == OpAMD64FlagEQ {
35273                         b.Reset(BlockFirst)
35274                         b.swapSuccessors()
35275                         return true
35276                 }
35277                 // match: (LT (FlagLT_ULT) yes no)
35278                 // result: (First yes no)
35279                 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
35280                         b.Reset(BlockFirst)
35281                         return true
35282                 }
35283                 // match: (LT (FlagLT_UGT) yes no)
35284                 // result: (First yes no)
35285                 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
35286                         b.Reset(BlockFirst)
35287                         return true
35288                 }
35289                 // match: (LT (FlagGT_ULT) yes no)
35290                 // result: (First no yes)
35291                 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
35292                         b.Reset(BlockFirst)
35293                         b.swapSuccessors()
35294                         return true
35295                 }
35296                 // match: (LT (FlagGT_UGT) yes no)
35297                 // result: (First no yes)
35298                 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
35299                         b.Reset(BlockFirst)
35300                         b.swapSuccessors()
35301                         return true
35302                 }
35303         case BlockAMD64NE:
35304                 // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no)
35305                 // result: (LT cmp yes no)
35306                 for b.Controls[0].Op == OpAMD64TESTB {
35307                         v_0 := b.Controls[0]
35308                         _ = v_0.Args[1]
35309                         v_0_0 := v_0.Args[0]
35310                         if v_0_0.Op != OpAMD64SETL {
35311                                 break
35312                         }
35313                         cmp := v_0_0.Args[0]
35314                         v_0_1 := v_0.Args[1]
35315                         if v_0_1.Op != OpAMD64SETL || cmp != v_0_1.Args[0] {
35316                                 break
35317                         }
35318                         b.resetWithControl(BlockAMD64LT, cmp)
35319                         return true
35320                 }
35321                 // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
35322                 // result: (LE cmp yes no)
35323                 for b.Controls[0].Op == OpAMD64TESTB {
35324                         v_0 := b.Controls[0]
35325                         _ = v_0.Args[1]
35326                         v_0_0 := v_0.Args[0]
35327                         if v_0_0.Op != OpAMD64SETLE {
35328                                 break
35329                         }
35330                         cmp := v_0_0.Args[0]
35331                         v_0_1 := v_0.Args[1]
35332                         if v_0_1.Op != OpAMD64SETLE || cmp != v_0_1.Args[0] {
35333                                 break
35334                         }
35335                         b.resetWithControl(BlockAMD64LE, cmp)
35336                         return true
35337                 }
35338                 // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no)
35339                 // result: (GT cmp yes no)
35340                 for b.Controls[0].Op == OpAMD64TESTB {
35341                         v_0 := b.Controls[0]
35342                         _ = v_0.Args[1]
35343                         v_0_0 := v_0.Args[0]
35344                         if v_0_0.Op != OpAMD64SETG {
35345                                 break
35346                         }
35347                         cmp := v_0_0.Args[0]
35348                         v_0_1 := v_0.Args[1]
35349                         if v_0_1.Op != OpAMD64SETG || cmp != v_0_1.Args[0] {
35350                                 break
35351                         }
35352                         b.resetWithControl(BlockAMD64GT, cmp)
35353                         return true
35354                 }
35355                 // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
35356                 // result: (GE cmp yes no)
35357                 for b.Controls[0].Op == OpAMD64TESTB {
35358                         v_0 := b.Controls[0]
35359                         _ = v_0.Args[1]
35360                         v_0_0 := v_0.Args[0]
35361                         if v_0_0.Op != OpAMD64SETGE {
35362                                 break
35363                         }
35364                         cmp := v_0_0.Args[0]
35365                         v_0_1 := v_0.Args[1]
35366                         if v_0_1.Op != OpAMD64SETGE || cmp != v_0_1.Args[0] {
35367                                 break
35368                         }
35369                         b.resetWithControl(BlockAMD64GE, cmp)
35370                         return true
35371                 }
35372                 // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
35373                 // result: (EQ cmp yes no)
35374                 for b.Controls[0].Op == OpAMD64TESTB {
35375                         v_0 := b.Controls[0]
35376                         _ = v_0.Args[1]
35377                         v_0_0 := v_0.Args[0]
35378                         if v_0_0.Op != OpAMD64SETEQ {
35379                                 break
35380                         }
35381                         cmp := v_0_0.Args[0]
35382                         v_0_1 := v_0.Args[1]
35383                         if v_0_1.Op != OpAMD64SETEQ || cmp != v_0_1.Args[0] {
35384                                 break
35385                         }
35386                         b.resetWithControl(BlockAMD64EQ, cmp)
35387                         return true
35388                 }
35389                 // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
35390                 // result: (NE cmp yes no)
35391                 for b.Controls[0].Op == OpAMD64TESTB {
35392                         v_0 := b.Controls[0]
35393                         _ = v_0.Args[1]
35394                         v_0_0 := v_0.Args[0]
35395                         if v_0_0.Op != OpAMD64SETNE {
35396                                 break
35397                         }
35398                         cmp := v_0_0.Args[0]
35399                         v_0_1 := v_0.Args[1]
35400                         if v_0_1.Op != OpAMD64SETNE || cmp != v_0_1.Args[0] {
35401                                 break
35402                         }
35403                         b.resetWithControl(BlockAMD64NE, cmp)
35404                         return true
35405                 }
35406                 // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no)
35407                 // result: (ULT cmp yes no)
35408                 for b.Controls[0].Op == OpAMD64TESTB {
35409                         v_0 := b.Controls[0]
35410                         _ = v_0.Args[1]
35411                         v_0_0 := v_0.Args[0]
35412                         if v_0_0.Op != OpAMD64SETB {
35413                                 break
35414                         }
35415                         cmp := v_0_0.Args[0]
35416                         v_0_1 := v_0.Args[1]
35417                         if v_0_1.Op != OpAMD64SETB || cmp != v_0_1.Args[0] {
35418                                 break
35419                         }
35420                         b.resetWithControl(BlockAMD64ULT, cmp)
35421                         return true
35422                 }
35423                 // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
35424                 // result: (ULE cmp yes no)
35425                 for b.Controls[0].Op == OpAMD64TESTB {
35426                         v_0 := b.Controls[0]
35427                         _ = v_0.Args[1]
35428                         v_0_0 := v_0.Args[0]
35429                         if v_0_0.Op != OpAMD64SETBE {
35430                                 break
35431                         }
35432                         cmp := v_0_0.Args[0]
35433                         v_0_1 := v_0.Args[1]
35434                         if v_0_1.Op != OpAMD64SETBE || cmp != v_0_1.Args[0] {
35435                                 break
35436                         }
35437                         b.resetWithControl(BlockAMD64ULE, cmp)
35438                         return true
35439                 }
35440                 // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no)
35441                 // result: (UGT cmp yes no)
35442                 for b.Controls[0].Op == OpAMD64TESTB {
35443                         v_0 := b.Controls[0]
35444                         _ = v_0.Args[1]
35445                         v_0_0 := v_0.Args[0]
35446                         if v_0_0.Op != OpAMD64SETA {
35447                                 break
35448                         }
35449                         cmp := v_0_0.Args[0]
35450                         v_0_1 := v_0.Args[1]
35451                         if v_0_1.Op != OpAMD64SETA || cmp != v_0_1.Args[0] {
35452                                 break
35453                         }
35454                         b.resetWithControl(BlockAMD64UGT, cmp)
35455                         return true
35456                 }
35457                 // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
35458                 // result: (UGE cmp yes no)
35459                 for b.Controls[0].Op == OpAMD64TESTB {
35460                         v_0 := b.Controls[0]
35461                         _ = v_0.Args[1]
35462                         v_0_0 := v_0.Args[0]
35463                         if v_0_0.Op != OpAMD64SETAE {
35464                                 break
35465                         }
35466                         cmp := v_0_0.Args[0]
35467                         v_0_1 := v_0.Args[1]
35468                         if v_0_1.Op != OpAMD64SETAE || cmp != v_0_1.Args[0] {
35469                                 break
35470                         }
35471                         b.resetWithControl(BlockAMD64UGE, cmp)
35472                         return true
35473                 }
35474                 // match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no)
35475                 // result: (OS cmp yes no)
35476                 for b.Controls[0].Op == OpAMD64TESTB {
35477                         v_0 := b.Controls[0]
35478                         _ = v_0.Args[1]
35479                         v_0_0 := v_0.Args[0]
35480                         if v_0_0.Op != OpAMD64SETO {
35481                                 break
35482                         }
35483                         cmp := v_0_0.Args[0]
35484                         v_0_1 := v_0.Args[1]
35485                         if v_0_1.Op != OpAMD64SETO || cmp != v_0_1.Args[0] {
35486                                 break
35487                         }
35488                         b.resetWithControl(BlockAMD64OS, cmp)
35489                         return true
35490                 }
35491                 // match: (NE (TESTL (SHLL (MOVLconst [1]) x) y))
35492                 // result: (ULT (BTL x y))
35493                 for b.Controls[0].Op == OpAMD64TESTL {
35494                         v_0 := b.Controls[0]
35495                         _ = v_0.Args[1]
35496                         v_0_0 := v_0.Args[0]
35497                         v_0_1 := v_0.Args[1]
35498                         for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
35499                                 if v_0_0.Op != OpAMD64SHLL {
35500                                         continue
35501                                 }
35502                                 x := v_0_0.Args[1]
35503                                 v_0_0_0 := v_0_0.Args[0]
35504                                 if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
35505                                         continue
35506                                 }
35507                                 y := v_0_1
35508                                 v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
35509                                 v0.AddArg2(x, y)
35510                                 b.resetWithControl(BlockAMD64ULT, v0)
35511                                 return true
35512                         }
35513                         break
35514                 }
35515                 // match: (NE (TESTQ (SHLQ (MOVQconst [1]) x) y))
35516                 // result: (ULT (BTQ x y))
35517                 for b.Controls[0].Op == OpAMD64TESTQ {
35518                         v_0 := b.Controls[0]
35519                         _ = v_0.Args[1]
35520                         v_0_0 := v_0.Args[0]
35521                         v_0_1 := v_0.Args[1]
35522                         for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
35523                                 if v_0_0.Op != OpAMD64SHLQ {
35524                                         continue
35525                                 }
35526                                 x := v_0_0.Args[1]
35527                                 v_0_0_0 := v_0_0.Args[0]
35528                                 if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
35529                                         continue
35530                                 }
35531                                 y := v_0_1
35532                                 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
35533                                 v0.AddArg2(x, y)
35534                                 b.resetWithControl(BlockAMD64ULT, v0)
35535                                 return true
35536                         }
35537                         break
35538                 }
35539                 // match: (NE (TESTLconst [c] x))
35540                 // cond: isUint32PowerOfTwo(int64(c))
35541                 // result: (ULT (BTLconst [int8(log32(c))] x))
35542                 for b.Controls[0].Op == OpAMD64TESTLconst {
35543                         v_0 := b.Controls[0]
35544                         c := auxIntToInt32(v_0.AuxInt)
35545                         x := v_0.Args[0]
35546                         if !(isUint32PowerOfTwo(int64(c))) {
35547                                 break
35548                         }
35549                         v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
35550                         v0.AuxInt = int8ToAuxInt(int8(log32(c)))
35551                         v0.AddArg(x)
35552                         b.resetWithControl(BlockAMD64ULT, v0)
35553                         return true
35554                 }
35555                 // match: (NE (TESTQconst [c] x))
35556                 // cond: isUint64PowerOfTwo(int64(c))
35557                 // result: (ULT (BTQconst [int8(log32(c))] x))
35558                 for b.Controls[0].Op == OpAMD64TESTQconst {
35559                         v_0 := b.Controls[0]
35560                         c := auxIntToInt32(v_0.AuxInt)
35561                         x := v_0.Args[0]
35562                         if !(isUint64PowerOfTwo(int64(c))) {
35563                                 break
35564                         }
35565                         v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
35566                         v0.AuxInt = int8ToAuxInt(int8(log32(c)))
35567                         v0.AddArg(x)
35568                         b.resetWithControl(BlockAMD64ULT, v0)
35569                         return true
35570                 }
35571                 // match: (NE (TESTQ (MOVQconst [c]) x))
35572                 // cond: isUint64PowerOfTwo(c)
35573                 // result: (ULT (BTQconst [int8(log64(c))] x))
35574                 for b.Controls[0].Op == OpAMD64TESTQ {
35575                         v_0 := b.Controls[0]
35576                         _ = v_0.Args[1]
35577                         v_0_0 := v_0.Args[0]
35578                         v_0_1 := v_0.Args[1]
35579                         for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
35580                                 if v_0_0.Op != OpAMD64MOVQconst {
35581                                         continue
35582                                 }
35583                                 c := auxIntToInt64(v_0_0.AuxInt)
35584                                 x := v_0_1
35585                                 if !(isUint64PowerOfTwo(c)) {
35586                                         continue
35587                                 }
35588                                 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
35589                                 v0.AuxInt = int8ToAuxInt(int8(log64(c)))
35590                                 v0.AddArg(x)
35591                                 b.resetWithControl(BlockAMD64ULT, v0)
35592                                 return true
35593                         }
35594                         break
35595                 }
35596                 // match: (NE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
35597                 // cond: z1==z2
35598                 // result: (ULT (BTQconst [63] x))
35599                 for b.Controls[0].Op == OpAMD64TESTQ {
35600                         v_0 := b.Controls[0]
35601                         _ = v_0.Args[1]
35602                         v_0_0 := v_0.Args[0]
35603                         v_0_1 := v_0.Args[1]
35604                         for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
35605                                 z1 := v_0_0
35606                                 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
35607                                         continue
35608                                 }
35609                                 z1_0 := z1.Args[0]
35610                                 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
35611                                         continue
35612                                 }
35613                                 x := z1_0.Args[0]
35614                                 z2 := v_0_1
35615                                 if !(z1 == z2) {
35616                                         continue
35617                                 }
35618                                 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
35619                                 v0.AuxInt = int8ToAuxInt(63)
35620                                 v0.AddArg(x)
35621                                 b.resetWithControl(BlockAMD64ULT, v0)
35622                                 return true
35623                         }
35624                         break
35625                 }
35626                 // match: (NE (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
35627                 // cond: z1==z2
35628                 // result: (ULT (BTQconst [31] x))
35629                 for b.Controls[0].Op == OpAMD64TESTL {
35630                         v_0 := b.Controls[0]
35631                         _ = v_0.Args[1]
35632                         v_0_0 := v_0.Args[0]
35633                         v_0_1 := v_0.Args[1]
35634                         for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
35635                                 z1 := v_0_0
35636                                 if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
35637                                         continue
35638                                 }
35639                                 z1_0 := z1.Args[0]
35640                                 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
35641                                         continue
35642                                 }
35643                                 x := z1_0.Args[0]
35644                                 z2 := v_0_1
35645                                 if !(z1 == z2) {
35646                                         continue
35647                                 }
35648                                 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
35649                                 v0.AuxInt = int8ToAuxInt(31)
35650                                 v0.AddArg(x)
35651                                 b.resetWithControl(BlockAMD64ULT, v0)
35652                                 return true
35653                         }
35654                         break
35655                 }
35656                 // match: (NE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
35657                 // cond: z1==z2
35658                 // result: (ULT (BTQconst [0] x))
35659                 for b.Controls[0].Op == OpAMD64TESTQ {
35660                         v_0 := b.Controls[0]
35661                         _ = v_0.Args[1]
35662                         v_0_0 := v_0.Args[0]
35663                         v_0_1 := v_0.Args[1]
35664                         for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
35665                                 z1 := v_0_0
35666                                 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
35667                                         continue
35668                                 }
35669                                 z1_0 := z1.Args[0]
35670                                 if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
35671                                         continue
35672                                 }
35673                                 x := z1_0.Args[0]
35674                                 z2 := v_0_1
35675                                 if !(z1 == z2) {
35676                                         continue
35677                                 }
35678                                 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
35679                                 v0.AuxInt = int8ToAuxInt(0)
35680                                 v0.AddArg(x)
35681                                 b.resetWithControl(BlockAMD64ULT, v0)
35682                                 return true
35683                         }
35684                         break
35685                 }
35686                 // match: (NE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
35687                 // cond: z1==z2
35688                 // result: (ULT (BTLconst [0] x))
35689                 for b.Controls[0].Op == OpAMD64TESTL {
35690                         v_0 := b.Controls[0]
35691                         _ = v_0.Args[1]
35692                         v_0_0 := v_0.Args[0]
35693                         v_0_1 := v_0.Args[1]
35694                         for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
35695                                 z1 := v_0_0
35696                                 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
35697                                         continue
35698                                 }
35699                                 z1_0 := z1.Args[0]
35700                                 if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
35701                                         continue
35702                                 }
35703                                 x := z1_0.Args[0]
35704                                 z2 := v_0_1
35705                                 if !(z1 == z2) {
35706                                         continue
35707                                 }
35708                                 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
35709                                 v0.AuxInt = int8ToAuxInt(0)
35710                                 v0.AddArg(x)
35711                                 b.resetWithControl(BlockAMD64ULT, v0)
35712                                 return true
35713                         }
35714                         break
35715                 }
35716                 // match: (NE (TESTQ z1:(SHRQconst [63] x) z2))
35717                 // cond: z1==z2
35718                 // result: (ULT (BTQconst [63] x))
35719                 for b.Controls[0].Op == OpAMD64TESTQ {
35720                         v_0 := b.Controls[0]
35721                         _ = v_0.Args[1]
35722                         v_0_0 := v_0.Args[0]
35723                         v_0_1 := v_0.Args[1]
35724                         for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
35725                                 z1 := v_0_0
35726                                 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
35727                                         continue
35728                                 }
35729                                 x := z1.Args[0]
35730                                 z2 := v_0_1
35731                                 if !(z1 == z2) {
35732                                         continue
35733                                 }
35734                                 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
35735                                 v0.AuxInt = int8ToAuxInt(63)
35736                                 v0.AddArg(x)
35737                                 b.resetWithControl(BlockAMD64ULT, v0)
35738                                 return true
35739                         }
35740                         break
35741                 }
35742                 // match: (NE (TESTL z1:(SHRLconst [31] x) z2))
35743                 // cond: z1==z2
35744                 // result: (ULT (BTLconst [31] x))
35745                 for b.Controls[0].Op == OpAMD64TESTL {
35746                         v_0 := b.Controls[0]
35747                         _ = v_0.Args[1]
35748                         v_0_0 := v_0.Args[0]
35749                         v_0_1 := v_0.Args[1]
35750                         for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
35751                                 z1 := v_0_0
35752                                 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
35753                                         continue
35754                                 }
35755                                 x := z1.Args[0]
35756                                 z2 := v_0_1
35757                                 if !(z1 == z2) {
35758                                         continue
35759                                 }
35760                                 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
35761                                 v0.AuxInt = int8ToAuxInt(31)
35762                                 v0.AddArg(x)
35763                                 b.resetWithControl(BlockAMD64ULT, v0)
35764                                 return true
35765                         }
35766                         break
35767                 }
35768                 // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
35769                 // result: (UGT cmp yes no)
35770                 for b.Controls[0].Op == OpAMD64TESTB {
35771                         v_0 := b.Controls[0]
35772                         _ = v_0.Args[1]
35773                         v_0_0 := v_0.Args[0]
35774                         if v_0_0.Op != OpAMD64SETGF {
35775                                 break
35776                         }
35777                         cmp := v_0_0.Args[0]
35778                         v_0_1 := v_0.Args[1]
35779                         if v_0_1.Op != OpAMD64SETGF || cmp != v_0_1.Args[0] {
35780                                 break
35781                         }
35782                         b.resetWithControl(BlockAMD64UGT, cmp)
35783                         return true
35784                 }
35785                 // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
35786                 // result: (UGE cmp yes no)
35787                 for b.Controls[0].Op == OpAMD64TESTB {
35788                         v_0 := b.Controls[0]
35789                         _ = v_0.Args[1]
35790                         v_0_0 := v_0.Args[0]
35791                         if v_0_0.Op != OpAMD64SETGEF {
35792                                 break
35793                         }
35794                         cmp := v_0_0.Args[0]
35795                         v_0_1 := v_0.Args[1]
35796                         if v_0_1.Op != OpAMD64SETGEF || cmp != v_0_1.Args[0] {
35797                                 break
35798                         }
35799                         b.resetWithControl(BlockAMD64UGE, cmp)
35800                         return true
35801                 }
35802                 // match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
35803                 // result: (EQF cmp yes no)
35804                 for b.Controls[0].Op == OpAMD64TESTB {
35805                         v_0 := b.Controls[0]
35806                         _ = v_0.Args[1]
35807                         v_0_0 := v_0.Args[0]
35808                         if v_0_0.Op != OpAMD64SETEQF {
35809                                 break
35810                         }
35811                         cmp := v_0_0.Args[0]
35812                         v_0_1 := v_0.Args[1]
35813                         if v_0_1.Op != OpAMD64SETEQF || cmp != v_0_1.Args[0] {
35814                                 break
35815                         }
35816                         b.resetWithControl(BlockAMD64EQF, cmp)
35817                         return true
35818                 }
35819                 // match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
35820                 // result: (NEF cmp yes no)
35821                 for b.Controls[0].Op == OpAMD64TESTB {
35822                         v_0 := b.Controls[0]
35823                         _ = v_0.Args[1]
35824                         v_0_0 := v_0.Args[0]
35825                         if v_0_0.Op != OpAMD64SETNEF {
35826                                 break
35827                         }
35828                         cmp := v_0_0.Args[0]
35829                         v_0_1 := v_0.Args[1]
35830                         if v_0_1.Op != OpAMD64SETNEF || cmp != v_0_1.Args[0] {
35831                                 break
35832                         }
35833                         b.resetWithControl(BlockAMD64NEF, cmp)
35834                         return true
35835                 }
35836                 // match: (NE (InvertFlags cmp) yes no)
35837                 // result: (NE cmp yes no)
35838                 for b.Controls[0].Op == OpAMD64InvertFlags {
35839                         v_0 := b.Controls[0]
35840                         cmp := v_0.Args[0]
35841                         b.resetWithControl(BlockAMD64NE, cmp)
35842                         return true
35843                 }
35844                 // match: (NE (FlagEQ) yes no)
35845                 // result: (First no yes)
35846                 for b.Controls[0].Op == OpAMD64FlagEQ {
35847                         b.Reset(BlockFirst)
35848                         b.swapSuccessors()
35849                         return true
35850                 }
35851                 // match: (NE (FlagLT_ULT) yes no)
35852                 // result: (First yes no)
35853                 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
35854                         b.Reset(BlockFirst)
35855                         return true
35856                 }
35857                 // match: (NE (FlagLT_UGT) yes no)
35858                 // result: (First yes no)
35859                 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
35860                         b.Reset(BlockFirst)
35861                         return true
35862                 }
35863                 // match: (NE (FlagGT_ULT) yes no)
35864                 // result: (First yes no)
35865                 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
35866                         b.Reset(BlockFirst)
35867                         return true
35868                 }
35869                 // match: (NE (FlagGT_UGT) yes no)
35870                 // result: (First yes no)
35871                 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
35872                         b.Reset(BlockFirst)
35873                         return true
35874                 }
35875         case BlockAMD64UGE:
35876                 // match: (UGE (TESTQ x x) yes no)
35877                 // result: (First yes no)
35878                 for b.Controls[0].Op == OpAMD64TESTQ {
35879                         v_0 := b.Controls[0]
35880                         x := v_0.Args[1]
35881                         if x != v_0.Args[0] {
35882                                 break
35883                         }
35884                         b.Reset(BlockFirst)
35885                         return true
35886                 }
35887                 // match: (UGE (TESTL x x) yes no)
35888                 // result: (First yes no)
35889                 for b.Controls[0].Op == OpAMD64TESTL {
35890                         v_0 := b.Controls[0]
35891                         x := v_0.Args[1]
35892                         if x != v_0.Args[0] {
35893                                 break
35894                         }
35895                         b.Reset(BlockFirst)
35896                         return true
35897                 }
35898                 // match: (UGE (TESTW x x) yes no)
35899                 // result: (First yes no)
35900                 for b.Controls[0].Op == OpAMD64TESTW {
35901                         v_0 := b.Controls[0]
35902                         x := v_0.Args[1]
35903                         if x != v_0.Args[0] {
35904                                 break
35905                         }
35906                         b.Reset(BlockFirst)
35907                         return true
35908                 }
35909                 // match: (UGE (TESTB x x) yes no)
35910                 // result: (First yes no)
35911                 for b.Controls[0].Op == OpAMD64TESTB {
35912                         v_0 := b.Controls[0]
35913                         x := v_0.Args[1]
35914                         if x != v_0.Args[0] {
35915                                 break
35916                         }
35917                         b.Reset(BlockFirst)
35918                         return true
35919                 }
35920                 // match: (UGE (InvertFlags cmp) yes no)
35921                 // result: (ULE cmp yes no)
35922                 for b.Controls[0].Op == OpAMD64InvertFlags {
35923                         v_0 := b.Controls[0]
35924                         cmp := v_0.Args[0]
35925                         b.resetWithControl(BlockAMD64ULE, cmp)
35926                         return true
35927                 }
35928                 // match: (UGE (FlagEQ) yes no)
35929                 // result: (First yes no)
35930                 for b.Controls[0].Op == OpAMD64FlagEQ {
35931                         b.Reset(BlockFirst)
35932                         return true
35933                 }
35934                 // match: (UGE (FlagLT_ULT) yes no)
35935                 // result: (First no yes)
35936                 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
35937                         b.Reset(BlockFirst)
35938                         b.swapSuccessors()
35939                         return true
35940                 }
35941                 // match: (UGE (FlagLT_UGT) yes no)
35942                 // result: (First yes no)
35943                 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
35944                         b.Reset(BlockFirst)
35945                         return true
35946                 }
35947                 // match: (UGE (FlagGT_ULT) yes no)
35948                 // result: (First no yes)
35949                 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
35950                         b.Reset(BlockFirst)
35951                         b.swapSuccessors()
35952                         return true
35953                 }
35954                 // match: (UGE (FlagGT_UGT) yes no)
35955                 // result: (First yes no)
35956                 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
35957                         b.Reset(BlockFirst)
35958                         return true
35959                 }
35960         case BlockAMD64UGT:
35961                 // match: (UGT (InvertFlags cmp) yes no)
35962                 // result: (ULT cmp yes no)
35963                 for b.Controls[0].Op == OpAMD64InvertFlags {
35964                         v_0 := b.Controls[0]
35965                         cmp := v_0.Args[0]
35966                         b.resetWithControl(BlockAMD64ULT, cmp)
35967                         return true
35968                 }
35969                 // match: (UGT (FlagEQ) yes no)
35970                 // result: (First no yes)
35971                 for b.Controls[0].Op == OpAMD64FlagEQ {
35972                         b.Reset(BlockFirst)
35973                         b.swapSuccessors()
35974                         return true
35975                 }
35976                 // match: (UGT (FlagLT_ULT) yes no)
35977                 // result: (First no yes)
35978                 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
35979                         b.Reset(BlockFirst)
35980                         b.swapSuccessors()
35981                         return true
35982                 }
35983                 // match: (UGT (FlagLT_UGT) yes no)
35984                 // result: (First yes no)
35985                 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
35986                         b.Reset(BlockFirst)
35987                         return true
35988                 }
35989                 // match: (UGT (FlagGT_ULT) yes no)
35990                 // result: (First no yes)
35991                 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
35992                         b.Reset(BlockFirst)
35993                         b.swapSuccessors()
35994                         return true
35995                 }
35996                 // match: (UGT (FlagGT_UGT) yes no)
35997                 // result: (First yes no)
35998                 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
35999                         b.Reset(BlockFirst)
36000                         return true
36001                 }
36002         case BlockAMD64ULE:
36003                 // match: (ULE (InvertFlags cmp) yes no)
36004                 // result: (UGE cmp yes no)
36005                 for b.Controls[0].Op == OpAMD64InvertFlags {
36006                         v_0 := b.Controls[0]
36007                         cmp := v_0.Args[0]
36008                         b.resetWithControl(BlockAMD64UGE, cmp)
36009                         return true
36010                 }
36011                 // match: (ULE (FlagEQ) yes no)
36012                 // result: (First yes no)
36013                 for b.Controls[0].Op == OpAMD64FlagEQ {
36014                         b.Reset(BlockFirst)
36015                         return true
36016                 }
36017                 // match: (ULE (FlagLT_ULT) yes no)
36018                 // result: (First yes no)
36019                 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
36020                         b.Reset(BlockFirst)
36021                         return true
36022                 }
36023                 // match: (ULE (FlagLT_UGT) yes no)
36024                 // result: (First no yes)
36025                 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
36026                         b.Reset(BlockFirst)
36027                         b.swapSuccessors()
36028                         return true
36029                 }
36030                 // match: (ULE (FlagGT_ULT) yes no)
36031                 // result: (First yes no)
36032                 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
36033                         b.Reset(BlockFirst)
36034                         return true
36035                 }
36036                 // match: (ULE (FlagGT_UGT) yes no)
36037                 // result: (First no yes)
36038                 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
36039                         b.Reset(BlockFirst)
36040                         b.swapSuccessors()
36041                         return true
36042                 }
36043         case BlockAMD64ULT:
36044                 // match: (ULT (TESTQ x x) yes no)
36045                 // result: (First no yes)
36046                 for b.Controls[0].Op == OpAMD64TESTQ {
36047                         v_0 := b.Controls[0]
36048                         x := v_0.Args[1]
36049                         if x != v_0.Args[0] {
36050                                 break
36051                         }
36052                         b.Reset(BlockFirst)
36053                         b.swapSuccessors()
36054                         return true
36055                 }
36056                 // match: (ULT (TESTL x x) yes no)
36057                 // result: (First no yes)
36058                 for b.Controls[0].Op == OpAMD64TESTL {
36059                         v_0 := b.Controls[0]
36060                         x := v_0.Args[1]
36061                         if x != v_0.Args[0] {
36062                                 break
36063                         }
36064                         b.Reset(BlockFirst)
36065                         b.swapSuccessors()
36066                         return true
36067                 }
36068                 // match: (ULT (TESTW x x) yes no)
36069                 // result: (First no yes)
36070                 for b.Controls[0].Op == OpAMD64TESTW {
36071                         v_0 := b.Controls[0]
36072                         x := v_0.Args[1]
36073                         if x != v_0.Args[0] {
36074                                 break
36075                         }
36076                         b.Reset(BlockFirst)
36077                         b.swapSuccessors()
36078                         return true
36079                 }
36080                 // match: (ULT (TESTB x x) yes no)
36081                 // result: (First no yes)
36082                 for b.Controls[0].Op == OpAMD64TESTB {
36083                         v_0 := b.Controls[0]
36084                         x := v_0.Args[1]
36085                         if x != v_0.Args[0] {
36086                                 break
36087                         }
36088                         b.Reset(BlockFirst)
36089                         b.swapSuccessors()
36090                         return true
36091                 }
36092                 // match: (ULT (InvertFlags cmp) yes no)
36093                 // result: (UGT cmp yes no)
36094                 for b.Controls[0].Op == OpAMD64InvertFlags {
36095                         v_0 := b.Controls[0]
36096                         cmp := v_0.Args[0]
36097                         b.resetWithControl(BlockAMD64UGT, cmp)
36098                         return true
36099                 }
36100                 // match: (ULT (FlagEQ) yes no)
36101                 // result: (First no yes)
36102                 for b.Controls[0].Op == OpAMD64FlagEQ {
36103                         b.Reset(BlockFirst)
36104                         b.swapSuccessors()
36105                         return true
36106                 }
36107                 // match: (ULT (FlagLT_ULT) yes no)
36108                 // result: (First yes no)
36109                 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
36110                         b.Reset(BlockFirst)
36111                         return true
36112                 }
36113                 // match: (ULT (FlagLT_UGT) yes no)
36114                 // result: (First no yes)
36115                 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
36116                         b.Reset(BlockFirst)
36117                         b.swapSuccessors()
36118                         return true
36119                 }
36120                 // match: (ULT (FlagGT_ULT) yes no)
36121                 // result: (First yes no)
36122                 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
36123                         b.Reset(BlockFirst)
36124                         return true
36125                 }
36126                 // match: (ULT (FlagGT_UGT) yes no)
36127                 // result: (First no yes)
36128                 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
36129                         b.Reset(BlockFirst)
36130                         b.swapSuccessors()
36131                         return true
36132                 }
36133         }
36134         return false
36135 }