1 // Code generated from gen/AMD64.rules; DO NOT EDIT.
2 // generated with: cd gen; go run *.go
7 import "cmd/internal/obj"
8 import "cmd/internal/objabi"
9 import "cmd/compile/internal/types"
11 func rewriteValueAMD64(v *Value) bool {
14 return rewriteValueAMD64_OpAMD64ADCQ(v)
15 case OpAMD64ADCQconst:
16 return rewriteValueAMD64_OpAMD64ADCQconst(v)
18 return rewriteValueAMD64_OpAMD64ADDL(v)
19 case OpAMD64ADDLconst:
20 return rewriteValueAMD64_OpAMD64ADDLconst(v)
21 case OpAMD64ADDLconstmodify:
22 return rewriteValueAMD64_OpAMD64ADDLconstmodify(v)
24 return rewriteValueAMD64_OpAMD64ADDLload(v)
25 case OpAMD64ADDLmodify:
26 return rewriteValueAMD64_OpAMD64ADDLmodify(v)
28 return rewriteValueAMD64_OpAMD64ADDQ(v)
29 case OpAMD64ADDQcarry:
30 return rewriteValueAMD64_OpAMD64ADDQcarry(v)
31 case OpAMD64ADDQconst:
32 return rewriteValueAMD64_OpAMD64ADDQconst(v)
33 case OpAMD64ADDQconstmodify:
34 return rewriteValueAMD64_OpAMD64ADDQconstmodify(v)
36 return rewriteValueAMD64_OpAMD64ADDQload(v)
37 case OpAMD64ADDQmodify:
38 return rewriteValueAMD64_OpAMD64ADDQmodify(v)
40 return rewriteValueAMD64_OpAMD64ADDSD(v)
41 case OpAMD64ADDSDload:
42 return rewriteValueAMD64_OpAMD64ADDSDload(v)
44 return rewriteValueAMD64_OpAMD64ADDSS(v)
45 case OpAMD64ADDSSload:
46 return rewriteValueAMD64_OpAMD64ADDSSload(v)
48 return rewriteValueAMD64_OpAMD64ANDL(v)
49 case OpAMD64ANDLconst:
50 return rewriteValueAMD64_OpAMD64ANDLconst(v)
51 case OpAMD64ANDLconstmodify:
52 return rewriteValueAMD64_OpAMD64ANDLconstmodify(v)
54 return rewriteValueAMD64_OpAMD64ANDLload(v)
55 case OpAMD64ANDLmodify:
56 return rewriteValueAMD64_OpAMD64ANDLmodify(v)
58 return rewriteValueAMD64_OpAMD64ANDQ(v)
59 case OpAMD64ANDQconst:
60 return rewriteValueAMD64_OpAMD64ANDQconst(v)
61 case OpAMD64ANDQconstmodify:
62 return rewriteValueAMD64_OpAMD64ANDQconstmodify(v)
64 return rewriteValueAMD64_OpAMD64ANDQload(v)
65 case OpAMD64ANDQmodify:
66 return rewriteValueAMD64_OpAMD64ANDQmodify(v)
68 return rewriteValueAMD64_OpAMD64BSFQ(v)
69 case OpAMD64BTCLconst:
70 return rewriteValueAMD64_OpAMD64BTCLconst(v)
71 case OpAMD64BTCLconstmodify:
72 return rewriteValueAMD64_OpAMD64BTCLconstmodify(v)
73 case OpAMD64BTCLmodify:
74 return rewriteValueAMD64_OpAMD64BTCLmodify(v)
75 case OpAMD64BTCQconst:
76 return rewriteValueAMD64_OpAMD64BTCQconst(v)
77 case OpAMD64BTCQconstmodify:
78 return rewriteValueAMD64_OpAMD64BTCQconstmodify(v)
79 case OpAMD64BTCQmodify:
80 return rewriteValueAMD64_OpAMD64BTCQmodify(v)
82 return rewriteValueAMD64_OpAMD64BTLconst(v)
84 return rewriteValueAMD64_OpAMD64BTQconst(v)
85 case OpAMD64BTRLconst:
86 return rewriteValueAMD64_OpAMD64BTRLconst(v)
87 case OpAMD64BTRLconstmodify:
88 return rewriteValueAMD64_OpAMD64BTRLconstmodify(v)
89 case OpAMD64BTRLmodify:
90 return rewriteValueAMD64_OpAMD64BTRLmodify(v)
91 case OpAMD64BTRQconst:
92 return rewriteValueAMD64_OpAMD64BTRQconst(v)
93 case OpAMD64BTRQconstmodify:
94 return rewriteValueAMD64_OpAMD64BTRQconstmodify(v)
95 case OpAMD64BTRQmodify:
96 return rewriteValueAMD64_OpAMD64BTRQmodify(v)
97 case OpAMD64BTSLconst:
98 return rewriteValueAMD64_OpAMD64BTSLconst(v)
99 case OpAMD64BTSLconstmodify:
100 return rewriteValueAMD64_OpAMD64BTSLconstmodify(v)
101 case OpAMD64BTSLmodify:
102 return rewriteValueAMD64_OpAMD64BTSLmodify(v)
103 case OpAMD64BTSQconst:
104 return rewriteValueAMD64_OpAMD64BTSQconst(v)
105 case OpAMD64BTSQconstmodify:
106 return rewriteValueAMD64_OpAMD64BTSQconstmodify(v)
107 case OpAMD64BTSQmodify:
108 return rewriteValueAMD64_OpAMD64BTSQmodify(v)
110 return rewriteValueAMD64_OpAMD64CMOVLCC(v)
112 return rewriteValueAMD64_OpAMD64CMOVLCS(v)
114 return rewriteValueAMD64_OpAMD64CMOVLEQ(v)
116 return rewriteValueAMD64_OpAMD64CMOVLGE(v)
118 return rewriteValueAMD64_OpAMD64CMOVLGT(v)
120 return rewriteValueAMD64_OpAMD64CMOVLHI(v)
122 return rewriteValueAMD64_OpAMD64CMOVLLE(v)
124 return rewriteValueAMD64_OpAMD64CMOVLLS(v)
126 return rewriteValueAMD64_OpAMD64CMOVLLT(v)
128 return rewriteValueAMD64_OpAMD64CMOVLNE(v)
130 return rewriteValueAMD64_OpAMD64CMOVQCC(v)
132 return rewriteValueAMD64_OpAMD64CMOVQCS(v)
134 return rewriteValueAMD64_OpAMD64CMOVQEQ(v)
136 return rewriteValueAMD64_OpAMD64CMOVQGE(v)
138 return rewriteValueAMD64_OpAMD64CMOVQGT(v)
140 return rewriteValueAMD64_OpAMD64CMOVQHI(v)
142 return rewriteValueAMD64_OpAMD64CMOVQLE(v)
144 return rewriteValueAMD64_OpAMD64CMOVQLS(v)
146 return rewriteValueAMD64_OpAMD64CMOVQLT(v)
148 return rewriteValueAMD64_OpAMD64CMOVQNE(v)
150 return rewriteValueAMD64_OpAMD64CMOVWCC(v)
152 return rewriteValueAMD64_OpAMD64CMOVWCS(v)
154 return rewriteValueAMD64_OpAMD64CMOVWEQ(v)
156 return rewriteValueAMD64_OpAMD64CMOVWGE(v)
158 return rewriteValueAMD64_OpAMD64CMOVWGT(v)
160 return rewriteValueAMD64_OpAMD64CMOVWHI(v)
162 return rewriteValueAMD64_OpAMD64CMOVWLE(v)
164 return rewriteValueAMD64_OpAMD64CMOVWLS(v)
166 return rewriteValueAMD64_OpAMD64CMOVWLT(v)
168 return rewriteValueAMD64_OpAMD64CMOVWNE(v)
170 return rewriteValueAMD64_OpAMD64CMPB(v)
171 case OpAMD64CMPBconst:
172 return rewriteValueAMD64_OpAMD64CMPBconst(v)
173 case OpAMD64CMPBconstload:
174 return rewriteValueAMD64_OpAMD64CMPBconstload(v)
175 case OpAMD64CMPBload:
176 return rewriteValueAMD64_OpAMD64CMPBload(v)
178 return rewriteValueAMD64_OpAMD64CMPL(v)
179 case OpAMD64CMPLconst:
180 return rewriteValueAMD64_OpAMD64CMPLconst(v)
181 case OpAMD64CMPLconstload:
182 return rewriteValueAMD64_OpAMD64CMPLconstload(v)
183 case OpAMD64CMPLload:
184 return rewriteValueAMD64_OpAMD64CMPLload(v)
186 return rewriteValueAMD64_OpAMD64CMPQ(v)
187 case OpAMD64CMPQconst:
188 return rewriteValueAMD64_OpAMD64CMPQconst(v)
189 case OpAMD64CMPQconstload:
190 return rewriteValueAMD64_OpAMD64CMPQconstload(v)
191 case OpAMD64CMPQload:
192 return rewriteValueAMD64_OpAMD64CMPQload(v)
194 return rewriteValueAMD64_OpAMD64CMPW(v)
195 case OpAMD64CMPWconst:
196 return rewriteValueAMD64_OpAMD64CMPWconst(v)
197 case OpAMD64CMPWconstload:
198 return rewriteValueAMD64_OpAMD64CMPWconstload(v)
199 case OpAMD64CMPWload:
200 return rewriteValueAMD64_OpAMD64CMPWload(v)
201 case OpAMD64CMPXCHGLlock:
202 return rewriteValueAMD64_OpAMD64CMPXCHGLlock(v)
203 case OpAMD64CMPXCHGQlock:
204 return rewriteValueAMD64_OpAMD64CMPXCHGQlock(v)
206 return rewriteValueAMD64_OpAMD64DIVSD(v)
207 case OpAMD64DIVSDload:
208 return rewriteValueAMD64_OpAMD64DIVSDload(v)
210 return rewriteValueAMD64_OpAMD64DIVSS(v)
211 case OpAMD64DIVSSload:
212 return rewriteValueAMD64_OpAMD64DIVSSload(v)
214 return rewriteValueAMD64_OpAMD64HMULL(v)
216 return rewriteValueAMD64_OpAMD64HMULLU(v)
218 return rewriteValueAMD64_OpAMD64HMULQ(v)
220 return rewriteValueAMD64_OpAMD64HMULQU(v)
222 return rewriteValueAMD64_OpAMD64LEAL(v)
224 return rewriteValueAMD64_OpAMD64LEAL1(v)
226 return rewriteValueAMD64_OpAMD64LEAL2(v)
228 return rewriteValueAMD64_OpAMD64LEAL4(v)
230 return rewriteValueAMD64_OpAMD64LEAL8(v)
232 return rewriteValueAMD64_OpAMD64LEAQ(v)
234 return rewriteValueAMD64_OpAMD64LEAQ1(v)
236 return rewriteValueAMD64_OpAMD64LEAQ2(v)
238 return rewriteValueAMD64_OpAMD64LEAQ4(v)
240 return rewriteValueAMD64_OpAMD64LEAQ8(v)
242 return rewriteValueAMD64_OpAMD64MOVBQSX(v)
243 case OpAMD64MOVBQSXload:
244 return rewriteValueAMD64_OpAMD64MOVBQSXload(v)
246 return rewriteValueAMD64_OpAMD64MOVBQZX(v)
247 case OpAMD64MOVBatomicload:
248 return rewriteValueAMD64_OpAMD64MOVBatomicload(v)
249 case OpAMD64MOVBload:
250 return rewriteValueAMD64_OpAMD64MOVBload(v)
251 case OpAMD64MOVBstore:
252 return rewriteValueAMD64_OpAMD64MOVBstore(v)
253 case OpAMD64MOVBstoreconst:
254 return rewriteValueAMD64_OpAMD64MOVBstoreconst(v)
256 return rewriteValueAMD64_OpAMD64MOVLQSX(v)
257 case OpAMD64MOVLQSXload:
258 return rewriteValueAMD64_OpAMD64MOVLQSXload(v)
260 return rewriteValueAMD64_OpAMD64MOVLQZX(v)
261 case OpAMD64MOVLatomicload:
262 return rewriteValueAMD64_OpAMD64MOVLatomicload(v)
264 return rewriteValueAMD64_OpAMD64MOVLf2i(v)
266 return rewriteValueAMD64_OpAMD64MOVLi2f(v)
267 case OpAMD64MOVLload:
268 return rewriteValueAMD64_OpAMD64MOVLload(v)
269 case OpAMD64MOVLstore:
270 return rewriteValueAMD64_OpAMD64MOVLstore(v)
271 case OpAMD64MOVLstoreconst:
272 return rewriteValueAMD64_OpAMD64MOVLstoreconst(v)
273 case OpAMD64MOVOload:
274 return rewriteValueAMD64_OpAMD64MOVOload(v)
275 case OpAMD64MOVOstore:
276 return rewriteValueAMD64_OpAMD64MOVOstore(v)
277 case OpAMD64MOVQatomicload:
278 return rewriteValueAMD64_OpAMD64MOVQatomicload(v)
280 return rewriteValueAMD64_OpAMD64MOVQf2i(v)
282 return rewriteValueAMD64_OpAMD64MOVQi2f(v)
283 case OpAMD64MOVQload:
284 return rewriteValueAMD64_OpAMD64MOVQload(v)
285 case OpAMD64MOVQstore:
286 return rewriteValueAMD64_OpAMD64MOVQstore(v)
287 case OpAMD64MOVQstoreconst:
288 return rewriteValueAMD64_OpAMD64MOVQstoreconst(v)
289 case OpAMD64MOVSDload:
290 return rewriteValueAMD64_OpAMD64MOVSDload(v)
291 case OpAMD64MOVSDstore:
292 return rewriteValueAMD64_OpAMD64MOVSDstore(v)
293 case OpAMD64MOVSSload:
294 return rewriteValueAMD64_OpAMD64MOVSSload(v)
295 case OpAMD64MOVSSstore:
296 return rewriteValueAMD64_OpAMD64MOVSSstore(v)
298 return rewriteValueAMD64_OpAMD64MOVWQSX(v)
299 case OpAMD64MOVWQSXload:
300 return rewriteValueAMD64_OpAMD64MOVWQSXload(v)
302 return rewriteValueAMD64_OpAMD64MOVWQZX(v)
303 case OpAMD64MOVWload:
304 return rewriteValueAMD64_OpAMD64MOVWload(v)
305 case OpAMD64MOVWstore:
306 return rewriteValueAMD64_OpAMD64MOVWstore(v)
307 case OpAMD64MOVWstoreconst:
308 return rewriteValueAMD64_OpAMD64MOVWstoreconst(v)
310 return rewriteValueAMD64_OpAMD64MULL(v)
311 case OpAMD64MULLconst:
312 return rewriteValueAMD64_OpAMD64MULLconst(v)
314 return rewriteValueAMD64_OpAMD64MULQ(v)
315 case OpAMD64MULQconst:
316 return rewriteValueAMD64_OpAMD64MULQconst(v)
318 return rewriteValueAMD64_OpAMD64MULSD(v)
319 case OpAMD64MULSDload:
320 return rewriteValueAMD64_OpAMD64MULSDload(v)
322 return rewriteValueAMD64_OpAMD64MULSS(v)
323 case OpAMD64MULSSload:
324 return rewriteValueAMD64_OpAMD64MULSSload(v)
326 return rewriteValueAMD64_OpAMD64NEGL(v)
328 return rewriteValueAMD64_OpAMD64NEGQ(v)
330 return rewriteValueAMD64_OpAMD64NOTL(v)
332 return rewriteValueAMD64_OpAMD64NOTQ(v)
334 return rewriteValueAMD64_OpAMD64ORL(v)
335 case OpAMD64ORLconst:
336 return rewriteValueAMD64_OpAMD64ORLconst(v)
337 case OpAMD64ORLconstmodify:
338 return rewriteValueAMD64_OpAMD64ORLconstmodify(v)
340 return rewriteValueAMD64_OpAMD64ORLload(v)
341 case OpAMD64ORLmodify:
342 return rewriteValueAMD64_OpAMD64ORLmodify(v)
344 return rewriteValueAMD64_OpAMD64ORQ(v)
345 case OpAMD64ORQconst:
346 return rewriteValueAMD64_OpAMD64ORQconst(v)
347 case OpAMD64ORQconstmodify:
348 return rewriteValueAMD64_OpAMD64ORQconstmodify(v)
350 return rewriteValueAMD64_OpAMD64ORQload(v)
351 case OpAMD64ORQmodify:
352 return rewriteValueAMD64_OpAMD64ORQmodify(v)
354 return rewriteValueAMD64_OpAMD64ROLB(v)
355 case OpAMD64ROLBconst:
356 return rewriteValueAMD64_OpAMD64ROLBconst(v)
358 return rewriteValueAMD64_OpAMD64ROLL(v)
359 case OpAMD64ROLLconst:
360 return rewriteValueAMD64_OpAMD64ROLLconst(v)
362 return rewriteValueAMD64_OpAMD64ROLQ(v)
363 case OpAMD64ROLQconst:
364 return rewriteValueAMD64_OpAMD64ROLQconst(v)
366 return rewriteValueAMD64_OpAMD64ROLW(v)
367 case OpAMD64ROLWconst:
368 return rewriteValueAMD64_OpAMD64ROLWconst(v)
370 return rewriteValueAMD64_OpAMD64RORB(v)
372 return rewriteValueAMD64_OpAMD64RORL(v)
374 return rewriteValueAMD64_OpAMD64RORQ(v)
376 return rewriteValueAMD64_OpAMD64RORW(v)
378 return rewriteValueAMD64_OpAMD64SARB(v)
379 case OpAMD64SARBconst:
380 return rewriteValueAMD64_OpAMD64SARBconst(v)
382 return rewriteValueAMD64_OpAMD64SARL(v)
383 case OpAMD64SARLconst:
384 return rewriteValueAMD64_OpAMD64SARLconst(v)
386 return rewriteValueAMD64_OpAMD64SARQ(v)
387 case OpAMD64SARQconst:
388 return rewriteValueAMD64_OpAMD64SARQconst(v)
390 return rewriteValueAMD64_OpAMD64SARW(v)
391 case OpAMD64SARWconst:
392 return rewriteValueAMD64_OpAMD64SARWconst(v)
393 case OpAMD64SBBLcarrymask:
394 return rewriteValueAMD64_OpAMD64SBBLcarrymask(v)
396 return rewriteValueAMD64_OpAMD64SBBQ(v)
397 case OpAMD64SBBQcarrymask:
398 return rewriteValueAMD64_OpAMD64SBBQcarrymask(v)
399 case OpAMD64SBBQconst:
400 return rewriteValueAMD64_OpAMD64SBBQconst(v)
402 return rewriteValueAMD64_OpAMD64SETA(v)
404 return rewriteValueAMD64_OpAMD64SETAE(v)
405 case OpAMD64SETAEstore:
406 return rewriteValueAMD64_OpAMD64SETAEstore(v)
407 case OpAMD64SETAstore:
408 return rewriteValueAMD64_OpAMD64SETAstore(v)
410 return rewriteValueAMD64_OpAMD64SETB(v)
412 return rewriteValueAMD64_OpAMD64SETBE(v)
413 case OpAMD64SETBEstore:
414 return rewriteValueAMD64_OpAMD64SETBEstore(v)
415 case OpAMD64SETBstore:
416 return rewriteValueAMD64_OpAMD64SETBstore(v)
418 return rewriteValueAMD64_OpAMD64SETEQ(v)
419 case OpAMD64SETEQstore:
420 return rewriteValueAMD64_OpAMD64SETEQstore(v)
422 return rewriteValueAMD64_OpAMD64SETG(v)
424 return rewriteValueAMD64_OpAMD64SETGE(v)
425 case OpAMD64SETGEstore:
426 return rewriteValueAMD64_OpAMD64SETGEstore(v)
427 case OpAMD64SETGstore:
428 return rewriteValueAMD64_OpAMD64SETGstore(v)
430 return rewriteValueAMD64_OpAMD64SETL(v)
432 return rewriteValueAMD64_OpAMD64SETLE(v)
433 case OpAMD64SETLEstore:
434 return rewriteValueAMD64_OpAMD64SETLEstore(v)
435 case OpAMD64SETLstore:
436 return rewriteValueAMD64_OpAMD64SETLstore(v)
438 return rewriteValueAMD64_OpAMD64SETNE(v)
439 case OpAMD64SETNEstore:
440 return rewriteValueAMD64_OpAMD64SETNEstore(v)
442 return rewriteValueAMD64_OpAMD64SHLL(v)
443 case OpAMD64SHLLconst:
444 return rewriteValueAMD64_OpAMD64SHLLconst(v)
446 return rewriteValueAMD64_OpAMD64SHLQ(v)
447 case OpAMD64SHLQconst:
448 return rewriteValueAMD64_OpAMD64SHLQconst(v)
450 return rewriteValueAMD64_OpAMD64SHRB(v)
451 case OpAMD64SHRBconst:
452 return rewriteValueAMD64_OpAMD64SHRBconst(v)
454 return rewriteValueAMD64_OpAMD64SHRL(v)
455 case OpAMD64SHRLconst:
456 return rewriteValueAMD64_OpAMD64SHRLconst(v)
458 return rewriteValueAMD64_OpAMD64SHRQ(v)
459 case OpAMD64SHRQconst:
460 return rewriteValueAMD64_OpAMD64SHRQconst(v)
462 return rewriteValueAMD64_OpAMD64SHRW(v)
463 case OpAMD64SHRWconst:
464 return rewriteValueAMD64_OpAMD64SHRWconst(v)
466 return rewriteValueAMD64_OpAMD64SUBL(v)
467 case OpAMD64SUBLconst:
468 return rewriteValueAMD64_OpAMD64SUBLconst(v)
469 case OpAMD64SUBLload:
470 return rewriteValueAMD64_OpAMD64SUBLload(v)
471 case OpAMD64SUBLmodify:
472 return rewriteValueAMD64_OpAMD64SUBLmodify(v)
474 return rewriteValueAMD64_OpAMD64SUBQ(v)
475 case OpAMD64SUBQborrow:
476 return rewriteValueAMD64_OpAMD64SUBQborrow(v)
477 case OpAMD64SUBQconst:
478 return rewriteValueAMD64_OpAMD64SUBQconst(v)
479 case OpAMD64SUBQload:
480 return rewriteValueAMD64_OpAMD64SUBQload(v)
481 case OpAMD64SUBQmodify:
482 return rewriteValueAMD64_OpAMD64SUBQmodify(v)
484 return rewriteValueAMD64_OpAMD64SUBSD(v)
485 case OpAMD64SUBSDload:
486 return rewriteValueAMD64_OpAMD64SUBSDload(v)
488 return rewriteValueAMD64_OpAMD64SUBSS(v)
489 case OpAMD64SUBSSload:
490 return rewriteValueAMD64_OpAMD64SUBSSload(v)
492 return rewriteValueAMD64_OpAMD64TESTB(v)
493 case OpAMD64TESTBconst:
494 return rewriteValueAMD64_OpAMD64TESTBconst(v)
496 return rewriteValueAMD64_OpAMD64TESTL(v)
497 case OpAMD64TESTLconst:
498 return rewriteValueAMD64_OpAMD64TESTLconst(v)
500 return rewriteValueAMD64_OpAMD64TESTQ(v)
501 case OpAMD64TESTQconst:
502 return rewriteValueAMD64_OpAMD64TESTQconst(v)
504 return rewriteValueAMD64_OpAMD64TESTW(v)
505 case OpAMD64TESTWconst:
506 return rewriteValueAMD64_OpAMD64TESTWconst(v)
507 case OpAMD64XADDLlock:
508 return rewriteValueAMD64_OpAMD64XADDLlock(v)
509 case OpAMD64XADDQlock:
510 return rewriteValueAMD64_OpAMD64XADDQlock(v)
512 return rewriteValueAMD64_OpAMD64XCHGL(v)
514 return rewriteValueAMD64_OpAMD64XCHGQ(v)
516 return rewriteValueAMD64_OpAMD64XORL(v)
517 case OpAMD64XORLconst:
518 return rewriteValueAMD64_OpAMD64XORLconst(v)
519 case OpAMD64XORLconstmodify:
520 return rewriteValueAMD64_OpAMD64XORLconstmodify(v)
521 case OpAMD64XORLload:
522 return rewriteValueAMD64_OpAMD64XORLload(v)
523 case OpAMD64XORLmodify:
524 return rewriteValueAMD64_OpAMD64XORLmodify(v)
526 return rewriteValueAMD64_OpAMD64XORQ(v)
527 case OpAMD64XORQconst:
528 return rewriteValueAMD64_OpAMD64XORQconst(v)
529 case OpAMD64XORQconstmodify:
530 return rewriteValueAMD64_OpAMD64XORQconstmodify(v)
531 case OpAMD64XORQload:
532 return rewriteValueAMD64_OpAMD64XORQload(v)
533 case OpAMD64XORQmodify:
534 return rewriteValueAMD64_OpAMD64XORQmodify(v)
557 return rewriteValueAMD64_OpAddr(v)
574 return rewriteValueAMD64_OpAtomicAdd32(v)
576 return rewriteValueAMD64_OpAtomicAdd64(v)
578 return rewriteValueAMD64_OpAtomicAnd32(v)
580 return rewriteValueAMD64_OpAtomicAnd8(v)
581 case OpAtomicCompareAndSwap32:
582 return rewriteValueAMD64_OpAtomicCompareAndSwap32(v)
583 case OpAtomicCompareAndSwap64:
584 return rewriteValueAMD64_OpAtomicCompareAndSwap64(v)
585 case OpAtomicExchange32:
586 return rewriteValueAMD64_OpAtomicExchange32(v)
587 case OpAtomicExchange64:
588 return rewriteValueAMD64_OpAtomicExchange64(v)
590 return rewriteValueAMD64_OpAtomicLoad32(v)
592 return rewriteValueAMD64_OpAtomicLoad64(v)
594 return rewriteValueAMD64_OpAtomicLoad8(v)
595 case OpAtomicLoadPtr:
596 return rewriteValueAMD64_OpAtomicLoadPtr(v)
598 return rewriteValueAMD64_OpAtomicOr32(v)
600 return rewriteValueAMD64_OpAtomicOr8(v)
601 case OpAtomicStore32:
602 return rewriteValueAMD64_OpAtomicStore32(v)
603 case OpAtomicStore64:
604 return rewriteValueAMD64_OpAtomicStore64(v)
606 return rewriteValueAMD64_OpAtomicStore8(v)
607 case OpAtomicStorePtrNoWB:
608 return rewriteValueAMD64_OpAtomicStorePtrNoWB(v)
613 return rewriteValueAMD64_OpBitLen16(v)
615 return rewriteValueAMD64_OpBitLen32(v)
617 return rewriteValueAMD64_OpBitLen64(v)
619 return rewriteValueAMD64_OpBitLen8(v)
627 return rewriteValueAMD64_OpCeil(v)
629 v.Op = OpAMD64CALLclosure
644 return rewriteValueAMD64_OpCondSelect(v)
646 return rewriteValueAMD64_OpConst16(v)
648 v.Op = OpAMD64MOVLconst
651 v.Op = OpAMD64MOVSSconst
654 v.Op = OpAMD64MOVQconst
657 v.Op = OpAMD64MOVSDconst
660 return rewriteValueAMD64_OpConst8(v)
662 return rewriteValueAMD64_OpConstBool(v)
664 return rewriteValueAMD64_OpConstNil(v)
666 return rewriteValueAMD64_OpCtz16(v)
671 return rewriteValueAMD64_OpCtz32(v)
676 return rewriteValueAMD64_OpCtz64(v)
678 return rewriteValueAMD64_OpCtz64NonZero(v)
680 return rewriteValueAMD64_OpCtz8(v)
685 v.Op = OpAMD64CVTTSS2SL
688 v.Op = OpAMD64CVTTSS2SQ
691 v.Op = OpAMD64CVTSS2SD
694 v.Op = OpAMD64CVTSL2SS
697 v.Op = OpAMD64CVTSL2SD
700 v.Op = OpAMD64CVTTSD2SL
703 v.Op = OpAMD64CVTSD2SS
706 v.Op = OpAMD64CVTTSD2SQ
709 v.Op = OpAMD64CVTSQ2SS
712 v.Op = OpAMD64CVTSQ2SD
714 case OpCvtBoolToUint8:
721 return rewriteValueAMD64_OpDiv16(v)
723 return rewriteValueAMD64_OpDiv16u(v)
725 return rewriteValueAMD64_OpDiv32(v)
730 return rewriteValueAMD64_OpDiv32u(v)
732 return rewriteValueAMD64_OpDiv64(v)
737 return rewriteValueAMD64_OpDiv64u(v)
739 return rewriteValueAMD64_OpDiv8(v)
741 return rewriteValueAMD64_OpDiv8u(v)
743 return rewriteValueAMD64_OpEq16(v)
745 return rewriteValueAMD64_OpEq32(v)
747 return rewriteValueAMD64_OpEq32F(v)
749 return rewriteValueAMD64_OpEq64(v)
751 return rewriteValueAMD64_OpEq64F(v)
753 return rewriteValueAMD64_OpEq8(v)
755 return rewriteValueAMD64_OpEqB(v)
757 return rewriteValueAMD64_OpEqPtr(v)
759 return rewriteValueAMD64_OpFMA(v)
761 return rewriteValueAMD64_OpFloor(v)
763 v.Op = OpAMD64LoweredGetCallerPC
766 v.Op = OpAMD64LoweredGetCallerSP
768 case OpGetClosurePtr:
769 v.Op = OpAMD64LoweredGetClosurePtr
772 return rewriteValueAMD64_OpGetG(v)
773 case OpHasCPUFeature:
774 return rewriteValueAMD64_OpHasCPUFeature(v)
788 v.Op = OpAMD64CALLinter
791 return rewriteValueAMD64_OpIsInBounds(v)
793 return rewriteValueAMD64_OpIsNonNil(v)
794 case OpIsSliceInBounds:
795 return rewriteValueAMD64_OpIsSliceInBounds(v)
797 return rewriteValueAMD64_OpLeq16(v)
799 return rewriteValueAMD64_OpLeq16U(v)
801 return rewriteValueAMD64_OpLeq32(v)
803 return rewriteValueAMD64_OpLeq32F(v)
805 return rewriteValueAMD64_OpLeq32U(v)
807 return rewriteValueAMD64_OpLeq64(v)
809 return rewriteValueAMD64_OpLeq64F(v)
811 return rewriteValueAMD64_OpLeq64U(v)
813 return rewriteValueAMD64_OpLeq8(v)
815 return rewriteValueAMD64_OpLeq8U(v)
817 return rewriteValueAMD64_OpLess16(v)
819 return rewriteValueAMD64_OpLess16U(v)
821 return rewriteValueAMD64_OpLess32(v)
823 return rewriteValueAMD64_OpLess32F(v)
825 return rewriteValueAMD64_OpLess32U(v)
827 return rewriteValueAMD64_OpLess64(v)
829 return rewriteValueAMD64_OpLess64F(v)
831 return rewriteValueAMD64_OpLess64U(v)
833 return rewriteValueAMD64_OpLess8(v)
835 return rewriteValueAMD64_OpLess8U(v)
837 return rewriteValueAMD64_OpLoad(v)
839 return rewriteValueAMD64_OpLocalAddr(v)
841 return rewriteValueAMD64_OpLsh16x16(v)
843 return rewriteValueAMD64_OpLsh16x32(v)
845 return rewriteValueAMD64_OpLsh16x64(v)
847 return rewriteValueAMD64_OpLsh16x8(v)
849 return rewriteValueAMD64_OpLsh32x16(v)
851 return rewriteValueAMD64_OpLsh32x32(v)
853 return rewriteValueAMD64_OpLsh32x64(v)
855 return rewriteValueAMD64_OpLsh32x8(v)
857 return rewriteValueAMD64_OpLsh64x16(v)
859 return rewriteValueAMD64_OpLsh64x32(v)
861 return rewriteValueAMD64_OpLsh64x64(v)
863 return rewriteValueAMD64_OpLsh64x8(v)
865 return rewriteValueAMD64_OpLsh8x16(v)
867 return rewriteValueAMD64_OpLsh8x32(v)
869 return rewriteValueAMD64_OpLsh8x64(v)
871 return rewriteValueAMD64_OpLsh8x8(v)
873 return rewriteValueAMD64_OpMod16(v)
875 return rewriteValueAMD64_OpMod16u(v)
877 return rewriteValueAMD64_OpMod32(v)
879 return rewriteValueAMD64_OpMod32u(v)
881 return rewriteValueAMD64_OpMod64(v)
883 return rewriteValueAMD64_OpMod64u(v)
885 return rewriteValueAMD64_OpMod8(v)
887 return rewriteValueAMD64_OpMod8u(v)
889 return rewriteValueAMD64_OpMove(v)
918 return rewriteValueAMD64_OpNeg32F(v)
923 return rewriteValueAMD64_OpNeg64F(v)
928 return rewriteValueAMD64_OpNeq16(v)
930 return rewriteValueAMD64_OpNeq32(v)
932 return rewriteValueAMD64_OpNeq32F(v)
934 return rewriteValueAMD64_OpNeq64(v)
936 return rewriteValueAMD64_OpNeq64F(v)
938 return rewriteValueAMD64_OpNeq8(v)
940 return rewriteValueAMD64_OpNeqB(v)
942 return rewriteValueAMD64_OpNeqPtr(v)
944 v.Op = OpAMD64LoweredNilCheck
947 return rewriteValueAMD64_OpNot(v)
949 return rewriteValueAMD64_OpOffPtr(v)
966 return rewriteValueAMD64_OpPanicBounds(v)
968 return rewriteValueAMD64_OpPopCount16(v)
970 v.Op = OpAMD64POPCNTL
973 v.Op = OpAMD64POPCNTQ
976 return rewriteValueAMD64_OpPopCount8(v)
996 return rewriteValueAMD64_OpRoundToEven(v)
998 return rewriteValueAMD64_OpRsh16Ux16(v)
1000 return rewriteValueAMD64_OpRsh16Ux32(v)
1002 return rewriteValueAMD64_OpRsh16Ux64(v)
1004 return rewriteValueAMD64_OpRsh16Ux8(v)
1006 return rewriteValueAMD64_OpRsh16x16(v)
1008 return rewriteValueAMD64_OpRsh16x32(v)
1010 return rewriteValueAMD64_OpRsh16x64(v)
1012 return rewriteValueAMD64_OpRsh16x8(v)
1014 return rewriteValueAMD64_OpRsh32Ux16(v)
1016 return rewriteValueAMD64_OpRsh32Ux32(v)
1018 return rewriteValueAMD64_OpRsh32Ux64(v)
1020 return rewriteValueAMD64_OpRsh32Ux8(v)
1022 return rewriteValueAMD64_OpRsh32x16(v)
1024 return rewriteValueAMD64_OpRsh32x32(v)
1026 return rewriteValueAMD64_OpRsh32x64(v)
1028 return rewriteValueAMD64_OpRsh32x8(v)
1030 return rewriteValueAMD64_OpRsh64Ux16(v)
1032 return rewriteValueAMD64_OpRsh64Ux32(v)
1034 return rewriteValueAMD64_OpRsh64Ux64(v)
1036 return rewriteValueAMD64_OpRsh64Ux8(v)
1038 return rewriteValueAMD64_OpRsh64x16(v)
1040 return rewriteValueAMD64_OpRsh64x32(v)
1042 return rewriteValueAMD64_OpRsh64x64(v)
1044 return rewriteValueAMD64_OpRsh64x8(v)
1046 return rewriteValueAMD64_OpRsh8Ux16(v)
1048 return rewriteValueAMD64_OpRsh8Ux32(v)
1050 return rewriteValueAMD64_OpRsh8Ux64(v)
1052 return rewriteValueAMD64_OpRsh8Ux8(v)
1054 return rewriteValueAMD64_OpRsh8x16(v)
1056 return rewriteValueAMD64_OpRsh8x32(v)
1058 return rewriteValueAMD64_OpRsh8x64(v)
1060 return rewriteValueAMD64_OpRsh8x8(v)
1062 return rewriteValueAMD64_OpSelect0(v)
1064 return rewriteValueAMD64_OpSelect1(v)
1065 case OpSignExt16to32:
1066 v.Op = OpAMD64MOVWQSX
1068 case OpSignExt16to64:
1069 v.Op = OpAMD64MOVWQSX
1071 case OpSignExt32to64:
1072 v.Op = OpAMD64MOVLQSX
1074 case OpSignExt8to16:
1075 v.Op = OpAMD64MOVBQSX
1077 case OpSignExt8to32:
1078 v.Op = OpAMD64MOVBQSX
1080 case OpSignExt8to64:
1081 v.Op = OpAMD64MOVBQSX
1084 return rewriteValueAMD64_OpSlicemask(v)
1085 case OpSpectreIndex:
1086 return rewriteValueAMD64_OpSpectreIndex(v)
1087 case OpSpectreSliceIndex:
1088 return rewriteValueAMD64_OpSpectreSliceIndex(v)
1090 v.Op = OpAMD64SQRTSD
1093 v.Op = OpAMD64SQRTSS
1096 v.Op = OpAMD64CALLstatic
1099 return rewriteValueAMD64_OpStore(v)
1122 return rewriteValueAMD64_OpTrunc(v)
1142 v.Op = OpAMD64LoweredWB
1157 return rewriteValueAMD64_OpZero(v)
1158 case OpZeroExt16to32:
1159 v.Op = OpAMD64MOVWQZX
1161 case OpZeroExt16to64:
1162 v.Op = OpAMD64MOVWQZX
1164 case OpZeroExt32to64:
1165 v.Op = OpAMD64MOVLQZX
1167 case OpZeroExt8to16:
1168 v.Op = OpAMD64MOVBQZX
1170 case OpZeroExt8to32:
1171 v.Op = OpAMD64MOVBQZX
1173 case OpZeroExt8to64:
1174 v.Op = OpAMD64MOVBQZX
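// The functions below are the per-opcode rewriters. Each "match:"/"cond:"/
// "result:" comment names the rule from gen/AMD64.rules being tried; the code
// that follows checks the operand shape and the side condition, then rewrites
// v in place (v.reset, v.AuxInt, v.AddArg*) and returns true as soon as one
// rule fires. For commutative operations the matcher tries both argument
// orders by swapping v_0 and v_1 in the _i0 loop. As an illustrative
// paraphrase (not a verbatim line from the rules file), a rule of the shape
//
//	(ADDL x (MOVLconst [c])) => (ADDLconst [c] x)
//
// expands into the first block of rewriteValueAMD64_OpAMD64ADDL below.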
1179 func rewriteValueAMD64_OpAMD64ADCQ(v *Value) bool {
1183 // match: (ADCQ x (MOVQconst [c]) carry)
1185 // result: (ADCQconst x [int32(c)] carry)
1187 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1189 if v_1.Op != OpAMD64MOVQconst {
1192 c := auxIntToInt64(v_1.AuxInt)
1197 v.reset(OpAMD64ADCQconst)
1198 v.AuxInt = int32ToAuxInt(int32(c))
1204 // match: (ADCQ x y (FlagEQ))
1205 // result: (ADDQcarry x y)
1209 if v_2.Op != OpAMD64FlagEQ {
1212 v.reset(OpAMD64ADDQcarry)
1218 func rewriteValueAMD64_OpAMD64ADCQconst(v *Value) bool {
1221 // match: (ADCQconst x [c] (FlagEQ))
1222 // result: (ADDQconstcarry x [c])
1224 c := auxIntToInt32(v.AuxInt)
1226 if v_1.Op != OpAMD64FlagEQ {
1229 v.reset(OpAMD64ADDQconstcarry)
1230 v.AuxInt = int32ToAuxInt(c)
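// rewriteValueAMD64_OpAMD64ADDL folds constant operands into ADDLconst,
// recognizes shift pairs that form rotates (ROLLconst/ROLWconst/ROLBconst),
// turns adds of shifted or doubled operands into LEAL1/LEAL2/LEAL4/LEAL8
// address arithmetic, rewrites x + (-y) as SUBL, and merges a foldable
// MOVLload operand into ADDLload.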
1236 func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool {
1239 // match: (ADDL x (MOVLconst [c]))
1240 // result: (ADDLconst [c] x)
1242 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1244 if v_1.Op != OpAMD64MOVLconst {
1247 c := auxIntToInt32(v_1.AuxInt)
1248 v.reset(OpAMD64ADDLconst)
1249 v.AuxInt = int32ToAuxInt(c)
1255 // match: (ADDL (SHLLconst x [c]) (SHRLconst x [d]))
1257 // result: (ROLLconst x [c])
1259 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1260 if v_0.Op != OpAMD64SHLLconst {
1263 c := auxIntToInt8(v_0.AuxInt)
1265 if v_1.Op != OpAMD64SHRLconst {
1268 d := auxIntToInt8(v_1.AuxInt)
1269 if x != v_1.Args[0] || !(d == 32-c) {
1272 v.reset(OpAMD64ROLLconst)
1273 v.AuxInt = int8ToAuxInt(c)
1279 // match: (ADDL <t> (SHLLconst x [c]) (SHRWconst x [d]))
1280 // cond: d==16-c && c < 16 && t.Size() == 2
1281 // result: (ROLWconst x [c])
1284 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1285 if v_0.Op != OpAMD64SHLLconst {
1288 c := auxIntToInt8(v_0.AuxInt)
1290 if v_1.Op != OpAMD64SHRWconst {
1293 d := auxIntToInt8(v_1.AuxInt)
1294 if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) {
1297 v.reset(OpAMD64ROLWconst)
1298 v.AuxInt = int8ToAuxInt(c)
1304 // match: (ADDL <t> (SHLLconst x [c]) (SHRBconst x [d]))
1305 // cond: d==8-c && c < 8 && t.Size() == 1
1306 // result: (ROLBconst x [c])
1309 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1310 if v_0.Op != OpAMD64SHLLconst {
1313 c := auxIntToInt8(v_0.AuxInt)
1315 if v_1.Op != OpAMD64SHRBconst {
1318 d := auxIntToInt8(v_1.AuxInt)
1319 if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) {
1322 v.reset(OpAMD64ROLBconst)
1323 v.AuxInt = int8ToAuxInt(c)
1329 // match: (ADDL x (SHLLconst [3] y))
1330 // result: (LEAL8 x y)
1332 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1334 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
1338 v.reset(OpAMD64LEAL8)
1344 // match: (ADDL x (SHLLconst [2] y))
1345 // result: (LEAL4 x y)
1347 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1349 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
1353 v.reset(OpAMD64LEAL4)
1359 // match: (ADDL x (SHLLconst [1] y))
1360 // result: (LEAL2 x y)
1362 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1364 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
1368 v.reset(OpAMD64LEAL2)
1374 // match: (ADDL x (ADDL y y))
1375 // result: (LEAL2 x y)
1377 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1379 if v_1.Op != OpAMD64ADDL {
1383 if y != v_1.Args[0] {
1386 v.reset(OpAMD64LEAL2)
1392 // match: (ADDL x (ADDL x y))
1393 // result: (LEAL2 y x)
1395 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1397 if v_1.Op != OpAMD64ADDL {
1401 v_1_0 := v_1.Args[0]
1402 v_1_1 := v_1.Args[1]
1403 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
1408 v.reset(OpAMD64LEAL2)
1415 // match: (ADDL (ADDLconst [c] x) y)
1416 // result: (LEAL1 [c] x y)
1418 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1419 if v_0.Op != OpAMD64ADDLconst {
1422 c := auxIntToInt32(v_0.AuxInt)
1425 v.reset(OpAMD64LEAL1)
1426 v.AuxInt = int32ToAuxInt(c)
1432 // match: (ADDL x (LEAL [c] {s} y))
1433 // cond: x.Op != OpSB && y.Op != OpSB
1434 // result: (LEAL1 [c] {s} x y)
1436 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1438 if v_1.Op != OpAMD64LEAL {
1441 c := auxIntToInt32(v_1.AuxInt)
1442 s := auxToSym(v_1.Aux)
1444 if !(x.Op != OpSB && y.Op != OpSB) {
1447 v.reset(OpAMD64LEAL1)
1448 v.AuxInt = int32ToAuxInt(c)
1455 // match: (ADDL x (NEGL y))
1456 // result: (SUBL x y)
1458 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1460 if v_1.Op != OpAMD64NEGL {
1464 v.reset(OpAMD64SUBL)
1470 // match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
1471 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
1472 // result: (ADDLload x [off] {sym} ptr mem)
1474 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1477 if l.Op != OpAMD64MOVLload {
1480 off := auxIntToInt32(l.AuxInt)
1481 sym := auxToSym(l.Aux)
1484 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
1487 v.reset(OpAMD64ADDLload)
1488 v.AuxInt = int32ToAuxInt(off)
1489 v.Aux = symToAux(sym)
1490 v.AddArg3(x, ptr, mem)
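// rewriteValueAMD64_OpAMD64ADDLconst folds the constant into LEAL forms
// (LEAL, LEAL1, LEAL2, LEAL4, LEAL8) when the combined displacement still
// passes is32Bit, collapses ADDLconst of MOVLconst and of another ADDLconst,
// and lowers an add of a constant to SP into LEAL [off].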
1497 func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool {
1499 // match: (ADDLconst [c] (ADDL x y))
1500 // result: (LEAL1 [c] x y)
1502 c := auxIntToInt32(v.AuxInt)
1503 if v_0.Op != OpAMD64ADDL {
1508 v.reset(OpAMD64LEAL1)
1509 v.AuxInt = int32ToAuxInt(c)
1513 // match: (ADDLconst [c] (SHLLconst [1] x))
1514 // result: (LEAL1 [c] x x)
1516 c := auxIntToInt32(v.AuxInt)
1517 if v_0.Op != OpAMD64SHLLconst || auxIntToInt8(v_0.AuxInt) != 1 {
1521 v.reset(OpAMD64LEAL1)
1522 v.AuxInt = int32ToAuxInt(c)
1526 // match: (ADDLconst [c] (LEAL [d] {s} x))
1527 // cond: is32Bit(int64(c)+int64(d))
1528 // result: (LEAL [c+d] {s} x)
1530 c := auxIntToInt32(v.AuxInt)
1531 if v_0.Op != OpAMD64LEAL {
1534 d := auxIntToInt32(v_0.AuxInt)
1535 s := auxToSym(v_0.Aux)
1537 if !(is32Bit(int64(c) + int64(d))) {
1540 v.reset(OpAMD64LEAL)
1541 v.AuxInt = int32ToAuxInt(c + d)
1546 // match: (ADDLconst [c] (LEAL1 [d] {s} x y))
1547 // cond: is32Bit(int64(c)+int64(d))
1548 // result: (LEAL1 [c+d] {s} x y)
1550 c := auxIntToInt32(v.AuxInt)
1551 if v_0.Op != OpAMD64LEAL1 {
1554 d := auxIntToInt32(v_0.AuxInt)
1555 s := auxToSym(v_0.Aux)
1558 if !(is32Bit(int64(c) + int64(d))) {
1561 v.reset(OpAMD64LEAL1)
1562 v.AuxInt = int32ToAuxInt(c + d)
1567 // match: (ADDLconst [c] (LEAL2 [d] {s} x y))
1568 // cond: is32Bit(int64(c)+int64(d))
1569 // result: (LEAL2 [c+d] {s} x y)
1571 c := auxIntToInt32(v.AuxInt)
1572 if v_0.Op != OpAMD64LEAL2 {
1575 d := auxIntToInt32(v_0.AuxInt)
1576 s := auxToSym(v_0.Aux)
1579 if !(is32Bit(int64(c) + int64(d))) {
1582 v.reset(OpAMD64LEAL2)
1583 v.AuxInt = int32ToAuxInt(c + d)
1588 // match: (ADDLconst [c] (LEAL4 [d] {s} x y))
1589 // cond: is32Bit(int64(c)+int64(d))
1590 // result: (LEAL4 [c+d] {s} x y)
1592 c := auxIntToInt32(v.AuxInt)
1593 if v_0.Op != OpAMD64LEAL4 {
1596 d := auxIntToInt32(v_0.AuxInt)
1597 s := auxToSym(v_0.Aux)
1600 if !(is32Bit(int64(c) + int64(d))) {
1603 v.reset(OpAMD64LEAL4)
1604 v.AuxInt = int32ToAuxInt(c + d)
1609 // match: (ADDLconst [c] (LEAL8 [d] {s} x y))
1610 // cond: is32Bit(int64(c)+int64(d))
1611 // result: (LEAL8 [c+d] {s} x y)
1613 c := auxIntToInt32(v.AuxInt)
1614 if v_0.Op != OpAMD64LEAL8 {
1617 d := auxIntToInt32(v_0.AuxInt)
1618 s := auxToSym(v_0.Aux)
1621 if !(is32Bit(int64(c) + int64(d))) {
1624 v.reset(OpAMD64LEAL8)
1625 v.AuxInt = int32ToAuxInt(c + d)
1630 // match: (ADDLconst [c] x)
1634 c := auxIntToInt32(v.AuxInt)
1642 // match: (ADDLconst [c] (MOVLconst [d]))
1643 // result: (MOVLconst [c+d])
1645 c := auxIntToInt32(v.AuxInt)
1646 if v_0.Op != OpAMD64MOVLconst {
1649 d := auxIntToInt32(v_0.AuxInt)
1650 v.reset(OpAMD64MOVLconst)
1651 v.AuxInt = int32ToAuxInt(c + d)
1654 // match: (ADDLconst [c] (ADDLconst [d] x))
1655 // result: (ADDLconst [c+d] x)
1657 c := auxIntToInt32(v.AuxInt)
1658 if v_0.Op != OpAMD64ADDLconst {
1661 d := auxIntToInt32(v_0.AuxInt)
1663 v.reset(OpAMD64ADDLconst)
1664 v.AuxInt = int32ToAuxInt(c + d)
1668 // match: (ADDLconst [off] x:(SP))
1669 // result: (LEAL [off] x)
1671 off := auxIntToInt32(v.AuxInt)
1676 v.reset(OpAMD64LEAL)
1677 v.AuxInt = int32ToAuxInt(off)
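// The *constmodify and *modify rewriters below all share the same two rules:
// fold an ADDQconst on the address into the instruction's offset when the sum
// still fits (canAdd32 / is32Bit), and fold a LEAQ base into the offset and
// symbol when canMergeSym allows the symbols to be merged.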
1683 func rewriteValueAMD64_OpAMD64ADDLconstmodify(v *Value) bool {
1686 // match: (ADDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
1687 // cond: ValAndOff(valoff1).canAdd32(off2)
1688 // result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
1690 valoff1 := auxIntToValAndOff(v.AuxInt)
1691 sym := auxToSym(v.Aux)
1692 if v_0.Op != OpAMD64ADDQconst {
1695 off2 := auxIntToInt32(v_0.AuxInt)
1698 if !(ValAndOff(valoff1).canAdd32(off2)) {
1701 v.reset(OpAMD64ADDLconstmodify)
1702 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
1703 v.Aux = symToAux(sym)
1704 v.AddArg2(base, mem)
1707 // match: (ADDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
1708 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
1709 // result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
1711 valoff1 := auxIntToValAndOff(v.AuxInt)
1712 sym1 := auxToSym(v.Aux)
1713 if v_0.Op != OpAMD64LEAQ {
1716 off2 := auxIntToInt32(v_0.AuxInt)
1717 sym2 := auxToSym(v_0.Aux)
1720 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
1723 v.reset(OpAMD64ADDLconstmodify)
1724 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
1725 v.Aux = symToAux(mergeSym(sym1, sym2))
1726 v.AddArg2(base, mem)
1731 func rewriteValueAMD64_OpAMD64ADDLload(v *Value) bool {
1736 typ := &b.Func.Config.Types
1737 // match: (ADDLload [off1] {sym} val (ADDQconst [off2] base) mem)
1738 // cond: is32Bit(int64(off1)+int64(off2))
1739 // result: (ADDLload [off1+off2] {sym} val base mem)
1741 off1 := auxIntToInt32(v.AuxInt)
1742 sym := auxToSym(v.Aux)
1744 if v_1.Op != OpAMD64ADDQconst {
1747 off2 := auxIntToInt32(v_1.AuxInt)
1750 if !(is32Bit(int64(off1) + int64(off2))) {
1753 v.reset(OpAMD64ADDLload)
1754 v.AuxInt = int32ToAuxInt(off1 + off2)
1755 v.Aux = symToAux(sym)
1756 v.AddArg3(val, base, mem)
1759 // match: (ADDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
1760 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
1761 // result: (ADDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
1763 off1 := auxIntToInt32(v.AuxInt)
1764 sym1 := auxToSym(v.Aux)
1766 if v_1.Op != OpAMD64LEAQ {
1769 off2 := auxIntToInt32(v_1.AuxInt)
1770 sym2 := auxToSym(v_1.Aux)
1773 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
1776 v.reset(OpAMD64ADDLload)
1777 v.AuxInt = int32ToAuxInt(off1 + off2)
1778 v.Aux = symToAux(mergeSym(sym1, sym2))
1779 v.AddArg3(val, base, mem)
1782 // match: (ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
1783 // result: (ADDL x (MOVLf2i y))
1785 off := auxIntToInt32(v.AuxInt)
1786 sym := auxToSym(v.Aux)
1789 if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
1793 if ptr != v_2.Args[0] {
1796 v.reset(OpAMD64ADDL)
1797 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
1804 func rewriteValueAMD64_OpAMD64ADDLmodify(v *Value) bool {
1808 // match: (ADDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
1809 // cond: is32Bit(int64(off1)+int64(off2))
1810 // result: (ADDLmodify [off1+off2] {sym} base val mem)
1812 off1 := auxIntToInt32(v.AuxInt)
1813 sym := auxToSym(v.Aux)
1814 if v_0.Op != OpAMD64ADDQconst {
1817 off2 := auxIntToInt32(v_0.AuxInt)
1821 if !(is32Bit(int64(off1) + int64(off2))) {
1824 v.reset(OpAMD64ADDLmodify)
1825 v.AuxInt = int32ToAuxInt(off1 + off2)
1826 v.Aux = symToAux(sym)
1827 v.AddArg3(base, val, mem)
1830 // match: (ADDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
1831 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
1832 // result: (ADDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
1834 off1 := auxIntToInt32(v.AuxInt)
1835 sym1 := auxToSym(v.Aux)
1836 if v_0.Op != OpAMD64LEAQ {
1839 off2 := auxIntToInt32(v_0.AuxInt)
1840 sym2 := auxToSym(v_0.Aux)
1844 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
1847 v.reset(OpAMD64ADDLmodify)
1848 v.AuxInt = int32ToAuxInt(off1 + off2)
1849 v.Aux = symToAux(mergeSym(sym1, sym2))
1850 v.AddArg3(base, val, mem)
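// rewriteValueAMD64_OpAMD64ADDQ mirrors the ADDL rules for 64-bit operands:
// constants are folded into ADDQconst only when they fit in 32 bits, shift
// pairs become ROLQconst, shifted or doubled operands become
// LEAQ1/LEAQ2/LEAQ4/LEAQ8, x + (-y) becomes SUBQ, and a foldable MOVQload
// operand is merged into ADDQload.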
1855 func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool {
1858 // match: (ADDQ x (MOVQconst [c]))
1860 // result: (ADDQconst [int32(c)] x)
1862 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1864 if v_1.Op != OpAMD64MOVQconst {
1867 c := auxIntToInt64(v_1.AuxInt)
1871 v.reset(OpAMD64ADDQconst)
1872 v.AuxInt = int32ToAuxInt(int32(c))
1878 // match: (ADDQ x (MOVLconst [c]))
1879 // result: (ADDQconst [c] x)
1881 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1883 if v_1.Op != OpAMD64MOVLconst {
1886 c := auxIntToInt32(v_1.AuxInt)
1887 v.reset(OpAMD64ADDQconst)
1888 v.AuxInt = int32ToAuxInt(c)
1894 // match: (ADDQ (SHLQconst x [c]) (SHRQconst x [d]))
1896 // result: (ROLQconst x [c])
1898 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1899 if v_0.Op != OpAMD64SHLQconst {
1902 c := auxIntToInt8(v_0.AuxInt)
1904 if v_1.Op != OpAMD64SHRQconst {
1907 d := auxIntToInt8(v_1.AuxInt)
1908 if x != v_1.Args[0] || !(d == 64-c) {
1911 v.reset(OpAMD64ROLQconst)
1912 v.AuxInt = int8ToAuxInt(c)
1918 // match: (ADDQ x (SHLQconst [3] y))
1919 // result: (LEAQ8 x y)
1921 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1923 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
1927 v.reset(OpAMD64LEAQ8)
1933 // match: (ADDQ x (SHLQconst [2] y))
1934 // result: (LEAQ4 x y)
1936 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1938 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
1942 v.reset(OpAMD64LEAQ4)
1948 // match: (ADDQ x (SHLQconst [1] y))
1949 // result: (LEAQ2 x y)
1951 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1953 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
1957 v.reset(OpAMD64LEAQ2)
1963 // match: (ADDQ x (ADDQ y y))
1964 // result: (LEAQ2 x y)
1966 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1968 if v_1.Op != OpAMD64ADDQ {
1972 if y != v_1.Args[0] {
1975 v.reset(OpAMD64LEAQ2)
1981 // match: (ADDQ x (ADDQ x y))
1982 // result: (LEAQ2 y x)
1984 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
1986 if v_1.Op != OpAMD64ADDQ {
1990 v_1_0 := v_1.Args[0]
1991 v_1_1 := v_1.Args[1]
1992 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
1997 v.reset(OpAMD64LEAQ2)
2004 // match: (ADDQ (ADDQconst [c] x) y)
2005 // result: (LEAQ1 [c] x y)
2007 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2008 if v_0.Op != OpAMD64ADDQconst {
2011 c := auxIntToInt32(v_0.AuxInt)
2014 v.reset(OpAMD64LEAQ1)
2015 v.AuxInt = int32ToAuxInt(c)
2021 // match: (ADDQ x (LEAQ [c] {s} y))
2022 // cond: x.Op != OpSB && y.Op != OpSB
2023 // result: (LEAQ1 [c] {s} x y)
2025 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2027 if v_1.Op != OpAMD64LEAQ {
2030 c := auxIntToInt32(v_1.AuxInt)
2031 s := auxToSym(v_1.Aux)
2033 if !(x.Op != OpSB && y.Op != OpSB) {
2036 v.reset(OpAMD64LEAQ1)
2037 v.AuxInt = int32ToAuxInt(c)
2044 // match: (ADDQ x (NEGQ y))
2045 // result: (SUBQ x y)
2047 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2049 if v_1.Op != OpAMD64NEGQ {
2053 v.reset(OpAMD64SUBQ)
2059 // match: (ADDQ x l:(MOVQload [off] {sym} ptr mem))
2060 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
2061 // result: (ADDQload x [off] {sym} ptr mem)
2063 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2066 if l.Op != OpAMD64MOVQload {
2069 off := auxIntToInt32(l.AuxInt)
2070 sym := auxToSym(l.Aux)
2073 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
2076 v.reset(OpAMD64ADDQload)
2077 v.AuxInt = int32ToAuxInt(off)
2078 v.Aux = symToAux(sym)
2079 v.AddArg3(x, ptr, mem)
2086 func rewriteValueAMD64_OpAMD64ADDQcarry(v *Value) bool {
2089 // match: (ADDQcarry x (MOVQconst [c]))
2091 // result: (ADDQconstcarry x [int32(c)])
2093 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2095 if v_1.Op != OpAMD64MOVQconst {
2098 c := auxIntToInt64(v_1.AuxInt)
2102 v.reset(OpAMD64ADDQconstcarry)
2103 v.AuxInt = int32ToAuxInt(int32(c))
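// rewriteValueAMD64_OpAMD64ADDQconst parallels ADDLconst but re-checks
// is32Bit on every combined displacement, and ADDQconst of MOVQconst folds
// into a full 64-bit MOVQconst [int64(c)+d].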
2111 func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool {
2113 // match: (ADDQconst [c] (ADDQ x y))
2114 // result: (LEAQ1 [c] x y)
2116 c := auxIntToInt32(v.AuxInt)
2117 if v_0.Op != OpAMD64ADDQ {
2122 v.reset(OpAMD64LEAQ1)
2123 v.AuxInt = int32ToAuxInt(c)
2127 // match: (ADDQconst [c] (SHLQconst [1] x))
2128 // result: (LEAQ1 [c] x x)
2130 c := auxIntToInt32(v.AuxInt)
2131 if v_0.Op != OpAMD64SHLQconst || auxIntToInt8(v_0.AuxInt) != 1 {
2135 v.reset(OpAMD64LEAQ1)
2136 v.AuxInt = int32ToAuxInt(c)
2140 // match: (ADDQconst [c] (LEAQ [d] {s} x))
2141 // cond: is32Bit(int64(c)+int64(d))
2142 // result: (LEAQ [c+d] {s} x)
2144 c := auxIntToInt32(v.AuxInt)
2145 if v_0.Op != OpAMD64LEAQ {
2148 d := auxIntToInt32(v_0.AuxInt)
2149 s := auxToSym(v_0.Aux)
2151 if !(is32Bit(int64(c) + int64(d))) {
2154 v.reset(OpAMD64LEAQ)
2155 v.AuxInt = int32ToAuxInt(c + d)
2160 // match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
2161 // cond: is32Bit(int64(c)+int64(d))
2162 // result: (LEAQ1 [c+d] {s} x y)
2164 c := auxIntToInt32(v.AuxInt)
2165 if v_0.Op != OpAMD64LEAQ1 {
2168 d := auxIntToInt32(v_0.AuxInt)
2169 s := auxToSym(v_0.Aux)
2172 if !(is32Bit(int64(c) + int64(d))) {
2175 v.reset(OpAMD64LEAQ1)
2176 v.AuxInt = int32ToAuxInt(c + d)
2181 // match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
2182 // cond: is32Bit(int64(c)+int64(d))
2183 // result: (LEAQ2 [c+d] {s} x y)
2185 c := auxIntToInt32(v.AuxInt)
2186 if v_0.Op != OpAMD64LEAQ2 {
2189 d := auxIntToInt32(v_0.AuxInt)
2190 s := auxToSym(v_0.Aux)
2193 if !(is32Bit(int64(c) + int64(d))) {
2196 v.reset(OpAMD64LEAQ2)
2197 v.AuxInt = int32ToAuxInt(c + d)
2202 // match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
2203 // cond: is32Bit(int64(c)+int64(d))
2204 // result: (LEAQ4 [c+d] {s} x y)
2206 c := auxIntToInt32(v.AuxInt)
2207 if v_0.Op != OpAMD64LEAQ4 {
2210 d := auxIntToInt32(v_0.AuxInt)
2211 s := auxToSym(v_0.Aux)
2214 if !(is32Bit(int64(c) + int64(d))) {
2217 v.reset(OpAMD64LEAQ4)
2218 v.AuxInt = int32ToAuxInt(c + d)
2223 // match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
2224 // cond: is32Bit(int64(c)+int64(d))
2225 // result: (LEAQ8 [c+d] {s} x y)
2227 c := auxIntToInt32(v.AuxInt)
2228 if v_0.Op != OpAMD64LEAQ8 {
2231 d := auxIntToInt32(v_0.AuxInt)
2232 s := auxToSym(v_0.Aux)
2235 if !(is32Bit(int64(c) + int64(d))) {
2238 v.reset(OpAMD64LEAQ8)
2239 v.AuxInt = int32ToAuxInt(c + d)
2244 // match: (ADDQconst [0] x)
2247 if auxIntToInt32(v.AuxInt) != 0 {
2254 // match: (ADDQconst [c] (MOVQconst [d]))
2255 // result: (MOVQconst [int64(c)+d])
2257 c := auxIntToInt32(v.AuxInt)
2258 if v_0.Op != OpAMD64MOVQconst {
2261 d := auxIntToInt64(v_0.AuxInt)
2262 v.reset(OpAMD64MOVQconst)
2263 v.AuxInt = int64ToAuxInt(int64(c) + d)
2266 // match: (ADDQconst [c] (ADDQconst [d] x))
2267 // cond: is32Bit(int64(c)+int64(d))
2268 // result: (ADDQconst [c+d] x)
2270 c := auxIntToInt32(v.AuxInt)
2271 if v_0.Op != OpAMD64ADDQconst {
2274 d := auxIntToInt32(v_0.AuxInt)
2276 if !(is32Bit(int64(c) + int64(d))) {
2279 v.reset(OpAMD64ADDQconst)
2280 v.AuxInt = int32ToAuxInt(c + d)
2284 // match: (ADDQconst [off] x:(SP))
2285 // result: (LEAQ [off] x)
2287 off := auxIntToInt32(v.AuxInt)
2292 v.reset(OpAMD64LEAQ)
2293 v.AuxInt = int32ToAuxInt(off)
2299 func rewriteValueAMD64_OpAMD64ADDQconstmodify(v *Value) bool {
2302 // match: (ADDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
2303 // cond: ValAndOff(valoff1).canAdd32(off2)
2304 // result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
2306 valoff1 := auxIntToValAndOff(v.AuxInt)
2307 sym := auxToSym(v.Aux)
2308 if v_0.Op != OpAMD64ADDQconst {
2311 off2 := auxIntToInt32(v_0.AuxInt)
2314 if !(ValAndOff(valoff1).canAdd32(off2)) {
2317 v.reset(OpAMD64ADDQconstmodify)
2318 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
2319 v.Aux = symToAux(sym)
2320 v.AddArg2(base, mem)
2323 // match: (ADDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
2324 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
2325 // result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
2327 valoff1 := auxIntToValAndOff(v.AuxInt)
2328 sym1 := auxToSym(v.Aux)
2329 if v_0.Op != OpAMD64LEAQ {
2332 off2 := auxIntToInt32(v_0.AuxInt)
2333 sym2 := auxToSym(v_0.Aux)
2336 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
2339 v.reset(OpAMD64ADDQconstmodify)
2340 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
2341 v.Aux = symToAux(mergeSym(sym1, sym2))
2342 v.AddArg2(base, mem)
2347 func rewriteValueAMD64_OpAMD64ADDQload(v *Value) bool {
2352 typ := &b.Func.Config.Types
2353 // match: (ADDQload [off1] {sym} val (ADDQconst [off2] base) mem)
2354 // cond: is32Bit(int64(off1)+int64(off2))
2355 // result: (ADDQload [off1+off2] {sym} val base mem)
2357 off1 := auxIntToInt32(v.AuxInt)
2358 sym := auxToSym(v.Aux)
2360 if v_1.Op != OpAMD64ADDQconst {
2363 off2 := auxIntToInt32(v_1.AuxInt)
2366 if !(is32Bit(int64(off1) + int64(off2))) {
2369 v.reset(OpAMD64ADDQload)
2370 v.AuxInt = int32ToAuxInt(off1 + off2)
2371 v.Aux = symToAux(sym)
2372 v.AddArg3(val, base, mem)
2375 // match: (ADDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
2376 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
2377 // result: (ADDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
2379 off1 := auxIntToInt32(v.AuxInt)
2380 sym1 := auxToSym(v.Aux)
2382 if v_1.Op != OpAMD64LEAQ {
2385 off2 := auxIntToInt32(v_1.AuxInt)
2386 sym2 := auxToSym(v_1.Aux)
2389 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2392 v.reset(OpAMD64ADDQload)
2393 v.AuxInt = int32ToAuxInt(off1 + off2)
2394 v.Aux = symToAux(mergeSym(sym1, sym2))
2395 v.AddArg3(val, base, mem)
2398 // match: (ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
2399 // result: (ADDQ x (MOVQf2i y))
2401 off := auxIntToInt32(v.AuxInt)
2402 sym := auxToSym(v.Aux)
2405 if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
2409 if ptr != v_2.Args[0] {
2412 v.reset(OpAMD64ADDQ)
2413 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
2420 func rewriteValueAMD64_OpAMD64ADDQmodify(v *Value) bool {
2424 // match: (ADDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
2425 // cond: is32Bit(int64(off1)+int64(off2))
2426 // result: (ADDQmodify [off1+off2] {sym} base val mem)
2428 off1 := auxIntToInt32(v.AuxInt)
2429 sym := auxToSym(v.Aux)
2430 if v_0.Op != OpAMD64ADDQconst {
2433 off2 := auxIntToInt32(v_0.AuxInt)
2437 if !(is32Bit(int64(off1) + int64(off2))) {
2440 v.reset(OpAMD64ADDQmodify)
2441 v.AuxInt = int32ToAuxInt(off1 + off2)
2442 v.Aux = symToAux(sym)
2443 v.AddArg3(base, val, mem)
2446 // match: (ADDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
2447 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
2448 // result: (ADDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
2450 off1 := auxIntToInt32(v.AuxInt)
2451 sym1 := auxToSym(v.Aux)
2452 if v_0.Op != OpAMD64LEAQ {
2455 off2 := auxIntToInt32(v_0.AuxInt)
2456 sym2 := auxToSym(v_0.Aux)
2460 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2463 v.reset(OpAMD64ADDQmodify)
2464 v.AuxInt = int32ToAuxInt(off1 + off2)
2465 v.Aux = symToAux(mergeSym(sym1, sym2))
2466 v.AddArg3(base, val, mem)
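// The SSE arithmetic rewriters (ADDSD/ADDSS and their load forms) merge a
// foldable MOVSDload/MOVSSload operand into ADDSDload/ADDSSload when
// canMergeLoadClobber permits it, and let a *load form consume a value that
// was just stored to the same address by reinterpreting the stored integer
// bits with MOVQi2f/MOVLi2f instead of reloading from memory.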
2471 func rewriteValueAMD64_OpAMD64ADDSD(v *Value) bool {
2474 // match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
2475 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
2476 // result: (ADDSDload x [off] {sym} ptr mem)
2478 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2481 if l.Op != OpAMD64MOVSDload {
2484 off := auxIntToInt32(l.AuxInt)
2485 sym := auxToSym(l.Aux)
2488 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
2491 v.reset(OpAMD64ADDSDload)
2492 v.AuxInt = int32ToAuxInt(off)
2493 v.Aux = symToAux(sym)
2494 v.AddArg3(x, ptr, mem)
2501 func rewriteValueAMD64_OpAMD64ADDSDload(v *Value) bool {
2506 typ := &b.Func.Config.Types
2507 // match: (ADDSDload [off1] {sym} val (ADDQconst [off2] base) mem)
2508 // cond: is32Bit(int64(off1)+int64(off2))
2509 // result: (ADDSDload [off1+off2] {sym} val base mem)
2511 off1 := auxIntToInt32(v.AuxInt)
2512 sym := auxToSym(v.Aux)
2514 if v_1.Op != OpAMD64ADDQconst {
2517 off2 := auxIntToInt32(v_1.AuxInt)
2520 if !(is32Bit(int64(off1) + int64(off2))) {
2523 v.reset(OpAMD64ADDSDload)
2524 v.AuxInt = int32ToAuxInt(off1 + off2)
2525 v.Aux = symToAux(sym)
2526 v.AddArg3(val, base, mem)
2529 // match: (ADDSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
2530 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
2531 // result: (ADDSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
2533 off1 := auxIntToInt32(v.AuxInt)
2534 sym1 := auxToSym(v.Aux)
2536 if v_1.Op != OpAMD64LEAQ {
2539 off2 := auxIntToInt32(v_1.AuxInt)
2540 sym2 := auxToSym(v_1.Aux)
2543 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2546 v.reset(OpAMD64ADDSDload)
2547 v.AuxInt = int32ToAuxInt(off1 + off2)
2548 v.Aux = symToAux(mergeSym(sym1, sym2))
2549 v.AddArg3(val, base, mem)
2552 // match: (ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
2553 // result: (ADDSD x (MOVQi2f y))
2555 off := auxIntToInt32(v.AuxInt)
2556 sym := auxToSym(v.Aux)
2559 if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
2563 if ptr != v_2.Args[0] {
2566 v.reset(OpAMD64ADDSD)
2567 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
2574 func rewriteValueAMD64_OpAMD64ADDSS(v *Value) bool {
2577 // match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
2578 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
2579 // result: (ADDSSload x [off] {sym} ptr mem)
2581 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2584 if l.Op != OpAMD64MOVSSload {
2587 off := auxIntToInt32(l.AuxInt)
2588 sym := auxToSym(l.Aux)
2591 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
2594 v.reset(OpAMD64ADDSSload)
2595 v.AuxInt = int32ToAuxInt(off)
2596 v.Aux = symToAux(sym)
2597 v.AddArg3(x, ptr, mem)
2604 func rewriteValueAMD64_OpAMD64ADDSSload(v *Value) bool {
2609 typ := &b.Func.Config.Types
2610 // match: (ADDSSload [off1] {sym} val (ADDQconst [off2] base) mem)
2611 // cond: is32Bit(int64(off1)+int64(off2))
2612 // result: (ADDSSload [off1+off2] {sym} val base mem)
2614 off1 := auxIntToInt32(v.AuxInt)
2615 sym := auxToSym(v.Aux)
2617 if v_1.Op != OpAMD64ADDQconst {
2620 off2 := auxIntToInt32(v_1.AuxInt)
2623 if !(is32Bit(int64(off1) + int64(off2))) {
2626 v.reset(OpAMD64ADDSSload)
2627 v.AuxInt = int32ToAuxInt(off1 + off2)
2628 v.Aux = symToAux(sym)
2629 v.AddArg3(val, base, mem)
2632 // match: (ADDSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
2633 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
2634 // result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
2636 off1 := auxIntToInt32(v.AuxInt)
2637 sym1 := auxToSym(v.Aux)
2639 if v_1.Op != OpAMD64LEAQ {
2642 off2 := auxIntToInt32(v_1.AuxInt)
2643 sym2 := auxToSym(v_1.Aux)
2646 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2649 v.reset(OpAMD64ADDSSload)
2650 v.AuxInt = int32ToAuxInt(off1 + off2)
2651 v.Aux = symToAux(mergeSym(sym1, sym2))
2652 v.AddArg3(val, base, mem)
2655 // match: (ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
2656 // result: (ADDSS x (MOVLi2f y))
2658 off := auxIntToInt32(v.AuxInt)
2659 sym := auxToSym(v.Aux)
2662 if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
2666 if ptr != v_2.Args[0] {
2669 v.reset(OpAMD64ADDSS)
2670 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
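// rewriteValueAMD64_OpAMD64ANDL recognizes bit-clear idioms: AND with the
// complement of a single shifted-in bit becomes BTRL, AND with a constant
// whose complement is a power of two at or above bit 7 becomes BTRLconst,
// other constant operands become ANDLconst, and a foldable MOVLload operand
// is merged into ANDLload.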
2677 func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool {
2680 // match: (ANDL (NOTL (SHLL (MOVLconst [1]) y)) x)
2681 // result: (BTRL x y)
2683 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2684 if v_0.Op != OpAMD64NOTL {
2687 v_0_0 := v_0.Args[0]
2688 if v_0_0.Op != OpAMD64SHLL {
2692 v_0_0_0 := v_0_0.Args[0]
2693 if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
2697 v.reset(OpAMD64BTRL)
2703 // match: (ANDL (MOVLconst [c]) x)
2704 // cond: isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
2705 // result: (BTRLconst [int8(log32(^c))] x)
2707 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2708 if v_0.Op != OpAMD64MOVLconst {
2711 c := auxIntToInt32(v_0.AuxInt)
2713 if !(isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128) {
2716 v.reset(OpAMD64BTRLconst)
2717 v.AuxInt = int8ToAuxInt(int8(log32(^c)))
2723 // match: (ANDL x (MOVLconst [c]))
2724 // result: (ANDLconst [c] x)
2726 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2728 if v_1.Op != OpAMD64MOVLconst {
2731 c := auxIntToInt32(v_1.AuxInt)
2732 v.reset(OpAMD64ANDLconst)
2733 v.AuxInt = int32ToAuxInt(c)
2739 // match: (ANDL x x)
2749 // match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
2750 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
2751 // result: (ANDLload x [off] {sym} ptr mem)
2753 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2756 if l.Op != OpAMD64MOVLload {
2759 off := auxIntToInt32(l.AuxInt)
2760 sym := auxToSym(l.Aux)
2763 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
2766 v.reset(OpAMD64ANDLload)
2767 v.AuxInt = int32ToAuxInt(off)
2768 v.Aux = symToAux(sym)
2769 v.AddArg3(x, ptr, mem)
2776 func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool {
2778 // match: (ANDLconst [c] x)
2779 // cond: isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
2780 // result: (BTRLconst [int8(log32(^c))] x)
2782 c := auxIntToInt32(v.AuxInt)
2784 if !(isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128) {
2787 v.reset(OpAMD64BTRLconst)
2788 v.AuxInt = int8ToAuxInt(int8(log32(^c)))
2792 // match: (ANDLconst [c] (ANDLconst [d] x))
2793 // result: (ANDLconst [c & d] x)
2795 c := auxIntToInt32(v.AuxInt)
2796 if v_0.Op != OpAMD64ANDLconst {
2799 d := auxIntToInt32(v_0.AuxInt)
2801 v.reset(OpAMD64ANDLconst)
2802 v.AuxInt = int32ToAuxInt(c & d)
2806 // match: (ANDLconst [c] (BTRLconst [d] x))
2807 // result: (ANDLconst [c &^ (1<<uint32(d))] x)
2809 c := auxIntToInt32(v.AuxInt)
2810 if v_0.Op != OpAMD64BTRLconst {
2813 d := auxIntToInt8(v_0.AuxInt)
2815 v.reset(OpAMD64ANDLconst)
2816 v.AuxInt = int32ToAuxInt(c &^ (1 << uint32(d)))
2820 // match: (ANDLconst [ 0xFF] x)
2821 // result: (MOVBQZX x)
2823 if auxIntToInt32(v.AuxInt) != 0xFF {
2827 v.reset(OpAMD64MOVBQZX)
2831 // match: (ANDLconst [0xFFFF] x)
2832 // result: (MOVWQZX x)
2834 if auxIntToInt32(v.AuxInt) != 0xFFFF {
2838 v.reset(OpAMD64MOVWQZX)
2842 // match: (ANDLconst [c] _)
2844 // result: (MOVLconst [0])
2846 c := auxIntToInt32(v.AuxInt)
2850 v.reset(OpAMD64MOVLconst)
2851 v.AuxInt = int32ToAuxInt(0)
2854 // match: (ANDLconst [c] x)
2858 c := auxIntToInt32(v.AuxInt)
2866 // match: (ANDLconst [c] (MOVLconst [d]))
2867 // result: (MOVLconst [c&d])
2869 c := auxIntToInt32(v.AuxInt)
2870 if v_0.Op != OpAMD64MOVLconst {
2873 d := auxIntToInt32(v_0.AuxInt)
2874 v.reset(OpAMD64MOVLconst)
2875 v.AuxInt = int32ToAuxInt(c & d)
2880 func rewriteValueAMD64_OpAMD64ANDLconstmodify(v *Value) bool {
2883 // match: (ANDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
2884 // cond: ValAndOff(valoff1).canAdd32(off2)
2885 // result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
2887 valoff1 := auxIntToValAndOff(v.AuxInt)
2888 sym := auxToSym(v.Aux)
2889 if v_0.Op != OpAMD64ADDQconst {
2892 off2 := auxIntToInt32(v_0.AuxInt)
2895 if !(ValAndOff(valoff1).canAdd32(off2)) {
2898 v.reset(OpAMD64ANDLconstmodify)
2899 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
2900 v.Aux = symToAux(sym)
2901 v.AddArg2(base, mem)
2904 // match: (ANDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
2905 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
2906 // result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
2908 valoff1 := auxIntToValAndOff(v.AuxInt)
2909 sym1 := auxToSym(v.Aux)
2910 if v_0.Op != OpAMD64LEAQ {
2913 off2 := auxIntToInt32(v_0.AuxInt)
2914 sym2 := auxToSym(v_0.Aux)
2917 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
2920 v.reset(OpAMD64ANDLconstmodify)
2921 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
2922 v.Aux = symToAux(mergeSym(sym1, sym2))
2923 v.AddArg2(base, mem)
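// Illustrative sketch (not part of the generated rules): the *constmodify rewrites above
// fold an ADDQconst or LEAQ address computation into the instruction's aux field. That
// aux packs an immediate value together with a displacement, so the fold is only legal
// when the combined displacement still fits in 32 bits, which is what the
// ValAndOff(valoff1).canAdd32(off2) condition checks. exampleCanFoldOffset is a
// hypothetical stand-in for that check using plain integers.
func exampleCanFoldOffset(off1, off2 int32) (int32, bool) {
	sum := int64(off1) + int64(off2)
	if int64(int32(sum)) != sum {
		return 0, false // combined displacement would overflow 32 bits
	}
	return int32(sum), true
}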
2928 func rewriteValueAMD64_OpAMD64ANDLload(v *Value) bool {
2933 typ := &b.Func.Config.Types
2934 // match: (ANDLload [off1] {sym} val (ADDQconst [off2] base) mem)
2935 // cond: is32Bit(int64(off1)+int64(off2))
2936 // result: (ANDLload [off1+off2] {sym} val base mem)
2938 off1 := auxIntToInt32(v.AuxInt)
2939 sym := auxToSym(v.Aux)
2941 if v_1.Op != OpAMD64ADDQconst {
2944 off2 := auxIntToInt32(v_1.AuxInt)
2947 if !(is32Bit(int64(off1) + int64(off2))) {
2950 v.reset(OpAMD64ANDLload)
2951 v.AuxInt = int32ToAuxInt(off1 + off2)
2952 v.Aux = symToAux(sym)
2953 v.AddArg3(val, base, mem)
2956 // match: (ANDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
2957 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
2958 // result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
2960 off1 := auxIntToInt32(v.AuxInt)
2961 sym1 := auxToSym(v.Aux)
2963 if v_1.Op != OpAMD64LEAQ {
2966 off2 := auxIntToInt32(v_1.AuxInt)
2967 sym2 := auxToSym(v_1.Aux)
2970 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2973 v.reset(OpAMD64ANDLload)
2974 v.AuxInt = int32ToAuxInt(off1 + off2)
2975 v.Aux = symToAux(mergeSym(sym1, sym2))
2976 v.AddArg3(val, base, mem)
2979 // match: (ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
2980 // result: (ANDL x (MOVLf2i y))
2982 off := auxIntToInt32(v.AuxInt)
2983 sym := auxToSym(v.Aux)
2986 if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
2990 if ptr != v_2.Args[0] {
2993 v.reset(OpAMD64ANDL)
2994 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
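// Illustrative sketch (not part of the generated rules): when a LEAQ carrying a symbol is
// folded into a load such as ANDLload, the two symbolic bases must be compatible — at
// most one side may name a symbol, which is what the canMergeSym/mergeSym pair in the
// conditions above enforces. exampleMergeSyms is a hypothetical stand-in using strings
// for the symbols.
func exampleMergeSyms(sym1, sym2 string) (string, bool) {
	switch {
	case sym1 == "":
		return sym2, true
	case sym2 == "":
		return sym1, true
	default:
		return "", false // both sides already carry a symbol; the fold is rejected
	}
}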
3001 func rewriteValueAMD64_OpAMD64ANDLmodify(v *Value) bool {
3006 // match: (ANDLmodify [off] {sym} ptr (NOTL s:(SHLL (MOVLconst [1]) <t> x)) mem)
3007 // result: (BTRLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
3009 off := auxIntToInt32(v.AuxInt)
3010 sym := auxToSym(v.Aux)
3012 if v_1.Op != OpAMD64NOTL {
3016 if s.Op != OpAMD64SHLL {
3022 if s_0.Op != OpAMD64MOVLconst || auxIntToInt32(s_0.AuxInt) != 1 {
3026 v.reset(OpAMD64BTRLmodify)
3027 v.AuxInt = int32ToAuxInt(off)
3028 v.Aux = symToAux(sym)
3029 v0 := b.NewValue0(v.Pos, OpAMD64ANDLconst, t)
3030 v0.AuxInt = int32ToAuxInt(31)
3032 v.AddArg3(ptr, v0, mem)
3035 // match: (ANDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
3036 // cond: is32Bit(int64(off1)+int64(off2))
3037 // result: (ANDLmodify [off1+off2] {sym} base val mem)
3039 off1 := auxIntToInt32(v.AuxInt)
3040 sym := auxToSym(v.Aux)
3041 if v_0.Op != OpAMD64ADDQconst {
3044 off2 := auxIntToInt32(v_0.AuxInt)
3048 if !(is32Bit(int64(off1) + int64(off2))) {
3051 v.reset(OpAMD64ANDLmodify)
3052 v.AuxInt = int32ToAuxInt(off1 + off2)
3053 v.Aux = symToAux(sym)
3054 v.AddArg3(base, val, mem)
3057 // match: (ANDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
3058 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
3059 // result: (ANDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
3061 off1 := auxIntToInt32(v.AuxInt)
3062 sym1 := auxToSym(v.Aux)
3063 if v_0.Op != OpAMD64LEAQ {
3066 off2 := auxIntToInt32(v_0.AuxInt)
3067 sym2 := auxToSym(v_0.Aux)
3071 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
3074 v.reset(OpAMD64ANDLmodify)
3075 v.AuxInt = int32ToAuxInt(off1 + off2)
3076 v.Aux = symToAux(mergeSym(sym1, sym2))
3077 v.AddArg3(base, val, mem)
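// Illustrative sketch (not part of the generated rules): the ANDLmodify rewrite above
// recognizes m &^= (1 << x) and turns it into a memory BTR. A 32-bit BTR only uses the
// low five bits of a register bit index, so the rewrite masks the index with &31 (the
// 64-bit ANDQmodify variant further below uses &63). exampleClearBit32 is a hypothetical
// helper showing the two forms agree for in-range shift counts.
func exampleClearBit32(m uint32, x uint) uint32 {
	return m &^ (1 << (x & 31)) // same result as m &^ (1 << x) for x in [0,31]
}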
3082 func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
3085 // match: (ANDQ (NOTQ (SHLQ (MOVQconst [1]) y)) x)
3086 // result: (BTRQ x y)
3088 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3089 if v_0.Op != OpAMD64NOTQ {
3092 v_0_0 := v_0.Args[0]
3093 if v_0_0.Op != OpAMD64SHLQ {
3097 v_0_0_0 := v_0_0.Args[0]
3098 if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
3102 v.reset(OpAMD64BTRQ)
3108 // match: (ANDQ (MOVQconst [c]) x)
3109 // cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128
3110 // result: (BTRQconst [int8(log64(^c))] x)
3112 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3113 if v_0.Op != OpAMD64MOVQconst {
3116 c := auxIntToInt64(v_0.AuxInt)
3118 if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128) {
3121 v.reset(OpAMD64BTRQconst)
3122 v.AuxInt = int8ToAuxInt(int8(log64(^c)))
3128 // match: (ANDQ x (MOVQconst [c]))
3130 // result: (ANDQconst [int32(c)] x)
3132 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3134 if v_1.Op != OpAMD64MOVQconst {
3137 c := auxIntToInt64(v_1.AuxInt)
3141 v.reset(OpAMD64ANDQconst)
3142 v.AuxInt = int32ToAuxInt(int32(c))
3148 // match: (ANDQ x x)
3158 // match: (ANDQ x l:(MOVQload [off] {sym} ptr mem))
3159 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
3160 // result: (ANDQload x [off] {sym} ptr mem)
3162 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3165 if l.Op != OpAMD64MOVQload {
3168 off := auxIntToInt32(l.AuxInt)
3169 sym := auxToSym(l.Aux)
3172 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
3175 v.reset(OpAMD64ANDQload)
3176 v.AuxInt = int32ToAuxInt(off)
3177 v.Aux = symToAux(sym)
3178 v.AddArg3(x, ptr, mem)
3185 func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool {
3187 // match: (ANDQconst [c] x)
3188 // cond: isUint64PowerOfTwo(int64(^c)) && uint64(^c) >= 128
3189 // result: (BTRQconst [int8(log32(^c))] x)
3191 c := auxIntToInt32(v.AuxInt)
3193 if !(isUint64PowerOfTwo(int64(^c)) && uint64(^c) >= 128) {
3196 v.reset(OpAMD64BTRQconst)
3197 v.AuxInt = int8ToAuxInt(int8(log32(^c)))
3201 // match: (ANDQconst [c] (ANDQconst [d] x))
3202 // result: (ANDQconst [c & d] x)
3204 c := auxIntToInt32(v.AuxInt)
3205 if v_0.Op != OpAMD64ANDQconst {
3208 d := auxIntToInt32(v_0.AuxInt)
3210 v.reset(OpAMD64ANDQconst)
3211 v.AuxInt = int32ToAuxInt(c & d)
3215 // match: (ANDQconst [c] (BTRQconst [d] x))
3216 // cond: is32Bit(int64(c) &^ (1<<uint32(d)))
3217 // result: (ANDQconst [c &^ (1<<uint32(d))] x)
3219 c := auxIntToInt32(v.AuxInt)
3220 if v_0.Op != OpAMD64BTRQconst {
3223 d := auxIntToInt8(v_0.AuxInt)
3225 if !(is32Bit(int64(c) &^ (1 << uint32(d)))) {
3228 v.reset(OpAMD64ANDQconst)
3229 v.AuxInt = int32ToAuxInt(c &^ (1 << uint32(d)))
3233 // match: (ANDQconst [ 0xFF] x)
3234 // result: (MOVBQZX x)
3236 if auxIntToInt32(v.AuxInt) != 0xFF {
3240 v.reset(OpAMD64MOVBQZX)
3244 // match: (ANDQconst [0xFFFF] x)
3245 // result: (MOVWQZX x)
3247 if auxIntToInt32(v.AuxInt) != 0xFFFF {
3251 v.reset(OpAMD64MOVWQZX)
3255 // match: (ANDQconst [0] _)
3256 // result: (MOVQconst [0])
3258 if auxIntToInt32(v.AuxInt) != 0 {
3261 v.reset(OpAMD64MOVQconst)
3262 v.AuxInt = int64ToAuxInt(0)
3265 // match: (ANDQconst [-1] x)
3268 if auxIntToInt32(v.AuxInt) != -1 {
3275 // match: (ANDQconst [c] (MOVQconst [d]))
3276 // result: (MOVQconst [int64(c)&d])
3278 c := auxIntToInt32(v.AuxInt)
3279 if v_0.Op != OpAMD64MOVQconst {
3282 d := auxIntToInt64(v_0.AuxInt)
3283 v.reset(OpAMD64MOVQconst)
3284 v.AuxInt = int64ToAuxInt(int64(c) & d)
3289 func rewriteValueAMD64_OpAMD64ANDQconstmodify(v *Value) bool {
3292 // match: (ANDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
3293 // cond: ValAndOff(valoff1).canAdd32(off2)
3294 // result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
3296 valoff1 := auxIntToValAndOff(v.AuxInt)
3297 sym := auxToSym(v.Aux)
3298 if v_0.Op != OpAMD64ADDQconst {
3301 off2 := auxIntToInt32(v_0.AuxInt)
3304 if !(ValAndOff(valoff1).canAdd32(off2)) {
3307 v.reset(OpAMD64ANDQconstmodify)
3308 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
3309 v.Aux = symToAux(sym)
3310 v.AddArg2(base, mem)
3313 // match: (ANDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
3314 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
3315 // result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
3317 valoff1 := auxIntToValAndOff(v.AuxInt)
3318 sym1 := auxToSym(v.Aux)
3319 if v_0.Op != OpAMD64LEAQ {
3322 off2 := auxIntToInt32(v_0.AuxInt)
3323 sym2 := auxToSym(v_0.Aux)
3326 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
3329 v.reset(OpAMD64ANDQconstmodify)
3330 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
3331 v.Aux = symToAux(mergeSym(sym1, sym2))
3332 v.AddArg2(base, mem)
3337 func rewriteValueAMD64_OpAMD64ANDQload(v *Value) bool {
3342 typ := &b.Func.Config.Types
3343 // match: (ANDQload [off1] {sym} val (ADDQconst [off2] base) mem)
3344 // cond: is32Bit(int64(off1)+int64(off2))
3345 // result: (ANDQload [off1+off2] {sym} val base mem)
3347 off1 := auxIntToInt32(v.AuxInt)
3348 sym := auxToSym(v.Aux)
3350 if v_1.Op != OpAMD64ADDQconst {
3353 off2 := auxIntToInt32(v_1.AuxInt)
3356 if !(is32Bit(int64(off1) + int64(off2))) {
3359 v.reset(OpAMD64ANDQload)
3360 v.AuxInt = int32ToAuxInt(off1 + off2)
3361 v.Aux = symToAux(sym)
3362 v.AddArg3(val, base, mem)
3365 // match: (ANDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
3366 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
3367 // result: (ANDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
3369 off1 := auxIntToInt32(v.AuxInt)
3370 sym1 := auxToSym(v.Aux)
3372 if v_1.Op != OpAMD64LEAQ {
3375 off2 := auxIntToInt32(v_1.AuxInt)
3376 sym2 := auxToSym(v_1.Aux)
3379 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
3382 v.reset(OpAMD64ANDQload)
3383 v.AuxInt = int32ToAuxInt(off1 + off2)
3384 v.Aux = symToAux(mergeSym(sym1, sym2))
3385 v.AddArg3(val, base, mem)
3388 // match: (ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
3389 // result: (ANDQ x (MOVQf2i y))
3391 off := auxIntToInt32(v.AuxInt)
3392 sym := auxToSym(v.Aux)
3395 if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
3399 if ptr != v_2.Args[0] {
3402 v.reset(OpAMD64ANDQ)
3403 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
3410 func rewriteValueAMD64_OpAMD64ANDQmodify(v *Value) bool {
3415 // match: (ANDQmodify [off] {sym} ptr (NOTQ s:(SHLQ (MOVQconst [1]) <t> x)) mem)
3416 // result: (BTRQmodify [off] {sym} ptr (ANDQconst <t> [63] x) mem)
3418 off := auxIntToInt32(v.AuxInt)
3419 sym := auxToSym(v.Aux)
3421 if v_1.Op != OpAMD64NOTQ {
3425 if s.Op != OpAMD64SHLQ {
3431 if s_0.Op != OpAMD64MOVQconst || auxIntToInt64(s_0.AuxInt) != 1 {
3435 v.reset(OpAMD64BTRQmodify)
3436 v.AuxInt = int32ToAuxInt(off)
3437 v.Aux = symToAux(sym)
3438 v0 := b.NewValue0(v.Pos, OpAMD64ANDQconst, t)
3439 v0.AuxInt = int32ToAuxInt(63)
3441 v.AddArg3(ptr, v0, mem)
3444 // match: (ANDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
3445 // cond: is32Bit(int64(off1)+int64(off2))
3446 // result: (ANDQmodify [off1+off2] {sym} base val mem)
3448 off1 := auxIntToInt32(v.AuxInt)
3449 sym := auxToSym(v.Aux)
3450 if v_0.Op != OpAMD64ADDQconst {
3453 off2 := auxIntToInt32(v_0.AuxInt)
3457 if !(is32Bit(int64(off1) + int64(off2))) {
3460 v.reset(OpAMD64ANDQmodify)
3461 v.AuxInt = int32ToAuxInt(off1 + off2)
3462 v.Aux = symToAux(sym)
3463 v.AddArg3(base, val, mem)
3466 // match: (ANDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
3467 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
3468 // result: (ANDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
3470 off1 := auxIntToInt32(v.AuxInt)
3471 sym1 := auxToSym(v.Aux)
3472 if v_0.Op != OpAMD64LEAQ {
3475 off2 := auxIntToInt32(v_0.AuxInt)
3476 sym2 := auxToSym(v_0.Aux)
3480 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
3483 v.reset(OpAMD64ANDQmodify)
3484 v.AuxInt = int32ToAuxInt(off1 + off2)
3485 v.Aux = symToAux(mergeSym(sym1, sym2))
3486 v.AddArg3(base, val, mem)
3491 func rewriteValueAMD64_OpAMD64BSFQ(v *Value) bool {
3494 // match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x)))
3495 // result: (BSFQ (ORQconst <t> [1<<8] x))
3497 if v_0.Op != OpAMD64ORQconst {
3501 if auxIntToInt32(v_0.AuxInt) != 1<<8 {
3504 v_0_0 := v_0.Args[0]
3505 if v_0_0.Op != OpAMD64MOVBQZX {
3509 v.reset(OpAMD64BSFQ)
3510 v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
3511 v0.AuxInt = int32ToAuxInt(1 << 8)
3516 // match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x)))
3517 // result: (BSFQ (ORQconst <t> [1<<16] x))
3519 if v_0.Op != OpAMD64ORQconst {
3523 if auxIntToInt32(v_0.AuxInt) != 1<<16 {
3526 v_0_0 := v_0.Args[0]
3527 if v_0_0.Op != OpAMD64MOVWQZX {
3531 v.reset(OpAMD64BSFQ)
3532 v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
3533 v0.AuxInt = int32ToAuxInt(1 << 16)
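// Illustrative sketch (not part of the generated rules): the BSFQ rewrites above come
// from the Ctz8/Ctz16 lowering, which ORs a sentinel bit (1<<8 or 1<<16) into the operand
// before BSF so that a zero input yields the operand width instead of an undefined
// result. Once the sentinel is present, the byte/word zero-extension of x no longer
// affects the lowest set bit, so the rules drop it. exampleCtz8 is a hypothetical,
// loop-based rendering of the sentinel trick.
func exampleCtz8(x uint8) int {
	v := uint64(x) | 1<<8 // sentinel guarantees a set bit at position 8
	n := 0
	for v&1 == 0 {
		v >>= 1
		n++
	}
	return n // 8 when x == 0, otherwise the index of x's lowest set bit
}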
3540 func rewriteValueAMD64_OpAMD64BTCLconst(v *Value) bool {
3542 // match: (BTCLconst [c] (XORLconst [d] x))
3543 // result: (XORLconst [d ^ 1<<uint32(c)] x)
3545 c := auxIntToInt8(v.AuxInt)
3546 if v_0.Op != OpAMD64XORLconst {
3549 d := auxIntToInt32(v_0.AuxInt)
3551 v.reset(OpAMD64XORLconst)
3552 v.AuxInt = int32ToAuxInt(d ^ 1<<uint32(c))
3556 // match: (BTCLconst [c] (BTCLconst [d] x))
3557 // result: (XORLconst [1<<uint32(c) | 1<<uint32(d)] x)
3559 c := auxIntToInt8(v.AuxInt)
3560 if v_0.Op != OpAMD64BTCLconst {
3563 d := auxIntToInt8(v_0.AuxInt)
3565 v.reset(OpAMD64XORLconst)
3566 v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
3570 // match: (BTCLconst [c] (MOVLconst [d]))
3571 // result: (MOVLconst [d^(1<<uint32(c))])
3573 c := auxIntToInt8(v.AuxInt)
3574 if v_0.Op != OpAMD64MOVLconst {
3577 d := auxIntToInt32(v_0.AuxInt)
3578 v.reset(OpAMD64MOVLconst)
3579 v.AuxInt = int32ToAuxInt(d ^ (1 << uint32(c)))
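// Illustrative sketch (not part of the generated rules): BTCLconst toggles a single bit,
// so it folds into a neighbouring XOR-by-constant by toggling that bit in the constant,
// which is the d ^ 1<<uint32(c) computed above. exampleToggleFold is a hypothetical
// demonstration of the identity behind that fold.
func exampleToggleFold(x, d uint32, c uint) uint32 {
	// XOR-ing x with d and then toggling bit c is one XOR with d ^ (1 << c).
	return x ^ (d ^ 1<<c)
}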
3584 func rewriteValueAMD64_OpAMD64BTCLconstmodify(v *Value) bool {
3587 // match: (BTCLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
3588 // cond: ValAndOff(valoff1).canAdd32(off2)
3589 // result: (BTCLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
3591 valoff1 := auxIntToValAndOff(v.AuxInt)
3592 sym := auxToSym(v.Aux)
3593 if v_0.Op != OpAMD64ADDQconst {
3596 off2 := auxIntToInt32(v_0.AuxInt)
3599 if !(ValAndOff(valoff1).canAdd32(off2)) {
3602 v.reset(OpAMD64BTCLconstmodify)
3603 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
3604 v.Aux = symToAux(sym)
3605 v.AddArg2(base, mem)
3608 // match: (BTCLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
3609 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
3610 // result: (BTCLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
3612 valoff1 := auxIntToValAndOff(v.AuxInt)
3613 sym1 := auxToSym(v.Aux)
3614 if v_0.Op != OpAMD64LEAQ {
3617 off2 := auxIntToInt32(v_0.AuxInt)
3618 sym2 := auxToSym(v_0.Aux)
3621 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
3624 v.reset(OpAMD64BTCLconstmodify)
3625 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
3626 v.Aux = symToAux(mergeSym(sym1, sym2))
3627 v.AddArg2(base, mem)
3632 func rewriteValueAMD64_OpAMD64BTCLmodify(v *Value) bool {
3636 // match: (BTCLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
3637 // cond: is32Bit(int64(off1)+int64(off2))
3638 // result: (BTCLmodify [off1+off2] {sym} base val mem)
3640 off1 := auxIntToInt32(v.AuxInt)
3641 sym := auxToSym(v.Aux)
3642 if v_0.Op != OpAMD64ADDQconst {
3645 off2 := auxIntToInt32(v_0.AuxInt)
3649 if !(is32Bit(int64(off1) + int64(off2))) {
3652 v.reset(OpAMD64BTCLmodify)
3653 v.AuxInt = int32ToAuxInt(off1 + off2)
3654 v.Aux = symToAux(sym)
3655 v.AddArg3(base, val, mem)
3658 // match: (BTCLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
3659 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
3660 // result: (BTCLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
3662 off1 := auxIntToInt32(v.AuxInt)
3663 sym1 := auxToSym(v.Aux)
3664 if v_0.Op != OpAMD64LEAQ {
3667 off2 := auxIntToInt32(v_0.AuxInt)
3668 sym2 := auxToSym(v_0.Aux)
3672 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
3675 v.reset(OpAMD64BTCLmodify)
3676 v.AuxInt = int32ToAuxInt(off1 + off2)
3677 v.Aux = symToAux(mergeSym(sym1, sym2))
3678 v.AddArg3(base, val, mem)
3683 func rewriteValueAMD64_OpAMD64BTCQconst(v *Value) bool {
3685 // match: (BTCQconst [c] (XORQconst [d] x))
3686 // cond: is32Bit(int64(d) ^ 1<<uint32(c))
3687 // result: (XORQconst [d ^ 1<<uint32(c)] x)
3689 c := auxIntToInt8(v.AuxInt)
3690 if v_0.Op != OpAMD64XORQconst {
3693 d := auxIntToInt32(v_0.AuxInt)
3695 if !(is32Bit(int64(d) ^ 1<<uint32(c))) {
3698 v.reset(OpAMD64XORQconst)
3699 v.AuxInt = int32ToAuxInt(d ^ 1<<uint32(c))
3703 // match: (BTCQconst [c] (BTCQconst [d] x))
3704 // cond: is32Bit(1<<uint32(c) ^ 1<<uint32(d))
3705 // result: (XORQconst [1<<uint32(c) ^ 1<<uint32(d)] x)
3707 c := auxIntToInt8(v.AuxInt)
3708 if v_0.Op != OpAMD64BTCQconst {
3711 d := auxIntToInt8(v_0.AuxInt)
3713 if !(is32Bit(1<<uint32(c) ^ 1<<uint32(d))) {
3716 v.reset(OpAMD64XORQconst)
3717 v.AuxInt = int32ToAuxInt(1<<uint32(c) ^ 1<<uint32(d))
3721 // match: (BTCQconst [c] (MOVQconst [d]))
3722 // result: (MOVQconst [d^(1<<uint32(c))])
3724 c := auxIntToInt8(v.AuxInt)
3725 if v_0.Op != OpAMD64MOVQconst {
3728 d := auxIntToInt64(v_0.AuxInt)
3729 v.reset(OpAMD64MOVQconst)
3730 v.AuxInt = int64ToAuxInt(d ^ (1 << uint32(c)))
3735 func rewriteValueAMD64_OpAMD64BTCQconstmodify(v *Value) bool {
3738 // match: (BTCQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
3739 // cond: ValAndOff(valoff1).canAdd32(off2)
3740 // result: (BTCQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
3742 valoff1 := auxIntToValAndOff(v.AuxInt)
3743 sym := auxToSym(v.Aux)
3744 if v_0.Op != OpAMD64ADDQconst {
3747 off2 := auxIntToInt32(v_0.AuxInt)
3750 if !(ValAndOff(valoff1).canAdd32(off2)) {
3753 v.reset(OpAMD64BTCQconstmodify)
3754 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
3755 v.Aux = symToAux(sym)
3756 v.AddArg2(base, mem)
3759 // match: (BTCQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
3760 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
3761 // result: (BTCQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
3763 valoff1 := auxIntToValAndOff(v.AuxInt)
3764 sym1 := auxToSym(v.Aux)
3765 if v_0.Op != OpAMD64LEAQ {
3768 off2 := auxIntToInt32(v_0.AuxInt)
3769 sym2 := auxToSym(v_0.Aux)
3772 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
3775 v.reset(OpAMD64BTCQconstmodify)
3776 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
3777 v.Aux = symToAux(mergeSym(sym1, sym2))
3778 v.AddArg2(base, mem)
3783 func rewriteValueAMD64_OpAMD64BTCQmodify(v *Value) bool {
3787 // match: (BTCQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
3788 // cond: is32Bit(int64(off1)+int64(off2))
3789 // result: (BTCQmodify [off1+off2] {sym} base val mem)
3791 off1 := auxIntToInt32(v.AuxInt)
3792 sym := auxToSym(v.Aux)
3793 if v_0.Op != OpAMD64ADDQconst {
3796 off2 := auxIntToInt32(v_0.AuxInt)
3800 if !(is32Bit(int64(off1) + int64(off2))) {
3803 v.reset(OpAMD64BTCQmodify)
3804 v.AuxInt = int32ToAuxInt(off1 + off2)
3805 v.Aux = symToAux(sym)
3806 v.AddArg3(base, val, mem)
3809 // match: (BTCQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
3810 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
3811 // result: (BTCQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
3813 off1 := auxIntToInt32(v.AuxInt)
3814 sym1 := auxToSym(v.Aux)
3815 if v_0.Op != OpAMD64LEAQ {
3818 off2 := auxIntToInt32(v_0.AuxInt)
3819 sym2 := auxToSym(v_0.Aux)
3823 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
3826 v.reset(OpAMD64BTCQmodify)
3827 v.AuxInt = int32ToAuxInt(off1 + off2)
3828 v.Aux = symToAux(mergeSym(sym1, sym2))
3829 v.AddArg3(base, val, mem)
3834 func rewriteValueAMD64_OpAMD64BTLconst(v *Value) bool {
3836 // match: (BTLconst [c] (SHRQconst [d] x))
3838 // result: (BTQconst [c+d] x)
3840 c := auxIntToInt8(v.AuxInt)
3841 if v_0.Op != OpAMD64SHRQconst {
3844 d := auxIntToInt8(v_0.AuxInt)
3846 if !((c + d) < 64) {
3849 v.reset(OpAMD64BTQconst)
3850 v.AuxInt = int8ToAuxInt(c + d)
3854 // match: (BTLconst [c] (SHLQconst [d] x))
3856 // result: (BTLconst [c-d] x)
3858 c := auxIntToInt8(v.AuxInt)
3859 if v_0.Op != OpAMD64SHLQconst {
3862 d := auxIntToInt8(v_0.AuxInt)
3867 v.reset(OpAMD64BTLconst)
3868 v.AuxInt = int8ToAuxInt(c - d)
3872 // match: (BTLconst [0] s:(SHRQ x y))
3873 // result: (BTQ y x)
3875 if auxIntToInt8(v.AuxInt) != 0 {
3879 if s.Op != OpAMD64SHRQ {
3888 // match: (BTLconst [c] (SHRLconst [d] x))
3890 // result: (BTLconst [c+d] x)
3892 c := auxIntToInt8(v.AuxInt)
3893 if v_0.Op != OpAMD64SHRLconst {
3896 d := auxIntToInt8(v_0.AuxInt)
3898 if !((c + d) < 32) {
3901 v.reset(OpAMD64BTLconst)
3902 v.AuxInt = int8ToAuxInt(c + d)
3906 // match: (BTLconst [c] (SHLLconst [d] x))
3908 // result: (BTLconst [c-d] x)
3910 c := auxIntToInt8(v.AuxInt)
3911 if v_0.Op != OpAMD64SHLLconst {
3914 d := auxIntToInt8(v_0.AuxInt)
3919 v.reset(OpAMD64BTLconst)
3920 v.AuxInt = int8ToAuxInt(c - d)
3924 // match: (BTLconst [0] s:(SHRL x y))
3925 // result: (BTL y x)
3927 if auxIntToInt8(v.AuxInt) != 0 {
3931 if s.Op != OpAMD64SHRL {
3942 func rewriteValueAMD64_OpAMD64BTQconst(v *Value) bool {
3944 // match: (BTQconst [c] (SHRQconst [d] x))
3946 // result: (BTQconst [c+d] x)
3948 c := auxIntToInt8(v.AuxInt)
3949 if v_0.Op != OpAMD64SHRQconst {
3952 d := auxIntToInt8(v_0.AuxInt)
3954 if !((c + d) < 64) {
3957 v.reset(OpAMD64BTQconst)
3958 v.AuxInt = int8ToAuxInt(c + d)
3962 // match: (BTQconst [c] (SHLQconst [d] x))
3964 // result: (BTQconst [c-d] x)
3966 c := auxIntToInt8(v.AuxInt)
3967 if v_0.Op != OpAMD64SHLQconst {
3970 d := auxIntToInt8(v_0.AuxInt)
3975 v.reset(OpAMD64BTQconst)
3976 v.AuxInt = int8ToAuxInt(c - d)
3980 // match: (BTQconst [0] s:(SHRQ x y))
3981 // result: (BTQ y x)
3983 if auxIntToInt8(v.AuxInt) != 0 {
3987 if s.Op != OpAMD64SHRQ {
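// Illustrative sketch (not part of the generated rules): the BTLconst/BTQconst rewrites
// above move a constant bit test across a constant shift — testing bit c of x>>d is
// testing bit c+d of x, and testing bit c of x<<d is testing bit c-d of x, provided the
// adjusted index stays inside the operand width (the (c+d)<64 and (c+d)<32 conditions).
// exampleBitTestThroughShift is a hypothetical check of the first identity.
func exampleBitTestThroughShift(x uint64, c, d uint) bool {
	if c+d >= 64 {
		return false // adjusted index out of range; the rewrite would not fire
	}
	return (x>>d)>>c&1 == x>>(c+d)&1 // holds for every x when c+d < 64
}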
3998 func rewriteValueAMD64_OpAMD64BTRLconst(v *Value) bool {
4000 // match: (BTRLconst [c] (BTSLconst [c] x))
4001 // result: (BTRLconst [c] x)
4003 c := auxIntToInt8(v.AuxInt)
4004 if v_0.Op != OpAMD64BTSLconst || auxIntToInt8(v_0.AuxInt) != c {
4008 v.reset(OpAMD64BTRLconst)
4009 v.AuxInt = int8ToAuxInt(c)
4013 // match: (BTRLconst [c] (BTCLconst [c] x))
4014 // result: (BTRLconst [c] x)
4016 c := auxIntToInt8(v.AuxInt)
4017 if v_0.Op != OpAMD64BTCLconst || auxIntToInt8(v_0.AuxInt) != c {
4021 v.reset(OpAMD64BTRLconst)
4022 v.AuxInt = int8ToAuxInt(c)
4026 // match: (BTRLconst [c] (ANDLconst [d] x))
4027 // result: (ANDLconst [d &^ (1<<uint32(c))] x)
4029 c := auxIntToInt8(v.AuxInt)
4030 if v_0.Op != OpAMD64ANDLconst {
4033 d := auxIntToInt32(v_0.AuxInt)
4035 v.reset(OpAMD64ANDLconst)
4036 v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
4040 // match: (BTRLconst [c] (BTRLconst [d] x))
4041 // result: (ANDLconst [^(1<<uint32(c) | 1<<uint32(d))] x)
4043 c := auxIntToInt8(v.AuxInt)
4044 if v_0.Op != OpAMD64BTRLconst {
4047 d := auxIntToInt8(v_0.AuxInt)
4049 v.reset(OpAMD64ANDLconst)
4050 v.AuxInt = int32ToAuxInt(^(1<<uint32(c) | 1<<uint32(d)))
4054 // match: (BTRLconst [c] (MOVLconst [d]))
4055 // result: (MOVLconst [d&^(1<<uint32(c))])
4057 c := auxIntToInt8(v.AuxInt)
4058 if v_0.Op != OpAMD64MOVLconst {
4061 d := auxIntToInt32(v_0.AuxInt)
4062 v.reset(OpAMD64MOVLconst)
4063 v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
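// Illustrative sketch (not part of the generated rules): BTRLconst clears one bit, so it
// folds into a neighbouring AND-with-constant by clearing that bit in the mask
// (d &^ 1<<uint32(c) above), and two clears combine into a single AND with the
// complement of both bits. exampleClearFold shows the single-clear identity.
func exampleClearFold(x, d uint32, c uint) uint32 {
	// Clearing bit c of (x & d) is the same as x & (d &^ (1 << c)).
	return x & (d &^ (1 << c))
}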
4068 func rewriteValueAMD64_OpAMD64BTRLconstmodify(v *Value) bool {
4071 // match: (BTRLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
4072 // cond: ValAndOff(valoff1).canAdd32(off2)
4073 // result: (BTRLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
4075 valoff1 := auxIntToValAndOff(v.AuxInt)
4076 sym := auxToSym(v.Aux)
4077 if v_0.Op != OpAMD64ADDQconst {
4080 off2 := auxIntToInt32(v_0.AuxInt)
4083 if !(ValAndOff(valoff1).canAdd32(off2)) {
4086 v.reset(OpAMD64BTRLconstmodify)
4087 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
4088 v.Aux = symToAux(sym)
4089 v.AddArg2(base, mem)
4092 // match: (BTRLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
4093 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
4094 // result: (BTRLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
4096 valoff1 := auxIntToValAndOff(v.AuxInt)
4097 sym1 := auxToSym(v.Aux)
4098 if v_0.Op != OpAMD64LEAQ {
4101 off2 := auxIntToInt32(v_0.AuxInt)
4102 sym2 := auxToSym(v_0.Aux)
4105 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
4108 v.reset(OpAMD64BTRLconstmodify)
4109 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
4110 v.Aux = symToAux(mergeSym(sym1, sym2))
4111 v.AddArg2(base, mem)
4116 func rewriteValueAMD64_OpAMD64BTRLmodify(v *Value) bool {
4120 // match: (BTRLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
4121 // cond: is32Bit(int64(off1)+int64(off2))
4122 // result: (BTRLmodify [off1+off2] {sym} base val mem)
4124 off1 := auxIntToInt32(v.AuxInt)
4125 sym := auxToSym(v.Aux)
4126 if v_0.Op != OpAMD64ADDQconst {
4129 off2 := auxIntToInt32(v_0.AuxInt)
4133 if !(is32Bit(int64(off1) + int64(off2))) {
4136 v.reset(OpAMD64BTRLmodify)
4137 v.AuxInt = int32ToAuxInt(off1 + off2)
4138 v.Aux = symToAux(sym)
4139 v.AddArg3(base, val, mem)
4142 // match: (BTRLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
4143 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
4144 // result: (BTRLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
4146 off1 := auxIntToInt32(v.AuxInt)
4147 sym1 := auxToSym(v.Aux)
4148 if v_0.Op != OpAMD64LEAQ {
4151 off2 := auxIntToInt32(v_0.AuxInt)
4152 sym2 := auxToSym(v_0.Aux)
4156 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
4159 v.reset(OpAMD64BTRLmodify)
4160 v.AuxInt = int32ToAuxInt(off1 + off2)
4161 v.Aux = symToAux(mergeSym(sym1, sym2))
4162 v.AddArg3(base, val, mem)
4167 func rewriteValueAMD64_OpAMD64BTRQconst(v *Value) bool {
4169 // match: (BTRQconst [c] (BTSQconst [c] x))
4170 // result: (BTRQconst [c] x)
4172 c := auxIntToInt8(v.AuxInt)
4173 if v_0.Op != OpAMD64BTSQconst || auxIntToInt8(v_0.AuxInt) != c {
4177 v.reset(OpAMD64BTRQconst)
4178 v.AuxInt = int8ToAuxInt(c)
4182 // match: (BTRQconst [c] (BTCQconst [c] x))
4183 // result: (BTRQconst [c] x)
4185 c := auxIntToInt8(v.AuxInt)
4186 if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
4190 v.reset(OpAMD64BTRQconst)
4191 v.AuxInt = int8ToAuxInt(c)
4195 // match: (BTRQconst [c] (ANDQconst [d] x))
4196 // cond: is32Bit(int64(d) &^ (1<<uint32(c)))
4197 // result: (ANDQconst [d &^ (1<<uint32(c))] x)
4199 c := auxIntToInt8(v.AuxInt)
4200 if v_0.Op != OpAMD64ANDQconst {
4203 d := auxIntToInt32(v_0.AuxInt)
4205 if !(is32Bit(int64(d) &^ (1 << uint32(c)))) {
4208 v.reset(OpAMD64ANDQconst)
4209 v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
4213 // match: (BTRQconst [c] (BTRQconst [d] x))
4214 // cond: is32Bit(^(1<<uint32(c) | 1<<uint32(d)))
4215 // result: (ANDQconst [^(1<<uint32(c) | 1<<uint32(d))] x)
4217 c := auxIntToInt8(v.AuxInt)
4218 if v_0.Op != OpAMD64BTRQconst {
4221 d := auxIntToInt8(v_0.AuxInt)
4223 if !(is32Bit(^(1<<uint32(c) | 1<<uint32(d)))) {
4226 v.reset(OpAMD64ANDQconst)
4227 v.AuxInt = int32ToAuxInt(^(1<<uint32(c) | 1<<uint32(d)))
4231 // match: (BTRQconst [c] (MOVQconst [d]))
4232 // result: (MOVQconst [d&^(1<<uint32(c))])
4234 c := auxIntToInt8(v.AuxInt)
4235 if v_0.Op != OpAMD64MOVQconst {
4238 d := auxIntToInt64(v_0.AuxInt)
4239 v.reset(OpAMD64MOVQconst)
4240 v.AuxInt = int64ToAuxInt(d &^ (1 << uint32(c)))
4245 func rewriteValueAMD64_OpAMD64BTRQconstmodify(v *Value) bool {
4248 // match: (BTRQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
4249 // cond: ValAndOff(valoff1).canAdd32(off2)
4250 // result: (BTRQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
4252 valoff1 := auxIntToValAndOff(v.AuxInt)
4253 sym := auxToSym(v.Aux)
4254 if v_0.Op != OpAMD64ADDQconst {
4257 off2 := auxIntToInt32(v_0.AuxInt)
4260 if !(ValAndOff(valoff1).canAdd32(off2)) {
4263 v.reset(OpAMD64BTRQconstmodify)
4264 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
4265 v.Aux = symToAux(sym)
4266 v.AddArg2(base, mem)
4269 // match: (BTRQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
4270 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
4271 // result: (BTRQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
4273 valoff1 := auxIntToValAndOff(v.AuxInt)
4274 sym1 := auxToSym(v.Aux)
4275 if v_0.Op != OpAMD64LEAQ {
4278 off2 := auxIntToInt32(v_0.AuxInt)
4279 sym2 := auxToSym(v_0.Aux)
4282 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
4285 v.reset(OpAMD64BTRQconstmodify)
4286 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
4287 v.Aux = symToAux(mergeSym(sym1, sym2))
4288 v.AddArg2(base, mem)
4293 func rewriteValueAMD64_OpAMD64BTRQmodify(v *Value) bool {
4297 // match: (BTRQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
4298 // cond: is32Bit(int64(off1)+int64(off2))
4299 // result: (BTRQmodify [off1+off2] {sym} base val mem)
4301 off1 := auxIntToInt32(v.AuxInt)
4302 sym := auxToSym(v.Aux)
4303 if v_0.Op != OpAMD64ADDQconst {
4306 off2 := auxIntToInt32(v_0.AuxInt)
4310 if !(is32Bit(int64(off1) + int64(off2))) {
4313 v.reset(OpAMD64BTRQmodify)
4314 v.AuxInt = int32ToAuxInt(off1 + off2)
4315 v.Aux = symToAux(sym)
4316 v.AddArg3(base, val, mem)
4319 // match: (BTRQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
4320 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
4321 // result: (BTRQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
4323 off1 := auxIntToInt32(v.AuxInt)
4324 sym1 := auxToSym(v.Aux)
4325 if v_0.Op != OpAMD64LEAQ {
4328 off2 := auxIntToInt32(v_0.AuxInt)
4329 sym2 := auxToSym(v_0.Aux)
4333 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
4336 v.reset(OpAMD64BTRQmodify)
4337 v.AuxInt = int32ToAuxInt(off1 + off2)
4338 v.Aux = symToAux(mergeSym(sym1, sym2))
4339 v.AddArg3(base, val, mem)
4344 func rewriteValueAMD64_OpAMD64BTSLconst(v *Value) bool {
4346 // match: (BTSLconst [c] (BTRLconst [c] x))
4347 // result: (BTSLconst [c] x)
4349 c := auxIntToInt8(v.AuxInt)
4350 if v_0.Op != OpAMD64BTRLconst || auxIntToInt8(v_0.AuxInt) != c {
4354 v.reset(OpAMD64BTSLconst)
4355 v.AuxInt = int8ToAuxInt(c)
4359 // match: (BTSLconst [c] (BTCLconst [c] x))
4360 // result: (BTSLconst [c] x)
4362 c := auxIntToInt8(v.AuxInt)
4363 if v_0.Op != OpAMD64BTCLconst || auxIntToInt8(v_0.AuxInt) != c {
4367 v.reset(OpAMD64BTSLconst)
4368 v.AuxInt = int8ToAuxInt(c)
4372 // match: (BTSLconst [c] (ORLconst [d] x))
4373 // result: (ORLconst [d | 1<<uint32(c)] x)
4375 c := auxIntToInt8(v.AuxInt)
4376 if v_0.Op != OpAMD64ORLconst {
4379 d := auxIntToInt32(v_0.AuxInt)
4381 v.reset(OpAMD64ORLconst)
4382 v.AuxInt = int32ToAuxInt(d | 1<<uint32(c))
4386 // match: (BTSLconst [c] (BTSLconst [d] x))
4387 // result: (ORLconst [1<<uint32(c) | 1<<uint32(d)] x)
4389 c := auxIntToInt8(v.AuxInt)
4390 if v_0.Op != OpAMD64BTSLconst {
4393 d := auxIntToInt8(v_0.AuxInt)
4395 v.reset(OpAMD64ORLconst)
4396 v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
4400 // match: (BTSLconst [c] (MOVLconst [d]))
4401 // result: (MOVLconst [d|(1<<uint32(c))])
4403 c := auxIntToInt8(v.AuxInt)
4404 if v_0.Op != OpAMD64MOVLconst {
4407 d := auxIntToInt32(v_0.AuxInt)
4408 v.reset(OpAMD64MOVLconst)
4409 v.AuxInt = int32ToAuxInt(d | (1 << uint32(c)))
4414 func rewriteValueAMD64_OpAMD64BTSLconstmodify(v *Value) bool {
4417 // match: (BTSLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
4418 // cond: ValAndOff(valoff1).canAdd32(off2)
4419 // result: (BTSLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
4421 valoff1 := auxIntToValAndOff(v.AuxInt)
4422 sym := auxToSym(v.Aux)
4423 if v_0.Op != OpAMD64ADDQconst {
4426 off2 := auxIntToInt32(v_0.AuxInt)
4429 if !(ValAndOff(valoff1).canAdd32(off2)) {
4432 v.reset(OpAMD64BTSLconstmodify)
4433 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
4434 v.Aux = symToAux(sym)
4435 v.AddArg2(base, mem)
4438 // match: (BTSLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
4439 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
4440 // result: (BTSLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
4442 valoff1 := auxIntToValAndOff(v.AuxInt)
4443 sym1 := auxToSym(v.Aux)
4444 if v_0.Op != OpAMD64LEAQ {
4447 off2 := auxIntToInt32(v_0.AuxInt)
4448 sym2 := auxToSym(v_0.Aux)
4451 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
4454 v.reset(OpAMD64BTSLconstmodify)
4455 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
4456 v.Aux = symToAux(mergeSym(sym1, sym2))
4457 v.AddArg2(base, mem)
4462 func rewriteValueAMD64_OpAMD64BTSLmodify(v *Value) bool {
4466 // match: (BTSLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
4467 // cond: is32Bit(int64(off1)+int64(off2))
4468 // result: (BTSLmodify [off1+off2] {sym} base val mem)
4470 off1 := auxIntToInt32(v.AuxInt)
4471 sym := auxToSym(v.Aux)
4472 if v_0.Op != OpAMD64ADDQconst {
4475 off2 := auxIntToInt32(v_0.AuxInt)
4479 if !(is32Bit(int64(off1) + int64(off2))) {
4482 v.reset(OpAMD64BTSLmodify)
4483 v.AuxInt = int32ToAuxInt(off1 + off2)
4484 v.Aux = symToAux(sym)
4485 v.AddArg3(base, val, mem)
4488 // match: (BTSLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
4489 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
4490 // result: (BTSLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
4492 off1 := auxIntToInt32(v.AuxInt)
4493 sym1 := auxToSym(v.Aux)
4494 if v_0.Op != OpAMD64LEAQ {
4497 off2 := auxIntToInt32(v_0.AuxInt)
4498 sym2 := auxToSym(v_0.Aux)
4502 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
4505 v.reset(OpAMD64BTSLmodify)
4506 v.AuxInt = int32ToAuxInt(off1 + off2)
4507 v.Aux = symToAux(mergeSym(sym1, sym2))
4508 v.AddArg3(base, val, mem)
4513 func rewriteValueAMD64_OpAMD64BTSQconst(v *Value) bool {
4515 // match: (BTSQconst [c] (BTRQconst [c] x))
4516 // result: (BTSQconst [c] x)
4518 c := auxIntToInt8(v.AuxInt)
4519 if v_0.Op != OpAMD64BTRQconst || auxIntToInt8(v_0.AuxInt) != c {
4523 v.reset(OpAMD64BTSQconst)
4524 v.AuxInt = int8ToAuxInt(c)
4528 // match: (BTSQconst [c] (BTCQconst [c] x))
4529 // result: (BTSQconst [c] x)
4531 c := auxIntToInt8(v.AuxInt)
4532 if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
4536 v.reset(OpAMD64BTSQconst)
4537 v.AuxInt = int8ToAuxInt(c)
4541 // match: (BTSQconst [c] (ORQconst [d] x))
4542 // cond: is32Bit(int64(d) | 1<<uint32(c))
4543 // result: (ORQconst [d | 1<<uint32(c)] x)
4545 c := auxIntToInt8(v.AuxInt)
4546 if v_0.Op != OpAMD64ORQconst {
4549 d := auxIntToInt32(v_0.AuxInt)
4551 if !(is32Bit(int64(d) | 1<<uint32(c))) {
4554 v.reset(OpAMD64ORQconst)
4555 v.AuxInt = int32ToAuxInt(d | 1<<uint32(c))
4559 // match: (BTSQconst [c] (BTSQconst [d] x))
4560 // cond: is32Bit(1<<uint32(c) | 1<<uint32(d))
4561 // result: (ORQconst [1<<uint32(c) | 1<<uint32(d)] x)
4563 c := auxIntToInt8(v.AuxInt)
4564 if v_0.Op != OpAMD64BTSQconst {
4567 d := auxIntToInt8(v_0.AuxInt)
4569 if !(is32Bit(1<<uint32(c) | 1<<uint32(d))) {
4572 v.reset(OpAMD64ORQconst)
4573 v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
4577 // match: (BTSQconst [c] (MOVQconst [d]))
4578 // result: (MOVQconst [d|(1<<uint32(c))])
4580 c := auxIntToInt8(v.AuxInt)
4581 if v_0.Op != OpAMD64MOVQconst {
4584 d := auxIntToInt64(v_0.AuxInt)
4585 v.reset(OpAMD64MOVQconst)
4586 v.AuxInt = int64ToAuxInt(d | (1 << uint32(c)))
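// Illustrative sketch (not part of the generated rules): unlike the 32-bit rules, the
// BTSQconst/BTRQconst/BTCQconst folds above carry is32Bit conditions, because the folded
// mask becomes the immediate of a 64-bit ORQconst/ANDQconst/XORQconst and x86-64
// immediates are sign-extended 32-bit values. exampleFitsInt32Imm is a hypothetical
// version of that guard.
func exampleFitsInt32Imm(mask int64) bool {
	return mask == int64(int32(mask)) // representable as a sign-extended 32-bit immediate
}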
4591 func rewriteValueAMD64_OpAMD64BTSQconstmodify(v *Value) bool {
4594 // match: (BTSQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
4595 // cond: ValAndOff(valoff1).canAdd32(off2)
4596 // result: (BTSQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
4598 valoff1 := auxIntToValAndOff(v.AuxInt)
4599 sym := auxToSym(v.Aux)
4600 if v_0.Op != OpAMD64ADDQconst {
4603 off2 := auxIntToInt32(v_0.AuxInt)
4606 if !(ValAndOff(valoff1).canAdd32(off2)) {
4609 v.reset(OpAMD64BTSQconstmodify)
4610 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
4611 v.Aux = symToAux(sym)
4612 v.AddArg2(base, mem)
4615 // match: (BTSQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
4616 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
4617 // result: (BTSQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
4619 valoff1 := auxIntToValAndOff(v.AuxInt)
4620 sym1 := auxToSym(v.Aux)
4621 if v_0.Op != OpAMD64LEAQ {
4624 off2 := auxIntToInt32(v_0.AuxInt)
4625 sym2 := auxToSym(v_0.Aux)
4628 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
4631 v.reset(OpAMD64BTSQconstmodify)
4632 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
4633 v.Aux = symToAux(mergeSym(sym1, sym2))
4634 v.AddArg2(base, mem)
4639 func rewriteValueAMD64_OpAMD64BTSQmodify(v *Value) bool {
4643 // match: (BTSQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
4644 // cond: is32Bit(int64(off1)+int64(off2))
4645 // result: (BTSQmodify [off1+off2] {sym} base val mem)
4647 off1 := auxIntToInt32(v.AuxInt)
4648 sym := auxToSym(v.Aux)
4649 if v_0.Op != OpAMD64ADDQconst {
4652 off2 := auxIntToInt32(v_0.AuxInt)
4656 if !(is32Bit(int64(off1) + int64(off2))) {
4659 v.reset(OpAMD64BTSQmodify)
4660 v.AuxInt = int32ToAuxInt(off1 + off2)
4661 v.Aux = symToAux(sym)
4662 v.AddArg3(base, val, mem)
4665 // match: (BTSQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
4666 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
4667 // result: (BTSQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
4669 off1 := auxIntToInt32(v.AuxInt)
4670 sym1 := auxToSym(v.Aux)
4671 if v_0.Op != OpAMD64LEAQ {
4674 off2 := auxIntToInt32(v_0.AuxInt)
4675 sym2 := auxToSym(v_0.Aux)
4679 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
4682 v.reset(OpAMD64BTSQmodify)
4683 v.AuxInt = int32ToAuxInt(off1 + off2)
4684 v.Aux = symToAux(mergeSym(sym1, sym2))
4685 v.AddArg3(base, val, mem)
4690 func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool {
4694 // match: (CMOVLCC x y (InvertFlags cond))
4695 // result: (CMOVLLS x y cond)
4699 if v_2.Op != OpAMD64InvertFlags {
4703 v.reset(OpAMD64CMOVLLS)
4704 v.AddArg3(x, y, cond)
4707 // match: (CMOVLCC _ x (FlagEQ))
4711 if v_2.Op != OpAMD64FlagEQ {
4717 // match: (CMOVLCC _ x (FlagGT_UGT))
4721 if v_2.Op != OpAMD64FlagGT_UGT {
4727 // match: (CMOVLCC y _ (FlagGT_ULT))
4731 if v_2.Op != OpAMD64FlagGT_ULT {
4737 // match: (CMOVLCC y _ (FlagLT_ULT))
4741 if v_2.Op != OpAMD64FlagLT_ULT {
4747 // match: (CMOVLCC _ x (FlagLT_UGT))
4751 if v_2.Op != OpAMD64FlagLT_UGT {
4759 func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool {
4763 // match: (CMOVLCS x y (InvertFlags cond))
4764 // result: (CMOVLHI x y cond)
4768 if v_2.Op != OpAMD64InvertFlags {
4772 v.reset(OpAMD64CMOVLHI)
4773 v.AddArg3(x, y, cond)
4776 // match: (CMOVLCS y _ (FlagEQ))
4780 if v_2.Op != OpAMD64FlagEQ {
4786 // match: (CMOVLCS y _ (FlagGT_UGT))
4790 if v_2.Op != OpAMD64FlagGT_UGT {
4796 // match: (CMOVLCS _ x (FlagGT_ULT))
4800 if v_2.Op != OpAMD64FlagGT_ULT {
4806 // match: (CMOVLCS _ x (FlagLT_ULT))
4810 if v_2.Op != OpAMD64FlagLT_ULT {
4816 // match: (CMOVLCS y _ (FlagLT_UGT))
4820 if v_2.Op != OpAMD64FlagLT_UGT {
4828 func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool {
4832 // match: (CMOVLEQ x y (InvertFlags cond))
4833 // result: (CMOVLEQ x y cond)
4837 if v_2.Op != OpAMD64InvertFlags {
4841 v.reset(OpAMD64CMOVLEQ)
4842 v.AddArg3(x, y, cond)
4845 // match: (CMOVLEQ _ x (FlagEQ))
4849 if v_2.Op != OpAMD64FlagEQ {
4855 // match: (CMOVLEQ y _ (FlagGT_UGT))
4859 if v_2.Op != OpAMD64FlagGT_UGT {
4865 // match: (CMOVLEQ y _ (FlagGT_ULT))
4869 if v_2.Op != OpAMD64FlagGT_ULT {
4875 // match: (CMOVLEQ y _ (FlagLT_ULT))
4879 if v_2.Op != OpAMD64FlagLT_ULT {
4885 // match: (CMOVLEQ y _ (FlagLT_UGT))
4889 if v_2.Op != OpAMD64FlagLT_UGT {
4897 func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool {
4901 // match: (CMOVLGE x y (InvertFlags cond))
4902 // result: (CMOVLLE x y cond)
4906 if v_2.Op != OpAMD64InvertFlags {
4910 v.reset(OpAMD64CMOVLLE)
4911 v.AddArg3(x, y, cond)
4914 // match: (CMOVLGE _ x (FlagEQ))
4918 if v_2.Op != OpAMD64FlagEQ {
4924 // match: (CMOVLGE _ x (FlagGT_UGT))
4928 if v_2.Op != OpAMD64FlagGT_UGT {
4934 // match: (CMOVLGE _ x (FlagGT_ULT))
4938 if v_2.Op != OpAMD64FlagGT_ULT {
4944 // match: (CMOVLGE y _ (FlagLT_ULT))
4948 if v_2.Op != OpAMD64FlagLT_ULT {
4954 // match: (CMOVLGE y _ (FlagLT_UGT))
4958 if v_2.Op != OpAMD64FlagLT_UGT {
4966 func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool {
4970 // match: (CMOVLGT x y (InvertFlags cond))
4971 // result: (CMOVLLT x y cond)
4975 if v_2.Op != OpAMD64InvertFlags {
4979 v.reset(OpAMD64CMOVLLT)
4980 v.AddArg3(x, y, cond)
4983 // match: (CMOVLGT y _ (FlagEQ))
4987 if v_2.Op != OpAMD64FlagEQ {
4993 // match: (CMOVLGT _ x (FlagGT_UGT))
4997 if v_2.Op != OpAMD64FlagGT_UGT {
5003 // match: (CMOVLGT _ x (FlagGT_ULT))
5007 if v_2.Op != OpAMD64FlagGT_ULT {
5013 // match: (CMOVLGT y _ (FlagLT_ULT))
5017 if v_2.Op != OpAMD64FlagLT_ULT {
5023 // match: (CMOVLGT y _ (FlagLT_UGT))
5027 if v_2.Op != OpAMD64FlagLT_UGT {
5035 func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool {
5039 // match: (CMOVLHI x y (InvertFlags cond))
5040 // result: (CMOVLCS x y cond)
5044 if v_2.Op != OpAMD64InvertFlags {
5048 v.reset(OpAMD64CMOVLCS)
5049 v.AddArg3(x, y, cond)
5052 // match: (CMOVLHI y _ (FlagEQ))
5056 if v_2.Op != OpAMD64FlagEQ {
5062 // match: (CMOVLHI _ x (FlagGT_UGT))
5066 if v_2.Op != OpAMD64FlagGT_UGT {
5072 // match: (CMOVLHI y _ (FlagGT_ULT))
5076 if v_2.Op != OpAMD64FlagGT_ULT {
5082 // match: (CMOVLHI y _ (FlagLT_ULT))
5086 if v_2.Op != OpAMD64FlagLT_ULT {
5092 // match: (CMOVLHI _ x (FlagLT_UGT))
5096 if v_2.Op != OpAMD64FlagLT_UGT {
5104 func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool {
5108 // match: (CMOVLLE x y (InvertFlags cond))
5109 // result: (CMOVLGE x y cond)
5113 if v_2.Op != OpAMD64InvertFlags {
5117 v.reset(OpAMD64CMOVLGE)
5118 v.AddArg3(x, y, cond)
5121 // match: (CMOVLLE _ x (FlagEQ))
5125 if v_2.Op != OpAMD64FlagEQ {
5131 // match: (CMOVLLE y _ (FlagGT_UGT))
5135 if v_2.Op != OpAMD64FlagGT_UGT {
5141 // match: (CMOVLLE y _ (FlagGT_ULT))
5145 if v_2.Op != OpAMD64FlagGT_ULT {
5151 // match: (CMOVLLE _ x (FlagLT_ULT))
5155 if v_2.Op != OpAMD64FlagLT_ULT {
5161 // match: (CMOVLLE _ x (FlagLT_UGT))
5165 if v_2.Op != OpAMD64FlagLT_UGT {
5173 func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool {
5177 // match: (CMOVLLS x y (InvertFlags cond))
5178 // result: (CMOVLCC x y cond)
5182 if v_2.Op != OpAMD64InvertFlags {
5186 v.reset(OpAMD64CMOVLCC)
5187 v.AddArg3(x, y, cond)
5190 // match: (CMOVLLS _ x (FlagEQ))
5194 if v_2.Op != OpAMD64FlagEQ {
5200 // match: (CMOVLLS y _ (FlagGT_UGT))
5204 if v_2.Op != OpAMD64FlagGT_UGT {
5210 // match: (CMOVLLS _ x (FlagGT_ULT))
5214 if v_2.Op != OpAMD64FlagGT_ULT {
5220 // match: (CMOVLLS _ x (FlagLT_ULT))
5224 if v_2.Op != OpAMD64FlagLT_ULT {
5230 // match: (CMOVLLS y _ (FlagLT_UGT))
5234 if v_2.Op != OpAMD64FlagLT_UGT {
5242 func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool {
5246 // match: (CMOVLLT x y (InvertFlags cond))
5247 // result: (CMOVLGT x y cond)
5251 if v_2.Op != OpAMD64InvertFlags {
5255 v.reset(OpAMD64CMOVLGT)
5256 v.AddArg3(x, y, cond)
5259 // match: (CMOVLLT y _ (FlagEQ))
5263 if v_2.Op != OpAMD64FlagEQ {
5269 // match: (CMOVLLT y _ (FlagGT_UGT))
5273 if v_2.Op != OpAMD64FlagGT_UGT {
5279 // match: (CMOVLLT y _ (FlagGT_ULT))
5283 if v_2.Op != OpAMD64FlagGT_ULT {
5289 // match: (CMOVLLT _ x (FlagLT_ULT))
5293 if v_2.Op != OpAMD64FlagLT_ULT {
5299 // match: (CMOVLLT _ x (FlagLT_UGT))
5303 if v_2.Op != OpAMD64FlagLT_UGT {
5311 func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool {
5315 // match: (CMOVLNE x y (InvertFlags cond))
5316 // result: (CMOVLNE x y cond)
5320 if v_2.Op != OpAMD64InvertFlags {
5324 v.reset(OpAMD64CMOVLNE)
5325 v.AddArg3(x, y, cond)
5328 // match: (CMOVLNE y _ (FlagEQ))
5332 if v_2.Op != OpAMD64FlagEQ {
5338 // match: (CMOVLNE _ x (FlagGT_UGT))
5342 if v_2.Op != OpAMD64FlagGT_UGT {
5348 // match: (CMOVLNE _ x (FlagGT_ULT))
5352 if v_2.Op != OpAMD64FlagGT_ULT {
5358 // match: (CMOVLNE _ x (FlagLT_ULT))
5362 if v_2.Op != OpAMD64FlagLT_ULT {
5368 // match: (CMOVLNE _ x (FlagLT_UGT))
5372 if v_2.Op != OpAMD64FlagLT_UGT {
5380 func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool {
5384 // match: (CMOVQCC x y (InvertFlags cond))
5385 // result: (CMOVQLS x y cond)
5389 if v_2.Op != OpAMD64InvertFlags {
5393 v.reset(OpAMD64CMOVQLS)
5394 v.AddArg3(x, y, cond)
5397 // match: (CMOVQCC _ x (FlagEQ))
5401 if v_2.Op != OpAMD64FlagEQ {
5407 // match: (CMOVQCC _ x (FlagGT_UGT))
5411 if v_2.Op != OpAMD64FlagGT_UGT {
5417 // match: (CMOVQCC y _ (FlagGT_ULT))
5421 if v_2.Op != OpAMD64FlagGT_ULT {
5427 // match: (CMOVQCC y _ (FlagLT_ULT))
5431 if v_2.Op != OpAMD64FlagLT_ULT {
5437 // match: (CMOVQCC _ x (FlagLT_UGT))
5441 if v_2.Op != OpAMD64FlagLT_UGT {
5449 func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool {
5453 // match: (CMOVQCS x y (InvertFlags cond))
5454 // result: (CMOVQHI x y cond)
5458 if v_2.Op != OpAMD64InvertFlags {
5462 v.reset(OpAMD64CMOVQHI)
5463 v.AddArg3(x, y, cond)
5466 // match: (CMOVQCS y _ (FlagEQ))
5470 if v_2.Op != OpAMD64FlagEQ {
5476 // match: (CMOVQCS y _ (FlagGT_UGT))
5480 if v_2.Op != OpAMD64FlagGT_UGT {
5486 // match: (CMOVQCS _ x (FlagGT_ULT))
5490 if v_2.Op != OpAMD64FlagGT_ULT {
5496 // match: (CMOVQCS _ x (FlagLT_ULT))
5500 if v_2.Op != OpAMD64FlagLT_ULT {
5506 // match: (CMOVQCS y _ (FlagLT_UGT))
5510 if v_2.Op != OpAMD64FlagLT_UGT {
5518 func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool {
5522 // match: (CMOVQEQ x y (InvertFlags cond))
5523 // result: (CMOVQEQ x y cond)
5527 if v_2.Op != OpAMD64InvertFlags {
5531 v.reset(OpAMD64CMOVQEQ)
5532 v.AddArg3(x, y, cond)
5535 // match: (CMOVQEQ _ x (FlagEQ))
5539 if v_2.Op != OpAMD64FlagEQ {
5545 // match: (CMOVQEQ y _ (FlagGT_UGT))
5549 if v_2.Op != OpAMD64FlagGT_UGT {
5555 // match: (CMOVQEQ y _ (FlagGT_ULT))
5559 if v_2.Op != OpAMD64FlagGT_ULT {
5565 // match: (CMOVQEQ y _ (FlagLT_ULT))
5569 if v_2.Op != OpAMD64FlagLT_ULT {
5575 // match: (CMOVQEQ y _ (FlagLT_UGT))
5579 if v_2.Op != OpAMD64FlagLT_UGT {
5585 // match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _))))
5590 if v_2.Op != OpSelect1 {
5593 v_2_0 := v_2.Args[0]
5594 if v_2_0.Op != OpAMD64BSFQ {
5597 v_2_0_0 := v_2_0.Args[0]
5598 if v_2_0_0.Op != OpAMD64ORQconst {
5601 c := auxIntToInt32(v_2_0_0.AuxInt)
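// Illustrative sketch (not part of the generated rules): the CMOVQEQ rule above matches
// flags produced by BSFQ of (ORQconst [c] _) with a nonzero c. OR-ing in a nonzero
// constant makes the operand nonzero, so the zero flag can never be set and the
// conditional move statically selects x; this removes the zero-input guard inserted by
// the generic count-trailing-zeros lowering. exampleCtzSelect is a hypothetical scalar
// rendering of that simplification.
func exampleCtzSelect(x, fallback int, sentinelOred uint64) int {
	if sentinelOred == 0 {
		return fallback // never taken once a nonzero sentinel has been OR-ed in
	}
	return x
}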
5610 func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool {
5614 // match: (CMOVQGE x y (InvertFlags cond))
5615 // result: (CMOVQLE x y cond)
5619 if v_2.Op != OpAMD64InvertFlags {
5623 v.reset(OpAMD64CMOVQLE)
5624 v.AddArg3(x, y, cond)
5627 // match: (CMOVQGE _ x (FlagEQ))
5631 if v_2.Op != OpAMD64FlagEQ {
5637 // match: (CMOVQGE _ x (FlagGT_UGT))
5641 if v_2.Op != OpAMD64FlagGT_UGT {
5647 // match: (CMOVQGE _ x (FlagGT_ULT))
5651 if v_2.Op != OpAMD64FlagGT_ULT {
5657 // match: (CMOVQGE y _ (FlagLT_ULT))
5661 if v_2.Op != OpAMD64FlagLT_ULT {
5667 // match: (CMOVQGE y _ (FlagLT_UGT))
5671 if v_2.Op != OpAMD64FlagLT_UGT {
5679 func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool {
5683 // match: (CMOVQGT x y (InvertFlags cond))
5684 // result: (CMOVQLT x y cond)
5688 if v_2.Op != OpAMD64InvertFlags {
5692 v.reset(OpAMD64CMOVQLT)
5693 v.AddArg3(x, y, cond)
5696 // match: (CMOVQGT y _ (FlagEQ))
5700 if v_2.Op != OpAMD64FlagEQ {
5706 // match: (CMOVQGT _ x (FlagGT_UGT))
5710 if v_2.Op != OpAMD64FlagGT_UGT {
5716 // match: (CMOVQGT _ x (FlagGT_ULT))
5720 if v_2.Op != OpAMD64FlagGT_ULT {
5726 // match: (CMOVQGT y _ (FlagLT_ULT))
5730 if v_2.Op != OpAMD64FlagLT_ULT {
5736 // match: (CMOVQGT y _ (FlagLT_UGT))
5740 if v_2.Op != OpAMD64FlagLT_UGT {
5748 func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool {
5752 // match: (CMOVQHI x y (InvertFlags cond))
5753 // result: (CMOVQCS x y cond)
5757 if v_2.Op != OpAMD64InvertFlags {
5761 v.reset(OpAMD64CMOVQCS)
5762 v.AddArg3(x, y, cond)
5765 // match: (CMOVQHI y _ (FlagEQ))
5769 if v_2.Op != OpAMD64FlagEQ {
5775 // match: (CMOVQHI _ x (FlagGT_UGT))
5779 if v_2.Op != OpAMD64FlagGT_UGT {
5785 // match: (CMOVQHI y _ (FlagGT_ULT))
5789 if v_2.Op != OpAMD64FlagGT_ULT {
5795 // match: (CMOVQHI y _ (FlagLT_ULT))
5799 if v_2.Op != OpAMD64FlagLT_ULT {
5805 // match: (CMOVQHI _ x (FlagLT_UGT))
5809 if v_2.Op != OpAMD64FlagLT_UGT {
5817 func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool {
5821 // match: (CMOVQLE x y (InvertFlags cond))
5822 // result: (CMOVQGE x y cond)
5826 if v_2.Op != OpAMD64InvertFlags {
5830 v.reset(OpAMD64CMOVQGE)
5831 v.AddArg3(x, y, cond)
5834 // match: (CMOVQLE _ x (FlagEQ))
5838 if v_2.Op != OpAMD64FlagEQ {
5844 // match: (CMOVQLE y _ (FlagGT_UGT))
5848 if v_2.Op != OpAMD64FlagGT_UGT {
5854 // match: (CMOVQLE y _ (FlagGT_ULT))
5858 if v_2.Op != OpAMD64FlagGT_ULT {
5864 // match: (CMOVQLE _ x (FlagLT_ULT))
5868 if v_2.Op != OpAMD64FlagLT_ULT {
5874 // match: (CMOVQLE _ x (FlagLT_UGT))
5878 if v_2.Op != OpAMD64FlagLT_UGT {
5886 func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool {
5890 // match: (CMOVQLS x y (InvertFlags cond))
5891 // result: (CMOVQCC x y cond)
5895 if v_2.Op != OpAMD64InvertFlags {
5899 v.reset(OpAMD64CMOVQCC)
5900 v.AddArg3(x, y, cond)
5903 // match: (CMOVQLS _ x (FlagEQ))
5907 if v_2.Op != OpAMD64FlagEQ {
5913 // match: (CMOVQLS y _ (FlagGT_UGT))
5917 if v_2.Op != OpAMD64FlagGT_UGT {
5923 // match: (CMOVQLS _ x (FlagGT_ULT))
5927 if v_2.Op != OpAMD64FlagGT_ULT {
5933 // match: (CMOVQLS _ x (FlagLT_ULT))
5937 if v_2.Op != OpAMD64FlagLT_ULT {
5943 // match: (CMOVQLS y _ (FlagLT_UGT))
5947 if v_2.Op != OpAMD64FlagLT_UGT {
5955 func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool {
5959 // match: (CMOVQLT x y (InvertFlags cond))
5960 // result: (CMOVQGT x y cond)
5964 if v_2.Op != OpAMD64InvertFlags {
5968 v.reset(OpAMD64CMOVQGT)
5969 v.AddArg3(x, y, cond)
5972 // match: (CMOVQLT y _ (FlagEQ))
5976 if v_2.Op != OpAMD64FlagEQ {
5982 // match: (CMOVQLT y _ (FlagGT_UGT))
5986 if v_2.Op != OpAMD64FlagGT_UGT {
5992 // match: (CMOVQLT y _ (FlagGT_ULT))
5996 if v_2.Op != OpAMD64FlagGT_ULT {
6002 // match: (CMOVQLT _ x (FlagLT_ULT))
6006 if v_2.Op != OpAMD64FlagLT_ULT {
6012 // match: (CMOVQLT _ x (FlagLT_UGT))
6016 if v_2.Op != OpAMD64FlagLT_UGT {
6024 func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool {
6028 // match: (CMOVQNE x y (InvertFlags cond))
6029 // result: (CMOVQNE x y cond)
6033 if v_2.Op != OpAMD64InvertFlags {
6037 v.reset(OpAMD64CMOVQNE)
6038 v.AddArg3(x, y, cond)
6041 // match: (CMOVQNE y _ (FlagEQ))
6045 if v_2.Op != OpAMD64FlagEQ {
6051 // match: (CMOVQNE _ x (FlagGT_UGT))
6055 if v_2.Op != OpAMD64FlagGT_UGT {
6061 // match: (CMOVQNE _ x (FlagGT_ULT))
6065 if v_2.Op != OpAMD64FlagGT_ULT {
6071 // match: (CMOVQNE _ x (FlagLT_ULT))
6075 if v_2.Op != OpAMD64FlagLT_ULT {
6081 // match: (CMOVQNE _ x (FlagLT_UGT))
6085 if v_2.Op != OpAMD64FlagLT_UGT {
6093 func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool {
6097 // match: (CMOVWCC x y (InvertFlags cond))
6098 // result: (CMOVWLS x y cond)
6102 if v_2.Op != OpAMD64InvertFlags {
6106 v.reset(OpAMD64CMOVWLS)
6107 v.AddArg3(x, y, cond)
6110 // match: (CMOVWCC _ x (FlagEQ))
6114 if v_2.Op != OpAMD64FlagEQ {
6120 // match: (CMOVWCC _ x (FlagGT_UGT))
6124 if v_2.Op != OpAMD64FlagGT_UGT {
6130 // match: (CMOVWCC y _ (FlagGT_ULT))
6134 if v_2.Op != OpAMD64FlagGT_ULT {
6140 // match: (CMOVWCC y _ (FlagLT_ULT))
6144 if v_2.Op != OpAMD64FlagLT_ULT {
6150 // match: (CMOVWCC _ x (FlagLT_UGT))
6154 if v_2.Op != OpAMD64FlagLT_UGT {
6162 func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool {
6166 // match: (CMOVWCS x y (InvertFlags cond))
6167 // result: (CMOVWHI x y cond)
6171 if v_2.Op != OpAMD64InvertFlags {
6175 v.reset(OpAMD64CMOVWHI)
6176 v.AddArg3(x, y, cond)
6179 // match: (CMOVWCS y _ (FlagEQ))
6183 if v_2.Op != OpAMD64FlagEQ {
6189 // match: (CMOVWCS y _ (FlagGT_UGT))
6193 if v_2.Op != OpAMD64FlagGT_UGT {
6199 // match: (CMOVWCS _ x (FlagGT_ULT))
6203 if v_2.Op != OpAMD64FlagGT_ULT {
6209 // match: (CMOVWCS _ x (FlagLT_ULT))
6213 if v_2.Op != OpAMD64FlagLT_ULT {
6219 // match: (CMOVWCS y _ (FlagLT_UGT))
6223 if v_2.Op != OpAMD64FlagLT_UGT {
6231 func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool {
6235 // match: (CMOVWEQ x y (InvertFlags cond))
6236 // result: (CMOVWEQ x y cond)
6240 if v_2.Op != OpAMD64InvertFlags {
6244 v.reset(OpAMD64CMOVWEQ)
6245 v.AddArg3(x, y, cond)
6248 // match: (CMOVWEQ _ x (FlagEQ))
6252 if v_2.Op != OpAMD64FlagEQ {
6258 // match: (CMOVWEQ y _ (FlagGT_UGT))
6262 if v_2.Op != OpAMD64FlagGT_UGT {
6268 // match: (CMOVWEQ y _ (FlagGT_ULT))
6272 if v_2.Op != OpAMD64FlagGT_ULT {
6278 // match: (CMOVWEQ y _ (FlagLT_ULT))
6282 if v_2.Op != OpAMD64FlagLT_ULT {
6288 // match: (CMOVWEQ y _ (FlagLT_UGT))
6292 if v_2.Op != OpAMD64FlagLT_UGT {
6300 func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool {
6304 // match: (CMOVWGE x y (InvertFlags cond))
6305 // result: (CMOVWLE x y cond)
6309 if v_2.Op != OpAMD64InvertFlags {
6313 v.reset(OpAMD64CMOVWLE)
6314 v.AddArg3(x, y, cond)
6317 // match: (CMOVWGE _ x (FlagEQ))
6321 if v_2.Op != OpAMD64FlagEQ {
6327 // match: (CMOVWGE _ x (FlagGT_UGT))
6331 if v_2.Op != OpAMD64FlagGT_UGT {
6337 // match: (CMOVWGE _ x (FlagGT_ULT))
6341 if v_2.Op != OpAMD64FlagGT_ULT {
6347 // match: (CMOVWGE y _ (FlagLT_ULT))
6351 if v_2.Op != OpAMD64FlagLT_ULT {
6357 // match: (CMOVWGE y _ (FlagLT_UGT))
6361 if v_2.Op != OpAMD64FlagLT_UGT {
6369 func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool {
6373 // match: (CMOVWGT x y (InvertFlags cond))
6374 // result: (CMOVWLT x y cond)
6378 if v_2.Op != OpAMD64InvertFlags {
6382 v.reset(OpAMD64CMOVWLT)
6383 v.AddArg3(x, y, cond)
6386 // match: (CMOVWGT y _ (FlagEQ))
6390 if v_2.Op != OpAMD64FlagEQ {
6396 // match: (CMOVWGT _ x (FlagGT_UGT))
6400 if v_2.Op != OpAMD64FlagGT_UGT {
6406 // match: (CMOVWGT _ x (FlagGT_ULT))
6410 if v_2.Op != OpAMD64FlagGT_ULT {
6416 // match: (CMOVWGT y _ (FlagLT_ULT))
6420 if v_2.Op != OpAMD64FlagLT_ULT {
6426 // match: (CMOVWGT y _ (FlagLT_UGT))
6430 if v_2.Op != OpAMD64FlagLT_UGT {
6438 func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool {
6442 // match: (CMOVWHI x y (InvertFlags cond))
6443 // result: (CMOVWCS x y cond)
6447 if v_2.Op != OpAMD64InvertFlags {
6451 v.reset(OpAMD64CMOVWCS)
6452 v.AddArg3(x, y, cond)
6455 // match: (CMOVWHI y _ (FlagEQ))
6459 if v_2.Op != OpAMD64FlagEQ {
6465 // match: (CMOVWHI _ x (FlagGT_UGT))
6469 if v_2.Op != OpAMD64FlagGT_UGT {
6475 // match: (CMOVWHI y _ (FlagGT_ULT))
6479 if v_2.Op != OpAMD64FlagGT_ULT {
6485 // match: (CMOVWHI y _ (FlagLT_ULT))
6489 if v_2.Op != OpAMD64FlagLT_ULT {
6495 // match: (CMOVWHI _ x (FlagLT_UGT))
6499 if v_2.Op != OpAMD64FlagLT_UGT {
6507 func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool {
6511 // match: (CMOVWLE x y (InvertFlags cond))
6512 // result: (CMOVWGE x y cond)
6516 if v_2.Op != OpAMD64InvertFlags {
6520 v.reset(OpAMD64CMOVWGE)
6521 v.AddArg3(x, y, cond)
6524 // match: (CMOVWLE _ x (FlagEQ))
6528 if v_2.Op != OpAMD64FlagEQ {
6534 // match: (CMOVWLE y _ (FlagGT_UGT))
6538 if v_2.Op != OpAMD64FlagGT_UGT {
6544 // match: (CMOVWLE y _ (FlagGT_ULT))
6548 if v_2.Op != OpAMD64FlagGT_ULT {
6554 // match: (CMOVWLE _ x (FlagLT_ULT))
6558 if v_2.Op != OpAMD64FlagLT_ULT {
6564 // match: (CMOVWLE _ x (FlagLT_UGT))
6568 if v_2.Op != OpAMD64FlagLT_UGT {
6576 func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool {
6580 // match: (CMOVWLS x y (InvertFlags cond))
6581 // result: (CMOVWCC x y cond)
6585 if v_2.Op != OpAMD64InvertFlags {
6589 v.reset(OpAMD64CMOVWCC)
6590 v.AddArg3(x, y, cond)
6593 // match: (CMOVWLS _ x (FlagEQ))
6597 if v_2.Op != OpAMD64FlagEQ {
6603 // match: (CMOVWLS y _ (FlagGT_UGT))
6607 if v_2.Op != OpAMD64FlagGT_UGT {
6613 // match: (CMOVWLS _ x (FlagGT_ULT))
6617 if v_2.Op != OpAMD64FlagGT_ULT {
6623 // match: (CMOVWLS _ x (FlagLT_ULT))
6627 if v_2.Op != OpAMD64FlagLT_ULT {
6633 // match: (CMOVWLS y _ (FlagLT_UGT))
6637 if v_2.Op != OpAMD64FlagLT_UGT {
6645 func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool {
6649 // match: (CMOVWLT x y (InvertFlags cond))
6650 // result: (CMOVWGT x y cond)
6654 if v_2.Op != OpAMD64InvertFlags {
6658 v.reset(OpAMD64CMOVWGT)
6659 v.AddArg3(x, y, cond)
6662 // match: (CMOVWLT y _ (FlagEQ))
6666 if v_2.Op != OpAMD64FlagEQ {
6672 // match: (CMOVWLT y _ (FlagGT_UGT))
6676 if v_2.Op != OpAMD64FlagGT_UGT {
6682 // match: (CMOVWLT y _ (FlagGT_ULT))
6686 if v_2.Op != OpAMD64FlagGT_ULT {
6692 // match: (CMOVWLT _ x (FlagLT_ULT))
6696 if v_2.Op != OpAMD64FlagLT_ULT {
6702 // match: (CMOVWLT _ x (FlagLT_UGT))
6706 if v_2.Op != OpAMD64FlagLT_UGT {
6714 func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool {
6718 // match: (CMOVWNE x y (InvertFlags cond))
6719 // result: (CMOVWNE x y cond)
6723 if v_2.Op != OpAMD64InvertFlags {
6727 v.reset(OpAMD64CMOVWNE)
6728 v.AddArg3(x, y, cond)
6731 // match: (CMOVWNE y _ (FlagEQ))
6735 if v_2.Op != OpAMD64FlagEQ {
6741 // match: (CMOVWNE _ x (FlagGT_UGT))
6745 if v_2.Op != OpAMD64FlagGT_UGT {
6751 // match: (CMOVWNE _ x (FlagGT_ULT))
6755 if v_2.Op != OpAMD64FlagGT_ULT {
6761 // match: (CMOVWNE _ x (FlagLT_ULT))
6765 if v_2.Op != OpAMD64FlagLT_ULT {
6771 // match: (CMOVWNE _ x (FlagLT_UGT))
6775 if v_2.Op != OpAMD64FlagLT_UGT {
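// Editorial note (not part of the generated code): the CMOVW* rules above fall
// into two groups. With an (InvertFlags cond) argument, the condition code is
// replaced by the one that reads the swapped comparison the same way
// (GT<->LT, GE<->LE, HI<->CS, LS<->CC; EQ and NE map to themselves). With a
// known flag constant (FlagEQ, FlagLT_ULT, FlagLT_UGT, FlagGT_ULT, FlagGT_UGT),
// the conditional move is decided at compile time and folds to whichever data
// operand the condition selects, e.g.
//	(CMOVWGT _ x (FlagGT_UGT)) => x   // "greater than" known true
//	(CMOVWGT y _ (FlagEQ))     => y   // "greater than" known false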
6783 func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool {
6787 // match: (CMPB x (MOVLconst [c]))
6788 // result: (CMPBconst x [int8(c)])
6791 if v_1.Op != OpAMD64MOVLconst {
6794 c := auxIntToInt32(v_1.AuxInt)
6795 v.reset(OpAMD64CMPBconst)
6796 v.AuxInt = int8ToAuxInt(int8(c))
6800 // match: (CMPB (MOVLconst [c]) x)
6801 // result: (InvertFlags (CMPBconst x [int8(c)]))
6803 if v_0.Op != OpAMD64MOVLconst {
6806 c := auxIntToInt32(v_0.AuxInt)
6808 v.reset(OpAMD64InvertFlags)
6809 v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
6810 v0.AuxInt = int8ToAuxInt(int8(c))
6815 // match: (CMPB x y)
6816 // cond: canonLessThan(x,y)
6817 // result: (InvertFlags (CMPB y x))
6821 if !(canonLessThan(x, y)) {
6824 v.reset(OpAMD64InvertFlags)
6825 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
6830 // match: (CMPB l:(MOVBload {sym} [off] ptr mem) x)
6831 // cond: canMergeLoad(v, l) && clobber(l)
6832 // result: (CMPBload {sym} [off] ptr x mem)
6835 if l.Op != OpAMD64MOVBload {
6838 off := auxIntToInt32(l.AuxInt)
6839 sym := auxToSym(l.Aux)
6843 if !(canMergeLoad(v, l) && clobber(l)) {
6846 v.reset(OpAMD64CMPBload)
6847 v.AuxInt = int32ToAuxInt(off)
6848 v.Aux = symToAux(sym)
6849 v.AddArg3(ptr, x, mem)
6852 // match: (CMPB x l:(MOVBload {sym} [off] ptr mem))
6853 // cond: canMergeLoad(v, l) && clobber(l)
6854 // result: (InvertFlags (CMPBload {sym} [off] ptr x mem))
6858 if l.Op != OpAMD64MOVBload {
6861 off := auxIntToInt32(l.AuxInt)
6862 sym := auxToSym(l.Aux)
6865 if !(canMergeLoad(v, l) && clobber(l)) {
6868 v.reset(OpAMD64InvertFlags)
6869 v0 := b.NewValue0(l.Pos, OpAMD64CMPBload, types.TypeFlags)
6870 v0.AuxInt = int32ToAuxInt(off)
6871 v0.Aux = symToAux(sym)
6872 v0.AddArg3(ptr, x, mem)
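// Editorial note (not part of the generated code): a CMPB against a constant is
// narrowed to CMPBconst, truncating the constant to its low byte, since only
// that byte takes part in an 8-bit compare; with invented example values,
// (CMPB x (MOVLconst [300])) becomes (CMPBconst x [44]) because int8(300) == 44.
// When the constant is the first operand, the operands are swapped to fit
// CMPBconst and the result is wrapped in InvertFlags so the flags still read as
// the original operand order. The remaining rules fold a single-use MOVBload
// into a CMPBload, again using InvertFlags when the load supplies the first
// operand.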
6878 func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool {
6881 // match: (CMPBconst (MOVLconst [x]) [y])
6885 y := auxIntToInt8(v.AuxInt)
6886 if v_0.Op != OpAMD64MOVLconst {
6889 x := auxIntToInt32(v_0.AuxInt)
6890 if !(int8(x) == y) {
6893 v.reset(OpAMD64FlagEQ)
6896 // match: (CMPBconst (MOVLconst [x]) [y])
6897 // cond: int8(x)<y && uint8(x)<uint8(y)
6898 // result: (FlagLT_ULT)
6900 y := auxIntToInt8(v.AuxInt)
6901 if v_0.Op != OpAMD64MOVLconst {
6904 x := auxIntToInt32(v_0.AuxInt)
6905 if !(int8(x) < y && uint8(x) < uint8(y)) {
6908 v.reset(OpAMD64FlagLT_ULT)
6911 // match: (CMPBconst (MOVLconst [x]) [y])
6912 // cond: int8(x)<y && uint8(x)>uint8(y)
6913 // result: (FlagLT_UGT)
6915 y := auxIntToInt8(v.AuxInt)
6916 if v_0.Op != OpAMD64MOVLconst {
6919 x := auxIntToInt32(v_0.AuxInt)
6920 if !(int8(x) < y && uint8(x) > uint8(y)) {
6923 v.reset(OpAMD64FlagLT_UGT)
6926 // match: (CMPBconst (MOVLconst [x]) [y])
6927 // cond: int8(x)>y && uint8(x)<uint8(y)
6928 // result: (FlagGT_ULT)
6930 y := auxIntToInt8(v.AuxInt)
6931 if v_0.Op != OpAMD64MOVLconst {
6934 x := auxIntToInt32(v_0.AuxInt)
6935 if !(int8(x) > y && uint8(x) < uint8(y)) {
6938 v.reset(OpAMD64FlagGT_ULT)
6941 // match: (CMPBconst (MOVLconst [x]) [y])
6942 // cond: int8(x)>y && uint8(x)>uint8(y)
6943 // result: (FlagGT_UGT)
6945 y := auxIntToInt8(v.AuxInt)
6946 if v_0.Op != OpAMD64MOVLconst {
6949 x := auxIntToInt32(v_0.AuxInt)
6950 if !(int8(x) > y && uint8(x) > uint8(y)) {
6953 v.reset(OpAMD64FlagGT_UGT)
6956 // match: (CMPBconst (ANDLconst _ [m]) [n])
6957 // cond: 0 <= int8(m) && int8(m) < n
6958 // result: (FlagLT_ULT)
6960 n := auxIntToInt8(v.AuxInt)
6961 if v_0.Op != OpAMD64ANDLconst {
6964 m := auxIntToInt32(v_0.AuxInt)
6965 if !(0 <= int8(m) && int8(m) < n) {
6968 v.reset(OpAMD64FlagLT_ULT)
6971 // match: (CMPBconst a:(ANDL x y) [0])
6972 // cond: a.Uses == 1
6973 // result: (TESTB x y)
6975 if auxIntToInt8(v.AuxInt) != 0 {
6979 if a.Op != OpAMD64ANDL {
6987 v.reset(OpAMD64TESTB)
6991 // match: (CMPBconst a:(ANDLconst [c] x) [0])
6992 // cond: a.Uses == 1
6993 // result: (TESTBconst [int8(c)] x)
6995 if auxIntToInt8(v.AuxInt) != 0 {
6999 if a.Op != OpAMD64ANDLconst {
7002 c := auxIntToInt32(a.AuxInt)
7007 v.reset(OpAMD64TESTBconst)
7008 v.AuxInt = int8ToAuxInt(int8(c))
7012 // match: (CMPBconst x [0])
7013 // result: (TESTB x x)
7015 if auxIntToInt8(v.AuxInt) != 0 {
7019 v.reset(OpAMD64TESTB)
7023 // match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c])
7024 // cond: l.Uses == 1 && clobber(l)
7025 // result: @l.Block (CMPBconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
7027 c := auxIntToInt8(v.AuxInt)
7029 if l.Op != OpAMD64MOVBload {
7032 off := auxIntToInt32(l.AuxInt)
7033 sym := auxToSym(l.Aux)
7036 if !(l.Uses == 1 && clobber(l)) {
7040 v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
7042 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
7043 v0.Aux = symToAux(sym)
7044 v0.AddArg2(ptr, mem)
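// Editorial sketch (not part of the generated code; the helper below is
// invented purely for illustration and is not used by the compiler): the
// CMPBconst constant-folding rules above pick a flag constant by looking at
// both the signed (int8) and unsigned (uint8) orderings of the two known bytes.
func classifyCMPBconstExample(x int32, y int8) string {
	switch {
	case int8(x) == y:
		return "FlagEQ"
	case int8(x) < y && uint8(x) < uint8(y):
		return "FlagLT_ULT"
	case int8(x) < y && uint8(x) > uint8(y):
		return "FlagLT_UGT"
	case int8(x) > y && uint8(x) < uint8(y):
		return "FlagGT_ULT"
	default: // int8(x) > y && uint8(x) > uint8(y)
		return "FlagGT_UGT"
	}
}

// For example, classifyCMPBconstExample(-1, 1) reports "FlagLT_UGT":
// signed -1 < 1, but unsigned 0xFF > 0x01.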
7049 func rewriteValueAMD64_OpAMD64CMPBconstload(v *Value) bool {
7052 // match: (CMPBconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
7053 // cond: ValAndOff(valoff1).canAdd32(off2)
7054 // result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
7056 valoff1 := auxIntToValAndOff(v.AuxInt)
7057 sym := auxToSym(v.Aux)
7058 if v_0.Op != OpAMD64ADDQconst {
7061 off2 := auxIntToInt32(v_0.AuxInt)
7064 if !(ValAndOff(valoff1).canAdd32(off2)) {
7067 v.reset(OpAMD64CMPBconstload)
7068 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7069 v.Aux = symToAux(sym)
7070 v.AddArg2(base, mem)
7073 // match: (CMPBconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
7074 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
7075 // result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
7077 valoff1 := auxIntToValAndOff(v.AuxInt)
7078 sym1 := auxToSym(v.Aux)
7079 if v_0.Op != OpAMD64LEAQ {
7082 off2 := auxIntToInt32(v_0.AuxInt)
7083 sym2 := auxToSym(v_0.Aux)
7086 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
7089 v.reset(OpAMD64CMPBconstload)
7090 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7091 v.Aux = symToAux(mergeSym(sym1, sym2))
7092 v.AddArg2(base, mem)
7097 func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool {
7101 // match: (CMPBload [off1] {sym} (ADDQconst [off2] base) val mem)
7102 // cond: is32Bit(int64(off1)+int64(off2))
7103 // result: (CMPBload [off1+off2] {sym} base val mem)
7105 off1 := auxIntToInt32(v.AuxInt)
7106 sym := auxToSym(v.Aux)
7107 if v_0.Op != OpAMD64ADDQconst {
7110 off2 := auxIntToInt32(v_0.AuxInt)
7114 if !(is32Bit(int64(off1) + int64(off2))) {
7117 v.reset(OpAMD64CMPBload)
7118 v.AuxInt = int32ToAuxInt(off1 + off2)
7119 v.Aux = symToAux(sym)
7120 v.AddArg3(base, val, mem)
7123 // match: (CMPBload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
7124 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
7125 // result: (CMPBload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
7127 off1 := auxIntToInt32(v.AuxInt)
7128 sym1 := auxToSym(v.Aux)
7129 if v_0.Op != OpAMD64LEAQ {
7132 off2 := auxIntToInt32(v_0.AuxInt)
7133 sym2 := auxToSym(v_0.Aux)
7137 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
7140 v.reset(OpAMD64CMPBload)
7141 v.AuxInt = int32ToAuxInt(off1 + off2)
7142 v.Aux = symToAux(mergeSym(sym1, sym2))
7143 v.AddArg3(base, val, mem)
7146 // match: (CMPBload {sym} [off] ptr (MOVLconst [c]) mem)
7147 // result: (CMPBconstload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem)
7149 off := auxIntToInt32(v.AuxInt)
7150 sym := auxToSym(v.Aux)
7152 if v_1.Op != OpAMD64MOVLconst {
7155 c := auxIntToInt32(v_1.AuxInt)
7157 v.reset(OpAMD64CMPBconstload)
7158 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
7159 v.Aux = symToAux(sym)
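// Editorial note (not part of the generated code): the CMP*load and
// CMP*constload rules fold addressing arithmetic into the instruction's aux
// fields. An (ADDQconst [off2] base) or (LEAQ [off2] {sym2} base) feeding the
// address adds off2 to the stored offset (merging symbols in the LEAQ case) as
// long as the combined offset still fits in 32 bits; for example an offset of
// [8] over (ADDQconst [40] base) becomes [48] {sym} base. Comparing a loaded
// value against a constant packs the constant and the offset together into a
// single ValAndOff aux (the *constload forms).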
7165 func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool {
7169 // match: (CMPL x (MOVLconst [c]))
7170 // result: (CMPLconst x [c])
7173 if v_1.Op != OpAMD64MOVLconst {
7176 c := auxIntToInt32(v_1.AuxInt)
7177 v.reset(OpAMD64CMPLconst)
7178 v.AuxInt = int32ToAuxInt(c)
7182 // match: (CMPL (MOVLconst [c]) x)
7183 // result: (InvertFlags (CMPLconst x [c]))
7185 if v_0.Op != OpAMD64MOVLconst {
7188 c := auxIntToInt32(v_0.AuxInt)
7190 v.reset(OpAMD64InvertFlags)
7191 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
7192 v0.AuxInt = int32ToAuxInt(c)
7197 // match: (CMPL x y)
7198 // cond: canonLessThan(x,y)
7199 // result: (InvertFlags (CMPL y x))
7203 if !(canonLessThan(x, y)) {
7206 v.reset(OpAMD64InvertFlags)
7207 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
7212 // match: (CMPL l:(MOVLload {sym} [off] ptr mem) x)
7213 // cond: canMergeLoad(v, l) && clobber(l)
7214 // result: (CMPLload {sym} [off] ptr x mem)
7217 if l.Op != OpAMD64MOVLload {
7220 off := auxIntToInt32(l.AuxInt)
7221 sym := auxToSym(l.Aux)
7225 if !(canMergeLoad(v, l) && clobber(l)) {
7228 v.reset(OpAMD64CMPLload)
7229 v.AuxInt = int32ToAuxInt(off)
7230 v.Aux = symToAux(sym)
7231 v.AddArg3(ptr, x, mem)
7234 // match: (CMPL x l:(MOVLload {sym} [off] ptr mem))
7235 // cond: canMergeLoad(v, l) && clobber(l)
7236 // result: (InvertFlags (CMPLload {sym} [off] ptr x mem))
7240 if l.Op != OpAMD64MOVLload {
7243 off := auxIntToInt32(l.AuxInt)
7244 sym := auxToSym(l.Aux)
7247 if !(canMergeLoad(v, l) && clobber(l)) {
7250 v.reset(OpAMD64InvertFlags)
7251 v0 := b.NewValue0(l.Pos, OpAMD64CMPLload, types.TypeFlags)
7252 v0.AuxInt = int32ToAuxInt(off)
7253 v0.Aux = symToAux(sym)
7254 v0.AddArg3(ptr, x, mem)
7260 func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool {
7263 // match: (CMPLconst (MOVLconst [x]) [y])
7267 y := auxIntToInt32(v.AuxInt)
7268 if v_0.Op != OpAMD64MOVLconst {
7271 x := auxIntToInt32(v_0.AuxInt)
7275 v.reset(OpAMD64FlagEQ)
7278 // match: (CMPLconst (MOVLconst [x]) [y])
7279 // cond: x<y && uint32(x)<uint32(y)
7280 // result: (FlagLT_ULT)
7282 y := auxIntToInt32(v.AuxInt)
7283 if v_0.Op != OpAMD64MOVLconst {
7286 x := auxIntToInt32(v_0.AuxInt)
7287 if !(x < y && uint32(x) < uint32(y)) {
7290 v.reset(OpAMD64FlagLT_ULT)
7293 // match: (CMPLconst (MOVLconst [x]) [y])
7294 // cond: x<y && uint32(x)>uint32(y)
7295 // result: (FlagLT_UGT)
7297 y := auxIntToInt32(v.AuxInt)
7298 if v_0.Op != OpAMD64MOVLconst {
7301 x := auxIntToInt32(v_0.AuxInt)
7302 if !(x < y && uint32(x) > uint32(y)) {
7305 v.reset(OpAMD64FlagLT_UGT)
7308 // match: (CMPLconst (MOVLconst [x]) [y])
7309 // cond: x>y && uint32(x)<uint32(y)
7310 // result: (FlagGT_ULT)
7312 y := auxIntToInt32(v.AuxInt)
7313 if v_0.Op != OpAMD64MOVLconst {
7316 x := auxIntToInt32(v_0.AuxInt)
7317 if !(x > y && uint32(x) < uint32(y)) {
7320 v.reset(OpAMD64FlagGT_ULT)
7323 // match: (CMPLconst (MOVLconst [x]) [y])
7324 // cond: x>y && uint32(x)>uint32(y)
7325 // result: (FlagGT_UGT)
7327 y := auxIntToInt32(v.AuxInt)
7328 if v_0.Op != OpAMD64MOVLconst {
7331 x := auxIntToInt32(v_0.AuxInt)
7332 if !(x > y && uint32(x) > uint32(y)) {
7335 v.reset(OpAMD64FlagGT_UGT)
7338 // match: (CMPLconst (SHRLconst _ [c]) [n])
7339 // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
7340 // result: (FlagLT_ULT)
7342 n := auxIntToInt32(v.AuxInt)
7343 if v_0.Op != OpAMD64SHRLconst {
7346 c := auxIntToInt8(v_0.AuxInt)
7347 if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
7350 v.reset(OpAMD64FlagLT_ULT)
7353 // match: (CMPLconst (ANDLconst _ [m]) [n])
7354 // cond: 0 <= m && m < n
7355 // result: (FlagLT_ULT)
7357 n := auxIntToInt32(v.AuxInt)
7358 if v_0.Op != OpAMD64ANDLconst {
7361 m := auxIntToInt32(v_0.AuxInt)
7362 if !(0 <= m && m < n) {
7365 v.reset(OpAMD64FlagLT_ULT)
7368 // match: (CMPLconst a:(ANDL x y) [0])
7369 // cond: a.Uses == 1
7370 // result: (TESTL x y)
7372 if auxIntToInt32(v.AuxInt) != 0 {
7376 if a.Op != OpAMD64ANDL {
7384 v.reset(OpAMD64TESTL)
7388 // match: (CMPLconst a:(ANDLconst [c] x) [0])
7389 // cond: a.Uses == 1
7390 // result: (TESTLconst [c] x)
7392 if auxIntToInt32(v.AuxInt) != 0 {
7396 if a.Op != OpAMD64ANDLconst {
7399 c := auxIntToInt32(a.AuxInt)
7404 v.reset(OpAMD64TESTLconst)
7405 v.AuxInt = int32ToAuxInt(c)
7409 // match: (CMPLconst x [0])
7410 // result: (TESTL x x)
7412 if auxIntToInt32(v.AuxInt) != 0 {
7416 v.reset(OpAMD64TESTL)
7420 // match: (CMPLconst l:(MOVLload {sym} [off] ptr mem) [c])
7421 // cond: l.Uses == 1 && clobber(l)
7422 // result: @l.Block (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
7424 c := auxIntToInt32(v.AuxInt)
7426 if l.Op != OpAMD64MOVLload {
7429 off := auxIntToInt32(l.AuxInt)
7430 sym := auxToSym(l.Aux)
7433 if !(l.Uses == 1 && clobber(l)) {
7437 v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
7439 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
7440 v0.Aux = symToAux(sym)
7441 v0.AddArg2(ptr, mem)
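// Editorial note (not part of the generated code): the SHRLconst and ANDLconst
// rules above are range arguments. A 32-bit value shifted right by c is always
// below 1<<(32-c), so once the compared constant n is at least that bound the
// unsigned comparison is known to be "below" (FlagLT_ULT); e.g. for c = 24 the
// shifted value is at most 0xFF, so any n >= 256 folds the compare. Likewise a
// value masked with a non-negative constant m is at most m, so comparing it
// against any n > m always yields FlagLT_ULT.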
7446 func rewriteValueAMD64_OpAMD64CMPLconstload(v *Value) bool {
7449 // match: (CMPLconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
7450 // cond: ValAndOff(valoff1).canAdd32(off2)
7451 // result: (CMPLconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
7453 valoff1 := auxIntToValAndOff(v.AuxInt)
7454 sym := auxToSym(v.Aux)
7455 if v_0.Op != OpAMD64ADDQconst {
7458 off2 := auxIntToInt32(v_0.AuxInt)
7461 if !(ValAndOff(valoff1).canAdd32(off2)) {
7464 v.reset(OpAMD64CMPLconstload)
7465 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7466 v.Aux = symToAux(sym)
7467 v.AddArg2(base, mem)
7470 // match: (CMPLconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
7471 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
7472 // result: (CMPLconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
7474 valoff1 := auxIntToValAndOff(v.AuxInt)
7475 sym1 := auxToSym(v.Aux)
7476 if v_0.Op != OpAMD64LEAQ {
7479 off2 := auxIntToInt32(v_0.AuxInt)
7480 sym2 := auxToSym(v_0.Aux)
7483 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
7486 v.reset(OpAMD64CMPLconstload)
7487 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7488 v.Aux = symToAux(mergeSym(sym1, sym2))
7489 v.AddArg2(base, mem)
7494 func rewriteValueAMD64_OpAMD64CMPLload(v *Value) bool {
7498 // match: (CMPLload [off1] {sym} (ADDQconst [off2] base) val mem)
7499 // cond: is32Bit(int64(off1)+int64(off2))
7500 // result: (CMPLload [off1+off2] {sym} base val mem)
7502 off1 := auxIntToInt32(v.AuxInt)
7503 sym := auxToSym(v.Aux)
7504 if v_0.Op != OpAMD64ADDQconst {
7507 off2 := auxIntToInt32(v_0.AuxInt)
7511 if !(is32Bit(int64(off1) + int64(off2))) {
7514 v.reset(OpAMD64CMPLload)
7515 v.AuxInt = int32ToAuxInt(off1 + off2)
7516 v.Aux = symToAux(sym)
7517 v.AddArg3(base, val, mem)
7520 // match: (CMPLload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
7521 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
7522 // result: (CMPLload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
7524 off1 := auxIntToInt32(v.AuxInt)
7525 sym1 := auxToSym(v.Aux)
7526 if v_0.Op != OpAMD64LEAQ {
7529 off2 := auxIntToInt32(v_0.AuxInt)
7530 sym2 := auxToSym(v_0.Aux)
7534 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
7537 v.reset(OpAMD64CMPLload)
7538 v.AuxInt = int32ToAuxInt(off1 + off2)
7539 v.Aux = symToAux(mergeSym(sym1, sym2))
7540 v.AddArg3(base, val, mem)
7543 // match: (CMPLload {sym} [off] ptr (MOVLconst [c]) mem)
7544 // result: (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
7546 off := auxIntToInt32(v.AuxInt)
7547 sym := auxToSym(v.Aux)
7549 if v_1.Op != OpAMD64MOVLconst {
7552 c := auxIntToInt32(v_1.AuxInt)
7554 v.reset(OpAMD64CMPLconstload)
7555 v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
7556 v.Aux = symToAux(sym)
7562 func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool {
7566 // match: (CMPQ x (MOVQconst [c]))
7568 // result: (CMPQconst x [int32(c)])
7571 if v_1.Op != OpAMD64MOVQconst {
7574 c := auxIntToInt64(v_1.AuxInt)
7578 v.reset(OpAMD64CMPQconst)
7579 v.AuxInt = int32ToAuxInt(int32(c))
7583 // match: (CMPQ (MOVQconst [c]) x)
7585 // result: (InvertFlags (CMPQconst x [int32(c)]))
7587 if v_0.Op != OpAMD64MOVQconst {
7590 c := auxIntToInt64(v_0.AuxInt)
7595 v.reset(OpAMD64InvertFlags)
7596 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
7597 v0.AuxInt = int32ToAuxInt(int32(c))
7602 // match: (CMPQ x y)
7603 // cond: canonLessThan(x,y)
7604 // result: (InvertFlags (CMPQ y x))
7608 if !(canonLessThan(x, y)) {
7611 v.reset(OpAMD64InvertFlags)
7612 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
7617 // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
7621 if v_0.Op != OpAMD64MOVQconst {
7624 x := auxIntToInt64(v_0.AuxInt)
7625 if v_1.Op != OpAMD64MOVQconst {
7628 y := auxIntToInt64(v_1.AuxInt)
7632 v.reset(OpAMD64FlagEQ)
7635 // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
7636 // cond: x<y && uint64(x)<uint64(y)
7637 // result: (FlagLT_ULT)
7639 if v_0.Op != OpAMD64MOVQconst {
7642 x := auxIntToInt64(v_0.AuxInt)
7643 if v_1.Op != OpAMD64MOVQconst {
7646 y := auxIntToInt64(v_1.AuxInt)
7647 if !(x < y && uint64(x) < uint64(y)) {
7650 v.reset(OpAMD64FlagLT_ULT)
7653 // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
7654 // cond: x<y && uint64(x)>uint64(y)
7655 // result: (FlagLT_UGT)
7657 if v_0.Op != OpAMD64MOVQconst {
7660 x := auxIntToInt64(v_0.AuxInt)
7661 if v_1.Op != OpAMD64MOVQconst {
7664 y := auxIntToInt64(v_1.AuxInt)
7665 if !(x < y && uint64(x) > uint64(y)) {
7668 v.reset(OpAMD64FlagLT_UGT)
7671 // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
7672 // cond: x>y && uint64(x)<uint64(y)
7673 // result: (FlagGT_ULT)
7675 if v_0.Op != OpAMD64MOVQconst {
7678 x := auxIntToInt64(v_0.AuxInt)
7679 if v_1.Op != OpAMD64MOVQconst {
7682 y := auxIntToInt64(v_1.AuxInt)
7683 if !(x > y && uint64(x) < uint64(y)) {
7686 v.reset(OpAMD64FlagGT_ULT)
7689 // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
7690 // cond: x>y && uint64(x)>uint64(y)
7691 // result: (FlagGT_UGT)
7693 if v_0.Op != OpAMD64MOVQconst {
7696 x := auxIntToInt64(v_0.AuxInt)
7697 if v_1.Op != OpAMD64MOVQconst {
7700 y := auxIntToInt64(v_1.AuxInt)
7701 if !(x > y && uint64(x) > uint64(y)) {
7704 v.reset(OpAMD64FlagGT_UGT)
7707 // match: (CMPQ l:(MOVQload {sym} [off] ptr mem) x)
7708 // cond: canMergeLoad(v, l) && clobber(l)
7709 // result: (CMPQload {sym} [off] ptr x mem)
7712 if l.Op != OpAMD64MOVQload {
7715 off := auxIntToInt32(l.AuxInt)
7716 sym := auxToSym(l.Aux)
7720 if !(canMergeLoad(v, l) && clobber(l)) {
7723 v.reset(OpAMD64CMPQload)
7724 v.AuxInt = int32ToAuxInt(off)
7725 v.Aux = symToAux(sym)
7726 v.AddArg3(ptr, x, mem)
7729 // match: (CMPQ x l:(MOVQload {sym} [off] ptr mem))
7730 // cond: canMergeLoad(v, l) && clobber(l)
7731 // result: (InvertFlags (CMPQload {sym} [off] ptr x mem))
7735 if l.Op != OpAMD64MOVQload {
7738 off := auxIntToInt32(l.AuxInt)
7739 sym := auxToSym(l.Aux)
7742 if !(canMergeLoad(v, l) && clobber(l)) {
7745 v.reset(OpAMD64InvertFlags)
7746 v0 := b.NewValue0(l.Pos, OpAMD64CMPQload, types.TypeFlags)
7747 v0.AuxInt = int32ToAuxInt(off)
7748 v0.Aux = symToAux(sym)
7749 v0.AddArg3(ptr, x, mem)
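// Editorial note (not part of the generated code): the (CMPQ x y) rule guarded
// by canonLessThan puts comparison operands into a canonical order, wrapping
// the swapped compare in InvertFlags so its flags still read correctly;
// presumably this lets a compare of x with y and a compare of y with x be
// recognized as the same value by later passes. When both operands are
// MOVQconst, the compare folds directly to a flag constant using the same
// signed/unsigned case analysis seen for CMPBconst above.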
7755 func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool {
7758 // match: (CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32])
7759 // result: (FlagLT_ULT)
7761 if auxIntToInt32(v.AuxInt) != 32 || v_0.Op != OpAMD64NEGQ {
7764 v_0_0 := v_0.Args[0]
7765 if v_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_0_0.AuxInt) != -16 {
7768 v_0_0_0 := v_0_0.Args[0]
7769 if v_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_0_0.AuxInt) != 15 {
7772 v.reset(OpAMD64FlagLT_ULT)
7775 // match: (CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst [7] _))) [32])
7776 // result: (FlagLT_ULT)
7778 if auxIntToInt32(v.AuxInt) != 32 || v_0.Op != OpAMD64NEGQ {
7781 v_0_0 := v_0.Args[0]
7782 if v_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_0_0.AuxInt) != -8 {
7785 v_0_0_0 := v_0_0.Args[0]
7786 if v_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_0_0.AuxInt) != 7 {
7789 v.reset(OpAMD64FlagLT_ULT)
7792 // match: (CMPQconst (MOVQconst [x]) [y])
7793 // cond: x==int64(y)
7796 y := auxIntToInt32(v.AuxInt)
7797 if v_0.Op != OpAMD64MOVQconst {
7800 x := auxIntToInt64(v_0.AuxInt)
7801 if !(x == int64(y)) {
7804 v.reset(OpAMD64FlagEQ)
7807 // match: (CMPQconst (MOVQconst [x]) [y])
7808 // cond: x<int64(y) && uint64(x)<uint64(int64(y))
7809 // result: (FlagLT_ULT)
7811 y := auxIntToInt32(v.AuxInt)
7812 if v_0.Op != OpAMD64MOVQconst {
7815 x := auxIntToInt64(v_0.AuxInt)
7816 if !(x < int64(y) && uint64(x) < uint64(int64(y))) {
7819 v.reset(OpAMD64FlagLT_ULT)
7822 // match: (CMPQconst (MOVQconst [x]) [y])
7823 // cond: x<int64(y) && uint64(x)>uint64(int64(y))
7824 // result: (FlagLT_UGT)
7826 y := auxIntToInt32(v.AuxInt)
7827 if v_0.Op != OpAMD64MOVQconst {
7830 x := auxIntToInt64(v_0.AuxInt)
7831 if !(x < int64(y) && uint64(x) > uint64(int64(y))) {
7834 v.reset(OpAMD64FlagLT_UGT)
7837 // match: (CMPQconst (MOVQconst [x]) [y])
7838 // cond: x>int64(y) && uint64(x)<uint64(int64(y))
7839 // result: (FlagGT_ULT)
7841 y := auxIntToInt32(v.AuxInt)
7842 if v_0.Op != OpAMD64MOVQconst {
7845 x := auxIntToInt64(v_0.AuxInt)
7846 if !(x > int64(y) && uint64(x) < uint64(int64(y))) {
7849 v.reset(OpAMD64FlagGT_ULT)
7852 // match: (CMPQconst (MOVQconst [x]) [y])
7853 // cond: x>int64(y) && uint64(x)>uint64(int64(y))
7854 // result: (FlagGT_UGT)
7856 y := auxIntToInt32(v.AuxInt)
7857 if v_0.Op != OpAMD64MOVQconst {
7860 x := auxIntToInt64(v_0.AuxInt)
7861 if !(x > int64(y) && uint64(x) > uint64(int64(y))) {
7864 v.reset(OpAMD64FlagGT_UGT)
7867 // match: (CMPQconst (MOVBQZX _) [c])
7869 // result: (FlagLT_ULT)
7871 c := auxIntToInt32(v.AuxInt)
7872 if v_0.Op != OpAMD64MOVBQZX || !(0xFF < c) {
7875 v.reset(OpAMD64FlagLT_ULT)
7878 // match: (CMPQconst (MOVWQZX _) [c])
7880 // result: (FlagLT_ULT)
7882 c := auxIntToInt32(v.AuxInt)
7883 if v_0.Op != OpAMD64MOVWQZX || !(0xFFFF < c) {
7886 v.reset(OpAMD64FlagLT_ULT)
7889 // match: (CMPQconst (SHRQconst _ [c]) [n])
7890 // cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
7891 // result: (FlagLT_ULT)
7893 n := auxIntToInt32(v.AuxInt)
7894 if v_0.Op != OpAMD64SHRQconst {
7897 c := auxIntToInt8(v_0.AuxInt)
7898 if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
7901 v.reset(OpAMD64FlagLT_ULT)
7904 // match: (CMPQconst (ANDQconst _ [m]) [n])
7905 // cond: 0 <= m && m < n
7906 // result: (FlagLT_ULT)
7908 n := auxIntToInt32(v.AuxInt)
7909 if v_0.Op != OpAMD64ANDQconst {
7912 m := auxIntToInt32(v_0.AuxInt)
7913 if !(0 <= m && m < n) {
7916 v.reset(OpAMD64FlagLT_ULT)
7919 // match: (CMPQconst (ANDLconst _ [m]) [n])
7920 // cond: 0 <= m && m < n
7921 // result: (FlagLT_ULT)
7923 n := auxIntToInt32(v.AuxInt)
7924 if v_0.Op != OpAMD64ANDLconst {
7927 m := auxIntToInt32(v_0.AuxInt)
7928 if !(0 <= m && m < n) {
7931 v.reset(OpAMD64FlagLT_ULT)
7934 // match: (CMPQconst a:(ANDQ x y) [0])
7935 // cond: a.Uses == 1
7936 // result: (TESTQ x y)
7938 if auxIntToInt32(v.AuxInt) != 0 {
7942 if a.Op != OpAMD64ANDQ {
7950 v.reset(OpAMD64TESTQ)
7954 // match: (CMPQconst a:(ANDQconst [c] x) [0])
7955 // cond: a.Uses == 1
7956 // result: (TESTQconst [c] x)
7958 if auxIntToInt32(v.AuxInt) != 0 {
7962 if a.Op != OpAMD64ANDQconst {
7965 c := auxIntToInt32(a.AuxInt)
7970 v.reset(OpAMD64TESTQconst)
7971 v.AuxInt = int32ToAuxInt(c)
7975 // match: (CMPQconst x [0])
7976 // result: (TESTQ x x)
7978 if auxIntToInt32(v.AuxInt) != 0 {
7982 v.reset(OpAMD64TESTQ)
7986 // match: (CMPQconst l:(MOVQload {sym} [off] ptr mem) [c])
7987 // cond: l.Uses == 1 && clobber(l)
7988 // result: @l.Block (CMPQconstload {sym} [makeValAndOff(c,off)] ptr mem)
7990 c := auxIntToInt32(v.AuxInt)
7992 if l.Op != OpAMD64MOVQload {
7995 off := auxIntToInt32(l.AuxInt)
7996 sym := auxToSym(l.Aux)
7999 if !(l.Uses == 1 && clobber(l)) {
8003 v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
8005 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
8006 v0.Aux = symToAux(sym)
8007 v0.AddArg2(ptr, mem)
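// Editorial note (not part of the generated code): the first two CMPQconst
// rules are range arguments about masked, negated counts. With m = x&15 in
// [0,15], (ADDQconst [-16]) gives m-16 in [-16,-1] and NEGQ gives 16-m in
// [1,16], which is always unsigned-below 32, hence FlagLT_ULT (and likewise
// [1,8] for the &7 / -8 form). The MOVBQZX/MOVWQZX rules follow the same idea:
// a zero-extended byte or word is at most 0xFF or 0xFFFF, so comparing it
// against any larger constant is always "below".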
8012 func rewriteValueAMD64_OpAMD64CMPQconstload(v *Value) bool {
8015 // match: (CMPQconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
8016 // cond: ValAndOff(valoff1).canAdd32(off2)
8017 // result: (CMPQconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
8019 valoff1 := auxIntToValAndOff(v.AuxInt)
8020 sym := auxToSym(v.Aux)
8021 if v_0.Op != OpAMD64ADDQconst {
8024 off2 := auxIntToInt32(v_0.AuxInt)
8027 if !(ValAndOff(valoff1).canAdd32(off2)) {
8030 v.reset(OpAMD64CMPQconstload)
8031 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
8032 v.Aux = symToAux(sym)
8033 v.AddArg2(base, mem)
8036 // match: (CMPQconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
8037 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
8038 // result: (CMPQconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
8040 valoff1 := auxIntToValAndOff(v.AuxInt)
8041 sym1 := auxToSym(v.Aux)
8042 if v_0.Op != OpAMD64LEAQ {
8045 off2 := auxIntToInt32(v_0.AuxInt)
8046 sym2 := auxToSym(v_0.Aux)
8049 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
8052 v.reset(OpAMD64CMPQconstload)
8053 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
8054 v.Aux = symToAux(mergeSym(sym1, sym2))
8055 v.AddArg2(base, mem)
8060 func rewriteValueAMD64_OpAMD64CMPQload(v *Value) bool {
8064 // match: (CMPQload [off1] {sym} (ADDQconst [off2] base) val mem)
8065 // cond: is32Bit(int64(off1)+int64(off2))
8066 // result: (CMPQload [off1+off2] {sym} base val mem)
8068 off1 := auxIntToInt32(v.AuxInt)
8069 sym := auxToSym(v.Aux)
8070 if v_0.Op != OpAMD64ADDQconst {
8073 off2 := auxIntToInt32(v_0.AuxInt)
8077 if !(is32Bit(int64(off1) + int64(off2))) {
8080 v.reset(OpAMD64CMPQload)
8081 v.AuxInt = int32ToAuxInt(off1 + off2)
8082 v.Aux = symToAux(sym)
8083 v.AddArg3(base, val, mem)
8086 // match: (CMPQload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
8087 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
8088 // result: (CMPQload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
8090 off1 := auxIntToInt32(v.AuxInt)
8091 sym1 := auxToSym(v.Aux)
8092 if v_0.Op != OpAMD64LEAQ {
8095 off2 := auxIntToInt32(v_0.AuxInt)
8096 sym2 := auxToSym(v_0.Aux)
8100 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8103 v.reset(OpAMD64CMPQload)
8104 v.AuxInt = int32ToAuxInt(off1 + off2)
8105 v.Aux = symToAux(mergeSym(sym1, sym2))
8106 v.AddArg3(base, val, mem)
8109 // match: (CMPQload {sym} [off] ptr (MOVQconst [c]) mem)
8110 // cond: validVal(c)
8111 // result: (CMPQconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
8113 off := auxIntToInt32(v.AuxInt)
8114 sym := auxToSym(v.Aux)
8116 if v_1.Op != OpAMD64MOVQconst {
8119 c := auxIntToInt64(v_1.AuxInt)
8124 v.reset(OpAMD64CMPQconstload)
8125 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
8126 v.Aux = symToAux(sym)
8132 func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool {
8136 // match: (CMPW x (MOVLconst [c]))
8137 // result: (CMPWconst x [int16(c)])
8140 if v_1.Op != OpAMD64MOVLconst {
8143 c := auxIntToInt32(v_1.AuxInt)
8144 v.reset(OpAMD64CMPWconst)
8145 v.AuxInt = int16ToAuxInt(int16(c))
8149 // match: (CMPW (MOVLconst [c]) x)
8150 // result: (InvertFlags (CMPWconst x [int16(c)]))
8152 if v_0.Op != OpAMD64MOVLconst {
8155 c := auxIntToInt32(v_0.AuxInt)
8157 v.reset(OpAMD64InvertFlags)
8158 v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
8159 v0.AuxInt = int16ToAuxInt(int16(c))
8164 // match: (CMPW x y)
8165 // cond: canonLessThan(x,y)
8166 // result: (InvertFlags (CMPW y x))
8170 if !(canonLessThan(x, y)) {
8173 v.reset(OpAMD64InvertFlags)
8174 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
8179 // match: (CMPW l:(MOVWload {sym} [off] ptr mem) x)
8180 // cond: canMergeLoad(v, l) && clobber(l)
8181 // result: (CMPWload {sym} [off] ptr x mem)
8184 if l.Op != OpAMD64MOVWload {
8187 off := auxIntToInt32(l.AuxInt)
8188 sym := auxToSym(l.Aux)
8192 if !(canMergeLoad(v, l) && clobber(l)) {
8195 v.reset(OpAMD64CMPWload)
8196 v.AuxInt = int32ToAuxInt(off)
8197 v.Aux = symToAux(sym)
8198 v.AddArg3(ptr, x, mem)
8201 // match: (CMPW x l:(MOVWload {sym} [off] ptr mem))
8202 // cond: canMergeLoad(v, l) && clobber(l)
8203 // result: (InvertFlags (CMPWload {sym} [off] ptr x mem))
8207 if l.Op != OpAMD64MOVWload {
8210 off := auxIntToInt32(l.AuxInt)
8211 sym := auxToSym(l.Aux)
8214 if !(canMergeLoad(v, l) && clobber(l)) {
8217 v.reset(OpAMD64InvertFlags)
8218 v0 := b.NewValue0(l.Pos, OpAMD64CMPWload, types.TypeFlags)
8219 v0.AuxInt = int32ToAuxInt(off)
8220 v0.Aux = symToAux(sym)
8221 v0.AddArg3(ptr, x, mem)
8227 func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool {
8230 // match: (CMPWconst (MOVLconst [x]) [y])
8231 // cond: int16(x)==y
8234 y := auxIntToInt16(v.AuxInt)
8235 if v_0.Op != OpAMD64MOVLconst {
8238 x := auxIntToInt32(v_0.AuxInt)
8239 if !(int16(x) == y) {
8242 v.reset(OpAMD64FlagEQ)
8245 // match: (CMPWconst (MOVLconst [x]) [y])
8246 // cond: int16(x)<y && uint16(x)<uint16(y)
8247 // result: (FlagLT_ULT)
8249 y := auxIntToInt16(v.AuxInt)
8250 if v_0.Op != OpAMD64MOVLconst {
8253 x := auxIntToInt32(v_0.AuxInt)
8254 if !(int16(x) < y && uint16(x) < uint16(y)) {
8257 v.reset(OpAMD64FlagLT_ULT)
8260 // match: (CMPWconst (MOVLconst [x]) [y])
8261 // cond: int16(x)<y && uint16(x)>uint16(y)
8262 // result: (FlagLT_UGT)
8264 y := auxIntToInt16(v.AuxInt)
8265 if v_0.Op != OpAMD64MOVLconst {
8268 x := auxIntToInt32(v_0.AuxInt)
8269 if !(int16(x) < y && uint16(x) > uint16(y)) {
8272 v.reset(OpAMD64FlagLT_UGT)
8275 // match: (CMPWconst (MOVLconst [x]) [y])
8276 // cond: int16(x)>y && uint16(x)<uint16(y)
8277 // result: (FlagGT_ULT)
8279 y := auxIntToInt16(v.AuxInt)
8280 if v_0.Op != OpAMD64MOVLconst {
8283 x := auxIntToInt32(v_0.AuxInt)
8284 if !(int16(x) > y && uint16(x) < uint16(y)) {
8287 v.reset(OpAMD64FlagGT_ULT)
8290 // match: (CMPWconst (MOVLconst [x]) [y])
8291 // cond: int16(x)>y && uint16(x)>uint16(y)
8292 // result: (FlagGT_UGT)
8294 y := auxIntToInt16(v.AuxInt)
8295 if v_0.Op != OpAMD64MOVLconst {
8298 x := auxIntToInt32(v_0.AuxInt)
8299 if !(int16(x) > y && uint16(x) > uint16(y)) {
8302 v.reset(OpAMD64FlagGT_UGT)
8305 // match: (CMPWconst (ANDLconst _ [m]) [n])
8306 // cond: 0 <= int16(m) && int16(m) < n
8307 // result: (FlagLT_ULT)
8309 n := auxIntToInt16(v.AuxInt)
8310 if v_0.Op != OpAMD64ANDLconst {
8313 m := auxIntToInt32(v_0.AuxInt)
8314 if !(0 <= int16(m) && int16(m) < n) {
8317 v.reset(OpAMD64FlagLT_ULT)
8320 // match: (CMPWconst a:(ANDL x y) [0])
8321 // cond: a.Uses == 1
8322 // result: (TESTW x y)
8324 if auxIntToInt16(v.AuxInt) != 0 {
8328 if a.Op != OpAMD64ANDL {
8336 v.reset(OpAMD64TESTW)
8340 // match: (CMPWconst a:(ANDLconst [c] x) [0])
8341 // cond: a.Uses == 1
8342 // result: (TESTWconst [int16(c)] x)
8344 if auxIntToInt16(v.AuxInt) != 0 {
8348 if a.Op != OpAMD64ANDLconst {
8351 c := auxIntToInt32(a.AuxInt)
8356 v.reset(OpAMD64TESTWconst)
8357 v.AuxInt = int16ToAuxInt(int16(c))
8361 // match: (CMPWconst x [0])
8362 // result: (TESTW x x)
8364 if auxIntToInt16(v.AuxInt) != 0 {
8368 v.reset(OpAMD64TESTW)
8372 // match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c])
8373 // cond: l.Uses == 1 && clobber(l)
8374 // result: @l.Block (CMPWconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
8376 c := auxIntToInt16(v.AuxInt)
8378 if l.Op != OpAMD64MOVWload {
8381 off := auxIntToInt32(l.AuxInt)
8382 sym := auxToSym(l.Aux)
8385 if !(l.Uses == 1 && clobber(l)) {
8389 v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
8391 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
8392 v0.Aux = symToAux(sym)
8393 v0.AddArg2(ptr, mem)
8398 func rewriteValueAMD64_OpAMD64CMPWconstload(v *Value) bool {
8401 // match: (CMPWconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
8402 // cond: ValAndOff(valoff1).canAdd32(off2)
8403 // result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
8405 valoff1 := auxIntToValAndOff(v.AuxInt)
8406 sym := auxToSym(v.Aux)
8407 if v_0.Op != OpAMD64ADDQconst {
8410 off2 := auxIntToInt32(v_0.AuxInt)
8413 if !(ValAndOff(valoff1).canAdd32(off2)) {
8416 v.reset(OpAMD64CMPWconstload)
8417 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
8418 v.Aux = symToAux(sym)
8419 v.AddArg2(base, mem)
8422 // match: (CMPWconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
8423 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
8424 // result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
8426 valoff1 := auxIntToValAndOff(v.AuxInt)
8427 sym1 := auxToSym(v.Aux)
8428 if v_0.Op != OpAMD64LEAQ {
8431 off2 := auxIntToInt32(v_0.AuxInt)
8432 sym2 := auxToSym(v_0.Aux)
8435 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
8438 v.reset(OpAMD64CMPWconstload)
8439 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
8440 v.Aux = symToAux(mergeSym(sym1, sym2))
8441 v.AddArg2(base, mem)
8446 func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool {
8450 // match: (CMPWload [off1] {sym} (ADDQconst [off2] base) val mem)
8451 // cond: is32Bit(int64(off1)+int64(off2))
8452 // result: (CMPWload [off1+off2] {sym} base val mem)
8454 off1 := auxIntToInt32(v.AuxInt)
8455 sym := auxToSym(v.Aux)
8456 if v_0.Op != OpAMD64ADDQconst {
8459 off2 := auxIntToInt32(v_0.AuxInt)
8463 if !(is32Bit(int64(off1) + int64(off2))) {
8466 v.reset(OpAMD64CMPWload)
8467 v.AuxInt = int32ToAuxInt(off1 + off2)
8468 v.Aux = symToAux(sym)
8469 v.AddArg3(base, val, mem)
8472 // match: (CMPWload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
8473 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
8474 // result: (CMPWload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
8476 off1 := auxIntToInt32(v.AuxInt)
8477 sym1 := auxToSym(v.Aux)
8478 if v_0.Op != OpAMD64LEAQ {
8481 off2 := auxIntToInt32(v_0.AuxInt)
8482 sym2 := auxToSym(v_0.Aux)
8486 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8489 v.reset(OpAMD64CMPWload)
8490 v.AuxInt = int32ToAuxInt(off1 + off2)
8491 v.Aux = symToAux(mergeSym(sym1, sym2))
8492 v.AddArg3(base, val, mem)
8495 // match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem)
8496 // result: (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem)
8498 off := auxIntToInt32(v.AuxInt)
8499 sym := auxToSym(v.Aux)
8501 if v_1.Op != OpAMD64MOVLconst {
8504 c := auxIntToInt32(v_1.AuxInt)
8506 v.reset(OpAMD64CMPWconstload)
8507 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
8508 v.Aux = symToAux(sym)
8514 func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value) bool {
8519 // match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
8520 // cond: is32Bit(int64(off1)+int64(off2))
8521 // result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
8523 off1 := auxIntToInt32(v.AuxInt)
8524 sym := auxToSym(v.Aux)
8525 if v_0.Op != OpAMD64ADDQconst {
8528 off2 := auxIntToInt32(v_0.AuxInt)
8533 if !(is32Bit(int64(off1) + int64(off2))) {
8536 v.reset(OpAMD64CMPXCHGLlock)
8537 v.AuxInt = int32ToAuxInt(off1 + off2)
8538 v.Aux = symToAux(sym)
8539 v.AddArg4(ptr, old, new_, mem)
8544 func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value) bool {
8549 // match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
8550 // cond: is32Bit(int64(off1)+int64(off2))
8551 // result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
8553 off1 := auxIntToInt32(v.AuxInt)
8554 sym := auxToSym(v.Aux)
8555 if v_0.Op != OpAMD64ADDQconst {
8558 off2 := auxIntToInt32(v_0.AuxInt)
8563 if !(is32Bit(int64(off1) + int64(off2))) {
8566 v.reset(OpAMD64CMPXCHGQlock)
8567 v.AuxInt = int32ToAuxInt(off1 + off2)
8568 v.Aux = symToAux(sym)
8569 v.AddArg4(ptr, old, new_, mem)
8574 func rewriteValueAMD64_OpAMD64DIVSD(v *Value) bool {
8577 // match: (DIVSD x l:(MOVSDload [off] {sym} ptr mem))
8578 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
8579 // result: (DIVSDload x [off] {sym} ptr mem)
8583 if l.Op != OpAMD64MOVSDload {
8586 off := auxIntToInt32(l.AuxInt)
8587 sym := auxToSym(l.Aux)
8590 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
8593 v.reset(OpAMD64DIVSDload)
8594 v.AuxInt = int32ToAuxInt(off)
8595 v.Aux = symToAux(sym)
8596 v.AddArg3(x, ptr, mem)
8601 func rewriteValueAMD64_OpAMD64DIVSDload(v *Value) bool {
8605 // match: (DIVSDload [off1] {sym} val (ADDQconst [off2] base) mem)
8606 // cond: is32Bit(int64(off1)+int64(off2))
8607 // result: (DIVSDload [off1+off2] {sym} val base mem)
8609 off1 := auxIntToInt32(v.AuxInt)
8610 sym := auxToSym(v.Aux)
8612 if v_1.Op != OpAMD64ADDQconst {
8615 off2 := auxIntToInt32(v_1.AuxInt)
8618 if !(is32Bit(int64(off1) + int64(off2))) {
8621 v.reset(OpAMD64DIVSDload)
8622 v.AuxInt = int32ToAuxInt(off1 + off2)
8623 v.Aux = symToAux(sym)
8624 v.AddArg3(val, base, mem)
8627 // match: (DIVSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
8628 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
8629 // result: (DIVSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
8631 off1 := auxIntToInt32(v.AuxInt)
8632 sym1 := auxToSym(v.Aux)
8634 if v_1.Op != OpAMD64LEAQ {
8637 off2 := auxIntToInt32(v_1.AuxInt)
8638 sym2 := auxToSym(v_1.Aux)
8641 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8644 v.reset(OpAMD64DIVSDload)
8645 v.AuxInt = int32ToAuxInt(off1 + off2)
8646 v.Aux = symToAux(mergeSym(sym1, sym2))
8647 v.AddArg3(val, base, mem)
8652 func rewriteValueAMD64_OpAMD64DIVSS(v *Value) bool {
8655 // match: (DIVSS x l:(MOVSSload [off] {sym} ptr mem))
8656 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
8657 // result: (DIVSSload x [off] {sym} ptr mem)
8661 if l.Op != OpAMD64MOVSSload {
8664 off := auxIntToInt32(l.AuxInt)
8665 sym := auxToSym(l.Aux)
8668 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
8671 v.reset(OpAMD64DIVSSload)
8672 v.AuxInt = int32ToAuxInt(off)
8673 v.Aux = symToAux(sym)
8674 v.AddArg3(x, ptr, mem)
8679 func rewriteValueAMD64_OpAMD64DIVSSload(v *Value) bool {
8683 // match: (DIVSSload [off1] {sym} val (ADDQconst [off2] base) mem)
8684 // cond: is32Bit(int64(off1)+int64(off2))
8685 // result: (DIVSSload [off1+off2] {sym} val base mem)
8687 off1 := auxIntToInt32(v.AuxInt)
8688 sym := auxToSym(v.Aux)
8690 if v_1.Op != OpAMD64ADDQconst {
8693 off2 := auxIntToInt32(v_1.AuxInt)
8696 if !(is32Bit(int64(off1) + int64(off2))) {
8699 v.reset(OpAMD64DIVSSload)
8700 v.AuxInt = int32ToAuxInt(off1 + off2)
8701 v.Aux = symToAux(sym)
8702 v.AddArg3(val, base, mem)
8705 // match: (DIVSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
8706 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
8707 // result: (DIVSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
8709 off1 := auxIntToInt32(v.AuxInt)
8710 sym1 := auxToSym(v.Aux)
8712 if v_1.Op != OpAMD64LEAQ {
8715 off2 := auxIntToInt32(v_1.AuxInt)
8716 sym2 := auxToSym(v_1.Aux)
8719 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8722 v.reset(OpAMD64DIVSSload)
8723 v.AuxInt = int32ToAuxInt(off1 + off2)
8724 v.Aux = symToAux(mergeSym(sym1, sym2))
8725 v.AddArg3(val, base, mem)
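// Editorial note (not part of the generated code): unlike the commutative ops
// earlier in this file, DIVSD and DIVSS only fold a memory operand when the
// load feeds the second argument (the divisor), since floating-point division
// is not commutative. The canMergeLoadClobber condition additionally requires
// that the load is used only here and that the register holding x is safe to
// overwrite, so the load can be absorbed into the divide instruction.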
8730 func rewriteValueAMD64_OpAMD64HMULL(v *Value) bool {
8733 // match: (HMULL x y)
8734 // cond: !x.rematerializeable() && y.rematerializeable()
8735 // result: (HMULL y x)
8739 if !(!x.rematerializeable() && y.rematerializeable()) {
8742 v.reset(OpAMD64HMULL)
8748 func rewriteValueAMD64_OpAMD64HMULLU(v *Value) bool {
8751 // match: (HMULLU x y)
8752 // cond: !x.rematerializeable() && y.rematerializeable()
8753 // result: (HMULLU y x)
8757 if !(!x.rematerializeable() && y.rematerializeable()) {
8760 v.reset(OpAMD64HMULLU)
8766 func rewriteValueAMD64_OpAMD64HMULQ(v *Value) bool {
8769 // match: (HMULQ x y)
8770 // cond: !x.rematerializeable() && y.rematerializeable()
8771 // result: (HMULQ y x)
8775 if !(!x.rematerializeable() && y.rematerializeable()) {
8778 v.reset(OpAMD64HMULQ)
8784 func rewriteValueAMD64_OpAMD64HMULQU(v *Value) bool {
8787 // match: (HMULQU x y)
8788 // cond: !x.rematerializeable() && y.rematerializeable()
8789 // result: (HMULQU y x)
8793 if !(!x.rematerializeable() && y.rematerializeable()) {
8796 v.reset(OpAMD64HMULQU)
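// Editorial note (not part of the generated code): the HMUL* rules swap
// operands when only the second one is rematerializeable. The high-multiply
// instructions constrain their first input to a fixed register, so putting the
// rematerializeable (cheap-to-recreate) value in that slot presumably avoids an
// extra register copy; this rationale is an inference, not stated in the rules
// shown here.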
8802 func rewriteValueAMD64_OpAMD64LEAL(v *Value) bool {
8804 // match: (LEAL [c] {s} (ADDLconst [d] x))
8805 // cond: is32Bit(int64(c)+int64(d))
8806 // result: (LEAL [c+d] {s} x)
8808 c := auxIntToInt32(v.AuxInt)
8809 s := auxToSym(v.Aux)
8810 if v_0.Op != OpAMD64ADDLconst {
8813 d := auxIntToInt32(v_0.AuxInt)
8815 if !(is32Bit(int64(c) + int64(d))) {
8818 v.reset(OpAMD64LEAL)
8819 v.AuxInt = int32ToAuxInt(c + d)
8824 // match: (LEAL [c] {s} (ADDL x y))
8825 // cond: x.Op != OpSB && y.Op != OpSB
8826 // result: (LEAL1 [c] {s} x y)
8828 c := auxIntToInt32(v.AuxInt)
8829 s := auxToSym(v.Aux)
8830 if v_0.Op != OpAMD64ADDL {
8834 v_0_0 := v_0.Args[0]
8835 v_0_1 := v_0.Args[1]
8836 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
8839 if !(x.Op != OpSB && y.Op != OpSB) {
8842 v.reset(OpAMD64LEAL1)
8843 v.AuxInt = int32ToAuxInt(c)
8852 func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool {
8855 // match: (LEAL1 [c] {s} (ADDLconst [d] x) y)
8856 // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
8857 // result: (LEAL1 [c+d] {s} x y)
8859 c := auxIntToInt32(v.AuxInt)
8860 s := auxToSym(v.Aux)
8861 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8862 if v_0.Op != OpAMD64ADDLconst {
8865 d := auxIntToInt32(v_0.AuxInt)
8868 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
8871 v.reset(OpAMD64LEAL1)
8872 v.AuxInt = int32ToAuxInt(c + d)
8879 // match: (LEAL1 [c] {s} x (SHLLconst [1] y))
8880 // result: (LEAL2 [c] {s} x y)
8882 c := auxIntToInt32(v.AuxInt)
8883 s := auxToSym(v.Aux)
8884 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8886 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
8890 v.reset(OpAMD64LEAL2)
8891 v.AuxInt = int32ToAuxInt(c)
8898 // match: (LEAL1 [c] {s} x (SHLLconst [2] y))
8899 // result: (LEAL4 [c] {s} x y)
8901 c := auxIntToInt32(v.AuxInt)
8902 s := auxToSym(v.Aux)
8903 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8905 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
8909 v.reset(OpAMD64LEAL4)
8910 v.AuxInt = int32ToAuxInt(c)
8917 // match: (LEAL1 [c] {s} x (SHLLconst [3] y))
8918 // result: (LEAL8 [c] {s} x y)
8920 c := auxIntToInt32(v.AuxInt)
8921 s := auxToSym(v.Aux)
8922 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8924 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
8928 v.reset(OpAMD64LEAL8)
8929 v.AuxInt = int32ToAuxInt(c)
8938 func rewriteValueAMD64_OpAMD64LEAL2(v *Value) bool {
8941 // match: (LEAL2 [c] {s} (ADDLconst [d] x) y)
8942 // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
8943 // result: (LEAL2 [c+d] {s} x y)
8945 c := auxIntToInt32(v.AuxInt)
8946 s := auxToSym(v.Aux)
8947 if v_0.Op != OpAMD64ADDLconst {
8950 d := auxIntToInt32(v_0.AuxInt)
8953 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
8956 v.reset(OpAMD64LEAL2)
8957 v.AuxInt = int32ToAuxInt(c + d)
8962 // match: (LEAL2 [c] {s} x (ADDLconst [d] y))
8963 // cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB
8964 // result: (LEAL2 [c+2*d] {s} x y)
8966 c := auxIntToInt32(v.AuxInt)
8967 s := auxToSym(v.Aux)
8969 if v_1.Op != OpAMD64ADDLconst {
8972 d := auxIntToInt32(v_1.AuxInt)
8974 if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
8977 v.reset(OpAMD64LEAL2)
8978 v.AuxInt = int32ToAuxInt(c + 2*d)
8983 // match: (LEAL2 [c] {s} x (SHLLconst [1] y))
8984 // result: (LEAL4 [c] {s} x y)
8986 c := auxIntToInt32(v.AuxInt)
8987 s := auxToSym(v.Aux)
8989 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
8993 v.reset(OpAMD64LEAL4)
8994 v.AuxInt = int32ToAuxInt(c)
8999 // match: (LEAL2 [c] {s} x (SHLLconst [2] y))
9000 // result: (LEAL8 [c] {s} x y)
9002 c := auxIntToInt32(v.AuxInt)
9003 s := auxToSym(v.Aux)
9005 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
9009 v.reset(OpAMD64LEAL8)
9010 v.AuxInt = int32ToAuxInt(c)
9017 func rewriteValueAMD64_OpAMD64LEAL4(v *Value) bool {
9020 // match: (LEAL4 [c] {s} (ADDLconst [d] x) y)
9021 // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
9022 // result: (LEAL4 [c+d] {s} x y)
9024 c := auxIntToInt32(v.AuxInt)
9025 s := auxToSym(v.Aux)
9026 if v_0.Op != OpAMD64ADDLconst {
9029 d := auxIntToInt32(v_0.AuxInt)
9032 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
9035 v.reset(OpAMD64LEAL4)
9036 v.AuxInt = int32ToAuxInt(c + d)
9041 // match: (LEAL4 [c] {s} x (ADDLconst [d] y))
9042 // cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB
9043 // result: (LEAL4 [c+4*d] {s} x y)
9045 c := auxIntToInt32(v.AuxInt)
9046 s := auxToSym(v.Aux)
9048 if v_1.Op != OpAMD64ADDLconst {
9051 d := auxIntToInt32(v_1.AuxInt)
9053 if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
9056 v.reset(OpAMD64LEAL4)
9057 v.AuxInt = int32ToAuxInt(c + 4*d)
9062 // match: (LEAL4 [c] {s} x (SHLLconst [1] y))
9063 // result: (LEAL8 [c] {s} x y)
9065 c := auxIntToInt32(v.AuxInt)
9066 s := auxToSym(v.Aux)
9068 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
9072 v.reset(OpAMD64LEAL8)
9073 v.AuxInt = int32ToAuxInt(c)
9080 func rewriteValueAMD64_OpAMD64LEAL8(v *Value) bool {
9083 // match: (LEAL8 [c] {s} (ADDLconst [d] x) y)
9084 // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
9085 // result: (LEAL8 [c+d] {s} x y)
9087 c := auxIntToInt32(v.AuxInt)
9088 s := auxToSym(v.Aux)
9089 if v_0.Op != OpAMD64ADDLconst {
9092 d := auxIntToInt32(v_0.AuxInt)
9095 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
9098 v.reset(OpAMD64LEAL8)
9099 v.AuxInt = int32ToAuxInt(c + d)
9104 // match: (LEAL8 [c] {s} x (ADDLconst [d] y))
9105 // cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB
9106 // result: (LEAL8 [c+8*d] {s} x y)
9108 c := auxIntToInt32(v.AuxInt)
9109 s := auxToSym(v.Aux)
9111 if v_1.Op != OpAMD64ADDLconst {
9114 d := auxIntToInt32(v_1.AuxInt)
9116 if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
9119 v.reset(OpAMD64LEAL8)
9120 v.AuxInt = int32ToAuxInt(c + 8*d)
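// Editorial note (not part of the generated code): a LEAL<k> [c] {s} x y
// computes x + k*y + c. That identity explains the folds above: a constant
// added to the base goes straight into the displacement; a constant added to
// the index is folded in pre-scaled, since x + k*(y+d) + c = x + k*y + (c+k*d),
// so LEAL8 [c] x (ADDLconst [d] y) becomes LEAL8 [c+8*d] x y; and an index that
// is a left shift by 1, 2, or 3 is absorbed into the scale (LEAL1 with
// SHLLconst [1] => LEAL2, and so on up to LEAL8).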
9127 func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool {
9129 // match: (LEAQ [c] {s} (ADDQconst [d] x))
9130 // cond: is32Bit(int64(c)+int64(d))
9131 // result: (LEAQ [c+d] {s} x)
9133 c := auxIntToInt32(v.AuxInt)
9134 s := auxToSym(v.Aux)
9135 if v_0.Op != OpAMD64ADDQconst {
9138 d := auxIntToInt32(v_0.AuxInt)
9140 if !(is32Bit(int64(c) + int64(d))) {
9143 v.reset(OpAMD64LEAQ)
9144 v.AuxInt = int32ToAuxInt(c + d)
9149 // match: (LEAQ [c] {s} (ADDQ x y))
9150 // cond: x.Op != OpSB && y.Op != OpSB
9151 // result: (LEAQ1 [c] {s} x y)
9153 c := auxIntToInt32(v.AuxInt)
9154 s := auxToSym(v.Aux)
9155 if v_0.Op != OpAMD64ADDQ {
9159 v_0_0 := v_0.Args[0]
9160 v_0_1 := v_0.Args[1]
9161 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
9164 if !(x.Op != OpSB && y.Op != OpSB) {
9167 v.reset(OpAMD64LEAQ1)
9168 v.AuxInt = int32ToAuxInt(c)
9175 // match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
9176 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
9177 // result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
9179 off1 := auxIntToInt32(v.AuxInt)
9180 sym1 := auxToSym(v.Aux)
9181 if v_0.Op != OpAMD64LEAQ {
9184 off2 := auxIntToInt32(v_0.AuxInt)
9185 sym2 := auxToSym(v_0.Aux)
9187 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
9190 v.reset(OpAMD64LEAQ)
9191 v.AuxInt = int32ToAuxInt(off1 + off2)
9192 v.Aux = symToAux(mergeSym(sym1, sym2))
9196 // match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
9197 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
9198 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
9200 off1 := auxIntToInt32(v.AuxInt)
9201 sym1 := auxToSym(v.Aux)
9202 if v_0.Op != OpAMD64LEAQ1 {
9205 off2 := auxIntToInt32(v_0.AuxInt)
9206 sym2 := auxToSym(v_0.Aux)
9209 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
9212 v.reset(OpAMD64LEAQ1)
9213 v.AuxInt = int32ToAuxInt(off1 + off2)
9214 v.Aux = symToAux(mergeSym(sym1, sym2))
9218 // match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
9219 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
9220 // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
9222 off1 := auxIntToInt32(v.AuxInt)
9223 sym1 := auxToSym(v.Aux)
9224 if v_0.Op != OpAMD64LEAQ2 {
9227 off2 := auxIntToInt32(v_0.AuxInt)
9228 sym2 := auxToSym(v_0.Aux)
9231 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
9234 v.reset(OpAMD64LEAQ2)
9235 v.AuxInt = int32ToAuxInt(off1 + off2)
9236 v.Aux = symToAux(mergeSym(sym1, sym2))
9240 // match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
9241 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
9242 // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
9244 off1 := auxIntToInt32(v.AuxInt)
9245 sym1 := auxToSym(v.Aux)
9246 if v_0.Op != OpAMD64LEAQ4 {
9249 off2 := auxIntToInt32(v_0.AuxInt)
9250 sym2 := auxToSym(v_0.Aux)
9253 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
9256 v.reset(OpAMD64LEAQ4)
9257 v.AuxInt = int32ToAuxInt(off1 + off2)
9258 v.Aux = symToAux(mergeSym(sym1, sym2))
9262 // match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
9263 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
9264 // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
9266 off1 := auxIntToInt32(v.AuxInt)
9267 sym1 := auxToSym(v.Aux)
9268 if v_0.Op != OpAMD64LEAQ8 {
9271 off2 := auxIntToInt32(v_0.AuxInt)
9272 sym2 := auxToSym(v_0.Aux)
9275 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
9278 v.reset(OpAMD64LEAQ8)
9279 v.AuxInt = int32ToAuxInt(off1 + off2)
9280 v.Aux = symToAux(mergeSym(sym1, sym2))
9286 func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool {
9289 // match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
9290 // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
9291 // result: (LEAQ1 [c+d] {s} x y)
9293 c := auxIntToInt32(v.AuxInt)
9294 s := auxToSym(v.Aux)
9295 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9296 if v_0.Op != OpAMD64ADDQconst {
9299 d := auxIntToInt32(v_0.AuxInt)
9302 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
9305 v.reset(OpAMD64LEAQ1)
9306 v.AuxInt = int32ToAuxInt(c + d)
9313 // match: (LEAQ1 [c] {s} x (SHLQconst [1] y))
9314 // result: (LEAQ2 [c] {s} x y)
9316 c := auxIntToInt32(v.AuxInt)
9317 s := auxToSym(v.Aux)
9318 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9320 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
9324 v.reset(OpAMD64LEAQ2)
9325 v.AuxInt = int32ToAuxInt(c)
9332 // match: (LEAQ1 [c] {s} x (SHLQconst [2] y))
9333 // result: (LEAQ4 [c] {s} x y)
9335 c := auxIntToInt32(v.AuxInt)
9336 s := auxToSym(v.Aux)
9337 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9339 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
9343 v.reset(OpAMD64LEAQ4)
9344 v.AuxInt = int32ToAuxInt(c)
9351 // match: (LEAQ1 [c] {s} x (SHLQconst [3] y))
9352 // result: (LEAQ8 [c] {s} x y)
9354 c := auxIntToInt32(v.AuxInt)
9355 s := auxToSym(v.Aux)
9356 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9358 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
9362 v.reset(OpAMD64LEAQ8)
9363 v.AuxInt = int32ToAuxInt(c)
9370 // match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
9371 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
9372 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
9374 off1 := auxIntToInt32(v.AuxInt)
9375 sym1 := auxToSym(v.Aux)
9376 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9377 if v_0.Op != OpAMD64LEAQ {
9380 off2 := auxIntToInt32(v_0.AuxInt)
9381 sym2 := auxToSym(v_0.Aux)
9384 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
9387 v.reset(OpAMD64LEAQ1)
9388 v.AuxInt = int32ToAuxInt(off1 + off2)
9389 v.Aux = symToAux(mergeSym(sym1, sym2))
9395 // match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
9396 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
9397 // result: (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} x y)
9399 off1 := auxIntToInt32(v.AuxInt)
9400 sym1 := auxToSym(v.Aux)
9401 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9403 if v_1.Op != OpAMD64LEAQ1 {
9406 off2 := auxIntToInt32(v_1.AuxInt)
9407 sym2 := auxToSym(v_1.Aux)
9409 if y != v_1.Args[0] || !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
9412 v.reset(OpAMD64LEAQ2)
9413 v.AuxInt = int32ToAuxInt(off1 + off2)
9414 v.Aux = symToAux(mergeSym(sym1, sym2))
9420 // match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} x y))
9421 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
9422 // result: (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} y x)
9424 off1 := auxIntToInt32(v.AuxInt)
9425 sym1 := auxToSym(v.Aux)
9426 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9428 if v_1.Op != OpAMD64LEAQ1 {
9431 off2 := auxIntToInt32(v_1.AuxInt)
9432 sym2 := auxToSym(v_1.Aux)
9434 v_1_0 := v_1.Args[0]
9435 v_1_1 := v_1.Args[1]
9436 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
9441 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
9444 v.reset(OpAMD64LEAQ2)
9445 v.AuxInt = int32ToAuxInt(off1 + off2)
9446 v.Aux = symToAux(mergeSym(sym1, sym2))
9453 // match: (LEAQ1 [0] x y)
9454 // cond: v.Aux == nil
9455 // result: (ADDQ x y)
9457 if auxIntToInt32(v.AuxInt) != 0 {
9462 if !(v.Aux == nil) {
9465 v.reset(OpAMD64ADDQ)
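// Editorial note (not part of the generated code): the LEAQ1 rules reuse the
// same scale-raising identities as the LEAL forms. A nested (LEAQ1 [off2] y y)
// is really 2*y + off2, so an outer LEAQ1 of x with it becomes
// LEAQ2 [off1+off2] x y; and a LEAQ1 with a zero displacement and no symbol
// carries no extra information, so it degrades to a plain ADDQ.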
9471 func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool {
9474 // match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
9475 // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
9476 // result: (LEAQ2 [c+d] {s} x y)
9478 c := auxIntToInt32(v.AuxInt)
9479 s := auxToSym(v.Aux)
9480 if v_0.Op != OpAMD64ADDQconst {
9483 d := auxIntToInt32(v_0.AuxInt)
9486 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
9489 v.reset(OpAMD64LEAQ2)
9490 v.AuxInt = int32ToAuxInt(c + d)
9495 // match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
9496 // cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB
9497 // result: (LEAQ2 [c+2*d] {s} x y)
9499 c := auxIntToInt32(v.AuxInt)
9500 s := auxToSym(v.Aux)
9502 if v_1.Op != OpAMD64ADDQconst {
9505 d := auxIntToInt32(v_1.AuxInt)
9507 if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
9510 v.reset(OpAMD64LEAQ2)
9511 v.AuxInt = int32ToAuxInt(c + 2*d)
9516 // match: (LEAQ2 [c] {s} x (SHLQconst [1] y))
9517 // result: (LEAQ4 [c] {s} x y)
9519 c := auxIntToInt32(v.AuxInt)
9520 s := auxToSym(v.Aux)
9522 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
9526 v.reset(OpAMD64LEAQ4)
9527 v.AuxInt = int32ToAuxInt(c)
9532 // match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
9533 // result: (LEAQ8 [c] {s} x y)
9535 c := auxIntToInt32(v.AuxInt)
9536 s := auxToSym(v.Aux)
9538 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
9542 v.reset(OpAMD64LEAQ8)
9543 v.AuxInt = int32ToAuxInt(c)
9548 // match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
9549 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
9550 // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
9552 off1 := auxIntToInt32(v.AuxInt)
9553 sym1 := auxToSym(v.Aux)
9554 if v_0.Op != OpAMD64LEAQ {
9557 off2 := auxIntToInt32(v_0.AuxInt)
9558 sym2 := auxToSym(v_0.Aux)
9561 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
9564 v.reset(OpAMD64LEAQ2)
9565 v.AuxInt = int32ToAuxInt(off1 + off2)
9566 v.Aux = symToAux(mergeSym(sym1, sym2))
9570 // match: (LEAQ2 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
9571 // cond: is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil
9572 // result: (LEAQ4 [off1+2*off2] {sym1} x y)
9574 off1 := auxIntToInt32(v.AuxInt)
9575 sym1 := auxToSym(v.Aux)
9577 if v_1.Op != OpAMD64LEAQ1 {
9580 off2 := auxIntToInt32(v_1.AuxInt)
9581 sym2 := auxToSym(v_1.Aux)
9583 if y != v_1.Args[0] || !(is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil) {
9586 v.reset(OpAMD64LEAQ4)
9587 v.AuxInt = int32ToAuxInt(off1 + 2*off2)
9588 v.Aux = symToAux(sym1)
9592 // match: (LEAQ2 [off] {sym} x (MOVQconst [scale]))
9593 // cond: is32Bit(int64(off)+int64(scale)*2)
9594 // result: (LEAQ [off+int32(scale)*2] {sym} x)
9596 off := auxIntToInt32(v.AuxInt)
9597 sym := auxToSym(v.Aux)
9599 if v_1.Op != OpAMD64MOVQconst {
9602 scale := auxIntToInt64(v_1.AuxInt)
9603 if !(is32Bit(int64(off) + int64(scale)*2)) {
9606 v.reset(OpAMD64LEAQ)
9607 v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
9608 v.Aux = symToAux(sym)
9612 // match: (LEAQ2 [off] {sym} x (MOVLconst [scale]))
9613 // cond: is32Bit(int64(off)+int64(scale)*2)
9614 // result: (LEAQ [off+int32(scale)*2] {sym} x)
9616 off := auxIntToInt32(v.AuxInt)
9617 sym := auxToSym(v.Aux)
9619 if v_1.Op != OpAMD64MOVLconst {
9622 scale := auxIntToInt32(v_1.AuxInt)
9623 if !(is32Bit(int64(off) + int64(scale)*2)) {
9626 v.reset(OpAMD64LEAQ)
9627 v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
9628 v.Aux = symToAux(sym)
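// Editorial note (not part of the generated code): when the index of a LEAQ2
// is a known constant, the whole scaled index collapses into the displacement:
// LEAQ2 [off] {sym} x (MOVQconst [k]) becomes LEAQ [off+2*k] {sym} x, provided
// the combined displacement still fits in 32 bits. With invented numbers,
// off = 8 and k = 5 give LEAQ [18] {sym} x.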
9634 func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool {
9637 // match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
9638 // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
9639 // result: (LEAQ4 [c+d] {s} x y)
9641 c := auxIntToInt32(v.AuxInt)
9642 s := auxToSym(v.Aux)
9643 if v_0.Op != OpAMD64ADDQconst {
9646 d := auxIntToInt32(v_0.AuxInt)
9649 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
9652 v.reset(OpAMD64LEAQ4)
9653 v.AuxInt = int32ToAuxInt(c + d)
9658 // match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
9659 // cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB
9660 // result: (LEAQ4 [c+4*d] {s} x y)
9662 c := auxIntToInt32(v.AuxInt)
9663 s := auxToSym(v.Aux)
9665 if v_1.Op != OpAMD64ADDQconst {
9668 d := auxIntToInt32(v_1.AuxInt)
9670 if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
9673 v.reset(OpAMD64LEAQ4)
9674 v.AuxInt = int32ToAuxInt(c + 4*d)
9679 // match: (LEAQ4 [c] {s} x (SHLQconst [1] y))
9680 // result: (LEAQ8 [c] {s} x y)
9682 c := auxIntToInt32(v.AuxInt)
9683 s := auxToSym(v.Aux)
9685 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
9689 v.reset(OpAMD64LEAQ8)
9690 v.AuxInt = int32ToAuxInt(c)
9695 // match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
9696 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
9697 // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
9699 off1 := auxIntToInt32(v.AuxInt)
9700 sym1 := auxToSym(v.Aux)
9701 if v_0.Op != OpAMD64LEAQ {
9704 off2 := auxIntToInt32(v_0.AuxInt)
9705 sym2 := auxToSym(v_0.Aux)
9708 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
9711 v.reset(OpAMD64LEAQ4)
9712 v.AuxInt = int32ToAuxInt(off1 + off2)
9713 v.Aux = symToAux(mergeSym(sym1, sym2))
9717 // match: (LEAQ4 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
9718 // cond: is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil
9719 // result: (LEAQ8 [off1+4*off2] {sym1} x y)
9721 off1 := auxIntToInt32(v.AuxInt)
9722 sym1 := auxToSym(v.Aux)
9724 if v_1.Op != OpAMD64LEAQ1 {
9727 off2 := auxIntToInt32(v_1.AuxInt)
9728 sym2 := auxToSym(v_1.Aux)
9730 if y != v_1.Args[0] || !(is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil) {
9733 v.reset(OpAMD64LEAQ8)
9734 v.AuxInt = int32ToAuxInt(off1 + 4*off2)
9735 v.Aux = symToAux(sym1)
9739 // match: (LEAQ4 [off] {sym} x (MOVQconst [scale]))
9740 // cond: is32Bit(int64(off)+int64(scale)*4)
9741 // result: (LEAQ [off+int32(scale)*4] {sym} x)
9743 off := auxIntToInt32(v.AuxInt)
9744 sym := auxToSym(v.Aux)
9746 if v_1.Op != OpAMD64MOVQconst {
9749 scale := auxIntToInt64(v_1.AuxInt)
9750 if !(is32Bit(int64(off) + int64(scale)*4)) {
9753 v.reset(OpAMD64LEAQ)
9754 v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
9755 v.Aux = symToAux(sym)
9759 // match: (LEAQ4 [off] {sym} x (MOVLconst [scale]))
9760 // cond: is32Bit(int64(off)+int64(scale)*4)
9761 // result: (LEAQ [off+int32(scale)*4] {sym} x)
9763 off := auxIntToInt32(v.AuxInt)
9764 sym := auxToSym(v.Aux)
9766 if v_1.Op != OpAMD64MOVLconst {
9769 scale := auxIntToInt32(v_1.AuxInt)
9770 if !(is32Bit(int64(off) + int64(scale)*4)) {
9773 v.reset(OpAMD64LEAQ)
9774 v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
9775 v.Aux = symToAux(sym)
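// Illustrative sketch (not generated): the (LEAQ1 [off2] y y) index matched
// above is y+y, i.e. 2*y plus its offset, so scaling it by 4 is the same as
// scaling y by 8 with the inner offset folded into the outer one. The helper
// name is hypothetical.
func leaq4DoubledIndexSketch(off1, off2, x, y int64) (before, after int64) {
	inner := off2 + y + y           // (LEAQ1 [off2] y y), sym2 == nil
	before = off1 + x + 4*inner     // (LEAQ4 [off1] {sym1} x inner)
	after = off1 + 4*off2 + x + 8*y // (LEAQ8 [off1+4*off2] {sym1} x y)
	return                          // always equal
}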
9781 func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool {
9784 // match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
9785 // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
9786 // result: (LEAQ8 [c+d] {s} x y)
9788 c := auxIntToInt32(v.AuxInt)
9789 s := auxToSym(v.Aux)
9790 if v_0.Op != OpAMD64ADDQconst {
9793 d := auxIntToInt32(v_0.AuxInt)
9796 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
9799 v.reset(OpAMD64LEAQ8)
9800 v.AuxInt = int32ToAuxInt(c + d)
9805 // match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
9806 // cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB
9807 // result: (LEAQ8 [c+8*d] {s} x y)
9809 c := auxIntToInt32(v.AuxInt)
9810 s := auxToSym(v.Aux)
9812 if v_1.Op != OpAMD64ADDQconst {
9815 d := auxIntToInt32(v_1.AuxInt)
9817 if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
9820 v.reset(OpAMD64LEAQ8)
9821 v.AuxInt = int32ToAuxInt(c + 8*d)
9826 // match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
9827 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
9828 // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
9830 off1 := auxIntToInt32(v.AuxInt)
9831 sym1 := auxToSym(v.Aux)
9832 if v_0.Op != OpAMD64LEAQ {
9835 off2 := auxIntToInt32(v_0.AuxInt)
9836 sym2 := auxToSym(v_0.Aux)
9839 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
9842 v.reset(OpAMD64LEAQ8)
9843 v.AuxInt = int32ToAuxInt(off1 + off2)
9844 v.Aux = symToAux(mergeSym(sym1, sym2))
9848 // match: (LEAQ8 [off] {sym} x (MOVQconst [scale]))
9849 // cond: is32Bit(int64(off)+int64(scale)*8)
9850 // result: (LEAQ [off+int32(scale)*8] {sym} x)
9852 off := auxIntToInt32(v.AuxInt)
9853 sym := auxToSym(v.Aux)
9855 if v_1.Op != OpAMD64MOVQconst {
9858 scale := auxIntToInt64(v_1.AuxInt)
9859 if !(is32Bit(int64(off) + int64(scale)*8)) {
9862 v.reset(OpAMD64LEAQ)
9863 v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
9864 v.Aux = symToAux(sym)
9868 // match: (LEAQ8 [off] {sym} x (MOVLconst [scale]))
9869 // cond: is32Bit(int64(off)+int64(scale)*8)
9870 // result: (LEAQ [off+int32(scale)*8] {sym} x)
9872 off := auxIntToInt32(v.AuxInt)
9873 sym := auxToSym(v.Aux)
9875 if v_1.Op != OpAMD64MOVLconst {
9878 scale := auxIntToInt32(v_1.AuxInt)
9879 if !(is32Bit(int64(off) + int64(scale)*8)) {
9882 v.reset(OpAMD64LEAQ)
9883 v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
9884 v.Aux = symToAux(sym)
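// Illustrative sketch (not generated): when the index of a scaled LEA is a
// known constant, the scaled index folds into the displacement, leaving a
// plain LEAQ. The is32Bit condition keeps the folded displacement within the
// 32-bit field of the addressing mode. The helper name is hypothetical.
func leaq8ConstIndexSketch(off, x, scale int64) (before, after int64) {
	before = off + x + 8*scale  // (LEAQ8 [off] x (MOVQconst [scale]))
	after = (off + scale*8) + x // (LEAQ [off+scale*8] x)
	return                      // always equal
}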
9890 func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool {
9893 // match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
9894 // cond: x.Uses == 1 && clobber(x)
9895 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
9898 if x.Op != OpAMD64MOVBload {
9901 off := auxIntToInt32(x.AuxInt)
9902 sym := auxToSym(x.Aux)
9905 if !(x.Uses == 1 && clobber(x)) {
9909 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
9911 v0.AuxInt = int32ToAuxInt(off)
9912 v0.Aux = symToAux(sym)
9913 v0.AddArg2(ptr, mem)
9916 // match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem))
9917 // cond: x.Uses == 1 && clobber(x)
9918 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
9921 if x.Op != OpAMD64MOVWload {
9924 off := auxIntToInt32(x.AuxInt)
9925 sym := auxToSym(x.Aux)
9928 if !(x.Uses == 1 && clobber(x)) {
9932 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
9934 v0.AuxInt = int32ToAuxInt(off)
9935 v0.Aux = symToAux(sym)
9936 v0.AddArg2(ptr, mem)
9939 // match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem))
9940 // cond: x.Uses == 1 && clobber(x)
9941 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
9944 if x.Op != OpAMD64MOVLload {
9947 off := auxIntToInt32(x.AuxInt)
9948 sym := auxToSym(x.Aux)
9951 if !(x.Uses == 1 && clobber(x)) {
9955 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
9957 v0.AuxInt = int32ToAuxInt(off)
9958 v0.Aux = symToAux(sym)
9959 v0.AddArg2(ptr, mem)
9962 // match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem))
9963 // cond: x.Uses == 1 && clobber(x)
9964 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
9967 if x.Op != OpAMD64MOVQload {
9970 off := auxIntToInt32(x.AuxInt)
9971 sym := auxToSym(x.Aux)
9974 if !(x.Uses == 1 && clobber(x)) {
9978 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
9980 v0.AuxInt = int32ToAuxInt(off)
9981 v0.Aux = symToAux(sym)
9982 v0.AddArg2(ptr, mem)
9985 // match: (MOVBQSX (ANDLconst [c] x))
9986 // cond: c & 0x80 == 0
9987 // result: (ANDLconst [c & 0x7f] x)
9989 if v_0.Op != OpAMD64ANDLconst {
9992 c := auxIntToInt32(v_0.AuxInt)
9997 v.reset(OpAMD64ANDLconst)
9998 v.AuxInt = int32ToAuxInt(c & 0x7f)
10002 // match: (MOVBQSX (MOVBQSX x))
10003 // result: (MOVBQSX x)
10005 if v_0.Op != OpAMD64MOVBQSX {
10009 v.reset(OpAMD64MOVBQSX)
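// Illustrative sketch (not generated): why (MOVBQSX (ANDLconst [c] x)) can
// become (ANDLconst [c & 0x7f] x) when c&0x80 == 0. With bit 7 of the mask
// clear, the selected byte is never negative, so sign-extending it is the
// identity and the extension can be dropped by narrowing the mask. The helper
// name is hypothetical; on amd64 a 32-bit ANDL result is already
// zero-extended when used as 64 bits.
func movbqsxMaskSketch(c, x int32) (before, after int64) {
	before = int64(int8(x & c))           // MOVBQSX (ANDLconst [c] x)
	after = int64(uint32(x & (c & 0x7f))) // ANDLconst [c&0x7f] x
	return                                // equal whenever c&0x80 == 0
}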
10015 func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value) bool {
10018 // match: (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
10019 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
10020 // result: (MOVBQSX x)
10022 off := auxIntToInt32(v.AuxInt)
10023 sym := auxToSym(v.Aux)
10025 if v_1.Op != OpAMD64MOVBstore {
10028 off2 := auxIntToInt32(v_1.AuxInt)
10029 sym2 := auxToSym(v_1.Aux)
10031 ptr2 := v_1.Args[0]
10032 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
10035 v.reset(OpAMD64MOVBQSX)
10039 // match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
10040 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
10041 // result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
10043 off1 := auxIntToInt32(v.AuxInt)
10044 sym1 := auxToSym(v.Aux)
10045 if v_0.Op != OpAMD64LEAQ {
10048 off2 := auxIntToInt32(v_0.AuxInt)
10049 sym2 := auxToSym(v_0.Aux)
10050 base := v_0.Args[0]
10052 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
10055 v.reset(OpAMD64MOVBQSXload)
10056 v.AuxInt = int32ToAuxInt(off1 + off2)
10057 v.Aux = symToAux(mergeSym(sym1, sym2))
10058 v.AddArg2(base, mem)
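// Illustrative sketch (not generated): the first MOVBQSXload rule is
// store-to-load forwarding. A sign-extending byte load that reads back the
// byte just written to the same address can use the stored value directly
// and skip the memory round trip. The helper below is hypothetical Go
// showing the shape of code that benefits.
func byteStoreForwardSketch(buf []byte, v int8) int64 {
	buf[0] = byte(v)           // MOVBstore [0] ptr v mem
	return int64(int8(buf[0])) // MOVBQSXload [0] ptr mem => MOVBQSX v
}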
10063 func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool {
10066 // match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
10067 // cond: x.Uses == 1 && clobber(x)
10068 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
10071 if x.Op != OpAMD64MOVBload {
10074 off := auxIntToInt32(x.AuxInt)
10075 sym := auxToSym(x.Aux)
10078 if !(x.Uses == 1 && clobber(x)) {
10082 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
10084 v0.AuxInt = int32ToAuxInt(off)
10085 v0.Aux = symToAux(sym)
10086 v0.AddArg2(ptr, mem)
10089 // match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem))
10090 // cond: x.Uses == 1 && clobber(x)
10091 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
10094 if x.Op != OpAMD64MOVWload {
10097 off := auxIntToInt32(x.AuxInt)
10098 sym := auxToSym(x.Aux)
10101 if !(x.Uses == 1 && clobber(x)) {
10105 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
10107 v0.AuxInt = int32ToAuxInt(off)
10108 v0.Aux = symToAux(sym)
10109 v0.AddArg2(ptr, mem)
10112 // match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem))
10113 // cond: x.Uses == 1 && clobber(x)
10114 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
10117 if x.Op != OpAMD64MOVLload {
10120 off := auxIntToInt32(x.AuxInt)
10121 sym := auxToSym(x.Aux)
10124 if !(x.Uses == 1 && clobber(x)) {
10128 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
10130 v0.AuxInt = int32ToAuxInt(off)
10131 v0.Aux = symToAux(sym)
10132 v0.AddArg2(ptr, mem)
10135 // match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem))
10136 // cond: x.Uses == 1 && clobber(x)
10137 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
10140 if x.Op != OpAMD64MOVQload {
10143 off := auxIntToInt32(x.AuxInt)
10144 sym := auxToSym(x.Aux)
10147 if !(x.Uses == 1 && clobber(x)) {
10151 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
10153 v0.AuxInt = int32ToAuxInt(off)
10154 v0.Aux = symToAux(sym)
10155 v0.AddArg2(ptr, mem)
10158 // match: (MOVBQZX x)
10159 // cond: zeroUpper56Bits(x,3)
// result: x
10163 if !(zeroUpper56Bits(x, 3)) {
10169 // match: (MOVBQZX (ANDLconst [c] x))
10170 // result: (ANDLconst [c & 0xff] x)
10172 if v_0.Op != OpAMD64ANDLconst {
10175 c := auxIntToInt32(v_0.AuxInt)
10177 v.reset(OpAMD64ANDLconst)
10178 v.AuxInt = int32ToAuxInt(c & 0xff)
10182 // match: (MOVBQZX (MOVBQZX x))
10183 // result: (MOVBQZX x)
10185 if v_0.Op != OpAMD64MOVBQZX {
10189 v.reset(OpAMD64MOVBQZX)
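// Illustrative sketch (not generated): zero-extension of a value whose upper
// 56 bits are already known to be zero (zeroUpper56Bits) is the identity,
// and zero-extending a 32-bit AND only needs the mask clipped to the low
// byte. The helper name is hypothetical.
func movbqzxMaskSketch(c, x int32) (masked, remasked int64) {
	masked = int64(uint8(x & c))             // MOVBQZX (ANDLconst [c] x)
	remasked = int64(uint32(x & (c & 0xff))) // ANDLconst [c&0xff] x
	return                                   // always equal
}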
10195 func rewriteValueAMD64_OpAMD64MOVBatomicload(v *Value) bool {
10198 // match: (MOVBatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
10199 // cond: is32Bit(int64(off1)+int64(off2))
10200 // result: (MOVBatomicload [off1+off2] {sym} ptr mem)
10202 off1 := auxIntToInt32(v.AuxInt)
10203 sym := auxToSym(v.Aux)
10204 if v_0.Op != OpAMD64ADDQconst {
10207 off2 := auxIntToInt32(v_0.AuxInt)
10210 if !(is32Bit(int64(off1) + int64(off2))) {
10213 v.reset(OpAMD64MOVBatomicload)
10214 v.AuxInt = int32ToAuxInt(off1 + off2)
10215 v.Aux = symToAux(sym)
10216 v.AddArg2(ptr, mem)
10219 // match: (MOVBatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
10220 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
10221 // result: (MOVBatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
10223 off1 := auxIntToInt32(v.AuxInt)
10224 sym1 := auxToSym(v.Aux)
10225 if v_0.Op != OpAMD64LEAQ {
10228 off2 := auxIntToInt32(v_0.AuxInt)
10229 sym2 := auxToSym(v_0.Aux)
10232 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
10235 v.reset(OpAMD64MOVBatomicload)
10236 v.AuxInt = int32ToAuxInt(off1 + off2)
10237 v.Aux = symToAux(mergeSym(sym1, sym2))
10238 v.AddArg2(ptr, mem)
10243 func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool {
10246 // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
10247 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
10248 // result: (MOVBQZX x)
10250 off := auxIntToInt32(v.AuxInt)
10251 sym := auxToSym(v.Aux)
10253 if v_1.Op != OpAMD64MOVBstore {
10256 off2 := auxIntToInt32(v_1.AuxInt)
10257 sym2 := auxToSym(v_1.Aux)
10259 ptr2 := v_1.Args[0]
10260 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
10263 v.reset(OpAMD64MOVBQZX)
10267 // match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem)
10268 // cond: is32Bit(int64(off1)+int64(off2))
10269 // result: (MOVBload [off1+off2] {sym} ptr mem)
10271 off1 := auxIntToInt32(v.AuxInt)
10272 sym := auxToSym(v.Aux)
10273 if v_0.Op != OpAMD64ADDQconst {
10276 off2 := auxIntToInt32(v_0.AuxInt)
10279 if !(is32Bit(int64(off1) + int64(off2))) {
10282 v.reset(OpAMD64MOVBload)
10283 v.AuxInt = int32ToAuxInt(off1 + off2)
10284 v.Aux = symToAux(sym)
10285 v.AddArg2(ptr, mem)
10288 // match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
10289 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
10290 // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
10292 off1 := auxIntToInt32(v.AuxInt)
10293 sym1 := auxToSym(v.Aux)
10294 if v_0.Op != OpAMD64LEAQ {
10297 off2 := auxIntToInt32(v_0.AuxInt)
10298 sym2 := auxToSym(v_0.Aux)
10299 base := v_0.Args[0]
10301 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
10304 v.reset(OpAMD64MOVBload)
10305 v.AuxInt = int32ToAuxInt(off1 + off2)
10306 v.Aux = symToAux(mergeSym(sym1, sym2))
10307 v.AddArg2(base, mem)
10310 // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
10311 // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
10312 // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
10314 off1 := auxIntToInt32(v.AuxInt)
10315 sym1 := auxToSym(v.Aux)
10316 if v_0.Op != OpAMD64LEAL {
10319 off2 := auxIntToInt32(v_0.AuxInt)
10320 sym2 := auxToSym(v_0.Aux)
10321 base := v_0.Args[0]
10323 if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
10326 v.reset(OpAMD64MOVBload)
10327 v.AuxInt = int32ToAuxInt(off1 + off2)
10328 v.Aux = symToAux(mergeSym(sym1, sym2))
10329 v.AddArg2(base, mem)
10332 // match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem)
10333 // cond: is32Bit(int64(off1)+int64(off2))
10334 // result: (MOVBload [off1+off2] {sym} ptr mem)
10336 off1 := auxIntToInt32(v.AuxInt)
10337 sym := auxToSym(v.Aux)
10338 if v_0.Op != OpAMD64ADDLconst {
10341 off2 := auxIntToInt32(v_0.AuxInt)
10344 if !(is32Bit(int64(off1) + int64(off2))) {
10347 v.reset(OpAMD64MOVBload)
10348 v.AuxInt = int32ToAuxInt(off1 + off2)
10349 v.Aux = symToAux(sym)
10350 v.AddArg2(ptr, mem)
10353 // match: (MOVBload [off] {sym} (SB) _)
10354 // cond: symIsRO(sym)
10355 // result: (MOVLconst [int32(read8(sym, int64(off)))])
10357 off := auxIntToInt32(v.AuxInt)
10358 sym := auxToSym(v.Aux)
10359 if v_0.Op != OpSB || !(symIsRO(sym)) {
10362 v.reset(OpAMD64MOVLconst)
10363 v.AuxInt = int32ToAuxInt(int32(read8(sym, int64(off))))
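// Illustrative sketch (not generated): the MOVBload rules above fold a
// pointer adjustment (ADDQconst/LEAQ/LEAL/ADDLconst) into the load's
// displacement, so the address computation disappears into the addressing
// mode; is32Bit checks that the combined offset still fits the 32-bit
// displacement field. The last rule goes further and folds a load from a
// read-only symbol into a constant. The helper name is hypothetical.
func movbloadOffsetFoldSketch(mem []byte, ptr, off1, off2 int) (before, after byte) {
	p := ptr + off2            // (ADDQconst [off2] ptr)
	before = mem[off1+p]       // (MOVBload [off1] {sym} p mem)
	after = mem[off1+off2+ptr] // (MOVBload [off1+off2] {sym} ptr mem)
	return                     // same byte, one fewer ADD
}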
10368 func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
10373 typ := &b.Func.Config.Types
10374 // match: (MOVBstore [off] {sym} ptr y:(SETL x) mem)
10375 // cond: y.Uses == 1
10376 // result: (SETLstore [off] {sym} ptr x mem)
10378 off := auxIntToInt32(v.AuxInt)
10379 sym := auxToSym(v.Aux)
10382 if y.Op != OpAMD64SETL {
10387 if !(y.Uses == 1) {
10390 v.reset(OpAMD64SETLstore)
10391 v.AuxInt = int32ToAuxInt(off)
10392 v.Aux = symToAux(sym)
10393 v.AddArg3(ptr, x, mem)
10396 // match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem)
10397 // cond: y.Uses == 1
10398 // result: (SETLEstore [off] {sym} ptr x mem)
10400 off := auxIntToInt32(v.AuxInt)
10401 sym := auxToSym(v.Aux)
10404 if y.Op != OpAMD64SETLE {
10409 if !(y.Uses == 1) {
10412 v.reset(OpAMD64SETLEstore)
10413 v.AuxInt = int32ToAuxInt(off)
10414 v.Aux = symToAux(sym)
10415 v.AddArg3(ptr, x, mem)
10418 // match: (MOVBstore [off] {sym} ptr y:(SETG x) mem)
10419 // cond: y.Uses == 1
10420 // result: (SETGstore [off] {sym} ptr x mem)
10422 off := auxIntToInt32(v.AuxInt)
10423 sym := auxToSym(v.Aux)
10426 if y.Op != OpAMD64SETG {
10431 if !(y.Uses == 1) {
10434 v.reset(OpAMD64SETGstore)
10435 v.AuxInt = int32ToAuxInt(off)
10436 v.Aux = symToAux(sym)
10437 v.AddArg3(ptr, x, mem)
10440 // match: (MOVBstore [off] {sym} ptr y:(SETGE x) mem)
10441 // cond: y.Uses == 1
10442 // result: (SETGEstore [off] {sym} ptr x mem)
10444 off := auxIntToInt32(v.AuxInt)
10445 sym := auxToSym(v.Aux)
10448 if y.Op != OpAMD64SETGE {
10453 if !(y.Uses == 1) {
10456 v.reset(OpAMD64SETGEstore)
10457 v.AuxInt = int32ToAuxInt(off)
10458 v.Aux = symToAux(sym)
10459 v.AddArg3(ptr, x, mem)
10462 // match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem)
10463 // cond: y.Uses == 1
10464 // result: (SETEQstore [off] {sym} ptr x mem)
10466 off := auxIntToInt32(v.AuxInt)
10467 sym := auxToSym(v.Aux)
10470 if y.Op != OpAMD64SETEQ {
10475 if !(y.Uses == 1) {
10478 v.reset(OpAMD64SETEQstore)
10479 v.AuxInt = int32ToAuxInt(off)
10480 v.Aux = symToAux(sym)
10481 v.AddArg3(ptr, x, mem)
10484 // match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem)
10485 // cond: y.Uses == 1
10486 // result: (SETNEstore [off] {sym} ptr x mem)
10488 off := auxIntToInt32(v.AuxInt)
10489 sym := auxToSym(v.Aux)
10492 if y.Op != OpAMD64SETNE {
10497 if !(y.Uses == 1) {
10500 v.reset(OpAMD64SETNEstore)
10501 v.AuxInt = int32ToAuxInt(off)
10502 v.Aux = symToAux(sym)
10503 v.AddArg3(ptr, x, mem)
10506 // match: (MOVBstore [off] {sym} ptr y:(SETB x) mem)
10507 // cond: y.Uses == 1
10508 // result: (SETBstore [off] {sym} ptr x mem)
10510 off := auxIntToInt32(v.AuxInt)
10511 sym := auxToSym(v.Aux)
10514 if y.Op != OpAMD64SETB {
10519 if !(y.Uses == 1) {
10522 v.reset(OpAMD64SETBstore)
10523 v.AuxInt = int32ToAuxInt(off)
10524 v.Aux = symToAux(sym)
10525 v.AddArg3(ptr, x, mem)
10528 // match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem)
10529 // cond: y.Uses == 1
10530 // result: (SETBEstore [off] {sym} ptr x mem)
10532 off := auxIntToInt32(v.AuxInt)
10533 sym := auxToSym(v.Aux)
10536 if y.Op != OpAMD64SETBE {
10541 if !(y.Uses == 1) {
10544 v.reset(OpAMD64SETBEstore)
10545 v.AuxInt = int32ToAuxInt(off)
10546 v.Aux = symToAux(sym)
10547 v.AddArg3(ptr, x, mem)
10550 // match: (MOVBstore [off] {sym} ptr y:(SETA x) mem)
10551 // cond: y.Uses == 1
10552 // result: (SETAstore [off] {sym} ptr x mem)
10554 off := auxIntToInt32(v.AuxInt)
10555 sym := auxToSym(v.Aux)
10558 if y.Op != OpAMD64SETA {
10563 if !(y.Uses == 1) {
10566 v.reset(OpAMD64SETAstore)
10567 v.AuxInt = int32ToAuxInt(off)
10568 v.Aux = symToAux(sym)
10569 v.AddArg3(ptr, x, mem)
10572 // match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem)
10573 // cond: y.Uses == 1
10574 // result: (SETAEstore [off] {sym} ptr x mem)
10576 off := auxIntToInt32(v.AuxInt)
10577 sym := auxToSym(v.Aux)
10580 if y.Op != OpAMD64SETAE {
10585 if !(y.Uses == 1) {
10588 v.reset(OpAMD64SETAEstore)
10589 v.AuxInt = int32ToAuxInt(off)
10590 v.Aux = symToAux(sym)
10591 v.AddArg3(ptr, x, mem)
10594 // match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem)
10595 // result: (MOVBstore [off] {sym} ptr x mem)
10597 off := auxIntToInt32(v.AuxInt)
10598 sym := auxToSym(v.Aux)
10600 if v_1.Op != OpAMD64MOVBQSX {
10605 v.reset(OpAMD64MOVBstore)
10606 v.AuxInt = int32ToAuxInt(off)
10607 v.Aux = symToAux(sym)
10608 v.AddArg3(ptr, x, mem)
10611 // match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem)
10612 // result: (MOVBstore [off] {sym} ptr x mem)
10614 off := auxIntToInt32(v.AuxInt)
10615 sym := auxToSym(v.Aux)
10617 if v_1.Op != OpAMD64MOVBQZX {
10622 v.reset(OpAMD64MOVBstore)
10623 v.AuxInt = int32ToAuxInt(off)
10624 v.Aux = symToAux(sym)
10625 v.AddArg3(ptr, x, mem)
10628 // match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
10629 // cond: is32Bit(int64(off1)+int64(off2))
10630 // result: (MOVBstore [off1+off2] {sym} ptr val mem)
10632 off1 := auxIntToInt32(v.AuxInt)
10633 sym := auxToSym(v.Aux)
10634 if v_0.Op != OpAMD64ADDQconst {
10637 off2 := auxIntToInt32(v_0.AuxInt)
10641 if !(is32Bit(int64(off1) + int64(off2))) {
10644 v.reset(OpAMD64MOVBstore)
10645 v.AuxInt = int32ToAuxInt(off1 + off2)
10646 v.Aux = symToAux(sym)
10647 v.AddArg3(ptr, val, mem)
10650 // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
10651 // result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
10653 off := auxIntToInt32(v.AuxInt)
10654 sym := auxToSym(v.Aux)
10656 if v_1.Op != OpAMD64MOVLconst {
10659 c := auxIntToInt32(v_1.AuxInt)
10661 v.reset(OpAMD64MOVBstoreconst)
10662 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
10663 v.Aux = symToAux(sym)
10664 v.AddArg2(ptr, mem)
10667 // match: (MOVBstore [off] {sym} ptr (MOVQconst [c]) mem)
10668 // result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
10670 off := auxIntToInt32(v.AuxInt)
10671 sym := auxToSym(v.Aux)
10673 if v_1.Op != OpAMD64MOVQconst {
10676 c := auxIntToInt64(v_1.AuxInt)
10678 v.reset(OpAMD64MOVBstoreconst)
10679 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
10680 v.Aux = symToAux(sym)
10681 v.AddArg2(ptr, mem)
10684 // match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
10685 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
10686 // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
10688 off1 := auxIntToInt32(v.AuxInt)
10689 sym1 := auxToSym(v.Aux)
10690 if v_0.Op != OpAMD64LEAQ {
10693 off2 := auxIntToInt32(v_0.AuxInt)
10694 sym2 := auxToSym(v_0.Aux)
10695 base := v_0.Args[0]
10698 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
10701 v.reset(OpAMD64MOVBstore)
10702 v.AuxInt = int32ToAuxInt(off1 + off2)
10703 v.Aux = symToAux(mergeSym(sym1, sym2))
10704 v.AddArg3(base, val, mem)
10707 // match: (MOVBstore [i] {s} p w x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem))
10708 // cond: x0.Uses == 1 && clobber(x0)
10709 // result: (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem)
10711 i := auxIntToInt32(v.AuxInt)
10712 s := auxToSym(v.Aux)
10716 if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-1 || auxToSym(x0.Aux) != s {
10720 if p != x0.Args[0] {
10724 if x0_1.Op != OpAMD64SHRWconst || auxIntToInt8(x0_1.AuxInt) != 8 || w != x0_1.Args[0] || !(x0.Uses == 1 && clobber(x0)) {
10727 v.reset(OpAMD64MOVWstore)
10728 v.AuxInt = int32ToAuxInt(i - 1)
10729 v.Aux = symToAux(s)
10730 v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, w.Type)
10731 v0.AuxInt = int8ToAuxInt(8)
10733 v.AddArg3(p, v0, mem)
10736 // match: (MOVBstore [i] {s} p1 w x0:(MOVBstore [i] {s} p0 (SHRWconst [8] w) mem))
10737 // cond: x0.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x0)
10738 // result: (MOVWstore [i] {s} p0 (ROLWconst <w.Type> [8] w) mem)
10740 i := auxIntToInt32(v.AuxInt)
10741 s := auxToSym(v.Aux)
10745 if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
10751 if x0_1.Op != OpAMD64SHRWconst || auxIntToInt8(x0_1.AuxInt) != 8 || w != x0_1.Args[0] || !(x0.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x0)) {
10754 v.reset(OpAMD64MOVWstore)
10755 v.AuxInt = int32ToAuxInt(i)
10756 v.Aux = symToAux(s)
10757 v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, w.Type)
10758 v0.AuxInt = int8ToAuxInt(8)
10760 v.AddArg3(p0, v0, mem)
10763 // match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem))))
10764 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)
10765 // result: (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem)
10767 i := auxIntToInt32(v.AuxInt)
10768 s := auxToSym(v.Aux)
10772 if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i-1 || auxToSym(x2.Aux) != s {
10776 if p != x2.Args[0] {
10780 if x2_1.Op != OpAMD64SHRLconst || auxIntToInt8(x2_1.AuxInt) != 8 || w != x2_1.Args[0] {
10784 if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i-2 || auxToSym(x1.Aux) != s {
10788 if p != x1.Args[0] {
10792 if x1_1.Op != OpAMD64SHRLconst || auxIntToInt8(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
10796 if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-3 || auxToSym(x0.Aux) != s {
10800 if p != x0.Args[0] {
10804 if x0_1.Op != OpAMD64SHRLconst || auxIntToInt8(x0_1.AuxInt) != 24 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
10807 v.reset(OpAMD64MOVLstore)
10808 v.AuxInt = int32ToAuxInt(i - 3)
10809 v.Aux = symToAux(s)
10810 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, w.Type)
10812 v.AddArg3(p, v0, mem)
10815 // match: (MOVBstore [i] {s} p3 w x2:(MOVBstore [i] {s} p2 (SHRLconst [8] w) x1:(MOVBstore [i] {s} p1 (SHRLconst [16] w) x0:(MOVBstore [i] {s} p0 (SHRLconst [24] w) mem))))
10816 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && clobber(x0, x1, x2)
10817 // result: (MOVLstore [i] {s} p0 (BSWAPL <w.Type> w) mem)
10819 i := auxIntToInt32(v.AuxInt)
10820 s := auxToSym(v.Aux)
10824 if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i || auxToSym(x2.Aux) != s {
10830 if x2_1.Op != OpAMD64SHRLconst || auxIntToInt8(x2_1.AuxInt) != 8 || w != x2_1.Args[0] {
10834 if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
10840 if x1_1.Op != OpAMD64SHRLconst || auxIntToInt8(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
10844 if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
10850 if x0_1.Op != OpAMD64SHRLconst || auxIntToInt8(x0_1.AuxInt) != 24 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && clobber(x0, x1, x2)) {
10853 v.reset(OpAMD64MOVLstore)
10854 v.AuxInt = int32ToAuxInt(i)
10855 v.Aux = symToAux(s)
10856 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, w.Type)
10858 v.AddArg3(p0, v0, mem)
10861 // match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem))))))))
10862 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)
10863 // result: (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem)
10865 i := auxIntToInt32(v.AuxInt)
10866 s := auxToSym(v.Aux)
10870 if x6.Op != OpAMD64MOVBstore || auxIntToInt32(x6.AuxInt) != i-1 || auxToSym(x6.Aux) != s {
10874 if p != x6.Args[0] {
10878 if x6_1.Op != OpAMD64SHRQconst || auxIntToInt8(x6_1.AuxInt) != 8 || w != x6_1.Args[0] {
10882 if x5.Op != OpAMD64MOVBstore || auxIntToInt32(x5.AuxInt) != i-2 || auxToSym(x5.Aux) != s {
10886 if p != x5.Args[0] {
10890 if x5_1.Op != OpAMD64SHRQconst || auxIntToInt8(x5_1.AuxInt) != 16 || w != x5_1.Args[0] {
10894 if x4.Op != OpAMD64MOVBstore || auxIntToInt32(x4.AuxInt) != i-3 || auxToSym(x4.Aux) != s {
10898 if p != x4.Args[0] {
10902 if x4_1.Op != OpAMD64SHRQconst || auxIntToInt8(x4_1.AuxInt) != 24 || w != x4_1.Args[0] {
10906 if x3.Op != OpAMD64MOVBstore || auxIntToInt32(x3.AuxInt) != i-4 || auxToSym(x3.Aux) != s {
10910 if p != x3.Args[0] {
10914 if x3_1.Op != OpAMD64SHRQconst || auxIntToInt8(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
10918 if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i-5 || auxToSym(x2.Aux) != s {
10922 if p != x2.Args[0] {
10926 if x2_1.Op != OpAMD64SHRQconst || auxIntToInt8(x2_1.AuxInt) != 40 || w != x2_1.Args[0] {
10930 if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i-6 || auxToSym(x1.Aux) != s {
10934 if p != x1.Args[0] {
10938 if x1_1.Op != OpAMD64SHRQconst || auxIntToInt8(x1_1.AuxInt) != 48 || w != x1_1.Args[0] {
10942 if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-7 || auxToSym(x0.Aux) != s {
10946 if p != x0.Args[0] {
10950 if x0_1.Op != OpAMD64SHRQconst || auxIntToInt8(x0_1.AuxInt) != 56 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)) {
10953 v.reset(OpAMD64MOVQstore)
10954 v.AuxInt = int32ToAuxInt(i - 7)
10955 v.Aux = symToAux(s)
10956 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, w.Type)
10958 v.AddArg3(p, v0, mem)
10961 // match: (MOVBstore [i] {s} p7 w x6:(MOVBstore [i] {s} p6 (SHRQconst [8] w) x5:(MOVBstore [i] {s} p5 (SHRQconst [16] w) x4:(MOVBstore [i] {s} p4 (SHRQconst [24] w) x3:(MOVBstore [i] {s} p3 (SHRQconst [32] w) x2:(MOVBstore [i] {s} p2 (SHRQconst [40] w) x1:(MOVBstore [i] {s} p1 (SHRQconst [48] w) x0:(MOVBstore [i] {s} p0 (SHRQconst [56] w) mem))))))))
10962 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && sequentialAddresses(p3, p4, 1) && sequentialAddresses(p4, p5, 1) && sequentialAddresses(p5, p6, 1) && sequentialAddresses(p6, p7, 1) && clobber(x0, x1, x2, x3, x4, x5, x6)
10963 // result: (MOVQstore [i] {s} p0 (BSWAPQ <w.Type> w) mem)
10965 i := auxIntToInt32(v.AuxInt)
10966 s := auxToSym(v.Aux)
10970 if x6.Op != OpAMD64MOVBstore || auxIntToInt32(x6.AuxInt) != i || auxToSym(x6.Aux) != s {
10976 if x6_1.Op != OpAMD64SHRQconst || auxIntToInt8(x6_1.AuxInt) != 8 || w != x6_1.Args[0] {
10980 if x5.Op != OpAMD64MOVBstore || auxIntToInt32(x5.AuxInt) != i || auxToSym(x5.Aux) != s {
10986 if x5_1.Op != OpAMD64SHRQconst || auxIntToInt8(x5_1.AuxInt) != 16 || w != x5_1.Args[0] {
10990 if x4.Op != OpAMD64MOVBstore || auxIntToInt32(x4.AuxInt) != i || auxToSym(x4.Aux) != s {
10996 if x4_1.Op != OpAMD64SHRQconst || auxIntToInt8(x4_1.AuxInt) != 24 || w != x4_1.Args[0] {
11000 if x3.Op != OpAMD64MOVBstore || auxIntToInt32(x3.AuxInt) != i || auxToSym(x3.Aux) != s {
11006 if x3_1.Op != OpAMD64SHRQconst || auxIntToInt8(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
11010 if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i || auxToSym(x2.Aux) != s {
11016 if x2_1.Op != OpAMD64SHRQconst || auxIntToInt8(x2_1.AuxInt) != 40 || w != x2_1.Args[0] {
11020 if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
11026 if x1_1.Op != OpAMD64SHRQconst || auxIntToInt8(x1_1.AuxInt) != 48 || w != x1_1.Args[0] {
11030 if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
11036 if x0_1.Op != OpAMD64SHRQconst || auxIntToInt8(x0_1.AuxInt) != 56 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && sequentialAddresses(p3, p4, 1) && sequentialAddresses(p4, p5, 1) && sequentialAddresses(p5, p6, 1) && sequentialAddresses(p6, p7, 1) && clobber(x0, x1, x2, x3, x4, x5, x6)) {
11039 v.reset(OpAMD64MOVQstore)
11040 v.AuxInt = int32ToAuxInt(i)
11041 v.Aux = symToAux(s)
11042 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, w.Type)
11044 v.AddArg3(p0, v0, mem)
11047 // match: (MOVBstore [i] {s} p (SHRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
11048 // cond: x.Uses == 1 && clobber(x)
11049 // result: (MOVWstore [i-1] {s} p w mem)
11051 i := auxIntToInt32(v.AuxInt)
11052 s := auxToSym(v.Aux)
11054 if v_1.Op != OpAMD64SHRWconst || auxIntToInt8(v_1.AuxInt) != 8 {
11059 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
11063 if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
11066 v.reset(OpAMD64MOVWstore)
11067 v.AuxInt = int32ToAuxInt(i - 1)
11068 v.Aux = symToAux(s)
11069 v.AddArg3(p, w, mem)
11072 // match: (MOVBstore [i] {s} p (SHRLconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
11073 // cond: x.Uses == 1 && clobber(x)
11074 // result: (MOVWstore [i-1] {s} p w mem)
11076 i := auxIntToInt32(v.AuxInt)
11077 s := auxToSym(v.Aux)
11079 if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 8 {
11084 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
11088 if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
11091 v.reset(OpAMD64MOVWstore)
11092 v.AuxInt = int32ToAuxInt(i - 1)
11093 v.Aux = symToAux(s)
11094 v.AddArg3(p, w, mem)
11097 // match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
11098 // cond: x.Uses == 1 && clobber(x)
11099 // result: (MOVWstore [i-1] {s} p w mem)
11101 i := auxIntToInt32(v.AuxInt)
11102 s := auxToSym(v.Aux)
11104 if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 8 {
11109 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
11113 if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
11116 v.reset(OpAMD64MOVWstore)
11117 v.AuxInt = int32ToAuxInt(i - 1)
11118 v.Aux = symToAux(s)
11119 v.AddArg3(p, w, mem)
11122 // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRWconst [8] w) mem))
11123 // cond: x.Uses == 1 && clobber(x)
11124 // result: (MOVWstore [i] {s} p w mem)
11126 i := auxIntToInt32(v.AuxInt)
11127 s := auxToSym(v.Aux)
11131 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
11135 if p != x.Args[0] {
11139 if x_1.Op != OpAMD64SHRWconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
11142 v.reset(OpAMD64MOVWstore)
11143 v.AuxInt = int32ToAuxInt(i)
11144 v.Aux = symToAux(s)
11145 v.AddArg3(p, w, mem)
11148 // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRLconst [8] w) mem))
11149 // cond: x.Uses == 1 && clobber(x)
11150 // result: (MOVWstore [i] {s} p w mem)
11152 i := auxIntToInt32(v.AuxInt)
11153 s := auxToSym(v.Aux)
11157 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
11161 if p != x.Args[0] {
11165 if x_1.Op != OpAMD64SHRLconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
11168 v.reset(OpAMD64MOVWstore)
11169 v.AuxInt = int32ToAuxInt(i)
11170 v.Aux = symToAux(s)
11171 v.AddArg3(p, w, mem)
11174 // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRQconst [8] w) mem))
11175 // cond: x.Uses == 1 && clobber(x)
11176 // result: (MOVWstore [i] {s} p w mem)
11178 i := auxIntToInt32(v.AuxInt)
11179 s := auxToSym(v.Aux)
11183 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
11187 if p != x.Args[0] {
11191 if x_1.Op != OpAMD64SHRQconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
11194 v.reset(OpAMD64MOVWstore)
11195 v.AuxInt = int32ToAuxInt(i)
11196 v.Aux = symToAux(s)
11197 v.AddArg3(p, w, mem)
11200 // match: (MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem))
11201 // cond: x.Uses == 1 && clobber(x)
11202 // result: (MOVWstore [i-1] {s} p w0 mem)
11204 i := auxIntToInt32(v.AuxInt)
11205 s := auxToSym(v.Aux)
11207 if v_1.Op != OpAMD64SHRLconst {
11210 j := auxIntToInt8(v_1.AuxInt)
11213 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
11217 if p != x.Args[0] {
11221 if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
11224 v.reset(OpAMD64MOVWstore)
11225 v.AuxInt = int32ToAuxInt(i - 1)
11226 v.Aux = symToAux(s)
11227 v.AddArg3(p, w0, mem)
11230 // match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem))
11231 // cond: x.Uses == 1 && clobber(x)
11232 // result: (MOVWstore [i-1] {s} p w0 mem)
11234 i := auxIntToInt32(v.AuxInt)
11235 s := auxToSym(v.Aux)
11237 if v_1.Op != OpAMD64SHRQconst {
11240 j := auxIntToInt8(v_1.AuxInt)
11243 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
11247 if p != x.Args[0] {
11251 if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
11254 v.reset(OpAMD64MOVWstore)
11255 v.AuxInt = int32ToAuxInt(i - 1)
11256 v.Aux = symToAux(s)
11257 v.AddArg3(p, w0, mem)
11260 // match: (MOVBstore [i] {s} p1 (SHRWconst [8] w) x:(MOVBstore [i] {s} p0 w mem))
11261 // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
11262 // result: (MOVWstore [i] {s} p0 w mem)
11264 i := auxIntToInt32(v.AuxInt)
11265 s := auxToSym(v.Aux)
11267 if v_1.Op != OpAMD64SHRWconst || auxIntToInt8(v_1.AuxInt) != 8 {
11272 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
11277 if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
11280 v.reset(OpAMD64MOVWstore)
11281 v.AuxInt = int32ToAuxInt(i)
11282 v.Aux = symToAux(s)
11283 v.AddArg3(p0, w, mem)
11286 // match: (MOVBstore [i] {s} p1 (SHRLconst [8] w) x:(MOVBstore [i] {s} p0 w mem))
11287 // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
11288 // result: (MOVWstore [i] {s} p0 w mem)
11290 i := auxIntToInt32(v.AuxInt)
11291 s := auxToSym(v.Aux)
11293 if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 8 {
11298 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
11303 if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
11306 v.reset(OpAMD64MOVWstore)
11307 v.AuxInt = int32ToAuxInt(i)
11308 v.Aux = symToAux(s)
11309 v.AddArg3(p0, w, mem)
11312 // match: (MOVBstore [i] {s} p1 (SHRQconst [8] w) x:(MOVBstore [i] {s} p0 w mem))
11313 // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
11314 // result: (MOVWstore [i] {s} p0 w mem)
11316 i := auxIntToInt32(v.AuxInt)
11317 s := auxToSym(v.Aux)
11319 if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 8 {
11324 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
11329 if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
11332 v.reset(OpAMD64MOVWstore)
11333 v.AuxInt = int32ToAuxInt(i)
11334 v.Aux = symToAux(s)
11335 v.AddArg3(p0, w, mem)
11338 // match: (MOVBstore [i] {s} p0 w x:(MOVBstore [i] {s} p1 (SHRWconst [8] w) mem))
11339 // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
11340 // result: (MOVWstore [i] {s} p0 w mem)
11342 i := auxIntToInt32(v.AuxInt)
11343 s := auxToSym(v.Aux)
11347 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
11353 if x_1.Op != OpAMD64SHRWconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
11356 v.reset(OpAMD64MOVWstore)
11357 v.AuxInt = int32ToAuxInt(i)
11358 v.Aux = symToAux(s)
11359 v.AddArg3(p0, w, mem)
11362 // match: (MOVBstore [i] {s} p0 w x:(MOVBstore [i] {s} p1 (SHRLconst [8] w) mem))
11363 // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
11364 // result: (MOVWstore [i] {s} p0 w mem)
11366 i := auxIntToInt32(v.AuxInt)
11367 s := auxToSym(v.Aux)
11371 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
11377 if x_1.Op != OpAMD64SHRLconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
11380 v.reset(OpAMD64MOVWstore)
11381 v.AuxInt = int32ToAuxInt(i)
11382 v.Aux = symToAux(s)
11383 v.AddArg3(p0, w, mem)
11386 // match: (MOVBstore [i] {s} p0 w x:(MOVBstore [i] {s} p1 (SHRQconst [8] w) mem))
11387 // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
11388 // result: (MOVWstore [i] {s} p0 w mem)
11390 i := auxIntToInt32(v.AuxInt)
11391 s := auxToSym(v.Aux)
11395 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
11401 if x_1.Op != OpAMD64SHRQconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
11404 v.reset(OpAMD64MOVWstore)
11405 v.AuxInt = int32ToAuxInt(i)
11406 v.Aux = symToAux(s)
11407 v.AddArg3(p0, w, mem)
11410 // match: (MOVBstore [i] {s} p1 (SHRLconst [j] w) x:(MOVBstore [i] {s} p0 w0:(SHRLconst [j-8] w) mem))
11411 // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
11412 // result: (MOVWstore [i] {s} p0 w0 mem)
11414 i := auxIntToInt32(v.AuxInt)
11415 s := auxToSym(v.Aux)
11417 if v_1.Op != OpAMD64SHRLconst {
11420 j := auxIntToInt8(v_1.AuxInt)
11423 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
11429 if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
11432 v.reset(OpAMD64MOVWstore)
11433 v.AuxInt = int32ToAuxInt(i)
11434 v.Aux = symToAux(s)
11435 v.AddArg3(p0, w0, mem)
11438 // match: (MOVBstore [i] {s} p1 (SHRQconst [j] w) x:(MOVBstore [i] {s} p0 w0:(SHRQconst [j-8] w) mem))
11439 // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
11440 // result: (MOVWstore [i] {s} p0 w0 mem)
11442 i := auxIntToInt32(v.AuxInt)
11443 s := auxToSym(v.Aux)
11445 if v_1.Op != OpAMD64SHRQconst {
11448 j := auxIntToInt8(v_1.AuxInt)
11451 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
11457 if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
11460 v.reset(OpAMD64MOVWstore)
11461 v.AuxInt = int32ToAuxInt(i)
11462 v.Aux = symToAux(s)
11463 v.AddArg3(p0, w0, mem)
11466 // match: (MOVBstore [7] {s} p1 (SHRQconst [56] w) x1:(MOVWstore [5] {s} p1 (SHRQconst [40] w) x2:(MOVLstore [1] {s} p1 (SHRQconst [8] w) x3:(MOVBstore [0] {s} p1 w mem))))
11467 // cond: x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && clobber(x1, x2, x3)
11468 // result: (MOVQstore {s} p1 w mem)
11470 if auxIntToInt32(v.AuxInt) != 7 {
11473 s := auxToSym(v.Aux)
11475 if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 56 {
11480 if x1.Op != OpAMD64MOVWstore || auxIntToInt32(x1.AuxInt) != 5 || auxToSym(x1.Aux) != s {
11484 if p1 != x1.Args[0] {
11488 if x1_1.Op != OpAMD64SHRQconst || auxIntToInt8(x1_1.AuxInt) != 40 || w != x1_1.Args[0] {
11492 if x2.Op != OpAMD64MOVLstore || auxIntToInt32(x2.AuxInt) != 1 || auxToSym(x2.Aux) != s {
11496 if p1 != x2.Args[0] {
11500 if x2_1.Op != OpAMD64SHRQconst || auxIntToInt8(x2_1.AuxInt) != 8 || w != x2_1.Args[0] {
11504 if x3.Op != OpAMD64MOVBstore || auxIntToInt32(x3.AuxInt) != 0 || auxToSym(x3.Aux) != s {
11508 if p1 != x3.Args[0] || w != x3.Args[1] || !(x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && clobber(x1, x2, x3)) {
11511 v.reset(OpAMD64MOVQstore)
11512 v.Aux = symToAux(s)
11513 v.AddArg3(p1, w, mem)
11516 // match: (MOVBstore [i] {s} p x1:(MOVBload [j] {s2} p2 mem) mem2:(MOVBstore [i-1] {s} p x2:(MOVBload [j-1] {s2} p2 mem) mem))
11517 // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)
11518 // result: (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem)
11520 i := auxIntToInt32(v.AuxInt)
11521 s := auxToSym(v.Aux)
11524 if x1.Op != OpAMD64MOVBload {
11527 j := auxIntToInt32(x1.AuxInt)
11528 s2 := auxToSym(x1.Aux)
11532 if mem2.Op != OpAMD64MOVBstore || auxIntToInt32(mem2.AuxInt) != i-1 || auxToSym(mem2.Aux) != s {
11536 if p != mem2.Args[0] {
11540 if x2.Op != OpAMD64MOVBload || auxIntToInt32(x2.AuxInt) != j-1 || auxToSym(x2.Aux) != s2 {
11544 if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)) {
11547 v.reset(OpAMD64MOVWstore)
11548 v.AuxInt = int32ToAuxInt(i - 1)
11549 v.Aux = symToAux(s)
11550 v0 := b.NewValue0(x2.Pos, OpAMD64MOVWload, typ.UInt16)
11551 v0.AuxInt = int32ToAuxInt(j - 1)
11552 v0.Aux = symToAux(s2)
11553 v0.AddArg2(p2, mem)
11554 v.AddArg3(p, v0, mem)
11557 // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
11558 // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
11559 // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
11561 off1 := auxIntToInt32(v.AuxInt)
11562 sym1 := auxToSym(v.Aux)
11563 if v_0.Op != OpAMD64LEAL {
11566 off2 := auxIntToInt32(v_0.AuxInt)
11567 sym2 := auxToSym(v_0.Aux)
11568 base := v_0.Args[0]
11571 if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
11574 v.reset(OpAMD64MOVBstore)
11575 v.AuxInt = int32ToAuxInt(off1 + off2)
11576 v.Aux = symToAux(mergeSym(sym1, sym2))
11577 v.AddArg3(base, val, mem)
11580 // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
11581 // cond: is32Bit(int64(off1)+int64(off2))
11582 // result: (MOVBstore [off1+off2] {sym} ptr val mem)
11584 off1 := auxIntToInt32(v.AuxInt)
11585 sym := auxToSym(v.Aux)
11586 if v_0.Op != OpAMD64ADDLconst {
11589 off2 := auxIntToInt32(v_0.AuxInt)
11593 if !(is32Bit(int64(off1) + int64(off2))) {
11596 v.reset(OpAMD64MOVBstore)
11597 v.AuxInt = int32ToAuxInt(off1 + off2)
11598 v.Aux = symToAux(sym)
11599 v.AddArg3(ptr, val, mem)
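// Illustrative sketch (not generated): the early MOVBstore rules fuse a byte
// store of a SETcc flag result into the corresponding SETccstore, so the
// flag is written straight to memory. Most of the remaining rules coalesce
// adjacent single-byte stores into one wider store: two byte stores of w and
// w>>8 at consecutive addresses are, in memory order, a 16-bit little-endian
// store of w rotated by 8 (ROLWconst [8]); the longer chains build
// MOVLstore/MOVQstore with BSWAPL/BSWAPQ the same way. The helper below is
// hypothetical and writes both forms so they can be compared byte for byte.
func byteStoreMergeSketch(split, merged []byte, i int, w uint16) {
	// matched pattern: (MOVBstore [i] p w (MOVBstore [i-1] p (SHRWconst [8] w) mem))
	split[i] = byte(w)
	split[i-1] = byte(w >> 8)

	// rewritten form: (MOVWstore [i-1] p (ROLWconst [8] w) mem)
	r := w>>8 | w<<8 // rotate left by 8
	merged[i-1] = byte(r)
	merged[i] = byte(r >> 8)
	// split and merged now hold identical bytes at i-1 and i.
}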
11604 func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool {
11607 // match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
11608 // cond: ValAndOff(sc).canAdd32(off)
11609 // result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
11611 sc := auxIntToValAndOff(v.AuxInt)
11612 s := auxToSym(v.Aux)
11613 if v_0.Op != OpAMD64ADDQconst {
11616 off := auxIntToInt32(v_0.AuxInt)
11619 if !(ValAndOff(sc).canAdd32(off)) {
11622 v.reset(OpAMD64MOVBstoreconst)
11623 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
11624 v.Aux = symToAux(s)
11625 v.AddArg2(ptr, mem)
11628 // match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
11629 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
11630 // result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
11632 sc := auxIntToValAndOff(v.AuxInt)
11633 sym1 := auxToSym(v.Aux)
11634 if v_0.Op != OpAMD64LEAQ {
11637 off := auxIntToInt32(v_0.AuxInt)
11638 sym2 := auxToSym(v_0.Aux)
11641 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
11644 v.reset(OpAMD64MOVBstoreconst)
11645 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
11646 v.Aux = symToAux(mergeSym(sym1, sym2))
11647 v.AddArg2(ptr, mem)
11650 // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
11651 // cond: x.Uses == 1 && a.Off() + 1 == c.Off() && clobber(x)
11652 // result: (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem)
11654 c := auxIntToValAndOff(v.AuxInt)
11655 s := auxToSym(v.Aux)
11658 if x.Op != OpAMD64MOVBstoreconst {
11661 a := auxIntToValAndOff(x.AuxInt)
11662 if auxToSym(x.Aux) != s {
11666 if p != x.Args[0] || !(x.Uses == 1 && a.Off()+1 == c.Off() && clobber(x)) {
11669 v.reset(OpAMD64MOVWstoreconst)
11670 v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xff|c.Val()<<8, a.Off()))
11671 v.Aux = symToAux(s)
11675 // match: (MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem))
11676 // cond: x.Uses == 1 && a.Off() + 1 == c.Off() && clobber(x)
11677 // result: (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem)
11679 a := auxIntToValAndOff(v.AuxInt)
11680 s := auxToSym(v.Aux)
11683 if x.Op != OpAMD64MOVBstoreconst {
11686 c := auxIntToValAndOff(x.AuxInt)
11687 if auxToSym(x.Aux) != s {
11691 if p != x.Args[0] || !(x.Uses == 1 && a.Off()+1 == c.Off() && clobber(x)) {
11694 v.reset(OpAMD64MOVWstoreconst)
11695 v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xff|c.Val()<<8, a.Off()))
11696 v.Aux = symToAux(s)
11700 // match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
11701 // cond: canMergeSym(sym1, sym2) && sc.canAdd32(off)
11702 // result: (MOVBstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
11704 sc := auxIntToValAndOff(v.AuxInt)
11705 sym1 := auxToSym(v.Aux)
11706 if v_0.Op != OpAMD64LEAL {
11709 off := auxIntToInt32(v_0.AuxInt)
11710 sym2 := auxToSym(v_0.Aux)
11713 if !(canMergeSym(sym1, sym2) && sc.canAdd32(off)) {
11716 v.reset(OpAMD64MOVBstoreconst)
11717 v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
11718 v.Aux = symToAux(mergeSym(sym1, sym2))
11719 v.AddArg2(ptr, mem)
11722 // match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
11723 // cond: sc.canAdd32(off)
11724 // result: (MOVBstoreconst [sc.addOffset32(off)] {s} ptr mem)
11726 sc := auxIntToValAndOff(v.AuxInt)
11727 s := auxToSym(v.Aux)
11728 if v_0.Op != OpAMD64ADDLconst {
11731 off := auxIntToInt32(v_0.AuxInt)
11734 if !(sc.canAdd32(off)) {
11737 v.reset(OpAMD64MOVBstoreconst)
11738 v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
11739 v.Aux = symToAux(s)
11740 v.AddArg2(ptr, mem)
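// Illustrative sketch (not generated): two constant byte stores at adjacent
// offsets merge into one MOVWstoreconst whose value is the low byte ORed
// with the high byte shifted left by 8, i.e. a little-endian 16-bit
// constant. The helper name is hypothetical.
func constByteStoreMergeSketch(split, merged []byte, off int, lo, hi byte) {
	// matched pattern: two MOVBstoreconst ops at off and off+1
	split[off] = lo
	split[off+1] = hi

	// rewritten form: MOVWstoreconst [makeValAndOff(lo|hi<<8, off)]
	v := uint16(lo) | uint16(hi)<<8
	merged[off] = byte(v)
	merged[off+1] = byte(v >> 8)
	// split and merged now hold identical bytes at off and off+1.
}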
11745 func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool {
11748 // match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem))
11749 // cond: x.Uses == 1 && clobber(x)
11750 // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
11753 if x.Op != OpAMD64MOVLload {
11756 off := auxIntToInt32(x.AuxInt)
11757 sym := auxToSym(x.Aux)
11760 if !(x.Uses == 1 && clobber(x)) {
11764 v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
11766 v0.AuxInt = int32ToAuxInt(off)
11767 v0.Aux = symToAux(sym)
11768 v0.AddArg2(ptr, mem)
11771 // match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem))
11772 // cond: x.Uses == 1 && clobber(x)
11773 // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
11776 if x.Op != OpAMD64MOVQload {
11779 off := auxIntToInt32(x.AuxInt)
11780 sym := auxToSym(x.Aux)
11783 if !(x.Uses == 1 && clobber(x)) {
11787 v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
11789 v0.AuxInt = int32ToAuxInt(off)
11790 v0.Aux = symToAux(sym)
11791 v0.AddArg2(ptr, mem)
11794 // match: (MOVLQSX (ANDLconst [c] x))
11795 // cond: uint32(c) & 0x80000000 == 0
11796 // result: (ANDLconst [c & 0x7fffffff] x)
11798 if v_0.Op != OpAMD64ANDLconst {
11801 c := auxIntToInt32(v_0.AuxInt)
11803 if !(uint32(c)&0x80000000 == 0) {
11806 v.reset(OpAMD64ANDLconst)
11807 v.AuxInt = int32ToAuxInt(c & 0x7fffffff)
11811 // match: (MOVLQSX (MOVLQSX x))
11812 // result: (MOVLQSX x)
11814 if v_0.Op != OpAMD64MOVLQSX {
11818 v.reset(OpAMD64MOVLQSX)
11822 // match: (MOVLQSX (MOVWQSX x))
11823 // result: (MOVWQSX x)
11825 if v_0.Op != OpAMD64MOVWQSX {
11829 v.reset(OpAMD64MOVWQSX)
11833 // match: (MOVLQSX (MOVBQSX x))
11834 // result: (MOVBQSX x)
11836 if v_0.Op != OpAMD64MOVBQSX {
11840 v.reset(OpAMD64MOVBQSX)
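// Illustrative sketch (not generated): a sign extension of a value that is
// already sign-extended from a narrower width is redundant, so MOVLQSX of
// MOVBQSX/MOVWQSX/MOVLQSX collapses to the inner extension. The helper name
// is hypothetical.
func nestedSignExtendSketch(x int8) (double, single int64) {
	double = int64(int32(x)) // MOVLQSX (MOVBQSX x)
	single = int64(x)        // MOVBQSX x
	return                   // always equal
}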
11846 func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value) bool {
11849 // match: (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
11850 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
11851 // result: (MOVLQSX x)
11853 off := auxIntToInt32(v.AuxInt)
11854 sym := auxToSym(v.Aux)
11856 if v_1.Op != OpAMD64MOVLstore {
11859 off2 := auxIntToInt32(v_1.AuxInt)
11860 sym2 := auxToSym(v_1.Aux)
11862 ptr2 := v_1.Args[0]
11863 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
11866 v.reset(OpAMD64MOVLQSX)
11870 // match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
11871 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
11872 // result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
11874 off1 := auxIntToInt32(v.AuxInt)
11875 sym1 := auxToSym(v.Aux)
11876 if v_0.Op != OpAMD64LEAQ {
11879 off2 := auxIntToInt32(v_0.AuxInt)
11880 sym2 := auxToSym(v_0.Aux)
11881 base := v_0.Args[0]
11883 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
11886 v.reset(OpAMD64MOVLQSXload)
11887 v.AuxInt = int32ToAuxInt(off1 + off2)
11888 v.Aux = symToAux(mergeSym(sym1, sym2))
11889 v.AddArg2(base, mem)
11894 func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool {
11897 // match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem))
11898 // cond: x.Uses == 1 && clobber(x)
11899 // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
11902 if x.Op != OpAMD64MOVLload {
11905 off := auxIntToInt32(x.AuxInt)
11906 sym := auxToSym(x.Aux)
11909 if !(x.Uses == 1 && clobber(x)) {
11913 v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
11915 v0.AuxInt = int32ToAuxInt(off)
11916 v0.Aux = symToAux(sym)
11917 v0.AddArg2(ptr, mem)
11920 // match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem))
11921 // cond: x.Uses == 1 && clobber(x)
11922 // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
11925 if x.Op != OpAMD64MOVQload {
11928 off := auxIntToInt32(x.AuxInt)
11929 sym := auxToSym(x.Aux)
11932 if !(x.Uses == 1 && clobber(x)) {
11936 v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
11938 v0.AuxInt = int32ToAuxInt(off)
11939 v0.Aux = symToAux(sym)
11940 v0.AddArg2(ptr, mem)
11943 // match: (MOVLQZX x)
11944 // cond: zeroUpper32Bits(x,3)
// result: x
11948 if !(zeroUpper32Bits(x, 3)) {
11954 // match: (MOVLQZX (ANDLconst [c] x))
11955 // result: (ANDLconst [c] x)
11957 if v_0.Op != OpAMD64ANDLconst {
11960 c := auxIntToInt32(v_0.AuxInt)
11962 v.reset(OpAMD64ANDLconst)
11963 v.AuxInt = int32ToAuxInt(c)
11967 // match: (MOVLQZX (MOVLQZX x))
11968 // result: (MOVLQZX x)
11970 if v_0.Op != OpAMD64MOVLQZX {
11974 v.reset(OpAMD64MOVLQZX)
11978 // match: (MOVLQZX (MOVWQZX x))
11979 // result: (MOVWQZX x)
11981 if v_0.Op != OpAMD64MOVWQZX {
11985 v.reset(OpAMD64MOVWQZX)
11989 // match: (MOVLQZX (MOVBQZX x))
11990 // result: (MOVBQZX x)
11992 if v_0.Op != OpAMD64MOVBQZX {
11996 v.reset(OpAMD64MOVBQZX)
12002 func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value) bool {
12005 // match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
12006 // cond: is32Bit(int64(off1)+int64(off2))
12007 // result: (MOVLatomicload [off1+off2] {sym} ptr mem)
12009 off1 := auxIntToInt32(v.AuxInt)
12010 sym := auxToSym(v.Aux)
12011 if v_0.Op != OpAMD64ADDQconst {
12014 off2 := auxIntToInt32(v_0.AuxInt)
12017 if !(is32Bit(int64(off1) + int64(off2))) {
12020 v.reset(OpAMD64MOVLatomicload)
12021 v.AuxInt = int32ToAuxInt(off1 + off2)
12022 v.Aux = symToAux(sym)
12023 v.AddArg2(ptr, mem)
12026 // match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
12027 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
12028 // result: (MOVLatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
12030 off1 := auxIntToInt32(v.AuxInt)
12031 sym1 := auxToSym(v.Aux)
12032 if v_0.Op != OpAMD64LEAQ {
12035 off2 := auxIntToInt32(v_0.AuxInt)
12036 sym2 := auxToSym(v_0.Aux)
12039 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
12042 v.reset(OpAMD64MOVLatomicload)
12043 v.AuxInt = int32ToAuxInt(off1 + off2)
12044 v.Aux = symToAux(mergeSym(sym1, sym2))
12045 v.AddArg2(ptr, mem)
12050 func rewriteValueAMD64_OpAMD64MOVLf2i(v *Value) bool {
12053 // match: (MOVLf2i <t> (Arg <u> [off] {sym}))
12054 // cond: t.Size() == u.Size()
12055 // result: @b.Func.Entry (Arg <t> [off] {sym})
12058 if v_0.Op != OpArg {
12062 off := auxIntToInt32(v_0.AuxInt)
12063 sym := auxToSym(v_0.Aux)
12064 if !(t.Size() == u.Size()) {
12068 v0 := b.NewValue0(v.Pos, OpArg, t)
12070 v0.AuxInt = int32ToAuxInt(off)
12071 v0.Aux = symToAux(sym)
12076 func rewriteValueAMD64_OpAMD64MOVLi2f(v *Value) bool {
12079 // match: (MOVLi2f <t> (Arg <u> [off] {sym}))
12080 // cond: t.Size() == u.Size()
12081 // result: @b.Func.Entry (Arg <t> [off] {sym})
12084 if v_0.Op != OpArg {
12088 off := auxIntToInt32(v_0.AuxInt)
12089 sym := auxToSym(v_0.Aux)
12090 if !(t.Size() == u.Size()) {
12094 v0 := b.NewValue0(v.Pos, OpArg, t)
12096 v0.AuxInt = int32ToAuxInt(off)
12097 v0.Aux = symToAux(sym)
12102 func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool {
12106 config := b.Func.Config
12107 // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
12108 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
12109 // result: (MOVLQZX x)
12111 off := auxIntToInt32(v.AuxInt)
12112 sym := auxToSym(v.Aux)
12114 if v_1.Op != OpAMD64MOVLstore {
12117 off2 := auxIntToInt32(v_1.AuxInt)
12118 sym2 := auxToSym(v_1.Aux)
12120 ptr2 := v_1.Args[0]
12121 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
12124 v.reset(OpAMD64MOVLQZX)
12128 // match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem)
12129 // cond: is32Bit(int64(off1)+int64(off2))
12130 // result: (MOVLload [off1+off2] {sym} ptr mem)
12132 off1 := auxIntToInt32(v.AuxInt)
12133 sym := auxToSym(v.Aux)
12134 if v_0.Op != OpAMD64ADDQconst {
12137 off2 := auxIntToInt32(v_0.AuxInt)
12140 if !(is32Bit(int64(off1) + int64(off2))) {
12143 v.reset(OpAMD64MOVLload)
12144 v.AuxInt = int32ToAuxInt(off1 + off2)
12145 v.Aux = symToAux(sym)
12146 v.AddArg2(ptr, mem)
12149 // match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
12150 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
12151 // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
12153 off1 := auxIntToInt32(v.AuxInt)
12154 sym1 := auxToSym(v.Aux)
12155 if v_0.Op != OpAMD64LEAQ {
12158 off2 := auxIntToInt32(v_0.AuxInt)
12159 sym2 := auxToSym(v_0.Aux)
12160 base := v_0.Args[0]
12162 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
12165 v.reset(OpAMD64MOVLload)
12166 v.AuxInt = int32ToAuxInt(off1 + off2)
12167 v.Aux = symToAux(mergeSym(sym1, sym2))
12168 v.AddArg2(base, mem)
12171 // match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
12172 // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
12173 // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
12175 off1 := auxIntToInt32(v.AuxInt)
12176 sym1 := auxToSym(v.Aux)
12177 if v_0.Op != OpAMD64LEAL {
12180 off2 := auxIntToInt32(v_0.AuxInt)
12181 sym2 := auxToSym(v_0.Aux)
12182 base := v_0.Args[0]
12184 if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
12187 v.reset(OpAMD64MOVLload)
12188 v.AuxInt = int32ToAuxInt(off1 + off2)
12189 v.Aux = symToAux(mergeSym(sym1, sym2))
12190 v.AddArg2(base, mem)
12193 // match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem)
12194 // cond: is32Bit(int64(off1)+int64(off2))
12195 // result: (MOVLload [off1+off2] {sym} ptr mem)
12197 off1 := auxIntToInt32(v.AuxInt)
12198 sym := auxToSym(v.Aux)
12199 if v_0.Op != OpAMD64ADDLconst {
12202 off2 := auxIntToInt32(v_0.AuxInt)
12205 if !(is32Bit(int64(off1) + int64(off2))) {
12208 v.reset(OpAMD64MOVLload)
12209 v.AuxInt = int32ToAuxInt(off1 + off2)
12210 v.Aux = symToAux(sym)
12211 v.AddArg2(ptr, mem)
12214 // match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _))
12215 // result: (MOVLf2i val)
12217 off := auxIntToInt32(v.AuxInt)
12218 sym := auxToSym(v.Aux)
12220 if v_1.Op != OpAMD64MOVSSstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
12224 if ptr != v_1.Args[0] {
12227 v.reset(OpAMD64MOVLf2i)
12231 // match: (MOVLload [off] {sym} (SB) _)
12232 // cond: symIsRO(sym)
12233 // result: (MOVQconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
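// Loads from a read-only symbol (e.g. string literal data) are resolved at
// compile time: the four bytes are read from the symbol's data in the target
// byte order and become a MOVQconst.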
12235 off := auxIntToInt32(v.AuxInt)
12236 sym := auxToSym(v.Aux)
12237 if v_0.Op != OpSB || !(symIsRO(sym)) {
12240 v.reset(OpAMD64MOVQconst)
12241 v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
12246 func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
12251 typ := &b.Func.Config.Types
12252 // match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem)
12253 // result: (MOVLstore [off] {sym} ptr x mem)
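// A 32-bit store only writes the low 32 bits, so a sign extension (and, in
// the next rule, a zero extension) feeding it is redundant and x is stored
// directly.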
12255 off := auxIntToInt32(v.AuxInt)
12256 sym := auxToSym(v.Aux)
12258 if v_1.Op != OpAMD64MOVLQSX {
12263 v.reset(OpAMD64MOVLstore)
12264 v.AuxInt = int32ToAuxInt(off)
12265 v.Aux = symToAux(sym)
12266 v.AddArg3(ptr, x, mem)
12269 // match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem)
12270 // result: (MOVLstore [off] {sym} ptr x mem)
12272 off := auxIntToInt32(v.AuxInt)
12273 sym := auxToSym(v.Aux)
12275 if v_1.Op != OpAMD64MOVLQZX {
12280 v.reset(OpAMD64MOVLstore)
12281 v.AuxInt = int32ToAuxInt(off)
12282 v.Aux = symToAux(sym)
12283 v.AddArg3(ptr, x, mem)
12286 // match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
12287 // cond: is32Bit(int64(off1)+int64(off2))
12288 // result: (MOVLstore [off1+off2] {sym} ptr val mem)
12290 off1 := auxIntToInt32(v.AuxInt)
12291 sym := auxToSym(v.Aux)
12292 if v_0.Op != OpAMD64ADDQconst {
12295 off2 := auxIntToInt32(v_0.AuxInt)
12299 if !(is32Bit(int64(off1) + int64(off2))) {
12302 v.reset(OpAMD64MOVLstore)
12303 v.AuxInt = int32ToAuxInt(off1 + off2)
12304 v.Aux = symToAux(sym)
12305 v.AddArg3(ptr, val, mem)
12308 // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
12309 // result: (MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
12311 off := auxIntToInt32(v.AuxInt)
12312 sym := auxToSym(v.Aux)
12314 if v_1.Op != OpAMD64MOVLconst {
12317 c := auxIntToInt32(v_1.AuxInt)
12319 v.reset(OpAMD64MOVLstoreconst)
12320 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12321 v.Aux = symToAux(sym)
12322 v.AddArg2(ptr, mem)
12325 // match: (MOVLstore [off] {sym} ptr (MOVQconst [c]) mem)
12326 // result: (MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
12328 off := auxIntToInt32(v.AuxInt)
12329 sym := auxToSym(v.Aux)
12331 if v_1.Op != OpAMD64MOVQconst {
12334 c := auxIntToInt64(v_1.AuxInt)
12336 v.reset(OpAMD64MOVLstoreconst)
12337 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12338 v.Aux = symToAux(sym)
12339 v.AddArg2(ptr, mem)
12342 // match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
12343 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
12344 // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
12346 off1 := auxIntToInt32(v.AuxInt)
12347 sym1 := auxToSym(v.Aux)
12348 if v_0.Op != OpAMD64LEAQ {
12351 off2 := auxIntToInt32(v_0.AuxInt)
12352 sym2 := auxToSym(v_0.Aux)
12353 base := v_0.Args[0]
12356 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
12359 v.reset(OpAMD64MOVLstore)
12360 v.AuxInt = int32ToAuxInt(off1 + off2)
12361 v.Aux = symToAux(mergeSym(sym1, sym2))
12362 v.AddArg3(base, val, mem)
12365 // match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
12366 // cond: x.Uses == 1 && clobber(x)
12367 // result: (MOVQstore [i-4] {s} p w mem)
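// Store merging: a store of the upper 32 bits of w at [i], following a store
// of its lower 32 bits at [i-4], collapses into one 64-bit store of w at
// [i-4], provided the earlier narrow store has no other uses.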
12369 i := auxIntToInt32(v.AuxInt)
12370 s := auxToSym(v.Aux)
12372 if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 32 {
12377 if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
12381 if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
12384 v.reset(OpAMD64MOVQstore)
12385 v.AuxInt = int32ToAuxInt(i - 4)
12386 v.Aux = symToAux(s)
12387 v.AddArg3(p, w, mem)
12390 // match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
12391 // cond: x.Uses == 1 && clobber(x)
12392 // result: (MOVQstore [i-4] {s} p w0 mem)
12394 i := auxIntToInt32(v.AuxInt)
12395 s := auxToSym(v.Aux)
12397 if v_1.Op != OpAMD64SHRQconst {
12400 j := auxIntToInt8(v_1.AuxInt)
12403 if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
12407 if p != x.Args[0] {
12411 if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
12414 v.reset(OpAMD64MOVQstore)
12415 v.AuxInt = int32ToAuxInt(i - 4)
12416 v.Aux = symToAux(s)
12417 v.AddArg3(p, w0, mem)
12420 // match: (MOVLstore [i] {s} p1 (SHRQconst [32] w) x:(MOVLstore [i] {s} p0 w mem))
12421 // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)
12422 // result: (MOVQstore [i] {s} p0 w mem)
12424 i := auxIntToInt32(v.AuxInt)
12425 s := auxToSym(v.Aux)
12427 if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 32 {
12432 if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
12437 if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)) {
12440 v.reset(OpAMD64MOVQstore)
12441 v.AuxInt = int32ToAuxInt(i)
12442 v.Aux = symToAux(s)
12443 v.AddArg3(p0, w, mem)
12446 // match: (MOVLstore [i] {s} p1 (SHRQconst [j] w) x:(MOVLstore [i] {s} p0 w0:(SHRQconst [j-32] w) mem))
12447 // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)
12448 // result: (MOVQstore [i] {s} p0 w0 mem)
12450 i := auxIntToInt32(v.AuxInt)
12451 s := auxToSym(v.Aux)
12453 if v_1.Op != OpAMD64SHRQconst {
12456 j := auxIntToInt8(v_1.AuxInt)
12459 if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
12465 if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)) {
12468 v.reset(OpAMD64MOVQstore)
12469 v.AuxInt = int32ToAuxInt(i)
12470 v.Aux = symToAux(s)
12471 v.AddArg3(p0, w0, mem)
12474 // match: (MOVLstore [i] {s} p x1:(MOVLload [j] {s2} p2 mem) mem2:(MOVLstore [i-4] {s} p x2:(MOVLload [j-4] {s2} p2 mem) mem))
12475 // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)
12476 // result: (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem)
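// Two adjacent 4-byte load/store pairs that copy consecutive memory are
// merged into a single 8-byte copy: one MOVQload at [j-4] feeding one
// MOVQstore at [i-4], provided none of the intermediates are used elsewhere.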
12478 i := auxIntToInt32(v.AuxInt)
12479 s := auxToSym(v.Aux)
12482 if x1.Op != OpAMD64MOVLload {
12485 j := auxIntToInt32(x1.AuxInt)
12486 s2 := auxToSym(x1.Aux)
12490 if mem2.Op != OpAMD64MOVLstore || auxIntToInt32(mem2.AuxInt) != i-4 || auxToSym(mem2.Aux) != s {
12494 if p != mem2.Args[0] {
12498 if x2.Op != OpAMD64MOVLload || auxIntToInt32(x2.AuxInt) != j-4 || auxToSym(x2.Aux) != s2 {
12502 if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)) {
12505 v.reset(OpAMD64MOVQstore)
12506 v.AuxInt = int32ToAuxInt(i - 4)
12507 v.Aux = symToAux(s)
12508 v0 := b.NewValue0(x2.Pos, OpAMD64MOVQload, typ.UInt64)
12509 v0.AuxInt = int32ToAuxInt(j - 4)
12510 v0.Aux = symToAux(s2)
12511 v0.AddArg2(p2, mem)
12512 v.AddArg3(p, v0, mem)
12515 // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
12516 // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
12517 // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
12519 off1 := auxIntToInt32(v.AuxInt)
12520 sym1 := auxToSym(v.Aux)
12521 if v_0.Op != OpAMD64LEAL {
12524 off2 := auxIntToInt32(v_0.AuxInt)
12525 sym2 := auxToSym(v_0.Aux)
12526 base := v_0.Args[0]
12529 if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
12532 v.reset(OpAMD64MOVLstore)
12533 v.AuxInt = int32ToAuxInt(off1 + off2)
12534 v.Aux = symToAux(mergeSym(sym1, sym2))
12535 v.AddArg3(base, val, mem)
12538 // match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
12539 // cond: is32Bit(int64(off1)+int64(off2))
12540 // result: (MOVLstore [off1+off2] {sym} ptr val mem)
12542 off1 := auxIntToInt32(v.AuxInt)
12543 sym := auxToSym(v.Aux)
12544 if v_0.Op != OpAMD64ADDLconst {
12547 off2 := auxIntToInt32(v_0.AuxInt)
12551 if !(is32Bit(int64(off1) + int64(off2))) {
12554 v.reset(OpAMD64MOVLstore)
12555 v.AuxInt = int32ToAuxInt(off1 + off2)
12556 v.Aux = symToAux(sym)
12557 v.AddArg3(ptr, val, mem)
12560 // match: (MOVLstore {sym} [off] ptr y:(ADDLload x [off] {sym} ptr mem) mem)
12561 // cond: y.Uses==1 && clobber(y)
12562 // result: (ADDLmodify [off] {sym} ptr x mem)
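// Read-modify-write fusion: storing the result of an ADDLload back to the
// address it was loaded from becomes ADDLmodify, which adds x directly into
// the memory word. The intermediate value must have no other uses.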
12564 off := auxIntToInt32(v.AuxInt)
12565 sym := auxToSym(v.Aux)
12568 if y.Op != OpAMD64ADDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
12573 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
12576 v.reset(OpAMD64ADDLmodify)
12577 v.AuxInt = int32ToAuxInt(off)
12578 v.Aux = symToAux(sym)
12579 v.AddArg3(ptr, x, mem)
12582 // match: (MOVLstore {sym} [off] ptr y:(ANDLload x [off] {sym} ptr mem) mem)
12583 // cond: y.Uses==1 && clobber(y)
12584 // result: (ANDLmodify [off] {sym} ptr x mem)
12586 off := auxIntToInt32(v.AuxInt)
12587 sym := auxToSym(v.Aux)
12590 if y.Op != OpAMD64ANDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
12595 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
12598 v.reset(OpAMD64ANDLmodify)
12599 v.AuxInt = int32ToAuxInt(off)
12600 v.Aux = symToAux(sym)
12601 v.AddArg3(ptr, x, mem)
12604 // match: (MOVLstore {sym} [off] ptr y:(ORLload x [off] {sym} ptr mem) mem)
12605 // cond: y.Uses==1 && clobber(y)
12606 // result: (ORLmodify [off] {sym} ptr x mem)
12608 off := auxIntToInt32(v.AuxInt)
12609 sym := auxToSym(v.Aux)
12612 if y.Op != OpAMD64ORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
12617 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
12620 v.reset(OpAMD64ORLmodify)
12621 v.AuxInt = int32ToAuxInt(off)
12622 v.Aux = symToAux(sym)
12623 v.AddArg3(ptr, x, mem)
12626 // match: (MOVLstore {sym} [off] ptr y:(XORLload x [off] {sym} ptr mem) mem)
12627 // cond: y.Uses==1 && clobber(y)
12628 // result: (XORLmodify [off] {sym} ptr x mem)
12630 off := auxIntToInt32(v.AuxInt)
12631 sym := auxToSym(v.Aux)
12634 if y.Op != OpAMD64XORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
12639 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
12642 v.reset(OpAMD64XORLmodify)
12643 v.AuxInt = int32ToAuxInt(off)
12644 v.Aux = symToAux(sym)
12645 v.AddArg3(ptr, x, mem)
12648 // match: (MOVLstore {sym} [off] ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem)
12649 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
12650 // result: (ADDLmodify [off] {sym} ptr x mem)
12652 off := auxIntToInt32(v.AuxInt)
12653 sym := auxToSym(v.Aux)
12656 if y.Op != OpAMD64ADDL {
12662 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
12664 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12668 if ptr != l.Args[0] {
12672 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12675 v.reset(OpAMD64ADDLmodify)
12676 v.AuxInt = int32ToAuxInt(off)
12677 v.Aux = symToAux(sym)
12678 v.AddArg3(ptr, x, mem)
12683 // match: (MOVLstore {sym} [off] ptr y:(SUBL l:(MOVLload [off] {sym} ptr mem) x) mem)
12684 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
12685 // result: (SUBLmodify [off] {sym} ptr x mem)
12687 off := auxIntToInt32(v.AuxInt)
12688 sym := auxToSym(v.Aux)
12691 if y.Op != OpAMD64SUBL {
12696 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12700 if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12703 v.reset(OpAMD64SUBLmodify)
12704 v.AuxInt = int32ToAuxInt(off)
12705 v.Aux = symToAux(sym)
12706 v.AddArg3(ptr, x, mem)
12709 // match: (MOVLstore {sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem)
12710 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
12711 // result: (ANDLmodify [off] {sym} ptr x mem)
12713 off := auxIntToInt32(v.AuxInt)
12714 sym := auxToSym(v.Aux)
12717 if y.Op != OpAMD64ANDL {
12723 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
12725 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12729 if ptr != l.Args[0] {
12733 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12736 v.reset(OpAMD64ANDLmodify)
12737 v.AuxInt = int32ToAuxInt(off)
12738 v.Aux = symToAux(sym)
12739 v.AddArg3(ptr, x, mem)
12744 // match: (MOVLstore {sym} [off] ptr y:(ORL l:(MOVLload [off] {sym} ptr mem) x) mem)
12745 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
12746 // result: (ORLmodify [off] {sym} ptr x mem)
12748 off := auxIntToInt32(v.AuxInt)
12749 sym := auxToSym(v.Aux)
12752 if y.Op != OpAMD64ORL {
12758 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
12760 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12764 if ptr != l.Args[0] {
12768 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12771 v.reset(OpAMD64ORLmodify)
12772 v.AuxInt = int32ToAuxInt(off)
12773 v.Aux = symToAux(sym)
12774 v.AddArg3(ptr, x, mem)
12779 // match: (MOVLstore {sym} [off] ptr y:(XORL l:(MOVLload [off] {sym} ptr mem) x) mem)
12780 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
12781 // result: (XORLmodify [off] {sym} ptr x mem)
12783 off := auxIntToInt32(v.AuxInt)
12784 sym := auxToSym(v.Aux)
12787 if y.Op != OpAMD64XORL {
12793 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
12795 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12799 if ptr != l.Args[0] {
12803 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12806 v.reset(OpAMD64XORLmodify)
12807 v.AuxInt = int32ToAuxInt(off)
12808 v.Aux = symToAux(sym)
12809 v.AddArg3(ptr, x, mem)
12814 // match: (MOVLstore {sym} [off] ptr y:(BTCL l:(MOVLload [off] {sym} ptr mem) <t> x) mem)
12815 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
12816 // result: (BTCLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
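// With a register destination the BTCL bit index is taken modulo 32, but with
// a memory destination the hardware would address bits beyond the 32-bit
// word, so the index is masked with ANDLconst [31] to keep the original
// semantics.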
12818 off := auxIntToInt32(v.AuxInt)
12819 sym := auxToSym(v.Aux)
12822 if y.Op != OpAMD64BTCL {
12828 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12832 if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12835 v.reset(OpAMD64BTCLmodify)
12836 v.AuxInt = int32ToAuxInt(off)
12837 v.Aux = symToAux(sym)
12838 v0 := b.NewValue0(l.Pos, OpAMD64ANDLconst, t)
12839 v0.AuxInt = int32ToAuxInt(31)
12841 v.AddArg3(ptr, v0, mem)
12844 // match: (MOVLstore {sym} [off] ptr y:(BTRL l:(MOVLload [off] {sym} ptr mem) <t> x) mem)
12845 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
12846 // result: (BTRLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
12848 off := auxIntToInt32(v.AuxInt)
12849 sym := auxToSym(v.Aux)
12852 if y.Op != OpAMD64BTRL {
12858 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12862 if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12865 v.reset(OpAMD64BTRLmodify)
12866 v.AuxInt = int32ToAuxInt(off)
12867 v.Aux = symToAux(sym)
12868 v0 := b.NewValue0(l.Pos, OpAMD64ANDLconst, t)
12869 v0.AuxInt = int32ToAuxInt(31)
12871 v.AddArg3(ptr, v0, mem)
12874 // match: (MOVLstore {sym} [off] ptr y:(BTSL l:(MOVLload [off] {sym} ptr mem) <t> x) mem)
12875 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
12876 // result: (BTSLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
12878 off := auxIntToInt32(v.AuxInt)
12879 sym := auxToSym(v.Aux)
12882 if y.Op != OpAMD64BTSL {
12888 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12892 if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12895 v.reset(OpAMD64BTSLmodify)
12896 v.AuxInt = int32ToAuxInt(off)
12897 v.Aux = symToAux(sym)
12898 v0 := b.NewValue0(l.Pos, OpAMD64ANDLconst, t)
12899 v0.AuxInt = int32ToAuxInt(31)
12901 v.AddArg3(ptr, v0, mem)
12904 // match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
12905 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
12906 // result: (ADDLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
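// If the value being added is itself a constant, the load/add/store collapses
// all the way to ADDLconstmodify: a single add-immediate-to-memory
// instruction, with the constant and offset packed into the auxint.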
12908 off := auxIntToInt32(v.AuxInt)
12909 sym := auxToSym(v.Aux)
12912 if a.Op != OpAMD64ADDLconst {
12915 c := auxIntToInt32(a.AuxInt)
12917 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12922 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
12925 v.reset(OpAMD64ADDLconstmodify)
12926 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12927 v.Aux = symToAux(sym)
12928 v.AddArg2(ptr, mem)
12931 // match: (MOVLstore [off] {sym} ptr a:(ANDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
12932 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
12933 // result: (ANDLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
12935 off := auxIntToInt32(v.AuxInt)
12936 sym := auxToSym(v.Aux)
12939 if a.Op != OpAMD64ANDLconst {
12942 c := auxIntToInt32(a.AuxInt)
12944 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12949 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
12952 v.reset(OpAMD64ANDLconstmodify)
12953 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12954 v.Aux = symToAux(sym)
12955 v.AddArg2(ptr, mem)
12958 // match: (MOVLstore [off] {sym} ptr a:(ORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
12959 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
12960 // result: (ORLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
12962 off := auxIntToInt32(v.AuxInt)
12963 sym := auxToSym(v.Aux)
12966 if a.Op != OpAMD64ORLconst {
12969 c := auxIntToInt32(a.AuxInt)
12971 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12976 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
12979 v.reset(OpAMD64ORLconstmodify)
12980 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12981 v.Aux = symToAux(sym)
12982 v.AddArg2(ptr, mem)
12985 // match: (MOVLstore [off] {sym} ptr a:(XORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
12986 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
12987 // result: (XORLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
12989 off := auxIntToInt32(v.AuxInt)
12990 sym := auxToSym(v.Aux)
12993 if a.Op != OpAMD64XORLconst {
12996 c := auxIntToInt32(a.AuxInt)
12998 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
13003 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
13006 v.reset(OpAMD64XORLconstmodify)
13007 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
13008 v.Aux = symToAux(sym)
13009 v.AddArg2(ptr, mem)
13012 // match: (MOVLstore [off] {sym} ptr a:(BTCLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
13013 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
13014 // result: (BTCLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
13016 off := auxIntToInt32(v.AuxInt)
13017 sym := auxToSym(v.Aux)
13020 if a.Op != OpAMD64BTCLconst {
13023 c := auxIntToInt8(a.AuxInt)
13025 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
13030 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
13033 v.reset(OpAMD64BTCLconstmodify)
13034 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
13035 v.Aux = symToAux(sym)
13036 v.AddArg2(ptr, mem)
13039 // match: (MOVLstore [off] {sym} ptr a:(BTRLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
13040 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
13041 // result: (BTRLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
13043 off := auxIntToInt32(v.AuxInt)
13044 sym := auxToSym(v.Aux)
13047 if a.Op != OpAMD64BTRLconst {
13050 c := auxIntToInt8(a.AuxInt)
13052 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
13057 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
13060 v.reset(OpAMD64BTRLconstmodify)
13061 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
13062 v.Aux = symToAux(sym)
13063 v.AddArg2(ptr, mem)
13066 // match: (MOVLstore [off] {sym} ptr a:(BTSLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
13067 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
13068 // result: (BTSLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
13070 off := auxIntToInt32(v.AuxInt)
13071 sym := auxToSym(v.Aux)
13074 if a.Op != OpAMD64BTSLconst {
13077 c := auxIntToInt8(a.AuxInt)
13079 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
13084 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
13087 v.reset(OpAMD64BTSLconstmodify)
13088 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
13089 v.Aux = symToAux(sym)
13090 v.AddArg2(ptr, mem)
13093 // match: (MOVLstore [off] {sym} ptr (MOVLf2i val) mem)
13094 // result: (MOVSSstore [off] {sym} ptr val mem)
13096 off := auxIntToInt32(v.AuxInt)
13097 sym := auxToSym(v.Aux)
13099 if v_1.Op != OpAMD64MOVLf2i {
13104 v.reset(OpAMD64MOVSSstore)
13105 v.AuxInt = int32ToAuxInt(off)
13106 v.Aux = symToAux(sym)
13107 v.AddArg3(ptr, val, mem)
13112 func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool {
13116 typ := &b.Func.Config.Types
13117 // match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
13118 // cond: ValAndOff(sc).canAdd32(off)
13119 // result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
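// MOVLstoreconst packs its constant value and its offset together in a
// ValAndOff auxint; canAdd32 checks that folding the ADDQconst displacement
// into that packed offset still leaves it representable in 32 bits.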
13121 sc := auxIntToValAndOff(v.AuxInt)
13122 s := auxToSym(v.Aux)
13123 if v_0.Op != OpAMD64ADDQconst {
13126 off := auxIntToInt32(v_0.AuxInt)
13129 if !(ValAndOff(sc).canAdd32(off)) {
13132 v.reset(OpAMD64MOVLstoreconst)
13133 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
13134 v.Aux = symToAux(s)
13135 v.AddArg2(ptr, mem)
13138 // match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
13139 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
13140 // result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
13142 sc := auxIntToValAndOff(v.AuxInt)
13143 sym1 := auxToSym(v.Aux)
13144 if v_0.Op != OpAMD64LEAQ {
13147 off := auxIntToInt32(v_0.AuxInt)
13148 sym2 := auxToSym(v_0.Aux)
13151 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
13154 v.reset(OpAMD64MOVLstoreconst)
13155 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
13156 v.Aux = symToAux(mergeSym(sym1, sym2))
13157 v.AddArg2(ptr, mem)
13160 // match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
13161 // cond: x.Uses == 1 && a.Off() + 4 == c.Off() && clobber(x)
13162 // result: (MOVQstore [a.Off()] {s} p (MOVQconst [a.Val64()&0xffffffff | c.Val64()<<32]) mem)
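// Two adjacent 4-byte constant stores (a at the lower offset, c four bytes
// above) combine into one 8-byte store whose immediate has a in the low
// 32 bits and c in the high 32 bits.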
13164 c := auxIntToValAndOff(v.AuxInt)
13165 s := auxToSym(v.Aux)
13168 if x.Op != OpAMD64MOVLstoreconst {
13171 a := auxIntToValAndOff(x.AuxInt)
13172 if auxToSym(x.Aux) != s {
13176 if p != x.Args[0] || !(x.Uses == 1 && a.Off()+4 == c.Off() && clobber(x)) {
13179 v.reset(OpAMD64MOVQstore)
13180 v.AuxInt = int32ToAuxInt(a.Off())
13181 v.Aux = symToAux(s)
13182 v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64)
13183 v0.AuxInt = int64ToAuxInt(a.Val64()&0xffffffff | c.Val64()<<32)
13184 v.AddArg3(p, v0, mem)
13187 // match: (MOVLstoreconst [a] {s} p x:(MOVLstoreconst [c] {s} p mem))
13188 // cond: x.Uses == 1 && a.Off() + 4 == c.Off() && clobber(x)
13189 // result: (MOVQstore [a.Off()] {s} p (MOVQconst [a.Val64()&0xffffffff | c.Val64()<<32]) mem)
13191 a := auxIntToValAndOff(v.AuxInt)
13192 s := auxToSym(v.Aux)
13195 if x.Op != OpAMD64MOVLstoreconst {
13198 c := auxIntToValAndOff(x.AuxInt)
13199 if auxToSym(x.Aux) != s {
13203 if p != x.Args[0] || !(x.Uses == 1 && a.Off()+4 == c.Off() && clobber(x)) {
13206 v.reset(OpAMD64MOVQstore)
13207 v.AuxInt = int32ToAuxInt(a.Off())
13208 v.Aux = symToAux(s)
13209 v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64)
13210 v0.AuxInt = int64ToAuxInt(a.Val64()&0xffffffff | c.Val64()<<32)
13211 v.AddArg3(p, v0, mem)
13214 // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
13215 // cond: canMergeSym(sym1, sym2) && sc.canAdd32(off)
13216 // result: (MOVLstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
13218 sc := auxIntToValAndOff(v.AuxInt)
13219 sym1 := auxToSym(v.Aux)
13220 if v_0.Op != OpAMD64LEAL {
13223 off := auxIntToInt32(v_0.AuxInt)
13224 sym2 := auxToSym(v_0.Aux)
13227 if !(canMergeSym(sym1, sym2) && sc.canAdd32(off)) {
13230 v.reset(OpAMD64MOVLstoreconst)
13231 v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
13232 v.Aux = symToAux(mergeSym(sym1, sym2))
13233 v.AddArg2(ptr, mem)
13236 // match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
13237 // cond: sc.canAdd32(off)
13238 // result: (MOVLstoreconst [sc.addOffset32(off)] {s} ptr mem)
13240 sc := auxIntToValAndOff(v.AuxInt)
13241 s := auxToSym(v.Aux)
13242 if v_0.Op != OpAMD64ADDLconst {
13245 off := auxIntToInt32(v_0.AuxInt)
13248 if !(sc.canAdd32(off)) {
13251 v.reset(OpAMD64MOVLstoreconst)
13252 v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
13253 v.Aux = symToAux(s)
13254 v.AddArg2(ptr, mem)
13259 func rewriteValueAMD64_OpAMD64MOVOload(v *Value) bool {
13262 // match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem)
13263 // cond: is32Bit(int64(off1)+int64(off2))
13264 // result: (MOVOload [off1+off2] {sym} ptr mem)
13266 off1 := auxIntToInt32(v.AuxInt)
13267 sym := auxToSym(v.Aux)
13268 if v_0.Op != OpAMD64ADDQconst {
13271 off2 := auxIntToInt32(v_0.AuxInt)
13274 if !(is32Bit(int64(off1) + int64(off2))) {
13277 v.reset(OpAMD64MOVOload)
13278 v.AuxInt = int32ToAuxInt(off1 + off2)
13279 v.Aux = symToAux(sym)
13280 v.AddArg2(ptr, mem)
13283 // match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
13284 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
13285 // result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem)
13287 off1 := auxIntToInt32(v.AuxInt)
13288 sym1 := auxToSym(v.Aux)
13289 if v_0.Op != OpAMD64LEAQ {
13292 off2 := auxIntToInt32(v_0.AuxInt)
13293 sym2 := auxToSym(v_0.Aux)
13294 base := v_0.Args[0]
13296 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
13299 v.reset(OpAMD64MOVOload)
13300 v.AuxInt = int32ToAuxInt(off1 + off2)
13301 v.Aux = symToAux(mergeSym(sym1, sym2))
13302 v.AddArg2(base, mem)
13307 func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool {
13312 config := b.Func.Config
13313 typ := &b.Func.Config.Types
13314 // match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
13315 // cond: is32Bit(int64(off1)+int64(off2))
13316 // result: (MOVOstore [off1+off2] {sym} ptr val mem)
13318 off1 := auxIntToInt32(v.AuxInt)
13319 sym := auxToSym(v.Aux)
13320 if v_0.Op != OpAMD64ADDQconst {
13323 off2 := auxIntToInt32(v_0.AuxInt)
13327 if !(is32Bit(int64(off1) + int64(off2))) {
13330 v.reset(OpAMD64MOVOstore)
13331 v.AuxInt = int32ToAuxInt(off1 + off2)
13332 v.Aux = symToAux(sym)
13333 v.AddArg3(ptr, val, mem)
13336 // match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
13337 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
13338 // result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
13340 off1 := auxIntToInt32(v.AuxInt)
13341 sym1 := auxToSym(v.Aux)
13342 if v_0.Op != OpAMD64LEAQ {
13345 off2 := auxIntToInt32(v_0.AuxInt)
13346 sym2 := auxToSym(v_0.Aux)
13347 base := v_0.Args[0]
13350 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
13353 v.reset(OpAMD64MOVOstore)
13354 v.AuxInt = int32ToAuxInt(off1 + off2)
13355 v.Aux = symToAux(mergeSym(sym1, sym2))
13356 v.AddArg3(base, val, mem)
13359 // match: (MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem)
13360 // cond: symIsRO(srcSym)
13361 // result: (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))]) (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))]) mem))
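// A 16-byte copy whose source is a read-only symbol is lowered to two 8-byte
// constant stores: the source bytes are read at compile time and emitted as
// immediates, so the MOVOload disappears.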
13363 dstOff := auxIntToInt32(v.AuxInt)
13364 dstSym := auxToSym(v.Aux)
13366 if v_1.Op != OpAMD64MOVOload {
13369 srcOff := auxIntToInt32(v_1.AuxInt)
13370 srcSym := auxToSym(v_1.Aux)
13371 v_1_0 := v_1.Args[0]
13372 if v_1_0.Op != OpSB {
13376 if !(symIsRO(srcSym)) {
13379 v.reset(OpAMD64MOVQstore)
13380 v.AuxInt = int32ToAuxInt(dstOff + 8)
13381 v.Aux = symToAux(dstSym)
13382 v0 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
13383 v0.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder)))
13384 v1 := b.NewValue0(v_1.Pos, OpAMD64MOVQstore, types.TypeMem)
13385 v1.AuxInt = int32ToAuxInt(dstOff)
13386 v1.Aux = symToAux(dstSym)
13387 v2 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
13388 v2.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder)))
13389 v1.AddArg3(ptr, v2, mem)
13390 v.AddArg3(ptr, v0, v1)
13395 func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value) bool {
13398 // match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
13399 // cond: is32Bit(int64(off1)+int64(off2))
13400 // result: (MOVQatomicload [off1+off2] {sym} ptr mem)
13402 off1 := auxIntToInt32(v.AuxInt)
13403 sym := auxToSym(v.Aux)
13404 if v_0.Op != OpAMD64ADDQconst {
13407 off2 := auxIntToInt32(v_0.AuxInt)
13410 if !(is32Bit(int64(off1) + int64(off2))) {
13413 v.reset(OpAMD64MOVQatomicload)
13414 v.AuxInt = int32ToAuxInt(off1 + off2)
13415 v.Aux = symToAux(sym)
13416 v.AddArg2(ptr, mem)
13419 // match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
13420 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
13421 // result: (MOVQatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
13423 off1 := auxIntToInt32(v.AuxInt)
13424 sym1 := auxToSym(v.Aux)
13425 if v_0.Op != OpAMD64LEAQ {
13428 off2 := auxIntToInt32(v_0.AuxInt)
13429 sym2 := auxToSym(v_0.Aux)
13432 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
13435 v.reset(OpAMD64MOVQatomicload)
13436 v.AuxInt = int32ToAuxInt(off1 + off2)
13437 v.Aux = symToAux(mergeSym(sym1, sym2))
13438 v.AddArg2(ptr, mem)
13443 func rewriteValueAMD64_OpAMD64MOVQf2i(v *Value) bool {
13446 // match: (MOVQf2i <t> (Arg <u> [off] {sym}))
13447 // cond: t.Size() == u.Size()
13448 // result: @b.Func.Entry (Arg <t> [off] {sym})
13451 if v_0.Op != OpArg {
13455 off := auxIntToInt32(v_0.AuxInt)
13456 sym := auxToSym(v_0.Aux)
13457 if !(t.Size() == u.Size()) {
13461 v0 := b.NewValue0(v.Pos, OpArg, t)
13463 v0.AuxInt = int32ToAuxInt(off)
13464 v0.Aux = symToAux(sym)
13469 func rewriteValueAMD64_OpAMD64MOVQi2f(v *Value) bool {
13472 // match: (MOVQi2f <t> (Arg <u> [off] {sym}))
13473 // cond: t.Size() == u.Size()
13474 // result: @b.Func.Entry (Arg <t> [off] {sym})
13477 if v_0.Op != OpArg {
13481 off := auxIntToInt32(v_0.AuxInt)
13482 sym := auxToSym(v_0.Aux)
13483 if !(t.Size() == u.Size()) {
13487 v0 := b.NewValue0(v.Pos, OpArg, t)
13489 v0.AuxInt = int32ToAuxInt(off)
13490 v0.Aux = symToAux(sym)
13495 func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool {
13499 config := b.Func.Config
13500 // match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _))
13501 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
13502 // result: x
13504 off := auxIntToInt32(v.AuxInt)
13505 sym := auxToSym(v.Aux)
13507 if v_1.Op != OpAMD64MOVQstore {
13510 off2 := auxIntToInt32(v_1.AuxInt)
13511 sym2 := auxToSym(v_1.Aux)
13513 ptr2 := v_1.Args[0]
13514 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
13520 // match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem)
13521 // cond: is32Bit(int64(off1)+int64(off2))
13522 // result: (MOVQload [off1+off2] {sym} ptr mem)
13524 off1 := auxIntToInt32(v.AuxInt)
13525 sym := auxToSym(v.Aux)
13526 if v_0.Op != OpAMD64ADDQconst {
13529 off2 := auxIntToInt32(v_0.AuxInt)
13532 if !(is32Bit(int64(off1) + int64(off2))) {
13535 v.reset(OpAMD64MOVQload)
13536 v.AuxInt = int32ToAuxInt(off1 + off2)
13537 v.Aux = symToAux(sym)
13538 v.AddArg2(ptr, mem)
13541 // match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
13542 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
13543 // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
13545 off1 := auxIntToInt32(v.AuxInt)
13546 sym1 := auxToSym(v.Aux)
13547 if v_0.Op != OpAMD64LEAQ {
13550 off2 := auxIntToInt32(v_0.AuxInt)
13551 sym2 := auxToSym(v_0.Aux)
13552 base := v_0.Args[0]
13554 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
13557 v.reset(OpAMD64MOVQload)
13558 v.AuxInt = int32ToAuxInt(off1 + off2)
13559 v.Aux = symToAux(mergeSym(sym1, sym2))
13560 v.AddArg2(base, mem)
13563 // match: (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
13564 // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
13565 // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
13567 off1 := auxIntToInt32(v.AuxInt)
13568 sym1 := auxToSym(v.Aux)
13569 if v_0.Op != OpAMD64LEAL {
13572 off2 := auxIntToInt32(v_0.AuxInt)
13573 sym2 := auxToSym(v_0.Aux)
13574 base := v_0.Args[0]
13576 if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
13579 v.reset(OpAMD64MOVQload)
13580 v.AuxInt = int32ToAuxInt(off1 + off2)
13581 v.Aux = symToAux(mergeSym(sym1, sym2))
13582 v.AddArg2(base, mem)
13585 // match: (MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem)
13586 // cond: is32Bit(int64(off1)+int64(off2))
13587 // result: (MOVQload [off1+off2] {sym} ptr mem)
13589 off1 := auxIntToInt32(v.AuxInt)
13590 sym := auxToSym(v.Aux)
13591 if v_0.Op != OpAMD64ADDLconst {
13594 off2 := auxIntToInt32(v_0.AuxInt)
13597 if !(is32Bit(int64(off1) + int64(off2))) {
13600 v.reset(OpAMD64MOVQload)
13601 v.AuxInt = int32ToAuxInt(off1 + off2)
13602 v.Aux = symToAux(sym)
13603 v.AddArg2(ptr, mem)
13606 // match: (MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _))
13607 // result: (MOVQf2i val)
13609 off := auxIntToInt32(v.AuxInt)
13610 sym := auxToSym(v.Aux)
13612 if v_1.Op != OpAMD64MOVSDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
13616 if ptr != v_1.Args[0] {
13619 v.reset(OpAMD64MOVQf2i)
13623 // match: (MOVQload [off] {sym} (SB) _)
13624 // cond: symIsRO(sym)
13625 // result: (MOVQconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
13627 off := auxIntToInt32(v.AuxInt)
13628 sym := auxToSym(v.Aux)
13629 if v_0.Op != OpSB || !(symIsRO(sym)) {
13632 v.reset(OpAMD64MOVQconst)
13633 v.AuxInt = int64ToAuxInt(int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder)))
13638 func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
13643 // match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
13644 // cond: is32Bit(int64(off1)+int64(off2))
13645 // result: (MOVQstore [off1+off2] {sym} ptr val mem)
13647 off1 := auxIntToInt32(v.AuxInt)
13648 sym := auxToSym(v.Aux)
13649 if v_0.Op != OpAMD64ADDQconst {
13652 off2 := auxIntToInt32(v_0.AuxInt)
13656 if !(is32Bit(int64(off1) + int64(off2))) {
13659 v.reset(OpAMD64MOVQstore)
13660 v.AuxInt = int32ToAuxInt(off1 + off2)
13661 v.Aux = symToAux(sym)
13662 v.AddArg3(ptr, val, mem)
13665 // match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem)
13666 // cond: validVal(c)
13667 // result: (MOVQstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
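// The constant is folded into the store only when it can serve as a store
// immediate; the validVal(c) condition guards this (a 64-bit store immediate
// must fit in sign-extended 32-bit form).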
13669 off := auxIntToInt32(v.AuxInt)
13670 sym := auxToSym(v.Aux)
13672 if v_1.Op != OpAMD64MOVQconst {
13675 c := auxIntToInt64(v_1.AuxInt)
13677 if !(validVal(c)) {
13680 v.reset(OpAMD64MOVQstoreconst)
13681 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
13682 v.Aux = symToAux(sym)
13683 v.AddArg2(ptr, mem)
13686 // match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
13687 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
13688 // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
13690 off1 := auxIntToInt32(v.AuxInt)
13691 sym1 := auxToSym(v.Aux)
13692 if v_0.Op != OpAMD64LEAQ {
13695 off2 := auxIntToInt32(v_0.AuxInt)
13696 sym2 := auxToSym(v_0.Aux)
13697 base := v_0.Args[0]
13700 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
13703 v.reset(OpAMD64MOVQstore)
13704 v.AuxInt = int32ToAuxInt(off1 + off2)
13705 v.Aux = symToAux(mergeSym(sym1, sym2))
13706 v.AddArg3(base, val, mem)
13709 // match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
13710 // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
13711 // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
13713 off1 := auxIntToInt32(v.AuxInt)
13714 sym1 := auxToSym(v.Aux)
13715 if v_0.Op != OpAMD64LEAL {
13718 off2 := auxIntToInt32(v_0.AuxInt)
13719 sym2 := auxToSym(v_0.Aux)
13720 base := v_0.Args[0]
13723 if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
13726 v.reset(OpAMD64MOVQstore)
13727 v.AuxInt = int32ToAuxInt(off1 + off2)
13728 v.Aux = symToAux(mergeSym(sym1, sym2))
13729 v.AddArg3(base, val, mem)
13732 // match: (MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
13733 // cond: is32Bit(int64(off1)+int64(off2))
13734 // result: (MOVQstore [off1+off2] {sym} ptr val mem)
13736 off1 := auxIntToInt32(v.AuxInt)
13737 sym := auxToSym(v.Aux)
13738 if v_0.Op != OpAMD64ADDLconst {
13741 off2 := auxIntToInt32(v_0.AuxInt)
13745 if !(is32Bit(int64(off1) + int64(off2))) {
13748 v.reset(OpAMD64MOVQstore)
13749 v.AuxInt = int32ToAuxInt(off1 + off2)
13750 v.Aux = symToAux(sym)
13751 v.AddArg3(ptr, val, mem)
13754 // match: (MOVQstore {sym} [off] ptr y:(ADDQload x [off] {sym} ptr mem) mem)
13755 // cond: y.Uses==1 && clobber(y)
13756 // result: (ADDQmodify [off] {sym} ptr x mem)
13758 off := auxIntToInt32(v.AuxInt)
13759 sym := auxToSym(v.Aux)
13762 if y.Op != OpAMD64ADDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
13767 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
13770 v.reset(OpAMD64ADDQmodify)
13771 v.AuxInt = int32ToAuxInt(off)
13772 v.Aux = symToAux(sym)
13773 v.AddArg3(ptr, x, mem)
13776 // match: (MOVQstore {sym} [off] ptr y:(ANDQload x [off] {sym} ptr mem) mem)
13777 // cond: y.Uses==1 && clobber(y)
13778 // result: (ANDQmodify [off] {sym} ptr x mem)
13780 off := auxIntToInt32(v.AuxInt)
13781 sym := auxToSym(v.Aux)
13784 if y.Op != OpAMD64ANDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
13789 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
13792 v.reset(OpAMD64ANDQmodify)
13793 v.AuxInt = int32ToAuxInt(off)
13794 v.Aux = symToAux(sym)
13795 v.AddArg3(ptr, x, mem)
13798 // match: (MOVQstore {sym} [off] ptr y:(ORQload x [off] {sym} ptr mem) mem)
13799 // cond: y.Uses==1 && clobber(y)
13800 // result: (ORQmodify [off] {sym} ptr x mem)
13802 off := auxIntToInt32(v.AuxInt)
13803 sym := auxToSym(v.Aux)
13806 if y.Op != OpAMD64ORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
13811 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
13814 v.reset(OpAMD64ORQmodify)
13815 v.AuxInt = int32ToAuxInt(off)
13816 v.Aux = symToAux(sym)
13817 v.AddArg3(ptr, x, mem)
13820 // match: (MOVQstore {sym} [off] ptr y:(XORQload x [off] {sym} ptr mem) mem)
13821 // cond: y.Uses==1 && clobber(y)
13822 // result: (XORQmodify [off] {sym} ptr x mem)
13824 off := auxIntToInt32(v.AuxInt)
13825 sym := auxToSym(v.Aux)
13828 if y.Op != OpAMD64XORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
13833 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
13836 v.reset(OpAMD64XORQmodify)
13837 v.AuxInt = int32ToAuxInt(off)
13838 v.Aux = symToAux(sym)
13839 v.AddArg3(ptr, x, mem)
13842 // match: (MOVQstore {sym} [off] ptr y:(ADDQ l:(MOVQload [off] {sym} ptr mem) x) mem)
13843 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
13844 // result: (ADDQmodify [off] {sym} ptr x mem)
13846 off := auxIntToInt32(v.AuxInt)
13847 sym := auxToSym(v.Aux)
13850 if y.Op != OpAMD64ADDQ {
13856 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
13858 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
13862 if ptr != l.Args[0] {
13866 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
13869 v.reset(OpAMD64ADDQmodify)
13870 v.AuxInt = int32ToAuxInt(off)
13871 v.Aux = symToAux(sym)
13872 v.AddArg3(ptr, x, mem)
13877 // match: (MOVQstore {sym} [off] ptr y:(SUBQ l:(MOVQload [off] {sym} ptr mem) x) mem)
13878 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
13879 // result: (SUBQmodify [off] {sym} ptr x mem)
13881 off := auxIntToInt32(v.AuxInt)
13882 sym := auxToSym(v.Aux)
13885 if y.Op != OpAMD64SUBQ {
13890 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
13894 if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
13897 v.reset(OpAMD64SUBQmodify)
13898 v.AuxInt = int32ToAuxInt(off)
13899 v.Aux = symToAux(sym)
13900 v.AddArg3(ptr, x, mem)
13903 // match: (MOVQstore {sym} [off] ptr y:(ANDQ l:(MOVQload [off] {sym} ptr mem) x) mem)
13904 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
13905 // result: (ANDQmodify [off] {sym} ptr x mem)
13907 off := auxIntToInt32(v.AuxInt)
13908 sym := auxToSym(v.Aux)
13911 if y.Op != OpAMD64ANDQ {
13917 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
13919 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
13923 if ptr != l.Args[0] {
13927 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
13930 v.reset(OpAMD64ANDQmodify)
13931 v.AuxInt = int32ToAuxInt(off)
13932 v.Aux = symToAux(sym)
13933 v.AddArg3(ptr, x, mem)
13938 // match: (MOVQstore {sym} [off] ptr y:(ORQ l:(MOVQload [off] {sym} ptr mem) x) mem)
13939 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
13940 // result: (ORQmodify [off] {sym} ptr x mem)
13942 off := auxIntToInt32(v.AuxInt)
13943 sym := auxToSym(v.Aux)
13946 if y.Op != OpAMD64ORQ {
13952 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
13954 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
13958 if ptr != l.Args[0] {
13962 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
13965 v.reset(OpAMD64ORQmodify)
13966 v.AuxInt = int32ToAuxInt(off)
13967 v.Aux = symToAux(sym)
13968 v.AddArg3(ptr, x, mem)
13973 // match: (MOVQstore {sym} [off] ptr y:(XORQ l:(MOVQload [off] {sym} ptr mem) x) mem)
13974 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
13975 // result: (XORQmodify [off] {sym} ptr x mem)
13977 off := auxIntToInt32(v.AuxInt)
13978 sym := auxToSym(v.Aux)
13981 if y.Op != OpAMD64XORQ {
13987 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
13989 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
13993 if ptr != l.Args[0] {
13997 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
14000 v.reset(OpAMD64XORQmodify)
14001 v.AuxInt = int32ToAuxInt(off)
14002 v.Aux = symToAux(sym)
14003 v.AddArg3(ptr, x, mem)
14008 // match: (MOVQstore {sym} [off] ptr y:(BTCQ l:(MOVQload [off] {sym} ptr mem) <t> x) mem)
14009 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
14010 // result: (BTCQmodify [off] {sym} ptr (ANDQconst <t> [63] x) mem)
14012 off := auxIntToInt32(v.AuxInt)
14013 sym := auxToSym(v.Aux)
14016 if y.Op != OpAMD64BTCQ {
14022 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
14026 if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
14029 v.reset(OpAMD64BTCQmodify)
14030 v.AuxInt = int32ToAuxInt(off)
14031 v.Aux = symToAux(sym)
14032 v0 := b.NewValue0(l.Pos, OpAMD64ANDQconst, t)
14033 v0.AuxInt = int32ToAuxInt(63)
14035 v.AddArg3(ptr, v0, mem)
14038 // match: (MOVQstore {sym} [off] ptr y:(BTRQ l:(MOVQload [off] {sym} ptr mem) <t> x) mem)
14039 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
14040 // result: (BTRQmodify [off] {sym} ptr (ANDQconst <t> [63] x) mem)
14042 off := auxIntToInt32(v.AuxInt)
14043 sym := auxToSym(v.Aux)
14046 if y.Op != OpAMD64BTRQ {
14052 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
14056 if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
14059 v.reset(OpAMD64BTRQmodify)
14060 v.AuxInt = int32ToAuxInt(off)
14061 v.Aux = symToAux(sym)
14062 v0 := b.NewValue0(l.Pos, OpAMD64ANDQconst, t)
14063 v0.AuxInt = int32ToAuxInt(63)
14065 v.AddArg3(ptr, v0, mem)
14068 // match: (MOVQstore {sym} [off] ptr y:(BTSQ l:(MOVQload [off] {sym} ptr mem) <t> x) mem)
14069 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
14070 // result: (BTSQmodify [off] {sym} ptr (ANDQconst <t> [63] x) mem)
14072 off := auxIntToInt32(v.AuxInt)
14073 sym := auxToSym(v.Aux)
14076 if y.Op != OpAMD64BTSQ {
14082 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
14086 if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
14089 v.reset(OpAMD64BTSQmodify)
14090 v.AuxInt = int32ToAuxInt(off)
14091 v.Aux = symToAux(sym)
14092 v0 := b.NewValue0(l.Pos, OpAMD64ANDQconst, t)
14093 v0.AuxInt = int32ToAuxInt(63)
14095 v.AddArg3(ptr, v0, mem)
14098 // match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
14099 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
14100 // result: (ADDQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
14102 off := auxIntToInt32(v.AuxInt)
14103 sym := auxToSym(v.Aux)
14106 if a.Op != OpAMD64ADDQconst {
14109 c := auxIntToInt32(a.AuxInt)
14111 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
14116 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
14119 v.reset(OpAMD64ADDQconstmodify)
14120 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
14121 v.Aux = symToAux(sym)
14122 v.AddArg2(ptr, mem)
14125 // match: (MOVQstore [off] {sym} ptr a:(ANDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
14126 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
14127 // result: (ANDQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
14129 off := auxIntToInt32(v.AuxInt)
14130 sym := auxToSym(v.Aux)
14133 if a.Op != OpAMD64ANDQconst {
14136 c := auxIntToInt32(a.AuxInt)
14138 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
14143 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
14146 v.reset(OpAMD64ANDQconstmodify)
14147 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
14148 v.Aux = symToAux(sym)
14149 v.AddArg2(ptr, mem)
14152 // match: (MOVQstore [off] {sym} ptr a:(ORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
14153 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
14154 // result: (ORQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
14156 off := auxIntToInt32(v.AuxInt)
14157 sym := auxToSym(v.Aux)
14160 if a.Op != OpAMD64ORQconst {
14163 c := auxIntToInt32(a.AuxInt)
14165 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
14170 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
14173 v.reset(OpAMD64ORQconstmodify)
14174 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
14175 v.Aux = symToAux(sym)
14176 v.AddArg2(ptr, mem)
14179 // match: (MOVQstore [off] {sym} ptr a:(XORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
14180 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
14181 // result: (XORQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
14183 off := auxIntToInt32(v.AuxInt)
14184 sym := auxToSym(v.Aux)
14187 if a.Op != OpAMD64XORQconst {
14190 c := auxIntToInt32(a.AuxInt)
14192 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
14197 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
14200 v.reset(OpAMD64XORQconstmodify)
14201 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
14202 v.Aux = symToAux(sym)
14203 v.AddArg2(ptr, mem)
14206 // match: (MOVQstore [off] {sym} ptr a:(BTCQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
14207 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
14208 // result: (BTCQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
14210 off := auxIntToInt32(v.AuxInt)
14211 sym := auxToSym(v.Aux)
14214 if a.Op != OpAMD64BTCQconst {
14217 c := auxIntToInt8(a.AuxInt)
14219 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
14224 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
14227 v.reset(OpAMD64BTCQconstmodify)
14228 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
14229 v.Aux = symToAux(sym)
14230 v.AddArg2(ptr, mem)
14233 // match: (MOVQstore [off] {sym} ptr a:(BTRQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
14234 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
14235 // result: (BTRQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
14237 off := auxIntToInt32(v.AuxInt)
14238 sym := auxToSym(v.Aux)
14241 if a.Op != OpAMD64BTRQconst {
14244 c := auxIntToInt8(a.AuxInt)
14246 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
14251 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
14254 v.reset(OpAMD64BTRQconstmodify)
14255 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
14256 v.Aux = symToAux(sym)
14257 v.AddArg2(ptr, mem)
14260 // match: (MOVQstore [off] {sym} ptr a:(BTSQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
14261 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
14262 // result: (BTSQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
14264 off := auxIntToInt32(v.AuxInt)
14265 sym := auxToSym(v.Aux)
14268 if a.Op != OpAMD64BTSQconst {
14271 c := auxIntToInt8(a.AuxInt)
14273 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
14278 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
14281 v.reset(OpAMD64BTSQconstmodify)
14282 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
14283 v.Aux = symToAux(sym)
14284 v.AddArg2(ptr, mem)
14287 // match: (MOVQstore [off] {sym} ptr (MOVQf2i val) mem)
14288 // result: (MOVSDstore [off] {sym} ptr val mem)
14290 off := auxIntToInt32(v.AuxInt)
14291 sym := auxToSym(v.Aux)
14293 if v_1.Op != OpAMD64MOVQf2i {
14298 v.reset(OpAMD64MOVSDstore)
14299 v.AuxInt = int32ToAuxInt(off)
14300 v.Aux = symToAux(sym)
14301 v.AddArg3(ptr, val, mem)
14306 func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool {
14310 config := b.Func.Config
14311 // match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
14312 // cond: ValAndOff(sc).canAdd32(off)
14313 // result: (MOVQstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
14315 sc := auxIntToValAndOff(v.AuxInt)
14316 s := auxToSym(v.Aux)
14317 if v_0.Op != OpAMD64ADDQconst {
14320 off := auxIntToInt32(v_0.AuxInt)
14323 if !(ValAndOff(sc).canAdd32(off)) {
14326 v.reset(OpAMD64MOVQstoreconst)
14327 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
14328 v.Aux = symToAux(s)
14329 v.AddArg2(ptr, mem)
14332 // match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
14333 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
14334 // result: (MOVQstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
14336 sc := auxIntToValAndOff(v.AuxInt)
14337 sym1 := auxToSym(v.Aux)
14338 if v_0.Op != OpAMD64LEAQ {
14341 off := auxIntToInt32(v_0.AuxInt)
14342 sym2 := auxToSym(v_0.Aux)
14345 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
14348 v.reset(OpAMD64MOVQstoreconst)
14349 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
14350 v.Aux = symToAux(mergeSym(sym1, sym2))
14351 v.AddArg2(ptr, mem)
14354 // match: (MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem))
14355 // cond: config.useSSE && x.Uses == 1 && c2.Off() + 8 == c.Off() && c.Val() == 0 && c2.Val() == 0 && clobber(x)
14356 // result: (MOVOstorezero [c2.Off()] {s} p mem)
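// Two consecutive 8-byte stores of zero (c2 at the lower offset, c eight
// bytes above) widen to a single 16-byte MOVOstorezero when SSE is available
// and the inner store has no other uses.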
14358 c := auxIntToValAndOff(v.AuxInt)
14359 s := auxToSym(v.Aux)
14362 if x.Op != OpAMD64MOVQstoreconst {
14365 c2 := auxIntToValAndOff(x.AuxInt)
14366 if auxToSym(x.Aux) != s {
14370 if p != x.Args[0] || !(config.useSSE && x.Uses == 1 && c2.Off()+8 == c.Off() && c.Val() == 0 && c2.Val() == 0 && clobber(x)) {
14373 v.reset(OpAMD64MOVOstorezero)
14374 v.AuxInt = int32ToAuxInt(c2.Off())
14375 v.Aux = symToAux(s)
14379 // match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
14380 // cond: canMergeSym(sym1, sym2) && sc.canAdd32(off)
14381 // result: (MOVQstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
14383 sc := auxIntToValAndOff(v.AuxInt)
14384 sym1 := auxToSym(v.Aux)
14385 if v_0.Op != OpAMD64LEAL {
14388 off := auxIntToInt32(v_0.AuxInt)
14389 sym2 := auxToSym(v_0.Aux)
14392 if !(canMergeSym(sym1, sym2) && sc.canAdd32(off)) {
14395 v.reset(OpAMD64MOVQstoreconst)
14396 v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
14397 v.Aux = symToAux(mergeSym(sym1, sym2))
14398 v.AddArg2(ptr, mem)
14401 // match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
14402 // cond: sc.canAdd32(off)
14403 // result: (MOVQstoreconst [sc.addOffset32(off)] {s} ptr mem)
14405 sc := auxIntToValAndOff(v.AuxInt)
14406 s := auxToSym(v.Aux)
14407 if v_0.Op != OpAMD64ADDLconst {
14410 off := auxIntToInt32(v_0.AuxInt)
14413 if !(sc.canAdd32(off)) {
14416 v.reset(OpAMD64MOVQstoreconst)
14417 v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
14418 v.Aux = symToAux(s)
14419 v.AddArg2(ptr, mem)
14424 func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool {
14427 // match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem)
14428 // cond: is32Bit(int64(off1)+int64(off2))
14429 // result: (MOVSDload [off1+off2] {sym} ptr mem)
14431 off1 := auxIntToInt32(v.AuxInt)
14432 sym := auxToSym(v.Aux)
14433 if v_0.Op != OpAMD64ADDQconst {
14436 off2 := auxIntToInt32(v_0.AuxInt)
14439 if !(is32Bit(int64(off1) + int64(off2))) {
14442 v.reset(OpAMD64MOVSDload)
14443 v.AuxInt = int32ToAuxInt(off1 + off2)
14444 v.Aux = symToAux(sym)
14445 v.AddArg2(ptr, mem)
14448 // match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
14449 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
14450 // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
14452 off1 := auxIntToInt32(v.AuxInt)
14453 sym1 := auxToSym(v.Aux)
14454 if v_0.Op != OpAMD64LEAQ {
14457 off2 := auxIntToInt32(v_0.AuxInt)
14458 sym2 := auxToSym(v_0.Aux)
14459 base := v_0.Args[0]
14461 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
14464 v.reset(OpAMD64MOVSDload)
14465 v.AuxInt = int32ToAuxInt(off1 + off2)
14466 v.Aux = symToAux(mergeSym(sym1, sym2))
14467 v.AddArg2(base, mem)
14470 // match: (MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _))
14471 // result: (MOVQi2f val)
14473 off := auxIntToInt32(v.AuxInt)
14474 sym := auxToSym(v.Aux)
14476 if v_1.Op != OpAMD64MOVQstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
14480 if ptr != v_1.Args[0] {
14483 v.reset(OpAMD64MOVQi2f)
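// Note (illustrative): when the 8 bytes being loaded were just stored from an
// integer register at the same [off] {sym} address, the load is forwarded as a
// direct GP-to-XMM move (MOVQi2f) instead of going back through memory.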
14489 func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool {
14493 // match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
14494 // cond: is32Bit(int64(off1)+int64(off2))
14495 // result: (MOVSDstore [off1+off2] {sym} ptr val mem)
14497 off1 := auxIntToInt32(v.AuxInt)
14498 sym := auxToSym(v.Aux)
14499 if v_0.Op != OpAMD64ADDQconst {
14502 off2 := auxIntToInt32(v_0.AuxInt)
14506 if !(is32Bit(int64(off1) + int64(off2))) {
14509 v.reset(OpAMD64MOVSDstore)
14510 v.AuxInt = int32ToAuxInt(off1 + off2)
14511 v.Aux = symToAux(sym)
14512 v.AddArg3(ptr, val, mem)
14515 // match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
14516 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
14517 // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
14519 off1 := auxIntToInt32(v.AuxInt)
14520 sym1 := auxToSym(v.Aux)
14521 if v_0.Op != OpAMD64LEAQ {
14524 off2 := auxIntToInt32(v_0.AuxInt)
14525 sym2 := auxToSym(v_0.Aux)
14526 base := v_0.Args[0]
14529 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
14532 v.reset(OpAMD64MOVSDstore)
14533 v.AuxInt = int32ToAuxInt(off1 + off2)
14534 v.Aux = symToAux(mergeSym(sym1, sym2))
14535 v.AddArg3(base, val, mem)
14538 // match: (MOVSDstore [off] {sym} ptr (MOVQi2f val) mem)
14539 // result: (MOVQstore [off] {sym} ptr val mem)
14541 off := auxIntToInt32(v.AuxInt)
14542 sym := auxToSym(v.Aux)
14544 if v_1.Op != OpAMD64MOVQi2f {
14549 v.reset(OpAMD64MOVQstore)
14550 v.AuxInt = int32ToAuxInt(off)
14551 v.Aux = symToAux(sym)
14552 v.AddArg3(ptr, val, mem)
14557 func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool {
14560 // match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem)
14561 // cond: is32Bit(int64(off1)+int64(off2))
14562 // result: (MOVSSload [off1+off2] {sym} ptr mem)
14564 off1 := auxIntToInt32(v.AuxInt)
14565 sym := auxToSym(v.Aux)
14566 if v_0.Op != OpAMD64ADDQconst {
14569 off2 := auxIntToInt32(v_0.AuxInt)
14572 if !(is32Bit(int64(off1) + int64(off2))) {
14575 v.reset(OpAMD64MOVSSload)
14576 v.AuxInt = int32ToAuxInt(off1 + off2)
14577 v.Aux = symToAux(sym)
14578 v.AddArg2(ptr, mem)
14581 // match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
14582 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
14583 // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
14585 off1 := auxIntToInt32(v.AuxInt)
14586 sym1 := auxToSym(v.Aux)
14587 if v_0.Op != OpAMD64LEAQ {
14590 off2 := auxIntToInt32(v_0.AuxInt)
14591 sym2 := auxToSym(v_0.Aux)
14592 base := v_0.Args[0]
14594 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
14597 v.reset(OpAMD64MOVSSload)
14598 v.AuxInt = int32ToAuxInt(off1 + off2)
14599 v.Aux = symToAux(mergeSym(sym1, sym2))
14600 v.AddArg2(base, mem)
14603 // match: (MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _))
14604 // result: (MOVLi2f val)
14606 off := auxIntToInt32(v.AuxInt)
14607 sym := auxToSym(v.Aux)
14609 if v_1.Op != OpAMD64MOVLstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
14613 if ptr != v_1.Args[0] {
14616 v.reset(OpAMD64MOVLi2f)
14622 func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool {
14626 // match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
14627 // cond: is32Bit(int64(off1)+int64(off2))
14628 // result: (MOVSSstore [off1+off2] {sym} ptr val mem)
14630 off1 := auxIntToInt32(v.AuxInt)
14631 sym := auxToSym(v.Aux)
14632 if v_0.Op != OpAMD64ADDQconst {
14635 off2 := auxIntToInt32(v_0.AuxInt)
14639 if !(is32Bit(int64(off1) + int64(off2))) {
14642 v.reset(OpAMD64MOVSSstore)
14643 v.AuxInt = int32ToAuxInt(off1 + off2)
14644 v.Aux = symToAux(sym)
14645 v.AddArg3(ptr, val, mem)
14648 // match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
14649 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
14650 // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
14652 off1 := auxIntToInt32(v.AuxInt)
14653 sym1 := auxToSym(v.Aux)
14654 if v_0.Op != OpAMD64LEAQ {
14657 off2 := auxIntToInt32(v_0.AuxInt)
14658 sym2 := auxToSym(v_0.Aux)
14659 base := v_0.Args[0]
14662 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
14665 v.reset(OpAMD64MOVSSstore)
14666 v.AuxInt = int32ToAuxInt(off1 + off2)
14667 v.Aux = symToAux(mergeSym(sym1, sym2))
14668 v.AddArg3(base, val, mem)
14671 // match: (MOVSSstore [off] {sym} ptr (MOVLi2f val) mem)
14672 // result: (MOVLstore [off] {sym} ptr val mem)
14674 off := auxIntToInt32(v.AuxInt)
14675 sym := auxToSym(v.Aux)
14677 if v_1.Op != OpAMD64MOVLi2f {
14682 v.reset(OpAMD64MOVLstore)
14683 v.AuxInt = int32ToAuxInt(off)
14684 v.Aux = symToAux(sym)
14685 v.AddArg3(ptr, val, mem)
14690 func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool {
14693 // match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem))
14694 // cond: x.Uses == 1 && clobber(x)
14695 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
14698 if x.Op != OpAMD64MOVWload {
14701 off := auxIntToInt32(x.AuxInt)
14702 sym := auxToSym(x.Aux)
14705 if !(x.Uses == 1 && clobber(x)) {
14709 v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
14711 v0.AuxInt = int32ToAuxInt(off)
14712 v0.Aux = symToAux(sym)
14713 v0.AddArg2(ptr, mem)
14716 // match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem))
14717 // cond: x.Uses == 1 && clobber(x)
14718 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
14721 if x.Op != OpAMD64MOVLload {
14724 off := auxIntToInt32(x.AuxInt)
14725 sym := auxToSym(x.Aux)
14728 if !(x.Uses == 1 && clobber(x)) {
14732 v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
14734 v0.AuxInt = int32ToAuxInt(off)
14735 v0.Aux = symToAux(sym)
14736 v0.AddArg2(ptr, mem)
14739 // match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem))
14740 // cond: x.Uses == 1 && clobber(x)
14741 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
14744 if x.Op != OpAMD64MOVQload {
14747 off := auxIntToInt32(x.AuxInt)
14748 sym := auxToSym(x.Aux)
14751 if !(x.Uses == 1 && clobber(x)) {
14755 v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
14757 v0.AuxInt = int32ToAuxInt(off)
14758 v0.Aux = symToAux(sym)
14759 v0.AddArg2(ptr, mem)
14762 // match: (MOVWQSX (ANDLconst [c] x))
14763 // cond: c & 0x8000 == 0
14764 // result: (ANDLconst [c & 0x7fff] x)
14766 if v_0.Op != OpAMD64ANDLconst {
14769 c := auxIntToInt32(v_0.AuxInt)
14771 if !(c&0x8000 == 0) {
14774 v.reset(OpAMD64ANDLconst)
14775 v.AuxInt = int32ToAuxInt(c & 0x7fff)
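// Illustrative note: if the mask clears bit 15 (c & 0x8000 == 0), the masked
// value is a non-negative 16-bit quantity, so the sign extension is redundant
// and only the low 15 mask bits need to be kept.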
14779 // match: (MOVWQSX (MOVWQSX x))
14780 // result: (MOVWQSX x)
14782 if v_0.Op != OpAMD64MOVWQSX {
14786 v.reset(OpAMD64MOVWQSX)
14790 // match: (MOVWQSX (MOVBQSX x))
14791 // result: (MOVBQSX x)
14793 if v_0.Op != OpAMD64MOVBQSX {
14797 v.reset(OpAMD64MOVBQSX)
14803 func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value) bool {
14806 // match: (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
14807 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
14808 // result: (MOVWQSX x)
14810 off := auxIntToInt32(v.AuxInt)
14811 sym := auxToSym(v.Aux)
14813 if v_1.Op != OpAMD64MOVWstore {
14816 off2 := auxIntToInt32(v_1.AuxInt)
14817 sym2 := auxToSym(v_1.Aux)
14819 ptr2 := v_1.Args[0]
14820 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
14823 v.reset(OpAMD64MOVWQSX)
14827 // match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
14828 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
14829 // result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
14831 off1 := auxIntToInt32(v.AuxInt)
14832 sym1 := auxToSym(v.Aux)
14833 if v_0.Op != OpAMD64LEAQ {
14836 off2 := auxIntToInt32(v_0.AuxInt)
14837 sym2 := auxToSym(v_0.Aux)
14838 base := v_0.Args[0]
14840 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
14843 v.reset(OpAMD64MOVWQSXload)
14844 v.AuxInt = int32ToAuxInt(off1 + off2)
14845 v.Aux = symToAux(mergeSym(sym1, sym2))
14846 v.AddArg2(base, mem)
14851 func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool {
14854 // match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem))
14855 // cond: x.Uses == 1 && clobber(x)
14856 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
14859 if x.Op != OpAMD64MOVWload {
14862 off := auxIntToInt32(x.AuxInt)
14863 sym := auxToSym(x.Aux)
14866 if !(x.Uses == 1 && clobber(x)) {
14870 v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
14872 v0.AuxInt = int32ToAuxInt(off)
14873 v0.Aux = symToAux(sym)
14874 v0.AddArg2(ptr, mem)
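// Note (illustrative): the 16-bit load already yields a zero-extended value in
// the wider register, so the explicit MOVWQZX is dropped and the load is
// re-typed in the block of the original load (the @x.Block result form).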
14877 // match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem))
14878 // cond: x.Uses == 1 && clobber(x)
14879 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
14882 if x.Op != OpAMD64MOVLload {
14885 off := auxIntToInt32(x.AuxInt)
14886 sym := auxToSym(x.Aux)
14889 if !(x.Uses == 1 && clobber(x)) {
14893 v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
14895 v0.AuxInt = int32ToAuxInt(off)
14896 v0.Aux = symToAux(sym)
14897 v0.AddArg2(ptr, mem)
14900 // match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem))
14901 // cond: x.Uses == 1 && clobber(x)
14902 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
14905 if x.Op != OpAMD64MOVQload {
14908 off := auxIntToInt32(x.AuxInt)
14909 sym := auxToSym(x.Aux)
14912 if !(x.Uses == 1 && clobber(x)) {
14916 v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
14918 v0.AuxInt = int32ToAuxInt(off)
14919 v0.Aux = symToAux(sym)
14920 v0.AddArg2(ptr, mem)
14923 // match: (MOVWQZX x)
14924 // cond: zeroUpper48Bits(x,3)
14925 // result: x
14928 if !(zeroUpper48Bits(x, 3)) {
14934 // match: (MOVWQZX (ANDLconst [c] x))
14935 // result: (ANDLconst [c & 0xffff] x)
14937 if v_0.Op != OpAMD64ANDLconst {
14940 c := auxIntToInt32(v_0.AuxInt)
14942 v.reset(OpAMD64ANDLconst)
14943 v.AuxInt = int32ToAuxInt(c & 0xffff)
14947 // match: (MOVWQZX (MOVWQZX x))
14948 // result: (MOVWQZX x)
14950 if v_0.Op != OpAMD64MOVWQZX {
14954 v.reset(OpAMD64MOVWQZX)
14958 // match: (MOVWQZX (MOVBQZX x))
14959 // result: (MOVBQZX x)
14961 if v_0.Op != OpAMD64MOVBQZX {
14965 v.reset(OpAMD64MOVBQZX)
14971 func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool {
14975 config := b.Func.Config
14976 // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
14977 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
14978 // result: (MOVWQZX x)
14980 off := auxIntToInt32(v.AuxInt)
14981 sym := auxToSym(v.Aux)
14983 if v_1.Op != OpAMD64MOVWstore {
14986 off2 := auxIntToInt32(v_1.AuxInt)
14987 sym2 := auxToSym(v_1.Aux)
14989 ptr2 := v_1.Args[0]
14990 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
14993 v.reset(OpAMD64MOVWQZX)
14997 // match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem)
14998 // cond: is32Bit(int64(off1)+int64(off2))
14999 // result: (MOVWload [off1+off2] {sym} ptr mem)
15001 off1 := auxIntToInt32(v.AuxInt)
15002 sym := auxToSym(v.Aux)
15003 if v_0.Op != OpAMD64ADDQconst {
15006 off2 := auxIntToInt32(v_0.AuxInt)
15009 if !(is32Bit(int64(off1) + int64(off2))) {
15012 v.reset(OpAMD64MOVWload)
15013 v.AuxInt = int32ToAuxInt(off1 + off2)
15014 v.Aux = symToAux(sym)
15015 v.AddArg2(ptr, mem)
15018 // match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
15019 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
15020 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
15022 off1 := auxIntToInt32(v.AuxInt)
15023 sym1 := auxToSym(v.Aux)
15024 if v_0.Op != OpAMD64LEAQ {
15027 off2 := auxIntToInt32(v_0.AuxInt)
15028 sym2 := auxToSym(v_0.Aux)
15029 base := v_0.Args[0]
15031 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
15034 v.reset(OpAMD64MOVWload)
15035 v.AuxInt = int32ToAuxInt(off1 + off2)
15036 v.Aux = symToAux(mergeSym(sym1, sym2))
15037 v.AddArg2(base, mem)
15040 // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
15041 // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
15042 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
15044 off1 := auxIntToInt32(v.AuxInt)
15045 sym1 := auxToSym(v.Aux)
15046 if v_0.Op != OpAMD64LEAL {
15049 off2 := auxIntToInt32(v_0.AuxInt)
15050 sym2 := auxToSym(v_0.Aux)
15051 base := v_0.Args[0]
15053 if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
15056 v.reset(OpAMD64MOVWload)
15057 v.AuxInt = int32ToAuxInt(off1 + off2)
15058 v.Aux = symToAux(mergeSym(sym1, sym2))
15059 v.AddArg2(base, mem)
15062 // match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem)
15063 // cond: is32Bit(int64(off1)+int64(off2))
15064 // result: (MOVWload [off1+off2] {sym} ptr mem)
15066 off1 := auxIntToInt32(v.AuxInt)
15067 sym := auxToSym(v.Aux)
15068 if v_0.Op != OpAMD64ADDLconst {
15071 off2 := auxIntToInt32(v_0.AuxInt)
15074 if !(is32Bit(int64(off1) + int64(off2))) {
15077 v.reset(OpAMD64MOVWload)
15078 v.AuxInt = int32ToAuxInt(off1 + off2)
15079 v.Aux = symToAux(sym)
15080 v.AddArg2(ptr, mem)
15083 // match: (MOVWload [off] {sym} (SB) _)
15084 // cond: symIsRO(sym)
15085 // result: (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
15087 off := auxIntToInt32(v.AuxInt)
15088 sym := auxToSym(v.Aux)
15089 if v_0.Op != OpSB || !(symIsRO(sym)) {
15092 v.reset(OpAMD64MOVLconst)
15093 v.AuxInt = int32ToAuxInt(int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
15098 func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool {
15103 typ := &b.Func.Config.Types
15104 // match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem)
15105 // result: (MOVWstore [off] {sym} ptr x mem)
15107 off := auxIntToInt32(v.AuxInt)
15108 sym := auxToSym(v.Aux)
15110 if v_1.Op != OpAMD64MOVWQSX {
15115 v.reset(OpAMD64MOVWstore)
15116 v.AuxInt = int32ToAuxInt(off)
15117 v.Aux = symToAux(sym)
15118 v.AddArg3(ptr, x, mem)
15121 // match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem)
15122 // result: (MOVWstore [off] {sym} ptr x mem)
15124 off := auxIntToInt32(v.AuxInt)
15125 sym := auxToSym(v.Aux)
15127 if v_1.Op != OpAMD64MOVWQZX {
15132 v.reset(OpAMD64MOVWstore)
15133 v.AuxInt = int32ToAuxInt(off)
15134 v.Aux = symToAux(sym)
15135 v.AddArg3(ptr, x, mem)
15138 // match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
15139 // cond: is32Bit(int64(off1)+int64(off2))
15140 // result: (MOVWstore [off1+off2] {sym} ptr val mem)
15142 off1 := auxIntToInt32(v.AuxInt)
15143 sym := auxToSym(v.Aux)
15144 if v_0.Op != OpAMD64ADDQconst {
15147 off2 := auxIntToInt32(v_0.AuxInt)
15151 if !(is32Bit(int64(off1) + int64(off2))) {
15154 v.reset(OpAMD64MOVWstore)
15155 v.AuxInt = int32ToAuxInt(off1 + off2)
15156 v.Aux = symToAux(sym)
15157 v.AddArg3(ptr, val, mem)
15160 // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
15161 // result: (MOVWstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem)
15163 off := auxIntToInt32(v.AuxInt)
15164 sym := auxToSym(v.Aux)
15166 if v_1.Op != OpAMD64MOVLconst {
15169 c := auxIntToInt32(v_1.AuxInt)
15171 v.reset(OpAMD64MOVWstoreconst)
15172 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
15173 v.Aux = symToAux(sym)
15174 v.AddArg2(ptr, mem)
15177 // match: (MOVWstore [off] {sym} ptr (MOVQconst [c]) mem)
15178 // result: (MOVWstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem)
15180 off := auxIntToInt32(v.AuxInt)
15181 sym := auxToSym(v.Aux)
15183 if v_1.Op != OpAMD64MOVQconst {
15186 c := auxIntToInt64(v_1.AuxInt)
15188 v.reset(OpAMD64MOVWstoreconst)
15189 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
15190 v.Aux = symToAux(sym)
15191 v.AddArg2(ptr, mem)
15194 // match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
15195 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
15196 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
15198 off1 := auxIntToInt32(v.AuxInt)
15199 sym1 := auxToSym(v.Aux)
15200 if v_0.Op != OpAMD64LEAQ {
15203 off2 := auxIntToInt32(v_0.AuxInt)
15204 sym2 := auxToSym(v_0.Aux)
15205 base := v_0.Args[0]
15208 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
15211 v.reset(OpAMD64MOVWstore)
15212 v.AuxInt = int32ToAuxInt(off1 + off2)
15213 v.Aux = symToAux(mergeSym(sym1, sym2))
15214 v.AddArg3(base, val, mem)
15217 // match: (MOVWstore [i] {s} p (SHRLconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
15218 // cond: x.Uses == 1 && clobber(x)
15219 // result: (MOVLstore [i-2] {s} p w mem)
15221 i := auxIntToInt32(v.AuxInt)
15222 s := auxToSym(v.Aux)
15224 if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 16 {
15229 if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
15233 if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
15236 v.reset(OpAMD64MOVLstore)
15237 v.AuxInt = int32ToAuxInt(i - 2)
15238 v.Aux = symToAux(s)
15239 v.AddArg3(p, w, mem)
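// Illustrative note: the inner store writes w's low 16 bits at [i-2] and the
// outer store writes bits 16..31 (SHRLconst [16] w) at [i]; together they form
// one contiguous 32-bit value, so the pair collapses into a MOVLstore at [i-2].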
15242 // match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
15243 // cond: x.Uses == 1 && clobber(x)
15244 // result: (MOVLstore [i-2] {s} p w mem)
15246 i := auxIntToInt32(v.AuxInt)
15247 s := auxToSym(v.Aux)
15249 if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 16 {
15254 if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
15258 if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
15261 v.reset(OpAMD64MOVLstore)
15262 v.AuxInt = int32ToAuxInt(i - 2)
15263 v.Aux = symToAux(s)
15264 v.AddArg3(p, w, mem)
15267 // match: (MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem))
15268 // cond: x.Uses == 1 && clobber(x)
15269 // result: (MOVLstore [i-2] {s} p w0 mem)
15271 i := auxIntToInt32(v.AuxInt)
15272 s := auxToSym(v.Aux)
15274 if v_1.Op != OpAMD64SHRLconst {
15277 j := auxIntToInt8(v_1.AuxInt)
15280 if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
15284 if p != x.Args[0] {
15288 if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
15291 v.reset(OpAMD64MOVLstore)
15292 v.AuxInt = int32ToAuxInt(i - 2)
15293 v.Aux = symToAux(s)
15294 v.AddArg3(p, w0, mem)
15297 // match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem))
15298 // cond: x.Uses == 1 && clobber(x)
15299 // result: (MOVLstore [i-2] {s} p w0 mem)
15301 i := auxIntToInt32(v.AuxInt)
15302 s := auxToSym(v.Aux)
15304 if v_1.Op != OpAMD64SHRQconst {
15307 j := auxIntToInt8(v_1.AuxInt)
15310 if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
15314 if p != x.Args[0] {
15318 if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
15321 v.reset(OpAMD64MOVLstore)
15322 v.AuxInt = int32ToAuxInt(i - 2)
15323 v.Aux = symToAux(s)
15324 v.AddArg3(p, w0, mem)
15327 // match: (MOVWstore [i] {s} p1 (SHRLconst [16] w) x:(MOVWstore [i] {s} p0 w mem))
15328 // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)
15329 // result: (MOVLstore [i] {s} p0 w mem)
15331 i := auxIntToInt32(v.AuxInt)
15332 s := auxToSym(v.Aux)
15334 if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 16 {
15339 if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
15344 if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
15347 v.reset(OpAMD64MOVLstore)
15348 v.AuxInt = int32ToAuxInt(i)
15349 v.Aux = symToAux(s)
15350 v.AddArg3(p0, w, mem)
15353 // match: (MOVWstore [i] {s} p1 (SHRQconst [16] w) x:(MOVWstore [i] {s} p0 w mem))
15354 // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)
15355 // result: (MOVLstore [i] {s} p0 w mem)
15357 i := auxIntToInt32(v.AuxInt)
15358 s := auxToSym(v.Aux)
15360 if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 16 {
15365 if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
15370 if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
15373 v.reset(OpAMD64MOVLstore)
15374 v.AuxInt = int32ToAuxInt(i)
15375 v.Aux = symToAux(s)
15376 v.AddArg3(p0, w, mem)
15379 // match: (MOVWstore [i] {s} p1 (SHRLconst [j] w) x:(MOVWstore [i] {s} p0 w0:(SHRLconst [j-16] w) mem))
15380 // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)
15381 // result: (MOVLstore [i] {s} p0 w0 mem)
15383 i := auxIntToInt32(v.AuxInt)
15384 s := auxToSym(v.Aux)
15386 if v_1.Op != OpAMD64SHRLconst {
15389 j := auxIntToInt8(v_1.AuxInt)
15392 if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
15398 if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
15401 v.reset(OpAMD64MOVLstore)
15402 v.AuxInt = int32ToAuxInt(i)
15403 v.Aux = symToAux(s)
15404 v.AddArg3(p0, w0, mem)
15407 // match: (MOVWstore [i] {s} p1 (SHRQconst [j] w) x:(MOVWstore [i] {s} p0 w0:(SHRQconst [j-16] w) mem))
15408 // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)
15409 // result: (MOVLstore [i] {s} p0 w0 mem)
15411 i := auxIntToInt32(v.AuxInt)
15412 s := auxToSym(v.Aux)
15414 if v_1.Op != OpAMD64SHRQconst {
15417 j := auxIntToInt8(v_1.AuxInt)
15420 if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
15426 if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
15429 v.reset(OpAMD64MOVLstore)
15430 v.AuxInt = int32ToAuxInt(i)
15431 v.Aux = symToAux(s)
15432 v.AddArg3(p0, w0, mem)
15435 // match: (MOVWstore [i] {s} p x1:(MOVWload [j] {s2} p2 mem) mem2:(MOVWstore [i-2] {s} p x2:(MOVWload [j-2] {s2} p2 mem) mem))
15436 // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)
15437 // result: (MOVLstore [i-2] {s} p (MOVLload [j-2] {s2} p2 mem) mem)
15439 i := auxIntToInt32(v.AuxInt)
15440 s := auxToSym(v.Aux)
15443 if x1.Op != OpAMD64MOVWload {
15446 j := auxIntToInt32(x1.AuxInt)
15447 s2 := auxToSym(x1.Aux)
15451 if mem2.Op != OpAMD64MOVWstore || auxIntToInt32(mem2.AuxInt) != i-2 || auxToSym(mem2.Aux) != s {
15455 if p != mem2.Args[0] {
15459 if x2.Op != OpAMD64MOVWload || auxIntToInt32(x2.AuxInt) != j-2 || auxToSym(x2.Aux) != s2 {
15463 if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)) {
15466 v.reset(OpAMD64MOVLstore)
15467 v.AuxInt = int32ToAuxInt(i - 2)
15468 v.Aux = symToAux(s)
15469 v0 := b.NewValue0(x2.Pos, OpAMD64MOVLload, typ.UInt32)
15470 v0.AuxInt = int32ToAuxInt(j - 2)
15471 v0.Aux = symToAux(s2)
15472 v0.AddArg2(p2, mem)
15473 v.AddArg3(p, v0, mem)
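// Note (illustrative): two adjacent 2-byte copies (loads at [j-2]/[j], stores
// at [i-2]/[i]) through the same memory state are fused into a single 4-byte
// load and store, provided the intermediate values have no other uses.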
15476 // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
15477 // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
15478 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
15480 off1 := auxIntToInt32(v.AuxInt)
15481 sym1 := auxToSym(v.Aux)
15482 if v_0.Op != OpAMD64LEAL {
15485 off2 := auxIntToInt32(v_0.AuxInt)
15486 sym2 := auxToSym(v_0.Aux)
15487 base := v_0.Args[0]
15490 if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
15493 v.reset(OpAMD64MOVWstore)
15494 v.AuxInt = int32ToAuxInt(off1 + off2)
15495 v.Aux = symToAux(mergeSym(sym1, sym2))
15496 v.AddArg3(base, val, mem)
15499 // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
15500 // cond: is32Bit(int64(off1)+int64(off2))
15501 // result: (MOVWstore [off1+off2] {sym} ptr val mem)
15503 off1 := auxIntToInt32(v.AuxInt)
15504 sym := auxToSym(v.Aux)
15505 if v_0.Op != OpAMD64ADDLconst {
15508 off2 := auxIntToInt32(v_0.AuxInt)
15512 if !(is32Bit(int64(off1) + int64(off2))) {
15515 v.reset(OpAMD64MOVWstore)
15516 v.AuxInt = int32ToAuxInt(off1 + off2)
15517 v.Aux = symToAux(sym)
15518 v.AddArg3(ptr, val, mem)
15523 func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool {
15526 // match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
15527 // cond: ValAndOff(sc).canAdd32(off)
15528 // result: (MOVWstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
15530 sc := auxIntToValAndOff(v.AuxInt)
15531 s := auxToSym(v.Aux)
15532 if v_0.Op != OpAMD64ADDQconst {
15535 off := auxIntToInt32(v_0.AuxInt)
15538 if !(ValAndOff(sc).canAdd32(off)) {
15541 v.reset(OpAMD64MOVWstoreconst)
15542 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
15543 v.Aux = symToAux(s)
15544 v.AddArg2(ptr, mem)
15547 // match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
15548 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
15549 // result: (MOVWstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
15551 sc := auxIntToValAndOff(v.AuxInt)
15552 sym1 := auxToSym(v.Aux)
15553 if v_0.Op != OpAMD64LEAQ {
15556 off := auxIntToInt32(v_0.AuxInt)
15557 sym2 := auxToSym(v_0.Aux)
15560 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
15563 v.reset(OpAMD64MOVWstoreconst)
15564 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
15565 v.Aux = symToAux(mergeSym(sym1, sym2))
15566 v.AddArg2(ptr, mem)
15569 // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
15570 // cond: x.Uses == 1 && a.Off() + 2 == c.Off() && clobber(x)
15571 // result: (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem)
15573 c := auxIntToValAndOff(v.AuxInt)
15574 s := auxToSym(v.Aux)
15577 if x.Op != OpAMD64MOVWstoreconst {
15580 a := auxIntToValAndOff(x.AuxInt)
15581 if auxToSym(x.Aux) != s {
15585 if p != x.Args[0] || !(x.Uses == 1 && a.Off()+2 == c.Off() && clobber(x)) {
15588 v.reset(OpAMD64MOVLstoreconst)
15589 v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xffff|c.Val()<<16, a.Off()))
15590 v.Aux = symToAux(s)
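// Illustrative note: two 2-byte constant stores at a.Off() and a.Off()+2 are
// merged into one 4-byte store with a.Val() in the low half and c.Val() in the
// high half; e.g. 0x1111 at off and 0x2222 at off+2 become one MOVLstoreconst
// of 0x22221111 at off (little-endian layout). The next rule matches the same
// pair with the two stores encountered in the opposite order.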
15594 // match: (MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem))
15595 // cond: x.Uses == 1 && a.Off() + 2 == c.Off() && clobber(x)
15596 // result: (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem)
15598 a := auxIntToValAndOff(v.AuxInt)
15599 s := auxToSym(v.Aux)
15602 if x.Op != OpAMD64MOVWstoreconst {
15605 c := auxIntToValAndOff(x.AuxInt)
15606 if auxToSym(x.Aux) != s {
15610 if p != x.Args[0] || !(x.Uses == 1 && a.Off()+2 == c.Off() && clobber(x)) {
15613 v.reset(OpAMD64MOVLstoreconst)
15614 v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xffff|c.Val()<<16, a.Off()))
15615 v.Aux = symToAux(s)
15619 // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
15620 // cond: canMergeSym(sym1, sym2) && sc.canAdd32(off)
15621 // result: (MOVWstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
15623 sc := auxIntToValAndOff(v.AuxInt)
15624 sym1 := auxToSym(v.Aux)
15625 if v_0.Op != OpAMD64LEAL {
15628 off := auxIntToInt32(v_0.AuxInt)
15629 sym2 := auxToSym(v_0.Aux)
15632 if !(canMergeSym(sym1, sym2) && sc.canAdd32(off)) {
15635 v.reset(OpAMD64MOVWstoreconst)
15636 v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
15637 v.Aux = symToAux(mergeSym(sym1, sym2))
15638 v.AddArg2(ptr, mem)
15641 // match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
15642 // cond: sc.canAdd32(off)
15643 // result: (MOVWstoreconst [sc.addOffset32(off)] {s} ptr mem)
15645 sc := auxIntToValAndOff(v.AuxInt)
15646 s := auxToSym(v.Aux)
15647 if v_0.Op != OpAMD64ADDLconst {
15650 off := auxIntToInt32(v_0.AuxInt)
15653 if !(sc.canAdd32(off)) {
15656 v.reset(OpAMD64MOVWstoreconst)
15657 v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
15658 v.Aux = symToAux(s)
15659 v.AddArg2(ptr, mem)
15664 func rewriteValueAMD64_OpAMD64MULL(v *Value) bool {
15667 // match: (MULL x (MOVLconst [c]))
15668 // result: (MULLconst [c] x)
15670 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
15672 if v_1.Op != OpAMD64MOVLconst {
15675 c := auxIntToInt32(v_1.AuxInt)
15676 v.reset(OpAMD64MULLconst)
15677 v.AuxInt = int32ToAuxInt(c)
15685 func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
15688 // match: (MULLconst [c] (MULLconst [d] x))
15689 // result: (MULLconst [c * d] x)
15691 c := auxIntToInt32(v.AuxInt)
15692 if v_0.Op != OpAMD64MULLconst {
15695 d := auxIntToInt32(v_0.AuxInt)
15697 v.reset(OpAMD64MULLconst)
15698 v.AuxInt = int32ToAuxInt(c * d)
15702 // match: (MULLconst [-9] x)
15703 // result: (NEGL (LEAL8 <v.Type> x x))
15705 if auxIntToInt32(v.AuxInt) != -9 {
15709 v.reset(OpAMD64NEGL)
15710 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
15715 // match: (MULLconst [-5] x)
15716 // result: (NEGL (LEAL4 <v.Type> x x))
15718 if auxIntToInt32(v.AuxInt) != -5 {
15722 v.reset(OpAMD64NEGL)
15723 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
15728 // match: (MULLconst [-3] x)
15729 // result: (NEGL (LEAL2 <v.Type> x x))
15731 if auxIntToInt32(v.AuxInt) != -3 {
15735 v.reset(OpAMD64NEGL)
15736 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
15741 // match: (MULLconst [-1] x)
15742 // result: (NEGL x)
15744 if auxIntToInt32(v.AuxInt) != -1 {
15748 v.reset(OpAMD64NEGL)
15752 // match: (MULLconst [ 0] _)
15753 // result: (MOVLconst [0])
15755 if auxIntToInt32(v.AuxInt) != 0 {
15758 v.reset(OpAMD64MOVLconst)
15759 v.AuxInt = int32ToAuxInt(0)
15762 // match: (MULLconst [ 1] x)
15763 // result: x
15765 if auxIntToInt32(v.AuxInt) != 1 {
15772 // match: (MULLconst [ 3] x)
15773 // result: (LEAL2 x x)
15775 if auxIntToInt32(v.AuxInt) != 3 {
15779 v.reset(OpAMD64LEAL2)
15783 // match: (MULLconst [ 5] x)
15784 // result: (LEAL4 x x)
15786 if auxIntToInt32(v.AuxInt) != 5 {
15790 v.reset(OpAMD64LEAL4)
15794 // match: (MULLconst [ 7] x)
15795 // result: (LEAL2 x (LEAL2 <v.Type> x x))
15797 if auxIntToInt32(v.AuxInt) != 7 {
15801 v.reset(OpAMD64LEAL2)
15802 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
15807 // match: (MULLconst [ 9] x)
15808 // result: (LEAL8 x x)
15810 if auxIntToInt32(v.AuxInt) != 9 {
15814 v.reset(OpAMD64LEAL8)
15818 // match: (MULLconst [11] x)
15819 // result: (LEAL2 x (LEAL4 <v.Type> x x))
15821 if auxIntToInt32(v.AuxInt) != 11 {
15825 v.reset(OpAMD64LEAL2)
15826 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
15831 // match: (MULLconst [13] x)
15832 // result: (LEAL4 x (LEAL2 <v.Type> x x))
15834 if auxIntToInt32(v.AuxInt) != 13 {
15838 v.reset(OpAMD64LEAL4)
15839 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
15844 // match: (MULLconst [19] x)
15845 // result: (LEAL2 x (LEAL8 <v.Type> x x))
15847 if auxIntToInt32(v.AuxInt) != 19 {
15851 v.reset(OpAMD64LEAL2)
15852 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
15857 // match: (MULLconst [21] x)
15858 // result: (LEAL4 x (LEAL4 <v.Type> x x))
15860 if auxIntToInt32(v.AuxInt) != 21 {
15864 v.reset(OpAMD64LEAL4)
15865 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
15870 // match: (MULLconst [25] x)
15871 // result: (LEAL8 x (LEAL2 <v.Type> x x))
15873 if auxIntToInt32(v.AuxInt) != 25 {
15877 v.reset(OpAMD64LEAL8)
15878 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
15883 // match: (MULLconst [27] x)
15884 // result: (LEAL8 (LEAL2 <v.Type> x x) (LEAL2 <v.Type> x x))
15886 if auxIntToInt32(v.AuxInt) != 27 {
15890 v.reset(OpAMD64LEAL8)
15891 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
15896 // match: (MULLconst [37] x)
15897 // result: (LEAL4 x (LEAL8 <v.Type> x x))
15899 if auxIntToInt32(v.AuxInt) != 37 {
15903 v.reset(OpAMD64LEAL4)
15904 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
15909 // match: (MULLconst [41] x)
15910 // result: (LEAL8 x (LEAL4 <v.Type> x x))
15912 if auxIntToInt32(v.AuxInt) != 41 {
15916 v.reset(OpAMD64LEAL8)
15917 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
15922 // match: (MULLconst [45] x)
15923 // result: (LEAL8 (LEAL4 <v.Type> x x) (LEAL4 <v.Type> x x))
15925 if auxIntToInt32(v.AuxInt) != 45 {
15929 v.reset(OpAMD64LEAL8)
15930 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
15935 // match: (MULLconst [73] x)
15936 // result: (LEAL8 x (LEAL8 <v.Type> x x))
15938 if auxIntToInt32(v.AuxInt) != 73 {
15942 v.reset(OpAMD64LEAL8)
15943 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
15948 // match: (MULLconst [81] x)
15949 // result: (LEAL8 (LEAL8 <v.Type> x x) (LEAL8 <v.Type> x x))
15951 if auxIntToInt32(v.AuxInt) != 81 {
15955 v.reset(OpAMD64LEAL8)
15956 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
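// Illustrative note: the constant-multiply rules above use scaled LEA forms
// (with the usual meaning LEALk x y = x + k*y, k in {2,4,8}) to avoid an IMUL
// for small constants; e.g. x*81 becomes (LEAL8 (LEAL8 x x) (LEAL8 x x)), that
// is (x*9)*9. The rules that follow cover constants of the form 2^n-1, 2^n+1,
// 2^n+2, 2^n+4, 2^n+8, and 3*2^n, 5*2^n, 9*2^n using a shift plus one LEA/SUB.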
15961 // match: (MULLconst [c] x)
15962 // cond: isPowerOfTwo64(int64(c)+1) && c >= 15
15963 // result: (SUBL (SHLLconst <v.Type> [int8(log64(int64(c)+1))] x) x)
15965 c := auxIntToInt32(v.AuxInt)
15967 if !(isPowerOfTwo64(int64(c)+1) && c >= 15) {
15970 v.reset(OpAMD64SUBL)
15971 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
15972 v0.AuxInt = int8ToAuxInt(int8(log64(int64(c) + 1)))
15977 // match: (MULLconst [c] x)
15978 // cond: isPowerOfTwo32(c-1) && c >= 17
15979 // result: (LEAL1 (SHLLconst <v.Type> [int8(log32(c-1))] x) x)
15981 c := auxIntToInt32(v.AuxInt)
15983 if !(isPowerOfTwo32(c-1) && c >= 17) {
15986 v.reset(OpAMD64LEAL1)
15987 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
15988 v0.AuxInt = int8ToAuxInt(int8(log32(c - 1)))
15993 // match: (MULLconst [c] x)
15994 // cond: isPowerOfTwo32(c-2) && c >= 34
15995 // result: (LEAL2 (SHLLconst <v.Type> [int8(log32(c-2))] x) x)
15997 c := auxIntToInt32(v.AuxInt)
15999 if !(isPowerOfTwo32(c-2) && c >= 34) {
16002 v.reset(OpAMD64LEAL2)
16003 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
16004 v0.AuxInt = int8ToAuxInt(int8(log32(c - 2)))
16009 // match: (MULLconst [c] x)
16010 // cond: isPowerOfTwo32(c-4) && c >= 68
16011 // result: (LEAL4 (SHLLconst <v.Type> [int8(log32(c-4))] x) x)
16013 c := auxIntToInt32(v.AuxInt)
16015 if !(isPowerOfTwo32(c-4) && c >= 68) {
16018 v.reset(OpAMD64LEAL4)
16019 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
16020 v0.AuxInt = int8ToAuxInt(int8(log32(c - 4)))
16025 // match: (MULLconst [c] x)
16026 // cond: isPowerOfTwo32(c-8) && c >= 136
16027 // result: (LEAL8 (SHLLconst <v.Type> [int8(log32(c-8))] x) x)
16029 c := auxIntToInt32(v.AuxInt)
16031 if !(isPowerOfTwo32(c-8) && c >= 136) {
16034 v.reset(OpAMD64LEAL8)
16035 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
16036 v0.AuxInt = int8ToAuxInt(int8(log32(c - 8)))
16041 // match: (MULLconst [c] x)
16042 // cond: c%3 == 0 && isPowerOfTwo32(c/3)
16043 // result: (SHLLconst [int8(log32(c/3))] (LEAL2 <v.Type> x x))
16045 c := auxIntToInt32(v.AuxInt)
16047 if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
16050 v.reset(OpAMD64SHLLconst)
16051 v.AuxInt = int8ToAuxInt(int8(log32(c / 3)))
16052 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
16057 // match: (MULLconst [c] x)
16058 // cond: c%5 == 0 && isPowerOfTwo32(c/5)
16059 // result: (SHLLconst [int8(log32(c/5))] (LEAL4 <v.Type> x x))
16061 c := auxIntToInt32(v.AuxInt)
16063 if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
16066 v.reset(OpAMD64SHLLconst)
16067 v.AuxInt = int8ToAuxInt(int8(log32(c / 5)))
16068 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
16073 // match: (MULLconst [c] x)
16074 // cond: c%9 == 0 && isPowerOfTwo32(c/9)
16075 // result: (SHLLconst [int8(log32(c/9))] (LEAL8 <v.Type> x x))
16077 c := auxIntToInt32(v.AuxInt)
16079 if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
16082 v.reset(OpAMD64SHLLconst)
16083 v.AuxInt = int8ToAuxInt(int8(log32(c / 9)))
16084 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
16089 // match: (MULLconst [c] (MOVLconst [d]))
16090 // result: (MOVLconst [c*d])
16092 c := auxIntToInt32(v.AuxInt)
16093 if v_0.Op != OpAMD64MOVLconst {
16096 d := auxIntToInt32(v_0.AuxInt)
16097 v.reset(OpAMD64MOVLconst)
16098 v.AuxInt = int32ToAuxInt(c * d)
16103 func rewriteValueAMD64_OpAMD64MULQ(v *Value) bool {
16106 // match: (MULQ x (MOVQconst [c]))
16107 // cond: is32Bit(c)
16108 // result: (MULQconst [int32(c)] x)
16110 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
16112 if v_1.Op != OpAMD64MOVQconst {
16115 c := auxIntToInt64(v_1.AuxInt)
16119 v.reset(OpAMD64MULQconst)
16120 v.AuxInt = int32ToAuxInt(int32(c))
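// Note (illustrative): the folded multiplier is stored in an int32 AuxInt, so
// only constants that fit in a sign-extended 32-bit immediate (is32Bit(c)) are
// turned into MULQconst; larger constants keep the two-register MULQ.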
16128 func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
16131 // match: (MULQconst [c] (MULQconst [d] x))
16132 // cond: is32Bit(int64(c)*int64(d))
16133 // result: (MULQconst [c * d] x)
16135 c := auxIntToInt32(v.AuxInt)
16136 if v_0.Op != OpAMD64MULQconst {
16139 d := auxIntToInt32(v_0.AuxInt)
16141 if !(is32Bit(int64(c) * int64(d))) {
16144 v.reset(OpAMD64MULQconst)
16145 v.AuxInt = int32ToAuxInt(c * d)
16149 // match: (MULQconst [-9] x)
16150 // result: (NEGQ (LEAQ8 <v.Type> x x))
16152 if auxIntToInt32(v.AuxInt) != -9 {
16156 v.reset(OpAMD64NEGQ)
16157 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
16162 // match: (MULQconst [-5] x)
16163 // result: (NEGQ (LEAQ4 <v.Type> x x))
16165 if auxIntToInt32(v.AuxInt) != -5 {
16169 v.reset(OpAMD64NEGQ)
16170 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
16175 // match: (MULQconst [-3] x)
16176 // result: (NEGQ (LEAQ2 <v.Type> x x))
16178 if auxIntToInt32(v.AuxInt) != -3 {
16182 v.reset(OpAMD64NEGQ)
16183 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
16188 // match: (MULQconst [-1] x)
16189 // result: (NEGQ x)
16191 if auxIntToInt32(v.AuxInt) != -1 {
16195 v.reset(OpAMD64NEGQ)
16199 // match: (MULQconst [ 0] _)
16200 // result: (MOVQconst [0])
16202 if auxIntToInt32(v.AuxInt) != 0 {
16205 v.reset(OpAMD64MOVQconst)
16206 v.AuxInt = int64ToAuxInt(0)
16209 // match: (MULQconst [ 1] x)
16210 // result: x
16212 if auxIntToInt32(v.AuxInt) != 1 {
16219 // match: (MULQconst [ 3] x)
16220 // result: (LEAQ2 x x)
16222 if auxIntToInt32(v.AuxInt) != 3 {
16226 v.reset(OpAMD64LEAQ2)
16230 // match: (MULQconst [ 5] x)
16231 // result: (LEAQ4 x x)
16233 if auxIntToInt32(v.AuxInt) != 5 {
16237 v.reset(OpAMD64LEAQ4)
16241 // match: (MULQconst [ 7] x)
16242 // result: (LEAQ2 x (LEAQ2 <v.Type> x x))
16244 if auxIntToInt32(v.AuxInt) != 7 {
16248 v.reset(OpAMD64LEAQ2)
16249 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
16254 // match: (MULQconst [ 9] x)
16255 // result: (LEAQ8 x x)
16257 if auxIntToInt32(v.AuxInt) != 9 {
16261 v.reset(OpAMD64LEAQ8)
16265 // match: (MULQconst [11] x)
16266 // result: (LEAQ2 x (LEAQ4 <v.Type> x x))
16268 if auxIntToInt32(v.AuxInt) != 11 {
16272 v.reset(OpAMD64LEAQ2)
16273 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
16278 // match: (MULQconst [13] x)
16279 // result: (LEAQ4 x (LEAQ2 <v.Type> x x))
16281 if auxIntToInt32(v.AuxInt) != 13 {
16285 v.reset(OpAMD64LEAQ4)
16286 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
16291 // match: (MULQconst [19] x)
16292 // result: (LEAQ2 x (LEAQ8 <v.Type> x x))
16294 if auxIntToInt32(v.AuxInt) != 19 {
16298 v.reset(OpAMD64LEAQ2)
16299 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
16304 // match: (MULQconst [21] x)
16305 // result: (LEAQ4 x (LEAQ4 <v.Type> x x))
16307 if auxIntToInt32(v.AuxInt) != 21 {
16311 v.reset(OpAMD64LEAQ4)
16312 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
16317 // match: (MULQconst [25] x)
16318 // result: (LEAQ8 x (LEAQ2 <v.Type> x x))
16320 if auxIntToInt32(v.AuxInt) != 25 {
16324 v.reset(OpAMD64LEAQ8)
16325 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
16330 // match: (MULQconst [27] x)
16331 // result: (LEAQ8 (LEAQ2 <v.Type> x x) (LEAQ2 <v.Type> x x))
16333 if auxIntToInt32(v.AuxInt) != 27 {
16337 v.reset(OpAMD64LEAQ8)
16338 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
16343 // match: (MULQconst [37] x)
16344 // result: (LEAQ4 x (LEAQ8 <v.Type> x x))
16346 if auxIntToInt32(v.AuxInt) != 37 {
16350 v.reset(OpAMD64LEAQ4)
16351 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
16356 // match: (MULQconst [41] x)
16357 // result: (LEAQ8 x (LEAQ4 <v.Type> x x))
16359 if auxIntToInt32(v.AuxInt) != 41 {
16363 v.reset(OpAMD64LEAQ8)
16364 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
16369 // match: (MULQconst [45] x)
16370 // result: (LEAQ8 (LEAQ4 <v.Type> x x) (LEAQ4 <v.Type> x x))
16372 if auxIntToInt32(v.AuxInt) != 45 {
16376 v.reset(OpAMD64LEAQ8)
16377 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
16382 // match: (MULQconst [73] x)
16383 // result: (LEAQ8 x (LEAQ8 <v.Type> x x))
16385 if auxIntToInt32(v.AuxInt) != 73 {
16389 v.reset(OpAMD64LEAQ8)
16390 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
16395 // match: (MULQconst [81] x)
16396 // result: (LEAQ8 (LEAQ8 <v.Type> x x) (LEAQ8 <v.Type> x x))
16398 if auxIntToInt32(v.AuxInt) != 81 {
16402 v.reset(OpAMD64LEAQ8)
16403 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
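// Illustrative note: for constants c where c+1 is a power of two (and
// c >= 15), the rule below turns the multiply into a shift and subtract,
// e.g. x*15 becomes (SUBQ (SHLQconst [4] x) x), i.e. (x<<4) - x.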
16408 // match: (MULQconst [c] x)
16409 // cond: isPowerOfTwo64(int64(c)+1) && c >= 15
16410 // result: (SUBQ (SHLQconst <v.Type> [int8(log64(int64(c)+1))] x) x)
16412 c := auxIntToInt32(v.AuxInt)
16414 if !(isPowerOfTwo64(int64(c)+1) && c >= 15) {
16417 v.reset(OpAMD64SUBQ)
16418 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
16419 v0.AuxInt = int8ToAuxInt(int8(log64(int64(c) + 1)))
16424 // match: (MULQconst [c] x)
16425 // cond: isPowerOfTwo32(c-1) && c >= 17
16426 // result: (LEAQ1 (SHLQconst <v.Type> [int8(log32(c-1))] x) x)
16428 c := auxIntToInt32(v.AuxInt)
16430 if !(isPowerOfTwo32(c-1) && c >= 17) {
16433 v.reset(OpAMD64LEAQ1)
16434 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
16435 v0.AuxInt = int8ToAuxInt(int8(log32(c - 1)))
16440 // match: (MULQconst [c] x)
16441 // cond: isPowerOfTwo32(c-2) && c >= 34
16442 // result: (LEAQ2 (SHLQconst <v.Type> [int8(log32(c-2))] x) x)
16444 c := auxIntToInt32(v.AuxInt)
16446 if !(isPowerOfTwo32(c-2) && c >= 34) {
16449 v.reset(OpAMD64LEAQ2)
16450 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
16451 v0.AuxInt = int8ToAuxInt(int8(log32(c - 2)))
16456 // match: (MULQconst [c] x)
16457 // cond: isPowerOfTwo32(c-4) && c >= 68
16458 // result: (LEAQ4 (SHLQconst <v.Type> [int8(log32(c-4))] x) x)
16460 c := auxIntToInt32(v.AuxInt)
16462 if !(isPowerOfTwo32(c-4) && c >= 68) {
16465 v.reset(OpAMD64LEAQ4)
16466 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
16467 v0.AuxInt = int8ToAuxInt(int8(log32(c - 4)))
16472 // match: (MULQconst [c] x)
16473 // cond: isPowerOfTwo32(c-8) && c >= 136
16474 // result: (LEAQ8 (SHLQconst <v.Type> [int8(log32(c-8))] x) x)
16476 c := auxIntToInt32(v.AuxInt)
16478 if !(isPowerOfTwo32(c-8) && c >= 136) {
16481 v.reset(OpAMD64LEAQ8)
16482 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
16483 v0.AuxInt = int8ToAuxInt(int8(log32(c - 8)))
16488 // match: (MULQconst [c] x)
16489 // cond: c%3 == 0 && isPowerOfTwo32(c/3)
16490 // result: (SHLQconst [int8(log32(c/3))] (LEAQ2 <v.Type> x x))
16492 c := auxIntToInt32(v.AuxInt)
16494 if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
16497 v.reset(OpAMD64SHLQconst)
16498 v.AuxInt = int8ToAuxInt(int8(log32(c / 3)))
16499 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
16504 // match: (MULQconst [c] x)
16505 // cond: c%5 == 0 && isPowerOfTwo32(c/5)
16506 // result: (SHLQconst [int8(log32(c/5))] (LEAQ4 <v.Type> x x))
16508 c := auxIntToInt32(v.AuxInt)
16510 if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
16513 v.reset(OpAMD64SHLQconst)
16514 v.AuxInt = int8ToAuxInt(int8(log32(c / 5)))
16515 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
16520 // match: (MULQconst [c] x)
16521 // cond: c%9 == 0 && isPowerOfTwo32(c/9)
16522 // result: (SHLQconst [int8(log32(c/9))] (LEAQ8 <v.Type> x x))
16524 c := auxIntToInt32(v.AuxInt)
16526 if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
16529 v.reset(OpAMD64SHLQconst)
16530 v.AuxInt = int8ToAuxInt(int8(log32(c / 9)))
16531 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
16536 // match: (MULQconst [c] (MOVQconst [d]))
16537 // result: (MOVQconst [int64(c)*d])
16539 c := auxIntToInt32(v.AuxInt)
16540 if v_0.Op != OpAMD64MOVQconst {
16543 d := auxIntToInt64(v_0.AuxInt)
16544 v.reset(OpAMD64MOVQconst)
16545 v.AuxInt = int64ToAuxInt(int64(c) * d)
16548 // match: (MULQconst [c] (NEGQ x))
16549 // cond: c != -(1<<31)
16550 // result: (MULQconst [-c] x)
16552 c := auxIntToInt32(v.AuxInt)
16553 if v_0.Op != OpAMD64NEGQ {
16557 if !(c != -(1 << 31)) {
16560 v.reset(OpAMD64MULQconst)
16561 v.AuxInt = int32ToAuxInt(-c)
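// Note (illustrative): the guard c != -(1<<31) above keeps the negated
// constant representable, since negating the minimum int32 would not fit back
// into the int32 AuxInt.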
16567 func rewriteValueAMD64_OpAMD64MULSD(v *Value) bool {
16570 // match: (MULSD x l:(MOVSDload [off] {sym} ptr mem))
16571 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
16572 // result: (MULSDload x [off] {sym} ptr mem)
16574 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
16577 if l.Op != OpAMD64MOVSDload {
16580 off := auxIntToInt32(l.AuxInt)
16581 sym := auxToSym(l.Aux)
16584 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
16587 v.reset(OpAMD64MULSDload)
16588 v.AuxInt = int32ToAuxInt(off)
16589 v.Aux = symToAux(sym)
16590 v.AddArg3(x, ptr, mem)
16597 func rewriteValueAMD64_OpAMD64MULSDload(v *Value) bool {
16602 typ := &b.Func.Config.Types
16603 // match: (MULSDload [off1] {sym} val (ADDQconst [off2] base) mem)
16604 // cond: is32Bit(int64(off1)+int64(off2))
16605 // result: (MULSDload [off1+off2] {sym} val base mem)
16607 off1 := auxIntToInt32(v.AuxInt)
16608 sym := auxToSym(v.Aux)
16610 if v_1.Op != OpAMD64ADDQconst {
16613 off2 := auxIntToInt32(v_1.AuxInt)
16614 base := v_1.Args[0]
16616 if !(is32Bit(int64(off1) + int64(off2))) {
16619 v.reset(OpAMD64MULSDload)
16620 v.AuxInt = int32ToAuxInt(off1 + off2)
16621 v.Aux = symToAux(sym)
16622 v.AddArg3(val, base, mem)
16625 // match: (MULSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
16626 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
16627 // result: (MULSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
16629 off1 := auxIntToInt32(v.AuxInt)
16630 sym1 := auxToSym(v.Aux)
16632 if v_1.Op != OpAMD64LEAQ {
16635 off2 := auxIntToInt32(v_1.AuxInt)
16636 sym2 := auxToSym(v_1.Aux)
16637 base := v_1.Args[0]
16639 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
16642 v.reset(OpAMD64MULSDload)
16643 v.AuxInt = int32ToAuxInt(off1 + off2)
16644 v.Aux = symToAux(mergeSym(sym1, sym2))
16645 v.AddArg3(val, base, mem)
16648 // match: (MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
16649 // result: (MULSD x (MOVQi2f y))
16651 off := auxIntToInt32(v.AuxInt)
16652 sym := auxToSym(v.Aux)
16655 if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
16659 if ptr != v_2.Args[0] {
16662 v.reset(OpAMD64MULSD)
16663 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
16670 func rewriteValueAMD64_OpAMD64MULSS(v *Value) bool {
16673 // match: (MULSS x l:(MOVSSload [off] {sym} ptr mem))
16674 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
16675 // result: (MULSSload x [off] {sym} ptr mem)
16677 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
16680 if l.Op != OpAMD64MOVSSload {
16683 off := auxIntToInt32(l.AuxInt)
16684 sym := auxToSym(l.Aux)
16687 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
16690 v.reset(OpAMD64MULSSload)
16691 v.AuxInt = int32ToAuxInt(off)
16692 v.Aux = symToAux(sym)
16693 v.AddArg3(x, ptr, mem)
16700 func rewriteValueAMD64_OpAMD64MULSSload(v *Value) bool {
16705 typ := &b.Func.Config.Types
16706 // match: (MULSSload [off1] {sym} val (ADDQconst [off2] base) mem)
16707 // cond: is32Bit(int64(off1)+int64(off2))
16708 // result: (MULSSload [off1+off2] {sym} val base mem)
16710 off1 := auxIntToInt32(v.AuxInt)
16711 sym := auxToSym(v.Aux)
16713 if v_1.Op != OpAMD64ADDQconst {
16716 off2 := auxIntToInt32(v_1.AuxInt)
16717 base := v_1.Args[0]
16719 if !(is32Bit(int64(off1) + int64(off2))) {
16722 v.reset(OpAMD64MULSSload)
16723 v.AuxInt = int32ToAuxInt(off1 + off2)
16724 v.Aux = symToAux(sym)
16725 v.AddArg3(val, base, mem)
16728 // match: (MULSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
16729 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
16730 // result: (MULSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
16732 off1 := auxIntToInt32(v.AuxInt)
16733 sym1 := auxToSym(v.Aux)
16735 if v_1.Op != OpAMD64LEAQ {
16738 off2 := auxIntToInt32(v_1.AuxInt)
16739 sym2 := auxToSym(v_1.Aux)
16740 base := v_1.Args[0]
16742 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
16745 v.reset(OpAMD64MULSSload)
16746 v.AuxInt = int32ToAuxInt(off1 + off2)
16747 v.Aux = symToAux(mergeSym(sym1, sym2))
16748 v.AddArg3(val, base, mem)
16751 // match: (MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
16752 // result: (MULSS x (MOVLi2f y))
16754 off := auxIntToInt32(v.AuxInt)
16755 sym := auxToSym(v.Aux)
16758 if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
16762 if ptr != v_2.Args[0] {
16765 v.reset(OpAMD64MULSS)
16766 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
16773 func rewriteValueAMD64_OpAMD64NEGL(v *Value) bool {
16775 // match: (NEGL (NEGL x))
16776 // result: x
16778 if v_0.Op != OpAMD64NEGL {
16785 // match: (NEGL s:(SUBL x y))
16786 // cond: s.Uses == 1
16787 // result: (SUBL y x)
16790 if s.Op != OpAMD64SUBL {
16795 if !(s.Uses == 1) {
16798 v.reset(OpAMD64SUBL)
16802 // match: (NEGL (MOVLconst [c]))
16803 // result: (MOVLconst [-c])
16805 if v_0.Op != OpAMD64MOVLconst {
16808 c := auxIntToInt32(v_0.AuxInt)
16809 v.reset(OpAMD64MOVLconst)
16810 v.AuxInt = int32ToAuxInt(-c)
16815 func rewriteValueAMD64_OpAMD64NEGQ(v *Value) bool {
16817 // match: (NEGQ (NEGQ x))
16818 // result: x
16820 if v_0.Op != OpAMD64NEGQ {
16827 // match: (NEGQ s:(SUBQ x y))
16828 // cond: s.Uses == 1
16829 // result: (SUBQ y x)
16832 if s.Op != OpAMD64SUBQ {
16837 if !(s.Uses == 1) {
16840 v.reset(OpAMD64SUBQ)
16844 // match: (NEGQ (MOVQconst [c]))
16845 // result: (MOVQconst [-c])
16847 if v_0.Op != OpAMD64MOVQconst {
16850 c := auxIntToInt64(v_0.AuxInt)
16851 v.reset(OpAMD64MOVQconst)
16852 v.AuxInt = int64ToAuxInt(-c)
16855 // match: (NEGQ (ADDQconst [c] (NEGQ x)))
16856 // cond: c != -(1<<31)
16857 // result: (ADDQconst [-c] x)
16859 if v_0.Op != OpAMD64ADDQconst {
16862 c := auxIntToInt32(v_0.AuxInt)
16863 v_0_0 := v_0.Args[0]
16864 if v_0_0.Op != OpAMD64NEGQ {
16868 if !(c != -(1 << 31)) {
16871 v.reset(OpAMD64ADDQconst)
16872 v.AuxInt = int32ToAuxInt(-c)
16878 func rewriteValueAMD64_OpAMD64NOTL(v *Value) bool {
16880 // match: (NOTL (MOVLconst [c]))
16881 // result: (MOVLconst [^c])
16883 if v_0.Op != OpAMD64MOVLconst {
16886 c := auxIntToInt32(v_0.AuxInt)
16887 v.reset(OpAMD64MOVLconst)
16888 v.AuxInt = int32ToAuxInt(^c)
16893 func rewriteValueAMD64_OpAMD64NOTQ(v *Value) bool {
16895 // match: (NOTQ (MOVQconst [c]))
16896 // result: (MOVQconst [^c])
16898 if v_0.Op != OpAMD64MOVQconst {
16901 c := auxIntToInt64(v_0.AuxInt)
16902 v.reset(OpAMD64MOVQconst)
16903 v.AuxInt = int64ToAuxInt(^c)
16908 func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
16912 typ := &b.Func.Config.Types
16913 // match: (ORL (SHLL (MOVLconst [1]) y) x)
16914 // result: (BTSL x y)
16916 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
16917 if v_0.Op != OpAMD64SHLL {
16921 v_0_0 := v_0.Args[0]
16922 if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
16926 v.reset(OpAMD64BTSL)
16932 // match: (ORL (MOVLconst [c]) x)
16933 // cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
16934 // result: (BTSLconst [int8(log32(c))] x)
16936 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
16937 if v_0.Op != OpAMD64MOVLconst {
16940 c := auxIntToInt32(v_0.AuxInt)
16942 if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) {
16945 v.reset(OpAMD64BTSLconst)
16946 v.AuxInt = int8ToAuxInt(int8(log32(c)))
16952 // match: (ORL x (MOVLconst [c]))
16953 // result: (ORLconst [c] x)
16955 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
16957 if v_1.Op != OpAMD64MOVLconst {
16960 c := auxIntToInt32(v_1.AuxInt)
16961 v.reset(OpAMD64ORLconst)
16962 v.AuxInt = int32ToAuxInt(c)
16968 // match: (ORL (SHLLconst x [c]) (SHRLconst x [d]))
16969 // cond: d==32-c
16970 // result: (ROLLconst x [c])
16972 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
16973 if v_0.Op != OpAMD64SHLLconst {
16976 c := auxIntToInt8(v_0.AuxInt)
16978 if v_1.Op != OpAMD64SHRLconst {
16981 d := auxIntToInt8(v_1.AuxInt)
16982 if x != v_1.Args[0] || !(d == 32-c) {
16985 v.reset(OpAMD64ROLLconst)
16986 v.AuxInt = int8ToAuxInt(c)
16992 // match: (ORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
16993 // cond: d==16-c && c < 16 && t.Size() == 2
16994 // result: (ROLWconst x [c])
16997 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
16998 if v_0.Op != OpAMD64SHLLconst {
17001 c := auxIntToInt8(v_0.AuxInt)
17003 if v_1.Op != OpAMD64SHRWconst {
17006 d := auxIntToInt8(v_1.AuxInt)
17007 if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) {
17010 v.reset(OpAMD64ROLWconst)
17011 v.AuxInt = int8ToAuxInt(c)
17017 // match: (ORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
17018 // cond: d==8-c && c < 8 && t.Size() == 1
17019 // result: (ROLBconst x [c])
17022 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17023 if v_0.Op != OpAMD64SHLLconst {
17026 c := auxIntToInt8(v_0.AuxInt)
17028 if v_1.Op != OpAMD64SHRBconst {
17031 d := auxIntToInt8(v_1.AuxInt)
17032 if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) {
17035 v.reset(OpAMD64ROLBconst)
17036 v.AuxInt = int8ToAuxInt(c)
17042 // match: (ORL (SHLL x y) (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
17043 // result: (ROLL x y)
17045 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17046 if v_0.Op != OpAMD64SHLL {
17051 if v_1.Op != OpAMD64ANDL {
17055 v_1_0 := v_1.Args[0]
17056 v_1_1 := v_1.Args[1]
17057 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
17058 if v_1_0.Op != OpAMD64SHRL {
17062 if x != v_1_0.Args[0] {
17065 v_1_0_1 := v_1_0.Args[1]
17066 if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
17069 v_1_1_0 := v_1_1.Args[0]
17070 if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
17073 v_1_1_0_0 := v_1_1_0.Args[0]
17074 if v_1_1_0_0.Op != OpAMD64NEGQ {
17077 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
17078 if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
17081 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
17082 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
17085 v.reset(OpAMD64ROLL)
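// Illustrative note: the pattern above is the shift-based expansion of a
// variable 32-bit rotate left, (x << y) | (x >> ((32-y)&31)), where the
// SBBLcarrymask term zeroes the right-shift half when y&31 == 0; it collapses
// into a single ROLL. The rules that follow match the same shape with 32-bit
// count arithmetic and with the mirrored rotate-right form.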
17092 // match: (ORL (SHLL x y) (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
17093 // result: (ROLL x y)
17095 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17096 if v_0.Op != OpAMD64SHLL {
17101 if v_1.Op != OpAMD64ANDL {
17105 v_1_0 := v_1.Args[0]
17106 v_1_1 := v_1.Args[1]
17107 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
17108 if v_1_0.Op != OpAMD64SHRL {
17112 if x != v_1_0.Args[0] {
17115 v_1_0_1 := v_1_0.Args[1]
17116 if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
17119 v_1_1_0 := v_1_1.Args[0]
17120 if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
17123 v_1_1_0_0 := v_1_1_0.Args[0]
17124 if v_1_1_0_0.Op != OpAMD64NEGL {
17127 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
17128 if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
17131 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
17132 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
17135 v.reset(OpAMD64ROLL)
17142 // match: (ORL (SHRL x y) (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
17143 // result: (RORL x y)
17145 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17146 if v_0.Op != OpAMD64SHRL {
17151 if v_1.Op != OpAMD64ANDL {
17155 v_1_0 := v_1.Args[0]
17156 v_1_1 := v_1.Args[1]
17157 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
17158 if v_1_0.Op != OpAMD64SHLL {
17162 if x != v_1_0.Args[0] {
17165 v_1_0_1 := v_1_0.Args[1]
17166 if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
17169 v_1_1_0 := v_1_1.Args[0]
17170 if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
17173 v_1_1_0_0 := v_1_1_0.Args[0]
17174 if v_1_1_0_0.Op != OpAMD64NEGQ {
17177 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
17178 if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
17181 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
17182 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
17185 v.reset(OpAMD64RORL)
17192 // match: (ORL (SHRL x y) (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
17193 // result: (RORL x y)
17195 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17196 if v_0.Op != OpAMD64SHRL {
17201 if v_1.Op != OpAMD64ANDL {
17205 v_1_0 := v_1.Args[0]
17206 v_1_1 := v_1.Args[1]
17207 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
17208 if v_1_0.Op != OpAMD64SHLL {
17212 if x != v_1_0.Args[0] {
17215 v_1_0_1 := v_1_0.Args[1]
17216 if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
17219 v_1_1_0 := v_1_1.Args[0]
17220 if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
17223 v_1_1_0_0 := v_1_1_0.Args[0]
17224 if v_1_1_0_0.Op != OpAMD64NEGL {
17227 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
17228 if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
17231 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
17232 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
17235 v.reset(OpAMD64RORL)
17242 // match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))))
17243 // cond: v.Type.Size() == 2
17244 // result: (ROLW x y)
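// The 16-bit form keeps an explicit y&15 mask in the shift amounts. Rotation is periodic in
// the operand width, so the tree is still equivalent to ROLW x y; the v.Type.Size() == 2
// condition restricts the rewrite to values that really are 16 bits wide.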
17246 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17247 if v_0.Op != OpAMD64SHLL {
17252 v_0_1 := v_0.Args[1]
17253 if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
17257 if v_1.Op != OpAMD64ANDL {
17261 v_1_0 := v_1.Args[0]
17262 v_1_1 := v_1.Args[1]
17263 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
17264 if v_1_0.Op != OpAMD64SHRW {
17268 if x != v_1_0.Args[0] {
17271 v_1_0_1 := v_1_0.Args[1]
17272 if v_1_0_1.Op != OpAMD64NEGQ {
17275 v_1_0_1_0 := v_1_0_1.Args[0]
17276 if v_1_0_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -16 {
17279 v_1_0_1_0_0 := v_1_0_1_0.Args[0]
17280 if v_1_0_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 15 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
17283 v_1_1_0 := v_1_1.Args[0]
17284 if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 16 {
17287 v_1_1_0_0 := v_1_1_0.Args[0]
17288 if v_1_1_0_0.Op != OpAMD64NEGQ {
17291 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
17292 if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -16 {
17295 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
17296 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 15 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 2) {
17299 v.reset(OpAMD64ROLW)
17306 // match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))))
17307 // cond: v.Type.Size() == 2
17308 // result: (ROLW x y)
17310 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17311 if v_0.Op != OpAMD64SHLL {
17316 v_0_1 := v_0.Args[1]
17317 if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
17321 if v_1.Op != OpAMD64ANDL {
17325 v_1_0 := v_1.Args[0]
17326 v_1_1 := v_1.Args[1]
17327 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
17328 if v_1_0.Op != OpAMD64SHRW {
17332 if x != v_1_0.Args[0] {
17335 v_1_0_1 := v_1_0.Args[1]
17336 if v_1_0_1.Op != OpAMD64NEGL {
17339 v_1_0_1_0 := v_1_0_1.Args[0]
17340 if v_1_0_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -16 {
17343 v_1_0_1_0_0 := v_1_0_1_0.Args[0]
17344 if v_1_0_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 15 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
17347 v_1_1_0 := v_1_1.Args[0]
17348 if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 16 {
17351 v_1_1_0_0 := v_1_1_0.Args[0]
17352 if v_1_1_0_0.Op != OpAMD64NEGL {
17355 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
17356 if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -16 {
17359 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
17360 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 15 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 2) {
17363 v.reset(OpAMD64ROLW)
17370 // match: (ORL (SHRW x (ANDQconst y [15])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))))
17371 // cond: v.Type.Size() == 2
17372 // result: (RORW x y)
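// The 16-bit rotate-right idiom matches with just the two shift arms and no SBBLcarrymask arm,
// again gated on v.Type.Size() == 2 so the rewrite only applies to genuine 16-bit values.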
17374 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17375 if v_0.Op != OpAMD64SHRW {
17380 v_0_1 := v_0.Args[1]
17381 if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
17385 if v_1.Op != OpAMD64SHLL {
17389 if x != v_1.Args[0] {
17392 v_1_1 := v_1.Args[1]
17393 if v_1_1.Op != OpAMD64NEGQ {
17396 v_1_1_0 := v_1_1.Args[0]
17397 if v_1_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0.AuxInt) != -16 {
17400 v_1_1_0_0 := v_1_1_0.Args[0]
17401 if v_1_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 15 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 2) {
17404 v.reset(OpAMD64RORW)
17410 // match: (ORL (SHRW x (ANDLconst y [15])) (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))))
17411 // cond: v.Type.Size() == 2
17412 // result: (RORW x y)
17414 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17415 if v_0.Op != OpAMD64SHRW {
17420 v_0_1 := v_0.Args[1]
17421 if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
17425 if v_1.Op != OpAMD64SHLL {
17429 if x != v_1.Args[0] {
17432 v_1_1 := v_1.Args[1]
17433 if v_1_1.Op != OpAMD64NEGL {
17436 v_1_1_0 := v_1_1.Args[0]
17437 if v_1_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0.AuxInt) != -16 {
17440 v_1_1_0_0 := v_1_1_0.Args[0]
17441 if v_1_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 15 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 2) {
17444 v.reset(OpAMD64RORW)
17450 // match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))))
17451 // cond: v.Type.Size() == 1
17452 // result: (ROLB x y)
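// Same shape as the 16-bit rule above, specialized to 8-bit rotates: the mask is 7, the bias
// is -8, and the width guard is v.Type.Size() == 1.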
17454 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17455 if v_0.Op != OpAMD64SHLL {
17460 v_0_1 := v_0.Args[1]
17461 if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
17465 if v_1.Op != OpAMD64ANDL {
17469 v_1_0 := v_1.Args[0]
17470 v_1_1 := v_1.Args[1]
17471 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
17472 if v_1_0.Op != OpAMD64SHRB {
17476 if x != v_1_0.Args[0] {
17479 v_1_0_1 := v_1_0.Args[1]
17480 if v_1_0_1.Op != OpAMD64NEGQ {
17483 v_1_0_1_0 := v_1_0_1.Args[0]
17484 if v_1_0_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -8 {
17487 v_1_0_1_0_0 := v_1_0_1_0.Args[0]
17488 if v_1_0_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 7 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
17491 v_1_1_0 := v_1_1.Args[0]
17492 if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 8 {
17495 v_1_1_0_0 := v_1_1_0.Args[0]
17496 if v_1_1_0_0.Op != OpAMD64NEGQ {
17499 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
17500 if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -8 {
17503 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
17504 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 7 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 1) {
17507 v.reset(OpAMD64ROLB)
17514 // match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))))
17515 // cond: v.Type.Size() == 1
17516 // result: (ROLB x y)
17518 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17519 if v_0.Op != OpAMD64SHLL {
17524 v_0_1 := v_0.Args[1]
17525 if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
17529 if v_1.Op != OpAMD64ANDL {
17533 v_1_0 := v_1.Args[0]
17534 v_1_1 := v_1.Args[1]
17535 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
17536 if v_1_0.Op != OpAMD64SHRB {
17540 if x != v_1_0.Args[0] {
17543 v_1_0_1 := v_1_0.Args[1]
17544 if v_1_0_1.Op != OpAMD64NEGL {
17547 v_1_0_1_0 := v_1_0_1.Args[0]
17548 if v_1_0_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -8 {
17551 v_1_0_1_0_0 := v_1_0_1_0.Args[0]
17552 if v_1_0_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 7 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
17555 v_1_1_0 := v_1_1.Args[0]
17556 if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 8 {
17559 v_1_1_0_0 := v_1_1_0.Args[0]
17560 if v_1_1_0_0.Op != OpAMD64NEGL {
17563 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
17564 if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -8 {
17567 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
17568 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 7 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 1) {
17571 v.reset(OpAMD64ROLB)
17578 // match: (ORL (SHRB x (ANDQconst y [ 7])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))))
17579 // cond: v.Type.Size() == 1
17580 // result: (RORB x y)
17582 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17583 if v_0.Op != OpAMD64SHRB {
17588 v_0_1 := v_0.Args[1]
17589 if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
17593 if v_1.Op != OpAMD64SHLL {
17597 if x != v_1.Args[0] {
17600 v_1_1 := v_1.Args[1]
17601 if v_1_1.Op != OpAMD64NEGQ {
17604 v_1_1_0 := v_1_1.Args[0]
17605 if v_1_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0.AuxInt) != -8 {
17608 v_1_1_0_0 := v_1_1_0.Args[0]
17609 if v_1_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 7 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 1) {
17612 v.reset(OpAMD64RORB)
17618 // match: (ORL (SHRB x (ANDLconst y [ 7])) (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))))
17619 // cond: v.Type.Size() == 1
17620 // result: (RORB x y)
17622 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17623 if v_0.Op != OpAMD64SHRB {
17628 v_0_1 := v_0.Args[1]
17629 if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
17633 if v_1.Op != OpAMD64SHLL {
17637 if x != v_1.Args[0] {
17640 v_1_1 := v_1.Args[1]
17641 if v_1_1.Op != OpAMD64NEGL {
17644 v_1_1_0 := v_1_1.Args[0]
17645 if v_1_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0.AuxInt) != -8 {
17648 v_1_1_0_0 := v_1_1_0.Args[0]
17649 if v_1_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 7 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 1) {
17652 v.reset(OpAMD64RORB)
17658 // match: (ORL x x)
17668 // match: (ORL x0:(MOVBload [i0] {s} p mem) sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)))
17669 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
17670 // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
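// Two single-byte loads from adjacent offsets (i1 == i0+1) off the same pointer, joined by a
// shift-by-8 and OR, are really one little-endian 16-bit load. mergePoint picks a block where
// both original loads are available to host the new MOVWload, and clobber retires the old
// values so dead-code elimination can remove them.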
17672 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17674 if x0.Op != OpAMD64MOVBload {
17677 i0 := auxIntToInt32(x0.AuxInt)
17678 s := auxToSym(x0.Aux)
17682 if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 {
17686 if x1.Op != OpAMD64MOVBload {
17689 i1 := auxIntToInt32(x1.AuxInt)
17690 if auxToSym(x1.Aux) != s {
17694 if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
17697 b = mergePoint(b, x0, x1)
17698 v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
17700 v0.AuxInt = int32ToAuxInt(i0)
17701 v0.Aux = symToAux(s)
17707 // match: (ORL x0:(MOVBload [i] {s} p0 mem) sh:(SHLLconst [8] x1:(MOVBload [i] {s} p1 mem)))
17708 // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
17709 // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p0 mem)
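// This variant handles two different pointer values at the same constant offset:
// sequentialAddresses(p0, p1, 1) requires that p1 address exactly the byte after p0, which
// again makes a single MOVWload from p0 safe.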
17711 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17713 if x0.Op != OpAMD64MOVBload {
17716 i := auxIntToInt32(x0.AuxInt)
17717 s := auxToSym(x0.Aux)
17721 if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 {
17725 if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
17730 if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
17733 b = mergePoint(b, x0, x1)
17734 v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
17736 v0.AuxInt = int32ToAuxInt(i)
17737 v0.Aux = symToAux(s)
17738 v0.AddArg2(p0, mem)
17743 // match: (ORL x0:(MOVWload [i0] {s} p mem) sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)))
17744 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
17745 // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
17747 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17749 if x0.Op != OpAMD64MOVWload {
17752 i0 := auxIntToInt32(x0.AuxInt)
17753 s := auxToSym(x0.Aux)
17757 if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 {
17761 if x1.Op != OpAMD64MOVWload {
17764 i1 := auxIntToInt32(x1.AuxInt)
17765 if auxToSym(x1.Aux) != s {
17769 if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
17772 b = mergePoint(b, x0, x1)
17773 v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
17775 v0.AuxInt = int32ToAuxInt(i0)
17776 v0.Aux = symToAux(s)
17782 // match: (ORL x0:(MOVWload [i] {s} p0 mem) sh:(SHLLconst [16] x1:(MOVWload [i] {s} p1 mem)))
17783 // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
17784 // result: @mergePoint(b,x0,x1) (MOVLload [i] {s} p0 mem)
17786 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17788 if x0.Op != OpAMD64MOVWload {
17791 i := auxIntToInt32(x0.AuxInt)
17792 s := auxToSym(x0.Aux)
17796 if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 {
17800 if x1.Op != OpAMD64MOVWload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
17805 if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
17808 b = mergePoint(b, x0, x1)
17809 v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
17811 v0.AuxInt = int32ToAuxInt(i)
17812 v0.Aux = symToAux(s)
17813 v0.AddArg2(p0, mem)
17818 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y))
17819 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
17820 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
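// When the two byte loads sit inside a larger OR chain, the rule still pairs them up: they
// become one MOVWload shifted into position by the smaller count j0 (j1 == j0+8 with j0 a
// multiple of 16 keeps the pair aligned inside the chain), and the unrelated operand y is
// OR'd back on at the mergePoint.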
17822 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17824 if s1.Op != OpAMD64SHLLconst {
17827 j1 := auxIntToInt8(s1.AuxInt)
17829 if x1.Op != OpAMD64MOVBload {
17832 i1 := auxIntToInt32(x1.AuxInt)
17833 s := auxToSym(x1.Aux)
17837 if or.Op != OpAMD64ORL {
17843 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
17845 if s0.Op != OpAMD64SHLLconst {
17848 j0 := auxIntToInt8(s0.AuxInt)
17850 if x0.Op != OpAMD64MOVBload {
17853 i0 := auxIntToInt32(x0.AuxInt)
17854 if auxToSym(x0.Aux) != s {
17858 if p != x0.Args[0] || mem != x0.Args[1] {
17862 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
17865 b = mergePoint(b, x0, x1, y)
17866 v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type)
17868 v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type)
17869 v1.AuxInt = int8ToAuxInt(j0)
17870 v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
17871 v2.AuxInt = int32ToAuxInt(i0)
17872 v2.Aux = symToAux(s)
17881 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i] {s} p1 mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i] {s} p0 mem)) y))
17882 // cond: j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
17883 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i] {s} p0 mem)) y)
17885 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17887 if s1.Op != OpAMD64SHLLconst {
17890 j1 := auxIntToInt8(s1.AuxInt)
17892 if x1.Op != OpAMD64MOVBload {
17895 i := auxIntToInt32(x1.AuxInt)
17896 s := auxToSym(x1.Aux)
17900 if or.Op != OpAMD64ORL {
17906 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
17908 if s0.Op != OpAMD64SHLLconst {
17911 j0 := auxIntToInt8(s0.AuxInt)
17913 if x0.Op != OpAMD64MOVBload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
17918 if mem != x0.Args[1] {
17922 if !(j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
17925 b = mergePoint(b, x0, x1, y)
17926 v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type)
17928 v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type)
17929 v1.AuxInt = int8ToAuxInt(j0)
17930 v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
17931 v2.AuxInt = int32ToAuxInt(i)
17932 v2.Aux = symToAux(s)
17933 v2.AddArg2(p0, mem)
17941 // match: (ORL x1:(MOVBload [i1] {s} p mem) sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem)))
17942 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
17943 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
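// Here the un-shifted load is the one at the higher address, so the two bytes are combined in
// big-endian order; the rewrite is a plain little-endian MOVWload followed by ROLWconst [8],
// which swaps the two halves.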
17945 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17947 if x1.Op != OpAMD64MOVBload {
17950 i1 := auxIntToInt32(x1.AuxInt)
17951 s := auxToSym(x1.Aux)
17955 if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 {
17959 if x0.Op != OpAMD64MOVBload {
17962 i0 := auxIntToInt32(x0.AuxInt)
17963 if auxToSym(x0.Aux) != s {
17967 if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
17970 b = mergePoint(b, x0, x1)
17971 v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type)
17973 v0.AuxInt = int8ToAuxInt(8)
17974 v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
17975 v1.AuxInt = int32ToAuxInt(i0)
17976 v1.Aux = symToAux(s)
17983 // match: (ORL x1:(MOVBload [i] {s} p1 mem) sh:(SHLLconst [8] x0:(MOVBload [i] {s} p0 mem)))
17984 // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
17985 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i] {s} p0 mem))
17987 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17989 if x1.Op != OpAMD64MOVBload {
17992 i := auxIntToInt32(x1.AuxInt)
17993 s := auxToSym(x1.Aux)
17997 if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 {
18001 if x0.Op != OpAMD64MOVBload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
18006 if mem != x0.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
18009 b = mergePoint(b, x0, x1)
18010 v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type)
18012 v0.AuxInt = int8ToAuxInt(8)
18013 v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
18014 v1.AuxInt = int32ToAuxInt(i)
18015 v1.Aux = symToAux(s)
18016 v1.AddArg2(p0, mem)
18022 // match: (ORL r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
18023 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh)
18024 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
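// Two byte-swapped 16-bit loads from adjacent offsets (i1 == i0+2), combined in the same
// big-endian fashion, describe a byte-reversed 32-bit read: one MOVLload plus BSWAPL.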
18026 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18028 if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
18032 if x1.Op != OpAMD64MOVWload {
18035 i1 := auxIntToInt32(x1.AuxInt)
18036 s := auxToSym(x1.Aux)
18040 if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 {
18044 if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
18048 if x0.Op != OpAMD64MOVWload {
18051 i0 := auxIntToInt32(x0.AuxInt)
18052 if auxToSym(x0.Aux) != s {
18056 if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
18059 b = mergePoint(b, x0, x1)
18060 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type)
18062 v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
18063 v1.AuxInt = int32ToAuxInt(i0)
18064 v1.Aux = symToAux(s)
18071 // match: (ORL r1:(ROLWconst [8] x1:(MOVWload [i] {s} p1 mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i] {s} p0 mem))))
18072 // cond: x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh)
18073 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i] {s} p0 mem))
18075 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18077 if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
18081 if x1.Op != OpAMD64MOVWload {
18084 i := auxIntToInt32(x1.AuxInt)
18085 s := auxToSym(x1.Aux)
18089 if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 {
18093 if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
18097 if x0.Op != OpAMD64MOVWload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
18102 if mem != x0.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
18105 b = mergePoint(b, x0, x1)
18106 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type)
18108 v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
18109 v1.AuxInt = int32ToAuxInt(i)
18110 v1.Aux = symToAux(s)
18111 v1.AddArg2(p0, mem)
18117 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y))
18118 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
18119 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
18121 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18123 if s0.Op != OpAMD64SHLLconst {
18126 j0 := auxIntToInt8(s0.AuxInt)
18128 if x0.Op != OpAMD64MOVBload {
18131 i0 := auxIntToInt32(x0.AuxInt)
18132 s := auxToSym(x0.Aux)
18136 if or.Op != OpAMD64ORL {
18142 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
18144 if s1.Op != OpAMD64SHLLconst {
18147 j1 := auxIntToInt8(s1.AuxInt)
18149 if x1.Op != OpAMD64MOVBload {
18152 i1 := auxIntToInt32(x1.AuxInt)
18153 if auxToSym(x1.Aux) != s {
18157 if p != x1.Args[0] || mem != x1.Args[1] {
18161 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
18164 b = mergePoint(b, x0, x1, y)
18165 v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type)
18167 v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type)
18168 v1.AuxInt = int8ToAuxInt(j1)
18169 v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
18170 v2.AuxInt = int8ToAuxInt(8)
18171 v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
18172 v3.AuxInt = int32ToAuxInt(i0)
18173 v3.Aux = symToAux(s)
18183 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i] {s} p0 mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i] {s} p1 mem)) y))
18184 // cond: j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
18185 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i] {s} p0 mem))) y)
18187 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18189 if s0.Op != OpAMD64SHLLconst {
18192 j0 := auxIntToInt8(s0.AuxInt)
18194 if x0.Op != OpAMD64MOVBload {
18197 i := auxIntToInt32(x0.AuxInt)
18198 s := auxToSym(x0.Aux)
18202 if or.Op != OpAMD64ORL {
18208 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
18210 if s1.Op != OpAMD64SHLLconst {
18213 j1 := auxIntToInt8(s1.AuxInt)
18215 if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
18220 if mem != x1.Args[1] {
18224 if !(j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
18227 b = mergePoint(b, x0, x1, y)
18228 v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type)
18230 v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type)
18231 v1.AuxInt = int8ToAuxInt(j1)
18232 v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
18233 v2.AuxInt = int8ToAuxInt(8)
18234 v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
18235 v3.AuxInt = int32ToAuxInt(i)
18236 v3.Aux = symToAux(s)
18237 v3.AddArg2(p0, mem)
18246 // match: (ORL x l:(MOVLload [off] {sym} ptr mem))
18247 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
18248 // result: (ORLload x [off] {sym} ptr mem)
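// canMergeLoadClobber checks that the load is used only by this OR and can legally be folded
// into it; when it can, the ORL becomes an ORLload with a memory operand, saving a separate
// load instruction.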
18250 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18253 if l.Op != OpAMD64MOVLload {
18256 off := auxIntToInt32(l.AuxInt)
18257 sym := auxToSym(l.Aux)
18260 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
18263 v.reset(OpAMD64ORLload)
18264 v.AuxInt = int32ToAuxInt(off)
18265 v.Aux = symToAux(sym)
18266 v.AddArg3(x, ptr, mem)
18273 func rewriteValueAMD64_OpAMD64ORLconst(v *Value) bool {
18275 // match: (ORLconst [c] x)
18276 // cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
18277 // result: (BTSLconst [int8(log32(c))] x)
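// An OR with a single-bit constant of 128 or more no longer fits an imm8 encoding, so setting
// the bit with BTSLconst (bit test-and-set by index) is the shorter form.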
18279 c := auxIntToInt32(v.AuxInt)
18281 if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) {
18284 v.reset(OpAMD64BTSLconst)
18285 v.AuxInt = int8ToAuxInt(int8(log32(c)))
18289 // match: (ORLconst [c] (ORLconst [d] x))
18290 // result: (ORLconst [c | d] x)
18292 c := auxIntToInt32(v.AuxInt)
18293 if v_0.Op != OpAMD64ORLconst {
18296 d := auxIntToInt32(v_0.AuxInt)
18298 v.reset(OpAMD64ORLconst)
18299 v.AuxInt = int32ToAuxInt(c | d)
18303 // match: (ORLconst [c] (BTSLconst [d] x))
18304 // result: (ORLconst [c | 1<<uint32(d)] x)
18306 c := auxIntToInt32(v.AuxInt)
18307 if v_0.Op != OpAMD64BTSLconst {
18310 d := auxIntToInt8(v_0.AuxInt)
18312 v.reset(OpAMD64ORLconst)
18313 v.AuxInt = int32ToAuxInt(c | 1<<uint32(d))
18317 // match: (ORLconst [c] x)
18321 c := auxIntToInt32(v.AuxInt)
18329 // match: (ORLconst [c] _)
18331 // result: (MOVLconst [-1])
18333 c := auxIntToInt32(v.AuxInt)
18337 v.reset(OpAMD64MOVLconst)
18338 v.AuxInt = int32ToAuxInt(-1)
18341 // match: (ORLconst [c] (MOVLconst [d]))
18342 // result: (MOVLconst [c|d])
18344 c := auxIntToInt32(v.AuxInt)
18345 if v_0.Op != OpAMD64MOVLconst {
18348 d := auxIntToInt32(v_0.AuxInt)
18349 v.reset(OpAMD64MOVLconst)
18350 v.AuxInt = int32ToAuxInt(c | d)
18355 func rewriteValueAMD64_OpAMD64ORLconstmodify(v *Value) bool {
18358 // match: (ORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
18359 // cond: ValAndOff(valoff1).canAdd32(off2)
18360 // result: (ORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
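// Constant address arithmetic folds into the displacement: the ADDQconst offset is absorbed
// into the instruction's ValAndOff as long as the combined offset still fits in 32 bits
// (canAdd32).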
18362 valoff1 := auxIntToValAndOff(v.AuxInt)
18363 sym := auxToSym(v.Aux)
18364 if v_0.Op != OpAMD64ADDQconst {
18367 off2 := auxIntToInt32(v_0.AuxInt)
18368 base := v_0.Args[0]
18370 if !(ValAndOff(valoff1).canAdd32(off2)) {
18373 v.reset(OpAMD64ORLconstmodify)
18374 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
18375 v.Aux = symToAux(sym)
18376 v.AddArg2(base, mem)
18379 // match: (ORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
18380 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
18381 // result: (ORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
18383 valoff1 := auxIntToValAndOff(v.AuxInt)
18384 sym1 := auxToSym(v.Aux)
18385 if v_0.Op != OpAMD64LEAQ {
18388 off2 := auxIntToInt32(v_0.AuxInt)
18389 sym2 := auxToSym(v_0.Aux)
18390 base := v_0.Args[0]
18392 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
18395 v.reset(OpAMD64ORLconstmodify)
18396 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
18397 v.Aux = symToAux(mergeSym(sym1, sym2))
18398 v.AddArg2(base, mem)
18403 func rewriteValueAMD64_OpAMD64ORLload(v *Value) bool {
18408 typ := &b.Func.Config.Types
18409 // match: (ORLload [off1] {sym} val (ADDQconst [off2] base) mem)
18410 // cond: is32Bit(int64(off1)+int64(off2))
18411 // result: (ORLload [off1+off2] {sym} val base mem)
18413 off1 := auxIntToInt32(v.AuxInt)
18414 sym := auxToSym(v.Aux)
18416 if v_1.Op != OpAMD64ADDQconst {
18419 off2 := auxIntToInt32(v_1.AuxInt)
18420 base := v_1.Args[0]
18422 if !(is32Bit(int64(off1) + int64(off2))) {
18425 v.reset(OpAMD64ORLload)
18426 v.AuxInt = int32ToAuxInt(off1 + off2)
18427 v.Aux = symToAux(sym)
18428 v.AddArg3(val, base, mem)
18431 // match: (ORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
18432 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
18433 // result: (ORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
18435 off1 := auxIntToInt32(v.AuxInt)
18436 sym1 := auxToSym(v.Aux)
18438 if v_1.Op != OpAMD64LEAQ {
18441 off2 := auxIntToInt32(v_1.AuxInt)
18442 sym2 := auxToSym(v_1.Aux)
18443 base := v_1.Args[0]
18445 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
18448 v.reset(OpAMD64ORLload)
18449 v.AuxInt = int32ToAuxInt(off1 + off2)
18450 v.Aux = symToAux(mergeSym(sym1, sym2))
18451 v.AddArg3(val, base, mem)
18454 // match: ( ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
18455 // result: ( ORL x (MOVLf2i y))
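// A load that reads back exactly what a MOVSSstore just wrote to the same address can skip
// memory: the stored float bits are moved to an integer register with MOVLf2i and OR'd in
// directly.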
18457 off := auxIntToInt32(v.AuxInt)
18458 sym := auxToSym(v.Aux)
18461 if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
18465 if ptr != v_2.Args[0] {
18468 v.reset(OpAMD64ORL)
18469 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
18476 func rewriteValueAMD64_OpAMD64ORLmodify(v *Value) bool {
18481 // match: (ORLmodify [off] {sym} ptr s:(SHLL (MOVLconst [1]) <t> x) mem)
18482 // result: (BTSLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
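// ORing a dynamically computed single bit (1 shifted left by x) into memory becomes
// BTSLmodify; the ANDLconst <t> [31] reproduces SHLL's modulo-32 shift count so the bit lands
// inside the addressed 32-bit word.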
18484 off := auxIntToInt32(v.AuxInt)
18485 sym := auxToSym(v.Aux)
18488 if s.Op != OpAMD64SHLL {
18494 if s_0.Op != OpAMD64MOVLconst || auxIntToInt32(s_0.AuxInt) != 1 {
18498 v.reset(OpAMD64BTSLmodify)
18499 v.AuxInt = int32ToAuxInt(off)
18500 v.Aux = symToAux(sym)
18501 v0 := b.NewValue0(v.Pos, OpAMD64ANDLconst, t)
18502 v0.AuxInt = int32ToAuxInt(31)
18504 v.AddArg3(ptr, v0, mem)
18507 // match: (ORLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
18508 // cond: is32Bit(int64(off1)+int64(off2))
18509 // result: (ORLmodify [off1+off2] {sym} base val mem)
18511 off1 := auxIntToInt32(v.AuxInt)
18512 sym := auxToSym(v.Aux)
18513 if v_0.Op != OpAMD64ADDQconst {
18516 off2 := auxIntToInt32(v_0.AuxInt)
18517 base := v_0.Args[0]
18520 if !(is32Bit(int64(off1) + int64(off2))) {
18523 v.reset(OpAMD64ORLmodify)
18524 v.AuxInt = int32ToAuxInt(off1 + off2)
18525 v.Aux = symToAux(sym)
18526 v.AddArg3(base, val, mem)
18529 // match: (ORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
18530 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
18531 // result: (ORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
18533 off1 := auxIntToInt32(v.AuxInt)
18534 sym1 := auxToSym(v.Aux)
18535 if v_0.Op != OpAMD64LEAQ {
18538 off2 := auxIntToInt32(v_0.AuxInt)
18539 sym2 := auxToSym(v_0.Aux)
18540 base := v_0.Args[0]
18543 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
18546 v.reset(OpAMD64ORLmodify)
18547 v.AuxInt = int32ToAuxInt(off1 + off2)
18548 v.Aux = symToAux(mergeSym(sym1, sym2))
18549 v.AddArg3(base, val, mem)
18554 func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
18558 typ := &b.Func.Config.Types
18559 // match: (ORQ (SHLQ (MOVQconst [1]) y) x)
18560 // result: (BTSQ x y)
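// A 64-bit OR with a variable single-bit mask (1 shifted left by y) is a bit-set: BTSQ x y.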
18562 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18563 if v_0.Op != OpAMD64SHLQ {
18567 v_0_0 := v_0.Args[0]
18568 if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
18572 v.reset(OpAMD64BTSQ)
18578 // match: (ORQ (MOVQconst [c]) x)
18579 // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128
18580 // result: (BTSQconst [int8(log64(c))] x)
18582 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18583 if v_0.Op != OpAMD64MOVQconst {
18586 c := auxIntToInt64(v_0.AuxInt)
18588 if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) {
18591 v.reset(OpAMD64BTSQconst)
18592 v.AuxInt = int8ToAuxInt(int8(log64(c)))
18598 // match: (ORQ x (MOVQconst [c]))
18599 // cond: is32Bit(c)
18600 // result: (ORQconst [int32(c)] x)
18602 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18604 if v_1.Op != OpAMD64MOVQconst {
18607 c := auxIntToInt64(v_1.AuxInt)
18611 v.reset(OpAMD64ORQconst)
18612 v.AuxInt = int32ToAuxInt(int32(c))
18618 // match: (ORQ x (MOVLconst [c]))
18619 // result: (ORQconst [c] x)
18621 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18623 if v_1.Op != OpAMD64MOVLconst {
18626 c := auxIntToInt32(v_1.AuxInt)
18627 v.reset(OpAMD64ORQconst)
18628 v.AuxInt = int32ToAuxInt(c)
18634 // match: (ORQ (SHLQconst x [c]) (SHRQconst x [d]))
18636 // result: (ROLQconst x [c])
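// Two constant shifts in opposite directions whose counts sum to 64 (d == 64-c, checked below)
// form a constant rotate, ROLQconst x [c].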
18638 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18639 if v_0.Op != OpAMD64SHLQconst {
18642 c := auxIntToInt8(v_0.AuxInt)
18644 if v_1.Op != OpAMD64SHRQconst {
18647 d := auxIntToInt8(v_1.AuxInt)
18648 if x != v_1.Args[0] || !(d == 64-c) {
18651 v.reset(OpAMD64ROLQconst)
18652 v.AuxInt = int8ToAuxInt(c)
18658 // match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))))
18659 // result: (ROLQ x y)
18661 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18662 if v_0.Op != OpAMD64SHLQ {
18667 if v_1.Op != OpAMD64ANDQ {
18671 v_1_0 := v_1.Args[0]
18672 v_1_1 := v_1.Args[1]
18673 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
18674 if v_1_0.Op != OpAMD64SHRQ {
18678 if x != v_1_0.Args[0] {
18681 v_1_0_1 := v_1_0.Args[1]
18682 if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
18685 v_1_1_0 := v_1_1.Args[0]
18686 if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
18689 v_1_1_0_0 := v_1_1_0.Args[0]
18690 if v_1_1_0_0.Op != OpAMD64NEGQ {
18693 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
18694 if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
18697 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
18698 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
18701 v.reset(OpAMD64ROLQ)
18708 // match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))))
18709 // result: (ROLQ x y)
18711 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18712 if v_0.Op != OpAMD64SHLQ {
18717 if v_1.Op != OpAMD64ANDQ {
18721 v_1_0 := v_1.Args[0]
18722 v_1_1 := v_1.Args[1]
18723 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
18724 if v_1_0.Op != OpAMD64SHRQ {
18728 if x != v_1_0.Args[0] {
18731 v_1_0_1 := v_1_0.Args[1]
18732 if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
18735 v_1_1_0 := v_1_1.Args[0]
18736 if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
18739 v_1_1_0_0 := v_1_1_0.Args[0]
18740 if v_1_1_0_0.Op != OpAMD64NEGL {
18743 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
18744 if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
18747 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
18748 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
18751 v.reset(OpAMD64ROLQ)
18758 // match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))))
18759 // result: (RORQ x y)
18761 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18762 if v_0.Op != OpAMD64SHRQ {
18767 if v_1.Op != OpAMD64ANDQ {
18771 v_1_0 := v_1.Args[0]
18772 v_1_1 := v_1.Args[1]
18773 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
18774 if v_1_0.Op != OpAMD64SHLQ {
18778 if x != v_1_0.Args[0] {
18781 v_1_0_1 := v_1_0.Args[1]
18782 if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
18785 v_1_1_0 := v_1_1.Args[0]
18786 if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
18789 v_1_1_0_0 := v_1_1_0.Args[0]
18790 if v_1_1_0_0.Op != OpAMD64NEGQ {
18793 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
18794 if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
18797 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
18798 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
18801 v.reset(OpAMD64RORQ)
18808 // match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))))
18809 // result: (RORQ x y)
18811 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18812 if v_0.Op != OpAMD64SHRQ {
18817 if v_1.Op != OpAMD64ANDQ {
18821 v_1_0 := v_1.Args[0]
18822 v_1_1 := v_1.Args[1]
18823 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
18824 if v_1_0.Op != OpAMD64SHLQ {
18828 if x != v_1_0.Args[0] {
18831 v_1_0_1 := v_1_0.Args[1]
18832 if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
18835 v_1_1_0 := v_1_1.Args[0]
18836 if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
18839 v_1_1_0_0 := v_1_1_0.Args[0]
18840 if v_1_1_0_0.Op != OpAMD64NEGL {
18843 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
18844 if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
18847 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
18848 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
18851 v.reset(OpAMD64RORQ)
18858 // match: (ORQ (SHRQ lo bits) (SHLQ hi (NEGQ bits)))
18859 // result: (SHRDQ lo hi bits)
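// A right shift of lo OR'd with hi shifted left by the negated count is the funnel shift that
// the SHRDQ (double-precision shift right) instruction performs in one step.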
18861 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18862 if v_0.Op != OpAMD64SHRQ {
18865 bits := v_0.Args[1]
18867 if v_1.Op != OpAMD64SHLQ {
18872 v_1_1 := v_1.Args[1]
18873 if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
18876 v.reset(OpAMD64SHRDQ)
18877 v.AddArg3(lo, hi, bits)
18882 // match: (ORQ (SHLQ lo bits) (SHRQ hi (NEGQ bits)))
18883 // result: (SHLDQ lo hi bits)
18885 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18886 if v_0.Op != OpAMD64SHLQ {
18889 bits := v_0.Args[1]
18891 if v_1.Op != OpAMD64SHRQ {
18896 v_1_1 := v_1.Args[1]
18897 if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
18900 v.reset(OpAMD64SHLDQ)
18901 v.AddArg3(lo, hi, bits)
18906 // match: (ORQ (MOVQconst [c]) (MOVQconst [d]))
18907 // result: (MOVQconst [c|d])
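// Both operands constant: fold the OR at compile time into a single MOVQconst.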
18909 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18910 if v_0.Op != OpAMD64MOVQconst {
18913 c := auxIntToInt64(v_0.AuxInt)
18914 if v_1.Op != OpAMD64MOVQconst {
18917 d := auxIntToInt64(v_1.AuxInt)
18918 v.reset(OpAMD64MOVQconst)
18919 v.AuxInt = int64ToAuxInt(c | d)
18924 // match: (ORQ x x)
18934 // match: (ORQ x0:(MOVBload [i0] {s} p mem) sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)))
18935 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
18936 // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
18938 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18940 if x0.Op != OpAMD64MOVBload {
18943 i0 := auxIntToInt32(x0.AuxInt)
18944 s := auxToSym(x0.Aux)
18948 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 8 {
18952 if x1.Op != OpAMD64MOVBload {
18955 i1 := auxIntToInt32(x1.AuxInt)
18956 if auxToSym(x1.Aux) != s {
18960 if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
18963 b = mergePoint(b, x0, x1)
18964 v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
18966 v0.AuxInt = int32ToAuxInt(i0)
18967 v0.Aux = symToAux(s)
18973 // match: (ORQ x0:(MOVBload [i] {s} p0 mem) sh:(SHLQconst [8] x1:(MOVBload [i] {s} p1 mem)))
18974 // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
18975 // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p0 mem)
18977 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18979 if x0.Op != OpAMD64MOVBload {
18982 i := auxIntToInt32(x0.AuxInt)
18983 s := auxToSym(x0.Aux)
18987 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 8 {
18991 if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
18996 if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
18999 b = mergePoint(b, x0, x1)
19000 v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
19002 v0.AuxInt = int32ToAuxInt(i)
19003 v0.Aux = symToAux(s)
19004 v0.AddArg2(p0, mem)
19009 // match: (ORQ x0:(MOVWload [i0] {s} p mem) sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem)))
19010 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
19011 // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
19013 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19015 if x0.Op != OpAMD64MOVWload {
19018 i0 := auxIntToInt32(x0.AuxInt)
19019 s := auxToSym(x0.Aux)
19023 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 16 {
19027 if x1.Op != OpAMD64MOVWload {
19030 i1 := auxIntToInt32(x1.AuxInt)
19031 if auxToSym(x1.Aux) != s {
19035 if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
19038 b = mergePoint(b, x0, x1)
19039 v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
19041 v0.AuxInt = int32ToAuxInt(i0)
19042 v0.Aux = symToAux(s)
19048 // match: (ORQ x0:(MOVWload [i] {s} p0 mem) sh:(SHLQconst [16] x1:(MOVWload [i] {s} p1 mem)))
19049 // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
19050 // result: @mergePoint(b,x0,x1) (MOVLload [i] {s} p0 mem)
19052 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19054 if x0.Op != OpAMD64MOVWload {
19057 i := auxIntToInt32(x0.AuxInt)
19058 s := auxToSym(x0.Aux)
19062 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 16 {
19066 if x1.Op != OpAMD64MOVWload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
19071 if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
19074 b = mergePoint(b, x0, x1)
19075 v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
19077 v0.AuxInt = int32ToAuxInt(i)
19078 v0.Aux = symToAux(s)
19079 v0.AddArg2(p0, mem)
19084 // match: (ORQ x0:(MOVLload [i0] {s} p mem) sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)))
19085 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
19086 // result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem)
19088 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19090 if x0.Op != OpAMD64MOVLload {
19093 i0 := auxIntToInt32(x0.AuxInt)
19094 s := auxToSym(x0.Aux)
19098 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 {
19102 if x1.Op != OpAMD64MOVLload {
19105 i1 := auxIntToInt32(x1.AuxInt)
19106 if auxToSym(x1.Aux) != s {
19110 if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
19113 b = mergePoint(b, x0, x1)
19114 v0 := b.NewValue0(x1.Pos, OpAMD64MOVQload, typ.UInt64)
19116 v0.AuxInt = int32ToAuxInt(i0)
19117 v0.Aux = symToAux(s)
19123 // match: (ORQ x0:(MOVLload [i] {s} p0 mem) sh:(SHLQconst [32] x1:(MOVLload [i] {s} p1 mem)))
19124 // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 4) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
19125 // result: @mergePoint(b,x0,x1) (MOVQload [i] {s} p0 mem)
19127 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19129 if x0.Op != OpAMD64MOVLload {
19132 i := auxIntToInt32(x0.AuxInt)
19133 s := auxToSym(x0.Aux)
19137 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 {
19141 if x1.Op != OpAMD64MOVLload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
19146 if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 4) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
19149 b = mergePoint(b, x0, x1)
19150 v0 := b.NewValue0(x1.Pos, OpAMD64MOVQload, typ.UInt64)
19152 v0.AuxInt = int32ToAuxInt(i)
19153 v0.Aux = symToAux(s)
19154 v0.AddArg2(p0, mem)
19159 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y))
19160 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
19161 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
19163 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19165 if s1.Op != OpAMD64SHLQconst {
19168 j1 := auxIntToInt8(s1.AuxInt)
19170 if x1.Op != OpAMD64MOVBload {
19173 i1 := auxIntToInt32(x1.AuxInt)
19174 s := auxToSym(x1.Aux)
19178 if or.Op != OpAMD64ORQ {
19184 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
19186 if s0.Op != OpAMD64SHLQconst {
19189 j0 := auxIntToInt8(s0.AuxInt)
19191 if x0.Op != OpAMD64MOVBload {
19194 i0 := auxIntToInt32(x0.AuxInt)
19195 if auxToSym(x0.Aux) != s {
19199 if p != x0.Args[0] || mem != x0.Args[1] {
19203 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
19206 b = mergePoint(b, x0, x1, y)
19207 v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
19209 v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
19210 v1.AuxInt = int8ToAuxInt(j0)
19211 v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
19212 v2.AuxInt = int32ToAuxInt(i0)
19213 v2.Aux = symToAux(s)
19222 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i] {s} p1 mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i] {s} p0 mem)) y))
19223 // cond: j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
19224 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i] {s} p0 mem)) y)
19226 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19228 if s1.Op != OpAMD64SHLQconst {
19231 j1 := auxIntToInt8(s1.AuxInt)
19233 if x1.Op != OpAMD64MOVBload {
19236 i := auxIntToInt32(x1.AuxInt)
19237 s := auxToSym(x1.Aux)
19241 if or.Op != OpAMD64ORQ {
19247 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
19249 if s0.Op != OpAMD64SHLQconst {
19252 j0 := auxIntToInt8(s0.AuxInt)
19254 if x0.Op != OpAMD64MOVBload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
19259 if mem != x0.Args[1] {
19263 if !(j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
19266 b = mergePoint(b, x0, x1, y)
19267 v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
19269 v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
19270 v1.AuxInt = int8ToAuxInt(j0)
19271 v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
19272 v2.AuxInt = int32ToAuxInt(i)
19273 v2.Aux = symToAux(s)
19274 v2.AddArg2(p0, mem)
19282 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y))
19283 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
19284 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
19286 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19288 if s1.Op != OpAMD64SHLQconst {
19291 j1 := auxIntToInt8(s1.AuxInt)
19293 if x1.Op != OpAMD64MOVWload {
19296 i1 := auxIntToInt32(x1.AuxInt)
19297 s := auxToSym(x1.Aux)
19301 if or.Op != OpAMD64ORQ {
19307 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
19309 if s0.Op != OpAMD64SHLQconst {
19312 j0 := auxIntToInt8(s0.AuxInt)
19314 if x0.Op != OpAMD64MOVWload {
19317 i0 := auxIntToInt32(x0.AuxInt)
19318 if auxToSym(x0.Aux) != s {
19322 if p != x0.Args[0] || mem != x0.Args[1] {
19326 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
19329 b = mergePoint(b, x0, x1, y)
19330 v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
19332 v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
19333 v1.AuxInt = int8ToAuxInt(j0)
19334 v2 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
19335 v2.AuxInt = int32ToAuxInt(i0)
19336 v2.Aux = symToAux(s)
19345 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i] {s} p1 mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i] {s} p0 mem)) y))
19346 // cond: j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
19347 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i] {s} p0 mem)) y)
19349 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19351 if s1.Op != OpAMD64SHLQconst {
19354 j1 := auxIntToInt8(s1.AuxInt)
19356 if x1.Op != OpAMD64MOVWload {
19359 i := auxIntToInt32(x1.AuxInt)
19360 s := auxToSym(x1.Aux)
19364 if or.Op != OpAMD64ORQ {
19370 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
19372 if s0.Op != OpAMD64SHLQconst {
19375 j0 := auxIntToInt8(s0.AuxInt)
19377 if x0.Op != OpAMD64MOVWload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
19382 if mem != x0.Args[1] {
19386 if !(j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
19389 b = mergePoint(b, x0, x1, y)
19390 v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
19392 v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
19393 v1.AuxInt = int8ToAuxInt(j0)
19394 v2 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
19395 v2.AuxInt = int32ToAuxInt(i)
19396 v2.Aux = symToAux(s)
19397 v2.AddArg2(p0, mem)
19405 // match: (ORQ x1:(MOVBload [i1] {s} p mem) sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem)))
19406 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
19407 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
19409 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19411 if x1.Op != OpAMD64MOVBload {
19414 i1 := auxIntToInt32(x1.AuxInt)
19415 s := auxToSym(x1.Aux)
19419 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 8 {
19423 if x0.Op != OpAMD64MOVBload {
19426 i0 := auxIntToInt32(x0.AuxInt)
19427 if auxToSym(x0.Aux) != s {
19431 if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
19434 b = mergePoint(b, x0, x1)
19435 v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type)
19437 v0.AuxInt = int8ToAuxInt(8)
19438 v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
19439 v1.AuxInt = int32ToAuxInt(i0)
19440 v1.Aux = symToAux(s)
19447 // match: (ORQ x1:(MOVBload [i] {s} p1 mem) sh:(SHLQconst [8] x0:(MOVBload [i] {s} p0 mem)))
19448 // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
19449 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i] {s} p0 mem))
19451 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19453 if x1.Op != OpAMD64MOVBload {
19456 i := auxIntToInt32(x1.AuxInt)
19457 s := auxToSym(x1.Aux)
19461 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 8 {
19465 if x0.Op != OpAMD64MOVBload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
19470 if mem != x0.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
19473 b = mergePoint(b, x0, x1)
19474 v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type)
19476 v0.AuxInt = int8ToAuxInt(8)
19477 v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
19478 v1.AuxInt = int32ToAuxInt(i)
19479 v1.Aux = symToAux(s)
19480 v1.AddArg2(p0, mem)
19486 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
19487 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh)
19488 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
19490 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19492 if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
19496 if x1.Op != OpAMD64MOVWload {
19499 i1 := auxIntToInt32(x1.AuxInt)
19500 s := auxToSym(x1.Aux)
19504 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 16 {
19508 if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
19512 if x0.Op != OpAMD64MOVWload {
19515 i0 := auxIntToInt32(x0.AuxInt)
19516 if auxToSym(x0.Aux) != s {
19520 if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
19523 b = mergePoint(b, x0, x1)
19524 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type)
19526 v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
19527 v1.AuxInt = int32ToAuxInt(i0)
19528 v1.Aux = symToAux(s)
19535 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWload [i] {s} p1 mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i] {s} p0 mem))))
19536 // cond: x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh)
19537 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i] {s} p0 mem))
19539 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19541 if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
19545 if x1.Op != OpAMD64MOVWload {
19548 i := auxIntToInt32(x1.AuxInt)
19549 s := auxToSym(x1.Aux)
19553 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 16 {
19557 if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
19561 if x0.Op != OpAMD64MOVWload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
19566 if mem != x0.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
19569 b = mergePoint(b, x0, x1)
19570 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type)
19572 v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
19573 v1.AuxInt = int32ToAuxInt(i)
19574 v1.Aux = symToAux(s)
19575 v1.AddArg2(p0, mem)
19581 // match: (ORQ r1:(BSWAPL x1:(MOVLload [i1] {s} p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem))))
19582 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh)
19583 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem))
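// The 64-bit analogue of the BSWAPL rule above: two byte-swapped 32-bit loads from adjacent
// offsets (i1 == i0+4) combine into one MOVQload plus BSWAPQ.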
19585 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19587 if r1.Op != OpAMD64BSWAPL {
19591 if x1.Op != OpAMD64MOVLload {
19594 i1 := auxIntToInt32(x1.AuxInt)
19595 s := auxToSym(x1.Aux)
19599 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 {
19603 if r0.Op != OpAMD64BSWAPL {
19607 if x0.Op != OpAMD64MOVLload {
19610 i0 := auxIntToInt32(x0.AuxInt)
19611 if auxToSym(x0.Aux) != s {
19615 if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
19618 b = mergePoint(b, x0, x1)
19619 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, v.Type)
19621 v1 := b.NewValue0(x0.Pos, OpAMD64MOVQload, typ.UInt64)
19622 v1.AuxInt = int32ToAuxInt(i0)
19623 v1.Aux = symToAux(s)
19630 // match: (ORQ r1:(BSWAPL x1:(MOVLload [i] {s} p1 mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i] {s} p0 mem))))
19631 // cond: x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 4) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh)
19632 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i] {s} p0 mem))
19634 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19636 if r1.Op != OpAMD64BSWAPL {
19640 if x1.Op != OpAMD64MOVLload {
19643 i := auxIntToInt32(x1.AuxInt)
19644 s := auxToSym(x1.Aux)
19648 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 {
19652 if r0.Op != OpAMD64BSWAPL {
19656 if x0.Op != OpAMD64MOVLload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
19661 if mem != x0.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 4) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
19664 b = mergePoint(b, x0, x1)
19665 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, v.Type)
19667 v1 := b.NewValue0(x0.Pos, OpAMD64MOVQload, typ.UInt64)
19668 v1.AuxInt = int32ToAuxInt(i)
19669 v1.Aux = symToAux(s)
19670 v1.AddArg2(p0, mem)
19676 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y))
19677 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
19678 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
19680 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19682 if s0.Op != OpAMD64SHLQconst {
19685 j0 := auxIntToInt8(s0.AuxInt)
19687 if x0.Op != OpAMD64MOVBload {
19690 i0 := auxIntToInt32(x0.AuxInt)
19691 s := auxToSym(x0.Aux)
19695 if or.Op != OpAMD64ORQ {
19701 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
19703 if s1.Op != OpAMD64SHLQconst {
19706 j1 := auxIntToInt8(s1.AuxInt)
19708 if x1.Op != OpAMD64MOVBload {
19711 i1 := auxIntToInt32(x1.AuxInt)
19712 if auxToSym(x1.Aux) != s {
19716 if p != x1.Args[0] || mem != x1.Args[1] {
19720 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
19723 b = mergePoint(b, x0, x1, y)
19724 v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
19726 v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
19727 v1.AuxInt = int8ToAuxInt(j1)
19728 v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
19729 v2.AuxInt = int8ToAuxInt(8)
19730 v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
19731 v3.AuxInt = int32ToAuxInt(i0)
19732 v3.Aux = symToAux(s)
19742 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i] {s} p0 mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i] {s} p1 mem)) y))
19743 // cond: j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
19744 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i] {s} p0 mem))) y)
19746 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19748 if s0.Op != OpAMD64SHLQconst {
19751 j0 := auxIntToInt8(s0.AuxInt)
19753 if x0.Op != OpAMD64MOVBload {
19756 i := auxIntToInt32(x0.AuxInt)
19757 s := auxToSym(x0.Aux)
19761 if or.Op != OpAMD64ORQ {
19767 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
19769 if s1.Op != OpAMD64SHLQconst {
19772 j1 := auxIntToInt8(s1.AuxInt)
19774 if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
19779 if mem != x1.Args[1] {
19783 if !(j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
19786 b = mergePoint(b, x0, x1, y)
19787 v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
19789 v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
19790 v1.AuxInt = int8ToAuxInt(j1)
19791 v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
19792 v2.AuxInt = int8ToAuxInt(8)
19793 v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
19794 v3.AuxInt = int32ToAuxInt(i)
19795 v3.Aux = symToAux(s)
19796 v3.AddArg2(p0, mem)
19805 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y))
19806 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, r0, r1, s0, s1, or)
19807 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
19809 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19811 if s0.Op != OpAMD64SHLQconst {
19814 j0 := auxIntToInt8(s0.AuxInt)
19816 if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
19820 if x0.Op != OpAMD64MOVWload {
19823 i0 := auxIntToInt32(x0.AuxInt)
19824 s := auxToSym(x0.Aux)
19828 if or.Op != OpAMD64ORQ {
19834 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
19836 if s1.Op != OpAMD64SHLQconst {
19839 j1 := auxIntToInt8(s1.AuxInt)
19841 if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
19845 if x1.Op != OpAMD64MOVWload {
19848 i1 := auxIntToInt32(x1.AuxInt)
19849 if auxToSym(x1.Aux) != s {
19853 if p != x1.Args[0] || mem != x1.Args[1] {
19857 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, r0, r1, s0, s1, or)) {
19860 b = mergePoint(b, x0, x1, y)
19861 v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
19863 v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
19864 v1.AuxInt = int8ToAuxInt(j1)
19865 v2 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, typ.UInt32)
19866 v3 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
19867 v3.AuxInt = int32ToAuxInt(i0)
19868 v3.Aux = symToAux(s)
19878 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i] {s} p0 mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i] {s} p1 mem))) y))
19879 // cond: j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, r0, r1, s0, s1, or)
19880 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i] {s} p0 mem))) y)
19882 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19884 if s0.Op != OpAMD64SHLQconst {
19887 j0 := auxIntToInt8(s0.AuxInt)
19889 if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
19893 if x0.Op != OpAMD64MOVWload {
19896 i := auxIntToInt32(x0.AuxInt)
19897 s := auxToSym(x0.Aux)
19901 if or.Op != OpAMD64ORQ {
19907 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
19909 if s1.Op != OpAMD64SHLQconst {
19912 j1 := auxIntToInt8(s1.AuxInt)
19914 if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
19918 if x1.Op != OpAMD64MOVWload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
19923 if mem != x1.Args[1] {
19927 if !(j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, r0, r1, s0, s1, or)) {
19930 b = mergePoint(b, x0, x1, y)
19931 v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
19933 v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
19934 v1.AuxInt = int8ToAuxInt(j1)
19935 v2 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, typ.UInt32)
19936 v3 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
19937 v3.AuxInt = int32ToAuxInt(i)
19938 v3.Aux = symToAux(s)
19939 v3.AddArg2(p0, mem)
19948 // match: (ORQ x l:(MOVQload [off] {sym} ptr mem))
19949 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
19950 // result: (ORQload x [off] {sym} ptr mem)
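// Editorial note (illustrative): when the MOVQload feeding this ORQ has no other
// uses and the merge is otherwise legal (the canMergeLoadClobber check), the load
// is folded into the memory-operand form ORQload, saving a register and an
// instruction.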
19952 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
19955 if l.Op != OpAMD64MOVQload {
19958 off := auxIntToInt32(l.AuxInt)
19959 sym := auxToSym(l.Aux)
19962 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
19965 v.reset(OpAMD64ORQload)
19966 v.AuxInt = int32ToAuxInt(off)
19967 v.Aux = symToAux(sym)
19968 v.AddArg3(x, ptr, mem)
19975 func rewriteValueAMD64_OpAMD64ORQconst(v *Value) bool {
19977 // match: (ORQconst [c] x)
19978 // cond: isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128
19979 // result: (BTSQconst [int8(log32(c))] x)
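// Worked example (editorial): for c == 256, log32(256) == 8, so (ORQconst [256] x)
// becomes (BTSQconst [8] x). Constants below 128 are left as ORs, presumably because
// they already fit a sign-extended 8-bit immediate and BTS would not be smaller.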
19981 c := auxIntToInt32(v.AuxInt)
19983 if !(isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128) {
19986 v.reset(OpAMD64BTSQconst)
19987 v.AuxInt = int8ToAuxInt(int8(log32(c)))
19991 // match: (ORQconst [c] (ORQconst [d] x))
19992 // result: (ORQconst [c | d] x)
19994 c := auxIntToInt32(v.AuxInt)
19995 if v_0.Op != OpAMD64ORQconst {
19998 d := auxIntToInt32(v_0.AuxInt)
20000 v.reset(OpAMD64ORQconst)
20001 v.AuxInt = int32ToAuxInt(c | d)
20005 // match: (ORQconst [c] (BTSQconst [d] x))
20006 // cond: is32Bit(int64(c) | 1<<uint32(d))
20007 // result: (ORQconst [c | 1<<uint32(d)] x)
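// Worked example (editorial): with c == 4 and d == 3, 1<<3 == 8 and 4|8 == 12, so
// (ORQconst [4] (BTSQconst [3] x)) folds to (ORQconst [12] x); the is32Bit check
// keeps the combined constant encodable as a 32-bit immediate.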
20009 c := auxIntToInt32(v.AuxInt)
20010 if v_0.Op != OpAMD64BTSQconst {
20013 d := auxIntToInt8(v_0.AuxInt)
20015 if !(is32Bit(int64(c) | 1<<uint32(d))) {
20018 v.reset(OpAMD64ORQconst)
20019 v.AuxInt = int32ToAuxInt(c | 1<<uint32(d))
20023 // match: (ORQconst [0] x)
20024 // result: x
20026 if auxIntToInt32(v.AuxInt) != 0 {
20033 // match: (ORQconst [-1] _)
20034 // result: (MOVQconst [-1])
20036 if auxIntToInt32(v.AuxInt) != -1 {
20039 v.reset(OpAMD64MOVQconst)
20040 v.AuxInt = int64ToAuxInt(-1)
20043 // match: (ORQconst [c] (MOVQconst [d]))
20044 // result: (MOVQconst [int64(c)|d])
20046 c := auxIntToInt32(v.AuxInt)
20047 if v_0.Op != OpAMD64MOVQconst {
20050 d := auxIntToInt64(v_0.AuxInt)
20051 v.reset(OpAMD64MOVQconst)
20052 v.AuxInt = int64ToAuxInt(int64(c) | d)
20057 func rewriteValueAMD64_OpAMD64ORQconstmodify(v *Value) bool {
20060 // match: (ORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
20061 // cond: ValAndOff(valoff1).canAdd32(off2)
20062 // result: (ORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
20064 valoff1 := auxIntToValAndOff(v.AuxInt)
20065 sym := auxToSym(v.Aux)
20066 if v_0.Op != OpAMD64ADDQconst {
20069 off2 := auxIntToInt32(v_0.AuxInt)
20070 base := v_0.Args[0]
20072 if !(ValAndOff(valoff1).canAdd32(off2)) {
20075 v.reset(OpAMD64ORQconstmodify)
20076 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
20077 v.Aux = symToAux(sym)
20078 v.AddArg2(base, mem)
20081 // match: (ORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
20082 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
20083 // result: (ORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
20085 valoff1 := auxIntToValAndOff(v.AuxInt)
20086 sym1 := auxToSym(v.Aux)
20087 if v_0.Op != OpAMD64LEAQ {
20090 off2 := auxIntToInt32(v_0.AuxInt)
20091 sym2 := auxToSym(v_0.Aux)
20092 base := v_0.Args[0]
20094 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
20097 v.reset(OpAMD64ORQconstmodify)
20098 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
20099 v.Aux = symToAux(mergeSym(sym1, sym2))
20100 v.AddArg2(base, mem)
20105 func rewriteValueAMD64_OpAMD64ORQload(v *Value) bool {
20110 typ := &b.Func.Config.Types
20111 // match: (ORQload [off1] {sym} val (ADDQconst [off2] base) mem)
20112 // cond: is32Bit(int64(off1)+int64(off2))
20113 // result: (ORQload [off1+off2] {sym} val base mem)
20115 off1 := auxIntToInt32(v.AuxInt)
20116 sym := auxToSym(v.Aux)
20118 if v_1.Op != OpAMD64ADDQconst {
20121 off2 := auxIntToInt32(v_1.AuxInt)
20122 base := v_1.Args[0]
20124 if !(is32Bit(int64(off1) + int64(off2))) {
20127 v.reset(OpAMD64ORQload)
20128 v.AuxInt = int32ToAuxInt(off1 + off2)
20129 v.Aux = symToAux(sym)
20130 v.AddArg3(val, base, mem)
20133 // match: (ORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
20134 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
20135 // result: (ORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
20137 off1 := auxIntToInt32(v.AuxInt)
20138 sym1 := auxToSym(v.Aux)
20140 if v_1.Op != OpAMD64LEAQ {
20143 off2 := auxIntToInt32(v_1.AuxInt)
20144 sym2 := auxToSym(v_1.Aux)
20145 base := v_1.Args[0]
20147 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
20150 v.reset(OpAMD64ORQload)
20151 v.AuxInt = int32ToAuxInt(off1 + off2)
20152 v.Aux = symToAux(mergeSym(sym1, sym2))
20153 v.AddArg3(val, base, mem)
20156 // match: (ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
20157 // result: (ORQ x (MOVQf2i y))
20159 off := auxIntToInt32(v.AuxInt)
20160 sym := auxToSym(v.Aux)
20163 if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
20167 if ptr != v_2.Args[0] {
20170 v.reset(OpAMD64ORQ)
20171 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
20178 func rewriteValueAMD64_OpAMD64ORQmodify(v *Value) bool {
20183 // match: (ORQmodify [off] {sym} ptr s:(SHLQ (MOVQconst [1]) <t> x) mem)
20184 // result: (BTSQmodify [off] {sym} ptr (ANDQconst <t> [63] x) mem)
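// Editorial note (illustrative): an OR-to-memory of a dynamically computed single
// bit (1 << x) becomes a bit-test-and-set to memory; the ANDQconst [63] reproduces
// the implicit mod-64 masking of the shift count so the bit index stays in range.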
20186 off := auxIntToInt32(v.AuxInt)
20187 sym := auxToSym(v.Aux)
20190 if s.Op != OpAMD64SHLQ {
20196 if s_0.Op != OpAMD64MOVQconst || auxIntToInt64(s_0.AuxInt) != 1 {
20200 v.reset(OpAMD64BTSQmodify)
20201 v.AuxInt = int32ToAuxInt(off)
20202 v.Aux = symToAux(sym)
20203 v0 := b.NewValue0(v.Pos, OpAMD64ANDQconst, t)
20204 v0.AuxInt = int32ToAuxInt(63)
20206 v.AddArg3(ptr, v0, mem)
20209 // match: (ORQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
20210 // cond: is32Bit(int64(off1)+int64(off2))
20211 // result: (ORQmodify [off1+off2] {sym} base val mem)
20213 off1 := auxIntToInt32(v.AuxInt)
20214 sym := auxToSym(v.Aux)
20215 if v_0.Op != OpAMD64ADDQconst {
20218 off2 := auxIntToInt32(v_0.AuxInt)
20219 base := v_0.Args[0]
20222 if !(is32Bit(int64(off1) + int64(off2))) {
20225 v.reset(OpAMD64ORQmodify)
20226 v.AuxInt = int32ToAuxInt(off1 + off2)
20227 v.Aux = symToAux(sym)
20228 v.AddArg3(base, val, mem)
20231 // match: (ORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
20232 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
20233 // result: (ORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
20235 off1 := auxIntToInt32(v.AuxInt)
20236 sym1 := auxToSym(v.Aux)
20237 if v_0.Op != OpAMD64LEAQ {
20240 off2 := auxIntToInt32(v_0.AuxInt)
20241 sym2 := auxToSym(v_0.Aux)
20242 base := v_0.Args[0]
20245 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
20248 v.reset(OpAMD64ORQmodify)
20249 v.AuxInt = int32ToAuxInt(off1 + off2)
20250 v.Aux = symToAux(mergeSym(sym1, sym2))
20251 v.AddArg3(base, val, mem)
20256 func rewriteValueAMD64_OpAMD64ROLB(v *Value) bool {
20259 // match: (ROLB x (NEGQ y))
20260 // result: (RORB x y)
20263 if v_1.Op != OpAMD64NEGQ {
20267 v.reset(OpAMD64RORB)
20271 // match: (ROLB x (NEGL y))
20272 // result: (RORB x y)
20275 if v_1.Op != OpAMD64NEGL {
20279 v.reset(OpAMD64RORB)
20283 // match: (ROLB x (MOVQconst [c]))
20284 // result: (ROLBconst [int8(c&7) ] x)
20287 if v_1.Op != OpAMD64MOVQconst {
20290 c := auxIntToInt64(v_1.AuxInt)
20291 v.reset(OpAMD64ROLBconst)
20292 v.AuxInt = int8ToAuxInt(int8(c & 7))
20296 // match: (ROLB x (MOVLconst [c]))
20297 // result: (ROLBconst [int8(c&7) ] x)
20300 if v_1.Op != OpAMD64MOVLconst {
20303 c := auxIntToInt32(v_1.AuxInt)
20304 v.reset(OpAMD64ROLBconst)
20305 v.AuxInt = int8ToAuxInt(int8(c & 7))
20311 func rewriteValueAMD64_OpAMD64ROLBconst(v *Value) bool {
20313 // match: (ROLBconst [c] (ROLBconst [d] x))
20314 // result: (ROLBconst [(c+d)& 7] x)
20316 c := auxIntToInt8(v.AuxInt)
20317 if v_0.Op != OpAMD64ROLBconst {
20320 d := auxIntToInt8(v_0.AuxInt)
20322 v.reset(OpAMD64ROLBconst)
20323 v.AuxInt = int8ToAuxInt((c + d) & 7)
20327 // match: (ROLBconst x [0])
20328 // result: x
20330 if auxIntToInt8(v.AuxInt) != 0 {
20339 func rewriteValueAMD64_OpAMD64ROLL(v *Value) bool {
20342 // match: (ROLL x (NEGQ y))
20343 // result: (RORL x y)
20346 if v_1.Op != OpAMD64NEGQ {
20350 v.reset(OpAMD64RORL)
20354 // match: (ROLL x (NEGL y))
20355 // result: (RORL x y)
20358 if v_1.Op != OpAMD64NEGL {
20362 v.reset(OpAMD64RORL)
20366 // match: (ROLL x (MOVQconst [c]))
20367 // result: (ROLLconst [int8(c&31)] x)
20370 if v_1.Op != OpAMD64MOVQconst {
20373 c := auxIntToInt64(v_1.AuxInt)
20374 v.reset(OpAMD64ROLLconst)
20375 v.AuxInt = int8ToAuxInt(int8(c & 31))
20379 // match: (ROLL x (MOVLconst [c]))
20380 // result: (ROLLconst [int8(c&31)] x)
20383 if v_1.Op != OpAMD64MOVLconst {
20386 c := auxIntToInt32(v_1.AuxInt)
20387 v.reset(OpAMD64ROLLconst)
20388 v.AuxInt = int8ToAuxInt(int8(c & 31))
20394 func rewriteValueAMD64_OpAMD64ROLLconst(v *Value) bool {
20396 // match: (ROLLconst [c] (ROLLconst [d] x))
20397 // result: (ROLLconst [(c+d)&31] x)
20399 c := auxIntToInt8(v.AuxInt)
20400 if v_0.Op != OpAMD64ROLLconst {
20403 d := auxIntToInt8(v_0.AuxInt)
20405 v.reset(OpAMD64ROLLconst)
20406 v.AuxInt = int8ToAuxInt((c + d) & 31)
20410 // match: (ROLLconst x [0])
20411 // result: x
20413 if auxIntToInt8(v.AuxInt) != 0 {
20422 func rewriteValueAMD64_OpAMD64ROLQ(v *Value) bool {
20425 // match: (ROLQ x (NEGQ y))
20426 // result: (RORQ x y)
20429 if v_1.Op != OpAMD64NEGQ {
20433 v.reset(OpAMD64RORQ)
20437 // match: (ROLQ x (NEGL y))
20438 // result: (RORQ x y)
20441 if v_1.Op != OpAMD64NEGL {
20445 v.reset(OpAMD64RORQ)
20449 // match: (ROLQ x (MOVQconst [c]))
20450 // result: (ROLQconst [int8(c&63)] x)
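// Worked example (editorial): a 64-bit rotate only depends on the count mod 64, so
// (ROLQ x (MOVQconst [67])) becomes (ROLQconst [3] x).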
20453 if v_1.Op != OpAMD64MOVQconst {
20456 c := auxIntToInt64(v_1.AuxInt)
20457 v.reset(OpAMD64ROLQconst)
20458 v.AuxInt = int8ToAuxInt(int8(c & 63))
20462 // match: (ROLQ x (MOVLconst [c]))
20463 // result: (ROLQconst [int8(c&63)] x)
20466 if v_1.Op != OpAMD64MOVLconst {
20469 c := auxIntToInt32(v_1.AuxInt)
20470 v.reset(OpAMD64ROLQconst)
20471 v.AuxInt = int8ToAuxInt(int8(c & 63))
20477 func rewriteValueAMD64_OpAMD64ROLQconst(v *Value) bool {
20479 // match: (ROLQconst [c] (ROLQconst [d] x))
20480 // result: (ROLQconst [(c+d)&63] x)
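// Worked example (editorial): constant rotates compose by adding counts mod 64,
// e.g. rotating left by 40 and then by 30 is the same as rotating left by 6.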
20482 c := auxIntToInt8(v.AuxInt)
20483 if v_0.Op != OpAMD64ROLQconst {
20486 d := auxIntToInt8(v_0.AuxInt)
20488 v.reset(OpAMD64ROLQconst)
20489 v.AuxInt = int8ToAuxInt((c + d) & 63)
20493 // match: (ROLQconst x [0])
20494 // result: x
20496 if auxIntToInt8(v.AuxInt) != 0 {
20505 func rewriteValueAMD64_OpAMD64ROLW(v *Value) bool {
20508 // match: (ROLW x (NEGQ y))
20509 // result: (RORW x y)
20512 if v_1.Op != OpAMD64NEGQ {
20516 v.reset(OpAMD64RORW)
20520 // match: (ROLW x (NEGL y))
20521 // result: (RORW x y)
20524 if v_1.Op != OpAMD64NEGL {
20528 v.reset(OpAMD64RORW)
20532 // match: (ROLW x (MOVQconst [c]))
20533 // result: (ROLWconst [int8(c&15)] x)
20536 if v_1.Op != OpAMD64MOVQconst {
20539 c := auxIntToInt64(v_1.AuxInt)
20540 v.reset(OpAMD64ROLWconst)
20541 v.AuxInt = int8ToAuxInt(int8(c & 15))
20545 // match: (ROLW x (MOVLconst [c]))
20546 // result: (ROLWconst [int8(c&15)] x)
20549 if v_1.Op != OpAMD64MOVLconst {
20552 c := auxIntToInt32(v_1.AuxInt)
20553 v.reset(OpAMD64ROLWconst)
20554 v.AuxInt = int8ToAuxInt(int8(c & 15))
20560 func rewriteValueAMD64_OpAMD64ROLWconst(v *Value) bool {
20562 // match: (ROLWconst [c] (ROLWconst [d] x))
20563 // result: (ROLWconst [(c+d)&15] x)
20565 c := auxIntToInt8(v.AuxInt)
20566 if v_0.Op != OpAMD64ROLWconst {
20569 d := auxIntToInt8(v_0.AuxInt)
20571 v.reset(OpAMD64ROLWconst)
20572 v.AuxInt = int8ToAuxInt((c + d) & 15)
20576 // match: (ROLWconst x [0])
20577 // result: x
20579 if auxIntToInt8(v.AuxInt) != 0 {
20588 func rewriteValueAMD64_OpAMD64RORB(v *Value) bool {
20591 // match: (RORB x (NEGQ y))
20592 // result: (ROLB x y)
20595 if v_1.Op != OpAMD64NEGQ {
20599 v.reset(OpAMD64ROLB)
20603 // match: (RORB x (NEGL y))
20604 // result: (ROLB x y)
20607 if v_1.Op != OpAMD64NEGL {
20611 v.reset(OpAMD64ROLB)
20615 // match: (RORB x (MOVQconst [c]))
20616 // result: (ROLBconst [int8((-c)&7) ] x)
20619 if v_1.Op != OpAMD64MOVQconst {
20622 c := auxIntToInt64(v_1.AuxInt)
20623 v.reset(OpAMD64ROLBconst)
20624 v.AuxInt = int8ToAuxInt(int8((-c) & 7))
20628 // match: (RORB x (MOVLconst [c]))
20629 // result: (ROLBconst [int8((-c)&7) ] x)
20632 if v_1.Op != OpAMD64MOVLconst {
20635 c := auxIntToInt32(v_1.AuxInt)
20636 v.reset(OpAMD64ROLBconst)
20637 v.AuxInt = int8ToAuxInt(int8((-c) & 7))
20643 func rewriteValueAMD64_OpAMD64RORL(v *Value) bool {
20646 // match: (RORL x (NEGQ y))
20647 // result: (ROLL x y)
20650 if v_1.Op != OpAMD64NEGQ {
20654 v.reset(OpAMD64ROLL)
20658 // match: (RORL x (NEGL y))
20659 // result: (ROLL x y)
20662 if v_1.Op != OpAMD64NEGL {
20666 v.reset(OpAMD64ROLL)
20670 // match: (RORL x (MOVQconst [c]))
20671 // result: (ROLLconst [int8((-c)&31)] x)
20674 if v_1.Op != OpAMD64MOVQconst {
20677 c := auxIntToInt64(v_1.AuxInt)
20678 v.reset(OpAMD64ROLLconst)
20679 v.AuxInt = int8ToAuxInt(int8((-c) & 31))
20683 // match: (RORL x (MOVLconst [c]))
20684 // result: (ROLLconst [int8((-c)&31)] x)
20687 if v_1.Op != OpAMD64MOVLconst {
20690 c := auxIntToInt32(v_1.AuxInt)
20691 v.reset(OpAMD64ROLLconst)
20692 v.AuxInt = int8ToAuxInt(int8((-c) & 31))
20698 func rewriteValueAMD64_OpAMD64RORQ(v *Value) bool {
20701 // match: (RORQ x (NEGQ y))
20702 // result: (ROLQ x y)
20705 if v_1.Op != OpAMD64NEGQ {
20709 v.reset(OpAMD64ROLQ)
20713 // match: (RORQ x (NEGL y))
20714 // result: (ROLQ x y)
20717 if v_1.Op != OpAMD64NEGL {
20721 v.reset(OpAMD64ROLQ)
20725 // match: (RORQ x (MOVQconst [c]))
20726 // result: (ROLQconst [int8((-c)&63)] x)
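// Worked example (editorial): a right rotate is a left rotate by the complementary
// count, so (RORQ x (MOVQconst [1])) becomes (ROLQconst [63] x), since (-1)&63 == 63.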
20729 if v_1.Op != OpAMD64MOVQconst {
20732 c := auxIntToInt64(v_1.AuxInt)
20733 v.reset(OpAMD64ROLQconst)
20734 v.AuxInt = int8ToAuxInt(int8((-c) & 63))
20738 // match: (RORQ x (MOVLconst [c]))
20739 // result: (ROLQconst [int8((-c)&63)] x)
20742 if v_1.Op != OpAMD64MOVLconst {
20745 c := auxIntToInt32(v_1.AuxInt)
20746 v.reset(OpAMD64ROLQconst)
20747 v.AuxInt = int8ToAuxInt(int8((-c) & 63))
20753 func rewriteValueAMD64_OpAMD64RORW(v *Value) bool {
20756 // match: (RORW x (NEGQ y))
20757 // result: (ROLW x y)
20760 if v_1.Op != OpAMD64NEGQ {
20764 v.reset(OpAMD64ROLW)
20768 // match: (RORW x (NEGL y))
20769 // result: (ROLW x y)
20772 if v_1.Op != OpAMD64NEGL {
20776 v.reset(OpAMD64ROLW)
20780 // match: (RORW x (MOVQconst [c]))
20781 // result: (ROLWconst [int8((-c)&15)] x)
20784 if v_1.Op != OpAMD64MOVQconst {
20787 c := auxIntToInt64(v_1.AuxInt)
20788 v.reset(OpAMD64ROLWconst)
20789 v.AuxInt = int8ToAuxInt(int8((-c) & 15))
20793 // match: (RORW x (MOVLconst [c]))
20794 // result: (ROLWconst [int8((-c)&15)] x)
20797 if v_1.Op != OpAMD64MOVLconst {
20800 c := auxIntToInt32(v_1.AuxInt)
20801 v.reset(OpAMD64ROLWconst)
20802 v.AuxInt = int8ToAuxInt(int8((-c) & 15))
20808 func rewriteValueAMD64_OpAMD64SARB(v *Value) bool {
20811 // match: (SARB x (MOVQconst [c]))
20812 // result: (SARBconst [int8(min(int64(c)&31,7))] x)
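// Editorial note (illustrative): an arithmetic right shift of a byte by 7 already
// fills it with copies of the sign bit, so larger counts are clamped: for c == 20,
// min(20&31, 7) == 7 and the rule emits (SARBconst [7] x).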
20815 if v_1.Op != OpAMD64MOVQconst {
20818 c := auxIntToInt64(v_1.AuxInt)
20819 v.reset(OpAMD64SARBconst)
20820 v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 7)))
20824 // match: (SARB x (MOVLconst [c]))
20825 // result: (SARBconst [int8(min(int64(c)&31,7))] x)
20828 if v_1.Op != OpAMD64MOVLconst {
20831 c := auxIntToInt32(v_1.AuxInt)
20832 v.reset(OpAMD64SARBconst)
20833 v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 7)))
20839 func rewriteValueAMD64_OpAMD64SARBconst(v *Value) bool {
20841 // match: (SARBconst x [0])
20842 // result: x
20844 if auxIntToInt8(v.AuxInt) != 0 {
20851 // match: (SARBconst [c] (MOVQconst [d]))
20852 // result: (MOVQconst [int64(int8(d))>>uint64(c)])
20854 c := auxIntToInt8(v.AuxInt)
20855 if v_0.Op != OpAMD64MOVQconst {
20858 d := auxIntToInt64(v_0.AuxInt)
20859 v.reset(OpAMD64MOVQconst)
20860 v.AuxInt = int64ToAuxInt(int64(int8(d)) >> uint64(c))
20865 func rewriteValueAMD64_OpAMD64SARL(v *Value) bool {
20869 // match: (SARL x (MOVQconst [c]))
20870 // result: (SARLconst [int8(c&31)] x)
20873 if v_1.Op != OpAMD64MOVQconst {
20876 c := auxIntToInt64(v_1.AuxInt)
20877 v.reset(OpAMD64SARLconst)
20878 v.AuxInt = int8ToAuxInt(int8(c & 31))
20882 // match: (SARL x (MOVLconst [c]))
20883 // result: (SARLconst [int8(c&31)] x)
20886 if v_1.Op != OpAMD64MOVLconst {
20889 c := auxIntToInt32(v_1.AuxInt)
20890 v.reset(OpAMD64SARLconst)
20891 v.AuxInt = int8ToAuxInt(int8(c & 31))
20895 // match: (SARL x (ADDQconst [c] y))
20896 // cond: c & 31 == 0
20897 // result: (SARL x y)
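// Editorial note (illustrative): the hardware masks a 32-bit shift count to 5 bits,
// so adding a multiple of 32 to it (c & 31 == 0) cannot change the result and the
// ADDQconst is simply dropped.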
20900 if v_1.Op != OpAMD64ADDQconst {
20903 c := auxIntToInt32(v_1.AuxInt)
20908 v.reset(OpAMD64SARL)
20912 // match: (SARL x (NEGQ <t> (ADDQconst [c] y)))
20913 // cond: c & 31 == 0
20914 // result: (SARL x (NEGQ <t> y))
20917 if v_1.Op != OpAMD64NEGQ {
20921 v_1_0 := v_1.Args[0]
20922 if v_1_0.Op != OpAMD64ADDQconst {
20925 c := auxIntToInt32(v_1_0.AuxInt)
20930 v.reset(OpAMD64SARL)
20931 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
20936 // match: (SARL x (ANDQconst [c] y))
20937 // cond: c & 31 == 31
20938 // result: (SARL x y)
20941 if v_1.Op != OpAMD64ANDQconst {
20944 c := auxIntToInt32(v_1.AuxInt)
20949 v.reset(OpAMD64SARL)
20953 // match: (SARL x (NEGQ <t> (ANDQconst [c] y)))
20954 // cond: c & 31 == 31
20955 // result: (SARL x (NEGQ <t> y))
20958 if v_1.Op != OpAMD64NEGQ {
20962 v_1_0 := v_1.Args[0]
20963 if v_1_0.Op != OpAMD64ANDQconst {
20966 c := auxIntToInt32(v_1_0.AuxInt)
20971 v.reset(OpAMD64SARL)
20972 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
20977 // match: (SARL x (ADDLconst [c] y))
20978 // cond: c & 31 == 0
20979 // result: (SARL x y)
20982 if v_1.Op != OpAMD64ADDLconst {
20985 c := auxIntToInt32(v_1.AuxInt)
20990 v.reset(OpAMD64SARL)
20994 // match: (SARL x (NEGL <t> (ADDLconst [c] y)))
20995 // cond: c & 31 == 0
20996 // result: (SARL x (NEGL <t> y))
20999 if v_1.Op != OpAMD64NEGL {
21003 v_1_0 := v_1.Args[0]
21004 if v_1_0.Op != OpAMD64ADDLconst {
21007 c := auxIntToInt32(v_1_0.AuxInt)
21012 v.reset(OpAMD64SARL)
21013 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
21018 // match: (SARL x (ANDLconst [c] y))
21019 // cond: c & 31 == 31
21020 // result: (SARL x y)
21023 if v_1.Op != OpAMD64ANDLconst {
21026 c := auxIntToInt32(v_1.AuxInt)
21031 v.reset(OpAMD64SARL)
21035 // match: (SARL x (NEGL <t> (ANDLconst [c] y)))
21036 // cond: c & 31 == 31
21037 // result: (SARL x (NEGL <t> y))
21040 if v_1.Op != OpAMD64NEGL {
21044 v_1_0 := v_1.Args[0]
21045 if v_1_0.Op != OpAMD64ANDLconst {
21048 c := auxIntToInt32(v_1_0.AuxInt)
21053 v.reset(OpAMD64SARL)
21054 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
21061 func rewriteValueAMD64_OpAMD64SARLconst(v *Value) bool {
21063 // match: (SARLconst x [0])
21064 // result: x
21066 if auxIntToInt8(v.AuxInt) != 0 {
21073 // match: (SARLconst [c] (MOVQconst [d]))
21074 // result: (MOVQconst [int64(int32(d))>>uint64(c)])
21076 c := auxIntToInt8(v.AuxInt)
21077 if v_0.Op != OpAMD64MOVQconst {
21080 d := auxIntToInt64(v_0.AuxInt)
21081 v.reset(OpAMD64MOVQconst)
21082 v.AuxInt = int64ToAuxInt(int64(int32(d)) >> uint64(c))
21087 func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool {
21091 // match: (SARQ x (MOVQconst [c]))
21092 // result: (SARQconst [int8(c&63)] x)
21095 if v_1.Op != OpAMD64MOVQconst {
21098 c := auxIntToInt64(v_1.AuxInt)
21099 v.reset(OpAMD64SARQconst)
21100 v.AuxInt = int8ToAuxInt(int8(c & 63))
21104 // match: (SARQ x (MOVLconst [c]))
21105 // result: (SARQconst [int8(c&63)] x)
21108 if v_1.Op != OpAMD64MOVLconst {
21111 c := auxIntToInt32(v_1.AuxInt)
21112 v.reset(OpAMD64SARQconst)
21113 v.AuxInt = int8ToAuxInt(int8(c & 63))
21117 // match: (SARQ x (ADDQconst [c] y))
21118 // cond: c & 63 == 0
21119 // result: (SARQ x y)
21122 if v_1.Op != OpAMD64ADDQconst {
21125 c := auxIntToInt32(v_1.AuxInt)
21130 v.reset(OpAMD64SARQ)
21134 // match: (SARQ x (NEGQ <t> (ADDQconst [c] y)))
21135 // cond: c & 63 == 0
21136 // result: (SARQ x (NEGQ <t> y))
21139 if v_1.Op != OpAMD64NEGQ {
21143 v_1_0 := v_1.Args[0]
21144 if v_1_0.Op != OpAMD64ADDQconst {
21147 c := auxIntToInt32(v_1_0.AuxInt)
21152 v.reset(OpAMD64SARQ)
21153 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
21158 // match: (SARQ x (ANDQconst [c] y))
21159 // cond: c & 63 == 63
21160 // result: (SARQ x y)
21163 if v_1.Op != OpAMD64ANDQconst {
21166 c := auxIntToInt32(v_1.AuxInt)
21171 v.reset(OpAMD64SARQ)
21175 // match: (SARQ x (NEGQ <t> (ANDQconst [c] y)))
21176 // cond: c & 63 == 63
21177 // result: (SARQ x (NEGQ <t> y))
21180 if v_1.Op != OpAMD64NEGQ {
21184 v_1_0 := v_1.Args[0]
21185 if v_1_0.Op != OpAMD64ANDQconst {
21188 c := auxIntToInt32(v_1_0.AuxInt)
21193 v.reset(OpAMD64SARQ)
21194 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
21199 // match: (SARQ x (ADDLconst [c] y))
21200 // cond: c & 63 == 0
21201 // result: (SARQ x y)
21204 if v_1.Op != OpAMD64ADDLconst {
21207 c := auxIntToInt32(v_1.AuxInt)
21212 v.reset(OpAMD64SARQ)
21216 // match: (SARQ x (NEGL <t> (ADDLconst [c] y)))
21217 // cond: c & 63 == 0
21218 // result: (SARQ x (NEGL <t> y))
21221 if v_1.Op != OpAMD64NEGL {
21225 v_1_0 := v_1.Args[0]
21226 if v_1_0.Op != OpAMD64ADDLconst {
21229 c := auxIntToInt32(v_1_0.AuxInt)
21234 v.reset(OpAMD64SARQ)
21235 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
21240 // match: (SARQ x (ANDLconst [c] y))
21241 // cond: c & 63 == 63
21242 // result: (SARQ x y)
21245 if v_1.Op != OpAMD64ANDLconst {
21248 c := auxIntToInt32(v_1.AuxInt)
21253 v.reset(OpAMD64SARQ)
21257 // match: (SARQ x (NEGL <t> (ANDLconst [c] y)))
21258 // cond: c & 63 == 63
21259 // result: (SARQ x (NEGL <t> y))
21262 if v_1.Op != OpAMD64NEGL {
21266 v_1_0 := v_1.Args[0]
21267 if v_1_0.Op != OpAMD64ANDLconst {
21270 c := auxIntToInt32(v_1_0.AuxInt)
21275 v.reset(OpAMD64SARQ)
21276 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
21283 func rewriteValueAMD64_OpAMD64SARQconst(v *Value) bool {
21285 // match: (SARQconst x [0])
21286 // result: x
21288 if auxIntToInt8(v.AuxInt) != 0 {
21295 // match: (SARQconst [c] (MOVQconst [d]))
21296 // result: (MOVQconst [d>>uint64(c)])
21298 c := auxIntToInt8(v.AuxInt)
21299 if v_0.Op != OpAMD64MOVQconst {
21302 d := auxIntToInt64(v_0.AuxInt)
21303 v.reset(OpAMD64MOVQconst)
21304 v.AuxInt = int64ToAuxInt(d >> uint64(c))
21309 func rewriteValueAMD64_OpAMD64SARW(v *Value) bool {
21312 // match: (SARW x (MOVQconst [c]))
21313 // result: (SARWconst [int8(min(int64(c)&31,15))] x)
21316 if v_1.Op != OpAMD64MOVQconst {
21319 c := auxIntToInt64(v_1.AuxInt)
21320 v.reset(OpAMD64SARWconst)
21321 v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 15)))
21325 // match: (SARW x (MOVLconst [c]))
21326 // result: (SARWconst [int8(min(int64(c)&31,15))] x)
21329 if v_1.Op != OpAMD64MOVLconst {
21332 c := auxIntToInt32(v_1.AuxInt)
21333 v.reset(OpAMD64SARWconst)
21334 v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 15)))
21340 func rewriteValueAMD64_OpAMD64SARWconst(v *Value) bool {
21342 // match: (SARWconst x [0])
21343 // result: x
21345 if auxIntToInt8(v.AuxInt) != 0 {
21352 // match: (SARWconst [c] (MOVQconst [d]))
21353 // result: (MOVQconst [int64(int16(d))>>uint64(c)])
21355 c := auxIntToInt8(v.AuxInt)
21356 if v_0.Op != OpAMD64MOVQconst {
21359 d := auxIntToInt64(v_0.AuxInt)
21360 v.reset(OpAMD64MOVQconst)
21361 v.AuxInt = int64ToAuxInt(int64(int16(d)) >> uint64(c))
21366 func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value) bool {
21368 // match: (SBBLcarrymask (FlagEQ))
21369 // result: (MOVLconst [0])
21371 if v_0.Op != OpAMD64FlagEQ {
21374 v.reset(OpAMD64MOVLconst)
21375 v.AuxInt = int32ToAuxInt(0)
21378 // match: (SBBLcarrymask (FlagLT_ULT))
21379 // result: (MOVLconst [-1])
21381 if v_0.Op != OpAMD64FlagLT_ULT {
21384 v.reset(OpAMD64MOVLconst)
21385 v.AuxInt = int32ToAuxInt(-1)
21388 // match: (SBBLcarrymask (FlagLT_UGT))
21389 // result: (MOVLconst [0])
21391 if v_0.Op != OpAMD64FlagLT_UGT {
21394 v.reset(OpAMD64MOVLconst)
21395 v.AuxInt = int32ToAuxInt(0)
21398 // match: (SBBLcarrymask (FlagGT_ULT))
21399 // result: (MOVLconst [-1])
21401 if v_0.Op != OpAMD64FlagGT_ULT {
21404 v.reset(OpAMD64MOVLconst)
21405 v.AuxInt = int32ToAuxInt(-1)
21408 // match: (SBBLcarrymask (FlagGT_UGT))
21409 // result: (MOVLconst [0])
21411 if v_0.Op != OpAMD64FlagGT_UGT {
21414 v.reset(OpAMD64MOVLconst)
21415 v.AuxInt = int32ToAuxInt(0)
21420 func rewriteValueAMD64_OpAMD64SBBQ(v *Value) bool {
21424 // match: (SBBQ x (MOVQconst [c]) borrow)
21425 // cond: is32Bit(c)
21426 // result: (SBBQconst x [int32(c)] borrow)
21429 if v_1.Op != OpAMD64MOVQconst {
21432 c := auxIntToInt64(v_1.AuxInt)
21437 v.reset(OpAMD64SBBQconst)
21438 v.AuxInt = int32ToAuxInt(int32(c))
21439 v.AddArg2(x, borrow)
21442 // match: (SBBQ x y (FlagEQ))
21443 // result: (SUBQborrow x y)
21447 if v_2.Op != OpAMD64FlagEQ {
21450 v.reset(OpAMD64SUBQborrow)
21456 func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value) bool {
21458 // match: (SBBQcarrymask (FlagEQ))
21459 // result: (MOVQconst [0])
21461 if v_0.Op != OpAMD64FlagEQ {
21464 v.reset(OpAMD64MOVQconst)
21465 v.AuxInt = int64ToAuxInt(0)
21468 // match: (SBBQcarrymask (FlagLT_ULT))
21469 // result: (MOVQconst [-1])
21471 if v_0.Op != OpAMD64FlagLT_ULT {
21474 v.reset(OpAMD64MOVQconst)
21475 v.AuxInt = int64ToAuxInt(-1)
21478 // match: (SBBQcarrymask (FlagLT_UGT))
21479 // result: (MOVQconst [0])
21481 if v_0.Op != OpAMD64FlagLT_UGT {
21484 v.reset(OpAMD64MOVQconst)
21485 v.AuxInt = int64ToAuxInt(0)
21488 // match: (SBBQcarrymask (FlagGT_ULT))
21489 // result: (MOVQconst [-1])
21491 if v_0.Op != OpAMD64FlagGT_ULT {
21494 v.reset(OpAMD64MOVQconst)
21495 v.AuxInt = int64ToAuxInt(-1)
21498 // match: (SBBQcarrymask (FlagGT_UGT))
21499 // result: (MOVQconst [0])
21501 if v_0.Op != OpAMD64FlagGT_UGT {
21504 v.reset(OpAMD64MOVQconst)
21505 v.AuxInt = int64ToAuxInt(0)
21510 func rewriteValueAMD64_OpAMD64SBBQconst(v *Value) bool {
21513 // match: (SBBQconst x [c] (FlagEQ))
21514 // result: (SUBQconstborrow x [c])
21516 c := auxIntToInt32(v.AuxInt)
21518 if v_1.Op != OpAMD64FlagEQ {
21521 v.reset(OpAMD64SUBQconstborrow)
21522 v.AuxInt = int32ToAuxInt(c)
21528 func rewriteValueAMD64_OpAMD64SETA(v *Value) bool {
21530 // match: (SETA (InvertFlags x))
21531 // result: (SETB x)
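// Editorial note (illustrative): InvertFlags marks flags computed with the comparison
// operands swapped, so unsigned "above" of the swapped flags is "below" of the
// originals; the SET* rules below apply the same swap to each condition.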
21533 if v_0.Op != OpAMD64InvertFlags {
21537 v.reset(OpAMD64SETB)
21541 // match: (SETA (FlagEQ))
21542 // result: (MOVLconst [0])
21544 if v_0.Op != OpAMD64FlagEQ {
21547 v.reset(OpAMD64MOVLconst)
21548 v.AuxInt = int32ToAuxInt(0)
21551 // match: (SETA (FlagLT_ULT))
21552 // result: (MOVLconst [0])
21554 if v_0.Op != OpAMD64FlagLT_ULT {
21557 v.reset(OpAMD64MOVLconst)
21558 v.AuxInt = int32ToAuxInt(0)
21561 // match: (SETA (FlagLT_UGT))
21562 // result: (MOVLconst [1])
21564 if v_0.Op != OpAMD64FlagLT_UGT {
21567 v.reset(OpAMD64MOVLconst)
21568 v.AuxInt = int32ToAuxInt(1)
21571 // match: (SETA (FlagGT_ULT))
21572 // result: (MOVLconst [0])
21574 if v_0.Op != OpAMD64FlagGT_ULT {
21577 v.reset(OpAMD64MOVLconst)
21578 v.AuxInt = int32ToAuxInt(0)
21581 // match: (SETA (FlagGT_UGT))
21582 // result: (MOVLconst [1])
21584 if v_0.Op != OpAMD64FlagGT_UGT {
21587 v.reset(OpAMD64MOVLconst)
21588 v.AuxInt = int32ToAuxInt(1)
21593 func rewriteValueAMD64_OpAMD64SETAE(v *Value) bool {
21595 // match: (SETAE (TESTQ x x))
21596 // result: (ConstBool [true])
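// Editorial note (illustrative): TEST of a value against itself always clears the
// carry flag, and SETAE is exactly "carry clear", so the result is constant true;
// the matching SETB rules further down fold the same pattern to false.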
21598 if v_0.Op != OpAMD64TESTQ {
21602 if x != v_0.Args[0] {
21605 v.reset(OpConstBool)
21606 v.AuxInt = boolToAuxInt(true)
21609 // match: (SETAE (TESTL x x))
21610 // result: (ConstBool [true])
21612 if v_0.Op != OpAMD64TESTL {
21616 if x != v_0.Args[0] {
21619 v.reset(OpConstBool)
21620 v.AuxInt = boolToAuxInt(true)
21623 // match: (SETAE (TESTW x x))
21624 // result: (ConstBool [true])
21626 if v_0.Op != OpAMD64TESTW {
21630 if x != v_0.Args[0] {
21633 v.reset(OpConstBool)
21634 v.AuxInt = boolToAuxInt(true)
21637 // match: (SETAE (TESTB x x))
21638 // result: (ConstBool [true])
21640 if v_0.Op != OpAMD64TESTB {
21644 if x != v_0.Args[0] {
21647 v.reset(OpConstBool)
21648 v.AuxInt = boolToAuxInt(true)
21651 // match: (SETAE (InvertFlags x))
21652 // result: (SETBE x)
21654 if v_0.Op != OpAMD64InvertFlags {
21658 v.reset(OpAMD64SETBE)
21662 // match: (SETAE (FlagEQ))
21663 // result: (MOVLconst [1])
21665 if v_0.Op != OpAMD64FlagEQ {
21668 v.reset(OpAMD64MOVLconst)
21669 v.AuxInt = int32ToAuxInt(1)
21672 // match: (SETAE (FlagLT_ULT))
21673 // result: (MOVLconst [0])
21675 if v_0.Op != OpAMD64FlagLT_ULT {
21678 v.reset(OpAMD64MOVLconst)
21679 v.AuxInt = int32ToAuxInt(0)
21682 // match: (SETAE (FlagLT_UGT))
21683 // result: (MOVLconst [1])
21685 if v_0.Op != OpAMD64FlagLT_UGT {
21688 v.reset(OpAMD64MOVLconst)
21689 v.AuxInt = int32ToAuxInt(1)
21692 // match: (SETAE (FlagGT_ULT))
21693 // result: (MOVLconst [0])
21695 if v_0.Op != OpAMD64FlagGT_ULT {
21698 v.reset(OpAMD64MOVLconst)
21699 v.AuxInt = int32ToAuxInt(0)
21702 // match: (SETAE (FlagGT_UGT))
21703 // result: (MOVLconst [1])
21705 if v_0.Op != OpAMD64FlagGT_UGT {
21708 v.reset(OpAMD64MOVLconst)
21709 v.AuxInt = int32ToAuxInt(1)
21714 func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool {
21719 typ := &b.Func.Config.Types
21720 // match: (SETAEstore [off] {sym} ptr (InvertFlags x) mem)
21721 // result: (SETBEstore [off] {sym} ptr x mem)
21723 off := auxIntToInt32(v.AuxInt)
21724 sym := auxToSym(v.Aux)
21726 if v_1.Op != OpAMD64InvertFlags {
21731 v.reset(OpAMD64SETBEstore)
21732 v.AuxInt = int32ToAuxInt(off)
21733 v.Aux = symToAux(sym)
21734 v.AddArg3(ptr, x, mem)
21737 // match: (SETAEstore [off1] {sym} (ADDQconst [off2] base) val mem)
21738 // cond: is32Bit(int64(off1)+int64(off2))
21739 // result: (SETAEstore [off1+off2] {sym} base val mem)
21741 off1 := auxIntToInt32(v.AuxInt)
21742 sym := auxToSym(v.Aux)
21743 if v_0.Op != OpAMD64ADDQconst {
21746 off2 := auxIntToInt32(v_0.AuxInt)
21747 base := v_0.Args[0]
21750 if !(is32Bit(int64(off1) + int64(off2))) {
21753 v.reset(OpAMD64SETAEstore)
21754 v.AuxInt = int32ToAuxInt(off1 + off2)
21755 v.Aux = symToAux(sym)
21756 v.AddArg3(base, val, mem)
21759 // match: (SETAEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
21760 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
21761 // result: (SETAEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
21763 off1 := auxIntToInt32(v.AuxInt)
21764 sym1 := auxToSym(v.Aux)
21765 if v_0.Op != OpAMD64LEAQ {
21768 off2 := auxIntToInt32(v_0.AuxInt)
21769 sym2 := auxToSym(v_0.Aux)
21770 base := v_0.Args[0]
21773 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
21776 v.reset(OpAMD64SETAEstore)
21777 v.AuxInt = int32ToAuxInt(off1 + off2)
21778 v.Aux = symToAux(mergeSym(sym1, sym2))
21779 v.AddArg3(base, val, mem)
21782 // match: (SETAEstore [off] {sym} ptr (FlagEQ) mem)
21783 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
21785 off := auxIntToInt32(v.AuxInt)
21786 sym := auxToSym(v.Aux)
21788 if v_1.Op != OpAMD64FlagEQ {
21792 v.reset(OpAMD64MOVBstore)
21793 v.AuxInt = int32ToAuxInt(off)
21794 v.Aux = symToAux(sym)
21795 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
21796 v0.AuxInt = int32ToAuxInt(1)
21797 v.AddArg3(ptr, v0, mem)
21800 // match: (SETAEstore [off] {sym} ptr (FlagLT_ULT) mem)
21801 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
21803 off := auxIntToInt32(v.AuxInt)
21804 sym := auxToSym(v.Aux)
21806 if v_1.Op != OpAMD64FlagLT_ULT {
21810 v.reset(OpAMD64MOVBstore)
21811 v.AuxInt = int32ToAuxInt(off)
21812 v.Aux = symToAux(sym)
21813 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
21814 v0.AuxInt = int32ToAuxInt(0)
21815 v.AddArg3(ptr, v0, mem)
21818 // match: (SETAEstore [off] {sym} ptr (FlagLT_UGT) mem)
21819 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
21821 off := auxIntToInt32(v.AuxInt)
21822 sym := auxToSym(v.Aux)
21824 if v_1.Op != OpAMD64FlagLT_UGT {
21828 v.reset(OpAMD64MOVBstore)
21829 v.AuxInt = int32ToAuxInt(off)
21830 v.Aux = symToAux(sym)
21831 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
21832 v0.AuxInt = int32ToAuxInt(1)
21833 v.AddArg3(ptr, v0, mem)
21836 // match: (SETAEstore [off] {sym} ptr (FlagGT_ULT) mem)
21837 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
21839 off := auxIntToInt32(v.AuxInt)
21840 sym := auxToSym(v.Aux)
21842 if v_1.Op != OpAMD64FlagGT_ULT {
21846 v.reset(OpAMD64MOVBstore)
21847 v.AuxInt = int32ToAuxInt(off)
21848 v.Aux = symToAux(sym)
21849 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
21850 v0.AuxInt = int32ToAuxInt(0)
21851 v.AddArg3(ptr, v0, mem)
21854 // match: (SETAEstore [off] {sym} ptr (FlagGT_UGT) mem)
21855 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
21857 off := auxIntToInt32(v.AuxInt)
21858 sym := auxToSym(v.Aux)
21860 if v_1.Op != OpAMD64FlagGT_UGT {
21864 v.reset(OpAMD64MOVBstore)
21865 v.AuxInt = int32ToAuxInt(off)
21866 v.Aux = symToAux(sym)
21867 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
21868 v0.AuxInt = int32ToAuxInt(1)
21869 v.AddArg3(ptr, v0, mem)
21874 func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool {
21879 typ := &b.Func.Config.Types
21880 // match: (SETAstore [off] {sym} ptr (InvertFlags x) mem)
21881 // result: (SETBstore [off] {sym} ptr x mem)
21883 off := auxIntToInt32(v.AuxInt)
21884 sym := auxToSym(v.Aux)
21886 if v_1.Op != OpAMD64InvertFlags {
21891 v.reset(OpAMD64SETBstore)
21892 v.AuxInt = int32ToAuxInt(off)
21893 v.Aux = symToAux(sym)
21894 v.AddArg3(ptr, x, mem)
21897 // match: (SETAstore [off1] {sym} (ADDQconst [off2] base) val mem)
21898 // cond: is32Bit(int64(off1)+int64(off2))
21899 // result: (SETAstore [off1+off2] {sym} base val mem)
21901 off1 := auxIntToInt32(v.AuxInt)
21902 sym := auxToSym(v.Aux)
21903 if v_0.Op != OpAMD64ADDQconst {
21906 off2 := auxIntToInt32(v_0.AuxInt)
21907 base := v_0.Args[0]
21910 if !(is32Bit(int64(off1) + int64(off2))) {
21913 v.reset(OpAMD64SETAstore)
21914 v.AuxInt = int32ToAuxInt(off1 + off2)
21915 v.Aux = symToAux(sym)
21916 v.AddArg3(base, val, mem)
21919 // match: (SETAstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
21920 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
21921 // result: (SETAstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
21923 off1 := auxIntToInt32(v.AuxInt)
21924 sym1 := auxToSym(v.Aux)
21925 if v_0.Op != OpAMD64LEAQ {
21928 off2 := auxIntToInt32(v_0.AuxInt)
21929 sym2 := auxToSym(v_0.Aux)
21930 base := v_0.Args[0]
21933 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
21936 v.reset(OpAMD64SETAstore)
21937 v.AuxInt = int32ToAuxInt(off1 + off2)
21938 v.Aux = symToAux(mergeSym(sym1, sym2))
21939 v.AddArg3(base, val, mem)
21942 // match: (SETAstore [off] {sym} ptr (FlagEQ) mem)
21943 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
21945 off := auxIntToInt32(v.AuxInt)
21946 sym := auxToSym(v.Aux)
21948 if v_1.Op != OpAMD64FlagEQ {
21952 v.reset(OpAMD64MOVBstore)
21953 v.AuxInt = int32ToAuxInt(off)
21954 v.Aux = symToAux(sym)
21955 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
21956 v0.AuxInt = int32ToAuxInt(0)
21957 v.AddArg3(ptr, v0, mem)
21960 // match: (SETAstore [off] {sym} ptr (FlagLT_ULT) mem)
21961 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
21963 off := auxIntToInt32(v.AuxInt)
21964 sym := auxToSym(v.Aux)
21966 if v_1.Op != OpAMD64FlagLT_ULT {
21970 v.reset(OpAMD64MOVBstore)
21971 v.AuxInt = int32ToAuxInt(off)
21972 v.Aux = symToAux(sym)
21973 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
21974 v0.AuxInt = int32ToAuxInt(0)
21975 v.AddArg3(ptr, v0, mem)
21978 // match: (SETAstore [off] {sym} ptr (FlagLT_UGT) mem)
21979 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
21981 off := auxIntToInt32(v.AuxInt)
21982 sym := auxToSym(v.Aux)
21984 if v_1.Op != OpAMD64FlagLT_UGT {
21988 v.reset(OpAMD64MOVBstore)
21989 v.AuxInt = int32ToAuxInt(off)
21990 v.Aux = symToAux(sym)
21991 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
21992 v0.AuxInt = int32ToAuxInt(1)
21993 v.AddArg3(ptr, v0, mem)
21996 // match: (SETAstore [off] {sym} ptr (FlagGT_ULT) mem)
21997 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
21999 off := auxIntToInt32(v.AuxInt)
22000 sym := auxToSym(v.Aux)
22002 if v_1.Op != OpAMD64FlagGT_ULT {
22006 v.reset(OpAMD64MOVBstore)
22007 v.AuxInt = int32ToAuxInt(off)
22008 v.Aux = symToAux(sym)
22009 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
22010 v0.AuxInt = int32ToAuxInt(0)
22011 v.AddArg3(ptr, v0, mem)
22014 // match: (SETAstore [off] {sym} ptr (FlagGT_UGT) mem)
22015 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
22017 off := auxIntToInt32(v.AuxInt)
22018 sym := auxToSym(v.Aux)
22020 if v_1.Op != OpAMD64FlagGT_UGT {
22024 v.reset(OpAMD64MOVBstore)
22025 v.AuxInt = int32ToAuxInt(off)
22026 v.Aux = symToAux(sym)
22027 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
22028 v0.AuxInt = int32ToAuxInt(1)
22029 v.AddArg3(ptr, v0, mem)
22034 func rewriteValueAMD64_OpAMD64SETB(v *Value) bool {
22036 // match: (SETB (TESTQ x x))
22037 // result: (ConstBool [false])
22039 if v_0.Op != OpAMD64TESTQ {
22043 if x != v_0.Args[0] {
22046 v.reset(OpConstBool)
22047 v.AuxInt = boolToAuxInt(false)
22050 // match: (SETB (TESTL x x))
22051 // result: (ConstBool [false])
22053 if v_0.Op != OpAMD64TESTL {
22057 if x != v_0.Args[0] {
22060 v.reset(OpConstBool)
22061 v.AuxInt = boolToAuxInt(false)
22064 // match: (SETB (TESTW x x))
22065 // result: (ConstBool [false])
22067 if v_0.Op != OpAMD64TESTW {
22071 if x != v_0.Args[0] {
22074 v.reset(OpConstBool)
22075 v.AuxInt = boolToAuxInt(false)
22078 // match: (SETB (TESTB x x))
22079 // result: (ConstBool [false])
22081 if v_0.Op != OpAMD64TESTB {
22085 if x != v_0.Args[0] {
22088 v.reset(OpConstBool)
22089 v.AuxInt = boolToAuxInt(false)
22092 // match: (SETB (BTLconst [0] x))
22093 // result: (ANDLconst [1] x)
22095 if v_0.Op != OpAMD64BTLconst || auxIntToInt8(v_0.AuxInt) != 0 {
22099 v.reset(OpAMD64ANDLconst)
22100 v.AuxInt = int32ToAuxInt(1)
22104 // match: (SETB (BTQconst [0] x))
22105 // result: (ANDQconst [1] x)
22107 if v_0.Op != OpAMD64BTQconst || auxIntToInt8(v_0.AuxInt) != 0 {
22111 v.reset(OpAMD64ANDQconst)
22112 v.AuxInt = int32ToAuxInt(1)
22116 // match: (SETB (InvertFlags x))
22117 // result: (SETA x)
22119 if v_0.Op != OpAMD64InvertFlags {
22123 v.reset(OpAMD64SETA)
22127 // match: (SETB (FlagEQ))
22128 // result: (MOVLconst [0])
22130 if v_0.Op != OpAMD64FlagEQ {
22133 v.reset(OpAMD64MOVLconst)
22134 v.AuxInt = int32ToAuxInt(0)
22137 // match: (SETB (FlagLT_ULT))
22138 // result: (MOVLconst [1])
22140 if v_0.Op != OpAMD64FlagLT_ULT {
22143 v.reset(OpAMD64MOVLconst)
22144 v.AuxInt = int32ToAuxInt(1)
22147 // match: (SETB (FlagLT_UGT))
22148 // result: (MOVLconst [0])
22150 if v_0.Op != OpAMD64FlagLT_UGT {
22153 v.reset(OpAMD64MOVLconst)
22154 v.AuxInt = int32ToAuxInt(0)
22157 // match: (SETB (FlagGT_ULT))
22158 // result: (MOVLconst [1])
22160 if v_0.Op != OpAMD64FlagGT_ULT {
22163 v.reset(OpAMD64MOVLconst)
22164 v.AuxInt = int32ToAuxInt(1)
22167 // match: (SETB (FlagGT_UGT))
22168 // result: (MOVLconst [0])
22170 if v_0.Op != OpAMD64FlagGT_UGT {
22173 v.reset(OpAMD64MOVLconst)
22174 v.AuxInt = int32ToAuxInt(0)
22179 func rewriteValueAMD64_OpAMD64SETBE(v *Value) bool {
22181 // match: (SETBE (InvertFlags x))
22182 // result: (SETAE x)
22184 if v_0.Op != OpAMD64InvertFlags {
22188 v.reset(OpAMD64SETAE)
22192 // match: (SETBE (FlagEQ))
22193 // result: (MOVLconst [1])
22195 if v_0.Op != OpAMD64FlagEQ {
22198 v.reset(OpAMD64MOVLconst)
22199 v.AuxInt = int32ToAuxInt(1)
22202 // match: (SETBE (FlagLT_ULT))
22203 // result: (MOVLconst [1])
22205 if v_0.Op != OpAMD64FlagLT_ULT {
22208 v.reset(OpAMD64MOVLconst)
22209 v.AuxInt = int32ToAuxInt(1)
22212 // match: (SETBE (FlagLT_UGT))
22213 // result: (MOVLconst [0])
22215 if v_0.Op != OpAMD64FlagLT_UGT {
22218 v.reset(OpAMD64MOVLconst)
22219 v.AuxInt = int32ToAuxInt(0)
22222 // match: (SETBE (FlagGT_ULT))
22223 // result: (MOVLconst [1])
22225 if v_0.Op != OpAMD64FlagGT_ULT {
22228 v.reset(OpAMD64MOVLconst)
22229 v.AuxInt = int32ToAuxInt(1)
22232 // match: (SETBE (FlagGT_UGT))
22233 // result: (MOVLconst [0])
22235 if v_0.Op != OpAMD64FlagGT_UGT {
22238 v.reset(OpAMD64MOVLconst)
22239 v.AuxInt = int32ToAuxInt(0)
22244 func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool {
22249 typ := &b.Func.Config.Types
22250 // match: (SETBEstore [off] {sym} ptr (InvertFlags x) mem)
22251 // result: (SETAEstore [off] {sym} ptr x mem)
22253 off := auxIntToInt32(v.AuxInt)
22254 sym := auxToSym(v.Aux)
22256 if v_1.Op != OpAMD64InvertFlags {
22261 v.reset(OpAMD64SETAEstore)
22262 v.AuxInt = int32ToAuxInt(off)
22263 v.Aux = symToAux(sym)
22264 v.AddArg3(ptr, x, mem)
22267 // match: (SETBEstore [off1] {sym} (ADDQconst [off2] base) val mem)
22268 // cond: is32Bit(int64(off1)+int64(off2))
22269 // result: (SETBEstore [off1+off2] {sym} base val mem)
22271 off1 := auxIntToInt32(v.AuxInt)
22272 sym := auxToSym(v.Aux)
22273 if v_0.Op != OpAMD64ADDQconst {
22276 off2 := auxIntToInt32(v_0.AuxInt)
22277 base := v_0.Args[0]
22280 if !(is32Bit(int64(off1) + int64(off2))) {
22283 v.reset(OpAMD64SETBEstore)
22284 v.AuxInt = int32ToAuxInt(off1 + off2)
22285 v.Aux = symToAux(sym)
22286 v.AddArg3(base, val, mem)
22289 // match: (SETBEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
22290 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
22291 // result: (SETBEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
22293 off1 := auxIntToInt32(v.AuxInt)
22294 sym1 := auxToSym(v.Aux)
22295 if v_0.Op != OpAMD64LEAQ {
22298 off2 := auxIntToInt32(v_0.AuxInt)
22299 sym2 := auxToSym(v_0.Aux)
22300 base := v_0.Args[0]
22303 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
22306 v.reset(OpAMD64SETBEstore)
22307 v.AuxInt = int32ToAuxInt(off1 + off2)
22308 v.Aux = symToAux(mergeSym(sym1, sym2))
22309 v.AddArg3(base, val, mem)
22312 // match: (SETBEstore [off] {sym} ptr (FlagEQ) mem)
22313 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
22315 off := auxIntToInt32(v.AuxInt)
22316 sym := auxToSym(v.Aux)
22318 if v_1.Op != OpAMD64FlagEQ {
22322 v.reset(OpAMD64MOVBstore)
22323 v.AuxInt = int32ToAuxInt(off)
22324 v.Aux = symToAux(sym)
22325 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
22326 v0.AuxInt = int32ToAuxInt(1)
22327 v.AddArg3(ptr, v0, mem)
22330 // match: (SETBEstore [off] {sym} ptr (FlagLT_ULT) mem)
22331 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
22333 off := auxIntToInt32(v.AuxInt)
22334 sym := auxToSym(v.Aux)
22336 if v_1.Op != OpAMD64FlagLT_ULT {
22340 v.reset(OpAMD64MOVBstore)
22341 v.AuxInt = int32ToAuxInt(off)
22342 v.Aux = symToAux(sym)
22343 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
22344 v0.AuxInt = int32ToAuxInt(1)
22345 v.AddArg3(ptr, v0, mem)
22348 // match: (SETBEstore [off] {sym} ptr (FlagLT_UGT) mem)
22349 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
22351 off := auxIntToInt32(v.AuxInt)
22352 sym := auxToSym(v.Aux)
22354 if v_1.Op != OpAMD64FlagLT_UGT {
22358 v.reset(OpAMD64MOVBstore)
22359 v.AuxInt = int32ToAuxInt(off)
22360 v.Aux = symToAux(sym)
22361 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
22362 v0.AuxInt = int32ToAuxInt(0)
22363 v.AddArg3(ptr, v0, mem)
22366 // match: (SETBEstore [off] {sym} ptr (FlagGT_ULT) mem)
22367 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
22369 off := auxIntToInt32(v.AuxInt)
22370 sym := auxToSym(v.Aux)
22372 if v_1.Op != OpAMD64FlagGT_ULT {
22376 v.reset(OpAMD64MOVBstore)
22377 v.AuxInt = int32ToAuxInt(off)
22378 v.Aux = symToAux(sym)
22379 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
22380 v0.AuxInt = int32ToAuxInt(1)
22381 v.AddArg3(ptr, v0, mem)
22384 // match: (SETBEstore [off] {sym} ptr (FlagGT_UGT) mem)
22385 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
22387 off := auxIntToInt32(v.AuxInt)
22388 sym := auxToSym(v.Aux)
22390 if v_1.Op != OpAMD64FlagGT_UGT {
22394 v.reset(OpAMD64MOVBstore)
22395 v.AuxInt = int32ToAuxInt(off)
22396 v.Aux = symToAux(sym)
22397 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
22398 v0.AuxInt = int32ToAuxInt(0)
22399 v.AddArg3(ptr, v0, mem)
22404 func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool {
22409 typ := &b.Func.Config.Types
22410 // match: (SETBstore [off] {sym} ptr (InvertFlags x) mem)
22411 // result: (SETAstore [off] {sym} ptr x mem)
22413 off := auxIntToInt32(v.AuxInt)
22414 sym := auxToSym(v.Aux)
22416 if v_1.Op != OpAMD64InvertFlags {
22421 v.reset(OpAMD64SETAstore)
22422 v.AuxInt = int32ToAuxInt(off)
22423 v.Aux = symToAux(sym)
22424 v.AddArg3(ptr, x, mem)
22427 // match: (SETBstore [off1] {sym} (ADDQconst [off2] base) val mem)
22428 // cond: is32Bit(int64(off1)+int64(off2))
22429 // result: (SETBstore [off1+off2] {sym} base val mem)
22431 off1 := auxIntToInt32(v.AuxInt)
22432 sym := auxToSym(v.Aux)
22433 if v_0.Op != OpAMD64ADDQconst {
22436 off2 := auxIntToInt32(v_0.AuxInt)
22437 base := v_0.Args[0]
22440 if !(is32Bit(int64(off1) + int64(off2))) {
22443 v.reset(OpAMD64SETBstore)
22444 v.AuxInt = int32ToAuxInt(off1 + off2)
22445 v.Aux = symToAux(sym)
22446 v.AddArg3(base, val, mem)
22449 // match: (SETBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
22450 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
22451 // result: (SETBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
22453 off1 := auxIntToInt32(v.AuxInt)
22454 sym1 := auxToSym(v.Aux)
22455 if v_0.Op != OpAMD64LEAQ {
22458 off2 := auxIntToInt32(v_0.AuxInt)
22459 sym2 := auxToSym(v_0.Aux)
22460 base := v_0.Args[0]
22463 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
22466 v.reset(OpAMD64SETBstore)
22467 v.AuxInt = int32ToAuxInt(off1 + off2)
22468 v.Aux = symToAux(mergeSym(sym1, sym2))
22469 v.AddArg3(base, val, mem)
22472 // match: (SETBstore [off] {sym} ptr (FlagEQ) mem)
22473 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
22475 off := auxIntToInt32(v.AuxInt)
22476 sym := auxToSym(v.Aux)
22478 if v_1.Op != OpAMD64FlagEQ {
22482 v.reset(OpAMD64MOVBstore)
22483 v.AuxInt = int32ToAuxInt(off)
22484 v.Aux = symToAux(sym)
22485 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
22486 v0.AuxInt = int32ToAuxInt(0)
22487 v.AddArg3(ptr, v0, mem)
22490 // match: (SETBstore [off] {sym} ptr (FlagLT_ULT) mem)
22491 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
22493 off := auxIntToInt32(v.AuxInt)
22494 sym := auxToSym(v.Aux)
22496 if v_1.Op != OpAMD64FlagLT_ULT {
22500 v.reset(OpAMD64MOVBstore)
22501 v.AuxInt = int32ToAuxInt(off)
22502 v.Aux = symToAux(sym)
22503 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
22504 v0.AuxInt = int32ToAuxInt(1)
22505 v.AddArg3(ptr, v0, mem)
22508 // match: (SETBstore [off] {sym} ptr (FlagLT_UGT) mem)
22509 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
22511 off := auxIntToInt32(v.AuxInt)
22512 sym := auxToSym(v.Aux)
22514 if v_1.Op != OpAMD64FlagLT_UGT {
22518 v.reset(OpAMD64MOVBstore)
22519 v.AuxInt = int32ToAuxInt(off)
22520 v.Aux = symToAux(sym)
22521 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
22522 v0.AuxInt = int32ToAuxInt(0)
22523 v.AddArg3(ptr, v0, mem)
22526 // match: (SETBstore [off] {sym} ptr (FlagGT_ULT) mem)
22527 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
22529 off := auxIntToInt32(v.AuxInt)
22530 sym := auxToSym(v.Aux)
22532 if v_1.Op != OpAMD64FlagGT_ULT {
22536 v.reset(OpAMD64MOVBstore)
22537 v.AuxInt = int32ToAuxInt(off)
22538 v.Aux = symToAux(sym)
22539 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
22540 v0.AuxInt = int32ToAuxInt(1)
22541 v.AddArg3(ptr, v0, mem)
22544 // match: (SETBstore [off] {sym} ptr (FlagGT_UGT) mem)
22545 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
22547 off := auxIntToInt32(v.AuxInt)
22548 sym := auxToSym(v.Aux)
22550 if v_1.Op != OpAMD64FlagGT_UGT {
22554 v.reset(OpAMD64MOVBstore)
22555 v.AuxInt = int32ToAuxInt(off)
22556 v.Aux = symToAux(sym)
22557 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
22558 v0.AuxInt = int32ToAuxInt(0)
22559 v.AddArg3(ptr, v0, mem)
22564 func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool {
22567 // match: (SETEQ (TESTL (SHLL (MOVLconst [1]) x) y))
22568 // result: (SETAE (BTL x y))
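// Editorial note (illustrative): a test of the form (1<<x) & y == 0 is lowered to a
// bit-test: BTL copies bit x of y into the carry flag, and SETAE ("carry clear")
// then answers whether that bit was zero.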
22570 if v_0.Op != OpAMD64TESTL {
22574 v_0_0 := v_0.Args[0]
22575 v_0_1 := v_0.Args[1]
22576 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
22577 if v_0_0.Op != OpAMD64SHLL {
22581 v_0_0_0 := v_0_0.Args[0]
22582 if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
22586 v.reset(OpAMD64SETAE)
22587 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
22594 // match: (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
22595 // result: (SETAE (BTQ x y))
22597 if v_0.Op != OpAMD64TESTQ {
22601 v_0_0 := v_0.Args[0]
22602 v_0_1 := v_0.Args[1]
22603 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
22604 if v_0_0.Op != OpAMD64SHLQ {
22608 v_0_0_0 := v_0_0.Args[0]
22609 if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
22613 v.reset(OpAMD64SETAE)
22614 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
22621 // match: (SETEQ (TESTLconst [c] x))
22622 // cond: isUint32PowerOfTwo(int64(c))
22623 // result: (SETAE (BTLconst [int8(log32(c))] x))
22625 if v_0.Op != OpAMD64TESTLconst {
22628 c := auxIntToInt32(v_0.AuxInt)
22630 if !(isUint32PowerOfTwo(int64(c))) {
22633 v.reset(OpAMD64SETAE)
22634 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
22635 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
22640 // match: (SETEQ (TESTQconst [c] x))
22641 // cond: isUint64PowerOfTwo(int64(c))
22642 // result: (SETAE (BTQconst [int8(log32(c))] x))
22644 if v_0.Op != OpAMD64TESTQconst {
22647 c := auxIntToInt32(v_0.AuxInt)
22649 if !(isUint64PowerOfTwo(int64(c))) {
22652 v.reset(OpAMD64SETAE)
22653 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
22654 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
22659 // match: (SETEQ (TESTQ (MOVQconst [c]) x))
22660 // cond: isUint64PowerOfTwo(c)
22661 // result: (SETAE (BTQconst [int8(log64(c))] x))
22663 if v_0.Op != OpAMD64TESTQ {
22667 v_0_0 := v_0.Args[0]
22668 v_0_1 := v_0.Args[1]
22669 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
22670 if v_0_0.Op != OpAMD64MOVQconst {
22673 c := auxIntToInt64(v_0_0.AuxInt)
22675 if !(isUint64PowerOfTwo(c)) {
22678 v.reset(OpAMD64SETAE)
22679 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
22680 v0.AuxInt = int8ToAuxInt(int8(log64(c)))
22687 // match: (SETEQ (CMPLconst [1] s:(ANDLconst [1] _)))
22688 // result: (SETNE (CMPLconst [0] s))
22690 if v_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_0.AuxInt) != 1 {
22694 if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
22697 v.reset(OpAMD64SETNE)
22698 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
22699 v0.AuxInt = int32ToAuxInt(0)
22704 // match: (SETEQ (CMPQconst [1] s:(ANDQconst [1] _)))
22705 // result: (SETNE (CMPQconst [0] s))
22707 if v_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_0.AuxInt) != 1 {
22711 if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
22714 v.reset(OpAMD64SETNE)
22715 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
22716 v0.AuxInt = int32ToAuxInt(0)
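// The two CMPconst rules above exploit that s, being the result of an
// ANDconst [1], can only be 0 or 1: on that range "s == 1" is the same
// question as "s != 0", so the compare against 1 is flipped to SETNE of a
// compare against zero, a form that later rules can usually simplify
// further. Sketch of the equivalence (illustrative only):
//
//	func eqOne(s uint32) bool  { return s == 1 } // before, with s known to be 0 or 1
//	func neZero(s uint32) bool { return s != 0 } // after: identical whenever s is 0 or 1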
22721 // match: (SETEQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
22722 // cond: z1==z2
22723 // result: (SETAE (BTQconst [63] x))
22725 if v_0.Op != OpAMD64TESTQ {
22729 v_0_0 := v_0.Args[0]
22730 v_0_1 := v_0.Args[1]
22731 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
22733 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
22737 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
22745 v.reset(OpAMD64SETAE)
22746 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
22747 v0.AuxInt = int8ToAuxInt(63)
22754 // match: (SETEQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
22755 // cond: z1==z2
22756 // result: (SETAE (BTQconst [31] x))
22758 if v_0.Op != OpAMD64TESTL {
22762 v_0_0 := v_0.Args[0]
22763 v_0_1 := v_0.Args[1]
22764 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
22766 if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
22770 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
22778 v.reset(OpAMD64SETAE)
22779 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
22780 v0.AuxInt = int8ToAuxInt(31)
22787 // match: (SETEQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
22788 // cond: z1==z2
22789 // result: (SETAE (BTQconst [0] x))
22791 if v_0.Op != OpAMD64TESTQ {
22795 v_0_0 := v_0.Args[0]
22796 v_0_1 := v_0.Args[1]
22797 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
22799 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
22803 if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
22811 v.reset(OpAMD64SETAE)
22812 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
22813 v0.AuxInt = int8ToAuxInt(0)
22820 // match: (SETEQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
22821 // cond: z1==z2
22822 // result: (SETAE (BTLconst [0] x))
22824 if v_0.Op != OpAMD64TESTL {
22828 v_0_0 := v_0.Args[0]
22829 v_0_1 := v_0.Args[1]
22830 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
22832 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
22836 if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
22844 v.reset(OpAMD64SETAE)
22845 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
22846 v0.AuxInt = int8ToAuxInt(0)
22853 // match: (SETEQ (TESTQ z1:(SHRQconst [63] x) z2))
22854 // cond: z1==z2
22855 // result: (SETAE (BTQconst [63] x))
22857 if v_0.Op != OpAMD64TESTQ {
22861 v_0_0 := v_0.Args[0]
22862 v_0_1 := v_0.Args[1]
22863 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
22865 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
22873 v.reset(OpAMD64SETAE)
22874 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
22875 v0.AuxInt = int8ToAuxInt(63)
22882 // match: (SETEQ (TESTL z1:(SHRLconst [31] x) z2))
22883 // cond: z1==z2
22884 // result: (SETAE (BTLconst [31] x))
22886 if v_0.Op != OpAMD64TESTL {
22890 v_0_0 := v_0.Args[0]
22891 v_0_1 := v_0.Args[1]
22892 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
22894 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
22902 v.reset(OpAMD64SETAE)
22903 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
22904 v0.AuxInt = int8ToAuxInt(31)
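// The six z1/z2 rules above all recognize a value that is a shifted copy of
// one bit of x: SHLQconst [63] (SHRQconst [63] x) keeps only bit 63 in
// place, SHRQconst [63] (SHLQconst [63] x) keeps only bit 0, and a bare
// SHRQconst [63] x is bit 63 moved down to bit 0 (likewise the [31] variants
// for 32-bit values). Each rule fires only when z1 == z2, so the TEST is of
// that one-bit value against itself and the whole comparison collapses to a
// BTQconst/BTLconst of the original x plus SETAE. Illustrative identities:
//
//	func bit63InPlace(x uint64) uint64 { return (x >> 63) << 63 } // only bit 63 survives
//	func bit0Only(x uint64) uint64     { return (x << 63) >> 63 } // only bit 0 survives
//	func bit63AtZero(x uint64) uint64  { return x >> 63 }         // bit 63, now at bit 0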
22911 // match: (SETEQ (InvertFlags x))
22912 // result: (SETEQ x)
22914 if v_0.Op != OpAMD64InvertFlags {
22918 v.reset(OpAMD64SETEQ)
22922 // match: (SETEQ (FlagEQ))
22923 // result: (MOVLconst [1])
22925 if v_0.Op != OpAMD64FlagEQ {
22928 v.reset(OpAMD64MOVLconst)
22929 v.AuxInt = int32ToAuxInt(1)
22932 // match: (SETEQ (FlagLT_ULT))
22933 // result: (MOVLconst [0])
22935 if v_0.Op != OpAMD64FlagLT_ULT {
22938 v.reset(OpAMD64MOVLconst)
22939 v.AuxInt = int32ToAuxInt(0)
22942 // match: (SETEQ (FlagLT_UGT))
22943 // result: (MOVLconst [0])
22945 if v_0.Op != OpAMD64FlagLT_UGT {
22948 v.reset(OpAMD64MOVLconst)
22949 v.AuxInt = int32ToAuxInt(0)
22952 // match: (SETEQ (FlagGT_ULT))
22953 // result: (MOVLconst [0])
22955 if v_0.Op != OpAMD64FlagGT_ULT {
22958 v.reset(OpAMD64MOVLconst)
22959 v.AuxInt = int32ToAuxInt(0)
22962 // match: (SETEQ (FlagGT_UGT))
22963 // result: (MOVLconst [0])
22965 if v_0.Op != OpAMD64FlagGT_UGT {
22968 v.reset(OpAMD64MOVLconst)
22969 v.AuxInt = int32ToAuxInt(0)
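// rewriteValueAMD64_OpAMD64SETEQstore applies the same simplifications as
// the SETEQ rules above, but on the fused set-and-store form, so matches
// rewrite to SETAEstore/SETNEstore and, for known flags, to a plain
// MOVBstore. It also folds address arithmetic: an ADDQconst or LEAQ base is
// absorbed into the store's offset when the combined displacement still fits
// in 32 bits (is32Bit) and the symbols can be merged (canMergeSym/mergeSym).
// Rough sketch of the offset check (illustrative, not the package's helper):
//
//	func fitsInInt32(off1, off2 int32) bool {
//		sum := int64(off1) + int64(off2)
//		return sum == int64(int32(sum)) // still a legal 32-bit displacement
//	}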
22974 func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
22979 typ := &b.Func.Config.Types
22980 // match: (SETEQstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
22981 // result: (SETAEstore [off] {sym} ptr (BTL x y) mem)
22983 off := auxIntToInt32(v.AuxInt)
22984 sym := auxToSym(v.Aux)
22986 if v_1.Op != OpAMD64TESTL {
22990 v_1_0 := v_1.Args[0]
22991 v_1_1 := v_1.Args[1]
22992 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
22993 if v_1_0.Op != OpAMD64SHLL {
22997 v_1_0_0 := v_1_0.Args[0]
22998 if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 {
23003 v.reset(OpAMD64SETAEstore)
23004 v.AuxInt = int32ToAuxInt(off)
23005 v.Aux = symToAux(sym)
23006 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
23008 v.AddArg3(ptr, v0, mem)
23013 // match: (SETEQstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
23014 // result: (SETAEstore [off] {sym} ptr (BTQ x y) mem)
23016 off := auxIntToInt32(v.AuxInt)
23017 sym := auxToSym(v.Aux)
23019 if v_1.Op != OpAMD64TESTQ {
23023 v_1_0 := v_1.Args[0]
23024 v_1_1 := v_1.Args[1]
23025 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
23026 if v_1_0.Op != OpAMD64SHLQ {
23030 v_1_0_0 := v_1_0.Args[0]
23031 if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 {
23036 v.reset(OpAMD64SETAEstore)
23037 v.AuxInt = int32ToAuxInt(off)
23038 v.Aux = symToAux(sym)
23039 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
23041 v.AddArg3(ptr, v0, mem)
23046 // match: (SETEQstore [off] {sym} ptr (TESTLconst [c] x) mem)
23047 // cond: isUint32PowerOfTwo(int64(c))
23048 // result: (SETAEstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
23050 off := auxIntToInt32(v.AuxInt)
23051 sym := auxToSym(v.Aux)
23053 if v_1.Op != OpAMD64TESTLconst {
23056 c := auxIntToInt32(v_1.AuxInt)
23059 if !(isUint32PowerOfTwo(int64(c))) {
23062 v.reset(OpAMD64SETAEstore)
23063 v.AuxInt = int32ToAuxInt(off)
23064 v.Aux = symToAux(sym)
23065 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
23066 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
23068 v.AddArg3(ptr, v0, mem)
23071 // match: (SETEQstore [off] {sym} ptr (TESTQconst [c] x) mem)
23072 // cond: isUint64PowerOfTwo(int64(c))
23073 // result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem)
23075 off := auxIntToInt32(v.AuxInt)
23076 sym := auxToSym(v.Aux)
23078 if v_1.Op != OpAMD64TESTQconst {
23081 c := auxIntToInt32(v_1.AuxInt)
23084 if !(isUint64PowerOfTwo(int64(c))) {
23087 v.reset(OpAMD64SETAEstore)
23088 v.AuxInt = int32ToAuxInt(off)
23089 v.Aux = symToAux(sym)
23090 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
23091 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
23093 v.AddArg3(ptr, v0, mem)
23096 // match: (SETEQstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
23097 // cond: isUint64PowerOfTwo(c)
23098 // result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
23100 off := auxIntToInt32(v.AuxInt)
23101 sym := auxToSym(v.Aux)
23103 if v_1.Op != OpAMD64TESTQ {
23107 v_1_0 := v_1.Args[0]
23108 v_1_1 := v_1.Args[1]
23109 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
23110 if v_1_0.Op != OpAMD64MOVQconst {
23113 c := auxIntToInt64(v_1_0.AuxInt)
23116 if !(isUint64PowerOfTwo(c)) {
23119 v.reset(OpAMD64SETAEstore)
23120 v.AuxInt = int32ToAuxInt(off)
23121 v.Aux = symToAux(sym)
23122 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
23123 v0.AuxInt = int8ToAuxInt(int8(log64(c)))
23125 v.AddArg3(ptr, v0, mem)
23130 // match: (SETEQstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem)
23131 // result: (SETNEstore [off] {sym} ptr (CMPLconst [0] s) mem)
23133 off := auxIntToInt32(v.AuxInt)
23134 sym := auxToSym(v.Aux)
23136 if v_1.Op != OpAMD64CMPLconst || auxIntToInt32(v_1.AuxInt) != 1 {
23140 if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
23144 v.reset(OpAMD64SETNEstore)
23145 v.AuxInt = int32ToAuxInt(off)
23146 v.Aux = symToAux(sym)
23147 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
23148 v0.AuxInt = int32ToAuxInt(0)
23150 v.AddArg3(ptr, v0, mem)
23153 // match: (SETEQstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem)
23154 // result: (SETNEstore [off] {sym} ptr (CMPQconst [0] s) mem)
23156 off := auxIntToInt32(v.AuxInt)
23157 sym := auxToSym(v.Aux)
23159 if v_1.Op != OpAMD64CMPQconst || auxIntToInt32(v_1.AuxInt) != 1 {
23163 if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
23167 v.reset(OpAMD64SETNEstore)
23168 v.AuxInt = int32ToAuxInt(off)
23169 v.Aux = symToAux(sym)
23170 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
23171 v0.AuxInt = int32ToAuxInt(0)
23173 v.AddArg3(ptr, v0, mem)
23176 // match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
23177 // cond: z1==z2
23178 // result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
23180 off := auxIntToInt32(v.AuxInt)
23181 sym := auxToSym(v.Aux)
23183 if v_1.Op != OpAMD64TESTQ {
23187 v_1_0 := v_1.Args[0]
23188 v_1_1 := v_1.Args[1]
23189 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
23191 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
23195 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
23204 v.reset(OpAMD64SETAEstore)
23205 v.AuxInt = int32ToAuxInt(off)
23206 v.Aux = symToAux(sym)
23207 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
23208 v0.AuxInt = int8ToAuxInt(63)
23210 v.AddArg3(ptr, v0, mem)
23215 // match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem)
23216 // cond: z1==z2
23217 // result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
23219 off := auxIntToInt32(v.AuxInt)
23220 sym := auxToSym(v.Aux)
23222 if v_1.Op != OpAMD64TESTL {
23226 v_1_0 := v_1.Args[0]
23227 v_1_1 := v_1.Args[1]
23228 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
23230 if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
23234 if z1_0.Op != OpAMD64SHRLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
23243 v.reset(OpAMD64SETAEstore)
23244 v.AuxInt = int32ToAuxInt(off)
23245 v.Aux = symToAux(sym)
23246 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
23247 v0.AuxInt = int8ToAuxInt(31)
23249 v.AddArg3(ptr, v0, mem)
23254 // match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem)
23255 // cond: z1==z2
23256 // result: (SETAEstore [off] {sym} ptr (BTQconst [0] x) mem)
23258 off := auxIntToInt32(v.AuxInt)
23259 sym := auxToSym(v.Aux)
23261 if v_1.Op != OpAMD64TESTQ {
23265 v_1_0 := v_1.Args[0]
23266 v_1_1 := v_1.Args[1]
23267 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
23269 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
23273 if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
23282 v.reset(OpAMD64SETAEstore)
23283 v.AuxInt = int32ToAuxInt(off)
23284 v.Aux = symToAux(sym)
23285 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
23286 v0.AuxInt = int8ToAuxInt(0)
23288 v.AddArg3(ptr, v0, mem)
23293 // match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem)
23294 // cond: z1==z2
23295 // result: (SETAEstore [off] {sym} ptr (BTLconst [0] x) mem)
23297 off := auxIntToInt32(v.AuxInt)
23298 sym := auxToSym(v.Aux)
23300 if v_1.Op != OpAMD64TESTL {
23304 v_1_0 := v_1.Args[0]
23305 v_1_1 := v_1.Args[1]
23306 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
23308 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
23312 if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
23321 v.reset(OpAMD64SETAEstore)
23322 v.AuxInt = int32ToAuxInt(off)
23323 v.Aux = symToAux(sym)
23324 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
23325 v0.AuxInt = int8ToAuxInt(0)
23327 v.AddArg3(ptr, v0, mem)
23332 // match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem)
23333 // cond: z1==z2
23334 // result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
23336 off := auxIntToInt32(v.AuxInt)
23337 sym := auxToSym(v.Aux)
23339 if v_1.Op != OpAMD64TESTQ {
23343 v_1_0 := v_1.Args[0]
23344 v_1_1 := v_1.Args[1]
23345 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
23347 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
23356 v.reset(OpAMD64SETAEstore)
23357 v.AuxInt = int32ToAuxInt(off)
23358 v.Aux = symToAux(sym)
23359 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
23360 v0.AuxInt = int8ToAuxInt(63)
23362 v.AddArg3(ptr, v0, mem)
23367 // match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem)
23368 // cond: z1==z2
23369 // result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
23371 off := auxIntToInt32(v.AuxInt)
23372 sym := auxToSym(v.Aux)
23374 if v_1.Op != OpAMD64TESTL {
23378 v_1_0 := v_1.Args[0]
23379 v_1_1 := v_1.Args[1]
23380 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
23382 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
23391 v.reset(OpAMD64SETAEstore)
23392 v.AuxInt = int32ToAuxInt(off)
23393 v.Aux = symToAux(sym)
23394 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
23395 v0.AuxInt = int8ToAuxInt(31)
23397 v.AddArg3(ptr, v0, mem)
23402 // match: (SETEQstore [off] {sym} ptr (InvertFlags x) mem)
23403 // result: (SETEQstore [off] {sym} ptr x mem)
23405 off := auxIntToInt32(v.AuxInt)
23406 sym := auxToSym(v.Aux)
23408 if v_1.Op != OpAMD64InvertFlags {
23413 v.reset(OpAMD64SETEQstore)
23414 v.AuxInt = int32ToAuxInt(off)
23415 v.Aux = symToAux(sym)
23416 v.AddArg3(ptr, x, mem)
23419 // match: (SETEQstore [off1] {sym} (ADDQconst [off2] base) val mem)
23420 // cond: is32Bit(int64(off1)+int64(off2))
23421 // result: (SETEQstore [off1+off2] {sym} base val mem)
23423 off1 := auxIntToInt32(v.AuxInt)
23424 sym := auxToSym(v.Aux)
23425 if v_0.Op != OpAMD64ADDQconst {
23428 off2 := auxIntToInt32(v_0.AuxInt)
23429 base := v_0.Args[0]
23432 if !(is32Bit(int64(off1) + int64(off2))) {
23435 v.reset(OpAMD64SETEQstore)
23436 v.AuxInt = int32ToAuxInt(off1 + off2)
23437 v.Aux = symToAux(sym)
23438 v.AddArg3(base, val, mem)
23441 // match: (SETEQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
23442 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
23443 // result: (SETEQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
23445 off1 := auxIntToInt32(v.AuxInt)
23446 sym1 := auxToSym(v.Aux)
23447 if v_0.Op != OpAMD64LEAQ {
23450 off2 := auxIntToInt32(v_0.AuxInt)
23451 sym2 := auxToSym(v_0.Aux)
23452 base := v_0.Args[0]
23455 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
23458 v.reset(OpAMD64SETEQstore)
23459 v.AuxInt = int32ToAuxInt(off1 + off2)
23460 v.Aux = symToAux(mergeSym(sym1, sym2))
23461 v.AddArg3(base, val, mem)
23464 // match: (SETEQstore [off] {sym} ptr (FlagEQ) mem)
23465 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
23467 off := auxIntToInt32(v.AuxInt)
23468 sym := auxToSym(v.Aux)
23470 if v_1.Op != OpAMD64FlagEQ {
23474 v.reset(OpAMD64MOVBstore)
23475 v.AuxInt = int32ToAuxInt(off)
23476 v.Aux = symToAux(sym)
23477 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
23478 v0.AuxInt = int32ToAuxInt(1)
23479 v.AddArg3(ptr, v0, mem)
23482 // match: (SETEQstore [off] {sym} ptr (FlagLT_ULT) mem)
23483 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
23485 off := auxIntToInt32(v.AuxInt)
23486 sym := auxToSym(v.Aux)
23488 if v_1.Op != OpAMD64FlagLT_ULT {
23492 v.reset(OpAMD64MOVBstore)
23493 v.AuxInt = int32ToAuxInt(off)
23494 v.Aux = symToAux(sym)
23495 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
23496 v0.AuxInt = int32ToAuxInt(0)
23497 v.AddArg3(ptr, v0, mem)
23500 // match: (SETEQstore [off] {sym} ptr (FlagLT_UGT) mem)
23501 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
23503 off := auxIntToInt32(v.AuxInt)
23504 sym := auxToSym(v.Aux)
23506 if v_1.Op != OpAMD64FlagLT_UGT {
23510 v.reset(OpAMD64MOVBstore)
23511 v.AuxInt = int32ToAuxInt(off)
23512 v.Aux = symToAux(sym)
23513 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
23514 v0.AuxInt = int32ToAuxInt(0)
23515 v.AddArg3(ptr, v0, mem)
23518 // match: (SETEQstore [off] {sym} ptr (FlagGT_ULT) mem)
23519 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
23521 off := auxIntToInt32(v.AuxInt)
23522 sym := auxToSym(v.Aux)
23524 if v_1.Op != OpAMD64FlagGT_ULT {
23528 v.reset(OpAMD64MOVBstore)
23529 v.AuxInt = int32ToAuxInt(off)
23530 v.Aux = symToAux(sym)
23531 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
23532 v0.AuxInt = int32ToAuxInt(0)
23533 v.AddArg3(ptr, v0, mem)
23536 // match: (SETEQstore [off] {sym} ptr (FlagGT_UGT) mem)
23537 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
23539 off := auxIntToInt32(v.AuxInt)
23540 sym := auxToSym(v.Aux)
23542 if v_1.Op != OpAMD64FlagGT_UGT {
23546 v.reset(OpAMD64MOVBstore)
23547 v.AuxInt = int32ToAuxInt(off)
23548 v.Aux = symToAux(sym)
23549 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
23550 v0.AuxInt = int32ToAuxInt(0)
23551 v.AddArg3(ptr, v0, mem)
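// rewriteValueAMD64_OpAMD64SETG and the SETcc helpers that follow fold
// InvertFlags by mirroring the condition: InvertFlags marks a flags value
// whose comparison had its operands swapped, so "greater" under the swapped
// compare is "less" under the original (SETG -> SETL, SETGE -> SETLE, and
// vice versa), while SETEQ/SETNE pass through unchanged because equality is
// symmetric. Sketch of the mirroring (hypothetical names, illustrative
// only):
//
//	func mirrorCond(cond string) string {
//		switch cond {
//		case "G":
//			return "L"
//		case "GE":
//			return "LE"
//		case "L":
//			return "G"
//		case "LE":
//			return "GE"
//		}
//		return cond // EQ, NE are their own mirrors
//	}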
23556 func rewriteValueAMD64_OpAMD64SETG(v *Value) bool {
23558 // match: (SETG (InvertFlags x))
23559 // result: (SETL x)
23561 if v_0.Op != OpAMD64InvertFlags {
23565 v.reset(OpAMD64SETL)
23569 // match: (SETG (FlagEQ))
23570 // result: (MOVLconst [0])
23572 if v_0.Op != OpAMD64FlagEQ {
23575 v.reset(OpAMD64MOVLconst)
23576 v.AuxInt = int32ToAuxInt(0)
23579 // match: (SETG (FlagLT_ULT))
23580 // result: (MOVLconst [0])
23582 if v_0.Op != OpAMD64FlagLT_ULT {
23585 v.reset(OpAMD64MOVLconst)
23586 v.AuxInt = int32ToAuxInt(0)
23589 // match: (SETG (FlagLT_UGT))
23590 // result: (MOVLconst [0])
23592 if v_0.Op != OpAMD64FlagLT_UGT {
23595 v.reset(OpAMD64MOVLconst)
23596 v.AuxInt = int32ToAuxInt(0)
23599 // match: (SETG (FlagGT_ULT))
23600 // result: (MOVLconst [1])
23602 if v_0.Op != OpAMD64FlagGT_ULT {
23605 v.reset(OpAMD64MOVLconst)
23606 v.AuxInt = int32ToAuxInt(1)
23609 // match: (SETG (FlagGT_UGT))
23610 // result: (MOVLconst [1])
23612 if v_0.Op != OpAMD64FlagGT_UGT {
23615 v.reset(OpAMD64MOVLconst)
23616 v.AuxInt = int32ToAuxInt(1)
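// The constant flag states record the signed and the unsigned outcome of a
// comparison independently, which is why each signed result comes in a _ULT
// and a _UGT flavor: comparing 1 with -1, for instance, is signed-greater
// but unsigned-less. The constant-folding rules read only the half they care
// about; SETG above returns 1 for both FlagGT_* states regardless of the
// unsigned half, and SETGE below additionally accepts FlagEQ. Illustrative
// sketch:
//
//	type outcome struct{ eq, slt bool } // equal, signed less-than
//
//	func setg(o outcome) uint8 {
//		if !o.eq && !o.slt { // strictly greater in the signed order
//			return 1
//		}
//		return 0
//	}
//
//	func setge(o outcome) uint8 {
//		if !o.slt { // greater or equal
//			return 1
//		}
//		return 0
//	}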
23621 func rewriteValueAMD64_OpAMD64SETGE(v *Value) bool {
23623 // match: (SETGE (InvertFlags x))
23624 // result: (SETLE x)
23626 if v_0.Op != OpAMD64InvertFlags {
23630 v.reset(OpAMD64SETLE)
23634 // match: (SETGE (FlagEQ))
23635 // result: (MOVLconst [1])
23637 if v_0.Op != OpAMD64FlagEQ {
23640 v.reset(OpAMD64MOVLconst)
23641 v.AuxInt = int32ToAuxInt(1)
23644 // match: (SETGE (FlagLT_ULT))
23645 // result: (MOVLconst [0])
23647 if v_0.Op != OpAMD64FlagLT_ULT {
23650 v.reset(OpAMD64MOVLconst)
23651 v.AuxInt = int32ToAuxInt(0)
23654 // match: (SETGE (FlagLT_UGT))
23655 // result: (MOVLconst [0])
23657 if v_0.Op != OpAMD64FlagLT_UGT {
23660 v.reset(OpAMD64MOVLconst)
23661 v.AuxInt = int32ToAuxInt(0)
23664 // match: (SETGE (FlagGT_ULT))
23665 // result: (MOVLconst [1])
23667 if v_0.Op != OpAMD64FlagGT_ULT {
23670 v.reset(OpAMD64MOVLconst)
23671 v.AuxInt = int32ToAuxInt(1)
23674 // match: (SETGE (FlagGT_UGT))
23675 // result: (MOVLconst [1])
23677 if v_0.Op != OpAMD64FlagGT_UGT {
23680 v.reset(OpAMD64MOVLconst)
23681 v.AuxInt = int32ToAuxInt(1)
23686 func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool {
23691 typ := &b.Func.Config.Types
23692 // match: (SETGEstore [off] {sym} ptr (InvertFlags x) mem)
23693 // result: (SETLEstore [off] {sym} ptr x mem)
23695 off := auxIntToInt32(v.AuxInt)
23696 sym := auxToSym(v.Aux)
23698 if v_1.Op != OpAMD64InvertFlags {
23703 v.reset(OpAMD64SETLEstore)
23704 v.AuxInt = int32ToAuxInt(off)
23705 v.Aux = symToAux(sym)
23706 v.AddArg3(ptr, x, mem)
23709 // match: (SETGEstore [off1] {sym} (ADDQconst [off2] base) val mem)
23710 // cond: is32Bit(int64(off1)+int64(off2))
23711 // result: (SETGEstore [off1+off2] {sym} base val mem)
23713 off1 := auxIntToInt32(v.AuxInt)
23714 sym := auxToSym(v.Aux)
23715 if v_0.Op != OpAMD64ADDQconst {
23718 off2 := auxIntToInt32(v_0.AuxInt)
23719 base := v_0.Args[0]
23722 if !(is32Bit(int64(off1) + int64(off2))) {
23725 v.reset(OpAMD64SETGEstore)
23726 v.AuxInt = int32ToAuxInt(off1 + off2)
23727 v.Aux = symToAux(sym)
23728 v.AddArg3(base, val, mem)
23731 // match: (SETGEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
23732 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
23733 // result: (SETGEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
23735 off1 := auxIntToInt32(v.AuxInt)
23736 sym1 := auxToSym(v.Aux)
23737 if v_0.Op != OpAMD64LEAQ {
23740 off2 := auxIntToInt32(v_0.AuxInt)
23741 sym2 := auxToSym(v_0.Aux)
23742 base := v_0.Args[0]
23745 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
23748 v.reset(OpAMD64SETGEstore)
23749 v.AuxInt = int32ToAuxInt(off1 + off2)
23750 v.Aux = symToAux(mergeSym(sym1, sym2))
23751 v.AddArg3(base, val, mem)
23754 // match: (SETGEstore [off] {sym} ptr (FlagEQ) mem)
23755 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
23757 off := auxIntToInt32(v.AuxInt)
23758 sym := auxToSym(v.Aux)
23760 if v_1.Op != OpAMD64FlagEQ {
23764 v.reset(OpAMD64MOVBstore)
23765 v.AuxInt = int32ToAuxInt(off)
23766 v.Aux = symToAux(sym)
23767 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
23768 v0.AuxInt = int32ToAuxInt(1)
23769 v.AddArg3(ptr, v0, mem)
23772 // match: (SETGEstore [off] {sym} ptr (FlagLT_ULT) mem)
23773 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
23775 off := auxIntToInt32(v.AuxInt)
23776 sym := auxToSym(v.Aux)
23778 if v_1.Op != OpAMD64FlagLT_ULT {
23782 v.reset(OpAMD64MOVBstore)
23783 v.AuxInt = int32ToAuxInt(off)
23784 v.Aux = symToAux(sym)
23785 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
23786 v0.AuxInt = int32ToAuxInt(0)
23787 v.AddArg3(ptr, v0, mem)
23790 // match: (SETGEstore [off] {sym} ptr (FlagLT_UGT) mem)
23791 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
23793 off := auxIntToInt32(v.AuxInt)
23794 sym := auxToSym(v.Aux)
23796 if v_1.Op != OpAMD64FlagLT_UGT {
23800 v.reset(OpAMD64MOVBstore)
23801 v.AuxInt = int32ToAuxInt(off)
23802 v.Aux = symToAux(sym)
23803 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
23804 v0.AuxInt = int32ToAuxInt(0)
23805 v.AddArg3(ptr, v0, mem)
23808 // match: (SETGEstore [off] {sym} ptr (FlagGT_ULT) mem)
23809 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
23811 off := auxIntToInt32(v.AuxInt)
23812 sym := auxToSym(v.Aux)
23814 if v_1.Op != OpAMD64FlagGT_ULT {
23818 v.reset(OpAMD64MOVBstore)
23819 v.AuxInt = int32ToAuxInt(off)
23820 v.Aux = symToAux(sym)
23821 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
23822 v0.AuxInt = int32ToAuxInt(1)
23823 v.AddArg3(ptr, v0, mem)
23826 // match: (SETGEstore [off] {sym} ptr (FlagGT_UGT) mem)
23827 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
23829 off := auxIntToInt32(v.AuxInt)
23830 sym := auxToSym(v.Aux)
23832 if v_1.Op != OpAMD64FlagGT_UGT {
23836 v.reset(OpAMD64MOVBstore)
23837 v.AuxInt = int32ToAuxInt(off)
23838 v.Aux = symToAux(sym)
23839 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
23840 v0.AuxInt = int32ToAuxInt(1)
23841 v.AddArg3(ptr, v0, mem)
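// When the flag state is a constant, the SET*store rules replace the whole
// set-and-store with a MOVBstore of a constant. The constant is built as a
// MOVLconst typed UInt8: MOVBstore writes only the low byte of its value
// argument, so a 32-bit zero or one materializes exactly the byte that
// reaches memory. Sketch of the truncation (illustrative):
//
//	func storedByte(v int32) uint8 { return uint8(v) } // the 8 bits MOVBstore keeps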
23846 func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool {
23851 typ := &b.Func.Config.Types
23852 // match: (SETGstore [off] {sym} ptr (InvertFlags x) mem)
23853 // result: (SETLstore [off] {sym} ptr x mem)
23855 off := auxIntToInt32(v.AuxInt)
23856 sym := auxToSym(v.Aux)
23858 if v_1.Op != OpAMD64InvertFlags {
23863 v.reset(OpAMD64SETLstore)
23864 v.AuxInt = int32ToAuxInt(off)
23865 v.Aux = symToAux(sym)
23866 v.AddArg3(ptr, x, mem)
23869 // match: (SETGstore [off1] {sym} (ADDQconst [off2] base) val mem)
23870 // cond: is32Bit(int64(off1)+int64(off2))
23871 // result: (SETGstore [off1+off2] {sym} base val mem)
23873 off1 := auxIntToInt32(v.AuxInt)
23874 sym := auxToSym(v.Aux)
23875 if v_0.Op != OpAMD64ADDQconst {
23878 off2 := auxIntToInt32(v_0.AuxInt)
23879 base := v_0.Args[0]
23882 if !(is32Bit(int64(off1) + int64(off2))) {
23885 v.reset(OpAMD64SETGstore)
23886 v.AuxInt = int32ToAuxInt(off1 + off2)
23887 v.Aux = symToAux(sym)
23888 v.AddArg3(base, val, mem)
23891 // match: (SETGstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
23892 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
23893 // result: (SETGstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
23895 off1 := auxIntToInt32(v.AuxInt)
23896 sym1 := auxToSym(v.Aux)
23897 if v_0.Op != OpAMD64LEAQ {
23900 off2 := auxIntToInt32(v_0.AuxInt)
23901 sym2 := auxToSym(v_0.Aux)
23902 base := v_0.Args[0]
23905 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
23908 v.reset(OpAMD64SETGstore)
23909 v.AuxInt = int32ToAuxInt(off1 + off2)
23910 v.Aux = symToAux(mergeSym(sym1, sym2))
23911 v.AddArg3(base, val, mem)
23914 // match: (SETGstore [off] {sym} ptr (FlagEQ) mem)
23915 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
23917 off := auxIntToInt32(v.AuxInt)
23918 sym := auxToSym(v.Aux)
23920 if v_1.Op != OpAMD64FlagEQ {
23924 v.reset(OpAMD64MOVBstore)
23925 v.AuxInt = int32ToAuxInt(off)
23926 v.Aux = symToAux(sym)
23927 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
23928 v0.AuxInt = int32ToAuxInt(0)
23929 v.AddArg3(ptr, v0, mem)
23932 // match: (SETGstore [off] {sym} ptr (FlagLT_ULT) mem)
23933 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
23935 off := auxIntToInt32(v.AuxInt)
23936 sym := auxToSym(v.Aux)
23938 if v_1.Op != OpAMD64FlagLT_ULT {
23942 v.reset(OpAMD64MOVBstore)
23943 v.AuxInt = int32ToAuxInt(off)
23944 v.Aux = symToAux(sym)
23945 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
23946 v0.AuxInt = int32ToAuxInt(0)
23947 v.AddArg3(ptr, v0, mem)
23950 // match: (SETGstore [off] {sym} ptr (FlagLT_UGT) mem)
23951 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
23953 off := auxIntToInt32(v.AuxInt)
23954 sym := auxToSym(v.Aux)
23956 if v_1.Op != OpAMD64FlagLT_UGT {
23960 v.reset(OpAMD64MOVBstore)
23961 v.AuxInt = int32ToAuxInt(off)
23962 v.Aux = symToAux(sym)
23963 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
23964 v0.AuxInt = int32ToAuxInt(0)
23965 v.AddArg3(ptr, v0, mem)
23968 // match: (SETGstore [off] {sym} ptr (FlagGT_ULT) mem)
23969 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
23971 off := auxIntToInt32(v.AuxInt)
23972 sym := auxToSym(v.Aux)
23974 if v_1.Op != OpAMD64FlagGT_ULT {
23978 v.reset(OpAMD64MOVBstore)
23979 v.AuxInt = int32ToAuxInt(off)
23980 v.Aux = symToAux(sym)
23981 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
23982 v0.AuxInt = int32ToAuxInt(1)
23983 v.AddArg3(ptr, v0, mem)
23986 // match: (SETGstore [off] {sym} ptr (FlagGT_UGT) mem)
23987 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
23989 off := auxIntToInt32(v.AuxInt)
23990 sym := auxToSym(v.Aux)
23992 if v_1.Op != OpAMD64FlagGT_UGT {
23996 v.reset(OpAMD64MOVBstore)
23997 v.AuxInt = int32ToAuxInt(off)
23998 v.Aux = symToAux(sym)
23999 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
24000 v0.AuxInt = int32ToAuxInt(1)
24001 v.AddArg3(ptr, v0, mem)
24006 func rewriteValueAMD64_OpAMD64SETL(v *Value) bool {
24008 // match: (SETL (InvertFlags x))
24009 // result: (SETG x)
24011 if v_0.Op != OpAMD64InvertFlags {
24015 v.reset(OpAMD64SETG)
24019 // match: (SETL (FlagEQ))
24020 // result: (MOVLconst [0])
24022 if v_0.Op != OpAMD64FlagEQ {
24025 v.reset(OpAMD64MOVLconst)
24026 v.AuxInt = int32ToAuxInt(0)
24029 // match: (SETL (FlagLT_ULT))
24030 // result: (MOVLconst [1])
24032 if v_0.Op != OpAMD64FlagLT_ULT {
24035 v.reset(OpAMD64MOVLconst)
24036 v.AuxInt = int32ToAuxInt(1)
24039 // match: (SETL (FlagLT_UGT))
24040 // result: (MOVLconst [1])
24042 if v_0.Op != OpAMD64FlagLT_UGT {
24045 v.reset(OpAMD64MOVLconst)
24046 v.AuxInt = int32ToAuxInt(1)
24049 // match: (SETL (FlagGT_ULT))
24050 // result: (MOVLconst [0])
24052 if v_0.Op != OpAMD64FlagGT_ULT {
24055 v.reset(OpAMD64MOVLconst)
24056 v.AuxInt = int32ToAuxInt(0)
24059 // match: (SETL (FlagGT_UGT))
24060 // result: (MOVLconst [0])
24062 if v_0.Op != OpAMD64FlagGT_UGT {
24065 v.reset(OpAMD64MOVLconst)
24066 v.AuxInt = int32ToAuxInt(0)
24071 func rewriteValueAMD64_OpAMD64SETLE(v *Value) bool {
24073 // match: (SETLE (InvertFlags x))
24074 // result: (SETGE x)
24076 if v_0.Op != OpAMD64InvertFlags {
24080 v.reset(OpAMD64SETGE)
24084 // match: (SETLE (FlagEQ))
24085 // result: (MOVLconst [1])
24087 if v_0.Op != OpAMD64FlagEQ {
24090 v.reset(OpAMD64MOVLconst)
24091 v.AuxInt = int32ToAuxInt(1)
24094 // match: (SETLE (FlagLT_ULT))
24095 // result: (MOVLconst [1])
24097 if v_0.Op != OpAMD64FlagLT_ULT {
24100 v.reset(OpAMD64MOVLconst)
24101 v.AuxInt = int32ToAuxInt(1)
24104 // match: (SETLE (FlagLT_UGT))
24105 // result: (MOVLconst [1])
24107 if v_0.Op != OpAMD64FlagLT_UGT {
24110 v.reset(OpAMD64MOVLconst)
24111 v.AuxInt = int32ToAuxInt(1)
24114 // match: (SETLE (FlagGT_ULT))
24115 // result: (MOVLconst [0])
24117 if v_0.Op != OpAMD64FlagGT_ULT {
24120 v.reset(OpAMD64MOVLconst)
24121 v.AuxInt = int32ToAuxInt(0)
24124 // match: (SETLE (FlagGT_UGT))
24125 // result: (MOVLconst [0])
24127 if v_0.Op != OpAMD64FlagGT_UGT {
24130 v.reset(OpAMD64MOVLconst)
24131 v.AuxInt = int32ToAuxInt(0)
24136 func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool {
24141 typ := &b.Func.Config.Types
24142 // match: (SETLEstore [off] {sym} ptr (InvertFlags x) mem)
24143 // result: (SETGEstore [off] {sym} ptr x mem)
24145 off := auxIntToInt32(v.AuxInt)
24146 sym := auxToSym(v.Aux)
24148 if v_1.Op != OpAMD64InvertFlags {
24153 v.reset(OpAMD64SETGEstore)
24154 v.AuxInt = int32ToAuxInt(off)
24155 v.Aux = symToAux(sym)
24156 v.AddArg3(ptr, x, mem)
24159 // match: (SETLEstore [off1] {sym} (ADDQconst [off2] base) val mem)
24160 // cond: is32Bit(int64(off1)+int64(off2))
24161 // result: (SETLEstore [off1+off2] {sym} base val mem)
24163 off1 := auxIntToInt32(v.AuxInt)
24164 sym := auxToSym(v.Aux)
24165 if v_0.Op != OpAMD64ADDQconst {
24168 off2 := auxIntToInt32(v_0.AuxInt)
24169 base := v_0.Args[0]
24172 if !(is32Bit(int64(off1) + int64(off2))) {
24175 v.reset(OpAMD64SETLEstore)
24176 v.AuxInt = int32ToAuxInt(off1 + off2)
24177 v.Aux = symToAux(sym)
24178 v.AddArg3(base, val, mem)
24181 // match: (SETLEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
24182 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
24183 // result: (SETLEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
24185 off1 := auxIntToInt32(v.AuxInt)
24186 sym1 := auxToSym(v.Aux)
24187 if v_0.Op != OpAMD64LEAQ {
24190 off2 := auxIntToInt32(v_0.AuxInt)
24191 sym2 := auxToSym(v_0.Aux)
24192 base := v_0.Args[0]
24195 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
24198 v.reset(OpAMD64SETLEstore)
24199 v.AuxInt = int32ToAuxInt(off1 + off2)
24200 v.Aux = symToAux(mergeSym(sym1, sym2))
24201 v.AddArg3(base, val, mem)
24204 // match: (SETLEstore [off] {sym} ptr (FlagEQ) mem)
24205 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
24207 off := auxIntToInt32(v.AuxInt)
24208 sym := auxToSym(v.Aux)
24210 if v_1.Op != OpAMD64FlagEQ {
24214 v.reset(OpAMD64MOVBstore)
24215 v.AuxInt = int32ToAuxInt(off)
24216 v.Aux = symToAux(sym)
24217 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
24218 v0.AuxInt = int32ToAuxInt(1)
24219 v.AddArg3(ptr, v0, mem)
24222 // match: (SETLEstore [off] {sym} ptr (FlagLT_ULT) mem)
24223 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
24225 off := auxIntToInt32(v.AuxInt)
24226 sym := auxToSym(v.Aux)
24228 if v_1.Op != OpAMD64FlagLT_ULT {
24232 v.reset(OpAMD64MOVBstore)
24233 v.AuxInt = int32ToAuxInt(off)
24234 v.Aux = symToAux(sym)
24235 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
24236 v0.AuxInt = int32ToAuxInt(1)
24237 v.AddArg3(ptr, v0, mem)
24240 // match: (SETLEstore [off] {sym} ptr (FlagLT_UGT) mem)
24241 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
24243 off := auxIntToInt32(v.AuxInt)
24244 sym := auxToSym(v.Aux)
24246 if v_1.Op != OpAMD64FlagLT_UGT {
24250 v.reset(OpAMD64MOVBstore)
24251 v.AuxInt = int32ToAuxInt(off)
24252 v.Aux = symToAux(sym)
24253 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
24254 v0.AuxInt = int32ToAuxInt(1)
24255 v.AddArg3(ptr, v0, mem)
24258 // match: (SETLEstore [off] {sym} ptr (FlagGT_ULT) mem)
24259 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
24261 off := auxIntToInt32(v.AuxInt)
24262 sym := auxToSym(v.Aux)
24264 if v_1.Op != OpAMD64FlagGT_ULT {
24268 v.reset(OpAMD64MOVBstore)
24269 v.AuxInt = int32ToAuxInt(off)
24270 v.Aux = symToAux(sym)
24271 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
24272 v0.AuxInt = int32ToAuxInt(0)
24273 v.AddArg3(ptr, v0, mem)
24276 // match: (SETLEstore [off] {sym} ptr (FlagGT_UGT) mem)
24277 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
24279 off := auxIntToInt32(v.AuxInt)
24280 sym := auxToSym(v.Aux)
24282 if v_1.Op != OpAMD64FlagGT_UGT {
24286 v.reset(OpAMD64MOVBstore)
24287 v.AuxInt = int32ToAuxInt(off)
24288 v.Aux = symToAux(sym)
24289 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
24290 v0.AuxInt = int32ToAuxInt(0)
24291 v.AddArg3(ptr, v0, mem)
24296 func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool {
24301 typ := &b.Func.Config.Types
24302 // match: (SETLstore [off] {sym} ptr (InvertFlags x) mem)
24303 // result: (SETGstore [off] {sym} ptr x mem)
24305 off := auxIntToInt32(v.AuxInt)
24306 sym := auxToSym(v.Aux)
24308 if v_1.Op != OpAMD64InvertFlags {
24313 v.reset(OpAMD64SETGstore)
24314 v.AuxInt = int32ToAuxInt(off)
24315 v.Aux = symToAux(sym)
24316 v.AddArg3(ptr, x, mem)
24319 // match: (SETLstore [off1] {sym} (ADDQconst [off2] base) val mem)
24320 // cond: is32Bit(int64(off1)+int64(off2))
24321 // result: (SETLstore [off1+off2] {sym} base val mem)
24323 off1 := auxIntToInt32(v.AuxInt)
24324 sym := auxToSym(v.Aux)
24325 if v_0.Op != OpAMD64ADDQconst {
24328 off2 := auxIntToInt32(v_0.AuxInt)
24329 base := v_0.Args[0]
24332 if !(is32Bit(int64(off1) + int64(off2))) {
24335 v.reset(OpAMD64SETLstore)
24336 v.AuxInt = int32ToAuxInt(off1 + off2)
24337 v.Aux = symToAux(sym)
24338 v.AddArg3(base, val, mem)
24341 // match: (SETLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
24342 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
24343 // result: (SETLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
24345 off1 := auxIntToInt32(v.AuxInt)
24346 sym1 := auxToSym(v.Aux)
24347 if v_0.Op != OpAMD64LEAQ {
24350 off2 := auxIntToInt32(v_0.AuxInt)
24351 sym2 := auxToSym(v_0.Aux)
24352 base := v_0.Args[0]
24355 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
24358 v.reset(OpAMD64SETLstore)
24359 v.AuxInt = int32ToAuxInt(off1 + off2)
24360 v.Aux = symToAux(mergeSym(sym1, sym2))
24361 v.AddArg3(base, val, mem)
24364 // match: (SETLstore [off] {sym} ptr (FlagEQ) mem)
24365 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
24367 off := auxIntToInt32(v.AuxInt)
24368 sym := auxToSym(v.Aux)
24370 if v_1.Op != OpAMD64FlagEQ {
24374 v.reset(OpAMD64MOVBstore)
24375 v.AuxInt = int32ToAuxInt(off)
24376 v.Aux = symToAux(sym)
24377 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
24378 v0.AuxInt = int32ToAuxInt(0)
24379 v.AddArg3(ptr, v0, mem)
24382 // match: (SETLstore [off] {sym} ptr (FlagLT_ULT) mem)
24383 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
24385 off := auxIntToInt32(v.AuxInt)
24386 sym := auxToSym(v.Aux)
24388 if v_1.Op != OpAMD64FlagLT_ULT {
24392 v.reset(OpAMD64MOVBstore)
24393 v.AuxInt = int32ToAuxInt(off)
24394 v.Aux = symToAux(sym)
24395 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
24396 v0.AuxInt = int32ToAuxInt(1)
24397 v.AddArg3(ptr, v0, mem)
24400 // match: (SETLstore [off] {sym} ptr (FlagLT_UGT) mem)
24401 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
24403 off := auxIntToInt32(v.AuxInt)
24404 sym := auxToSym(v.Aux)
24406 if v_1.Op != OpAMD64FlagLT_UGT {
24410 v.reset(OpAMD64MOVBstore)
24411 v.AuxInt = int32ToAuxInt(off)
24412 v.Aux = symToAux(sym)
24413 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
24414 v0.AuxInt = int32ToAuxInt(1)
24415 v.AddArg3(ptr, v0, mem)
24418 // match: (SETLstore [off] {sym} ptr (FlagGT_ULT) mem)
24419 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
24421 off := auxIntToInt32(v.AuxInt)
24422 sym := auxToSym(v.Aux)
24424 if v_1.Op != OpAMD64FlagGT_ULT {
24428 v.reset(OpAMD64MOVBstore)
24429 v.AuxInt = int32ToAuxInt(off)
24430 v.Aux = symToAux(sym)
24431 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
24432 v0.AuxInt = int32ToAuxInt(0)
24433 v.AddArg3(ptr, v0, mem)
24436 // match: (SETLstore [off] {sym} ptr (FlagGT_UGT) mem)
24437 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
24439 off := auxIntToInt32(v.AuxInt)
24440 sym := auxToSym(v.Aux)
24442 if v_1.Op != OpAMD64FlagGT_UGT {
24446 v.reset(OpAMD64MOVBstore)
24447 v.AuxInt = int32ToAuxInt(off)
24448 v.Aux = symToAux(sym)
24449 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
24450 v0.AuxInt = int32ToAuxInt(0)
24451 v.AddArg3(ptr, v0, mem)
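// rewriteValueAMD64_OpAMD64SETNE is the dual of the SETEQ function above: it
// matches the same shapes, but a set bit now means "not equal", so the
// bit-test results use SETB ("CF set") where SETEQ used SETAE, and the
// CMPconst [1] / ANDconst [1] rules flip to SETEQ of a compare with zero.
// The first two rules are special cases: when the flags come from
// TESTBconst [1] or TESTWconst [1], the low bit of x already is the 0/1
// answer, so the whole SETNE reduces to ANDLconst [1] x. Illustrative
// sketch:
//
//	// "x&1 != 0", expressed as a 0/1 value, is just x&1.
//	func setneOfLowBit(x uint32) uint32 { return x & 1 }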
24456 func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool {
24459 // match: (SETNE (TESTBconst [1] x))
24460 // result: (ANDLconst [1] x)
24462 if v_0.Op != OpAMD64TESTBconst || auxIntToInt8(v_0.AuxInt) != 1 {
24466 v.reset(OpAMD64ANDLconst)
24467 v.AuxInt = int32ToAuxInt(1)
24471 // match: (SETNE (TESTWconst [1] x))
24472 // result: (ANDLconst [1] x)
24474 if v_0.Op != OpAMD64TESTWconst || auxIntToInt16(v_0.AuxInt) != 1 {
24478 v.reset(OpAMD64ANDLconst)
24479 v.AuxInt = int32ToAuxInt(1)
24483 // match: (SETNE (TESTL (SHLL (MOVLconst [1]) x) y))
24484 // result: (SETB (BTL x y))
24486 if v_0.Op != OpAMD64TESTL {
24490 v_0_0 := v_0.Args[0]
24491 v_0_1 := v_0.Args[1]
24492 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
24493 if v_0_0.Op != OpAMD64SHLL {
24497 v_0_0_0 := v_0_0.Args[0]
24498 if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
24502 v.reset(OpAMD64SETB)
24503 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
24510 // match: (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y))
24511 // result: (SETB (BTQ x y))
24513 if v_0.Op != OpAMD64TESTQ {
24517 v_0_0 := v_0.Args[0]
24518 v_0_1 := v_0.Args[1]
24519 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
24520 if v_0_0.Op != OpAMD64SHLQ {
24524 v_0_0_0 := v_0_0.Args[0]
24525 if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
24529 v.reset(OpAMD64SETB)
24530 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
24537 // match: (SETNE (TESTLconst [c] x))
24538 // cond: isUint32PowerOfTwo(int64(c))
24539 // result: (SETB (BTLconst [int8(log32(c))] x))
24541 if v_0.Op != OpAMD64TESTLconst {
24544 c := auxIntToInt32(v_0.AuxInt)
24546 if !(isUint32PowerOfTwo(int64(c))) {
24549 v.reset(OpAMD64SETB)
24550 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
24551 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
24556 // match: (SETNE (TESTQconst [c] x))
24557 // cond: isUint64PowerOfTwo(int64(c))
24558 // result: (SETB (BTQconst [int8(log32(c))] x))
24560 if v_0.Op != OpAMD64TESTQconst {
24563 c := auxIntToInt32(v_0.AuxInt)
24565 if !(isUint64PowerOfTwo(int64(c))) {
24568 v.reset(OpAMD64SETB)
24569 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
24570 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
24575 // match: (SETNE (TESTQ (MOVQconst [c]) x))
24576 // cond: isUint64PowerOfTwo(c)
24577 // result: (SETB (BTQconst [int8(log64(c))] x))
24579 if v_0.Op != OpAMD64TESTQ {
24583 v_0_0 := v_0.Args[0]
24584 v_0_1 := v_0.Args[1]
24585 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
24586 if v_0_0.Op != OpAMD64MOVQconst {
24589 c := auxIntToInt64(v_0_0.AuxInt)
24591 if !(isUint64PowerOfTwo(c)) {
24594 v.reset(OpAMD64SETB)
24595 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
24596 v0.AuxInt = int8ToAuxInt(int8(log64(c)))
24603 // match: (SETNE (CMPLconst [1] s:(ANDLconst [1] _)))
24604 // result: (SETEQ (CMPLconst [0] s))
24606 if v_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_0.AuxInt) != 1 {
24610 if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
24613 v.reset(OpAMD64SETEQ)
24614 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
24615 v0.AuxInt = int32ToAuxInt(0)
24620 // match: (SETNE (CMPQconst [1] s:(ANDQconst [1] _)))
24621 // result: (SETEQ (CMPQconst [0] s))
24623 if v_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_0.AuxInt) != 1 {
24627 if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
24630 v.reset(OpAMD64SETEQ)
24631 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
24632 v0.AuxInt = int32ToAuxInt(0)
24637 // match: (SETNE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
24638 // cond: z1==z2
24639 // result: (SETB (BTQconst [63] x))
24641 if v_0.Op != OpAMD64TESTQ {
24645 v_0_0 := v_0.Args[0]
24646 v_0_1 := v_0.Args[1]
24647 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
24649 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
24653 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
24661 v.reset(OpAMD64SETB)
24662 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
24663 v0.AuxInt = int8ToAuxInt(63)
24670 // match: (SETNE (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
24671 // cond: z1==z2
24672 // result: (SETB (BTQconst [31] x))
24674 if v_0.Op != OpAMD64TESTL {
24678 v_0_0 := v_0.Args[0]
24679 v_0_1 := v_0.Args[1]
24680 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
24682 if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
24686 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
24694 v.reset(OpAMD64SETB)
24695 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
24696 v0.AuxInt = int8ToAuxInt(31)
24703 // match: (SETNE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
24704 // cond: z1==z2
24705 // result: (SETB (BTQconst [0] x))
24707 if v_0.Op != OpAMD64TESTQ {
24711 v_0_0 := v_0.Args[0]
24712 v_0_1 := v_0.Args[1]
24713 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
24715 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
24719 if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
24727 v.reset(OpAMD64SETB)
24728 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
24729 v0.AuxInt = int8ToAuxInt(0)
24736 // match: (SETNE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
24737 // cond: z1==z2
24738 // result: (SETB (BTLconst [0] x))
24740 if v_0.Op != OpAMD64TESTL {
24744 v_0_0 := v_0.Args[0]
24745 v_0_1 := v_0.Args[1]
24746 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
24748 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
24752 if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
24760 v.reset(OpAMD64SETB)
24761 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
24762 v0.AuxInt = int8ToAuxInt(0)
24769 // match: (SETNE (TESTQ z1:(SHRQconst [63] x) z2))
24770 // cond: z1==z2
24771 // result: (SETB (BTQconst [63] x))
24773 if v_0.Op != OpAMD64TESTQ {
24777 v_0_0 := v_0.Args[0]
24778 v_0_1 := v_0.Args[1]
24779 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
24781 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
24789 v.reset(OpAMD64SETB)
24790 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
24791 v0.AuxInt = int8ToAuxInt(63)
24798 // match: (SETNE (TESTL z1:(SHRLconst [31] x) z2))
24799 // cond: z1==z2
24800 // result: (SETB (BTLconst [31] x))
24802 if v_0.Op != OpAMD64TESTL {
24806 v_0_0 := v_0.Args[0]
24807 v_0_1 := v_0.Args[1]
24808 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
24810 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
24818 v.reset(OpAMD64SETB)
24819 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
24820 v0.AuxInt = int8ToAuxInt(31)
24827 // match: (SETNE (InvertFlags x))
24828 // result: (SETNE x)
24830 if v_0.Op != OpAMD64InvertFlags {
24834 v.reset(OpAMD64SETNE)
24838 // match: (SETNE (FlagEQ))
24839 // result: (MOVLconst [0])
24841 if v_0.Op != OpAMD64FlagEQ {
24844 v.reset(OpAMD64MOVLconst)
24845 v.AuxInt = int32ToAuxInt(0)
24848 // match: (SETNE (FlagLT_ULT))
24849 // result: (MOVLconst [1])
24851 if v_0.Op != OpAMD64FlagLT_ULT {
24854 v.reset(OpAMD64MOVLconst)
24855 v.AuxInt = int32ToAuxInt(1)
24858 // match: (SETNE (FlagLT_UGT))
24859 // result: (MOVLconst [1])
24861 if v_0.Op != OpAMD64FlagLT_UGT {
24864 v.reset(OpAMD64MOVLconst)
24865 v.AuxInt = int32ToAuxInt(1)
24868 // match: (SETNE (FlagGT_ULT))
24869 // result: (MOVLconst [1])
24871 if v_0.Op != OpAMD64FlagGT_ULT {
24874 v.reset(OpAMD64MOVLconst)
24875 v.AuxInt = int32ToAuxInt(1)
24878 // match: (SETNE (FlagGT_UGT))
24879 // result: (MOVLconst [1])
24881 if v_0.Op != OpAMD64FlagGT_UGT {
24884 v.reset(OpAMD64MOVLconst)
24885 v.AuxInt = int32ToAuxInt(1)
24890 func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
24895 typ := &b.Func.Config.Types
24896 // match: (SETNEstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
24897 // result: (SETBstore [off] {sym} ptr (BTL x y) mem)
24899 off := auxIntToInt32(v.AuxInt)
24900 sym := auxToSym(v.Aux)
24902 if v_1.Op != OpAMD64TESTL {
24906 v_1_0 := v_1.Args[0]
24907 v_1_1 := v_1.Args[1]
24908 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
24909 if v_1_0.Op != OpAMD64SHLL {
24913 v_1_0_0 := v_1_0.Args[0]
24914 if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 {
24919 v.reset(OpAMD64SETBstore)
24920 v.AuxInt = int32ToAuxInt(off)
24921 v.Aux = symToAux(sym)
24922 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
24924 v.AddArg3(ptr, v0, mem)
24929 // match: (SETNEstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
24930 // result: (SETBstore [off] {sym} ptr (BTQ x y) mem)
24932 off := auxIntToInt32(v.AuxInt)
24933 sym := auxToSym(v.Aux)
24935 if v_1.Op != OpAMD64TESTQ {
24939 v_1_0 := v_1.Args[0]
24940 v_1_1 := v_1.Args[1]
24941 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
24942 if v_1_0.Op != OpAMD64SHLQ {
24946 v_1_0_0 := v_1_0.Args[0]
24947 if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 {
24952 v.reset(OpAMD64SETBstore)
24953 v.AuxInt = int32ToAuxInt(off)
24954 v.Aux = symToAux(sym)
24955 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
24957 v.AddArg3(ptr, v0, mem)
24962 // match: (SETNEstore [off] {sym} ptr (TESTLconst [c] x) mem)
24963 // cond: isUint32PowerOfTwo(int64(c))
24964 // result: (SETBstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
24966 off := auxIntToInt32(v.AuxInt)
24967 sym := auxToSym(v.Aux)
24969 if v_1.Op != OpAMD64TESTLconst {
24972 c := auxIntToInt32(v_1.AuxInt)
24975 if !(isUint32PowerOfTwo(int64(c))) {
24978 v.reset(OpAMD64SETBstore)
24979 v.AuxInt = int32ToAuxInt(off)
24980 v.Aux = symToAux(sym)
24981 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
24982 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
24984 v.AddArg3(ptr, v0, mem)
24987 // match: (SETNEstore [off] {sym} ptr (TESTQconst [c] x) mem)
24988 // cond: isUint64PowerOfTwo(int64(c))
24989 // result: (SETBstore [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem)
24991 off := auxIntToInt32(v.AuxInt)
24992 sym := auxToSym(v.Aux)
24994 if v_1.Op != OpAMD64TESTQconst {
24997 c := auxIntToInt32(v_1.AuxInt)
25000 if !(isUint64PowerOfTwo(int64(c))) {
25003 v.reset(OpAMD64SETBstore)
25004 v.AuxInt = int32ToAuxInt(off)
25005 v.Aux = symToAux(sym)
25006 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
25007 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
25009 v.AddArg3(ptr, v0, mem)
25012 // match: (SETNEstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
25013 // cond: isUint64PowerOfTwo(c)
25014 // result: (SETBstore [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
25016 off := auxIntToInt32(v.AuxInt)
25017 sym := auxToSym(v.Aux)
25019 if v_1.Op != OpAMD64TESTQ {
25023 v_1_0 := v_1.Args[0]
25024 v_1_1 := v_1.Args[1]
25025 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
25026 if v_1_0.Op != OpAMD64MOVQconst {
25029 c := auxIntToInt64(v_1_0.AuxInt)
25032 if !(isUint64PowerOfTwo(c)) {
25035 v.reset(OpAMD64SETBstore)
25036 v.AuxInt = int32ToAuxInt(off)
25037 v.Aux = symToAux(sym)
25038 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
25039 v0.AuxInt = int8ToAuxInt(int8(log64(c)))
25041 v.AddArg3(ptr, v0, mem)
25046 // match: (SETNEstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem)
25047 // result: (SETEQstore [off] {sym} ptr (CMPLconst [0] s) mem)
25049 off := auxIntToInt32(v.AuxInt)
25050 sym := auxToSym(v.Aux)
25052 if v_1.Op != OpAMD64CMPLconst || auxIntToInt32(v_1.AuxInt) != 1 {
25056 if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
25060 v.reset(OpAMD64SETEQstore)
25061 v.AuxInt = int32ToAuxInt(off)
25062 v.Aux = symToAux(sym)
25063 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
25064 v0.AuxInt = int32ToAuxInt(0)
25066 v.AddArg3(ptr, v0, mem)
25069 // match: (SETNEstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem)
25070 // result: (SETEQstore [off] {sym} ptr (CMPQconst [0] s) mem)
25072 off := auxIntToInt32(v.AuxInt)
25073 sym := auxToSym(v.Aux)
25075 if v_1.Op != OpAMD64CMPQconst || auxIntToInt32(v_1.AuxInt) != 1 {
25079 if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
25083 v.reset(OpAMD64SETEQstore)
25084 v.AuxInt = int32ToAuxInt(off)
25085 v.Aux = symToAux(sym)
25086 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
25087 v0.AuxInt = int32ToAuxInt(0)
25089 v.AddArg3(ptr, v0, mem)
25092 // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
25093 // cond: z1==z2
25094 // result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem)
25096 off := auxIntToInt32(v.AuxInt)
25097 sym := auxToSym(v.Aux)
25099 if v_1.Op != OpAMD64TESTQ {
25103 v_1_0 := v_1.Args[0]
25104 v_1_1 := v_1.Args[1]
25105 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
25107 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
25111 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
25120 v.reset(OpAMD64SETBstore)
25121 v.AuxInt = int32ToAuxInt(off)
25122 v.Aux = symToAux(sym)
25123 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
25124 v0.AuxInt = int8ToAuxInt(63)
25126 v.AddArg3(ptr, v0, mem)
25131 // match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem)
25132 // cond: z1==z2
25133 // result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem)
25135 off := auxIntToInt32(v.AuxInt)
25136 sym := auxToSym(v.Aux)
25138 if v_1.Op != OpAMD64TESTL {
25142 v_1_0 := v_1.Args[0]
25143 v_1_1 := v_1.Args[1]
25144 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
25146 if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
25150 if z1_0.Op != OpAMD64SHRLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
25159 v.reset(OpAMD64SETBstore)
25160 v.AuxInt = int32ToAuxInt(off)
25161 v.Aux = symToAux(sym)
25162 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
25163 v0.AuxInt = int8ToAuxInt(31)
25165 v.AddArg3(ptr, v0, mem)
25170 // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem)
25171 // cond: z1==z2
25172 // result: (SETBstore [off] {sym} ptr (BTQconst [0] x) mem)
25174 off := auxIntToInt32(v.AuxInt)
25175 sym := auxToSym(v.Aux)
25177 if v_1.Op != OpAMD64TESTQ {
25181 v_1_0 := v_1.Args[0]
25182 v_1_1 := v_1.Args[1]
25183 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
25185 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
25189 if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
25198 v.reset(OpAMD64SETBstore)
25199 v.AuxInt = int32ToAuxInt(off)
25200 v.Aux = symToAux(sym)
25201 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
25202 v0.AuxInt = int8ToAuxInt(0)
25204 v.AddArg3(ptr, v0, mem)
25209 // match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem)
25210 // cond: z1==z2
25211 // result: (SETBstore [off] {sym} ptr (BTLconst [0] x) mem)
25213 off := auxIntToInt32(v.AuxInt)
25214 sym := auxToSym(v.Aux)
25216 if v_1.Op != OpAMD64TESTL {
25220 v_1_0 := v_1.Args[0]
25221 v_1_1 := v_1.Args[1]
25222 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
25224 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
25228 if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
25237 v.reset(OpAMD64SETBstore)
25238 v.AuxInt = int32ToAuxInt(off)
25239 v.Aux = symToAux(sym)
25240 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
25241 v0.AuxInt = int8ToAuxInt(0)
25243 v.AddArg3(ptr, v0, mem)
25248 // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem)
25249 // cond: z1==z2
25250 // result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem)
25252 off := auxIntToInt32(v.AuxInt)
25253 sym := auxToSym(v.Aux)
25255 if v_1.Op != OpAMD64TESTQ {
25259 v_1_0 := v_1.Args[0]
25260 v_1_1 := v_1.Args[1]
25261 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
25263 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
25272 v.reset(OpAMD64SETBstore)
25273 v.AuxInt = int32ToAuxInt(off)
25274 v.Aux = symToAux(sym)
25275 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
25276 v0.AuxInt = int8ToAuxInt(63)
25278 v.AddArg3(ptr, v0, mem)
25283 // match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem)
25284 // cond: z1==z2
25285 // result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem)
25287 off := auxIntToInt32(v.AuxInt)
25288 sym := auxToSym(v.Aux)
25290 if v_1.Op != OpAMD64TESTL {
25294 v_1_0 := v_1.Args[0]
25295 v_1_1 := v_1.Args[1]
25296 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
25298 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
25307 v.reset(OpAMD64SETBstore)
25308 v.AuxInt = int32ToAuxInt(off)
25309 v.Aux = symToAux(sym)
25310 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
25311 v0.AuxInt = int8ToAuxInt(31)
25313 v.AddArg3(ptr, v0, mem)
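// Only after all of the bit-test and CMPconst shapes above have failed do
// the generic SETNEstore rules below get a chance: InvertFlags is folded
// first, then ADDQconst/LEAQ address folding, and finally the constant flag
// states; each rule returns as soon as it has rewritten v.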
25318 // match: (SETNEstore [off] {sym} ptr (InvertFlags x) mem)
25319 // result: (SETNEstore [off] {sym} ptr x mem)
25321 off := auxIntToInt32(v.AuxInt)
25322 sym := auxToSym(v.Aux)
25324 if v_1.Op != OpAMD64InvertFlags {
25329 v.reset(OpAMD64SETNEstore)
25330 v.AuxInt = int32ToAuxInt(off)
25331 v.Aux = symToAux(sym)
25332 v.AddArg3(ptr, x, mem)
25335 // match: (SETNEstore [off1] {sym} (ADDQconst [off2] base) val mem)
25336 // cond: is32Bit(int64(off1)+int64(off2))
25337 // result: (SETNEstore [off1+off2] {sym} base val mem)
25339 off1 := auxIntToInt32(v.AuxInt)
25340 sym := auxToSym(v.Aux)
25341 if v_0.Op != OpAMD64ADDQconst {
25344 off2 := auxIntToInt32(v_0.AuxInt)
25345 base := v_0.Args[0]
25348 if !(is32Bit(int64(off1) + int64(off2))) {
25351 v.reset(OpAMD64SETNEstore)
25352 v.AuxInt = int32ToAuxInt(off1 + off2)
25353 v.Aux = symToAux(sym)
25354 v.AddArg3(base, val, mem)
25357 // match: (SETNEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
25358 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
25359 // result: (SETNEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
25361 off1 := auxIntToInt32(v.AuxInt)
25362 sym1 := auxToSym(v.Aux)
25363 if v_0.Op != OpAMD64LEAQ {
25366 off2 := auxIntToInt32(v_0.AuxInt)
25367 sym2 := auxToSym(v_0.Aux)
25368 base := v_0.Args[0]
25371 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
25374 v.reset(OpAMD64SETNEstore)
25375 v.AuxInt = int32ToAuxInt(off1 + off2)
25376 v.Aux = symToAux(mergeSym(sym1, sym2))
25377 v.AddArg3(base, val, mem)
25380 // match: (SETNEstore [off] {sym} ptr (FlagEQ) mem)
25381 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
25383 off := auxIntToInt32(v.AuxInt)
25384 sym := auxToSym(v.Aux)
25386 if v_1.Op != OpAMD64FlagEQ {
25390 v.reset(OpAMD64MOVBstore)
25391 v.AuxInt = int32ToAuxInt(off)
25392 v.Aux = symToAux(sym)
25393 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
25394 v0.AuxInt = int32ToAuxInt(0)
25395 v.AddArg3(ptr, v0, mem)
25398 // match: (SETNEstore [off] {sym} ptr (FlagLT_ULT) mem)
25399 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
25401 off := auxIntToInt32(v.AuxInt)
25402 sym := auxToSym(v.Aux)
25404 if v_1.Op != OpAMD64FlagLT_ULT {
25408 v.reset(OpAMD64MOVBstore)
25409 v.AuxInt = int32ToAuxInt(off)
25410 v.Aux = symToAux(sym)
25411 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
25412 v0.AuxInt = int32ToAuxInt(1)
25413 v.AddArg3(ptr, v0, mem)
25416 // match: (SETNEstore [off] {sym} ptr (FlagLT_UGT) mem)
25417 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
25419 off := auxIntToInt32(v.AuxInt)
25420 sym := auxToSym(v.Aux)
25422 if v_1.Op != OpAMD64FlagLT_UGT {
25426 v.reset(OpAMD64MOVBstore)
25427 v.AuxInt = int32ToAuxInt(off)
25428 v.Aux = symToAux(sym)
25429 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
25430 v0.AuxInt = int32ToAuxInt(1)
25431 v.AddArg3(ptr, v0, mem)
25434 // match: (SETNEstore [off] {sym} ptr (FlagGT_ULT) mem)
25435 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
25437 off := auxIntToInt32(v.AuxInt)
25438 sym := auxToSym(v.Aux)
25440 if v_1.Op != OpAMD64FlagGT_ULT {
25444 v.reset(OpAMD64MOVBstore)
25445 v.AuxInt = int32ToAuxInt(off)
25446 v.Aux = symToAux(sym)
25447 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
25448 v0.AuxInt = int32ToAuxInt(1)
25449 v.AddArg3(ptr, v0, mem)
25452 // match: (SETNEstore [off] {sym} ptr (FlagGT_UGT) mem)
25453 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
25455 off := auxIntToInt32(v.AuxInt)
25456 sym := auxToSym(v.Aux)
25458 if v_1.Op != OpAMD64FlagGT_UGT {
25462 v.reset(OpAMD64MOVBstore)
25463 v.AuxInt = int32ToAuxInt(off)
25464 v.Aux = symToAux(sym)
25465 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
25466 v0.AuxInt = int32ToAuxInt(1)
25467 v.AddArg3(ptr, v0, mem)
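// Illustrative sketch, not part of the generated rewriter: the SETNEstore
// rules above fold a statically known flags value into the byte that gets
// stored. FlagEQ means the compared operands were equal, so SETNE stores 0;
// every FlagLT_*/FlagGT_* value means "not equal", so SETNE stores 1. The
// helper name below is hypothetical and only mirrors that truth table.
func setneByteForKnownFlags(operandsEqual bool) int32 {
	if operandsEqual {
		return 0 // FlagEQ rule: "not equal" is false
	}
	return 1 // FlagLT_ULT/FlagLT_UGT/FlagGT_ULT/FlagGT_UGT rules: "not equal" is true
}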
25472 func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool {
25476 // match: (SHLL x (MOVQconst [c]))
25477 // result: (SHLLconst [int8(c&31)] x)
25480 if v_1.Op != OpAMD64MOVQconst {
25483 c := auxIntToInt64(v_1.AuxInt)
25484 v.reset(OpAMD64SHLLconst)
25485 v.AuxInt = int8ToAuxInt(int8(c & 31))
25489 // match: (SHLL x (MOVLconst [c]))
25490 // result: (SHLLconst [int8(c&31)] x)
25493 if v_1.Op != OpAMD64MOVLconst {
25496 c := auxIntToInt32(v_1.AuxInt)
25497 v.reset(OpAMD64SHLLconst)
25498 v.AuxInt = int8ToAuxInt(int8(c & 31))
25502 // match: (SHLL x (ADDQconst [c] y))
25503 // cond: c & 31 == 0
25504 // result: (SHLL x y)
25507 if v_1.Op != OpAMD64ADDQconst {
25510 c := auxIntToInt32(v_1.AuxInt)
25515 v.reset(OpAMD64SHLL)
25519 // match: (SHLL x (NEGQ <t> (ADDQconst [c] y)))
25520 // cond: c & 31 == 0
25521 // result: (SHLL x (NEGQ <t> y))
25524 if v_1.Op != OpAMD64NEGQ {
25528 v_1_0 := v_1.Args[0]
25529 if v_1_0.Op != OpAMD64ADDQconst {
25532 c := auxIntToInt32(v_1_0.AuxInt)
25537 v.reset(OpAMD64SHLL)
25538 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
25543 // match: (SHLL x (ANDQconst [c] y))
25544 // cond: c & 31 == 31
25545 // result: (SHLL x y)
25548 if v_1.Op != OpAMD64ANDQconst {
25551 c := auxIntToInt32(v_1.AuxInt)
25556 v.reset(OpAMD64SHLL)
25560 // match: (SHLL x (NEGQ <t> (ANDQconst [c] y)))
25561 // cond: c & 31 == 31
25562 // result: (SHLL x (NEGQ <t> y))
25565 if v_1.Op != OpAMD64NEGQ {
25569 v_1_0 := v_1.Args[0]
25570 if v_1_0.Op != OpAMD64ANDQconst {
25573 c := auxIntToInt32(v_1_0.AuxInt)
25578 v.reset(OpAMD64SHLL)
25579 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
25584 // match: (SHLL x (ADDLconst [c] y))
25585 // cond: c & 31 == 0
25586 // result: (SHLL x y)
25589 if v_1.Op != OpAMD64ADDLconst {
25592 c := auxIntToInt32(v_1.AuxInt)
25597 v.reset(OpAMD64SHLL)
25601 // match: (SHLL x (NEGL <t> (ADDLconst [c] y)))
25602 // cond: c & 31 == 0
25603 // result: (SHLL x (NEGL <t> y))
25606 if v_1.Op != OpAMD64NEGL {
25610 v_1_0 := v_1.Args[0]
25611 if v_1_0.Op != OpAMD64ADDLconst {
25614 c := auxIntToInt32(v_1_0.AuxInt)
25619 v.reset(OpAMD64SHLL)
25620 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
25625 // match: (SHLL x (ANDLconst [c] y))
25626 // cond: c & 31 == 31
25627 // result: (SHLL x y)
25630 if v_1.Op != OpAMD64ANDLconst {
25633 c := auxIntToInt32(v_1.AuxInt)
25638 v.reset(OpAMD64SHLL)
25642 // match: (SHLL x (NEGL <t> (ANDLconst [c] y)))
25643 // cond: c & 31 == 31
25644 // result: (SHLL x (NEGL <t> y))
25647 if v_1.Op != OpAMD64NEGL {
25651 v_1_0 := v_1.Args[0]
25652 if v_1_0.Op != OpAMD64ANDLconst {
25655 c := auxIntToInt32(v_1_0.AuxInt)
25660 v.reset(OpAMD64SHLL)
25661 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
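// Illustrative sketch, not part of the generated rewriter: the SHLL rules
// above may discard an ADDQconst/ADDLconst whose constant is a multiple of
// 32, and an ANDQconst/ANDLconst whose constant has all five low bits set,
// because the 32-bit x86 shift instructions only consume the low 5 bits of
// the count register. The hypothetical helper below models that masking.
func shllLikeHardware(x uint32, count uint64) uint32 {
	// Adding 32 to count, or masking count with a value whose low 5 bits are
	// all ones, leaves count&31 (and hence the result) unchanged.
	return x << (count & 31)
}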
25668 func rewriteValueAMD64_OpAMD64SHLLconst(v *Value) bool {
25670 // match: (SHLLconst [1] (SHRLconst [1] x))
25671 // result: (BTRLconst [0] x)
25673 if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHRLconst || auxIntToInt8(v_0.AuxInt) != 1 {
25677 v.reset(OpAMD64BTRLconst)
25678 v.AuxInt = int8ToAuxInt(0)
25682 // match: (SHLLconst x [0])
25683 // result: x
25685 if auxIntToInt8(v.AuxInt) != 0 {
25692 // match: (SHLLconst [d] (MOVLconst [c]))
25693 // result: (MOVLconst [c << uint64(d)])
25695 d := auxIntToInt8(v.AuxInt)
25696 if v_0.Op != OpAMD64MOVLconst {
25699 c := auxIntToInt32(v_0.AuxInt)
25700 v.reset(OpAMD64MOVLconst)
25701 v.AuxInt = int32ToAuxInt(c << uint64(d))
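// Illustrative sketch, not part of the generated rewriter: the first
// SHLLconst rule relies on the identity (x >> 1) << 1 == x &^ 1, i.e. a
// right-then-left shift by one simply clears bit 0, which is what
// BTRLconst [0] expresses directly. The helper name is hypothetical.
func clearBit0(x uint32) uint32 {
	return (x >> 1) << 1 // same value as x &^ 1
}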
25706 func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool {
25710 // match: (SHLQ x (MOVQconst [c]))
25711 // result: (SHLQconst [int8(c&63)] x)
25714 if v_1.Op != OpAMD64MOVQconst {
25717 c := auxIntToInt64(v_1.AuxInt)
25718 v.reset(OpAMD64SHLQconst)
25719 v.AuxInt = int8ToAuxInt(int8(c & 63))
25723 // match: (SHLQ x (MOVLconst [c]))
25724 // result: (SHLQconst [int8(c&63)] x)
25727 if v_1.Op != OpAMD64MOVLconst {
25730 c := auxIntToInt32(v_1.AuxInt)
25731 v.reset(OpAMD64SHLQconst)
25732 v.AuxInt = int8ToAuxInt(int8(c & 63))
25736 // match: (SHLQ x (ADDQconst [c] y))
25737 // cond: c & 63 == 0
25738 // result: (SHLQ x y)
25741 if v_1.Op != OpAMD64ADDQconst {
25744 c := auxIntToInt32(v_1.AuxInt)
25749 v.reset(OpAMD64SHLQ)
25753 // match: (SHLQ x (NEGQ <t> (ADDQconst [c] y)))
25754 // cond: c & 63 == 0
25755 // result: (SHLQ x (NEGQ <t> y))
25758 if v_1.Op != OpAMD64NEGQ {
25762 v_1_0 := v_1.Args[0]
25763 if v_1_0.Op != OpAMD64ADDQconst {
25766 c := auxIntToInt32(v_1_0.AuxInt)
25771 v.reset(OpAMD64SHLQ)
25772 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
25777 // match: (SHLQ x (ANDQconst [c] y))
25778 // cond: c & 63 == 63
25779 // result: (SHLQ x y)
25782 if v_1.Op != OpAMD64ANDQconst {
25785 c := auxIntToInt32(v_1.AuxInt)
25790 v.reset(OpAMD64SHLQ)
25794 // match: (SHLQ x (NEGQ <t> (ANDQconst [c] y)))
25795 // cond: c & 63 == 63
25796 // result: (SHLQ x (NEGQ <t> y))
25799 if v_1.Op != OpAMD64NEGQ {
25803 v_1_0 := v_1.Args[0]
25804 if v_1_0.Op != OpAMD64ANDQconst {
25807 c := auxIntToInt32(v_1_0.AuxInt)
25812 v.reset(OpAMD64SHLQ)
25813 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
25818 // match: (SHLQ x (ADDLconst [c] y))
25819 // cond: c & 63 == 0
25820 // result: (SHLQ x y)
25823 if v_1.Op != OpAMD64ADDLconst {
25826 c := auxIntToInt32(v_1.AuxInt)
25831 v.reset(OpAMD64SHLQ)
25835 // match: (SHLQ x (NEGL <t> (ADDLconst [c] y)))
25836 // cond: c & 63 == 0
25837 // result: (SHLQ x (NEGL <t> y))
25840 if v_1.Op != OpAMD64NEGL {
25844 v_1_0 := v_1.Args[0]
25845 if v_1_0.Op != OpAMD64ADDLconst {
25848 c := auxIntToInt32(v_1_0.AuxInt)
25853 v.reset(OpAMD64SHLQ)
25854 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
25859 // match: (SHLQ x (ANDLconst [c] y))
25860 // cond: c & 63 == 63
25861 // result: (SHLQ x y)
25864 if v_1.Op != OpAMD64ANDLconst {
25867 c := auxIntToInt32(v_1.AuxInt)
25872 v.reset(OpAMD64SHLQ)
25876 // match: (SHLQ x (NEGL <t> (ANDLconst [c] y)))
25877 // cond: c & 63 == 63
25878 // result: (SHLQ x (NEGL <t> y))
25881 if v_1.Op != OpAMD64NEGL {
25885 v_1_0 := v_1.Args[0]
25886 if v_1_0.Op != OpAMD64ANDLconst {
25889 c := auxIntToInt32(v_1_0.AuxInt)
25894 v.reset(OpAMD64SHLQ)
25895 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
25902 func rewriteValueAMD64_OpAMD64SHLQconst(v *Value) bool {
25904 // match: (SHLQconst [1] (SHRQconst [1] x))
25905 // result: (BTRQconst [0] x)
25907 if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHRQconst || auxIntToInt8(v_0.AuxInt) != 1 {
25911 v.reset(OpAMD64BTRQconst)
25912 v.AuxInt = int8ToAuxInt(0)
25916 // match: (SHLQconst x [0])
25917 // result: x
25919 if auxIntToInt8(v.AuxInt) != 0 {
25926 // match: (SHLQconst [d] (MOVQconst [c]))
25927 // result: (MOVQconst [c << uint64(d)])
25929 d := auxIntToInt8(v.AuxInt)
25930 if v_0.Op != OpAMD64MOVQconst {
25933 c := auxIntToInt64(v_0.AuxInt)
25934 v.reset(OpAMD64MOVQconst)
25935 v.AuxInt = int64ToAuxInt(c << uint64(d))
25938 // match: (SHLQconst [d] (MOVLconst [c]))
25939 // result: (MOVQconst [int64(c) << uint64(d)])
25941 d := auxIntToInt8(v.AuxInt)
25942 if v_0.Op != OpAMD64MOVLconst {
25945 c := auxIntToInt32(v_0.AuxInt)
25946 v.reset(OpAMD64MOVQconst)
25947 v.AuxInt = int64ToAuxInt(int64(c) << uint64(d))
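// Illustrative sketch, not part of the generated rewriter: the constant-fold
// rules above evaluate a shift of a known constant at compile time; the Go
// expression below performs the same arithmetic that the rewrite stores in
// the new AuxInt. The helper name is hypothetical.
func foldShlqConst(c int64, d uint8) int64 {
	return c << uint64(d) // mirrors (SHLQconst [d] (MOVQconst [c])) -> (MOVQconst [c<<d])
}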
25952 func rewriteValueAMD64_OpAMD64SHRB(v *Value) bool {
25955 // match: (SHRB x (MOVQconst [c]))
25956 // cond: c&31 < 8
25957 // result: (SHRBconst [int8(c&31)] x)
25960 if v_1.Op != OpAMD64MOVQconst {
25963 c := auxIntToInt64(v_1.AuxInt)
25967 v.reset(OpAMD64SHRBconst)
25968 v.AuxInt = int8ToAuxInt(int8(c & 31))
25972 // match: (SHRB x (MOVLconst [c]))
25973 // cond: c&31 < 8
25974 // result: (SHRBconst [int8(c&31)] x)
25977 if v_1.Op != OpAMD64MOVLconst {
25980 c := auxIntToInt32(v_1.AuxInt)
25984 v.reset(OpAMD64SHRBconst)
25985 v.AuxInt = int8ToAuxInt(int8(c & 31))
25989 // match: (SHRB _ (MOVQconst [c]))
25990 // cond: c&31 >= 8
25991 // result: (MOVLconst [0])
25993 if v_1.Op != OpAMD64MOVQconst {
25996 c := auxIntToInt64(v_1.AuxInt)
26000 v.reset(OpAMD64MOVLconst)
26001 v.AuxInt = int32ToAuxInt(0)
26004 // match: (SHRB _ (MOVLconst [c]))
26005 // cond: c&31 >= 8
26006 // result: (MOVLconst [0])
26008 if v_1.Op != OpAMD64MOVLconst {
26011 c := auxIntToInt32(v_1.AuxInt)
26015 v.reset(OpAMD64MOVLconst)
26016 v.AuxInt = int32ToAuxInt(0)
26021 func rewriteValueAMD64_OpAMD64SHRBconst(v *Value) bool {
26023 // match: (SHRBconst x [0])
26024 // result: x
26026 if auxIntToInt8(v.AuxInt) != 0 {
26035 func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool {
26039 // match: (SHRL x (MOVQconst [c]))
26040 // result: (SHRLconst [int8(c&31)] x)
26043 if v_1.Op != OpAMD64MOVQconst {
26046 c := auxIntToInt64(v_1.AuxInt)
26047 v.reset(OpAMD64SHRLconst)
26048 v.AuxInt = int8ToAuxInt(int8(c & 31))
26052 // match: (SHRL x (MOVLconst [c]))
26053 // result: (SHRLconst [int8(c&31)] x)
26056 if v_1.Op != OpAMD64MOVLconst {
26059 c := auxIntToInt32(v_1.AuxInt)
26060 v.reset(OpAMD64SHRLconst)
26061 v.AuxInt = int8ToAuxInt(int8(c & 31))
26065 // match: (SHRL x (ADDQconst [c] y))
26066 // cond: c & 31 == 0
26067 // result: (SHRL x y)
26070 if v_1.Op != OpAMD64ADDQconst {
26073 c := auxIntToInt32(v_1.AuxInt)
26078 v.reset(OpAMD64SHRL)
26082 // match: (SHRL x (NEGQ <t> (ADDQconst [c] y)))
26083 // cond: c & 31 == 0
26084 // result: (SHRL x (NEGQ <t> y))
26087 if v_1.Op != OpAMD64NEGQ {
26091 v_1_0 := v_1.Args[0]
26092 if v_1_0.Op != OpAMD64ADDQconst {
26095 c := auxIntToInt32(v_1_0.AuxInt)
26100 v.reset(OpAMD64SHRL)
26101 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
26106 // match: (SHRL x (ANDQconst [c] y))
26107 // cond: c & 31 == 31
26108 // result: (SHRL x y)
26111 if v_1.Op != OpAMD64ANDQconst {
26114 c := auxIntToInt32(v_1.AuxInt)
26119 v.reset(OpAMD64SHRL)
26123 // match: (SHRL x (NEGQ <t> (ANDQconst [c] y)))
26124 // cond: c & 31 == 31
26125 // result: (SHRL x (NEGQ <t> y))
26128 if v_1.Op != OpAMD64NEGQ {
26132 v_1_0 := v_1.Args[0]
26133 if v_1_0.Op != OpAMD64ANDQconst {
26136 c := auxIntToInt32(v_1_0.AuxInt)
26141 v.reset(OpAMD64SHRL)
26142 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
26147 // match: (SHRL x (ADDLconst [c] y))
26148 // cond: c & 31 == 0
26149 // result: (SHRL x y)
26152 if v_1.Op != OpAMD64ADDLconst {
26155 c := auxIntToInt32(v_1.AuxInt)
26160 v.reset(OpAMD64SHRL)
26164 // match: (SHRL x (NEGL <t> (ADDLconst [c] y)))
26165 // cond: c & 31 == 0
26166 // result: (SHRL x (NEGL <t> y))
26169 if v_1.Op != OpAMD64NEGL {
26173 v_1_0 := v_1.Args[0]
26174 if v_1_0.Op != OpAMD64ADDLconst {
26177 c := auxIntToInt32(v_1_0.AuxInt)
26182 v.reset(OpAMD64SHRL)
26183 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
26188 // match: (SHRL x (ANDLconst [c] y))
26189 // cond: c & 31 == 31
26190 // result: (SHRL x y)
26193 if v_1.Op != OpAMD64ANDLconst {
26196 c := auxIntToInt32(v_1.AuxInt)
26201 v.reset(OpAMD64SHRL)
26205 // match: (SHRL x (NEGL <t> (ANDLconst [c] y)))
26206 // cond: c & 31 == 31
26207 // result: (SHRL x (NEGL <t> y))
26210 if v_1.Op != OpAMD64NEGL {
26214 v_1_0 := v_1.Args[0]
26215 if v_1_0.Op != OpAMD64ANDLconst {
26218 c := auxIntToInt32(v_1_0.AuxInt)
26223 v.reset(OpAMD64SHRL)
26224 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
26231 func rewriteValueAMD64_OpAMD64SHRLconst(v *Value) bool {
26233 // match: (SHRLconst [1] (SHLLconst [1] x))
26234 // result: (BTRLconst [31] x)
26236 if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHLLconst || auxIntToInt8(v_0.AuxInt) != 1 {
26240 v.reset(OpAMD64BTRLconst)
26241 v.AuxInt = int8ToAuxInt(31)
26245 // match: (SHRLconst x [0])
26246 // result: x
26248 if auxIntToInt8(v.AuxInt) != 0 {
26257 func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool {
26261 // match: (SHRQ x (MOVQconst [c]))
26262 // result: (SHRQconst [int8(c&63)] x)
26265 if v_1.Op != OpAMD64MOVQconst {
26268 c := auxIntToInt64(v_1.AuxInt)
26269 v.reset(OpAMD64SHRQconst)
26270 v.AuxInt = int8ToAuxInt(int8(c & 63))
26274 // match: (SHRQ x (MOVLconst [c]))
26275 // result: (SHRQconst [int8(c&63)] x)
26278 if v_1.Op != OpAMD64MOVLconst {
26281 c := auxIntToInt32(v_1.AuxInt)
26282 v.reset(OpAMD64SHRQconst)
26283 v.AuxInt = int8ToAuxInt(int8(c & 63))
26287 // match: (SHRQ x (ADDQconst [c] y))
26288 // cond: c & 63 == 0
26289 // result: (SHRQ x y)
26292 if v_1.Op != OpAMD64ADDQconst {
26295 c := auxIntToInt32(v_1.AuxInt)
26300 v.reset(OpAMD64SHRQ)
26304 // match: (SHRQ x (NEGQ <t> (ADDQconst [c] y)))
26305 // cond: c & 63 == 0
26306 // result: (SHRQ x (NEGQ <t> y))
26309 if v_1.Op != OpAMD64NEGQ {
26313 v_1_0 := v_1.Args[0]
26314 if v_1_0.Op != OpAMD64ADDQconst {
26317 c := auxIntToInt32(v_1_0.AuxInt)
26322 v.reset(OpAMD64SHRQ)
26323 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
26328 // match: (SHRQ x (ANDQconst [c] y))
26329 // cond: c & 63 == 63
26330 // result: (SHRQ x y)
26333 if v_1.Op != OpAMD64ANDQconst {
26336 c := auxIntToInt32(v_1.AuxInt)
26341 v.reset(OpAMD64SHRQ)
26345 // match: (SHRQ x (NEGQ <t> (ANDQconst [c] y)))
26346 // cond: c & 63 == 63
26347 // result: (SHRQ x (NEGQ <t> y))
26350 if v_1.Op != OpAMD64NEGQ {
26354 v_1_0 := v_1.Args[0]
26355 if v_1_0.Op != OpAMD64ANDQconst {
26358 c := auxIntToInt32(v_1_0.AuxInt)
26363 v.reset(OpAMD64SHRQ)
26364 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
26369 // match: (SHRQ x (ADDLconst [c] y))
26370 // cond: c & 63 == 0
26371 // result: (SHRQ x y)
26374 if v_1.Op != OpAMD64ADDLconst {
26377 c := auxIntToInt32(v_1.AuxInt)
26382 v.reset(OpAMD64SHRQ)
26386 // match: (SHRQ x (NEGL <t> (ADDLconst [c] y)))
26387 // cond: c & 63 == 0
26388 // result: (SHRQ x (NEGL <t> y))
26391 if v_1.Op != OpAMD64NEGL {
26395 v_1_0 := v_1.Args[0]
26396 if v_1_0.Op != OpAMD64ADDLconst {
26399 c := auxIntToInt32(v_1_0.AuxInt)
26404 v.reset(OpAMD64SHRQ)
26405 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
26410 // match: (SHRQ x (ANDLconst [c] y))
26411 // cond: c & 63 == 63
26412 // result: (SHRQ x y)
26415 if v_1.Op != OpAMD64ANDLconst {
26418 c := auxIntToInt32(v_1.AuxInt)
26423 v.reset(OpAMD64SHRQ)
26427 // match: (SHRQ x (NEGL <t> (ANDLconst [c] y)))
26428 // cond: c & 63 == 63
26429 // result: (SHRQ x (NEGL <t> y))
26432 if v_1.Op != OpAMD64NEGL {
26436 v_1_0 := v_1.Args[0]
26437 if v_1_0.Op != OpAMD64ANDLconst {
26440 c := auxIntToInt32(v_1_0.AuxInt)
26445 v.reset(OpAMD64SHRQ)
26446 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
26453 func rewriteValueAMD64_OpAMD64SHRQconst(v *Value) bool {
26455 // match: (SHRQconst [1] (SHLQconst [1] x))
26456 // result: (BTRQconst [63] x)
26458 if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHLQconst || auxIntToInt8(v_0.AuxInt) != 1 {
26462 v.reset(OpAMD64BTRQconst)
26463 v.AuxInt = int8ToAuxInt(63)
26467 // match: (SHRQconst x [0])
26468 // result: x
26470 if auxIntToInt8(v.AuxInt) != 0 {
26479 func rewriteValueAMD64_OpAMD64SHRW(v *Value) bool {
26482 // match: (SHRW x (MOVQconst [c]))
26483 // cond: c&31 < 16
26484 // result: (SHRWconst [int8(c&31)] x)
26487 if v_1.Op != OpAMD64MOVQconst {
26490 c := auxIntToInt64(v_1.AuxInt)
26494 v.reset(OpAMD64SHRWconst)
26495 v.AuxInt = int8ToAuxInt(int8(c & 31))
26499 // match: (SHRW x (MOVLconst [c]))
26500 // cond: c&31 < 16
26501 // result: (SHRWconst [int8(c&31)] x)
26504 if v_1.Op != OpAMD64MOVLconst {
26507 c := auxIntToInt32(v_1.AuxInt)
26511 v.reset(OpAMD64SHRWconst)
26512 v.AuxInt = int8ToAuxInt(int8(c & 31))
26516 // match: (SHRW _ (MOVQconst [c]))
26517 // cond: c&31 >= 16
26518 // result: (MOVLconst [0])
26520 if v_1.Op != OpAMD64MOVQconst {
26523 c := auxIntToInt64(v_1.AuxInt)
26527 v.reset(OpAMD64MOVLconst)
26528 v.AuxInt = int32ToAuxInt(0)
26531 // match: (SHRW _ (MOVLconst [c]))
26532 // cond: c&31 >= 16
26533 // result: (MOVLconst [0])
26535 if v_1.Op != OpAMD64MOVLconst {
26538 c := auxIntToInt32(v_1.AuxInt)
26542 v.reset(OpAMD64MOVLconst)
26543 v.AuxInt = int32ToAuxInt(0)
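// Illustrative sketch, not part of the generated rewriter: the last two SHRW
// rules drop the shift entirely because a 16-bit operand shifted right by 16
// or more positions (after the hardware's 5-bit count mask) is always zero.
// The helper name is hypothetical.
func shrwForCount(x uint16, c uint64) uint32 {
	if c&31 >= 16 {
		return 0 // what the rewrite hard-codes as (MOVLconst [0])
	}
	return uint32(x) >> (c & 31)
}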
26548 func rewriteValueAMD64_OpAMD64SHRWconst(v *Value) bool {
26550 // match: (SHRWconst x [0])
26551 // result: x
26553 if auxIntToInt8(v.AuxInt) != 0 {
26562 func rewriteValueAMD64_OpAMD64SUBL(v *Value) bool {
26566 // match: (SUBL x (MOVLconst [c]))
26567 // result: (SUBLconst x [c])
26570 if v_1.Op != OpAMD64MOVLconst {
26573 c := auxIntToInt32(v_1.AuxInt)
26574 v.reset(OpAMD64SUBLconst)
26575 v.AuxInt = int32ToAuxInt(c)
26579 // match: (SUBL (MOVLconst [c]) x)
26580 // result: (NEGL (SUBLconst <v.Type> x [c]))
26582 if v_0.Op != OpAMD64MOVLconst {
26585 c := auxIntToInt32(v_0.AuxInt)
26587 v.reset(OpAMD64NEGL)
26588 v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type)
26589 v0.AuxInt = int32ToAuxInt(c)
26594 // match: (SUBL x x)
26595 // result: (MOVLconst [0])
26601 v.reset(OpAMD64MOVLconst)
26602 v.AuxInt = int32ToAuxInt(0)
26605 // match: (SUBL x l:(MOVLload [off] {sym} ptr mem))
26606 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
26607 // result: (SUBLload x [off] {sym} ptr mem)
26611 if l.Op != OpAMD64MOVLload {
26614 off := auxIntToInt32(l.AuxInt)
26615 sym := auxToSym(l.Aux)
26618 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
26621 v.reset(OpAMD64SUBLload)
26622 v.AuxInt = int32ToAuxInt(off)
26623 v.Aux = symToAux(sym)
26624 v.AddArg3(x, ptr, mem)
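// Illustrative sketch, not part of the generated rewriter: when the constant
// is the left operand, the SUBL rule above rewrites c - x into -(x - c) so
// that the non-constant operand stays in the register position expected by
// SUBLconst. The identity holds exactly under two's-complement wraparound.
// The helper name is hypothetical.
func subConstFromLeft(c, x int32) int32 {
	return -(x - c) // same value as c - x
}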
26629 func rewriteValueAMD64_OpAMD64SUBLconst(v *Value) bool {
26631 // match: (SUBLconst [c] x)
26632 // cond: c==0
26633 // result: x
26635 c := auxIntToInt32(v.AuxInt)
26643 // match: (SUBLconst [c] x)
26644 // result: (ADDLconst [-c] x)
26646 c := auxIntToInt32(v.AuxInt)
26648 v.reset(OpAMD64ADDLconst)
26649 v.AuxInt = int32ToAuxInt(-c)
26654 func rewriteValueAMD64_OpAMD64SUBLload(v *Value) bool {
26659 typ := &b.Func.Config.Types
26660 // match: (SUBLload [off1] {sym} val (ADDQconst [off2] base) mem)
26661 // cond: is32Bit(int64(off1)+int64(off2))
26662 // result: (SUBLload [off1+off2] {sym} val base mem)
26664 off1 := auxIntToInt32(v.AuxInt)
26665 sym := auxToSym(v.Aux)
26667 if v_1.Op != OpAMD64ADDQconst {
26670 off2 := auxIntToInt32(v_1.AuxInt)
26671 base := v_1.Args[0]
26673 if !(is32Bit(int64(off1) + int64(off2))) {
26676 v.reset(OpAMD64SUBLload)
26677 v.AuxInt = int32ToAuxInt(off1 + off2)
26678 v.Aux = symToAux(sym)
26679 v.AddArg3(val, base, mem)
26682 // match: (SUBLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
26683 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
26684 // result: (SUBLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
26686 off1 := auxIntToInt32(v.AuxInt)
26687 sym1 := auxToSym(v.Aux)
26689 if v_1.Op != OpAMD64LEAQ {
26692 off2 := auxIntToInt32(v_1.AuxInt)
26693 sym2 := auxToSym(v_1.Aux)
26694 base := v_1.Args[0]
26696 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
26699 v.reset(OpAMD64SUBLload)
26700 v.AuxInt = int32ToAuxInt(off1 + off2)
26701 v.Aux = symToAux(mergeSym(sym1, sym2))
26702 v.AddArg3(val, base, mem)
26705 // match: (SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
26706 // result: (SUBL x (MOVLf2i y))
26708 off := auxIntToInt32(v.AuxInt)
26709 sym := auxToSym(v.Aux)
26712 if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
26716 if ptr != v_2.Args[0] {
26719 v.reset(OpAMD64SUBL)
26720 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
26727 func rewriteValueAMD64_OpAMD64SUBLmodify(v *Value) bool {
26731 // match: (SUBLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
26732 // cond: is32Bit(int64(off1)+int64(off2))
26733 // result: (SUBLmodify [off1+off2] {sym} base val mem)
26735 off1 := auxIntToInt32(v.AuxInt)
26736 sym := auxToSym(v.Aux)
26737 if v_0.Op != OpAMD64ADDQconst {
26740 off2 := auxIntToInt32(v_0.AuxInt)
26741 base := v_0.Args[0]
26744 if !(is32Bit(int64(off1) + int64(off2))) {
26747 v.reset(OpAMD64SUBLmodify)
26748 v.AuxInt = int32ToAuxInt(off1 + off2)
26749 v.Aux = symToAux(sym)
26750 v.AddArg3(base, val, mem)
26753 // match: (SUBLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
26754 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
26755 // result: (SUBLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
26757 off1 := auxIntToInt32(v.AuxInt)
26758 sym1 := auxToSym(v.Aux)
26759 if v_0.Op != OpAMD64LEAQ {
26762 off2 := auxIntToInt32(v_0.AuxInt)
26763 sym2 := auxToSym(v_0.Aux)
26764 base := v_0.Args[0]
26767 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
26770 v.reset(OpAMD64SUBLmodify)
26771 v.AuxInt = int32ToAuxInt(off1 + off2)
26772 v.Aux = symToAux(mergeSym(sym1, sym2))
26773 v.AddArg3(base, val, mem)
26778 func rewriteValueAMD64_OpAMD64SUBQ(v *Value) bool {
26782 // match: (SUBQ x (MOVQconst [c]))
26783 // cond: is32Bit(c)
26784 // result: (SUBQconst x [int32(c)])
26787 if v_1.Op != OpAMD64MOVQconst {
26790 c := auxIntToInt64(v_1.AuxInt)
26794 v.reset(OpAMD64SUBQconst)
26795 v.AuxInt = int32ToAuxInt(int32(c))
26799 // match: (SUBQ (MOVQconst [c]) x)
26800 // cond: is32Bit(c)
26801 // result: (NEGQ (SUBQconst <v.Type> x [int32(c)]))
26803 if v_0.Op != OpAMD64MOVQconst {
26806 c := auxIntToInt64(v_0.AuxInt)
26811 v.reset(OpAMD64NEGQ)
26812 v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type)
26813 v0.AuxInt = int32ToAuxInt(int32(c))
26818 // match: (SUBQ x x)
26819 // result: (MOVQconst [0])
26825 v.reset(OpAMD64MOVQconst)
26826 v.AuxInt = int64ToAuxInt(0)
26829 // match: (SUBQ x l:(MOVQload [off] {sym} ptr mem))
26830 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
26831 // result: (SUBQload x [off] {sym} ptr mem)
26835 if l.Op != OpAMD64MOVQload {
26838 off := auxIntToInt32(l.AuxInt)
26839 sym := auxToSym(l.Aux)
26842 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
26845 v.reset(OpAMD64SUBQload)
26846 v.AuxInt = int32ToAuxInt(off)
26847 v.Aux = symToAux(sym)
26848 v.AddArg3(x, ptr, mem)
26853 func rewriteValueAMD64_OpAMD64SUBQborrow(v *Value) bool {
26856 // match: (SUBQborrow x (MOVQconst [c]))
26857 // cond: is32Bit(c)
26858 // result: (SUBQconstborrow x [int32(c)])
26861 if v_1.Op != OpAMD64MOVQconst {
26864 c := auxIntToInt64(v_1.AuxInt)
26868 v.reset(OpAMD64SUBQconstborrow)
26869 v.AuxInt = int32ToAuxInt(int32(c))
26875 func rewriteValueAMD64_OpAMD64SUBQconst(v *Value) bool {
26877 // match: (SUBQconst [0] x)
26878 // result: x
26880 if auxIntToInt32(v.AuxInt) != 0 {
26887 // match: (SUBQconst [c] x)
26888 // cond: c != -(1<<31)
26889 // result: (ADDQconst [-c] x)
26891 c := auxIntToInt32(v.AuxInt)
26893 if !(c != -(1 << 31)) {
26896 v.reset(OpAMD64ADDQconst)
26897 v.AuxInt = int32ToAuxInt(-c)
26901 // match: (SUBQconst (MOVQconst [d]) [c])
26902 // result: (MOVQconst [d-int64(c)])
26904 c := auxIntToInt32(v.AuxInt)
26905 if v_0.Op != OpAMD64MOVQconst {
26908 d := auxIntToInt64(v_0.AuxInt)
26909 v.reset(OpAMD64MOVQconst)
26910 v.AuxInt = int64ToAuxInt(d - int64(c))
26913 // match: (SUBQconst (SUBQconst x [d]) [c])
26914 // cond: is32Bit(int64(-c)-int64(d))
26915 // result: (ADDQconst [-c-d] x)
26917 c := auxIntToInt32(v.AuxInt)
26918 if v_0.Op != OpAMD64SUBQconst {
26921 d := auxIntToInt32(v_0.AuxInt)
26923 if !(is32Bit(int64(-c) - int64(d))) {
26926 v.reset(OpAMD64ADDQconst)
26927 v.AuxInt = int32ToAuxInt(-c - d)
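// Illustrative sketch, not part of the generated rewriter: turning
// SUBQconst [c] into ADDQconst [-c] is only done when c != -(1<<31), because
// negating the most negative int32 wraps back to itself and -c would no
// longer represent the intended immediate. Hypothetical demonstration:
func negationIsSafe(c int32) bool {
	return c != -(1 << 31) // -(-1<<31) wraps to -1<<31 again, so that case is excluded
}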
26933 func rewriteValueAMD64_OpAMD64SUBQload(v *Value) bool {
26938 typ := &b.Func.Config.Types
26939 // match: (SUBQload [off1] {sym} val (ADDQconst [off2] base) mem)
26940 // cond: is32Bit(int64(off1)+int64(off2))
26941 // result: (SUBQload [off1+off2] {sym} val base mem)
26943 off1 := auxIntToInt32(v.AuxInt)
26944 sym := auxToSym(v.Aux)
26946 if v_1.Op != OpAMD64ADDQconst {
26949 off2 := auxIntToInt32(v_1.AuxInt)
26950 base := v_1.Args[0]
26952 if !(is32Bit(int64(off1) + int64(off2))) {
26955 v.reset(OpAMD64SUBQload)
26956 v.AuxInt = int32ToAuxInt(off1 + off2)
26957 v.Aux = symToAux(sym)
26958 v.AddArg3(val, base, mem)
26961 // match: (SUBQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
26962 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
26963 // result: (SUBQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
26965 off1 := auxIntToInt32(v.AuxInt)
26966 sym1 := auxToSym(v.Aux)
26968 if v_1.Op != OpAMD64LEAQ {
26971 off2 := auxIntToInt32(v_1.AuxInt)
26972 sym2 := auxToSym(v_1.Aux)
26973 base := v_1.Args[0]
26975 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
26978 v.reset(OpAMD64SUBQload)
26979 v.AuxInt = int32ToAuxInt(off1 + off2)
26980 v.Aux = symToAux(mergeSym(sym1, sym2))
26981 v.AddArg3(val, base, mem)
26984 // match: (SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
26985 // result: (SUBQ x (MOVQf2i y))
26987 off := auxIntToInt32(v.AuxInt)
26988 sym := auxToSym(v.Aux)
26991 if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
26995 if ptr != v_2.Args[0] {
26998 v.reset(OpAMD64SUBQ)
26999 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
27006 func rewriteValueAMD64_OpAMD64SUBQmodify(v *Value) bool {
27010 // match: (SUBQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
27011 // cond: is32Bit(int64(off1)+int64(off2))
27012 // result: (SUBQmodify [off1+off2] {sym} base val mem)
27014 off1 := auxIntToInt32(v.AuxInt)
27015 sym := auxToSym(v.Aux)
27016 if v_0.Op != OpAMD64ADDQconst {
27019 off2 := auxIntToInt32(v_0.AuxInt)
27020 base := v_0.Args[0]
27023 if !(is32Bit(int64(off1) + int64(off2))) {
27026 v.reset(OpAMD64SUBQmodify)
27027 v.AuxInt = int32ToAuxInt(off1 + off2)
27028 v.Aux = symToAux(sym)
27029 v.AddArg3(base, val, mem)
27032 // match: (SUBQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
27033 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
27034 // result: (SUBQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
27036 off1 := auxIntToInt32(v.AuxInt)
27037 sym1 := auxToSym(v.Aux)
27038 if v_0.Op != OpAMD64LEAQ {
27041 off2 := auxIntToInt32(v_0.AuxInt)
27042 sym2 := auxToSym(v_0.Aux)
27043 base := v_0.Args[0]
27046 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
27049 v.reset(OpAMD64SUBQmodify)
27050 v.AuxInt = int32ToAuxInt(off1 + off2)
27051 v.Aux = symToAux(mergeSym(sym1, sym2))
27052 v.AddArg3(base, val, mem)
27057 func rewriteValueAMD64_OpAMD64SUBSD(v *Value) bool {
27060 // match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem))
27061 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
27062 // result: (SUBSDload x [off] {sym} ptr mem)
27066 if l.Op != OpAMD64MOVSDload {
27069 off := auxIntToInt32(l.AuxInt)
27070 sym := auxToSym(l.Aux)
27073 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
27076 v.reset(OpAMD64SUBSDload)
27077 v.AuxInt = int32ToAuxInt(off)
27078 v.Aux = symToAux(sym)
27079 v.AddArg3(x, ptr, mem)
27084 func rewriteValueAMD64_OpAMD64SUBSDload(v *Value) bool {
27089 typ := &b.Func.Config.Types
27090 // match: (SUBSDload [off1] {sym} val (ADDQconst [off2] base) mem)
27091 // cond: is32Bit(int64(off1)+int64(off2))
27092 // result: (SUBSDload [off1+off2] {sym} val base mem)
27094 off1 := auxIntToInt32(v.AuxInt)
27095 sym := auxToSym(v.Aux)
27097 if v_1.Op != OpAMD64ADDQconst {
27100 off2 := auxIntToInt32(v_1.AuxInt)
27101 base := v_1.Args[0]
27103 if !(is32Bit(int64(off1) + int64(off2))) {
27106 v.reset(OpAMD64SUBSDload)
27107 v.AuxInt = int32ToAuxInt(off1 + off2)
27108 v.Aux = symToAux(sym)
27109 v.AddArg3(val, base, mem)
27112 // match: (SUBSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
27113 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
27114 // result: (SUBSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
27116 off1 := auxIntToInt32(v.AuxInt)
27117 sym1 := auxToSym(v.Aux)
27119 if v_1.Op != OpAMD64LEAQ {
27122 off2 := auxIntToInt32(v_1.AuxInt)
27123 sym2 := auxToSym(v_1.Aux)
27124 base := v_1.Args[0]
27126 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
27129 v.reset(OpAMD64SUBSDload)
27130 v.AuxInt = int32ToAuxInt(off1 + off2)
27131 v.Aux = symToAux(mergeSym(sym1, sym2))
27132 v.AddArg3(val, base, mem)
27135 // match: (SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
27136 // result: (SUBSD x (MOVQi2f y))
27138 off := auxIntToInt32(v.AuxInt)
27139 sym := auxToSym(v.Aux)
27142 if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
27146 if ptr != v_2.Args[0] {
27149 v.reset(OpAMD64SUBSD)
27150 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
27157 func rewriteValueAMD64_OpAMD64SUBSS(v *Value) bool {
27160 // match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem))
27161 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
27162 // result: (SUBSSload x [off] {sym} ptr mem)
27166 if l.Op != OpAMD64MOVSSload {
27169 off := auxIntToInt32(l.AuxInt)
27170 sym := auxToSym(l.Aux)
27173 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
27176 v.reset(OpAMD64SUBSSload)
27177 v.AuxInt = int32ToAuxInt(off)
27178 v.Aux = symToAux(sym)
27179 v.AddArg3(x, ptr, mem)
27184 func rewriteValueAMD64_OpAMD64SUBSSload(v *Value) bool {
27189 typ := &b.Func.Config.Types
27190 // match: (SUBSSload [off1] {sym} val (ADDQconst [off2] base) mem)
27191 // cond: is32Bit(int64(off1)+int64(off2))
27192 // result: (SUBSSload [off1+off2] {sym} val base mem)
27194 off1 := auxIntToInt32(v.AuxInt)
27195 sym := auxToSym(v.Aux)
27197 if v_1.Op != OpAMD64ADDQconst {
27200 off2 := auxIntToInt32(v_1.AuxInt)
27201 base := v_1.Args[0]
27203 if !(is32Bit(int64(off1) + int64(off2))) {
27206 v.reset(OpAMD64SUBSSload)
27207 v.AuxInt = int32ToAuxInt(off1 + off2)
27208 v.Aux = symToAux(sym)
27209 v.AddArg3(val, base, mem)
27212 // match: (SUBSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
27213 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
27214 // result: (SUBSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
27216 off1 := auxIntToInt32(v.AuxInt)
27217 sym1 := auxToSym(v.Aux)
27219 if v_1.Op != OpAMD64LEAQ {
27222 off2 := auxIntToInt32(v_1.AuxInt)
27223 sym2 := auxToSym(v_1.Aux)
27224 base := v_1.Args[0]
27226 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
27229 v.reset(OpAMD64SUBSSload)
27230 v.AuxInt = int32ToAuxInt(off1 + off2)
27231 v.Aux = symToAux(mergeSym(sym1, sym2))
27232 v.AddArg3(val, base, mem)
27235 // match: (SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
27236 // result: (SUBSS x (MOVLi2f y))
27238 off := auxIntToInt32(v.AuxInt)
27239 sym := auxToSym(v.Aux)
27242 if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
27246 if ptr != v_2.Args[0] {
27249 v.reset(OpAMD64SUBSS)
27250 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
27257 func rewriteValueAMD64_OpAMD64TESTB(v *Value) bool {
27261 // match: (TESTB (MOVLconst [c]) x)
27262 // result: (TESTBconst [int8(c)] x)
27264 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
27265 if v_0.Op != OpAMD64MOVLconst {
27268 c := auxIntToInt32(v_0.AuxInt)
27270 v.reset(OpAMD64TESTBconst)
27271 v.AuxInt = int8ToAuxInt(int8(c))
27277 // match: (TESTB l:(MOVBload {sym} [off] ptr mem) l2)
27278 // cond: l == l2 && l.Uses == 2 && clobber(l)
27279 // result: @l.Block (CMPBconstload {sym} [makeValAndOff(0, off)] ptr mem)
27281 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
27283 if l.Op != OpAMD64MOVBload {
27286 off := auxIntToInt32(l.AuxInt)
27287 sym := auxToSym(l.Aux)
27291 if !(l == l2 && l.Uses == 2 && clobber(l)) {
27295 v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
27297 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
27298 v0.Aux = symToAux(sym)
27299 v0.AddArg2(ptr, mem)
27306 func rewriteValueAMD64_OpAMD64TESTBconst(v *Value) bool {
27308 // match: (TESTBconst [-1] x)
27309 // cond: x.Op != OpAMD64MOVLconst
27310 // result: (TESTB x x)
27312 if auxIntToInt8(v.AuxInt) != -1 {
27316 if !(x.Op != OpAMD64MOVLconst) {
27319 v.reset(OpAMD64TESTB)
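// Illustrative sketch, not part of the generated rewriter: TESTBconst [-1] x
// masks x with a byte of all ones, so the flags it produces are the same as
// those of TESTB x x (which computes x & x == x); in both forms the zero flag
// simply asks whether x itself is zero. The helper name is hypothetical.
func testbZeroFlag(x uint8) bool {
	// TESTBconst [-1] x sets ZF from x & 0xFF; TESTB x x sets ZF from x & x.
	return x == 0
}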
27325 func rewriteValueAMD64_OpAMD64TESTL(v *Value) bool {
27329 // match: (TESTL (MOVLconst [c]) x)
27330 // result: (TESTLconst [c] x)
27332 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
27333 if v_0.Op != OpAMD64MOVLconst {
27336 c := auxIntToInt32(v_0.AuxInt)
27338 v.reset(OpAMD64TESTLconst)
27339 v.AuxInt = int32ToAuxInt(c)
27345 // match: (TESTL l:(MOVLload {sym} [off] ptr mem) l2)
27346 // cond: l == l2 && l.Uses == 2 && clobber(l)
27347 // result: @l.Block (CMPLconstload {sym} [makeValAndOff(0, off)] ptr mem)
27349 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
27351 if l.Op != OpAMD64MOVLload {
27354 off := auxIntToInt32(l.AuxInt)
27355 sym := auxToSym(l.Aux)
27359 if !(l == l2 && l.Uses == 2 && clobber(l)) {
27363 v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
27365 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
27366 v0.Aux = symToAux(sym)
27367 v0.AddArg2(ptr, mem)
27372 // match: (TESTL a:(ANDLload [off] {sym} x ptr mem) a)
27373 // cond: a.Uses == 2 && a.Block == v.Block && clobber(a)
27374 // result: (TESTL (MOVLload <a.Type> [off] {sym} ptr mem) x)
27376 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
27378 if a.Op != OpAMD64ANDLload {
27381 off := auxIntToInt32(a.AuxInt)
27382 sym := auxToSym(a.Aux)
27386 if a != v_1 || !(a.Uses == 2 && a.Block == v.Block && clobber(a)) {
27389 v.reset(OpAMD64TESTL)
27390 v0 := b.NewValue0(a.Pos, OpAMD64MOVLload, a.Type)
27391 v0.AuxInt = int32ToAuxInt(off)
27392 v0.Aux = symToAux(sym)
27393 v0.AddArg2(ptr, mem)
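// Illustrative sketch, not part of the generated rewriter: when both TESTL
// operands are the same load l, the flags come from l & l == l, so the test
// is equivalent to comparing the loaded value against zero; the rewrite above
// expresses that directly as CMPLconstload with value 0, folding the load
// into the compare. Hypothetical scalar analogue:
func testOfSelfEqualsCompareWithZero(loaded int32) bool {
	return (loaded & loaded) == 0 // the same question as loaded == 0
}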
27401 func rewriteValueAMD64_OpAMD64TESTLconst(v *Value) bool {
27403 // match: (TESTLconst [c] (MOVLconst [c]))
27404 // cond: c == 0
27405 // result: (FlagEQ)
27407 c := auxIntToInt32(v.AuxInt)
27408 if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c == 0) {
27411 v.reset(OpAMD64FlagEQ)
27414 // match: (TESTLconst [c] (MOVLconst [c]))
27415 // cond: c < 0
27416 // result: (FlagLT_UGT)
27418 c := auxIntToInt32(v.AuxInt)
27419 if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c < 0) {
27422 v.reset(OpAMD64FlagLT_UGT)
27425 // match: (TESTLconst [c] (MOVLconst [c]))
27426 // cond: c > 0
27427 // result: (FlagGT_UGT)
27429 c := auxIntToInt32(v.AuxInt)
27430 if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c > 0) {
27433 v.reset(OpAMD64FlagGT_UGT)
27436 // match: (TESTLconst [-1] x)
27437 // cond: x.Op != OpAMD64MOVLconst
27438 // result: (TESTL x x)
27440 if auxIntToInt32(v.AuxInt) != -1 {
27444 if !(x.Op != OpAMD64MOVLconst) {
27447 v.reset(OpAMD64TESTL)
27453 func rewriteValueAMD64_OpAMD64TESTQ(v *Value) bool {
27457 // match: (TESTQ (MOVQconst [c]) x)
27458 // cond: is32Bit(c)
27459 // result: (TESTQconst [int32(c)] x)
27461 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
27462 if v_0.Op != OpAMD64MOVQconst {
27465 c := auxIntToInt64(v_0.AuxInt)
27470 v.reset(OpAMD64TESTQconst)
27471 v.AuxInt = int32ToAuxInt(int32(c))
27477 // match: (TESTQ l:(MOVQload {sym} [off] ptr mem) l2)
27478 // cond: l == l2 && l.Uses == 2 && clobber(l)
27479 // result: @l.Block (CMPQconstload {sym} [makeValAndOff(0, off)] ptr mem)
27481 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
27483 if l.Op != OpAMD64MOVQload {
27486 off := auxIntToInt32(l.AuxInt)
27487 sym := auxToSym(l.Aux)
27491 if !(l == l2 && l.Uses == 2 && clobber(l)) {
27495 v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
27497 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
27498 v0.Aux = symToAux(sym)
27499 v0.AddArg2(ptr, mem)
27504 // match: (TESTQ a:(ANDQload [off] {sym} x ptr mem) a)
27505 // cond: a.Uses == 2 && a.Block == v.Block && clobber(a)
27506 // result: (TESTQ (MOVQload <a.Type> [off] {sym} ptr mem) x)
27508 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
27510 if a.Op != OpAMD64ANDQload {
27513 off := auxIntToInt32(a.AuxInt)
27514 sym := auxToSym(a.Aux)
27518 if a != v_1 || !(a.Uses == 2 && a.Block == v.Block && clobber(a)) {
27521 v.reset(OpAMD64TESTQ)
27522 v0 := b.NewValue0(a.Pos, OpAMD64MOVQload, a.Type)
27523 v0.AuxInt = int32ToAuxInt(off)
27524 v0.Aux = symToAux(sym)
27525 v0.AddArg2(ptr, mem)
27533 func rewriteValueAMD64_OpAMD64TESTQconst(v *Value) bool {
27535 // match: (TESTQconst [c] (MOVQconst [d]))
27536 // cond: int64(c) == d && c == 0
27537 // result: (FlagEQ)
27539 c := auxIntToInt32(v.AuxInt)
27540 if v_0.Op != OpAMD64MOVQconst {
27543 d := auxIntToInt64(v_0.AuxInt)
27544 if !(int64(c) == d && c == 0) {
27547 v.reset(OpAMD64FlagEQ)
27550 // match: (TESTQconst [c] (MOVQconst [d]))
27551 // cond: int64(c) == d && c < 0
27552 // result: (FlagLT_UGT)
27554 c := auxIntToInt32(v.AuxInt)
27555 if v_0.Op != OpAMD64MOVQconst {
27558 d := auxIntToInt64(v_0.AuxInt)
27559 if !(int64(c) == d && c < 0) {
27562 v.reset(OpAMD64FlagLT_UGT)
27565 // match: (TESTQconst [c] (MOVQconst [d]))
27566 // cond: int64(c) == d && c > 0
27567 // result: (FlagGT_UGT)
27569 c := auxIntToInt32(v.AuxInt)
27570 if v_0.Op != OpAMD64MOVQconst {
27573 d := auxIntToInt64(v_0.AuxInt)
27574 if !(int64(c) == d && c > 0) {
27577 v.reset(OpAMD64FlagGT_UGT)
27580 // match: (TESTQconst [-1] x)
27581 // cond: x.Op != OpAMD64MOVQconst
27582 // result: (TESTQ x x)
27584 if auxIntToInt32(v.AuxInt) != -1 {
27588 if !(x.Op != OpAMD64MOVQconst) {
27591 v.reset(OpAMD64TESTQ)
27597 func rewriteValueAMD64_OpAMD64TESTW(v *Value) bool {
27601 // match: (TESTW (MOVLconst [c]) x)
27602 // result: (TESTWconst [int16(c)] x)
27604 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
27605 if v_0.Op != OpAMD64MOVLconst {
27608 c := auxIntToInt32(v_0.AuxInt)
27610 v.reset(OpAMD64TESTWconst)
27611 v.AuxInt = int16ToAuxInt(int16(c))
27617 // match: (TESTW l:(MOVWload {sym} [off] ptr mem) l2)
27618 // cond: l == l2 && l.Uses == 2 && clobber(l)
27619 // result: @l.Block (CMPWconstload {sym} [makeValAndOff(0, off)] ptr mem)
27621 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
27623 if l.Op != OpAMD64MOVWload {
27626 off := auxIntToInt32(l.AuxInt)
27627 sym := auxToSym(l.Aux)
27631 if !(l == l2 && l.Uses == 2 && clobber(l)) {
27635 v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
27637 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
27638 v0.Aux = symToAux(sym)
27639 v0.AddArg2(ptr, mem)
27646 func rewriteValueAMD64_OpAMD64TESTWconst(v *Value) bool {
27648 // match: (TESTWconst [-1] x)
27649 // cond: x.Op != OpAMD64MOVLconst
27650 // result: (TESTW x x)
27652 if auxIntToInt16(v.AuxInt) != -1 {
27656 if !(x.Op != OpAMD64MOVLconst) {
27659 v.reset(OpAMD64TESTW)
27665 func rewriteValueAMD64_OpAMD64XADDLlock(v *Value) bool {
27669 // match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
27670 // cond: is32Bit(int64(off1)+int64(off2))
27671 // result: (XADDLlock [off1+off2] {sym} val ptr mem)
27673 off1 := auxIntToInt32(v.AuxInt)
27674 sym := auxToSym(v.Aux)
27676 if v_1.Op != OpAMD64ADDQconst {
27679 off2 := auxIntToInt32(v_1.AuxInt)
27682 if !(is32Bit(int64(off1) + int64(off2))) {
27685 v.reset(OpAMD64XADDLlock)
27686 v.AuxInt = int32ToAuxInt(off1 + off2)
27687 v.Aux = symToAux(sym)
27688 v.AddArg3(val, ptr, mem)
27693 func rewriteValueAMD64_OpAMD64XADDQlock(v *Value) bool {
27697 // match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
27698 // cond: is32Bit(int64(off1)+int64(off2))
27699 // result: (XADDQlock [off1+off2] {sym} val ptr mem)
27701 off1 := auxIntToInt32(v.AuxInt)
27702 sym := auxToSym(v.Aux)
27704 if v_1.Op != OpAMD64ADDQconst {
27707 off2 := auxIntToInt32(v_1.AuxInt)
27710 if !(is32Bit(int64(off1) + int64(off2))) {
27713 v.reset(OpAMD64XADDQlock)
27714 v.AuxInt = int32ToAuxInt(off1 + off2)
27715 v.Aux = symToAux(sym)
27716 v.AddArg3(val, ptr, mem)
27721 func rewriteValueAMD64_OpAMD64XCHGL(v *Value) bool {
27725 // match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem)
27726 // cond: is32Bit(int64(off1)+int64(off2))
27727 // result: (XCHGL [off1+off2] {sym} val ptr mem)
27729 off1 := auxIntToInt32(v.AuxInt)
27730 sym := auxToSym(v.Aux)
27732 if v_1.Op != OpAMD64ADDQconst {
27735 off2 := auxIntToInt32(v_1.AuxInt)
27738 if !(is32Bit(int64(off1) + int64(off2))) {
27741 v.reset(OpAMD64XCHGL)
27742 v.AuxInt = int32ToAuxInt(off1 + off2)
27743 v.Aux = symToAux(sym)
27744 v.AddArg3(val, ptr, mem)
27747 // match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
27748 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
27749 // result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
27751 off1 := auxIntToInt32(v.AuxInt)
27752 sym1 := auxToSym(v.Aux)
27754 if v_1.Op != OpAMD64LEAQ {
27757 off2 := auxIntToInt32(v_1.AuxInt)
27758 sym2 := auxToSym(v_1.Aux)
27761 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
27764 v.reset(OpAMD64XCHGL)
27765 v.AuxInt = int32ToAuxInt(off1 + off2)
27766 v.Aux = symToAux(mergeSym(sym1, sym2))
27767 v.AddArg3(val, ptr, mem)
27772 func rewriteValueAMD64_OpAMD64XCHGQ(v *Value) bool {
27776 // match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem)
27777 // cond: is32Bit(int64(off1)+int64(off2))
27778 // result: (XCHGQ [off1+off2] {sym} val ptr mem)
27780 off1 := auxIntToInt32(v.AuxInt)
27781 sym := auxToSym(v.Aux)
27783 if v_1.Op != OpAMD64ADDQconst {
27786 off2 := auxIntToInt32(v_1.AuxInt)
27789 if !(is32Bit(int64(off1) + int64(off2))) {
27792 v.reset(OpAMD64XCHGQ)
27793 v.AuxInt = int32ToAuxInt(off1 + off2)
27794 v.Aux = symToAux(sym)
27795 v.AddArg3(val, ptr, mem)
27798 // match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
27799 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
27800 // result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
27802 off1 := auxIntToInt32(v.AuxInt)
27803 sym1 := auxToSym(v.Aux)
27805 if v_1.Op != OpAMD64LEAQ {
27808 off2 := auxIntToInt32(v_1.AuxInt)
27809 sym2 := auxToSym(v_1.Aux)
27812 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
27815 v.reset(OpAMD64XCHGQ)
27816 v.AuxInt = int32ToAuxInt(off1 + off2)
27817 v.Aux = symToAux(mergeSym(sym1, sym2))
27818 v.AddArg3(val, ptr, mem)
27823 func rewriteValueAMD64_OpAMD64XORL(v *Value) bool {
27826 // match: (XORL (SHLL (MOVLconst [1]) y) x)
27827 // result: (BTCL x y)
27829 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
27830 if v_0.Op != OpAMD64SHLL {
27834 v_0_0 := v_0.Args[0]
27835 if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
27839 v.reset(OpAMD64BTCL)
27845 // match: (XORL (MOVLconst [c]) x)
27846 // cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
27847 // result: (BTCLconst [int8(log32(c))] x)
27849 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
27850 if v_0.Op != OpAMD64MOVLconst {
27853 c := auxIntToInt32(v_0.AuxInt)
27855 if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) {
27858 v.reset(OpAMD64BTCLconst)
27859 v.AuxInt = int8ToAuxInt(int8(log32(c)))
27865 // match: (XORL x (MOVLconst [c]))
27866 // result: (XORLconst [c] x)
27868 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
27870 if v_1.Op != OpAMD64MOVLconst {
27873 c := auxIntToInt32(v_1.AuxInt)
27874 v.reset(OpAMD64XORLconst)
27875 v.AuxInt = int32ToAuxInt(c)
27881 // match: (XORL (SHLLconst x [c]) (SHRLconst x [d]))
27882 // cond: d==32-c
27883 // result: (ROLLconst x [c])
27885 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
27886 if v_0.Op != OpAMD64SHLLconst {
27889 c := auxIntToInt8(v_0.AuxInt)
27891 if v_1.Op != OpAMD64SHRLconst {
27894 d := auxIntToInt8(v_1.AuxInt)
27895 if x != v_1.Args[0] || !(d == 32-c) {
27898 v.reset(OpAMD64ROLLconst)
27899 v.AuxInt = int8ToAuxInt(c)
27905 // match: (XORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
27906 // cond: d==16-c && c < 16 && t.Size() == 2
27907 // result: (ROLWconst x [c])
27910 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
27911 if v_0.Op != OpAMD64SHLLconst {
27914 c := auxIntToInt8(v_0.AuxInt)
27916 if v_1.Op != OpAMD64SHRWconst {
27919 d := auxIntToInt8(v_1.AuxInt)
27920 if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) {
27923 v.reset(OpAMD64ROLWconst)
27924 v.AuxInt = int8ToAuxInt(c)
27930 // match: (XORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
27931 // cond: d==8-c && c < 8 && t.Size() == 1
27932 // result: (ROLBconst x [c])
27935 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
27936 if v_0.Op != OpAMD64SHLLconst {
27939 c := auxIntToInt8(v_0.AuxInt)
27941 if v_1.Op != OpAMD64SHRBconst {
27944 d := auxIntToInt8(v_1.AuxInt)
27945 if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) {
27948 v.reset(OpAMD64ROLBconst)
27949 v.AuxInt = int8ToAuxInt(c)
27955 // match: (XORL x x)
27956 // result: (MOVLconst [0])
27962 v.reset(OpAMD64MOVLconst)
27963 v.AuxInt = int32ToAuxInt(0)
27966 // match: (XORL x l:(MOVLload [off] {sym} ptr mem))
27967 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
27968 // result: (XORLload x [off] {sym} ptr mem)
27970 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
27973 if l.Op != OpAMD64MOVLload {
27976 off := auxIntToInt32(l.AuxInt)
27977 sym := auxToSym(l.Aux)
27980 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
27983 v.reset(OpAMD64XORLload)
27984 v.AuxInt = int32ToAuxInt(off)
27985 v.Aux = symToAux(sym)
27986 v.AddArg3(x, ptr, mem)
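// Illustrative sketch, not part of the generated rewriter: the ROLLconst rule
// above recognizes a rotate written as an XOR of two shifts. Because x<<c and
// x>>(32-c) have no overlapping one bits, combining them with XOR or OR
// produces the same rotated value. The helper name is hypothetical.
func rotateLeft32ByXor(x uint32, c uint) uint32 {
	c &= 31
	// For c in 0..31 this equals the left-rotation of x by c bits.
	return (x << c) ^ (x >> (32 - c))
}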
27993 func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool {
27995 // match: (XORLconst [c] x)
27996 // cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
27997 // result: (BTCLconst [int8(log32(c))] x)
27999 c := auxIntToInt32(v.AuxInt)
28001 if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) {
28004 v.reset(OpAMD64BTCLconst)
28005 v.AuxInt = int8ToAuxInt(int8(log32(c)))
28009 // match: (XORLconst [1] (SETNE x))
28010 // result: (SETEQ x)
28012 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETNE {
28016 v.reset(OpAMD64SETEQ)
28020 // match: (XORLconst [1] (SETEQ x))
28021 // result: (SETNE x)
28023 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETEQ {
28027 v.reset(OpAMD64SETNE)
28031 // match: (XORLconst [1] (SETL x))
28032 // result: (SETGE x)
28034 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETL {
28038 v.reset(OpAMD64SETGE)
28042 // match: (XORLconst [1] (SETGE x))
28043 // result: (SETL x)
28045 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETGE {
28049 v.reset(OpAMD64SETL)
28053 // match: (XORLconst [1] (SETLE x))
28054 // result: (SETG x)
28056 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETLE {
28060 v.reset(OpAMD64SETG)
28064 // match: (XORLconst [1] (SETG x))
28065 // result: (SETLE x)
28067 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETG {
28071 v.reset(OpAMD64SETLE)
28075 // match: (XORLconst [1] (SETB x))
28076 // result: (SETAE x)
28078 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETB {
28082 v.reset(OpAMD64SETAE)
28086 // match: (XORLconst [1] (SETAE x))
28087 // result: (SETB x)
28089 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETAE {
28093 v.reset(OpAMD64SETB)
28097 // match: (XORLconst [1] (SETBE x))
28098 // result: (SETA x)
28100 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETBE {
28104 v.reset(OpAMD64SETA)
28108 // match: (XORLconst [1] (SETA x))
28109 // result: (SETBE x)
28111 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETA {
28115 v.reset(OpAMD64SETBE)
28119 // match: (XORLconst [c] (XORLconst [d] x))
28120 // result: (XORLconst [c ^ d] x)
28122 c := auxIntToInt32(v.AuxInt)
28123 if v_0.Op != OpAMD64XORLconst {
28126 d := auxIntToInt32(v_0.AuxInt)
28128 v.reset(OpAMD64XORLconst)
28129 v.AuxInt = int32ToAuxInt(c ^ d)
28133 // match: (XORLconst [c] (BTCLconst [d] x))
28134 // result: (XORLconst [c ^ 1<<uint32(d)] x)
28136 c := auxIntToInt32(v.AuxInt)
28137 if v_0.Op != OpAMD64BTCLconst {
28140 d := auxIntToInt8(v_0.AuxInt)
28142 v.reset(OpAMD64XORLconst)
28143 v.AuxInt = int32ToAuxInt(c ^ 1<<uint32(d))
28147 // match: (XORLconst [c] x)
28148 // cond: c==0
28149 // result: x
28151 c := auxIntToInt32(v.AuxInt)
28159 // match: (XORLconst [c] (MOVLconst [d]))
28160 // result: (MOVLconst [c^d])
28162 c := auxIntToInt32(v.AuxInt)
28163 if v_0.Op != OpAMD64MOVLconst {
28166 d := auxIntToInt32(v_0.AuxInt)
28167 v.reset(OpAMD64MOVLconst)
28168 v.AuxInt = int32ToAuxInt(c ^ d)
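// Illustrative sketch, not part of the generated rewriter: a SET* result is a
// single byte holding 0 or 1, so XORing it with 1 flips the predicate; that
// is why XORLconst [1] (SETNE x) becomes (SETEQ x), and likewise for the
// other signed/unsigned comparison pairs above. Hypothetical helper:
func negatePredicateByte(setcc int32) int32 {
	return setcc ^ 1 // maps 0 <-> 1, i.e. swaps a condition with its negation
}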
28173 func rewriteValueAMD64_OpAMD64XORLconstmodify(v *Value) bool {
28176 // match: (XORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
28177 // cond: ValAndOff(valoff1).canAdd32(off2)
28178 // result: (XORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
28180 valoff1 := auxIntToValAndOff(v.AuxInt)
28181 sym := auxToSym(v.Aux)
28182 if v_0.Op != OpAMD64ADDQconst {
28185 off2 := auxIntToInt32(v_0.AuxInt)
28186 base := v_0.Args[0]
28188 if !(ValAndOff(valoff1).canAdd32(off2)) {
28191 v.reset(OpAMD64XORLconstmodify)
28192 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
28193 v.Aux = symToAux(sym)
28194 v.AddArg2(base, mem)
28197 // match: (XORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
28198 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
28199 // result: (XORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
28201 valoff1 := auxIntToValAndOff(v.AuxInt)
28202 sym1 := auxToSym(v.Aux)
28203 if v_0.Op != OpAMD64LEAQ {
28206 off2 := auxIntToInt32(v_0.AuxInt)
28207 sym2 := auxToSym(v_0.Aux)
28208 base := v_0.Args[0]
28210 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
28213 v.reset(OpAMD64XORLconstmodify)
28214 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
28215 v.Aux = symToAux(mergeSym(sym1, sym2))
28216 v.AddArg2(base, mem)
28221 func rewriteValueAMD64_OpAMD64XORLload(v *Value) bool {
28226 typ := &b.Func.Config.Types
28227 // match: (XORLload [off1] {sym} val (ADDQconst [off2] base) mem)
28228 // cond: is32Bit(int64(off1)+int64(off2))
28229 // result: (XORLload [off1+off2] {sym} val base mem)
28231 off1 := auxIntToInt32(v.AuxInt)
28232 sym := auxToSym(v.Aux)
28234 if v_1.Op != OpAMD64ADDQconst {
28237 off2 := auxIntToInt32(v_1.AuxInt)
28238 base := v_1.Args[0]
28240 if !(is32Bit(int64(off1) + int64(off2))) {
28243 v.reset(OpAMD64XORLload)
28244 v.AuxInt = int32ToAuxInt(off1 + off2)
28245 v.Aux = symToAux(sym)
28246 v.AddArg3(val, base, mem)
28249 // match: (XORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
28250 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
28251 // result: (XORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
28253 off1 := auxIntToInt32(v.AuxInt)
28254 sym1 := auxToSym(v.Aux)
28256 if v_1.Op != OpAMD64LEAQ {
28259 off2 := auxIntToInt32(v_1.AuxInt)
28260 sym2 := auxToSym(v_1.Aux)
28261 base := v_1.Args[0]
28263 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
28266 v.reset(OpAMD64XORLload)
28267 v.AuxInt = int32ToAuxInt(off1 + off2)
28268 v.Aux = symToAux(mergeSym(sym1, sym2))
28269 v.AddArg3(val, base, mem)
28272 // match: (XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
28273 // result: (XORL x (MOVLf2i y))
28275 off := auxIntToInt32(v.AuxInt)
28276 sym := auxToSym(v.Aux)
28279 if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
28283 if ptr != v_2.Args[0] {
28286 v.reset(OpAMD64XORL)
28287 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
28294 func rewriteValueAMD64_OpAMD64XORLmodify(v *Value) bool {
28299 // match: (XORLmodify [off] {sym} ptr s:(SHLL (MOVLconst [1]) <t> x) mem)
28300 // result: (BTCLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
28302 off := auxIntToInt32(v.AuxInt)
28303 sym := auxToSym(v.Aux)
28306 if s.Op != OpAMD64SHLL {
28312 if s_0.Op != OpAMD64MOVLconst || auxIntToInt32(s_0.AuxInt) != 1 {
28316 v.reset(OpAMD64BTCLmodify)
28317 v.AuxInt = int32ToAuxInt(off)
28318 v.Aux = symToAux(sym)
28319 v0 := b.NewValue0(v.Pos, OpAMD64ANDLconst, t)
28320 v0.AuxInt = int32ToAuxInt(31)
28322 v.AddArg3(ptr, v0, mem)
28325 // match: (XORLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
28326 // cond: is32Bit(int64(off1)+int64(off2))
28327 // result: (XORLmodify [off1+off2] {sym} base val mem)
28329 off1 := auxIntToInt32(v.AuxInt)
28330 sym := auxToSym(v.Aux)
28331 if v_0.Op != OpAMD64ADDQconst {
28334 off2 := auxIntToInt32(v_0.AuxInt)
28335 base := v_0.Args[0]
28338 if !(is32Bit(int64(off1) + int64(off2))) {
28341 v.reset(OpAMD64XORLmodify)
28342 v.AuxInt = int32ToAuxInt(off1 + off2)
28343 v.Aux = symToAux(sym)
28344 v.AddArg3(base, val, mem)
28347 // match: (XORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
28348 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
28349 // result: (XORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
28351 off1 := auxIntToInt32(v.AuxInt)
28352 sym1 := auxToSym(v.Aux)
28353 if v_0.Op != OpAMD64LEAQ {
28356 off2 := auxIntToInt32(v_0.AuxInt)
28357 sym2 := auxToSym(v_0.Aux)
28358 base := v_0.Args[0]
28361 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
28364 v.reset(OpAMD64XORLmodify)
28365 v.AuxInt = int32ToAuxInt(off1 + off2)
28366 v.Aux = symToAux(mergeSym(sym1, sym2))
28367 v.AddArg3(base, val, mem)
28372 func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool {
28375 // match: (XORQ (SHLQ (MOVQconst [1]) y) x)
28376 // result: (BTCQ x y)
28378 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
28379 if v_0.Op != OpAMD64SHLQ {
28383 v_0_0 := v_0.Args[0]
28384 if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
28388 v.reset(OpAMD64BTCQ)
28394 // match: (XORQ (MOVQconst [c]) x)
28395 // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128
28396 // result: (BTCQconst [int8(log64(c))] x)
28398 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
28399 if v_0.Op != OpAMD64MOVQconst {
28402 c := auxIntToInt64(v_0.AuxInt)
28404 if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) {
28407 v.reset(OpAMD64BTCQconst)
28408 v.AuxInt = int8ToAuxInt(int8(log64(c)))
28414 // match: (XORQ x (MOVQconst [c]))
28415 // cond: is32Bit(c)
28416 // result: (XORQconst [int32(c)] x)
28418 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
28420 if v_1.Op != OpAMD64MOVQconst {
28423 c := auxIntToInt64(v_1.AuxInt)
28427 v.reset(OpAMD64XORQconst)
28428 v.AuxInt = int32ToAuxInt(int32(c))
28434 // match: (XORQ (SHLQconst x [c]) (SHRQconst x [d]))
28435 // cond: d==64-c
28436 // result: (ROLQconst x [c])
28438 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
28439 if v_0.Op != OpAMD64SHLQconst {
28442 c := auxIntToInt8(v_0.AuxInt)
28444 if v_1.Op != OpAMD64SHRQconst {
28447 d := auxIntToInt8(v_1.AuxInt)
28448 if x != v_1.Args[0] || !(d == 64-c) {
28451 v.reset(OpAMD64ROLQconst)
28452 v.AuxInt = int8ToAuxInt(c)
28458 // match: (XORQ x x)
28459 // result: (MOVQconst [0])
28465 v.reset(OpAMD64MOVQconst)
28466 v.AuxInt = int64ToAuxInt(0)
28469 // match: (XORQ x l:(MOVQload [off] {sym} ptr mem))
28470 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
28471 // result: (XORQload x [off] {sym} ptr mem)
28473 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
28476 if l.Op != OpAMD64MOVQload {
28479 off := auxIntToInt32(l.AuxInt)
28480 sym := auxToSym(l.Aux)
28483 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
28486 v.reset(OpAMD64XORQload)
28487 v.AuxInt = int32ToAuxInt(off)
28488 v.Aux = symToAux(sym)
28489 v.AddArg3(x, ptr, mem)
28496 func rewriteValueAMD64_OpAMD64XORQconst(v *Value) bool {
28498 // match: (XORQconst [c] x)
28499 // cond: isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128
28500 // result: (BTCQconst [int8(log32(c))] x)
28502 c := auxIntToInt32(v.AuxInt)
28504 if !(isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128) {
28507 v.reset(OpAMD64BTCQconst)
28508 v.AuxInt = int8ToAuxInt(int8(log32(c)))
28512 // match: (XORQconst [c] (XORQconst [d] x))
28513 // result: (XORQconst [c ^ d] x)
28515 c := auxIntToInt32(v.AuxInt)
28516 if v_0.Op != OpAMD64XORQconst {
28519 d := auxIntToInt32(v_0.AuxInt)
28521 v.reset(OpAMD64XORQconst)
28522 v.AuxInt = int32ToAuxInt(c ^ d)
28526 // match: (XORQconst [c] (BTCQconst [d] x))
28527 // cond: is32Bit(int64(c) ^ 1<<uint32(d))
28528 // result: (XORQconst [c ^ 1<<uint32(d)] x)
28530 c := auxIntToInt32(v.AuxInt)
28531 if v_0.Op != OpAMD64BTCQconst {
28534 d := auxIntToInt8(v_0.AuxInt)
28536 if !(is32Bit(int64(c) ^ 1<<uint32(d))) {
28539 v.reset(OpAMD64XORQconst)
28540 v.AuxInt = int32ToAuxInt(c ^ 1<<uint32(d))
28544 // match: (XORQconst [0] x)
28547 if auxIntToInt32(v.AuxInt) != 0 {
28554 // match: (XORQconst [c] (MOVQconst [d]))
28555 // result: (MOVQconst [int64(c)^d])
28557 c := auxIntToInt32(v.AuxInt)
28558 if v_0.Op != OpAMD64MOVQconst {
28561 d := auxIntToInt64(v_0.AuxInt)
28562 v.reset(OpAMD64MOVQconst)
28563 v.AuxInt = int64ToAuxInt(int64(c) ^ d)
28568 func rewriteValueAMD64_OpAMD64XORQconstmodify(v *Value) bool {
28571 // match: (XORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
28572 // cond: ValAndOff(valoff1).canAdd32(off2)
28573 // result: (XORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
28575 valoff1 := auxIntToValAndOff(v.AuxInt)
28576 sym := auxToSym(v.Aux)
28577 if v_0.Op != OpAMD64ADDQconst {
28580 off2 := auxIntToInt32(v_0.AuxInt)
28581 base := v_0.Args[0]
28583 if !(ValAndOff(valoff1).canAdd32(off2)) {
28586 v.reset(OpAMD64XORQconstmodify)
28587 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
28588 v.Aux = symToAux(sym)
28589 v.AddArg2(base, mem)
28592 // match: (XORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
28593 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
28594 // result: (XORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
28596 valoff1 := auxIntToValAndOff(v.AuxInt)
28597 sym1 := auxToSym(v.Aux)
28598 if v_0.Op != OpAMD64LEAQ {
28601 off2 := auxIntToInt32(v_0.AuxInt)
28602 sym2 := auxToSym(v_0.Aux)
28603 base := v_0.Args[0]
28605 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
28608 v.reset(OpAMD64XORQconstmodify)
28609 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
28610 v.Aux = symToAux(mergeSym(sym1, sym2))
28611 v.AddArg2(base, mem)
28616 func rewriteValueAMD64_OpAMD64XORQload(v *Value) bool {
28621 typ := &b.Func.Config.Types
28622 // match: (XORQload [off1] {sym} val (ADDQconst [off2] base) mem)
28623 // cond: is32Bit(int64(off1)+int64(off2))
28624 // result: (XORQload [off1+off2] {sym} val base mem)
28626 off1 := auxIntToInt32(v.AuxInt)
28627 sym := auxToSym(v.Aux)
28629 if v_1.Op != OpAMD64ADDQconst {
28632 off2 := auxIntToInt32(v_1.AuxInt)
28633 base := v_1.Args[0]
28635 if !(is32Bit(int64(off1) + int64(off2))) {
28638 v.reset(OpAMD64XORQload)
28639 v.AuxInt = int32ToAuxInt(off1 + off2)
28640 v.Aux = symToAux(sym)
28641 v.AddArg3(val, base, mem)
28644 // match: (XORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
28645 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
28646 // result: (XORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
28648 off1 := auxIntToInt32(v.AuxInt)
28649 sym1 := auxToSym(v.Aux)
28651 if v_1.Op != OpAMD64LEAQ {
28654 off2 := auxIntToInt32(v_1.AuxInt)
28655 sym2 := auxToSym(v_1.Aux)
28656 base := v_1.Args[0]
28658 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
28661 v.reset(OpAMD64XORQload)
28662 v.AuxInt = int32ToAuxInt(off1 + off2)
28663 v.Aux = symToAux(mergeSym(sym1, sym2))
28664 v.AddArg3(val, base, mem)
28667 // match: (XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
28668 // result: (XORQ x (MOVQf2i y))
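// Store-to-load forwarding across register files: if the bytes being loaded were
// just stored from an XMM register y, read them with MOVQf2i (an XMM-to-GP
// register move) instead of going back through memory.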
28670 off := auxIntToInt32(v.AuxInt)
28671 sym := auxToSym(v.Aux)
28674 if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
28678 if ptr != v_2.Args[0] {
28681 v.reset(OpAMD64XORQ)
28682 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
28689 func rewriteValueAMD64_OpAMD64XORQmodify(v *Value) bool {
28694 // match: (XORQmodify [off] {sym} ptr s:(SHLQ (MOVQconst [1]) <t> x) mem)
28695 // result: (BTCQmodify [off] {sym} ptr (ANDQconst <t> [63] x) mem)
28697 off := auxIntToInt32(v.AuxInt)
28698 sym := auxToSym(v.Aux)
28701 if s.Op != OpAMD64SHLQ {
28707 if s_0.Op != OpAMD64MOVQconst || auxIntToInt64(s_0.AuxInt) != 1 {
28711 v.reset(OpAMD64BTCQmodify)
28712 v.AuxInt = int32ToAuxInt(off)
28713 v.Aux = symToAux(sym)
28714 v0 := b.NewValue0(v.Pos, OpAMD64ANDQconst, t)
28715 v0.AuxInt = int32ToAuxInt(63)
28717 v.AddArg3(ptr, v0, mem)
28720 // match: (XORQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
28721 // cond: is32Bit(int64(off1)+int64(off2))
28722 // result: (XORQmodify [off1+off2] {sym} base val mem)
28724 off1 := auxIntToInt32(v.AuxInt)
28725 sym := auxToSym(v.Aux)
28726 if v_0.Op != OpAMD64ADDQconst {
28729 off2 := auxIntToInt32(v_0.AuxInt)
28730 base := v_0.Args[0]
28733 if !(is32Bit(int64(off1) + int64(off2))) {
28736 v.reset(OpAMD64XORQmodify)
28737 v.AuxInt = int32ToAuxInt(off1 + off2)
28738 v.Aux = symToAux(sym)
28739 v.AddArg3(base, val, mem)
28742 // match: (XORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
28743 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
28744 // result: (XORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
28746 off1 := auxIntToInt32(v.AuxInt)
28747 sym1 := auxToSym(v.Aux)
28748 if v_0.Op != OpAMD64LEAQ {
28751 off2 := auxIntToInt32(v_0.AuxInt)
28752 sym2 := auxToSym(v_0.Aux)
28753 base := v_0.Args[0]
28756 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
28759 v.reset(OpAMD64XORQmodify)
28760 v.AuxInt = int32ToAuxInt(off1 + off2)
28761 v.Aux = symToAux(mergeSym(sym1, sym2))
28762 v.AddArg3(base, val, mem)
28767 func rewriteValueAMD64_OpAddr(v *Value) bool {
28768 v_0 := v.Args[0]
28769 // match: (Addr {sym} base)
28770 // result: (LEAQ {sym} base)
28771 for {
28772 sym := auxToSym(v.Aux)
28773 base := v_0
28774 v.reset(OpAMD64LEAQ)
28775 v.Aux = symToAux(sym)
28776 v.AddArg(base)
28777 return true
28778 }
28779 }
28780 func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool {
28785 typ := &b.Func.Config.Types
28786 // match: (AtomicAdd32 ptr val mem)
28787 // result: (AddTupleFirst32 val (XADDLlock val ptr mem))
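// XADDLlock leaves the *old* memory value in its first result; AddTupleFirst32
// adds val back to that element so the tuple's first part is the new value,
// which is what Go's atomic add returns.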
28792 v.reset(OpAMD64AddTupleFirst32)
28793 v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem))
28794 v0.AddArg3(val, ptr, mem)
28799 func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool {
28804 typ := &b.Func.Config.Types
28805 // match: (AtomicAdd64 ptr val mem)
28806 // result: (AddTupleFirst64 val (XADDQlock val ptr mem))
28811 v.reset(OpAMD64AddTupleFirst64)
28812 v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem))
28813 v0.AddArg3(val, ptr, mem)
28818 func rewriteValueAMD64_OpAtomicAnd32(v *Value) bool {
28822 // match: (AtomicAnd32 ptr val mem)
28823 // result: (ANDLlock ptr val mem)
28828 v.reset(OpAMD64ANDLlock)
28829 v.AddArg3(ptr, val, mem)
28833 func rewriteValueAMD64_OpAtomicAnd8(v *Value) bool {
28837 // match: (AtomicAnd8 ptr val mem)
28838 // result: (ANDBlock ptr val mem)
28843 v.reset(OpAMD64ANDBlock)
28844 v.AddArg3(ptr, val, mem)
28848 func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value) bool {
28853 // match: (AtomicCompareAndSwap32 ptr old new_ mem)
28854 // result: (CMPXCHGLlock ptr old new_ mem)
28860 v.reset(OpAMD64CMPXCHGLlock)
28861 v.AddArg4(ptr, old, new_, mem)
28865 func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value) bool {
28870 // match: (AtomicCompareAndSwap64 ptr old new_ mem)
28871 // result: (CMPXCHGQlock ptr old new_ mem)
28877 v.reset(OpAMD64CMPXCHGQlock)
28878 v.AddArg4(ptr, old, new_, mem)
28882 func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool {
28886 // match: (AtomicExchange32 ptr val mem)
28887 // result: (XCHGL val ptr mem)
28892 v.reset(OpAMD64XCHGL)
28893 v.AddArg3(val, ptr, mem)
28897 func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool {
28901 // match: (AtomicExchange64 ptr val mem)
28902 // result: (XCHGQ val ptr mem)
28907 v.reset(OpAMD64XCHGQ)
28908 v.AddArg3(val, ptr, mem)
28912 func rewriteValueAMD64_OpAtomicLoad32(v *Value) bool {
28915 // match: (AtomicLoad32 ptr mem)
28916 // result: (MOVLatomicload ptr mem)
28920 v.reset(OpAMD64MOVLatomicload)
28921 v.AddArg2(ptr, mem)
28925 func rewriteValueAMD64_OpAtomicLoad64(v *Value) bool {
28928 // match: (AtomicLoad64 ptr mem)
28929 // result: (MOVQatomicload ptr mem)
28933 v.reset(OpAMD64MOVQatomicload)
28934 v.AddArg2(ptr, mem)
28938 func rewriteValueAMD64_OpAtomicLoad8(v *Value) bool {
28941 // match: (AtomicLoad8 ptr mem)
28942 // result: (MOVBatomicload ptr mem)
28946 v.reset(OpAMD64MOVBatomicload)
28947 v.AddArg2(ptr, mem)
28951 func rewriteValueAMD64_OpAtomicLoadPtr(v *Value) bool {
28954 // match: (AtomicLoadPtr ptr mem)
28955 // result: (MOVQatomicload ptr mem)
28959 v.reset(OpAMD64MOVQatomicload)
28960 v.AddArg2(ptr, mem)
28964 func rewriteValueAMD64_OpAtomicOr32(v *Value) bool {
28968 // match: (AtomicOr32 ptr val mem)
28969 // result: (ORLlock ptr val mem)
28974 v.reset(OpAMD64ORLlock)
28975 v.AddArg3(ptr, val, mem)
28979 func rewriteValueAMD64_OpAtomicOr8(v *Value) bool {
28983 // match: (AtomicOr8 ptr val mem)
28984 // result: (ORBlock ptr val mem)
28989 v.reset(OpAMD64ORBlock)
28990 v.AddArg3(ptr, val, mem)
28994 func rewriteValueAMD64_OpAtomicStore32(v *Value) bool {
28999 typ := &b.Func.Config.Types
29000 // match: (AtomicStore32 ptr val mem)
29001 // result: (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
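// An atomic store uses XCHGL rather than a plain MOVL so it carries the implicit
// LOCK (full barrier) semantics of an exchange with memory; Select1 keeps only
// the memory result.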
29007 v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem))
29008 v0.AddArg3(val, ptr, mem)
29013 func rewriteValueAMD64_OpAtomicStore64(v *Value) bool {
29018 typ := &b.Func.Config.Types
29019 // match: (AtomicStore64 ptr val mem)
29020 // result: (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
29026 v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem))
29027 v0.AddArg3(val, ptr, mem)
29032 func rewriteValueAMD64_OpAtomicStore8(v *Value) bool {
29037 typ := &b.Func.Config.Types
29038 // match: (AtomicStore8 ptr val mem)
29039 // result: (Select1 (XCHGB <types.NewTuple(typ.UInt8,types.TypeMem)> val ptr mem))
29045 v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem))
29046 v0.AddArg3(val, ptr, mem)
29051 func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool {
29056 typ := &b.Func.Config.Types
29057 // match: (AtomicStorePtrNoWB ptr val mem)
29058 // result: (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
29064 v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem))
29065 v0.AddArg3(val, ptr, mem)
29070 func rewriteValueAMD64_OpBitLen16(v *Value) bool {
29073 typ := &b.Func.Config.Types
29074 // match: (BitLen16 x)
29075 // result: (BSRL (LEAL1 <typ.UInt32> [1] (MOVWQZX <typ.UInt32> x) (MOVWQZX <typ.UInt32> x)))
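// LEAL1 [1] computes 2*x+1 from the zero-extended input, which is never zero, so
// BSRL is well defined and its result is exactly BitLen(x) (e.g. x=5 -> 11,
// BSR(11) = 3 = BitLen(5)).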
29078 v.reset(OpAMD64BSRL)
29079 v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
29080 v0.AuxInt = int32ToAuxInt(1)
29081 v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
29088 func rewriteValueAMD64_OpBitLen32(v *Value) bool {
29091 typ := &b.Func.Config.Types
29092 // match: (BitLen32 x)
29093 // result: (Select0 (BSRQ (LEAQ1 <typ.UInt64> [1] (MOVLQZX <typ.UInt64> x) (MOVLQZX <typ.UInt64> x))))
29097 v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
29098 v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64)
29099 v1.AuxInt = int32ToAuxInt(1)
29100 v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
29108 func rewriteValueAMD64_OpBitLen64(v *Value) bool {
29111 typ := &b.Func.Config.Types
29112 // match: (BitLen64 <t> x)
29113 // result: (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
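// BSRQ's destination is undefined (and ZF is set) for a zero input, so CMOVQEQ
// substitutes -1 in that case; adding 1 then yields BitLen64(0) == 0.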
29117 v.reset(OpAMD64ADDQconst)
29118 v.AuxInt = int32ToAuxInt(1)
29119 v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t)
29120 v1 := b.NewValue0(v.Pos, OpSelect0, t)
29121 v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
29124 v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
29125 v3.AuxInt = int64ToAuxInt(-1)
29126 v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29128 v0.AddArg3(v1, v3, v4)
29133 func rewriteValueAMD64_OpBitLen8(v *Value) bool {
29136 typ := &b.Func.Config.Types
29137 // match: (BitLen8 x)
29138 // result: (BSRL (LEAL1 <typ.UInt32> [1] (MOVBQZX <typ.UInt32> x) (MOVBQZX <typ.UInt32> x)))
29141 v.reset(OpAMD64BSRL)
29142 v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
29143 v0.AuxInt = int32ToAuxInt(1)
29144 v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
29151 func rewriteValueAMD64_OpCeil(v *Value) bool {
29154 // result: (ROUNDSD [2] x)
29157 v.reset(OpAMD64ROUNDSD)
29158 v.AuxInt = int8ToAuxInt(2)
29163 func rewriteValueAMD64_OpCondSelect(v *Value) bool {
29168 typ := &b.Func.Config.Types
29169 // match: (CondSelect <t> x y (SETEQ cond))
29170 // cond: (is64BitInt(t) || isPtr(t))
29171 // result: (CMOVQEQ y x cond)
29176 if v_2.Op != OpAMD64SETEQ {
29179 cond := v_2.Args[0]
29180 if !(is64BitInt(t) || isPtr(t)) {
29183 v.reset(OpAMD64CMOVQEQ)
29184 v.AddArg3(y, x, cond)
29187 // match: (CondSelect <t> x y (SETNE cond))
29188 // cond: (is64BitInt(t) || isPtr(t))
29189 // result: (CMOVQNE y x cond)
29194 if v_2.Op != OpAMD64SETNE {
29197 cond := v_2.Args[0]
29198 if !(is64BitInt(t) || isPtr(t)) {
29201 v.reset(OpAMD64CMOVQNE)
29202 v.AddArg3(y, x, cond)
29205 // match: (CondSelect <t> x y (SETL cond))
29206 // cond: (is64BitInt(t) || isPtr(t))
29207 // result: (CMOVQLT y x cond)
29212 if v_2.Op != OpAMD64SETL {
29215 cond := v_2.Args[0]
29216 if !(is64BitInt(t) || isPtr(t)) {
29219 v.reset(OpAMD64CMOVQLT)
29220 v.AddArg3(y, x, cond)
29223 // match: (CondSelect <t> x y (SETG cond))
29224 // cond: (is64BitInt(t) || isPtr(t))
29225 // result: (CMOVQGT y x cond)
29230 if v_2.Op != OpAMD64SETG {
29233 cond := v_2.Args[0]
29234 if !(is64BitInt(t) || isPtr(t)) {
29237 v.reset(OpAMD64CMOVQGT)
29238 v.AddArg3(y, x, cond)
29241 // match: (CondSelect <t> x y (SETLE cond))
29242 // cond: (is64BitInt(t) || isPtr(t))
29243 // result: (CMOVQLE y x cond)
29248 if v_2.Op != OpAMD64SETLE {
29251 cond := v_2.Args[0]
29252 if !(is64BitInt(t) || isPtr(t)) {
29255 v.reset(OpAMD64CMOVQLE)
29256 v.AddArg3(y, x, cond)
29259 // match: (CondSelect <t> x y (SETGE cond))
29260 // cond: (is64BitInt(t) || isPtr(t))
29261 // result: (CMOVQGE y x cond)
29266 if v_2.Op != OpAMD64SETGE {
29269 cond := v_2.Args[0]
29270 if !(is64BitInt(t) || isPtr(t)) {
29273 v.reset(OpAMD64CMOVQGE)
29274 v.AddArg3(y, x, cond)
29277 // match: (CondSelect <t> x y (SETA cond))
29278 // cond: (is64BitInt(t) || isPtr(t))
29279 // result: (CMOVQHI y x cond)
29284 if v_2.Op != OpAMD64SETA {
29287 cond := v_2.Args[0]
29288 if !(is64BitInt(t) || isPtr(t)) {
29291 v.reset(OpAMD64CMOVQHI)
29292 v.AddArg3(y, x, cond)
29295 // match: (CondSelect <t> x y (SETB cond))
29296 // cond: (is64BitInt(t) || isPtr(t))
29297 // result: (CMOVQCS y x cond)
29302 if v_2.Op != OpAMD64SETB {
29305 cond := v_2.Args[0]
29306 if !(is64BitInt(t) || isPtr(t)) {
29309 v.reset(OpAMD64CMOVQCS)
29310 v.AddArg3(y, x, cond)
29313 // match: (CondSelect <t> x y (SETAE cond))
29314 // cond: (is64BitInt(t) || isPtr(t))
29315 // result: (CMOVQCC y x cond)
29320 if v_2.Op != OpAMD64SETAE {
29323 cond := v_2.Args[0]
29324 if !(is64BitInt(t) || isPtr(t)) {
29327 v.reset(OpAMD64CMOVQCC)
29328 v.AddArg3(y, x, cond)
29331 // match: (CondSelect <t> x y (SETBE cond))
29332 // cond: (is64BitInt(t) || isPtr(t))
29333 // result: (CMOVQLS y x cond)
29338 if v_2.Op != OpAMD64SETBE {
29341 cond := v_2.Args[0]
29342 if !(is64BitInt(t) || isPtr(t)) {
29345 v.reset(OpAMD64CMOVQLS)
29346 v.AddArg3(y, x, cond)
29349 // match: (CondSelect <t> x y (SETEQF cond))
29350 // cond: (is64BitInt(t) || isPtr(t))
29351 // result: (CMOVQEQF y x cond)
29356 if v_2.Op != OpAMD64SETEQF {
29359 cond := v_2.Args[0]
29360 if !(is64BitInt(t) || isPtr(t)) {
29363 v.reset(OpAMD64CMOVQEQF)
29364 v.AddArg3(y, x, cond)
29367 // match: (CondSelect <t> x y (SETNEF cond))
29368 // cond: (is64BitInt(t) || isPtr(t))
29369 // result: (CMOVQNEF y x cond)
29374 if v_2.Op != OpAMD64SETNEF {
29377 cond := v_2.Args[0]
29378 if !(is64BitInt(t) || isPtr(t)) {
29381 v.reset(OpAMD64CMOVQNEF)
29382 v.AddArg3(y, x, cond)
29385 // match: (CondSelect <t> x y (SETGF cond))
29386 // cond: (is64BitInt(t) || isPtr(t))
29387 // result: (CMOVQGTF y x cond)
29392 if v_2.Op != OpAMD64SETGF {
29395 cond := v_2.Args[0]
29396 if !(is64BitInt(t) || isPtr(t)) {
29399 v.reset(OpAMD64CMOVQGTF)
29400 v.AddArg3(y, x, cond)
29403 // match: (CondSelect <t> x y (SETGEF cond))
29404 // cond: (is64BitInt(t) || isPtr(t))
29405 // result: (CMOVQGEF y x cond)
29410 if v_2.Op != OpAMD64SETGEF {
29413 cond := v_2.Args[0]
29414 if !(is64BitInt(t) || isPtr(t)) {
29417 v.reset(OpAMD64CMOVQGEF)
29418 v.AddArg3(y, x, cond)
29421 // match: (CondSelect <t> x y (SETEQ cond))
29422 // cond: is32BitInt(t)
29423 // result: (CMOVLEQ y x cond)
29428 if v_2.Op != OpAMD64SETEQ {
29431 cond := v_2.Args[0]
29432 if !(is32BitInt(t)) {
29435 v.reset(OpAMD64CMOVLEQ)
29436 v.AddArg3(y, x, cond)
29439 // match: (CondSelect <t> x y (SETNE cond))
29440 // cond: is32BitInt(t)
29441 // result: (CMOVLNE y x cond)
29446 if v_2.Op != OpAMD64SETNE {
29449 cond := v_2.Args[0]
29450 if !(is32BitInt(t)) {
29453 v.reset(OpAMD64CMOVLNE)
29454 v.AddArg3(y, x, cond)
29457 // match: (CondSelect <t> x y (SETL cond))
29458 // cond: is32BitInt(t)
29459 // result: (CMOVLLT y x cond)
29464 if v_2.Op != OpAMD64SETL {
29467 cond := v_2.Args[0]
29468 if !(is32BitInt(t)) {
29471 v.reset(OpAMD64CMOVLLT)
29472 v.AddArg3(y, x, cond)
29475 // match: (CondSelect <t> x y (SETG cond))
29476 // cond: is32BitInt(t)
29477 // result: (CMOVLGT y x cond)
29482 if v_2.Op != OpAMD64SETG {
29485 cond := v_2.Args[0]
29486 if !(is32BitInt(t)) {
29489 v.reset(OpAMD64CMOVLGT)
29490 v.AddArg3(y, x, cond)
29493 // match: (CondSelect <t> x y (SETLE cond))
29494 // cond: is32BitInt(t)
29495 // result: (CMOVLLE y x cond)
29500 if v_2.Op != OpAMD64SETLE {
29503 cond := v_2.Args[0]
29504 if !(is32BitInt(t)) {
29507 v.reset(OpAMD64CMOVLLE)
29508 v.AddArg3(y, x, cond)
29511 // match: (CondSelect <t> x y (SETGE cond))
29512 // cond: is32BitInt(t)
29513 // result: (CMOVLGE y x cond)
29518 if v_2.Op != OpAMD64SETGE {
29521 cond := v_2.Args[0]
29522 if !(is32BitInt(t)) {
29525 v.reset(OpAMD64CMOVLGE)
29526 v.AddArg3(y, x, cond)
29529 // match: (CondSelect <t> x y (SETA cond))
29530 // cond: is32BitInt(t)
29531 // result: (CMOVLHI y x cond)
29536 if v_2.Op != OpAMD64SETA {
29539 cond := v_2.Args[0]
29540 if !(is32BitInt(t)) {
29543 v.reset(OpAMD64CMOVLHI)
29544 v.AddArg3(y, x, cond)
29547 // match: (CondSelect <t> x y (SETB cond))
29548 // cond: is32BitInt(t)
29549 // result: (CMOVLCS y x cond)
29554 if v_2.Op != OpAMD64SETB {
29557 cond := v_2.Args[0]
29558 if !(is32BitInt(t)) {
29561 v.reset(OpAMD64CMOVLCS)
29562 v.AddArg3(y, x, cond)
29565 // match: (CondSelect <t> x y (SETAE cond))
29566 // cond: is32BitInt(t)
29567 // result: (CMOVLCC y x cond)
29572 if v_2.Op != OpAMD64SETAE {
29575 cond := v_2.Args[0]
29576 if !(is32BitInt(t)) {
29579 v.reset(OpAMD64CMOVLCC)
29580 v.AddArg3(y, x, cond)
29583 // match: (CondSelect <t> x y (SETBE cond))
29584 // cond: is32BitInt(t)
29585 // result: (CMOVLLS y x cond)
29590 if v_2.Op != OpAMD64SETBE {
29593 cond := v_2.Args[0]
29594 if !(is32BitInt(t)) {
29597 v.reset(OpAMD64CMOVLLS)
29598 v.AddArg3(y, x, cond)
29601 // match: (CondSelect <t> x y (SETEQF cond))
29602 // cond: is32BitInt(t)
29603 // result: (CMOVLEQF y x cond)
29608 if v_2.Op != OpAMD64SETEQF {
29611 cond := v_2.Args[0]
29612 if !(is32BitInt(t)) {
29615 v.reset(OpAMD64CMOVLEQF)
29616 v.AddArg3(y, x, cond)
29619 // match: (CondSelect <t> x y (SETNEF cond))
29620 // cond: is32BitInt(t)
29621 // result: (CMOVLNEF y x cond)
29626 if v_2.Op != OpAMD64SETNEF {
29629 cond := v_2.Args[0]
29630 if !(is32BitInt(t)) {
29633 v.reset(OpAMD64CMOVLNEF)
29634 v.AddArg3(y, x, cond)
29637 // match: (CondSelect <t> x y (SETGF cond))
29638 // cond: is32BitInt(t)
29639 // result: (CMOVLGTF y x cond)
29644 if v_2.Op != OpAMD64SETGF {
29647 cond := v_2.Args[0]
29648 if !(is32BitInt(t)) {
29651 v.reset(OpAMD64CMOVLGTF)
29652 v.AddArg3(y, x, cond)
29655 // match: (CondSelect <t> x y (SETGEF cond))
29656 // cond: is32BitInt(t)
29657 // result: (CMOVLGEF y x cond)
29662 if v_2.Op != OpAMD64SETGEF {
29665 cond := v_2.Args[0]
29666 if !(is32BitInt(t)) {
29669 v.reset(OpAMD64CMOVLGEF)
29670 v.AddArg3(y, x, cond)
29673 // match: (CondSelect <t> x y (SETEQ cond))
29674 // cond: is16BitInt(t)
29675 // result: (CMOVWEQ y x cond)
29680 if v_2.Op != OpAMD64SETEQ {
29683 cond := v_2.Args[0]
29684 if !(is16BitInt(t)) {
29687 v.reset(OpAMD64CMOVWEQ)
29688 v.AddArg3(y, x, cond)
29691 // match: (CondSelect <t> x y (SETNE cond))
29692 // cond: is16BitInt(t)
29693 // result: (CMOVWNE y x cond)
29698 if v_2.Op != OpAMD64SETNE {
29701 cond := v_2.Args[0]
29702 if !(is16BitInt(t)) {
29705 v.reset(OpAMD64CMOVWNE)
29706 v.AddArg3(y, x, cond)
29709 // match: (CondSelect <t> x y (SETL cond))
29710 // cond: is16BitInt(t)
29711 // result: (CMOVWLT y x cond)
29716 if v_2.Op != OpAMD64SETL {
29719 cond := v_2.Args[0]
29720 if !(is16BitInt(t)) {
29723 v.reset(OpAMD64CMOVWLT)
29724 v.AddArg3(y, x, cond)
29727 // match: (CondSelect <t> x y (SETG cond))
29728 // cond: is16BitInt(t)
29729 // result: (CMOVWGT y x cond)
29734 if v_2.Op != OpAMD64SETG {
29737 cond := v_2.Args[0]
29738 if !(is16BitInt(t)) {
29741 v.reset(OpAMD64CMOVWGT)
29742 v.AddArg3(y, x, cond)
29745 // match: (CondSelect <t> x y (SETLE cond))
29746 // cond: is16BitInt(t)
29747 // result: (CMOVWLE y x cond)
29752 if v_2.Op != OpAMD64SETLE {
29755 cond := v_2.Args[0]
29756 if !(is16BitInt(t)) {
29759 v.reset(OpAMD64CMOVWLE)
29760 v.AddArg3(y, x, cond)
29763 // match: (CondSelect <t> x y (SETGE cond))
29764 // cond: is16BitInt(t)
29765 // result: (CMOVWGE y x cond)
29770 if v_2.Op != OpAMD64SETGE {
29773 cond := v_2.Args[0]
29774 if !(is16BitInt(t)) {
29777 v.reset(OpAMD64CMOVWGE)
29778 v.AddArg3(y, x, cond)
29781 // match: (CondSelect <t> x y (SETA cond))
29782 // cond: is16BitInt(t)
29783 // result: (CMOVWHI y x cond)
29788 if v_2.Op != OpAMD64SETA {
29791 cond := v_2.Args[0]
29792 if !(is16BitInt(t)) {
29795 v.reset(OpAMD64CMOVWHI)
29796 v.AddArg3(y, x, cond)
29799 // match: (CondSelect <t> x y (SETB cond))
29800 // cond: is16BitInt(t)
29801 // result: (CMOVWCS y x cond)
29806 if v_2.Op != OpAMD64SETB {
29809 cond := v_2.Args[0]
29810 if !(is16BitInt(t)) {
29813 v.reset(OpAMD64CMOVWCS)
29814 v.AddArg3(y, x, cond)
29817 // match: (CondSelect <t> x y (SETAE cond))
29818 // cond: is16BitInt(t)
29819 // result: (CMOVWCC y x cond)
29824 if v_2.Op != OpAMD64SETAE {
29827 cond := v_2.Args[0]
29828 if !(is16BitInt(t)) {
29831 v.reset(OpAMD64CMOVWCC)
29832 v.AddArg3(y, x, cond)
29835 // match: (CondSelect <t> x y (SETBE cond))
29836 // cond: is16BitInt(t)
29837 // result: (CMOVWLS y x cond)
29842 if v_2.Op != OpAMD64SETBE {
29845 cond := v_2.Args[0]
29846 if !(is16BitInt(t)) {
29849 v.reset(OpAMD64CMOVWLS)
29850 v.AddArg3(y, x, cond)
29853 // match: (CondSelect <t> x y (SETEQF cond))
29854 // cond: is16BitInt(t)
29855 // result: (CMOVWEQF y x cond)
29860 if v_2.Op != OpAMD64SETEQF {
29863 cond := v_2.Args[0]
29864 if !(is16BitInt(t)) {
29867 v.reset(OpAMD64CMOVWEQF)
29868 v.AddArg3(y, x, cond)
29871 // match: (CondSelect <t> x y (SETNEF cond))
29872 // cond: is16BitInt(t)
29873 // result: (CMOVWNEF y x cond)
29878 if v_2.Op != OpAMD64SETNEF {
29881 cond := v_2.Args[0]
29882 if !(is16BitInt(t)) {
29885 v.reset(OpAMD64CMOVWNEF)
29886 v.AddArg3(y, x, cond)
29889 // match: (CondSelect <t> x y (SETGF cond))
29890 // cond: is16BitInt(t)
29891 // result: (CMOVWGTF y x cond)
29896 if v_2.Op != OpAMD64SETGF {
29899 cond := v_2.Args[0]
29900 if !(is16BitInt(t)) {
29903 v.reset(OpAMD64CMOVWGTF)
29904 v.AddArg3(y, x, cond)
29907 // match: (CondSelect <t> x y (SETGEF cond))
29908 // cond: is16BitInt(t)
29909 // result: (CMOVWGEF y x cond)
29914 if v_2.Op != OpAMD64SETGEF {
29917 cond := v_2.Args[0]
29918 if !(is16BitInt(t)) {
29921 v.reset(OpAMD64CMOVWGEF)
29922 v.AddArg3(y, x, cond)
29925 // match: (CondSelect <t> x y check)
29926 // cond: !check.Type.IsFlags() && check.Type.Size() == 1
29927 // result: (CondSelect <t> x y (MOVBQZX <typ.UInt64> check))
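// This rule and the two below widen a non-flags condition of size 1, 2 or 4
// bytes to 64 bits; the widened CondSelect is then handled by the CMPQconst
// rules further down.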
29933 if !(!check.Type.IsFlags() && check.Type.Size() == 1) {
29936 v.reset(OpCondSelect)
29938 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64)
29940 v.AddArg3(x, y, v0)
29943 // match: (CondSelect <t> x y check)
29944 // cond: !check.Type.IsFlags() && check.Type.Size() == 2
29945 // result: (CondSelect <t> x y (MOVWQZX <typ.UInt64> check))
29951 if !(!check.Type.IsFlags() && check.Type.Size() == 2) {
29954 v.reset(OpCondSelect)
29956 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64)
29958 v.AddArg3(x, y, v0)
29961 // match: (CondSelect <t> x y check)
29962 // cond: !check.Type.IsFlags() && check.Type.Size() == 4
29963 // result: (CondSelect <t> x y (MOVLQZX <typ.UInt64> check))
29969 if !(!check.Type.IsFlags() && check.Type.Size() == 4) {
29972 v.reset(OpCondSelect)
29974 v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
29976 v.AddArg3(x, y, v0)
29979 // match: (CondSelect <t> x y check)
29980 // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))
29981 // result: (CMOVQNE y x (CMPQconst [0] check))
29987 if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) {
29990 v.reset(OpAMD64CMOVQNE)
29991 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
29992 v0.AuxInt = int32ToAuxInt(0)
29994 v.AddArg3(y, x, v0)
29997 // match: (CondSelect <t> x y check)
29998 // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)
29999 // result: (CMOVLNE y x (CMPQconst [0] check))
30005 if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) {
30008 v.reset(OpAMD64CMOVLNE)
30009 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
30010 v0.AuxInt = int32ToAuxInt(0)
30012 v.AddArg3(y, x, v0)
30015 // match: (CondSelect <t> x y check)
30016 // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)
30017 // result: (CMOVWNE y x (CMPQconst [0] check))
30023 if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) {
30026 v.reset(OpAMD64CMOVWNE)
30027 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
30028 v0.AuxInt = int32ToAuxInt(0)
30030 v.AddArg3(y, x, v0)
30035 func rewriteValueAMD64_OpConst16(v *Value) bool {
30036 // match: (Const16 [c])
30037 // result: (MOVLconst [int32(c)])
30038 for {
30039 c := auxIntToInt16(v.AuxInt)
30040 v.reset(OpAMD64MOVLconst)
30041 v.AuxInt = int32ToAuxInt(int32(c))
30042 return true
30043 }
30044 }
30045 func rewriteValueAMD64_OpConst8(v *Value) bool {
30046 // match: (Const8 [c])
30047 // result: (MOVLconst [int32(c)])
30048 for {
30049 c := auxIntToInt8(v.AuxInt)
30050 v.reset(OpAMD64MOVLconst)
30051 v.AuxInt = int32ToAuxInt(int32(c))
30052 return true
30053 }
30054 }
30055 func rewriteValueAMD64_OpConstBool(v *Value) bool {
30056 // match: (ConstBool [c])
30057 // result: (MOVLconst [b2i32(c)])
30058 for {
30059 c := auxIntToBool(v.AuxInt)
30060 v.reset(OpAMD64MOVLconst)
30061 v.AuxInt = int32ToAuxInt(b2i32(c))
30062 return true
30063 }
30064 }
30065 func rewriteValueAMD64_OpConstNil(v *Value) bool {
30066 // match: (ConstNil )
30067 // result: (MOVQconst [0])
30068 for {
30069 v.reset(OpAMD64MOVQconst)
30070 v.AuxInt = int64ToAuxInt(0)
30071 return true
30072 }
30073 }
30074 func rewriteValueAMD64_OpCtz16(v *Value) bool {
30077 typ := &b.Func.Config.Types
30078 // match: (Ctz16 x)
30079 // result: (BSFL (BTSLconst <typ.UInt32> [16] x))
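// BTSLconst [16] sets bit 16, guaranteeing BSFL a nonzero input and making it
// return 16 when x == 0, without changing the trailing-zero count of any
// nonzero 16-bit value.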
30082 v.reset(OpAMD64BSFL)
30083 v0 := b.NewValue0(v.Pos, OpAMD64BTSLconst, typ.UInt32)
30084 v0.AuxInt = int8ToAuxInt(16)
30090 func rewriteValueAMD64_OpCtz32(v *Value) bool {
30093 typ := &b.Func.Config.Types
30094 // match: (Ctz32 x)
30095 // result: (Select0 (BSFQ (BTSQconst <typ.UInt64> [32] x)))
30099 v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
30100 v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64)
30101 v1.AuxInt = int8ToAuxInt(32)
30108 func rewriteValueAMD64_OpCtz64(v *Value) bool {
30111 typ := &b.Func.Config.Types
30112 // match: (Ctz64 <t> x)
30113 // result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
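// BSFQ's destination is undefined and ZF is set when the input is zero, so
// CMOVQEQ (driven by the flags from Select1) picks the constant 64 in that
// case, matching Ctz64(0) == 64.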
30117 v.reset(OpAMD64CMOVQEQ)
30118 v0 := b.NewValue0(v.Pos, OpSelect0, t)
30119 v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
30122 v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
30123 v2.AuxInt = int64ToAuxInt(64)
30124 v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
30126 v.AddArg3(v0, v2, v3)
30130 func rewriteValueAMD64_OpCtz64NonZero(v *Value) bool {
30133 typ := &b.Func.Config.Types
30134 // match: (Ctz64NonZero x)
30135 // result: (Select0 (BSFQ x))
30139 v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
30145 func rewriteValueAMD64_OpCtz8(v *Value) bool {
30148 typ := &b.Func.Config.Types
30150 // result: (BSFL (BTSLconst <typ.UInt32> [ 8] x))
30153 v.reset(OpAMD64BSFL)
30154 v0 := b.NewValue0(v.Pos, OpAMD64BTSLconst, typ.UInt32)
30155 v0.AuxInt = int8ToAuxInt(8)
30161 func rewriteValueAMD64_OpDiv16(v *Value) bool {
30165 typ := &b.Func.Config.Types
30166 // match: (Div16 [a] x y)
30167 // result: (Select0 (DIVW [a] x y))
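// DIVW produces a (quotient, remainder) tuple: Div16 keeps Select0 (the
// quotient) while the corresponding Mod16 lowering later in this file keeps
// Select1 (the remainder).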
30169 a := auxIntToBool(v.AuxInt)
30173 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
30174 v0.AuxInt = boolToAuxInt(a)
30180 func rewriteValueAMD64_OpDiv16u(v *Value) bool {
30184 typ := &b.Func.Config.Types
30185 // match: (Div16u x y)
30186 // result: (Select0 (DIVWU x y))
30191 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
30197 func rewriteValueAMD64_OpDiv32(v *Value) bool {
30201 typ := &b.Func.Config.Types
30202 // match: (Div32 [a] x y)
30203 // result: (Select0 (DIVL [a] x y))
30205 a := auxIntToBool(v.AuxInt)
30209 v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
30210 v0.AuxInt = boolToAuxInt(a)
30216 func rewriteValueAMD64_OpDiv32u(v *Value) bool {
30220 typ := &b.Func.Config.Types
30221 // match: (Div32u x y)
30222 // result: (Select0 (DIVLU x y))
30227 v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
30233 func rewriteValueAMD64_OpDiv64(v *Value) bool {
30237 typ := &b.Func.Config.Types
30238 // match: (Div64 [a] x y)
30239 // result: (Select0 (DIVQ [a] x y))
30241 a := auxIntToBool(v.AuxInt)
30245 v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
30246 v0.AuxInt = boolToAuxInt(a)
30252 func rewriteValueAMD64_OpDiv64u(v *Value) bool {
30256 typ := &b.Func.Config.Types
30257 // match: (Div64u x y)
30258 // result: (Select0 (DIVQU x y))
30263 v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
30269 func rewriteValueAMD64_OpDiv8(v *Value) bool {
30273 typ := &b.Func.Config.Types
30274 // match: (Div8 x y)
30275 // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
30280 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
30281 v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
30283 v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
30290 func rewriteValueAMD64_OpDiv8u(v *Value) bool {
30294 typ := &b.Func.Config.Types
30295 // match: (Div8u x y)
30296 // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
30301 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
30302 v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
30304 v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
30311 func rewriteValueAMD64_OpEq16(v *Value) bool {
30315 // match: (Eq16 x y)
30316 // result: (SETEQ (CMPW x y))
30320 v.reset(OpAMD64SETEQ)
30321 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
30327 func rewriteValueAMD64_OpEq32(v *Value) bool {
30331 // match: (Eq32 x y)
30332 // result: (SETEQ (CMPL x y))
30336 v.reset(OpAMD64SETEQ)
30337 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
30343 func rewriteValueAMD64_OpEq32F(v *Value) bool {
30347 // match: (Eq32F x y)
30348 // result: (SETEQF (UCOMISS x y))
30352 v.reset(OpAMD64SETEQF)
30353 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
30359 func rewriteValueAMD64_OpEq64(v *Value) bool {
30363 // match: (Eq64 x y)
30364 // result: (SETEQ (CMPQ x y))
30368 v.reset(OpAMD64SETEQ)
30369 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
30375 func rewriteValueAMD64_OpEq64F(v *Value) bool {
30379 // match: (Eq64F x y)
30380 // result: (SETEQF (UCOMISD x y))
30384 v.reset(OpAMD64SETEQF)
30385 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
30391 func rewriteValueAMD64_OpEq8(v *Value) bool {
30395 // match: (Eq8 x y)
30396 // result: (SETEQ (CMPB x y))
30400 v.reset(OpAMD64SETEQ)
30401 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
30407 func rewriteValueAMD64_OpEqB(v *Value) bool {
30411 // match: (EqB x y)
30412 // result: (SETEQ (CMPB x y))
30416 v.reset(OpAMD64SETEQ)
30417 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
30423 func rewriteValueAMD64_OpEqPtr(v *Value) bool {
30427 // match: (EqPtr x y)
30428 // result: (SETEQ (CMPQ x y))
30432 v.reset(OpAMD64SETEQ)
30433 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
30439 func rewriteValueAMD64_OpFMA(v *Value) bool {
30443 // match: (FMA x y z)
30444 // result: (VFMADD231SD z x y)
30449 v.reset(OpAMD64VFMADD231SD)
30454 func rewriteValueAMD64_OpFloor(v *Value) bool {
30456 // match: (Floor x)
30457 // result: (ROUNDSD [1] x)
30460 v.reset(OpAMD64ROUNDSD)
30461 v.AuxInt = int8ToAuxInt(1)
30466 func rewriteValueAMD64_OpGetG(v *Value) bool {
30468 // match: (GetG mem)
30469 // cond: !(objabi.Experiment.RegabiG && v.Block.Func.OwnAux.Fn.ABI() == obj.ABIInternal)
30470 // result: (LoweredGetG mem)
30473 if !(!(objabi.Experiment.RegabiG && v.Block.Func.OwnAux.Fn.ABI() == obj.ABIInternal)) {
30476 v.reset(OpAMD64LoweredGetG)
30482 func rewriteValueAMD64_OpHasCPUFeature(v *Value) bool {
30484 typ := &b.Func.Config.Types
30485 // match: (HasCPUFeature {s})
30486 // result: (SETNE (CMPQconst [0] (LoweredHasCPUFeature {s})))
30488 s := auxToSym(v.Aux)
30489 v.reset(OpAMD64SETNE)
30490 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
30491 v0.AuxInt = int32ToAuxInt(0)
30492 v1 := b.NewValue0(v.Pos, OpAMD64LoweredHasCPUFeature, typ.UInt64)
30493 v1.Aux = symToAux(s)
30499 func rewriteValueAMD64_OpIsInBounds(v *Value) bool {
30503 // match: (IsInBounds idx len)
30504 // result: (SETB (CMPQ idx len))
30508 v.reset(OpAMD64SETB)
30509 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
30510 v0.AddArg2(idx, len)
30515 func rewriteValueAMD64_OpIsNonNil(v *Value) bool {
30518 // match: (IsNonNil p)
30519 // result: (SETNE (TESTQ p p))
30522 v.reset(OpAMD64SETNE)
30523 v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags)
30529 func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool {
30533 // match: (IsSliceInBounds idx len)
30534 // result: (SETBE (CMPQ idx len))
30538 v.reset(OpAMD64SETBE)
30539 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
30540 v0.AddArg2(idx, len)
30545 func rewriteValueAMD64_OpLeq16(v *Value) bool {
30549 // match: (Leq16 x y)
30550 // result: (SETLE (CMPW x y))
30554 v.reset(OpAMD64SETLE)
30555 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
30561 func rewriteValueAMD64_OpLeq16U(v *Value) bool {
30565 // match: (Leq16U x y)
30566 // result: (SETBE (CMPW x y))
30570 v.reset(OpAMD64SETBE)
30571 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
30577 func rewriteValueAMD64_OpLeq32(v *Value) bool {
30581 // match: (Leq32 x y)
30582 // result: (SETLE (CMPL x y))
30586 v.reset(OpAMD64SETLE)
30587 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
30593 func rewriteValueAMD64_OpLeq32F(v *Value) bool {
30597 // match: (Leq32F x y)
30598 // result: (SETGEF (UCOMISS y x))
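// Note the swapped UCOMISS operands: x <= y is evaluated as y >= x, and an
// unordered (NaN) comparison reports "below", so the result comes out false as
// Go requires.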
30602 v.reset(OpAMD64SETGEF)
30603 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
30609 func rewriteValueAMD64_OpLeq32U(v *Value) bool {
30613 // match: (Leq32U x y)
30614 // result: (SETBE (CMPL x y))
30618 v.reset(OpAMD64SETBE)
30619 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
30625 func rewriteValueAMD64_OpLeq64(v *Value) bool {
30629 // match: (Leq64 x y)
30630 // result: (SETLE (CMPQ x y))
30634 v.reset(OpAMD64SETLE)
30635 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
30641 func rewriteValueAMD64_OpLeq64F(v *Value) bool {
30645 // match: (Leq64F x y)
30646 // result: (SETGEF (UCOMISD y x))
30650 v.reset(OpAMD64SETGEF)
30651 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
30657 func rewriteValueAMD64_OpLeq64U(v *Value) bool {
30661 // match: (Leq64U x y)
30662 // result: (SETBE (CMPQ x y))
30666 v.reset(OpAMD64SETBE)
30667 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
30673 func rewriteValueAMD64_OpLeq8(v *Value) bool {
30677 // match: (Leq8 x y)
30678 // result: (SETLE (CMPB x y))
30682 v.reset(OpAMD64SETLE)
30683 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
30689 func rewriteValueAMD64_OpLeq8U(v *Value) bool {
30693 // match: (Leq8U x y)
30694 // result: (SETBE (CMPB x y))
30698 v.reset(OpAMD64SETBE)
30699 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
30705 func rewriteValueAMD64_OpLess16(v *Value) bool {
30709 // match: (Less16 x y)
30710 // result: (SETL (CMPW x y))
30714 v.reset(OpAMD64SETL)
30715 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
30721 func rewriteValueAMD64_OpLess16U(v *Value) bool {
30725 // match: (Less16U x y)
30726 // result: (SETB (CMPW x y))
30730 v.reset(OpAMD64SETB)
30731 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
30737 func rewriteValueAMD64_OpLess32(v *Value) bool {
30741 // match: (Less32 x y)
30742 // result: (SETL (CMPL x y))
30746 v.reset(OpAMD64SETL)
30747 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
30753 func rewriteValueAMD64_OpLess32F(v *Value) bool {
30757 // match: (Less32F x y)
30758 // result: (SETGF (UCOMISS y x))
30762 v.reset(OpAMD64SETGF)
30763 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
30769 func rewriteValueAMD64_OpLess32U(v *Value) bool {
30773 // match: (Less32U x y)
30774 // result: (SETB (CMPL x y))
30778 v.reset(OpAMD64SETB)
30779 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
30785 func rewriteValueAMD64_OpLess64(v *Value) bool {
30789 // match: (Less64 x y)
30790 // result: (SETL (CMPQ x y))
30794 v.reset(OpAMD64SETL)
30795 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
30801 func rewriteValueAMD64_OpLess64F(v *Value) bool {
30805 // match: (Less64F x y)
30806 // result: (SETGF (UCOMISD y x))
30810 v.reset(OpAMD64SETGF)
30811 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
30817 func rewriteValueAMD64_OpLess64U(v *Value) bool {
30821 // match: (Less64U x y)
30822 // result: (SETB (CMPQ x y))
30826 v.reset(OpAMD64SETB)
30827 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
30833 func rewriteValueAMD64_OpLess8(v *Value) bool {
30837 // match: (Less8 x y)
30838 // result: (SETL (CMPB x y))
30842 v.reset(OpAMD64SETL)
30843 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
30849 func rewriteValueAMD64_OpLess8U(v *Value) bool {
30853 // match: (Less8U x y)
30854 // result: (SETB (CMPB x y))
30858 v.reset(OpAMD64SETB)
30859 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
30865 func rewriteValueAMD64_OpLoad(v *Value) bool {
30868 // match: (Load <t> ptr mem)
30869 // cond: (is64BitInt(t) || isPtr(t))
30870 // result: (MOVQload ptr mem)
30875 if !(is64BitInt(t) || isPtr(t)) {
30878 v.reset(OpAMD64MOVQload)
30879 v.AddArg2(ptr, mem)
30882 // match: (Load <t> ptr mem)
30883 // cond: is32BitInt(t)
30884 // result: (MOVLload ptr mem)
30889 if !(is32BitInt(t)) {
30892 v.reset(OpAMD64MOVLload)
30893 v.AddArg2(ptr, mem)
30896 // match: (Load <t> ptr mem)
30897 // cond: is16BitInt(t)
30898 // result: (MOVWload ptr mem)
30903 if !(is16BitInt(t)) {
30906 v.reset(OpAMD64MOVWload)
30907 v.AddArg2(ptr, mem)
30910 // match: (Load <t> ptr mem)
30911 // cond: (t.IsBoolean() || is8BitInt(t))
30912 // result: (MOVBload ptr mem)
30917 if !(t.IsBoolean() || is8BitInt(t)) {
30920 v.reset(OpAMD64MOVBload)
30921 v.AddArg2(ptr, mem)
30924 // match: (Load <t> ptr mem)
30925 // cond: is32BitFloat(t)
30926 // result: (MOVSSload ptr mem)
30931 if !(is32BitFloat(t)) {
30934 v.reset(OpAMD64MOVSSload)
30935 v.AddArg2(ptr, mem)
30938 // match: (Load <t> ptr mem)
30939 // cond: is64BitFloat(t)
30940 // result: (MOVSDload ptr mem)
30945 if !(is64BitFloat(t)) {
30948 v.reset(OpAMD64MOVSDload)
30949 v.AddArg2(ptr, mem)
30954 func rewriteValueAMD64_OpLocalAddr(v *Value) bool {
30956 // match: (LocalAddr {sym} base _)
30957 // result: (LEAQ {sym} base)
30959 sym := auxToSym(v.Aux)
30961 v.reset(OpAMD64LEAQ)
30962 v.Aux = symToAux(sym)
30967 func rewriteValueAMD64_OpLsh16x16(v *Value) bool {
30971 // match: (Lsh16x16 <t> x y)
30972 // cond: !shiftIsBounded(v)
30973 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
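// Carry-mask trick for possibly out-of-range shift counts: CMPWconst y [32]
// sets the carry flag exactly when y < 32, and SBBLcarrymask turns that into
// all ones (keep the SHLL result) or all zeros (force 0), giving the Go-defined
// zero result for over-long shifts. The bound is 32 rather than 16 because
// shifts by 16..31 already clear the low 16 bits on their own.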
30978 if !(!shiftIsBounded(v)) {
30981 v.reset(OpAMD64ANDL)
30982 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
30984 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
30985 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
30986 v2.AuxInt = int16ToAuxInt(32)
30992 // match: (Lsh16x16 x y)
30993 // cond: shiftIsBounded(v)
30994 // result: (SHLL x y)
30998 if !(shiftIsBounded(v)) {
31001 v.reset(OpAMD64SHLL)
31007 func rewriteValueAMD64_OpLsh16x32(v *Value) bool {
31011 // match: (Lsh16x32 <t> x y)
31012 // cond: !shiftIsBounded(v)
31013 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
31018 if !(!shiftIsBounded(v)) {
31021 v.reset(OpAMD64ANDL)
31022 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
31024 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
31025 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
31026 v2.AuxInt = int32ToAuxInt(32)
31032 // match: (Lsh16x32 x y)
31033 // cond: shiftIsBounded(v)
31034 // result: (SHLL x y)
31038 if !(shiftIsBounded(v)) {
31041 v.reset(OpAMD64SHLL)
31047 func rewriteValueAMD64_OpLsh16x64(v *Value) bool {
31051 // match: (Lsh16x64 <t> x y)
31052 // cond: !shiftIsBounded(v)
31053 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
31058 if !(!shiftIsBounded(v)) {
31061 v.reset(OpAMD64ANDL)
31062 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
31064 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
31065 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
31066 v2.AuxInt = int32ToAuxInt(32)
31072 // match: (Lsh16x64 x y)
31073 // cond: shiftIsBounded(v)
31074 // result: (SHLL x y)
31078 if !(shiftIsBounded(v)) {
31081 v.reset(OpAMD64SHLL)
31087 func rewriteValueAMD64_OpLsh16x8(v *Value) bool {
31091 // match: (Lsh16x8 <t> x y)
31092 // cond: !shiftIsBounded(v)
31093 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
31098 if !(!shiftIsBounded(v)) {
31101 v.reset(OpAMD64ANDL)
31102 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
31104 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
31105 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
31106 v2.AuxInt = int8ToAuxInt(32)
31112 // match: (Lsh16x8 x y)
31113 // cond: shiftIsBounded(v)
31114 // result: (SHLL x y)
31118 if !(shiftIsBounded(v)) {
31121 v.reset(OpAMD64SHLL)
31127 func rewriteValueAMD64_OpLsh32x16(v *Value) bool {
31131 // match: (Lsh32x16 <t> x y)
31132 // cond: !shiftIsBounded(v)
31133 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
31138 if !(!shiftIsBounded(v)) {
31141 v.reset(OpAMD64ANDL)
31142 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
31144 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
31145 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
31146 v2.AuxInt = int16ToAuxInt(32)
31152 // match: (Lsh32x16 x y)
31153 // cond: shiftIsBounded(v)
31154 // result: (SHLL x y)
31158 if !(shiftIsBounded(v)) {
31161 v.reset(OpAMD64SHLL)
31167 func rewriteValueAMD64_OpLsh32x32(v *Value) bool {
31171 // match: (Lsh32x32 <t> x y)
31172 // cond: !shiftIsBounded(v)
31173 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
31178 if !(!shiftIsBounded(v)) {
31181 v.reset(OpAMD64ANDL)
31182 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
31184 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
31185 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
31186 v2.AuxInt = int32ToAuxInt(32)
31192 // match: (Lsh32x32 x y)
31193 // cond: shiftIsBounded(v)
31194 // result: (SHLL x y)
31198 if !(shiftIsBounded(v)) {
31201 v.reset(OpAMD64SHLL)
31207 func rewriteValueAMD64_OpLsh32x64(v *Value) bool {
31211 // match: (Lsh32x64 <t> x y)
31212 // cond: !shiftIsBounded(v)
31213 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
31218 if !(!shiftIsBounded(v)) {
31221 v.reset(OpAMD64ANDL)
31222 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
31224 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
31225 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
31226 v2.AuxInt = int32ToAuxInt(32)
31232 // match: (Lsh32x64 x y)
31233 // cond: shiftIsBounded(v)
31234 // result: (SHLL x y)
31238 if !(shiftIsBounded(v)) {
31241 v.reset(OpAMD64SHLL)
31247 func rewriteValueAMD64_OpLsh32x8(v *Value) bool {
31251 // match: (Lsh32x8 <t> x y)
31252 // cond: !shiftIsBounded(v)
31253 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
31258 if !(!shiftIsBounded(v)) {
31261 v.reset(OpAMD64ANDL)
31262 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
31264 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
31265 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
31266 v2.AuxInt = int8ToAuxInt(32)
31272 // match: (Lsh32x8 x y)
31273 // cond: shiftIsBounded(v)
31274 // result: (SHLL x y)
31278 if !(shiftIsBounded(v)) {
31281 v.reset(OpAMD64SHLL)
31287 func rewriteValueAMD64_OpLsh64x16(v *Value) bool {
31291 // match: (Lsh64x16 <t> x y)
31292 // cond: !shiftIsBounded(v)
31293 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
31298 if !(!shiftIsBounded(v)) {
31301 v.reset(OpAMD64ANDQ)
31302 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
31304 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
31305 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
31306 v2.AuxInt = int16ToAuxInt(64)
31312 // match: (Lsh64x16 x y)
31313 // cond: shiftIsBounded(v)
31314 // result: (SHLQ x y)
31318 if !(shiftIsBounded(v)) {
31321 v.reset(OpAMD64SHLQ)
31327 func rewriteValueAMD64_OpLsh64x32(v *Value) bool {
31331 // match: (Lsh64x32 <t> x y)
31332 // cond: !shiftIsBounded(v)
31333 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
31338 if !(!shiftIsBounded(v)) {
31341 v.reset(OpAMD64ANDQ)
31342 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
31344 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
31345 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
31346 v2.AuxInt = int32ToAuxInt(64)
31352 // match: (Lsh64x32 x y)
31353 // cond: shiftIsBounded(v)
31354 // result: (SHLQ x y)
31358 if !(shiftIsBounded(v)) {
31361 v.reset(OpAMD64SHLQ)
31367 func rewriteValueAMD64_OpLsh64x64(v *Value) bool {
31371 // match: (Lsh64x64 <t> x y)
31372 // cond: !shiftIsBounded(v)
31373 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
31378 if !(!shiftIsBounded(v)) {
31381 v.reset(OpAMD64ANDQ)
31382 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
31384 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
31385 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
31386 v2.AuxInt = int32ToAuxInt(64)
31392 // match: (Lsh64x64 x y)
31393 // cond: shiftIsBounded(v)
31394 // result: (SHLQ x y)
31398 if !(shiftIsBounded(v)) {
31401 v.reset(OpAMD64SHLQ)
31407 func rewriteValueAMD64_OpLsh64x8(v *Value) bool {
31411 // match: (Lsh64x8 <t> x y)
31412 // cond: !shiftIsBounded(v)
31413 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
31418 if !(!shiftIsBounded(v)) {
31421 v.reset(OpAMD64ANDQ)
31422 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
31424 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
31425 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
31426 v2.AuxInt = int8ToAuxInt(64)
31432 // match: (Lsh64x8 x y)
31433 // cond: shiftIsBounded(v)
31434 // result: (SHLQ x y)
31438 if !(shiftIsBounded(v)) {
31441 v.reset(OpAMD64SHLQ)
31447 func rewriteValueAMD64_OpLsh8x16(v *Value) bool {
31451 // match: (Lsh8x16 <t> x y)
31452 // cond: !shiftIsBounded(v)
31453 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
31458 if !(!shiftIsBounded(v)) {
31461 v.reset(OpAMD64ANDL)
31462 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
31464 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
31465 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
31466 v2.AuxInt = int16ToAuxInt(32)
31472 // match: (Lsh8x16 x y)
31473 // cond: shiftIsBounded(v)
31474 // result: (SHLL x y)
31478 if !(shiftIsBounded(v)) {
31481 v.reset(OpAMD64SHLL)
31487 func rewriteValueAMD64_OpLsh8x32(v *Value) bool {
31491 // match: (Lsh8x32 <t> x y)
31492 // cond: !shiftIsBounded(v)
31493 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
31498 if !(!shiftIsBounded(v)) {
31501 v.reset(OpAMD64ANDL)
31502 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
31504 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
31505 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
31506 v2.AuxInt = int32ToAuxInt(32)
31512 // match: (Lsh8x32 x y)
31513 // cond: shiftIsBounded(v)
31514 // result: (SHLL x y)
31518 if !(shiftIsBounded(v)) {
31521 v.reset(OpAMD64SHLL)
31527 func rewriteValueAMD64_OpLsh8x64(v *Value) bool {
31531 // match: (Lsh8x64 <t> x y)
31532 // cond: !shiftIsBounded(v)
31533 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
31538 if !(!shiftIsBounded(v)) {
31541 v.reset(OpAMD64ANDL)
31542 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
31544 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
31545 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
31546 v2.AuxInt = int32ToAuxInt(32)
31552 // match: (Lsh8x64 x y)
31553 // cond: shiftIsBounded(v)
31554 // result: (SHLL x y)
31558 if !(shiftIsBounded(v)) {
31561 v.reset(OpAMD64SHLL)
31567 func rewriteValueAMD64_OpLsh8x8(v *Value) bool {
31571 // match: (Lsh8x8 <t> x y)
31572 // cond: !shiftIsBounded(v)
31573 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
31578 if !(!shiftIsBounded(v)) {
31581 v.reset(OpAMD64ANDL)
31582 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
31584 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
31585 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
31586 v2.AuxInt = int8ToAuxInt(32)
31592 // match: (Lsh8x8 x y)
31593 // cond: shiftIsBounded(v)
31594 // result: (SHLL x y)
31598 if !(shiftIsBounded(v)) {
31601 v.reset(OpAMD64SHLL)
31607 func rewriteValueAMD64_OpMod16(v *Value) bool {
31611 typ := &b.Func.Config.Types
31612 // match: (Mod16 [a] x y)
31613 // result: (Select1 (DIVW [a] x y))
31615 a := auxIntToBool(v.AuxInt)
31619 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
31620 v0.AuxInt = boolToAuxInt(a)
31626 func rewriteValueAMD64_OpMod16u(v *Value) bool {
31630 typ := &b.Func.Config.Types
31631 // match: (Mod16u x y)
31632 // result: (Select1 (DIVWU x y))
31637 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
31643 func rewriteValueAMD64_OpMod32(v *Value) bool {
31647 typ := &b.Func.Config.Types
31648 // match: (Mod32 [a] x y)
31649 // result: (Select1 (DIVL [a] x y))
31651 a := auxIntToBool(v.AuxInt)
31655 v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
31656 v0.AuxInt = boolToAuxInt(a)
31662 func rewriteValueAMD64_OpMod32u(v *Value) bool {
31666 typ := &b.Func.Config.Types
31667 // match: (Mod32u x y)
31668 // result: (Select1 (DIVLU x y))
31673 v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
31679 func rewriteValueAMD64_OpMod64(v *Value) bool {
31683 typ := &b.Func.Config.Types
31684 // match: (Mod64 [a] x y)
31685 // result: (Select1 (DIVQ [a] x y))
31687 a := auxIntToBool(v.AuxInt)
31691 v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
31692 v0.AuxInt = boolToAuxInt(a)
31698 func rewriteValueAMD64_OpMod64u(v *Value) bool {
31702 typ := &b.Func.Config.Types
31703 // match: (Mod64u x y)
31704 // result: (Select1 (DIVQU x y))
31709 v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
31715 func rewriteValueAMD64_OpMod8(v *Value) bool {
31719 typ := &b.Func.Config.Types
31720 // match: (Mod8 x y)
31721 // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
31726 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
31727 v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
31729 v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
31736 func rewriteValueAMD64_OpMod8u(v *Value) bool {
31740 typ := &b.Func.Config.Types
31741 // match: (Mod8u x y)
31742 // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
31747 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
31748 v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
31750 v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
31757 func rewriteValueAMD64_OpMove(v *Value) bool {
31762 config := b.Func.Config
31763 typ := &b.Func.Config.Types
31764 // match: (Move [0] _ _ mem)
31767 if auxIntToInt64(v.AuxInt) != 0 {
31774 // match: (Move [1] dst src mem)
31775 // result: (MOVBstore dst (MOVBload src mem) mem)
31777 if auxIntToInt64(v.AuxInt) != 1 {
31783 v.reset(OpAMD64MOVBstore)
31784 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
31785 v0.AddArg2(src, mem)
31786 v.AddArg3(dst, v0, mem)
31789 // match: (Move [2] dst src mem)
31790 // result: (MOVWstore dst (MOVWload src mem) mem)
31792 if auxIntToInt64(v.AuxInt) != 2 {
31798 v.reset(OpAMD64MOVWstore)
31799 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
31800 v0.AddArg2(src, mem)
31801 v.AddArg3(dst, v0, mem)
31804 // match: (Move [4] dst src mem)
31805 // result: (MOVLstore dst (MOVLload src mem) mem)
31807 if auxIntToInt64(v.AuxInt) != 4 {
31813 v.reset(OpAMD64MOVLstore)
31814 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
31815 v0.AddArg2(src, mem)
31816 v.AddArg3(dst, v0, mem)
31819 // match: (Move [8] dst src mem)
31820 // result: (MOVQstore dst (MOVQload src mem) mem)
31822 if auxIntToInt64(v.AuxInt) != 8 {
31828 v.reset(OpAMD64MOVQstore)
31829 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
31830 v0.AddArg2(src, mem)
31831 v.AddArg3(dst, v0, mem)
31834 // match: (Move [16] dst src mem)
31835 // cond: config.useSSE
31836 // result: (MOVOstore dst (MOVOload src mem) mem)
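// With SSE available a 16-byte copy is a single MOVO load/store pair; the next
// rule handles the !config.useSSE case with two 8-byte MOVQ pairs instead.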
31838 if auxIntToInt64(v.AuxInt) != 16 {
31844 if !(config.useSSE) {
31847 v.reset(OpAMD64MOVOstore)
31848 v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
31849 v0.AddArg2(src, mem)
31850 v.AddArg3(dst, v0, mem)
31853 // match: (Move [16] dst src mem)
31854 // cond: !config.useSSE
31855 // result: (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
31857 if auxIntToInt64(v.AuxInt) != 16 {
31863 if !(!config.useSSE) {
31866 v.reset(OpAMD64MOVQstore)
31867 v.AuxInt = int32ToAuxInt(8)
31868 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
31869 v0.AuxInt = int32ToAuxInt(8)
31870 v0.AddArg2(src, mem)
31871 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
31872 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
31873 v2.AddArg2(src, mem)
31874 v1.AddArg3(dst, v2, mem)
31875 v.AddArg3(dst, v0, v1)
31878 // match: (Move [32] dst src mem)
31879 // result: (Move [16] (OffPtr <dst.Type> dst [16]) (OffPtr <src.Type> src [16]) (Move [16] dst src mem))
31881 if auxIntToInt64(v.AuxInt) != 32 {
31888 v.AuxInt = int64ToAuxInt(16)
31889 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
31890 v0.AuxInt = int64ToAuxInt(16)
31892 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
31893 v1.AuxInt = int64ToAuxInt(16)
31895 v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
31896 v2.AuxInt = int64ToAuxInt(16)
31897 v2.AddArg3(dst, src, mem)
31898 v.AddArg3(v0, v1, v2)
31901 // match: (Move [48] dst src mem)
31902 // cond: config.useSSE
31903 // result: (Move [32] (OffPtr <dst.Type> dst [16]) (OffPtr <src.Type> src [16]) (Move [16] dst src mem))
31905 if auxIntToInt64(v.AuxInt) != 48 {
31911 if !(config.useSSE) {
31915 v.AuxInt = int64ToAuxInt(32)
31916 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
31917 v0.AuxInt = int64ToAuxInt(16)
31919 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
31920 v1.AuxInt = int64ToAuxInt(16)
31922 v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
31923 v2.AuxInt = int64ToAuxInt(16)
31924 v2.AddArg3(dst, src, mem)
31925 v.AddArg3(v0, v1, v2)
31928 // match: (Move [64] dst src mem)
31929 // cond: config.useSSE
31930 // result: (Move [32] (OffPtr <dst.Type> dst [32]) (OffPtr <src.Type> src [32]) (Move [32] dst src mem))
31932 if auxIntToInt64(v.AuxInt) != 64 {
31938 if !(config.useSSE) {
31942 v.AuxInt = int64ToAuxInt(32)
31943 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
31944 v0.AuxInt = int64ToAuxInt(32)
31946 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
31947 v1.AuxInt = int64ToAuxInt(32)
31949 v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
31950 v2.AuxInt = int64ToAuxInt(32)
31951 v2.AddArg3(dst, src, mem)
31952 v.AddArg3(v0, v1, v2)
31955 // match: (Move [3] dst src mem)
31956 // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
31958 if auxIntToInt64(v.AuxInt) != 3 {
31964 v.reset(OpAMD64MOVBstore)
31965 v.AuxInt = int32ToAuxInt(2)
31966 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
31967 v0.AuxInt = int32ToAuxInt(2)
31968 v0.AddArg2(src, mem)
31969 v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem)
31970 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
31971 v2.AddArg2(src, mem)
31972 v1.AddArg3(dst, v2, mem)
31973 v.AddArg3(dst, v0, v1)
31976 // match: (Move [5] dst src mem)
31977 // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
31979 if auxIntToInt64(v.AuxInt) != 5 {
31985 v.reset(OpAMD64MOVBstore)
31986 v.AuxInt = int32ToAuxInt(4)
31987 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
31988 v0.AuxInt = int32ToAuxInt(4)
31989 v0.AddArg2(src, mem)
31990 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
31991 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
31992 v2.AddArg2(src, mem)
31993 v1.AddArg3(dst, v2, mem)
31994 v.AddArg3(dst, v0, v1)
31997 // match: (Move [6] dst src mem)
31998 // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
32000 if auxIntToInt64(v.AuxInt) != 6 {
32006 v.reset(OpAMD64MOVWstore)
32007 v.AuxInt = int32ToAuxInt(4)
32008 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
32009 v0.AuxInt = int32ToAuxInt(4)
32010 v0.AddArg2(src, mem)
32011 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
32012 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
32013 v2.AddArg2(src, mem)
32014 v1.AddArg3(dst, v2, mem)
32015 v.AddArg3(dst, v0, v1)
32018 // match: (Move [7] dst src mem)
32019 // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
32021 if auxIntToInt64(v.AuxInt) != 7 {
32027 v.reset(OpAMD64MOVLstore)
32028 v.AuxInt = int32ToAuxInt(3)
32029 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
32030 v0.AuxInt = int32ToAuxInt(3)
32031 v0.AddArg2(src, mem)
32032 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
32033 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
32034 v2.AddArg2(src, mem)
32035 v1.AddArg3(dst, v2, mem)
32036 v.AddArg3(dst, v0, v1)
32039 // match: (Move [9] dst src mem)
32040 // result: (MOVBstore [8] dst (MOVBload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
32042 if auxIntToInt64(v.AuxInt) != 9 {
32048 v.reset(OpAMD64MOVBstore)
32049 v.AuxInt = int32ToAuxInt(8)
32050 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
32051 v0.AuxInt = int32ToAuxInt(8)
32052 v0.AddArg2(src, mem)
32053 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
32054 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
32055 v2.AddArg2(src, mem)
32056 v1.AddArg3(dst, v2, mem)
32057 v.AddArg3(dst, v0, v1)
32060 // match: (Move [10] dst src mem)
32061 // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
32063 if auxIntToInt64(v.AuxInt) != 10 {
32069 v.reset(OpAMD64MOVWstore)
32070 v.AuxInt = int32ToAuxInt(8)
32071 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
32072 v0.AuxInt = int32ToAuxInt(8)
32073 v0.AddArg2(src, mem)
32074 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
32075 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
32076 v2.AddArg2(src, mem)
32077 v1.AddArg3(dst, v2, mem)
32078 v.AddArg3(dst, v0, v1)
32081 // match: (Move [12] dst src mem)
32082 // result: (MOVLstore [8] dst (MOVLload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
32084 if auxIntToInt64(v.AuxInt) != 12 {
32090 v.reset(OpAMD64MOVLstore)
32091 v.AuxInt = int32ToAuxInt(8)
32092 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
32093 v0.AuxInt = int32ToAuxInt(8)
32094 v0.AddArg2(src, mem)
32095 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
32096 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
32097 v2.AddArg2(src, mem)
32098 v1.AddArg3(dst, v2, mem)
32099 v.AddArg3(dst, v0, v1)
32102 // match: (Move [s] dst src mem)
32103 // cond: s == 11 || s >= 13 && s <= 15
32104 // result: (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem) (MOVQstore dst (MOVQload src mem) mem))
32106 s := auxIntToInt64(v.AuxInt)
32110 if !(s == 11 || s >= 13 && s <= 15) {
32113 v.reset(OpAMD64MOVQstore)
32114 v.AuxInt = int32ToAuxInt(int32(s - 8))
32115 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
32116 v0.AuxInt = int32ToAuxInt(int32(s - 8))
32117 v0.AddArg2(src, mem)
32118 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
32119 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
32120 v2.AddArg2(src, mem)
32121 v1.AddArg3(dst, v2, mem)
32122 v.AddArg3(dst, v0, v1)
32125 // match: (Move [s] dst src mem)
32126 // cond: s > 16 && s%16 != 0 && s%16 <= 8
32127 // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore dst (MOVQload src mem) mem))
32129 s := auxIntToInt64(v.AuxInt)
32133 if !(s > 16 && s%16 != 0 && s%16 <= 8) {
32137 v.AuxInt = int64ToAuxInt(s - s%16)
32138 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
32139 v0.AuxInt = int64ToAuxInt(s % 16)
32141 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
32142 v1.AuxInt = int64ToAuxInt(s % 16)
32144 v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
32145 v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
32146 v3.AddArg2(src, mem)
32147 v2.AddArg3(dst, v3, mem)
32148 v.AddArg3(v0, v1, v2)
32151 // match: (Move [s] dst src mem)
32152 // cond: s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE
32153 // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVOstore dst (MOVOload src mem) mem))
32155 s := auxIntToInt64(v.AuxInt)
32159 if !(s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE) {
32163 v.AuxInt = int64ToAuxInt(s - s%16)
32164 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
32165 v0.AuxInt = int64ToAuxInt(s % 16)
32167 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
32168 v1.AuxInt = int64ToAuxInt(s % 16)
32170 v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
32171 v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
32172 v3.AddArg2(src, mem)
32173 v2.AddArg3(dst, v3, mem)
32174 v.AddArg3(v0, v1, v2)
32177 // match: (Move [s] dst src mem)
32178 // cond: s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE
32179 // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)))
32181 s := auxIntToInt64(v.AuxInt)
32185 if !(s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE) {
32189 v.AuxInt = int64ToAuxInt(s - s%16)
32190 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
32191 v0.AuxInt = int64ToAuxInt(s % 16)
32193 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
32194 v1.AuxInt = int64ToAuxInt(s % 16)
32196 v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
32197 v2.AuxInt = int32ToAuxInt(8)
32198 v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
32199 v3.AuxInt = int32ToAuxInt(8)
32200 v3.AddArg2(src, mem)
32201 v4 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
32202 v5 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
32203 v5.AddArg2(src, mem)
32204 v4.AddArg3(dst, v5, mem)
32205 v2.AddArg3(dst, v3, v4)
32206 v.AddArg3(v0, v1, v2)
32209 // match: (Move [s] dst src mem)
32210 // cond: s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
32211 // result: (DUFFCOPY [s] dst src mem)
32213 s := auxIntToInt64(v.AuxInt)
32217 if !(s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
32220 v.reset(OpAMD64DUFFCOPY)
32221 v.AuxInt = int64ToAuxInt(s)
32222 v.AddArg3(dst, src, mem)
32225 // match: (Move [s] dst src mem)
32226 // cond: (s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s)
32227 // result: (REPMOVSQ dst src (MOVQconst [s/8]) mem)
32229 s := auxIntToInt64(v.AuxInt)
32233 if !((s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s)) {
32236 v.reset(OpAMD64REPMOVSQ)
32237 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
32238 v0.AuxInt = int64ToAuxInt(s / 8)
32239 v.AddArg4(dst, src, v0, mem)
32244 func rewriteValueAMD64_OpNeg32F(v *Value) bool {
32247 typ := &b.Func.Config.Types
32248 // match: (Neg32F x)
32249 // result: (PXOR x (MOVSSconst <typ.Float32> [float32(math.Copysign(0, -1))]))
32252 v.reset(OpAMD64PXOR)
32253 v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32)
32254 v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1)))
32259 func rewriteValueAMD64_OpNeg64F(v *Value) bool {
32262 typ := &b.Func.Config.Types
32263 // match: (Neg64F x)
32264 // result: (PXOR x (MOVSDconst <typ.Float64> [math.Copysign(0, -1)]))
32267 v.reset(OpAMD64PXOR)
32268 v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64)
32269 v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1))
32274 func rewriteValueAMD64_OpNeq16(v *Value) bool {
32278 // match: (Neq16 x y)
32279 // result: (SETNE (CMPW x y))
32283 v.reset(OpAMD64SETNE)
32284 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
32290 func rewriteValueAMD64_OpNeq32(v *Value) bool {
32294 // match: (Neq32 x y)
32295 // result: (SETNE (CMPL x y))
32299 v.reset(OpAMD64SETNE)
32300 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
32306 func rewriteValueAMD64_OpNeq32F(v *Value) bool {
32310 // match: (Neq32F x y)
32311 // result: (SETNEF (UCOMISS x y))
32315 v.reset(OpAMD64SETNEF)
32316 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
32322 func rewriteValueAMD64_OpNeq64(v *Value) bool {
32326 // match: (Neq64 x y)
32327 // result: (SETNE (CMPQ x y))
32331 v.reset(OpAMD64SETNE)
32332 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
32338 func rewriteValueAMD64_OpNeq64F(v *Value) bool {
32342 // match: (Neq64F x y)
32343 // result: (SETNEF (UCOMISD x y))
32347 v.reset(OpAMD64SETNEF)
32348 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
32354 func rewriteValueAMD64_OpNeq8(v *Value) bool {
32358 // match: (Neq8 x y)
32359 // result: (SETNE (CMPB x y))
32363 v.reset(OpAMD64SETNE)
32364 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
32370 func rewriteValueAMD64_OpNeqB(v *Value) bool {
32374 // match: (NeqB x y)
32375 // result: (SETNE (CMPB x y))
32379 v.reset(OpAMD64SETNE)
32380 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
32386 func rewriteValueAMD64_OpNeqPtr(v *Value) bool {
32390 // match: (NeqPtr x y)
32391 // result: (SETNE (CMPQ x y))
32395 v.reset(OpAMD64SETNE)
32396 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
32402 func rewriteValueAMD64_OpNot(v *Value) bool {
32404 // match: (Not x)
32405 // result: (XORLconst [1] x)
32408 v.reset(OpAMD64XORLconst)
32409 v.AuxInt = int32ToAuxInt(1)
32414 func rewriteValueAMD64_OpOffPtr(v *Value) bool {
32417 typ := &b.Func.Config.Types
32418 // match: (OffPtr [off] ptr)
32419 // cond: is32Bit(off)
32420 // result: (ADDQconst [int32(off)] ptr)
32422 off := auxIntToInt64(v.AuxInt)
32424 if !(is32Bit(off)) {
32427 v.reset(OpAMD64ADDQconst)
32428 v.AuxInt = int32ToAuxInt(int32(off))
32432 // match: (OffPtr [off] ptr)
32433 // result: (ADDQ (MOVQconst [off]) ptr)
32435 off := auxIntToInt64(v.AuxInt)
32437 v.reset(OpAMD64ADDQ)
32438 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
32439 v0.AuxInt = int64ToAuxInt(off)
32444 func rewriteValueAMD64_OpPanicBounds(v *Value) bool {
32448 // match: (PanicBounds [kind] x y mem)
32449 // cond: boundsABI(kind) == 0
32450 // result: (LoweredPanicBoundsA [kind] x y mem)
32452 kind := auxIntToInt64(v.AuxInt)
32456 if !(boundsABI(kind) == 0) {
32459 v.reset(OpAMD64LoweredPanicBoundsA)
32460 v.AuxInt = int64ToAuxInt(kind)
32461 v.AddArg3(x, y, mem)
32464 // match: (PanicBounds [kind] x y mem)
32465 // cond: boundsABI(kind) == 1
32466 // result: (LoweredPanicBoundsB [kind] x y mem)
32468 kind := auxIntToInt64(v.AuxInt)
32472 if !(boundsABI(kind) == 1) {
32475 v.reset(OpAMD64LoweredPanicBoundsB)
32476 v.AuxInt = int64ToAuxInt(kind)
32477 v.AddArg3(x, y, mem)
32480 // match: (PanicBounds [kind] x y mem)
32481 // cond: boundsABI(kind) == 2
32482 // result: (LoweredPanicBoundsC [kind] x y mem)
32484 kind := auxIntToInt64(v.AuxInt)
32488 if !(boundsABI(kind) == 2) {
32491 v.reset(OpAMD64LoweredPanicBoundsC)
32492 v.AuxInt = int64ToAuxInt(kind)
32493 v.AddArg3(x, y, mem)
32498 func rewriteValueAMD64_OpPopCount16(v *Value) bool {
32501 typ := &b.Func.Config.Types
32502 // match: (PopCount16 x)
32503 // result: (POPCNTL (MOVWQZX <typ.UInt32> x))
32506 v.reset(OpAMD64POPCNTL)
32507 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
32513 func rewriteValueAMD64_OpPopCount8(v *Value) bool {
32516 typ := &b.Func.Config.Types
32517 // match: (PopCount8 x)
32518 // result: (POPCNTL (MOVBQZX <typ.UInt32> x))
32521 v.reset(OpAMD64POPCNTL)
32522 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
32528 func rewriteValueAMD64_OpRoundToEven(v *Value) bool {
32530 // match: (RoundToEven x)
32531 // result: (ROUNDSD [0] x)
32534 v.reset(OpAMD64ROUNDSD)
32535 v.AuxInt = int8ToAuxInt(0)
32540 func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool {
32544 // match: (Rsh16Ux16 <t> x y)
32545 // cond: !shiftIsBounded(v)
32546 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
32551 if !(!shiftIsBounded(v)) {
32554 v.reset(OpAMD64ANDL)
32555 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
32557 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
32558 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
32559 v2.AuxInt = int16ToAuxInt(16)
32565 // match: (Rsh16Ux16 x y)
32566 // cond: shiftIsBounded(v)
32567 // result: (SHRW x y)
32571 if !(shiftIsBounded(v)) {
32574 v.reset(OpAMD64SHRW)
32580 func rewriteValueAMD64_OpRsh16Ux32(v *Value) bool {
32584 // match: (Rsh16Ux32 <t> x y)
32585 // cond: !shiftIsBounded(v)
32586 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
32591 if !(!shiftIsBounded(v)) {
32594 v.reset(OpAMD64ANDL)
32595 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
32597 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
32598 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
32599 v2.AuxInt = int32ToAuxInt(16)
32605 // match: (Rsh16Ux32 x y)
32606 // cond: shiftIsBounded(v)
32607 // result: (SHRW x y)
32611 if !(shiftIsBounded(v)) {
32614 v.reset(OpAMD64SHRW)
32620 func rewriteValueAMD64_OpRsh16Ux64(v *Value) bool {
32624 // match: (Rsh16Ux64 <t> x y)
32625 // cond: !shiftIsBounded(v)
32626 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
32631 if !(!shiftIsBounded(v)) {
32634 v.reset(OpAMD64ANDL)
32635 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
32637 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
32638 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
32639 v2.AuxInt = int32ToAuxInt(16)
32645 // match: (Rsh16Ux64 x y)
32646 // cond: shiftIsBounded(v)
32647 // result: (SHRW x y)
32651 if !(shiftIsBounded(v)) {
32654 v.reset(OpAMD64SHRW)
32660 func rewriteValueAMD64_OpRsh16Ux8(v *Value) bool {
32664 // match: (Rsh16Ux8 <t> x y)
32665 // cond: !shiftIsBounded(v)
32666 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
32671 if !(!shiftIsBounded(v)) {
32674 v.reset(OpAMD64ANDL)
32675 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
32677 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
32678 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
32679 v2.AuxInt = int8ToAuxInt(16)
32685 // match: (Rsh16Ux8 x y)
32686 // cond: shiftIsBounded(v)
32687 // result: (SHRW x y)
32691 if !(shiftIsBounded(v)) {
32694 v.reset(OpAMD64SHRW)
32700 func rewriteValueAMD64_OpRsh16x16(v *Value) bool {
32704 // match: (Rsh16x16 <t> x y)
32705 // cond: !shiftIsBounded(v)
32706 // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
32711 if !(!shiftIsBounded(v)) {
32714 v.reset(OpAMD64SARW)
32716 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
32717 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
32718 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
32719 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
32720 v3.AuxInt = int16ToAuxInt(16)
32728 // match: (Rsh16x16 x y)
32729 // cond: shiftIsBounded(v)
32730 // result: (SARW x y)
32734 if !(shiftIsBounded(v)) {
32737 v.reset(OpAMD64SARW)
32743 func rewriteValueAMD64_OpRsh16x32(v *Value) bool {
32747 // match: (Rsh16x32 <t> x y)
32748 // cond: !shiftIsBounded(v)
32749 // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
32754 if !(!shiftIsBounded(v)) {
32757 v.reset(OpAMD64SARW)
32759 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
32760 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
32761 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
32762 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
32763 v3.AuxInt = int32ToAuxInt(16)
32771 // match: (Rsh16x32 x y)
32772 // cond: shiftIsBounded(v)
32773 // result: (SARW x y)
32777 if !(shiftIsBounded(v)) {
32780 v.reset(OpAMD64SARW)
32786 func rewriteValueAMD64_OpRsh16x64(v *Value) bool {
32790 // match: (Rsh16x64 <t> x y)
32791 // cond: !shiftIsBounded(v)
32792 // result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
32797 if !(!shiftIsBounded(v)) {
32800 v.reset(OpAMD64SARW)
32802 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
32803 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
32804 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
32805 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
32806 v3.AuxInt = int32ToAuxInt(16)
32814 // match: (Rsh16x64 x y)
32815 // cond: shiftIsBounded(v)
32816 // result: (SARW x y)
32820 if !(shiftIsBounded(v)) {
32823 v.reset(OpAMD64SARW)
32829 func rewriteValueAMD64_OpRsh16x8(v *Value) bool {
32833 // match: (Rsh16x8 <t> x y)
32834 // cond: !shiftIsBounded(v)
32835 // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
32840 if !(!shiftIsBounded(v)) {
32843 v.reset(OpAMD64SARW)
32845 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
32846 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
32847 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
32848 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
32849 v3.AuxInt = int8ToAuxInt(16)
32857 // match: (Rsh16x8 x y)
32858 // cond: shiftIsBounded(v)
32859 // result: (SARW x y)
32863 if !(shiftIsBounded(v)) {
32866 v.reset(OpAMD64SARW)
32872 func rewriteValueAMD64_OpRsh32Ux16(v *Value) bool {
32876 // match: (Rsh32Ux16 <t> x y)
32877 // cond: !shiftIsBounded(v)
32878 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
32883 if !(!shiftIsBounded(v)) {
32886 v.reset(OpAMD64ANDL)
32887 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
32889 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
32890 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
32891 v2.AuxInt = int16ToAuxInt(32)
32897 // match: (Rsh32Ux16 x y)
32898 // cond: shiftIsBounded(v)
32899 // result: (SHRL x y)
32903 if !(shiftIsBounded(v)) {
32906 v.reset(OpAMD64SHRL)
32912 func rewriteValueAMD64_OpRsh32Ux32(v *Value) bool {
32916 // match: (Rsh32Ux32 <t> x y)
32917 // cond: !shiftIsBounded(v)
32918 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
32923 if !(!shiftIsBounded(v)) {
32926 v.reset(OpAMD64ANDL)
32927 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
32929 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
32930 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
32931 v2.AuxInt = int32ToAuxInt(32)
32937 // match: (Rsh32Ux32 x y)
32938 // cond: shiftIsBounded(v)
32939 // result: (SHRL x y)
32943 if !(shiftIsBounded(v)) {
32946 v.reset(OpAMD64SHRL)
32952 func rewriteValueAMD64_OpRsh32Ux64(v *Value) bool {
32956 // match: (Rsh32Ux64 <t> x y)
32957 // cond: !shiftIsBounded(v)
32958 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
32963 if !(!shiftIsBounded(v)) {
32966 v.reset(OpAMD64ANDL)
32967 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
32969 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
32970 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
32971 v2.AuxInt = int32ToAuxInt(32)
32977 // match: (Rsh32Ux64 x y)
32978 // cond: shiftIsBounded(v)
32979 // result: (SHRL x y)
32983 if !(shiftIsBounded(v)) {
32986 v.reset(OpAMD64SHRL)
32992 func rewriteValueAMD64_OpRsh32Ux8(v *Value) bool {
32996 // match: (Rsh32Ux8 <t> x y)
32997 // cond: !shiftIsBounded(v)
32998 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
33003 if !(!shiftIsBounded(v)) {
33006 v.reset(OpAMD64ANDL)
33007 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
33009 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
33010 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
33011 v2.AuxInt = int8ToAuxInt(32)
33017 // match: (Rsh32Ux8 x y)
33018 // cond: shiftIsBounded(v)
33019 // result: (SHRL x y)
33023 if !(shiftIsBounded(v)) {
33026 v.reset(OpAMD64SHRL)
33032 func rewriteValueAMD64_OpRsh32x16(v *Value) bool {
33036 // match: (Rsh32x16 <t> x y)
33037 // cond: !shiftIsBounded(v)
33038 // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
33043 if !(!shiftIsBounded(v)) {
33046 v.reset(OpAMD64SARL)
33048 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
33049 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
33050 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
33051 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
33052 v3.AuxInt = int16ToAuxInt(32)
33060 // match: (Rsh32x16 x y)
33061 // cond: shiftIsBounded(v)
33062 // result: (SARL x y)
33066 if !(shiftIsBounded(v)) {
33069 v.reset(OpAMD64SARL)
33075 func rewriteValueAMD64_OpRsh32x32(v *Value) bool {
33079 // match: (Rsh32x32 <t> x y)
33080 // cond: !shiftIsBounded(v)
33081 // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
33086 if !(!shiftIsBounded(v)) {
33089 v.reset(OpAMD64SARL)
33091 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
33092 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
33093 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
33094 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
33095 v3.AuxInt = int32ToAuxInt(32)
33103 // match: (Rsh32x32 x y)
33104 // cond: shiftIsBounded(v)
33105 // result: (SARL x y)
33109 if !(shiftIsBounded(v)) {
33112 v.reset(OpAMD64SARL)
33118 func rewriteValueAMD64_OpRsh32x64(v *Value) bool {
33122 // match: (Rsh32x64 <t> x y)
33123 // cond: !shiftIsBounded(v)
33124 // result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
33129 if !(!shiftIsBounded(v)) {
33132 v.reset(OpAMD64SARL)
33134 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
33135 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
33136 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
33137 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
33138 v3.AuxInt = int32ToAuxInt(32)
33146 // match: (Rsh32x64 x y)
33147 // cond: shiftIsBounded(v)
33148 // result: (SARL x y)
33152 if !(shiftIsBounded(v)) {
33155 v.reset(OpAMD64SARL)
33161 func rewriteValueAMD64_OpRsh32x8(v *Value) bool {
33165 // match: (Rsh32x8 <t> x y)
33166 // cond: !shiftIsBounded(v)
33167 // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
33172 if !(!shiftIsBounded(v)) {
33175 v.reset(OpAMD64SARL)
33177 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
33178 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
33179 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
33180 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
33181 v3.AuxInt = int8ToAuxInt(32)
33189 // match: (Rsh32x8 x y)
33190 // cond: shiftIsBounded(v)
33191 // result: (SARL x y)
33195 if !(shiftIsBounded(v)) {
33198 v.reset(OpAMD64SARL)
33204 func rewriteValueAMD64_OpRsh64Ux16(v *Value) bool {
33208 // match: (Rsh64Ux16 <t> x y)
33209 // cond: !shiftIsBounded(v)
33210 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
33215 if !(!shiftIsBounded(v)) {
33218 v.reset(OpAMD64ANDQ)
33219 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
33221 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
33222 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
33223 v2.AuxInt = int16ToAuxInt(64)
33229 // match: (Rsh64Ux16 x y)
33230 // cond: shiftIsBounded(v)
33231 // result: (SHRQ x y)
33235 if !(shiftIsBounded(v)) {
33238 v.reset(OpAMD64SHRQ)
33244 func rewriteValueAMD64_OpRsh64Ux32(v *Value) bool {
33248 // match: (Rsh64Ux32 <t> x y)
33249 // cond: !shiftIsBounded(v)
33250 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
33255 if !(!shiftIsBounded(v)) {
33258 v.reset(OpAMD64ANDQ)
33259 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
33261 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
33262 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
33263 v2.AuxInt = int32ToAuxInt(64)
33269 // match: (Rsh64Ux32 x y)
33270 // cond: shiftIsBounded(v)
33271 // result: (SHRQ x y)
33275 if !(shiftIsBounded(v)) {
33278 v.reset(OpAMD64SHRQ)
33284 func rewriteValueAMD64_OpRsh64Ux64(v *Value) bool {
33288 // match: (Rsh64Ux64 <t> x y)
33289 // cond: !shiftIsBounded(v)
33290 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
33295 if !(!shiftIsBounded(v)) {
33298 v.reset(OpAMD64ANDQ)
33299 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
33301 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
33302 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
33303 v2.AuxInt = int32ToAuxInt(64)
33309 // match: (Rsh64Ux64 x y)
33310 // cond: shiftIsBounded(v)
33311 // result: (SHRQ x y)
33315 if !(shiftIsBounded(v)) {
33318 v.reset(OpAMD64SHRQ)
33324 func rewriteValueAMD64_OpRsh64Ux8(v *Value) bool {
33328 // match: (Rsh64Ux8 <t> x y)
33329 // cond: !shiftIsBounded(v)
33330 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
33335 if !(!shiftIsBounded(v)) {
33338 v.reset(OpAMD64ANDQ)
33339 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
33341 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
33342 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
33343 v2.AuxInt = int8ToAuxInt(64)
33349 // match: (Rsh64Ux8 x y)
33350 // cond: shiftIsBounded(v)
33351 // result: (SHRQ x y)
33355 if !(shiftIsBounded(v)) {
33358 v.reset(OpAMD64SHRQ)
33364 func rewriteValueAMD64_OpRsh64x16(v *Value) bool {
33368 // match: (Rsh64x16 <t> x y)
33369 // cond: !shiftIsBounded(v)
33370 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
33375 if !(!shiftIsBounded(v)) {
33378 v.reset(OpAMD64SARQ)
33380 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
33381 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
33382 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
33383 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
33384 v3.AuxInt = int16ToAuxInt(64)
33392 // match: (Rsh64x16 x y)
33393 // cond: shiftIsBounded(v)
33394 // result: (SARQ x y)
33398 if !(shiftIsBounded(v)) {
33401 v.reset(OpAMD64SARQ)
33407 func rewriteValueAMD64_OpRsh64x32(v *Value) bool {
33411 // match: (Rsh64x32 <t> x y)
33412 // cond: !shiftIsBounded(v)
33413 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
33418 if !(!shiftIsBounded(v)) {
33421 v.reset(OpAMD64SARQ)
33423 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
33424 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
33425 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
33426 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
33427 v3.AuxInt = int32ToAuxInt(64)
33435 // match: (Rsh64x32 x y)
33436 // cond: shiftIsBounded(v)
33437 // result: (SARQ x y)
33441 if !(shiftIsBounded(v)) {
33444 v.reset(OpAMD64SARQ)
33450 func rewriteValueAMD64_OpRsh64x64(v *Value) bool {
33454 // match: (Rsh64x64 <t> x y)
33455 // cond: !shiftIsBounded(v)
33456 // result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
33461 if !(!shiftIsBounded(v)) {
33464 v.reset(OpAMD64SARQ)
33466 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
33467 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
33468 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
33469 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
33470 v3.AuxInt = int32ToAuxInt(64)
33478 // match: (Rsh64x64 x y)
33479 // cond: shiftIsBounded(v)
33480 // result: (SARQ x y)
33484 if !(shiftIsBounded(v)) {
33487 v.reset(OpAMD64SARQ)
33493 func rewriteValueAMD64_OpRsh64x8(v *Value) bool {
33497 // match: (Rsh64x8 <t> x y)
33498 // cond: !shiftIsBounded(v)
33499 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
33504 if !(!shiftIsBounded(v)) {
33507 v.reset(OpAMD64SARQ)
33509 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
33510 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
33511 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
33512 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
33513 v3.AuxInt = int8ToAuxInt(64)
33521 // match: (Rsh64x8 x y)
33522 // cond: shiftIsBounded(v)
33523 // result: (SARQ x y)
33527 if !(shiftIsBounded(v)) {
33530 v.reset(OpAMD64SARQ)
33536 func rewriteValueAMD64_OpRsh8Ux16(v *Value) bool {
33540 // match: (Rsh8Ux16 <t> x y)
33541 // cond: !shiftIsBounded(v)
33542 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
33547 if !(!shiftIsBounded(v)) {
33550 v.reset(OpAMD64ANDL)
33551 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
33553 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
33554 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
33555 v2.AuxInt = int16ToAuxInt(8)
33561 // match: (Rsh8Ux16 x y)
33562 // cond: shiftIsBounded(v)
33563 // result: (SHRB x y)
33567 if !(shiftIsBounded(v)) {
33570 v.reset(OpAMD64SHRB)
33576 func rewriteValueAMD64_OpRsh8Ux32(v *Value) bool {
33580 // match: (Rsh8Ux32 <t> x y)
33581 // cond: !shiftIsBounded(v)
33582 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
33587 if !(!shiftIsBounded(v)) {
33590 v.reset(OpAMD64ANDL)
33591 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
33593 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
33594 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
33595 v2.AuxInt = int32ToAuxInt(8)
33601 // match: (Rsh8Ux32 x y)
33602 // cond: shiftIsBounded(v)
33603 // result: (SHRB x y)
33607 if !(shiftIsBounded(v)) {
33610 v.reset(OpAMD64SHRB)
33616 func rewriteValueAMD64_OpRsh8Ux64(v *Value) bool {
33620 // match: (Rsh8Ux64 <t> x y)
33621 // cond: !shiftIsBounded(v)
33622 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
33627 if !(!shiftIsBounded(v)) {
33630 v.reset(OpAMD64ANDL)
33631 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
33633 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
33634 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
33635 v2.AuxInt = int32ToAuxInt(8)
33641 // match: (Rsh8Ux64 x y)
33642 // cond: shiftIsBounded(v)
33643 // result: (SHRB x y)
33647 if !(shiftIsBounded(v)) {
33650 v.reset(OpAMD64SHRB)
33656 func rewriteValueAMD64_OpRsh8Ux8(v *Value) bool {
33660 // match: (Rsh8Ux8 <t> x y)
33661 // cond: !shiftIsBounded(v)
33662 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
33667 if !(!shiftIsBounded(v)) {
33670 v.reset(OpAMD64ANDL)
33671 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
33673 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
33674 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
33675 v2.AuxInt = int8ToAuxInt(8)
33681 // match: (Rsh8Ux8 x y)
33682 // cond: shiftIsBounded(v)
33683 // result: (SHRB x y)
33687 if !(shiftIsBounded(v)) {
33690 v.reset(OpAMD64SHRB)
33696 func rewriteValueAMD64_OpRsh8x16(v *Value) bool {
33700 // match: (Rsh8x16 <t> x y)
33701 // cond: !shiftIsBounded(v)
33702 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
33707 if !(!shiftIsBounded(v)) {
33710 v.reset(OpAMD64SARB)
33712 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
33713 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
33714 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
33715 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
33716 v3.AuxInt = int16ToAuxInt(8)
33724 // match: (Rsh8x16 x y)
33725 // cond: shiftIsBounded(v)
33726 // result: (SARB x y)
33730 if !(shiftIsBounded(v)) {
33733 v.reset(OpAMD64SARB)
33739 func rewriteValueAMD64_OpRsh8x32(v *Value) bool {
33743 // match: (Rsh8x32 <t> x y)
33744 // cond: !shiftIsBounded(v)
33745 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
33750 if !(!shiftIsBounded(v)) {
33753 v.reset(OpAMD64SARB)
33755 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
33756 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
33757 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
33758 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
33759 v3.AuxInt = int32ToAuxInt(8)
33767 // match: (Rsh8x32 x y)
33768 // cond: shiftIsBounded(v)
33769 // result: (SARB x y)
33773 if !(shiftIsBounded(v)) {
33776 v.reset(OpAMD64SARB)
33782 func rewriteValueAMD64_OpRsh8x64(v *Value) bool {
33786 // match: (Rsh8x64 <t> x y)
33787 // cond: !shiftIsBounded(v)
33788 // result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
33793 if !(!shiftIsBounded(v)) {
33796 v.reset(OpAMD64SARB)
33798 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
33799 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
33800 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
33801 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
33802 v3.AuxInt = int32ToAuxInt(8)
33810 // match: (Rsh8x64 x y)
33811 // cond: shiftIsBounded(v)
33812 // result: (SARB x y)
33816 if !(shiftIsBounded(v)) {
33819 v.reset(OpAMD64SARB)
33825 func rewriteValueAMD64_OpRsh8x8(v *Value) bool {
33829 // match: (Rsh8x8 <t> x y)
33830 // cond: !shiftIsBounded(v)
33831 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
33836 if !(!shiftIsBounded(v)) {
33839 v.reset(OpAMD64SARB)
33841 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
33842 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
33843 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
33844 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
33845 v3.AuxInt = int8ToAuxInt(8)
33853 // match: (Rsh8x8 x y)
33854 // cond: shiftIsBounded(v)
33855 // result: (SARB x y)
33859 if !(shiftIsBounded(v)) {
33862 v.reset(OpAMD64SARB)
33868 func rewriteValueAMD64_OpSelect0(v *Value) bool {
33871 typ := &b.Func.Config.Types
33872 // match: (Select0 (Mul64uover x y))
33873 // result: (Select0 <typ.UInt64> (MULQU x y))
33875 if v_0.Op != OpMul64uover {
33881 v.Type = typ.UInt64
33882 v0 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
33887 // match: (Select0 (Mul32uover x y))
33888 // result: (Select0 <typ.UInt32> (MULLU x y))
33890 if v_0.Op != OpMul32uover {
33896 v.Type = typ.UInt32
33897 v0 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
33902 // match: (Select0 (Add64carry x y c))
33903 // result: (Select0 <typ.UInt64> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
33905 if v_0.Op != OpAdd64carry {
33912 v.Type = typ.UInt64
33913 v0 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags))
33914 v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
33915 v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
33918 v0.AddArg3(x, y, v1)
33922 // match: (Select0 (Sub64borrow x y c))
33923 // result: (Select0 <typ.UInt64> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
33925 if v_0.Op != OpSub64borrow {
33932 v.Type = typ.UInt64
33933 v0 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags))
33934 v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
33935 v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
33938 v0.AddArg3(x, y, v1)
33942 // match: (Select0 <t> (AddTupleFirst32 val tuple))
33943 // result: (ADDL val (Select0 <t> tuple))
33946 if v_0.Op != OpAMD64AddTupleFirst32 {
33949 tuple := v_0.Args[1]
33951 v.reset(OpAMD64ADDL)
33952 v0 := b.NewValue0(v.Pos, OpSelect0, t)
33957 // match: (Select0 <t> (AddTupleFirst64 val tuple))
33958 // result: (ADDQ val (Select0 <t> tuple))
33961 if v_0.Op != OpAMD64AddTupleFirst64 {
33964 tuple := v_0.Args[1]
33966 v.reset(OpAMD64ADDQ)
33967 v0 := b.NewValue0(v.Pos, OpSelect0, t)
33974 func rewriteValueAMD64_OpSelect1(v *Value) bool {
33977 typ := &b.Func.Config.Types
33978 // match: (Select1 (Mul64uover x y))
33979 // result: (SETO (Select1 <types.TypeFlags> (MULQU x y)))
33981 if v_0.Op != OpMul64uover {
33986 v.reset(OpAMD64SETO)
33987 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
33988 v1 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
33994 // match: (Select1 (Mul32uover x y))
33995 // result: (SETO (Select1 <types.TypeFlags> (MULLU x y)))
33997 if v_0.Op != OpMul32uover {
34002 v.reset(OpAMD64SETO)
34003 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
34004 v1 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
34010 // match: (Select1 (Add64carry x y c))
34011 // result: (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
34013 if v_0.Op != OpAdd64carry {
34019 v.reset(OpAMD64NEGQ)
34020 v.Type = typ.UInt64
34021 v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64)
34022 v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
34023 v2 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags))
34024 v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
34025 v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
34028 v2.AddArg3(x, y, v3)
34034 // match: (Select1 (Sub64borrow x y c))
34035 // result: (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
34037 if v_0.Op != OpSub64borrow {
34043 v.reset(OpAMD64NEGQ)
34044 v.Type = typ.UInt64
34045 v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64)
34046 v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
34047 v2 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags))
34048 v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
34049 v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
34052 v2.AddArg3(x, y, v3)
34058 // match: (Select1 (NEGLflags (MOVQconst [0])))
34059 // result: (FlagEQ)
34061 if v_0.Op != OpAMD64NEGLflags {
34064 v_0_0 := v_0.Args[0]
34065 if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 0 {
34068 v.reset(OpAMD64FlagEQ)
34071 // match: (Select1 (NEGLflags (NEGQ (SBBQcarrymask x))))
34072 // result: x
34074 if v_0.Op != OpAMD64NEGLflags {
34077 v_0_0 := v_0.Args[0]
34078 if v_0_0.Op != OpAMD64NEGQ {
34081 v_0_0_0 := v_0_0.Args[0]
34082 if v_0_0_0.Op != OpAMD64SBBQcarrymask {
34085 x := v_0_0_0.Args[0]
34089 // match: (Select1 (AddTupleFirst32 _ tuple))
34090 // result: (Select1 tuple)
34092 if v_0.Op != OpAMD64AddTupleFirst32 {
34095 tuple := v_0.Args[1]
34100 // match: (Select1 (AddTupleFirst64 _ tuple))
34101 // result: (Select1 tuple)
34103 if v_0.Op != OpAMD64AddTupleFirst64 {
34106 tuple := v_0.Args[1]
34113 func rewriteValueAMD64_OpSlicemask(v *Value) bool {
34116 // match: (Slicemask <t> x)
34117 // result: (SARQconst (NEGQ <t> x) [63])
34121 v.reset(OpAMD64SARQconst)
34122 v.AuxInt = int8ToAuxInt(63)
34123 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
34129 func rewriteValueAMD64_OpSpectreIndex(v *Value) bool {
34133 typ := &b.Func.Config.Types
34134 // match: (SpectreIndex <t> x y)
34135 // result: (CMOVQCC x (MOVQconst [0]) (CMPQ x y))
34139 v.reset(OpAMD64CMOVQCC)
34140 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
34141 v0.AuxInt = int64ToAuxInt(0)
34142 v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
34144 v.AddArg3(x, v0, v1)
34148 func rewriteValueAMD64_OpSpectreSliceIndex(v *Value) bool {
34152 typ := &b.Func.Config.Types
34153 // match: (SpectreSliceIndex <t> x y)
34154 // result: (CMOVQHI x (MOVQconst [0]) (CMPQ x y))
34158 v.reset(OpAMD64CMOVQHI)
34159 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
34160 v0.AuxInt = int64ToAuxInt(0)
34161 v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
34163 v.AddArg3(x, v0, v1)
34167 func rewriteValueAMD64_OpStore(v *Value) bool {
34171 // match: (Store {t} ptr val mem)
34172 // cond: t.Size() == 8 && is64BitFloat(val.Type)
34173 // result: (MOVSDstore ptr val mem)
34175 t := auxToType(v.Aux)
34179 if !(t.Size() == 8 && is64BitFloat(val.Type)) {
34182 v.reset(OpAMD64MOVSDstore)
34183 v.AddArg3(ptr, val, mem)
34186 // match: (Store {t} ptr val mem)
34187 // cond: t.Size() == 4 && is32BitFloat(val.Type)
34188 // result: (MOVSSstore ptr val mem)
34190 t := auxToType(v.Aux)
34194 if !(t.Size() == 4 && is32BitFloat(val.Type)) {
34197 v.reset(OpAMD64MOVSSstore)
34198 v.AddArg3(ptr, val, mem)
34201 // match: (Store {t} ptr val mem)
34202 // cond: t.Size() == 8
34203 // result: (MOVQstore ptr val mem)
34205 t := auxToType(v.Aux)
34209 if !(t.Size() == 8) {
34212 v.reset(OpAMD64MOVQstore)
34213 v.AddArg3(ptr, val, mem)
34216 // match: (Store {t} ptr val mem)
34217 // cond: t.Size() == 4
34218 // result: (MOVLstore ptr val mem)
34220 t := auxToType(v.Aux)
34224 if !(t.Size() == 4) {
34227 v.reset(OpAMD64MOVLstore)
34228 v.AddArg3(ptr, val, mem)
34231 // match: (Store {t} ptr val mem)
34232 // cond: t.Size() == 2
34233 // result: (MOVWstore ptr val mem)
34235 t := auxToType(v.Aux)
34239 if !(t.Size() == 2) {
34242 v.reset(OpAMD64MOVWstore)
34243 v.AddArg3(ptr, val, mem)
34246 // match: (Store {t} ptr val mem)
34247 // cond: t.Size() == 1
34248 // result: (MOVBstore ptr val mem)
34250 t := auxToType(v.Aux)
34254 if !(t.Size() == 1) {
34257 v.reset(OpAMD64MOVBstore)
34258 v.AddArg3(ptr, val, mem)
34263 func rewriteValueAMD64_OpTrunc(v *Value) bool {
34265 // match: (Trunc x)
34266 // result: (ROUNDSD [3] x)
34269 v.reset(OpAMD64ROUNDSD)
34270 v.AuxInt = int8ToAuxInt(3)
34275 func rewriteValueAMD64_OpZero(v *Value) bool {
34279 config := b.Func.Config
34280 typ := &b.Func.Config.Types
34281 // match: (Zero [0] _ mem)
34282 // result: mem
34284 if auxIntToInt64(v.AuxInt) != 0 {
34291 // match: (Zero [1] destptr mem)
34292 // result: (MOVBstoreconst [makeValAndOff(0,0)] destptr mem)
34294 if auxIntToInt64(v.AuxInt) != 1 {
34299 v.reset(OpAMD64MOVBstoreconst)
34300 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
34301 v.AddArg2(destptr, mem)
34304 // match: (Zero [2] destptr mem)
34305 // result: (MOVWstoreconst [makeValAndOff(0,0)] destptr mem)
34307 if auxIntToInt64(v.AuxInt) != 2 {
34312 v.reset(OpAMD64MOVWstoreconst)
34313 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
34314 v.AddArg2(destptr, mem)
34317 // match: (Zero [4] destptr mem)
34318 // result: (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)
34320 if auxIntToInt64(v.AuxInt) != 4 {
34325 v.reset(OpAMD64MOVLstoreconst)
34326 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
34327 v.AddArg2(destptr, mem)
34330 // match: (Zero [8] destptr mem)
34331 // result: (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)
34333 if auxIntToInt64(v.AuxInt) != 8 {
34338 v.reset(OpAMD64MOVQstoreconst)
34339 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
34340 v.AddArg2(destptr, mem)
34343 // match: (Zero [3] destptr mem)
34344 // result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [makeValAndOff(0,0)] destptr mem))
34346 if auxIntToInt64(v.AuxInt) != 3 {
34351 v.reset(OpAMD64MOVBstoreconst)
34352 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 2))
34353 v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, types.TypeMem)
34354 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
34355 v0.AddArg2(destptr, mem)
34356 v.AddArg2(destptr, v0)
34359 // match: (Zero [5] destptr mem)
34360 // result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
34362 if auxIntToInt64(v.AuxInt) != 5 {
34367 v.reset(OpAMD64MOVBstoreconst)
34368 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
34369 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
34370 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
34371 v0.AddArg2(destptr, mem)
34372 v.AddArg2(destptr, v0)
34375 // match: (Zero [6] destptr mem)
34376 // result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
34378 if auxIntToInt64(v.AuxInt) != 6 {
34383 v.reset(OpAMD64MOVWstoreconst)
34384 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
34385 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
34386 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
34387 v0.AddArg2(destptr, mem)
34388 v.AddArg2(destptr, v0)
34391 // match: (Zero [7] destptr mem)
34392 // result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
34394 if auxIntToInt64(v.AuxInt) != 7 {
34399 v.reset(OpAMD64MOVLstoreconst)
34400 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 3))
34401 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
34402 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
34403 v0.AddArg2(destptr, mem)
34404 v.AddArg2(destptr, v0)
34407 // match: (Zero [s] destptr mem)
34408 // cond: s%8 != 0 && s > 8 && !config.useSSE
34409 // result: (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8]) (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
34411 s := auxIntToInt64(v.AuxInt)
34414 if !(s%8 != 0 && s > 8 && !config.useSSE) {
34418 v.AuxInt = int64ToAuxInt(s - s%8)
34419 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
34420 v0.AuxInt = int64ToAuxInt(s % 8)
34422 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
34423 v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
34424 v1.AddArg2(destptr, mem)
34428 // match: (Zero [16] destptr mem)
34429 // cond: !config.useSSE
34430 // result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
34432 if auxIntToInt64(v.AuxInt) != 16 {
34437 if !(!config.useSSE) {
34440 v.reset(OpAMD64MOVQstoreconst)
34441 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
34442 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
34443 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
34444 v0.AddArg2(destptr, mem)
34445 v.AddArg2(destptr, v0)
34448 // match: (Zero [24] destptr mem)
34449 // cond: !config.useSSE
34450 // result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)))
34452 if auxIntToInt64(v.AuxInt) != 24 {
34457 if !(!config.useSSE) {
34460 v.reset(OpAMD64MOVQstoreconst)
34461 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
34462 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
34463 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
34464 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
34465 v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
34466 v1.AddArg2(destptr, mem)
34467 v0.AddArg2(destptr, v1)
34468 v.AddArg2(destptr, v0)
34471 // match: (Zero [32] destptr mem)
34472 // cond: !config.useSSE
34473 // result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))))
34475 if auxIntToInt64(v.AuxInt) != 32 {
34480 if !(!config.useSSE) {
34483 v.reset(OpAMD64MOVQstoreconst)
34484 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 24))
34485 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
34486 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
34487 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
34488 v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
34489 v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
34490 v2.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
34491 v2.AddArg2(destptr, mem)
34492 v1.AddArg2(destptr, v2)
34493 v0.AddArg2(destptr, v1)
34494 v.AddArg2(destptr, v0)
34497 // match: (Zero [s] destptr mem)
34498 // cond: s > 8 && s < 16 && config.useSSE
34499 // result: (MOVQstoreconst [makeValAndOff(0,int32(s-8))] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
34501 s := auxIntToInt64(v.AuxInt)
34504 if !(s > 8 && s < 16 && config.useSSE) {
34507 v.reset(OpAMD64MOVQstoreconst)
34508 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, int32(s-8)))
34509 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
34510 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
34511 v0.AddArg2(destptr, mem)
34512 v.AddArg2(destptr, v0)
34515 // match: (Zero [s] destptr mem)
34516 // cond: s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE
34517 // result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVOstorezero destptr mem))
34519 s := auxIntToInt64(v.AuxInt)
34522 if !(s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE) {
34526 v.AuxInt = int64ToAuxInt(s - s%16)
34527 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
34528 v0.AuxInt = int64ToAuxInt(s % 16)
34530 v1 := b.NewValue0(v.Pos, OpAMD64MOVOstorezero, types.TypeMem)
34531 v1.AddArg2(destptr, mem)
34535 // match: (Zero [s] destptr mem)
34536 // cond: s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE
34537 // result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
34539 s := auxIntToInt64(v.AuxInt)
34542 if !(s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE) {
34546 v.AuxInt = int64ToAuxInt(s - s%16)
34547 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
34548 v0.AuxInt = int64ToAuxInt(s % 16)
34550 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
34551 v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
34552 v1.AddArg2(destptr, mem)
34556 // match: (Zero [16] destptr mem)
34557 // cond: config.useSSE
34558 // result: (MOVOstorezero destptr mem)
34560 if auxIntToInt64(v.AuxInt) != 16 {
34565 if !(config.useSSE) {
34568 v.reset(OpAMD64MOVOstorezero)
34569 v.AddArg2(destptr, mem)
34572 // match: (Zero [32] destptr mem)
34573 // cond: config.useSSE
34574 // result: (MOVOstorezero (OffPtr <destptr.Type> destptr [16]) (MOVOstorezero destptr mem))
34576 if auxIntToInt64(v.AuxInt) != 32 {
34581 if !(config.useSSE) {
34584 v.reset(OpAMD64MOVOstorezero)
34585 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
34586 v0.AuxInt = int64ToAuxInt(16)
34588 v1 := b.NewValue0(v.Pos, OpAMD64MOVOstorezero, types.TypeMem)
34589 v1.AddArg2(destptr, mem)
34593 // match: (Zero [48] destptr mem)
34594 // cond: config.useSSE
34595 // result: (MOVOstorezero (OffPtr <destptr.Type> destptr [32]) (MOVOstorezero (OffPtr <destptr.Type> destptr [16]) (MOVOstorezero destptr mem)))
34597 if auxIntToInt64(v.AuxInt) != 48 {
34602 if !(config.useSSE) {
34605 v.reset(OpAMD64MOVOstorezero)
34606 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
34607 v0.AuxInt = int64ToAuxInt(32)
34609 v1 := b.NewValue0(v.Pos, OpAMD64MOVOstorezero, types.TypeMem)
34610 v2 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
34611 v2.AuxInt = int64ToAuxInt(16)
34613 v3 := b.NewValue0(v.Pos, OpAMD64MOVOstorezero, types.TypeMem)
34614 v3.AddArg2(destptr, mem)
34619 // match: (Zero [64] destptr mem)
34620 // cond: config.useSSE
34621 // result: (MOVOstorezero (OffPtr <destptr.Type> destptr [48]) (MOVOstorezero (OffPtr <destptr.Type> destptr [32]) (MOVOstorezero (OffPtr <destptr.Type> destptr [16]) (MOVOstorezero destptr mem))))
34623 if auxIntToInt64(v.AuxInt) != 64 {
34628 if !(config.useSSE) {
34631 v.reset(OpAMD64MOVOstorezero)
34632 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
34633 v0.AuxInt = int64ToAuxInt(48)
34635 v1 := b.NewValue0(v.Pos, OpAMD64MOVOstorezero, types.TypeMem)
34636 v2 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
34637 v2.AuxInt = int64ToAuxInt(32)
34639 v3 := b.NewValue0(v.Pos, OpAMD64MOVOstorezero, types.TypeMem)
34640 v4 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
34641 v4.AuxInt = int64ToAuxInt(16)
34643 v5 := b.NewValue0(v.Pos, OpAMD64MOVOstorezero, types.TypeMem)
34644 v5.AddArg2(destptr, mem)
34650 // match: (Zero [s] destptr mem)
34651 // cond: s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice
34652 // result: (DUFFZERO [s] destptr mem)
34654 s := auxIntToInt64(v.AuxInt)
34657 if !(s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice) {
34660 v.reset(OpAMD64DUFFZERO)
34661 v.AuxInt = int64ToAuxInt(s)
34662 v.AddArg2(destptr, mem)
34665 // match: (Zero [s] destptr mem)
34666 // cond: (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0
34667 // result: (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)
34669 s := auxIntToInt64(v.AuxInt)
34672 if !((s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0) {
34675 v.reset(OpAMD64REPSTOSQ)
34676 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
34677 v0.AuxInt = int64ToAuxInt(s / 8)
34678 v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
34679 v1.AuxInt = int64ToAuxInt(0)
34680 v.AddArg4(destptr, v0, v1, mem)
34685 func rewriteBlockAMD64(b *Block) bool {
34688 // match: (EQ (TESTL (SHLL (MOVLconst [1]) x) y))
34689 // result: (UGE (BTL x y))
34690 for b.Controls[0].Op == OpAMD64TESTL {
34691 v_0 := b.Controls[0]
34693 v_0_0 := v_0.Args[0]
34694 v_0_1 := v_0.Args[1]
34695 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
34696 if v_0_0.Op != OpAMD64SHLL {
34700 v_0_0_0 := v_0_0.Args[0]
34701 if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
34705 v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
34707 b.resetWithControl(BlockAMD64UGE, v0)
34712 // match: (EQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
34713 // result: (UGE (BTQ x y))
34714 for b.Controls[0].Op == OpAMD64TESTQ {
34715 v_0 := b.Controls[0]
34717 v_0_0 := v_0.Args[0]
34718 v_0_1 := v_0.Args[1]
34719 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
34720 if v_0_0.Op != OpAMD64SHLQ {
34724 v_0_0_0 := v_0_0.Args[0]
34725 if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
34729 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
34731 b.resetWithControl(BlockAMD64UGE, v0)
34736 // match: (EQ (TESTLconst [c] x))
34737 // cond: isUint32PowerOfTwo(int64(c))
34738 // result: (UGE (BTLconst [int8(log32(c))] x))
34739 for b.Controls[0].Op == OpAMD64TESTLconst {
34740 v_0 := b.Controls[0]
34741 c := auxIntToInt32(v_0.AuxInt)
34743 if !(isUint32PowerOfTwo(int64(c))) {
34746 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
34747 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
34749 b.resetWithControl(BlockAMD64UGE, v0)
34752 // match: (EQ (TESTQconst [c] x))
34753 // cond: isUint64PowerOfTwo(int64(c))
34754 // result: (UGE (BTQconst [int8(log32(c))] x))
34755 for b.Controls[0].Op == OpAMD64TESTQconst {
34756 v_0 := b.Controls[0]
34757 c := auxIntToInt32(v_0.AuxInt)
34759 if !(isUint64PowerOfTwo(int64(c))) {
34762 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
34763 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
34765 b.resetWithControl(BlockAMD64UGE, v0)
34768 // match: (EQ (TESTQ (MOVQconst [c]) x))
34769 // cond: isUint64PowerOfTwo(c)
34770 // result: (UGE (BTQconst [int8(log64(c))] x))
34771 for b.Controls[0].Op == OpAMD64TESTQ {
34772 v_0 := b.Controls[0]
34774 v_0_0 := v_0.Args[0]
34775 v_0_1 := v_0.Args[1]
34776 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
34777 if v_0_0.Op != OpAMD64MOVQconst {
34780 c := auxIntToInt64(v_0_0.AuxInt)
34782 if !(isUint64PowerOfTwo(c)) {
34785 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
34786 v0.AuxInt = int8ToAuxInt(int8(log64(c)))
34788 b.resetWithControl(BlockAMD64UGE, v0)
34793 // match: (EQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
34794 // cond: z1==z2
34795 // result: (UGE (BTQconst [63] x))
34796 for b.Controls[0].Op == OpAMD64TESTQ {
34797 v_0 := b.Controls[0]
34799 v_0_0 := v_0.Args[0]
34800 v_0_1 := v_0.Args[1]
34801 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
34803 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
34807 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
34815 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
34816 v0.AuxInt = int8ToAuxInt(63)
34818 b.resetWithControl(BlockAMD64UGE, v0)
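		// SHLQconst [63] (SHRQconst [63] x) isolates the sign bit of x, so testing
		// that value is the same as testing bit 63 of x itself; the 31-bit shift
		// pairs below isolate bit 31, and the reversed SHR-of-SHL pairs isolate
		// bit 0 the same way.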
		// match: (EQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
		// result: (UGE (BTQconst [31] x))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
				if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(31)
				b.resetWithControl(BlockAMD64UGE, v0)
		// match: (EQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
		// result: (UGE (BTQconst [0] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(0)
				b.resetWithControl(BlockAMD64UGE, v0)
		// match: (EQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
		// result: (UGE (BTLconst [0] x))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(0)
				b.resetWithControl(BlockAMD64UGE, v0)
		// match: (EQ (TESTQ z1:(SHRQconst [63] x) z2))
		// result: (UGE (BTQconst [63] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(63)
				b.resetWithControl(BlockAMD64UGE, v0)
		// match: (EQ (TESTL z1:(SHRLconst [31] x) z2))
		// result: (UGE (BTLconst [31] x))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(31)
				b.resetWithControl(BlockAMD64UGE, v0)
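		// A bare SHRQconst [63] (or SHRLconst [31]) leaves only the original sign
		// bit in the shifted value, so testing that value reduces to a single bit
		// test of the top bit of x.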
		// match: (EQ (InvertFlags cmp) yes no)
		// result: (EQ cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			b.resetWithControl(BlockAMD64EQ, cmp)
		// match: (EQ (FlagEQ) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
		// match: (EQ (FlagLT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
		// match: (EQ (FlagLT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
		// match: (EQ (FlagGT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
		// match: (EQ (FlagGT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
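		// FlagEQ, FlagLT_ULT, FlagLT_UGT, FlagGT_ULT and FlagGT_UGT are statically
		// known flag results, so the conditional block collapses to BlockFirst;
		// "(First no yes)" means the successors end up swapped because the
		// condition is known to be false.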
		// match: (GE (InvertFlags cmp) yes no)
		// result: (LE cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			b.resetWithControl(BlockAMD64LE, cmp)
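		// InvertFlags records that the comparison operands were swapped, so the
		// block condition is mirrored: GE becomes LE, GT becomes LT, and the
		// unsigned conditions are mirrored the same way further down; EQ and NE
		// are unchanged.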
		// match: (GE (FlagEQ) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
		// match: (GE (FlagLT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
		// match: (GE (FlagLT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
		// match: (GE (FlagGT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
		// match: (GE (FlagGT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
		// match: (GT (InvertFlags cmp) yes no)
		// result: (LT cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			b.resetWithControl(BlockAMD64LT, cmp)
		// match: (GT (FlagEQ) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
		// match: (GT (FlagLT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
		// match: (GT (FlagLT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
		// match: (GT (FlagGT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
		// match: (GT (FlagGT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
		// match: (If (SETL cmp) yes no)
		// result: (LT cmp yes no)
		for b.Controls[0].Op == OpAMD64SETL {
			v_0 := b.Controls[0]
			b.resetWithControl(BlockAMD64LT, cmp)
		// match: (If (SETLE cmp) yes no)
		// result: (LE cmp yes no)
		for b.Controls[0].Op == OpAMD64SETLE {
			v_0 := b.Controls[0]
			b.resetWithControl(BlockAMD64LE, cmp)
		// match: (If (SETG cmp) yes no)
		// result: (GT cmp yes no)
		for b.Controls[0].Op == OpAMD64SETG {
			v_0 := b.Controls[0]
			b.resetWithControl(BlockAMD64GT, cmp)
		// match: (If (SETGE cmp) yes no)
		// result: (GE cmp yes no)
		for b.Controls[0].Op == OpAMD64SETGE {
			v_0 := b.Controls[0]
			b.resetWithControl(BlockAMD64GE, cmp)
		// match: (If (SETEQ cmp) yes no)
		// result: (EQ cmp yes no)
		for b.Controls[0].Op == OpAMD64SETEQ {
			v_0 := b.Controls[0]
			b.resetWithControl(BlockAMD64EQ, cmp)
		// match: (If (SETNE cmp) yes no)
		// result: (NE cmp yes no)
		for b.Controls[0].Op == OpAMD64SETNE {
			v_0 := b.Controls[0]
			b.resetWithControl(BlockAMD64NE, cmp)
		// match: (If (SETB cmp) yes no)
		// result: (ULT cmp yes no)
		for b.Controls[0].Op == OpAMD64SETB {
			v_0 := b.Controls[0]
			b.resetWithControl(BlockAMD64ULT, cmp)
		// match: (If (SETBE cmp) yes no)
		// result: (ULE cmp yes no)
		for b.Controls[0].Op == OpAMD64SETBE {
			v_0 := b.Controls[0]
			b.resetWithControl(BlockAMD64ULE, cmp)
		// match: (If (SETA cmp) yes no)
		// result: (UGT cmp yes no)
		for b.Controls[0].Op == OpAMD64SETA {
			v_0 := b.Controls[0]
			b.resetWithControl(BlockAMD64UGT, cmp)
		// match: (If (SETAE cmp) yes no)
		// result: (UGE cmp yes no)
		for b.Controls[0].Op == OpAMD64SETAE {
			v_0 := b.Controls[0]
			b.resetWithControl(BlockAMD64UGE, cmp)
		// match: (If (SETO cmp) yes no)
		// result: (OS cmp yes no)
		for b.Controls[0].Op == OpAMD64SETO {
			v_0 := b.Controls[0]
			b.resetWithControl(BlockAMD64OS, cmp)
		// match: (If (SETGF cmp) yes no)
		// result: (UGT cmp yes no)
		for b.Controls[0].Op == OpAMD64SETGF {
			v_0 := b.Controls[0]
			b.resetWithControl(BlockAMD64UGT, cmp)
		// match: (If (SETGEF cmp) yes no)
		// result: (UGE cmp yes no)
		for b.Controls[0].Op == OpAMD64SETGEF {
			v_0 := b.Controls[0]
			b.resetWithControl(BlockAMD64UGE, cmp)
		// match: (If (SETEQF cmp) yes no)
		// result: (EQF cmp yes no)
		for b.Controls[0].Op == OpAMD64SETEQF {
			v_0 := b.Controls[0]
			b.resetWithControl(BlockAMD64EQF, cmp)
		// match: (If (SETNEF cmp) yes no)
		// result: (NEF cmp yes no)
		for b.Controls[0].Op == OpAMD64SETNEF {
			v_0 := b.Controls[0]
			b.resetWithControl(BlockAMD64NEF, cmp)
		// match: (If cond yes no)
		// result: (NE (TESTB cond cond) yes no)
			cond := b.Controls[0]
			v0 := b.NewValue0(cond.Pos, OpAMD64TESTB, types.TypeFlags)
			v0.AddArg2(cond, cond)
			b.resetWithControl(BlockAMD64NE, v0)
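		// An If block whose control is already a SETcc branches on the underlying
		// flags via the matching block kind; the catch-all rule above materializes
		// any other boolean control with TESTB cond, cond and branches on NE.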
		// match: (LE (InvertFlags cmp) yes no)
		// result: (GE cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			b.resetWithControl(BlockAMD64GE, cmp)
		// match: (LE (FlagEQ) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
		// match: (LE (FlagLT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
		// match: (LE (FlagLT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
		// match: (LE (FlagGT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
		// match: (LE (FlagGT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
		// match: (LT (InvertFlags cmp) yes no)
		// result: (GT cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			b.resetWithControl(BlockAMD64GT, cmp)
		// match: (LT (FlagEQ) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
		// match: (LT (FlagLT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
		// match: (LT (FlagLT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
		// match: (LT (FlagGT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
		// match: (LT (FlagGT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
		// match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no)
		// result: (LT cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETL {
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETL || cmp != v_0_1.Args[0] {
			b.resetWithControl(BlockAMD64LT, cmp)
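		// NE (TESTB (SETcc cmp) (SETcc cmp)) is the generic boolean branch produced
		// by the If lowering above; when the boolean is a single SETcc, the rules
		// below branch on the underlying flags value directly and drop the
		// materialized byte.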
		// match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
		// result: (LE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETLE {
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETLE || cmp != v_0_1.Args[0] {
			b.resetWithControl(BlockAMD64LE, cmp)
		// match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no)
		// result: (GT cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETG {
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETG || cmp != v_0_1.Args[0] {
			b.resetWithControl(BlockAMD64GT, cmp)
		// match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
		// result: (GE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETGE {
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETGE || cmp != v_0_1.Args[0] {
			b.resetWithControl(BlockAMD64GE, cmp)
		// match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
		// result: (EQ cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETEQ {
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETEQ || cmp != v_0_1.Args[0] {
			b.resetWithControl(BlockAMD64EQ, cmp)
		// match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
		// result: (NE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETNE {
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETNE || cmp != v_0_1.Args[0] {
			b.resetWithControl(BlockAMD64NE, cmp)
		// match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no)
		// result: (ULT cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETB {
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETB || cmp != v_0_1.Args[0] {
			b.resetWithControl(BlockAMD64ULT, cmp)
		// match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
		// result: (ULE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETBE {
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETBE || cmp != v_0_1.Args[0] {
			b.resetWithControl(BlockAMD64ULE, cmp)
		// match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no)
		// result: (UGT cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETA {
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETA || cmp != v_0_1.Args[0] {
			b.resetWithControl(BlockAMD64UGT, cmp)
		// match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
		// result: (UGE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETAE {
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETAE || cmp != v_0_1.Args[0] {
			b.resetWithControl(BlockAMD64UGE, cmp)
		// match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no)
		// result: (OS cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETO {
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETO || cmp != v_0_1.Args[0] {
			b.resetWithControl(BlockAMD64OS, cmp)
		// match: (NE (TESTL (SHLL (MOVLconst [1]) x) y))
		// result: (ULT (BTL x y))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				if v_0_0.Op != OpAMD64SHLL {
				v_0_0_0 := v_0_0.Args[0]
				if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
				b.resetWithControl(BlockAMD64ULT, v0)
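		// These NE rules mirror the EQ bit-test rules above: after BT the carry
		// flag holds the tested bit, so a nonzero TEST result (bit set) becomes
		// ULT (CF == 1).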
		// match: (NE (TESTQ (SHLQ (MOVQconst [1]) x) y))
		// result: (ULT (BTQ x y))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				if v_0_0.Op != OpAMD64SHLQ {
				v_0_0_0 := v_0_0.Args[0]
				if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
				b.resetWithControl(BlockAMD64ULT, v0)
		// match: (NE (TESTLconst [c] x))
		// cond: isUint32PowerOfTwo(int64(c))
		// result: (ULT (BTLconst [int8(log32(c))] x))
		for b.Controls[0].Op == OpAMD64TESTLconst {
			v_0 := b.Controls[0]
			c := auxIntToInt32(v_0.AuxInt)
			if !(isUint32PowerOfTwo(int64(c))) {
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(int8(log32(c)))
			b.resetWithControl(BlockAMD64ULT, v0)
		// match: (NE (TESTQconst [c] x))
		// cond: isUint64PowerOfTwo(int64(c))
		// result: (ULT (BTQconst [int8(log32(c))] x))
		for b.Controls[0].Op == OpAMD64TESTQconst {
			v_0 := b.Controls[0]
			c := auxIntToInt32(v_0.AuxInt)
			if !(isUint64PowerOfTwo(int64(c))) {
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(int8(log32(c)))
			b.resetWithControl(BlockAMD64ULT, v0)
		// match: (NE (TESTQ (MOVQconst [c]) x))
		// cond: isUint64PowerOfTwo(c)
		// result: (ULT (BTQconst [int8(log64(c))] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				if v_0_0.Op != OpAMD64MOVQconst {
				c := auxIntToInt64(v_0_0.AuxInt)
				if !(isUint64PowerOfTwo(c)) {
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(int8(log64(c)))
				b.resetWithControl(BlockAMD64ULT, v0)
		// match: (NE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
		// result: (ULT (BTQconst [63] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
				if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(63)
				b.resetWithControl(BlockAMD64ULT, v0)
		// match: (NE (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
		// result: (ULT (BTQconst [31] x))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
				if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(31)
				b.resetWithControl(BlockAMD64ULT, v0)
		// match: (NE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
		// result: (ULT (BTQconst [0] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(0)
				b.resetWithControl(BlockAMD64ULT, v0)
		// match: (NE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
		// result: (ULT (BTLconst [0] x))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(0)
				b.resetWithControl(BlockAMD64ULT, v0)
		// match: (NE (TESTQ z1:(SHRQconst [63] x) z2))
		// result: (ULT (BTQconst [63] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(63)
				b.resetWithControl(BlockAMD64ULT, v0)
		// match: (NE (TESTL z1:(SHRLconst [31] x) z2))
		// result: (ULT (BTLconst [31] x))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(31)
				b.resetWithControl(BlockAMD64ULT, v0)
		// match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
		// result: (UGT cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETGF {
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETGF || cmp != v_0_1.Args[0] {
			b.resetWithControl(BlockAMD64UGT, cmp)
		// match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
		// result: (UGE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETGEF {
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETGEF || cmp != v_0_1.Args[0] {
			b.resetWithControl(BlockAMD64UGE, cmp)
		// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
		// result: (EQF cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETEQF {
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETEQF || cmp != v_0_1.Args[0] {
			b.resetWithControl(BlockAMD64EQF, cmp)
		// match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
		// result: (NEF cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETNEF {
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETNEF || cmp != v_0_1.Args[0] {
			b.resetWithControl(BlockAMD64NEF, cmp)
		// match: (NE (InvertFlags cmp) yes no)
		// result: (NE cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			b.resetWithControl(BlockAMD64NE, cmp)
		// match: (NE (FlagEQ) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
		// match: (NE (FlagLT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
		// match: (NE (FlagLT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
		// match: (NE (FlagGT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
		// match: (NE (FlagGT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
	case BlockAMD64UGE:
		// match: (UGE (TESTQ x x) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			if x != v_0.Args[0] {
			b.Reset(BlockFirst)
		// match: (UGE (TESTL x x) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			if x != v_0.Args[0] {
			b.Reset(BlockFirst)
		// match: (UGE (TESTW x x) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64TESTW {
			v_0 := b.Controls[0]
			if x != v_0.Args[0] {
			b.Reset(BlockFirst)
		// match: (UGE (TESTB x x) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			if x != v_0.Args[0] {
			b.Reset(BlockFirst)
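		// TESTQ/TESTL/TESTW/TESTB x x compares x&x with zero and never sets the
		// carry flag, so UGE (CF == 0) always takes the yes branch, and the
		// corresponding ULT rules below always take the no branch.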
		// match: (UGE (InvertFlags cmp) yes no)
		// result: (ULE cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			b.resetWithControl(BlockAMD64ULE, cmp)
		// match: (UGE (FlagEQ) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
		// match: (UGE (FlagLT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
		// match: (UGE (FlagLT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
		// match: (UGE (FlagGT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
		// match: (UGE (FlagGT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
	case BlockAMD64UGT:
		// match: (UGT (InvertFlags cmp) yes no)
		// result: (ULT cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			b.resetWithControl(BlockAMD64ULT, cmp)
		// match: (UGT (FlagEQ) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
		// match: (UGT (FlagLT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
		// match: (UGT (FlagLT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
		// match: (UGT (FlagGT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
		// match: (UGT (FlagGT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
	case BlockAMD64ULE:
		// match: (ULE (InvertFlags cmp) yes no)
		// result: (UGE cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			b.resetWithControl(BlockAMD64UGE, cmp)
		// match: (ULE (FlagEQ) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
		// match: (ULE (FlagLT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
		// match: (ULE (FlagLT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
		// match: (ULE (FlagGT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
		// match: (ULE (FlagGT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
	case BlockAMD64ULT:
		// match: (ULT (TESTQ x x) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			if x != v_0.Args[0] {
			b.Reset(BlockFirst)
		// match: (ULT (TESTL x x) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			if x != v_0.Args[0] {
			b.Reset(BlockFirst)
		// match: (ULT (TESTW x x) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64TESTW {
			v_0 := b.Controls[0]
			if x != v_0.Args[0] {
			b.Reset(BlockFirst)
		// match: (ULT (TESTB x x) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			if x != v_0.Args[0] {
			b.Reset(BlockFirst)
		// match: (ULT (InvertFlags cmp) yes no)
		// result: (UGT cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			b.resetWithControl(BlockAMD64UGT, cmp)
		// match: (ULT (FlagEQ) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
		// match: (ULT (FlagLT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
		// match: (ULT (FlagLT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
		// match: (ULT (FlagGT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
		// match: (ULT (FlagGT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)