--- /dev/null
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Lowering arithmetic
+// Note: 8- and 16-bit operations are lowered to the 32-bit (W-suffixed)
+// instructions; the unused high bits of the result are ignored by
+// subsequent operations.
+(Add64 x y) -> (ADD x y)
+(AddPtr x y) -> (ADD x y)
+(Add32 x y) -> (ADDW x y)
+(Add16 x y) -> (ADDW x y)
+(Add8 x y) -> (ADDW x y)
+(Add32F x y) -> (FADDS x y)
+(Add64F x y) -> (FADD x y)
+
+(Sub64 x y) -> (SUB x y)
+(SubPtr x y) -> (SUB x y)
+(Sub32 x y) -> (SUBW x y)
+(Sub16 x y) -> (SUBW x y)
+(Sub8 x y) -> (SUBW x y)
+(Sub32F x y) -> (FSUBS x y)
+(Sub64F x y) -> (FSUB x y)
+
+(Mul64 x y) -> (MULLD x y)
+(Mul32 x y) -> (MULLW x y)
+(Mul16 x y) -> (MULLW x y)
+(Mul8 x y) -> (MULLW x y)
+(Mul32F x y) -> (FMULS x y)
+(Mul64F x y) -> (FMUL x y)
+
+(Div32F x y) -> (FDIVS x y)
+(Div64F x y) -> (FDIV x y)
+
+(Div64 x y) -> (DIVD x y)
+(Div64u x y) -> (DIVDU x y)
+// DIVW/DIVWU has a 64-bit dividend and a 32-bit divisor,
+// so a sign/zero extension of the dividend is required.
+(Div32 x y) -> (DIVW (MOVWreg x) y)
+(Div32u x y) -> (DIVWU (MOVWZreg x) y)
+(Div16 x y) -> (DIVW (MOVHreg x) (MOVHreg y))
+(Div16u x y) -> (DIVWU (MOVHZreg x) (MOVHZreg y))
+(Div8 x y) -> (DIVW (MOVBreg x) (MOVBreg y))
+(Div8u x y) -> (DIVWU (MOVBZreg x) (MOVBZreg y))
+
+// High-word multiplies. For operands narrower than 64 bits, extend the
+// inputs, multiply in a wider register and shift the high part down.
+(Hmul64 x y) -> (MULHD x y)
+(Hmul64u x y) -> (MULHDU x y)
+(Hmul32 x y) -> (SRDconst [32] (MULLD (MOVWreg x) (MOVWreg y)))
+(Hmul32u x y) -> (SRDconst [32] (MULLD (MOVWZreg x) (MOVWZreg y)))
+(Hmul16 x y) -> (SRDconst [16] (MULLW (MOVHreg x) (MOVHreg y)))
+(Hmul16u x y) -> (SRDconst [16] (MULLW (MOVHZreg x) (MOVHZreg y)))
+(Hmul8 x y) -> (SRDconst [8] (MULLW (MOVBreg x) (MOVBreg y)))
+(Hmul8u x y) -> (SRDconst [8] (MULLW (MOVBZreg x) (MOVBZreg y)))
+
+(Mod64 x y) -> (MODD x y)
+(Mod64u x y) -> (MODDU x y)
+// MODW/MODWU has a 64-bit dividend and a 32-bit divisor,
+// so a sign/zero extension of the dividend is required.
+(Mod32 x y) -> (MODW (MOVWreg x) y)
+(Mod32u x y) -> (MODWU (MOVWZreg x) y)
+(Mod16 x y) -> (MODW (MOVHreg x) (MOVHreg y))
+(Mod16u x y) -> (MODWU (MOVHZreg x) (MOVHZreg y))
+(Mod8 x y) -> (MODW (MOVBreg x) (MOVBreg y))
+(Mod8u x y) -> (MODWU (MOVBZreg x) (MOVBZreg y))
+
+// Overflow-free unsigned average: x/2 + y/2 + (x&y&1).
+(Avg64u <t> x y) -> (ADD (ADD <t> (SRDconst <t> x [1]) (SRDconst <t> y [1])) (ANDconst <t> (AND <t> x y) [1]))
+
+(And64 x y) -> (AND x y)
+(And32 x y) -> (ANDW x y)
+(And16 x y) -> (ANDW x y)
+(And8 x y) -> (ANDW x y)
+
+(Or64 x y) -> (OR x y)
+(Or32 x y) -> (ORW x y)
+(Or16 x y) -> (ORW x y)
+(Or8 x y) -> (ORW x y)
+
+(Xor64 x y) -> (XOR x y)
+(Xor32 x y) -> (XORW x y)
+(Xor16 x y) -> (XORW x y)
+(Xor8 x y) -> (XORW x y)
+
+(Neg64 x) -> (NEG x)
+(Neg32 x) -> (NEGW x)
+(Neg16 x) -> (NEGW (MOVHreg x))
+(Neg8 x) -> (NEGW (MOVBreg x))
+(Neg32F x) -> (FNEGS x)
+(Neg64F x) -> (FNEG x)
+
+// Complement is implemented as XOR with all-ones.
+// NOTE(review): the "&& true" guards below are no-op conditions —
+// presumably kept so rulegen treats these as conditional rules; confirm
+// before removing.
+(Com64 x) -> (NOT x)
+(Com32 x) -> (NOTW x)
+(Com16 x) -> (NOTW x)
+(Com8 x) -> (NOTW x)
+(NOT x) && true -> (XORconst [-1] x)
+(NOTW x) && true -> (XORWconst [-1] x)
+
+// Lowering boolean ops
+(AndB x y) -> (ANDW x y)
+(OrB x y) -> (ORW x y)
+(Not x) -> (XORWconst [1] x)
+
+// Lowering pointer arithmetic
+// Offsets from SP are folded directly into the address.
+(OffPtr [off] ptr:(SP)) -> (MOVDaddr [off] ptr)
+(OffPtr [off] ptr) && is32Bit(off) -> (ADDconst [off] ptr)
+(OffPtr [off] ptr) -> (ADD (MOVDconst [off]) ptr)
+
+(Sqrt x) -> (FSQRT x)
+
+// Lowering extension
+// Note: we always extend to 64 bits even though some ops don't need that many result bits.
+(SignExt8to16 x) -> (MOVBreg x)
+(SignExt8to32 x) -> (MOVBreg x)
+(SignExt8to64 x) -> (MOVBreg x)
+(SignExt16to32 x) -> (MOVHreg x)
+(SignExt16to64 x) -> (MOVHreg x)
+(SignExt32to64 x) -> (MOVWreg x)
+
+(ZeroExt8to16 x) -> (MOVBZreg x)
+(ZeroExt8to32 x) -> (MOVBZreg x)
+(ZeroExt8to64 x) -> (MOVBZreg x)
+(ZeroExt16to32 x) -> (MOVHZreg x)
+(ZeroExt16to64 x) -> (MOVHZreg x)
+(ZeroExt32to64 x) -> (MOVWZreg x)
+
+// Lowering truncation
+// Because we ignore high parts of registers, truncates are just copies.
+(Trunc16to8 x) -> x
+(Trunc32to8 x) -> x
+(Trunc32to16 x) -> x
+(Trunc64to8 x) -> x
+(Trunc64to16 x) -> x
+(Trunc64to32 x) -> x
+
+// Lowering float <-> int
+// NOTE(review): the C*BRA/L*BR mnemonics follow the z/Architecture
+// convert/load-rounded instructions; rounding behavior comes from the op
+// definitions, not from these rules — confirm there.
+(Cvt32to32F x) -> (CEFBRA x)
+(Cvt32to64F x) -> (CDFBRA x)
+(Cvt64to32F x) -> (CEGBRA x)
+(Cvt64to64F x) -> (CDGBRA x)
+
+(Cvt32Fto32 x) -> (CFEBRA x)
+(Cvt32Fto64 x) -> (CGEBRA x)
+(Cvt64Fto32 x) -> (CFDBRA x)
+(Cvt64Fto64 x) -> (CGDBRA x)
+
+(Cvt32Fto64F x) -> (LDEBR x)
+(Cvt64Fto32F x) -> (LEDBR x)
+
+// Lowering shifts
+// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
+// result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffffffffffff)
+// The mask is produced by SUBE(W)carrymask from the borrow of an unsigned
+// compare of the shift amount. NOTE(review): presumably all-ones when the
+// amount is in range and zero otherwise — confirm against the op definitions.
+(Lsh64x64 <t> x y) -> (AND (SLD <t> x y) (SUBEcarrymask <t> (CMPUconst y [63])))
+(Lsh64x32 <t> x y) -> (AND (SLD <t> x y) (SUBEcarrymask <t> (CMPWUconst y [63])))
+(Lsh64x16 <t> x y) -> (AND (SLD <t> x y) (SUBEcarrymask <t> (CMPWUconst (MOVHZreg y) [63])))
+(Lsh64x8 <t> x y) -> (AND (SLD <t> x y) (SUBEcarrymask <t> (CMPWUconst (MOVBZreg y) [63])))
+
+// For 16- and 8-bit left shifts only the low result bits matter, so the
+// 32-bit width check (31) is sufficient.
+(Lsh32x64 <t> x y) -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPUconst y [31])))
+(Lsh32x32 <t> x y) -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst y [31])))
+(Lsh32x16 <t> x y) -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [31])))
+(Lsh32x8 <t> x y) -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [31])))
+
+(Lsh16x64 <t> x y) -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPUconst y [31])))
+(Lsh16x32 <t> x y) -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst y [31])))
+(Lsh16x16 <t> x y) -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [31])))
+(Lsh16x8 <t> x y) -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [31])))
+
+(Lsh8x64 <t> x y) -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPUconst y [31])))
+(Lsh8x32 <t> x y) -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst y [31])))
+(Lsh8x16 <t> x y) -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [31])))
+(Lsh8x8 <t> x y) -> (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [31])))
+
+// Rotates take a constant rotation count, reduced mod the width.
+(Lrot64 <t> x [c]) -> (RLLGconst <t> [c&63] x)
+(Lrot32 <t> x [c]) -> (RLLconst <t> [c&31] x)
+
+(Rsh64Ux64 <t> x y) -> (AND (SRD <t> x y) (SUBEcarrymask <t> (CMPUconst y [63])))
+(Rsh64Ux32 <t> x y) -> (AND (SRD <t> x y) (SUBEcarrymask <t> (CMPWUconst y [63])))
+(Rsh64Ux16 <t> x y) -> (AND (SRD <t> x y) (SUBEcarrymask <t> (CMPWUconst (MOVHZreg y) [63])))
+(Rsh64Ux8 <t> x y) -> (AND (SRD <t> x y) (SUBEcarrymask <t> (CMPWUconst (MOVBZreg y) [63])))
+
+(Rsh32Ux64 <t> x y) -> (ANDW (SRW <t> x y) (SUBEWcarrymask <t> (CMPUconst y [31])))
+(Rsh32Ux32 <t> x y) -> (ANDW (SRW <t> x y) (SUBEWcarrymask <t> (CMPWUconst y [31])))
+(Rsh32Ux16 <t> x y) -> (ANDW (SRW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [31])))
+(Rsh32Ux8 <t> x y) -> (ANDW (SRW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [31])))
+
+// Right shifts of sub-32-bit values must zero-extend the input first
+// (the high bits would otherwise shift into the result).
+(Rsh16Ux64 <t> x y) -> (ANDW (SRW <t> (MOVHZreg x) y) (SUBEWcarrymask <t> (CMPUconst y [15])))
+(Rsh16Ux32 <t> x y) -> (ANDW (SRW <t> (MOVHZreg x) y) (SUBEWcarrymask <t> (CMPWUconst y [15])))
+(Rsh16Ux16 <t> x y) -> (ANDW (SRW <t> (MOVHZreg x) y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [15])))
+(Rsh16Ux8 <t> x y) -> (ANDW (SRW <t> (MOVHZreg x) y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [15])))
+
+(Rsh8Ux64 <t> x y) -> (ANDW (SRW <t> (MOVBZreg x) y) (SUBEWcarrymask <t> (CMPUconst y [7])))
+(Rsh8Ux32 <t> x y) -> (ANDW (SRW <t> (MOVBZreg x) y) (SUBEWcarrymask <t> (CMPWUconst y [7])))
+(Rsh8Ux16 <t> x y) -> (ANDW (SRW <t> (MOVBZreg x) y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [7])))
+(Rsh8Ux8 <t> x y) -> (ANDW (SRW <t> (MOVBZreg x) y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [7])))
+
+// Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value.
+// We implement this by setting the shift value to -1 (all ones) if the shift value is >= width.
+(Rsh64x64 <t> x y) -> (SRAD <t> x (OR <y.Type> y (NOT <y.Type> (SUBEcarrymask <y.Type> (CMPUconst y [63])))))
+(Rsh64x32 <t> x y) -> (SRAD <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst y [63])))))
+(Rsh64x16 <t> x y) -> (SRAD <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVHZreg y) [63])))))
+(Rsh64x8 <t> x y) -> (SRAD <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVBZreg y) [63])))))
+
+(Rsh32x64 <t> x y) -> (SRAW <t> x (OR <y.Type> y (NOT <y.Type> (SUBEcarrymask <y.Type> (CMPUconst y [31])))))
+(Rsh32x32 <t> x y) -> (SRAW <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst y [31])))))
+(Rsh32x16 <t> x y) -> (SRAW <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVHZreg y) [31])))))
+(Rsh32x8 <t> x y) -> (SRAW <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVBZreg y) [31])))))
+
+// Sub-32-bit signed shifts sign-extend the input first.
+(Rsh16x64 <t> x y) -> (SRAW <t> (MOVHreg x) (OR <y.Type> y (NOT <y.Type> (SUBEcarrymask <y.Type> (CMPUconst y [15])))))
+(Rsh16x32 <t> x y) -> (SRAW <t> (MOVHreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst y [15])))))
+(Rsh16x16 <t> x y) -> (SRAW <t> (MOVHreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVHZreg y) [15])))))
+(Rsh16x8 <t> x y) -> (SRAW <t> (MOVHreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVBZreg y) [15])))))
+
+(Rsh8x64 <t> x y) -> (SRAW <t> (MOVBreg x) (OR <y.Type> y (NOT <y.Type> (SUBEcarrymask <y.Type> (CMPUconst y [7])))))
+(Rsh8x32 <t> x y) -> (SRAW <t> (MOVBreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst y [7])))))
+(Rsh8x16 <t> x y) -> (SRAW <t> (MOVBreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVHZreg y) [7])))))
+(Rsh8x8 <t> x y) -> (SRAW <t> (MOVBreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVBZreg y) [7])))))
+
+// Lowering comparisons
+// Booleans are materialized as a conditional move between the constants
+// 0 and 1, keyed on the condition code set by the compare.
+(Less64 x y) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+(Less32 x y) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+(Less16 x y) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
+(Less8 x y) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
+(Less64U x y) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
+(Less32U x y) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
+(Less16U x y) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y)))
+(Less8U x y) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y)))
+// Use SETG with reversed operands to dodge NaN case.
+(Less64F x y) -> (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP y x))
+(Less32F x y) -> (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS y x))
+
+(Leq64 x y) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+(Leq32 x y) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+(Leq16 x y) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
+(Leq8 x y) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
+(Leq64U x y) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
+(Leq32U x y) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
+(Leq16U x y) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y)))
+(Leq8U x y) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y)))
+// Use SETGE with reversed operands to dodge NaN case.
+(Leq64F x y) -> (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP y x))
+(Leq32F x y) -> (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS y x))
+
+(Greater64 x y) -> (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+(Greater32 x y) -> (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+(Greater16 x y) -> (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
+(Greater8 x y) -> (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
+(Greater64U x y) -> (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
+(Greater32U x y) -> (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
+(Greater16U x y) -> (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y)))
+(Greater8U x y) -> (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y)))
+(Greater64F x y) -> (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
+(Greater32F x y) -> (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
+
+(Geq64 x y) -> (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+(Geq32 x y) -> (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+(Geq16 x y) -> (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
+(Geq8 x y) -> (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
+(Geq64U x y) -> (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
+(Geq32U x y) -> (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
+(Geq16U x y) -> (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y)))
+(Geq8U x y) -> (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y)))
+(Geq64F x y) -> (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
+(Geq32F x y) -> (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
+
+// Equality of sub-32-bit values extends both operands; sign vs zero
+// extension does not matter for equality as long as both sides match.
+(Eq64 x y) -> (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+(Eq32 x y) -> (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+(Eq16 x y) -> (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
+(Eq8 x y) -> (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
+(EqB x y) -> (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
+(EqPtr x y) -> (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+(Eq64F x y) -> (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
+(Eq32F x y) -> (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
+
+(Neq64 x y) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+(Neq32 x y) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+(Neq16 x y) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
+(Neq8 x y) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
+(NeqB x y) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
+(NeqPtr x y) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+(Neq64F x y) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
+(Neq32F x y) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
+
+// Lowering loads
+// The load width and signedness are selected from the value's type.
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVDload ptr mem)
+(Load <t> ptr mem) && is32BitInt(t) -> (MOVWZload ptr mem)
+(Load <t> ptr mem) && is16BitInt(t) -> (MOVHZload ptr mem)
+(Load <t> ptr mem) && (t.IsBoolean() || is8BitInt(t)) -> (MOVBZload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) -> (FMOVSload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) -> (FMOVDload ptr mem)
+
+// Lowering stores
+// These more-specific FP versions of Store pattern should come first.
+(Store [8] ptr val mem) && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem)
+(Store [4] ptr val mem) && is32BitFloat(val.Type) -> (FMOVSstore ptr val mem)
+
+(Store [8] ptr val mem) -> (MOVDstore ptr val mem)
+(Store [4] ptr val mem) -> (MOVWstore ptr val mem)
+(Store [2] ptr val mem) -> (MOVHstore ptr val mem)
+(Store [1] ptr val mem) -> (MOVBstore ptr val mem)
+
+// Lowering moves
+
+// Load and store for small copies.
+(Move [s] _ _ mem) && SizeAndAlign(s).Size() == 0 -> mem
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstore dst (MOVBZload src mem) mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 2 -> (MOVHstore dst (MOVHZload src mem) mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 4 -> (MOVWstore dst (MOVWZload src mem) mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 8 -> (MOVDstore dst (MOVDload src mem) mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 16 ->
+	(MOVDstore [8] dst (MOVDload [8] src mem)
+		(MOVDstore dst (MOVDload src mem) mem))
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 24 ->
+        (MOVDstore [16] dst (MOVDload [16] src mem)
+	        (MOVDstore [8] dst (MOVDload [8] src mem)
+                (MOVDstore dst (MOVDload src mem) mem)))
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 3 ->
+	(MOVBstore [2] dst (MOVBZload [2] src mem)
+		(MOVHstore dst (MOVHZload src mem) mem))
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 5 ->
+	(MOVBstore [4] dst (MOVBZload [4] src mem)
+		(MOVWstore dst (MOVWZload src mem) mem))
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 6 ->
+	(MOVHstore [4] dst (MOVHZload [4] src mem)
+		(MOVWstore dst (MOVWZload src mem) mem))
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 7 ->
+	(MOVBstore [6] dst (MOVBZload [6] src mem)
+		(MOVHstore [4] dst (MOVHZload [4] src mem)
+			(MOVWstore dst (MOVWZload src mem) mem)))
+
+// MVC for other moves. Use up to 4 instructions (sizes up to 1024 bytes).
+// The ValAndOff packs the copy length (val) and the offset (off);
+// each MVC copies at most 256 bytes.
+(Move [s] dst src mem) && SizeAndAlign(s).Size() > 0 && SizeAndAlign(s).Size() <= 256 ->
+	(MVC [makeValAndOff(SizeAndAlign(s).Size(), 0)] dst src mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() > 256 && SizeAndAlign(s).Size() <= 512 ->
+	(MVC [makeValAndOff(SizeAndAlign(s).Size()-256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem))
+(Move [s] dst src mem) && SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Size() <= 768 ->
+	(MVC [makeValAndOff(SizeAndAlign(s).Size()-512, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem)))
+(Move [s] dst src mem) && SizeAndAlign(s).Size() > 768 && SizeAndAlign(s).Size() <= 1024 ->
+	(MVC [makeValAndOff(SizeAndAlign(s).Size()-768, 768)] dst src (MVC [makeValAndOff(256, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem))))
+
+// Move more than 1024 bytes using a loop.
+// The loop handles 256-byte chunks; the remainder (Size()%256) is the AuxInt.
+(Move [s] dst src mem) && SizeAndAlign(s).Size() > 1024 ->
+	(LoweredMove [SizeAndAlign(s).Size()%256] dst src (ADDconst <src.Type> src [(SizeAndAlign(s).Size()/256)*256]) mem)
+
+// Lowering Zero instructions
+(Zero [s] _ mem) && SizeAndAlign(s).Size() == 0 -> mem
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstoreconst [0] destptr mem)
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 2 -> (MOVHstoreconst [0] destptr mem)
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 4 -> (MOVWstoreconst [0] destptr mem)
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 8 -> (MOVDstoreconst [0] destptr mem)
+// Odd sizes use two stores; size 7 uses two overlapping 4-byte stores
+// (offsets 0 and 3), which is fine since both store zero.
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 3 ->
+	(MOVBstoreconst [makeValAndOff(0,2)] destptr
+		(MOVHstoreconst [0] destptr mem))
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 5 ->
+	(MOVBstoreconst [makeValAndOff(0,4)] destptr
+		(MOVWstoreconst [0] destptr mem))
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 6 ->
+	(MOVHstoreconst [makeValAndOff(0,4)] destptr
+		(MOVWstoreconst [0] destptr mem))
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 7 ->
+	(MOVWstoreconst [makeValAndOff(0,3)] destptr
+		(MOVWstoreconst [0] destptr mem))
+
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() > 0 && SizeAndAlign(s).Size() <= 1024 ->
+	(CLEAR [makeValAndOff(SizeAndAlign(s).Size(), 0)] destptr mem)
+
+// Move more than 1024 bytes using a loop.
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() > 1024 ->
+	(LoweredZero [SizeAndAlign(s).Size()%256] destptr (ADDconst <destptr.Type> destptr [(SizeAndAlign(s).Size()/256)*256]) mem)
+
+// Lowering constants
+// All integer constants are materialized with MOVDconst; narrower widths
+// rely on consumers ignoring the high bits.
+(Const8 [val]) -> (MOVDconst [val])
+(Const16 [val]) -> (MOVDconst [val])
+(Const32 [val]) -> (MOVDconst [val])
+(Const64 [val]) -> (MOVDconst [val])
+(Const32F [val]) -> (FMOVSconst [val])
+(Const64F [val]) -> (FMOVDconst [val])
+(ConstNil) -> (MOVDconst [0])
+(ConstBool [b]) -> (MOVDconst [b])
+
+// Lowering calls
+(StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
+(ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
+(DeferCall [argwid] mem) -> (CALLdefer [argwid] mem)
+(GoCall [argwid] mem) -> (CALLgo [argwid] mem)
+(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)
+
+// Miscellaneous
+(Convert <t> x mem) -> (MOVDconvert <t> x mem)
+(IsNonNil p) -> (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMPconst p [0]))
+(IsInBounds idx len) -> (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len))
+(IsSliceInBounds idx len) -> (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len))
+(NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
+(GetG mem) -> (LoweredGetG mem)
+(GetClosurePtr) -> (LoweredGetClosurePtr)
+(Addr {sym} base) -> (MOVDaddr {sym} base)
+(ITab (Load ptr mem)) -> (MOVDload ptr mem)
+
+// block rewrites
+// Branch directly on the condition code when the control value is a
+// materialized boolean.
+(If (MOVDLT (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (LT cmp yes no)
+(If (MOVDLE (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (LE cmp yes no)
+(If (MOVDGT (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (GT cmp yes no)
+(If (MOVDGE (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (GE cmp yes no)
+(If (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (EQ cmp yes no)
+(If (MOVDNE (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (NE cmp yes no)
+
+// Special case for floating point - LF/LEF not generated.
+(If (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (GTF cmp yes no)
+(If (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (GEF cmp yes no)
+
+// Fallback: test the boolean's low byte and branch on non-zero.
+(If cond yes no) -> (NE (TESTB cond) yes no)
+
+// ***************************
+// Above: lowering rules
+// Below: optimizations
+// ***************************
+// TODO: Should the optimizations be a separate pass?
+
+// Fold boolean tests into blocks
+// These undo the 0/1 materialization when the boolean is consumed only
+// by a branch, so the branch tests the original condition code directly.
+(NE (TESTB (MOVDLT (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) -> (LT cmp yes no)
+(NE (TESTB (MOVDLE (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) -> (LE cmp yes no)
+(NE (TESTB (MOVDGT (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) -> (GT cmp yes no)
+(NE (TESTB (MOVDGE (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) -> (GE cmp yes no)
+(NE (TESTB (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) -> (EQ cmp yes no)
+(NE (TESTB (MOVDNE (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) -> (NE cmp yes no)
+(NE (TESTB (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) -> (GTF cmp yes no)
+(NE (TESTB (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) -> (GEF cmp yes no)
+// Fold constants into instructions.
+// 64-bit ops require the constant to fit a signed 32-bit immediate
+// (is32Bit); the 32-bit (W) forms can always absorb the constant.
+(ADD x (MOVDconst [c])) && is32Bit(c) -> (ADDconst [c] x)
+(ADD (MOVDconst [c]) x) && is32Bit(c) -> (ADDconst [c] x)
+(ADDW x (MOVDconst [c])) -> (ADDWconst [c] x)
+(ADDW (MOVDconst [c]) x) -> (ADDWconst [c] x)
+
+// c - x is rewritten as -(x - c).
+(SUB x (MOVDconst [c])) && is32Bit(c) -> (SUBconst x [c])
+(SUB (MOVDconst [c]) x) && is32Bit(c) -> (NEG (SUBconst <v.Type> x [c]))
+(SUBW x (MOVDconst [c])) -> (SUBWconst x [c])
+(SUBW (MOVDconst [c]) x) -> (NEGW (SUBWconst <v.Type> x [c]))
+
+(MULLD x (MOVDconst [c])) && is32Bit(c) -> (MULLDconst [c] x)
+(MULLD (MOVDconst [c]) x) && is32Bit(c) -> (MULLDconst [c] x)
+(MULLW x (MOVDconst [c])) -> (MULLWconst [c] x)
+(MULLW (MOVDconst [c]) x) -> (MULLWconst [c] x)
+
+(AND x (MOVDconst [c])) && is32Bit(c) -> (ANDconst [c] x)
+(AND (MOVDconst [c]) x) && is32Bit(c) -> (ANDconst [c] x)
+(ANDW x (MOVDconst [c])) -> (ANDWconst [c] x)
+(ANDW (MOVDconst [c]) x) -> (ANDWconst [c] x)
+
+// Combine nested AND-with-constant into a single mask.
+(ANDWconst [c] (ANDWconst [d] x)) -> (ANDWconst [c & d] x)
+(ANDconst [c] (ANDconst [d] x)) -> (ANDconst [c & d] x)
+
+(OR x (MOVDconst [c])) && is32Bit(c) -> (ORconst [c] x)
+(OR (MOVDconst [c]) x) && is32Bit(c) -> (ORconst [c] x)
+(ORW x (MOVDconst [c])) -> (ORWconst [c] x)
+(ORW (MOVDconst [c]) x) -> (ORWconst [c] x)
+
+(XOR x (MOVDconst [c])) && is32Bit(c) -> (XORconst [c] x)
+(XOR (MOVDconst [c]) x) && is32Bit(c) -> (XORconst [c] x)
+(XORW x (MOVDconst [c])) -> (XORWconst [c] x)
+(XORW (MOVDconst [c]) x) -> (XORWconst [c] x)
+
+// Constant shift amounts are reduced mod 64.
+(SLD x (MOVDconst [c])) -> (SLDconst [c&63] x)
+(SLW x (MOVDconst [c])) -> (SLWconst [c&63] x)
+(SRD x (MOVDconst [c])) -> (SRDconst [c&63] x)
+(SRW x (MOVDconst [c])) -> (SRWconst [c&63] x)
+(SRAD x (MOVDconst [c])) -> (SRADconst [c&63] x)
+(SRAW x (MOVDconst [c])) -> (SRAWconst [c&63] x)
+
+// Masking the shift amount with 63 is redundant.
+// NOTE(review): valid only if the machine shift ops use just the low
+// 6 bits of the amount — confirm against the opcode definitions.
+(SRAW x (ANDWconst [63] y)) -> (SRAW x y)
+(SRAD x (ANDconst [63] y)) -> (SRAD x y)
+(SLW x (ANDWconst [63] y)) -> (SLW x y)
+(SLD x (ANDconst [63] y)) -> (SLD x y)
+(SRW x (ANDWconst [63] y)) -> (SRW x y)
+(SRD x (ANDconst [63] y)) -> (SRD x y)
+// Fold constants into comparisons. When the constant is the first
+// operand, swap it to the second and record the swap with InvertFlags.
+(CMP x (MOVDconst [c])) && is32Bit(c) -> (CMPconst x [c])
+(CMP (MOVDconst [c]) x) && is32Bit(c) -> (InvertFlags (CMPconst x [c]))
+(CMPW x (MOVDconst [c])) -> (CMPWconst x [c])
+(CMPW (MOVDconst [c]) x) -> (InvertFlags (CMPWconst x [c]))
+// CMPU is a full 64-bit unsigned comparison, so the constant must be
+// representable as an unsigned 32-bit immediate. The previous guard
+// (is32Bit(c)) wrongly accepted negative constants: e.g. c = -1 would be
+// truncated to 0xFFFFFFFF, changing the result of the comparison.
+(CMPU x (MOVDconst [c])) && int64(uint32(c)) == c -> (CMPUconst x [int64(uint32(c))])
+(CMPU (MOVDconst [c]) x) && int64(uint32(c)) == c -> (InvertFlags (CMPUconst x [int64(uint32(c))]))
+// CMPWU only compares the low 32 bits, so truncating the constant is safe
+// for any c.
+(CMPWU x (MOVDconst [c])) -> (CMPWUconst x [int64(uint32(c))])
+(CMPWU (MOVDconst [c]) x) -> (InvertFlags (CMPWUconst x [int64(uint32(c))]))
+
+// Using MOVBZreg instead of AND is cheaper.
+(ANDconst [0xFF] x) -> (MOVBZreg x)
+(ANDconst [0xFFFF] x) -> (MOVHZreg x)
+(ANDconst [0xFFFFFFFF] x) -> (MOVWZreg x)
+
+// strength reduction
+// Multiplies by powers of two become shifts; by 2^n±1, a shift plus an
+// add/sub. NOTE(review): the c >= 15 / c >= 17 thresholds presumably
+// leave small multipliers to other lowerings — confirm before changing.
+(MULLDconst [-1] x) -> (NEG x)
+(MULLDconst [0] _) -> (MOVDconst [0])
+(MULLDconst [1] x) -> x
+(MULLDconst [c] x) && isPowerOfTwo(c) -> (SLDconst [log2(c)] x)
+(MULLDconst [c] x) && isPowerOfTwo(c+1) && c >= 15 -> (SUB (SLDconst <v.Type> [log2(c+1)] x) x)
+(MULLDconst [c] x) && isPowerOfTwo(c-1) && c >= 17 -> (ADD (SLDconst <v.Type> [log2(c-1)] x) x)
+
+(MULLWconst [-1] x) -> (NEGW x)
+(MULLWconst [0] _) -> (MOVDconst [0])
+(MULLWconst [1] x) -> x
+(MULLWconst [c] x) && isPowerOfTwo(c) -> (SLWconst [log2(c)] x)
+(MULLWconst [c] x) && isPowerOfTwo(c+1) && c >= 15 -> (SUBW (SLWconst <v.Type> [log2(c+1)] x) x)
+(MULLWconst [c] x) && isPowerOfTwo(c-1) && c >= 17 -> (ADDW (SLWconst <v.Type> [log2(c-1)] x) x)
+
+// Fold ADD into MOVDaddr. Odd offsets from SB shouldn't be folded (LARL can't handle them).
+(ADDconst [c] (MOVDaddr [d] {s} x)) && ((c+d)&1 == 0) && is32Bit(c+d) -> (MOVDaddr [c+d] {s} x)
+(MOVDaddr [c] {s} (ADDconst [d] x)) && ((c+d)&1 == 0) && is32Bit(c+d) -> (MOVDaddr [c+d] {s} x)
+(ADDconst [c] (MOVDaddr [d] {s} x)) && x.Op != OpSB && is32Bit(c+d) -> (MOVDaddr [c+d] {s} x)
+(MOVDaddr [c] {s} (ADDconst [d] x)) && x.Op != OpSB && is32Bit(c+d) -> (MOVDaddr [c+d] {s} x)
+(MOVDaddr [c] {s} (ADD x y)) && x.Op != OpSB && y.Op != OpSB -> (MOVDaddridx [c] {s} x y)
+(ADD x (MOVDaddr [c] {s} y)) && x.Op != OpSB && y.Op != OpSB -> (MOVDaddridx [c] {s} x y)
+(ADD (MOVDaddr [c] {s} x) y) && x.Op != OpSB && y.Op != OpSB -> (MOVDaddridx [c] {s} x y)
+
+// fold ADDconst into MOVDaddrx
+(ADDconst [c] (MOVDaddridx [d] {s} x y)) && is32Bit(c+d) -> (MOVDaddridx [c+d] {s} x y)
+(MOVDaddridx [c] {s} (ADDconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (MOVDaddridx [c+d] {s} x y)
+(MOVDaddridx [c] {s} x (ADDconst [d] y)) && is32Bit(c+d) && y.Op != OpSB -> (MOVDaddridx [c+d] {s} x y)
+
+// reverse ordering of compare instruction
+// InvertFlags records that the compare's operands were swapped, so the
+// condition must be mirrored; EQ and NE are symmetric and stay unchanged.
+(MOVDLT x y (InvertFlags cmp)) -> (MOVDGT x y cmp)
+(MOVDGT x y (InvertFlags cmp)) -> (MOVDLT x y cmp)
+(MOVDLE x y (InvertFlags cmp)) -> (MOVDGE x y cmp)
+(MOVDGE x y (InvertFlags cmp)) -> (MOVDLE x y cmp)
+(MOVDEQ x y (InvertFlags cmp)) -> (MOVDEQ x y cmp)
+(MOVDNE x y (InvertFlags cmp)) -> (MOVDNE x y cmp)
+
+// don't extend after proper load
+// A load that is at least as narrow as the extension, with matching
+// signedness on the widest bits, already produces the extended value.
+(MOVBreg x:(MOVBload _ _)) -> x
+(MOVBZreg x:(MOVBZload _ _)) -> x
+(MOVHreg x:(MOVBload _ _)) -> x
+(MOVHreg x:(MOVBZload _ _)) -> x
+(MOVHreg x:(MOVHload _ _)) -> x
+(MOVHZreg x:(MOVBZload _ _)) -> x
+(MOVHZreg x:(MOVHZload _ _)) -> x
+(MOVWreg x:(MOVBload _ _)) -> x
+(MOVWreg x:(MOVBZload _ _)) -> x
+(MOVWreg x:(MOVHload _ _)) -> x
+(MOVWreg x:(MOVHZload _ _)) -> x
+(MOVWreg x:(MOVWload _ _)) -> x
+(MOVWZreg x:(MOVBZload _ _)) -> x
+(MOVWZreg x:(MOVHZload _ _)) -> x
+(MOVWZreg x:(MOVWZload _ _)) -> x
+
+// fold double extensions
+// An inner extension that already establishes the high bits required by
+// the outer one makes the outer extension a no-op.
+(MOVBreg x:(MOVBreg _)) -> x
+(MOVBZreg x:(MOVBZreg _)) -> x
+(MOVHreg x:(MOVBreg _)) -> x
+(MOVHreg x:(MOVBZreg _)) -> x
+(MOVHreg x:(MOVHreg _)) -> x
+(MOVHZreg x:(MOVBZreg _)) -> x
+(MOVHZreg x:(MOVHZreg _)) -> x
+(MOVWreg x:(MOVBreg _)) -> x
+(MOVWreg x:(MOVBZreg _)) -> x
+(MOVWreg x:(MOVHreg _)) -> x
+// This rule was a duplicate of the previous one; the intended case is the
+// zero-extended inner value: MOVHZreg leaves bit 31 clear, so a
+// subsequent sign extension from 32 bits is also a no-op.
+(MOVWreg x:(MOVHZreg _)) -> x
+(MOVWreg x:(MOVWreg _)) -> x
+(MOVWZreg x:(MOVBZreg _)) -> x
+(MOVWZreg x:(MOVHZreg _)) -> x
+(MOVWZreg x:(MOVWZreg _)) -> x
+
+// sign extended loads
+// Note: The combined instruction must end up in the same block
+// as the original load. If not, we end up making a value with
+// memory type live in two different blocks, which can lead to
+// multiple memory values alive simultaneously.
+// Make sure we don't combine these ops if the load has another use.
+// This prevents a single load from being split into multiple loads
+// which then might return different values. See test/atomicload.go.
+(MOVBreg x:(MOVBZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+(MOVBZreg x:(MOVBZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBZload <v.Type> [off] {sym} ptr mem)
+(MOVHreg x:(MOVHZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHload <v.Type> [off] {sym} ptr mem)
+(MOVHZreg x:(MOVHZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHZload <v.Type> [off] {sym} ptr mem)
+(MOVWreg x:(MOVWZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
+(MOVWZreg x:(MOVWZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWZload <v.Type> [off] {sym} ptr mem)
+
+(MOVBZreg x:(MOVBZloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBZloadidx <v.Type> [off] {sym} ptr idx mem)
+(MOVHZreg x:(MOVHZloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHZloadidx <v.Type> [off] {sym} ptr idx mem)
+(MOVWZreg x:(MOVWZloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWZloadidx <v.Type> [off] {sym} ptr idx mem)
+
+// replace load from same location as preceding store with copy
+// A narrow zero-extending load guarantees zeros in its high bits, but the
+// stored value x may carry junk there, so the forwarded value must be
+// explicitly zero-extended. (Previously these rules produced plain x,
+// losing that guarantee.) The full-width MOVDload needs no extension.
+(MOVBZload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBZreg x)
+(MOVHZload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVHZreg x)
+(MOVWZload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWZreg x)
+(MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+
+// Fold extensions and ANDs together.
+(MOVBZreg (ANDWconst [c] x)) -> (ANDconst [c & 0xff] x)
+(MOVHZreg (ANDWconst [c] x)) -> (ANDconst [c & 0xffff] x)
+(MOVWZreg (ANDWconst [c] x)) -> (ANDconst [c & 0xffffffff] x)
+// Sign extensions fold only when the mask clears the sign bit, making
+// sign extension equivalent to zero extension.
+(MOVBreg (ANDWconst [c] x)) && c & 0x80 == 0 -> (ANDconst [c & 0x7f] x)
+(MOVHreg (ANDWconst [c] x)) && c & 0x8000 == 0 -> (ANDconst [c & 0x7fff] x)
+(MOVWreg (ANDWconst [c] x)) && c & 0x80000000 == 0 -> (ANDconst [c & 0x7fffffff] x)
+
+(MOVBZreg (ANDconst [c] x)) -> (ANDconst [c & 0xff] x)
+(MOVHZreg (ANDconst [c] x)) -> (ANDconst [c & 0xffff] x)
+(MOVWZreg (ANDconst [c] x)) -> (ANDconst [c & 0xffffffff] x)
+(MOVBreg (ANDconst [c] x)) && c & 0x80 == 0 -> (ANDconst [c & 0x7f] x)
+(MOVHreg (ANDconst [c] x)) && c & 0x8000 == 0 -> (ANDconst [c & 0x7fff] x)
+(MOVWreg (ANDconst [c] x)) && c & 0x80000000 == 0 -> (ANDconst [c & 0x7fffffff] x)
+
+// Don't extend before storing
+// A narrow store only writes the low bits, so any extension of the
+// stored value is dead.
+(MOVWstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVWstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWZreg x) mem) -> (MOVWstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHZreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBZreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
+
+// Fold constants into memory operations.
+// Note that this is not always a good idea because if not all the uses of
+// the ADDconst get eliminated, we still have to compute the ADDconst and we now
+// have potentially two live values (ptr and (ADDconst [off] ptr)) instead of one.
+// Nevertheless, let's do it!
+(MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVDload [off1+off2] {sym} ptr mem)
+(MOVWZload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWZload [off1+off2] {sym} ptr mem)
+(MOVHZload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVHZload [off1+off2] {sym} ptr mem)
+(MOVBZload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBZload [off1+off2] {sym} ptr mem)
+(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) -> (FMOVSload [off1+off2] {sym} ptr mem)
+(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) -> (FMOVDload [off1+off2] {sym} ptr mem)
+
+(MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVDstore [off1+off2] {sym} ptr val mem)
+(MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVWstore [off1+off2] {sym} ptr val mem)
+(MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVHstore [off1+off2] {sym} ptr val mem)
+(MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVBstore [off1+off2] {sym} ptr val mem)
+(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (FMOVSstore [off1+off2] {sym} ptr val mem)
+(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (FMOVDstore [off1+off2] {sym} ptr val mem)
+
+// Fold constants into stores.
+// NOTE(review): the int64(int16(c)) == c guards on the 8-byte and 4-byte
+// forms presumably restrict to constants that fit the signed 16-bit
+// immediate of the store-immediate instructions — confirm against the
+// opcode definitions.
+(MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) && validValAndOff(c,off) && int64(int16(c)) == c && ptr.Op != OpSB ->
+	(MOVDstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVDconst [c]) mem) && validOff(off) && int64(int16(c)) == c && ptr.Op != OpSB ->
+	(MOVWstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
+(MOVHstore [off] {sym} ptr (MOVDconst [c]) mem) && validOff(off) && ptr.Op != OpSB ->
+	(MOVHstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
+(MOVBstore [off] {sym} ptr (MOVDconst [c]) mem) && validOff(off) && ptr.Op != OpSB ->
+	(MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
+
+// Fold address offsets into constant stores.
+(MOVDstoreconst [sc] {s} (ADDconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
+ (MOVDstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+(MOVWstoreconst [sc] {s} (ADDconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
+ (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+(MOVHstoreconst [sc] {s} (ADDconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
+ (MOVHstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+(MOVBstoreconst [sc] {s} (ADDconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
+ (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+
+// We need to fold MOVDaddr into the MOVx ops so that the live variable analysis knows
+// what variables are being read/written by the ops.
+(MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVWZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVHZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVBZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+
+(MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+
+(MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+
+(MOVDstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
+ (MOVDstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+(MOVWstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
+ (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+(MOVHstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
+ (MOVHstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+(MOVBstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
+ (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+
+// generating indexed loads and stores
+(MOVBZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVBZloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+(MOVHZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVHZloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+(MOVWZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVWZloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+(MOVDload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVDloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+(FMOVSload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (FMOVSloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+(FMOVDload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (FMOVDloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+
+(MOVBstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVBstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+(MOVHstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVHstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+(MOVWstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVWstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+(MOVDstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVDstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+(FMOVSstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (FMOVSstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+(FMOVDstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (FMOVDstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+
+(MOVBZload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB -> (MOVBZloadidx [off] {sym} ptr idx mem)
+(MOVHZload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB -> (MOVHZloadidx [off] {sym} ptr idx mem)
+(MOVWZload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB -> (MOVWZloadidx [off] {sym} ptr idx mem)
+(MOVDload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB -> (MOVDloadidx [off] {sym} ptr idx mem)
+(FMOVSload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB -> (FMOVSloadidx [off] {sym} ptr idx mem)
+(FMOVDload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB -> (FMOVDloadidx [off] {sym} ptr idx mem)
+(MOVBstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB -> (MOVBstoreidx [off] {sym} ptr idx val mem)
+(MOVHstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB -> (MOVHstoreidx [off] {sym} ptr idx val mem)
+(MOVWstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB -> (MOVWstoreidx [off] {sym} ptr idx val mem)
+(MOVDstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB -> (MOVDstoreidx [off] {sym} ptr idx val mem)
+(FMOVSstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB -> (FMOVSstoreidx [off] {sym} ptr idx val mem)
+(FMOVDstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB -> (FMOVDstoreidx [off] {sym} ptr idx val mem)
+
+// combine ADD into indexed loads and stores
+(MOVBZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) -> (MOVBZloadidx [c+d] {sym} ptr idx mem)
+(MOVHZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) -> (MOVHZloadidx [c+d] {sym} ptr idx mem)
+(MOVWZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) -> (MOVWZloadidx [c+d] {sym} ptr idx mem)
+(MOVDloadidx [c] {sym} (ADDconst [d] ptr) idx mem) -> (MOVDloadidx [c+d] {sym} ptr idx mem)
+(FMOVSloadidx [c] {sym} (ADDconst [d] ptr) idx mem) -> (FMOVSloadidx [c+d] {sym} ptr idx mem)
+(FMOVDloadidx [c] {sym} (ADDconst [d] ptr) idx mem) -> (FMOVDloadidx [c+d] {sym} ptr idx mem)
+
+(MOVBstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) -> (MOVBstoreidx [c+d] {sym} ptr idx val mem)
+(MOVHstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) -> (MOVHstoreidx [c+d] {sym} ptr idx val mem)
+(MOVWstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) -> (MOVWstoreidx [c+d] {sym} ptr idx val mem)
+(MOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) -> (MOVDstoreidx [c+d] {sym} ptr idx val mem)
+(FMOVSstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) -> (FMOVSstoreidx [c+d] {sym} ptr idx val mem)
+(FMOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) -> (FMOVDstoreidx [c+d] {sym} ptr idx val mem)
+
+(MOVBZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) -> (MOVBZloadidx [c+d] {sym} ptr idx mem)
+(MOVHZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) -> (MOVHZloadidx [c+d] {sym} ptr idx mem)
+(MOVWZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) -> (MOVWZloadidx [c+d] {sym} ptr idx mem)
+(MOVDloadidx [c] {sym} ptr (ADDconst [d] idx) mem) -> (MOVDloadidx [c+d] {sym} ptr idx mem)
+(FMOVSloadidx [c] {sym} ptr (ADDconst [d] idx) mem) -> (FMOVSloadidx [c+d] {sym} ptr idx mem)
+(FMOVDloadidx [c] {sym} ptr (ADDconst [d] idx) mem) -> (FMOVDloadidx [c+d] {sym} ptr idx mem)
+
+(MOVBstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) -> (MOVBstoreidx [c+d] {sym} ptr idx val mem)
+(MOVHstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) -> (MOVHstoreidx [c+d] {sym} ptr idx val mem)
+(MOVWstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) -> (MOVWstoreidx [c+d] {sym} ptr idx val mem)
+(MOVDstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) -> (MOVDstoreidx [c+d] {sym} ptr idx val mem)
+(FMOVSstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) -> (FMOVSstoreidx [c+d] {sym} ptr idx val mem)
+(FMOVDstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) -> (FMOVDstoreidx [c+d] {sym} ptr idx val mem)
+
+// fold MOVDaddrs together
+(MOVDaddr [off1] {sym1} (MOVDaddr [off2] {sym2} x)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVDaddr [off1+off2] {mergeSym(sym1,sym2)} x)
+
+// MOVDaddr into MOVDaddridx
+(MOVDaddridx [off1] {sym1} (MOVDaddr [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
+ (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)
+(MOVDaddridx [off1] {sym1} x (MOVDaddr [off2] {sym2} y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB ->
+ (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)
+
+// MOVDaddridx into MOVDaddr
+(MOVDaddr [off1] {sym1} (MOVDaddridx [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)
+
+// Absorb InvertFlags into branches.
+// InvertFlags records that the comparison's operands were swapped, so the
+// operand-asymmetric conditions flip (LT<->GT, LE<->GE) while EQ and NE,
+// which are symmetric in their operands, pass through unchanged.
+(LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
+(GT (InvertFlags cmp) yes no) -> (LT cmp yes no)
+(LE (InvertFlags cmp) yes no) -> (GE cmp yes no)
+(GE (InvertFlags cmp) yes no) -> (LE cmp yes no)
+(EQ (InvertFlags cmp) yes no) -> (EQ cmp yes no)
+(NE (InvertFlags cmp) yes no) -> (NE cmp yes no)
+
+// Constant comparisons.
+(CMPconst (MOVDconst [x]) [y]) && x==y -> (FlagEQ)
+(CMPconst (MOVDconst [x]) [y]) && x<y -> (FlagLT)
+(CMPconst (MOVDconst [x]) [y]) && x>y -> (FlagGT)
+(CMPUconst (MOVDconst [x]) [y]) && uint64(x)==uint64(y) -> (FlagEQ)
+(CMPUconst (MOVDconst [x]) [y]) && uint64(x)<uint64(y) -> (FlagLT)
+(CMPUconst (MOVDconst [x]) [y]) && uint64(x)>uint64(y) -> (FlagGT)
+
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y) -> (FlagLT)
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y) -> (FlagGT)
+(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)==uint32(y) -> (FlagEQ)
+(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)<uint32(y) -> (FlagLT)
+(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)>uint32(y) -> (FlagGT)
+
+// Other known comparisons.
+// A zero-extended value is bounded by its width, so comparing it against a
+// constant beyond that bound has a known result (value < constant => FlagLT).
+(CMPconst (MOVBZreg _) [c]) && 0xFF < c -> (FlagLT)
+(CMPconst (MOVHZreg _) [c]) && 0xFFFF < c -> (FlagLT)
+(CMPconst (MOVWZreg _) [c]) && 0xFFFFFFFF < c -> (FlagLT)
+// A right shift by c leaves a value less than 1<<(width-c); if that bound is
+// at most n the shifted value must compare less than n.
+(CMPWconst (SRWconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) -> (FlagLT)
+(CMPconst (SRDconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n) -> (FlagLT)
+// ANDing with a non-negative mask m yields a value in [0, m], which is less
+// than any n > m.
+(CMPconst (ANDconst _ [m]) [n]) && 0 <= m && m < n -> (FlagLT)
+(CMPWconst (ANDWconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) -> (FlagLT)
+
+// Absorb flag constants into SBB ops.
+(SUBEcarrymask (FlagEQ)) -> (MOVDconst [-1])
+(SUBEcarrymask (FlagLT)) -> (MOVDconst [-1])
+(SUBEcarrymask (FlagGT)) -> (MOVDconst [0])
+(SUBEWcarrymask (FlagEQ)) -> (MOVDconst [-1])
+(SUBEWcarrymask (FlagLT)) -> (MOVDconst [-1])
+(SUBEWcarrymask (FlagGT)) -> (MOVDconst [0])
+
+// Absorb flag constants into branches.
+(EQ (FlagEQ) yes no) -> (First nil yes no)
+(EQ (FlagLT) yes no) -> (First nil no yes)
+(EQ (FlagGT) yes no) -> (First nil no yes)
+
+(NE (FlagEQ) yes no) -> (First nil no yes)
+(NE (FlagLT) yes no) -> (First nil yes no)
+(NE (FlagGT) yes no) -> (First nil yes no)
+
+(LT (FlagEQ) yes no) -> (First nil no yes)
+(LT (FlagLT) yes no) -> (First nil yes no)
+(LT (FlagGT) yes no) -> (First nil no yes)
+
+(LE (FlagEQ) yes no) -> (First nil yes no)
+(LE (FlagLT) yes no) -> (First nil yes no)
+(LE (FlagGT) yes no) -> (First nil no yes)
+
+(GT (FlagEQ) yes no) -> (First nil no yes)
+(GT (FlagLT) yes no) -> (First nil no yes)
+(GT (FlagGT) yes no) -> (First nil yes no)
+
+(GE (FlagEQ) yes no) -> (First nil yes no)
+(GE (FlagLT) yes no) -> (First nil no yes)
+(GE (FlagGT) yes no) -> (First nil yes no)
+
+// Absorb flag constants into conditional moves.
+// (MOVDxx y x cmp) yields x (arg1) when cmp satisfies condition xx and
+// y (arg0) otherwise, so a constant flag result selects one of the inputs.
+(MOVDEQ _ x (FlagEQ)) -> x
+(MOVDEQ y _ (FlagLT)) -> y
+(MOVDEQ y _ (FlagGT)) -> y
+
+// NE holds on FlagLT and FlagGT, fails on FlagEQ. (The previous rules had
+// the operand selection inverted relative to every other MOVDxx op.)
+(MOVDNE y _ (FlagEQ)) -> y
+(MOVDNE _ x (FlagLT)) -> x
+(MOVDNE _ x (FlagGT)) -> x
+
+(MOVDLT y _ (FlagEQ)) -> y
+(MOVDLT _ x (FlagLT)) -> x
+(MOVDLT y _ (FlagGT)) -> y
+
+(MOVDLE _ x (FlagEQ)) -> x
+(MOVDLE _ x (FlagLT)) -> x
+(MOVDLE y _ (FlagGT)) -> y
+
+(MOVDGT y _ (FlagEQ)) -> y
+(MOVDGT y _ (FlagLT)) -> y
+(MOVDGT _ x (FlagGT)) -> x
+
+(MOVDGE _ x (FlagEQ)) -> x
+(MOVDGE y _ (FlagLT)) -> y
+(MOVDGE _ x (FlagGT)) -> x
+
+// Remove redundant *const ops
+(ADDconst [0] x) -> x
+(ADDWconst [c] x) && int32(c)==0 -> x
+(SUBconst [0] x) -> x
+(SUBWconst [c] x) && int32(c) == 0 -> x
+(ANDconst [0] _) -> (MOVDconst [0])
+(ANDWconst [c] _) && int32(c)==0 -> (MOVDconst [0])
+(ANDconst [-1] x) -> x
+(ANDWconst [c] x) && int32(c)==-1 -> x
+(ORconst [0] x) -> x
+(ORWconst [c] x) && int32(c)==0 -> x
+(ORconst [-1] _) -> (MOVDconst [-1])
+(ORWconst [c] _) && int32(c)==-1 -> (MOVDconst [-1])
+(XORconst [0] x) -> x
+(XORWconst [c] x) && int32(c)==0 -> x
+
+// Convert constant subtracts to constant adds.
+(SUBconst [c] x) && c != -(1<<31) -> (ADDconst [-c] x)
+(SUBWconst [c] x) -> (ADDWconst [int64(int32(-c))] x)
+
+// generic constant folding
+// TODO: more of this
+(ADDconst [c] (MOVDconst [d])) -> (MOVDconst [c+d])
+(ADDWconst [c] (MOVDconst [d])) -> (MOVDconst [int64(int32(c+d))])
+(ADDconst [c] (ADDconst [d] x)) && is32Bit(c+d) -> (ADDconst [c+d] x)
+(ADDWconst [c] (ADDWconst [d] x)) -> (ADDWconst [int64(int32(c+d))] x)
+(SUBconst (MOVDconst [d]) [c]) -> (MOVDconst [d-c])
+(SUBconst (SUBconst x [d]) [c]) && is32Bit(-c-d) -> (ADDconst [-c-d] x)
+(SRADconst [c] (MOVDconst [d])) -> (MOVDconst [d>>uint64(c)])
+// SRAW is a 32-bit arithmetic shift: it only sees the low 32 bits of its
+// input, so the constant must be sign extended from bit 31 before shifting
+// (folding as a plain 64-bit shift would use the wrong sign bit).
+(SRAWconst [c] (MOVDconst [d])) -> (MOVDconst [int64(int32(d))>>uint64(c)])
+(NEG (MOVDconst [c])) -> (MOVDconst [-c])
+(NEGW (MOVDconst [c])) -> (MOVDconst [int64(int32(-c))])
+(MULLDconst [c] (MOVDconst [d])) -> (MOVDconst [c*d])
+(MULLWconst [c] (MOVDconst [d])) -> (MOVDconst [int64(int32(c*d))])
+(ANDconst [c] (MOVDconst [d])) -> (MOVDconst [c&d])
+(ANDWconst [c] (MOVDconst [d])) -> (MOVDconst [c&d])
+(ORconst [c] (MOVDconst [d])) -> (MOVDconst [c|d])
+(ORWconst [c] (MOVDconst [d])) -> (MOVDconst [c|d])
+(XORconst [c] (MOVDconst [d])) -> (MOVDconst [c^d])
+(XORWconst [c] (MOVDconst [d])) -> (MOVDconst [c^d])
+(NOT (MOVDconst [c])) -> (MOVDconst [^c])
+(NOTW (MOVDconst [c])) -> (MOVDconst [^c])
+
+// generic simplifications
+// TODO: more of this
+(ADD x (NEG y)) -> (SUB x y)
+(ADDW x (NEGW y)) -> (SUBW x y)
+(SUB x x) -> (MOVDconst [0])
+(SUBW x x) -> (MOVDconst [0])
+(AND x x) -> x
+(ANDW x x) -> x
+(OR x x) -> x
+(ORW x x) -> x
+(XOR x x) -> (MOVDconst [0])
+(XORW x x) -> (MOVDconst [0])
+
+// Combine constant stores into larger (unaligned) stores.
+// This doesn't work for global data (based on SB)
+// because STGRL doesn't support unaligned addresses.
+(MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()
+ && clobber(x)
+ -> (MOVHstoreconst [makeValAndOff(ValAndOff(c).Val()&0xff | ValAndOff(a).Val()<<8, ValAndOff(a).Off())] {s} p mem)
+(MOVHstoreconst [c] {s} p x:(MOVHstoreconst [a] {s} p mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
+ && clobber(x)
+ -> (MOVWstoreconst [makeValAndOff(ValAndOff(c).Val()&0xffff | ValAndOff(a).Val()<<16, ValAndOff(a).Off())] {s} p mem)
+(MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && ValAndOff(a).Off() + 4 == ValAndOff(c).Off()
+ && clobber(x)
+ -> (MOVDstore [ValAndOff(a).Off()] {s} p (MOVDconst [ValAndOff(c).Val()&0xffffffff | ValAndOff(a).Val()<<32]) mem)
+
+// Combine stores into larger (unaligned) stores.
+// This doesn't work for global data (based on SB)
+// because STGRL doesn't support unaligned addresses.
+(MOVBstore [i] {s} p w x:(MOVBstore [i-1] {s} p (SRDconst [8] w) mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ -> (MOVHstore [i-1] {s} p w mem)
+(MOVBstore [i] {s} p w0:(SRDconst [j] w) x:(MOVBstore [i-1] {s} p (SRDconst [j+8] w) mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ -> (MOVHstore [i-1] {s} p w0 mem)
+(MOVHstore [i] {s} p w x:(MOVHstore [i-2] {s} p (SRDconst [16] w) mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ -> (MOVWstore [i-2] {s} p w mem)
+(MOVHstore [i] {s} p w0:(SRDconst [j] w) x:(MOVHstore [i-2] {s} p (SRDconst [j+16] w) mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ -> (MOVWstore [i-2] {s} p w0 mem)
+(MOVWstore [i] {s} p (SRDconst [32] w) x:(MOVWstore [i-4] {s} p w mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ -> (MOVDstore [i-4] {s} p w mem)
+(MOVWstore [i] {s} p w0:(SRDconst [j] w) x:(MOVWstore [i-4] {s} p (SRDconst [j+32] w) mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ -> (MOVDstore [i-4] {s} p w0 mem)
+
+(MOVBstoreidx [i] {s} p idx w x:(MOVBstoreidx [i-1] {s} p idx (SRDconst [8] w) mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ -> (MOVHstoreidx [i-1] {s} p idx w mem)
+(MOVBstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx (SRDconst [j+8] w) mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ -> (MOVHstoreidx [i-1] {s} p idx w0 mem)
+(MOVHstoreidx [i] {s} p idx w x:(MOVHstoreidx [i-2] {s} p idx (SRDconst [16] w) mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ -> (MOVWstoreidx [i-2] {s} p idx w mem)
+(MOVHstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVHstoreidx [i-2] {s} p idx (SRDconst [j+16] w) mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ -> (MOVWstoreidx [i-2] {s} p idx w0 mem)
+(MOVWstoreidx [i] {s} p idx w x:(MOVWstoreidx [i-4] {s} p idx (SRDconst [32] w) mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ -> (MOVDstoreidx [i-4] {s} p idx w mem)
+(MOVWstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVWstoreidx [i-4] {s} p idx (SRDconst [j+32] w) mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ -> (MOVDstoreidx [i-4] {s} p idx w0 mem)
+
+// Combining byte loads into larger (unaligned) loads.
+
+// Little endian loads.
+
+// b[0] | b[1]<<8 -> load 16-bit, reverse bytes
+(ORW x0:(MOVBZload [i] {s} p mem)
+ s0:(SLWconst [8] x1:(MOVBZload [i+1] {s} p mem)))
+ && p.Op != OpSB
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && s0.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0)
+ && clobber(x1)
+ && clobber(s0)
+ -> @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i] {s} p mem))
+
+// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 -> load 32-bit, reverse bytes
+(ORW o0:(ORW o1:(ORW
+ x0:(MOVBZload [i] {s} p mem)
+ s0:(SLWconst [8] x1:(MOVBZload [i+1] {s} p mem)))
+ s1:(SLWconst [16] x2:(MOVBZload [i+2] {s} p mem)))
+ s2:(SLWconst [24] x3:(MOVBZload [i+3] {s} p mem)))
+ && p.Op != OpSB
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && x3.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && s2.Uses == 1
+ && o0.Uses == 1
+ && o1.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3) != nil
+ && clobber(x0)
+ && clobber(x1)
+ && clobber(x2)
+ && clobber(x3)
+ && clobber(s0)
+ && clobber(s1)
+ && clobber(s2)
+ && clobber(o0)
+ && clobber(o1)
+ -> @mergePoint(b,x0,x1,x2,x3) (MOVWZreg (MOVWBRload [i] {s} p mem))
+
+// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4]<<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 -> load 64-bit, reverse bytes
+(OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR
+ x0:(MOVBZload [i] {s} p mem)
+ s0:(SLDconst [8] x1:(MOVBZload [i+1] {s} p mem)))
+ s1:(SLDconst [16] x2:(MOVBZload [i+2] {s} p mem)))
+ s2:(SLDconst [24] x3:(MOVBZload [i+3] {s} p mem)))
+ s3:(SLDconst [32] x4:(MOVBZload [i+4] {s} p mem)))
+ s4:(SLDconst [40] x5:(MOVBZload [i+5] {s} p mem)))
+ s5:(SLDconst [48] x6:(MOVBZload [i+6] {s} p mem)))
+ s6:(SLDconst [56] x7:(MOVBZload [i+7] {s} p mem)))
+ && p.Op != OpSB
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && x3.Uses == 1
+ && x4.Uses == 1
+ && x5.Uses == 1
+ && x6.Uses == 1
+ && x7.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && s2.Uses == 1
+ && s3.Uses == 1
+ && s4.Uses == 1
+ && s5.Uses == 1
+ && s6.Uses == 1
+ && o0.Uses == 1
+ && o1.Uses == 1
+ && o2.Uses == 1
+ && o3.Uses == 1
+ && o4.Uses == 1
+ && o5.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
+ && clobber(x0)
+ && clobber(x1)
+ && clobber(x2)
+ && clobber(x3)
+ && clobber(x4)
+ && clobber(x5)
+ && clobber(x6)
+ && clobber(x7)
+ && clobber(s0)
+ && clobber(s1)
+ && clobber(s2)
+ && clobber(s3)
+ && clobber(s4)
+ && clobber(s5)
+ && clobber(s6)
+ && clobber(o0)
+ && clobber(o1)
+ && clobber(o2)
+ && clobber(o3)
+ && clobber(o4)
+ && clobber(o5)
+ -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDBRload [i] {s} p mem)
+
+// b[0] | b[1]<<8 -> load 16-bit, reverse bytes
+(ORW x0:(MOVBZloadidx [i] {s} p idx mem)
+ s0:(SLWconst [8] x1:(MOVBZloadidx [i+1] {s} p idx mem)))
+ && p.Op != OpSB
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && s0.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0)
+ && clobber(x1)
+ && clobber(s0)
+ -> @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx <v.Type> [i] {s} p idx mem))
+
+// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 -> load 32-bit, reverse bytes
+(ORW o0:(ORW o1:(ORW
+ x0:(MOVBZloadidx [i] {s} p idx mem)
+ s0:(SLWconst [8] x1:(MOVBZloadidx [i+1] {s} p idx mem)))
+ s1:(SLWconst [16] x2:(MOVBZloadidx [i+2] {s} p idx mem)))
+ s2:(SLWconst [24] x3:(MOVBZloadidx [i+3] {s} p idx mem)))
+ && p.Op != OpSB
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && x3.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && s2.Uses == 1
+ && o0.Uses == 1
+ && o1.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3) != nil
+ && clobber(x0)
+ && clobber(x1)
+ && clobber(x2)
+ && clobber(x3)
+ && clobber(s0)
+ && clobber(s1)
+ && clobber(s2)
+ && clobber(o0)
+ && clobber(o1)
+ -> @mergePoint(b,x0,x1,x2,x3) (MOVWZreg (MOVWBRloadidx <v.Type> [i] {s} p idx mem))
+
+// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4]<<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 -> load 64-bit, reverse bytes
+(OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR
+ x0:(MOVBZloadidx [i] {s} p idx mem)
+ s0:(SLDconst [8] x1:(MOVBZloadidx [i+1] {s} p idx mem)))
+ s1:(SLDconst [16] x2:(MOVBZloadidx [i+2] {s} p idx mem)))
+ s2:(SLDconst [24] x3:(MOVBZloadidx [i+3] {s} p idx mem)))
+ s3:(SLDconst [32] x4:(MOVBZloadidx [i+4] {s} p idx mem)))
+ s4:(SLDconst [40] x5:(MOVBZloadidx [i+5] {s} p idx mem)))
+ s5:(SLDconst [48] x6:(MOVBZloadidx [i+6] {s} p idx mem)))
+ s6:(SLDconst [56] x7:(MOVBZloadidx [i+7] {s} p idx mem)))
+ && p.Op != OpSB
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && x3.Uses == 1
+ && x4.Uses == 1
+ && x5.Uses == 1
+ && x6.Uses == 1
+ && x7.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && s2.Uses == 1
+ && s3.Uses == 1
+ && s4.Uses == 1
+ && s5.Uses == 1
+ && s6.Uses == 1
+ && o0.Uses == 1
+ && o1.Uses == 1
+ && o2.Uses == 1
+ && o3.Uses == 1
+ && o4.Uses == 1
+ && o5.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
+ && clobber(x0)
+ && clobber(x1)
+ && clobber(x2)
+ && clobber(x3)
+ && clobber(x4)
+ && clobber(x5)
+ && clobber(x6)
+ && clobber(x7)
+ && clobber(s0)
+ && clobber(s1)
+ && clobber(s2)
+ && clobber(s3)
+ && clobber(s4)
+ && clobber(s5)
+ && clobber(s6)
+ && clobber(o0)
+ && clobber(o1)
+ && clobber(o2)
+ && clobber(o3)
+ && clobber(o4)
+ && clobber(o5)
+ -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDBRloadidx <v.Type> [i] {s} p idx mem)
+
+// Big endian loads.
+
+// b[1] | b[0]<<8 -> load 16-bit
+(ORW x0:(MOVBZload [i] {s} p mem)
+ s0:(SLWconst [8] x1:(MOVBZload [i-1] {s} p mem)))
+ && p.Op != OpSB
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && s0.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0)
+ && clobber(x1)
+ && clobber(s0)
+ -> @mergePoint(b,x0,x1) (MOVHZload [i-1] {s} p mem)
+
+// b[3] | b[2]<<8 | b[1]<<16 | b[0]<<24 -> load 32-bit
+(ORW o0:(ORW o1:(ORW
+ x0:(MOVBZload [i] {s} p mem)
+ s0:(SLWconst [8] x1:(MOVBZload [i-1] {s} p mem)))
+ s1:(SLWconst [16] x2:(MOVBZload [i-2] {s} p mem)))
+ s2:(SLWconst [24] x3:(MOVBZload [i-3] {s} p mem)))
+ && p.Op != OpSB
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && x3.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && s2.Uses == 1
+ && o0.Uses == 1
+ && o1.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3) != nil
+ && clobber(x0)
+ && clobber(x1)
+ && clobber(x2)
+ && clobber(x3)
+ && clobber(s0)
+ && clobber(s1)
+ && clobber(s2)
+ && clobber(o0)
+ && clobber(o1)
+ -> @mergePoint(b,x0,x1,x2,x3) (MOVWZload [i-3] {s} p mem)
+
+// b[7] | b[6]<<8 | b[5]<<16 | b[4]<<24 | b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 -> load 64-bit
+(OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR
+ x0:(MOVBZload [i] {s} p mem)
+ s0:(SLDconst [8] x1:(MOVBZload [i-1] {s} p mem)))
+ s1:(SLDconst [16] x2:(MOVBZload [i-2] {s} p mem)))
+ s2:(SLDconst [24] x3:(MOVBZload [i-3] {s} p mem)))
+ s3:(SLDconst [32] x4:(MOVBZload [i-4] {s} p mem)))
+ s4:(SLDconst [40] x5:(MOVBZload [i-5] {s} p mem)))
+ s5:(SLDconst [48] x6:(MOVBZload [i-6] {s} p mem)))
+ s6:(SLDconst [56] x7:(MOVBZload [i-7] {s} p mem)))
+ && p.Op != OpSB
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && x3.Uses == 1
+ && x4.Uses == 1
+ && x5.Uses == 1
+ && x6.Uses == 1
+ && x7.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && s2.Uses == 1
+ && s3.Uses == 1
+ && s4.Uses == 1
+ && s5.Uses == 1
+ && s6.Uses == 1
+ && o0.Uses == 1
+ && o1.Uses == 1
+ && o2.Uses == 1
+ && o3.Uses == 1
+ && o4.Uses == 1
+ && o5.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
+ && clobber(x0)
+ && clobber(x1)
+ && clobber(x2)
+ && clobber(x3)
+ && clobber(x4)
+ && clobber(x5)
+ && clobber(x6)
+ && clobber(x7)
+ && clobber(s0)
+ && clobber(s1)
+ && clobber(s2)
+ && clobber(s3)
+ && clobber(s4)
+ && clobber(s5)
+ && clobber(s6)
+ && clobber(o0)
+ && clobber(o1)
+ && clobber(o2)
+ && clobber(o3)
+ && clobber(o4)
+ && clobber(o5)
+ -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload [i-7] {s} p mem)
+
+// b[1] | b[0]<<8 -> load 16-bit
+(ORW x0:(MOVBZloadidx [i] {s} p idx mem)
+ s0:(SLWconst [8] x1:(MOVBZloadidx [i-1] {s} p idx mem)))
+ && p.Op != OpSB
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && s0.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0)
+ && clobber(x1)
+ && clobber(s0)
+ -> @mergePoint(b,x0,x1) (MOVHZloadidx <v.Type> [i-1] {s} p idx mem)
+
+// b[3] | b[2]<<8 | b[1]<<16 | b[0]<<24 -> load 32-bit
+(ORW o0:(ORW o1:(ORW
+ x0:(MOVBZloadidx [i] {s} p idx mem)
+ s0:(SLWconst [8] x1:(MOVBZloadidx [i-1] {s} p idx mem)))
+ s1:(SLWconst [16] x2:(MOVBZloadidx [i-2] {s} p idx mem)))
+ s2:(SLWconst [24] x3:(MOVBZloadidx [i-3] {s} p idx mem)))
+ && p.Op != OpSB
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && x3.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && s2.Uses == 1
+ && o0.Uses == 1
+ && o1.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3) != nil
+ && clobber(x0)
+ && clobber(x1)
+ && clobber(x2)
+ && clobber(x3)
+ && clobber(s0)
+ && clobber(s1)
+ && clobber(s2)
+ && clobber(o0)
+ && clobber(o1)
+ -> @mergePoint(b,x0,x1,x2,x3) (MOVWZloadidx <v.Type> [i-3] {s} p idx mem)
+
+// Combining byte loads into a larger (64-bit) load.
+//
+// 64-bit analogue of the 32-bit rule above: eight big-endian-ordered
+// byte loads (x7 at the lowest address, i-7, shifted into the most
+// significant byte) collapse into one 64-bit indexed load at i-7.
+// All intermediates must be single-use so clobber(...) can kill them,
+// and mergePoint picks the block where the merged load is placed.
+// b[7] | b[6]<<8 | b[5]<<16 | b[4]<<24 | b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 -> load 64-bit
+(OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR
+ x0:(MOVBZloadidx [i] {s} p idx mem)
+ s0:(SLDconst [8] x1:(MOVBZloadidx [i-1] {s} p idx mem)))
+ s1:(SLDconst [16] x2:(MOVBZloadidx [i-2] {s} p idx mem)))
+ s2:(SLDconst [24] x3:(MOVBZloadidx [i-3] {s} p idx mem)))
+ s3:(SLDconst [32] x4:(MOVBZloadidx [i-4] {s} p idx mem)))
+ s4:(SLDconst [40] x5:(MOVBZloadidx [i-5] {s} p idx mem)))
+ s5:(SLDconst [48] x6:(MOVBZloadidx [i-6] {s} p idx mem)))
+ s6:(SLDconst [56] x7:(MOVBZloadidx [i-7] {s} p idx mem)))
+ && p.Op != OpSB
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && x3.Uses == 1
+ && x4.Uses == 1
+ && x5.Uses == 1
+ && x6.Uses == 1
+ && x7.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && s2.Uses == 1
+ && s3.Uses == 1
+ && s4.Uses == 1
+ && s5.Uses == 1
+ && s6.Uses == 1
+ && o0.Uses == 1
+ && o1.Uses == 1
+ && o2.Uses == 1
+ && o3.Uses == 1
+ && o4.Uses == 1
+ && o5.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
+ && clobber(x0)
+ && clobber(x1)
+ && clobber(x2)
+ && clobber(x3)
+ && clobber(x4)
+ && clobber(x5)
+ && clobber(x6)
+ && clobber(x7)
+ && clobber(s0)
+ && clobber(s1)
+ && clobber(s2)
+ && clobber(s3)
+ && clobber(s4)
+ && clobber(s5)
+ && clobber(s6)
+ && clobber(o0)
+ && clobber(o1)
+ && clobber(o2)
+ && clobber(o3)
+ && clobber(o4)
+ && clobber(o5)
+ -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx <v.Type> [i-7] {s} p idx mem)
+
+// Combine stores into store multiples.
+//
+// The stores must form a contiguous chain through their memory
+// arguments, each store 4 (STM) or 8 (STMG) bytes below the one that
+// consumes it, sharing the same base pointer p and symbol s. The rules
+// are ordered largest-first so a 4-register merge is preferred over a
+// 3- or 2-register one. is20Bit checks that the displacement of the
+// lowest store (which becomes the AuxInt of the merged op) fits the
+// signed 20-bit displacement field. Each inner store must be
+// single-use so clobber(...) can kill it.
+// NOTE(review) p.Op != OpSB presumably excludes SB-based addresses
+// from the merged form - confirm.
+(MOVWstore [i] {s} p w3
+ x2:(MOVWstore [i-4] {s} p w2
+ x1:(MOVWstore [i-8] {s} p w1
+ x0:(MOVWstore [i-12] {s} p w0 mem))))
+ && p.Op != OpSB
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && is20Bit(i-12)
+ && clobber(x0)
+ && clobber(x1)
+ && clobber(x2)
+ -> (STM4 [i-12] {s} p w0 w1 w2 w3 mem)
+// Three consecutive 32-bit stores -> STM3.
+(MOVWstore [i] {s} p w2
+ x1:(MOVWstore [i-4] {s} p w1
+ x0:(MOVWstore [i-8] {s} p w0 mem)))
+ && p.Op != OpSB
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && is20Bit(i-8)
+ && clobber(x0)
+ && clobber(x1)
+ -> (STM3 [i-8] {s} p w0 w1 w2 mem)
+// Two consecutive 32-bit stores -> STM2.
+(MOVWstore [i] {s} p w1 x:(MOVWstore [i-4] {s} p w0 mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && is20Bit(i-4)
+ && clobber(x)
+ -> (STM2 [i-4] {s} p w0 w1 mem)
+// Same as above for 64-bit stores (STMG, 8-byte stride).
+(MOVDstore [i] {s} p w3
+ x2:(MOVDstore [i-8] {s} p w2
+ x1:(MOVDstore [i-16] {s} p w1
+ x0:(MOVDstore [i-24] {s} p w0 mem))))
+ && p.Op != OpSB
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && is20Bit(i-24)
+ && clobber(x0)
+ && clobber(x1)
+ && clobber(x2)
+ -> (STMG4 [i-24] {s} p w0 w1 w2 w3 mem)
+// Three consecutive 64-bit stores -> STMG3.
+(MOVDstore [i] {s} p w2
+ x1:(MOVDstore [i-8] {s} p w1
+ x0:(MOVDstore [i-16] {s} p w0 mem)))
+ && p.Op != OpSB
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && is20Bit(i-16)
+ && clobber(x0)
+ && clobber(x1)
+ -> (STMG3 [i-16] {s} p w0 w1 w2 mem)
+// Two consecutive 64-bit stores -> STMG2.
+(MOVDstore [i] {s} p w1 x:(MOVDstore [i-8] {s} p w0 mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && is20Bit(i-8)
+ && clobber(x)
+ -> (STMG2 [i-8] {s} p w0 w1 mem)
--- /dev/null
+// autogenerated from gen/S390X.rules: do not edit!
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "math"
+
+var _ = math.MinInt8 // in case not otherwise used
+// rewriteValueS390X dispatches v to the rewrite function generated for
+// its opcode. It reports whether any rewrite rule fired (i.e. whether
+// v was changed). Machine-generated from gen/S390X.rules; to change
+// the dispatch, edit the rules file and regenerate.
+func rewriteValueS390X(v *Value, config *Config) bool {
+ switch v.Op {
+ case OpAdd16:
+ return rewriteValueS390X_OpAdd16(v, config)
+ case OpAdd32:
+ return rewriteValueS390X_OpAdd32(v, config)
+ case OpAdd32F:
+ return rewriteValueS390X_OpAdd32F(v, config)
+ case OpAdd64:
+ return rewriteValueS390X_OpAdd64(v, config)
+ case OpAdd64F:
+ return rewriteValueS390X_OpAdd64F(v, config)
+ case OpAdd8:
+ return rewriteValueS390X_OpAdd8(v, config)
+ case OpAddPtr:
+ return rewriteValueS390X_OpAddPtr(v, config)
+ case OpAddr:
+ return rewriteValueS390X_OpAddr(v, config)
+ case OpAnd16:
+ return rewriteValueS390X_OpAnd16(v, config)
+ case OpAnd32:
+ return rewriteValueS390X_OpAnd32(v, config)
+ case OpAnd64:
+ return rewriteValueS390X_OpAnd64(v, config)
+ case OpAnd8:
+ return rewriteValueS390X_OpAnd8(v, config)
+ case OpAndB:
+ return rewriteValueS390X_OpAndB(v, config)
+ case OpAvg64u:
+ return rewriteValueS390X_OpAvg64u(v, config)
+ case OpClosureCall:
+ return rewriteValueS390X_OpClosureCall(v, config)
+ case OpCom16:
+ return rewriteValueS390X_OpCom16(v, config)
+ case OpCom32:
+ return rewriteValueS390X_OpCom32(v, config)
+ case OpCom64:
+ return rewriteValueS390X_OpCom64(v, config)
+ case OpCom8:
+ return rewriteValueS390X_OpCom8(v, config)
+ case OpConst16:
+ return rewriteValueS390X_OpConst16(v, config)
+ case OpConst32:
+ return rewriteValueS390X_OpConst32(v, config)
+ case OpConst32F:
+ return rewriteValueS390X_OpConst32F(v, config)
+ case OpConst64:
+ return rewriteValueS390X_OpConst64(v, config)
+ case OpConst64F:
+ return rewriteValueS390X_OpConst64F(v, config)
+ case OpConst8:
+ return rewriteValueS390X_OpConst8(v, config)
+ case OpConstBool:
+ return rewriteValueS390X_OpConstBool(v, config)
+ case OpConstNil:
+ return rewriteValueS390X_OpConstNil(v, config)
+ case OpConvert:
+ return rewriteValueS390X_OpConvert(v, config)
+ case OpCvt32Fto32:
+ return rewriteValueS390X_OpCvt32Fto32(v, config)
+ case OpCvt32Fto64:
+ return rewriteValueS390X_OpCvt32Fto64(v, config)
+ case OpCvt32Fto64F:
+ return rewriteValueS390X_OpCvt32Fto64F(v, config)
+ case OpCvt32to32F:
+ return rewriteValueS390X_OpCvt32to32F(v, config)
+ case OpCvt32to64F:
+ return rewriteValueS390X_OpCvt32to64F(v, config)
+ case OpCvt64Fto32:
+ return rewriteValueS390X_OpCvt64Fto32(v, config)
+ case OpCvt64Fto32F:
+ return rewriteValueS390X_OpCvt64Fto32F(v, config)
+ case OpCvt64Fto64:
+ return rewriteValueS390X_OpCvt64Fto64(v, config)
+ case OpCvt64to32F:
+ return rewriteValueS390X_OpCvt64to32F(v, config)
+ case OpCvt64to64F:
+ return rewriteValueS390X_OpCvt64to64F(v, config)
+ case OpDeferCall:
+ return rewriteValueS390X_OpDeferCall(v, config)
+ case OpDiv16:
+ return rewriteValueS390X_OpDiv16(v, config)
+ case OpDiv16u:
+ return rewriteValueS390X_OpDiv16u(v, config)
+ case OpDiv32:
+ return rewriteValueS390X_OpDiv32(v, config)
+ case OpDiv32F:
+ return rewriteValueS390X_OpDiv32F(v, config)
+ case OpDiv32u:
+ return rewriteValueS390X_OpDiv32u(v, config)
+ case OpDiv64:
+ return rewriteValueS390X_OpDiv64(v, config)
+ case OpDiv64F:
+ return rewriteValueS390X_OpDiv64F(v, config)
+ case OpDiv64u:
+ return rewriteValueS390X_OpDiv64u(v, config)
+ case OpDiv8:
+ return rewriteValueS390X_OpDiv8(v, config)
+ case OpDiv8u:
+ return rewriteValueS390X_OpDiv8u(v, config)
+ case OpEq16:
+ return rewriteValueS390X_OpEq16(v, config)
+ case OpEq32:
+ return rewriteValueS390X_OpEq32(v, config)
+ case OpEq32F:
+ return rewriteValueS390X_OpEq32F(v, config)
+ case OpEq64:
+ return rewriteValueS390X_OpEq64(v, config)
+ case OpEq64F:
+ return rewriteValueS390X_OpEq64F(v, config)
+ case OpEq8:
+ return rewriteValueS390X_OpEq8(v, config)
+ case OpEqB:
+ return rewriteValueS390X_OpEqB(v, config)
+ case OpEqPtr:
+ return rewriteValueS390X_OpEqPtr(v, config)
+ case OpGeq16:
+ return rewriteValueS390X_OpGeq16(v, config)
+ case OpGeq16U:
+ return rewriteValueS390X_OpGeq16U(v, config)
+ case OpGeq32:
+ return rewriteValueS390X_OpGeq32(v, config)
+ case OpGeq32F:
+ return rewriteValueS390X_OpGeq32F(v, config)
+ case OpGeq32U:
+ return rewriteValueS390X_OpGeq32U(v, config)
+ case OpGeq64:
+ return rewriteValueS390X_OpGeq64(v, config)
+ case OpGeq64F:
+ return rewriteValueS390X_OpGeq64F(v, config)
+ case OpGeq64U:
+ return rewriteValueS390X_OpGeq64U(v, config)
+ case OpGeq8:
+ return rewriteValueS390X_OpGeq8(v, config)
+ case OpGeq8U:
+ return rewriteValueS390X_OpGeq8U(v, config)
+ case OpGetClosurePtr:
+ return rewriteValueS390X_OpGetClosurePtr(v, config)
+ case OpGetG:
+ return rewriteValueS390X_OpGetG(v, config)
+ case OpGoCall:
+ return rewriteValueS390X_OpGoCall(v, config)
+ case OpGreater16:
+ return rewriteValueS390X_OpGreater16(v, config)
+ case OpGreater16U:
+ return rewriteValueS390X_OpGreater16U(v, config)
+ case OpGreater32:
+ return rewriteValueS390X_OpGreater32(v, config)
+ case OpGreater32F:
+ return rewriteValueS390X_OpGreater32F(v, config)
+ case OpGreater32U:
+ return rewriteValueS390X_OpGreater32U(v, config)
+ case OpGreater64:
+ return rewriteValueS390X_OpGreater64(v, config)
+ case OpGreater64F:
+ return rewriteValueS390X_OpGreater64F(v, config)
+ case OpGreater64U:
+ return rewriteValueS390X_OpGreater64U(v, config)
+ case OpGreater8:
+ return rewriteValueS390X_OpGreater8(v, config)
+ case OpGreater8U:
+ return rewriteValueS390X_OpGreater8U(v, config)
+ case OpHmul16:
+ return rewriteValueS390X_OpHmul16(v, config)
+ case OpHmul16u:
+ return rewriteValueS390X_OpHmul16u(v, config)
+ case OpHmul32:
+ return rewriteValueS390X_OpHmul32(v, config)
+ case OpHmul32u:
+ return rewriteValueS390X_OpHmul32u(v, config)
+ case OpHmul64:
+ return rewriteValueS390X_OpHmul64(v, config)
+ case OpHmul64u:
+ return rewriteValueS390X_OpHmul64u(v, config)
+ case OpHmul8:
+ return rewriteValueS390X_OpHmul8(v, config)
+ case OpHmul8u:
+ return rewriteValueS390X_OpHmul8u(v, config)
+ case OpITab:
+ return rewriteValueS390X_OpITab(v, config)
+ case OpInterCall:
+ return rewriteValueS390X_OpInterCall(v, config)
+ case OpIsInBounds:
+ return rewriteValueS390X_OpIsInBounds(v, config)
+ case OpIsNonNil:
+ return rewriteValueS390X_OpIsNonNil(v, config)
+ case OpIsSliceInBounds:
+ return rewriteValueS390X_OpIsSliceInBounds(v, config)
+ case OpLeq16:
+ return rewriteValueS390X_OpLeq16(v, config)
+ case OpLeq16U:
+ return rewriteValueS390X_OpLeq16U(v, config)
+ case OpLeq32:
+ return rewriteValueS390X_OpLeq32(v, config)
+ case OpLeq32F:
+ return rewriteValueS390X_OpLeq32F(v, config)
+ case OpLeq32U:
+ return rewriteValueS390X_OpLeq32U(v, config)
+ case OpLeq64:
+ return rewriteValueS390X_OpLeq64(v, config)
+ case OpLeq64F:
+ return rewriteValueS390X_OpLeq64F(v, config)
+ case OpLeq64U:
+ return rewriteValueS390X_OpLeq64U(v, config)
+ case OpLeq8:
+ return rewriteValueS390X_OpLeq8(v, config)
+ case OpLeq8U:
+ return rewriteValueS390X_OpLeq8U(v, config)
+ case OpLess16:
+ return rewriteValueS390X_OpLess16(v, config)
+ case OpLess16U:
+ return rewriteValueS390X_OpLess16U(v, config)
+ case OpLess32:
+ return rewriteValueS390X_OpLess32(v, config)
+ case OpLess32F:
+ return rewriteValueS390X_OpLess32F(v, config)
+ case OpLess32U:
+ return rewriteValueS390X_OpLess32U(v, config)
+ case OpLess64:
+ return rewriteValueS390X_OpLess64(v, config)
+ case OpLess64F:
+ return rewriteValueS390X_OpLess64F(v, config)
+ case OpLess64U:
+ return rewriteValueS390X_OpLess64U(v, config)
+ case OpLess8:
+ return rewriteValueS390X_OpLess8(v, config)
+ case OpLess8U:
+ return rewriteValueS390X_OpLess8U(v, config)
+ case OpLoad:
+ return rewriteValueS390X_OpLoad(v, config)
+ case OpLrot32:
+ return rewriteValueS390X_OpLrot32(v, config)
+ case OpLrot64:
+ return rewriteValueS390X_OpLrot64(v, config)
+ case OpLsh16x16:
+ return rewriteValueS390X_OpLsh16x16(v, config)
+ case OpLsh16x32:
+ return rewriteValueS390X_OpLsh16x32(v, config)
+ case OpLsh16x64:
+ return rewriteValueS390X_OpLsh16x64(v, config)
+ case OpLsh16x8:
+ return rewriteValueS390X_OpLsh16x8(v, config)
+ case OpLsh32x16:
+ return rewriteValueS390X_OpLsh32x16(v, config)
+ case OpLsh32x32:
+ return rewriteValueS390X_OpLsh32x32(v, config)
+ case OpLsh32x64:
+ return rewriteValueS390X_OpLsh32x64(v, config)
+ case OpLsh32x8:
+ return rewriteValueS390X_OpLsh32x8(v, config)
+ case OpLsh64x16:
+ return rewriteValueS390X_OpLsh64x16(v, config)
+ case OpLsh64x32:
+ return rewriteValueS390X_OpLsh64x32(v, config)
+ case OpLsh64x64:
+ return rewriteValueS390X_OpLsh64x64(v, config)
+ case OpLsh64x8:
+ return rewriteValueS390X_OpLsh64x8(v, config)
+ case OpLsh8x16:
+ return rewriteValueS390X_OpLsh8x16(v, config)
+ case OpLsh8x32:
+ return rewriteValueS390X_OpLsh8x32(v, config)
+ case OpLsh8x64:
+ return rewriteValueS390X_OpLsh8x64(v, config)
+ case OpLsh8x8:
+ return rewriteValueS390X_OpLsh8x8(v, config)
+ case OpMod16:
+ return rewriteValueS390X_OpMod16(v, config)
+ case OpMod16u:
+ return rewriteValueS390X_OpMod16u(v, config)
+ case OpMod32:
+ return rewriteValueS390X_OpMod32(v, config)
+ case OpMod32u:
+ return rewriteValueS390X_OpMod32u(v, config)
+ case OpMod64:
+ return rewriteValueS390X_OpMod64(v, config)
+ case OpMod64u:
+ return rewriteValueS390X_OpMod64u(v, config)
+ case OpMod8:
+ return rewriteValueS390X_OpMod8(v, config)
+ case OpMod8u:
+ return rewriteValueS390X_OpMod8u(v, config)
+ case OpMove:
+ return rewriteValueS390X_OpMove(v, config)
+ case OpMul16:
+ return rewriteValueS390X_OpMul16(v, config)
+ case OpMul32:
+ return rewriteValueS390X_OpMul32(v, config)
+ case OpMul32F:
+ return rewriteValueS390X_OpMul32F(v, config)
+ case OpMul64:
+ return rewriteValueS390X_OpMul64(v, config)
+ case OpMul64F:
+ return rewriteValueS390X_OpMul64F(v, config)
+ case OpMul8:
+ return rewriteValueS390X_OpMul8(v, config)
+ case OpNeg16:
+ return rewriteValueS390X_OpNeg16(v, config)
+ case OpNeg32:
+ return rewriteValueS390X_OpNeg32(v, config)
+ case OpNeg32F:
+ return rewriteValueS390X_OpNeg32F(v, config)
+ case OpNeg64:
+ return rewriteValueS390X_OpNeg64(v, config)
+ case OpNeg64F:
+ return rewriteValueS390X_OpNeg64F(v, config)
+ case OpNeg8:
+ return rewriteValueS390X_OpNeg8(v, config)
+ case OpNeq16:
+ return rewriteValueS390X_OpNeq16(v, config)
+ case OpNeq32:
+ return rewriteValueS390X_OpNeq32(v, config)
+ case OpNeq32F:
+ return rewriteValueS390X_OpNeq32F(v, config)
+ case OpNeq64:
+ return rewriteValueS390X_OpNeq64(v, config)
+ case OpNeq64F:
+ return rewriteValueS390X_OpNeq64F(v, config)
+ case OpNeq8:
+ return rewriteValueS390X_OpNeq8(v, config)
+ case OpNeqB:
+ return rewriteValueS390X_OpNeqB(v, config)
+ case OpNeqPtr:
+ return rewriteValueS390X_OpNeqPtr(v, config)
+ case OpNilCheck:
+ return rewriteValueS390X_OpNilCheck(v, config)
+ case OpNot:
+ return rewriteValueS390X_OpNot(v, config)
+ case OpOffPtr:
+ return rewriteValueS390X_OpOffPtr(v, config)
+ case OpOr16:
+ return rewriteValueS390X_OpOr16(v, config)
+ case OpOr32:
+ return rewriteValueS390X_OpOr32(v, config)
+ case OpOr64:
+ return rewriteValueS390X_OpOr64(v, config)
+ case OpOr8:
+ return rewriteValueS390X_OpOr8(v, config)
+ case OpOrB:
+ return rewriteValueS390X_OpOrB(v, config)
+ case OpRsh16Ux16:
+ return rewriteValueS390X_OpRsh16Ux16(v, config)
+ case OpRsh16Ux32:
+ return rewriteValueS390X_OpRsh16Ux32(v, config)
+ case OpRsh16Ux64:
+ return rewriteValueS390X_OpRsh16Ux64(v, config)
+ case OpRsh16Ux8:
+ return rewriteValueS390X_OpRsh16Ux8(v, config)
+ case OpRsh16x16:
+ return rewriteValueS390X_OpRsh16x16(v, config)
+ case OpRsh16x32:
+ return rewriteValueS390X_OpRsh16x32(v, config)
+ case OpRsh16x64:
+ return rewriteValueS390X_OpRsh16x64(v, config)
+ case OpRsh16x8:
+ return rewriteValueS390X_OpRsh16x8(v, config)
+ case OpRsh32Ux16:
+ return rewriteValueS390X_OpRsh32Ux16(v, config)
+ case OpRsh32Ux32:
+ return rewriteValueS390X_OpRsh32Ux32(v, config)
+ case OpRsh32Ux64:
+ return rewriteValueS390X_OpRsh32Ux64(v, config)
+ case OpRsh32Ux8:
+ return rewriteValueS390X_OpRsh32Ux8(v, config)
+ case OpRsh32x16:
+ return rewriteValueS390X_OpRsh32x16(v, config)
+ case OpRsh32x32:
+ return rewriteValueS390X_OpRsh32x32(v, config)
+ case OpRsh32x64:
+ return rewriteValueS390X_OpRsh32x64(v, config)
+ case OpRsh32x8:
+ return rewriteValueS390X_OpRsh32x8(v, config)
+ case OpRsh64Ux16:
+ return rewriteValueS390X_OpRsh64Ux16(v, config)
+ case OpRsh64Ux32:
+ return rewriteValueS390X_OpRsh64Ux32(v, config)
+ case OpRsh64Ux64:
+ return rewriteValueS390X_OpRsh64Ux64(v, config)
+ case OpRsh64Ux8:
+ return rewriteValueS390X_OpRsh64Ux8(v, config)
+ case OpRsh64x16:
+ return rewriteValueS390X_OpRsh64x16(v, config)
+ case OpRsh64x32:
+ return rewriteValueS390X_OpRsh64x32(v, config)
+ case OpRsh64x64:
+ return rewriteValueS390X_OpRsh64x64(v, config)
+ case OpRsh64x8:
+ return rewriteValueS390X_OpRsh64x8(v, config)
+ case OpRsh8Ux16:
+ return rewriteValueS390X_OpRsh8Ux16(v, config)
+ case OpRsh8Ux32:
+ return rewriteValueS390X_OpRsh8Ux32(v, config)
+ case OpRsh8Ux64:
+ return rewriteValueS390X_OpRsh8Ux64(v, config)
+ case OpRsh8Ux8:
+ return rewriteValueS390X_OpRsh8Ux8(v, config)
+ case OpRsh8x16:
+ return rewriteValueS390X_OpRsh8x16(v, config)
+ case OpRsh8x32:
+ return rewriteValueS390X_OpRsh8x32(v, config)
+ case OpRsh8x64:
+ return rewriteValueS390X_OpRsh8x64(v, config)
+ case OpRsh8x8:
+ return rewriteValueS390X_OpRsh8x8(v, config)
+ case OpS390XADD:
+ return rewriteValueS390X_OpS390XADD(v, config)
+ case OpS390XADDW:
+ return rewriteValueS390X_OpS390XADDW(v, config)
+ case OpS390XADDWconst:
+ return rewriteValueS390X_OpS390XADDWconst(v, config)
+ case OpS390XADDconst:
+ return rewriteValueS390X_OpS390XADDconst(v, config)
+ case OpS390XAND:
+ return rewriteValueS390X_OpS390XAND(v, config)
+ case OpS390XANDW:
+ return rewriteValueS390X_OpS390XANDW(v, config)
+ case OpS390XANDWconst:
+ return rewriteValueS390X_OpS390XANDWconst(v, config)
+ case OpS390XANDconst:
+ return rewriteValueS390X_OpS390XANDconst(v, config)
+ case OpS390XCMP:
+ return rewriteValueS390X_OpS390XCMP(v, config)
+ case OpS390XCMPU:
+ return rewriteValueS390X_OpS390XCMPU(v, config)
+ case OpS390XCMPUconst:
+ return rewriteValueS390X_OpS390XCMPUconst(v, config)
+ case OpS390XCMPW:
+ return rewriteValueS390X_OpS390XCMPW(v, config)
+ case OpS390XCMPWU:
+ return rewriteValueS390X_OpS390XCMPWU(v, config)
+ case OpS390XCMPWUconst:
+ return rewriteValueS390X_OpS390XCMPWUconst(v, config)
+ case OpS390XCMPWconst:
+ return rewriteValueS390X_OpS390XCMPWconst(v, config)
+ case OpS390XCMPconst:
+ return rewriteValueS390X_OpS390XCMPconst(v, config)
+ case OpS390XFMOVDload:
+ return rewriteValueS390X_OpS390XFMOVDload(v, config)
+ case OpS390XFMOVDloadidx:
+ return rewriteValueS390X_OpS390XFMOVDloadidx(v, config)
+ case OpS390XFMOVDstore:
+ return rewriteValueS390X_OpS390XFMOVDstore(v, config)
+ case OpS390XFMOVDstoreidx:
+ return rewriteValueS390X_OpS390XFMOVDstoreidx(v, config)
+ case OpS390XFMOVSload:
+ return rewriteValueS390X_OpS390XFMOVSload(v, config)
+ case OpS390XFMOVSloadidx:
+ return rewriteValueS390X_OpS390XFMOVSloadidx(v, config)
+ case OpS390XFMOVSstore:
+ return rewriteValueS390X_OpS390XFMOVSstore(v, config)
+ case OpS390XFMOVSstoreidx:
+ return rewriteValueS390X_OpS390XFMOVSstoreidx(v, config)
+ case OpS390XMOVBZload:
+ return rewriteValueS390X_OpS390XMOVBZload(v, config)
+ case OpS390XMOVBZloadidx:
+ return rewriteValueS390X_OpS390XMOVBZloadidx(v, config)
+ case OpS390XMOVBZreg:
+ return rewriteValueS390X_OpS390XMOVBZreg(v, config)
+ case OpS390XMOVBload:
+ return rewriteValueS390X_OpS390XMOVBload(v, config)
+ case OpS390XMOVBreg:
+ return rewriteValueS390X_OpS390XMOVBreg(v, config)
+ case OpS390XMOVBstore:
+ return rewriteValueS390X_OpS390XMOVBstore(v, config)
+ case OpS390XMOVBstoreconst:
+ return rewriteValueS390X_OpS390XMOVBstoreconst(v, config)
+ case OpS390XMOVBstoreidx:
+ return rewriteValueS390X_OpS390XMOVBstoreidx(v, config)
+ case OpS390XMOVDEQ:
+ return rewriteValueS390X_OpS390XMOVDEQ(v, config)
+ case OpS390XMOVDGE:
+ return rewriteValueS390X_OpS390XMOVDGE(v, config)
+ case OpS390XMOVDGT:
+ return rewriteValueS390X_OpS390XMOVDGT(v, config)
+ case OpS390XMOVDLE:
+ return rewriteValueS390X_OpS390XMOVDLE(v, config)
+ case OpS390XMOVDLT:
+ return rewriteValueS390X_OpS390XMOVDLT(v, config)
+ case OpS390XMOVDNE:
+ return rewriteValueS390X_OpS390XMOVDNE(v, config)
+ case OpS390XMOVDaddr:
+ return rewriteValueS390X_OpS390XMOVDaddr(v, config)
+ case OpS390XMOVDaddridx:
+ return rewriteValueS390X_OpS390XMOVDaddridx(v, config)
+ case OpS390XMOVDload:
+ return rewriteValueS390X_OpS390XMOVDload(v, config)
+ case OpS390XMOVDloadidx:
+ return rewriteValueS390X_OpS390XMOVDloadidx(v, config)
+ case OpS390XMOVDstore:
+ return rewriteValueS390X_OpS390XMOVDstore(v, config)
+ case OpS390XMOVDstoreconst:
+ return rewriteValueS390X_OpS390XMOVDstoreconst(v, config)
+ case OpS390XMOVDstoreidx:
+ return rewriteValueS390X_OpS390XMOVDstoreidx(v, config)
+ case OpS390XMOVHZload:
+ return rewriteValueS390X_OpS390XMOVHZload(v, config)
+ case OpS390XMOVHZloadidx:
+ return rewriteValueS390X_OpS390XMOVHZloadidx(v, config)
+ case OpS390XMOVHZreg:
+ return rewriteValueS390X_OpS390XMOVHZreg(v, config)
+ case OpS390XMOVHload:
+ return rewriteValueS390X_OpS390XMOVHload(v, config)
+ case OpS390XMOVHreg:
+ return rewriteValueS390X_OpS390XMOVHreg(v, config)
+ case OpS390XMOVHstore:
+ return rewriteValueS390X_OpS390XMOVHstore(v, config)
+ case OpS390XMOVHstoreconst:
+ return rewriteValueS390X_OpS390XMOVHstoreconst(v, config)
+ case OpS390XMOVHstoreidx:
+ return rewriteValueS390X_OpS390XMOVHstoreidx(v, config)
+ case OpS390XMOVWZload:
+ return rewriteValueS390X_OpS390XMOVWZload(v, config)
+ case OpS390XMOVWZloadidx:
+ return rewriteValueS390X_OpS390XMOVWZloadidx(v, config)
+ case OpS390XMOVWZreg:
+ return rewriteValueS390X_OpS390XMOVWZreg(v, config)
+ case OpS390XMOVWload:
+ return rewriteValueS390X_OpS390XMOVWload(v, config)
+ case OpS390XMOVWreg:
+ return rewriteValueS390X_OpS390XMOVWreg(v, config)
+ case OpS390XMOVWstore:
+ return rewriteValueS390X_OpS390XMOVWstore(v, config)
+ case OpS390XMOVWstoreconst:
+ return rewriteValueS390X_OpS390XMOVWstoreconst(v, config)
+ case OpS390XMOVWstoreidx:
+ return rewriteValueS390X_OpS390XMOVWstoreidx(v, config)
+ case OpS390XMULLD:
+ return rewriteValueS390X_OpS390XMULLD(v, config)
+ case OpS390XMULLDconst:
+ return rewriteValueS390X_OpS390XMULLDconst(v, config)
+ case OpS390XMULLW:
+ return rewriteValueS390X_OpS390XMULLW(v, config)
+ case OpS390XMULLWconst:
+ return rewriteValueS390X_OpS390XMULLWconst(v, config)
+ case OpS390XNEG:
+ return rewriteValueS390X_OpS390XNEG(v, config)
+ case OpS390XNEGW:
+ return rewriteValueS390X_OpS390XNEGW(v, config)
+ case OpS390XNOT:
+ return rewriteValueS390X_OpS390XNOT(v, config)
+ case OpS390XNOTW:
+ return rewriteValueS390X_OpS390XNOTW(v, config)
+ case OpS390XOR:
+ return rewriteValueS390X_OpS390XOR(v, config)
+ case OpS390XORW:
+ return rewriteValueS390X_OpS390XORW(v, config)
+ case OpS390XORWconst:
+ return rewriteValueS390X_OpS390XORWconst(v, config)
+ case OpS390XORconst:
+ return rewriteValueS390X_OpS390XORconst(v, config)
+ case OpS390XSLD:
+ return rewriteValueS390X_OpS390XSLD(v, config)
+ case OpS390XSLW:
+ return rewriteValueS390X_OpS390XSLW(v, config)
+ case OpS390XSRAD:
+ return rewriteValueS390X_OpS390XSRAD(v, config)
+ case OpS390XSRADconst:
+ return rewriteValueS390X_OpS390XSRADconst(v, config)
+ case OpS390XSRAW:
+ return rewriteValueS390X_OpS390XSRAW(v, config)
+ case OpS390XSRAWconst:
+ return rewriteValueS390X_OpS390XSRAWconst(v, config)
+ case OpS390XSRD:
+ return rewriteValueS390X_OpS390XSRD(v, config)
+ case OpS390XSRW:
+ return rewriteValueS390X_OpS390XSRW(v, config)
+ case OpS390XSUB:
+ return rewriteValueS390X_OpS390XSUB(v, config)
+ case OpS390XSUBEWcarrymask:
+ return rewriteValueS390X_OpS390XSUBEWcarrymask(v, config)
+ case OpS390XSUBEcarrymask:
+ return rewriteValueS390X_OpS390XSUBEcarrymask(v, config)
+ case OpS390XSUBW:
+ return rewriteValueS390X_OpS390XSUBW(v, config)
+ case OpS390XSUBWconst:
+ return rewriteValueS390X_OpS390XSUBWconst(v, config)
+ case OpS390XSUBconst:
+ return rewriteValueS390X_OpS390XSUBconst(v, config)
+ case OpS390XXOR:
+ return rewriteValueS390X_OpS390XXOR(v, config)
+ case OpS390XXORW:
+ return rewriteValueS390X_OpS390XXORW(v, config)
+ case OpS390XXORWconst:
+ return rewriteValueS390X_OpS390XXORWconst(v, config)
+ case OpS390XXORconst:
+ return rewriteValueS390X_OpS390XXORconst(v, config)
+ case OpSignExt16to32:
+ return rewriteValueS390X_OpSignExt16to32(v, config)
+ case OpSignExt16to64:
+ return rewriteValueS390X_OpSignExt16to64(v, config)
+ case OpSignExt32to64:
+ return rewriteValueS390X_OpSignExt32to64(v, config)
+ case OpSignExt8to16:
+ return rewriteValueS390X_OpSignExt8to16(v, config)
+ case OpSignExt8to32:
+ return rewriteValueS390X_OpSignExt8to32(v, config)
+ case OpSignExt8to64:
+ return rewriteValueS390X_OpSignExt8to64(v, config)
+ case OpSqrt:
+ return rewriteValueS390X_OpSqrt(v, config)
+ case OpStaticCall:
+ return rewriteValueS390X_OpStaticCall(v, config)
+ case OpStore:
+ return rewriteValueS390X_OpStore(v, config)
+ case OpSub16:
+ return rewriteValueS390X_OpSub16(v, config)
+ case OpSub32:
+ return rewriteValueS390X_OpSub32(v, config)
+ case OpSub32F:
+ return rewriteValueS390X_OpSub32F(v, config)
+ case OpSub64:
+ return rewriteValueS390X_OpSub64(v, config)
+ case OpSub64F:
+ return rewriteValueS390X_OpSub64F(v, config)
+ case OpSub8:
+ return rewriteValueS390X_OpSub8(v, config)
+ case OpSubPtr:
+ return rewriteValueS390X_OpSubPtr(v, config)
+ case OpTrunc16to8:
+ return rewriteValueS390X_OpTrunc16to8(v, config)
+ case OpTrunc32to16:
+ return rewriteValueS390X_OpTrunc32to16(v, config)
+ case OpTrunc32to8:
+ return rewriteValueS390X_OpTrunc32to8(v, config)
+ case OpTrunc64to16:
+ return rewriteValueS390X_OpTrunc64to16(v, config)
+ case OpTrunc64to32:
+ return rewriteValueS390X_OpTrunc64to32(v, config)
+ case OpTrunc64to8:
+ return rewriteValueS390X_OpTrunc64to8(v, config)
+ case OpXor16:
+ return rewriteValueS390X_OpXor16(v, config)
+ case OpXor32:
+ return rewriteValueS390X_OpXor32(v, config)
+ case OpXor64:
+ return rewriteValueS390X_OpXor64(v, config)
+ case OpXor8:
+ return rewriteValueS390X_OpXor8(v, config)
+ case OpZero:
+ return rewriteValueS390X_OpZero(v, config)
+ case OpZeroExt16to32:
+ return rewriteValueS390X_OpZeroExt16to32(v, config)
+ case OpZeroExt16to64:
+ return rewriteValueS390X_OpZeroExt16to64(v, config)
+ case OpZeroExt32to64:
+ return rewriteValueS390X_OpZeroExt32to64(v, config)
+ case OpZeroExt8to16:
+ return rewriteValueS390X_OpZeroExt8to16(v, config)
+ case OpZeroExt8to32:
+ return rewriteValueS390X_OpZeroExt8to32(v, config)
+ case OpZeroExt8to64:
+ return rewriteValueS390X_OpZeroExt8to64(v, config)
+ }
+ return false
+}
+// Generated lowerings of generic Add*/AddPtr/Addr ops to S390X ops.
+// 16/8-bit adds use the 32-bit ADDW; 64-bit and pointer adds use ADD.
+// Each function unconditionally rewrites v in place and returns true.
+func rewriteValueS390X_OpAdd16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add16 x y)
+ // cond:
+ // result: (ADDW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XADDW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueS390X_OpAdd32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32 x y)
+ // cond:
+ // result: (ADDW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XADDW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueS390X_OpAdd32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32F x y)
+ // cond:
+ // result: (FADDS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XFADDS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueS390X_OpAdd64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add64 x y)
+ // cond:
+ // result: (ADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueS390X_OpAdd64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add64F x y)
+ // cond:
+ // result: (FADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XFADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueS390X_OpAdd8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add8 x y)
+ // cond:
+ // result: (ADDW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XADDW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueS390X_OpAddPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (AddPtr x y)
+ // cond:
+ // result: (ADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+// Addr lowers to MOVDaddr, carrying the symbol through the Aux field.
+func rewriteValueS390X_OpAddr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Addr {sym} base)
+ // cond:
+ // result: (MOVDaddr {sym} base)
+ for {
+ sym := v.Aux
+ base := v.Args[0]
+ v.reset(OpS390XMOVDaddr)
+ v.Aux = sym
+ v.AddArg(base)
+ return true
+ }
+}
+// Generated lowerings of the generic And*/AndB ops: sub-64-bit and
+// boolean ANDs use the 32-bit ANDW; 64-bit AND uses AND.
+func rewriteValueS390X_OpAnd16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And16 x y)
+ // cond:
+ // result: (ANDW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XANDW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueS390X_OpAnd32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And32 x y)
+ // cond:
+ // result: (ANDW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XANDW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueS390X_OpAnd64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And64 x y)
+ // cond:
+ // result: (AND x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XAND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueS390X_OpAnd8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And8 x y)
+ // cond:
+ // result: (ANDW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XANDW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueS390X_OpAndB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (AndB x y)
+ // cond:
+ // result: (ANDW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XANDW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+// Avg64u is expanded to (x>>1) + (y>>1) + (x&y&1), which computes the
+// unsigned average without the intermediate x+y overflowing 64 bits.
+func rewriteValueS390X_OpAvg64u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Avg64u <t> x y)
+ // cond:
+ // result: (ADD (ADD <t> (SRDconst <t> x [1]) (SRDconst <t> y [1])) (ANDconst <t> (AND <t> x y) [1]))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XADD)
+ v0 := b.NewValue0(v.Line, OpS390XADD, t)
+ v1 := b.NewValue0(v.Line, OpS390XSRDconst, t)
+ v1.AuxInt = 1
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpS390XSRDconst, t)
+ v2.AuxInt = 1
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v3 := b.NewValue0(v.Line, OpS390XANDconst, t)
+ v3.AuxInt = 1
+ v4 := b.NewValue0(v.Line, OpS390XAND, t)
+ v4.AddArg(x)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg(v3)
+ return true
+ }
+}
+// ClosureCall lowers to CALLclosure, preserving the argument width in
+// AuxInt and the entry/closure/mem operands in order.
+func rewriteValueS390X_OpClosureCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ClosureCall [argwid] entry closure mem)
+ // cond:
+ // result: (CALLclosure [argwid] entry closure mem)
+ for {
+ argwid := v.AuxInt
+ entry := v.Args[0]
+ closure := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpS390XCALLclosure)
+ v.AuxInt = argwid
+ v.AddArg(entry)
+ v.AddArg(closure)
+ v.AddArg(mem)
+ return true
+ }
+}
+// Com* (bitwise complement) lowers to NOTW for widths up to 32 bits
+// and NOT for 64 bits.
+func rewriteValueS390X_OpCom16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com16 x)
+ // cond:
+ // result: (NOTW x)
+ for {
+ x := v.Args[0]
+ v.reset(OpS390XNOTW)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueS390X_OpCom32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com32 x)
+ // cond:
+ // result: (NOTW x)
+ for {
+ x := v.Args[0]
+ v.reset(OpS390XNOTW)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueS390X_OpCom64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com64 x)
+ // cond:
+ // result: (NOT x)
+ for {
+ x := v.Args[0]
+ v.reset(OpS390XNOT)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueS390X_OpCom8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com8 x)
+ // cond:
+ // result: (NOTW x)
+ for {
+ x := v.Args[0]
+ v.reset(OpS390XNOTW)
+ v.AddArg(x)
+ return true
+ }
+}
+// Generated lowerings of generic constants: all integer widths become
+// MOVDconst; 32/64-bit float constants become FMOVSconst/FMOVDconst.
+// The constant value is carried through AuxInt unchanged.
+func rewriteValueS390X_OpConst16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const16 [val])
+ // cond:
+ // result: (MOVDconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueS390X_OpConst32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const32 [val])
+ // cond:
+ // result: (MOVDconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueS390X_OpConst32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const32F [val])
+ // cond:
+ // result: (FMOVSconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpS390XFMOVSconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueS390X_OpConst64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const64 [val])
+ // cond:
+ // result: (MOVDconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueS390X_OpConst64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const64F [val])
+ // cond:
+ // result: (FMOVDconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpS390XFMOVDconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueS390X_OpConst8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const8 [val])
+ // cond:
+ // result: (MOVDconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = val
+ return true
+ }
+}
+// Note: the generated local b (the bool's AuxInt) shadows the enclosing
+// block variable b; harmless since b is otherwise unused here.
+func rewriteValueS390X_OpConstBool(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ConstBool [b])
+ // cond:
+ // result: (MOVDconst [b])
+ for {
+ b := v.AuxInt
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = b
+ return true
+ }
+}
+func rewriteValueS390X_OpConstNil(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ConstNil)
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+}
+// rewriteValueS390X_OpConvert lowers (Convert <t> x mem) to
+// (MOVDconvert <t> x mem), preserving the original type t on the result.
+func rewriteValueS390X_OpConvert(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Convert <t> x mem)
+	// cond:
+	// result: (MOVDconvert <t> x mem)
+	for {
+		t := v.Type
+		x := v.Args[0]
+		mem := v.Args[1]
+		v.reset(OpS390XMOVDconvert)
+		v.Type = t
+		v.AddArg(x)
+		v.AddArg(mem)
+		return true
+	}
+}
+// rewriteValueS390X_OpCvt32Fto32 lowers (Cvt32Fto32 x) to (CFEBRA x).
+func rewriteValueS390X_OpCvt32Fto32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Cvt32Fto32 x)
+	// cond:
+	// result: (CFEBRA x)
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XCFEBRA)
+		v.AddArg(x)
+		return true
+	}
+}
+// rewriteValueS390X_OpCvt32Fto64 lowers (Cvt32Fto64 x) to (CGEBRA x).
+func rewriteValueS390X_OpCvt32Fto64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Cvt32Fto64 x)
+	// cond:
+	// result: (CGEBRA x)
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XCGEBRA)
+		v.AddArg(x)
+		return true
+	}
+}
+// rewriteValueS390X_OpCvt32Fto64F lowers (Cvt32Fto64F x) to (LDEBR x).
+func rewriteValueS390X_OpCvt32Fto64F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Cvt32Fto64F x)
+	// cond:
+	// result: (LDEBR x)
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XLDEBR)
+		v.AddArg(x)
+		return true
+	}
+}
+// rewriteValueS390X_OpCvt32to32F lowers (Cvt32to32F x) to (CEFBRA x).
+func rewriteValueS390X_OpCvt32to32F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Cvt32to32F x)
+	// cond:
+	// result: (CEFBRA x)
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XCEFBRA)
+		v.AddArg(x)
+		return true
+	}
+}
+// rewriteValueS390X_OpCvt32to64F lowers (Cvt32to64F x) to (CDFBRA x).
+func rewriteValueS390X_OpCvt32to64F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Cvt32to64F x)
+	// cond:
+	// result: (CDFBRA x)
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XCDFBRA)
+		v.AddArg(x)
+		return true
+	}
+}
+// rewriteValueS390X_OpCvt64Fto32 lowers (Cvt64Fto32 x) to (CFDBRA x).
+func rewriteValueS390X_OpCvt64Fto32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Cvt64Fto32 x)
+	// cond:
+	// result: (CFDBRA x)
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XCFDBRA)
+		v.AddArg(x)
+		return true
+	}
+}
+// rewriteValueS390X_OpCvt64Fto32F lowers (Cvt64Fto32F x) to (LEDBR x).
+func rewriteValueS390X_OpCvt64Fto32F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Cvt64Fto32F x)
+	// cond:
+	// result: (LEDBR x)
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XLEDBR)
+		v.AddArg(x)
+		return true
+	}
+}
+// rewriteValueS390X_OpCvt64Fto64 lowers (Cvt64Fto64 x) to (CGDBRA x).
+func rewriteValueS390X_OpCvt64Fto64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Cvt64Fto64 x)
+	// cond:
+	// result: (CGDBRA x)
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XCGDBRA)
+		v.AddArg(x)
+		return true
+	}
+}
+// rewriteValueS390X_OpCvt64to32F lowers (Cvt64to32F x) to (CEGBRA x).
+func rewriteValueS390X_OpCvt64to32F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Cvt64to32F x)
+	// cond:
+	// result: (CEGBRA x)
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XCEGBRA)
+		v.AddArg(x)
+		return true
+	}
+}
+// rewriteValueS390X_OpCvt64to64F lowers (Cvt64to64F x) to (CDGBRA x).
+func rewriteValueS390X_OpCvt64to64F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Cvt64to64F x)
+	// cond:
+	// result: (CDGBRA x)
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XCDGBRA)
+		v.AddArg(x)
+		return true
+	}
+}
+// rewriteValueS390X_OpDeferCall lowers (DeferCall [argwid] mem) to
+// (CALLdefer [argwid] mem); argwid (argument area width) is preserved in AuxInt.
+func rewriteValueS390X_OpDeferCall(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (DeferCall [argwid] mem)
+	// cond:
+	// result: (CALLdefer [argwid] mem)
+	for {
+		argwid := v.AuxInt
+		mem := v.Args[0]
+		v.reset(OpS390XCALLdefer)
+		v.AuxInt = argwid
+		v.AddArg(mem)
+		return true
+	}
+}
+// rewriteValueS390X_OpDiv16 lowers (Div16 x y) to
+// (DIVW (MOVHreg x) (MOVHreg y)): both 16-bit operands are sign-extended
+// before the 32-bit signed divide.
+func rewriteValueS390X_OpDiv16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Div16 x y)
+	// cond:
+	// result: (DIVW (MOVHreg x) (MOVHreg y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XDIVW)
+		v0 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+		v0.AddArg(x)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+		v1.AddArg(y)
+		v.AddArg(v1)
+		return true
+	}
+}
+// rewriteValueS390X_OpDiv16u lowers (Div16u x y) to
+// (DIVWU (MOVHZreg x) (MOVHZreg y)): both 16-bit operands are zero-extended
+// before the 32-bit unsigned divide.
+func rewriteValueS390X_OpDiv16u(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Div16u x y)
+	// cond:
+	// result: (DIVWU (MOVHZreg x) (MOVHZreg y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XDIVWU)
+		v0 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v0.AddArg(x)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v1.AddArg(y)
+		v.AddArg(v1)
+		return true
+	}
+}
+// rewriteValueS390X_OpDiv32 lowers (Div32 x y) to (DIVW (MOVWreg x) y).
+// Per the lowering rules, DIVW takes a 64-bit dividend and a 32-bit divisor,
+// so only the dividend needs the sign extension.
+func rewriteValueS390X_OpDiv32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Div32 x y)
+	// cond:
+	// result: (DIVW (MOVWreg x) y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XDIVW)
+		v0 := b.NewValue0(v.Line, OpS390XMOVWreg, config.fe.TypeInt64())
+		v0.AddArg(x)
+		v.AddArg(v0)
+		v.AddArg(y)
+		return true
+	}
+}
+// rewriteValueS390X_OpDiv32F lowers (Div32F x y) to (FDIVS x y).
+func rewriteValueS390X_OpDiv32F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Div32F x y)
+	// cond:
+	// result: (FDIVS x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XFDIVS)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+// rewriteValueS390X_OpDiv32u lowers (Div32u x y) to (DIVWU (MOVWZreg x) y).
+// Only the dividend is zero-extended; DIVWU takes a 32-bit divisor directly.
+func rewriteValueS390X_OpDiv32u(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Div32u x y)
+	// cond:
+	// result: (DIVWU (MOVWZreg x) y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XDIVWU)
+		v0 := b.NewValue0(v.Line, OpS390XMOVWZreg, config.fe.TypeUInt64())
+		v0.AddArg(x)
+		v.AddArg(v0)
+		v.AddArg(y)
+		return true
+	}
+}
+// rewriteValueS390X_OpDiv64 lowers (Div64 x y) to (DIVD x y).
+func rewriteValueS390X_OpDiv64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Div64 x y)
+	// cond:
+	// result: (DIVD x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XDIVD)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+// rewriteValueS390X_OpDiv64F lowers (Div64F x y) to (FDIV x y).
+func rewriteValueS390X_OpDiv64F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Div64F x y)
+	// cond:
+	// result: (FDIV x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XFDIV)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+// rewriteValueS390X_OpDiv64u lowers (Div64u x y) to (DIVDU x y).
+func rewriteValueS390X_OpDiv64u(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Div64u x y)
+	// cond:
+	// result: (DIVDU x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XDIVDU)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+// rewriteValueS390X_OpDiv8 lowers (Div8 x y) to
+// (DIVW (MOVBreg x) (MOVBreg y)): both 8-bit operands are sign-extended
+// before the 32-bit signed divide.
+func rewriteValueS390X_OpDiv8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Div8 x y)
+	// cond:
+	// result: (DIVW (MOVBreg x) (MOVBreg y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XDIVW)
+		v0 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+		v0.AddArg(x)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+		v1.AddArg(y)
+		v.AddArg(v1)
+		return true
+	}
+}
+// rewriteValueS390X_OpDiv8u lowers (Div8u x y) to
+// (DIVWU (MOVBZreg x) (MOVBZreg y)): both 8-bit operands are zero-extended
+// before the 32-bit unsigned divide.
+func rewriteValueS390X_OpDiv8u(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Div8u x y)
+	// cond:
+	// result: (DIVWU (MOVBZreg x) (MOVBZreg y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XDIVWU)
+		v0 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+		v0.AddArg(x)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+		v1.AddArg(y)
+		v.AddArg(v1)
+		return true
+	}
+}
+// rewriteValueS390X_OpEq16 lowers (Eq16 x y) to a conditional move:
+// MOVDEQ selects 1 over 0 based on (CMP (MOVHreg x) (MOVHreg y)),
+// sign-extending the 16-bit operands so the 64-bit compare is valid.
+func rewriteValueS390X_OpEq16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Eq16 x y)
+	// cond:
+	// result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDEQ)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+		v3 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+		v3.AddArg(x)
+		v2.AddArg(v3)
+		v4 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+		v4.AddArg(y)
+		v2.AddArg(v4)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpEq32 lowers (Eq32 x y) to a conditional move:
+// MOVDEQ selects 1 over 0 based on the 32-bit compare (CMPW x y).
+func rewriteValueS390X_OpEq32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Eq32 x y)
+	// cond:
+	// result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDEQ)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMPW, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpEq32F lowers (Eq32F x y) to a conditional move:
+// MOVDEQ selects 1 over 0 based on the single-precision compare (FCMPS x y).
+func rewriteValueS390X_OpEq32F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Eq32F x y)
+	// cond:
+	// result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDEQ)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XFCMPS, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpEq64 lowers (Eq64 x y) to a conditional move:
+// MOVDEQ selects 1 over 0 based on the 64-bit compare (CMP x y).
+func rewriteValueS390X_OpEq64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Eq64 x y)
+	// cond:
+	// result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDEQ)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpEq64F lowers (Eq64F x y) to a conditional move:
+// MOVDEQ selects 1 over 0 based on the double-precision compare (FCMP x y).
+func rewriteValueS390X_OpEq64F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Eq64F x y)
+	// cond:
+	// result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDEQ)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XFCMP, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpEq8 lowers (Eq8 x y) to a conditional move:
+// MOVDEQ selects 1 over 0 based on (CMP (MOVBreg x) (MOVBreg y)),
+// sign-extending the 8-bit operands so the 64-bit compare is valid.
+func rewriteValueS390X_OpEq8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Eq8 x y)
+	// cond:
+	// result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDEQ)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+		v3 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+		v3.AddArg(x)
+		v2.AddArg(v3)
+		v4 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+		v4.AddArg(y)
+		v2.AddArg(v4)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpEqB lowers boolean equality (EqB x y) exactly like Eq8:
+// MOVDEQ selects 1 over 0 based on (CMP (MOVBreg x) (MOVBreg y)).
+func rewriteValueS390X_OpEqB(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (EqB x y)
+	// cond:
+	// result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDEQ)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+		v3 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+		v3.AddArg(x)
+		v2.AddArg(v3)
+		v4 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+		v4.AddArg(y)
+		v2.AddArg(v4)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpEqPtr lowers pointer equality (EqPtr x y) like Eq64:
+// MOVDEQ selects 1 over 0 based on the 64-bit compare (CMP x y).
+func rewriteValueS390X_OpEqPtr(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (EqPtr x y)
+	// cond:
+	// result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDEQ)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpGeq16 lowers (Geq16 x y) to a conditional move:
+// MOVDGE selects 1 over 0 based on the signed compare of the
+// sign-extended 16-bit operands.
+func rewriteValueS390X_OpGeq16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Geq16 x y)
+	// cond:
+	// result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDGE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+		v3 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+		v3.AddArg(x)
+		v2.AddArg(v3)
+		v4 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+		v4.AddArg(y)
+		v2.AddArg(v4)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpGeq16U lowers (Geq16U x y) to a conditional move:
+// MOVDGE selects 1 over 0 based on the unsigned compare (CMPU) of the
+// zero-extended 16-bit operands.
+func rewriteValueS390X_OpGeq16U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Geq16U x y)
+	// cond:
+	// result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDGE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
+		v3 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v3.AddArg(x)
+		v2.AddArg(v3)
+		v4 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v4.AddArg(y)
+		v2.AddArg(v4)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpGeq32 lowers (Geq32 x y) to a conditional move:
+// MOVDGE selects 1 over 0 based on the 32-bit signed compare (CMPW x y).
+func rewriteValueS390X_OpGeq32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Geq32 x y)
+	// cond:
+	// result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDGE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMPW, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpGeq32F lowers (Geq32F x y) to a conditional move on
+// (FCMPS x y). The "noinv" MOVDGEnoinv variant is used for float compares
+// (presumably for correct unordered/NaN handling — see the op definitions).
+func rewriteValueS390X_OpGeq32F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Geq32F x y)
+	// cond:
+	// result: (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDGEnoinv)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XFCMPS, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpGeq32U lowers (Geq32U x y) to a conditional move:
+// MOVDGE selects 1 over 0 based on the 32-bit unsigned compare (CMPWU x y).
+func rewriteValueS390X_OpGeq32U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Geq32U x y)
+	// cond:
+	// result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDGE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMPWU, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpGeq64 lowers (Geq64 x y) to a conditional move:
+// MOVDGE selects 1 over 0 based on the 64-bit signed compare (CMP x y).
+func rewriteValueS390X_OpGeq64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Geq64 x y)
+	// cond:
+	// result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDGE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpGeq64F lowers (Geq64F x y) to a conditional move on
+// (FCMP x y), using the "noinv" MOVDGEnoinv variant as for Geq32F.
+func rewriteValueS390X_OpGeq64F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Geq64F x y)
+	// cond:
+	// result: (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDGEnoinv)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XFCMP, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpGeq64U lowers (Geq64U x y) to a conditional move:
+// MOVDGE selects 1 over 0 based on the 64-bit unsigned compare (CMPU x y).
+func rewriteValueS390X_OpGeq64U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Geq64U x y)
+	// cond:
+	// result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDGE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpGeq8 lowers (Geq8 x y) to a conditional move:
+// MOVDGE selects 1 over 0 based on the signed compare of the
+// sign-extended 8-bit operands.
+func rewriteValueS390X_OpGeq8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Geq8 x y)
+	// cond:
+	// result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDGE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+		v3 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+		v3.AddArg(x)
+		v2.AddArg(v3)
+		v4 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+		v4.AddArg(y)
+		v2.AddArg(v4)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpGeq8U lowers (Geq8U x y) to a conditional move:
+// MOVDGE selects 1 over 0 based on the unsigned compare (CMPU) of the
+// zero-extended 8-bit operands.
+func rewriteValueS390X_OpGeq8U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Geq8U x y)
+	// cond:
+	// result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDGE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
+		v3 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+		v3.AddArg(x)
+		v2.AddArg(v3)
+		v4 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+		v4.AddArg(y)
+		v2.AddArg(v4)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpGetClosurePtr lowers (GetClosurePtr) to
+// (LoweredGetClosurePtr).
+func rewriteValueS390X_OpGetClosurePtr(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (GetClosurePtr)
+	// cond:
+	// result: (LoweredGetClosurePtr)
+	for {
+		v.reset(OpS390XLoweredGetClosurePtr)
+		return true
+	}
+}
+// rewriteValueS390X_OpGetG lowers (GetG mem) to (LoweredGetG mem).
+func rewriteValueS390X_OpGetG(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (GetG mem)
+	// cond:
+	// result: (LoweredGetG mem)
+	for {
+		mem := v.Args[0]
+		v.reset(OpS390XLoweredGetG)
+		v.AddArg(mem)
+		return true
+	}
+}
+// rewriteValueS390X_OpGoCall lowers (GoCall [argwid] mem) to
+// (CALLgo [argwid] mem); argwid is preserved in AuxInt.
+func rewriteValueS390X_OpGoCall(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (GoCall [argwid] mem)
+	// cond:
+	// result: (CALLgo [argwid] mem)
+	for {
+		argwid := v.AuxInt
+		mem := v.Args[0]
+		v.reset(OpS390XCALLgo)
+		v.AuxInt = argwid
+		v.AddArg(mem)
+		return true
+	}
+}
+// rewriteValueS390X_OpGreater16 lowers (Greater16 x y) to a conditional move:
+// MOVDGT selects 1 over 0 based on the signed compare of the
+// sign-extended 16-bit operands.
+func rewriteValueS390X_OpGreater16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Greater16 x y)
+	// cond:
+	// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDGT)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+		v3 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+		v3.AddArg(x)
+		v2.AddArg(v3)
+		v4 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+		v4.AddArg(y)
+		v2.AddArg(v4)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpGreater16U lowers (Greater16U x y) to a conditional
+// move: MOVDGT selects 1 over 0 based on the unsigned compare (CMPU) of the
+// zero-extended 16-bit operands.
+func rewriteValueS390X_OpGreater16U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Greater16U x y)
+	// cond:
+	// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDGT)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
+		v3 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v3.AddArg(x)
+		v2.AddArg(v3)
+		v4 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v4.AddArg(y)
+		v2.AddArg(v4)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpGreater32 lowers (Greater32 x y) to a conditional move:
+// MOVDGT selects 1 over 0 based on the 32-bit signed compare (CMPW x y).
+func rewriteValueS390X_OpGreater32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Greater32 x y)
+	// cond:
+	// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDGT)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMPW, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpGreater32F lowers (Greater32F x y) to a conditional
+// move on (FCMPS x y), using the "noinv" MOVDGTnoinv variant for the float
+// compare (presumably for unordered/NaN handling — see the op definitions).
+func rewriteValueS390X_OpGreater32F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Greater32F x y)
+	// cond:
+	// result: (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDGTnoinv)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XFCMPS, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpGreater32U lowers (Greater32U x y) to a conditional
+// move: MOVDGT selects 1 over 0 based on the 32-bit unsigned compare
+// (CMPWU x y).
+func rewriteValueS390X_OpGreater32U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Greater32U x y)
+	// cond:
+	// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDGT)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMPWU, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpGreater64 lowers (Greater64 x y) to a conditional move:
+// MOVDGT selects 1 over 0 based on the 64-bit signed compare (CMP x y).
+func rewriteValueS390X_OpGreater64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Greater64 x y)
+	// cond:
+	// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDGT)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpGreater64F lowers (Greater64F x y) to a conditional
+// move on (FCMP x y), using the "noinv" MOVDGTnoinv variant as for
+// Greater32F.
+func rewriteValueS390X_OpGreater64F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Greater64F x y)
+	// cond:
+	// result: (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDGTnoinv)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XFCMP, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpGreater64U lowers (Greater64U x y) to a conditional
+// move: MOVDGT selects 1 over 0 based on the 64-bit unsigned compare
+// (CMPU x y).
+func rewriteValueS390X_OpGreater64U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Greater64U x y)
+	// cond:
+	// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDGT)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpGreater8 lowers (Greater8 x y) to a conditional move:
+// MOVDGT selects 1 over 0 based on the signed compare of the
+// sign-extended 8-bit operands.
+func rewriteValueS390X_OpGreater8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Greater8 x y)
+	// cond:
+	// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDGT)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+		v3 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+		v3.AddArg(x)
+		v2.AddArg(v3)
+		v4 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+		v4.AddArg(y)
+		v2.AddArg(v4)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpGreater8U lowers (Greater8U x y) to a conditional move:
+// MOVDGT selects 1 over 0 based on the unsigned compare (CMPU) of the
+// zero-extended 8-bit operands.
+func rewriteValueS390X_OpGreater8U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Greater8U x y)
+	// cond:
+	// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDGT)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
+		v3 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+		v3.AddArg(x)
+		v2.AddArg(v3)
+		v4 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+		v4.AddArg(y)
+		v2.AddArg(v4)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpHmul16 lowers (Hmul16 x y): the sign-extended operands
+// are multiplied with MULLW and the high 16 bits of the product are extracted
+// with SRDconst [16].
+func rewriteValueS390X_OpHmul16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Hmul16 x y)
+	// cond:
+	// result: (SRDconst [16] (MULLW (MOVHreg x) (MOVHreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XSRDconst)
+		v.AuxInt = 16
+		v0 := b.NewValue0(v.Line, OpS390XMULLW, config.fe.TypeInt32())
+		v1 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+		v1.AddArg(x)
+		v0.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+		v2.AddArg(y)
+		v0.AddArg(v2)
+		v.AddArg(v0)
+		return true
+	}
+}
+// rewriteValueS390X_OpHmul16u lowers (Hmul16u x y): the zero-extended
+// operands are multiplied with MULLW and the high 16 bits of the product are
+// extracted with SRDconst [16].
+func rewriteValueS390X_OpHmul16u(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Hmul16u x y)
+	// cond:
+	// result: (SRDconst [16] (MULLW (MOVHZreg x) (MOVHZreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XSRDconst)
+		v.AuxInt = 16
+		v0 := b.NewValue0(v.Line, OpS390XMULLW, config.fe.TypeInt32())
+		v1 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v1.AddArg(x)
+		v0.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v2.AddArg(y)
+		v0.AddArg(v2)
+		v.AddArg(v0)
+		return true
+	}
+}
+// rewriteValueS390X_OpHmul32 lowers (Hmul32 x y): the sign-extended operands
+// are multiplied with the 64-bit MULLD and the high 32 bits of the product
+// are extracted with SRDconst [32].
+func rewriteValueS390X_OpHmul32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Hmul32 x y)
+	// cond:
+	// result: (SRDconst [32] (MULLD (MOVWreg x) (MOVWreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XSRDconst)
+		v.AuxInt = 32
+		v0 := b.NewValue0(v.Line, OpS390XMULLD, config.fe.TypeInt64())
+		v1 := b.NewValue0(v.Line, OpS390XMOVWreg, config.fe.TypeInt64())
+		v1.AddArg(x)
+		v0.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XMOVWreg, config.fe.TypeInt64())
+		v2.AddArg(y)
+		v0.AddArg(v2)
+		v.AddArg(v0)
+		return true
+	}
+}
+// rewriteValueS390X_OpHmul32u lowers (Hmul32u x y): the zero-extended
+// operands are multiplied with the 64-bit MULLD and the high 32 bits of the
+// product are extracted with SRDconst [32].
+func rewriteValueS390X_OpHmul32u(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Hmul32u x y)
+	// cond:
+	// result: (SRDconst [32] (MULLD (MOVWZreg x) (MOVWZreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XSRDconst)
+		v.AuxInt = 32
+		v0 := b.NewValue0(v.Line, OpS390XMULLD, config.fe.TypeInt64())
+		v1 := b.NewValue0(v.Line, OpS390XMOVWZreg, config.fe.TypeUInt64())
+		v1.AddArg(x)
+		v0.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XMOVWZreg, config.fe.TypeUInt64())
+		v2.AddArg(y)
+		v0.AddArg(v2)
+		v.AddArg(v0)
+		return true
+	}
+}
+// rewriteValueS390X_OpHmul64 lowers (Hmul64 x y) to (MULHD x y).
+func rewriteValueS390X_OpHmul64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Hmul64 x y)
+	// cond:
+	// result: (MULHD x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMULHD)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+// rewriteValueS390X_OpHmul64u lowers (Hmul64u x y) to (MULHDU x y).
+func rewriteValueS390X_OpHmul64u(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Hmul64u x y)
+	// cond:
+	// result: (MULHDU x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMULHDU)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+// rewriteValueS390X_OpHmul8 lowers (Hmul8 x y): the sign-extended operands
+// are multiplied with MULLW and the high 8 bits of the product are extracted
+// with SRDconst [8].
+func rewriteValueS390X_OpHmul8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Hmul8 x y)
+	// cond:
+	// result: (SRDconst [8] (MULLW (MOVBreg x) (MOVBreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XSRDconst)
+		v.AuxInt = 8
+		v0 := b.NewValue0(v.Line, OpS390XMULLW, config.fe.TypeInt32())
+		v1 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+		v1.AddArg(x)
+		v0.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+		v2.AddArg(y)
+		v0.AddArg(v2)
+		v.AddArg(v0)
+		return true
+	}
+}
+// rewriteValueS390X_OpHmul8u lowers (Hmul8u x y): the zero-extended operands
+// are multiplied with MULLW and the high 8 bits of the product are extracted
+// with SRDconst [8].
+func rewriteValueS390X_OpHmul8u(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Hmul8u x y)
+	// cond:
+	// result: (SRDconst [8] (MULLW (MOVBZreg x) (MOVBZreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XSRDconst)
+		v.AuxInt = 8
+		v0 := b.NewValue0(v.Line, OpS390XMULLW, config.fe.TypeInt32())
+		v1 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+		v1.AddArg(x)
+		v0.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+		v2.AddArg(y)
+		v0.AddArg(v2)
+		v.AddArg(v0)
+		return true
+	}
+}
+// rewriteValueS390X_OpITab rewrites (ITab (Load ptr mem)) to
+// (MOVDload ptr mem). Unlike most lowerings here, the pattern is conditional
+// on the argument being a Load, so the function can return false (no match).
+func rewriteValueS390X_OpITab(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (ITab (Load ptr mem))
+	// cond:
+	// result: (MOVDload ptr mem)
+	for {
+		v_0 := v.Args[0]
+		if v_0.Op != OpLoad {
+			break
+		}
+		ptr := v_0.Args[0]
+		mem := v_0.Args[1]
+		v.reset(OpS390XMOVDload)
+		v.AddArg(ptr)
+		v.AddArg(mem)
+		return true
+	}
+	return false
+}
+// rewriteValueS390X_OpInterCall lowers (InterCall [argwid] entry mem) to
+// (CALLinter [argwid] entry mem); argwid is preserved in AuxInt.
+func rewriteValueS390X_OpInterCall(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (InterCall [argwid] entry mem)
+	// cond:
+	// result: (CALLinter [argwid] entry mem)
+	for {
+		argwid := v.AuxInt
+		entry := v.Args[0]
+		mem := v.Args[1]
+		v.reset(OpS390XCALLinter)
+		v.AuxInt = argwid
+		v.AddArg(entry)
+		v.AddArg(mem)
+		return true
+	}
+}
+// rewriteValueS390X_OpIsInBounds lowers (IsInBounds idx len): MOVDLT selects
+// 1 over 0 when the unsigned compare (CMPU idx len) says idx < len.
+func rewriteValueS390X_OpIsInBounds(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (IsInBounds idx len)
+	// cond:
+	// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len))
+	for {
+		idx := v.Args[0]
+		len := v.Args[1]
+		v.reset(OpS390XMOVDLT)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
+		v2.AddArg(idx)
+		v2.AddArg(len)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpIsNonNil lowers (IsNonNil p): MOVDNE selects 1 over 0
+// when (CMPconst p [0]) says p != 0.
+func rewriteValueS390X_OpIsNonNil(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (IsNonNil p)
+	// cond:
+	// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMPconst p [0]))
+	for {
+		p := v.Args[0]
+		v.reset(OpS390XMOVDNE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMPconst, TypeFlags)
+		v2.AuxInt = 0
+		v2.AddArg(p)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpIsSliceInBounds lowers (IsSliceInBounds idx len):
+// MOVDLE selects 1 over 0 when the unsigned compare (CMPU idx len) says
+// idx <= len (slice bounds allow equality, unlike IsInBounds).
+func rewriteValueS390X_OpIsSliceInBounds(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (IsSliceInBounds idx len)
+	// cond:
+	// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len))
+	for {
+		idx := v.Args[0]
+		len := v.Args[1]
+		v.reset(OpS390XMOVDLE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
+		v2.AddArg(idx)
+		v2.AddArg(len)
+		v.AddArg(v2)
+		return true
+	}
+}
+func rewriteValueS390X_OpLeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq16 x y)
+ // cond:
+ // result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XMOVDLE)
+ v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1.AuxInt = 1
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+ v3 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+ v3.AddArg(x)
+ v2.AddArg(v3)
+ v4 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+ v4.AddArg(y)
+ v2.AddArg(v4)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq16U x y)
+ // cond:
+ // result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XMOVDLE)
+ v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1.AuxInt = 1
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
+ v3 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v3.AddArg(x)
+ v2.AddArg(v3)
+ v4 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v2.AddArg(v4)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32 x y)
+ // cond:
+ // result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XMOVDLE)
+ v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1.AuxInt = 1
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpS390XCMPW, TypeFlags)
+ v2.AddArg(x)
+ v2.AddArg(y)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32F x y)
+ // cond:
+ // result: (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XMOVDGEnoinv)
+ v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1.AuxInt = 1
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpS390XFCMPS, TypeFlags)
+ v2.AddArg(y)
+ v2.AddArg(x)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32U x y)
+ // cond:
+ // result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XMOVDLE)
+ v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1.AuxInt = 1
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpS390XCMPWU, TypeFlags)
+ v2.AddArg(x)
+ v2.AddArg(y)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq64 x y)
+ // cond:
+ // result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XMOVDLE)
+ v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1.AuxInt = 1
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+ v2.AddArg(x)
+ v2.AddArg(y)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq64F x y)
+ // cond:
+ // result: (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XMOVDGEnoinv)
+ v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1.AuxInt = 1
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpS390XFCMP, TypeFlags)
+ v2.AddArg(y)
+ v2.AddArg(x)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq64U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq64U x y)
+ // cond:
+ // result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XMOVDLE)
+ v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1.AuxInt = 1
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
+ v2.AddArg(x)
+ v2.AddArg(y)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq8 x y)
+ // cond:
+ // result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XMOVDLE)
+ v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1.AuxInt = 1
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+ v3 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+ v3.AddArg(x)
+ v2.AddArg(v3)
+ v4 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+ v4.AddArg(y)
+ v2.AddArg(v4)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq8U x y)
+ // cond:
+ // result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XMOVDLE)
+ v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1.AuxInt = 1
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
+ v3 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v3.AddArg(x)
+ v2.AddArg(v3)
+ v4 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v2.AddArg(v4)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less16 x y)
+ // cond:
+ // result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XMOVDLT)
+ v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1.AuxInt = 1
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+ v3 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+ v3.AddArg(x)
+ v2.AddArg(v3)
+ v4 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+ v4.AddArg(y)
+ v2.AddArg(v4)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less16U x y)
+ // cond:
+ // result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XMOVDLT)
+ v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1.AuxInt = 1
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
+ v3 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v3.AddArg(x)
+ v2.AddArg(v3)
+ v4 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v2.AddArg(v4)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32 x y)
+ // cond:
+ // result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XMOVDLT)
+ v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1.AuxInt = 1
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpS390XCMPW, TypeFlags)
+ v2.AddArg(x)
+ v2.AddArg(y)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32F x y)
+ // cond:
+ // result: (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XMOVDGTnoinv)
+ v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1.AuxInt = 1
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpS390XFCMPS, TypeFlags)
+ v2.AddArg(y)
+ v2.AddArg(x)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32U x y)
+ // cond:
+ // result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XMOVDLT)
+ v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1.AuxInt = 1
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpS390XCMPWU, TypeFlags)
+ v2.AddArg(x)
+ v2.AddArg(y)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less64 x y)
+ // cond:
+ // result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XMOVDLT)
+ v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1.AuxInt = 1
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+ v2.AddArg(x)
+ v2.AddArg(y)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less64F x y)
+ // cond:
+ // result: (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XMOVDGTnoinv)
+ v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1.AuxInt = 1
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpS390XFCMP, TypeFlags)
+ v2.AddArg(y)
+ v2.AddArg(x)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess64U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less64U x y)
+ // cond:
+ // result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XMOVDLT)
+ v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1.AuxInt = 1
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
+ v2.AddArg(x)
+ v2.AddArg(y)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less8 x y)
+ // cond:
+ // result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XMOVDLT)
+ v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1.AuxInt = 1
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+ v3 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+ v3.AddArg(x)
+ v2.AddArg(v3)
+ v4 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+ v4.AddArg(y)
+ v2.AddArg(v4)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less8U x y)
+ // cond:
+ // result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XMOVDLT)
+ v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1.AuxInt = 1
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
+ v3 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v3.AddArg(x)
+ v2.AddArg(v3)
+ v4 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v2.AddArg(v4)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLoad(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Load <t> ptr mem)
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (MOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpS390XMOVDload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitInt(t)
+ // result: (MOVWZload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpS390XMOVWZload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is16BitInt(t)
+ // result: (MOVHZload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpS390XMOVHZload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (t.IsBoolean() || is8BitInt(t))
+ // result: (MOVBZload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(t.IsBoolean() || is8BitInt(t)) {
+ break
+ }
+ v.reset(OpS390XMOVBZload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (FMOVSload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpS390XFMOVSload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (FMOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpS390XFMOVDload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpLrot32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot32 <t> x [c])
+ // cond:
+ // result: (RLLconst <t> [c&31] x)
+ for {
+ t := v.Type
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(OpS390XRLLconst)
+ v.Type = t
+ v.AuxInt = c & 31
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueS390X_OpLrot64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot64 <t> x [c])
+ // cond:
+ // result: (RLLGconst <t> [c&63] x)
+ for {
+ t := v.Type
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(OpS390XRLLGconst)
+ v.Type = t
+ v.AuxInt = c & 63
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh16x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x16 <t> x y)
+ // cond:
+ // result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [31])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XANDW)
+ v0 := b.NewValue0(v.Line, OpS390XSLW, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+ v2.AuxInt = 31
+ v3 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh16x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x32 <t> x y)
+ // cond:
+ // result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst y [31])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XANDW)
+ v0 := b.NewValue0(v.Line, OpS390XSLW, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+ v2.AuxInt = 31
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh16x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x64 <t> x y)
+ // cond:
+ // result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPUconst y [31])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XANDW)
+ v0 := b.NewValue0(v.Line, OpS390XSLW, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags)
+ v2.AuxInt = 31
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh16x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x8 <t> x y)
+ // cond:
+ // result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [31])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XANDW)
+ v0 := b.NewValue0(v.Line, OpS390XSLW, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+ v2.AuxInt = 31
+ v3 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh32x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x16 <t> x y)
+ // cond:
+ // result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [31])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XANDW)
+ v0 := b.NewValue0(v.Line, OpS390XSLW, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+ v2.AuxInt = 31
+ v3 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh32x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x32 <t> x y)
+ // cond:
+ // result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst y [31])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XANDW)
+ v0 := b.NewValue0(v.Line, OpS390XSLW, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+ v2.AuxInt = 31
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh32x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x64 <t> x y)
+ // cond:
+ // result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPUconst y [31])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XANDW)
+ v0 := b.NewValue0(v.Line, OpS390XSLW, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags)
+ v2.AuxInt = 31
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh32x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x8 <t> x y)
+ // cond:
+ // result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [31])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XANDW)
+ v0 := b.NewValue0(v.Line, OpS390XSLW, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+ v2.AuxInt = 31
+ v3 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh64x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh64x16 <t> x y)
+ // cond:
+ // result: (AND (SLD <t> x y) (SUBEcarrymask <t> (CMPWUconst (MOVHZreg y) [63])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XAND)
+ v0 := b.NewValue0(v.Line, OpS390XSLD, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+ v2.AuxInt = 63
+ v3 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh64x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh64x32 <t> x y)
+ // cond:
+ // result: (AND (SLD <t> x y) (SUBEcarrymask <t> (CMPWUconst y [63])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XAND)
+ v0 := b.NewValue0(v.Line, OpS390XSLD, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+ v2.AuxInt = 63
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh64x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh64x64 <t> x y)
+ // cond:
+ // result: (AND (SLD <t> x y) (SUBEcarrymask <t> (CMPUconst y [63])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XAND)
+ v0 := b.NewValue0(v.Line, OpS390XSLD, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags)
+ v2.AuxInt = 63
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh64x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh64x8 <t> x y)
+ // cond:
+ // result: (AND (SLD <t> x y) (SUBEcarrymask <t> (CMPWUconst (MOVBZreg y) [63])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XAND)
+ v0 := b.NewValue0(v.Line, OpS390XSLD, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+ v2.AuxInt = 63
+ v3 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh8x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x16 <t> x y)
+ // cond:
+ // result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [31])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XANDW)
+ v0 := b.NewValue0(v.Line, OpS390XSLW, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+ v2.AuxInt = 31
+ v3 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh8x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x32 <t> x y)
+ // cond:
+ // result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst y [31])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XANDW)
+ v0 := b.NewValue0(v.Line, OpS390XSLW, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+ v2.AuxInt = 31
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh8x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x64 <t> x y)
+ // cond:
+ // result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPUconst y [31])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XANDW)
+ v0 := b.NewValue0(v.Line, OpS390XSLW, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags)
+ v2.AuxInt = 31
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh8x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x8 <t> x y)
+ // cond:
+ // result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [31])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XANDW)
+ v0 := b.NewValue0(v.Line, OpS390XSLW, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+ v2.AuxInt = 31
+ v3 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpMod16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod16 x y)
+ // cond:
+ // result: (MODW (MOVHreg x) (MOVHreg y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XMODW)
+ v0 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpMod16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod16u x y)
+ // cond:
+ // result: (MODWU (MOVHZreg x) (MOVHZreg y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XMODWU)
+ v0 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpMod32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod32 x y)
+ // cond:
+ // result: (MODW (MOVWreg x) y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XMODW)
+ v0 := b.NewValue0(v.Line, OpS390XMOVWreg, config.fe.TypeInt64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueS390X_OpMod32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod32u x y)
+ // cond:
+ // result: (MODWU (MOVWZreg x) y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XMODWU)
+ v0 := b.NewValue0(v.Line, OpS390XMOVWZreg, config.fe.TypeUInt64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueS390X_OpMod64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod64 x y)
+ // cond:
+ // result: (MODD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XMODD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueS390X_OpMod64u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod64u x y)
+ // cond:
+ // result: (MODDU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XMODDU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueS390X_OpMod8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod8 x y)
+ // cond:
+ // result: (MODW (MOVBreg x) (MOVBreg y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XMODW)
+ v0 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpMod8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod8u x y)
+ // cond:
+ // result: (MODWU (MOVBZreg x) (MOVBZreg y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XMODWU)
+ v0 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpMove(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Move [s] _ _ mem)
+ // cond: SizeAndAlign(s).Size() == 0
+ // result: mem
+ for {
+ s := v.AuxInt
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 0) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = mem.Type
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 1
+ // result: (MOVBstore dst (MOVBZload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 1) {
+ break
+ }
+ v.reset(OpS390XMOVBstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpS390XMOVBZload, config.fe.TypeUInt8())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 2
+ // result: (MOVHstore dst (MOVHZload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 2) {
+ break
+ }
+ v.reset(OpS390XMOVHstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpS390XMOVHZload, config.fe.TypeUInt16())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 4
+ // result: (MOVWstore dst (MOVWZload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 4) {
+ break
+ }
+ v.reset(OpS390XMOVWstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpS390XMOVWZload, config.fe.TypeUInt32())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 8
+ // result: (MOVDstore dst (MOVDload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 8) {
+ break
+ }
+ v.reset(OpS390XMOVDstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpS390XMOVDload, config.fe.TypeUInt64())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 16
+ // result: (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 16) {
+ break
+ }
+ v.reset(OpS390XMOVDstore)
+ v.AuxInt = 8
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpS390XMOVDload, config.fe.TypeUInt64())
+ v0.AuxInt = 8
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XMOVDstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpS390XMOVDload, config.fe.TypeUInt64())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 24
+ // result: (MOVDstore [16] dst (MOVDload [16] src mem) (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem)))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 24) {
+ break
+ }
+ v.reset(OpS390XMOVDstore)
+ v.AuxInt = 16
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpS390XMOVDload, config.fe.TypeUInt64())
+ v0.AuxInt = 16
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XMOVDstore, TypeMem)
+ v1.AuxInt = 8
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpS390XMOVDload, config.fe.TypeUInt64())
+ v2.AuxInt = 8
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpS390XMOVDstore, TypeMem)
+ v3.AddArg(dst)
+ v4 := b.NewValue0(v.Line, OpS390XMOVDload, config.fe.TypeUInt64())
+ v4.AddArg(src)
+ v4.AddArg(mem)
+ v3.AddArg(v4)
+ v3.AddArg(mem)
+ v1.AddArg(v3)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 3
+ // result: (MOVBstore [2] dst (MOVBZload [2] src mem) (MOVHstore dst (MOVHZload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 3) {
+ break
+ }
+ v.reset(OpS390XMOVBstore)
+ v.AuxInt = 2
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpS390XMOVBZload, config.fe.TypeUInt8())
+ v0.AuxInt = 2
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XMOVHstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpS390XMOVHZload, config.fe.TypeUInt16())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 5
+ // result: (MOVBstore [4] dst (MOVBZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 5) {
+ break
+ }
+ v.reset(OpS390XMOVBstore)
+ v.AuxInt = 4
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpS390XMOVBZload, config.fe.TypeUInt8())
+ v0.AuxInt = 4
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XMOVWstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpS390XMOVWZload, config.fe.TypeUInt32())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 6
+ // result: (MOVHstore [4] dst (MOVHZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 6) {
+ break
+ }
+ v.reset(OpS390XMOVHstore)
+ v.AuxInt = 4
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpS390XMOVHZload, config.fe.TypeUInt16())
+ v0.AuxInt = 4
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XMOVWstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpS390XMOVWZload, config.fe.TypeUInt32())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 7
+ // result: (MOVBstore [6] dst (MOVBZload [6] src mem) (MOVHstore [4] dst (MOVHZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem)))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 7) {
+ break
+ }
+ v.reset(OpS390XMOVBstore)
+ v.AuxInt = 6
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpS390XMOVBZload, config.fe.TypeUInt8())
+ v0.AuxInt = 6
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XMOVHstore, TypeMem)
+ v1.AuxInt = 4
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpS390XMOVHZload, config.fe.TypeUInt16())
+ v2.AuxInt = 4
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpS390XMOVWstore, TypeMem)
+ v3.AddArg(dst)
+ v4 := b.NewValue0(v.Line, OpS390XMOVWZload, config.fe.TypeUInt32())
+ v4.AddArg(src)
+ v4.AddArg(mem)
+ v3.AddArg(v4)
+ v3.AddArg(mem)
+ v1.AddArg(v3)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() > 0 && SizeAndAlign(s).Size() <= 256
+ // result: (MVC [makeValAndOff(SizeAndAlign(s).Size(), 0)] dst src mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() > 0 && SizeAndAlign(s).Size() <= 256) {
+ break
+ }
+ v.reset(OpS390XMVC)
+ v.AuxInt = makeValAndOff(SizeAndAlign(s).Size(), 0)
+ v.AddArg(dst)
+ v.AddArg(src)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() > 256 && SizeAndAlign(s).Size() <= 512
+ // result: (MVC [makeValAndOff(SizeAndAlign(s).Size()-256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() > 256 && SizeAndAlign(s).Size() <= 512) {
+ break
+ }
+ v.reset(OpS390XMVC)
+ v.AuxInt = makeValAndOff(SizeAndAlign(s).Size()-256, 256)
+ v.AddArg(dst)
+ v.AddArg(src)
+ v0 := b.NewValue0(v.Line, OpS390XMVC, TypeMem)
+ v0.AuxInt = makeValAndOff(256, 0)
+ v0.AddArg(dst)
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Size() <= 768
+ // result: (MVC [makeValAndOff(SizeAndAlign(s).Size()-512, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem)))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Size() <= 768) {
+ break
+ }
+ v.reset(OpS390XMVC)
+ v.AuxInt = makeValAndOff(SizeAndAlign(s).Size()-512, 512)
+ v.AddArg(dst)
+ v.AddArg(src)
+ v0 := b.NewValue0(v.Line, OpS390XMVC, TypeMem)
+ v0.AuxInt = makeValAndOff(256, 256)
+ v0.AddArg(dst)
+ v0.AddArg(src)
+ v1 := b.NewValue0(v.Line, OpS390XMVC, TypeMem)
+ v1.AuxInt = makeValAndOff(256, 0)
+ v1.AddArg(dst)
+ v1.AddArg(src)
+ v1.AddArg(mem)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() > 768 && SizeAndAlign(s).Size() <= 1024
+ // result: (MVC [makeValAndOff(SizeAndAlign(s).Size()-768, 768)] dst src (MVC [makeValAndOff(256, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem))))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() > 768 && SizeAndAlign(s).Size() <= 1024) {
+ break
+ }
+ v.reset(OpS390XMVC)
+ v.AuxInt = makeValAndOff(SizeAndAlign(s).Size()-768, 768)
+ v.AddArg(dst)
+ v.AddArg(src)
+ v0 := b.NewValue0(v.Line, OpS390XMVC, TypeMem)
+ v0.AuxInt = makeValAndOff(256, 512)
+ v0.AddArg(dst)
+ v0.AddArg(src)
+ v1 := b.NewValue0(v.Line, OpS390XMVC, TypeMem)
+ v1.AuxInt = makeValAndOff(256, 256)
+ v1.AddArg(dst)
+ v1.AddArg(src)
+ v2 := b.NewValue0(v.Line, OpS390XMVC, TypeMem)
+ v2.AuxInt = makeValAndOff(256, 0)
+ v2.AddArg(dst)
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() > 1024
+ // result: (LoweredMove [SizeAndAlign(s).Size()%256] dst src (ADDconst <src.Type> src [(SizeAndAlign(s).Size()/256)*256]) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() > 1024) {
+ break
+ }
+ v.reset(OpS390XLoweredMove)
+ v.AuxInt = SizeAndAlign(s).Size() % 256
+ v.AddArg(dst)
+ v.AddArg(src)
+ v0 := b.NewValue0(v.Line, OpS390XADDconst, src.Type)
+ v0.AuxInt = (SizeAndAlign(s).Size() / 256) * 256
+ v0.AddArg(src)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+// rewriteValueS390X_OpMul16 lowers the generic Mul16 op to the s390x MULLW op.
+// NOTE(review): this file appears machine-generated (match/cond/result pattern);
+// prefer regenerating from the rules file over editing by hand.
+func rewriteValueS390X_OpMul16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Mul16 x y)
+	// cond:
+	// result: (MULLW x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMULLW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+// rewriteValueS390X_OpMul32 lowers the generic Mul32 op to the s390x MULLW op.
+func rewriteValueS390X_OpMul32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Mul32 x y)
+	// cond:
+	// result: (MULLW x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMULLW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+// rewriteValueS390X_OpMul32F lowers the generic Mul32F op to the s390x FMULS op.
+func rewriteValueS390X_OpMul32F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Mul32F x y)
+	// cond:
+	// result: (FMULS x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XFMULS)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+// rewriteValueS390X_OpMul64 lowers the generic Mul64 op to the s390x MULLD op.
+func rewriteValueS390X_OpMul64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Mul64 x y)
+	// cond:
+	// result: (MULLD x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMULLD)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+// rewriteValueS390X_OpMul64F lowers the generic Mul64F op to the s390x FMUL op.
+func rewriteValueS390X_OpMul64F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Mul64F x y)
+	// cond:
+	// result: (FMUL x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XFMUL)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+// rewriteValueS390X_OpMul8 lowers the generic Mul8 op to the s390x MULLW op.
+func rewriteValueS390X_OpMul8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Mul8 x y)
+	// cond:
+	// result: (MULLW x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMULLW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+// rewriteValueS390X_OpNeg16 lowers Neg16 to NEGW of the sign-extended (MOVHreg) operand.
+func rewriteValueS390X_OpNeg16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Neg16 x)
+	// cond:
+	// result: (NEGW (MOVHreg x))
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XNEGW)
+		v0 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+		v0.AddArg(x)
+		v.AddArg(v0)
+		return true
+	}
+}
+// rewriteValueS390X_OpNeg32 lowers the generic Neg32 op to the s390x NEGW op.
+func rewriteValueS390X_OpNeg32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Neg32 x)
+	// cond:
+	// result: (NEGW x)
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XNEGW)
+		v.AddArg(x)
+		return true
+	}
+}
+// rewriteValueS390X_OpNeg32F lowers the generic Neg32F op to the s390x FNEGS op.
+func rewriteValueS390X_OpNeg32F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Neg32F x)
+	// cond:
+	// result: (FNEGS x)
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XFNEGS)
+		v.AddArg(x)
+		return true
+	}
+}
+// rewriteValueS390X_OpNeg64 lowers the generic Neg64 op to the s390x NEG op.
+func rewriteValueS390X_OpNeg64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Neg64 x)
+	// cond:
+	// result: (NEG x)
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XNEG)
+		v.AddArg(x)
+		return true
+	}
+}
+// rewriteValueS390X_OpNeg64F lowers the generic Neg64F op to the s390x FNEG op.
+func rewriteValueS390X_OpNeg64F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Neg64F x)
+	// cond:
+	// result: (FNEG x)
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XFNEG)
+		v.AddArg(x)
+		return true
+	}
+}
+// rewriteValueS390X_OpNeg8 lowers Neg8 to NEGW of the sign-extended (MOVBreg) operand.
+func rewriteValueS390X_OpNeg8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Neg8 x)
+	// cond:
+	// result: (NEGW (MOVBreg x))
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XNEGW)
+		v0 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+		v0.AddArg(x)
+		v.AddArg(v0)
+		return true
+	}
+}
+// rewriteValueS390X_OpNeq16 lowers Neq16 to a MOVDNE conditional move over a
+// 64-bit CMP of the sign-extended (MOVHreg) operands.
+func rewriteValueS390X_OpNeq16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Neq16 x y)
+	// cond:
+	// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDNE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+		v3 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+		v3.AddArg(x)
+		v2.AddArg(v3)
+		v4 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+		v4.AddArg(y)
+		v2.AddArg(v4)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpNeq32 lowers Neq32 to a MOVDNE conditional move over a CMPW.
+func rewriteValueS390X_OpNeq32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Neq32 x y)
+	// cond:
+	// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDNE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMPW, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpNeq32F lowers Neq32F to a MOVDNE conditional move over an FCMPS.
+func rewriteValueS390X_OpNeq32F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Neq32F x y)
+	// cond:
+	// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDNE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XFCMPS, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpNeq64 lowers Neq64 to a MOVDNE conditional move over a 64-bit CMP.
+func rewriteValueS390X_OpNeq64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Neq64 x y)
+	// cond:
+	// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDNE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpNeq64F lowers Neq64F to a MOVDNE conditional move over an FCMP.
+func rewriteValueS390X_OpNeq64F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Neq64F x y)
+	// cond:
+	// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDNE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XFCMP, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpNeq8 lowers Neq8 to a MOVDNE conditional move over a
+// 64-bit CMP of the sign-extended (MOVBreg) operands.
+func rewriteValueS390X_OpNeq8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Neq8 x y)
+	// cond:
+	// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDNE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+		v3 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+		v3.AddArg(x)
+		v2.AddArg(v3)
+		v4 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+		v4.AddArg(y)
+		v2.AddArg(v4)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpNeqB lowers boolean inequality the same way as Neq8:
+// a MOVDNE conditional move over a CMP of the sign-extended (MOVBreg) operands.
+func rewriteValueS390X_OpNeqB(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (NeqB x y)
+	// cond:
+	// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDNE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+		v3 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+		v3.AddArg(x)
+		v2.AddArg(v3)
+		v4 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+		v4.AddArg(y)
+		v2.AddArg(v4)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpNeqPtr lowers pointer inequality to a MOVDNE conditional
+// move over a 64-bit CMP.
+func rewriteValueS390X_OpNeqPtr(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (NeqPtr x y)
+	// cond:
+	// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XMOVDNE)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = 0
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v1.AuxInt = 1
+		v.AddArg(v1)
+		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
+		v2.AddArg(x)
+		v2.AddArg(y)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpNilCheck lowers the generic NilCheck op to the
+// architecture-specific LoweredNilCheck pseudo-op.
+func rewriteValueS390X_OpNilCheck(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (NilCheck ptr mem)
+	// cond:
+	// result: (LoweredNilCheck ptr mem)
+	for {
+		ptr := v.Args[0]
+		mem := v.Args[1]
+		v.reset(OpS390XLoweredNilCheck)
+		v.AddArg(ptr)
+		v.AddArg(mem)
+		return true
+	}
+}
+// rewriteValueS390X_OpNot lowers boolean Not to XORWconst [1] (flip the low bit).
+func rewriteValueS390X_OpNot(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Not x)
+	// cond:
+	// result: (XORWconst [1] x)
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XXORWconst)
+		v.AuxInt = 1
+		v.AddArg(x)
+		return true
+	}
+}
+// rewriteValueS390X_OpOffPtr lowers OffPtr in three tiers: SP-relative offsets
+// become MOVDaddr, offsets that fit in 32 bits become ADDconst, and anything
+// else becomes an ADD of a materialized MOVDconst.
+func rewriteValueS390X_OpOffPtr(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (OffPtr [off] ptr:(SP))
+	// cond:
+	// result: (MOVDaddr [off] ptr)
+	for {
+		off := v.AuxInt
+		ptr := v.Args[0]
+		if ptr.Op != OpSP {
+			break
+		}
+		v.reset(OpS390XMOVDaddr)
+		v.AuxInt = off
+		v.AddArg(ptr)
+		return true
+	}
+	// match: (OffPtr [off] ptr)
+	// cond: is32Bit(off)
+	// result: (ADDconst [off] ptr)
+	for {
+		off := v.AuxInt
+		ptr := v.Args[0]
+		if !(is32Bit(off)) {
+			break
+		}
+		v.reset(OpS390XADDconst)
+		v.AuxInt = off
+		v.AddArg(ptr)
+		return true
+	}
+	// match: (OffPtr [off] ptr)
+	// cond:
+	// result: (ADD (MOVDconst [off]) ptr)
+	for {
+		off := v.AuxInt
+		ptr := v.Args[0]
+		v.reset(OpS390XADD)
+		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+		v0.AuxInt = off
+		v.AddArg(v0)
+		v.AddArg(ptr)
+		return true
+	}
+}
+// rewriteValueS390X_OpOr16 lowers the generic Or16 op to the s390x ORW op.
+func rewriteValueS390X_OpOr16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Or16 x y)
+	// cond:
+	// result: (ORW x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XORW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+// rewriteValueS390X_OpOr32 lowers the generic Or32 op to the s390x ORW op.
+func rewriteValueS390X_OpOr32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Or32 x y)
+	// cond:
+	// result: (ORW x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XORW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+// rewriteValueS390X_OpOr64 lowers the generic Or64 op to the s390x OR op.
+func rewriteValueS390X_OpOr64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Or64 x y)
+	// cond:
+	// result: (OR x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XOR)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+// rewriteValueS390X_OpOr8 lowers the generic Or8 op to the s390x ORW op.
+func rewriteValueS390X_OpOr8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Or8 x y)
+	// cond:
+	// result: (ORW x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XORW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+// rewriteValueS390X_OpOrB lowers boolean OR to the s390x ORW op.
+func rewriteValueS390X_OpOrB(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (OrB x y)
+	// cond:
+	// result: (ORW x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XORW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+// rewriteValueS390X_OpRsh16Ux16 lowers Rsh16Ux16: SRW of the zero-extended
+// value, ANDed with a SUBEWcarrymask derived from comparing the zero-extended
+// shift count against 15 (presumably zeroing the result for shifts >= 16 —
+// depends on SUBEWcarrymask semantics defined elsewhere).
+func rewriteValueS390X_OpRsh16Ux16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Rsh16Ux16 <t> x y)
+	// cond:
+	// result: (ANDW (SRW <t> (MOVHZreg x) y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [15])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XANDW)
+		v0 := b.NewValue0(v.Line, OpS390XSRW, t)
+		v1 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v1.AddArg(x)
+		v0.AddArg(v1)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+		v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+		v3.AuxInt = 15
+		v4 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v4.AddArg(y)
+		v3.AddArg(v4)
+		v2.AddArg(v3)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpRsh16Ux32 lowers Rsh16Ux32: like Rsh16Ux16 but the
+// 32-bit shift count is compared against 15 directly (no extension needed).
+func rewriteValueS390X_OpRsh16Ux32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Rsh16Ux32 <t> x y)
+	// cond:
+	// result: (ANDW (SRW <t> (MOVHZreg x) y) (SUBEWcarrymask <t> (CMPWUconst y [15])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XANDW)
+		v0 := b.NewValue0(v.Line, OpS390XSRW, t)
+		v1 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v1.AddArg(x)
+		v0.AddArg(v1)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+		v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+		v3.AuxInt = 15
+		v3.AddArg(y)
+		v2.AddArg(v3)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpRsh16Ux64 lowers Rsh16Ux64: like Rsh16Ux32 but uses the
+// 64-bit unsigned compare CMPUconst for the shift-count check.
+func rewriteValueS390X_OpRsh16Ux64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Rsh16Ux64 <t> x y)
+	// cond:
+	// result: (ANDW (SRW <t> (MOVHZreg x) y) (SUBEWcarrymask <t> (CMPUconst y [15])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XANDW)
+		v0 := b.NewValue0(v.Line, OpS390XSRW, t)
+		v1 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v1.AddArg(x)
+		v0.AddArg(v1)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+		v3 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags)
+		v3.AuxInt = 15
+		v3.AddArg(y)
+		v2.AddArg(v3)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpRsh16Ux8 lowers Rsh16Ux8: like Rsh16Ux16 but the shift
+// count is zero-extended from 8 bits (MOVBZreg) before the [15] compare.
+func rewriteValueS390X_OpRsh16Ux8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Rsh16Ux8 <t> x y)
+	// cond:
+	// result: (ANDW (SRW <t> (MOVHZreg x) y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [15])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XANDW)
+		v0 := b.NewValue0(v.Line, OpS390XSRW, t)
+		v1 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v1.AddArg(x)
+		v0.AddArg(v1)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+		v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+		v3.AuxInt = 15
+		v4 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+		v4.AddArg(y)
+		v3.AddArg(v4)
+		v2.AddArg(v3)
+		v.AddArg(v2)
+		return true
+	}
+}
+// rewriteValueS390X_OpRsh16x16 lowers the signed shift Rsh16x16: SRAW of the
+// sign-extended value, with the shift count ORed against NOTW of a
+// SUBEWcarrymask from the [15] compare (clamping oversized shift counts;
+// exact clamp value depends on SUBEWcarrymask semantics defined elsewhere).
+func rewriteValueS390X_OpRsh16x16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Rsh16x16 <t> x y)
+	// cond:
+	// result: (SRAW <t> (MOVHreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVHZreg y) [15])))))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XSRAW)
+		v.Type = t
+		v0 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+		v0.AddArg(x)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XORW, y.Type)
+		v1.AddArg(y)
+		v2 := b.NewValue0(v.Line, OpS390XNOTW, y.Type)
+		v3 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type)
+		v4 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+		v4.AuxInt = 15
+		v5 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v5.AddArg(y)
+		v4.AddArg(v5)
+		v3.AddArg(v4)
+		v2.AddArg(v3)
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+}
+// rewriteValueS390X_OpRsh16x32 lowers Rsh16x32: like Rsh16x16 but the 32-bit
+// shift count is compared against 15 directly.
+func rewriteValueS390X_OpRsh16x32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Rsh16x32 <t> x y)
+	// cond:
+	// result: (SRAW <t> (MOVHreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst y [15])))))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XSRAW)
+		v.Type = t
+		v0 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+		v0.AddArg(x)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XORW, y.Type)
+		v1.AddArg(y)
+		v2 := b.NewValue0(v.Line, OpS390XNOTW, y.Type)
+		v3 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type)
+		v4 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+		v4.AuxInt = 15
+		v4.AddArg(y)
+		v3.AddArg(v4)
+		v2.AddArg(v3)
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+}
+// rewriteValueS390X_OpRsh16x64 lowers Rsh16x64: like Rsh16x32 but uses the
+// 64-bit variants OR/NOT/SUBEcarrymask/CMPUconst for the shift-count clamp.
+func rewriteValueS390X_OpRsh16x64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Rsh16x64 <t> x y)
+	// cond:
+	// result: (SRAW <t> (MOVHreg x) (OR <y.Type> y (NOT <y.Type> (SUBEcarrymask <y.Type> (CMPUconst y [15])))))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XSRAW)
+		v.Type = t
+		v0 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+		v0.AddArg(x)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XOR, y.Type)
+		v1.AddArg(y)
+		v2 := b.NewValue0(v.Line, OpS390XNOT, y.Type)
+		v3 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, y.Type)
+		v4 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags)
+		v4.AuxInt = 15
+		v4.AddArg(y)
+		v3.AddArg(v4)
+		v2.AddArg(v3)
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+}
+// rewriteValueS390X_OpRsh16x8 lowers Rsh16x8: like Rsh16x16 but the shift
+// count is zero-extended from 8 bits (MOVBZreg) before the [15] compare.
+func rewriteValueS390X_OpRsh16x8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Rsh16x8 <t> x y)
+	// cond:
+	// result: (SRAW <t> (MOVHreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVBZreg y) [15])))))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XSRAW)
+		v.Type = t
+		v0 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
+		v0.AddArg(x)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XORW, y.Type)
+		v1.AddArg(y)
+		v2 := b.NewValue0(v.Line, OpS390XNOTW, y.Type)
+		v3 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type)
+		v4 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+		v4.AuxInt = 15
+		v5 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+		v5.AddArg(y)
+		v4.AddArg(v5)
+		v3.AddArg(v4)
+		v2.AddArg(v3)
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+}
+// rewriteValueS390X_OpRsh32Ux16 lowers Rsh32Ux16: SRW ANDed with a
+// SUBEWcarrymask from comparing the zero-extended shift count against 31.
+// No extension of x is needed at 32-bit width.
+func rewriteValueS390X_OpRsh32Ux16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Rsh32Ux16 <t> x y)
+	// cond:
+	// result: (ANDW (SRW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [31])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XANDW)
+		v0 := b.NewValue0(v.Line, OpS390XSRW, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+		v2.AuxInt = 31
+		v3 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v3.AddArg(y)
+		v2.AddArg(v3)
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+}
+// rewriteValueS390X_OpRsh32Ux32 lowers Rsh32Ux32: SRW ANDed with a
+// SUBEWcarrymask from comparing the shift count against 31.
+func rewriteValueS390X_OpRsh32Ux32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Rsh32Ux32 <t> x y)
+	// cond:
+	// result: (ANDW (SRW <t> x y) (SUBEWcarrymask <t> (CMPWUconst y [31])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XANDW)
+		v0 := b.NewValue0(v.Line, OpS390XSRW, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+		v2.AuxInt = 31
+		v2.AddArg(y)
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+}
+// rewriteValueS390X_OpRsh32Ux64 lowers Rsh32Ux64: like Rsh32Ux32 but uses the
+// 64-bit unsigned compare CMPUconst for the shift-count check.
+func rewriteValueS390X_OpRsh32Ux64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Rsh32Ux64 <t> x y)
+	// cond:
+	// result: (ANDW (SRW <t> x y) (SUBEWcarrymask <t> (CMPUconst y [31])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XANDW)
+		v0 := b.NewValue0(v.Line, OpS390XSRW, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags)
+		v2.AuxInt = 31
+		v2.AddArg(y)
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+}
+// rewriteValueS390X_OpRsh32Ux8 lowers Rsh32Ux8: like Rsh32Ux16 but the shift
+// count is zero-extended from 8 bits (MOVBZreg) before the [31] compare.
+func rewriteValueS390X_OpRsh32Ux8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Rsh32Ux8 <t> x y)
+	// cond:
+	// result: (ANDW (SRW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [31])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XANDW)
+		v0 := b.NewValue0(v.Line, OpS390XSRW, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+		v2.AuxInt = 31
+		v3 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+		v3.AddArg(y)
+		v2.AddArg(v3)
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+}
+// rewriteValueS390X_OpRsh32x16 lowers the signed shift Rsh32x16: SRAW with the
+// shift count ORed against NOTW of a SUBEWcarrymask from the [31] compare of
+// the zero-extended count.
+func rewriteValueS390X_OpRsh32x16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Rsh32x16 <t> x y)
+	// cond:
+	// result: (SRAW <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVHZreg y) [31])))))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XSRAW)
+		v.Type = t
+		v.AddArg(x)
+		v0 := b.NewValue0(v.Line, OpS390XORW, y.Type)
+		v0.AddArg(y)
+		v1 := b.NewValue0(v.Line, OpS390XNOTW, y.Type)
+		v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type)
+		v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+		v3.AuxInt = 31
+		v4 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v4.AddArg(y)
+		v3.AddArg(v4)
+		v2.AddArg(v3)
+		v1.AddArg(v2)
+		v0.AddArg(v1)
+		v.AddArg(v0)
+		return true
+	}
+}
+// rewriteValueS390X_OpRsh32x32 lowers Rsh32x32: like Rsh32x16 but the 32-bit
+// shift count is compared against 31 directly.
+func rewriteValueS390X_OpRsh32x32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Rsh32x32 <t> x y)
+	// cond:
+	// result: (SRAW <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst y [31])))))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XSRAW)
+		v.Type = t
+		v.AddArg(x)
+		v0 := b.NewValue0(v.Line, OpS390XORW, y.Type)
+		v0.AddArg(y)
+		v1 := b.NewValue0(v.Line, OpS390XNOTW, y.Type)
+		v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type)
+		v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+		v3.AuxInt = 31
+		v3.AddArg(y)
+		v2.AddArg(v3)
+		v1.AddArg(v2)
+		v0.AddArg(v1)
+		v.AddArg(v0)
+		return true
+	}
+}
+// rewriteValueS390X_OpRsh32x64 lowers Rsh32x64: like Rsh32x32 but uses the
+// 64-bit variants OR/NOT/SUBEcarrymask/CMPUconst for the shift-count clamp.
+func rewriteValueS390X_OpRsh32x64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Rsh32x64 <t> x y)
+	// cond:
+	// result: (SRAW <t> x (OR <y.Type> y (NOT <y.Type> (SUBEcarrymask <y.Type> (CMPUconst y [31])))))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XSRAW)
+		v.Type = t
+		v.AddArg(x)
+		v0 := b.NewValue0(v.Line, OpS390XOR, y.Type)
+		v0.AddArg(y)
+		v1 := b.NewValue0(v.Line, OpS390XNOT, y.Type)
+		v2 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, y.Type)
+		v3 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags)
+		v3.AuxInt = 31
+		v3.AddArg(y)
+		v2.AddArg(v3)
+		v1.AddArg(v2)
+		v0.AddArg(v1)
+		v.AddArg(v0)
+		return true
+	}
+}
+// rewriteValueS390X_OpRsh32x8 lowers Rsh32x8: like Rsh32x16 but the shift
+// count is zero-extended from 8 bits (MOVBZreg) before the [31] compare.
+func rewriteValueS390X_OpRsh32x8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Rsh32x8 <t> x y)
+	// cond:
+	// result: (SRAW <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVBZreg y) [31])))))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XSRAW)
+		v.Type = t
+		v.AddArg(x)
+		v0 := b.NewValue0(v.Line, OpS390XORW, y.Type)
+		v0.AddArg(y)
+		v1 := b.NewValue0(v.Line, OpS390XNOTW, y.Type)
+		v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type)
+		v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+		v3.AuxInt = 31
+		v4 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+		v4.AddArg(y)
+		v3.AddArg(v4)
+		v2.AddArg(v3)
+		v1.AddArg(v2)
+		v0.AddArg(v1)
+		v.AddArg(v0)
+		return true
+	}
+}
+// rewriteValueS390X_OpRsh64Ux16 lowers Rsh64Ux16: 64-bit SRD ANDed with a
+// 64-bit SUBEcarrymask from comparing the zero-extended shift count against 63.
+func rewriteValueS390X_OpRsh64Ux16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Rsh64Ux16 <t> x y)
+	// cond:
+	// result: (AND (SRD <t> x y) (SUBEcarrymask <t> (CMPWUconst (MOVHZreg y) [63])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XAND)
+		v0 := b.NewValue0(v.Line, OpS390XSRD, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+		v2.AuxInt = 63
+		v3 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+		v3.AddArg(y)
+		v2.AddArg(v3)
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+}
+// rewriteValueS390X_OpRsh64Ux32 lowers Rsh64Ux32: like Rsh64Ux16 but the
+// 32-bit shift count is compared against 63 directly.
+func rewriteValueS390X_OpRsh64Ux32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Rsh64Ux32 <t> x y)
+	// cond:
+	// result: (AND (SRD <t> x y) (SUBEcarrymask <t> (CMPWUconst y [63])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XAND)
+		v0 := b.NewValue0(v.Line, OpS390XSRD, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+		v2.AuxInt = 63
+		v2.AddArg(y)
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+}
+// rewriteValueS390X_OpRsh64Ux64 lowers Rsh64Ux64: like Rsh64Ux32 but uses the
+// 64-bit unsigned compare CMPUconst for the shift-count check.
+func rewriteValueS390X_OpRsh64Ux64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Rsh64Ux64 <t> x y)
+	// cond:
+	// result: (AND (SRD <t> x y) (SUBEcarrymask <t> (CMPUconst y [63])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XAND)
+		v0 := b.NewValue0(v.Line, OpS390XSRD, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags)
+		v2.AuxInt = 63
+		v2.AddArg(y)
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+}
+// rewriteValueS390X_OpRsh64Ux8 lowers Rsh64Ux8: like Rsh64Ux16 but the shift
+// count is zero-extended from 8 bits (MOVBZreg) before the [63] compare.
+func rewriteValueS390X_OpRsh64Ux8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Rsh64Ux8 <t> x y)
+	// cond:
+	// result: (AND (SRD <t> x y) (SUBEcarrymask <t> (CMPWUconst (MOVBZreg y) [63])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XAND)
+		v0 := b.NewValue0(v.Line, OpS390XSRD, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+		v2.AuxInt = 63
+		v3 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+		v3.AddArg(y)
+		v2.AddArg(v3)
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+}
+// rewriteValueS390X_OpRsh64x16 lowers the signed Rsh64x16 op: the
+// ORW/NOTW/SUBEWcarrymask chain saturates the shift amount when it exceeds
+// 63 so SRAD produces the sign-fill result Go requires for over-shifts.
+// Always fires and returns true. Generated code — do not edit by hand.
+func rewriteValueS390X_OpRsh64x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64x16 <t> x y)
+ // cond:
+ // result: (SRAD <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVHZreg y) [63])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XSRAD)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpS390XORW, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpS390XNOTW, y.Type)
+ v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+ v3.AuxInt = 63
+ v4 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+// rewriteValueS390X_OpRsh64x32 lowers signed Rsh64x32; same saturating-mask
+// scheme as Rsh64x16 but the 32-bit shift amount needs no extension.
+// Always fires and returns true. Generated code — do not edit by hand.
+func rewriteValueS390X_OpRsh64x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64x32 <t> x y)
+ // cond:
+ // result: (SRAD <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst y [63])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XSRAD)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpS390XORW, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpS390XNOTW, y.Type)
+ v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+ v3.AuxInt = 63
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+// rewriteValueS390X_OpRsh64x64 lowers signed Rsh64x64; uses the 64-bit
+// OR/NOT/SUBEcarrymask/CMPUconst forms since the shift amount is 64-bit.
+// Always fires and returns true. Generated code — do not edit by hand.
+func rewriteValueS390X_OpRsh64x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64x64 <t> x y)
+ // cond:
+ // result: (SRAD <t> x (OR <y.Type> y (NOT <y.Type> (SUBEcarrymask <y.Type> (CMPUconst y [63])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XSRAD)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpS390XOR, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpS390XNOT, y.Type)
+ v2 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags)
+ v3.AuxInt = 63
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+// rewriteValueS390X_OpRsh64x8 lowers signed Rsh64x8; the 8-bit shift amount
+// is zero-extended (MOVBZreg) before the saturating compare against 63.
+// Always fires and returns true. Generated code — do not edit by hand.
+func rewriteValueS390X_OpRsh64x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64x8 <t> x y)
+ // cond:
+ // result: (SRAD <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVBZreg y) [63])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XSRAD)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpS390XORW, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpS390XNOTW, y.Type)
+ v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+ v3.AuxInt = 63
+ v4 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+// rewriteValueS390X_OpRsh8Ux16 lowers Rsh8Ux16: the 8-bit operand is
+// zero-extended before SRW, and the result is masked to 0 for shift
+// amounts >= 8 via SUBEWcarrymask/CMPWUconst [7]. Always fires and returns
+// true. Generated code — do not edit by hand.
+func rewriteValueS390X_OpRsh8Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux16 <t> x y)
+ // cond:
+ // result: (ANDW (SRW <t> (MOVBZreg x) y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [7])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XANDW)
+ v0 := b.NewValue0(v.Line, OpS390XSRW, t)
+ v1 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+ v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+ v3.AuxInt = 7
+ v4 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+// rewriteValueS390X_OpRsh8Ux32 lowers Rsh8Ux32; like Rsh8Ux16 but the 32-bit
+// shift amount needs no extension. Always fires and returns true.
+// Generated code — do not edit by hand.
+func rewriteValueS390X_OpRsh8Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux32 <t> x y)
+ // cond:
+ // result: (ANDW (SRW <t> (MOVBZreg x) y) (SUBEWcarrymask <t> (CMPWUconst y [7])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XANDW)
+ v0 := b.NewValue0(v.Line, OpS390XSRW, t)
+ v1 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+ v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+ v3.AuxInt = 7
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+// rewriteValueS390X_OpRsh8Ux64 lowers Rsh8Ux64; the 64-bit shift amount is
+// compared with the 64-bit CMPUconst. Always fires and returns true.
+// Generated code — do not edit by hand.
+func rewriteValueS390X_OpRsh8Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux64 <t> x y)
+ // cond:
+ // result: (ANDW (SRW <t> (MOVBZreg x) y) (SUBEWcarrymask <t> (CMPUconst y [7])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XANDW)
+ v0 := b.NewValue0(v.Line, OpS390XSRW, t)
+ v1 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+ v3 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags)
+ v3.AuxInt = 7
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+// rewriteValueS390X_OpRsh8Ux8 lowers Rsh8Ux8; both the operand and the 8-bit
+// shift amount are zero-extended with MOVBZreg. Always fires and returns
+// true. Generated code — do not edit by hand.
+func rewriteValueS390X_OpRsh8Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux8 <t> x y)
+ // cond:
+ // result: (ANDW (SRW <t> (MOVBZreg x) y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [7])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XANDW)
+ v0 := b.NewValue0(v.Line, OpS390XSRW, t)
+ v1 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
+ v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+ v3.AuxInt = 7
+ v4 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+// rewriteValueS390X_OpRsh8x16 lowers signed Rsh8x16: the operand is
+// sign-extended (MOVBreg) before SRAW, and the shift amount is saturated
+// for values > 7 via the ORW/NOTW/SUBEWcarrymask chain. Always fires and
+// returns true. Generated code — do not edit by hand.
+func rewriteValueS390X_OpRsh8x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x16 <t> x y)
+ // cond:
+ // result: (SRAW <t> (MOVBreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVHZreg y) [7])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XSRAW)
+ v.Type = t
+ v0 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XORW, y.Type)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Line, OpS390XNOTW, y.Type)
+ v3 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type)
+ v4 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+ v4.AuxInt = 7
+ v5 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v5.AddArg(y)
+ v4.AddArg(v5)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+// rewriteValueS390X_OpRsh8x32 lowers signed Rsh8x32; like Rsh8x16 but the
+// 32-bit shift amount needs no extension. Always fires and returns true.
+// Generated code — do not edit by hand.
+func rewriteValueS390X_OpRsh8x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x32 <t> x y)
+ // cond:
+ // result: (SRAW <t> (MOVBreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst y [7])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XSRAW)
+ v.Type = t
+ v0 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XORW, y.Type)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Line, OpS390XNOTW, y.Type)
+ v3 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type)
+ v4 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+ v4.AuxInt = 7
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+// rewriteValueS390X_OpRsh8x64 lowers signed Rsh8x64; uses the 64-bit
+// OR/NOT/SUBEcarrymask/CMPUconst forms for the 64-bit shift amount.
+// Always fires and returns true. Generated code — do not edit by hand.
+func rewriteValueS390X_OpRsh8x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x64 <t> x y)
+ // cond:
+ // result: (SRAW <t> (MOVBreg x) (OR <y.Type> y (NOT <y.Type> (SUBEcarrymask <y.Type> (CMPUconst y [7])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XSRAW)
+ v.Type = t
+ v0 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XOR, y.Type)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Line, OpS390XNOT, y.Type)
+ v3 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, y.Type)
+ v4 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags)
+ v4.AuxInt = 7
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+// rewriteValueS390X_OpRsh8x8 lowers signed Rsh8x8; the operand is
+// sign-extended and the 8-bit shift amount is zero-extended before the
+// saturating compare against 7. Always fires and returns true.
+// Generated code — do not edit by hand.
+func rewriteValueS390X_OpRsh8x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x8 <t> x y)
+ // cond:
+ // result: (SRAW <t> (MOVBreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVBZreg y) [7])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpS390XSRAW)
+ v.Type = t
+ v0 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XORW, y.Type)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Line, OpS390XNOTW, y.Type)
+ v3 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type)
+ v4 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+ v4.AuxInt = 7
+ v5 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v5.AddArg(y)
+ v4.AddArg(v5)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+// rewriteValueS390X_OpS390XADD applies peephole rewrites to ADD: folding a
+// 32-bit-representable constant operand into ADDconst (both operand orders),
+// merging an address operand into MOVDaddridx, and turning ADD-of-NEG into
+// SUB. Returns true if any rule fired. Generated code — do not edit by hand.
+func rewriteValueS390X_OpS390XADD(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADD x (MOVDconst [c]))
+ // cond: is32Bit(c)
+ // result: (ADDconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpS390XADDconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADD (MOVDconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (ADDconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpS390XADDconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADD x (MOVDaddr [c] {s} y))
+ // cond: x.Op != OpSB && y.Op != OpSB
+ // result: (MOVDaddridx [c] {s} x y)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ c := v_1.AuxInt
+ s := v_1.Aux
+ y := v_1.Args[0]
+ if !(x.Op != OpSB && y.Op != OpSB) {
+ break
+ }
+ v.reset(OpS390XMOVDaddridx)
+ v.AuxInt = c
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADD (MOVDaddr [c] {s} x) y)
+ // cond: x.Op != OpSB && y.Op != OpSB
+ // result: (MOVDaddridx [c] {s} x y)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ c := v_0.AuxInt
+ s := v_0.Aux
+ x := v_0.Args[0]
+ y := v.Args[1]
+ if !(x.Op != OpSB && y.Op != OpSB) {
+ break
+ }
+ v.reset(OpS390XMOVDaddridx)
+ v.AuxInt = c
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADD x (NEG y))
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpS390XNEG {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+// rewriteValueS390X_OpS390XADDW applies peephole rewrites to the 32-bit ADDW:
+// folding a constant operand into ADDWconst (no is32Bit guard is needed for
+// a 32-bit op) and turning ADDW-of-NEGW into SUBW. Returns true if any rule
+// fired. Generated code — do not edit by hand.
+func rewriteValueS390X_OpS390XADDW(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDW x (MOVDconst [c]))
+ // cond:
+ // result: (ADDWconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpS390XADDWconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDW (MOVDconst [c]) x)
+ // cond:
+ // result: (ADDWconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpS390XADDWconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDW x (NEGW y))
+ // cond:
+ // result: (SUBW x y)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpS390XNEGW {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSUBW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+// rewriteValueS390X_OpS390XADDWconst folds ADDWconst: dropping a zero add,
+// constant-folding against MOVDconst, and merging nested ADDWconst. All
+// arithmetic is truncated through int32 to preserve 32-bit wraparound.
+// Returns true if any rule fired. Generated code — do not edit by hand.
+func rewriteValueS390X_OpS390XADDWconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDWconst [c] x)
+ // cond: int32(c)==0
+ // result: x
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(int32(c) == 0) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDWconst [c] (MOVDconst [d]))
+ // cond:
+ // result: (MOVDconst [int64(int32(c+d))])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64(int32(c + d))
+ return true
+ }
+ // match: (ADDWconst [c] (ADDWconst [d] x))
+ // cond:
+ // result: (ADDWconst [int64(int32(c+d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XADDWconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpS390XADDWconst)
+ v.AuxInt = int64(int32(c + d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+// rewriteValueS390X_OpS390XADDconst folds ADDconst: merging the offset into
+// MOVDaddr/MOVDaddridx when the sum stays 32-bit (the first MOVDaddr rule
+// additionally requires the combined offset to be even; the second requires
+// the base not be SB), dropping a zero add, constant-folding against
+// MOVDconst, and merging nested ADDconst. Returns true if any rule fired.
+// Generated code — do not edit by hand.
+func rewriteValueS390X_OpS390XADDconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDconst [c] (MOVDaddr [d] {s} x))
+ // cond: ((c+d)&1 == 0) && is32Bit(c+d)
+ // result: (MOVDaddr [c+d] {s} x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ d := v_0.AuxInt
+ s := v_0.Aux
+ x := v_0.Args[0]
+ if !(((c+d)&1 == 0) && is32Bit(c+d)) {
+ break
+ }
+ v.reset(OpS390XMOVDaddr)
+ v.AuxInt = c + d
+ v.Aux = s
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] (MOVDaddr [d] {s} x))
+ // cond: x.Op != OpSB && is32Bit(c+d)
+ // result: (MOVDaddr [c+d] {s} x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ d := v_0.AuxInt
+ s := v_0.Aux
+ x := v_0.Args[0]
+ if !(x.Op != OpSB && is32Bit(c+d)) {
+ break
+ }
+ v.reset(OpS390XMOVDaddr)
+ v.AuxInt = c + d
+ v.Aux = s
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] (MOVDaddridx [d] {s} x y))
+ // cond: is32Bit(c+d)
+ // result: (MOVDaddridx [c+d] {s} x y)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDaddridx {
+ break
+ }
+ d := v_0.AuxInt
+ s := v_0.Aux
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ if !(is32Bit(c + d)) {
+ break
+ }
+ v.reset(OpS390XMOVDaddridx)
+ v.AuxInt = c + d
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADDconst [0] x)
+ // cond:
+ // result: x
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] (MOVDconst [d]))
+ // cond:
+ // result: (MOVDconst [c+d])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = c + d
+ return true
+ }
+ // match: (ADDconst [c] (ADDconst [d] x))
+ // cond: is32Bit(c+d)
+ // result: (ADDconst [c+d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(is32Bit(c + d)) {
+ break
+ }
+ v.reset(OpS390XADDconst)
+ v.AuxInt = c + d
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+// rewriteValueS390X_OpS390XAND applies peephole rewrites to AND: folding a
+// 32-bit-representable constant operand into ANDconst (both operand orders)
+// and simplifying the idempotent (AND x x) to x. Returns true if any rule
+// fired. Generated code — do not edit by hand.
+func rewriteValueS390X_OpS390XAND(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (AND x (MOVDconst [c]))
+ // cond: is32Bit(c)
+ // result: (ANDconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpS390XANDconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (AND (MOVDconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (ANDconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpS390XANDconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (AND x x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+// rewriteValueS390X_OpS390XANDW applies peephole rewrites to the 32-bit ANDW:
+// folding a constant operand into ANDWconst (both operand orders, no range
+// guard needed) and simplifying (ANDW x x) to x. Returns true if any rule
+// fired. Generated code — do not edit by hand.
+func rewriteValueS390X_OpS390XANDW(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ANDW x (MOVDconst [c]))
+ // cond:
+ // result: (ANDWconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpS390XANDWconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDW (MOVDconst [c]) x)
+ // cond:
+ // result: (ANDWconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpS390XANDWconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDW x x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+// rewriteValueS390X_OpS390XANDWconst folds ANDWconst: merging nested masks,
+// collapsing an all-zero mask to MOVDconst [0], dropping an all-ones
+// (int32 -1) mask, and constant-folding against MOVDconst. Returns true if
+// any rule fired. Generated code — do not edit by hand.
+func rewriteValueS390X_OpS390XANDWconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ANDWconst [c] (ANDWconst [d] x))
+ // cond:
+ // result: (ANDWconst [c & d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XANDWconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpS390XANDWconst)
+ v.AuxInt = c & d
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDWconst [c] _)
+ // cond: int32(c)==0
+ // result: (MOVDconst [0])
+ for {
+ c := v.AuxInt
+ if !(int32(c) == 0) {
+ break
+ }
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (ANDWconst [c] x)
+ // cond: int32(c)==-1
+ // result: x
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDWconst [c] (MOVDconst [d]))
+ // cond:
+ // result: (MOVDconst [c&d])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = c & d
+ return true
+ }
+ return false
+}
+// rewriteValueS390X_OpS390XANDconst folds ANDconst: merging nested masks,
+// turning the 0xFF/0xFFFF/0xFFFFFFFF masks into the corresponding
+// zero-extension ops (MOVBZreg/MOVHZreg/MOVWZreg), collapsing a zero mask,
+// dropping a -1 mask, and constant-folding against MOVDconst. Returns true
+// if any rule fired. Generated code — do not edit by hand.
+func rewriteValueS390X_OpS390XANDconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ANDconst [c] (ANDconst [d] x))
+ // cond:
+ // result: (ANDconst [c & d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XANDconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpS390XANDconst)
+ v.AuxInt = c & d
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [0xFF] x)
+ // cond:
+ // result: (MOVBZreg x)
+ for {
+ if v.AuxInt != 0xFF {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [0xFFFF] x)
+ // cond:
+ // result: (MOVHZreg x)
+ for {
+ if v.AuxInt != 0xFFFF {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpS390XMOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [0xFFFFFFFF] x)
+ // cond:
+ // result: (MOVWZreg x)
+ for {
+ if v.AuxInt != 0xFFFFFFFF {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpS390XMOVWZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [0] _)
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (ANDconst [-1] x)
+ // cond:
+ // result: x
+ for {
+ if v.AuxInt != -1 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVDconst [d]))
+ // cond:
+ // result: (MOVDconst [c&d])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = c & d
+ return true
+ }
+ return false
+}
+// rewriteValueS390X_OpS390XCMP canonicalizes 64-bit compares with a
+// 32-bit-representable constant into CMPconst; when the constant is on the
+// left the operands are swapped and the flags wrapped in InvertFlags to
+// preserve the comparison direction. Returns true if any rule fired.
+// Generated code — do not edit by hand.
+func rewriteValueS390X_OpS390XCMP(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMP x (MOVDconst [c]))
+ // cond: is32Bit(c)
+ // result: (CMPconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpS390XCMPconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMP (MOVDconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (InvertFlags (CMPconst x [c]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpS390XInvertFlags)
+ v0 := b.NewValue0(v.Line, OpS390XCMPconst, TypeFlags)
+ v0.AuxInt = c
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+// rewriteValueS390X_OpS390XCMPU canonicalizes unsigned 64-bit compares with a
+// constant into CMPUconst, truncating the constant through uint32; a
+// constant on the left is swapped under InvertFlags. Returns true if any
+// rule fired. Generated code — do not edit by hand.
+func rewriteValueS390X_OpS390XCMPU(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPU x (MOVDconst [c]))
+ // cond: is32Bit(c)
+ // result: (CMPUconst x [int64(uint32(c))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpS390XCMPUconst)
+ v.AuxInt = int64(uint32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPU (MOVDconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (InvertFlags (CMPUconst x [int64(uint32(c))]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpS390XInvertFlags)
+ v0 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags)
+ v0.AuxInt = int64(uint32(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+// rewriteValueS390X_OpS390XCMPUconst constant-folds an unsigned compare of a
+// known MOVDconst into the corresponding flag constant (FlagEQ/FlagLT/
+// FlagGT) using 64-bit unsigned ordering. Returns true if a rule fired.
+// Generated code — do not edit by hand.
+func rewriteValueS390X_OpS390XCMPUconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPUconst (MOVDconst [x]) [y])
+ // cond: uint64(x)==uint64(y)
+ // result: (FlagEQ)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(uint64(x) == uint64(y)) {
+ break
+ }
+ v.reset(OpS390XFlagEQ)
+ return true
+ }
+ // match: (CMPUconst (MOVDconst [x]) [y])
+ // cond: uint64(x)<uint64(y)
+ // result: (FlagLT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(uint64(x) < uint64(y)) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPUconst (MOVDconst [x]) [y])
+ // cond: uint64(x)>uint64(y)
+ // result: (FlagGT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(uint64(x) > uint64(y)) {
+ break
+ }
+ v.reset(OpS390XFlagGT)
+ return true
+ }
+ return false
+}
+// rewriteValueS390X_OpS390XCMPW canonicalizes signed 32-bit compares with a
+// constant into CMPWconst; a constant on the left is swapped under
+// InvertFlags. Returns true if any rule fired. Generated code — do not edit
+// by hand.
+func rewriteValueS390X_OpS390XCMPW(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPW x (MOVDconst [c]))
+ // cond:
+ // result: (CMPWconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpS390XCMPWconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPW (MOVDconst [c]) x)
+ // cond:
+ // result: (InvertFlags (CMPWconst x [c]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpS390XInvertFlags)
+ v0 := b.NewValue0(v.Line, OpS390XCMPWconst, TypeFlags)
+ v0.AuxInt = c
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+// rewriteValueS390X_OpS390XCMPWU canonicalizes unsigned 32-bit compares with
+// a constant into CMPWUconst, truncating the constant through uint32; a
+// constant on the left is swapped under InvertFlags. Returns true if any
+// rule fired. Generated code — do not edit by hand.
+func rewriteValueS390X_OpS390XCMPWU(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPWU x (MOVDconst [c]))
+ // cond:
+ // result: (CMPWUconst x [int64(uint32(c))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int64(uint32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPWU (MOVDconst [c]) x)
+ // cond:
+ // result: (InvertFlags (CMPWUconst x [int64(uint32(c))]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpS390XInvertFlags)
+ v0 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
+ v0.AuxInt = int64(uint32(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+// rewriteValueS390X_OpS390XCMPWUconst constant-folds an unsigned 32-bit
+// compare of a known MOVDconst into a flag constant using uint32 ordering.
+// Returns true if a rule fired. Generated code — do not edit by hand.
+func rewriteValueS390X_OpS390XCMPWUconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPWUconst (MOVDconst [x]) [y])
+ // cond: uint32(x)==uint32(y)
+ // result: (FlagEQ)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(uint32(x) == uint32(y)) {
+ break
+ }
+ v.reset(OpS390XFlagEQ)
+ return true
+ }
+ // match: (CMPWUconst (MOVDconst [x]) [y])
+ // cond: uint32(x)<uint32(y)
+ // result: (FlagLT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(uint32(x) < uint32(y)) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPWUconst (MOVDconst [x]) [y])
+ // cond: uint32(x)>uint32(y)
+ // result: (FlagGT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(uint32(x) > uint32(y)) {
+ break
+ }
+ v.reset(OpS390XFlagGT)
+ return true
+ }
+ return false
+}
+// rewriteValueS390X_OpS390XCMPWconst constant-folds a signed 32-bit compare
+// of a known MOVDconst into a flag constant using int32 ordering, and also
+// derives FlagLT when the operand's value range is provably below the
+// constant (SRWconst bounds the result; ANDWconst bounds it by the mask).
+// Returns true if a rule fired. Generated code — do not edit by hand.
+func rewriteValueS390X_OpS390XCMPWconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPWconst (MOVDconst [x]) [y])
+ // cond: int32(x)==int32(y)
+ // result: (FlagEQ)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int32(x) == int32(y)) {
+ break
+ }
+ v.reset(OpS390XFlagEQ)
+ return true
+ }
+ // match: (CMPWconst (MOVDconst [x]) [y])
+ // cond: int32(x)<int32(y)
+ // result: (FlagLT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int32(x) < int32(y)) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPWconst (MOVDconst [x]) [y])
+ // cond: int32(x)>int32(y)
+ // result: (FlagGT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int32(x) > int32(y)) {
+ break
+ }
+ v.reset(OpS390XFlagGT)
+ return true
+ }
+ // match: (CMPWconst (SRWconst _ [c]) [n])
+ // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
+ // result: (FlagLT)
+ for {
+ n := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XSRWconst {
+ break
+ }
+ c := v_0.AuxInt
+ if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPWconst (ANDWconst _ [m]) [n])
+ // cond: 0 <= int32(m) && int32(m) < int32(n)
+ // result: (FlagLT)
+ for {
+ n := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XANDWconst {
+ break
+ }
+ m := v_0.AuxInt
+ if !(0 <= int32(m) && int32(m) < int32(n)) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ return false
+}
+// rewriteValueS390X_OpS390XCMPconst constant-folds a signed 64-bit compare of
+// a known MOVDconst into a flag constant, and derives FlagLT when the
+// operand is provably below the constant: a zero-extension (MOVBZreg/
+// MOVHZreg/MOVWZreg) bounds the value by 0xFF/0xFFFF/0xFFFFFFFF, SRDconst
+// bounds it by 1<<(64-c), and ANDconst bounds it by the mask. Returns true
+// if a rule fired. Generated code — do not edit by hand.
+func rewriteValueS390X_OpS390XCMPconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPconst (MOVDconst [x]) [y])
+ // cond: x==y
+ // result: (FlagEQ)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(x == y) {
+ break
+ }
+ v.reset(OpS390XFlagEQ)
+ return true
+ }
+ // match: (CMPconst (MOVDconst [x]) [y])
+ // cond: x<y
+ // result: (FlagLT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(x < y) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPconst (MOVDconst [x]) [y])
+ // cond: x>y
+ // result: (FlagGT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(x > y) {
+ break
+ }
+ v.reset(OpS390XFlagGT)
+ return true
+ }
+ // match: (CMPconst (MOVBZreg _) [c])
+ // cond: 0xFF < c
+ // result: (FlagLT)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVBZreg {
+ break
+ }
+ if !(0xFF < c) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPconst (MOVHZreg _) [c])
+ // cond: 0xFFFF < c
+ // result: (FlagLT)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVHZreg {
+ break
+ }
+ if !(0xFFFF < c) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPconst (MOVWZreg _) [c])
+ // cond: 0xFFFFFFFF < c
+ // result: (FlagLT)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVWZreg {
+ break
+ }
+ if !(0xFFFFFFFF < c) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPconst (SRDconst _ [c]) [n])
+ // cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
+ // result: (FlagLT)
+ for {
+ n := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XSRDconst {
+ break
+ }
+ c := v_0.AuxInt
+ if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPconst (ANDconst _ [m]) [n])
+ // cond: 0 <= m && m < n
+ // result: (FlagLT)
+ for {
+ n := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XANDconst {
+ break
+ }
+ m := v_0.AuxInt
+ if !(0 <= m && m < n) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ return false
+}
+// rewriteValueS390X_OpS390XFMOVDload folds addressing computation into the
+// float64 load: merging an ADDconst offset, merging a MOVDaddr symbol+offset
+// (when mergeable and the sum stays 32-bit), and converting to the indexed
+// form FMOVDloadidx for MOVDaddridx or plain ADD bases (the latter only when
+// the pointer is not SB). Returns true if any rule fired. Generated code —
+// do not edit by hand.
+func rewriteValueS390X_OpS390XFMOVDload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (FMOVDload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpS390XFMOVDload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XFMOVDload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (FMOVDload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (FMOVDloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDaddridx {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XFMOVDloadidx)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (FMOVDload [off] {sym} (ADD ptr idx) mem)
+ // cond: ptr.Op != OpSB
+ // result: (FMOVDloadidx [off] {sym} ptr idx mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XADD {
+ break
+ }
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ if !(ptr.Op != OpSB) {
+ break
+ }
+ v.reset(OpS390XFMOVDloadidx)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+// rewriteValueS390X_OpS390XFMOVDloadidx folds an ADDconst on either the
+// pointer or the index operand of an indexed float64 load into the load's
+// constant offset. NOTE(review): unlike FMOVDload, these rules carry no
+// is32Bit guard on c+d — presumably offsets here are known small; confirm
+// against the rules file. Returns true if a rule fired. Generated code —
+// do not edit by hand.
+func rewriteValueS390X_OpS390XFMOVDloadidx(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (FMOVDloadidx [c] {sym} (ADDconst [d] ptr) idx mem)
+ // cond:
+ // result: (FMOVDloadidx [c+d] {sym} ptr idx mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ d := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpS390XFMOVDloadidx)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (FMOVDloadidx [c] {sym} ptr (ADDconst [d] idx) mem)
+ // cond:
+ // result: (FMOVDloadidx [c+d] {sym} ptr idx mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ d := v_1.AuxInt
+ idx := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpS390XFMOVDloadidx)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XFMOVDstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (FMOVDstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpS390XFMOVDstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XFMOVDstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (FMOVDstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (FMOVDstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDaddridx {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XFMOVDstoreidx)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (FMOVDstore [off] {sym} (ADD ptr idx) val mem)
+ // cond: ptr.Op != OpSB
+ // result: (FMOVDstoreidx [off] {sym} ptr idx val mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XADD {
+ break
+ }
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(ptr.Op != OpSB) {
+ break
+ }
+ v.reset(OpS390XFMOVDstoreidx)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XFMOVDstoreidx(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (FMOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem)
+ // cond:
+ // result: (FMOVDstoreidx [c+d] {sym} ptr idx val mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ d := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(OpS390XFMOVDstoreidx)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (FMOVDstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem)
+ // cond:
+ // result: (FMOVDstoreidx [c+d] {sym} ptr idx val mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ d := v_1.AuxInt
+ idx := v_1.Args[0]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(OpS390XFMOVDstoreidx)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XFMOVSload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (FMOVSload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpS390XFMOVSload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XFMOVSload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (FMOVSload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (FMOVSloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDaddridx {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XFMOVSloadidx)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (FMOVSload [off] {sym} (ADD ptr idx) mem)
+ // cond: ptr.Op != OpSB
+ // result: (FMOVSloadidx [off] {sym} ptr idx mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XADD {
+ break
+ }
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ if !(ptr.Op != OpSB) {
+ break
+ }
+ v.reset(OpS390XFMOVSloadidx)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XFMOVSloadidx(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (FMOVSloadidx [c] {sym} (ADDconst [d] ptr) idx mem)
+ // cond:
+ // result: (FMOVSloadidx [c+d] {sym} ptr idx mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ d := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpS390XFMOVSloadidx)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (FMOVSloadidx [c] {sym} ptr (ADDconst [d] idx) mem)
+ // cond:
+ // result: (FMOVSloadidx [c+d] {sym} ptr idx mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ d := v_1.AuxInt
+ idx := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpS390XFMOVSloadidx)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XFMOVSstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (FMOVSstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpS390XFMOVSstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XFMOVSstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (FMOVSstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (FMOVSstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDaddridx {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XFMOVSstoreidx)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (FMOVSstore [off] {sym} (ADD ptr idx) val mem)
+ // cond: ptr.Op != OpSB
+ // result: (FMOVSstoreidx [off] {sym} ptr idx val mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XADD {
+ break
+ }
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(ptr.Op != OpSB) {
+ break
+ }
+ v.reset(OpS390XFMOVSstoreidx)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XFMOVSstoreidx(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (FMOVSstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem)
+ // cond:
+ // result: (FMOVSstoreidx [c+d] {sym} ptr idx val mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ d := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(OpS390XFMOVSstoreidx)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (FMOVSstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem)
+ // cond:
+ // result: (FMOVSstoreidx [c+d] {sym} ptr idx val mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ d := v_1.AuxInt
+ idx := v_1.Args[0]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(OpS390XFMOVSstoreidx)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVBZload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBZload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpS390XMOVBstore {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr2 := v_1.Args[0]
+ x := v_1.Args[1]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVBZload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpS390XMOVBZload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XMOVBZload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVBZloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDaddridx {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XMOVBZloadidx)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBZload [off] {sym} (ADD ptr idx) mem)
+ // cond: ptr.Op != OpSB
+ // result: (MOVBZloadidx [off] {sym} ptr idx mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XADD {
+ break
+ }
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ if !(ptr.Op != OpSB) {
+ break
+ }
+ v.reset(OpS390XMOVBZloadidx)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVBZloadidx(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBZloadidx [c] {sym} (ADDconst [d] ptr) idx mem)
+ // cond:
+ // result: (MOVBZloadidx [c+d] {sym} ptr idx mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ d := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpS390XMOVBZloadidx)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBZloadidx [c] {sym} ptr (ADDconst [d] idx) mem)
+ // cond:
+ // result: (MOVBZloadidx [c+d] {sym} ptr idx mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ d := v_1.AuxInt
+ idx := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpS390XMOVBZloadidx)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVBZreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBZreg x:(MOVBZload _ _))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x.Op != OpS390XMOVBZload {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg x:(MOVBZreg _))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x.Op != OpS390XMOVBZreg {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg x:(MOVBZload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBZload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v.Args[0]
+ if x.Op != OpS390XMOVBZload {
+ break
+ }
+ off := x.AuxInt
+ sym := x.Aux
+ ptr := x.Args[0]
+ mem := x.Args[1]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Line, OpS390XMOVBZload, v.Type)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = off
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
+ return true
+ }
+ // match: (MOVBZreg x:(MOVBZloadidx [off] {sym} ptr idx mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBZloadidx <v.Type> [off] {sym} ptr idx mem)
+ for {
+ x := v.Args[0]
+ if x.Op != OpS390XMOVBZloadidx {
+ break
+ }
+ off := x.AuxInt
+ sym := x.Aux
+ ptr := x.Args[0]
+ idx := x.Args[1]
+ mem := x.Args[2]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Line, OpS390XMOVBZloadidx, v.Type)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = off
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(idx)
+ v0.AddArg(mem)
+ return true
+ }
+ // match: (MOVBZreg (ANDWconst [c] x))
+ // cond:
+ // result: (ANDconst [c & 0xff] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XANDWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpS390XANDconst)
+ v.AuxInt = c & 0xff
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg (ANDconst [c] x))
+ // cond:
+ // result: (ANDconst [c & 0xff] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XANDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpS390XANDconst)
+ v.AuxInt = c & 0xff
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVBload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XMOVBload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVBreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBreg x:(MOVBload _ _))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x.Op != OpS390XMOVBload {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg x:(MOVBreg _))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x.Op != OpS390XMOVBreg {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg x:(MOVBZload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v.Args[0]
+ if x.Op != OpS390XMOVBZload {
+ break
+ }
+ off := x.AuxInt
+ sym := x.Aux
+ ptr := x.Args[0]
+ mem := x.Args[1]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Line, OpS390XMOVBload, v.Type)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = off
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
+ return true
+ }
+ // match: (MOVBreg (ANDWconst [c] x))
+ // cond: c & 0x80 == 0
+ // result: (ANDconst [c & 0x7f] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XANDWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(c&0x80 == 0) {
+ break
+ }
+ v.reset(OpS390XANDconst)
+ v.AuxInt = c & 0x7f
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (ANDconst [c] x))
+ // cond: c & 0x80 == 0
+ // result: (ANDconst [c & 0x7f] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XANDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(c&0x80 == 0) {
+ break
+ }
+ v.reset(OpS390XANDconst)
+ v.AuxInt = c & 0x7f
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVBstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
+ // cond:
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpS390XMOVBreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpS390XMOVBstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBZreg x) mem)
+ // cond:
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpS390XMOVBZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpS390XMOVBstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVBstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpS390XMOVBstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVDconst [c]) mem)
+ // cond: validOff(off) && ptr.Op != OpSB
+ // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ if !(validOff(off) && ptr.Op != OpSB) {
+ break
+ }
+ v.reset(OpS390XMOVBstoreconst)
+ v.AuxInt = makeValAndOff(int64(int8(c)), off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XMOVBstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVBstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDaddridx {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XMOVBstoreidx)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} (ADD ptr idx) val mem)
+ // cond: ptr.Op != OpSB
+ // result: (MOVBstoreidx [off] {sym} ptr idx val mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XADD {
+ break
+ }
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(ptr.Op != OpSB) {
+ break
+ }
+ v.reset(OpS390XMOVBstoreidx)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p w x:(MOVBstore [i-1] {s} p (SRDconst [8] w) mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && clobber(x)
+ // result: (MOVHstore [i-1] {s} p w mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ p := v.Args[0]
+ w := v.Args[1]
+ x := v.Args[2]
+ if x.Op != OpS390XMOVBstore {
+ break
+ }
+ if x.AuxInt != i-1 {
+ break
+ }
+ if x.Aux != s {
+ break
+ }
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpS390XSRDconst {
+ break
+ }
+ if x_1.AuxInt != 8 {
+ break
+ }
+ if w != x_1.Args[0] {
+ break
+ }
+ mem := x.Args[2]
+ if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVHstore)
+ v.AuxInt = i - 1
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(w)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p w0:(SRDconst [j] w) x:(MOVBstore [i-1] {s} p (SRDconst [j+8] w) mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && clobber(x)
+ // result: (MOVHstore [i-1] {s} p w0 mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ p := v.Args[0]
+ w0 := v.Args[1]
+ if w0.Op != OpS390XSRDconst {
+ break
+ }
+ j := w0.AuxInt
+ w := w0.Args[0]
+ x := v.Args[2]
+ if x.Op != OpS390XMOVBstore {
+ break
+ }
+ if x.AuxInt != i-1 {
+ break
+ }
+ if x.Aux != s {
+ break
+ }
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpS390XSRDconst {
+ break
+ }
+ if x_1.AuxInt != j+8 {
+ break
+ }
+ if w != x_1.Args[0] {
+ break
+ }
+ mem := x.Args[2]
+ if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVHstore)
+ v.AuxInt = i - 1
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(w0)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVBstoreconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBstoreconst [sc] {s} (ADDconst [off] ptr) mem)
+ // cond: ValAndOff(sc).canAdd(off)
+ // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+ for {
+ sc := v.AuxInt
+ s := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(sc).canAdd(off)) {
+ break
+ }
+ v.reset(OpS390XMOVBstoreconst)
+ v.AuxInt = ValAndOff(sc).add(off)
+ v.Aux = s
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
+ // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
+ break
+ }
+ v.reset(OpS390XMOVBstoreconst)
+ v.AuxInt = ValAndOff(sc).add(off)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x)
+ // result: (MOVHstoreconst [makeValAndOff(ValAndOff(c).Val()&0xff | ValAndOff(a).Val()<<8, ValAndOff(a).Off())] {s} p mem)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ p := v.Args[0]
+ x := v.Args[1]
+ if x.Op != OpS390XMOVBstoreconst {
+ break
+ }
+ a := x.AuxInt
+ if x.Aux != s {
+ break
+ }
+ if p != x.Args[0] {
+ break
+ }
+ mem := x.Args[1]
+ if !(p.Op != OpSB && x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVHstoreconst)
+ v.AuxInt = makeValAndOff(ValAndOff(c).Val()&0xff|ValAndOff(a).Val()<<8, ValAndOff(a).Off())
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem)
+ // cond:
+ // result: (MOVBstoreidx [c+d] {sym} ptr idx val mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ d := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(OpS390XMOVBstoreidx)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem)
+ // cond:
+ // result: (MOVBstoreidx [c+d] {sym} ptr idx val mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ d := v_1.AuxInt
+ idx := v_1.Args[0]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(OpS390XMOVBstoreidx)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreidx [i] {s} p idx w x:(MOVBstoreidx [i-1] {s} p idx (SRDconst [8] w) mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && clobber(x)
+ // result: (MOVHstoreidx [i-1] {s} p idx w mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ p := v.Args[0]
+ idx := v.Args[1]
+ w := v.Args[2]
+ x := v.Args[3]
+ if x.Op != OpS390XMOVBstoreidx {
+ break
+ }
+ if x.AuxInt != i-1 {
+ break
+ }
+ if x.Aux != s {
+ break
+ }
+ if p != x.Args[0] {
+ break
+ }
+ if idx != x.Args[1] {
+ break
+ }
+ x_2 := x.Args[2]
+ if x_2.Op != OpS390XSRDconst {
+ break
+ }
+ if x_2.AuxInt != 8 {
+ break
+ }
+ if w != x_2.Args[0] {
+ break
+ }
+ mem := x.Args[3]
+ if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVHstoreidx)
+ v.AuxInt = i - 1
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(idx)
+ v.AddArg(w)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx (SRDconst [j+8] w) mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && clobber(x)
+ // result: (MOVHstoreidx [i-1] {s} p idx w0 mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ p := v.Args[0]
+ idx := v.Args[1]
+ w0 := v.Args[2]
+ if w0.Op != OpS390XSRDconst {
+ break
+ }
+ j := w0.AuxInt
+ w := w0.Args[0]
+ x := v.Args[3]
+ if x.Op != OpS390XMOVBstoreidx {
+ break
+ }
+ if x.AuxInt != i-1 {
+ break
+ }
+ if x.Aux != s {
+ break
+ }
+ if p != x.Args[0] {
+ break
+ }
+ if idx != x.Args[1] {
+ break
+ }
+ x_2 := x.Args[2]
+ if x_2.Op != OpS390XSRDconst {
+ break
+ }
+ if x_2.AuxInt != j+8 {
+ break
+ }
+ if w != x_2.Args[0] {
+ break
+ }
+ mem := x.Args[3]
+ if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVHstoreidx)
+ v.AuxInt = i - 1
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(idx)
+ v.AddArg(w0)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVDEQ(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVDEQ x y (InvertFlags cmp))
+ // cond:
+ // result: (MOVDEQ x y cmp)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpS390XInvertFlags {
+ break
+ }
+ cmp := v_2.Args[0]
+ v.reset(OpS390XMOVDEQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(cmp)
+ return true
+ }
+ // match: (MOVDEQ _ x (FlagEQ))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpS390XFlagEQ {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVDEQ y _ (FlagLT))
+ // cond:
+ // result: y
+ for {
+ y := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpS390XFlagLT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ // match: (MOVDEQ y _ (FlagGT))
+ // cond:
+ // result: y
+ for {
+ y := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpS390XFlagGT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVDGE(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVDGE x y (InvertFlags cmp))
+ // cond:
+ // result: (MOVDLE x y cmp)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpS390XInvertFlags {
+ break
+ }
+ cmp := v_2.Args[0]
+ v.reset(OpS390XMOVDLE)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(cmp)
+ return true
+ }
+ // match: (MOVDGE _ x (FlagEQ))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpS390XFlagEQ {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVDGE y _ (FlagLT))
+ // cond:
+ // result: y
+ for {
+ y := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpS390XFlagLT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ // match: (MOVDGE _ x (FlagGT))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpS390XFlagGT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVDGT(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// (MOVDGT x y (InvertFlags cmp)) -> (MOVDLT x y cmp)
+	// Inverting the comparison operands flips greater-than into less-than.
+	for {
+		x, y, flags := v.Args[0], v.Args[1], v.Args[2]
+		if flags.Op != OpS390XInvertFlags {
+			break
+		}
+		v.reset(OpS390XMOVDLT)
+		v.AddArg(x)
+		v.AddArg(y)
+		v.AddArg(flags.Args[0])
+		return true
+	}
+	// (MOVDGT y _ (FlagEQ)) -> y: GT is false on equal flags, keep the old value.
+	for {
+		if v.Args[2].Op != OpS390XFlagEQ {
+			break
+		}
+		y := v.Args[0]
+		v.reset(OpCopy)
+		v.Type = y.Type
+		v.AddArg(y)
+		return true
+	}
+	// (MOVDGT y _ (FlagLT)) -> y: GT is false on less-than flags, keep the old value.
+	for {
+		if v.Args[2].Op != OpS390XFlagLT {
+			break
+		}
+		y := v.Args[0]
+		v.reset(OpCopy)
+		v.Type = y.Type
+		v.AddArg(y)
+		return true
+	}
+	// (MOVDGT _ x (FlagGT)) -> x: GT holds, take the new value.
+	for {
+		if v.Args[2].Op != OpS390XFlagGT {
+			break
+		}
+		x := v.Args[1]
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueS390X_OpS390XMOVDLE(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// (MOVDLE x y (InvertFlags cmp)) -> (MOVDGE x y cmp)
+	// Inverting the comparison operands flips less-or-equal into greater-or-equal.
+	for {
+		x, y, flags := v.Args[0], v.Args[1], v.Args[2]
+		if flags.Op != OpS390XInvertFlags {
+			break
+		}
+		v.reset(OpS390XMOVDGE)
+		v.AddArg(x)
+		v.AddArg(y)
+		v.AddArg(flags.Args[0])
+		return true
+	}
+	// (MOVDLE _ x (FlagEQ)) -> x: LE holds on equal flags, take the new value.
+	for {
+		if v.Args[2].Op != OpS390XFlagEQ {
+			break
+		}
+		x := v.Args[1]
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+	// (MOVDLE _ x (FlagLT)) -> x: LE holds on less-than flags, take the new value.
+	for {
+		if v.Args[2].Op != OpS390XFlagLT {
+			break
+		}
+		x := v.Args[1]
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+	// (MOVDLE y _ (FlagGT)) -> y: LE is false on greater-than flags, keep the old value.
+	for {
+		if v.Args[2].Op != OpS390XFlagGT {
+			break
+		}
+		y := v.Args[0]
+		v.reset(OpCopy)
+		v.Type = y.Type
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueS390X_OpS390XMOVDLT(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// (MOVDLT x y (InvertFlags cmp)) -> (MOVDGT x y cmp)
+	// Inverting the comparison operands flips less-than into greater-than.
+	for {
+		x, y, flags := v.Args[0], v.Args[1], v.Args[2]
+		if flags.Op != OpS390XInvertFlags {
+			break
+		}
+		v.reset(OpS390XMOVDGT)
+		v.AddArg(x)
+		v.AddArg(y)
+		v.AddArg(flags.Args[0])
+		return true
+	}
+	// (MOVDLT y _ (FlagEQ)) -> y: LT is false on equal flags, keep the old value.
+	for {
+		if v.Args[2].Op != OpS390XFlagEQ {
+			break
+		}
+		y := v.Args[0]
+		v.reset(OpCopy)
+		v.Type = y.Type
+		v.AddArg(y)
+		return true
+	}
+	// (MOVDLT _ x (FlagLT)) -> x: LT holds, take the new value.
+	for {
+		if v.Args[2].Op != OpS390XFlagLT {
+			break
+		}
+		x := v.Args[1]
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+	// (MOVDLT y _ (FlagGT)) -> y: LT is false on greater-than flags, keep the old value.
+	for {
+		if v.Args[2].Op != OpS390XFlagGT {
+			break
+		}
+		y := v.Args[0]
+		v.reset(OpCopy)
+		v.Type = y.Type
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueS390X_OpS390XMOVDNE(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// Convention (see MOVDGT/MOVDLT/MOVDLE in this file): (MOVDcond old new flags)
+	// yields new (Args[1]) when the condition holds and old (Args[0]) otherwise.
+	// NOTE(review): this file is generated; apply the same fix to the S390X.rules source.
+	// match: (MOVDNE x y (InvertFlags cmp))
+	// cond:
+	// result: (MOVDNE x y cmp)
+	// NE is symmetric under operand inversion, so only the InvertFlags wrapper is stripped.
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v_2 := v.Args[2]
+		if v_2.Op != OpS390XInvertFlags {
+			break
+		}
+		cmp := v_2.Args[0]
+		v.reset(OpS390XMOVDNE)
+		v.AddArg(x)
+		v.AddArg(y)
+		v.AddArg(cmp)
+		return true
+	}
+	// match: (MOVDNE y _ (FlagEQ))
+	// cond:
+	// result: y
+	// Flags say equal, so NE is false: keep the first (unmoved) operand.
+	// Bug fix: previously returned Args[1], inverting the select.
+	for {
+		y := v.Args[0]
+		v_2 := v.Args[2]
+		if v_2.Op != OpS390XFlagEQ {
+			break
+		}
+		v.reset(OpCopy)
+		v.Type = y.Type
+		v.AddArg(y)
+		return true
+	}
+	// match: (MOVDNE _ x (FlagLT))
+	// cond:
+	// result: x
+	// Flags say less-than (hence not equal), so NE holds: take the second operand.
+	// Bug fix: previously returned Args[0], inverting the select.
+	for {
+		x := v.Args[1]
+		v_2 := v.Args[2]
+		if v_2.Op != OpS390XFlagLT {
+			break
+		}
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+	// match: (MOVDNE _ x (FlagGT))
+	// cond:
+	// result: x
+	// Flags say greater-than (hence not equal), so NE holds: take the second operand.
+	// Bug fix: previously returned Args[0], inverting the select.
+	for {
+		x := v.Args[1]
+		v_2 := v.Args[2]
+		if v_2.Op != OpS390XFlagGT {
+			break
+		}
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueS390X_OpS390XMOVDaddr(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// (MOVDaddr [c] {s} (ADDconst [d] x)) && (c+d)&1 == 0 && is32Bit(c+d)
+	// -> (MOVDaddr [c+d] {s} x)
+	for {
+		c, s, v_0 := v.AuxInt, v.Aux, v.Args[0]
+		if v_0.Op != OpS390XADDconst {
+			break
+		}
+		d, x := v_0.AuxInt, v_0.Args[0]
+		if (c+d)&1 != 0 || !is32Bit(c+d) {
+			break
+		}
+		v.reset(OpS390XMOVDaddr)
+		v.AuxInt = c + d
+		v.Aux = s
+		v.AddArg(x)
+		return true
+	}
+	// (MOVDaddr [c] {s} (ADDconst [d] x)) && x.Op != OpSB && is32Bit(c+d)
+	// -> (MOVDaddr [c+d] {s} x)
+	for {
+		c, s, v_0 := v.AuxInt, v.Aux, v.Args[0]
+		if v_0.Op != OpS390XADDconst {
+			break
+		}
+		d, x := v_0.AuxInt, v_0.Args[0]
+		if x.Op == OpSB || !is32Bit(c+d) {
+			break
+		}
+		v.reset(OpS390XMOVDaddr)
+		v.AuxInt = c + d
+		v.Aux = s
+		v.AddArg(x)
+		return true
+	}
+	// (MOVDaddr [c] {s} (ADD x y)) && x.Op != OpSB && y.Op != OpSB
+	// -> (MOVDaddridx [c] {s} x y)
+	for {
+		c, s, v_0 := v.AuxInt, v.Aux, v.Args[0]
+		if v_0.Op != OpS390XADD {
+			break
+		}
+		x, y := v_0.Args[0], v_0.Args[1]
+		if x.Op == OpSB || y.Op == OpSB {
+			break
+		}
+		v.reset(OpS390XMOVDaddridx)
+		v.AuxInt = c
+		v.Aux = s
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	// (MOVDaddr [off1] {sym1} (MOVDaddr [off2] {sym2} x))
+	// && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+	// -> (MOVDaddr [off1+off2] {mergeSym(sym1,sym2)} x)
+	for {
+		off1, sym1, v_0 := v.AuxInt, v.Aux, v.Args[0]
+		if v_0.Op != OpS390XMOVDaddr {
+			break
+		}
+		off2, sym2, x := v_0.AuxInt, v_0.Aux, v_0.Args[0]
+		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+			break
+		}
+		v.reset(OpS390XMOVDaddr)
+		v.AuxInt = off1 + off2
+		v.Aux = mergeSym(sym1, sym2)
+		v.AddArg(x)
+		return true
+	}
+	// (MOVDaddr [off1] {sym1} (MOVDaddridx [off2] {sym2} x y))
+	// && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+	// -> (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)
+	for {
+		off1, sym1, v_0 := v.AuxInt, v.Aux, v.Args[0]
+		if v_0.Op != OpS390XMOVDaddridx {
+			break
+		}
+		off2, sym2 := v_0.AuxInt, v_0.Aux
+		x, y := v_0.Args[0], v_0.Args[1]
+		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+			break
+		}
+		v.reset(OpS390XMOVDaddridx)
+		v.AuxInt = off1 + off2
+		v.Aux = mergeSym(sym1, sym2)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueS390X_OpS390XMOVDaddridx(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// (MOVDaddridx [c] {s} (ADDconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB
+	// -> (MOVDaddridx [c+d] {s} x y)
+	for {
+		c, s, v_0 := v.AuxInt, v.Aux, v.Args[0]
+		if v_0.Op != OpS390XADDconst {
+			break
+		}
+		d, x := v_0.AuxInt, v_0.Args[0]
+		y := v.Args[1]
+		if !is32Bit(c+d) || x.Op == OpSB {
+			break
+		}
+		v.reset(OpS390XMOVDaddridx)
+		v.AuxInt = c + d
+		v.Aux = s
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	// (MOVDaddridx [c] {s} x (ADDconst [d] y)) && is32Bit(c+d) && y.Op != OpSB
+	// -> (MOVDaddridx [c+d] {s} x y)
+	for {
+		c, s := v.AuxInt, v.Aux
+		x, v_1 := v.Args[0], v.Args[1]
+		if v_1.Op != OpS390XADDconst {
+			break
+		}
+		d, y := v_1.AuxInt, v_1.Args[0]
+		if !is32Bit(c+d) || y.Op == OpSB {
+			break
+		}
+		v.reset(OpS390XMOVDaddridx)
+		v.AuxInt = c + d
+		v.Aux = s
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	// (MOVDaddridx [off1] {sym1} (MOVDaddr [off2] {sym2} x) y)
+	// && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
+	// -> (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)
+	for {
+		off1, sym1, v_0 := v.AuxInt, v.Aux, v.Args[0]
+		if v_0.Op != OpS390XMOVDaddr {
+			break
+		}
+		off2, sym2, x := v_0.AuxInt, v_0.Aux, v_0.Args[0]
+		y := v.Args[1]
+		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+			break
+		}
+		v.reset(OpS390XMOVDaddridx)
+		v.AuxInt = off1 + off2
+		v.Aux = mergeSym(sym1, sym2)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	// (MOVDaddridx [off1] {sym1} x (MOVDaddr [off2] {sym2} y))
+	// && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB
+	// -> (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)
+	for {
+		off1, sym1 := v.AuxInt, v.Aux
+		x, v_1 := v.Args[0], v.Args[1]
+		if v_1.Op != OpS390XMOVDaddr {
+			break
+		}
+		off2, sym2, y := v_1.AuxInt, v_1.Aux, v_1.Args[0]
+		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB) {
+			break
+		}
+		v.reset(OpS390XMOVDaddridx)
+		v.AuxInt = off1 + off2
+		v.Aux = mergeSym(sym1, sym2)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueS390X_OpS390XMOVDload(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// Store-to-load forwarding:
+	// (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _))
+	// && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+	for {
+		off, sym := v.AuxInt, v.Aux
+		ptr, v_1 := v.Args[0], v.Args[1]
+		if v_1.Op != OpS390XMOVDstore {
+			break
+		}
+		off2, sym2 := v_1.AuxInt, v_1.Aux
+		ptr2, x := v_1.Args[0], v_1.Args[1]
+		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+			break
+		}
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+	// (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2)
+	// -> (MOVDload [off1+off2] {sym} ptr mem)
+	for {
+		off1, sym, v_0 := v.AuxInt, v.Aux, v.Args[0]
+		if v_0.Op != OpS390XADDconst {
+			break
+		}
+		off2, ptr := v_0.AuxInt, v_0.Args[0]
+		mem := v.Args[1]
+		if !is32Bit(off1 + off2) {
+			break
+		}
+		v.reset(OpS390XMOVDload)
+		v.AuxInt = off1 + off2
+		v.Aux = sym
+		v.AddArg(ptr)
+		v.AddArg(mem)
+		return true
+	}
+	// (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem)
+	// && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+	// -> (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+	for {
+		off1, sym1, v_0 := v.AuxInt, v.Aux, v.Args[0]
+		if v_0.Op != OpS390XMOVDaddr {
+			break
+		}
+		off2, sym2, base := v_0.AuxInt, v_0.Aux, v_0.Args[0]
+		mem := v.Args[1]
+		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+			break
+		}
+		v.reset(OpS390XMOVDload)
+		v.AuxInt = off1 + off2
+		v.Aux = mergeSym(sym1, sym2)
+		v.AddArg(base)
+		v.AddArg(mem)
+		return true
+	}
+	// (MOVDload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem)
+	// && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+	// -> (MOVDloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+	for {
+		off1, sym1, v_0 := v.AuxInt, v.Aux, v.Args[0]
+		if v_0.Op != OpS390XMOVDaddridx {
+			break
+		}
+		off2, sym2 := v_0.AuxInt, v_0.Aux
+		ptr, idx := v_0.Args[0], v_0.Args[1]
+		mem := v.Args[1]
+		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+			break
+		}
+		v.reset(OpS390XMOVDloadidx)
+		v.AuxInt = off1 + off2
+		v.Aux = mergeSym(sym1, sym2)
+		v.AddArg(ptr)
+		v.AddArg(idx)
+		v.AddArg(mem)
+		return true
+	}
+	// (MOVDload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB
+	// -> (MOVDloadidx [off] {sym} ptr idx mem)
+	for {
+		off, sym, v_0 := v.AuxInt, v.Aux, v.Args[0]
+		if v_0.Op != OpS390XADD {
+			break
+		}
+		ptr, idx := v_0.Args[0], v_0.Args[1]
+		mem := v.Args[1]
+		if ptr.Op == OpSB {
+			break
+		}
+		v.reset(OpS390XMOVDloadidx)
+		v.AuxInt = off
+		v.Aux = sym
+		v.AddArg(ptr)
+		v.AddArg(idx)
+		v.AddArg(mem)
+		return true
+	}
+	return false
+}
+func rewriteValueS390X_OpS390XMOVDloadidx(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// (MOVDloadidx [c] {sym} (ADDconst [d] ptr) idx mem)
+	// -> (MOVDloadidx [c+d] {sym} ptr idx mem)
+	for {
+		c, sym, v_0 := v.AuxInt, v.Aux, v.Args[0]
+		if v_0.Op != OpS390XADDconst {
+			break
+		}
+		d, ptr := v_0.AuxInt, v_0.Args[0]
+		idx, mem := v.Args[1], v.Args[2]
+		v.reset(OpS390XMOVDloadidx)
+		v.AuxInt = c + d
+		v.Aux = sym
+		v.AddArg(ptr)
+		v.AddArg(idx)
+		v.AddArg(mem)
+		return true
+	}
+	// (MOVDloadidx [c] {sym} ptr (ADDconst [d] idx) mem)
+	// -> (MOVDloadidx [c+d] {sym} ptr idx mem)
+	for {
+		c, sym := v.AuxInt, v.Aux
+		ptr, v_1 := v.Args[0], v.Args[1]
+		if v_1.Op != OpS390XADDconst {
+			break
+		}
+		d, idx := v_1.AuxInt, v_1.Args[0]
+		mem := v.Args[2]
+		v.reset(OpS390XMOVDloadidx)
+		v.AuxInt = c + d
+		v.Aux = sym
+		v.AddArg(ptr)
+		v.AddArg(idx)
+		v.AddArg(mem)
+		return true
+	}
+	return false
+}
+func rewriteValueS390X_OpS390XMOVDstore(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(off1+off2)
+	// -> (MOVDstore [off1+off2] {sym} ptr val mem)
+	for {
+		off1, sym, v_0 := v.AuxInt, v.Aux, v.Args[0]
+		if v_0.Op != OpS390XADDconst {
+			break
+		}
+		off2, ptr := v_0.AuxInt, v_0.Args[0]
+		val, mem := v.Args[1], v.Args[2]
+		if !is32Bit(off1 + off2) {
+			break
+		}
+		v.reset(OpS390XMOVDstore)
+		v.AuxInt = off1 + off2
+		v.Aux = sym
+		v.AddArg(ptr)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
+	// (MOVDstore [off] {sym} ptr (MOVDconst [c]) mem)
+	// && validValAndOff(c,off) && int64(int16(c)) == c && ptr.Op != OpSB
+	// -> (MOVDstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
+	for {
+		off, sym := v.AuxInt, v.Aux
+		ptr, v_1 := v.Args[0], v.Args[1]
+		if v_1.Op != OpS390XMOVDconst {
+			break
+		}
+		c, mem := v_1.AuxInt, v.Args[2]
+		if !(validValAndOff(c, off) && int64(int16(c)) == c && ptr.Op != OpSB) {
+			break
+		}
+		v.reset(OpS390XMOVDstoreconst)
+		v.AuxInt = makeValAndOff(c, off)
+		v.Aux = sym
+		v.AddArg(ptr)
+		v.AddArg(mem)
+		return true
+	}
+	// (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem)
+	// && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+	// -> (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	for {
+		off1, sym1, v_0 := v.AuxInt, v.Aux, v.Args[0]
+		if v_0.Op != OpS390XMOVDaddr {
+			break
+		}
+		off2, sym2, base := v_0.AuxInt, v_0.Aux, v_0.Args[0]
+		val, mem := v.Args[1], v.Args[2]
+		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+			break
+		}
+		v.reset(OpS390XMOVDstore)
+		v.AuxInt = off1 + off2
+		v.Aux = mergeSym(sym1, sym2)
+		v.AddArg(base)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
+	// (MOVDstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem)
+	// && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+	// -> (MOVDstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+	for {
+		off1, sym1, v_0 := v.AuxInt, v.Aux, v.Args[0]
+		if v_0.Op != OpS390XMOVDaddridx {
+			break
+		}
+		off2, sym2 := v_0.AuxInt, v_0.Aux
+		ptr, idx := v_0.Args[0], v_0.Args[1]
+		val, mem := v.Args[1], v.Args[2]
+		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+			break
+		}
+		v.reset(OpS390XMOVDstoreidx)
+		v.AuxInt = off1 + off2
+		v.Aux = mergeSym(sym1, sym2)
+		v.AddArg(ptr)
+		v.AddArg(idx)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
+	// (MOVDstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB
+	// -> (MOVDstoreidx [off] {sym} ptr idx val mem)
+	for {
+		off, sym, v_0 := v.AuxInt, v.Aux, v.Args[0]
+		if v_0.Op != OpS390XADD {
+			break
+		}
+		ptr, idx := v_0.Args[0], v_0.Args[1]
+		val, mem := v.Args[1], v.Args[2]
+		if ptr.Op == OpSB {
+			break
+		}
+		v.reset(OpS390XMOVDstoreidx)
+		v.AuxInt = off
+		v.Aux = sym
+		v.AddArg(ptr)
+		v.AddArg(idx)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
+	// Combine chains of doubleword stores at decreasing offsets into a single
+	// store-multiple. Longest chain is tried first so STMG4 wins over STMG3/STMG2.
+	// (MOVDstore [i] {s} p w3 x2:(MOVDstore [i-8] {s} p w2 x1:(MOVDstore [i-16] {s} p w1 x0:(MOVDstore [i-24] {s} p w0 mem))))
+	// && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && is20Bit(i-24) && clobber(x0) && clobber(x1) && clobber(x2)
+	// -> (STMG4 [i-24] {s} p w0 w1 w2 w3 mem)
+	for {
+		i, s := v.AuxInt, v.Aux
+		p, w3, x2 := v.Args[0], v.Args[1], v.Args[2]
+		if x2.Op != OpS390XMOVDstore || x2.AuxInt != i-8 || x2.Aux != s || p != x2.Args[0] {
+			break
+		}
+		w2, x1 := x2.Args[1], x2.Args[2]
+		if x1.Op != OpS390XMOVDstore || x1.AuxInt != i-16 || x1.Aux != s || p != x1.Args[0] {
+			break
+		}
+		w1, x0 := x1.Args[1], x1.Args[2]
+		if x0.Op != OpS390XMOVDstore || x0.AuxInt != i-24 || x0.Aux != s || p != x0.Args[0] {
+			break
+		}
+		// Capture the stored value and memory before the condition below runs
+		// clobber on the old stores.
+		w0, mem := x0.Args[1], x0.Args[2]
+		if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && is20Bit(i-24) && clobber(x0) && clobber(x1) && clobber(x2)) {
+			break
+		}
+		v.reset(OpS390XSTMG4)
+		v.AuxInt = i - 24
+		v.Aux = s
+		v.AddArg(p)
+		v.AddArg(w0)
+		v.AddArg(w1)
+		v.AddArg(w2)
+		v.AddArg(w3)
+		v.AddArg(mem)
+		return true
+	}
+	// (MOVDstore [i] {s} p w2 x1:(MOVDstore [i-8] {s} p w1 x0:(MOVDstore [i-16] {s} p w0 mem)))
+	// && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && is20Bit(i-16) && clobber(x0) && clobber(x1)
+	// -> (STMG3 [i-16] {s} p w0 w1 w2 mem)
+	for {
+		i, s := v.AuxInt, v.Aux
+		p, w2, x1 := v.Args[0], v.Args[1], v.Args[2]
+		if x1.Op != OpS390XMOVDstore || x1.AuxInt != i-8 || x1.Aux != s || p != x1.Args[0] {
+			break
+		}
+		w1, x0 := x1.Args[1], x1.Args[2]
+		if x0.Op != OpS390XMOVDstore || x0.AuxInt != i-16 || x0.Aux != s || p != x0.Args[0] {
+			break
+		}
+		w0, mem := x0.Args[1], x0.Args[2]
+		if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && is20Bit(i-16) && clobber(x0) && clobber(x1)) {
+			break
+		}
+		v.reset(OpS390XSTMG3)
+		v.AuxInt = i - 16
+		v.Aux = s
+		v.AddArg(p)
+		v.AddArg(w0)
+		v.AddArg(w1)
+		v.AddArg(w2)
+		v.AddArg(mem)
+		return true
+	}
+	// (MOVDstore [i] {s} p w1 x:(MOVDstore [i-8] {s} p w0 mem))
+	// && p.Op != OpSB && x.Uses == 1 && is20Bit(i-8) && clobber(x)
+	// -> (STMG2 [i-8] {s} p w0 w1 mem)
+	for {
+		i, s := v.AuxInt, v.Aux
+		p, w1, x := v.Args[0], v.Args[1], v.Args[2]
+		if x.Op != OpS390XMOVDstore || x.AuxInt != i-8 || x.Aux != s || p != x.Args[0] {
+			break
+		}
+		w0, mem := x.Args[1], x.Args[2]
+		if !(p.Op != OpSB && x.Uses == 1 && is20Bit(i-8) && clobber(x)) {
+			break
+		}
+		v.reset(OpS390XSTMG2)
+		v.AuxInt = i - 8
+		v.Aux = s
+		v.AddArg(p)
+		v.AddArg(w0)
+		v.AddArg(w1)
+		v.AddArg(mem)
+		return true
+	}
+	return false
+}
+func rewriteValueS390X_OpS390XMOVDstoreconst(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// (MOVDstoreconst [sc] {s} (ADDconst [off] ptr) mem) && ValAndOff(sc).canAdd(off)
+	// -> (MOVDstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+	for {
+		sc, s, v_0 := v.AuxInt, v.Aux, v.Args[0]
+		if v_0.Op != OpS390XADDconst {
+			break
+		}
+		off, ptr := v_0.AuxInt, v_0.Args[0]
+		mem := v.Args[1]
+		if !ValAndOff(sc).canAdd(off) {
+			break
+		}
+		v.reset(OpS390XMOVDstoreconst)
+		v.AuxInt = ValAndOff(sc).add(off)
+		v.Aux = s
+		v.AddArg(ptr)
+		v.AddArg(mem)
+		return true
+	}
+	// (MOVDstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem)
+	// && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
+	// -> (MOVDstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+	for {
+		sc, sym1, v_0 := v.AuxInt, v.Aux, v.Args[0]
+		if v_0.Op != OpS390XMOVDaddr {
+			break
+		}
+		off, sym2, ptr := v_0.AuxInt, v_0.Aux, v_0.Args[0]
+		mem := v.Args[1]
+		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
+			break
+		}
+		v.reset(OpS390XMOVDstoreconst)
+		v.AuxInt = ValAndOff(sc).add(off)
+		v.Aux = mergeSym(sym1, sym2)
+		v.AddArg(ptr)
+		v.AddArg(mem)
+		return true
+	}
+	return false
+}
+func rewriteValueS390X_OpS390XMOVDstoreidx(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// (MOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem)
+	// -> (MOVDstoreidx [c+d] {sym} ptr idx val mem)
+	for {
+		c, sym, v_0 := v.AuxInt, v.Aux, v.Args[0]
+		if v_0.Op != OpS390XADDconst {
+			break
+		}
+		d, ptr := v_0.AuxInt, v_0.Args[0]
+		idx, val, mem := v.Args[1], v.Args[2], v.Args[3]
+		v.reset(OpS390XMOVDstoreidx)
+		v.AuxInt = c + d
+		v.Aux = sym
+		v.AddArg(ptr)
+		v.AddArg(idx)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
+	// (MOVDstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem)
+	// -> (MOVDstoreidx [c+d] {sym} ptr idx val mem)
+	for {
+		c, sym := v.AuxInt, v.Aux
+		ptr, v_1 := v.Args[0], v.Args[1]
+		if v_1.Op != OpS390XADDconst {
+			break
+		}
+		d, idx := v_1.AuxInt, v_1.Args[0]
+		val, mem := v.Args[2], v.Args[3]
+		v.reset(OpS390XMOVDstoreidx)
+		v.AuxInt = c + d
+		v.Aux = sym
+		v.AddArg(ptr)
+		v.AddArg(idx)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
+	return false
+}
+func rewriteValueS390X_OpS390XMOVHZload(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// Store-to-load forwarding:
+	// (MOVHZload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
+	// && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+	for {
+		off, sym := v.AuxInt, v.Aux
+		ptr, v_1 := v.Args[0], v.Args[1]
+		if v_1.Op != OpS390XMOVHstore {
+			break
+		}
+		off2, sym2 := v_1.AuxInt, v_1.Aux
+		ptr2, x := v_1.Args[0], v_1.Args[1]
+		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+			break
+		}
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+	// (MOVHZload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2)
+	// -> (MOVHZload [off1+off2] {sym} ptr mem)
+	for {
+		off1, sym, v_0 := v.AuxInt, v.Aux, v.Args[0]
+		if v_0.Op != OpS390XADDconst {
+			break
+		}
+		off2, ptr := v_0.AuxInt, v_0.Args[0]
+		mem := v.Args[1]
+		if !is32Bit(off1 + off2) {
+			break
+		}
+		v.reset(OpS390XMOVHZload)
+		v.AuxInt = off1 + off2
+		v.Aux = sym
+		v.AddArg(ptr)
+		v.AddArg(mem)
+		return true
+	}
+	// (MOVHZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem)
+	// && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+	// -> (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+	for {
+		off1, sym1, v_0 := v.AuxInt, v.Aux, v.Args[0]
+		if v_0.Op != OpS390XMOVDaddr {
+			break
+		}
+		off2, sym2, base := v_0.AuxInt, v_0.Aux, v_0.Args[0]
+		mem := v.Args[1]
+		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+			break
+		}
+		v.reset(OpS390XMOVHZload)
+		v.AuxInt = off1 + off2
+		v.Aux = mergeSym(sym1, sym2)
+		v.AddArg(base)
+		v.AddArg(mem)
+		return true
+	}
+	// (MOVHZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem)
+	// && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+	// -> (MOVHZloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+	for {
+		off1, sym1, v_0 := v.AuxInt, v.Aux, v.Args[0]
+		if v_0.Op != OpS390XMOVDaddridx {
+			break
+		}
+		off2, sym2 := v_0.AuxInt, v_0.Aux
+		ptr, idx := v_0.Args[0], v_0.Args[1]
+		mem := v.Args[1]
+		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+			break
+		}
+		v.reset(OpS390XMOVHZloadidx)
+		v.AuxInt = off1 + off2
+		v.Aux = mergeSym(sym1, sym2)
+		v.AddArg(ptr)
+		v.AddArg(idx)
+		v.AddArg(mem)
+		return true
+	}
+	// (MOVHZload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB
+	// -> (MOVHZloadidx [off] {sym} ptr idx mem)
+	for {
+		off, sym, v_0 := v.AuxInt, v.Aux, v.Args[0]
+		if v_0.Op != OpS390XADD {
+			break
+		}
+		ptr, idx := v_0.Args[0], v_0.Args[1]
+		mem := v.Args[1]
+		if ptr.Op == OpSB {
+			break
+		}
+		v.reset(OpS390XMOVHZloadidx)
+		v.AuxInt = off
+		v.Aux = sym
+		v.AddArg(ptr)
+		v.AddArg(idx)
+		v.AddArg(mem)
+		return true
+	}
+	return false
+}
+func rewriteValueS390X_OpS390XMOVHZloadidx(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// (MOVHZloadidx [c] {sym} (ADDconst [d] ptr) idx mem)
+	// -> (MOVHZloadidx [c+d] {sym} ptr idx mem)
+	for {
+		c, sym, v_0 := v.AuxInt, v.Aux, v.Args[0]
+		if v_0.Op != OpS390XADDconst {
+			break
+		}
+		d, ptr := v_0.AuxInt, v_0.Args[0]
+		idx, mem := v.Args[1], v.Args[2]
+		v.reset(OpS390XMOVHZloadidx)
+		v.AuxInt = c + d
+		v.Aux = sym
+		v.AddArg(ptr)
+		v.AddArg(idx)
+		v.AddArg(mem)
+		return true
+	}
+	// (MOVHZloadidx [c] {sym} ptr (ADDconst [d] idx) mem)
+	// -> (MOVHZloadidx [c+d] {sym} ptr idx mem)
+	for {
+		c, sym := v.AuxInt, v.Aux
+		ptr, v_1 := v.Args[0], v.Args[1]
+		if v_1.Op != OpS390XADDconst {
+			break
+		}
+		d, idx := v_1.AuxInt, v_1.Args[0]
+		mem := v.Args[2]
+		v.reset(OpS390XMOVHZloadidx)
+		v.AuxInt = c + d
+		v.Aux = sym
+		v.AddArg(ptr)
+		v.AddArg(idx)
+		v.AddArg(mem)
+		return true
+	}
+	return false
+}
+func rewriteValueS390X_OpS390XMOVHZreg(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// (MOVHZreg x:(MOVBZload _ _)) -> x
+	// (MOVHZreg x:(MOVHZload _ _)) -> x
+	// (MOVHZreg x:(MOVBZreg _))    -> x
+	// (MOVHZreg x:(MOVHZreg _))    -> x
+	// The operand is already zero-extended to at most 16 bits, so the
+	// extension is a no-op. The Op tests are mutually exclusive, so folding
+	// the four generated rules into one switch preserves rule order.
+	switch v.Args[0].Op {
+	case OpS390XMOVBZload, OpS390XMOVHZload, OpS390XMOVBZreg, OpS390XMOVHZreg:
+		x := v.Args[0]
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+	// (MOVHZreg x:(MOVHZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x)
+	// -> @x.Block (MOVHZload <v.Type> [off] {sym} ptr mem)
+	for {
+		x := v.Args[0]
+		if x.Op != OpS390XMOVHZload {
+			break
+		}
+		// Capture operands before the condition runs clobber on x.
+		off, sym := x.AuxInt, x.Aux
+		ptr, mem := x.Args[0], x.Args[1]
+		if !(x.Uses == 1 && clobber(x)) {
+			break
+		}
+		b = x.Block
+		v0 := b.NewValue0(v.Line, OpS390XMOVHZload, v.Type)
+		v.reset(OpCopy)
+		v.AddArg(v0)
+		v0.AuxInt = off
+		v0.Aux = sym
+		v0.AddArg(ptr)
+		v0.AddArg(mem)
+		return true
+	}
+	// (MOVHZreg x:(MOVHZloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x)
+	// -> @x.Block (MOVHZloadidx <v.Type> [off] {sym} ptr idx mem)
+	for {
+		x := v.Args[0]
+		if x.Op != OpS390XMOVHZloadidx {
+			break
+		}
+		// Capture operands before the condition runs clobber on x.
+		off, sym := x.AuxInt, x.Aux
+		ptr, idx, mem := x.Args[0], x.Args[1], x.Args[2]
+		if !(x.Uses == 1 && clobber(x)) {
+			break
+		}
+		b = x.Block
+		v0 := b.NewValue0(v.Line, OpS390XMOVHZloadidx, v.Type)
+		v.reset(OpCopy)
+		v.AddArg(v0)
+		v0.AuxInt = off
+		v0.Aux = sym
+		v0.AddArg(ptr)
+		v0.AddArg(idx)
+		v0.AddArg(mem)
+		return true
+	}
+	// (MOVHZreg (ANDWconst [c] x)) -> (ANDconst [c & 0xffff] x)
+	// (MOVHZreg (ANDconst [c] x))  -> (ANDconst [c & 0xffff] x)
+	for {
+		v_0 := v.Args[0]
+		if v_0.Op != OpS390XANDWconst && v_0.Op != OpS390XANDconst {
+			break
+		}
+		c, x := v_0.AuxInt, v_0.Args[0]
+		v.reset(OpS390XANDconst)
+		v.AuxInt = c & 0xffff
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueS390X_OpS390XMOVHload(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem)
+	// && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+	// -> (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+	for {
+		off1, sym1, v_0 := v.AuxInt, v.Aux, v.Args[0]
+		if v_0.Op != OpS390XMOVDaddr {
+			break
+		}
+		off2, sym2, base := v_0.AuxInt, v_0.Aux, v_0.Args[0]
+		mem := v.Args[1]
+		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+			break
+		}
+		v.reset(OpS390XMOVHload)
+		v.AuxInt = off1 + off2
+		v.Aux = mergeSym(sym1, sym2)
+		v.AddArg(base)
+		v.AddArg(mem)
+		return true
+	}
+	return false
+}
+func rewriteValueS390X_OpS390XMOVHreg(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// (MOVHreg x:(MOVBload _ _))  -> x
+	// (MOVHreg x:(MOVBZload _ _)) -> x
+	// (MOVHreg x:(MOVHload _ _))  -> x
+	// (MOVHreg x:(MOVBreg _))     -> x
+	// (MOVHreg x:(MOVBZreg _))    -> x
+	// (MOVHreg x:(MOVHreg _))     -> x
+	// The operand already fits in 16 bits with the sign bit in place, so the
+	// sign extension is a no-op. The Op tests are mutually exclusive, so
+	// folding the six generated rules into one switch preserves rule order.
+	switch v.Args[0].Op {
+	case OpS390XMOVBload, OpS390XMOVBZload, OpS390XMOVHload, OpS390XMOVBreg, OpS390XMOVBZreg, OpS390XMOVHreg:
+		x := v.Args[0]
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+	// (MOVHreg x:(MOVHZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x)
+	// -> @x.Block (MOVHload <v.Type> [off] {sym} ptr mem)
+	for {
+		x := v.Args[0]
+		if x.Op != OpS390XMOVHZload {
+			break
+		}
+		// Capture operands before the condition runs clobber on x.
+		off, sym := x.AuxInt, x.Aux
+		ptr, mem := x.Args[0], x.Args[1]
+		if !(x.Uses == 1 && clobber(x)) {
+			break
+		}
+		b = x.Block
+		v0 := b.NewValue0(v.Line, OpS390XMOVHload, v.Type)
+		v.reset(OpCopy)
+		v.AddArg(v0)
+		v0.AuxInt = off
+		v0.Aux = sym
+		v0.AddArg(ptr)
+		v0.AddArg(mem)
+		return true
+	}
+	// (MOVHreg (ANDWconst [c] x)) && c & 0x8000 == 0 -> (ANDconst [c & 0x7fff] x)
+	// (MOVHreg (ANDconst [c] x))  && c & 0x8000 == 0 -> (ANDconst [c & 0x7fff] x)
+	// With bit 15 clear the masked value is non-negative as an int16, so the
+	// sign extension can be folded into the mask.
+	for {
+		v_0 := v.Args[0]
+		if v_0.Op != OpS390XANDWconst && v_0.Op != OpS390XANDconst {
+			break
+		}
+		c, x := v_0.AuxInt, v_0.Args[0]
+		if c&0x8000 != 0 {
+			break
+		}
+		v.reset(OpS390XANDconst)
+		v.AuxInt = c & 0x7fff
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueS390X_OpS390XMOVHstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
+ // cond:
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpS390XMOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpS390XMOVHstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHZreg x) mem)
+ // cond:
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpS390XMOVHZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpS390XMOVHstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVHstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpS390XMOVHstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVDconst [c]) mem)
+ // cond: validOff(off) && ptr.Op != OpSB
+ // result: (MOVHstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ if !(validOff(off) && ptr.Op != OpSB) {
+ break
+ }
+ v.reset(OpS390XMOVHstoreconst)
+ v.AuxInt = makeValAndOff(int64(int16(c)), off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XMOVHstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVHstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDaddridx {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XMOVHstoreidx)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} (ADD ptr idx) val mem)
+ // cond: ptr.Op != OpSB
+ // result: (MOVHstoreidx [off] {sym} ptr idx val mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XADD {
+ break
+ }
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(ptr.Op != OpSB) {
+ break
+ }
+ v.reset(OpS390XMOVHstoreidx)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHstore [i] {s} p w x:(MOVHstore [i-2] {s} p (SRDconst [16] w) mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i-2] {s} p w mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ p := v.Args[0]
+ w := v.Args[1]
+ x := v.Args[2]
+ if x.Op != OpS390XMOVHstore {
+ break
+ }
+ if x.AuxInt != i-2 {
+ break
+ }
+ if x.Aux != s {
+ break
+ }
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpS390XSRDconst {
+ break
+ }
+ if x_1.AuxInt != 16 {
+ break
+ }
+ if w != x_1.Args[0] {
+ break
+ }
+ mem := x.Args[2]
+ if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVWstore)
+ v.AuxInt = i - 2
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(w)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHstore [i] {s} p w0:(SRDconst [j] w) x:(MOVHstore [i-2] {s} p (SRDconst [j+16] w) mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i-2] {s} p w0 mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ p := v.Args[0]
+ w0 := v.Args[1]
+ if w0.Op != OpS390XSRDconst {
+ break
+ }
+ j := w0.AuxInt
+ w := w0.Args[0]
+ x := v.Args[2]
+ if x.Op != OpS390XMOVHstore {
+ break
+ }
+ if x.AuxInt != i-2 {
+ break
+ }
+ if x.Aux != s {
+ break
+ }
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpS390XSRDconst {
+ break
+ }
+ if x_1.AuxInt != j+16 {
+ break
+ }
+ if w != x_1.Args[0] {
+ break
+ }
+ mem := x.Args[2]
+ if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVWstore)
+ v.AuxInt = i - 2
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(w0)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+// rewriteValueS390X_OpS390XMOVHstoreconst applies the rewrite rules for
+// MOVHstoreconst (store of a 16-bit constant). Reports whether v changed.
+func rewriteValueS390X_OpS390XMOVHstoreconst(v *Value, config *Config) bool {
+    b := v.Block
+    _ = b
+    // (MOVHstoreconst [sc] {s} (ADDconst [off] ptr) mem)
+    //   && ValAndOff(sc).canAdd(off)
+    // -> (MOVHstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+    // Fold a constant pointer adjustment into the store offset.
+    if v_0 := v.Args[0]; v_0.Op == OpS390XADDconst {
+        sc, s := v.AuxInt, v.Aux
+        off, ptr := v_0.AuxInt, v_0.Args[0]
+        mem := v.Args[1]
+        if ValAndOff(sc).canAdd(off) {
+            v.reset(OpS390XMOVHstoreconst)
+            v.AuxInt = ValAndOff(sc).add(off)
+            v.Aux = s
+            v.AddArg(ptr)
+            v.AddArg(mem)
+            return true
+        }
+    }
+    // (MOVHstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem)
+    //   && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
+    // -> (MOVHstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+    // Fold a symbolic address computation into the store.
+    if v_0 := v.Args[0]; v_0.Op == OpS390XMOVDaddr {
+        sc, sym1 := v.AuxInt, v.Aux
+        off, sym2, ptr := v_0.AuxInt, v_0.Aux, v_0.Args[0]
+        mem := v.Args[1]
+        if canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) {
+            v.reset(OpS390XMOVHstoreconst)
+            v.AuxInt = ValAndOff(sc).add(off)
+            v.Aux = mergeSym(sym1, sym2)
+            v.AddArg(ptr)
+            v.AddArg(mem)
+            return true
+        }
+    }
+    // (MOVHstoreconst [c] {s} p x:(MOVHstoreconst [a] {s} p mem))
+    //   && p.Op != OpSB && x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)
+    // -> (MOVWstoreconst [makeValAndOff(ValAndOff(c).Val()&0xffff | ValAndOff(a).Val()<<16, ValAndOff(a).Off())] {s} p mem)
+    // Combine two adjacent halfword constant stores into a single word
+    // store; the lower-addressed halfword supplies the high 16 bits.
+    if x := v.Args[1]; x.Op == OpS390XMOVHstoreconst {
+        c, s, p := v.AuxInt, v.Aux, v.Args[0]
+        a := x.AuxInt
+        if x.Aux == s && p == x.Args[0] {
+            mem := x.Args[1]
+            if p.Op != OpSB && x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x) {
+                v.reset(OpS390XMOVWstoreconst)
+                v.AuxInt = makeValAndOff(ValAndOff(c).Val()&0xffff|ValAndOff(a).Val()<<16, ValAndOff(a).Off())
+                v.Aux = s
+                v.AddArg(p)
+                v.AddArg(mem)
+                return true
+            }
+        }
+    }
+    return false
+}
+// rewriteValueS390X_OpS390XMOVHstoreidx applies the rewrite rules for
+// MOVHstoreidx (indexed 16-bit store). Reports whether v changed.
+func rewriteValueS390X_OpS390XMOVHstoreidx(v *Value, config *Config) bool {
+    b := v.Block
+    _ = b
+    // (MOVHstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem)
+    // -> (MOVHstoreidx [c+d] {sym} ptr idx val mem)
+    // Fold a constant added to the pointer into the displacement.
+    if v_0 := v.Args[0]; v_0.Op == OpS390XADDconst {
+        c, sym := v.AuxInt, v.Aux
+        d, ptr := v_0.AuxInt, v_0.Args[0]
+        idx, val, mem := v.Args[1], v.Args[2], v.Args[3]
+        v.reset(OpS390XMOVHstoreidx)
+        v.AuxInt = c + d
+        v.Aux = sym
+        v.AddArg(ptr)
+        v.AddArg(idx)
+        v.AddArg(val)
+        v.AddArg(mem)
+        return true
+    }
+    // (MOVHstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem)
+    // -> (MOVHstoreidx [c+d] {sym} ptr idx val mem)
+    // Same folding, applied to the index operand.
+    if v_1 := v.Args[1]; v_1.Op == OpS390XADDconst {
+        c, sym := v.AuxInt, v.Aux
+        ptr := v.Args[0]
+        d, idx := v_1.AuxInt, v_1.Args[0]
+        val, mem := v.Args[2], v.Args[3]
+        v.reset(OpS390XMOVHstoreidx)
+        v.AuxInt = c + d
+        v.Aux = sym
+        v.AddArg(ptr)
+        v.AddArg(idx)
+        v.AddArg(val)
+        v.AddArg(mem)
+        return true
+    }
+    // (MOVHstoreidx [i] {s} p idx w x:(MOVHstoreidx [i-2] {s} p idx (SRDconst [16] w) mem))
+    //   && p.Op != OpSB && x.Uses == 1 && clobber(x)
+    // -> (MOVWstoreidx [i-2] {s} p idx w mem)
+    // Merge two adjacent halfword stores of the same word into one store.
+    if x := v.Args[3]; x.Op == OpS390XMOVHstoreidx {
+        i, s := v.AuxInt, v.Aux
+        p, idx, w := v.Args[0], v.Args[1], v.Args[2]
+        if x.AuxInt == i-2 && x.Aux == s && p == x.Args[0] && idx == x.Args[1] {
+            x_2 := x.Args[2]
+            if x_2.Op == OpS390XSRDconst && x_2.AuxInt == 16 && w == x_2.Args[0] {
+                mem := x.Args[3]
+                if p.Op != OpSB && x.Uses == 1 && clobber(x) {
+                    v.reset(OpS390XMOVWstoreidx)
+                    v.AuxInt = i - 2
+                    v.Aux = s
+                    v.AddArg(p)
+                    v.AddArg(idx)
+                    v.AddArg(w)
+                    v.AddArg(mem)
+                    return true
+                }
+            }
+        }
+    }
+    // (MOVHstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVHstoreidx [i-2] {s} p idx (SRDconst [j+16] w) mem))
+    //   && p.Op != OpSB && x.Uses == 1 && clobber(x)
+    // -> (MOVWstoreidx [i-2] {s} p idx w0 mem)
+    // Same merge where both stored values are shifted slices of w.
+    if w0 := v.Args[2]; w0.Op == OpS390XSRDconst {
+        i, s := v.AuxInt, v.Aux
+        p, idx := v.Args[0], v.Args[1]
+        j, w := w0.AuxInt, w0.Args[0]
+        x := v.Args[3]
+        if x.Op == OpS390XMOVHstoreidx && x.AuxInt == i-2 && x.Aux == s && p == x.Args[0] && idx == x.Args[1] {
+            x_2 := x.Args[2]
+            if x_2.Op == OpS390XSRDconst && x_2.AuxInt == j+16 && w == x_2.Args[0] {
+                mem := x.Args[3]
+                if p.Op != OpSB && x.Uses == 1 && clobber(x) {
+                    v.reset(OpS390XMOVWstoreidx)
+                    v.AuxInt = i - 2
+                    v.Aux = s
+                    v.AddArg(p)
+                    v.AddArg(idx)
+                    v.AddArg(w0)
+                    v.AddArg(mem)
+                    return true
+                }
+            }
+        }
+    }
+    return false
+}
+// rewriteValueS390X_OpS390XMOVWZload applies the rewrite rules for
+// MOVWZload (32-bit zero-extending load). Reports whether v changed.
+func rewriteValueS390X_OpS390XMOVWZload(v *Value, config *Config) bool {
+    b := v.Block
+    _ = b
+    // (MOVWZload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
+    //   && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+    // -> x
+    // Forward the just-stored value instead of reloading it.
+    if v_1 := v.Args[1]; v_1.Op == OpS390XMOVWstore {
+        off, sym, ptr := v.AuxInt, v.Aux, v.Args[0]
+        off2, sym2 := v_1.AuxInt, v_1.Aux
+        ptr2, x := v_1.Args[0], v_1.Args[1]
+        if sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) {
+            v.reset(OpCopy)
+            v.Type = x.Type
+            v.AddArg(x)
+            return true
+        }
+    }
+    // (MOVWZload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2)
+    // -> (MOVWZload [off1+off2] {sym} ptr mem)
+    if v_0 := v.Args[0]; v_0.Op == OpS390XADDconst {
+        off1, sym := v.AuxInt, v.Aux
+        off2, ptr := v_0.AuxInt, v_0.Args[0]
+        mem := v.Args[1]
+        if is32Bit(off1 + off2) {
+            v.reset(OpS390XMOVWZload)
+            v.AuxInt = off1 + off2
+            v.Aux = sym
+            v.AddArg(ptr)
+            v.AddArg(mem)
+            return true
+        }
+    }
+    // (MOVWZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem)
+    //   && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+    // -> (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+    if v_0 := v.Args[0]; v_0.Op == OpS390XMOVDaddr {
+        off1, sym1 := v.AuxInt, v.Aux
+        off2, sym2, base := v_0.AuxInt, v_0.Aux, v_0.Args[0]
+        mem := v.Args[1]
+        if is32Bit(off1+off2) && canMergeSym(sym1, sym2) {
+            v.reset(OpS390XMOVWZload)
+            v.AuxInt = off1 + off2
+            v.Aux = mergeSym(sym1, sym2)
+            v.AddArg(base)
+            v.AddArg(mem)
+            return true
+        }
+    }
+    // (MOVWZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem)
+    //   && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+    // -> (MOVWZloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+    if v_0 := v.Args[0]; v_0.Op == OpS390XMOVDaddridx {
+        off1, sym1 := v.AuxInt, v.Aux
+        off2, sym2 := v_0.AuxInt, v_0.Aux
+        ptr, idx, mem := v_0.Args[0], v_0.Args[1], v.Args[1]
+        if is32Bit(off1+off2) && canMergeSym(sym1, sym2) {
+            v.reset(OpS390XMOVWZloadidx)
+            v.AuxInt = off1 + off2
+            v.Aux = mergeSym(sym1, sym2)
+            v.AddArg(ptr)
+            v.AddArg(idx)
+            v.AddArg(mem)
+            return true
+        }
+    }
+    // (MOVWZload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB
+    // -> (MOVWZloadidx [off] {sym} ptr idx mem)
+    if v_0 := v.Args[0]; v_0.Op == OpS390XADD {
+        off, sym := v.AuxInt, v.Aux
+        ptr, idx, mem := v_0.Args[0], v_0.Args[1], v.Args[1]
+        if ptr.Op != OpSB {
+            v.reset(OpS390XMOVWZloadidx)
+            v.AuxInt = off
+            v.Aux = sym
+            v.AddArg(ptr)
+            v.AddArg(idx)
+            v.AddArg(mem)
+            return true
+        }
+    }
+    return false
+}
+// rewriteValueS390X_OpS390XMOVWZloadidx applies the rewrite rules for
+// MOVWZloadidx (indexed 32-bit zero-extending load). Reports whether v
+// changed.
+func rewriteValueS390X_OpS390XMOVWZloadidx(v *Value, config *Config) bool {
+    b := v.Block
+    _ = b
+    // (MOVWZloadidx [c] {sym} (ADDconst [d] ptr) idx mem)
+    // -> (MOVWZloadidx [c+d] {sym} ptr idx mem)
+    // Fold a constant added to the pointer into the displacement.
+    if v_0 := v.Args[0]; v_0.Op == OpS390XADDconst {
+        c, sym := v.AuxInt, v.Aux
+        d, ptr := v_0.AuxInt, v_0.Args[0]
+        idx, mem := v.Args[1], v.Args[2]
+        v.reset(OpS390XMOVWZloadidx)
+        v.AuxInt = c + d
+        v.Aux = sym
+        v.AddArg(ptr)
+        v.AddArg(idx)
+        v.AddArg(mem)
+        return true
+    }
+    // (MOVWZloadidx [c] {sym} ptr (ADDconst [d] idx) mem)
+    // -> (MOVWZloadidx [c+d] {sym} ptr idx mem)
+    // Same folding, applied to the index operand.
+    if v_1 := v.Args[1]; v_1.Op == OpS390XADDconst {
+        c, sym := v.AuxInt, v.Aux
+        ptr := v.Args[0]
+        d, idx := v_1.AuxInt, v_1.Args[0]
+        mem := v.Args[2]
+        v.reset(OpS390XMOVWZloadidx)
+        v.AuxInt = c + d
+        v.Aux = sym
+        v.AddArg(ptr)
+        v.AddArg(idx)
+        v.AddArg(mem)
+        return true
+    }
+    return false
+}
+// rewriteValueS390X_OpS390XMOVWZreg applies the rewrite rules for MOVWZreg
+// (zero extension of the low 32 bits). Reports whether v changed.
+func rewriteValueS390X_OpS390XMOVWZreg(v *Value, config *Config) bool {
+    b := v.Block
+    _ = b
+    // Zero extension is a no-op when the argument is already zero-extended
+    // from 32 bits or fewer:
+    // (MOVWZreg x:(MOVBZload _ _)) -> x   (likewise MOVHZload, MOVWZload)
+    // (MOVWZreg x:(MOVBZreg _))    -> x   (likewise MOVHZreg, MOVWZreg)
+    switch x := v.Args[0]; x.Op {
+    case OpS390XMOVBZload, OpS390XMOVHZload, OpS390XMOVWZload,
+        OpS390XMOVBZreg, OpS390XMOVHZreg, OpS390XMOVWZreg:
+        v.reset(OpCopy)
+        v.Type = x.Type
+        v.AddArg(x)
+        return true
+    }
+    // (MOVWZreg x:(MOVWZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x)
+    // -> @x.Block (MOVWZload <v.Type> [off] {sym} ptr mem)
+    // NOTE(review): unreachable — a MOVWZload argument is always consumed
+    // by the unconditional copy rule above. Kept for parity with the rule
+    // file.
+    if x := v.Args[0]; x.Op == OpS390XMOVWZload {
+        off, sym := x.AuxInt, x.Aux
+        ptr, mem := x.Args[0], x.Args[1]
+        if x.Uses == 1 && clobber(x) {
+            b = x.Block
+            v0 := b.NewValue0(v.Line, OpS390XMOVWZload, v.Type)
+            v.reset(OpCopy)
+            v.AddArg(v0)
+            v0.AuxInt = off
+            v0.Aux = sym
+            v0.AddArg(ptr)
+            v0.AddArg(mem)
+            return true
+        }
+    }
+    // (MOVWZreg x:(MOVWZloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x)
+    // -> @x.Block (MOVWZloadidx <v.Type> [off] {sym} ptr idx mem)
+    // Re-issue the single-use load with the extension's type so the
+    // extension itself can be dropped.
+    if x := v.Args[0]; x.Op == OpS390XMOVWZloadidx {
+        off, sym := x.AuxInt, x.Aux
+        ptr, idx, mem := x.Args[0], x.Args[1], x.Args[2]
+        if x.Uses == 1 && clobber(x) {
+            b = x.Block
+            v0 := b.NewValue0(v.Line, OpS390XMOVWZloadidx, v.Type)
+            v.reset(OpCopy)
+            v.AddArg(v0)
+            v0.AuxInt = off
+            v0.Aux = sym
+            v0.AddArg(ptr)
+            v0.AddArg(idx)
+            v0.AddArg(mem)
+            return true
+        }
+    }
+    // (MOVWZreg (ANDWconst [c] x)) -> (ANDconst [c & 0xffffffff] x)
+    // (MOVWZreg (ANDconst  [c] x)) -> (ANDconst [c & 0xffffffff] x)
+    // A mask restricted to the low 32 bits both truncates and
+    // zero-extends, so the extension folds into the mask.
+    if a := v.Args[0]; a.Op == OpS390XANDWconst || a.Op == OpS390XANDconst {
+        c, x := a.AuxInt, a.Args[0]
+        v.reset(OpS390XANDconst)
+        v.AuxInt = c & 0xffffffff
+        v.AddArg(x)
+        return true
+    }
+    return false
+}
+// rewriteValueS390X_OpS390XMOVWload applies the rewrite rules for MOVWload
+// (32-bit sign-extending load). Reports whether v changed.
+func rewriteValueS390X_OpS390XMOVWload(v *Value, config *Config) bool {
+    b := v.Block
+    _ = b
+    // (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem)
+    //   && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+    // -> (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+    // Fold a symbolic address computation into the load.
+    if v_0 := v.Args[0]; v_0.Op == OpS390XMOVDaddr {
+        off1, sym1 := v.AuxInt, v.Aux
+        off2, sym2, base := v_0.AuxInt, v_0.Aux, v_0.Args[0]
+        mem := v.Args[1]
+        if is32Bit(off1+off2) && canMergeSym(sym1, sym2) {
+            v.reset(OpS390XMOVWload)
+            v.AuxInt = off1 + off2
+            v.Aux = mergeSym(sym1, sym2)
+            v.AddArg(base)
+            v.AddArg(mem)
+            return true
+        }
+    }
+    return false
+}
+// rewriteValueS390X_OpS390XMOVWreg applies the rewrite rules for MOVWreg
+// (sign extension of the low 32 bits to 64 bits). Reports whether v
+// changed.
+func rewriteValueS390X_OpS390XMOVWreg(v *Value, config *Config) bool {
+    b := v.Block
+    _ = b
+    // Sign extension is a no-op when the argument is already extended from
+    // 32 bits or fewer (bits 32..63 then already mirror bit 31):
+    // (MOVWreg x:(MOVBload _ _))  -> x   (likewise MOVBZload, MOVHload,
+    //                                     MOVHZload, MOVWload)
+    // (MOVWreg x:(MOVBreg _))     -> x   (likewise MOVBZreg, MOVHreg,
+    //                                     MOVHZreg, MOVWreg)
+    // BUGFIX: the generated code matched MOVHreg twice; the second,
+    // unreachable copy is corrected here to MOVHZreg (a zero-extended
+    // 16-bit value has bit 31 clear, so sign extension is an identity),
+    // mirroring the MOVWZreg rule set.
+    switch x := v.Args[0]; x.Op {
+    case OpS390XMOVBload, OpS390XMOVBZload, OpS390XMOVHload, OpS390XMOVHZload,
+        OpS390XMOVWload, OpS390XMOVBreg, OpS390XMOVBZreg, OpS390XMOVHreg,
+        OpS390XMOVHZreg, OpS390XMOVWreg:
+        v.reset(OpCopy)
+        v.Type = x.Type
+        v.AddArg(x)
+        return true
+    }
+    // (MOVWreg x:(MOVWZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x)
+    // -> @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
+    // A zero-extending load whose only use is a sign extension can be
+    // replaced by a sign-extending load at the same location.
+    if x := v.Args[0]; x.Op == OpS390XMOVWZload {
+        off, sym := x.AuxInt, x.Aux
+        ptr, mem := x.Args[0], x.Args[1]
+        if x.Uses == 1 && clobber(x) {
+            b = x.Block
+            v0 := b.NewValue0(v.Line, OpS390XMOVWload, v.Type)
+            v.reset(OpCopy)
+            v.AddArg(v0)
+            v0.AuxInt = off
+            v0.Aux = sym
+            v0.AddArg(ptr)
+            v0.AddArg(mem)
+            return true
+        }
+    }
+    // (MOVWreg (ANDWconst [c] x)) && c&0x80000000 == 0 -> (ANDconst [c&0x7fffffff] x)
+    // (MOVWreg (ANDconst  [c] x)) && c&0x80000000 == 0 -> (ANDconst [c&0x7fffffff] x)
+    // A mask that clears bit 31 makes the sign extension redundant.
+    if a := v.Args[0]; (a.Op == OpS390XANDWconst || a.Op == OpS390XANDconst) && a.AuxInt&0x80000000 == 0 {
+        c, x := a.AuxInt, a.Args[0]
+        v.reset(OpS390XANDconst)
+        v.AuxInt = c & 0x7fffffff
+        v.AddArg(x)
+        return true
+    }
+    return false
+}
+// rewriteValueS390X_OpS390XMOVWstore applies the rewrite rules for
+// MOVWstore (32-bit store). Reports whether v changed. Rule order matters:
+// the STM4 pattern must be tried before STM3 and STM2.
+func rewriteValueS390X_OpS390XMOVWstore(v *Value, config *Config) bool {
+    b := v.Block
+    _ = b
+    // A word store uses only the low 32 bits of the value, so any
+    // extension of the stored value is redundant:
+    // (MOVWstore [off] {sym} ptr (MOVWreg x) mem)  -> (MOVWstore [off] {sym} ptr x mem)
+    // (MOVWstore [off] {sym} ptr (MOVWZreg x) mem) -> (MOVWstore [off] {sym} ptr x mem)
+    if v_1 := v.Args[1]; v_1.Op == OpS390XMOVWreg || v_1.Op == OpS390XMOVWZreg {
+        off, sym := v.AuxInt, v.Aux
+        ptr, x, mem := v.Args[0], v_1.Args[0], v.Args[2]
+        v.reset(OpS390XMOVWstore)
+        v.AuxInt = off
+        v.Aux = sym
+        v.AddArg(ptr)
+        v.AddArg(x)
+        v.AddArg(mem)
+        return true
+    }
+    // (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(off1+off2)
+    // -> (MOVWstore [off1+off2] {sym} ptr val mem)
+    if v_0 := v.Args[0]; v_0.Op == OpS390XADDconst {
+        off1, sym := v.AuxInt, v.Aux
+        off2, ptr := v_0.AuxInt, v_0.Args[0]
+        val, mem := v.Args[1], v.Args[2]
+        if is32Bit(off1 + off2) {
+            v.reset(OpS390XMOVWstore)
+            v.AuxInt = off1 + off2
+            v.Aux = sym
+            v.AddArg(ptr)
+            v.AddArg(val)
+            v.AddArg(mem)
+            return true
+        }
+    }
+    // (MOVWstore [off] {sym} ptr (MOVDconst [c]) mem)
+    //   && validOff(off) && int64(int16(c)) == c && ptr.Op != OpSB
+    // -> (MOVWstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
+    // The constant must round-trip through int16 (the immediate form only
+    // encodes 16 bits, sign-extended).
+    if v_1 := v.Args[1]; v_1.Op == OpS390XMOVDconst {
+        off, sym := v.AuxInt, v.Aux
+        ptr, c, mem := v.Args[0], v_1.AuxInt, v.Args[2]
+        if validOff(off) && int64(int16(c)) == c && ptr.Op != OpSB {
+            v.reset(OpS390XMOVWstoreconst)
+            v.AuxInt = makeValAndOff(int64(int32(c)), off)
+            v.Aux = sym
+            v.AddArg(ptr)
+            v.AddArg(mem)
+            return true
+        }
+    }
+    // (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem)
+    //   && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+    // -> (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+    if v_0 := v.Args[0]; v_0.Op == OpS390XMOVDaddr {
+        off1, sym1 := v.AuxInt, v.Aux
+        off2, sym2, base := v_0.AuxInt, v_0.Aux, v_0.Args[0]
+        val, mem := v.Args[1], v.Args[2]
+        if is32Bit(off1+off2) && canMergeSym(sym1, sym2) {
+            v.reset(OpS390XMOVWstore)
+            v.AuxInt = off1 + off2
+            v.Aux = mergeSym(sym1, sym2)
+            v.AddArg(base)
+            v.AddArg(val)
+            v.AddArg(mem)
+            return true
+        }
+    }
+    // (MOVWstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem)
+    //   && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+    // -> (MOVWstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+    if v_0 := v.Args[0]; v_0.Op == OpS390XMOVDaddridx {
+        off1, sym1 := v.AuxInt, v.Aux
+        off2, sym2 := v_0.AuxInt, v_0.Aux
+        ptr, idx := v_0.Args[0], v_0.Args[1]
+        val, mem := v.Args[1], v.Args[2]
+        if is32Bit(off1+off2) && canMergeSym(sym1, sym2) {
+            v.reset(OpS390XMOVWstoreidx)
+            v.AuxInt = off1 + off2
+            v.Aux = mergeSym(sym1, sym2)
+            v.AddArg(ptr)
+            v.AddArg(idx)
+            v.AddArg(val)
+            v.AddArg(mem)
+            return true
+        }
+    }
+    // (MOVWstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB
+    // -> (MOVWstoreidx [off] {sym} ptr idx val mem)
+    if v_0 := v.Args[0]; v_0.Op == OpS390XADD {
+        off, sym := v.AuxInt, v.Aux
+        ptr, idx := v_0.Args[0], v_0.Args[1]
+        val, mem := v.Args[1], v.Args[2]
+        if ptr.Op != OpSB {
+            v.reset(OpS390XMOVWstoreidx)
+            v.AuxInt = off
+            v.Aux = sym
+            v.AddArg(ptr)
+            v.AddArg(idx)
+            v.AddArg(val)
+            v.AddArg(mem)
+            return true
+        }
+    }
+    // (MOVWstore [i] {s} p (SRDconst [32] w) x:(MOVWstore [i-4] {s} p w mem))
+    //   && p.Op != OpSB && x.Uses == 1 && clobber(x)
+    // -> (MOVDstore [i-4] {s} p w mem)
+    // Merge a high-word store over an adjacent low-word store of the same
+    // doubleword into one 64-bit store.
+    if v_1 := v.Args[1]; v_1.Op == OpS390XSRDconst && v_1.AuxInt == 32 {
+        i, s, p, w := v.AuxInt, v.Aux, v.Args[0], v_1.Args[0]
+        x := v.Args[2]
+        if x.Op == OpS390XMOVWstore && x.AuxInt == i-4 && x.Aux == s && p == x.Args[0] && w == x.Args[1] {
+            mem := x.Args[2]
+            if p.Op != OpSB && x.Uses == 1 && clobber(x) {
+                v.reset(OpS390XMOVDstore)
+                v.AuxInt = i - 4
+                v.Aux = s
+                v.AddArg(p)
+                v.AddArg(w)
+                v.AddArg(mem)
+                return true
+            }
+        }
+    }
+    // (MOVWstore [i] {s} p w0:(SRDconst [j] w) x:(MOVWstore [i-4] {s} p (SRDconst [j+32] w) mem))
+    //   && p.Op != OpSB && x.Uses == 1 && clobber(x)
+    // -> (MOVDstore [i-4] {s} p w0 mem)
+    // Same merge where both stored values are shifted slices of w.
+    if w0 := v.Args[1]; w0.Op == OpS390XSRDconst {
+        i, s, p := v.AuxInt, v.Aux, v.Args[0]
+        j, w := w0.AuxInt, w0.Args[0]
+        x := v.Args[2]
+        if x.Op == OpS390XMOVWstore && x.AuxInt == i-4 && x.Aux == s && p == x.Args[0] {
+            x_1 := x.Args[1]
+            if x_1.Op == OpS390XSRDconst && x_1.AuxInt == j+32 && w == x_1.Args[0] {
+                mem := x.Args[2]
+                if p.Op != OpSB && x.Uses == 1 && clobber(x) {
+                    v.reset(OpS390XMOVDstore)
+                    v.AuxInt = i - 4
+                    v.Aux = s
+                    v.AddArg(p)
+                    v.AddArg(w0)
+                    v.AddArg(mem)
+                    return true
+                }
+            }
+        }
+    }
+    // (MOVWstore [i] {s} p w3 x2:(MOVWstore [i-4] {s} p w2 x1:(MOVWstore [i-8] {s} p w1 x0:(MOVWstore [i-12] {s} p w0 mem))))
+    //   && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && is20Bit(i-12) && clobber(x0) && clobber(x1) && clobber(x2)
+    // -> (STM4 [i-12] {s} p w0 w1 w2 w3 mem)
+    // Collapse four consecutive word stores into one store-multiple.
+    if x2 := v.Args[2]; x2.Op == OpS390XMOVWstore {
+        i, s, p, w3 := v.AuxInt, v.Aux, v.Args[0], v.Args[1]
+        if x2.AuxInt == i-4 && x2.Aux == s && p == x2.Args[0] {
+            w2, x1 := x2.Args[1], x2.Args[2]
+            if x1.Op == OpS390XMOVWstore && x1.AuxInt == i-8 && x1.Aux == s && p == x1.Args[0] {
+                w1, x0 := x1.Args[1], x1.Args[2]
+                if x0.Op == OpS390XMOVWstore && x0.AuxInt == i-12 && x0.Aux == s && p == x0.Args[0] {
+                    w0, mem := x0.Args[1], x0.Args[2]
+                    if p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && is20Bit(i-12) && clobber(x0) && clobber(x1) && clobber(x2) {
+                        v.reset(OpS390XSTM4)
+                        v.AuxInt = i - 12
+                        v.Aux = s
+                        v.AddArg(p)
+                        v.AddArg(w0)
+                        v.AddArg(w1)
+                        v.AddArg(w2)
+                        v.AddArg(w3)
+                        v.AddArg(mem)
+                        return true
+                    }
+                }
+            }
+        }
+    }
+    // (MOVWstore [i] {s} p w2 x1:(MOVWstore [i-4] {s} p w1 x0:(MOVWstore [i-8] {s} p w0 mem)))
+    //   && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && is20Bit(i-8) && clobber(x0) && clobber(x1)
+    // -> (STM3 [i-8] {s} p w0 w1 w2 mem)
+    if x1 := v.Args[2]; x1.Op == OpS390XMOVWstore {
+        i, s, p, w2 := v.AuxInt, v.Aux, v.Args[0], v.Args[1]
+        if x1.AuxInt == i-4 && x1.Aux == s && p == x1.Args[0] {
+            w1, x0 := x1.Args[1], x1.Args[2]
+            if x0.Op == OpS390XMOVWstore && x0.AuxInt == i-8 && x0.Aux == s && p == x0.Args[0] {
+                w0, mem := x0.Args[1], x0.Args[2]
+                if p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && is20Bit(i-8) && clobber(x0) && clobber(x1) {
+                    v.reset(OpS390XSTM3)
+                    v.AuxInt = i - 8
+                    v.Aux = s
+                    v.AddArg(p)
+                    v.AddArg(w0)
+                    v.AddArg(w1)
+                    v.AddArg(w2)
+                    v.AddArg(mem)
+                    return true
+                }
+            }
+        }
+    }
+    // (MOVWstore [i] {s} p w1 x:(MOVWstore [i-4] {s} p w0 mem))
+    //   && p.Op != OpSB && x.Uses == 1 && is20Bit(i-4) && clobber(x)
+    // -> (STM2 [i-4] {s} p w0 w1 mem)
+    if x := v.Args[2]; x.Op == OpS390XMOVWstore {
+        i, s, p, w1 := v.AuxInt, v.Aux, v.Args[0], v.Args[1]
+        if x.AuxInt == i-4 && x.Aux == s && p == x.Args[0] {
+            w0, mem := x.Args[1], x.Args[2]
+            if p.Op != OpSB && x.Uses == 1 && is20Bit(i-4) && clobber(x) {
+                v.reset(OpS390XSTM2)
+                v.AuxInt = i - 4
+                v.Aux = s
+                v.AddArg(p)
+                v.AddArg(w0)
+                v.AddArg(w1)
+                v.AddArg(mem)
+                return true
+            }
+        }
+    }
+    return false
+}
+// rewriteValueS390X_OpS390XMOVWstoreconst applies the rewrite rules for
+// MOVWstoreconst (store of a 32-bit constant). Reports whether v changed.
+func rewriteValueS390X_OpS390XMOVWstoreconst(v *Value, config *Config) bool {
+    b := v.Block
+    _ = b
+    // (MOVWstoreconst [sc] {s} (ADDconst [off] ptr) mem)
+    //   && ValAndOff(sc).canAdd(off)
+    // -> (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+    // Fold a constant pointer adjustment into the store offset.
+    if v_0 := v.Args[0]; v_0.Op == OpS390XADDconst {
+        sc, s := v.AuxInt, v.Aux
+        off, ptr := v_0.AuxInt, v_0.Args[0]
+        mem := v.Args[1]
+        if ValAndOff(sc).canAdd(off) {
+            v.reset(OpS390XMOVWstoreconst)
+            v.AuxInt = ValAndOff(sc).add(off)
+            v.Aux = s
+            v.AddArg(ptr)
+            v.AddArg(mem)
+            return true
+        }
+    }
+    // (MOVWstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem)
+    //   && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
+    // -> (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+    // Fold a symbolic address computation into the store.
+    if v_0 := v.Args[0]; v_0.Op == OpS390XMOVDaddr {
+        sc, sym1 := v.AuxInt, v.Aux
+        off, sym2, ptr := v_0.AuxInt, v_0.Aux, v_0.Args[0]
+        mem := v.Args[1]
+        if canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) {
+            v.reset(OpS390XMOVWstoreconst)
+            v.AuxInt = ValAndOff(sc).add(off)
+            v.Aux = mergeSym(sym1, sym2)
+            v.AddArg(ptr)
+            v.AddArg(mem)
+            return true
+        }
+    }
+    // (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
+    //   && p.Op != OpSB && x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)
+    // -> (MOVDstore [ValAndOff(a).Off()] {s} p (MOVDconst [ValAndOff(c).Val()&0xffffffff | ValAndOff(a).Val()<<32]) mem)
+    // Combine two adjacent word constant stores into one doubleword store;
+    // the lower-addressed word supplies the high 32 bits.
+    if x := v.Args[1]; x.Op == OpS390XMOVWstoreconst {
+        c, s, p := v.AuxInt, v.Aux, v.Args[0]
+        a := x.AuxInt
+        if x.Aux == s && p == x.Args[0] {
+            mem := x.Args[1]
+            if p.Op != OpSB && x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x) {
+                v.reset(OpS390XMOVDstore)
+                v.AuxInt = ValAndOff(a).Off()
+                v.Aux = s
+                v.AddArg(p)
+                v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
+                v0.AuxInt = ValAndOff(c).Val()&0xffffffff | ValAndOff(a).Val()<<32
+                v.AddArg(v0)
+                v.AddArg(mem)
+                return true
+            }
+        }
+    }
+    return false
+}
+// rewriteValueS390X_OpS390XMOVWstoreidx applies the rewrite rules for
+// MOVWstoreidx (indexed 32-bit store). Reports whether v changed.
+func rewriteValueS390X_OpS390XMOVWstoreidx(v *Value, config *Config) bool {
+    b := v.Block
+    _ = b
+    // (MOVWstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem)
+    // -> (MOVWstoreidx [c+d] {sym} ptr idx val mem)
+    // Fold a constant added to the pointer into the displacement.
+    if v_0 := v.Args[0]; v_0.Op == OpS390XADDconst {
+        c, sym := v.AuxInt, v.Aux
+        d, ptr := v_0.AuxInt, v_0.Args[0]
+        idx, val, mem := v.Args[1], v.Args[2], v.Args[3]
+        v.reset(OpS390XMOVWstoreidx)
+        v.AuxInt = c + d
+        v.Aux = sym
+        v.AddArg(ptr)
+        v.AddArg(idx)
+        v.AddArg(val)
+        v.AddArg(mem)
+        return true
+    }
+    // (MOVWstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem)
+    // -> (MOVWstoreidx [c+d] {sym} ptr idx val mem)
+    // Same folding, applied to the index operand.
+    if v_1 := v.Args[1]; v_1.Op == OpS390XADDconst {
+        c, sym := v.AuxInt, v.Aux
+        ptr := v.Args[0]
+        d, idx := v_1.AuxInt, v_1.Args[0]
+        val, mem := v.Args[2], v.Args[3]
+        v.reset(OpS390XMOVWstoreidx)
+        v.AuxInt = c + d
+        v.Aux = sym
+        v.AddArg(ptr)
+        v.AddArg(idx)
+        v.AddArg(val)
+        v.AddArg(mem)
+        return true
+    }
+    // (MOVWstoreidx [i] {s} p idx w x:(MOVWstoreidx [i-4] {s} p idx (SRDconst [32] w) mem))
+    //   && p.Op != OpSB && x.Uses == 1 && clobber(x)
+    // -> (MOVDstoreidx [i-4] {s} p idx w mem)
+    // Merge two adjacent word stores of the same doubleword into one store.
+    if x := v.Args[3]; x.Op == OpS390XMOVWstoreidx {
+        i, s := v.AuxInt, v.Aux
+        p, idx, w := v.Args[0], v.Args[1], v.Args[2]
+        if x.AuxInt == i-4 && x.Aux == s && p == x.Args[0] && idx == x.Args[1] {
+            x_2 := x.Args[2]
+            if x_2.Op == OpS390XSRDconst && x_2.AuxInt == 32 && w == x_2.Args[0] {
+                mem := x.Args[3]
+                if p.Op != OpSB && x.Uses == 1 && clobber(x) {
+                    v.reset(OpS390XMOVDstoreidx)
+                    v.AuxInt = i - 4
+                    v.Aux = s
+                    v.AddArg(p)
+                    v.AddArg(idx)
+                    v.AddArg(w)
+                    v.AddArg(mem)
+                    return true
+                }
+            }
+        }
+    }
+    // (MOVWstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVWstoreidx [i-4] {s} p idx (SRDconst [j+32] w) mem))
+    //   && p.Op != OpSB && x.Uses == 1 && clobber(x)
+    // -> (MOVDstoreidx [i-4] {s} p idx w0 mem)
+    // Same merge where both stored values are shifted slices of w.
+    if w0 := v.Args[2]; w0.Op == OpS390XSRDconst {
+        i, s := v.AuxInt, v.Aux
+        p, idx := v.Args[0], v.Args[1]
+        j, w := w0.AuxInt, w0.Args[0]
+        x := v.Args[3]
+        if x.Op == OpS390XMOVWstoreidx && x.AuxInt == i-4 && x.Aux == s && p == x.Args[0] && idx == x.Args[1] {
+            x_2 := x.Args[2]
+            if x_2.Op == OpS390XSRDconst && x_2.AuxInt == j+32 && w == x_2.Args[0] {
+                mem := x.Args[3]
+                if p.Op != OpSB && x.Uses == 1 && clobber(x) {
+                    v.reset(OpS390XMOVDstoreidx)
+                    v.AuxInt = i - 4
+                    v.Aux = s
+                    v.AddArg(p)
+                    v.AddArg(idx)
+                    v.AddArg(w0)
+                    v.AddArg(mem)
+                    return true
+                }
+            }
+        }
+    }
+    return false
+}
+// rewriteValueS390X_OpS390XMULLD applies the rewrite rules for MULLD
+// (64-bit multiply). Reports whether v changed.
+func rewriteValueS390X_OpS390XMULLD(v *Value, config *Config) bool {
+    b := v.Block
+    _ = b
+    // (MULLD x (MOVDconst [c])) && is32Bit(c) -> (MULLDconst [c] x)
+    // (MULLD (MOVDconst [c]) x) && is32Bit(c) -> (MULLDconst [c] x)
+    // Multiplication commutes; fold a 32-bit constant operand from either
+    // side into the immediate form.
+    if v_1 := v.Args[1]; v_1.Op == OpS390XMOVDconst && is32Bit(v_1.AuxInt) {
+        c, x := v_1.AuxInt, v.Args[0]
+        v.reset(OpS390XMULLDconst)
+        v.AuxInt = c
+        v.AddArg(x)
+        return true
+    }
+    if v_0 := v.Args[0]; v_0.Op == OpS390XMOVDconst && is32Bit(v_0.AuxInt) {
+        c, x := v_0.AuxInt, v.Args[1]
+        v.reset(OpS390XMULLDconst)
+        v.AuxInt = c
+        v.AddArg(x)
+        return true
+    }
+    return false
+}
+func rewriteValueS390X_OpS390XMULLDconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MULLDconst [-1] x)
+ // cond:
+ // result: (NEG x)
+ for {
+ if v.AuxInt != -1 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpS390XNEG)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLDconst [0] _)
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (MULLDconst [1] x)
+ // cond:
+ // result: x
+ for {
+ if v.AuxInt != 1 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLDconst [c] x)
+ // cond: isPowerOfTwo(c)
+ // result: (SLDconst [log2(c)] x)
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(isPowerOfTwo(c)) {
+ break
+ }
+ v.reset(OpS390XSLDconst)
+ v.AuxInt = log2(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLDconst [c] x)
+ // cond: isPowerOfTwo(c+1) && c >= 15
+ // result: (SUB (SLDconst <v.Type> [log2(c+1)] x) x)
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(isPowerOfTwo(c+1) && c >= 15) {
+ break
+ }
+ v.reset(OpS390XSUB)
+ v0 := b.NewValue0(v.Line, OpS390XSLDconst, v.Type)
+ v0.AuxInt = log2(c + 1)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLDconst [c] x)
+ // cond: isPowerOfTwo(c-1) && c >= 17
+ // result: (ADD (SLDconst <v.Type> [log2(c-1)] x) x)
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(isPowerOfTwo(c-1) && c >= 17) {
+ break
+ }
+ v.reset(OpS390XADD)
+ v0 := b.NewValue0(v.Line, OpS390XSLDconst, v.Type)
+ v0.AuxInt = log2(c - 1)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLDconst [c] (MOVDconst [d]))
+ // cond:
+ // result: (MOVDconst [c*d])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = c * d
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMULLW(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MULLW x (MOVDconst [c]))
+ // cond:
+ // result: (MULLWconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpS390XMULLWconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLW (MOVDconst [c]) x)
+ // cond:
+ // result: (MULLWconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpS390XMULLWconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMULLWconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MULLWconst [-1] x)
+ // cond:
+ // result: (NEGW x)
+ for {
+ if v.AuxInt != -1 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpS390XNEGW)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLWconst [0] _)
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (MULLWconst [1] x)
+ // cond:
+ // result: x
+ for {
+ if v.AuxInt != 1 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLWconst [c] x)
+ // cond: isPowerOfTwo(c)
+ // result: (SLWconst [log2(c)] x)
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(isPowerOfTwo(c)) {
+ break
+ }
+ v.reset(OpS390XSLWconst)
+ v.AuxInt = log2(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLWconst [c] x)
+ // cond: isPowerOfTwo(c+1) && c >= 15
+ // result: (SUBW (SLWconst <v.Type> [log2(c+1)] x) x)
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(isPowerOfTwo(c+1) && c >= 15) {
+ break
+ }
+ v.reset(OpS390XSUBW)
+ v0 := b.NewValue0(v.Line, OpS390XSLWconst, v.Type)
+ v0.AuxInt = log2(c + 1)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLWconst [c] x)
+ // cond: isPowerOfTwo(c-1) && c >= 17
+ // result: (ADDW (SLWconst <v.Type> [log2(c-1)] x) x)
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(isPowerOfTwo(c-1) && c >= 17) {
+ break
+ }
+ v.reset(OpS390XADDW)
+ v0 := b.NewValue0(v.Line, OpS390XSLWconst, v.Type)
+ v0.AuxInt = log2(c - 1)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLWconst [c] (MOVDconst [d]))
+ // cond:
+ // result: (MOVDconst [int64(int32(c*d))])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64(int32(c * d))
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XNEG(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NEG (MOVDconst [c]))
+ // cond:
+ // result: (MOVDconst [-c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = -c
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XNEGW(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NEGW (MOVDconst [c]))
+ // cond:
+ // result: (MOVDconst [int64(int32(-c))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64(int32(-c))
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XNOT(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NOT x)
+ // cond: true
+ // result: (XORconst [-1] x)
+ for {
+ x := v.Args[0]
+ if !(true) {
+ break
+ }
+ v.reset(OpS390XXORconst)
+ v.AuxInt = -1
+ v.AddArg(x)
+ return true
+ }
+ // match: (NOT (MOVDconst [c]))
+ // cond:
+ // result: (MOVDconst [^c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = ^c
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XNOTW(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NOTW x)
+ // cond: true
+ // result: (XORWconst [-1] x)
+ for {
+ x := v.Args[0]
+ if !(true) {
+ break
+ }
+ v.reset(OpS390XXORWconst)
+ v.AuxInt = -1
+ v.AddArg(x)
+ return true
+ }
+ // match: (NOTW (MOVDconst [c]))
+ // cond:
+ // result: (MOVDconst [^c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = ^c
+ return true
+ }
+ return false
+}
// rewriteValueS390X_OpS390XOR applies the rewrite rules for the 64-bit OR op,
// in order: fold a 32-bit constant operand into ORconst (either operand
// order), eliminate x|x, and recognize chains of eight single-byte loads
// shifted and OR'd together, replacing them with one 64-bit load — a
// byte-reversed load (MOVDBRload/MOVDBRloadidx) when the byte at offset i+k
// feeds bits 8k..8k+7 (ascending offsets), or a plain load
// (MOVDload/MOVDloadidx) when offsets descend (byte at i-k feeds bits
// 8k..8k+7). Each load-merge rule requires every intermediate OR, shift and
// load to have exactly one use so they can all be clobbered.
func rewriteValueS390X_OpS390XOR(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// Fold a 32-bit immediate right operand into ORconst.
	// match: (OR x (MOVDconst [c]))
	// cond: is32Bit(c)
	// result: (ORconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpS390XMOVDconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpS390XORconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// Same fold with the constant as the left operand.
	// match: (OR (MOVDconst [c]) x)
	// cond: is32Bit(c)
	// result: (ORconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpS390XMOVDconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpS390XORconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// x | x == x.
	// match: (OR x x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// Merge eight byte loads at ascending offsets i..i+7 (byte i+k shifted
	// left by 8k) into one byte-reversed 64-bit load at offset i.
	// match: (OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR x0:(MOVBZload [i] {s} p mem) s0:(SLDconst [8] x1:(MOVBZload [i+1] {s} p mem))) s1:(SLDconst [16] x2:(MOVBZload [i+2] {s} p mem))) s2:(SLDconst [24] x3:(MOVBZload [i+3] {s} p mem))) s3:(SLDconst [32] x4:(MOVBZload [i+4] {s} p mem))) s4:(SLDconst [40] x5:(MOVBZload [i+5] {s} p mem))) s5:(SLDconst [48] x6:(MOVBZload [i+6] {s} p mem))) s6:(SLDconst [56] x7:(MOVBZload [i+7] {s} p mem)))
	// cond: p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)
	// result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDBRload [i] {s} p mem)
	for {
		o0 := v.Args[0]
		if o0.Op != OpS390XOR {
			break
		}
		o1 := o0.Args[0]
		if o1.Op != OpS390XOR {
			break
		}
		o2 := o1.Args[0]
		if o2.Op != OpS390XOR {
			break
		}
		o3 := o2.Args[0]
		if o3.Op != OpS390XOR {
			break
		}
		o4 := o3.Args[0]
		if o4.Op != OpS390XOR {
			break
		}
		o5 := o4.Args[0]
		if o5.Op != OpS390XOR {
			break
		}
		x0 := o5.Args[0]
		if x0.Op != OpS390XMOVBZload {
			break
		}
		i := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		s0 := o5.Args[1]
		if s0.Op != OpS390XSLDconst {
			break
		}
		if s0.AuxInt != 8 {
			break
		}
		x1 := s0.Args[0]
		if x1.Op != OpS390XMOVBZload {
			break
		}
		if x1.AuxInt != i+1 {
			break
		}
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		s1 := o4.Args[1]
		if s1.Op != OpS390XSLDconst {
			break
		}
		if s1.AuxInt != 16 {
			break
		}
		x2 := s1.Args[0]
		if x2.Op != OpS390XMOVBZload {
			break
		}
		if x2.AuxInt != i+2 {
			break
		}
		if x2.Aux != s {
			break
		}
		if p != x2.Args[0] {
			break
		}
		if mem != x2.Args[1] {
			break
		}
		s2 := o3.Args[1]
		if s2.Op != OpS390XSLDconst {
			break
		}
		if s2.AuxInt != 24 {
			break
		}
		x3 := s2.Args[0]
		if x3.Op != OpS390XMOVBZload {
			break
		}
		if x3.AuxInt != i+3 {
			break
		}
		if x3.Aux != s {
			break
		}
		if p != x3.Args[0] {
			break
		}
		if mem != x3.Args[1] {
			break
		}
		s3 := o2.Args[1]
		if s3.Op != OpS390XSLDconst {
			break
		}
		if s3.AuxInt != 32 {
			break
		}
		x4 := s3.Args[0]
		if x4.Op != OpS390XMOVBZload {
			break
		}
		if x4.AuxInt != i+4 {
			break
		}
		if x4.Aux != s {
			break
		}
		if p != x4.Args[0] {
			break
		}
		if mem != x4.Args[1] {
			break
		}
		s4 := o1.Args[1]
		if s4.Op != OpS390XSLDconst {
			break
		}
		if s4.AuxInt != 40 {
			break
		}
		x5 := s4.Args[0]
		if x5.Op != OpS390XMOVBZload {
			break
		}
		if x5.AuxInt != i+5 {
			break
		}
		if x5.Aux != s {
			break
		}
		if p != x5.Args[0] {
			break
		}
		if mem != x5.Args[1] {
			break
		}
		s5 := o0.Args[1]
		if s5.Op != OpS390XSLDconst {
			break
		}
		if s5.AuxInt != 48 {
			break
		}
		x6 := s5.Args[0]
		if x6.Op != OpS390XMOVBZload {
			break
		}
		if x6.AuxInt != i+6 {
			break
		}
		if x6.Aux != s {
			break
		}
		if p != x6.Args[0] {
			break
		}
		if mem != x6.Args[1] {
			break
		}
		s6 := v.Args[1]
		if s6.Op != OpS390XSLDconst {
			break
		}
		if s6.AuxInt != 56 {
			break
		}
		x7 := s6.Args[0]
		if x7.Op != OpS390XMOVBZload {
			break
		}
		if x7.AuxInt != i+7 {
			break
		}
		if x7.Aux != s {
			break
		}
		if p != x7.Args[0] {
			break
		}
		if mem != x7.Args[1] {
			break
		}
		if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) {
			break
		}
		// The replacement load is built in the merge-point block so it
		// dominates all former uses.
		b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
		v0 := b.NewValue0(v.Line, OpS390XMOVDBRload, config.fe.TypeUInt64())
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// Indexed-addressing variant of the previous rule.
	// match: (OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR x0:(MOVBZloadidx [i] {s} p idx mem) s0:(SLDconst [8] x1:(MOVBZloadidx [i+1] {s} p idx mem))) s1:(SLDconst [16] x2:(MOVBZloadidx [i+2] {s} p idx mem))) s2:(SLDconst [24] x3:(MOVBZloadidx [i+3] {s} p idx mem))) s3:(SLDconst [32] x4:(MOVBZloadidx [i+4] {s} p idx mem))) s4:(SLDconst [40] x5:(MOVBZloadidx [i+5] {s} p idx mem))) s5:(SLDconst [48] x6:(MOVBZloadidx [i+6] {s} p idx mem))) s6:(SLDconst [56] x7:(MOVBZloadidx [i+7] {s} p idx mem)))
	// cond: p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)
	// result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDBRloadidx <v.Type> [i] {s} p idx mem)
	for {
		o0 := v.Args[0]
		if o0.Op != OpS390XOR {
			break
		}
		o1 := o0.Args[0]
		if o1.Op != OpS390XOR {
			break
		}
		o2 := o1.Args[0]
		if o2.Op != OpS390XOR {
			break
		}
		o3 := o2.Args[0]
		if o3.Op != OpS390XOR {
			break
		}
		o4 := o3.Args[0]
		if o4.Op != OpS390XOR {
			break
		}
		o5 := o4.Args[0]
		if o5.Op != OpS390XOR {
			break
		}
		x0 := o5.Args[0]
		if x0.Op != OpS390XMOVBZloadidx {
			break
		}
		i := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s0 := o5.Args[1]
		if s0.Op != OpS390XSLDconst {
			break
		}
		if s0.AuxInt != 8 {
			break
		}
		x1 := s0.Args[0]
		if x1.Op != OpS390XMOVBZloadidx {
			break
		}
		if x1.AuxInt != i+1 {
			break
		}
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		s1 := o4.Args[1]
		if s1.Op != OpS390XSLDconst {
			break
		}
		if s1.AuxInt != 16 {
			break
		}
		x2 := s1.Args[0]
		if x2.Op != OpS390XMOVBZloadidx {
			break
		}
		if x2.AuxInt != i+2 {
			break
		}
		if x2.Aux != s {
			break
		}
		if p != x2.Args[0] {
			break
		}
		if idx != x2.Args[1] {
			break
		}
		if mem != x2.Args[2] {
			break
		}
		s2 := o3.Args[1]
		if s2.Op != OpS390XSLDconst {
			break
		}
		if s2.AuxInt != 24 {
			break
		}
		x3 := s2.Args[0]
		if x3.Op != OpS390XMOVBZloadidx {
			break
		}
		if x3.AuxInt != i+3 {
			break
		}
		if x3.Aux != s {
			break
		}
		if p != x3.Args[0] {
			break
		}
		if idx != x3.Args[1] {
			break
		}
		if mem != x3.Args[2] {
			break
		}
		s3 := o2.Args[1]
		if s3.Op != OpS390XSLDconst {
			break
		}
		if s3.AuxInt != 32 {
			break
		}
		x4 := s3.Args[0]
		if x4.Op != OpS390XMOVBZloadidx {
			break
		}
		if x4.AuxInt != i+4 {
			break
		}
		if x4.Aux != s {
			break
		}
		if p != x4.Args[0] {
			break
		}
		if idx != x4.Args[1] {
			break
		}
		if mem != x4.Args[2] {
			break
		}
		s4 := o1.Args[1]
		if s4.Op != OpS390XSLDconst {
			break
		}
		if s4.AuxInt != 40 {
			break
		}
		x5 := s4.Args[0]
		if x5.Op != OpS390XMOVBZloadidx {
			break
		}
		if x5.AuxInt != i+5 {
			break
		}
		if x5.Aux != s {
			break
		}
		if p != x5.Args[0] {
			break
		}
		if idx != x5.Args[1] {
			break
		}
		if mem != x5.Args[2] {
			break
		}
		s5 := o0.Args[1]
		if s5.Op != OpS390XSLDconst {
			break
		}
		if s5.AuxInt != 48 {
			break
		}
		x6 := s5.Args[0]
		if x6.Op != OpS390XMOVBZloadidx {
			break
		}
		if x6.AuxInt != i+6 {
			break
		}
		if x6.Aux != s {
			break
		}
		if p != x6.Args[0] {
			break
		}
		if idx != x6.Args[1] {
			break
		}
		if mem != x6.Args[2] {
			break
		}
		s6 := v.Args[1]
		if s6.Op != OpS390XSLDconst {
			break
		}
		if s6.AuxInt != 56 {
			break
		}
		x7 := s6.Args[0]
		if x7.Op != OpS390XMOVBZloadidx {
			break
		}
		if x7.AuxInt != i+7 {
			break
		}
		if x7.Aux != s {
			break
		}
		if p != x7.Args[0] {
			break
		}
		if idx != x7.Args[1] {
			break
		}
		if mem != x7.Args[2] {
			break
		}
		if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) {
			break
		}
		b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
		v0 := b.NewValue0(v.Line, OpS390XMOVDBRloadidx, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// Merge eight byte loads at descending offsets i..i-7 (byte i-k shifted
	// left by 8k) into one plain 64-bit load at the lowest offset, i-7.
	// match: (OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR x0:(MOVBZload [i] {s} p mem) s0:(SLDconst [8] x1:(MOVBZload [i-1] {s} p mem))) s1:(SLDconst [16] x2:(MOVBZload [i-2] {s} p mem))) s2:(SLDconst [24] x3:(MOVBZload [i-3] {s} p mem))) s3:(SLDconst [32] x4:(MOVBZload [i-4] {s} p mem))) s4:(SLDconst [40] x5:(MOVBZload [i-5] {s} p mem))) s5:(SLDconst [48] x6:(MOVBZload [i-6] {s} p mem))) s6:(SLDconst [56] x7:(MOVBZload [i-7] {s} p mem)))
	// cond: p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)
	// result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload [i-7] {s} p mem)
	for {
		o0 := v.Args[0]
		if o0.Op != OpS390XOR {
			break
		}
		o1 := o0.Args[0]
		if o1.Op != OpS390XOR {
			break
		}
		o2 := o1.Args[0]
		if o2.Op != OpS390XOR {
			break
		}
		o3 := o2.Args[0]
		if o3.Op != OpS390XOR {
			break
		}
		o4 := o3.Args[0]
		if o4.Op != OpS390XOR {
			break
		}
		o5 := o4.Args[0]
		if o5.Op != OpS390XOR {
			break
		}
		x0 := o5.Args[0]
		if x0.Op != OpS390XMOVBZload {
			break
		}
		i := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		mem := x0.Args[1]
		s0 := o5.Args[1]
		if s0.Op != OpS390XSLDconst {
			break
		}
		if s0.AuxInt != 8 {
			break
		}
		x1 := s0.Args[0]
		if x1.Op != OpS390XMOVBZload {
			break
		}
		if x1.AuxInt != i-1 {
			break
		}
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		s1 := o4.Args[1]
		if s1.Op != OpS390XSLDconst {
			break
		}
		if s1.AuxInt != 16 {
			break
		}
		x2 := s1.Args[0]
		if x2.Op != OpS390XMOVBZload {
			break
		}
		if x2.AuxInt != i-2 {
			break
		}
		if x2.Aux != s {
			break
		}
		if p != x2.Args[0] {
			break
		}
		if mem != x2.Args[1] {
			break
		}
		s2 := o3.Args[1]
		if s2.Op != OpS390XSLDconst {
			break
		}
		if s2.AuxInt != 24 {
			break
		}
		x3 := s2.Args[0]
		if x3.Op != OpS390XMOVBZload {
			break
		}
		if x3.AuxInt != i-3 {
			break
		}
		if x3.Aux != s {
			break
		}
		if p != x3.Args[0] {
			break
		}
		if mem != x3.Args[1] {
			break
		}
		s3 := o2.Args[1]
		if s3.Op != OpS390XSLDconst {
			break
		}
		if s3.AuxInt != 32 {
			break
		}
		x4 := s3.Args[0]
		if x4.Op != OpS390XMOVBZload {
			break
		}
		if x4.AuxInt != i-4 {
			break
		}
		if x4.Aux != s {
			break
		}
		if p != x4.Args[0] {
			break
		}
		if mem != x4.Args[1] {
			break
		}
		s4 := o1.Args[1]
		if s4.Op != OpS390XSLDconst {
			break
		}
		if s4.AuxInt != 40 {
			break
		}
		x5 := s4.Args[0]
		if x5.Op != OpS390XMOVBZload {
			break
		}
		if x5.AuxInt != i-5 {
			break
		}
		if x5.Aux != s {
			break
		}
		if p != x5.Args[0] {
			break
		}
		if mem != x5.Args[1] {
			break
		}
		s5 := o0.Args[1]
		if s5.Op != OpS390XSLDconst {
			break
		}
		if s5.AuxInt != 48 {
			break
		}
		x6 := s5.Args[0]
		if x6.Op != OpS390XMOVBZload {
			break
		}
		if x6.AuxInt != i-6 {
			break
		}
		if x6.Aux != s {
			break
		}
		if p != x6.Args[0] {
			break
		}
		if mem != x6.Args[1] {
			break
		}
		s6 := v.Args[1]
		if s6.Op != OpS390XSLDconst {
			break
		}
		if s6.AuxInt != 56 {
			break
		}
		x7 := s6.Args[0]
		if x7.Op != OpS390XMOVBZload {
			break
		}
		if x7.AuxInt != i-7 {
			break
		}
		if x7.Aux != s {
			break
		}
		if p != x7.Args[0] {
			break
		}
		if mem != x7.Args[1] {
			break
		}
		if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) {
			break
		}
		b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
		v0 := b.NewValue0(v.Line, OpS390XMOVDload, config.fe.TypeUInt64())
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i - 7
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// Indexed-addressing variant of the previous rule.
	// match: (OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR x0:(MOVBZloadidx [i] {s} p idx mem) s0:(SLDconst [8] x1:(MOVBZloadidx [i-1] {s} p idx mem))) s1:(SLDconst [16] x2:(MOVBZloadidx [i-2] {s} p idx mem))) s2:(SLDconst [24] x3:(MOVBZloadidx [i-3] {s} p idx mem))) s3:(SLDconst [32] x4:(MOVBZloadidx [i-4] {s} p idx mem))) s4:(SLDconst [40] x5:(MOVBZloadidx [i-5] {s} p idx mem))) s5:(SLDconst [48] x6:(MOVBZloadidx [i-6] {s} p idx mem))) s6:(SLDconst [56] x7:(MOVBZloadidx [i-7] {s} p idx mem)))
	// cond: p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)
	// result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx <v.Type> [i-7] {s} p idx mem)
	for {
		o0 := v.Args[0]
		if o0.Op != OpS390XOR {
			break
		}
		o1 := o0.Args[0]
		if o1.Op != OpS390XOR {
			break
		}
		o2 := o1.Args[0]
		if o2.Op != OpS390XOR {
			break
		}
		o3 := o2.Args[0]
		if o3.Op != OpS390XOR {
			break
		}
		o4 := o3.Args[0]
		if o4.Op != OpS390XOR {
			break
		}
		o5 := o4.Args[0]
		if o5.Op != OpS390XOR {
			break
		}
		x0 := o5.Args[0]
		if x0.Op != OpS390XMOVBZloadidx {
			break
		}
		i := x0.AuxInt
		s := x0.Aux
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s0 := o5.Args[1]
		if s0.Op != OpS390XSLDconst {
			break
		}
		if s0.AuxInt != 8 {
			break
		}
		x1 := s0.Args[0]
		if x1.Op != OpS390XMOVBZloadidx {
			break
		}
		if x1.AuxInt != i-1 {
			break
		}
		if x1.Aux != s {
			break
		}
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		s1 := o4.Args[1]
		if s1.Op != OpS390XSLDconst {
			break
		}
		if s1.AuxInt != 16 {
			break
		}
		x2 := s1.Args[0]
		if x2.Op != OpS390XMOVBZloadidx {
			break
		}
		if x2.AuxInt != i-2 {
			break
		}
		if x2.Aux != s {
			break
		}
		if p != x2.Args[0] {
			break
		}
		if idx != x2.Args[1] {
			break
		}
		if mem != x2.Args[2] {
			break
		}
		s2 := o3.Args[1]
		if s2.Op != OpS390XSLDconst {
			break
		}
		if s2.AuxInt != 24 {
			break
		}
		x3 := s2.Args[0]
		if x3.Op != OpS390XMOVBZloadidx {
			break
		}
		if x3.AuxInt != i-3 {
			break
		}
		if x3.Aux != s {
			break
		}
		if p != x3.Args[0] {
			break
		}
		if idx != x3.Args[1] {
			break
		}
		if mem != x3.Args[2] {
			break
		}
		s3 := o2.Args[1]
		if s3.Op != OpS390XSLDconst {
			break
		}
		if s3.AuxInt != 32 {
			break
		}
		x4 := s3.Args[0]
		if x4.Op != OpS390XMOVBZloadidx {
			break
		}
		if x4.AuxInt != i-4 {
			break
		}
		if x4.Aux != s {
			break
		}
		if p != x4.Args[0] {
			break
		}
		if idx != x4.Args[1] {
			break
		}
		if mem != x4.Args[2] {
			break
		}
		s4 := o1.Args[1]
		if s4.Op != OpS390XSLDconst {
			break
		}
		if s4.AuxInt != 40 {
			break
		}
		x5 := s4.Args[0]
		if x5.Op != OpS390XMOVBZloadidx {
			break
		}
		if x5.AuxInt != i-5 {
			break
		}
		if x5.Aux != s {
			break
		}
		if p != x5.Args[0] {
			break
		}
		if idx != x5.Args[1] {
			break
		}
		if mem != x5.Args[2] {
			break
		}
		s5 := o0.Args[1]
		if s5.Op != OpS390XSLDconst {
			break
		}
		if s5.AuxInt != 48 {
			break
		}
		x6 := s5.Args[0]
		if x6.Op != OpS390XMOVBZloadidx {
			break
		}
		if x6.AuxInt != i-6 {
			break
		}
		if x6.Aux != s {
			break
		}
		if p != x6.Args[0] {
			break
		}
		if idx != x6.Args[1] {
			break
		}
		if mem != x6.Args[2] {
			break
		}
		s6 := v.Args[1]
		if s6.Op != OpS390XSLDconst {
			break
		}
		if s6.AuxInt != 56 {
			break
		}
		x7 := s6.Args[0]
		if x7.Op != OpS390XMOVBZloadidx {
			break
		}
		if x7.AuxInt != i-7 {
			break
		}
		if x7.Aux != s {
			break
		}
		if p != x7.Args[0] {
			break
		}
		if idx != x7.Args[1] {
			break
		}
		if mem != x7.Args[2] {
			break
		}
		if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) {
			break
		}
		b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
		v0 := b.NewValue0(v.Line, OpS390XMOVDloadidx, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i - 7
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
+func rewriteValueS390X_OpS390XORW(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ORW x (MOVDconst [c]))
+ // cond:
+ // result: (ORWconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpS390XORWconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORW (MOVDconst [c]) x)
+ // cond:
+ // result: (ORWconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpS390XORWconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORW x x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORW x0:(MOVBZload [i] {s} p mem) s0:(SLWconst [8] x1:(MOVBZload [i+1] {s} p mem)))
+ // cond: p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)
+ // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i] {s} p mem))
+ for {
+ x0 := v.Args[0]
+ if x0.Op != OpS390XMOVBZload {
+ break
+ }
+ i := x0.AuxInt
+ s := x0.Aux
+ p := x0.Args[0]
+ mem := x0.Args[1]
+ s0 := v.Args[1]
+ if s0.Op != OpS390XSLWconst {
+ break
+ }
+ if s0.AuxInt != 8 {
+ break
+ }
+ x1 := s0.Args[0]
+ if x1.Op != OpS390XMOVBZload {
+ break
+ }
+ if x1.AuxInt != i+1 {
+ break
+ }
+ if x1.Aux != s {
+ break
+ }
+ if p != x1.Args[0] {
+ break
+ }
+ if mem != x1.Args[1] {
+ break
+ }
+ if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) {
+ break
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XMOVHBRload, config.fe.TypeUInt16())
+ v1.AuxInt = i
+ v1.Aux = s
+ v1.AddArg(p)
+ v1.AddArg(mem)
+ v0.AddArg(v1)
+ return true
+ }
+ // match: (ORW o0:(ORW o1:(ORW x0:(MOVBZload [i] {s} p mem) s0:(SLWconst [8] x1:(MOVBZload [i+1] {s} p mem))) s1:(SLWconst [16] x2:(MOVBZload [i+2] {s} p mem))) s2:(SLWconst [24] x3:(MOVBZload [i+3] {s} p mem)))
+ // cond: p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)
+ // result: @mergePoint(b,x0,x1,x2,x3) (MOVWZreg (MOVWBRload [i] {s} p mem))
+ for {
+ o0 := v.Args[0]
+ if o0.Op != OpS390XORW {
+ break
+ }
+ o1 := o0.Args[0]
+ if o1.Op != OpS390XORW {
+ break
+ }
+ x0 := o1.Args[0]
+ if x0.Op != OpS390XMOVBZload {
+ break
+ }
+ i := x0.AuxInt
+ s := x0.Aux
+ p := x0.Args[0]
+ mem := x0.Args[1]
+ s0 := o1.Args[1]
+ if s0.Op != OpS390XSLWconst {
+ break
+ }
+ if s0.AuxInt != 8 {
+ break
+ }
+ x1 := s0.Args[0]
+ if x1.Op != OpS390XMOVBZload {
+ break
+ }
+ if x1.AuxInt != i+1 {
+ break
+ }
+ if x1.Aux != s {
+ break
+ }
+ if p != x1.Args[0] {
+ break
+ }
+ if mem != x1.Args[1] {
+ break
+ }
+ s1 := o0.Args[1]
+ if s1.Op != OpS390XSLWconst {
+ break
+ }
+ if s1.AuxInt != 16 {
+ break
+ }
+ x2 := s1.Args[0]
+ if x2.Op != OpS390XMOVBZload {
+ break
+ }
+ if x2.AuxInt != i+2 {
+ break
+ }
+ if x2.Aux != s {
+ break
+ }
+ if p != x2.Args[0] {
+ break
+ }
+ if mem != x2.Args[1] {
+ break
+ }
+ s2 := v.Args[1]
+ if s2.Op != OpS390XSLWconst {
+ break
+ }
+ if s2.AuxInt != 24 {
+ break
+ }
+ x3 := s2.Args[0]
+ if x3.Op != OpS390XMOVBZload {
+ break
+ }
+ if x3.AuxInt != i+3 {
+ break
+ }
+ if x3.Aux != s {
+ break
+ }
+ if p != x3.Args[0] {
+ break
+ }
+ if mem != x3.Args[1] {
+ break
+ }
+ if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) {
+ break
+ }
+ b = mergePoint(b, x0, x1, x2, x3)
+ v0 := b.NewValue0(v.Line, OpS390XMOVWZreg, config.fe.TypeUInt64())
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XMOVWBRload, config.fe.TypeUInt32())
+ v1.AuxInt = i
+ v1.Aux = s
+ v1.AddArg(p)
+ v1.AddArg(mem)
+ v0.AddArg(v1)
+ return true
+ }
+ // match: (ORW x0:(MOVBZloadidx [i] {s} p idx mem) s0:(SLWconst [8] x1:(MOVBZloadidx [i+1] {s} p idx mem)))
+ // cond: p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)
+ // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx <v.Type> [i] {s} p idx mem))
+ for {
+ x0 := v.Args[0]
+ if x0.Op != OpS390XMOVBZloadidx {
+ break
+ }
+ i := x0.AuxInt
+ s := x0.Aux
+ p := x0.Args[0]
+ idx := x0.Args[1]
+ mem := x0.Args[2]
+ s0 := v.Args[1]
+ if s0.Op != OpS390XSLWconst {
+ break
+ }
+ if s0.AuxInt != 8 {
+ break
+ }
+ x1 := s0.Args[0]
+ if x1.Op != OpS390XMOVBZloadidx {
+ break
+ }
+ if x1.AuxInt != i+1 {
+ break
+ }
+ if x1.Aux != s {
+ break
+ }
+ if p != x1.Args[0] {
+ break
+ }
+ if idx != x1.Args[1] {
+ break
+ }
+ if mem != x1.Args[2] {
+ break
+ }
+ if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) {
+ break
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XMOVHBRloadidx, v.Type)
+ v1.AuxInt = i
+ v1.Aux = s
+ v1.AddArg(p)
+ v1.AddArg(idx)
+ v1.AddArg(mem)
+ v0.AddArg(v1)
+ return true
+ }
+ // match: (ORW o0:(ORW o1:(ORW x0:(MOVBZloadidx [i] {s} p idx mem) s0:(SLWconst [8] x1:(MOVBZloadidx [i+1] {s} p idx mem))) s1:(SLWconst [16] x2:(MOVBZloadidx [i+2] {s} p idx mem))) s2:(SLWconst [24] x3:(MOVBZloadidx [i+3] {s} p idx mem)))
+ // cond: p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)
+ // result: @mergePoint(b,x0,x1,x2,x3) (MOVWZreg (MOVWBRloadidx <v.Type> [i] {s} p idx mem))
+ for {
+ o0 := v.Args[0]
+ if o0.Op != OpS390XORW {
+ break
+ }
+ o1 := o0.Args[0]
+ if o1.Op != OpS390XORW {
+ break
+ }
+ x0 := o1.Args[0]
+ if x0.Op != OpS390XMOVBZloadidx {
+ break
+ }
+ i := x0.AuxInt
+ s := x0.Aux
+ p := x0.Args[0]
+ idx := x0.Args[1]
+ mem := x0.Args[2]
+ s0 := o1.Args[1]
+ if s0.Op != OpS390XSLWconst {
+ break
+ }
+ if s0.AuxInt != 8 {
+ break
+ }
+ x1 := s0.Args[0]
+ if x1.Op != OpS390XMOVBZloadidx {
+ break
+ }
+ if x1.AuxInt != i+1 {
+ break
+ }
+ if x1.Aux != s {
+ break
+ }
+ if p != x1.Args[0] {
+ break
+ }
+ if idx != x1.Args[1] {
+ break
+ }
+ if mem != x1.Args[2] {
+ break
+ }
+ s1 := o0.Args[1]
+ if s1.Op != OpS390XSLWconst {
+ break
+ }
+ if s1.AuxInt != 16 {
+ break
+ }
+ x2 := s1.Args[0]
+ if x2.Op != OpS390XMOVBZloadidx {
+ break
+ }
+ if x2.AuxInt != i+2 {
+ break
+ }
+ if x2.Aux != s {
+ break
+ }
+ if p != x2.Args[0] {
+ break
+ }
+ if idx != x2.Args[1] {
+ break
+ }
+ if mem != x2.Args[2] {
+ break
+ }
+ s2 := v.Args[1]
+ if s2.Op != OpS390XSLWconst {
+ break
+ }
+ if s2.AuxInt != 24 {
+ break
+ }
+ x3 := s2.Args[0]
+ if x3.Op != OpS390XMOVBZloadidx {
+ break
+ }
+ if x3.AuxInt != i+3 {
+ break
+ }
+ if x3.Aux != s {
+ break
+ }
+ if p != x3.Args[0] {
+ break
+ }
+ if idx != x3.Args[1] {
+ break
+ }
+ if mem != x3.Args[2] {
+ break
+ }
+ if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) {
+ break
+ }
+ b = mergePoint(b, x0, x1, x2, x3)
+ v0 := b.NewValue0(v.Line, OpS390XMOVWZreg, config.fe.TypeUInt64())
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpS390XMOVWBRloadidx, v.Type)
+ v1.AuxInt = i
+ v1.Aux = s
+ v1.AddArg(p)
+ v1.AddArg(idx)
+ v1.AddArg(mem)
+ v0.AddArg(v1)
+ return true
+ }
+ // match: (ORW x0:(MOVBZload [i] {s} p mem) s0:(SLWconst [8] x1:(MOVBZload [i-1] {s} p mem)))
+ // cond: p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)
+ // result: @mergePoint(b,x0,x1) (MOVHZload [i-1] {s} p mem)
+ for {
+ x0 := v.Args[0]
+ if x0.Op != OpS390XMOVBZload {
+ break
+ }
+ i := x0.AuxInt
+ s := x0.Aux
+ p := x0.Args[0]
+ mem := x0.Args[1]
+ s0 := v.Args[1]
+ if s0.Op != OpS390XSLWconst {
+ break
+ }
+ if s0.AuxInt != 8 {
+ break
+ }
+ x1 := s0.Args[0]
+ if x1.Op != OpS390XMOVBZload {
+ break
+ }
+ if x1.AuxInt != i-1 {
+ break
+ }
+ if x1.Aux != s {
+ break
+ }
+ if p != x1.Args[0] {
+ break
+ }
+ if mem != x1.Args[1] {
+ break
+ }
+ if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) {
+ break
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(v.Line, OpS390XMOVHZload, config.fe.TypeUInt16())
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = i - 1
+ v0.Aux = s
+ v0.AddArg(p)
+ v0.AddArg(mem)
+ return true
+ }
+ // match: (ORW o0:(ORW o1:(ORW x0:(MOVBZload [i] {s} p mem) s0:(SLWconst [8] x1:(MOVBZload [i-1] {s} p mem))) s1:(SLWconst [16] x2:(MOVBZload [i-2] {s} p mem))) s2:(SLWconst [24] x3:(MOVBZload [i-3] {s} p mem)))
+ // cond: p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)
+ // result: @mergePoint(b,x0,x1,x2,x3) (MOVWZload [i-3] {s} p mem)
+ for {
+ o0 := v.Args[0]
+ if o0.Op != OpS390XORW {
+ break
+ }
+ o1 := o0.Args[0]
+ if o1.Op != OpS390XORW {
+ break
+ }
+ x0 := o1.Args[0]
+ if x0.Op != OpS390XMOVBZload {
+ break
+ }
+ i := x0.AuxInt
+ s := x0.Aux
+ p := x0.Args[0]
+ mem := x0.Args[1]
+ s0 := o1.Args[1]
+ if s0.Op != OpS390XSLWconst {
+ break
+ }
+ if s0.AuxInt != 8 {
+ break
+ }
+ x1 := s0.Args[0]
+ if x1.Op != OpS390XMOVBZload {
+ break
+ }
+ if x1.AuxInt != i-1 {
+ break
+ }
+ if x1.Aux != s {
+ break
+ }
+ if p != x1.Args[0] {
+ break
+ }
+ if mem != x1.Args[1] {
+ break
+ }
+ s1 := o0.Args[1]
+ if s1.Op != OpS390XSLWconst {
+ break
+ }
+ if s1.AuxInt != 16 {
+ break
+ }
+ x2 := s1.Args[0]
+ if x2.Op != OpS390XMOVBZload {
+ break
+ }
+ if x2.AuxInt != i-2 {
+ break
+ }
+ if x2.Aux != s {
+ break
+ }
+ if p != x2.Args[0] {
+ break
+ }
+ if mem != x2.Args[1] {
+ break
+ }
+ s2 := v.Args[1]
+ if s2.Op != OpS390XSLWconst {
+ break
+ }
+ if s2.AuxInt != 24 {
+ break
+ }
+ x3 := s2.Args[0]
+ if x3.Op != OpS390XMOVBZload {
+ break
+ }
+ if x3.AuxInt != i-3 {
+ break
+ }
+ if x3.Aux != s {
+ break
+ }
+ if p != x3.Args[0] {
+ break
+ }
+ if mem != x3.Args[1] {
+ break
+ }
+ if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) {
+ break
+ }
+ b = mergePoint(b, x0, x1, x2, x3)
+ v0 := b.NewValue0(v.Line, OpS390XMOVWZload, config.fe.TypeUInt32())
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = i - 3
+ v0.Aux = s
+ v0.AddArg(p)
+ v0.AddArg(mem)
+ return true
+ }
+ // match: (ORW x0:(MOVBZloadidx [i] {s} p idx mem) s0:(SLWconst [8] x1:(MOVBZloadidx [i-1] {s} p idx mem)))
+ // cond: p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)
+ // result: @mergePoint(b,x0,x1) (MOVHZloadidx <v.Type> [i-1] {s} p idx mem)
+ for {
+ x0 := v.Args[0]
+ if x0.Op != OpS390XMOVBZloadidx {
+ break
+ }
+ i := x0.AuxInt
+ s := x0.Aux
+ p := x0.Args[0]
+ idx := x0.Args[1]
+ mem := x0.Args[2]
+ s0 := v.Args[1]
+ if s0.Op != OpS390XSLWconst {
+ break
+ }
+ if s0.AuxInt != 8 {
+ break
+ }
+ x1 := s0.Args[0]
+ if x1.Op != OpS390XMOVBZloadidx {
+ break
+ }
+ if x1.AuxInt != i-1 {
+ break
+ }
+ if x1.Aux != s {
+ break
+ }
+ if p != x1.Args[0] {
+ break
+ }
+ if idx != x1.Args[1] {
+ break
+ }
+ if mem != x1.Args[2] {
+ break
+ }
+ if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) {
+ break
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(v.Line, OpS390XMOVHZloadidx, v.Type)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = i - 1
+ v0.Aux = s
+ v0.AddArg(p)
+ v0.AddArg(idx)
+ v0.AddArg(mem)
+ return true
+ }
+ // match: (ORW o0:(ORW o1:(ORW x0:(MOVBZloadidx [i] {s} p idx mem) s0:(SLWconst [8] x1:(MOVBZloadidx [i-1] {s} p idx mem))) s1:(SLWconst [16] x2:(MOVBZloadidx [i-2] {s} p idx mem))) s2:(SLWconst [24] x3:(MOVBZloadidx [i-3] {s} p idx mem)))
+ // cond: p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)
+ // result: @mergePoint(b,x0,x1,x2,x3) (MOVWZloadidx <v.Type> [i-3] {s} p idx mem)
+ for {
+ o0 := v.Args[0]
+ if o0.Op != OpS390XORW {
+ break
+ }
+ o1 := o0.Args[0]
+ if o1.Op != OpS390XORW {
+ break
+ }
+ x0 := o1.Args[0]
+ if x0.Op != OpS390XMOVBZloadidx {
+ break
+ }
+ i := x0.AuxInt
+ s := x0.Aux
+ p := x0.Args[0]
+ idx := x0.Args[1]
+ mem := x0.Args[2]
+ s0 := o1.Args[1]
+ if s0.Op != OpS390XSLWconst {
+ break
+ }
+ if s0.AuxInt != 8 {
+ break
+ }
+ x1 := s0.Args[0]
+ if x1.Op != OpS390XMOVBZloadidx {
+ break
+ }
+ if x1.AuxInt != i-1 {
+ break
+ }
+ if x1.Aux != s {
+ break
+ }
+ if p != x1.Args[0] {
+ break
+ }
+ if idx != x1.Args[1] {
+ break
+ }
+ if mem != x1.Args[2] {
+ break
+ }
+ s1 := o0.Args[1]
+ if s1.Op != OpS390XSLWconst {
+ break
+ }
+ if s1.AuxInt != 16 {
+ break
+ }
+ x2 := s1.Args[0]
+ if x2.Op != OpS390XMOVBZloadidx {
+ break
+ }
+ if x2.AuxInt != i-2 {
+ break
+ }
+ if x2.Aux != s {
+ break
+ }
+ if p != x2.Args[0] {
+ break
+ }
+ if idx != x2.Args[1] {
+ break
+ }
+ if mem != x2.Args[2] {
+ break
+ }
+ s2 := v.Args[1]
+ if s2.Op != OpS390XSLWconst {
+ break
+ }
+ if s2.AuxInt != 24 {
+ break
+ }
+ x3 := s2.Args[0]
+ if x3.Op != OpS390XMOVBZloadidx {
+ break
+ }
+ if x3.AuxInt != i-3 {
+ break
+ }
+ if x3.Aux != s {
+ break
+ }
+ if p != x3.Args[0] {
+ break
+ }
+ if idx != x3.Args[1] {
+ break
+ }
+ if mem != x3.Args[2] {
+ break
+ }
+ if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) {
+ break
+ }
+ b = mergePoint(b, x0, x1, x2, x3)
+ v0 := b.NewValue0(v.Line, OpS390XMOVWZloadidx, v.Type)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = i - 3
+ v0.Aux = s
+ v0.AddArg(p)
+ v0.AddArg(idx)
+ v0.AddArg(mem)
+ return true
+ }
+ return false
+}
+// rewriteValueS390X_OpS390XORWconst simplifies ORWconst (32-bit OR with
+// immediate): an all-zero mask is dropped, an all-ones mask becomes the
+// constant -1, and OR of two constants is folded. Returns true if v was
+// rewritten in place.
+func rewriteValueS390X_OpS390XORWconst(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b // b is unused when no rule builds a new value; blank assignment silences the compiler
+	// match: (ORWconst [c] x)
+	// cond: int32(c)==0
+	// result: x
+	for {
+		c := v.AuxInt
+		x := v.Args[0]
+		// only the low 32 bits matter for the W (word) form
+		if !(int32(c) == 0) {
+			break
+		}
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+	// match: (ORWconst [c] _)
+	// cond: int32(c)==-1
+	// result: (MOVDconst [-1])
+	for {
+		c := v.AuxInt
+		if !(int32(c) == -1) {
+			break
+		}
+		v.reset(OpS390XMOVDconst)
+		v.AuxInt = -1
+		return true
+	}
+	// match: (ORWconst [c] (MOVDconst [d]))
+	// cond:
+	// result: (MOVDconst [c|d])
+	for {
+		c := v.AuxInt
+		v_0 := v.Args[0]
+		if v_0.Op != OpS390XMOVDconst {
+			break
+		}
+		d := v_0.AuxInt
+		v.reset(OpS390XMOVDconst)
+		v.AuxInt = c | d
+		return true
+	}
+	return false
+}
+// rewriteValueS390X_OpS390XORconst simplifies ORconst (64-bit OR with
+// immediate): OR with 0 is dropped, OR with -1 becomes the constant -1,
+// and OR of two constants is folded. Returns true if v was rewritten.
+func rewriteValueS390X_OpS390XORconst(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (ORconst [0] x)
+	// cond:
+	// result: x
+	for {
+		if v.AuxInt != 0 {
+			break
+		}
+		x := v.Args[0]
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+	// match: (ORconst [-1] _)
+	// cond:
+	// result: (MOVDconst [-1])
+	for {
+		if v.AuxInt != -1 {
+			break
+		}
+		v.reset(OpS390XMOVDconst)
+		v.AuxInt = -1
+		return true
+	}
+	// match: (ORconst [c] (MOVDconst [d]))
+	// cond:
+	// result: (MOVDconst [c|d])
+	for {
+		c := v.AuxInt
+		v_0 := v.Args[0]
+		if v_0.Op != OpS390XMOVDconst {
+			break
+		}
+		d := v_0.AuxInt
+		v.reset(OpS390XMOVDconst)
+		v.AuxInt = c | d
+		return true
+	}
+	return false
+}
+// rewriteValueS390X_OpS390XSLD simplifies SLD (64-bit shift left): a
+// constant shift amount becomes SLDconst (amount masked to 0-63), and a
+// redundant `& 63` mask on the shift amount is removed. Returns true if v
+// was rewritten.
+func rewriteValueS390X_OpS390XSLD(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (SLD x (MOVDconst [c]))
+	// cond:
+	// result: (SLDconst [c&63] x)
+	for {
+		x := v.Args[0]
+		v_1 := v.Args[1]
+		if v_1.Op != OpS390XMOVDconst {
+			break
+		}
+		c := v_1.AuxInt
+		v.reset(OpS390XSLDconst)
+		v.AuxInt = c & 63
+		v.AddArg(x)
+		return true
+	}
+	// match: (SLD x (ANDconst [63] y))
+	// cond:
+	// result: (SLD x y)
+	for {
+		x := v.Args[0]
+		v_1 := v.Args[1]
+		if v_1.Op != OpS390XANDconst {
+			break
+		}
+		if v_1.AuxInt != 63 {
+			break
+		}
+		y := v_1.Args[0]
+		v.reset(OpS390XSLD)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+// rewriteValueS390X_OpS390XSLW simplifies SLW (32-bit shift left): a
+// constant shift amount becomes SLWconst (amount masked to 0-63), and a
+// redundant ANDWconst[63] mask on the shift amount is removed. Returns
+// true if v was rewritten.
+func rewriteValueS390X_OpS390XSLW(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (SLW x (MOVDconst [c]))
+	// cond:
+	// result: (SLWconst [c&63] x)
+	for {
+		x := v.Args[0]
+		v_1 := v.Args[1]
+		if v_1.Op != OpS390XMOVDconst {
+			break
+		}
+		c := v_1.AuxInt
+		v.reset(OpS390XSLWconst)
+		v.AuxInt = c & 63
+		v.AddArg(x)
+		return true
+	}
+	// match: (SLW x (ANDWconst [63] y))
+	// cond:
+	// result: (SLW x y)
+	for {
+		x := v.Args[0]
+		v_1 := v.Args[1]
+		if v_1.Op != OpS390XANDWconst {
+			break
+		}
+		if v_1.AuxInt != 63 {
+			break
+		}
+		y := v_1.Args[0]
+		v.reset(OpS390XSLW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+// rewriteValueS390X_OpS390XSRAD simplifies SRAD (64-bit arithmetic shift
+// right): a constant shift amount becomes SRADconst (masked to 0-63), and
+// a redundant ANDconst[63] mask on the shift amount is removed. Returns
+// true if v was rewritten.
+func rewriteValueS390X_OpS390XSRAD(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (SRAD x (MOVDconst [c]))
+	// cond:
+	// result: (SRADconst [c&63] x)
+	for {
+		x := v.Args[0]
+		v_1 := v.Args[1]
+		if v_1.Op != OpS390XMOVDconst {
+			break
+		}
+		c := v_1.AuxInt
+		v.reset(OpS390XSRADconst)
+		v.AuxInt = c & 63
+		v.AddArg(x)
+		return true
+	}
+	// match: (SRAD x (ANDconst [63] y))
+	// cond:
+	// result: (SRAD x y)
+	for {
+		x := v.Args[0]
+		v_1 := v.Args[1]
+		if v_1.Op != OpS390XANDconst {
+			break
+		}
+		if v_1.AuxInt != 63 {
+			break
+		}
+		y := v_1.Args[0]
+		v.reset(OpS390XSRAD)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+// rewriteValueS390X_OpS390XSRADconst constant-folds SRADconst applied to a
+// constant: the shift `d >> uint64(c)` is arithmetic because d is a signed
+// int64 (AuxInt). Returns true if v was rewritten.
+func rewriteValueS390X_OpS390XSRADconst(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (SRADconst [c] (MOVDconst [d]))
+	// cond:
+	// result: (MOVDconst [d>>uint64(c)])
+	for {
+		c := v.AuxInt
+		v_0 := v.Args[0]
+		if v_0.Op != OpS390XMOVDconst {
+			break
+		}
+		d := v_0.AuxInt
+		v.reset(OpS390XMOVDconst)
+		v.AuxInt = d >> uint64(c)
+		return true
+	}
+	return false
+}
+// rewriteValueS390X_OpS390XSRAW simplifies SRAW (32-bit arithmetic shift
+// right): a constant shift amount becomes SRAWconst (masked to 0-63), and
+// a redundant ANDWconst[63] mask on the shift amount is removed. Returns
+// true if v was rewritten.
+func rewriteValueS390X_OpS390XSRAW(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (SRAW x (MOVDconst [c]))
+	// cond:
+	// result: (SRAWconst [c&63] x)
+	for {
+		x := v.Args[0]
+		v_1 := v.Args[1]
+		if v_1.Op != OpS390XMOVDconst {
+			break
+		}
+		c := v_1.AuxInt
+		v.reset(OpS390XSRAWconst)
+		v.AuxInt = c & 63
+		v.AddArg(x)
+		return true
+	}
+	// match: (SRAW x (ANDWconst [63] y))
+	// cond:
+	// result: (SRAW x y)
+	for {
+		x := v.Args[0]
+		v_1 := v.Args[1]
+		if v_1.Op != OpS390XANDWconst {
+			break
+		}
+		if v_1.AuxInt != 63 {
+			break
+		}
+		y := v_1.Args[0]
+		v.reset(OpS390XSRAW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+// rewriteValueS390X_OpS390XSRAWconst constant-folds SRAWconst applied to a
+// constant: `d >> uint64(c)` is an arithmetic shift of the signed int64
+// AuxInt. Returns true if v was rewritten.
+func rewriteValueS390X_OpS390XSRAWconst(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (SRAWconst [c] (MOVDconst [d]))
+	// cond:
+	// result: (MOVDconst [d>>uint64(c)])
+	for {
+		c := v.AuxInt
+		v_0 := v.Args[0]
+		if v_0.Op != OpS390XMOVDconst {
+			break
+		}
+		d := v_0.AuxInt
+		v.reset(OpS390XMOVDconst)
+		v.AuxInt = d >> uint64(c)
+		return true
+	}
+	return false
+}
+// rewriteValueS390X_OpS390XSRD simplifies SRD (64-bit logical shift
+// right): a constant shift amount becomes SRDconst (masked to 0-63), and a
+// redundant ANDconst[63] mask on the shift amount is removed. Returns true
+// if v was rewritten.
+func rewriteValueS390X_OpS390XSRD(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (SRD x (MOVDconst [c]))
+	// cond:
+	// result: (SRDconst [c&63] x)
+	for {
+		x := v.Args[0]
+		v_1 := v.Args[1]
+		if v_1.Op != OpS390XMOVDconst {
+			break
+		}
+		c := v_1.AuxInt
+		v.reset(OpS390XSRDconst)
+		v.AuxInt = c & 63
+		v.AddArg(x)
+		return true
+	}
+	// match: (SRD x (ANDconst [63] y))
+	// cond:
+	// result: (SRD x y)
+	for {
+		x := v.Args[0]
+		v_1 := v.Args[1]
+		if v_1.Op != OpS390XANDconst {
+			break
+		}
+		if v_1.AuxInt != 63 {
+			break
+		}
+		y := v_1.Args[0]
+		v.reset(OpS390XSRD)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+// rewriteValueS390X_OpS390XSRW simplifies SRW (32-bit logical shift
+// right): a constant shift amount becomes SRWconst (masked to 0-63), and a
+// redundant ANDWconst[63] mask on the shift amount is removed. Returns
+// true if v was rewritten.
+func rewriteValueS390X_OpS390XSRW(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (SRW x (MOVDconst [c]))
+	// cond:
+	// result: (SRWconst [c&63] x)
+	for {
+		x := v.Args[0]
+		v_1 := v.Args[1]
+		if v_1.Op != OpS390XMOVDconst {
+			break
+		}
+		c := v_1.AuxInt
+		v.reset(OpS390XSRWconst)
+		v.AuxInt = c & 63
+		v.AddArg(x)
+		return true
+	}
+	// match: (SRW x (ANDWconst [63] y))
+	// cond:
+	// result: (SRW x y)
+	for {
+		x := v.Args[0]
+		v_1 := v.Args[1]
+		if v_1.Op != OpS390XANDWconst {
+			break
+		}
+		if v_1.AuxInt != 63 {
+			break
+		}
+		y := v_1.Args[0]
+		v.reset(OpS390XSRW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+// rewriteValueS390X_OpS390XSUB simplifies SUB (64-bit subtract):
+// x - const becomes SUBconst (when the constant fits in 32 bits),
+// const - x becomes NEG(SUBconst x), and x - x folds to 0. Returns true
+// if v was rewritten.
+func rewriteValueS390X_OpS390XSUB(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (SUB x (MOVDconst [c]))
+	// cond: is32Bit(c)
+	// result: (SUBconst x [c])
+	for {
+		x := v.Args[0]
+		v_1 := v.Args[1]
+		if v_1.Op != OpS390XMOVDconst {
+			break
+		}
+		c := v_1.AuxInt
+		// SUBconst carries a 32-bit immediate, so c must fit
+		if !(is32Bit(c)) {
+			break
+		}
+		v.reset(OpS390XSUBconst)
+		v.AuxInt = c
+		v.AddArg(x)
+		return true
+	}
+	// match: (SUB (MOVDconst [c]) x)
+	// cond: is32Bit(c)
+	// result: (NEG (SUBconst <v.Type> x [c]))
+	for {
+		v_0 := v.Args[0]
+		if v_0.Op != OpS390XMOVDconst {
+			break
+		}
+		c := v_0.AuxInt
+		x := v.Args[1]
+		if !(is32Bit(c)) {
+			break
+		}
+		// c - x == -(x - c)
+		v.reset(OpS390XNEG)
+		v0 := b.NewValue0(v.Line, OpS390XSUBconst, v.Type)
+		v0.AuxInt = c
+		v0.AddArg(x)
+		v.AddArg(v0)
+		return true
+	}
+	// match: (SUB x x)
+	// cond:
+	// result: (MOVDconst [0])
+	for {
+		x := v.Args[0]
+		if x != v.Args[1] {
+			break
+		}
+		v.reset(OpS390XMOVDconst)
+		v.AuxInt = 0
+		return true
+	}
+	return false
+}
+// rewriteValueS390X_OpS390XSUBEWcarrymask constant-folds SUBEWcarrymask
+// when its flags argument is a known flag constant: FlagEQ and FlagLT
+// yield -1 (all ones), FlagGT yields 0. Returns true if v was rewritten.
+func rewriteValueS390X_OpS390XSUBEWcarrymask(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (SUBEWcarrymask (FlagEQ))
+	// cond:
+	// result: (MOVDconst [-1])
+	for {
+		v_0 := v.Args[0]
+		if v_0.Op != OpS390XFlagEQ {
+			break
+		}
+		v.reset(OpS390XMOVDconst)
+		v.AuxInt = -1
+		return true
+	}
+	// match: (SUBEWcarrymask (FlagLT))
+	// cond:
+	// result: (MOVDconst [-1])
+	for {
+		v_0 := v.Args[0]
+		if v_0.Op != OpS390XFlagLT {
+			break
+		}
+		v.reset(OpS390XMOVDconst)
+		v.AuxInt = -1
+		return true
+	}
+	// match: (SUBEWcarrymask (FlagGT))
+	// cond:
+	// result: (MOVDconst [0])
+	for {
+		v_0 := v.Args[0]
+		if v_0.Op != OpS390XFlagGT {
+			break
+		}
+		v.reset(OpS390XMOVDconst)
+		v.AuxInt = 0
+		return true
+	}
+	return false
+}
+// rewriteValueS390X_OpS390XSUBEcarrymask constant-folds SUBEcarrymask when
+// its flags argument is a known flag constant: FlagEQ and FlagLT yield -1
+// (all ones), FlagGT yields 0. Mirrors the SUBEWcarrymask rules above.
+// Returns true if v was rewritten.
+func rewriteValueS390X_OpS390XSUBEcarrymask(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (SUBEcarrymask (FlagEQ))
+	// cond:
+	// result: (MOVDconst [-1])
+	for {
+		v_0 := v.Args[0]
+		if v_0.Op != OpS390XFlagEQ {
+			break
+		}
+		v.reset(OpS390XMOVDconst)
+		v.AuxInt = -1
+		return true
+	}
+	// match: (SUBEcarrymask (FlagLT))
+	// cond:
+	// result: (MOVDconst [-1])
+	for {
+		v_0 := v.Args[0]
+		if v_0.Op != OpS390XFlagLT {
+			break
+		}
+		v.reset(OpS390XMOVDconst)
+		v.AuxInt = -1
+		return true
+	}
+	// match: (SUBEcarrymask (FlagGT))
+	// cond:
+	// result: (MOVDconst [0])
+	for {
+		v_0 := v.Args[0]
+		if v_0.Op != OpS390XFlagGT {
+			break
+		}
+		v.reset(OpS390XMOVDconst)
+		v.AuxInt = 0
+		return true
+	}
+	return false
+}
+// rewriteValueS390X_OpS390XSUBW simplifies SUBW (32-bit subtract):
+// x - const becomes SUBWconst, const - x becomes NEGW(SUBWconst x), and
+// x - x folds to 0. Returns true if v was rewritten.
+func rewriteValueS390X_OpS390XSUBW(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (SUBW x (MOVDconst [c]))
+	// cond:
+	// result: (SUBWconst x [c])
+	for {
+		x := v.Args[0]
+		v_1 := v.Args[1]
+		if v_1.Op != OpS390XMOVDconst {
+			break
+		}
+		c := v_1.AuxInt
+		v.reset(OpS390XSUBWconst)
+		v.AuxInt = c
+		v.AddArg(x)
+		return true
+	}
+	// match: (SUBW (MOVDconst [c]) x)
+	// cond:
+	// result: (NEGW (SUBWconst <v.Type> x [c]))
+	for {
+		v_0 := v.Args[0]
+		if v_0.Op != OpS390XMOVDconst {
+			break
+		}
+		c := v_0.AuxInt
+		x := v.Args[1]
+		// c - x == -(x - c), using the 32-bit negate
+		v.reset(OpS390XNEGW)
+		v0 := b.NewValue0(v.Line, OpS390XSUBWconst, v.Type)
+		v0.AuxInt = c
+		v0.AddArg(x)
+		v.AddArg(v0)
+		return true
+	}
+	// match: (SUBW x x)
+	// cond:
+	// result: (MOVDconst [0])
+	for {
+		x := v.Args[0]
+		if x != v.Args[1] {
+			break
+		}
+		v.reset(OpS390XMOVDconst)
+		v.AuxInt = 0
+		return true
+	}
+	return false
+}
+// rewriteValueS390X_OpS390XSUBWconst canonicalizes SUBWconst: subtracting
+// (32-bit) zero is dropped, and any other subtraction is turned into an
+// ADDWconst of the negated constant. The second rule has no condition, so
+// this function always rewrites v and never reaches the end of the body.
+func rewriteValueS390X_OpS390XSUBWconst(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (SUBWconst [c] x)
+	// cond: int32(c) == 0
+	// result: x
+	for {
+		c := v.AuxInt
+		x := v.Args[0]
+		if !(int32(c) == 0) {
+			break
+		}
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+	// match: (SUBWconst [c] x)
+	// cond:
+	// result: (ADDWconst [int64(int32(-c))] x)
+	for {
+		c := v.AuxInt
+		x := v.Args[0]
+		v.reset(OpS390XADDWconst)
+		// negate then truncate to 32 bits, keeping AuxInt in canonical int64 form
+		v.AuxInt = int64(int32(-c))
+		v.AddArg(x)
+		return true
+	}
+}
+// rewriteValueS390X_OpS390XSUBconst simplifies SUBconst (64-bit subtract
+// immediate): subtracting 0 is dropped, x-c becomes ADDconst[-c] (except
+// for c == -1<<31, whose negation would overflow the 32-bit immediate),
+// constant operands are folded, and nested SUBconst pairs are combined.
+// Returns true if v was rewritten.
+func rewriteValueS390X_OpS390XSUBconst(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (SUBconst [0] x)
+	// cond:
+	// result: x
+	for {
+		if v.AuxInt != 0 {
+			break
+		}
+		x := v.Args[0]
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+	// match: (SUBconst [c] x)
+	// cond: c != -(1<<31)
+	// result: (ADDconst [-c] x)
+	for {
+		c := v.AuxInt
+		x := v.Args[0]
+		// -(-1<<31) does not fit in 32 bits, so that case is excluded
+		if !(c != -(1 << 31)) {
+			break
+		}
+		v.reset(OpS390XADDconst)
+		v.AuxInt = -c
+		v.AddArg(x)
+		return true
+	}
+	// match: (SUBconst (MOVDconst [d]) [c])
+	// cond:
+	// result: (MOVDconst [d-c])
+	for {
+		c := v.AuxInt
+		v_0 := v.Args[0]
+		if v_0.Op != OpS390XMOVDconst {
+			break
+		}
+		d := v_0.AuxInt
+		v.reset(OpS390XMOVDconst)
+		v.AuxInt = d - c
+		return true
+	}
+	// match: (SUBconst (SUBconst x [d]) [c])
+	// cond: is32Bit(-c-d)
+	// result: (ADDconst [-c-d] x)
+	for {
+		c := v.AuxInt
+		v_0 := v.Args[0]
+		if v_0.Op != OpS390XSUBconst {
+			break
+		}
+		d := v_0.AuxInt
+		x := v_0.Args[0]
+		// combined immediate must still fit in 32 bits
+		if !(is32Bit(-c - d)) {
+			break
+		}
+		v.reset(OpS390XADDconst)
+		v.AuxInt = -c - d
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+// rewriteValueS390X_OpS390XXOR simplifies XOR (64-bit): a constant operand
+// on either side becomes XORconst when it fits in 32 bits (XOR is
+// commutative, hence the two mirrored rules), and x ^ x folds to 0.
+// Returns true if v was rewritten.
+func rewriteValueS390X_OpS390XXOR(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (XOR x (MOVDconst [c]))
+	// cond: is32Bit(c)
+	// result: (XORconst [c] x)
+	for {
+		x := v.Args[0]
+		v_1 := v.Args[1]
+		if v_1.Op != OpS390XMOVDconst {
+			break
+		}
+		c := v_1.AuxInt
+		if !(is32Bit(c)) {
+			break
+		}
+		v.reset(OpS390XXORconst)
+		v.AuxInt = c
+		v.AddArg(x)
+		return true
+	}
+	// match: (XOR (MOVDconst [c]) x)
+	// cond: is32Bit(c)
+	// result: (XORconst [c] x)
+	for {
+		v_0 := v.Args[0]
+		if v_0.Op != OpS390XMOVDconst {
+			break
+		}
+		c := v_0.AuxInt
+		x := v.Args[1]
+		if !(is32Bit(c)) {
+			break
+		}
+		v.reset(OpS390XXORconst)
+		v.AuxInt = c
+		v.AddArg(x)
+		return true
+	}
+	// match: (XOR x x)
+	// cond:
+	// result: (MOVDconst [0])
+	for {
+		x := v.Args[0]
+		if x != v.Args[1] {
+			break
+		}
+		v.reset(OpS390XMOVDconst)
+		v.AuxInt = 0
+		return true
+	}
+	return false
+}
+// rewriteValueS390X_OpS390XXORW simplifies XORW (32-bit): a constant
+// operand on either side becomes XORWconst (no size condition is needed
+// for the 32-bit form), and x ^ x folds to 0. Returns true if v was
+// rewritten.
+func rewriteValueS390X_OpS390XXORW(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (XORW x (MOVDconst [c]))
+	// cond:
+	// result: (XORWconst [c] x)
+	for {
+		x := v.Args[0]
+		v_1 := v.Args[1]
+		if v_1.Op != OpS390XMOVDconst {
+			break
+		}
+		c := v_1.AuxInt
+		v.reset(OpS390XXORWconst)
+		v.AuxInt = c
+		v.AddArg(x)
+		return true
+	}
+	// match: (XORW (MOVDconst [c]) x)
+	// cond:
+	// result: (XORWconst [c] x)
+	for {
+		v_0 := v.Args[0]
+		if v_0.Op != OpS390XMOVDconst {
+			break
+		}
+		c := v_0.AuxInt
+		x := v.Args[1]
+		v.reset(OpS390XXORWconst)
+		v.AuxInt = c
+		v.AddArg(x)
+		return true
+	}
+	// match: (XORW x x)
+	// cond:
+	// result: (MOVDconst [0])
+	for {
+		x := v.Args[0]
+		if x != v.Args[1] {
+			break
+		}
+		v.reset(OpS390XMOVDconst)
+		v.AuxInt = 0
+		return true
+	}
+	return false
+}
+// rewriteValueS390X_OpS390XXORWconst simplifies XORWconst: XOR with a
+// (32-bit) zero constant is dropped, and XOR of two constants is folded.
+// Returns true if v was rewritten.
+func rewriteValueS390X_OpS390XXORWconst(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (XORWconst [c] x)
+	// cond: int32(c)==0
+	// result: x
+	for {
+		c := v.AuxInt
+		x := v.Args[0]
+		if !(int32(c) == 0) {
+			break
+		}
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+	// match: (XORWconst [c] (MOVDconst [d]))
+	// cond:
+	// result: (MOVDconst [c^d])
+	for {
+		c := v.AuxInt
+		v_0 := v.Args[0]
+		if v_0.Op != OpS390XMOVDconst {
+			break
+		}
+		d := v_0.AuxInt
+		v.reset(OpS390XMOVDconst)
+		v.AuxInt = c ^ d
+		return true
+	}
+	return false
+}
+// rewriteValueS390X_OpS390XXORconst simplifies XORconst (64-bit): XOR with
+// 0 is dropped, and XOR of two constants is folded. Returns true if v was
+// rewritten.
+func rewriteValueS390X_OpS390XXORconst(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (XORconst [0] x)
+	// cond:
+	// result: x
+	for {
+		if v.AuxInt != 0 {
+			break
+		}
+		x := v.Args[0]
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+	// match: (XORconst [c] (MOVDconst [d]))
+	// cond:
+	// result: (MOVDconst [c^d])
+	for {
+		c := v.AuxInt
+		v_0 := v.Args[0]
+		if v_0.Op != OpS390XMOVDconst {
+			break
+		}
+		d := v_0.AuxInt
+		v.reset(OpS390XMOVDconst)
+		v.AuxInt = c ^ d
+		return true
+	}
+	return false
+}
+// rewriteValueS390X_OpSignExt16to32 lowers the generic SignExt16to32 op to
+// MOVHreg (sign-extend halfword). Always rewrites v and returns true.
+func rewriteValueS390X_OpSignExt16to32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (SignExt16to32 x)
+	// cond:
+	// result: (MOVHreg x)
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XMOVHreg)
+		v.AddArg(x)
+		return true
+	}
+}
+// rewriteValueS390X_OpSignExt16to64 lowers the generic SignExt16to64 op to
+// MOVHreg (sign-extend halfword). Always rewrites v and returns true.
+func rewriteValueS390X_OpSignExt16to64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (SignExt16to64 x)
+	// cond:
+	// result: (MOVHreg x)
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XMOVHreg)
+		v.AddArg(x)
+		return true
+	}
+}
+// rewriteValueS390X_OpSignExt32to64 lowers the generic SignExt32to64 op to
+// MOVWreg (sign-extend word). Always rewrites v and returns true.
+func rewriteValueS390X_OpSignExt32to64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (SignExt32to64 x)
+	// cond:
+	// result: (MOVWreg x)
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XMOVWreg)
+		v.AddArg(x)
+		return true
+	}
+}
+// rewriteValueS390X_OpSignExt8to16 lowers the generic SignExt8to16 op to
+// MOVBreg (sign-extend byte). Always rewrites v and returns true.
+func rewriteValueS390X_OpSignExt8to16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (SignExt8to16 x)
+	// cond:
+	// result: (MOVBreg x)
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XMOVBreg)
+		v.AddArg(x)
+		return true
+	}
+}
+// rewriteValueS390X_OpSignExt8to32 lowers the generic SignExt8to32 op to
+// MOVBreg (sign-extend byte). Always rewrites v and returns true.
+func rewriteValueS390X_OpSignExt8to32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (SignExt8to32 x)
+	// cond:
+	// result: (MOVBreg x)
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XMOVBreg)
+		v.AddArg(x)
+		return true
+	}
+}
+// rewriteValueS390X_OpSignExt8to64 lowers the generic SignExt8to64 op to
+// MOVBreg (sign-extend byte). Always rewrites v and returns true.
+func rewriteValueS390X_OpSignExt8to64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (SignExt8to64 x)
+	// cond:
+	// result: (MOVBreg x)
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XMOVBreg)
+		v.AddArg(x)
+		return true
+	}
+}
+// rewriteValueS390X_OpSqrt lowers the generic Sqrt op to the s390x FSQRT
+// instruction. Always rewrites v and returns true.
+func rewriteValueS390X_OpSqrt(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Sqrt x)
+	// cond:
+	// result: (FSQRT x)
+	for {
+		x := v.Args[0]
+		v.reset(OpS390XFSQRT)
+		v.AddArg(x)
+		return true
+	}
+}
+// rewriteValueS390X_OpStaticCall lowers the generic StaticCall op to
+// CALLstatic, carrying over the argument width (AuxInt), call target
+// (Aux) and memory argument unchanged. Always rewrites v and returns true.
+func rewriteValueS390X_OpStaticCall(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (StaticCall [argwid] {target} mem)
+	// cond:
+	// result: (CALLstatic [argwid] {target} mem)
+	for {
+		argwid := v.AuxInt
+		target := v.Aux
+		mem := v.Args[0]
+		v.reset(OpS390XCALLstatic)
+		v.AuxInt = argwid
+		v.Aux = target
+		v.AddArg(mem)
+		return true
+	}
+}
+// rewriteValueS390X_OpStore lowers the generic Store op (AuxInt = store
+// size in bytes) to the matching s390x store instruction. The rules are
+// order-dependent: the float rules for sizes 8 and 4 are tried first, so
+// the unconditional integer rules for the same sizes below them apply only
+// when the value being stored is not a float. Returns true if v was
+// rewritten.
+func rewriteValueS390X_OpStore(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Store [8] ptr val mem)
+	// cond: is64BitFloat(val.Type)
+	// result: (FMOVDstore ptr val mem)
+	for {
+		if v.AuxInt != 8 {
+			break
+		}
+		ptr := v.Args[0]
+		val := v.Args[1]
+		mem := v.Args[2]
+		if !(is64BitFloat(val.Type)) {
+			break
+		}
+		v.reset(OpS390XFMOVDstore)
+		v.AddArg(ptr)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (Store [4] ptr val mem)
+	// cond: is32BitFloat(val.Type)
+	// result: (FMOVSstore ptr val mem)
+	for {
+		if v.AuxInt != 4 {
+			break
+		}
+		ptr := v.Args[0]
+		val := v.Args[1]
+		mem := v.Args[2]
+		if !(is32BitFloat(val.Type)) {
+			break
+		}
+		v.reset(OpS390XFMOVSstore)
+		v.AddArg(ptr)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (Store [8] ptr val mem)
+	// cond:
+	// result: (MOVDstore ptr val mem)
+	for {
+		if v.AuxInt != 8 {
+			break
+		}
+		ptr := v.Args[0]
+		val := v.Args[1]
+		mem := v.Args[2]
+		v.reset(OpS390XMOVDstore)
+		v.AddArg(ptr)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (Store [4] ptr val mem)
+	// cond:
+	// result: (MOVWstore ptr val mem)
+	for {
+		if v.AuxInt != 4 {
+			break
+		}
+		ptr := v.Args[0]
+		val := v.Args[1]
+		mem := v.Args[2]
+		v.reset(OpS390XMOVWstore)
+		v.AddArg(ptr)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (Store [2] ptr val mem)
+	// cond:
+	// result: (MOVHstore ptr val mem)
+	for {
+		if v.AuxInt != 2 {
+			break
+		}
+		ptr := v.Args[0]
+		val := v.Args[1]
+		mem := v.Args[2]
+		v.reset(OpS390XMOVHstore)
+		v.AddArg(ptr)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (Store [1] ptr val mem)
+	// cond:
+	// result: (MOVBstore ptr val mem)
+	for {
+		if v.AuxInt != 1 {
+			break
+		}
+		ptr := v.Args[0]
+		val := v.Args[1]
+		mem := v.Args[2]
+		v.reset(OpS390XMOVBstore)
+		v.AddArg(ptr)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
+	return false
+}
+// rewriteValueS390X_OpSub16 lowers the generic Sub16 op to the 32-bit SUBW
+// instruction. Always rewrites v and returns true.
+func rewriteValueS390X_OpSub16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Sub16 x y)
+	// cond:
+	// result: (SUBW x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XSUBW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+// rewriteValueS390X_OpSub32 lowers the generic Sub32 op to the 32-bit SUBW
+// instruction. Always rewrites v and returns true.
+func rewriteValueS390X_OpSub32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Sub32 x y)
+	// cond:
+	// result: (SUBW x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XSUBW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+// rewriteValueS390X_OpSub32F lowers the generic Sub32F op to the FSUBS
+// (single-precision subtract) instruction. Always rewrites v and returns
+// true.
+func rewriteValueS390X_OpSub32F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Sub32F x y)
+	// cond:
+	// result: (FSUBS x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XFSUBS)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+// rewriteValueS390X_OpSub64 lowers the generic Sub64 op to the 64-bit SUB
+// instruction. Always rewrites v and returns true.
+func rewriteValueS390X_OpSub64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Sub64 x y)
+	// cond:
+	// result: (SUB x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XSUB)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+// rewriteValueS390X_OpSub64F lowers the generic Sub64F op to the FSUB
+// (double-precision subtract) instruction. Always rewrites v and returns
+// true.
+func rewriteValueS390X_OpSub64F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Sub64F x y)
+	// cond:
+	// result: (FSUB x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XFSUB)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+// rewriteValueS390X_OpSub8 lowers the generic Sub8 op to the 32-bit SUBW
+// instruction. Always rewrites v and returns true.
+func rewriteValueS390X_OpSub8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Sub8 x y)
+	// cond:
+	// result: (SUBW x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XSUBW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+// rewriteValueS390X_OpSubPtr lowers the generic SubPtr op to the 64-bit
+// SUB instruction (pointers are 64-bit on s390x). Always rewrites v and
+// returns true.
+func rewriteValueS390X_OpSubPtr(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (SubPtr x y)
+	// cond:
+	// result: (SUB x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpS390XSUB)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+}
+// rewriteValueS390X_OpTrunc16to8 lowers Trunc16to8 to a plain copy of its
+// argument — truncation needs no instruction here. Always rewrites v and
+// returns true.
+func rewriteValueS390X_OpTrunc16to8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Trunc16to8 x)
+	// cond:
+	// result: x
+	for {
+		x := v.Args[0]
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+}
+// rewriteValueS390X_OpTrunc32to16 lowers Trunc32to16 to a plain copy of
+// its argument — truncation needs no instruction here. Always rewrites v
+// and returns true.
+func rewriteValueS390X_OpTrunc32to16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Trunc32to16 x)
+	// cond:
+	// result: x
+	for {
+		x := v.Args[0]
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+}
+// rewriteValueS390X_OpTrunc32to8 lowers Trunc32to8 to a plain copy of its
+// argument — truncation needs no instruction here. Always rewrites v and
+// returns true.
+func rewriteValueS390X_OpTrunc32to8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Trunc32to8 x)
+	// cond:
+	// result: x
+	for {
+		x := v.Args[0]
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+}
+// rewriteValueS390X_OpTrunc64to16 rewrites (Trunc64to16 x) -> x: the
+// truncation needs no instruction here, so the op becomes a copy of
+// its argument. Always fires.
+func rewriteValueS390X_OpTrunc64to16(v *Value, config *Config) bool {
+ arg := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = arg.Type
+ v.AddArg(arg)
+ return true
+}
+// rewriteValueS390X_OpTrunc64to32 rewrites (Trunc64to32 x) -> x: the
+// truncation needs no instruction here, so the op becomes a copy of
+// its argument. Always fires.
+func rewriteValueS390X_OpTrunc64to32(v *Value, config *Config) bool {
+ arg := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = arg.Type
+ v.AddArg(arg)
+ return true
+}
+// rewriteValueS390X_OpTrunc64to8 rewrites (Trunc64to8 x) -> x: the
+// truncation needs no instruction here, so the op becomes a copy of
+// its argument. Always fires.
+func rewriteValueS390X_OpTrunc64to8(v *Value, config *Config) bool {
+ arg := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = arg.Type
+ v.AddArg(arg)
+ return true
+}
+// rewriteValueS390X_OpXor16 lowers the generic op to its S390X form:
+// (Xor16 x y) -> (XORW x y). Always fires.
+func rewriteValueS390X_OpXor16(v *Value, config *Config) bool {
+ x, y := v.Args[0], v.Args[1]
+ v.reset(OpS390XXORW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+}
+// rewriteValueS390X_OpXor32 lowers the generic op to its S390X form:
+// (Xor32 x y) -> (XORW x y). Always fires.
+func rewriteValueS390X_OpXor32(v *Value, config *Config) bool {
+ x, y := v.Args[0], v.Args[1]
+ v.reset(OpS390XXORW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+}
+// rewriteValueS390X_OpXor64 lowers the generic op to its S390X form:
+// (Xor64 x y) -> (XOR x y). Always fires.
+func rewriteValueS390X_OpXor64(v *Value, config *Config) bool {
+ x, y := v.Args[0], v.Args[1]
+ v.reset(OpS390XXOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+}
+// rewriteValueS390X_OpXor8 lowers the generic op to its S390X form:
+// (Xor8 x y) -> (XORW x y). Always fires.
+func rewriteValueS390X_OpXor8(v *Value, config *Config) bool {
+ x, y := v.Args[0], v.Args[1]
+ v.reset(OpS390XXORW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+}
+// rewriteValueS390X_OpZero lowers the generic Zero [s] op, which zeroes
+// SizeAndAlign(s).Size() bytes starting at destptr. Cases are tried in
+// order:
+//   - size 0: the op is a no-op and becomes a copy of mem;
+//   - sizes 1, 2, 4, 8: a single MOV{B,H,W,D}storeconst of zero;
+//   - sizes 3, 5, 6, 7: two constant stores (offset packed into AuxInt
+//     via makeValAndOff); for size 7 the two word stores overlap;
+//   - sizes 1..1024: one CLEAR op whose AuxInt packs the length with
+//     offset 0;
+//   - sizes > 1024: a LoweredZero loop; the remainder after the full
+//     256-byte chunks travels in AuxInt.
+// Returns true if a rewrite was applied.
+func rewriteValueS390X_OpZero(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Zero [s] _ mem)
+ // cond: SizeAndAlign(s).Size() == 0
+ // result: mem
+ for {
+ s := v.AuxInt
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 0) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = mem.Type
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 1
+ // result: (MOVBstoreconst [0] destptr mem)
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 1) {
+ break
+ }
+ v.reset(OpS390XMOVBstoreconst)
+ v.AuxInt = 0
+ v.AddArg(destptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 2
+ // result: (MOVHstoreconst [0] destptr mem)
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 2) {
+ break
+ }
+ v.reset(OpS390XMOVHstoreconst)
+ v.AuxInt = 0
+ v.AddArg(destptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 4
+ // result: (MOVWstoreconst [0] destptr mem)
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 4) {
+ break
+ }
+ v.reset(OpS390XMOVWstoreconst)
+ v.AuxInt = 0
+ v.AddArg(destptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 8
+ // result: (MOVDstoreconst [0] destptr mem)
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 8) {
+ break
+ }
+ v.reset(OpS390XMOVDstoreconst)
+ v.AuxInt = 0
+ v.AddArg(destptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 3
+ // result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVHstoreconst [0] destptr mem))
+ // Size 3 = a 2-byte store at offset 0 plus a 1-byte store at offset 2.
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 3) {
+ break
+ }
+ v.reset(OpS390XMOVBstoreconst)
+ v.AuxInt = makeValAndOff(0, 2)
+ v.AddArg(destptr)
+ v0 := b.NewValue0(v.Line, OpS390XMOVHstoreconst, TypeMem)
+ v0.AuxInt = 0
+ v0.AddArg(destptr)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 5
+ // result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVWstoreconst [0] destptr mem))
+ // Size 5 = a 4-byte store at offset 0 plus a 1-byte store at offset 4.
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 5) {
+ break
+ }
+ v.reset(OpS390XMOVBstoreconst)
+ v.AuxInt = makeValAndOff(0, 4)
+ v.AddArg(destptr)
+ v0 := b.NewValue0(v.Line, OpS390XMOVWstoreconst, TypeMem)
+ v0.AuxInt = 0
+ v0.AddArg(destptr)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 6
+ // result: (MOVHstoreconst [makeValAndOff(0,4)] destptr (MOVWstoreconst [0] destptr mem))
+ // Size 6 = a 4-byte store at offset 0 plus a 2-byte store at offset 4.
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 6) {
+ break
+ }
+ v.reset(OpS390XMOVHstoreconst)
+ v.AuxInt = makeValAndOff(0, 4)
+ v.AddArg(destptr)
+ v0 := b.NewValue0(v.Line, OpS390XMOVWstoreconst, TypeMem)
+ v0.AuxInt = 0
+ v0.AddArg(destptr)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 7
+ // result: (MOVWstoreconst [makeValAndOff(0,3)] destptr (MOVWstoreconst [0] destptr mem))
+ // Size 7 = two overlapping 4-byte stores, at offsets 0 and 3; together
+ // they cover bytes 0-6 (byte 3 is written twice).
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 7) {
+ break
+ }
+ v.reset(OpS390XMOVWstoreconst)
+ v.AuxInt = makeValAndOff(0, 3)
+ v.AddArg(destptr)
+ v0 := b.NewValue0(v.Line, OpS390XMOVWstoreconst, TypeMem)
+ v0.AuxInt = 0
+ v0.AddArg(destptr)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() > 0 && SizeAndAlign(s).Size() <= 1024
+ // result: (CLEAR [makeValAndOff(SizeAndAlign(s).Size(), 0)] destptr mem)
+ // A single CLEAR op whose AuxInt packs (length, offset 0).
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() > 0 && SizeAndAlign(s).Size() <= 1024) {
+ break
+ }
+ v.reset(OpS390XCLEAR)
+ v.AuxInt = makeValAndOff(SizeAndAlign(s).Size(), 0)
+ v.AddArg(destptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() > 1024
+ // result: (LoweredZero [SizeAndAlign(s).Size()%256] destptr (ADDconst <destptr.Type> destptr [(SizeAndAlign(s).Size()/256)*256]) mem)
+ // The ADDconst points just past the full 256-byte chunks; the leftover
+ // byte count (size % 256) is carried in AuxInt.
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() > 1024) {
+ break
+ }
+ v.reset(OpS390XLoweredZero)
+ v.AuxInt = SizeAndAlign(s).Size() % 256
+ v.AddArg(destptr)
+ v0 := b.NewValue0(v.Line, OpS390XADDconst, destptr.Type)
+ v0.AuxInt = (SizeAndAlign(s).Size() / 256) * 256
+ v0.AddArg(destptr)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+// rewriteValueS390X_OpZeroExt16to32 lowers the generic op to its S390X
+// form: (ZeroExt16to32 x) -> (MOVHZreg x). Always fires.
+func rewriteValueS390X_OpZeroExt16to32(v *Value, config *Config) bool {
+ arg := v.Args[0]
+ v.reset(OpS390XMOVHZreg)
+ v.AddArg(arg)
+ return true
+}
+// rewriteValueS390X_OpZeroExt16to64 lowers the generic op to its S390X
+// form: (ZeroExt16to64 x) -> (MOVHZreg x). Always fires.
+func rewriteValueS390X_OpZeroExt16to64(v *Value, config *Config) bool {
+ arg := v.Args[0]
+ v.reset(OpS390XMOVHZreg)
+ v.AddArg(arg)
+ return true
+}
+// rewriteValueS390X_OpZeroExt32to64 lowers the generic op to its S390X
+// form: (ZeroExt32to64 x) -> (MOVWZreg x). Always fires.
+func rewriteValueS390X_OpZeroExt32to64(v *Value, config *Config) bool {
+ arg := v.Args[0]
+ v.reset(OpS390XMOVWZreg)
+ v.AddArg(arg)
+ return true
+}
+// rewriteValueS390X_OpZeroExt8to16 lowers the generic op to its S390X
+// form: (ZeroExt8to16 x) -> (MOVBZreg x). Always fires.
+func rewriteValueS390X_OpZeroExt8to16(v *Value, config *Config) bool {
+ arg := v.Args[0]
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(arg)
+ return true
+}
+// rewriteValueS390X_OpZeroExt8to32 lowers the generic op to its S390X
+// form: (ZeroExt8to32 x) -> (MOVBZreg x). Always fires.
+func rewriteValueS390X_OpZeroExt8to32(v *Value, config *Config) bool {
+ arg := v.Args[0]
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(arg)
+ return true
+}
+// rewriteValueS390X_OpZeroExt8to64 lowers the generic op to its S390X
+// form: (ZeroExt8to64 x) -> (MOVBZreg x). Always fires.
+func rewriteValueS390X_OpZeroExt8to64(v *Value, config *Config) bool {
+ arg := v.Args[0]
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(arg)
+ return true
+}
+// rewriteBlockS390X applies the S390X block rewrite rules to b and
+// reports whether the block was changed. The rules fall into four
+// families:
+//   - (KIND (InvertFlags cmp)): drop the InvertFlags by flipping the
+//     comparison sense of the block (e.g. GE -> LE) and branching on
+//     cmp directly.
+//   - (KIND (Flag{EQ,LT,GT})): the comparison result is statically
+//     known, so the block becomes an unconditional BlockFirst;
+//     b.swapSuccessors() is called when the condition is statically
+//     false, so that the "no" edge is taken.
+//   - (If ...): lower a generic If. When the control is the
+//     MOVD<cond> (MOVDconst [0]) (MOVDconst [1]) conditional-move idiom,
+//     branch on its flags argument directly; otherwise fall back to
+//     testing the boolean with TESTB and branching NE.
+//   - (NE (TESTB (MOVD<cond> ...))): undo the TESTB fallback when the
+//     tested boolean itself came from a conditional move.
+// The `_ = yes` / `_ = no` assignments are no-ops that keep the
+// generated pattern variables referenced.
+func rewriteBlockS390X(b *Block, config *Config) bool {
+ switch b.Kind {
+ case BlockS390XEQ:
+ // match: (EQ (InvertFlags cmp) yes no)
+ // cond:
+ // result: (EQ cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpS390XInvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockS390XEQ
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (EQ (FlagEQ) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpS390XFlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (EQ (FlagLT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpS390XFlagLT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ // Condition is statically false: swap so the "no" edge is taken.
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (EQ (FlagGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpS390XFlagGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ case BlockS390XGE:
+ // match: (GE (InvertFlags cmp) yes no)
+ // cond:
+ // result: (LE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpS390XInvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockS390XLE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (GE (FlagEQ) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpS390XFlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (GE (FlagLT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpS390XFlagLT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (GE (FlagGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpS390XFlagGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ case BlockS390XGT:
+ // match: (GT (InvertFlags cmp) yes no)
+ // cond:
+ // result: (LT cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpS390XInvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockS390XLT
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (GT (FlagEQ) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpS390XFlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (GT (FlagLT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpS390XFlagLT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (GT (FlagGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpS390XFlagGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ case BlockIf:
+ // The next six rules all recognise the conditional-move idiom
+ // MOVD<cond> (MOVDconst [0]) (MOVDconst [1]) cmp used to
+ // materialise a boolean, and branch on cmp's flags directly.
+ // match: (If (MOVDLT (MOVDconst [0]) (MOVDconst [1]) cmp) yes no)
+ // cond:
+ // result: (LT cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpS390XMOVDLT {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ if v_0.AuxInt != 0 {
+ break
+ }
+ v_1 := v.Args[1]
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ if v_1.AuxInt != 1 {
+ break
+ }
+ cmp := v.Args[2]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockS390XLT
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (MOVDLE (MOVDconst [0]) (MOVDconst [1]) cmp) yes no)
+ // cond:
+ // result: (LE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpS390XMOVDLE {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ if v_0.AuxInt != 0 {
+ break
+ }
+ v_1 := v.Args[1]
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ if v_1.AuxInt != 1 {
+ break
+ }
+ cmp := v.Args[2]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockS390XLE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (MOVDGT (MOVDconst [0]) (MOVDconst [1]) cmp) yes no)
+ // cond:
+ // result: (GT cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpS390XMOVDGT {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ if v_0.AuxInt != 0 {
+ break
+ }
+ v_1 := v.Args[1]
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ if v_1.AuxInt != 1 {
+ break
+ }
+ cmp := v.Args[2]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockS390XGT
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (MOVDGE (MOVDconst [0]) (MOVDconst [1]) cmp) yes no)
+ // cond:
+ // result: (GE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpS390XMOVDGE {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ if v_0.AuxInt != 0 {
+ break
+ }
+ v_1 := v.Args[1]
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ if v_1.AuxInt != 1 {
+ break
+ }
+ cmp := v.Args[2]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockS390XGE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) cmp) yes no)
+ // cond:
+ // result: (EQ cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpS390XMOVDEQ {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ if v_0.AuxInt != 0 {
+ break
+ }
+ v_1 := v.Args[1]
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ if v_1.AuxInt != 1 {
+ break
+ }
+ cmp := v.Args[2]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockS390XEQ
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (MOVDNE (MOVDconst [0]) (MOVDconst [1]) cmp) yes no)
+ // cond:
+ // result: (NE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpS390XMOVDNE {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ if v_0.AuxInt != 0 {
+ break
+ }
+ v_1 := v.Args[1]
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ if v_1.AuxInt != 1 {
+ break
+ }
+ cmp := v.Args[2]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockS390XNE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) cmp) yes no)
+ // cond:
+ // result: (GTF cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpS390XMOVDGTnoinv {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ if v_0.AuxInt != 0 {
+ break
+ }
+ v_1 := v.Args[1]
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ if v_1.AuxInt != 1 {
+ break
+ }
+ cmp := v.Args[2]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockS390XGTF
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) cmp) yes no)
+ // cond:
+ // result: (GEF cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpS390XMOVDGEnoinv {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ if v_0.AuxInt != 0 {
+ break
+ }
+ v_1 := v.Args[1]
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ if v_1.AuxInt != 1 {
+ break
+ }
+ cmp := v.Args[2]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockS390XGEF
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // Fallback for any other boolean control: test it with TESTB and
+ // branch NE. This rule always fires, so BlockIf never survives.
+ // match: (If cond yes no)
+ // cond:
+ // result: (NE (TESTB cond) yes no)
+ for {
+ v := b.Control
+ _ = v
+ cond := b.Control
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockS390XNE
+ v0 := b.NewValue0(v.Line, OpS390XTESTB, TypeFlags)
+ v0.AddArg(cond)
+ b.SetControl(v0)
+ _ = yes
+ _ = no
+ return true
+ }
+ case BlockS390XLE:
+ // match: (LE (InvertFlags cmp) yes no)
+ // cond:
+ // result: (GE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpS390XInvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockS390XGE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (LE (FlagEQ) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpS390XFlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (LE (FlagLT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpS390XFlagLT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (LE (FlagGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpS390XFlagGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ case BlockS390XLT:
+ // match: (LT (InvertFlags cmp) yes no)
+ // cond:
+ // result: (GT cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpS390XInvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockS390XGT
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (LT (FlagEQ) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpS390XFlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (LT (FlagLT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpS390XFlagLT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (LT (FlagGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpS390XFlagGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ case BlockS390XNE:
+ // The (NE (TESTB (MOVD<cond> ...))) rules undo the generic If
+ // fallback when the tested boolean came from a conditional move.
+ // match: (NE (TESTB (MOVDLT (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no)
+ // cond:
+ // result: (LT cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpS390XTESTB {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDLT {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XMOVDconst {
+ break
+ }
+ if v_0_0.AuxInt != 0 {
+ break
+ }
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpS390XMOVDconst {
+ break
+ }
+ if v_0_1.AuxInt != 1 {
+ break
+ }
+ cmp := v_0.Args[2]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockS390XLT
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (TESTB (MOVDLE (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no)
+ // cond:
+ // result: (LE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpS390XTESTB {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDLE {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XMOVDconst {
+ break
+ }
+ if v_0_0.AuxInt != 0 {
+ break
+ }
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpS390XMOVDconst {
+ break
+ }
+ if v_0_1.AuxInt != 1 {
+ break
+ }
+ cmp := v_0.Args[2]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockS390XLE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (TESTB (MOVDGT (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no)
+ // cond:
+ // result: (GT cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpS390XTESTB {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDGT {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XMOVDconst {
+ break
+ }
+ if v_0_0.AuxInt != 0 {
+ break
+ }
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpS390XMOVDconst {
+ break
+ }
+ if v_0_1.AuxInt != 1 {
+ break
+ }
+ cmp := v_0.Args[2]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockS390XGT
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (TESTB (MOVDGE (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no)
+ // cond:
+ // result: (GE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpS390XTESTB {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDGE {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XMOVDconst {
+ break
+ }
+ if v_0_0.AuxInt != 0 {
+ break
+ }
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpS390XMOVDconst {
+ break
+ }
+ if v_0_1.AuxInt != 1 {
+ break
+ }
+ cmp := v_0.Args[2]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockS390XGE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (TESTB (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no)
+ // cond:
+ // result: (EQ cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpS390XTESTB {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDEQ {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XMOVDconst {
+ break
+ }
+ if v_0_0.AuxInt != 0 {
+ break
+ }
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpS390XMOVDconst {
+ break
+ }
+ if v_0_1.AuxInt != 1 {
+ break
+ }
+ cmp := v_0.Args[2]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockS390XEQ
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (TESTB (MOVDNE (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no)
+ // cond:
+ // result: (NE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpS390XTESTB {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDNE {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XMOVDconst {
+ break
+ }
+ if v_0_0.AuxInt != 0 {
+ break
+ }
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpS390XMOVDconst {
+ break
+ }
+ if v_0_1.AuxInt != 1 {
+ break
+ }
+ cmp := v_0.Args[2]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockS390XNE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (TESTB (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no)
+ // cond:
+ // result: (GTF cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpS390XTESTB {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDGTnoinv {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XMOVDconst {
+ break
+ }
+ if v_0_0.AuxInt != 0 {
+ break
+ }
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpS390XMOVDconst {
+ break
+ }
+ if v_0_1.AuxInt != 1 {
+ break
+ }
+ cmp := v_0.Args[2]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockS390XGTF
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (TESTB (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no)
+ // cond:
+ // result: (GEF cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpS390XTESTB {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDGEnoinv {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XMOVDconst {
+ break
+ }
+ if v_0_0.AuxInt != 0 {
+ break
+ }
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpS390XMOVDconst {
+ break
+ }
+ if v_0_1.AuxInt != 1 {
+ break
+ }
+ cmp := v_0.Args[2]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockS390XGEF
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (InvertFlags cmp) yes no)
+ // cond:
+ // result: (NE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpS390XInvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockS390XNE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (FlagEQ) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpS390XFlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (NE (FlagLT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpS390XFlagLT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (FlagGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpS390XFlagGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ }
+ return false
+}