// zero upper bit of the register; no need to zero-extend
(MOVBUreg x:((Equal|NotEqual|LessThan|LessThanU|LessThanF|LessEqual|LessEqualU|LessEqualF|GreaterThan|GreaterThanU|GreaterThanF|GreaterEqual|GreaterEqualU|GreaterEqualF) _)) => (MOVDreg x)
+// omit unsigned extension
+
+(MOVWUreg x) && zeroUpper32Bits(x, 3) => x
+
+// omit sign extension
+
+(MOVWreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffff80000000) == 0 => (ANDconst <t> x [c])
+(MOVHreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffffffff8000) == 0 => (ANDconst <t> x [c])
+(MOVBreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffffffffff80) == 0 => (ANDconst <t> x [c])
+
// absorb flag constants into conditional instructions
(CSEL [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x
(CSEL [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => y
// - *const instructions may use a constant larger than the instruction can encode.
// In this case the assembler expands to multiple instructions and uses tmp
// register (R27).
+// - All 32-bit Ops will zero the upper 32 bits of the destination register.
// Suffixes encode the bit width of various instructions.
// D (double word) = 64 bit
OpAMD64SHRL, OpAMD64SHRLconst, OpAMD64SARL, OpAMD64SARLconst,
OpAMD64SHLL, OpAMD64SHLLconst:
return true
+ case OpARM64REV16W, OpARM64REVW, OpARM64RBITW, OpARM64CLZW, OpARM64EXTRWconst,
+ OpARM64MULW, OpARM64MNEGW, OpARM64UDIVW, OpARM64DIVW, OpARM64UMODW,
+ OpARM64MADDW, OpARM64MSUBW, OpARM64RORW, OpARM64RORWconst:
+ return true
case OpArg:
return x.Type.Size() == 4
case OpPhi, OpSelect0, OpSelect1:
v.AuxInt = int64ToAuxInt(int64(int8(c)))
return true
}
+ // match: (MOVBreg <t> (ANDconst x [c]))
+ // cond: uint64(c) & uint64(0xffffffffffffff80) == 0
+ // result: (ANDconst <t> x [c])
+ for {
+ t := v.Type
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(uint64(c)&uint64(0xffffffffffffff80) == 0) {
+ break
+ }
+ v.reset(OpARM64ANDconst)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
// match: (MOVBreg (SLLconst [lc] x))
// cond: lc < 8
// result: (SBFIZ [armBFAuxInt(lc, 8-lc)] x)
v.AuxInt = int64ToAuxInt(int64(int16(c)))
return true
}
+ // match: (MOVHreg <t> (ANDconst x [c]))
+ // cond: uint64(c) & uint64(0xffffffffffff8000) == 0
+ // result: (ANDconst <t> x [c])
+ for {
+ t := v.Type
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(uint64(c)&uint64(0xffffffffffff8000) == 0) {
+ break
+ }
+ v.reset(OpARM64ANDconst)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
// match: (MOVHreg (SLLconst [lc] x))
// cond: lc < 16
// result: (SBFIZ [armBFAuxInt(lc, 16-lc)] x)
v.AuxInt = int64ToAuxInt(int64(uint32(c)))
return true
}
+ // match: (MOVWUreg x)
+ // cond: zeroUpper32Bits(x, 3)
+ // result: x
+ for {
+ x := v_0
+ if !(zeroUpper32Bits(x, 3)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
// match: (MOVWUreg (SLLconst [lc] x))
// cond: lc >= 32
// result: (MOVDconst [0])
v.AuxInt = int64ToAuxInt(int64(int32(c)))
return true
}
+ // match: (MOVWreg <t> (ANDconst x [c]))
+ // cond: uint64(c) & uint64(0xffffffff80000000) == 0
+ // result: (ANDconst <t> x [c])
+ for {
+ t := v.Type
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(uint64(c)&uint64(0xffffffff80000000) == 0) {
+ break
+ }
+ v.reset(OpARM64ANDconst)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
// match: (MOVWreg (SLLconst [lc] x))
// cond: lc < 32
// result: (SBFIZ [armBFAuxInt(lc, 32-lc)] x)
package codegen
+import "math/bits"
+
var sval64 [8]int64
var sval32 [8]int32
var sval16 [8]int16
}
return false
}
+
+// no unsigned extension following 32-bit ops
+
+// noUnsignEXT checks that 32-bit ops which already zero the upper 32
+// bits of their destination register need no extra MOVWU
+// (zero-extension) when converted to uint64. The `arm64:` comments are
+// assembly-pattern checks consumed by the codegen test harness: a
+// quoted name must appear, a -"..." name must not.
+func noUnsignEXT(t1, t2, t3, t4 uint32, k int64) uint64 {
+	var ret uint64
+
+	// arm64:"RORW",-"MOVWU"
+	ret += uint64(bits.RotateLeft32(t1, 7))
+
+	// arm64:"MULW",-"MOVWU"
+	ret *= uint64(t1 * t2)
+
+	// arm64:"MNEGW",-"MOVWU"
+	ret += uint64(-t1 * t3)
+
+	// arm64:"UDIVW",-"MOVWU"
+	ret += uint64(t1 / t4)
+
+	// arm64:-"MOVWU"
+	ret += uint64(t2 % t3)
+
+	// arm64:"MSUBW",-"MOVWU"
+	ret += uint64(t1 - t2*t3)
+
+	// arm64:"MADDW",-"MOVWU"
+	ret += uint64(t3*t4 + t2)
+
+	// arm64:"REVW",-"MOVWU"
+	ret += uint64(bits.ReverseBytes32(t1))
+
+	// arm64:"RBITW",-"MOVWU"
+	ret += uint64(bits.Reverse32(t1))
+
+	// arm64:"CLZW",-"MOVWU"
+	ret += uint64(bits.LeadingZeros32(t1))
+
+	// arm64:"REV16W",-"MOVWU"
+	ret += uint64(((t1 & 0xff00ff00) >> 8) | ((t1 & 0x00ff00ff) << 8))
+
+	// arm64:"EXTRW",-"MOVWU"
+	ret += uint64((t1 << 25) | (t2 >> 7))
+
+	return ret
+}
+
+// no sign extension when the upper bits of the result are zero
+
+// noSignEXT checks that when a value is ANDed with a constant that
+// clears the sign bit (and everything above it) at the narrow width,
+// the subsequent sign-extension instruction (MOVW/MOVH/MOVB) is
+// omitted — the extension cannot change any bits.
+func noSignEXT(x int) int64 {
+	t1 := int32(x)
+
+	var ret int64
+
+	// arm64:-"MOVW"
+	ret += int64(t1 & 1)
+
+	// arm64:-"MOVW"
+	ret += int64(int32(x & 0x7fffffff))
+
+	// arm64:-"MOVH"
+	ret += int64(int16(x & 0x7fff))
+
+	// arm64:-"MOVB"
+	ret += int64(int8(x & 0x7f))
+
+	return ret
+}
+
+// corner cases where sign extension must not be omitted
+
+// shouldSignEXT checks the corner cases where the sign-extension
+// instruction must be kept: the AND mask fails the rewrite rule's
+// condition (it has bits set at or above the narrow sign-bit position
+// in the full 64-bit constant), so the compiler may not drop the
+// MOVW/MOVH/MOVB.
+func shouldSignEXT(x int) int64 {
+	t1 := int32(x)
+
+	var ret int64
+
+	// -1 clears nothing, so the value's sign bit may be set.
+	// arm64:"MOVW"
+	ret += int64(t1 & (-1))
+
+	// Mask keeps exactly the 32-bit sign bit.
+	// arm64:"MOVW"
+	ret += int64(int32(x & 0x80000000))
+
+	// Upper-32 bits of the 64-bit mask constant are nonzero,
+	// which fails the rule's uint64(c)&0xffffffff80000000 == 0 check.
+	// arm64:"MOVW"
+	ret += int64(int32(x & 0x1100000011111111))
+
+	// arm64:"MOVH"
+	ret += int64(int16(x & 0x1100000000001111))
+
+	// arm64:"MOVB"
+	ret += int64(int8(x & 0x1100000000000011))
+
+	return ret
+
+}