(CMPB (MOVLconst [c]) x) => (InvertFlags (CMPBconst x [int8(c)]))
// Canonicalize the order of arguments to comparisons - helps with CSE.
-(CMP(L|W|B) x y) && x.ID > y.ID => (InvertFlags (CMP(L|W|B) y x))
+(CMP(L|W|B) x y) && canonLessThan(x,y) => (InvertFlags (CMP(L|W|B) y x))
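Canonicalizing operand order matters because the same pair of values is often compared in both directions within one function; once one of the two compares is rewritten into the canonical CMP plus InvertFlags for the swapped user, CSE can merge the two flag-producing compares into one. The rule itself is unchanged in spirit; only the predicate that picks the canonical order is refined from raw value IDs to canonLessThan. A minimal Go-source sketch of the situation (hypothetical example, not taken from the compiler's test suite):

// order compares a and b in both directions. Lowered naively this needs
// CMPL a, b and CMPL b, a; with the canonicalization rule, whichever compare
// is in non-canonical order becomes InvertFlags of the other, so CSE keeps
// a single CMPL.
func order(a, b int32) int {
	if a < b {
		return -1
	}
	if b < a {
		return 1
	}
	return 0
}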
// strength reduction
// Assumes the following costs, from https://gmplib.org/~tege/x86-timing.pdf:
(CMPB (MOVLconst [c]) x) => (InvertFlags (CMPBconst x [int8(c)]))
// Canonicalize the order of arguments to comparisons - helps with CSE.
-(CMP(Q|L|W|B) x y) && x.ID > y.ID => (InvertFlags (CMP(Q|L|W|B) y x))
+(CMP(Q|L|W|B) x y) && canonLessThan(x,y) => (InvertFlags (CMP(Q|L|W|B) y x))
// Using MOVZX instead of AND is cheaper.
(AND(Q|L)const [ 0xFF] x) => (MOVBQZX x)
(TEQ x (MOVWconst [c])) => (TEQconst [c] x)
// Canonicalize the order of arguments to comparisons - helps with CSE.
-(CMP x y) && x.ID > y.ID => (InvertFlags (CMP y x))
+(CMP x y) && canonLessThan(x,y) => (InvertFlags (CMP y x))
// don't extend after proper load
// MOVWreg instruction is not emitted if src and dst registers are the same, but it ensures the type.
(CMPW (MOVDconst [c]) x) => (InvertFlags (CMPWconst [int32(c)] x))
// Canonicalize the order of arguments to comparisons - helps with CSE.
-((CMP|CMPW) x y) && x.ID > y.ID => (InvertFlags ((CMP|CMPW) y x))
+((CMP|CMPW) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW) y x))
// mul-neg => mneg
(NEG (MUL x y)) => (MNEG x y)
(CMPWU (MOVDconst [c]) y) && isU16Bit(c) => (InvertFlags (CMPWUconst y [int32(c)]))
// Canonicalize the order of arguments to comparisons - helps with CSE.
-((CMP|CMPW|CMPU|CMPWU) x y) && x.ID > y.ID => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
+((CMP|CMPW|CMPU|CMPWU) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
// ISEL auxInt values 0=LT 1=GT 2=EQ arg2 ? arg0 : arg1
// ISEL auxInt values 4=GE 5=LE 6=NE arg2 ? arg1 : arg0
=> (RISBGZ x {s390x.NewRotateParams(r.Start, r.Start, -r.Start&63)})
// Canonicalize the order of arguments to comparisons - helps with CSE.
-((CMP|CMPW|CMPU|CMPWU) x y) && x.ID > y.ID => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
+((CMP|CMPW|CMPU|CMPWU) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
// Use sign/zero extend instead of RISBGZ.
(RISBGZ x {r}) && r == s390x.NewRotateParams(56, 63, 0) => (MOVBZreg x)
return v.AuxInt != 0
}
+// canonLessThan returns whether x is "ordered" less than y, for purposes of normalizing
+// generated code as much as possible.
+func canonLessThan(x, y *Value) bool {
+ if x.Op != y.Op {
+ return x.Op < y.Op
+ }
+ if !x.Pos.SameFileAndLine(y.Pos) {
+ return x.Pos.Before(y.Pos)
+ }
+ return x.ID < y.ID
+}
+
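canonLessThan orders values by opcode first, then by source position, and falls back to the value ID only as a final tie-break, so the chosen operand order is less sensitive to incidental ID assignment than the old x.ID > y.ID test. A self-contained sketch of the same layered tie-breaking, using simplified stand-in types rather than *ssa.Value (hypothetical, for illustration only; it assumes Pos.Before orders positions by file and then line):

// value is a stand-in with just the fields the ordering consults; the real
// code compares Op, Pos (via SameFileAndLine/Before), and ID.
type value struct {
	op   int    // stand-in for Op
	file string // stand-in for the file part of Pos
	line int    // stand-in for the line part of Pos
	id   int    // stand-in for ID
}

// less mirrors canonLessThan's layering: opcode, then position, then ID.
func less(x, y value) bool {
	if x.op != y.op {
		return x.op < y.op
	}
	if x.file != y.file {
		return x.file < y.file
	}
	if x.line != y.line {
		return x.line < y.line
	}
	return x.id < y.id
}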
// truncate64Fto32F converts a float64 value to a float32 preserving the bit pattern
// of the mantissa. It will panic if the truncation results in lost information.
func truncate64Fto32F(f float64) float32 {
return true
}
// match: (CMPB x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMPB y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(Op386InvertFlags)
return true
}
// match: (CMPL x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMPL y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(Op386InvertFlags)
return true
}
// match: (CMPW x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMPW y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(Op386InvertFlags)
return true
}
// match: (CMPB x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMPB y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(OpAMD64InvertFlags)
return true
}
// match: (CMPL x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMPL y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(OpAMD64InvertFlags)
return true
}
// match: (CMPQ x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMPQ y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(OpAMD64InvertFlags)
return true
}
// match: (CMPW x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMPW y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(OpAMD64InvertFlags)
return true
}
// match: (CMP x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMP y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(OpARMInvertFlags)
return true
}
// match: (CMP x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMP y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(OpARM64InvertFlags)
return true
}
// match: (CMPW x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMPW y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(OpARM64InvertFlags)
return true
}
// match: (CMP x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMP y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(OpPPC64InvertFlags)
return true
}
// match: (CMPU x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMPU y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(OpPPC64InvertFlags)
return true
}
// match: (CMPW x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMPW y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(OpPPC64InvertFlags)
return true
}
// match: (CMPWU x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMPWU y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(OpPPC64InvertFlags)
return true
}
// match: (CMP x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMP y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(OpS390XInvertFlags)
return true
}
// match: (CMPU x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMPU y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(OpS390XInvertFlags)
return true
}
// match: (CMPW x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMPW y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(OpS390XInvertFlags)
return true
}
// match: (CMPWU x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMPWU y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(OpS390XInvertFlags)
if x < y {
x = -y
}
- // amd64:"CMOVQCS"
+ // amd64:"CMOVQ(HI|CS)"
// arm64:"CSEL\t(LO|HI)"
// wasm:"Select"
return x
if x < y {
x = -y
}
- // amd64:"CMOVLCS"
+ // amd64:"CMOVL(HI|CS)"
// arm64:"CSEL\t(LO|HI)"
// wasm:"Select"
return x
if x < y {
x = -y
}
- // amd64:"CMOVWCS"
+ // amd64:"CMOVW(HI|CS)"
// arm64:"CSEL\t(LO|HI)"
// wasm:"Select"
return x
}
func IndexString(x string, i int) byte {
- // amd64:`CMOVQCC`
+ // amd64:`CMOVQLS`
return x[i]
}
func IndexSlice(x []float64, i int) float64 {
- // amd64:`CMOVQCC`
+ // amd64:`CMOVQLS`
return x[i]
}
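The amd64 expectations change because the canonicalization may now swap the operands of the flag-producing compare, which exchanges the condition the CMOV consumes: CS (unsigned below) pairs with HI (unsigned above), and CC (unsigned at-or-above) pairs with LS (unsigned at-or-below). The condmove cases accept either spelling, while the index cases now match the swapped form. A hypothetical pair of helpers (not from the test suite) showing the source-level identity behind that swap:

// below and above ask for the same unsigned test with operands swapped, so
// after canonicalization they can share one compare; the flag consumers then
// use complementary unsigned conditions (CS vs HI, and likewise CC vs LS).
func below(x, y uint64) bool { return x < y }
func above(x, y uint64) bool { return y > x }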