}
break
}
- // match: (ADDL (SHLLconst x [c]) (SHRLconst x [d]))
- // cond: d==32-c
- // result: (ROLLconst x [c])
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHLLconst {
- continue
- }
- c := auxIntToInt8(v_0.AuxInt)
- x := v_0.Args[0]
- if v_1.Op != OpAMD64SHRLconst {
- continue
- }
- d := auxIntToInt8(v_1.AuxInt)
- if x != v_1.Args[0] || !(d == 32-c) {
- continue
- }
- v.reset(OpAMD64ROLLconst)
- v.AuxInt = int8ToAuxInt(c)
- v.AddArg(x)
- return true
- }
- break
- }
- // match: (ADDL <t> (SHLLconst x [c]) (SHRWconst x [d]))
- // cond: d==16-c && c < 16 && t.Size() == 2
- // result: (ROLWconst x [c])
- for {
- t := v.Type
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHLLconst {
- continue
- }
- c := auxIntToInt8(v_0.AuxInt)
- x := v_0.Args[0]
- if v_1.Op != OpAMD64SHRWconst {
- continue
- }
- d := auxIntToInt8(v_1.AuxInt)
- if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) {
- continue
- }
- v.reset(OpAMD64ROLWconst)
- v.AuxInt = int8ToAuxInt(c)
- v.AddArg(x)
- return true
- }
- break
- }
- // match: (ADDL <t> (SHLLconst x [c]) (SHRBconst x [d]))
- // cond: d==8-c && c < 8 && t.Size() == 1
- // result: (ROLBconst x [c])
- for {
- t := v.Type
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHLLconst {
- continue
- }
- c := auxIntToInt8(v_0.AuxInt)
- x := v_0.Args[0]
- if v_1.Op != OpAMD64SHRBconst {
- continue
- }
- d := auxIntToInt8(v_1.AuxInt)
- if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) {
- continue
- }
- v.reset(OpAMD64ROLBconst)
- v.AuxInt = int8ToAuxInt(c)
- v.AddArg(x)
- return true
- }
- break
- }
// match: (ADDL x (SHLLconst [3] y))
// result: (LEAL8 x y)
for {
}
break
}
- // match: (ADDQ (SHLQconst x [c]) (SHRQconst x [d]))
- // cond: d==64-c
- // result: (ROLQconst x [c])
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHLQconst {
- continue
- }
- c := auxIntToInt8(v_0.AuxInt)
- x := v_0.Args[0]
- if v_1.Op != OpAMD64SHRQconst {
- continue
- }
- d := auxIntToInt8(v_1.AuxInt)
- if x != v_1.Args[0] || !(d == 64-c) {
- continue
- }
- v.reset(OpAMD64ROLQconst)
- v.AuxInt = int8ToAuxInt(c)
- v.AddArg(x)
- return true
- }
- break
- }
// match: (ADDQ x (SHLQconst [3] y))
// result: (LEAQ8 x y)
for {
func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
- // match: (CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32])
- // result: (FlagLT_ULT)
- for {
- if auxIntToInt32(v.AuxInt) != 32 || v_0.Op != OpAMD64NEGQ {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_0_0.AuxInt) != -16 {
- break
- }
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_0_0.AuxInt) != 15 {
- break
- }
- v.reset(OpAMD64FlagLT_ULT)
- return true
- }
- // match: (CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst [7] _))) [32])
- // result: (FlagLT_ULT)
- for {
- if auxIntToInt32(v.AuxInt) != 32 || v_0.Op != OpAMD64NEGQ {
- break
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_0_0.AuxInt) != -8 {
- break
- }
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_0_0.AuxInt) != 7 {
- break
- }
- v.reset(OpAMD64FlagLT_ULT)
- return true
- }
// match: (CMPQconst (MOVQconst [x]) [y])
// cond: x==int64(y)
// result: (FlagEQ)
}
break
}
- // match: (ORL (SHLLconst x [c]) (SHRLconst x [d]))
- // cond: d==32-c
- // result: (ROLLconst x [c])
+ // match: (ORL x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORL x0:(MOVBload [i0] {s} p mem) sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)))
+ // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHLLconst {
+ x0 := v_0
+ if x0.Op != OpAMD64MOVBload {
continue
}
- c := auxIntToInt8(v_0.AuxInt)
- x := v_0.Args[0]
- if v_1.Op != OpAMD64SHRLconst {
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 {
continue
}
- d := auxIntToInt8(v_1.AuxInt)
- if x != v_1.Args[0] || !(d == 32-c) {
+ x1 := sh.Args[0]
+ if x1.Op != OpAMD64MOVBload {
continue
}
- v.reset(OpAMD64ROLLconst)
- v.AuxInt = int8ToAuxInt(c)
- v.AddArg(x)
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
return true
}
break
}
- // match: (ORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
- // cond: d==16-c && c < 16 && t.Size() == 2
- // result: (ROLWconst x [c])
+ // match: (ORL x0:(MOVBload [i] {s} p0 mem) sh:(SHLLconst [8] x1:(MOVBload [i] {s} p1 mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p0 mem)
for {
- t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHLLconst {
+ x0 := v_0
+ if x0.Op != OpAMD64MOVBload {
continue
}
- c := auxIntToInt8(v_0.AuxInt)
- x := v_0.Args[0]
- if v_1.Op != OpAMD64SHRWconst {
+ i := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p0 := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 {
continue
}
- d := auxIntToInt8(v_1.AuxInt)
- if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) {
+ x1 := sh.Args[0]
+ if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
continue
}
- v.reset(OpAMD64ROLWconst)
- v.AuxInt = int8ToAuxInt(c)
- v.AddArg(x)
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p0, mem)
return true
}
break
}
- // match: (ORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
- // cond: d==8-c && c < 8 && t.Size() == 1
- // result: (ROLBconst x [c])
+ // match: (ORL x0:(MOVWload [i0] {s} p mem) sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)))
+ // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
for {
- t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHLLconst {
+ x0 := v_0
+ if x0.Op != OpAMD64MOVWload {
continue
}
- c := auxIntToInt8(v_0.AuxInt)
- x := v_0.Args[0]
- if v_1.Op != OpAMD64SHRBconst {
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 {
continue
}
- d := auxIntToInt8(v_1.AuxInt)
- if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) {
+ x1 := sh.Args[0]
+ if x1.Op != OpAMD64MOVWload {
continue
}
- v.reset(OpAMD64ROLBconst)
- v.AuxInt = int8ToAuxInt(c)
- v.AddArg(x)
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
return true
}
break
}
- // match: (ORL (SHLL x y) (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
- // result: (ROLL x y)
+ // match: (ORL x0:(MOVWload [i] {s} p0 mem) sh:(SHLLconst [16] x1:(MOVWload [i] {s} p1 mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVLload [i] {s} p0 mem)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHLL {
+ x0 := v_0
+ if x0.Op != OpAMD64MOVWload {
continue
}
- y := v_0.Args[1]
- x := v_0.Args[0]
- if v_1.Op != OpAMD64ANDL {
+ i := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p0 := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 {
continue
}
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
- if v_1_0.Op != OpAMD64SHRL {
- continue
- }
- _ = v_1_0.Args[1]
- if x != v_1_0.Args[0] {
- continue
- }
- v_1_0_1 := v_1_0.Args[1]
- if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
- continue
- }
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
- continue
- }
- v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpAMD64NEGQ {
- continue
- }
- v_1_1_0_0_0 := v_1_1_0_0.Args[0]
- if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
- continue
- }
- v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
- if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
- continue
- }
- v.reset(OpAMD64ROLL)
- v.AddArg2(x, y)
- return true
+ x1 := sh.Args[0]
+ if x1.Op != OpAMD64MOVWload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
}
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p0, mem)
+ return true
}
break
}
- // match: (ORL (SHLL x y) (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
- // result: (ROLL x y)
+ // match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y))
+ // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHLL {
+ s1 := v_0
+ if s1.Op != OpAMD64SHLLconst {
continue
}
- y := v_0.Args[1]
- x := v_0.Args[0]
- if v_1.Op != OpAMD64ANDL {
- continue
- }
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
- if v_1_0.Op != OpAMD64SHRL {
- continue
- }
- _ = v_1_0.Args[1]
- if x != v_1_0.Args[0] {
- continue
- }
- v_1_0_1 := v_1_0.Args[1]
- if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
- continue
- }
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
- continue
- }
- v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpAMD64NEGL {
- continue
- }
- v_1_1_0_0_0 := v_1_1_0_0.Args[0]
- if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
- continue
- }
- v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
- if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
- continue
- }
- v.reset(OpAMD64ROLL)
- v.AddArg2(x, y)
- return true
- }
- }
- break
- }
- // match: (ORL (SHRL x y) (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
- // result: (RORL x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHRL {
- continue
- }
- y := v_0.Args[1]
- x := v_0.Args[0]
- if v_1.Op != OpAMD64ANDL {
- continue
- }
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
- if v_1_0.Op != OpAMD64SHLL {
- continue
- }
- _ = v_1_0.Args[1]
- if x != v_1_0.Args[0] {
- continue
- }
- v_1_0_1 := v_1_0.Args[1]
- if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
- continue
- }
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
- continue
- }
- v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpAMD64NEGQ {
- continue
- }
- v_1_1_0_0_0 := v_1_1_0_0.Args[0]
- if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
- continue
- }
- v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
- if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
- continue
- }
- v.reset(OpAMD64RORL)
- v.AddArg2(x, y)
- return true
- }
- }
- break
- }
- // match: (ORL (SHRL x y) (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
- // result: (RORL x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHRL {
- continue
- }
- y := v_0.Args[1]
- x := v_0.Args[0]
- if v_1.Op != OpAMD64ANDL {
- continue
- }
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
- if v_1_0.Op != OpAMD64SHLL {
- continue
- }
- _ = v_1_0.Args[1]
- if x != v_1_0.Args[0] {
- continue
- }
- v_1_0_1 := v_1_0.Args[1]
- if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
- continue
- }
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
- continue
- }
- v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpAMD64NEGL {
- continue
- }
- v_1_1_0_0_0 := v_1_1_0_0.Args[0]
- if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
- continue
- }
- v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
- if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
- continue
- }
- v.reset(OpAMD64RORL)
- v.AddArg2(x, y)
- return true
- }
- }
- break
- }
- // match: (ORL (SHLXL x y) (ANDL (SHRXL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
- // result: (ROLL x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHLXL {
- continue
- }
- y := v_0.Args[1]
- x := v_0.Args[0]
- if v_1.Op != OpAMD64ANDL {
- continue
- }
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
- if v_1_0.Op != OpAMD64SHRXL {
- continue
- }
- _ = v_1_0.Args[1]
- if x != v_1_0.Args[0] {
- continue
- }
- v_1_0_1 := v_1_0.Args[1]
- if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
- continue
- }
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
- continue
- }
- v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpAMD64NEGQ {
- continue
- }
- v_1_1_0_0_0 := v_1_1_0_0.Args[0]
- if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
- continue
- }
- v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
- if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
- continue
- }
- v.reset(OpAMD64ROLL)
- v.AddArg2(x, y)
- return true
- }
- }
- break
- }
- // match: (ORL (SHLXL x y) (ANDL (SHRXL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
- // result: (ROLL x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHLXL {
- continue
- }
- y := v_0.Args[1]
- x := v_0.Args[0]
- if v_1.Op != OpAMD64ANDL {
- continue
- }
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
- if v_1_0.Op != OpAMD64SHRXL {
- continue
- }
- _ = v_1_0.Args[1]
- if x != v_1_0.Args[0] {
- continue
- }
- v_1_0_1 := v_1_0.Args[1]
- if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
- continue
- }
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
- continue
- }
- v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpAMD64NEGL {
- continue
- }
- v_1_1_0_0_0 := v_1_1_0_0.Args[0]
- if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
- continue
- }
- v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
- if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
- continue
- }
- v.reset(OpAMD64ROLL)
- v.AddArg2(x, y)
- return true
- }
- }
- break
- }
- // match: (ORL (SHRXL x y) (ANDL (SHLXL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
- // result: (RORL x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHRXL {
- continue
- }
- y := v_0.Args[1]
- x := v_0.Args[0]
- if v_1.Op != OpAMD64ANDL {
- continue
- }
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
- if v_1_0.Op != OpAMD64SHLXL {
- continue
- }
- _ = v_1_0.Args[1]
- if x != v_1_0.Args[0] {
- continue
- }
- v_1_0_1 := v_1_0.Args[1]
- if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
- continue
- }
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
- continue
- }
- v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpAMD64NEGQ {
- continue
- }
- v_1_1_0_0_0 := v_1_1_0_0.Args[0]
- if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
- continue
- }
- v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
- if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
- continue
- }
- v.reset(OpAMD64RORL)
- v.AddArg2(x, y)
- return true
- }
- }
- break
- }
- // match: (ORL (SHRXL x y) (ANDL (SHLXL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
- // result: (RORL x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHRXL {
- continue
- }
- y := v_0.Args[1]
- x := v_0.Args[0]
- if v_1.Op != OpAMD64ANDL {
- continue
- }
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
- if v_1_0.Op != OpAMD64SHLXL {
- continue
- }
- _ = v_1_0.Args[1]
- if x != v_1_0.Args[0] {
- continue
- }
- v_1_0_1 := v_1_0.Args[1]
- if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
- continue
- }
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
- continue
- }
- v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpAMD64NEGL {
- continue
- }
- v_1_1_0_0_0 := v_1_1_0_0.Args[0]
- if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
- continue
- }
- v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
- if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
- continue
- }
- v.reset(OpAMD64RORL)
- v.AddArg2(x, y)
- return true
- }
- }
- break
- }
- // match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))))
- // cond: v.Type.Size() == 2
- // result: (ROLW x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHLL {
- continue
- }
- _ = v_0.Args[1]
- x := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
- continue
- }
- y := v_0_1.Args[0]
- if v_1.Op != OpAMD64ANDL {
- continue
- }
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
- if v_1_0.Op != OpAMD64SHRW {
- continue
- }
- _ = v_1_0.Args[1]
- if x != v_1_0.Args[0] {
- continue
- }
- v_1_0_1 := v_1_0.Args[1]
- if v_1_0_1.Op != OpAMD64NEGQ {
- continue
- }
- v_1_0_1_0 := v_1_0_1.Args[0]
- if v_1_0_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -16 {
- continue
- }
- v_1_0_1_0_0 := v_1_0_1_0.Args[0]
- if v_1_0_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 15 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
- continue
- }
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 16 {
- continue
- }
- v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpAMD64NEGQ {
- continue
- }
- v_1_1_0_0_0 := v_1_1_0_0.Args[0]
- if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -16 {
- continue
- }
- v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
- if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 15 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 2) {
- continue
- }
- v.reset(OpAMD64ROLW)
- v.AddArg2(x, y)
- return true
- }
- }
- break
- }
- // match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))))
- // cond: v.Type.Size() == 2
- // result: (ROLW x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHLL {
- continue
- }
- _ = v_0.Args[1]
- x := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
- continue
- }
- y := v_0_1.Args[0]
- if v_1.Op != OpAMD64ANDL {
- continue
- }
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
- if v_1_0.Op != OpAMD64SHRW {
- continue
- }
- _ = v_1_0.Args[1]
- if x != v_1_0.Args[0] {
- continue
- }
- v_1_0_1 := v_1_0.Args[1]
- if v_1_0_1.Op != OpAMD64NEGL {
- continue
- }
- v_1_0_1_0 := v_1_0_1.Args[0]
- if v_1_0_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -16 {
- continue
- }
- v_1_0_1_0_0 := v_1_0_1_0.Args[0]
- if v_1_0_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 15 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
- continue
- }
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 16 {
- continue
- }
- v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpAMD64NEGL {
- continue
- }
- v_1_1_0_0_0 := v_1_1_0_0.Args[0]
- if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -16 {
- continue
- }
- v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
- if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 15 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 2) {
- continue
- }
- v.reset(OpAMD64ROLW)
- v.AddArg2(x, y)
- return true
- }
- }
- break
- }
- // match: (ORL (SHRW x (ANDQconst y [15])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))))
- // cond: v.Type.Size() == 2
- // result: (RORW x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHRW {
- continue
- }
- _ = v_0.Args[1]
- x := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
- continue
- }
- y := v_0_1.Args[0]
- if v_1.Op != OpAMD64SHLL {
- continue
- }
- _ = v_1.Args[1]
- if x != v_1.Args[0] {
- continue
- }
- v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpAMD64NEGQ {
- continue
- }
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0.AuxInt) != -16 {
- continue
- }
- v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 15 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 2) {
- continue
- }
- v.reset(OpAMD64RORW)
- v.AddArg2(x, y)
- return true
- }
- break
- }
- // match: (ORL (SHRW x (ANDLconst y [15])) (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))))
- // cond: v.Type.Size() == 2
- // result: (RORW x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHRW {
- continue
- }
- _ = v_0.Args[1]
- x := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
- continue
- }
- y := v_0_1.Args[0]
- if v_1.Op != OpAMD64SHLL {
- continue
- }
- _ = v_1.Args[1]
- if x != v_1.Args[0] {
- continue
- }
- v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpAMD64NEGL {
- continue
- }
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0.AuxInt) != -16 {
- continue
- }
- v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 15 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 2) {
- continue
- }
- v.reset(OpAMD64RORW)
- v.AddArg2(x, y)
- return true
- }
- break
- }
- // match: (ORL (SHLXL x (ANDQconst y [15])) (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))))
- // cond: v.Type.Size() == 2
- // result: (ROLW x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHLXL {
- continue
- }
- _ = v_0.Args[1]
- x := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
- continue
- }
- y := v_0_1.Args[0]
- if v_1.Op != OpAMD64ANDL {
- continue
- }
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
- if v_1_0.Op != OpAMD64SHRW {
- continue
- }
- _ = v_1_0.Args[1]
- if x != v_1_0.Args[0] {
- continue
- }
- v_1_0_1 := v_1_0.Args[1]
- if v_1_0_1.Op != OpAMD64NEGQ {
- continue
- }
- v_1_0_1_0 := v_1_0_1.Args[0]
- if v_1_0_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -16 {
- continue
- }
- v_1_0_1_0_0 := v_1_0_1_0.Args[0]
- if v_1_0_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 15 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
- continue
- }
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 16 {
- continue
- }
- v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpAMD64NEGQ {
- continue
- }
- v_1_1_0_0_0 := v_1_1_0_0.Args[0]
- if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -16 {
- continue
- }
- v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
- if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 15 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 2) {
- continue
- }
- v.reset(OpAMD64ROLW)
- v.AddArg2(x, y)
- return true
- }
- }
- break
- }
- // match: (ORL (SHLXL x (ANDLconst y [15])) (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))))
- // cond: v.Type.Size() == 2
- // result: (ROLW x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHLXL {
- continue
- }
- _ = v_0.Args[1]
- x := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
- continue
- }
- y := v_0_1.Args[0]
- if v_1.Op != OpAMD64ANDL {
- continue
- }
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
- if v_1_0.Op != OpAMD64SHRW {
- continue
- }
- _ = v_1_0.Args[1]
- if x != v_1_0.Args[0] {
- continue
- }
- v_1_0_1 := v_1_0.Args[1]
- if v_1_0_1.Op != OpAMD64NEGL {
- continue
- }
- v_1_0_1_0 := v_1_0_1.Args[0]
- if v_1_0_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -16 {
- continue
- }
- v_1_0_1_0_0 := v_1_0_1_0.Args[0]
- if v_1_0_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 15 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
- continue
- }
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 16 {
- continue
- }
- v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpAMD64NEGL {
- continue
- }
- v_1_1_0_0_0 := v_1_1_0_0.Args[0]
- if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -16 {
- continue
- }
- v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
- if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 15 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 2) {
- continue
- }
- v.reset(OpAMD64ROLW)
- v.AddArg2(x, y)
- return true
- }
- }
- break
- }
- // match: (ORL (SHRW x (ANDQconst y [15])) (SHLXL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))))
- // cond: v.Type.Size() == 2
- // result: (RORW x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHRW {
- continue
- }
- _ = v_0.Args[1]
- x := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
- continue
- }
- y := v_0_1.Args[0]
- if v_1.Op != OpAMD64SHLXL {
- continue
- }
- _ = v_1.Args[1]
- if x != v_1.Args[0] {
- continue
- }
- v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpAMD64NEGQ {
- continue
- }
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0.AuxInt) != -16 {
- continue
- }
- v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 15 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 2) {
- continue
- }
- v.reset(OpAMD64RORW)
- v.AddArg2(x, y)
- return true
- }
- break
- }
- // match: (ORL (SHRW x (ANDLconst y [15])) (SHLXL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))))
- // cond: v.Type.Size() == 2
- // result: (RORW x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHRW {
- continue
- }
- _ = v_0.Args[1]
- x := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
- continue
- }
- y := v_0_1.Args[0]
- if v_1.Op != OpAMD64SHLXL {
- continue
- }
- _ = v_1.Args[1]
- if x != v_1.Args[0] {
- continue
- }
- v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpAMD64NEGL {
- continue
- }
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0.AuxInt) != -16 {
- continue
- }
- v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 15 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 2) {
- continue
- }
- v.reset(OpAMD64RORW)
- v.AddArg2(x, y)
- return true
- }
- break
- }
- // match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))))
- // cond: v.Type.Size() == 1
- // result: (ROLB x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHLL {
- continue
- }
- _ = v_0.Args[1]
- x := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
- continue
- }
- y := v_0_1.Args[0]
- if v_1.Op != OpAMD64ANDL {
- continue
- }
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
- if v_1_0.Op != OpAMD64SHRB {
- continue
- }
- _ = v_1_0.Args[1]
- if x != v_1_0.Args[0] {
- continue
- }
- v_1_0_1 := v_1_0.Args[1]
- if v_1_0_1.Op != OpAMD64NEGQ {
- continue
- }
- v_1_0_1_0 := v_1_0_1.Args[0]
- if v_1_0_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -8 {
- continue
- }
- v_1_0_1_0_0 := v_1_0_1_0.Args[0]
- if v_1_0_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 7 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
- continue
- }
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 8 {
- continue
- }
- v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpAMD64NEGQ {
- continue
- }
- v_1_1_0_0_0 := v_1_1_0_0.Args[0]
- if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -8 {
- continue
- }
- v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
- if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 7 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 1) {
- continue
- }
- v.reset(OpAMD64ROLB)
- v.AddArg2(x, y)
- return true
- }
- }
- break
- }
- // match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))))
- // cond: v.Type.Size() == 1
- // result: (ROLB x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHLL {
- continue
- }
- _ = v_0.Args[1]
- x := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
- continue
- }
- y := v_0_1.Args[0]
- if v_1.Op != OpAMD64ANDL {
- continue
- }
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
- if v_1_0.Op != OpAMD64SHRB {
- continue
- }
- _ = v_1_0.Args[1]
- if x != v_1_0.Args[0] {
- continue
- }
- v_1_0_1 := v_1_0.Args[1]
- if v_1_0_1.Op != OpAMD64NEGL {
- continue
- }
- v_1_0_1_0 := v_1_0_1.Args[0]
- if v_1_0_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -8 {
- continue
- }
- v_1_0_1_0_0 := v_1_0_1_0.Args[0]
- if v_1_0_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 7 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
- continue
- }
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 8 {
- continue
- }
- v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpAMD64NEGL {
- continue
- }
- v_1_1_0_0_0 := v_1_1_0_0.Args[0]
- if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -8 {
- continue
- }
- v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
- if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 7 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 1) {
- continue
- }
- v.reset(OpAMD64ROLB)
- v.AddArg2(x, y)
- return true
- }
- }
- break
- }
- // match: (ORL (SHRB x (ANDQconst y [ 7])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))))
- // cond: v.Type.Size() == 1
- // result: (RORB x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHRB {
- continue
- }
- _ = v_0.Args[1]
- x := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
- continue
- }
- y := v_0_1.Args[0]
- if v_1.Op != OpAMD64SHLL {
- continue
- }
- _ = v_1.Args[1]
- if x != v_1.Args[0] {
- continue
- }
- v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpAMD64NEGQ {
- continue
- }
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0.AuxInt) != -8 {
- continue
- }
- v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 7 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 1) {
- continue
- }
- v.reset(OpAMD64RORB)
- v.AddArg2(x, y)
- return true
- }
- break
- }
- // match: (ORL (SHRB x (ANDLconst y [ 7])) (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))))
- // cond: v.Type.Size() == 1
- // result: (RORB x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHRB {
- continue
- }
- _ = v_0.Args[1]
- x := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
- continue
- }
- y := v_0_1.Args[0]
- if v_1.Op != OpAMD64SHLL {
- continue
- }
- _ = v_1.Args[1]
- if x != v_1.Args[0] {
- continue
- }
- v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpAMD64NEGL {
- continue
- }
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0.AuxInt) != -8 {
- continue
- }
- v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 7 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 1) {
- continue
- }
- v.reset(OpAMD64RORB)
- v.AddArg2(x, y)
- return true
- }
- break
- }
- // match: (ORL (SHLXL x (ANDQconst y [ 7])) (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))))
- // cond: v.Type.Size() == 1
- // result: (ROLB x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHLXL {
- continue
- }
- _ = v_0.Args[1]
- x := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
- continue
- }
- y := v_0_1.Args[0]
- if v_1.Op != OpAMD64ANDL {
- continue
- }
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
- if v_1_0.Op != OpAMD64SHRB {
- continue
- }
- _ = v_1_0.Args[1]
- if x != v_1_0.Args[0] {
- continue
- }
- v_1_0_1 := v_1_0.Args[1]
- if v_1_0_1.Op != OpAMD64NEGQ {
- continue
- }
- v_1_0_1_0 := v_1_0_1.Args[0]
- if v_1_0_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -8 {
- continue
- }
- v_1_0_1_0_0 := v_1_0_1_0.Args[0]
- if v_1_0_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 7 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
- continue
- }
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 8 {
- continue
- }
- v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpAMD64NEGQ {
- continue
- }
- v_1_1_0_0_0 := v_1_1_0_0.Args[0]
- if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -8 {
- continue
- }
- v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
- if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 7 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 1) {
- continue
- }
- v.reset(OpAMD64ROLB)
- v.AddArg2(x, y)
- return true
- }
- }
- break
- }
- // match: (ORL (SHLXL x (ANDLconst y [ 7])) (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))))
- // cond: v.Type.Size() == 1
- // result: (ROLB x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHLXL {
- continue
- }
- _ = v_0.Args[1]
- x := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
- continue
- }
- y := v_0_1.Args[0]
- if v_1.Op != OpAMD64ANDL {
- continue
- }
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
- if v_1_0.Op != OpAMD64SHRB {
- continue
- }
- _ = v_1_0.Args[1]
- if x != v_1_0.Args[0] {
- continue
- }
- v_1_0_1 := v_1_0.Args[1]
- if v_1_0_1.Op != OpAMD64NEGL {
- continue
- }
- v_1_0_1_0 := v_1_0_1.Args[0]
- if v_1_0_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -8 {
- continue
- }
- v_1_0_1_0_0 := v_1_0_1_0.Args[0]
- if v_1_0_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 7 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
- continue
- }
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 8 {
- continue
- }
- v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpAMD64NEGL {
- continue
- }
- v_1_1_0_0_0 := v_1_1_0_0.Args[0]
- if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -8 {
- continue
- }
- v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
- if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 7 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 1) {
- continue
- }
- v.reset(OpAMD64ROLB)
- v.AddArg2(x, y)
- return true
- }
- }
- break
- }
- // match: (ORL (SHRB x (ANDQconst y [ 7])) (SHLXL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))))
- // cond: v.Type.Size() == 1
- // result: (RORB x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHRB {
- continue
- }
- _ = v_0.Args[1]
- x := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
- continue
- }
- y := v_0_1.Args[0]
- if v_1.Op != OpAMD64SHLXL {
- continue
- }
- _ = v_1.Args[1]
- if x != v_1.Args[0] {
- continue
- }
- v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpAMD64NEGQ {
- continue
- }
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0.AuxInt) != -8 {
- continue
- }
- v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 7 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 1) {
- continue
- }
- v.reset(OpAMD64RORB)
- v.AddArg2(x, y)
- return true
- }
- break
- }
- // match: (ORL (SHRB x (ANDLconst y [ 7])) (SHLXL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))))
- // cond: v.Type.Size() == 1
- // result: (RORB x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHRB {
- continue
- }
- _ = v_0.Args[1]
- x := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
- continue
- }
- y := v_0_1.Args[0]
- if v_1.Op != OpAMD64SHLXL {
- continue
- }
- _ = v_1.Args[1]
- if x != v_1.Args[0] {
- continue
- }
- v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpAMD64NEGL {
- continue
- }
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0.AuxInt) != -8 {
- continue
- }
- v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 7 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 1) {
- continue
- }
- v.reset(OpAMD64RORB)
- v.AddArg2(x, y)
- return true
- }
- break
- }
- // match: (ORL x x)
- // result: x
- for {
- x := v_0
- if x != v_1 {
- break
- }
- v.copyOf(x)
- return true
- }
- // match: (ORL x0:(MOVBload [i0] {s} p mem) sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)))
- // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
- // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- x0 := v_0
- if x0.Op != OpAMD64MOVBload {
- continue
- }
- i0 := auxIntToInt32(x0.AuxInt)
- s := auxToSym(x0.Aux)
- mem := x0.Args[1]
- p := x0.Args[0]
- sh := v_1
- if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 {
- continue
- }
- x1 := sh.Args[0]
- if x1.Op != OpAMD64MOVBload {
- continue
- }
- i1 := auxIntToInt32(x1.AuxInt)
- if auxToSym(x1.Aux) != s {
- continue
- }
- _ = x1.Args[1]
- if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
- continue
- }
- b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
- v.copyOf(v0)
- v0.AuxInt = int32ToAuxInt(i0)
- v0.Aux = symToAux(s)
- v0.AddArg2(p, mem)
- return true
- }
- break
- }
- // match: (ORL x0:(MOVBload [i] {s} p0 mem) sh:(SHLLconst [8] x1:(MOVBload [i] {s} p1 mem)))
- // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
- // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p0 mem)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- x0 := v_0
- if x0.Op != OpAMD64MOVBload {
- continue
- }
- i := auxIntToInt32(x0.AuxInt)
- s := auxToSym(x0.Aux)
- mem := x0.Args[1]
- p0 := x0.Args[0]
- sh := v_1
- if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 {
- continue
- }
- x1 := sh.Args[0]
- if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
- continue
- }
- _ = x1.Args[1]
- p1 := x1.Args[0]
- if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
- continue
- }
- b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
- v.copyOf(v0)
- v0.AuxInt = int32ToAuxInt(i)
- v0.Aux = symToAux(s)
- v0.AddArg2(p0, mem)
- return true
- }
- break
- }
- // match: (ORL x0:(MOVWload [i0] {s} p mem) sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)))
- // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
- // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- x0 := v_0
- if x0.Op != OpAMD64MOVWload {
- continue
- }
- i0 := auxIntToInt32(x0.AuxInt)
- s := auxToSym(x0.Aux)
- mem := x0.Args[1]
- p := x0.Args[0]
- sh := v_1
- if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 {
- continue
- }
- x1 := sh.Args[0]
- if x1.Op != OpAMD64MOVWload {
- continue
- }
- i1 := auxIntToInt32(x1.AuxInt)
- if auxToSym(x1.Aux) != s {
- continue
- }
- _ = x1.Args[1]
- if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
- continue
- }
- b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
- v.copyOf(v0)
- v0.AuxInt = int32ToAuxInt(i0)
- v0.Aux = symToAux(s)
- v0.AddArg2(p, mem)
- return true
- }
- break
- }
- // match: (ORL x0:(MOVWload [i] {s} p0 mem) sh:(SHLLconst [16] x1:(MOVWload [i] {s} p1 mem)))
- // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
- // result: @mergePoint(b,x0,x1) (MOVLload [i] {s} p0 mem)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- x0 := v_0
- if x0.Op != OpAMD64MOVWload {
- continue
- }
- i := auxIntToInt32(x0.AuxInt)
- s := auxToSym(x0.Aux)
- mem := x0.Args[1]
- p0 := x0.Args[0]
- sh := v_1
- if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 {
- continue
- }
- x1 := sh.Args[0]
- if x1.Op != OpAMD64MOVWload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
- continue
- }
- _ = x1.Args[1]
- p1 := x1.Args[0]
- if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
- continue
- }
- b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
- v.copyOf(v0)
- v0.AuxInt = int32ToAuxInt(i)
- v0.Aux = symToAux(s)
- v0.AddArg2(p0, mem)
- return true
- }
- break
- }
- // match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y))
- // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
- // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- s1 := v_0
- if s1.Op != OpAMD64SHLLconst {
- continue
- }
- j1 := auxIntToInt8(s1.AuxInt)
- x1 := s1.Args[0]
- if x1.Op != OpAMD64MOVBload {
+ j1 := auxIntToInt8(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpAMD64MOVBload {
continue
}
i1 := auxIntToInt32(x1.AuxInt)
if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64ORLmodify)
- v.AuxInt = int32ToAuxInt(off1 + off2)
- v.Aux = symToAux(mergeSym(sym1, sym2))
- v.AddArg3(base, val, mem)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- typ := &b.Func.Config.Types
- // match: (ORQ (SHLQ (MOVQconst [1]) y) x)
- // result: (BTSQ x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHLQ {
- continue
- }
- y := v_0.Args[1]
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
- continue
- }
- x := v_1
- v.reset(OpAMD64BTSQ)
- v.AddArg2(x, y)
- return true
- }
- break
- }
- // match: (ORQ (SHLXQ (MOVQconst [1]) y) x)
- // result: (BTSQ x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHLXQ {
- continue
- }
- y := v_0.Args[1]
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
- continue
- }
- x := v_1
- v.reset(OpAMD64BTSQ)
- v.AddArg2(x, y)
- return true
- }
- break
- }
- // match: (ORQ (MOVQconst [c]) x)
- // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128
- // result: (BTSQconst [int8(log64(c))] x)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64MOVQconst {
- continue
- }
- c := auxIntToInt64(v_0.AuxInt)
- x := v_1
- if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) {
- continue
- }
- v.reset(OpAMD64BTSQconst)
- v.AuxInt = int8ToAuxInt(int8(log64(c)))
- v.AddArg(x)
- return true
- }
- break
- }
- // match: (ORQ x (MOVQconst [c]))
- // cond: is32Bit(c)
- // result: (ORQconst [int32(c)] x)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- x := v_0
- if v_1.Op != OpAMD64MOVQconst {
- continue
- }
- c := auxIntToInt64(v_1.AuxInt)
- if !(is32Bit(c)) {
- continue
- }
- v.reset(OpAMD64ORQconst)
- v.AuxInt = int32ToAuxInt(int32(c))
- v.AddArg(x)
- return true
- }
- break
- }
- // match: (ORQ x (MOVLconst [c]))
- // result: (ORQconst [c] x)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- x := v_0
- if v_1.Op != OpAMD64MOVLconst {
- continue
- }
- c := auxIntToInt32(v_1.AuxInt)
- v.reset(OpAMD64ORQconst)
- v.AuxInt = int32ToAuxInt(c)
- v.AddArg(x)
- return true
- }
- break
- }
- // match: (ORQ (SHLQconst x [c]) (SHRQconst x [d]))
- // cond: d==64-c
- // result: (ROLQconst x [c])
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHLQconst {
- continue
- }
- c := auxIntToInt8(v_0.AuxInt)
- x := v_0.Args[0]
- if v_1.Op != OpAMD64SHRQconst {
- continue
- }
- d := auxIntToInt8(v_1.AuxInt)
- if x != v_1.Args[0] || !(d == 64-c) {
- continue
- }
- v.reset(OpAMD64ROLQconst)
- v.AuxInt = int8ToAuxInt(c)
- v.AddArg(x)
- return true
- }
- break
- }
- // match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))))
- // result: (ROLQ x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHLQ {
- continue
- }
- y := v_0.Args[1]
- x := v_0.Args[0]
- if v_1.Op != OpAMD64ANDQ {
- continue
- }
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
- if v_1_0.Op != OpAMD64SHRQ {
- continue
- }
- _ = v_1_0.Args[1]
- if x != v_1_0.Args[0] {
- continue
- }
- v_1_0_1 := v_1_0.Args[1]
- if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
- continue
- }
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
- continue
- }
- v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpAMD64NEGQ {
- continue
- }
- v_1_1_0_0_0 := v_1_1_0_0.Args[0]
- if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
- continue
- }
- v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
- if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
- continue
- }
- v.reset(OpAMD64ROLQ)
- v.AddArg2(x, y)
- return true
- }
- }
- break
- }
- // match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))))
- // result: (ROLQ x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHLQ {
- continue
- }
- y := v_0.Args[1]
- x := v_0.Args[0]
- if v_1.Op != OpAMD64ANDQ {
- continue
- }
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
- if v_1_0.Op != OpAMD64SHRQ {
- continue
- }
- _ = v_1_0.Args[1]
- if x != v_1_0.Args[0] {
- continue
- }
- v_1_0_1 := v_1_0.Args[1]
- if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
- continue
- }
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
- continue
- }
- v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpAMD64NEGL {
- continue
- }
- v_1_1_0_0_0 := v_1_1_0_0.Args[0]
- if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
- continue
- }
- v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
- if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
- continue
- }
- v.reset(OpAMD64ROLQ)
- v.AddArg2(x, y)
- return true
- }
- }
- break
- }
- // match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))))
- // result: (RORQ x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHRQ {
- continue
- }
- y := v_0.Args[1]
- x := v_0.Args[0]
- if v_1.Op != OpAMD64ANDQ {
- continue
- }
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
- if v_1_0.Op != OpAMD64SHLQ {
- continue
- }
- _ = v_1_0.Args[1]
- if x != v_1_0.Args[0] {
- continue
- }
- v_1_0_1 := v_1_0.Args[1]
- if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
- continue
- }
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
- continue
- }
- v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpAMD64NEGQ {
- continue
- }
- v_1_1_0_0_0 := v_1_1_0_0.Args[0]
- if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
- continue
- }
- v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
- if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
- continue
- }
- v.reset(OpAMD64RORQ)
- v.AddArg2(x, y)
- return true
- }
- }
- break
- }
- // match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))))
- // result: (RORQ x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHRQ {
- continue
- }
- y := v_0.Args[1]
- x := v_0.Args[0]
- if v_1.Op != OpAMD64ANDQ {
- continue
- }
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
- if v_1_0.Op != OpAMD64SHLQ {
- continue
- }
- _ = v_1_0.Args[1]
- if x != v_1_0.Args[0] {
- continue
- }
- v_1_0_1 := v_1_0.Args[1]
- if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
- continue
- }
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
- continue
- }
- v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpAMD64NEGL {
- continue
- }
- v_1_1_0_0_0 := v_1_1_0_0.Args[0]
- if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
- continue
- }
- v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
- if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
- continue
- }
- v.reset(OpAMD64RORQ)
- v.AddArg2(x, y)
- return true
- }
- }
- break
+ v.reset(OpAMD64ORLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
}
- // match: (ORQ (SHLXQ x y) (ANDQ (SHRXQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))))
- // result: (ROLQ x y)
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ORQ (SHLQ (MOVQconst [1]) y) x)
+ // result: (BTSQ x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHLXQ {
+ if v_0.Op != OpAMD64SHLQ {
continue
}
y := v_0.Args[1]
- x := v_0.Args[0]
- if v_1.Op != OpAMD64ANDQ {
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
continue
}
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
- if v_1_0.Op != OpAMD64SHRXQ {
- continue
- }
- _ = v_1_0.Args[1]
- if x != v_1_0.Args[0] {
- continue
- }
- v_1_0_1 := v_1_0.Args[1]
- if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
- continue
- }
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
- continue
- }
- v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpAMD64NEGQ {
- continue
- }
- v_1_1_0_0_0 := v_1_1_0_0.Args[0]
- if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
- continue
- }
- v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
- if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
- continue
- }
- v.reset(OpAMD64ROLQ)
- v.AddArg2(x, y)
- return true
- }
+ x := v_1
+ v.reset(OpAMD64BTSQ)
+ v.AddArg2(x, y)
+ return true
}
break
}
- // match: (ORQ (SHLXQ x y) (ANDQ (SHRXQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))))
- // result: (ROLQ x y)
+ // match: (ORQ (SHLXQ (MOVQconst [1]) y) x)
+ // result: (BTSQ x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpAMD64SHLXQ {
continue
}
y := v_0.Args[1]
- x := v_0.Args[0]
- if v_1.Op != OpAMD64ANDQ {
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
continue
}
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
- if v_1_0.Op != OpAMD64SHRXQ {
- continue
- }
- _ = v_1_0.Args[1]
- if x != v_1_0.Args[0] {
- continue
- }
- v_1_0_1 := v_1_0.Args[1]
- if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
- continue
- }
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
- continue
- }
- v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpAMD64NEGL {
- continue
- }
- v_1_1_0_0_0 := v_1_1_0_0.Args[0]
- if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
- continue
- }
- v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
- if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
- continue
- }
- v.reset(OpAMD64ROLQ)
- v.AddArg2(x, y)
- return true
- }
+ x := v_1
+ v.reset(OpAMD64BTSQ)
+ v.AddArg2(x, y)
+ return true
}
break
}
- // match: (ORQ (SHRXQ x y) (ANDQ (SHLXQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))))
- // result: (RORQ x y)
+ // match: (ORQ (MOVQconst [c]) x)
+ // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128
+ // result: (BTSQconst [int8(log64(c))] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHRXQ {
+ if v_0.Op != OpAMD64MOVQconst {
continue
}
- y := v_0.Args[1]
- x := v_0.Args[0]
- if v_1.Op != OpAMD64ANDQ {
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) {
continue
}
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
- if v_1_0.Op != OpAMD64SHLXQ {
- continue
- }
- _ = v_1_0.Args[1]
- if x != v_1_0.Args[0] {
- continue
- }
- v_1_0_1 := v_1_0.Args[1]
- if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
- continue
- }
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
- continue
- }
- v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpAMD64NEGQ {
- continue
- }
- v_1_1_0_0_0 := v_1_1_0_0.Args[0]
- if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
- continue
- }
- v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
- if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
- continue
- }
- v.reset(OpAMD64RORQ)
- v.AddArg2(x, y)
- return true
- }
+ v.reset(OpAMD64BTSQconst)
+ v.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v.AddArg(x)
+ return true
}
break
}
- // match: (ORQ (SHRXQ x y) (ANDQ (SHLXQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))))
- // result: (RORQ x y)
+ // match: (ORQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (ORQconst [int32(c)] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHRXQ {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
continue
}
- y := v_0.Args[1]
- x := v_0.Args[0]
- if v_1.Op != OpAMD64ANDQ {
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
continue
}
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
- if v_1_0.Op != OpAMD64SHLXQ {
- continue
- }
- _ = v_1_0.Args[1]
- if x != v_1_0.Args[0] {
- continue
- }
- v_1_0_1 := v_1_0.Args[1]
- if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
- continue
- }
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
- continue
- }
- v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpAMD64NEGL {
- continue
- }
- v_1_1_0_0_0 := v_1_1_0_0.Args[0]
- if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
- continue
- }
- v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
- if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
- continue
- }
- v.reset(OpAMD64RORQ)
- v.AddArg2(x, y)
- return true
+ v.reset(OpAMD64ORQconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ORQ x (MOVLconst [c]))
+ // result: (ORQconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ continue
}
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ORQconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
}
break
}
}
func rewriteValueAMD64_OpAMD64ROLBconst(v *Value) bool {
v_0 := v.Args[0]
- // match: (ROLBconst [c] (ROLBconst [d] x))
- // result: (ROLBconst [(c+d)& 7] x)
- for {
- c := auxIntToInt8(v.AuxInt)
- if v_0.Op != OpAMD64ROLBconst {
- break
- }
- d := auxIntToInt8(v_0.AuxInt)
- x := v_0.Args[0]
- v.reset(OpAMD64ROLBconst)
- v.AuxInt = int8ToAuxInt((c + d) & 7)
- v.AddArg(x)
- return true
- }
// match: (ROLBconst x [0])
// result: x
for {
}
func rewriteValueAMD64_OpAMD64ROLLconst(v *Value) bool {
v_0 := v.Args[0]
- // match: (ROLLconst [c] (ROLLconst [d] x))
- // result: (ROLLconst [(c+d)&31] x)
- for {
- c := auxIntToInt8(v.AuxInt)
- if v_0.Op != OpAMD64ROLLconst {
- break
- }
- d := auxIntToInt8(v_0.AuxInt)
- x := v_0.Args[0]
- v.reset(OpAMD64ROLLconst)
- v.AuxInt = int8ToAuxInt((c + d) & 31)
- v.AddArg(x)
- return true
- }
// match: (ROLLconst x [0])
// result: x
for {
}
func rewriteValueAMD64_OpAMD64ROLQconst(v *Value) bool {
v_0 := v.Args[0]
- // match: (ROLQconst [c] (ROLQconst [d] x))
- // result: (ROLQconst [(c+d)&63] x)
- for {
- c := auxIntToInt8(v.AuxInt)
- if v_0.Op != OpAMD64ROLQconst {
- break
- }
- d := auxIntToInt8(v_0.AuxInt)
- x := v_0.Args[0]
- v.reset(OpAMD64ROLQconst)
- v.AuxInt = int8ToAuxInt((c + d) & 63)
- v.AddArg(x)
- return true
- }
// match: (ROLQconst x [0])
// result: x
for {
}
func rewriteValueAMD64_OpAMD64ROLWconst(v *Value) bool {
v_0 := v.Args[0]
- // match: (ROLWconst [c] (ROLWconst [d] x))
- // result: (ROLWconst [(c+d)&15] x)
- for {
- c := auxIntToInt8(v.AuxInt)
- if v_0.Op != OpAMD64ROLWconst {
- break
- }
- d := auxIntToInt8(v_0.AuxInt)
- x := v_0.Args[0]
- v.reset(OpAMD64ROLWconst)
- v.AuxInt = int8ToAuxInt((c + d) & 15)
- v.AddArg(x)
- return true
- }
// match: (ROLWconst x [0])
// result: x
for {
}
break
}
- // match: (XORL (SHLLconst x [c]) (SHRLconst x [d]))
- // cond: d==32-c
- // result: (ROLLconst x [c])
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHLLconst {
- continue
- }
- c := auxIntToInt8(v_0.AuxInt)
- x := v_0.Args[0]
- if v_1.Op != OpAMD64SHRLconst {
- continue
- }
- d := auxIntToInt8(v_1.AuxInt)
- if x != v_1.Args[0] || !(d == 32-c) {
- continue
- }
- v.reset(OpAMD64ROLLconst)
- v.AuxInt = int8ToAuxInt(c)
- v.AddArg(x)
- return true
- }
- break
- }
- // match: (XORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
- // cond: d==16-c && c < 16 && t.Size() == 2
- // result: (ROLWconst x [c])
- for {
- t := v.Type
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHLLconst {
- continue
- }
- c := auxIntToInt8(v_0.AuxInt)
- x := v_0.Args[0]
- if v_1.Op != OpAMD64SHRWconst {
- continue
- }
- d := auxIntToInt8(v_1.AuxInt)
- if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) {
- continue
- }
- v.reset(OpAMD64ROLWconst)
- v.AuxInt = int8ToAuxInt(c)
- v.AddArg(x)
- return true
- }
- break
- }
- // match: (XORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
- // cond: d==8-c && c < 8 && t.Size() == 1
- // result: (ROLBconst x [c])
- for {
- t := v.Type
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHLLconst {
- continue
- }
- c := auxIntToInt8(v_0.AuxInt)
- x := v_0.Args[0]
- if v_1.Op != OpAMD64SHRBconst {
- continue
- }
- d := auxIntToInt8(v_1.AuxInt)
- if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) {
- continue
- }
- v.reset(OpAMD64ROLBconst)
- v.AuxInt = int8ToAuxInt(c)
- v.AddArg(x)
- return true
- }
- break
- }
// match: (XORL x x)
// result: (MOVLconst [0])
for {
}
break
}
- // match: (XORQ (SHLQconst x [c]) (SHRQconst x [d]))
- // cond: d==64-c
- // result: (ROLQconst x [c])
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHLQconst {
- continue
- }
- c := auxIntToInt8(v_0.AuxInt)
- x := v_0.Args[0]
- if v_1.Op != OpAMD64SHRQconst {
- continue
- }
- d := auxIntToInt8(v_1.AuxInt)
- if x != v_1.Args[0] || !(d == 64-c) {
- continue
- }
- v.reset(OpAMD64ROLQconst)
- v.AuxInt = int8ToAuxInt(c)
- v.AddArg(x)
- return true
- }
- break
- }
// match: (XORQ x x)
// result: (MOVQconst [0])
for {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
+ config := b.Func.Config
// match: (Add16 (Const16 [c]) (Const16 [d]))
// result: (Const16 [c+d])
for {
}
break
}
+ // match: (Add16 (Lsh16x64 x z:(Const64 <t> [c])) (Rsh16Ux64 x (Const64 [d])))
+ // cond: c < 16 && d == 16-c && canRotate(config, 16)
+ // result: (RotateLeft16 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLsh16x64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(z.AuxInt)
+ if v_1.Op != OpRsh16Ux64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c < 16 && d == 16-c && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Add16 left:(Lsh16x64 x y) right:(Rsh16Ux64 x (Sub64 (Const64 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh16x64 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh16Ux64 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub64 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add16 left:(Lsh16x32 x y) right:(Rsh16Ux32 x (Sub32 (Const32 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh16x32 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh16Ux32 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub32 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add16 left:(Lsh16x16 x y) right:(Rsh16Ux16 x (Sub16 (Const16 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh16x16 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh16Ux16 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub16 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add16 left:(Lsh16x8 x y) right:(Rsh16Ux8 x (Sub8 (Const8 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh16x8 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh16Ux8 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub8 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add16 right:(Rsh16Ux64 x y) left:(Lsh16x64 x z:(Sub64 (Const64 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh16Ux64 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh16x64 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub64 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Add16 right:(Rsh16Ux32 x y) left:(Lsh16x32 x z:(Sub32 (Const32 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh16Ux32 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh16x32 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub32 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Add16 right:(Rsh16Ux16 x y) left:(Lsh16x16 x z:(Sub16 (Const16 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh16Ux16 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh16x16 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub16 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Add16 right:(Rsh16Ux8 x y) left:(Lsh16x8 x z:(Sub8 (Const8 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh16Ux8 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh16x8 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub8 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
return false
}
func rewriteValuegeneric_OpAdd32(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
+ config := b.Func.Config
// match: (Add32 (Const32 [c]) (Const32 [d]))
// result: (Const32 [c+d])
for {
}
break
}
- return false
-}
-func rewriteValuegeneric_OpAdd32F(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (Add32F (Const32F [c]) (Const32F [d]))
- // cond: c+d == c+d
- // result: (Const32F [c+d])
+ // match: (Add32 (Lsh32x64 x z:(Const64 <t> [c])) (Rsh32Ux64 x (Const64 [d])))
+ // cond: c < 32 && d == 32-c && canRotate(config, 32)
+ // result: (RotateLeft32 x z)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpConst32F {
+ if v_0.Op != OpLsh32x64 {
continue
}
- c := auxIntToFloat32(v_0.AuxInt)
- if v_1.Op != OpConst32F {
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpConst64 {
continue
}
- d := auxIntToFloat32(v_1.AuxInt)
- if !(c+d == c+d) {
+ c := auxIntToInt64(z.AuxInt)
+ if v_1.Op != OpRsh32Ux64 {
continue
}
- v.reset(OpConst32F)
- v.AuxInt = float32ToAuxInt(c + d)
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c < 32 && d == 32-c && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, z)
return true
}
break
}
- return false
-}
-func rewriteValuegeneric_OpAdd64(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (Add64 (Const64 [c]) (Const64 [d]))
- // result: (Const64 [c+d])
+ // match: (Add32 left:(Lsh32x64 x y) right:(Rsh32Ux64 x (Sub64 (Const64 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpConst64 {
+ left := v_0
+ if left.Op != OpLsh32x64 {
continue
}
- c := auxIntToInt64(v_0.AuxInt)
- if v_1.Op != OpConst64 {
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh32Ux64 {
continue
}
- d := auxIntToInt64(v_1.AuxInt)
- v.reset(OpConst64)
- v.AuxInt = int64ToAuxInt(c + d)
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub64 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
return true
}
break
}
- // match: (Add64 <t> (Mul64 x y) (Mul64 x z))
- // result: (Mul64 x (Add64 <t> y z))
+ // match: (Add32 left:(Lsh32x32 x y) right:(Rsh32Ux32 x (Sub32 (Const32 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x y)
for {
- t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpMul64 {
+ left := v_0
+ if left.Op != OpLsh32x32 {
continue
}
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
- x := v_0_0
- y := v_0_1
- if v_1.Op != OpMul64 {
- continue
- }
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i2 := 0; _i2 <= 1; _i2, v_1_0, v_1_1 = _i2+1, v_1_1, v_1_0 {
- if x != v_1_0 {
- continue
- }
- z := v_1_1
- v.reset(OpMul64)
- v0 := b.NewValue0(v.Pos, OpAdd64, t)
- v0.AddArg2(y, z)
- v.AddArg2(x, v0)
- return true
- }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh32Ux32 {
+ continue
}
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub32 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
+ return true
}
break
}
- // match: (Add64 (Const64 [0]) x)
- // result: x
+ // match: (Add32 left:(Lsh32x16 x y) right:(Rsh32Ux16 x (Sub16 (Const16 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ left := v_0
+ if left.Op != OpLsh32x16 {
continue
}
- x := v_1
- v.copyOf(x)
- return true
- }
- break
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh32Ux16 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub16 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add32 left:(Lsh32x8 x y) right:(Rsh32Ux8 x (Sub8 (Const8 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh32x8 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh32Ux8 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub8 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add32 right:(Rsh32Ux64 x y) left:(Lsh32x64 x z:(Sub64 (Const64 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh32Ux64 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh32x64 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub64 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Add32 right:(Rsh32Ux32 x y) left:(Lsh32x32 x z:(Sub32 (Const32 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh32Ux32 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh32x32 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub32 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Add32 right:(Rsh32Ux16 x y) left:(Lsh32x16 x z:(Sub16 (Const16 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh32Ux16 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh32x16 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub16 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Add32 right:(Rsh32Ux8 x y) left:(Lsh32x8 x z:(Sub8 (Const8 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh32Ux8 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh32x8 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub8 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpAdd32F(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	// match: (Add32F (Const32F [c]) (Const32F [d]))
+	// cond: c+d == c+d
+	// result: (Const32F [c+d])
+	// The condition c+d == c+d is false exactly when c+d is NaN
+	// (IEEE 754: NaN compares unequal to itself), so constant
+	// folding is skipped when the sum would be NaN.
+	for {
+		// Try both operand orders (Add32F is commutative).
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpConst32F {
+				continue
+			}
+			c := auxIntToFloat32(v_0.AuxInt)
+			if v_1.Op != OpConst32F {
+				continue
+			}
+			d := auxIntToFloat32(v_1.AuxInt)
+			if !(c+d == c+d) {
+				continue
+			}
+			v.reset(OpConst32F)
+			v.AuxInt = float32ToAuxInt(c + d)
+			return true
+		}
+		break
+	}
+	return false
+}
+func rewriteValuegeneric_OpAdd64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Add64 (Const64 [c]) (Const64 [d]))
+ // result: (Const64 [c+d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c + d)
+ return true
+ }
+ break
+ }
+ // match: (Add64 <t> (Mul64 x y) (Mul64 x z))
+ // result: (Mul64 x (Add64 <t> y z))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMul64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if v_1.Op != OpMul64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, v_1_0, v_1_1 = _i2+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ z := v_1_1
+ v.reset(OpMul64)
+ v0 := b.NewValue0(v.Pos, OpAdd64, t)
+ v0.AddArg2(y, z)
+ v.AddArg2(x, v0)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Add64 (Const64 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
}
// match: (Add64 x (Neg64 y))
// result: (Sub64 x y)
}
break
}
- return false
-}
-func rewriteValuegeneric_OpAdd64F(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (Add64F (Const64F [c]) (Const64F [d]))
- // cond: c+d == c+d
- // result: (Const64F [c+d])
+ // match: (Add64 (Lsh64x64 x z:(Const64 <t> [c])) (Rsh64Ux64 x (Const64 [d])))
+ // cond: c < 64 && d == 64-c && canRotate(config, 64)
+ // result: (RotateLeft64 x z)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpConst64F {
+ if v_0.Op != OpLsh64x64 {
continue
}
- c := auxIntToFloat64(v_0.AuxInt)
- if v_1.Op != OpConst64F {
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpConst64 {
continue
}
- d := auxIntToFloat64(v_1.AuxInt)
- if !(c+d == c+d) {
+ c := auxIntToInt64(z.AuxInt)
+ if v_1.Op != OpRsh64Ux64 {
continue
}
- v.reset(OpConst64F)
- v.AuxInt = float64ToAuxInt(c + d)
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c < 64 && d == 64-c && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, z)
return true
}
break
}
- return false
-}
-func rewriteValuegeneric_OpAdd8(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (Add8 (Const8 [c]) (Const8 [d]))
- // result: (Const8 [c+d])
+ // match: (Add64 left:(Lsh64x64 x y) right:(Rsh64Ux64 x (Sub64 (Const64 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpConst8 {
+ left := v_0
+ if left.Op != OpLsh64x64 {
continue
}
- c := auxIntToInt8(v_0.AuxInt)
- if v_1.Op != OpConst8 {
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh64Ux64 {
continue
}
- d := auxIntToInt8(v_1.AuxInt)
- v.reset(OpConst8)
- v.AuxInt = int8ToAuxInt(c + d)
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub64 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
return true
}
break
}
- // match: (Add8 <t> (Mul8 x y) (Mul8 x z))
- // result: (Mul8 x (Add8 <t> y z))
+ // match: (Add64 left:(Lsh64x32 x y) right:(Rsh64Ux32 x (Sub32 (Const32 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x y)
for {
- t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpMul8 {
+ left := v_0
+ if left.Op != OpLsh64x32 {
continue
}
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
- x := v_0_0
- y := v_0_1
- if v_1.Op != OpMul8 {
- continue
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh64Ux32 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub32 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add64 left:(Lsh64x16 x y) right:(Rsh64Ux16 x (Sub16 (Const16 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh64x16 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh64Ux16 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub16 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add64 left:(Lsh64x8 x y) right:(Rsh64Ux8 x (Sub8 (Const8 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh64x8 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh64Ux8 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub8 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add64 right:(Rsh64Ux64 x y) left:(Lsh64x64 x z:(Sub64 (Const64 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh64Ux64 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh64x64 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub64 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Add64 right:(Rsh64Ux32 x y) left:(Lsh64x32 x z:(Sub32 (Const32 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh64Ux32 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh64x32 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub32 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Add64 right:(Rsh64Ux16 x y) left:(Lsh64x16 x z:(Sub16 (Const16 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh64Ux16 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh64x16 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub16 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Add64 right:(Rsh64Ux8 x y) left:(Lsh64x8 x z:(Sub8 (Const8 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh64Ux8 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh64x8 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub8 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpAdd64F(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	// match: (Add64F (Const64F [c]) (Const64F [d]))
+	// cond: c+d == c+d
+	// result: (Const64F [c+d])
+	// The condition c+d == c+d is false exactly when c+d is NaN
+	// (IEEE 754: NaN compares unequal to itself), so constant
+	// folding is skipped when the sum would be NaN.
+	for {
+		// Try both operand orders (Add64F is commutative).
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			if v_0.Op != OpConst64F {
+				continue
+			}
+			c := auxIntToFloat64(v_0.AuxInt)
+			if v_1.Op != OpConst64F {
+				continue
+			}
+			d := auxIntToFloat64(v_1.AuxInt)
+			if !(c+d == c+d) {
+				continue
+			}
+			v.reset(OpConst64F)
+			v.AuxInt = float64ToAuxInt(c + d)
+			return true
+		}
+		break
+	}
+	return false
+}
+func rewriteValuegeneric_OpAdd8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Add8 (Const8 [c]) (Const8 [d]))
+ // result: (Const8 [c+d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c + d)
+ return true
+ }
+ break
+ }
+ // match: (Add8 <t> (Mul8 x y) (Mul8 x z))
+ // result: (Mul8 x (Add8 <t> y z))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMul8 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if v_1.Op != OpMul8 {
+ continue
}
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
}
break
}
- return false
-}
-func rewriteValuegeneric_OpAddPtr(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (AddPtr <t> x (Const64 [c]))
- // result: (OffPtr <t> x [c])
- for {
- t := v.Type
- x := v_0
- if v_1.Op != OpConst64 {
- break
- }
- c := auxIntToInt64(v_1.AuxInt)
- v.reset(OpOffPtr)
- v.Type = t
- v.AuxInt = int64ToAuxInt(c)
- v.AddArg(x)
- return true
- }
- // match: (AddPtr <t> x (Const32 [c]))
- // result: (OffPtr <t> x [int64(c)])
- for {
- t := v.Type
- x := v_0
- if v_1.Op != OpConst32 {
- break
- }
- c := auxIntToInt32(v_1.AuxInt)
- v.reset(OpOffPtr)
- v.Type = t
- v.AuxInt = int64ToAuxInt(int64(c))
- v.AddArg(x)
- return true
- }
- return false
-}
-func rewriteValuegeneric_OpAnd16(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (And16 (Const16 [c]) (Const16 [d]))
- // result: (Const16 [c&d])
+ // match: (Add8 (Lsh8x64 x z:(Const64 <t> [c])) (Rsh8Ux64 x (Const64 [d])))
+ // cond: c < 8 && d == 8-c && canRotate(config, 8)
+ // result: (RotateLeft8 x z)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpConst16 {
+ if v_0.Op != OpLsh8x64 {
continue
}
- c := auxIntToInt16(v_0.AuxInt)
- if v_1.Op != OpConst16 {
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpConst64 {
continue
}
- d := auxIntToInt16(v_1.AuxInt)
- v.reset(OpConst16)
- v.AuxInt = int16ToAuxInt(c & d)
- return true
- }
- break
- }
- // match: (And16 (Const16 [m]) (Rsh16Ux64 _ (Const64 [c])))
- // cond: c >= int64(16-ntz16(m))
- // result: (Const16 [0])
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpConst16 {
+ c := auxIntToInt64(z.AuxInt)
+ if v_1.Op != OpRsh8Ux64 {
continue
}
- m := auxIntToInt16(v_0.AuxInt)
- if v_1.Op != OpRsh16Ux64 {
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
continue
}
- _ = v_1.Args[1]
v_1_1 := v_1.Args[1]
if v_1_1.Op != OpConst64 {
continue
}
- c := auxIntToInt64(v_1_1.AuxInt)
- if !(c >= int64(16-ntz16(m))) {
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c < 8 && d == 8-c && canRotate(config, 8)) {
continue
}
- v.reset(OpConst16)
- v.AuxInt = int16ToAuxInt(0)
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, z)
return true
}
break
}
- // match: (And16 (Const16 [m]) (Lsh16x64 _ (Const64 [c])))
- // cond: c >= int64(16-nlz16(m))
- // result: (Const16 [0])
+ // match: (Add8 left:(Lsh8x64 x y) right:(Rsh8Ux64 x (Sub64 (Const64 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpConst16 {
+ left := v_0
+ if left.Op != OpLsh8x64 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh8Ux64 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub64 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add8 left:(Lsh8x32 x y) right:(Rsh8Ux32 x (Sub32 (Const32 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh8x32 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh8Ux32 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub32 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add8 left:(Lsh8x16 x y) right:(Rsh8Ux16 x (Sub16 (Const16 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh8x16 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh8Ux16 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub16 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add8 left:(Lsh8x8 x y) right:(Rsh8Ux8 x (Sub8 (Const8 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh8x8 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh8Ux8 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub8 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add8 right:(Rsh8Ux64 x y) left:(Lsh8x64 x z:(Sub64 (Const64 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh8Ux64 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh8x64 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub64 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Add8 right:(Rsh8Ux32 x y) left:(Lsh8x32 x z:(Sub32 (Const32 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh8Ux32 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh8x32 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub32 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Add8 right:(Rsh8Ux16 x y) left:(Lsh8x16 x z:(Sub16 (Const16 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh8Ux16 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh8x16 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub16 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Add8 right:(Rsh8Ux8 x y) left:(Lsh8x8 x z:(Sub8 (Const8 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh8Ux8 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh8x8 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub8 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ return false
+}
+// rewriteValuegeneric_OpAddPtr rewrites AddPtr values: a pointer plus a
+// constant (64- or 32-bit) offset becomes an OffPtr with that offset baked
+// into the AuxInt. Note there is no commutative swap loop here — the pointer
+// is always the first argument and the offset the second.
+// NOTE(review): generated code (match/result comments come from the rule
+// generator); do not hand-edit.
+func rewriteValuegeneric_OpAddPtr(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	// match: (AddPtr <t> x (Const64 [c]))
+	// result: (OffPtr <t> x [c])
+	for {
+		t := v.Type
+		x := v_0
+		if v_1.Op != OpConst64 {
+			break
+		}
+		c := auxIntToInt64(v_1.AuxInt)
+		v.reset(OpOffPtr)
+		// Preserve the pointer type of the original AddPtr on the OffPtr.
+		v.Type = t
+		v.AuxInt = int64ToAuxInt(c)
+		v.AddArg(x)
+		return true
+	}
+	// match: (AddPtr <t> x (Const32 [c]))
+	// result: (OffPtr <t> x [int64(c)])
+	for {
+		t := v.Type
+		x := v_0
+		if v_1.Op != OpConst32 {
+			break
+		}
+		c := auxIntToInt32(v_1.AuxInt)
+		v.reset(OpOffPtr)
+		v.Type = t
+		// Sign-extend the 32-bit offset; OffPtr's AuxInt is always int64.
+		v.AuxInt = int64ToAuxInt(int64(c))
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValuegeneric_OpAnd16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (And16 (Const16 [c]) (Const16 [d]))
+ // result: (Const16 [c&d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c & d)
+ return true
+ }
+ break
+ }
+ // match: (And16 (Const16 [m]) (Rsh16Ux64 _ (Const64 [c])))
+ // cond: c >= int64(16-ntz16(m))
+ // result: (Const16 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ m := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpRsh16Ux64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= int64(16-ntz16(m))) {
+ continue
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And16 (Const16 [m]) (Lsh16x64 _ (Const64 [c])))
+ // cond: c >= int64(16-nlz16(m))
+ // result: (Const16 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
continue
}
m := auxIntToInt16(v_0.AuxInt)
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
+ config := b.Func.Config
// match: (Or16 (Const16 [c]) (Const16 [d]))
// result: (Const16 [c|d])
for {
}
break
}
- return false
-}
-func rewriteValuegeneric_OpOr32(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (Or32 (Const32 [c]) (Const32 [d]))
- // result: (Const32 [c|d])
+ // match: (Or16 (Lsh16x64 x z:(Const64 <t> [c])) (Rsh16Ux64 x (Const64 [d])))
+ // cond: c < 16 && d == 16-c && canRotate(config, 16)
+ // result: (RotateLeft16 x z)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpConst32 {
+ if v_0.Op != OpLsh16x64 {
continue
}
- c := auxIntToInt32(v_0.AuxInt)
- if v_1.Op != OpConst32 {
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpConst64 {
continue
}
- d := auxIntToInt32(v_1.AuxInt)
- v.reset(OpConst32)
- v.AuxInt = int32ToAuxInt(c | d)
+ c := auxIntToInt64(z.AuxInt)
+ if v_1.Op != OpRsh16Ux64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c < 16 && d == 16-c && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, z)
return true
}
break
}
- // match: (Or32 x x)
- // result: x
+ // match: (Or16 left:(Lsh16x64 x y) right:(Rsh16Ux64 x (Sub64 (Const64 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh16x64 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh16Ux64 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub64 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Or16 left:(Lsh16x32 x y) right:(Rsh16Ux32 x (Sub32 (Const32 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh16x32 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh16Ux32 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub32 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Or16 left:(Lsh16x16 x y) right:(Rsh16Ux16 x (Sub16 (Const16 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh16x16 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh16Ux16 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub16 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Or16 left:(Lsh16x8 x y) right:(Rsh16Ux8 x (Sub8 (Const8 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh16x8 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh16Ux8 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub8 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Or16 right:(Rsh16Ux64 x y) left:(Lsh16x64 x z:(Sub64 (Const64 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh16Ux64 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh16x64 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub64 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Or16 right:(Rsh16Ux32 x y) left:(Lsh16x32 x z:(Sub32 (Const32 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh16Ux32 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh16x32 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub32 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Or16 right:(Rsh16Ux16 x y) left:(Lsh16x16 x z:(Sub16 (Const16 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh16Ux16 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh16x16 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub16 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Or16 right:(Rsh16Ux8 x y) left:(Lsh16x8 x z:(Sub8 (Const8 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh16Ux8 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh16x8 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub8 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpOr32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Or32 (Const32 [c]) (Const32 [d]))
+ // result: (Const32 [c|d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c | d)
+ return true
+ }
+ break
+ }
+ // match: (Or32 x x)
+ // result: x
for {
x := v_0
if x != v_1 {
}
break
}
- return false
-}
-func rewriteValuegeneric_OpOr64(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (Or64 (Const64 [c]) (Const64 [d]))
- // result: (Const64 [c|d])
+ // match: (Or32 (Lsh32x64 x z:(Const64 <t> [c])) (Rsh32Ux64 x (Const64 [d])))
+ // cond: c < 32 && d == 32-c && canRotate(config, 32)
+ // result: (RotateLeft32 x z)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpConst64 {
+ if v_0.Op != OpLsh32x64 {
continue
}
- c := auxIntToInt64(v_0.AuxInt)
- if v_1.Op != OpConst64 {
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpConst64 {
continue
}
- d := auxIntToInt64(v_1.AuxInt)
- v.reset(OpConst64)
- v.AuxInt = int64ToAuxInt(c | d)
+ c := auxIntToInt64(z.AuxInt)
+ if v_1.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c < 32 && d == 32-c && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, z)
return true
}
break
}
- // match: (Or64 x x)
- // result: x
+ // match: (Or32 left:(Lsh32x64 x y) right:(Rsh32Ux64 x (Sub64 (Const64 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x y)
for {
- x := v_0
- if x != v_1 {
- break
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh32x64 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub64 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
+ return true
}
- v.copyOf(x)
- return true
+ break
}
- // match: (Or64 (Const64 [0]) x)
- // result: x
+ // match: (Or32 left:(Lsh32x32 x y) right:(Rsh32Ux32 x (Sub32 (Const32 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ left := v_0
+ if left.Op != OpLsh32x32 {
continue
}
- x := v_1
- v.copyOf(x)
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh32Ux32 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub32 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
return true
}
break
}
- // match: (Or64 (Const64 [-1]) _)
- // result: (Const64 [-1])
+ // match: (Or32 left:(Lsh32x16 x y) right:(Rsh32Ux16 x (Sub16 (Const16 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != -1 {
+ left := v_0
+ if left.Op != OpLsh32x16 {
continue
}
- v.reset(OpConst64)
- v.AuxInt = int64ToAuxInt(-1)
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh32Ux16 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub16 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
return true
}
break
}
- // match: (Or64 (Com64 x) x)
- // result: (Const64 [-1])
+ // match: (Or32 left:(Lsh32x8 x y) right:(Rsh32Ux8 x (Sub8 (Const8 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpCom64 {
+ left := v_0
+ if left.Op != OpLsh32x8 {
continue
}
- x := v_0.Args[0]
- if x != v_1 {
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh32Ux8 {
continue
}
- v.reset(OpConst64)
- v.AuxInt = int64ToAuxInt(-1)
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub8 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
return true
}
break
}
- // match: (Or64 x (Or64 x y))
- // result: (Or64 x y)
+ // match: (Or32 right:(Rsh32Ux64 x y) left:(Lsh32x64 x z:(Sub64 (Const64 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x z)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- x := v_0
- if v_1.Op != OpOr64 {
+ right := v_0
+ if right.Op != OpRsh32Ux64 {
continue
}
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
- if x != v_1_0 {
- continue
- }
- y := v_1_1
- v.reset(OpOr64)
- v.AddArg2(x, y)
- return true
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh32x64 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub64 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
}
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, z)
+ return true
}
break
}
- // match: (Or64 (And64 x (Const64 [c2])) (Const64 <t> [c1]))
- // cond: ^(c1 | c2) == 0
- // result: (Or64 (Const64 <t> [c1]) x)
+ // match: (Or32 right:(Rsh32Ux32 x y) left:(Lsh32x32 x z:(Sub32 (Const32 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x z)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAnd64 {
+ right := v_0
+ if right.Op != OpRsh32Ux32 {
continue
}
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
- x := v_0_0
- if v_0_1.Op != OpConst64 {
- continue
- }
- c2 := auxIntToInt64(v_0_1.AuxInt)
- if v_1.Op != OpConst64 {
- continue
- }
- t := v_1.Type
- c1 := auxIntToInt64(v_1.AuxInt)
- if !(^(c1 | c2) == 0) {
- continue
- }
- v.reset(OpOr64)
- v0 := b.NewValue0(v.Pos, OpConst64, t)
- v0.AuxInt = int64ToAuxInt(c1)
- v.AddArg2(v0, x)
- return true
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh32x32 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub32 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
}
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, z)
+ return true
}
break
}
- // match: (Or64 (Or64 i:(Const64 <t>) z) x)
- // cond: (z.Op != OpConst64 && x.Op != OpConst64)
- // result: (Or64 i (Or64 <t> z x))
+ // match: (Or32 right:(Rsh32Ux16 x y) left:(Lsh32x16 x z:(Sub16 (Const16 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x z)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpOr64 {
+ right := v_0
+ if right.Op != OpRsh32Ux16 {
continue
}
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
- i := v_0_0
- if i.Op != OpConst64 {
- continue
- }
- t := i.Type
- z := v_0_1
- x := v_1
- if !(z.Op != OpConst64 && x.Op != OpConst64) {
- continue
- }
- v.reset(OpOr64)
- v0 := b.NewValue0(v.Pos, OpOr64, t)
- v0.AddArg2(z, x)
- v.AddArg2(i, v0)
- return true
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh32x16 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
}
+ z := left.Args[1]
+ if z.Op != OpSub16 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, z)
+ return true
}
break
}
- // match: (Or64 (Const64 <t> [c]) (Or64 (Const64 <t> [d]) x))
- // result: (Or64 (Const64 <t> [c|d]) x)
+ // match: (Or32 right:(Rsh32Ux8 x y) left:(Lsh32x8 x z:(Sub8 (Const8 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x z)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpConst64 {
+ right := v_0
+ if right.Op != OpRsh32Ux8 {
continue
}
- t := v_0.Type
- c := auxIntToInt64(v_0.AuxInt)
- if v_1.Op != OpOr64 {
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh32x8 {
continue
}
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
- if v_1_0.Op != OpConst64 || v_1_0.Type != t {
- continue
- }
- d := auxIntToInt64(v_1_0.AuxInt)
- x := v_1_1
- v.reset(OpOr64)
- v0 := b.NewValue0(v.Pos, OpConst64, t)
- v0.AuxInt = int64ToAuxInt(c | d)
- v.AddArg2(v0, x)
- return true
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub8 {
+ continue
}
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, z)
+ return true
}
break
}
return false
}
-func rewriteValuegeneric_OpOr8(v *Value) bool {
+func rewriteValuegeneric_OpOr64(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- // match: (Or8 (Const8 [c]) (Const8 [d]))
- // result: (Const8 [c|d])
+ config := b.Func.Config
+ // match: (Or64 (Const64 [c]) (Const64 [d]))
+ // result: (Const64 [c|d])
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpConst8 {
+ if v_0.Op != OpConst64 {
continue
}
- c := auxIntToInt8(v_0.AuxInt)
- if v_1.Op != OpConst8 {
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
continue
}
- d := auxIntToInt8(v_1.AuxInt)
- v.reset(OpConst8)
- v.AuxInt = int8ToAuxInt(c | d)
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c | d)
return true
}
break
}
- // match: (Or8 x x)
+ // match: (Or64 x x)
// result: x
for {
x := v_0
v.copyOf(x)
return true
}
- // match: (Or8 (Const8 [0]) x)
+ // match: (Or64 (Const64 [0]) x)
// result: x
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
continue
}
x := v_1
}
break
}
- // match: (Or8 (Const8 [-1]) _)
- // result: (Const8 [-1])
+ // match: (Or64 (Const64 [-1]) _)
+ // result: (Const64 [-1])
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != -1 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != -1 {
continue
}
- v.reset(OpConst8)
- v.AuxInt = int8ToAuxInt(-1)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(-1)
return true
}
break
}
- // match: (Or8 (Com8 x) x)
- // result: (Const8 [-1])
+ // match: (Or64 (Com64 x) x)
+ // result: (Const64 [-1])
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpCom8 {
+ if v_0.Op != OpCom64 {
continue
}
x := v_0.Args[0]
if x != v_1 {
continue
}
- v.reset(OpConst8)
- v.AuxInt = int8ToAuxInt(-1)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(-1)
return true
}
break
}
- // match: (Or8 x (Or8 x y))
- // result: (Or8 x y)
+ // match: (Or64 x (Or64 x y))
+ // result: (Or64 x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
- if v_1.Op != OpOr8 {
+ if v_1.Op != OpOr64 {
continue
}
_ = v_1.Args[1]
continue
}
y := v_1_1
- v.reset(OpOr8)
+ v.reset(OpOr64)
v.AddArg2(x, y)
return true
}
}
break
}
- // match: (Or8 (And8 x (Const8 [c2])) (Const8 <t> [c1]))
+ // match: (Or64 (And64 x (Const64 [c2])) (Const64 <t> [c1]))
// cond: ^(c1 | c2) == 0
- // result: (Or8 (Const8 <t> [c1]) x)
+ // result: (Or64 (Const64 <t> [c1]) x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAnd8 {
+ if v_0.Op != OpAnd64 {
continue
}
_ = v_0.Args[1]
v_0_1 := v_0.Args[1]
for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
x := v_0_0
- if v_0_1.Op != OpConst8 {
+ if v_0_1.Op != OpConst64 {
continue
}
- c2 := auxIntToInt8(v_0_1.AuxInt)
- if v_1.Op != OpConst8 {
+ c2 := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
continue
}
t := v_1.Type
- c1 := auxIntToInt8(v_1.AuxInt)
+ c1 := auxIntToInt64(v_1.AuxInt)
if !(^(c1 | c2) == 0) {
continue
}
- v.reset(OpOr8)
- v0 := b.NewValue0(v.Pos, OpConst8, t)
- v0.AuxInt = int8ToAuxInt(c1)
+ v.reset(OpOr64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c1)
v.AddArg2(v0, x)
return true
}
}
break
}
- // match: (Or8 (Or8 i:(Const8 <t>) z) x)
- // cond: (z.Op != OpConst8 && x.Op != OpConst8)
- // result: (Or8 i (Or8 <t> z x))
+ // match: (Or64 (Or64 i:(Const64 <t>) z) x)
+ // cond: (z.Op != OpConst64 && x.Op != OpConst64)
+ // result: (Or64 i (Or64 <t> z x))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpOr8 {
+ if v_0.Op != OpOr64 {
continue
}
_ = v_0.Args[1]
v_0_1 := v_0.Args[1]
for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
i := v_0_0
- if i.Op != OpConst8 {
+ if i.Op != OpConst64 {
continue
}
t := i.Type
z := v_0_1
x := v_1
- if !(z.Op != OpConst8 && x.Op != OpConst8) {
+ if !(z.Op != OpConst64 && x.Op != OpConst64) {
continue
}
- v.reset(OpOr8)
- v0 := b.NewValue0(v.Pos, OpOr8, t)
+ v.reset(OpOr64)
+ v0 := b.NewValue0(v.Pos, OpOr64, t)
v0.AddArg2(z, x)
v.AddArg2(i, v0)
return true
}
break
}
- // match: (Or8 (Const8 <t> [c]) (Or8 (Const8 <t> [d]) x))
- // result: (Or8 (Const8 <t> [c|d]) x)
+ // match: (Or64 (Const64 <t> [c]) (Or64 (Const64 <t> [d]) x))
+ // result: (Or64 (Const64 <t> [c|d]) x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpConst8 {
+ if v_0.Op != OpConst64 {
continue
}
t := v_0.Type
- c := auxIntToInt8(v_0.AuxInt)
- if v_1.Op != OpOr8 {
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpOr64 {
continue
}
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
v_1_1 := v_1.Args[1]
for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
- if v_1_0.Op != OpConst8 || v_1_0.Type != t {
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
continue
}
- d := auxIntToInt8(v_1_0.AuxInt)
+ d := auxIntToInt64(v_1_0.AuxInt)
x := v_1_1
- v.reset(OpOr8)
- v0 := b.NewValue0(v.Pos, OpConst8, t)
- v0.AuxInt = int8ToAuxInt(c | d)
+ v.reset(OpOr64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c | d)
v.AddArg2(v0, x)
return true
}
}
break
}
- return false
-}
-func rewriteValuegeneric_OpOrB(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (OrB (Less64 (Const64 [c]) x) (Less64 x (Const64 [d])))
- // cond: c >= d
- // result: (Less64U (Const64 <x.Type> [c-d]) (Sub64 <x.Type> x (Const64 <x.Type> [d])))
+ // match: (Or64 (Lsh64x64 x z:(Const64 <t> [c])) (Rsh64Ux64 x (Const64 [d])))
+ // cond: c < 64 && d == 64-c && canRotate(config, 64)
+ // result: (RotateLeft64 x z)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpLess64 {
+ if v_0.Op != OpLsh64x64 {
continue
}
- x := v_0.Args[1]
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpConst64 {
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpConst64 {
continue
}
- c := auxIntToInt64(v_0_0.AuxInt)
- if v_1.Op != OpLess64 {
+ c := auxIntToInt64(z.AuxInt)
+ if v_1.Op != OpRsh64Ux64 {
continue
}
_ = v_1.Args[1]
continue
}
d := auxIntToInt64(v_1_1.AuxInt)
- if !(c >= d) {
+ if !(c < 64 && d == 64-c && canRotate(config, 64)) {
continue
}
- v.reset(OpLess64U)
- v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
- v0.AuxInt = int64ToAuxInt(c - d)
- v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
- v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
- v2.AuxInt = int64ToAuxInt(d)
- v1.AddArg2(x, v2)
- v.AddArg2(v0, v1)
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, z)
return true
}
break
}
- // match: (OrB (Leq64 (Const64 [c]) x) (Less64 x (Const64 [d])))
- // cond: c >= d
- // result: (Leq64U (Const64 <x.Type> [c-d]) (Sub64 <x.Type> x (Const64 <x.Type> [d])))
+ // match: (Or64 left:(Lsh64x64 x y) right:(Rsh64Ux64 x (Sub64 (Const64 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpLeq64 {
+ left := v_0
+ if left.Op != OpLsh64x64 {
continue
}
- x := v_0.Args[1]
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpConst64 {
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh64Ux64 {
continue
}
- c := auxIntToInt64(v_0_0.AuxInt)
- if v_1.Op != OpLess64 {
- continue
- }
- _ = v_1.Args[1]
- if x != v_1.Args[0] {
+ _ = right.Args[1]
+ if x != right.Args[0] {
continue
}
- v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpConst64 {
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub64 {
continue
}
- d := auxIntToInt64(v_1_1.AuxInt)
- if !(c >= d) {
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
continue
}
- v.reset(OpLeq64U)
- v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
- v0.AuxInt = int64ToAuxInt(c - d)
- v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
- v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
- v2.AuxInt = int64ToAuxInt(d)
- v1.AddArg2(x, v2)
- v.AddArg2(v0, v1)
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
return true
}
break
}
- // match: (OrB (Less32 (Const32 [c]) x) (Less32 x (Const32 [d])))
- // cond: c >= d
- // result: (Less32U (Const32 <x.Type> [c-d]) (Sub32 <x.Type> x (Const32 <x.Type> [d])))
+ // match: (Or64 left:(Lsh64x32 x y) right:(Rsh64Ux32 x (Sub32 (Const32 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpLess32 {
- continue
- }
- x := v_0.Args[1]
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpConst32 {
+ left := v_0
+ if left.Op != OpLsh64x32 {
continue
}
- c := auxIntToInt32(v_0_0.AuxInt)
- if v_1.Op != OpLess32 {
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh64Ux32 {
continue
}
- _ = v_1.Args[1]
- if x != v_1.Args[0] {
+ _ = right.Args[1]
+ if x != right.Args[0] {
continue
}
- v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpConst32 {
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub32 {
continue
}
- d := auxIntToInt32(v_1_1.AuxInt)
- if !(c >= d) {
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
continue
}
- v.reset(OpLess32U)
- v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
- v0.AuxInt = int32ToAuxInt(c - d)
- v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
- v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
- v2.AuxInt = int32ToAuxInt(d)
- v1.AddArg2(x, v2)
- v.AddArg2(v0, v1)
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
return true
}
break
}
- // match: (OrB (Leq32 (Const32 [c]) x) (Less32 x (Const32 [d])))
- // cond: c >= d
- // result: (Leq32U (Const32 <x.Type> [c-d]) (Sub32 <x.Type> x (Const32 <x.Type> [d])))
+ // match: (Or64 left:(Lsh64x16 x y) right:(Rsh64Ux16 x (Sub16 (Const16 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpLeq32 {
+ left := v_0
+ if left.Op != OpLsh64x16 {
continue
}
- x := v_0.Args[1]
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpConst32 {
- continue
- }
- c := auxIntToInt32(v_0_0.AuxInt)
- if v_1.Op != OpLess32 {
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh64Ux16 {
continue
}
- _ = v_1.Args[1]
- if x != v_1.Args[0] {
+ _ = right.Args[1]
+ if x != right.Args[0] {
continue
}
- v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpConst32 {
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub16 {
continue
}
- d := auxIntToInt32(v_1_1.AuxInt)
- if !(c >= d) {
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
continue
}
- v.reset(OpLeq32U)
- v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
- v0.AuxInt = int32ToAuxInt(c - d)
- v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
- v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
- v2.AuxInt = int32ToAuxInt(d)
- v1.AddArg2(x, v2)
- v.AddArg2(v0, v1)
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
return true
}
break
}
- // match: (OrB (Less16 (Const16 [c]) x) (Less16 x (Const16 [d])))
- // cond: c >= d
- // result: (Less16U (Const16 <x.Type> [c-d]) (Sub16 <x.Type> x (Const16 <x.Type> [d])))
+ // match: (Or64 left:(Lsh64x8 x y) right:(Rsh64Ux8 x (Sub8 (Const8 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpLess16 {
- continue
- }
- x := v_0.Args[1]
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpConst16 {
+ left := v_0
+ if left.Op != OpLsh64x8 {
continue
}
- c := auxIntToInt16(v_0_0.AuxInt)
- if v_1.Op != OpLess16 {
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh64Ux8 {
continue
}
- _ = v_1.Args[1]
- if x != v_1.Args[0] {
+ _ = right.Args[1]
+ if x != right.Args[0] {
continue
}
- v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpConst16 {
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub8 {
continue
}
- d := auxIntToInt16(v_1_1.AuxInt)
- if !(c >= d) {
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
continue
}
- v.reset(OpLess16U)
- v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
- v0.AuxInt = int16ToAuxInt(c - d)
- v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
- v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
- v2.AuxInt = int16ToAuxInt(d)
- v1.AddArg2(x, v2)
- v.AddArg2(v0, v1)
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
return true
}
break
}
- // match: (OrB (Leq16 (Const16 [c]) x) (Less16 x (Const16 [d])))
- // cond: c >= d
- // result: (Leq16U (Const16 <x.Type> [c-d]) (Sub16 <x.Type> x (Const16 <x.Type> [d])))
+ // match: (Or64 right:(Rsh64Ux64 x y) left:(Lsh64x64 x z:(Sub64 (Const64 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x z)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpLeq16 {
- continue
- }
- x := v_0.Args[1]
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpConst16 {
+ right := v_0
+ if right.Op != OpRsh64Ux64 {
continue
}
- c := auxIntToInt16(v_0_0.AuxInt)
- if v_1.Op != OpLess16 {
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh64x64 {
continue
}
- _ = v_1.Args[1]
- if x != v_1.Args[0] {
+ _ = left.Args[1]
+ if x != left.Args[0] {
continue
}
- v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpConst16 {
+ z := left.Args[1]
+ if z.Op != OpSub64 {
continue
}
- d := auxIntToInt16(v_1_1.AuxInt)
- if !(c >= d) {
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
continue
}
- v.reset(OpLeq16U)
- v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
- v0.AuxInt = int16ToAuxInt(c - d)
- v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
- v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
- v2.AuxInt = int16ToAuxInt(d)
- v1.AddArg2(x, v2)
- v.AddArg2(v0, v1)
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, z)
return true
}
break
}
- // match: (OrB (Less8 (Const8 [c]) x) (Less8 x (Const8 [d])))
- // cond: c >= d
- // result: (Less8U (Const8 <x.Type> [c-d]) (Sub8 <x.Type> x (Const8 <x.Type> [d])))
+ // match: (Or64 right:(Rsh64Ux32 x y) left:(Lsh64x32 x z:(Sub32 (Const32 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x z)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpLess8 {
- continue
- }
- x := v_0.Args[1]
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpConst8 {
+ right := v_0
+ if right.Op != OpRsh64Ux32 {
continue
}
- c := auxIntToInt8(v_0_0.AuxInt)
- if v_1.Op != OpLess8 {
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh64x32 {
continue
}
- _ = v_1.Args[1]
- if x != v_1.Args[0] {
+ _ = left.Args[1]
+ if x != left.Args[0] {
continue
}
- v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpConst8 {
+ z := left.Args[1]
+ if z.Op != OpSub32 {
continue
}
- d := auxIntToInt8(v_1_1.AuxInt)
- if !(c >= d) {
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
continue
}
- v.reset(OpLess8U)
- v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
- v0.AuxInt = int8ToAuxInt(c - d)
- v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
- v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
- v2.AuxInt = int8ToAuxInt(d)
- v1.AddArg2(x, v2)
- v.AddArg2(v0, v1)
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, z)
return true
}
break
}
- // match: (OrB (Leq8 (Const8 [c]) x) (Less8 x (Const8 [d])))
- // cond: c >= d
- // result: (Leq8U (Const8 <x.Type> [c-d]) (Sub8 <x.Type> x (Const8 <x.Type> [d])))
+ // match: (Or64 right:(Rsh64Ux16 x y) left:(Lsh64x16 x z:(Sub16 (Const16 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x z)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpLeq8 {
- continue
- }
- x := v_0.Args[1]
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpConst8 {
+ right := v_0
+ if right.Op != OpRsh64Ux16 {
continue
}
- c := auxIntToInt8(v_0_0.AuxInt)
- if v_1.Op != OpLess8 {
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh64x16 {
continue
}
- _ = v_1.Args[1]
- if x != v_1.Args[0] {
+ _ = left.Args[1]
+ if x != left.Args[0] {
continue
}
- v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpConst8 {
+ z := left.Args[1]
+ if z.Op != OpSub16 {
continue
}
- d := auxIntToInt8(v_1_1.AuxInt)
- if !(c >= d) {
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
continue
}
- v.reset(OpLeq8U)
- v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
- v0.AuxInt = int8ToAuxInt(c - d)
- v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
- v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
- v2.AuxInt = int8ToAuxInt(d)
- v1.AddArg2(x, v2)
- v.AddArg2(v0, v1)
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, z)
return true
}
break
}
- // match: (OrB (Less64 (Const64 [c]) x) (Leq64 x (Const64 [d])))
- // cond: c >= d+1 && d+1 > d
- // result: (Less64U (Const64 <x.Type> [c-d-1]) (Sub64 <x.Type> x (Const64 <x.Type> [d+1])))
+ // match: (Or64 right:(Rsh64Ux8 x y) left:(Lsh64x8 x z:(Sub8 (Const8 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x z)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpLess64 {
- continue
- }
- x := v_0.Args[1]
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpConst64 {
+ right := v_0
+ if right.Op != OpRsh64Ux8 {
continue
}
- c := auxIntToInt64(v_0_0.AuxInt)
- if v_1.Op != OpLeq64 {
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh64x8 {
continue
}
- _ = v_1.Args[1]
- if x != v_1.Args[0] {
+ _ = left.Args[1]
+ if x != left.Args[0] {
continue
}
- v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpConst64 {
+ z := left.Args[1]
+ if z.Op != OpSub8 {
continue
}
- d := auxIntToInt64(v_1_1.AuxInt)
- if !(c >= d+1 && d+1 > d) {
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
continue
}
- v.reset(OpLess64U)
- v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
- v0.AuxInt = int64ToAuxInt(c - d - 1)
- v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
- v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
- v2.AuxInt = int64ToAuxInt(d + 1)
- v1.AddArg2(x, v2)
- v.AddArg2(v0, v1)
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, z)
return true
}
break
}
- // match: (OrB (Leq64 (Const64 [c]) x) (Leq64 x (Const64 [d])))
- // cond: c >= d+1 && d+1 > d
- // result: (Leq64U (Const64 <x.Type> [c-d-1]) (Sub64 <x.Type> x (Const64 <x.Type> [d+1])))
+ return false
+}
+func rewriteValuegeneric_OpOr8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Or8 (Const8 [c]) (Const8 [d]))
+ // result: (Const8 [c|d])
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpLeq64 {
- continue
- }
- x := v_0.Args[1]
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpConst64 {
- continue
- }
- c := auxIntToInt64(v_0_0.AuxInt)
- if v_1.Op != OpLeq64 {
- continue
- }
- _ = v_1.Args[1]
- if x != v_1.Args[0] {
- continue
- }
- v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpConst64 {
+ if v_0.Op != OpConst8 {
continue
}
- d := auxIntToInt64(v_1_1.AuxInt)
- if !(c >= d+1 && d+1 > d) {
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
continue
}
- v.reset(OpLeq64U)
- v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
- v0.AuxInt = int64ToAuxInt(c - d - 1)
- v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
- v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
- v2.AuxInt = int64ToAuxInt(d + 1)
- v1.AddArg2(x, v2)
- v.AddArg2(v0, v1)
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c | d)
return true
}
break
}
- // match: (OrB (Less32 (Const32 [c]) x) (Leq32 x (Const32 [d])))
- // cond: c >= d+1 && d+1 > d
- // result: (Less32U (Const32 <x.Type> [c-d-1]) (Sub32 <x.Type> x (Const32 <x.Type> [d+1])))
+ // match: (Or8 x x)
+ // result: x
for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpLess32 {
- continue
- }
- x := v_0.Args[1]
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpConst32 {
- continue
- }
- c := auxIntToInt32(v_0_0.AuxInt)
- if v_1.Op != OpLeq32 {
- continue
- }
- _ = v_1.Args[1]
- if x != v_1.Args[0] {
- continue
- }
- v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpConst32 {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Or8 (Const8 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
continue
}
- d := auxIntToInt32(v_1_1.AuxInt)
- if !(c >= d+1 && d+1 > d) {
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Or8 (Const8 [-1]) _)
+ // result: (Const8 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != -1 {
continue
}
- v.reset(OpLess32U)
- v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
- v0.AuxInt = int32ToAuxInt(c - d - 1)
- v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
- v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
- v2.AuxInt = int32ToAuxInt(d + 1)
- v1.AddArg2(x, v2)
- v.AddArg2(v0, v1)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(-1)
return true
}
break
}
- // match: (OrB (Leq32 (Const32 [c]) x) (Leq32 x (Const32 [d])))
- // cond: c >= d+1 && d+1 > d
- // result: (Leq32U (Const32 <x.Type> [c-d-1]) (Sub32 <x.Type> x (Const32 <x.Type> [d+1])))
+ // match: (Or8 (Com8 x) x)
+ // result: (Const8 [-1])
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpLeq32 {
+ if v_0.Op != OpCom8 {
continue
}
- x := v_0.Args[1]
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpConst32 {
+ x := v_0.Args[0]
+ if x != v_1 {
continue
}
- c := auxIntToInt32(v_0_0.AuxInt)
- if v_1.Op != OpLeq32 {
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(-1)
+ return true
+ }
+ break
+ }
+ // match: (Or8 x (Or8 x y))
+ // result: (Or8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpOr8 {
continue
}
_ = v_1.Args[1]
- if x != v_1.Args[0] {
- continue
- }
+ v_1_0 := v_1.Args[0]
v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpConst32 {
- continue
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpOr8)
+ v.AddArg2(x, y)
+ return true
}
- d := auxIntToInt32(v_1_1.AuxInt)
- if !(c >= d+1 && d+1 > d) {
+ }
+ break
+ }
+ // match: (Or8 (And8 x (Const8 [c2])) (Const8 <t> [c1]))
+ // cond: ^(c1 | c2) == 0
+ // result: (Or8 (Const8 <t> [c1]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd8 {
continue
}
- v.reset(OpLeq32U)
- v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
- v0.AuxInt = int32ToAuxInt(c - d - 1)
- v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
- v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
- v2.AuxInt = int32ToAuxInt(d + 1)
- v1.AddArg2(x, v2)
- v.AddArg2(v0, v1)
- return true
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst8 {
+ continue
+ }
+ c2 := auxIntToInt8(v_0_1.AuxInt)
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ t := v_1.Type
+ c1 := auxIntToInt8(v_1.AuxInt)
+ if !(^(c1 | c2) == 0) {
+ continue
+ }
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c1)
+ v.AddArg2(v0, x)
+ return true
+ }
}
break
}
- // match: (OrB (Less16 (Const16 [c]) x) (Leq16 x (Const16 [d])))
- // cond: c >= d+1 && d+1 > d
- // result: (Less16U (Const16 <x.Type> [c-d-1]) (Sub16 <x.Type> x (Const16 <x.Type> [d+1])))
+ // match: (Or8 (Or8 i:(Const8 <t>) z) x)
+ // cond: (z.Op != OpConst8 && x.Op != OpConst8)
+ // result: (Or8 i (Or8 <t> z x))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpLess16 {
+ if v_0.Op != OpOr8 {
continue
}
- x := v_0.Args[1]
+ _ = v_0.Args[1]
v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpConst16 {
- continue
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst8 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst8 && x.Op != OpConst8) {
+ continue
+ }
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpOr8, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
}
- c := auxIntToInt16(v_0_0.AuxInt)
- if v_1.Op != OpLeq16 {
+ }
+ break
+ }
+ // match: (Or8 (Const8 <t> [c]) (Or8 (Const8 <t> [d]) x))
+ // result: (Or8 (Const8 <t> [c|d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
continue
}
- _ = v_1.Args[1]
- if x != v_1.Args[0] {
+ t := v_0.Type
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpOr8 {
continue
}
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpConst16 {
- continue
- }
- d := auxIntToInt16(v_1_1.AuxInt)
- if !(c >= d+1 && d+1 > d) {
- continue
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst8 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt8(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c | d)
+ v.AddArg2(v0, x)
+ return true
}
- v.reset(OpLess16U)
- v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
- v0.AuxInt = int16ToAuxInt(c - d - 1)
- v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
- v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
- v2.AuxInt = int16ToAuxInt(d + 1)
- v1.AddArg2(x, v2)
- v.AddArg2(v0, v1)
- return true
}
break
}
- // match: (OrB (Leq16 (Const16 [c]) x) (Leq16 x (Const16 [d])))
- // cond: c >= d+1 && d+1 > d
- // result: (Leq16U (Const16 <x.Type> [c-d-1]) (Sub16 <x.Type> x (Const16 <x.Type> [d+1])))
+ // match: (Or8 (Lsh8x64 x z:(Const64 <t> [c])) (Rsh8Ux64 x (Const64 [d])))
+ // cond: c < 8 && d == 8-c && canRotate(config, 8)
+ // result: (RotateLeft8 x z)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpLeq16 {
+ if v_0.Op != OpLsh8x64 {
continue
}
- x := v_0.Args[1]
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpConst16 {
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpConst64 {
continue
}
- c := auxIntToInt16(v_0_0.AuxInt)
- if v_1.Op != OpLeq16 {
+ c := auxIntToInt64(z.AuxInt)
+ if v_1.Op != OpRsh8Ux64 {
continue
}
_ = v_1.Args[1]
continue
}
v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpConst16 {
+ if v_1_1.Op != OpConst64 {
continue
}
- d := auxIntToInt16(v_1_1.AuxInt)
- if !(c >= d+1 && d+1 > d) {
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c < 8 && d == 8-c && canRotate(config, 8)) {
continue
}
- v.reset(OpLeq16U)
- v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
- v0.AuxInt = int16ToAuxInt(c - d - 1)
- v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
- v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
- v2.AuxInt = int16ToAuxInt(d + 1)
- v1.AddArg2(x, v2)
- v.AddArg2(v0, v1)
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, z)
return true
}
break
}
- // match: (OrB (Less8 (Const8 [c]) x) (Leq8 x (Const8 [d])))
- // cond: c >= d+1 && d+1 > d
- // result: (Less8U (Const8 <x.Type> [c-d-1]) (Sub8 <x.Type> x (Const8 <x.Type> [d+1])))
+ // match: (Or8 left:(Lsh8x64 x y) right:(Rsh8Ux64 x (Sub64 (Const64 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpLess8 {
- continue
- }
- x := v_0.Args[1]
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpConst8 {
+ left := v_0
+ if left.Op != OpLsh8x64 {
continue
}
- c := auxIntToInt8(v_0_0.AuxInt)
- if v_1.Op != OpLeq8 {
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh8Ux64 {
continue
}
- _ = v_1.Args[1]
- if x != v_1.Args[0] {
+ _ = right.Args[1]
+ if x != right.Args[0] {
continue
}
- v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpConst8 {
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub64 {
continue
}
- d := auxIntToInt8(v_1_1.AuxInt)
- if !(c >= d+1 && d+1 > d) {
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
continue
}
- v.reset(OpLess8U)
- v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
- v0.AuxInt = int8ToAuxInt(c - d - 1)
- v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
- v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
- v2.AuxInt = int8ToAuxInt(d + 1)
- v1.AddArg2(x, v2)
- v.AddArg2(v0, v1)
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
return true
}
break
}
- // match: (OrB (Leq8 (Const8 [c]) x) (Leq8 x (Const8 [d])))
- // cond: c >= d+1 && d+1 > d
- // result: (Leq8U (Const8 <x.Type> [c-d-1]) (Sub8 <x.Type> x (Const8 <x.Type> [d+1])))
+ // match: (Or8 left:(Lsh8x32 x y) right:(Rsh8Ux32 x (Sub32 (Const32 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpLeq8 {
+ left := v_0
+ if left.Op != OpLsh8x32 {
continue
}
- x := v_0.Args[1]
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpConst8 {
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh8Ux32 {
continue
}
- c := auxIntToInt8(v_0_0.AuxInt)
- if v_1.Op != OpLeq8 {
+ _ = right.Args[1]
+ if x != right.Args[0] {
continue
}
- _ = v_1.Args[1]
- if x != v_1.Args[0] {
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub32 {
continue
}
- v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpConst8 {
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
continue
}
- d := auxIntToInt8(v_1_1.AuxInt)
- if !(c >= d+1 && d+1 > d) {
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Or8 left:(Lsh8x16 x y) right:(Rsh8Ux16 x (Sub16 (Const16 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh8x16 {
continue
}
- v.reset(OpLeq8U)
- v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
- v0.AuxInt = int8ToAuxInt(c - d - 1)
- v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
- v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
- v2.AuxInt = int8ToAuxInt(d + 1)
- v1.AddArg2(x, v2)
- v.AddArg2(v0, v1)
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh8Ux16 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub16 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
return true
}
break
}
- // match: (OrB (Less64U (Const64 [c]) x) (Less64U x (Const64 [d])))
- // cond: uint64(c) >= uint64(d)
+ // match: (Or8 left:(Lsh8x8 x y) right:(Rsh8Ux8 x (Sub8 (Const8 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh8x8 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh8Ux8 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub8 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Or8 right:(Rsh8Ux64 x y) left:(Lsh8x64 x z:(Sub64 (Const64 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh8Ux64 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh8x64 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub64 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Or8 right:(Rsh8Ux32 x y) left:(Lsh8x32 x z:(Sub32 (Const32 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh8Ux32 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh8x32 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub32 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Or8 right:(Rsh8Ux16 x y) left:(Lsh8x16 x z:(Sub16 (Const16 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh8Ux16 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh8x16 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub16 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Or8 right:(Rsh8Ux8 x y) left:(Lsh8x8 x z:(Sub8 (Const8 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh8Ux8 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh8x8 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub8 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpOrB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (OrB (Less64 (Const64 [c]) x) (Less64 x (Const64 [d])))
+ // cond: c >= d
// result: (Less64U (Const64 <x.Type> [c-d]) (Sub64 <x.Type> x (Const64 <x.Type> [d])))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpLess64U {
+ if v_0.Op != OpLess64 {
continue
}
x := v_0.Args[1]
continue
}
c := auxIntToInt64(v_0_0.AuxInt)
- if v_1.Op != OpLess64U {
+ if v_1.Op != OpLess64 {
continue
}
_ = v_1.Args[1]
continue
}
d := auxIntToInt64(v_1_1.AuxInt)
- if !(uint64(c) >= uint64(d)) {
+ if !(c >= d) {
continue
}
v.reset(OpLess64U)
}
break
}
- // match: (OrB (Leq64U (Const64 [c]) x) (Less64U x (Const64 [d])))
- // cond: uint64(c) >= uint64(d)
+ // match: (OrB (Leq64 (Const64 [c]) x) (Less64 x (Const64 [d])))
+ // cond: c >= d
// result: (Leq64U (Const64 <x.Type> [c-d]) (Sub64 <x.Type> x (Const64 <x.Type> [d])))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpLeq64U {
+ if v_0.Op != OpLeq64 {
continue
}
x := v_0.Args[1]
continue
}
c := auxIntToInt64(v_0_0.AuxInt)
- if v_1.Op != OpLess64U {
+ if v_1.Op != OpLess64 {
continue
}
_ = v_1.Args[1]
continue
}
d := auxIntToInt64(v_1_1.AuxInt)
- if !(uint64(c) >= uint64(d)) {
+ if !(c >= d) {
continue
}
v.reset(OpLeq64U)
}
break
}
- // match: (OrB (Less32U (Const32 [c]) x) (Less32U x (Const32 [d])))
- // cond: uint32(c) >= uint32(d)
+ // match: (OrB (Less32 (Const32 [c]) x) (Less32 x (Const32 [d])))
+ // cond: c >= d
// result: (Less32U (Const32 <x.Type> [c-d]) (Sub32 <x.Type> x (Const32 <x.Type> [d])))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpLess32U {
+ if v_0.Op != OpLess32 {
continue
}
x := v_0.Args[1]
continue
}
c := auxIntToInt32(v_0_0.AuxInt)
- if v_1.Op != OpLess32U {
+ if v_1.Op != OpLess32 {
continue
}
_ = v_1.Args[1]
continue
}
d := auxIntToInt32(v_1_1.AuxInt)
- if !(uint32(c) >= uint32(d)) {
+ if !(c >= d) {
continue
}
v.reset(OpLess32U)
}
break
}
- // match: (OrB (Leq32U (Const32 [c]) x) (Less32U x (Const32 [d])))
- // cond: uint32(c) >= uint32(d)
+ // match: (OrB (Leq32 (Const32 [c]) x) (Less32 x (Const32 [d])))
+ // cond: c >= d
// result: (Leq32U (Const32 <x.Type> [c-d]) (Sub32 <x.Type> x (Const32 <x.Type> [d])))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpLeq32U {
+ if v_0.Op != OpLeq32 {
continue
}
x := v_0.Args[1]
continue
}
c := auxIntToInt32(v_0_0.AuxInt)
- if v_1.Op != OpLess32U {
+ if v_1.Op != OpLess32 {
continue
}
_ = v_1.Args[1]
continue
}
d := auxIntToInt32(v_1_1.AuxInt)
- if !(uint32(c) >= uint32(d)) {
+ if !(c >= d) {
continue
}
v.reset(OpLeq32U)
}
break
}
- // match: (OrB (Less16U (Const16 [c]) x) (Less16U x (Const16 [d])))
- // cond: uint16(c) >= uint16(d)
+ // match: (OrB (Less16 (Const16 [c]) x) (Less16 x (Const16 [d])))
+ // cond: c >= d
// result: (Less16U (Const16 <x.Type> [c-d]) (Sub16 <x.Type> x (Const16 <x.Type> [d])))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpLess16U {
+ if v_0.Op != OpLess16 {
continue
}
x := v_0.Args[1]
continue
}
c := auxIntToInt16(v_0_0.AuxInt)
- if v_1.Op != OpLess16U {
+ if v_1.Op != OpLess16 {
continue
}
_ = v_1.Args[1]
continue
}
d := auxIntToInt16(v_1_1.AuxInt)
- if !(uint16(c) >= uint16(d)) {
+ if !(c >= d) {
continue
}
v.reset(OpLess16U)
}
break
}
- // match: (OrB (Leq16U (Const16 [c]) x) (Less16U x (Const16 [d])))
- // cond: uint16(c) >= uint16(d)
+ // match: (OrB (Leq16 (Const16 [c]) x) (Less16 x (Const16 [d])))
+ // cond: c >= d
// result: (Leq16U (Const16 <x.Type> [c-d]) (Sub16 <x.Type> x (Const16 <x.Type> [d])))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpLeq16U {
+ if v_0.Op != OpLeq16 {
continue
}
x := v_0.Args[1]
continue
}
c := auxIntToInt16(v_0_0.AuxInt)
- if v_1.Op != OpLess16U {
+ if v_1.Op != OpLess16 {
continue
}
_ = v_1.Args[1]
continue
}
d := auxIntToInt16(v_1_1.AuxInt)
- if !(uint16(c) >= uint16(d)) {
+ if !(c >= d) {
continue
}
v.reset(OpLeq16U)
}
break
}
- // match: (OrB (Less8U (Const8 [c]) x) (Less8U x (Const8 [d])))
- // cond: uint8(c) >= uint8(d)
+ // match: (OrB (Less8 (Const8 [c]) x) (Less8 x (Const8 [d])))
+ // cond: c >= d
// result: (Less8U (Const8 <x.Type> [c-d]) (Sub8 <x.Type> x (Const8 <x.Type> [d])))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpLess8U {
+ if v_0.Op != OpLess8 {
continue
}
x := v_0.Args[1]
continue
}
c := auxIntToInt8(v_0_0.AuxInt)
- if v_1.Op != OpLess8U {
+ if v_1.Op != OpLess8 {
continue
}
_ = v_1.Args[1]
continue
}
d := auxIntToInt8(v_1_1.AuxInt)
- if !(uint8(c) >= uint8(d)) {
+ if !(c >= d) {
continue
}
v.reset(OpLess8U)
}
break
}
- // match: (OrB (Leq8U (Const8 [c]) x) (Less8U x (Const8 [d])))
- // cond: uint8(c) >= uint8(d)
+ // match: (OrB (Leq8 (Const8 [c]) x) (Less8 x (Const8 [d])))
+ // cond: c >= d
// result: (Leq8U (Const8 <x.Type> [c-d]) (Sub8 <x.Type> x (Const8 <x.Type> [d])))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpLeq8U {
+ if v_0.Op != OpLeq8 {
continue
}
x := v_0.Args[1]
continue
}
c := auxIntToInt8(v_0_0.AuxInt)
- if v_1.Op != OpLess8U {
+ if v_1.Op != OpLess8 {
continue
}
_ = v_1.Args[1]
continue
}
d := auxIntToInt8(v_1_1.AuxInt)
- if !(uint8(c) >= uint8(d)) {
+ if !(c >= d) {
continue
}
v.reset(OpLeq8U)
}
break
}
- // match: (OrB (Less64U (Const64 [c]) x) (Leq64U x (Const64 [d])))
- // cond: uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d)
+ // match: (OrB (Less64 (Const64 [c]) x) (Leq64 x (Const64 [d])))
+ // cond: c >= d+1 && d+1 > d
// result: (Less64U (Const64 <x.Type> [c-d-1]) (Sub64 <x.Type> x (Const64 <x.Type> [d+1])))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpLess64U {
+ if v_0.Op != OpLess64 {
continue
}
x := v_0.Args[1]
continue
}
c := auxIntToInt64(v_0_0.AuxInt)
- if v_1.Op != OpLeq64U {
+ if v_1.Op != OpLeq64 {
continue
}
_ = v_1.Args[1]
continue
}
d := auxIntToInt64(v_1_1.AuxInt)
- if !(uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d)) {
+ if !(c >= d+1 && d+1 > d) {
continue
}
v.reset(OpLess64U)
}
break
}
- // match: (OrB (Leq64U (Const64 [c]) x) (Leq64U x (Const64 [d])))
- // cond: uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d)
+ // match: (OrB (Leq64 (Const64 [c]) x) (Leq64 x (Const64 [d])))
+ // cond: c >= d+1 && d+1 > d
// result: (Leq64U (Const64 <x.Type> [c-d-1]) (Sub64 <x.Type> x (Const64 <x.Type> [d+1])))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpLeq64U {
+ if v_0.Op != OpLeq64 {
continue
}
x := v_0.Args[1]
continue
}
c := auxIntToInt64(v_0_0.AuxInt)
- if v_1.Op != OpLeq64U {
+ if v_1.Op != OpLeq64 {
continue
}
_ = v_1.Args[1]
continue
}
d := auxIntToInt64(v_1_1.AuxInt)
- if !(uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d)) {
+ if !(c >= d+1 && d+1 > d) {
continue
}
v.reset(OpLeq64U)
}
break
}
- // match: (OrB (Less32U (Const32 [c]) x) (Leq32U x (Const32 [d])))
- // cond: uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d)
+ // match: (OrB (Less32 (Const32 [c]) x) (Leq32 x (Const32 [d])))
+ // cond: c >= d+1 && d+1 > d
// result: (Less32U (Const32 <x.Type> [c-d-1]) (Sub32 <x.Type> x (Const32 <x.Type> [d+1])))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpLess32U {
+ if v_0.Op != OpLess32 {
continue
}
x := v_0.Args[1]
continue
}
c := auxIntToInt32(v_0_0.AuxInt)
- if v_1.Op != OpLeq32U {
+ if v_1.Op != OpLeq32 {
continue
}
_ = v_1.Args[1]
continue
}
d := auxIntToInt32(v_1_1.AuxInt)
- if !(uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d)) {
+ if !(c >= d+1 && d+1 > d) {
continue
}
v.reset(OpLess32U)
}
break
}
- // match: (OrB (Leq32U (Const32 [c]) x) (Leq32U x (Const32 [d])))
- // cond: uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d)
+ // match: (OrB (Leq32 (Const32 [c]) x) (Leq32 x (Const32 [d])))
+ // cond: c >= d+1 && d+1 > d
// result: (Leq32U (Const32 <x.Type> [c-d-1]) (Sub32 <x.Type> x (Const32 <x.Type> [d+1])))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpLeq32U {
+ if v_0.Op != OpLeq32 {
continue
}
x := v_0.Args[1]
continue
}
c := auxIntToInt32(v_0_0.AuxInt)
- if v_1.Op != OpLeq32U {
+ if v_1.Op != OpLeq32 {
continue
}
_ = v_1.Args[1]
continue
}
d := auxIntToInt32(v_1_1.AuxInt)
- if !(uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d)) {
+ if !(c >= d+1 && d+1 > d) {
continue
}
v.reset(OpLeq32U)
}
break
}
- // match: (OrB (Less16U (Const16 [c]) x) (Leq16U x (Const16 [d])))
- // cond: uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d)
+ // match: (OrB (Less16 (Const16 [c]) x) (Leq16 x (Const16 [d])))
+ // cond: c >= d+1 && d+1 > d
// result: (Less16U (Const16 <x.Type> [c-d-1]) (Sub16 <x.Type> x (Const16 <x.Type> [d+1])))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpLess16U {
+ if v_0.Op != OpLess16 {
continue
}
x := v_0.Args[1]
continue
}
c := auxIntToInt16(v_0_0.AuxInt)
- if v_1.Op != OpLeq16U {
+ if v_1.Op != OpLeq16 {
continue
}
_ = v_1.Args[1]
continue
}
d := auxIntToInt16(v_1_1.AuxInt)
- if !(uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d)) {
+ if !(c >= d+1 && d+1 > d) {
continue
}
v.reset(OpLess16U)
}
break
}
- // match: (OrB (Leq16U (Const16 [c]) x) (Leq16U x (Const16 [d])))
- // cond: uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d)
+ // match: (OrB (Leq16 (Const16 [c]) x) (Leq16 x (Const16 [d])))
+ // cond: c >= d+1 && d+1 > d
// result: (Leq16U (Const16 <x.Type> [c-d-1]) (Sub16 <x.Type> x (Const16 <x.Type> [d+1])))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpLeq16U {
+ if v_0.Op != OpLeq16 {
continue
}
x := v_0.Args[1]
continue
}
c := auxIntToInt16(v_0_0.AuxInt)
- if v_1.Op != OpLeq16U {
+ if v_1.Op != OpLeq16 {
continue
}
_ = v_1.Args[1]
continue
}
d := auxIntToInt16(v_1_1.AuxInt)
- if !(uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d)) {
+ if !(c >= d+1 && d+1 > d) {
continue
}
v.reset(OpLeq16U)
}
break
}
- // match: (OrB (Less8U (Const8 [c]) x) (Leq8U x (Const8 [d])))
- // cond: uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d)
+ // match: (OrB (Less8 (Const8 [c]) x) (Leq8 x (Const8 [d])))
+ // cond: c >= d+1 && d+1 > d
// result: (Less8U (Const8 <x.Type> [c-d-1]) (Sub8 <x.Type> x (Const8 <x.Type> [d+1])))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpLess8U {
+ if v_0.Op != OpLess8 {
continue
}
x := v_0.Args[1]
continue
}
c := auxIntToInt8(v_0_0.AuxInt)
- if v_1.Op != OpLeq8U {
+ if v_1.Op != OpLeq8 {
continue
}
_ = v_1.Args[1]
continue
}
d := auxIntToInt8(v_1_1.AuxInt)
- if !(uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d)) {
+ if !(c >= d+1 && d+1 > d) {
continue
}
v.reset(OpLess8U)
}
break
}
- // match: (OrB (Leq8U (Const8 [c]) x) (Leq8U x (Const8 [d])))
- // cond: uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d)
+ // match: (OrB (Leq8 (Const8 [c]) x) (Leq8 x (Const8 [d])))
+ // cond: c >= d+1 && d+1 > d
// result: (Leq8U (Const8 <x.Type> [c-d-1]) (Sub8 <x.Type> x (Const8 <x.Type> [d+1])))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpLeq8U {
+ if v_0.Op != OpLeq8 {
continue
}
x := v_0.Args[1]
continue
}
c := auxIntToInt8(v_0_0.AuxInt)
- if v_1.Op != OpLeq8U {
+ if v_1.Op != OpLeq8 {
continue
}
_ = v_1.Args[1]
continue
}
d := auxIntToInt8(v_1_1.AuxInt)
- if !(uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d)) {
+ if !(c >= d+1 && d+1 > d) {
continue
}
v.reset(OpLeq8U)
}
break
}
- return false
-}
-func rewriteValuegeneric_OpPhi(v *Value) bool {
- // match: (Phi (Const8 [c]) (Const8 [c]))
- // result: (Const8 [c])
+ // match: (OrB (Less64U (Const64 [c]) x) (Less64U x (Const64 [d])))
+ // cond: uint64(c) >= uint64(d)
+ // result: (Less64U (Const64 <x.Type> [c-d]) (Sub64 <x.Type> x (Const64 <x.Type> [d])))
for {
- if len(v.Args) != 2 {
- break
- }
- _ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpConst8 {
- break
- }
- c := auxIntToInt8(v_0.AuxInt)
- v_1 := v.Args[1]
- if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != c {
- break
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess64U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLess64U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(uint64(c) >= uint64(d)) {
+ continue
+ }
+ v.reset(OpLess64U)
+ v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v0.AuxInt = int64ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
}
- v.reset(OpConst8)
- v.AuxInt = int8ToAuxInt(c)
- return true
+ break
}
- // match: (Phi (Const16 [c]) (Const16 [c]))
- // result: (Const16 [c])
+ // match: (OrB (Leq64U (Const64 [c]) x) (Less64U x (Const64 [d])))
+ // cond: uint64(c) >= uint64(d)
+ // result: (Leq64U (Const64 <x.Type> [c-d]) (Sub64 <x.Type> x (Const64 <x.Type> [d])))
for {
- if len(v.Args) != 2 {
- break
- }
- _ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpConst16 {
- break
- }
- c := auxIntToInt16(v_0.AuxInt)
- v_1 := v.Args[1]
- if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != c {
- break
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq64U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLess64U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(uint64(c) >= uint64(d)) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v0.AuxInt = int64ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
}
- v.reset(OpConst16)
- v.AuxInt = int16ToAuxInt(c)
- return true
+ break
}
- // match: (Phi (Const32 [c]) (Const32 [c]))
- // result: (Const32 [c])
+ // match: (OrB (Less32U (Const32 [c]) x) (Less32U x (Const32 [d])))
+ // cond: uint32(c) >= uint32(d)
+ // result: (Less32U (Const32 <x.Type> [c-d]) (Sub32 <x.Type> x (Const32 <x.Type> [d])))
for {
- if len(v.Args) != 2 {
- break
- }
- _ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpConst32 {
- break
- }
- c := auxIntToInt32(v_0.AuxInt)
- v_1 := v.Args[1]
- if v_1.Op != OpConst32 || auxIntToInt32(v_1.AuxInt) != c {
- break
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess32U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLess32U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(uint32(c) >= uint32(d)) {
+ continue
+ }
+ v.reset(OpLess32U)
+ v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v0.AuxInt = int32ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
}
- v.reset(OpConst32)
- v.AuxInt = int32ToAuxInt(c)
- return true
+ break
}
- // match: (Phi (Const64 [c]) (Const64 [c]))
- // result: (Const64 [c])
+ // match: (OrB (Leq32U (Const32 [c]) x) (Less32U x (Const32 [d])))
+ // cond: uint32(c) >= uint32(d)
+ // result: (Leq32U (Const32 <x.Type> [c-d]) (Sub32 <x.Type> x (Const32 <x.Type> [d])))
for {
- if len(v.Args) != 2 {
- break
- }
- _ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpConst64 {
- break
- }
- c := auxIntToInt64(v_0.AuxInt)
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c {
- break
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq32U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLess32U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(uint32(c) >= uint32(d)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v0.AuxInt = int32ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
}
- v.reset(OpConst64)
- v.AuxInt = int64ToAuxInt(c)
- return true
+ break
}
- return false
-}
-func rewriteValuegeneric_OpPtrIndex(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- config := b.Func.Config
+ // match: (OrB (Less16U (Const16 [c]) x) (Less16U x (Const16 [d])))
+ // cond: uint16(c) >= uint16(d)
+ // result: (Less16U (Const16 <x.Type> [c-d]) (Sub16 <x.Type> x (Const16 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess16U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLess16U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(uint16(c) >= uint16(d)) {
+ continue
+ }
+ v.reset(OpLess16U)
+ v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v0.AuxInt = int16ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq16U (Const16 [c]) x) (Less16U x (Const16 [d])))
+ // cond: uint16(c) >= uint16(d)
+ // result: (Leq16U (Const16 <x.Type> [c-d]) (Sub16 <x.Type> x (Const16 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq16U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLess16U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(uint16(c) >= uint16(d)) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v0.AuxInt = int16ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less8U (Const8 [c]) x) (Less8U x (Const8 [d])))
+ // cond: uint8(c) >= uint8(d)
+ // result: (Less8U (Const8 <x.Type> [c-d]) (Sub8 <x.Type> x (Const8 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess8U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLess8U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(uint8(c) >= uint8(d)) {
+ continue
+ }
+ v.reset(OpLess8U)
+ v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v0.AuxInt = int8ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq8U (Const8 [c]) x) (Less8U x (Const8 [d])))
+ // cond: uint8(c) >= uint8(d)
+ // result: (Leq8U (Const8 <x.Type> [c-d]) (Sub8 <x.Type> x (Const8 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq8U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLess8U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(uint8(c) >= uint8(d)) {
+ continue
+ }
+ v.reset(OpLeq8U)
+ v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v0.AuxInt = int8ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less64U (Const64 [c]) x) (Leq64U x (Const64 [d])))
+ // cond: uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d)
+ // result: (Less64U (Const64 <x.Type> [c-d-1]) (Sub64 <x.Type> x (Const64 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess64U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLeq64U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d)) {
+ continue
+ }
+ v.reset(OpLess64U)
+ v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v0.AuxInt = int64ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq64U (Const64 [c]) x) (Leq64U x (Const64 [d])))
+ // cond: uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d)
+ // result: (Leq64U (Const64 <x.Type> [c-d-1]) (Sub64 <x.Type> x (Const64 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq64U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLeq64U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d)) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v0.AuxInt = int64ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less32U (Const32 [c]) x) (Leq32U x (Const32 [d])))
+ // cond: uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d)
+ // result: (Less32U (Const32 <x.Type> [c-d-1]) (Sub32 <x.Type> x (Const32 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess32U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLeq32U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d)) {
+ continue
+ }
+ v.reset(OpLess32U)
+ v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v0.AuxInt = int32ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq32U (Const32 [c]) x) (Leq32U x (Const32 [d])))
+ // cond: uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d)
+ // result: (Leq32U (Const32 <x.Type> [c-d-1]) (Sub32 <x.Type> x (Const32 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq32U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLeq32U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v0.AuxInt = int32ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less16U (Const16 [c]) x) (Leq16U x (Const16 [d])))
+ // cond: uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d)
+ // result: (Less16U (Const16 <x.Type> [c-d-1]) (Sub16 <x.Type> x (Const16 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess16U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLeq16U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d)) {
+ continue
+ }
+ v.reset(OpLess16U)
+ v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v0.AuxInt = int16ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq16U (Const16 [c]) x) (Leq16U x (Const16 [d])))
+ // cond: uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d)
+ // result: (Leq16U (Const16 <x.Type> [c-d-1]) (Sub16 <x.Type> x (Const16 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq16U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLeq16U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d)) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v0.AuxInt = int16ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less8U (Const8 [c]) x) (Leq8U x (Const8 [d])))
+ // cond: uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d)
+ // result: (Less8U (Const8 <x.Type> [c-d-1]) (Sub8 <x.Type> x (Const8 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess8U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLeq8U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d)) {
+ continue
+ }
+ v.reset(OpLess8U)
+ v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v0.AuxInt = int8ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq8U (Const8 [c]) x) (Leq8U x (Const8 [d])))
+ // cond: uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d)
+ // result: (Leq8U (Const8 <x.Type> [c-d-1]) (Sub8 <x.Type> x (Const8 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq8U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLeq8U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d)) {
+ continue
+ }
+ v.reset(OpLeq8U)
+ v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v0.AuxInt = int8ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpPhi(v *Value) bool {
+ // match: (Phi (Const8 [c]) (Const8 [c]))
+ // result: (Const8 [c])
+ for {
+ if len(v.Args) != 2 {
+ break
+ }
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != c {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c)
+ return true
+ }
+ // match: (Phi (Const16 [c]) (Const16 [c]))
+ // result: (Const16 [c])
+ for {
+ if len(v.Args) != 2 {
+ break
+ }
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != c {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c)
+ return true
+ }
+ // match: (Phi (Const32 [c]) (Const32 [c]))
+ // result: (Const32 [c])
+ for {
+ if len(v.Args) != 2 {
+ break
+ }
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst32 || auxIntToInt32(v_1.AuxInt) != c {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c)
+ return true
+ }
+ // match: (Phi (Const64 [c]) (Const64 [c]))
+ // result: (Const64 [c])
+ for {
+ if len(v.Args) != 2 {
+ break
+ }
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpPtrIndex(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
typ := &b.Func.Config.Types
// match: (PtrIndex <t> ptr idx)
// cond: config.PtrSize == 4 && is32Bit(t.Elem().Size())
// result: (AddPtr ptr (Mul32 <typ.Int> idx (Const32 <typ.Int> [int32(t.Elem().Size())])))
for {
- t := v.Type
- ptr := v_0
- idx := v_1
- if !(config.PtrSize == 4 && is32Bit(t.Elem().Size())) {
+ t := v.Type
+ ptr := v_0
+ idx := v_1
+ if !(config.PtrSize == 4 && is32Bit(t.Elem().Size())) {
+ break
+ }
+ v.reset(OpAddPtr)
+ v0 := b.NewValue0(v.Pos, OpMul32, typ.Int)
+ v1 := b.NewValue0(v.Pos, OpConst32, typ.Int)
+ v1.AuxInt = int32ToAuxInt(int32(t.Elem().Size()))
+ v0.AddArg2(idx, v1)
+ v.AddArg2(ptr, v0)
+ return true
+ }
+ // match: (PtrIndex <t> ptr idx)
+ // cond: config.PtrSize == 8
+ // result: (AddPtr ptr (Mul64 <typ.Int> idx (Const64 <typ.Int> [t.Elem().Size()])))
+ for {
+ t := v.Type
+ ptr := v_0
+ idx := v_1
+ if !(config.PtrSize == 8) {
+ break
+ }
+ v.reset(OpAddPtr)
+ v0 := b.NewValue0(v.Pos, OpMul64, typ.Int)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.Int)
+ v1.AuxInt = int64ToAuxInt(t.Elem().Size())
+ v0.AddArg2(idx, v1)
+ v.AddArg2(ptr, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRotateLeft16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (RotateLeft16 x (Const16 [c]))
+ // cond: c%16 == 0
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(c%16 == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (RotateLeft16 x (And64 y (Const64 [c])))
+ // cond: c&15 == 15
+ // result: (RotateLeft16 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAnd64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c&15 == 15) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft16 x (And32 y (Const32 [c])))
+ // cond: c&15 == 15
+ // result: (RotateLeft16 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAnd32 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_1.AuxInt)
+ if !(c&15 == 15) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft16 x (And16 y (Const16 [c])))
+ // cond: c&15 == 15
+ // result: (RotateLeft16 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAnd16 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_1.AuxInt)
+ if !(c&15 == 15) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft16 x (And8 y (Const8 [c])))
+ // cond: c&15 == 15
+ // result: (RotateLeft16 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAnd8 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1_1.AuxInt)
+ if !(c&15 == 15) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft16 x (Neg64 (And64 y (Const64 [c]))))
+ // cond: c&15 == 15
+ // result: (RotateLeft16 x (Neg64 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpNeg64 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAnd64 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_0_1.AuxInt)
+ if !(c&15 == 15) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v0 := b.NewValue0(v.Pos, OpNeg64, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft16 x (Neg32 (And32 y (Const32 [c]))))
+ // cond: c&15 == 15
+ // result: (RotateLeft16 x (Neg32 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpNeg32 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAnd32 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0_1.AuxInt)
+ if !(c&15 == 15) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v0 := b.NewValue0(v.Pos, OpNeg32, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft16 x (Neg16 (And16 y (Const16 [c]))))
+ // cond: c&15 == 15
+ // result: (RotateLeft16 x (Neg16 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpNeg16 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAnd16 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_0_1.AuxInt)
+ if !(c&15 == 15) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v0 := b.NewValue0(v.Pos, OpNeg16, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft16 x (Neg8 (And8 y (Const8 [c]))))
+ // cond: c&15 == 15
+ // result: (RotateLeft16 x (Neg8 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpNeg8 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAnd8 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1_0_1.AuxInt)
+ if !(c&15 == 15) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v0 := b.NewValue0(v.Pos, OpNeg8, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft16 x (Add64 y (Const64 [c])))
+ // cond: c&15 == 0
+ // result: (RotateLeft16 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c&15 == 0) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft16 x (Add32 y (Const32 [c])))
+ // cond: c&15 == 0
+ // result: (RotateLeft16 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd32 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_1.AuxInt)
+ if !(c&15 == 0) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft16 x (Add16 y (Const16 [c])))
+ // cond: c&15 == 0
+ // result: (RotateLeft16 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd16 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_1.AuxInt)
+ if !(c&15 == 0) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft16 x (Add8 y (Const8 [c])))
+ // cond: c&15 == 0
+ // result: (RotateLeft16 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd8 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1_1.AuxInt)
+ if !(c&15 == 0) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft16 x (Sub64 (Const64 [c]) y))
+ // cond: c&15 == 0
+ // result: (RotateLeft16 x (Neg64 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpSub64 {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ if !(c&15 == 0) {
+ break
+ }
+ v.reset(OpRotateLeft16)
+ v0 := b.NewValue0(v.Pos, OpNeg64, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft16 x (Sub32 (Const32 [c]) y))
+ // cond: c&15 == 0
+ // result: (RotateLeft16 x (Neg32 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpSub32 {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c&15 == 0) {
+ break
+ }
+ v.reset(OpRotateLeft16)
+ v0 := b.NewValue0(v.Pos, OpNeg32, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft16 x (Sub16 (Const16 [c]) y))
+ // cond: c&15 == 0
+ // result: (RotateLeft16 x (Neg16 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpSub16 {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1_0.AuxInt)
+ if !(c&15 == 0) {
+ break
+ }
+ v.reset(OpRotateLeft16)
+ v0 := b.NewValue0(v.Pos, OpNeg16, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft16 x (Sub8 (Const8 [c]) y))
+ // cond: c&15 == 0
+ // result: (RotateLeft16 x (Neg8 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpSub8 {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1_0.AuxInt)
+ if !(c&15 == 0) {
+ break
+ }
+ v.reset(OpRotateLeft16)
+ v0 := b.NewValue0(v.Pos, OpNeg8, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft16 x (Const64 <t> [c]))
+ // cond: config.PtrSize == 4
+ // result: (RotateLeft16 x (Const32 <t> [int32(c)]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ t := v_1.Type
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpRotateLeft16)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRotateLeft32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (RotateLeft32 x (Const32 [c]))
+ // cond: c%32 == 0
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c%32 == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (RotateLeft32 x (And64 y (Const64 [c])))
+ // cond: c&31 == 31
+ // result: (RotateLeft32 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAnd64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c&31 == 31) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft32 x (And32 y (Const32 [c])))
+ // cond: c&31 == 31
+ // result: (RotateLeft32 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAnd32 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_1.AuxInt)
+ if !(c&31 == 31) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft32 x (And16 y (Const16 [c])))
+ // cond: c&31 == 31
+ // result: (RotateLeft32 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAnd16 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_1.AuxInt)
+ if !(c&31 == 31) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft32 x (And8 y (Const8 [c])))
+ // cond: c&31 == 31
+ // result: (RotateLeft32 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAnd8 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1_1.AuxInt)
+ if !(c&31 == 31) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft32 x (Neg64 (And64 y (Const64 [c]))))
+ // cond: c&31 == 31
+ // result: (RotateLeft32 x (Neg64 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpNeg64 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAnd64 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_0_1.AuxInt)
+ if !(c&31 == 31) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v0 := b.NewValue0(v.Pos, OpNeg64, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft32 x (Neg32 (And32 y (Const32 [c]))))
+ // cond: c&31 == 31
+ // result: (RotateLeft32 x (Neg32 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpNeg32 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAnd32 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0_1.AuxInt)
+ if !(c&31 == 31) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v0 := b.NewValue0(v.Pos, OpNeg32, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft32 x (Neg16 (And16 y (Const16 [c]))))
+ // cond: c&31 == 31
+ // result: (RotateLeft32 x (Neg16 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpNeg16 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAnd16 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_0_1.AuxInt)
+ if !(c&31 == 31) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v0 := b.NewValue0(v.Pos, OpNeg16, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft32 x (Neg8 (And8 y (Const8 [c]))))
+ // cond: c&31 == 31
+ // result: (RotateLeft32 x (Neg8 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpNeg8 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAnd8 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1_0_1.AuxInt)
+ if !(c&31 == 31) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v0 := b.NewValue0(v.Pos, OpNeg8, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft32 x (Add64 y (Const64 [c])))
+ // cond: c&31 == 0
+ // result: (RotateLeft32 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c&31 == 0) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft32 x (Add32 y (Const32 [c])))
+ // cond: c&31 == 0
+ // result: (RotateLeft32 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd32 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_1.AuxInt)
+ if !(c&31 == 0) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft32 x (Add16 y (Const16 [c])))
+ // cond: c&31 == 0
+ // result: (RotateLeft32 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd16 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_1.AuxInt)
+ if !(c&31 == 0) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft32 x (Add8 y (Const8 [c])))
+ // cond: c&31 == 0
+ // result: (RotateLeft32 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd8 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1_1.AuxInt)
+ if !(c&31 == 0) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft32 x (Sub64 (Const64 [c]) y))
+ // cond: c&31 == 0
+ // result: (RotateLeft32 x (Neg64 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpSub64 {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpRotateLeft32)
+ v0 := b.NewValue0(v.Pos, OpNeg64, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft32 x (Sub32 (Const32 [c]) y))
+ // cond: c&31 == 0
+ // result: (RotateLeft32 x (Neg32 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpSub32 {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpRotateLeft32)
+ v0 := b.NewValue0(v.Pos, OpNeg32, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft32 x (Sub16 (Const16 [c]) y))
+ // cond: c&31 == 0
+ // result: (RotateLeft32 x (Neg16 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpSub16 {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1_0.AuxInt)
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpRotateLeft32)
+ v0 := b.NewValue0(v.Pos, OpNeg16, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft32 x (Sub8 (Const8 [c]) y))
+ // cond: c&31 == 0
+ // result: (RotateLeft32 x (Neg8 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpSub8 {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1_0.AuxInt)
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpRotateLeft32)
+ v0 := b.NewValue0(v.Pos, OpNeg8, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft32 x (Const64 <t> [c]))
+ // cond: config.PtrSize == 4
+ // result: (RotateLeft32 x (Const32 <t> [int32(c)]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ t := v_1.Type
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpRotateLeft32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRotateLeft64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (RotateLeft64 x (Const64 [c]))
+ // cond: c%64 == 0
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%64 == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (RotateLeft64 x (And64 y (Const64 [c])))
+ // cond: c&63 == 63
+ // result: (RotateLeft64 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAnd64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c&63 == 63) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft64 x (And32 y (Const32 [c])))
+ // cond: c&63 == 63
+ // result: (RotateLeft64 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAnd32 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_1.AuxInt)
+ if !(c&63 == 63) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft64 x (And16 y (Const16 [c])))
+ // cond: c&63 == 63
+ // result: (RotateLeft64 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAnd16 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_1.AuxInt)
+ if !(c&63 == 63) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft64 x (And8 y (Const8 [c])))
+ // cond: c&63 == 63
+ // result: (RotateLeft64 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAnd8 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1_1.AuxInt)
+ if !(c&63 == 63) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft64 x (Neg64 (And64 y (Const64 [c]))))
+ // cond: c&63 == 63
+ // result: (RotateLeft64 x (Neg64 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpNeg64 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAnd64 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_0_1.AuxInt)
+ if !(c&63 == 63) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v0 := b.NewValue0(v.Pos, OpNeg64, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft64 x (Neg32 (And32 y (Const32 [c]))))
+ // cond: c&63 == 63
+ // result: (RotateLeft64 x (Neg32 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpNeg32 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAnd32 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0_1.AuxInt)
+ if !(c&63 == 63) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v0 := b.NewValue0(v.Pos, OpNeg32, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft64 x (Neg16 (And16 y (Const16 [c]))))
+ // cond: c&63 == 63
+ // result: (RotateLeft64 x (Neg16 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpNeg16 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAnd16 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_0_1.AuxInt)
+ if !(c&63 == 63) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v0 := b.NewValue0(v.Pos, OpNeg16, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft64 x (Neg8 (And8 y (Const8 [c]))))
+ // cond: c&63 == 63
+ // result: (RotateLeft64 x (Neg8 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpNeg8 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAnd8 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1_0_1.AuxInt)
+ if !(c&63 == 63) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v0 := b.NewValue0(v.Pos, OpNeg8, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft64 x (Add64 y (Const64 [c])))
+ // cond: c&63 == 0
+ // result: (RotateLeft64 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c&63 == 0) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft64 x (Add32 y (Const32 [c])))
+ // cond: c&63 == 0
+ // result: (RotateLeft64 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd32 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_1.AuxInt)
+ if !(c&63 == 0) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft64 x (Add16 y (Const16 [c])))
+ // cond: c&63 == 0
+ // result: (RotateLeft64 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd16 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_1.AuxInt)
+ if !(c&63 == 0) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft64 x (Add8 y (Const8 [c])))
+ // cond: c&63 == 0
+ // result: (RotateLeft64 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd8 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1_1.AuxInt)
+ if !(c&63 == 0) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft64 x (Sub64 (Const64 [c]) y))
+ // cond: c&63 == 0
+ // result: (RotateLeft64 x (Neg64 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpSub64 {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpRotateLeft64)
+ v0 := b.NewValue0(v.Pos, OpNeg64, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft64 x (Sub32 (Const32 [c]) y))
+ // cond: c&63 == 0
+ // result: (RotateLeft64 x (Neg32 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpSub32 {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpRotateLeft64)
+ v0 := b.NewValue0(v.Pos, OpNeg32, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft64 x (Sub16 (Const16 [c]) y))
+ // cond: c&63 == 0
+ // result: (RotateLeft64 x (Neg16 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpSub16 {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1_0.AuxInt)
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpRotateLeft64)
+ v0 := b.NewValue0(v.Pos, OpNeg16, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft64 x (Sub8 (Const8 [c]) y))
+ // cond: c&63 == 0
+ // result: (RotateLeft64 x (Neg8 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpSub8 {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1_0.AuxInt)
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpRotateLeft64)
+ v0 := b.NewValue0(v.Pos, OpNeg8, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft64 x (Const64 <t> [c]))
+ // cond: config.PtrSize == 4
+ // result: (RotateLeft64 x (Const32 <t> [int32(c)]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ t := v_1.Type
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpRotateLeft64)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRotateLeft8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (RotateLeft8 x (Const8 [c]))
+ // cond: c%8 == 0
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(c%8 == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (RotateLeft8 x (And64 y (Const64 [c])))
+ // cond: c&7 == 7
+ // result: (RotateLeft8 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAnd64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c&7 == 7) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft8 x (And32 y (Const32 [c])))
+ // cond: c&7 == 7
+ // result: (RotateLeft8 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAnd32 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_1.AuxInt)
+ if !(c&7 == 7) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft8 x (And16 y (Const16 [c])))
+ // cond: c&7 == 7
+ // result: (RotateLeft8 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAnd16 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_1.AuxInt)
+ if !(c&7 == 7) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft8 x (And8 y (Const8 [c])))
+ // cond: c&7 == 7
+ // result: (RotateLeft8 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAnd8 {
break
}
- v.reset(OpAddPtr)
- v0 := b.NewValue0(v.Pos, OpMul32, typ.Int)
- v1 := b.NewValue0(v.Pos, OpConst32, typ.Int)
- v1.AuxInt = int32ToAuxInt(int32(t.Elem().Size()))
- v0.AddArg2(idx, v1)
- v.AddArg2(ptr, v0)
- return true
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1_1.AuxInt)
+ if !(c&7 == 7) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
}
- // match: (PtrIndex <t> ptr idx)
- // cond: config.PtrSize == 8
- // result: (AddPtr ptr (Mul64 <typ.Int> idx (Const64 <typ.Int> [t.Elem().Size()])))
+ // match: (RotateLeft8 x (Neg64 (And64 y (Const64 [c]))))
+ // cond: c&7 == 7
+ // result: (RotateLeft8 x (Neg64 <y.Type> y))
for {
- t := v.Type
- ptr := v_0
- idx := v_1
- if !(config.PtrSize == 8) {
+ x := v_0
+ if v_1.Op != OpNeg64 {
break
}
- v.reset(OpAddPtr)
- v0 := b.NewValue0(v.Pos, OpMul64, typ.Int)
- v1 := b.NewValue0(v.Pos, OpConst64, typ.Int)
- v1.AuxInt = int64ToAuxInt(t.Elem().Size())
- v0.AddArg2(idx, v1)
- v.AddArg2(ptr, v0)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAnd64 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_0_1.AuxInt)
+ if !(c&7 == 7) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v0 := b.NewValue0(v.Pos, OpNeg64, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft8 x (Neg32 (And32 y (Const32 [c]))))
+ // cond: c&7 == 7
+ // result: (RotateLeft8 x (Neg32 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpNeg32 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAnd32 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0_1.AuxInt)
+ if !(c&7 == 7) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v0 := b.NewValue0(v.Pos, OpNeg32, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft8 x (Neg16 (And16 y (Const16 [c]))))
+ // cond: c&7 == 7
+ // result: (RotateLeft8 x (Neg16 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpNeg16 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAnd16 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_0_1.AuxInt)
+ if !(c&7 == 7) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v0 := b.NewValue0(v.Pos, OpNeg16, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft8 x (Neg8 (And8 y (Const8 [c]))))
+ // cond: c&7 == 7
+ // result: (RotateLeft8 x (Neg8 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpNeg8 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAnd8 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1_0_1.AuxInt)
+ if !(c&7 == 7) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v0 := b.NewValue0(v.Pos, OpNeg8, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft8 x (Add64 y (Const64 [c])))
+ // cond: c&7 == 0
+ // result: (RotateLeft8 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c&7 == 0) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft8 x (Add32 y (Const32 [c])))
+ // cond: c&7 == 0
+ // result: (RotateLeft8 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd32 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_1.AuxInt)
+ if !(c&7 == 0) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft8 x (Add16 y (Const16 [c])))
+ // cond: c&7 == 0
+ // result: (RotateLeft8 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd16 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_1.AuxInt)
+ if !(c&7 == 0) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft8 x (Add8 y (Const8 [c])))
+ // cond: c&7 == 0
+ // result: (RotateLeft8 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd8 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1_1.AuxInt)
+ if !(c&7 == 0) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft8 x (Sub64 (Const64 [c]) y))
+ // cond: c&7 == 0
+ // result: (RotateLeft8 x (Neg64 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpSub64 {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ if !(c&7 == 0) {
+ break
+ }
+ v.reset(OpRotateLeft8)
+ v0 := b.NewValue0(v.Pos, OpNeg64, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
return true
}
- return false
-}
-func rewriteValuegeneric_OpRotateLeft16(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (RotateLeft16 x (Const16 [c]))
- // cond: c%16 == 0
- // result: x
+ // match: (RotateLeft8 x (Sub32 (Const32 [c]) y))
+ // cond: c&7 == 0
+ // result: (RotateLeft8 x (Neg32 <y.Type> y))
for {
x := v_0
- if v_1.Op != OpConst16 {
+ if v_1.Op != OpSub32 {
break
}
- c := auxIntToInt16(v_1.AuxInt)
- if !(c%16 == 0) {
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
break
}
- v.copyOf(x)
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c&7 == 0) {
+ break
+ }
+ v.reset(OpRotateLeft8)
+ v0 := b.NewValue0(v.Pos, OpNeg32, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
return true
}
- return false
-}
-func rewriteValuegeneric_OpRotateLeft32(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (RotateLeft32 x (Const32 [c]))
- // cond: c%32 == 0
- // result: x
+ // match: (RotateLeft8 x (Sub16 (Const16 [c]) y))
+ // cond: c&7 == 0
+ // result: (RotateLeft8 x (Neg16 <y.Type> y))
for {
x := v_0
- if v_1.Op != OpConst32 {
+ if v_1.Op != OpSub16 {
break
}
- c := auxIntToInt32(v_1.AuxInt)
- if !(c%32 == 0) {
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst16 {
break
}
- v.copyOf(x)
+ c := auxIntToInt16(v_1_0.AuxInt)
+ if !(c&7 == 0) {
+ break
+ }
+ v.reset(OpRotateLeft8)
+ v0 := b.NewValue0(v.Pos, OpNeg16, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
return true
}
- return false
-}
-func rewriteValuegeneric_OpRotateLeft64(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (RotateLeft64 x (Const64 [c]))
- // cond: c%64 == 0
- // result: x
+ // match: (RotateLeft8 x (Sub8 (Const8 [c]) y))
+ // cond: c&7 == 0
+ // result: (RotateLeft8 x (Neg8 <y.Type> y))
for {
x := v_0
- if v_1.Op != OpConst64 {
+ if v_1.Op != OpSub8 {
break
}
- c := auxIntToInt64(v_1.AuxInt)
- if !(c%64 == 0) {
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst8 {
break
}
- v.copyOf(x)
+ c := auxIntToInt8(v_1_0.AuxInt)
+ if !(c&7 == 0) {
+ break
+ }
+ v.reset(OpRotateLeft8)
+ v0 := b.NewValue0(v.Pos, OpNeg8, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
return true
}
- return false
-}
-func rewriteValuegeneric_OpRotateLeft8(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (RotateLeft8 x (Const8 [c]))
- // cond: c%8 == 0
- // result: x
+ // match: (RotateLeft8 x (Const64 <t> [c]))
+ // cond: config.PtrSize == 4
+ // result: (RotateLeft8 x (Const32 <t> [int32(c)]))
for {
x := v_0
- if v_1.Op != OpConst8 {
+ if v_1.Op != OpConst64 {
break
}
- c := auxIntToInt8(v_1.AuxInt)
- if !(c%8 == 0) {
+ t := v_1.Type
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(config.PtrSize == 4) {
break
}
- v.copyOf(x)
+ v.reset(OpRotateLeft8)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(x, v0)
return true
}
return false
break
}
x := v_0.Args[0]
- v.reset(OpSignExt8to16)
+ v.reset(OpSignExt8to16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc32to16 (SignExt16to32 x))
+ // result: x
+ for {
+ if v_0.Op != OpSignExt16to32 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc32to16 (And32 (Const32 [y]) x))
+ // cond: y&0xFFFF == 0xFFFF
+ // result: (Trunc32to16 x)
+ for {
+ if v_0.Op != OpAnd32 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ y := auxIntToInt32(v_0_0.AuxInt)
+ x := v_0_1
+ if !(y&0xFFFF == 0xFFFF) {
+ continue
+ }
+ v.reset(OpTrunc32to16)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpTrunc32to8(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc32to8 (Const32 [c]))
+ // result: (Const8 [int8(c)])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ return true
+ }
+ // match: (Trunc32to8 (ZeroExt8to32 x))
+ // result: x
+ for {
+ if v_0.Op != OpZeroExt8to32 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc32to8 (SignExt8to32 x))
+ // result: x
+ for {
+ if v_0.Op != OpSignExt8to32 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc32to8 (And32 (Const32 [y]) x))
+ // cond: y&0xFF == 0xFF
+ // result: (Trunc32to8 x)
+ for {
+ if v_0.Op != OpAnd32 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ y := auxIntToInt32(v_0_0.AuxInt)
+ x := v_0_1
+ if !(y&0xFF == 0xFF) {
+ continue
+ }
+ v.reset(OpTrunc32to8)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpTrunc64to16(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc64to16 (Const64 [c]))
+ // result: (Const16 [int16(c)])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ return true
+ }
+ // match: (Trunc64to16 (ZeroExt8to64 x))
+ // result: (ZeroExt8to16 x)
+ for {
+ if v_0.Op != OpZeroExt8to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpZeroExt8to16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc64to16 (ZeroExt16to64 x))
+ // result: x
+ for {
+ if v_0.Op != OpZeroExt16to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc64to16 (SignExt8to64 x))
+ // result: (SignExt8to16 x)
+ for {
+ if v_0.Op != OpSignExt8to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpSignExt8to16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc64to16 (SignExt16to64 x))
+ // result: x
+ for {
+ if v_0.Op != OpSignExt16to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc64to16 (And64 (Const64 [y]) x))
+ // cond: y&0xFFFF == 0xFFFF
+ // result: (Trunc64to16 x)
+ for {
+ if v_0.Op != OpAnd64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ y := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_1
+ if !(y&0xFFFF == 0xFFFF) {
+ continue
+ }
+ v.reset(OpTrunc64to16)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpTrunc64to32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc64to32 (Const64 [c]))
+ // result: (Const32 [int32(c)])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ return true
+ }
+ // match: (Trunc64to32 (ZeroExt8to64 x))
+ // result: (ZeroExt8to32 x)
+ for {
+ if v_0.Op != OpZeroExt8to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpZeroExt8to32)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc64to32 (ZeroExt16to64 x))
+ // result: (ZeroExt16to32 x)
+ for {
+ if v_0.Op != OpZeroExt16to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpZeroExt16to32)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc64to32 (ZeroExt32to64 x))
+ // result: x
+ for {
+ if v_0.Op != OpZeroExt32to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc64to32 (SignExt8to64 x))
+ // result: (SignExt8to32 x)
+ for {
+ if v_0.Op != OpSignExt8to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpSignExt8to32)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc64to32 (SignExt16to64 x))
+ // result: (SignExt16to32 x)
+ for {
+ if v_0.Op != OpSignExt16to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpSignExt16to32)
v.AddArg(x)
return true
}
- // match: (Trunc32to16 (SignExt16to32 x))
+ // match: (Trunc64to32 (SignExt32to64 x))
// result: x
for {
- if v_0.Op != OpSignExt16to32 {
+ if v_0.Op != OpSignExt32to64 {
break
}
x := v_0.Args[0]
v.copyOf(x)
return true
}
- // match: (Trunc32to16 (And32 (Const32 [y]) x))
- // cond: y&0xFFFF == 0xFFFF
- // result: (Trunc32to16 x)
+ // match: (Trunc64to32 (And64 (Const64 [y]) x))
+ // cond: y&0xFFFFFFFF == 0xFFFFFFFF
+ // result: (Trunc64to32 x)
for {
- if v_0.Op != OpAnd32 {
+ if v_0.Op != OpAnd64 {
break
}
_ = v_0.Args[1]
v_0_0 := v_0.Args[0]
v_0_1 := v_0.Args[1]
for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
- if v_0_0.Op != OpConst32 {
+ if v_0_0.Op != OpConst64 {
continue
}
- y := auxIntToInt32(v_0_0.AuxInt)
+ y := auxIntToInt64(v_0_0.AuxInt)
x := v_0_1
- if !(y&0xFFFF == 0xFFFF) {
+ if !(y&0xFFFFFFFF == 0xFFFFFFFF) {
continue
}
- v.reset(OpTrunc32to16)
+ v.reset(OpTrunc64to32)
v.AddArg(x)
return true
}
}
return false
}
-func rewriteValuegeneric_OpTrunc32to8(v *Value) bool {
+func rewriteValuegeneric_OpTrunc64to8(v *Value) bool {
v_0 := v.Args[0]
- // match: (Trunc32to8 (Const32 [c]))
+ // match: (Trunc64to8 (Const64 [c]))
// result: (Const8 [int8(c)])
for {
- if v_0.Op != OpConst32 {
+ if v_0.Op != OpConst64 {
break
}
- c := auxIntToInt32(v_0.AuxInt)
+ c := auxIntToInt64(v_0.AuxInt)
v.reset(OpConst8)
v.AuxInt = int8ToAuxInt(int8(c))
return true
}
- // match: (Trunc32to8 (ZeroExt8to32 x))
+ // match: (Trunc64to8 (ZeroExt8to64 x))
// result: x
for {
- if v_0.Op != OpZeroExt8to32 {
+ if v_0.Op != OpZeroExt8to64 {
break
}
x := v_0.Args[0]
v.copyOf(x)
return true
}
- // match: (Trunc32to8 (SignExt8to32 x))
+ // match: (Trunc64to8 (SignExt8to64 x))
// result: x
for {
- if v_0.Op != OpSignExt8to32 {
+ if v_0.Op != OpSignExt8to64 {
break
}
x := v_0.Args[0]
v.copyOf(x)
return true
}
- // match: (Trunc32to8 (And32 (Const32 [y]) x))
+ // match: (Trunc64to8 (And64 (Const64 [y]) x))
// cond: y&0xFF == 0xFF
- // result: (Trunc32to8 x)
+ // result: (Trunc64to8 x)
for {
- if v_0.Op != OpAnd32 {
+ if v_0.Op != OpAnd64 {
break
}
_ = v_0.Args[1]
v_0_0 := v_0.Args[0]
v_0_1 := v_0.Args[1]
for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
- if v_0_0.Op != OpConst32 {
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ y := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_1
+ if !(y&0xFF == 0xFF) {
+ continue
+ }
+ v.reset(OpTrunc64to8)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpXor16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Xor16 (Const16 [c]) (Const16 [d]))
+ // result: (Const16 [c^d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c ^ d)
+ return true
+ }
+ break
+ }
+ // match: (Xor16 x x)
+ // result: (Const16 [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ // match: (Xor16 (Const16 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Xor16 (Com16 x) x)
+ // result: (Const16 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom16 {
+ continue
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ continue
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(-1)
+ return true
+ }
+ break
+ }
+ // match: (Xor16 (Const16 [-1]) x)
+ // result: (Com16 x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpCom16)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Xor16 x (Xor16 x y))
+ // result: y
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpXor16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.copyOf(y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Xor16 (Xor16 i:(Const16 <t>) z) x)
+ // cond: (z.Op != OpConst16 && x.Op != OpConst16)
+ // result: (Xor16 i (Xor16 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpXor16 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst16 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst16 && x.Op != OpConst16) {
+ continue
+ }
+ v.reset(OpXor16)
+ v0 := b.NewValue0(v.Pos, OpXor16, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Xor16 (Const16 <t> [c]) (Xor16 (Const16 <t> [d]) x))
+ // result: (Xor16 (Const16 <t> [c^d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpXor16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt16(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpXor16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c ^ d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Xor16 (Lsh16x64 x z:(Const64 <t> [c])) (Rsh16Ux64 x (Const64 [d])))
+ // cond: c < 16 && d == 16-c && canRotate(config, 16)
+ // result: (RotateLeft16 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLsh16x64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(z.AuxInt)
+ if v_1.Op != OpRsh16Ux64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c < 16 && d == 16-c && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Xor16 left:(Lsh16x64 x y) right:(Rsh16Ux64 x (Sub64 (Const64 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh16x64 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh16Ux64 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub64 {
continue
}
- y := auxIntToInt32(v_0_0.AuxInt)
- x := v_0_1
- if !(y&0xFF == 0xFF) {
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
continue
}
- v.reset(OpTrunc32to8)
- v.AddArg(x)
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
return true
}
break
}
- return false
-}
-func rewriteValuegeneric_OpTrunc64to16(v *Value) bool {
- v_0 := v.Args[0]
- // match: (Trunc64to16 (Const64 [c]))
- // result: (Const16 [int16(c)])
+ // match: (Xor16 left:(Lsh16x32 x y) right:(Rsh16Ux32 x (Sub32 (Const32 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x y)
for {
- if v_0.Op != OpConst64 {
- break
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh16x32 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh16Ux32 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub32 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
}
- c := auxIntToInt64(v_0.AuxInt)
- v.reset(OpConst16)
- v.AuxInt = int16ToAuxInt(int16(c))
- return true
+ break
}
- // match: (Trunc64to16 (ZeroExt8to64 x))
- // result: (ZeroExt8to16 x)
+ // match: (Xor16 left:(Lsh16x16 x y) right:(Rsh16Ux16 x (Sub16 (Const16 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x y)
for {
- if v_0.Op != OpZeroExt8to64 {
- break
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh16x16 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh16Ux16 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub16 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
}
- x := v_0.Args[0]
- v.reset(OpZeroExt8to16)
- v.AddArg(x)
- return true
+ break
}
- // match: (Trunc64to16 (ZeroExt16to64 x))
- // result: x
+ // match: (Xor16 left:(Lsh16x8 x y) right:(Rsh16Ux8 x (Sub8 (Const8 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x y)
for {
- if v_0.Op != OpZeroExt16to64 {
- break
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh16x8 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh16Ux8 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub8 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
}
- x := v_0.Args[0]
- v.copyOf(x)
- return true
+ break
}
- // match: (Trunc64to16 (SignExt8to64 x))
- // result: (SignExt8to16 x)
+ // match: (Xor16 right:(Rsh16Ux64 x y) left:(Lsh16x64 x z:(Sub64 (Const64 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x z)
for {
- if v_0.Op != OpSignExt8to64 {
- break
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh16Ux64 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh16x64 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub64 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, z)
+ return true
}
- x := v_0.Args[0]
- v.reset(OpSignExt8to16)
- v.AddArg(x)
- return true
+ break
}
- // match: (Trunc64to16 (SignExt16to64 x))
- // result: x
+ // match: (Xor16 right:(Rsh16Ux32 x y) left:(Lsh16x32 x z:(Sub32 (Const32 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x z)
for {
- if v_0.Op != OpSignExt16to64 {
- break
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh16Ux32 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh16x32 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub32 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, z)
+ return true
}
- x := v_0.Args[0]
- v.copyOf(x)
- return true
+ break
}
- // match: (Trunc64to16 (And64 (Const64 [y]) x))
- // cond: y&0xFFFF == 0xFFFF
- // result: (Trunc64to16 x)
+ // match: (Xor16 right:(Rsh16Ux16 x y) left:(Lsh16x16 x z:(Sub16 (Const16 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x z)
for {
- if v_0.Op != OpAnd64 {
- break
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh16Ux16 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh16x16 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub16 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, z)
+ return true
}
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
- if v_0_0.Op != OpConst64 {
+ break
+ }
+ // match: (Xor16 right:(Rsh16Ux8 x y) left:(Lsh16x8 x z:(Sub8 (Const8 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh16Ux8 {
continue
}
- y := auxIntToInt64(v_0_0.AuxInt)
- x := v_0_1
- if !(y&0xFFFF == 0xFFFF) {
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh16x8 {
continue
}
- v.reset(OpTrunc64to16)
- v.AddArg(x)
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub8 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, z)
return true
}
break
}
return false
}
-func rewriteValuegeneric_OpTrunc64to32(v *Value) bool {
+func rewriteValuegeneric_OpXor32(v *Value) bool {
+ v_1 := v.Args[1]
v_0 := v.Args[0]
- // match: (Trunc64to32 (Const64 [c]))
- // result: (Const32 [int32(c)])
- for {
- if v_0.Op != OpConst64 {
- break
- }
- c := auxIntToInt64(v_0.AuxInt)
- v.reset(OpConst32)
- v.AuxInt = int32ToAuxInt(int32(c))
- return true
- }
- // match: (Trunc64to32 (ZeroExt8to64 x))
- // result: (ZeroExt8to32 x)
+ b := v.Block
+ config := b.Func.Config
+ // match: (Xor32 (Const32 [c]) (Const32 [d]))
+ // result: (Const32 [c^d])
for {
- if v_0.Op != OpZeroExt8to64 {
- break
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c ^ d)
+ return true
}
- x := v_0.Args[0]
- v.reset(OpZeroExt8to32)
- v.AddArg(x)
- return true
+ break
}
- // match: (Trunc64to32 (ZeroExt16to64 x))
- // result: (ZeroExt16to32 x)
+ // match: (Xor32 x x)
+ // result: (Const32 [0])
for {
- if v_0.Op != OpZeroExt16to64 {
+ x := v_0
+ if x != v_1 {
break
}
- x := v_0.Args[0]
- v.reset(OpZeroExt16to32)
- v.AddArg(x)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
return true
}
- // match: (Trunc64to32 (ZeroExt32to64 x))
+ // match: (Xor32 (Const32 [0]) x)
// result: x
for {
- if v_0.Op != OpZeroExt32to64 {
- break
- }
- x := v_0.Args[0]
- v.copyOf(x)
- return true
- }
- // match: (Trunc64to32 (SignExt8to64 x))
- // result: (SignExt8to32 x)
- for {
- if v_0.Op != OpSignExt8to64 {
- break
- }
- x := v_0.Args[0]
- v.reset(OpSignExt8to32)
- v.AddArg(x)
- return true
- }
- // match: (Trunc64to32 (SignExt16to64 x))
- // result: (SignExt16to32 x)
- for {
- if v_0.Op != OpSignExt16to64 {
- break
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
}
- x := v_0.Args[0]
- v.reset(OpSignExt16to32)
- v.AddArg(x)
- return true
+ break
}
- // match: (Trunc64to32 (SignExt32to64 x))
- // result: x
+ // match: (Xor32 (Com32 x) x)
+ // result: (Const32 [-1])
for {
- if v_0.Op != OpSignExt32to64 {
- break
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom32 {
+ continue
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ continue
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
}
- x := v_0.Args[0]
- v.copyOf(x)
- return true
+ break
}
- // match: (Trunc64to32 (And64 (Const64 [y]) x))
- // cond: y&0xFFFFFFFF == 0xFFFFFFFF
- // result: (Trunc64to32 x)
+ // match: (Xor32 (Const32 [-1]) x)
+ // result: (Com32 x)
for {
- if v_0.Op != OpAnd64 {
- break
- }
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
- if v_0_0.Op != OpConst64 {
- continue
- }
- y := auxIntToInt64(v_0_0.AuxInt)
- x := v_0_1
- if !(y&0xFFFFFFFF == 0xFFFFFFFF) {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != -1 {
continue
}
- v.reset(OpTrunc64to32)
+ x := v_1
+ v.reset(OpCom32)
v.AddArg(x)
return true
}
break
}
- return false
-}
-func rewriteValuegeneric_OpTrunc64to8(v *Value) bool {
- v_0 := v.Args[0]
- // match: (Trunc64to8 (Const64 [c]))
- // result: (Const8 [int8(c)])
+ // match: (Xor32 x (Xor32 x y))
+ // result: y
for {
- if v_0.Op != OpConst64 {
- break
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpXor32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.copyOf(y)
+ return true
+ }
}
- c := auxIntToInt64(v_0.AuxInt)
- v.reset(OpConst8)
- v.AuxInt = int8ToAuxInt(int8(c))
- return true
+ break
}
- // match: (Trunc64to8 (ZeroExt8to64 x))
- // result: x
+ // match: (Xor32 (Xor32 i:(Const32 <t>) z) x)
+ // cond: (z.Op != OpConst32 && x.Op != OpConst32)
+ // result: (Xor32 i (Xor32 <t> z x))
for {
- if v_0.Op != OpZeroExt8to64 {
- break
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpXor32 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst32 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst32 && x.Op != OpConst32) {
+ continue
+ }
+ v.reset(OpXor32)
+ v0 := b.NewValue0(v.Pos, OpXor32, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
}
- x := v_0.Args[0]
- v.copyOf(x)
- return true
+ break
}
- // match: (Trunc64to8 (SignExt8to64 x))
- // result: x
+ // match: (Xor32 (Const32 <t> [c]) (Xor32 (Const32 <t> [d]) x))
+ // result: (Xor32 (Const32 <t> [c^d]) x)
for {
- if v_0.Op != OpSignExt8to64 {
- break
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpXor32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt32(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpXor32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c ^ d)
+ v.AddArg2(v0, x)
+ return true
+ }
}
- x := v_0.Args[0]
- v.copyOf(x)
- return true
+ break
}
- // match: (Trunc64to8 (And64 (Const64 [y]) x))
- // cond: y&0xFF == 0xFF
- // result: (Trunc64to8 x)
+ // match: (Xor32 (Lsh32x64 x z:(Const64 <t> [c])) (Rsh32Ux64 x (Const64 [d])))
+ // cond: c < 32 && d == 32-c && canRotate(config, 32)
+ // result: (RotateLeft32 x z)
for {
- if v_0.Op != OpAnd64 {
- break
- }
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
- if v_0_0.Op != OpConst64 {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLsh32x64 {
continue
}
- y := auxIntToInt64(v_0_0.AuxInt)
- x := v_0_1
- if !(y&0xFF == 0xFF) {
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpConst64 {
continue
}
- v.reset(OpTrunc64to8)
- v.AddArg(x)
+ c := auxIntToInt64(z.AuxInt)
+ if v_1.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c < 32 && d == 32-c && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, z)
return true
}
break
}
- return false
-}
-func rewriteValuegeneric_OpXor16(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (Xor16 (Const16 [c]) (Const16 [d]))
- // result: (Const16 [c^d])
+ // match: (Xor32 left:(Lsh32x64 x y) right:(Rsh32Ux64 x (Sub64 (Const64 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpConst16 {
+ left := v_0
+ if left.Op != OpLsh32x64 {
continue
}
- c := auxIntToInt16(v_0.AuxInt)
- if v_1.Op != OpConst16 {
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh32Ux64 {
continue
}
- d := auxIntToInt16(v_1.AuxInt)
- v.reset(OpConst16)
- v.AuxInt = int16ToAuxInt(c ^ d)
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub64 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
return true
}
break
}
- // match: (Xor16 x x)
- // result: (Const16 [0])
+ // match: (Xor32 left:(Lsh32x32 x y) right:(Rsh32Ux32 x (Sub32 (Const32 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x y)
for {
- x := v_0
- if x != v_1 {
- break
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh32x32 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh32Ux32 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub32 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
+ return true
}
- v.reset(OpConst16)
- v.AuxInt = int16ToAuxInt(0)
- return true
+ break
}
- // match: (Xor16 (Const16 [0]) x)
- // result: x
+ // match: (Xor32 left:(Lsh32x16 x y) right:(Rsh32Ux16 x (Sub16 (Const16 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ left := v_0
+ if left.Op != OpLsh32x16 {
continue
}
- x := v_1
- v.copyOf(x)
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh32Ux16 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub16 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
return true
}
break
}
- // match: (Xor16 (Com16 x) x)
- // result: (Const16 [-1])
+ // match: (Xor32 left:(Lsh32x8 x y) right:(Rsh32Ux8 x (Sub8 (Const8 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpCom16 {
+ left := v_0
+ if left.Op != OpLsh32x8 {
continue
}
- x := v_0.Args[0]
- if x != v_1 {
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh32Ux8 {
continue
}
- v.reset(OpConst16)
- v.AuxInt = int16ToAuxInt(-1)
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub8 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
return true
}
break
}
- // match: (Xor16 (Const16 [-1]) x)
- // result: (Com16 x)
+ // match: (Xor32 right:(Rsh32Ux64 x y) left:(Lsh32x64 x z:(Sub64 (Const64 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x z)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != -1 {
+ right := v_0
+ if right.Op != OpRsh32Ux64 {
continue
}
- x := v_1
- v.reset(OpCom16)
- v.AddArg(x)
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh32x64 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub64 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, z)
return true
}
break
}
- // match: (Xor16 x (Xor16 x y))
- // result: y
+ // match: (Xor32 right:(Rsh32Ux32 x y) left:(Lsh32x32 x z:(Sub32 (Const32 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x z)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- x := v_0
- if v_1.Op != OpXor16 {
+ right := v_0
+ if right.Op != OpRsh32Ux32 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh32x32 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
continue
}
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
- if x != v_1_0 {
- continue
- }
- y := v_1_1
- v.copyOf(y)
- return true
+ z := left.Args[1]
+ if z.Op != OpSub32 {
+ continue
}
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, z)
+ return true
}
break
}
- // match: (Xor16 (Xor16 i:(Const16 <t>) z) x)
- // cond: (z.Op != OpConst16 && x.Op != OpConst16)
- // result: (Xor16 i (Xor16 <t> z x))
+ // match: (Xor32 right:(Rsh32Ux16 x y) left:(Lsh32x16 x z:(Sub16 (Const16 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x z)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpXor16 {
+ right := v_0
+ if right.Op != OpRsh32Ux16 {
continue
}
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
- i := v_0_0
- if i.Op != OpConst16 {
- continue
- }
- t := i.Type
- z := v_0_1
- x := v_1
- if !(z.Op != OpConst16 && x.Op != OpConst16) {
- continue
- }
- v.reset(OpXor16)
- v0 := b.NewValue0(v.Pos, OpXor16, t)
- v0.AddArg2(z, x)
- v.AddArg2(i, v0)
- return true
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh32x16 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub16 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
}
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, z)
+ return true
}
break
}
- // match: (Xor16 (Const16 <t> [c]) (Xor16 (Const16 <t> [d]) x))
- // result: (Xor16 (Const16 <t> [c^d]) x)
+ // match: (Xor32 right:(Rsh32Ux8 x y) left:(Lsh32x8 x z:(Sub8 (Const8 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x z)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpConst16 {
+ right := v_0
+ if right.Op != OpRsh32Ux8 {
continue
}
- t := v_0.Type
- c := auxIntToInt16(v_0.AuxInt)
- if v_1.Op != OpXor16 {
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh32x8 {
continue
}
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
- if v_1_0.Op != OpConst16 || v_1_0.Type != t {
- continue
- }
- d := auxIntToInt16(v_1_0.AuxInt)
- x := v_1_1
- v.reset(OpXor16)
- v0 := b.NewValue0(v.Pos, OpConst16, t)
- v0.AuxInt = int16ToAuxInt(c ^ d)
- v.AddArg2(v0, x)
- return true
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub8 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
}
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, z)
+ return true
}
break
}
return false
}
-func rewriteValuegeneric_OpXor32(v *Value) bool {
+func rewriteValuegeneric_OpXor64(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- // match: (Xor32 (Const32 [c]) (Const32 [d]))
- // result: (Const32 [c^d])
+ config := b.Func.Config
+ // match: (Xor64 (Const64 [c]) (Const64 [d]))
+ // result: (Const64 [c^d])
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpConst32 {
+ if v_0.Op != OpConst64 {
continue
}
- c := auxIntToInt32(v_0.AuxInt)
- if v_1.Op != OpConst32 {
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
continue
}
- d := auxIntToInt32(v_1.AuxInt)
- v.reset(OpConst32)
- v.AuxInt = int32ToAuxInt(c ^ d)
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c ^ d)
return true
}
break
}
- // match: (Xor32 x x)
- // result: (Const32 [0])
+ // match: (Xor64 x x)
+ // result: (Const64 [0])
for {
x := v_0
if x != v_1 {
break
}
- v.reset(OpConst32)
- v.AuxInt = int32ToAuxInt(0)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
return true
}
- // match: (Xor32 (Const32 [0]) x)
+ // match: (Xor64 (Const64 [0]) x)
// result: x
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
continue
}
x := v_1
}
break
}
- // match: (Xor32 (Com32 x) x)
- // result: (Const32 [-1])
+ // match: (Xor64 (Com64 x) x)
+ // result: (Const64 [-1])
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpCom32 {
+ if v_0.Op != OpCom64 {
continue
}
x := v_0.Args[0]
if x != v_1 {
continue
}
- v.reset(OpConst32)
- v.AuxInt = int32ToAuxInt(-1)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(-1)
return true
}
break
}
- // match: (Xor32 (Const32 [-1]) x)
- // result: (Com32 x)
+ // match: (Xor64 (Const64 [-1]) x)
+ // result: (Com64 x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != -1 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != -1 {
continue
}
x := v_1
- v.reset(OpCom32)
+ v.reset(OpCom64)
v.AddArg(x)
return true
}
break
}
- // match: (Xor32 x (Xor32 x y))
+ // match: (Xor64 x (Xor64 x y))
// result: y
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
- if v_1.Op != OpXor32 {
+ if v_1.Op != OpXor64 {
continue
}
_ = v_1.Args[1]
}
break
}
- // match: (Xor32 (Xor32 i:(Const32 <t>) z) x)
- // cond: (z.Op != OpConst32 && x.Op != OpConst32)
- // result: (Xor32 i (Xor32 <t> z x))
+ // match: (Xor64 (Xor64 i:(Const64 <t>) z) x)
+ // cond: (z.Op != OpConst64 && x.Op != OpConst64)
+ // result: (Xor64 i (Xor64 <t> z x))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpXor32 {
+ if v_0.Op != OpXor64 {
continue
}
_ = v_0.Args[1]
v_0_1 := v_0.Args[1]
for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
i := v_0_0
- if i.Op != OpConst32 {
+ if i.Op != OpConst64 {
continue
}
t := i.Type
z := v_0_1
x := v_1
- if !(z.Op != OpConst32 && x.Op != OpConst32) {
+ if !(z.Op != OpConst64 && x.Op != OpConst64) {
continue
}
- v.reset(OpXor32)
- v0 := b.NewValue0(v.Pos, OpXor32, t)
+ v.reset(OpXor64)
+ v0 := b.NewValue0(v.Pos, OpXor64, t)
v0.AddArg2(z, x)
v.AddArg2(i, v0)
return true
}
break
}
- // match: (Xor32 (Const32 <t> [c]) (Xor32 (Const32 <t> [d]) x))
- // result: (Xor32 (Const32 <t> [c^d]) x)
+ // match: (Xor64 (Const64 <t> [c]) (Xor64 (Const64 <t> [d]) x))
+ // result: (Xor64 (Const64 <t> [c^d]) x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpConst32 {
+ if v_0.Op != OpConst64 {
continue
}
t := v_0.Type
- c := auxIntToInt32(v_0.AuxInt)
- if v_1.Op != OpXor32 {
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpXor64 {
continue
}
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
v_1_1 := v_1.Args[1]
for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
- if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
continue
}
- d := auxIntToInt32(v_1_0.AuxInt)
+ d := auxIntToInt64(v_1_0.AuxInt)
x := v_1_1
- v.reset(OpXor32)
- v0 := b.NewValue0(v.Pos, OpConst32, t)
- v0.AuxInt = int32ToAuxInt(c ^ d)
+ v.reset(OpXor64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c ^ d)
v.AddArg2(v0, x)
return true
}
}
break
}
- return false
-}
-func rewriteValuegeneric_OpXor64(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (Xor64 (Const64 [c]) (Const64 [d]))
- // result: (Const64 [c^d])
+ // match: (Xor64 (Lsh64x64 x z:(Const64 <t> [c])) (Rsh64Ux64 x (Const64 [d])))
+ // cond: c < 64 && d == 64-c && canRotate(config, 64)
+ // result: (RotateLeft64 x z)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpConst64 {
+ if v_0.Op != OpLsh64x64 {
continue
}
- c := auxIntToInt64(v_0.AuxInt)
- if v_1.Op != OpConst64 {
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpConst64 {
continue
}
- d := auxIntToInt64(v_1.AuxInt)
- v.reset(OpConst64)
- v.AuxInt = int64ToAuxInt(c ^ d)
+ c := auxIntToInt64(z.AuxInt)
+ if v_1.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c < 64 && d == 64-c && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, z)
return true
}
break
}
- // match: (Xor64 x x)
- // result: (Const64 [0])
+ // match: (Xor64 left:(Lsh64x64 x y) right:(Rsh64Ux64 x (Sub64 (Const64 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x y)
for {
- x := v_0
- if x != v_1 {
- break
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh64x64 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub64 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
+ return true
}
- v.reset(OpConst64)
- v.AuxInt = int64ToAuxInt(0)
- return true
+ break
}
- // match: (Xor64 (Const64 [0]) x)
- // result: x
+ // match: (Xor64 left:(Lsh64x32 x y) right:(Rsh64Ux32 x (Sub32 (Const32 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ left := v_0
+ if left.Op != OpLsh64x32 {
continue
}
- x := v_1
- v.copyOf(x)
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh64Ux32 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub32 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
return true
}
break
}
- // match: (Xor64 (Com64 x) x)
- // result: (Const64 [-1])
+ // match: (Xor64 left:(Lsh64x16 x y) right:(Rsh64Ux16 x (Sub16 (Const16 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpCom64 {
+ left := v_0
+ if left.Op != OpLsh64x16 {
continue
}
- x := v_0.Args[0]
- if x != v_1 {
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh64Ux16 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub16 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Xor64 left:(Lsh64x8 x y) right:(Rsh64Ux8 x (Sub8 (Const8 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh64x8 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh64Ux8 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub8 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
continue
}
- v.reset(OpConst64)
- v.AuxInt = int64ToAuxInt(-1)
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
return true
}
break
}
- // match: (Xor64 (Const64 [-1]) x)
- // result: (Com64 x)
+ // match: (Xor64 right:(Rsh64Ux64 x y) left:(Lsh64x64 x z:(Sub64 (Const64 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x z)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != -1 {
+ right := v_0
+ if right.Op != OpRsh64Ux64 {
continue
}
- x := v_1
- v.reset(OpCom64)
- v.AddArg(x)
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh64x64 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub64 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, z)
return true
}
break
}
- // match: (Xor64 x (Xor64 x y))
- // result: y
+ // match: (Xor64 right:(Rsh64Ux32 x y) left:(Lsh64x32 x z:(Sub32 (Const32 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x z)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- x := v_0
- if v_1.Op != OpXor64 {
+ right := v_0
+ if right.Op != OpRsh64Ux32 {
continue
}
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
- if x != v_1_0 {
- continue
- }
- y := v_1_1
- v.copyOf(y)
- return true
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh64x32 {
+ continue
}
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub32 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, z)
+ return true
}
break
}
- // match: (Xor64 (Xor64 i:(Const64 <t>) z) x)
- // cond: (z.Op != OpConst64 && x.Op != OpConst64)
- // result: (Xor64 i (Xor64 <t> z x))
+ // match: (Xor64 right:(Rsh64Ux16 x y) left:(Lsh64x16 x z:(Sub16 (Const16 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x z)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpXor64 {
+ right := v_0
+ if right.Op != OpRsh64Ux16 {
continue
}
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
- i := v_0_0
- if i.Op != OpConst64 {
- continue
- }
- t := i.Type
- z := v_0_1
- x := v_1
- if !(z.Op != OpConst64 && x.Op != OpConst64) {
- continue
- }
- v.reset(OpXor64)
- v0 := b.NewValue0(v.Pos, OpXor64, t)
- v0.AddArg2(z, x)
- v.AddArg2(i, v0)
- return true
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh64x16 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub16 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
}
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, z)
+ return true
}
break
}
- // match: (Xor64 (Const64 <t> [c]) (Xor64 (Const64 <t> [d]) x))
- // result: (Xor64 (Const64 <t> [c^d]) x)
+ // match: (Xor64 right:(Rsh64Ux8 x y) left:(Lsh64x8 x z:(Sub8 (Const8 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x z)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpConst64 {
+ right := v_0
+ if right.Op != OpRsh64Ux8 {
continue
}
- t := v_0.Type
- c := auxIntToInt64(v_0.AuxInt)
- if v_1.Op != OpXor64 {
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh64x8 {
continue
}
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
- if v_1_0.Op != OpConst64 || v_1_0.Type != t {
- continue
- }
- d := auxIntToInt64(v_1_0.AuxInt)
- x := v_1_1
- v.reset(OpXor64)
- v0 := b.NewValue0(v.Pos, OpConst64, t)
- v0.AuxInt = int64ToAuxInt(c ^ d)
- v.AddArg2(v0, x)
- return true
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub8 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
}
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, z)
+ return true
}
break
}
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
+ config := b.Func.Config
// match: (Xor8 (Const8 [c]) (Const8 [d]))
// result: (Const8 [c^d])
for {
}
break
}
+ // match: (Xor8 (Lsh8x64 x z:(Const64 <t> [c])) (Rsh8Ux64 x (Const64 [d])))
+ // cond: c < 8 && d == 8-c && canRotate(config, 8)
+ // result: (RotateLeft8 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLsh8x64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(z.AuxInt)
+ if v_1.Op != OpRsh8Ux64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c < 8 && d == 8-c && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Xor8 left:(Lsh8x64 x y) right:(Rsh8Ux64 x (Sub64 (Const64 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh8x64 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh8Ux64 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub64 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Xor8 left:(Lsh8x32 x y) right:(Rsh8Ux32 x (Sub32 (Const32 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh8x32 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh8Ux32 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub32 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Xor8 left:(Lsh8x16 x y) right:(Rsh8Ux16 x (Sub16 (Const16 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh8x16 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh8Ux16 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub16 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Xor8 left:(Lsh8x8 x y) right:(Rsh8Ux8 x (Sub8 (Const8 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh8x8 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh8Ux8 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub8 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Xor8 right:(Rsh8Ux64 x y) left:(Lsh8x64 x z:(Sub64 (Const64 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh8Ux64 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh8x64 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub64 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Xor8 right:(Rsh8Ux32 x y) left:(Lsh8x32 x z:(Sub32 (Const32 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh8Ux32 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh8x32 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub32 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Xor8 right:(Rsh8Ux16 x y) left:(Lsh8x16 x z:(Sub16 (Const16 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh8Ux16 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh8x16 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub16 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Xor8 right:(Rsh8Ux8 x y) left:(Lsh8x8 x z:(Sub8 (Const8 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh8Ux8 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh8x8 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub8 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
return false
}
func rewriteValuegeneric_OpZero(v *Value) bool {