// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// GOPPC64 values indicate power8, power9, etc.
+// That means the code is compiled for that target,
+// and will not run on earlier targets.
+//
// Lowering arithmetic
(Add(Ptr|64|32|16|8) ...) => (ADD ...)
(Add64F ...) => (FADD ...)
(Add32F ...) => (FADDS ...)
(MTVSRD x:(MOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (FMOVDload [off] {sym} ptr mem)
(MFVSRD x:(FMOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVDload [off] {sym} ptr mem)
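+// The two rules above fold the cross-register-file move into the load itself:
+// when the only use of a MOVDload is MTVSRD (or of an FMOVDload is MFVSRD),
+// load directly into the destination register class and drop the move.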
+// Rules for MOV* or FMOV* ops determine when indexed (MOV*loadidx or MOV*storeidx)
+// or non-indexed (MOV*load or MOV*store) should be used. Indexed instructions
+// require an extra instruction and register to load the index, so non-indexed is preferred.
+// Indexed ops generate indexed load or store instructions for all GOPPC64 values.
+// Non-indexed ops generate DS-form loads and stores when the offset fits in 16 bits,
+// and on power8 and power9, a multiple of 4 is required for MOVW and MOVD ops.
+// On power10, prefixed loads and stores can be used for offsets > 16 bits and <= 32 bits,
+// and support for PC-relative addressing must be available if relocation is needed.
+// On power10, the assembler will determine when to use DS-form or prefixed
+// instructions for non-indexed ops depending on the value of the offset.
+//
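+// For reference, a minimal sketch of the guard helpers used below (assumed
+// shape; the real definitions live in the compiler's rewrite support code):
+//
+//	func is16Bit(n int64) bool { return n == int64(int16(n)) }
+//	func is32Bit(n int64) bool { return n == int64(int32(n)) }
+//
+//	// assumption: PCRel requires power10 plus OS/linker support
+//	func supportsPPC64PCRel() bool { return buildcfg.GOPPC64 >= 10 /* && platform checks */ }
+//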
// Fold offsets for stores.
-(MOV(D|W|H|B)store [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(int64(off1)+off2) => (MOV(D|W|H|B)store [off1+int32(off2)] {sym} x val mem)
+(MOV(D|W|H|B)store [off1] {sym} (ADDconst [off2] x) val mem) && (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) => (MOV(D|W|H|B)store [off1+int32(off2)] {sym} x val mem)
-(FMOV(S|D)store [off1] {sym} (ADDconst [off2] ptr) val mem) && is16Bit(int64(off1)+off2) => (FMOV(S|D)store [off1+int32(off2)] {sym} ptr val mem)
+(FMOV(S|D)store [off1] {sym} (ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) => (FMOV(S|D)store [off1+int32(off2)] {sym} ptr val mem)
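+// Example: (MOVDstore [8] {sym} (ADDconst [40000] x) val mem) folds to
+// (MOVDstore [40008] {sym} x val mem) only when supportsPPC64PCRel() is
+// true, since 40008 does not fit in a signed 16-bit displacement.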
// Fold address into load/store.
-// The assembler needs to generate several instructions and use
+// If power10 with PCRel is not available, then
+// the assembler needs to generate several instructions and use
// temp register for accessing global, and each time it will reload
-// the temp register. So don't fold address of global, unless there
-// is only one use.
+// the temp register. So in that case, don't fold the address of a global
+// if there is more than one use.
(MOV(B|H|W|D)store [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
- && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
+ && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) =>
(MOV(B|H|W|D)store [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(FMOV(S|D)store [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
- && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
+ && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) =>
(FMOV(S|D)store [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOV(B|H|W)Zload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
+ && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) =>
(MOV(B|H|W)Zload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOV(H|W|D)load [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
+ && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) =>
(MOV(H|W|D)load [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(FMOV(S|D)load [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
+ && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) =>
(FMOV(S|D)load [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
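+// Example: (MOVDload [8] {sym1} p:(MOVDaddr [16] {sym2} ptr) mem) becomes
+// (MOVDload [24] {mergeSym(sym1,sym2)} ptr mem); 24 fits in 16 bits, so the
+// fold applies on all targets as long as ptr is not SB or p has one use.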
// Fold offsets for loads.
-(FMOV(S|D)load [off1] {sym} (ADDconst [off2] ptr) mem) && is16Bit(int64(off1)+off2) => (FMOV(S|D)load [off1+int32(off2)] {sym} ptr mem)
+(FMOV(S|D)load [off1] {sym} (ADDconst [off2] ptr) mem) && (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) => (FMOV(S|D)load [off1+int32(off2)] {sym} ptr mem)
-(MOV(D|W|WZ|H|HZ|BZ)load [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) => (MOV(D|W|WZ|H|HZ|BZ)load [off1+int32(off2)] {sym} x mem)
+(MOV(D|W|WZ|H|HZ|BZ)load [off1] {sym} (ADDconst [off2] x) mem) && (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) => (MOV(D|W|WZ|H|HZ|BZ)load [off1+int32(off2)] {sym} x mem)
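+// Example: (MOVWZload [4] {sym} (ADDconst [65536] x) mem) folds to
+// (MOVWZload [65540] {sym} x mem) only when supportsPPC64PCRel() is true,
+// since 65540 does not fit in 16 bits.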
// Determine load + addressing that can be done as a register indexed load
(MOV(D|W|WZ|H|HZ|BZ)load [0] {sym} p:(ADD ptr idx) mem) && sym == nil && p.Uses == 1 => (MOV(D|W|WZ|H|HZ|BZ)loadidx ptr idx mem)
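+// Example: (MOVBZload [0] p:(ADD ptr idx) mem) with no symbol and
+// p.Uses == 1 becomes (MOVBZloadidx ptr idx mem), a single lbzx.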
-// Determine if there is benefit to using a non-indexed load, since that saves the load
-// of the index register. With MOVDload and MOVWload, there is no benefit if the offset
-// value is not a multiple of 4, since that results in an extra instruction in the base
-// register address computation.
-(MOV(D|W)loadidx ptr (MOVDconst [c]) mem) && is16Bit(c) && c%4 == 0 => (MOV(D|W)load [int32(c)] ptr mem)
-(MOV(WZ|H|HZ|BZ)loadidx ptr (MOVDconst [c]) mem) && is16Bit(c) => (MOV(WZ|H|HZ|BZ)load [int32(c)] ptr mem)
-(MOV(D|W)loadidx (MOVDconst [c]) ptr mem) && is16Bit(c) && c%4 == 0 => (MOV(D|W)load [int32(c)] ptr mem)
-(MOV(WZ|H|HZ|BZ)loadidx (MOVDconst [c]) ptr mem) && is16Bit(c) => (MOV(WZ|H|HZ|BZ)load [int32(c)] ptr mem)
+// See comments above concerning selection of indexed vs. non-indexed ops.
+// These cases have no symbol and thus no relocation, so prefixed forms
+// only require GOPPC64 >= 10 rather than full PCRel support.
+(MOV(D|W)loadidx ptr (MOVDconst [c]) mem) && ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(D|W)load [int32(c)] ptr mem)
+(MOV(WZ|H|HZ|BZ)loadidx ptr (MOVDconst [c]) mem) && (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(WZ|H|HZ|BZ)load [int32(c)] ptr mem)
+(MOV(D|W)loadidx (MOVDconst [c]) ptr mem) && ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(D|W)load [int32(c)] ptr mem)
+(MOV(WZ|H|HZ|BZ)loadidx (MOVDconst [c]) ptr mem) && (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(WZ|H|HZ|BZ)load [int32(c)] ptr mem)
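+// Example: (MOVDloadidx ptr (MOVDconst [24]) mem) becomes
+// (MOVDload [24] ptr mem) on all targets, since 24 fits in 16 bits and is a
+// multiple of 4; a constant like 100000 qualifies only when GOPPC64 >= 10.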
// Store of zero => storezero
(MOV(D|W|H|B)store [off] {sym} ptr (MOVDconst [0]) mem) => (MOV(D|W|H|B)storezero [off] {sym} ptr mem)
// Fold offsets for storezero
-(MOV(D|W|H|B)storezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) =>
+(MOV(D|W|H|B)storezero [off1] {sym} (ADDconst [off2] x) mem) && ((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1)+off2))) =>
(MOV(D|W|H|B)storezero [off1+int32(off2)] {sym} x mem)
// Stores with addressing that can be done as indexed stores
(MOV(D|W|H|B)store [0] {sym} p:(ADD ptr idx) val mem) && sym == nil && p.Uses == 1 => (MOV(D|W|H|B)storeidx ptr idx val mem)
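+// Example: (MOVWstore [0] p:(ADD ptr idx) val mem) with no symbol and
+// p.Uses == 1 becomes (MOVWstoreidx ptr idx val mem), a single stwx.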
-// Stores with constant index values can be done without indexed instructions
-// No need to lower the idx cases if c%4 is not 0
-(MOVDstoreidx ptr (MOVDconst [c]) val mem) && is16Bit(c) && c%4 == 0 => (MOVDstore [int32(c)] ptr val mem)
-(MOV(W|H|B)storeidx ptr (MOVDconst [c]) val mem) && is16Bit(c) => (MOV(W|H|B)store [int32(c)] ptr val mem)
-(MOVDstoreidx (MOVDconst [c]) ptr val mem) && is16Bit(c) && c%4 == 0 => (MOVDstore [int32(c)] ptr val mem)
-(MOV(W|H|B)storeidx (MOVDconst [c]) ptr val mem) && is16Bit(c) => (MOV(W|H|B)store [int32(c)] ptr val mem)
+(MOVDstoreidx ptr (MOVDconst [c]) val mem) && ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOVDstore [int32(c)] ptr val mem)
+(MOV(W|H|B)storeidx ptr (MOVDconst [c]) val mem) && (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(W|H|B)store [int32(c)] ptr val mem)
+(MOVDstoreidx (MOVDconst [c]) ptr val mem) && ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOVDstore [int32(c)] ptr val mem)
+(MOV(W|H|B)storeidx (MOVDconst [c]) ptr val mem) && (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(W|H|B)store [int32(c)] ptr val mem)
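+// Example: (MOVWstoreidx ptr (MOVDconst [70000]) val mem) is rewritten to
+// (MOVWstore [70000] ptr val mem) only when GOPPC64 >= 10, since 70000 does
+// not fit in a 16-bit displacement and needs a prefixed store.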
// Fold symbols into storezero
(MOV(D|W|H|B)storezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
- && (x.Op != OpSB || p.Uses == 1) =>
+ && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) =>
(MOV(D|W|H|B)storezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
// atomic intrinsics
return true
}
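+ // Each match/cond/result triple below is emitted by the rulegen tool into
+ // a rewrite function. A simplified sketch of that shape (an assumption;
+ // see the real rewriteValuePPC64_Op* functions) is:
+ //
+ //	func rewriteValuePPC64_OpPPC64FMOVDload(v *Value) bool {
+ //		v_1 := v.Args[1]
+ //		v_0 := v.Args[0]
+ //		// try each clause in order; return true once a rewrite fires
+ //		...
+ //		return false
+ //	}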
// match: (FMOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym2 := auxToSym(p.Aux)
ptr := p.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break
}
v.reset(OpPPC64FMOVDload)
return true
}
// match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond: is16Bit(int64(off1)+off2)
+ // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
// result: (FMOVDload [off1+int32(off2)] {sym} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
off2 := auxIntToInt64(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(is16Bit(int64(off1) + off2)) {
+ if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
break
}
v.reset(OpPPC64FMOVDload)
return true
}
// match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- // cond: is16Bit(int64(off1)+off2)
+ // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
// result: (FMOVDstore [off1+int32(off2)] {sym} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
ptr := v_0.Args[0]
val := v_1
mem := v_2
- if !(is16Bit(int64(off1) + off2)) {
+ if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
break
}
v.reset(OpPPC64FMOVDstore)
return true
}
// match: (FMOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
ptr := p.Args[0]
val := v_1
mem := v_2
- if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break
}
v.reset(OpPPC64FMOVDstore)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (FMOVSload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym2 := auxToSym(p.Aux)
ptr := p.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break
}
v.reset(OpPPC64FMOVSload)
return true
}
// match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond: is16Bit(int64(off1)+off2)
+ // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
// result: (FMOVSload [off1+int32(off2)] {sym} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
off2 := auxIntToInt64(v_0.AuxInt)
ptr := v_0.Args[0]
mem := v_1
- if !(is16Bit(int64(off1) + off2)) {
+ if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
break
}
v.reset(OpPPC64FMOVSload)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- // cond: is16Bit(int64(off1)+off2)
+ // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
// result: (FMOVSstore [off1+int32(off2)] {sym} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
ptr := v_0.Args[0]
val := v_1
mem := v_2
- if !(is16Bit(int64(off1) + off2)) {
+ if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
break
}
v.reset(OpPPC64FMOVSstore)
return true
}
// match: (FMOVSstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
ptr := p.Args[0]
val := v_1
mem := v_2
- if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break
}
v.reset(OpPPC64FMOVSstore)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVBZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym2 := auxToSym(p.Aux)
ptr := p.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break
}
v.reset(OpPPC64MOVBZload)
return true
}
// match: (MOVBZload [off1] {sym} (ADDconst [off2] x) mem)
- // cond: is16Bit(int64(off1)+off2)
+ // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
// result: (MOVBZload [off1+int32(off2)] {sym} x mem)
for {
off1 := auxIntToInt32(v.AuxInt)
off2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
mem := v_1
- if !(is16Bit(int64(off1) + off2)) {
+ if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
break
}
v.reset(OpPPC64MOVBZload)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVBZloadidx ptr (MOVDconst [c]) mem)
- // cond: is16Bit(c)
+ // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVBZload [int32(c)] ptr mem)
for {
ptr := v_0
}
c := auxIntToInt64(v_1.AuxInt)
mem := v_2
- if !(is16Bit(c)) {
+ if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break
}
v.reset(OpPPC64MOVBZload)
return true
}
// match: (MOVBZloadidx (MOVDconst [c]) ptr mem)
- // cond: is16Bit(c)
+ // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVBZload [int32(c)] ptr mem)
for {
if v_0.Op != OpPPC64MOVDconst {
c := auxIntToInt64(v_0.AuxInt)
ptr := v_1
mem := v_2
- if !(is16Bit(c)) {
+ if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break
}
v.reset(OpPPC64MOVBZload)
b := v.Block
typ := &b.Func.Config.Types
// match: (MOVBstore [off1] {sym} (ADDconst [off2] x) val mem)
- // cond: is16Bit(int64(off1)+off2)
+ // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
// result: (MOVBstore [off1+int32(off2)] {sym} x val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
x := v_0.Args[0]
val := v_1
mem := v_2
- if !(is16Bit(int64(off1) + off2)) {
+ if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
break
}
v.reset(OpPPC64MOVBstore)
return true
}
// match: (MOVBstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
ptr := p.Args[0]
val := v_1
mem := v_2
- if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break
}
v.reset(OpPPC64MOVBstore)
b := v.Block
typ := &b.Func.Config.Types
// match: (MOVBstoreidx ptr (MOVDconst [c]) val mem)
- // cond: is16Bit(c)
+ // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVBstore [int32(c)] ptr val mem)
for {
ptr := v_0
c := auxIntToInt64(v_1.AuxInt)
val := v_2
mem := v_3
- if !(is16Bit(c)) {
+ if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break
}
v.reset(OpPPC64MOVBstore)
return true
}
// match: (MOVBstoreidx (MOVDconst [c]) ptr val mem)
- // cond: is16Bit(c)
+ // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVBstore [int32(c)] ptr val mem)
for {
if v_0.Op != OpPPC64MOVDconst {
ptr := v_1
val := v_2
mem := v_3
- if !(is16Bit(c)) {
+ if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break
}
v.reset(OpPPC64MOVBstore)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem)
- // cond: is16Bit(int64(off1)+off2)
+ // cond: ((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1)+off2)))
// result: (MOVBstorezero [off1+int32(off2)] {sym} x mem)
for {
off1 := auxIntToInt32(v.AuxInt)
off2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
mem := v_1
- if !(is16Bit(int64(off1) + off2)) {
+ if !((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1)+off2))) {
break
}
v.reset(OpPPC64MOVBstorezero)
return true
}
// match: (MOVBstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem)
- // cond: canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym2 := auxToSym(p.Aux)
x := p.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && (x.Op != OpSB || p.Uses == 1)) {
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break
}
v.reset(OpPPC64MOVBstorezero)
return true
}
// match: (MOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym2 := auxToSym(p.Aux)
ptr := p.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break
}
v.reset(OpPPC64MOVDload)
return true
}
// match: (MOVDload [off1] {sym} (ADDconst [off2] x) mem)
- // cond: is16Bit(int64(off1)+off2)
+ // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
// result: (MOVDload [off1+int32(off2)] {sym} x mem)
for {
off1 := auxIntToInt32(v.AuxInt)
off2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
mem := v_1
- if !(is16Bit(int64(off1) + off2)) {
+ if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
break
}
v.reset(OpPPC64MOVDload)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVDloadidx ptr (MOVDconst [c]) mem)
- // cond: is16Bit(c) && c%4 == 0
+ // cond: ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVDload [int32(c)] ptr mem)
for {
ptr := v_0
}
c := auxIntToInt64(v_1.AuxInt)
mem := v_2
- if !(is16Bit(c) && c%4 == 0) {
+ if !((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break
}
v.reset(OpPPC64MOVDload)
return true
}
// match: (MOVDloadidx (MOVDconst [c]) ptr mem)
- // cond: is16Bit(c) && c%4 == 0
+ // cond: ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVDload [int32(c)] ptr mem)
for {
if v_0.Op != OpPPC64MOVDconst {
c := auxIntToInt64(v_0.AuxInt)
ptr := v_1
mem := v_2
- if !(is16Bit(c) && c%4 == 0) {
+ if !((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break
}
v.reset(OpPPC64MOVDload)
return true
}
// match: (MOVDstore [off1] {sym} (ADDconst [off2] x) val mem)
- // cond: is16Bit(int64(off1)+off2)
+ // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
// result: (MOVDstore [off1+int32(off2)] {sym} x val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
x := v_0.Args[0]
val := v_1
mem := v_2
- if !(is16Bit(int64(off1) + off2)) {
+ if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
break
}
v.reset(OpPPC64MOVDstore)
return true
}
// match: (MOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
ptr := p.Args[0]
val := v_1
mem := v_2
- if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break
}
v.reset(OpPPC64MOVDstore)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVDstoreidx ptr (MOVDconst [c]) val mem)
- // cond: is16Bit(c) && c%4 == 0
+ // cond: ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVDstore [int32(c)] ptr val mem)
for {
ptr := v_0
c := auxIntToInt64(v_1.AuxInt)
val := v_2
mem := v_3
- if !(is16Bit(c) && c%4 == 0) {
+ if !((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break
}
v.reset(OpPPC64MOVDstore)
return true
}
// match: (MOVDstoreidx (MOVDconst [c]) ptr val mem)
- // cond: is16Bit(c) && c%4 == 0
+ // cond: ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVDstore [int32(c)] ptr val mem)
for {
if v_0.Op != OpPPC64MOVDconst {
ptr := v_1
val := v_2
mem := v_3
- if !(is16Bit(c) && c%4 == 0) {
+ if !((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break
}
v.reset(OpPPC64MOVDstore)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem)
- // cond: is16Bit(int64(off1)+off2)
+ // cond: ((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1)+off2)))
// result: (MOVDstorezero [off1+int32(off2)] {sym} x mem)
for {
off1 := auxIntToInt32(v.AuxInt)
off2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
mem := v_1
- if !(is16Bit(int64(off1) + off2)) {
+ if !((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1)+off2))) {
break
}
v.reset(OpPPC64MOVDstorezero)
return true
}
// match: (MOVDstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem)
- // cond: canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym2 := auxToSym(p.Aux)
x := p.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && (x.Op != OpSB || p.Uses == 1)) {
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break
}
v.reset(OpPPC64MOVDstorezero)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVHZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym2 := auxToSym(p.Aux)
ptr := p.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break
}
v.reset(OpPPC64MOVHZload)
return true
}
// match: (MOVHZload [off1] {sym} (ADDconst [off2] x) mem)
- // cond: is16Bit(int64(off1)+off2)
+ // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
// result: (MOVHZload [off1+int32(off2)] {sym} x mem)
for {
off1 := auxIntToInt32(v.AuxInt)
off2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
mem := v_1
- if !(is16Bit(int64(off1) + off2)) {
+ if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
break
}
v.reset(OpPPC64MOVHZload)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVHZloadidx ptr (MOVDconst [c]) mem)
- // cond: is16Bit(c)
+ // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVHZload [int32(c)] ptr mem)
for {
ptr := v_0
}
c := auxIntToInt64(v_1.AuxInt)
mem := v_2
- if !(is16Bit(c)) {
+ if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break
}
v.reset(OpPPC64MOVHZload)
return true
}
// match: (MOVHZloadidx (MOVDconst [c]) ptr mem)
- // cond: is16Bit(c)
+ // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVHZload [int32(c)] ptr mem)
for {
if v_0.Op != OpPPC64MOVDconst {
c := auxIntToInt64(v_0.AuxInt)
ptr := v_1
mem := v_2
- if !(is16Bit(c)) {
+ if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break
}
v.reset(OpPPC64MOVHZload)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVHload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym2 := auxToSym(p.Aux)
ptr := p.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break
}
v.reset(OpPPC64MOVHload)
return true
}
// match: (MOVHload [off1] {sym} (ADDconst [off2] x) mem)
- // cond: is16Bit(int64(off1)+off2)
+ // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
// result: (MOVHload [off1+int32(off2)] {sym} x mem)
for {
off1 := auxIntToInt32(v.AuxInt)
off2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
mem := v_1
- if !(is16Bit(int64(off1) + off2)) {
+ if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
break
}
v.reset(OpPPC64MOVHload)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVHloadidx ptr (MOVDconst [c]) mem)
- // cond: is16Bit(c)
+ // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVHload [int32(c)] ptr mem)
for {
ptr := v_0
}
c := auxIntToInt64(v_1.AuxInt)
mem := v_2
- if !(is16Bit(c)) {
+ if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break
}
v.reset(OpPPC64MOVHload)
return true
}
// match: (MOVHloadidx (MOVDconst [c]) ptr mem)
- // cond: is16Bit(c)
+ // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVHload [int32(c)] ptr mem)
for {
if v_0.Op != OpPPC64MOVDconst {
c := auxIntToInt64(v_0.AuxInt)
ptr := v_1
mem := v_2
- if !(is16Bit(c)) {
+ if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break
}
v.reset(OpPPC64MOVHload)
v_0 := v.Args[0]
b := v.Block
// match: (MOVHstore [off1] {sym} (ADDconst [off2] x) val mem)
- // cond: is16Bit(int64(off1)+off2)
+ // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
// result: (MOVHstore [off1+int32(off2)] {sym} x val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
x := v_0.Args[0]
val := v_1
mem := v_2
- if !(is16Bit(int64(off1) + off2)) {
+ if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
break
}
v.reset(OpPPC64MOVHstore)
return true
}
// match: (MOVHstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
ptr := p.Args[0]
val := v_1
mem := v_2
- if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break
}
v.reset(OpPPC64MOVHstore)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVHstoreidx ptr (MOVDconst [c]) val mem)
- // cond: is16Bit(c)
+ // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVHstore [int32(c)] ptr val mem)
for {
ptr := v_0
c := auxIntToInt64(v_1.AuxInt)
val := v_2
mem := v_3
- if !(is16Bit(c)) {
+ if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break
}
v.reset(OpPPC64MOVHstore)
return true
}
// match: (MOVHstoreidx (MOVDconst [c]) ptr val mem)
- // cond: is16Bit(c)
+ // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVHstore [int32(c)] ptr val mem)
for {
if v_0.Op != OpPPC64MOVDconst {
ptr := v_1
val := v_2
mem := v_3
- if !(is16Bit(c)) {
+ if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break
}
v.reset(OpPPC64MOVHstore)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem)
- // cond: is16Bit(int64(off1)+off2)
+ // cond: ((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1)+off2)))
// result: (MOVHstorezero [off1+int32(off2)] {sym} x mem)
for {
off1 := auxIntToInt32(v.AuxInt)
off2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
mem := v_1
- if !(is16Bit(int64(off1) + off2)) {
+ if !((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1)+off2))) {
break
}
v.reset(OpPPC64MOVHstorezero)
return true
}
// match: (MOVHstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem)
- // cond: canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym2 := auxToSym(p.Aux)
x := p.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && (x.Op != OpSB || p.Uses == 1)) {
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break
}
v.reset(OpPPC64MOVHstorezero)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVWZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym2 := auxToSym(p.Aux)
ptr := p.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break
}
v.reset(OpPPC64MOVWZload)
return true
}
// match: (MOVWZload [off1] {sym} (ADDconst [off2] x) mem)
- // cond: is16Bit(int64(off1)+off2)
+ // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
// result: (MOVWZload [off1+int32(off2)] {sym} x mem)
for {
off1 := auxIntToInt32(v.AuxInt)
off2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
mem := v_1
- if !(is16Bit(int64(off1) + off2)) {
+ if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
break
}
v.reset(OpPPC64MOVWZload)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVWZloadidx ptr (MOVDconst [c]) mem)
- // cond: is16Bit(c)
+ // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVWZload [int32(c)] ptr mem)
for {
ptr := v_0
}
c := auxIntToInt64(v_1.AuxInt)
mem := v_2
- if !(is16Bit(c)) {
+ if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break
}
v.reset(OpPPC64MOVWZload)
return true
}
// match: (MOVWZloadidx (MOVDconst [c]) ptr mem)
- // cond: is16Bit(c)
+ // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVWZload [int32(c)] ptr mem)
for {
if v_0.Op != OpPPC64MOVDconst {
c := auxIntToInt64(v_0.AuxInt)
ptr := v_1
mem := v_2
- if !(is16Bit(c)) {
+ if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break
}
v.reset(OpPPC64MOVWZload)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVWload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym2 := auxToSym(p.Aux)
ptr := p.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break
}
v.reset(OpPPC64MOVWload)
return true
}
// match: (MOVWload [off1] {sym} (ADDconst [off2] x) mem)
- // cond: is16Bit(int64(off1)+off2)
+ // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
// result: (MOVWload [off1+int32(off2)] {sym} x mem)
for {
off1 := auxIntToInt32(v.AuxInt)
off2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
mem := v_1
- if !(is16Bit(int64(off1) + off2)) {
+ if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
break
}
v.reset(OpPPC64MOVWload)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVWloadidx ptr (MOVDconst [c]) mem)
- // cond: is16Bit(c) && c%4 == 0
+ // cond: ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVWload [int32(c)] ptr mem)
for {
ptr := v_0
}
c := auxIntToInt64(v_1.AuxInt)
mem := v_2
- if !(is16Bit(c) && c%4 == 0) {
+ if !((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break
}
v.reset(OpPPC64MOVWload)
return true
}
// match: (MOVWloadidx (MOVDconst [c]) ptr mem)
- // cond: is16Bit(c) && c%4 == 0
+ // cond: ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVWload [int32(c)] ptr mem)
for {
if v_0.Op != OpPPC64MOVDconst {
c := auxIntToInt64(v_0.AuxInt)
ptr := v_1
mem := v_2
- if !(is16Bit(c) && c%4 == 0) {
+ if !((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break
}
v.reset(OpPPC64MOVWload)
v_0 := v.Args[0]
b := v.Block
// match: (MOVWstore [off1] {sym} (ADDconst [off2] x) val mem)
- // cond: is16Bit(int64(off1)+off2)
+ // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
// result: (MOVWstore [off1+int32(off2)] {sym} x val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
x := v_0.Args[0]
val := v_1
mem := v_2
- if !(is16Bit(int64(off1) + off2)) {
+ if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
break
}
v.reset(OpPPC64MOVWstore)
return true
}
// match: (MOVWstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
ptr := p.Args[0]
val := v_1
mem := v_2
- if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break
}
v.reset(OpPPC64MOVWstore)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVWstoreidx ptr (MOVDconst [c]) val mem)
- // cond: is16Bit(c)
+ // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVWstore [int32(c)] ptr val mem)
for {
ptr := v_0
c := auxIntToInt64(v_1.AuxInt)
val := v_2
mem := v_3
- if !(is16Bit(c)) {
+ if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break
}
v.reset(OpPPC64MOVWstore)
return true
}
// match: (MOVWstoreidx (MOVDconst [c]) ptr val mem)
- // cond: is16Bit(c)
+ // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVWstore [int32(c)] ptr val mem)
for {
if v_0.Op != OpPPC64MOVDconst {
ptr := v_1
val := v_2
mem := v_3
- if !(is16Bit(c)) {
+ if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break
}
v.reset(OpPPC64MOVWstore)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem)
- // cond: is16Bit(int64(off1)+off2)
+ // cond: ((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1)+off2)))
// result: (MOVWstorezero [off1+int32(off2)] {sym} x mem)
for {
off1 := auxIntToInt32(v.AuxInt)
off2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
mem := v_1
- if !(is16Bit(int64(off1) + off2)) {
+ if !((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1)+off2))) {
break
}
v.reset(OpPPC64MOVWstorezero)
return true
}
// match: (MOVWstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem)
- // cond: canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym2 := auxToSym(p.Aux)
x := p.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && (x.Op != OpSB || p.Uses == 1)) {
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break
}
v.reset(OpPPC64MOVWstorezero)