// Lowering arithmetic
(Add64 x y) -> (ADDQ x y)
-(AddPtr x y) -> (ADDQ x y)
+(AddPtr x y) && config.PtrSize == 8 -> (ADDQ x y)
+(AddPtr x y) && config.PtrSize == 4 -> (ADDL x y)
(Add32 x y) -> (ADDL x y)
(Add16 x y) -> (ADDL x y)
(Add8 x y) -> (ADDL x y)
(Add64F x y) -> (ADDSD x y)
(Sub64 x y) -> (SUBQ x y)
-(SubPtr x y) -> (SUBQ x y)
+(SubPtr x y) && config.PtrSize == 8 -> (SUBQ x y)
+(SubPtr x y) && config.PtrSize == 4 -> (SUBL x y)
(Sub32 x y) -> (SUBL x y)
(Sub16 x y) -> (SUBL x y)
(Sub8 x y) -> (SUBL x y)
(Not x) -> (XORLconst [1] x)
// Lowering pointer arithmetic
-(OffPtr [off] ptr) && is32Bit(off) -> (ADDQconst [off] ptr)
-(OffPtr [off] ptr) -> (ADDQ (MOVQconst [off]) ptr)
+(OffPtr [off] ptr) && config.PtrSize == 8 && is32Bit(off) -> (ADDQconst [off] ptr)
+(OffPtr [off] ptr) && config.PtrSize == 8 -> (ADDQ (MOVQconst [off]) ptr)
+(OffPtr [off] ptr) && config.PtrSize == 4 -> (ADDLconst [off] ptr)
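+// For example, with config.PtrSize == 4 (amd64p32), (OffPtr [8] p) lowers to
+// (ADDLconst [8] p), keeping pointer adds in 32-bit registers.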
// Lowering other arithmetic
// TODO: CMPQconst 0 below is redundant because BSF sets Z but how to remove?
(Eq16 x y) -> (SETEQ (CMPW x y))
(Eq8 x y) -> (SETEQ (CMPB x y))
(EqB x y) -> (SETEQ (CMPB x y))
-(EqPtr x y) -> (SETEQ (CMPQ x y))
+(EqPtr x y) && config.PtrSize == 8 -> (SETEQ (CMPQ x y))
+(EqPtr x y) && config.PtrSize == 4 -> (SETEQ (CMPL x y))
(Eq64F x y) -> (SETEQF (UCOMISD x y))
(Eq32F x y) -> (SETEQF (UCOMISS x y))
(Neq16 x y) -> (SETNE (CMPW x y))
(Neq8 x y) -> (SETNE (CMPB x y))
(NeqB x y) -> (SETNE (CMPB x y))
-(NeqPtr x y) -> (SETNE (CMPQ x y))
+(NeqPtr x y) && config.PtrSize == 8 -> (SETNE (CMPQ x y))
+(NeqPtr x y) && config.PtrSize == 4 -> (SETNE (CMPL x y))
(Neq64F x y) -> (SETNEF (UCOMISD x y))
(Neq32F x y) -> (SETNEF (UCOMISS x y))
+(Int64Hi x) -> (SHRQconst [32] x) // needed for amd64p32
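+// Int64Hi selects the high 32 bits of a 64-bit value: shifting the full register
+// right by 32 leaves exactly those bits, e.g. 0x1122334455667788 -> 0x11223344.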
+
// Lowering loads
-(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVQload ptr mem)
-(Load <t> ptr mem) && is32BitInt(t) -> (MOVLload ptr mem)
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t) && config.PtrSize == 8) -> (MOVQload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) || isPtr(t) && config.PtrSize == 4) -> (MOVLload ptr mem)
(Load <t> ptr mem) && is16BitInt(t) -> (MOVWload ptr mem)
(Load <t> ptr mem) && (t.IsBoolean() || is8BitInt(t)) -> (MOVBload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) -> (MOVSSload ptr mem)
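+// The Move and Zero rewrites below advance pointers with OffPtr rather than
+// ADDQconst, so the add is lowered to ADDQconst or ADDLconst per config.PtrSize.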
(Move [s] dst src mem)
&& SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8 ->
(Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16]
- (ADDQconst <dst.Type> dst [SizeAndAlign(s).Size()%16])
- (ADDQconst <src.Type> src [SizeAndAlign(s).Size()%16])
+ (OffPtr <dst.Type> dst [SizeAndAlign(s).Size()%16])
+ (OffPtr <src.Type> src [SizeAndAlign(s).Size()%16])
(MOVQstore dst (MOVQload src mem) mem))
(Move [s] dst src mem)
&& SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8 ->
(Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16]
- (ADDQconst <dst.Type> dst [SizeAndAlign(s).Size()%16])
- (ADDQconst <src.Type> src [SizeAndAlign(s).Size()%16])
+ (OffPtr <dst.Type> dst [SizeAndAlign(s).Size()%16])
+ (OffPtr <src.Type> src [SizeAndAlign(s).Size()%16])
(MOVOstore dst (MOVOload src mem) mem))
// Medium copying uses a duff device.
// Strip off any fractional word zeroing.
(Zero [s] destptr mem) && SizeAndAlign(s).Size()%8 != 0 && SizeAndAlign(s).Size() > 8 ->
- (Zero [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%8] (ADDQconst destptr [SizeAndAlign(s).Size()%8])
+ (Zero [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%8] (OffPtr <destptr.Type> destptr [SizeAndAlign(s).Size()%8])
(MOVQstoreconst [0] destptr mem))
// Zero small numbers of words directly.
(Zero [s] destptr mem)
&& SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size()%16 != 0
&& !config.noDuffDevice ->
- (Zero [SizeAndAlign(s).Size()-8] (ADDQconst [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem))
+ (Zero [SizeAndAlign(s).Size()-8] (OffPtr <destptr.Type> [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem))
(Zero [s] destptr mem)
&& SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice ->
(DUFFZERO [SizeAndAlign(s).Size()] destptr (MOVOconst [0]) mem)
(Const64 [val]) -> (MOVQconst [val])
(Const32F [val]) -> (MOVSSconst [val])
(Const64F [val]) -> (MOVSDconst [val])
-(ConstNil) -> (MOVQconst [0])
+(ConstNil) && config.PtrSize == 8 -> (MOVQconst [0])
+(ConstNil) && config.PtrSize == 4 -> (MOVLconst [0])
(ConstBool [b]) -> (MOVLconst [b])
// Lowering calls
(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)
// Miscellaneous
-(Convert <t> x mem) -> (MOVQconvert <t> x mem)
-(IsNonNil p) -> (SETNE (TESTQ p p))
+(Convert <t> x mem) && config.PtrSize == 8 -> (MOVQconvert <t> x mem)
+(Convert <t> x mem) && config.PtrSize == 4 -> (MOVLconvert <t> x mem)
+(IsNonNil p) && config.PtrSize == 8 -> (SETNE (TESTQ p p))
+(IsNonNil p) && config.PtrSize == 4 -> (SETNE (TESTL p p))
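+// TESTQ/TESTL p p ANDs p with itself and sets the flags, so SETNE produces 1
+// exactly when the pointer is non-nil.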
(IsInBounds idx len) -> (SETB (CMPQ idx len))
(IsSliceInBounds idx len) -> (SETBE (CMPQ idx len))
(NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
(GetG mem) -> (LoweredGetG mem)
(GetClosurePtr) -> (LoweredGetClosurePtr)
-(Addr {sym} base) -> (LEAQ {sym} base)
+(Addr {sym} base) && config.PtrSize == 8 -> (LEAQ {sym} base)
+(Addr {sym} base) && config.PtrSize == 4 -> (LEAL {sym} base)
// block rewrites
(If (SETL cmp) yes no) -> (LT cmp yes no)
&& x.Uses == 1
&& clobber(x)
-> (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w0 mem)
+
+// amd64p32 rules
+// Same as the rules above, but with 32-bit instead of 64-bit pointer arithmetic.
+// LEAQ,ADDQ -> LEAL,ADDL
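+// For example: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) folds into
+// (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) when the symbols can merge.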
+(ADDLconst [c] (LEAL [d] {s} x)) && is32Bit(c+d) -> (LEAL [c+d] {s} x)
+(LEAL [c] {s} (ADDLconst [d] x)) && is32Bit(c+d) -> (LEAL [c+d] {s} x)
+
+(MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
+ (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
+ (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
+ (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
+ (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+
+(MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
+ (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
+ (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
+ (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
+ (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+
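+// ValAndOff packs a stored constant and an offset into one AuxInt; canAdd checks
+// that folding in the LEAL/ADDLconst offset still fits, and add performs the fold.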
+(MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
+ (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+(MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
+ (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+(MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
+ (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+(MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
+ (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+
+(MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVQload [off1+off2] {sym} ptr mem)
+(MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVLload [off1+off2] {sym} ptr mem)
+(MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWload [off1+off2] {sym} ptr mem)
+(MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBload [off1+off2] {sym} ptr mem)
+(MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVQstore [off1+off2] {sym} ptr val mem)
+(MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVLstore [off1+off2] {sym} ptr val mem)
+(MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVWstore [off1+off2] {sym} ptr val mem)
+(MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVBstore [off1+off2] {sym} ptr val mem)
+(MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
+ (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+(MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
+ (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+(MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
+ (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+(MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
+ (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
return rewriteValueAMD64_OpAMD64CMPW(v, config)
case OpAMD64CMPWconst:
return rewriteValueAMD64_OpAMD64CMPWconst(v, config)
+ case OpAMD64LEAL:
+ return rewriteValueAMD64_OpAMD64LEAL(v, config)
case OpAMD64LEAQ:
return rewriteValueAMD64_OpAMD64LEAQ(v, config)
case OpAMD64LEAQ1:
return rewriteValueAMD64_OpHmul8(v, config)
case OpHmul8u:
return rewriteValueAMD64_OpHmul8u(v, config)
+ case OpInt64Hi:
+ return rewriteValueAMD64_OpInt64Hi(v, config)
case OpInterCall:
return rewriteValueAMD64_OpInterCall(v, config)
case OpIsInBounds:
v.AddArg(x)
return true
}
+ // match: (ADDLconst [c] (LEAL [d] {s} x))
+ // cond: is32Bit(c+d)
+ // result: (LEAL [c+d] {s} x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ d := v_0.AuxInt
+ s := v_0.Aux
+ x := v_0.Args[0]
+ if !(is32Bit(c + d)) {
+ break
+ }
+ v.reset(OpAMD64LEAL)
+ v.AuxInt = c + d
+ v.Aux = s
+ v.AddArg(x)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64ADDQ(v *Value, config *Config) bool {
}
return false
}
+func rewriteValueAMD64_OpAMD64LEAL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (LEAL [c] {s} (ADDLconst [d] x))
+ // cond: is32Bit(c+d)
+ // result: (LEAL [c+d] {s} x)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(is32Bit(c + d)) {
+ break
+ }
+ v.reset(OpAMD64LEAL)
+ v.AuxInt = c + d
+ v.Aux = s
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
func rewriteValueAMD64_OpAMD64LEAQ(v *Value, config *Config) bool {
b := v.Block
_ = b
v.AddArg(mem)
return true
}
+ // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: canMergeSym(sym1, sym2)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVBload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVBload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64MOVBload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64MOVBloadidx1(v *Value, config *Config) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: canMergeSym(sym1, sym2)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVBstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value, config *Config) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
+ // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVBstoreconst)
+ v.AuxInt = ValAndOff(sc).add(off)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
+ // cond: ValAndOff(sc).canAdd(off)
+ // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+ for {
+ sc := v.AuxInt
+ s := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(sc).canAdd(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVBstoreconst)
+ v.AuxInt = ValAndOff(sc).add(off)
+ v.Aux = s
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v *Value, config *Config) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: canMergeSym(sym1, sym2)
+ // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVLload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVLload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64MOVLload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value, config *Config) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: canMergeSym(sym1, sym2)
+ // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVLstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value, config *Config) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
+ // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstoreconst)
+ v.AuxInt = ValAndOff(sc).add(off)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
+ // cond: ValAndOff(sc).canAdd(off)
+ // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+ for {
+ sc := v.AuxInt
+ s := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(sc).canAdd(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstoreconst)
+ v.AuxInt = ValAndOff(sc).add(off)
+ v.Aux = s
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value, config *Config) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: canMergeSym(sym1, sym2)
+ // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVQload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVQload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64MOVQload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64MOVQloadidx1(v *Value, config *Config) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: canMergeSym(sym1, sym2)
+ // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVQstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value, config *Config) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
+ // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstoreconst)
+ v.AuxInt = ValAndOff(sc).add(off)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
+ // cond: ValAndOff(sc).canAdd(off)
+ // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+ for {
+ sc := v.AuxInt
+ s := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(sc).canAdd(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstoreconst)
+ v.AuxInt = ValAndOff(sc).add(off)
+ v.Aux = s
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v *Value, config *Config) bool {
break
}
ptr := v_0.Args[0]
- idx := v_0.Args[1]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ if !(ptr.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64MOVWloadidx1)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: canMergeSym(sym1, sym2)
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVWload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVWload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
mem := v.Args[1]
- if !(ptr.Op != OpSB) {
+ if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64MOVWloadidx1)
- v.AuxInt = off
+ v.reset(OpAMD64MOVWload)
+ v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
- v.AddArg(idx)
v.AddArg(mem)
return true
}
v.AddArg(mem)
return true
}
+ // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: canMergeSym(sym1, sym2)
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVWstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value, config *Config) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
+ // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstoreconst)
+ v.AuxInt = ValAndOff(sc).add(off)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
+ // cond: ValAndOff(sc).canAdd(off)
+ // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+ for {
+ sc := v.AuxInt
+ s := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(sc).canAdd(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstoreconst)
+ v.AuxInt = ValAndOff(sc).add(off)
+ v.Aux = s
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (AddPtr x y)
- // cond:
- // result: (ADDQ x y)
+ // cond: config.PtrSize == 8
+ // result: (ADDQ x y)
for {
x := v.Args[0]
y := v.Args[1]
+ if !(config.PtrSize == 8) {
+ break
+ }
v.reset(OpAMD64ADDQ)
v.AddArg(x)
v.AddArg(y)
return true
}
+ // match: (AddPtr x y)
+ // cond: config.PtrSize == 4
+ // result: (ADDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpAMD64ADDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpAddr(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Addr {sym} base)
- // cond:
+ // cond: config.PtrSize == 8
// result: (LEAQ {sym} base)
for {
sym := v.Aux
base := v.Args[0]
+ if !(config.PtrSize == 8) {
+ break
+ }
v.reset(OpAMD64LEAQ)
v.Aux = sym
v.AddArg(base)
return true
}
+ // match: (Addr {sym} base)
+ // cond: config.PtrSize == 4
+ // result: (LEAL {sym} base)
+ for {
+ sym := v.Aux
+ base := v.Args[0]
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpAMD64LEAL)
+ v.Aux = sym
+ v.AddArg(base)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpAnd16(v *Value, config *Config) bool {
b := v.Block
b := v.Block
_ = b
// match: (ConstNil)
- // cond:
+ // cond: config.PtrSize == 8
// result: (MOVQconst [0])
for {
+ if !(config.PtrSize == 8) {
+ break
+ }
v.reset(OpAMD64MOVQconst)
v.AuxInt = 0
return true
}
+ // match: (ConstNil)
+ // cond: config.PtrSize == 4
+ // result: (MOVLconst [0])
+ for {
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpConvert(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Convert <t> x mem)
- // cond:
+ // cond: config.PtrSize == 8
// result: (MOVQconvert <t> x mem)
for {
t := v.Type
x := v.Args[0]
mem := v.Args[1]
+ if !(config.PtrSize == 8) {
+ break
+ }
v.reset(OpAMD64MOVQconvert)
v.Type = t
v.AddArg(x)
v.AddArg(mem)
return true
}
+ // match: (Convert <t> x mem)
+ // cond: config.PtrSize == 4
+ // result: (MOVLconvert <t> x mem)
+ for {
+ t := v.Type
+ x := v.Args[0]
+ mem := v.Args[1]
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpAMD64MOVLconvert)
+ v.Type = t
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpCtz16(v *Value, config *Config) bool {
b := v.Block
b := v.Block
_ = b
// match: (EqPtr x y)
- // cond:
+ // cond: config.PtrSize == 8
// result: (SETEQ (CMPQ x y))
for {
x := v.Args[0]
y := v.Args[1]
+ if !(config.PtrSize == 8) {
+ break
+ }
v.reset(OpAMD64SETEQ)
v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
v0.AddArg(x)
v.AddArg(v0)
return true
}
+ // match: (EqPtr x y)
+ // cond: config.PtrSize == 4
+ // result: (SETEQ (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpGeq16(v *Value, config *Config) bool {
b := v.Block
return true
}
}
+func rewriteValueAMD64_OpInt64Hi(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Int64Hi x)
+ // cond:
+ // result: (SHRQconst [32] x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64SHRQconst)
+ v.AuxInt = 32
+ v.AddArg(x)
+ return true
+ }
+}
func rewriteValueAMD64_OpInterCall(v *Value, config *Config) bool {
b := v.Block
_ = b
b := v.Block
_ = b
// match: (IsNonNil p)
- // cond:
+ // cond: config.PtrSize == 8
// result: (SETNE (TESTQ p p))
for {
p := v.Args[0]
+ if !(config.PtrSize == 8) {
+ break
+ }
v.reset(OpAMD64SETNE)
v0 := b.NewValue0(v.Line, OpAMD64TESTQ, TypeFlags)
v0.AddArg(p)
v.AddArg(v0)
return true
}
+ // match: (IsNonNil p)
+ // cond: config.PtrSize == 4
+ // result: (SETNE (TESTL p p))
+ for {
+ p := v.Args[0]
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Line, OpAMD64TESTL, TypeFlags)
+ v0.AddArg(p)
+ v0.AddArg(p)
+ v.AddArg(v0)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpIsSliceInBounds(v *Value, config *Config) bool {
b := v.Block
b := v.Block
_ = b
// match: (Load <t> ptr mem)
- // cond: (is64BitInt(t) || isPtr(t))
+ // cond: (is64BitInt(t) || isPtr(t) && config.PtrSize == 8)
// result: (MOVQload ptr mem)
for {
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
- if !(is64BitInt(t) || isPtr(t)) {
+ if !(is64BitInt(t) || isPtr(t) && config.PtrSize == 8) {
break
}
v.reset(OpAMD64MOVQload)
return true
}
// match: (Load <t> ptr mem)
- // cond: is32BitInt(t)
+ // cond: (is32BitInt(t) || isPtr(t) && config.PtrSize == 4)
// result: (MOVLload ptr mem)
for {
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
- if !(is32BitInt(t)) {
+ if !(is32BitInt(t) || isPtr(t) && config.PtrSize == 4) {
break
}
v.reset(OpAMD64MOVLload)
}
// match: (Move [s] dst src mem)
// cond: SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8
- // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16] (ADDQconst <dst.Type> dst [SizeAndAlign(s).Size()%16]) (ADDQconst <src.Type> src [SizeAndAlign(s).Size()%16]) (MOVQstore dst (MOVQload src mem) mem))
+ // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16] (OffPtr <dst.Type> dst [SizeAndAlign(s).Size()%16]) (OffPtr <src.Type> src [SizeAndAlign(s).Size()%16]) (MOVQstore dst (MOVQload src mem) mem))
for {
s := v.AuxInt
dst := v.Args[0]
}
v.reset(OpMove)
v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%16
- v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, dst.Type)
+ v0 := b.NewValue0(v.Line, OpOffPtr, dst.Type)
v0.AuxInt = SizeAndAlign(s).Size() % 16
v0.AddArg(dst)
v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64ADDQconst, src.Type)
+ v1 := b.NewValue0(v.Line, OpOffPtr, src.Type)
v1.AuxInt = SizeAndAlign(s).Size() % 16
v1.AddArg(src)
v.AddArg(v1)
}
// match: (Move [s] dst src mem)
// cond: SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8
- // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16] (ADDQconst <dst.Type> dst [SizeAndAlign(s).Size()%16]) (ADDQconst <src.Type> src [SizeAndAlign(s).Size()%16]) (MOVOstore dst (MOVOload src mem) mem))
+ // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16] (OffPtr <dst.Type> dst [SizeAndAlign(s).Size()%16]) (OffPtr <src.Type> src [SizeAndAlign(s).Size()%16]) (MOVOstore dst (MOVOload src mem) mem))
for {
s := v.AuxInt
dst := v.Args[0]
}
v.reset(OpMove)
v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%16
- v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, dst.Type)
+ v0 := b.NewValue0(v.Line, OpOffPtr, dst.Type)
v0.AuxInt = SizeAndAlign(s).Size() % 16
v0.AddArg(dst)
v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64ADDQconst, src.Type)
+ v1 := b.NewValue0(v.Line, OpOffPtr, src.Type)
v1.AuxInt = SizeAndAlign(s).Size() % 16
v1.AddArg(src)
v.AddArg(v1)
b := v.Block
_ = b
// match: (NeqPtr x y)
- // cond:
+ // cond: config.PtrSize == 8
// result: (SETNE (CMPQ x y))
for {
x := v.Args[0]
y := v.Args[1]
+ if !(config.PtrSize == 8) {
+ break
+ }
v.reset(OpAMD64SETNE)
v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
v0.AddArg(x)
v.AddArg(v0)
return true
}
+ // match: (NeqPtr x y)
+ // cond: config.PtrSize == 4
+ // result: (SETNE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpNilCheck(v *Value, config *Config) bool {
b := v.Block
b := v.Block
_ = b
// match: (OffPtr [off] ptr)
- // cond: is32Bit(off)
+ // cond: config.PtrSize == 8 && is32Bit(off)
// result: (ADDQconst [off] ptr)
for {
off := v.AuxInt
ptr := v.Args[0]
- if !(is32Bit(off)) {
+ if !(config.PtrSize == 8 && is32Bit(off)) {
break
}
v.reset(OpAMD64ADDQconst)
return true
}
// match: (OffPtr [off] ptr)
- // cond:
+ // cond: config.PtrSize == 8
// result: (ADDQ (MOVQconst [off]) ptr)
for {
off := v.AuxInt
ptr := v.Args[0]
+ if !(config.PtrSize == 8) {
+ break
+ }
v.reset(OpAMD64ADDQ)
v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
v0.AuxInt = off
v.AddArg(ptr)
return true
}
+ // match: (OffPtr [off] ptr)
+ // cond: config.PtrSize == 4
+ // result: (ADDLconst [off] ptr)
+ for {
+ off := v.AuxInt
+ ptr := v.Args[0]
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpAMD64ADDLconst)
+ v.AuxInt = off
+ v.AddArg(ptr)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpOr16(v *Value, config *Config) bool {
b := v.Block
b := v.Block
_ = b
// match: (SubPtr x y)
- // cond:
- // result: (SUBQ x y)
+ // cond: config.PtrSize == 8
+ // result: (SUBQ x y)
for {
x := v.Args[0]
y := v.Args[1]
+ if !(config.PtrSize == 8) {
+ break
+ }
v.reset(OpAMD64SUBQ)
v.AddArg(x)
v.AddArg(y)
return true
}
+ // match: (SubPtr x y)
+ // cond: config.PtrSize == 4
+ // result: (SUBL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpAMD64SUBL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
func rewriteValueAMD64_OpTrunc16to8(v *Value, config *Config) bool {
b := v.Block
}
// match: (Zero [s] destptr mem)
// cond: SizeAndAlign(s).Size()%8 != 0 && SizeAndAlign(s).Size() > 8
- // result: (Zero [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%8] (ADDQconst destptr [SizeAndAlign(s).Size()%8]) (MOVQstoreconst [0] destptr mem))
+ // result: (Zero [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%8] (OffPtr <destptr.Type> destptr [SizeAndAlign(s).Size()%8]) (MOVQstoreconst [0] destptr mem))
for {
s := v.AuxInt
destptr := v.Args[0]
}
v.reset(OpZero)
v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%8
- v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Line, OpOffPtr, destptr.Type)
v0.AuxInt = SizeAndAlign(s).Size() % 8
v0.AddArg(destptr)
v.AddArg(v0)
}
// match: (Zero [s] destptr mem)
// cond: SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size()%16 != 0 && !config.noDuffDevice
- // result: (Zero [SizeAndAlign(s).Size()-8] (ADDQconst [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem))
+ // result: (Zero [SizeAndAlign(s).Size()-8] (OffPtr <destptr.Type> [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem))
for {
s := v.AuxInt
destptr := v.Args[0]
}
v.reset(OpZero)
v.AuxInt = SizeAndAlign(s).Size() - 8
- v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Line, OpOffPtr, destptr.Type)
v0.AuxInt = 8
v0.AddArg(destptr)
v.AddArg(v0)