import (
"cmd/compile/internal/base"
"cmd/compile/internal/logopt"
+ "cmd/compile/internal/reflectdata"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/s390x"
"cmd/internal/src"
"encoding/binary"
"fmt"
+ "internal/buildcfg"
"io"
"math"
"math/bits"
"os"
"path/filepath"
+ "strings"
)
type deadValueChoice bool
return t.IsPtrShaped()
}
-func isSigned(t *types.Type) bool {
- return t.IsSigned()
-}
-
// mergeSym merges two symbolic offsets. There is no real merging of
// offsets, we just pick the non-nil one.
func mergeSym(x, y Sym) Sym {
CanBeAnSSAAux()
}
// auxMark is a sentinel Aux value carrying no payload.
// For now it is only used to mark moves that need to avoid clobbering flags.
type auxMark bool

// CanBeAnSSAAux makes auxMark a valid Value.Aux type.
func (auxMark) CanBeAnSSAAux() {}

// AuxMark is the singleton auxMark value used as the marker Aux.
var AuxMark auxMark
+
// stringAux wraps string values for use in Aux.
type stringAux string
return nil
}
-// de-virtualize an InterLECall
-// 'sym' is the symbol for the itab.
-func devirtLESym(v *Value, aux Aux, sym Sym, offset int64) *obj.LSym {
- n, ok := sym.(*obj.LSym)
- if !ok {
- return nil
- }
-
- lsym := loadLSymOffset(n, offset)
- if f := v.Block.Func; f.pass.debug > 0 {
- if lsym != nil {
- f.Warnl(v.Pos, "de-virtualizing call")
- } else {
- f.Warnl(v.Pos, "couldn't de-virtualize call")
- }
- }
- return lsym
-}
-
func devirtLECall(v *Value, sym *obj.LSym) *Value {
v.Op = OpStaticLECall
auxcall := v.Aux.(*AuxCall)
copy(v.Args[0:], v.Args[1:])
v.Args[len(v.Args)-1] = nil // aid GC
v.Args = v.Args[:len(v.Args)-1]
+ if f := v.Block.Func; f.pass.debug > 0 {
+ f.Warnl(v.Pos, "de-virtualizing call")
+ }
return v
}
case OpOffPtr:
return p1.AuxInt == p2.AuxInt && isSamePtr(p1.Args[0], p2.Args[0])
case OpAddr, OpLocalAddr:
- // OpAddr's 0th arg is either OpSP or OpSB, which means that it is uniquely identified by its Op.
- // Checking for value equality only works after [z]cse has run.
- return p1.Aux == p2.Aux && p1.Args[0].Op == p2.Args[0].Op
+ return p1.Aux == p2.Aux
case OpAddPtr:
return p1.Args[1] == p2.Args[1] && isSamePtr(p1.Args[0], p2.Args[0])
}
offset += base.AuxInt
base = base.Args[0]
}
+ if opcodeTable[base.Op].nilCheck {
+ base = base.Args[0]
+ }
return base, offset
}
p1, off1 := baseAndOffset(p1)
}
return y
}
// max returns the larger of x and y.
func max(x, y int64) int64 {
	if y > x {
		return y
	}
	return x
}
func isConstZero(v *Value) bool {
switch v.Op {
OpAMD64SHRL, OpAMD64SHRLconst, OpAMD64SARL, OpAMD64SARLconst,
OpAMD64SHLL, OpAMD64SHLLconst:
return true
+ case OpARM64REV16W, OpARM64REVW, OpARM64RBITW, OpARM64CLZW, OpARM64EXTRWconst,
+ OpARM64MULW, OpARM64MNEGW, OpARM64UDIVW, OpARM64DIVW, OpARM64UMODW,
+ OpARM64MADDW, OpARM64MSUBW, OpARM64RORW, OpARM64RORWconst:
+ return true
case OpArg:
return x.Type.Size() == 4
case OpPhi, OpSelect0, OpSelect1:
return false
}
+func isInlinableMemclr(c *Config, sz int64) bool {
+ if sz < 0 {
+ return false
+ }
+ // TODO: expand this check to allow other architectures
+ // see CL 454255 and issue 56997
+ switch c.arch {
+ case "amd64", "arm64":
+ return true
+ case "ppc64le", "ppc64":
+ return sz < 512
+ }
+ return false
+}
+
// isInlinableMemmove reports whether the given arch performs a Move of the given size
// faster than memmove. It will only return true if replacing the memmove with a Move is
// safe, either because Move will do all of its loads before any of its stores, or
}
}
+func supportsPPC64PCRel() bool {
+ // PCRel is currently supported for >= power10, linux only
+ // Internal and external linking supports this on ppc64le; internal linking on ppc64.
+ return buildcfg.GOPPC64 >= 10 && buildcfg.GOOS == "linux"
+}
+
func newPPC64ShiftAuxInt(sh, mb, me, sz int64) int32 {
if sh < 0 || sh >= sz {
panic("PPC64 shift arg sh out of range")
// Determine boundaries and then decode them
if mask == 0 || ^mask == 0 || rotate >= nbits {
- panic("Invalid PPC64 rotate mask")
+ panic(fmt.Sprintf("invalid PPC64 rotate mask: %x %d %d", uint64(mask), rotate, nbits))
} else if nbits == 32 {
mb = bits.LeadingZeros32(uint32(mask))
me = 32 - bits.TrailingZeros32(uint32(mask))
return int64(me) | int64(mb<<8) | int64(rotate<<16) | int64(nbits<<24)
}
// Merge (RLDICL [encoded] (SRDconst [s] x)) into (RLDICL [new_encoded] x).
// SRDconst on PPC64 is an extended mnemonic of RLDICL. If the input to an
// RLDICL is an SRDconst, and the RLDICL does not rotate its value, the two
// operations can be combined. This function assumes the two opcodes can
// be merged, and returns an encoded rotate+mask value of the combined RLDICL.
func mergePPC64RLDICLandSRDconst(encoded, s int64) int64 {
	// The rotate field (bits 16-23) of the RLDICL is expected to be zero.
	if encoded&0xFF0000 != 0 {
		panic("non-zero rotate")
	}
	// A shift right by s clears the top s bits, so the merged mask must
	// start no earlier than bit s. A larger mb is a smaller mask.
	if mb := (encoded >> 8) & 0xFF; mb < s {
		encoded = encoded&^0xFF00 | s<<8
	}
	// A logical shift right by s is a rotate left by 64-s with the high
	// bits masked off; fold that rotate into the encoding.
	return encoded | (64-s)<<16
}
+
// DecodePPC64RotateMask is the inverse operation of encodePPC64RotateMask. The values returned as
// mb and me satisfy the POWER ISA definition of MASK(x,y) where MASK(mb,me) = mask.
func DecodePPC64RotateMask(sauxint int64) (rotate, mb, me int64, mask uint64) {
// Return the encoded RLWINM constant, or 0 if they cannot be merged.
func mergePPC64ClrlsldiSrw(sld, srw int64) int64 {
mask_1 := uint64(0xFFFFFFFF >> uint(srw))
- // for CLRLSLDI, it's more convient to think of it as a mask left bits then rotate left.
+ // for CLRLSLDI, it's more convenient to think of it as a mask left bits then rotate left.
mask_2 := uint64(0xFFFFFFFFFFFFFFFF) >> uint(GetPPC64Shiftmb(int64(sld)))
// Rewrite mask to apply after the final left shift.
// the encoded RLWINM constant, or 0 if they cannot be merged.
func mergePPC64ClrlsldiRlwinm(sld int32, rlw int64) int64 {
r_1, _, _, mask_1 := DecodePPC64RotateMask(rlw)
- // for CLRLSLDI, it's more convient to think of it as a mask left bits then rotate left.
+ // for CLRLSLDI, it's more convenient to think of it as a mask left bits then rotate left.
mask_2 := uint64(0xFFFFFFFFFFFFFFFF) >> uint(GetPPC64Shiftmb(int64(sld)))
// combine the masks, and adjust for the final left shift.
return encodePPC64RotateMask((32-srw+sld)&31, int64(mask), 32)
}
+// Convert a PPC64 opcode from the Op to OpCC form. This converts (op x y)
+// to (Select0 (opCC x y)) without having to explicitly fixup every user
+// of op.
+//
+// E.g consider the case:
+// a = (ADD x y)
+// b = (CMPconst [0] a)
+// c = (OR a z)
+//
+// A rule like (CMPconst [0] (ADD x y)) => (CMPconst [0] (Select0 (ADDCC x y)))
+// would produce:
+// a = (ADD x y)
+// a' = (ADDCC x y)
+// a” = (Select0 a')
+// b = (CMPconst [0] a”)
+// c = (OR a z)
+//
+// which makes it impossible to rewrite the second user. Instead the result
+// of this conversion is:
+// a' = (ADDCC x y)
+// a = (Select0 a')
+// b = (CMPconst [0] a)
+// c = (OR a z)
+//
+// Which makes it trivial to rewrite b using a lowering rule.
+func convertPPC64OpToOpCC(op *Value) *Value {
+ ccOpMap := map[Op]Op{
+ OpPPC64ADD: OpPPC64ADDCC,
+ OpPPC64ADDconst: OpPPC64ADDCCconst,
+ OpPPC64AND: OpPPC64ANDCC,
+ OpPPC64ANDN: OpPPC64ANDNCC,
+ OpPPC64CNTLZD: OpPPC64CNTLZDCC,
+ OpPPC64OR: OpPPC64ORCC,
+ OpPPC64SUB: OpPPC64SUBCC,
+ OpPPC64NEG: OpPPC64NEGCC,
+ OpPPC64NOR: OpPPC64NORCC,
+ OpPPC64XOR: OpPPC64XORCC,
+ }
+ b := op.Block
+ opCC := b.NewValue0I(op.Pos, ccOpMap[op.Op], types.NewTuple(op.Type, types.TypeFlags), op.AuxInt)
+ opCC.AddArgs(op.Args...)
+ op.reset(OpSelect0)
+ op.AddArgs(opCC)
+ return op
+}
+
// Convenience function to rotate a 32 bit constant value by another constant.
func rotateLeft32(v, rotate int64) int64 {
return int64(bits.RotateLeft32(uint32(v), int(rotate)))
return true
}
// isFixed32 reports whether the int32 at offset off in symbol sym
// is known and constant. Convenience wrapper around isFixed with size 4.
func isFixed32(c *Config, sym Sym, off int64) bool {
	return isFixed(c, sym, off, 4)
}
+
+// isFixed returns true if the range [off,off+size] of the symbol sym
+// is known and constant.
+func isFixed(c *Config, sym Sym, off, size int64) bool {
+ lsym := sym.(*obj.LSym)
+ if lsym.Extra == nil {
+ return false
+ }
+ if _, ok := (*lsym.Extra).(*obj.TypeInfo); ok {
+ if off == 2*c.PtrSize && size == 4 {
+ return true // type hash field
+ }
+ }
+ return false
+}
+func fixed32(c *Config, sym Sym, off int64) int32 {
+ lsym := sym.(*obj.LSym)
+ if ti, ok := (*lsym.Extra).(*obj.TypeInfo); ok {
+ if off == 2*c.PtrSize {
+ return int32(types.TypeHash(ti.Type.(*types.Type)))
+ }
+ }
+ base.Fatalf("fixed32 data not known for %s:%d", sym, off)
+ return 0
+}
+
+// isFixedSym returns true if the contents of sym at the given offset
+// is known and is the constant address of another symbol.
+func isFixedSym(sym Sym, off int64) bool {
+ lsym := sym.(*obj.LSym)
+ switch {
+ case lsym.Type == objabi.SRODATA:
+ // itabs, dictionaries
+ default:
+ return false
+ }
+ for _, r := range lsym.R {
+ if (r.Type == objabi.R_ADDR || r.Type == objabi.R_WEAKADDR) && int64(r.Off) == off && r.Add == 0 {
+ return true
+ }
+ }
+ return false
+}
+func fixedSym(f *Func, sym Sym, off int64) Sym {
+ lsym := sym.(*obj.LSym)
+ for _, r := range lsym.R {
+ if (r.Type == objabi.R_ADDR || r.Type == objabi.R_WEAKADDR) && int64(r.Off) == off {
+ if strings.HasPrefix(r.Sym.Name, "type:") {
+ // In case we're loading a type out of a dictionary, we need to record
+ // that the containing function might put that type in an interface.
+ // That information is currently recorded in relocations in the dictionary,
+ // but if we perform this load at compile time then the dictionary
+ // might be dead.
+ reflectdata.MarkTypeSymUsedInInterface(r.Sym, f.fe.Func().Linksym())
+ } else if strings.HasPrefix(r.Sym.Name, "go:itab") {
+ // Same, but if we're using an itab we need to record that the
+ // itab._type might be put in an interface.
+ reflectdata.MarkTypeSymUsedInInterface(r.Sym, f.fe.Func().Linksym())
+ }
+ return r.Sym
+ }
+ }
+ base.Fatalf("fixedSym data not known for %s:%d", sym, off)
+ return nil
+}
+
// read8 reads one byte from the read-only global sym at offset off.
func read8(sym interface{}, off int64) uint8 {
lsym := sym.(*obj.LSym)
}
// makeJumpTableSym creates a new symbol to hold block b's jump table,
// named after the enclosing function and the block ID. The symbol is
// duplicate-OK and local so identical tables from different packages
// can coexist.
func makeJumpTableSym(b *Block) *obj.LSym {
	name := fmt.Sprintf("%s.jump%d", b.Func.fe.Func().LSym.Name, b.ID)
	s := base.Ctxt.Lookup(name)
	s.Set(obj.AttrDuplicateOK, true)
	s.Set(obj.AttrLocal, true)
	return s
}
return v <= 0xFFF
}
+
// setPos sets the position of v to pos, then returns true.
// Useful for setting the result of a rewrite's position to
// something other than the default. Returning true lets it be
// used directly as a condition in a rewrite rule.
func setPos(v *Value, pos src.XPos) bool {
	v.Pos = pos
	return true
}