import (
"cmd/compile/internal/base"
- "cmd/compile/internal/gc"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/x86"
return -dzClearStep * (dzBlockLen - tailSteps)
}
-func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
const (
ax = 1 << iota
x0
base.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
}
if *state&ax == 0 {
- p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
+ p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
*state |= ax
}
- p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
+ p = pp.Append(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
off += int64(types.PtrSize)
cnt -= int64(types.PtrSize)
}
if cnt == 8 {
if *state&ax == 0 {
- p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
+ p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
*state |= ax
}
- p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
+ p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
} else if !isPlan9 && cnt <= int64(8*types.RegSize) {
if *state&x0 == 0 {
- p = pp.Appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
+ p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
*state |= x0
}
for i := int64(0); i < cnt/16; i++ {
- p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+i*16)
+ p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+i*16)
}
if cnt%16 != 0 {
- p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16))
+ p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16))
}
} else if !isPlan9 && (cnt <= int64(128*types.RegSize)) {
if *state&x0 == 0 {
- p = pp.Appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
+ p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
*state |= x0
}
- p = pp.Appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off+dzDI(cnt), obj.TYPE_REG, x86.REG_DI, 0)
- p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(cnt))
+ p = pp.Append(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off+dzDI(cnt), obj.TYPE_REG, x86.REG_DI, 0)
+ p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(cnt))
p.To.Sym = ir.Syms.Duffzero
if cnt%16 != 0 {
- p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8))
+ p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8))
}
} else {
if *state&ax == 0 {
- p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
+ p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
*state |= ax
}
- p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(types.RegSize), obj.TYPE_REG, x86.REG_CX, 0)
- p = pp.Appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
- p = pp.Appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
- p = pp.Appendpp(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+ p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(types.RegSize), obj.TYPE_REG, x86.REG_CX, 0)
+ p = pp.Append(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
+ p = pp.Append(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+ p = pp.Append(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
}
return p
}
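A worked example of the small-size SSE path above (a sketch, not part of the CL): with off = 8 and cnt = 24, zerorange emits one 16-byte store for the leading block and one overlapping store for the 8-byte tail, instead of a narrower MOVQ:

	XORPS  X0, X0      // emitted once per function, guarded by *state
	MOVUPS X0, 8(SP)   // i = 0: zeroes [8, 24)
	MOVUPS X0, 16(SP)  // tail: zeroes [16, 32), overlapping the first store

The tiers are: unrolled MOVUPS stores up to 8*RegSize bytes, DUFFZERO up to 128*RegSize, and REP STOSQ beyond that; Plan 9 is excluded from the SSE paths.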
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
// This is a hardware nop (1-byte 0x90) instruction,
// even though we describe it as an explicit XCHGL here.
// Particularly, this does not zero the high 32 bits
package arm
import (
- "cmd/compile/internal/gc"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/arm"
)
-func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, r0 *uint32) *obj.Prog {
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, r0 *uint32) *obj.Prog {
if cnt == 0 {
return p
}
if *r0 == 0 {
- p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
+ p = pp.Append(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
*r0 = 1
}
if cnt < int64(4*types.PtrSize) {
for i := int64(0); i < cnt; i += int64(types.PtrSize) {
- p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, 4+off+i)
+ p = pp.Append(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, 4+off+i)
}
} else if cnt <= int64(128*types.PtrSize) {
- p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
+ p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
p.Reg = arm.REGSP
- p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+ p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p.To.Name = obj.NAME_EXTERN
p.To.Sym = ir.Syms.Duffzero
p.To.Offset = 4 * (128 - cnt/int64(types.PtrSize))
} else {
- p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
+ p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
p.Reg = arm.REGSP
- p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, arm.REG_R2, 0)
+ p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, arm.REG_R2, 0)
p.Reg = arm.REG_R1
- p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
+ p = pp.Append(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
p1 := p
p.Scond |= arm.C_PBIT
- p = pp.Appendpp(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
+ p = pp.Append(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
p.Reg = arm.REG_R2
- p = pp.Appendpp(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
- gc.Patch(p, p1)
+ p = pp.Append(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+ p.To.SetTarget(p1)
}
return p
}
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
p := pp.Prog(arm.AAND)
p.From.Type = obj.TYPE_REG
p.From.Reg = arm.REG_R0
p2.Reg = arm.REG_R1
p3 := s.Prog(arm.ABLE)
p3.To.Type = obj.TYPE_BRANCH
- gc.Patch(p3, p)
+ p3.To.SetTarget(p)
case ssa.OpARMLoweredMove:
// MOVW.P 4(R1), Rtmp
// MOVW.P Rtmp, 4(R2)
p3.Reg = arm.REG_R1
p4 := s.Prog(arm.ABLE)
p4.To.Type = obj.TYPE_BRANCH
- gc.Patch(p4, p)
+ p4.To.SetTarget(p)
case ssa.OpARMEqual,
ssa.OpARMNotEqual,
ssa.OpARMLessThan,
package arm64
import (
- "cmd/compile/internal/gc"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/arm64"
return frame
}
-func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
if cnt == 0 {
return p
}
if cnt < int64(4*types.PtrSize) {
for i := int64(0); i < cnt; i += int64(types.PtrSize) {
- p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off+i)
+ p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off+i)
}
} else if cnt <= int64(128*types.PtrSize) && !darwin { // darwin ld64 cannot handle BR26 reloc with non-zero addend
if cnt%(2*int64(types.PtrSize)) != 0 {
- p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off)
+ p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off)
off += int64(types.PtrSize)
cnt -= int64(types.PtrSize)
}
- p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REG_R20, 0)
- p = pp.Appendpp(p, arm64.AADD, obj.TYPE_CONST, 0, 8+off, obj.TYPE_REG, arm64.REG_R20, 0)
+ p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REG_R20, 0)
+ p = pp.Append(p, arm64.AADD, obj.TYPE_CONST, 0, 8+off, obj.TYPE_REG, arm64.REG_R20, 0)
p.Reg = arm64.REG_R20
- p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+ p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p.To.Name = obj.NAME_EXTERN
p.To.Sym = ir.Syms.Duffzero
p.To.Offset = 4 * (64 - cnt/(2*int64(types.PtrSize)))
// We are at the function entry, where no register is live, so it is okay to clobber
// other registers
const rtmp = arm64.REG_R20
- p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, rtmp, 0)
- p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
- p = pp.Appendpp(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT1, 0)
+ p = pp.Append(p, arm64.AMOVD, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, rtmp, 0)
+ p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
+ p = pp.Append(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT1, 0)
p.Reg = arm64.REGRT1
- p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, rtmp, 0)
- p = pp.Appendpp(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT2, 0)
+ p = pp.Append(p, arm64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, rtmp, 0)
+ p = pp.Append(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT2, 0)
p.Reg = arm64.REGRT1
- p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGRT1, int64(types.PtrSize))
+ p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGRT1, int64(types.PtrSize))
p.Scond = arm64.C_XPRE
p1 := p
- p = pp.Appendpp(p, arm64.ACMP, obj.TYPE_REG, arm64.REGRT1, 0, obj.TYPE_NONE, 0, 0)
+ p = pp.Append(p, arm64.ACMP, obj.TYPE_REG, arm64.REGRT1, 0, obj.TYPE_NONE, 0, 0)
p.Reg = arm64.REGRT2
- p = pp.Appendpp(p, arm64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
- gc.Patch(p, p1)
+ p = pp.Append(p, arm64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+ p.To.SetTarget(p1)
}
return p
}
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
p := pp.Prog(arm64.AHINT)
p.From.Type = obj.TYPE_CONST
return p
p2.From.Type = obj.TYPE_REG
p2.From.Reg = arm64.REGTMP
p2.To.Type = obj.TYPE_BRANCH
- gc.Patch(p2, p)
+ p2.To.SetTarget(p)
case ssa.OpARM64LoweredAtomicExchange64Variant,
ssa.OpARM64LoweredAtomicExchange32Variant:
swap := arm64.ASWPALD
p3.From.Type = obj.TYPE_REG
p3.From.Reg = arm64.REGTMP
p3.To.Type = obj.TYPE_BRANCH
- gc.Patch(p3, p)
+ p3.To.SetTarget(p)
case ssa.OpARM64LoweredAtomicAdd64Variant,
ssa.OpARM64LoweredAtomicAdd32Variant:
// LDADDAL Rarg1, (Rarg0), Rout
p4.From.Type = obj.TYPE_REG
p4.From.Reg = arm64.REGTMP
p4.To.Type = obj.TYPE_BRANCH
- gc.Patch(p4, p)
+ p4.To.SetTarget(p)
p5 := s.Prog(arm64.ACSET)
p5.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
p5.From.Reg = arm64.COND_EQ
p5.To.Type = obj.TYPE_REG
p5.To.Reg = out
- gc.Patch(p2, p5)
+ p2.To.SetTarget(p5)
case ssa.OpARM64LoweredAtomicCas64Variant,
ssa.OpARM64LoweredAtomicCas32Variant:
// Rarg0: ptr
p3.From.Type = obj.TYPE_REG
p3.From.Reg = arm64.REGTMP
p3.To.Type = obj.TYPE_BRANCH
- gc.Patch(p3, p)
+ p3.To.SetTarget(p)
case ssa.OpARM64LoweredAtomicAnd8Variant,
ssa.OpARM64LoweredAtomicAnd32Variant:
atomic_clear := arm64.ALDCLRALW
p2.Reg = arm64.REG_R16
p3 := s.Prog(arm64.ABLE)
p3.To.Type = obj.TYPE_BRANCH
- gc.Patch(p3, p)
+ p3.To.SetTarget(p)
case ssa.OpARM64DUFFCOPY:
p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_MEM
p3.Reg = arm64.REG_R16
p4 := s.Prog(arm64.ABLE)
p4.To.Type = obj.TYPE_BRANCH
- gc.Patch(p4, p)
+ p4.To.SetTarget(p)
case ssa.OpARM64CALLstatic, ssa.OpARM64CALLclosure, ssa.OpARM64CALLinter:
s.Call(v)
case ssa.OpARM64LoweredWB:
--- /dev/null
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bitvec
+
+import (
+ "math/bits"
+
+ "cmd/compile/internal/base"
+)
+
+const (
+ wordBits = 32
+ wordMask = wordBits - 1
+ wordShift = 5
+)
+
+// A BitVec is a bit vector.
+type BitVec struct {
+ N int32 // number of bits in vector
+ B []uint32 // words holding bits
+}
+
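+// New returns a new bit vector capable of holding n bits, all initially clear.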
+func New(n int32) BitVec {
+ nword := (n + wordBits - 1) / wordBits
+ return BitVec{n, make([]uint32, nword)}
+}
+
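+// A Bulk is a single allocation that backs a sequence of equally
+// sized bit vectors; Next carves off one vector at a time.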
+type Bulk struct {
+ words []uint32
+ nbit int32
+ nword int32
+}
+
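+// NewBulk allocates backing storage for count bit vectors of nbit bits each.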
+func NewBulk(nbit int32, count int32) Bulk {
+ nword := (nbit + wordBits - 1) / wordBits
+ size := int64(nword) * int64(count)
+ if int64(int32(size*4)) != size*4 {
+ base.Fatalf("bvbulkalloc too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size)
+ }
+ return Bulk{
+ words: make([]uint32, size),
+ nbit: nbit,
+ nword: nword,
+ }
+}
+
+func (b *Bulk) Next() BitVec {
+ out := BitVec{b.nbit, b.words[:b.nword]}
+ b.words = b.words[b.nword:]
+ return out
+}
+
+func (bv1 BitVec) Eq(bv2 BitVec) bool {
+ if bv1.N != bv2.N {
+ base.Fatalf("bvequal: lengths %d and %d are not equal", bv1.N, bv2.N)
+ }
+ for i, x := range bv1.B {
+ if x != bv2.B[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func (dst BitVec) Copy(src BitVec) {
+ copy(dst.B, src.B)
+}
+
+func (bv BitVec) Get(i int32) bool {
+ if i < 0 || i >= bv.N {
+ base.Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.N)
+ }
+ mask := uint32(1 << uint(i%wordBits))
+ return bv.B[i>>wordShift]&mask != 0
+}
+
+func (bv BitVec) Set(i int32) {
+ if i < 0 || i >= bv.N {
+ base.Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.N)
+ }
+ mask := uint32(1 << uint(i%wordBits))
+ bv.B[i/wordBits] |= mask
+}
+
+func (bv BitVec) Unset(i int32) {
+ if i < 0 || i >= bv.N {
+ base.Fatalf("bvunset: index %d is out of bounds with length %d\n", i, bv.N)
+ }
+ mask := uint32(1 << uint(i%wordBits))
+ bv.B[i/wordBits] &^= mask
+}
+
+// Next returns the smallest index >= i for which bv.Get(i) is true.
+// If there is no such index, Next returns -1.
+func (bv BitVec) Next(i int32) int32 {
+ if i >= bv.N {
+ return -1
+ }
+
+ // Jump i ahead to next word with bits.
+ if bv.B[i>>wordShift]>>uint(i&wordMask) == 0 {
+ i &^= wordMask
+ i += wordBits
+ for i < bv.N && bv.B[i>>wordShift] == 0 {
+ i += wordBits
+ }
+ }
+
+ if i >= bv.N {
+ return -1
+ }
+
+ // Find 1 bit.
+ w := bv.B[i>>wordShift] >> uint(i&wordMask)
+ i += int32(bits.TrailingZeros32(w))
+
+ return i
+}
+
+func (bv BitVec) IsEmpty() bool {
+ for _, x := range bv.B {
+ if x != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+func (bv BitVec) Not() {
+ for i, x := range bv.B {
+ bv.B[i] = ^x
+ }
+}
+
+// Or sets dst to the union src1 | src2.
+func (dst BitVec) Or(src1, src2 BitVec) {
+ if len(src1.B) == 0 {
+ return
+ }
+ _, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop
+
+ for i, x := range src1.B {
+ dst.B[i] = x | src2.B[i]
+ }
+}
+
+// And sets dst to the intersection src1 & src2.
+func (dst BitVec) And(src1, src2 BitVec) {
+ if len(src1.B) == 0 {
+ return
+ }
+ _, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop
+
+ for i, x := range src1.B {
+ dst.B[i] = x & src2.B[i]
+ }
+}
+
+// AndNot sets dst to the difference src1 &^ src2.
+func (dst BitVec) AndNot(src1, src2 BitVec) {
+ if len(src1.B) == 0 {
+ return
+ }
+ _, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop
+
+ for i, x := range src1.B {
+ dst.B[i] = x &^ src2.B[i]
+ }
+}
+
+func (bv BitVec) String() string {
+ s := make([]byte, 2+bv.N)
+ copy(s, "#*")
+ for i := int32(0); i < bv.N; i++ {
+ ch := byte('0')
+ if bv.Get(i) {
+ ch = '1'
+ }
+ s[2+i] = ch
+ }
+ return string(s)
+}
+
+func (bv BitVec) Clear() {
+ for i := range bv.B {
+ bv.B[i] = 0
+ }
+}
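A minimal usage sketch of the exported bitvec API (hypothetical: internal packages are only importable from within the cmd module, so this shows the shape, not a runnable standalone program):

	import (
		"fmt"

		"cmd/compile/internal/bitvec"
	)

	func example() {
		bv := bitvec.New(70) // 70 bits -> three uint32 words
		bv.Set(3)
		bv.Set(68)
		for i := bv.Next(0); i >= 0; i = bv.Next(i + 1) {
			fmt.Println(i) // prints 3, then 68
		}
		bv.Unset(3)
		fmt.Println(bv.IsEmpty()) // false: bit 68 is still set
	}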
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/obj"
memhashvarlen = typecheck.LookupRuntimeFunc("memhash_varlen")
}
ot := 0
- ot = dsymptr(closure, ot, memhashvarlen, 0)
- ot = duintptr(closure, ot, uint64(t.Width)) // size encoded in closure
- ggloblsym(closure, int32(ot), obj.DUPOK|obj.RODATA)
+ ot = objw.SymPtr(closure, ot, memhashvarlen, 0)
+ ot = objw.Uintptr(closure, ot, uint64(t.Width)) // size encoded in closure
+ objw.Global(closure, int32(ot), obj.DUPOK|obj.RODATA)
return closure
case types.ASPECIAL:
break
// Build closure. It doesn't close over any variables, so
// it contains just the function pointer.
- dsymptr(closure, 0, sym.Linksym(), 0)
- ggloblsym(closure, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
+ objw.SymPtr(closure, 0, sym.Linksym(), 0)
+ objw.Global(closure, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
return closure
}
s := typecheck.LookupRuntimeVar(name + "·f")
if len(s.P) == 0 {
f := typecheck.LookupRuntimeFunc(name)
- dsymptr(s, 0, f, 0)
- ggloblsym(s, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
+ objw.SymPtr(s, 0, f, 0)
+ objw.Global(s, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
}
return s
}
memequalvarlen = typecheck.LookupRuntimeVar("memequal_varlen") // asm func
}
ot := 0
- ot = dsymptr(closure, ot, memequalvarlen, 0)
- ot = duintptr(closure, ot, uint64(t.Width))
- ggloblsym(closure, int32(ot), obj.DUPOK|obj.RODATA)
+ ot = objw.SymPtr(closure, ot, memequalvarlen, 0)
+ ot = objw.Uintptr(closure, ot, uint64(t.Width))
+ objw.Global(closure, int32(ot), obj.DUPOK|obj.RODATA)
return closure
case types.ASPECIAL:
break
typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
// Generate a closure which points at the function we just generated.
- dsymptr(closure, 0, sym.Linksym(), 0)
- ggloblsym(closure, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
+ objw.SymPtr(closure, 0, sym.Linksym(), 0)
+ objw.Global(closure, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
return closure
}
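For reference, the writer helpers renamed in this CL, as seen at the call sites throughout (old gc name -> new objw name): dsymptr -> objw.SymPtr, dsymptrOff -> objw.SymPtrOff, dsymptrWeakOff -> objw.SymPtrWeakOff, duint8/duint16/duint32/duintptr -> objw.Uint8/Uint16/Uint32/Uintptr, dbvec -> objw.BitVec, ggloblsym -> objw.Global. A typical emission sequence, sketched from the call sites (lsym, target, and size are placeholders):

	ot := 0
	ot = objw.SymPtr(lsym, ot, target, 0)     // pointer-sized reference, pointer-aligned
	ot = objw.Uintptr(lsym, ot, uint64(size)) // uintptr-sized integer
	objw.Global(lsym, int32(ot), obj.DUPOK|obj.RODATA)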
+++ /dev/null
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
- "math/bits"
-
- "cmd/compile/internal/base"
-)
-
-const (
- wordBits = 32
- wordMask = wordBits - 1
- wordShift = 5
-)
-
-// A bvec is a bit vector.
-type bvec struct {
- n int32 // number of bits in vector
- b []uint32 // words holding bits
-}
-
-func bvalloc(n int32) bvec {
- nword := (n + wordBits - 1) / wordBits
- return bvec{n, make([]uint32, nword)}
-}
-
-type bulkBvec struct {
- words []uint32
- nbit int32
- nword int32
-}
-
-func bvbulkalloc(nbit int32, count int32) bulkBvec {
- nword := (nbit + wordBits - 1) / wordBits
- size := int64(nword) * int64(count)
- if int64(int32(size*4)) != size*4 {
- base.Fatalf("bvbulkalloc too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size)
- }
- return bulkBvec{
- words: make([]uint32, size),
- nbit: nbit,
- nword: nword,
- }
-}
-
-func (b *bulkBvec) next() bvec {
- out := bvec{b.nbit, b.words[:b.nword]}
- b.words = b.words[b.nword:]
- return out
-}
-
-func (bv1 bvec) Eq(bv2 bvec) bool {
- if bv1.n != bv2.n {
- base.Fatalf("bvequal: lengths %d and %d are not equal", bv1.n, bv2.n)
- }
- for i, x := range bv1.b {
- if x != bv2.b[i] {
- return false
- }
- }
- return true
-}
-
-func (dst bvec) Copy(src bvec) {
- copy(dst.b, src.b)
-}
-
-func (bv bvec) Get(i int32) bool {
- if i < 0 || i >= bv.n {
- base.Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.n)
- }
- mask := uint32(1 << uint(i%wordBits))
- return bv.b[i>>wordShift]&mask != 0
-}
-
-func (bv bvec) Set(i int32) {
- if i < 0 || i >= bv.n {
- base.Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.n)
- }
- mask := uint32(1 << uint(i%wordBits))
- bv.b[i/wordBits] |= mask
-}
-
-func (bv bvec) Unset(i int32) {
- if i < 0 || i >= bv.n {
- base.Fatalf("bvunset: index %d is out of bounds with length %d\n", i, bv.n)
- }
- mask := uint32(1 << uint(i%wordBits))
- bv.b[i/wordBits] &^= mask
-}
-
-// bvnext returns the smallest index >= i for which bvget(bv, i) == 1.
-// If there is no such index, bvnext returns -1.
-func (bv bvec) Next(i int32) int32 {
- if i >= bv.n {
- return -1
- }
-
- // Jump i ahead to next word with bits.
- if bv.b[i>>wordShift]>>uint(i&wordMask) == 0 {
- i &^= wordMask
- i += wordBits
- for i < bv.n && bv.b[i>>wordShift] == 0 {
- i += wordBits
- }
- }
-
- if i >= bv.n {
- return -1
- }
-
- // Find 1 bit.
- w := bv.b[i>>wordShift] >> uint(i&wordMask)
- i += int32(bits.TrailingZeros32(w))
-
- return i
-}
-
-func (bv bvec) IsEmpty() bool {
- for _, x := range bv.b {
- if x != 0 {
- return false
- }
- }
- return true
-}
-
-func (bv bvec) Not() {
- for i, x := range bv.b {
- bv.b[i] = ^x
- }
-}
-
-// union
-func (dst bvec) Or(src1, src2 bvec) {
- if len(src1.b) == 0 {
- return
- }
- _, _ = dst.b[len(src1.b)-1], src2.b[len(src1.b)-1] // hoist bounds checks out of the loop
-
- for i, x := range src1.b {
- dst.b[i] = x | src2.b[i]
- }
-}
-
-// intersection
-func (dst bvec) And(src1, src2 bvec) {
- if len(src1.b) == 0 {
- return
- }
- _, _ = dst.b[len(src1.b)-1], src2.b[len(src1.b)-1] // hoist bounds checks out of the loop
-
- for i, x := range src1.b {
- dst.b[i] = x & src2.b[i]
- }
-}
-
-// difference
-func (dst bvec) AndNot(src1, src2 bvec) {
- if len(src1.b) == 0 {
- return
- }
- _, _ = dst.b[len(src1.b)-1], src2.b[len(src1.b)-1] // hoist bounds checks out of the loop
-
- for i, x := range src1.b {
- dst.b[i] = x &^ src2.b[i]
- }
-}
-
-func (bv bvec) String() string {
- s := make([]byte, 2+bv.n)
- copy(s, "#*")
- for i := int32(0); i < bv.n; i++ {
- ch := byte('0')
- if bv.Get(i) {
- ch = '1'
- }
- s[2+i] = ch
- }
- return string(s)
-}
-
-func (bv bvec) Clear() {
- for i := range bv.b {
- bv.b[i] = 0
- }
-}
-
-// FNV-1 hash function constants.
-const (
- H0 = 2166136261
- Hp = 16777619
-)
-
-func hashbitmap(h uint32, bv bvec) uint32 {
- n := int((bv.n + 31) / 32)
- for i := 0; i < n; i++ {
- w := bv.b[i]
- h = (h * Hp) ^ (w & 0xff)
- h = (h * Hp) ^ ((w >> 8) & 0xff)
- h = (h * Hp) ^ ((w >> 16) & 0xff)
- h = (h * Hp) ^ ((w >> 24) & 0xff)
- }
-
- return h
-}
-
-// bvecSet is a set of bvecs, in initial insertion order.
-type bvecSet struct {
- index []int // hash -> uniq index. -1 indicates empty slot.
- uniq []bvec // unique bvecs, in insertion order
-}
-
-func (m *bvecSet) grow() {
- // Allocate new index.
- n := len(m.index) * 2
- if n == 0 {
- n = 32
- }
- newIndex := make([]int, n)
- for i := range newIndex {
- newIndex[i] = -1
- }
-
- // Rehash into newIndex.
- for i, bv := range m.uniq {
- h := hashbitmap(H0, bv) % uint32(len(newIndex))
- for {
- j := newIndex[h]
- if j < 0 {
- newIndex[h] = i
- break
- }
- h++
- if h == uint32(len(newIndex)) {
- h = 0
- }
- }
- }
- m.index = newIndex
-}
-
-// add adds bv to the set and returns its index in m.extractUniqe.
-// The caller must not modify bv after this.
-func (m *bvecSet) add(bv bvec) int {
- if len(m.uniq)*4 >= len(m.index) {
- m.grow()
- }
-
- index := m.index
- h := hashbitmap(H0, bv) % uint32(len(index))
- for {
- j := index[h]
- if j < 0 {
- // New bvec.
- index[h] = len(m.uniq)
- m.uniq = append(m.uniq, bv)
- return len(m.uniq) - 1
- }
- jlive := m.uniq[j]
- if bv.Eq(jlive) {
- // Existing bvec.
- return j
- }
-
- h++
- if h == uint32(len(index)) {
- h = 0
- }
- }
-}
-
-// extractUniqe returns this slice of unique bit vectors in m, as
-// indexed by the result of bvecSet.add.
-func (m *bvecSet) extractUniqe() []bvec {
- return m.uniq
-}
--- /dev/null
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import "cmd/compile/internal/bitvec"
+
+// FNV-1 hash function constants.
+const (
+ h0 = 2166136261
+ hp = 16777619
+)
+
+// bvecSet is a set of bvecs, in initial insertion order.
+type bvecSet struct {
+ index []int // hash -> uniq index. -1 indicates empty slot.
+ uniq []bitvec.BitVec // unique bvecs, in insertion order
+}
+
+func (m *bvecSet) grow() {
+ // Allocate new index.
+ n := len(m.index) * 2
+ if n == 0 {
+ n = 32
+ }
+ newIndex := make([]int, n)
+ for i := range newIndex {
+ newIndex[i] = -1
+ }
+
+ // Rehash into newIndex.
+ for i, bv := range m.uniq {
+ h := hashbitmap(h0, bv) % uint32(len(newIndex))
+ for {
+ j := newIndex[h]
+ if j < 0 {
+ newIndex[h] = i
+ break
+ }
+ h++
+ if h == uint32(len(newIndex)) {
+ h = 0
+ }
+ }
+ }
+ m.index = newIndex
+}
+
+// add adds bv to the set and returns its index in m.extractUnique.
+// The caller must not modify bv after this.
+func (m *bvecSet) add(bv bitvec.BitVec) int {
+ if len(m.uniq)*4 >= len(m.index) {
+ m.grow()
+ }
+
+ index := m.index
+ h := hashbitmap(h0, bv) % uint32(len(index))
+ for {
+ j := index[h]
+ if j < 0 {
+ // New bvec.
+ index[h] = len(m.uniq)
+ m.uniq = append(m.uniq, bv)
+ return len(m.uniq) - 1
+ }
+ jlive := m.uniq[j]
+ if bv.Eq(jlive) {
+ // Existing bvec.
+ return j
+ }
+
+ h++
+ if h == uint32(len(index)) {
+ h = 0
+ }
+ }
+}
+
+// extractUnique returns the slice of unique bit vectors in m, as
+// indexed by the result of bvecSet.add.
+func (m *bvecSet) extractUnique() []bitvec.BitVec {
+ return m.uniq
+}
+
+func hashbitmap(h uint32, bv bitvec.BitVec) uint32 {
+ n := int((bv.N + 31) / 32)
+ for i := 0; i < n; i++ {
+ w := bv.B[i]
+ h = (h * hp) ^ (w & 0xff)
+ h = (h * hp) ^ ((w >> 8) & 0xff)
+ h = (h * hp) ^ ((w >> 16) & 0xff)
+ h = (h * hp) ^ ((w >> 24) & 0xff)
+ }
+
+ return h
+}
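bvecSet above is an open-addressed hash table keyed by an FNV-1 hash of the bitmap words: equal bitmaps collapse to a single index, which is how liveness shares identical stack maps across safe points. A hypothetical illustration, written as if inside package gc:

	var set bvecSet
	a := bitvec.New(64)
	a.Set(1)
	b := bitvec.New(64)
	b.Set(1)
	i := set.add(a)             // 0: first unique bitmap
	j := set.add(b)             // 0: equal contents dedup to the same index
	maps := set.extractUnique() // one bitmap; i == j
	_ = maps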
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
"cmd/compile/internal/syntax"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
}
sym := v.Sym().Linksym()
off := 0
- off = dsymptr(sym, off, fsym, 0) // data string
- off = duintptr(sym, off, uint64(size)) // len
+ off = objw.SymPtr(sym, off, fsym, 0) // data string
+ off = objw.Uintptr(sym, off, uint64(size)) // len
if kind == embedBytes {
- duintptr(sym, off, uint64(size)) // cap for slice
+ objw.Uintptr(sym, off, uint64(size)) // cap for slice
}
case embedFiles:
slicedata := base.Ctxt.Lookup(`"".` + v.Sym().Name + `.files`)
off := 0
// []files pointed at by Files
- off = dsymptr(slicedata, off, slicedata, 3*types.PtrSize) // []file, pointing just past slice
- off = duintptr(slicedata, off, uint64(len(files)))
- off = duintptr(slicedata, off, uint64(len(files)))
+ off = objw.SymPtr(slicedata, off, slicedata, 3*types.PtrSize) // []file, pointing just past slice
+ off = objw.Uintptr(slicedata, off, uint64(len(files)))
+ off = objw.Uintptr(slicedata, off, uint64(len(files)))
// embed/embed.go type file is:
// name string
const hashSize = 16
hash := make([]byte, hashSize)
for _, file := range files {
- off = dsymptr(slicedata, off, stringsym(v.Pos(), file), 0) // file string
- off = duintptr(slicedata, off, uint64(len(file)))
+ off = objw.SymPtr(slicedata, off, stringsym(v.Pos(), file), 0) // file string
+ off = objw.Uintptr(slicedata, off, uint64(len(file)))
if strings.HasSuffix(file, "/") {
// entry for directory - no data
- off = duintptr(slicedata, off, 0)
- off = duintptr(slicedata, off, 0)
+ off = objw.Uintptr(slicedata, off, 0)
+ off = objw.Uintptr(slicedata, off, 0)
off += hashSize
} else {
fsym, size, err := fileStringSym(v.Pos(), base.Flag.Cfg.Embed.Files[file], true, hash)
if err != nil {
base.ErrorfAt(v.Pos(), "embed %s: %v", file, err)
}
- off = dsymptr(slicedata, off, fsym, 0) // data string
- off = duintptr(slicedata, off, uint64(size))
+ off = objw.SymPtr(slicedata, off, fsym, 0) // data string
+ off = objw.Uintptr(slicedata, off, uint64(size))
off = int(slicedata.WriteBytes(base.Ctxt, int64(off), hash))
}
}
- ggloblsym(slicedata, int32(off), obj.RODATA|obj.LOCAL)
+ objw.Global(slicedata, int32(off), obj.RODATA|obj.LOCAL)
sym := v.Sym().Linksym()
- dsymptr(sym, 0, slicedata, 0)
+ objw.SymPtr(sym, 0, slicedata, 0)
}
}
package gc
import (
+ "cmd/compile/internal/objw"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/obj"
// ZeroRange zeroes a range of memory on stack. It is only inserted
// at function entry, and it is ok to clobber registers.
- ZeroRange func(*Progs, *obj.Prog, int64, int64, *uint32) *obj.Prog
+ ZeroRange func(*objw.Progs, *obj.Prog, int64, int64, *uint32) *obj.Prog
- Ginsnop func(*Progs) *obj.Prog
- Ginsnopdefer func(*Progs) *obj.Prog // special ginsnop for deferreturn
+ Ginsnop func(*objw.Progs) *obj.Prog
+ Ginsnopdefer func(*objw.Progs) *obj.Prog // special ginsnop for deferreturn
// SSAMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
SSAMarkMoves func(*SSAGenState, *ssa.Block)
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
- "cmd/compile/internal/ssa"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/objabi"
- "cmd/internal/src"
"fmt"
"os"
)
-var sharedProgArray = new([10000]obj.Prog) // *T instead of T to work around issue 19839
-
-// Progs accumulates Progs for a function and converts them into machine code.
-type Progs struct {
- Text *obj.Prog // ATEXT Prog for this function
- next *obj.Prog // next Prog
- pc int64 // virtual PC; count of Progs
- pos src.XPos // position to use for new Progs
- curfn *ir.Func // fn these Progs are for
- progcache []obj.Prog // local progcache
- cacheidx int // first free element of progcache
-
- nextLive LivenessIndex // liveness index for the next Prog
- prevLive LivenessIndex // last emitted liveness index
-}
-
-// newProgs returns a new Progs for fn.
-// worker indicates which of the backend workers will use the Progs.
-func newProgs(fn *ir.Func, worker int) *Progs {
- pp := new(Progs)
- if base.Ctxt.CanReuseProgs() {
- sz := len(sharedProgArray) / base.Flag.LowerC
- pp.progcache = sharedProgArray[sz*worker : sz*(worker+1)]
- }
- pp.curfn = fn
-
- // prime the pump
- pp.next = pp.NewProg()
- pp.clearp(pp.next)
-
- pp.pos = fn.Pos()
- pp.settext(fn)
- // PCDATA tables implicitly start with index -1.
- pp.prevLive = LivenessIndex{-1, false}
- pp.nextLive = pp.prevLive
- return pp
-}
-
-func (pp *Progs) NewProg() *obj.Prog {
- var p *obj.Prog
- if pp.cacheidx < len(pp.progcache) {
- p = &pp.progcache[pp.cacheidx]
- pp.cacheidx++
- } else {
- p = new(obj.Prog)
- }
- p.Ctxt = base.Ctxt
- return p
-}
-
-// Flush converts from pp to machine code.
-func (pp *Progs) Flush() {
- plist := &obj.Plist{Firstpc: pp.Text, Curfn: pp.curfn}
- obj.Flushplist(base.Ctxt, plist, pp.NewProg, base.Ctxt.Pkgpath)
-}
-
-// Free clears pp and any associated resources.
-func (pp *Progs) Free() {
- if base.Ctxt.CanReuseProgs() {
- // Clear progs to enable GC and avoid abuse.
- s := pp.progcache[:pp.cacheidx]
- for i := range s {
- s[i] = obj.Prog{}
- }
- }
- // Clear pp to avoid abuse.
- *pp = Progs{}
-}
-
-// Prog adds a Prog with instruction As to pp.
-func (pp *Progs) Prog(as obj.As) *obj.Prog {
- if pp.nextLive.StackMapValid() && pp.nextLive.stackMapIndex != pp.prevLive.stackMapIndex {
- // Emit stack map index change.
- idx := pp.nextLive.stackMapIndex
- pp.prevLive.stackMapIndex = idx
- p := pp.Prog(obj.APCDATA)
- Addrconst(&p.From, objabi.PCDATA_StackMapIndex)
- Addrconst(&p.To, int64(idx))
- }
- if pp.nextLive.isUnsafePoint != pp.prevLive.isUnsafePoint {
- // Emit unsafe-point marker.
- pp.prevLive.isUnsafePoint = pp.nextLive.isUnsafePoint
- p := pp.Prog(obj.APCDATA)
- Addrconst(&p.From, objabi.PCDATA_UnsafePoint)
- if pp.nextLive.isUnsafePoint {
- Addrconst(&p.To, objabi.PCDATA_UnsafePointUnsafe)
- } else {
- Addrconst(&p.To, objabi.PCDATA_UnsafePointSafe)
- }
- }
-
- p := pp.next
- pp.next = pp.NewProg()
- pp.clearp(pp.next)
- p.Link = pp.next
-
- if !pp.pos.IsKnown() && base.Flag.K != 0 {
- base.Warn("prog: unknown position (line 0)")
- }
-
- p.As = as
- p.Pos = pp.pos
- if pp.pos.IsStmt() == src.PosIsStmt {
- // Clear IsStmt for later Progs at this pos provided that as can be marked as a stmt
- if ssa.LosesStmtMark(as) {
- return p
- }
- pp.pos = pp.pos.WithNotStmt()
- }
- return p
-}
-
-func (pp *Progs) clearp(p *obj.Prog) {
- obj.Nopout(p)
- p.As = obj.AEND
- p.Pc = pp.pc
- pp.pc++
-}
-
-func (pp *Progs) Appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int16, foffset int64, ttype obj.AddrType, treg int16, toffset int64) *obj.Prog {
- q := pp.NewProg()
- pp.clearp(q)
- q.As = as
- q.Pos = p.Pos
- q.From.Type = ftype
- q.From.Reg = freg
- q.From.Offset = foffset
- q.To.Type = ttype
- q.To.Reg = treg
- q.To.Offset = toffset
- q.Link = p.Link
- p.Link = q
- return q
-}
-
-func (pp *Progs) settext(fn *ir.Func) {
- if pp.Text != nil {
- base.Fatalf("Progs.settext called twice")
- }
- ptxt := pp.Prog(obj.ATEXT)
- pp.Text = ptxt
-
- fn.LSym.Func().Text = ptxt
- ptxt.From.Type = obj.TYPE_MEM
- ptxt.From.Name = obj.NAME_EXTERN
- ptxt.From.Sym = fn.LSym
-}
-
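The Progs machinery deleted above moves to the new cmd/compile/internal/objw package, with renames visible at the call sites in this CL: newProgs becomes objw.NewProgs, Appendpp becomes Append, and the Addrconst and Patch helpers give way to the obj.Addr methods SetConst and SetTarget. A sketch of the surviving shapes, inferred from those call sites rather than a definitive listing:

	pp := objw.NewProgs(fn, worker) // was gc.newProgs
	p := pp.Prog(x86.AMOVQ)         // unchanged
	p = pp.Append(p, x86.AMOVQ,     // was pp.Appendpp
		obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
	p.From.SetConst(0) // was gc.Addrconst(&p.From, 0)
	br := pp.Prog(x86.AJNE)
	br.To.Type = obj.TYPE_BRANCH
	br.To.SetTarget(p) // was gc.Patch(br, p)
	pp.Free()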
// makeABIWrapper creates a new function that wraps a cross-ABI call
// to "f". The wrapper is marked as an ABIWRAPPER.
func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) {
base.Ctxt.InitTextSym(f.LSym, flag)
}
-
-func ggloblnod(nam ir.Node) {
- s := nam.Sym().Linksym()
- s.Gotype = ngotype(nam).Linksym()
- flags := 0
- if nam.Name().Readonly() {
- flags = obj.RODATA
- }
- if nam.Type() != nil && !nam.Type().HasPointers() {
- flags |= obj.NOPTR
- }
- base.Ctxt.Globl(s, nam.Type().Width, flags)
- if nam.Name().LibfuzzerExtraCounter() {
- s.Type = objabi.SLIBFUZZER_EXTRA_COUNTER
- }
- if nam.Sym().Linkname != "" {
- // Make sure linkname'd symbol is non-package. When a symbol is
- // both imported and linkname'd, s.Pkg may not set to "_" in
- // types.Sym.Linksym because LSym already exists. Set it here.
- s.Pkg = "_"
- }
-}
-
-func ggloblsym(s *obj.LSym, width int32, flags int16) {
- if flags&obj.LOCAL != 0 {
- s.Set(obj.AttrLocal, true)
- flags &^= obj.LOCAL
- }
- base.Ctxt.Globl(s, int64(width), int(flags))
-}
-
-func Addrconst(a *obj.Addr, v int64) {
- a.SetConst(v)
-}
-
-func Patch(p *obj.Prog, to *obj.Prog) {
- p.To.SetTarget(to)
-}
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/obj"
sym.Def = task
lsym := sym.Linksym()
ot := 0
- ot = duintptr(lsym, ot, 0) // state: not initialized yet
- ot = duintptr(lsym, ot, uint64(len(deps)))
- ot = duintptr(lsym, ot, uint64(len(fns)))
+ ot = objw.Uintptr(lsym, ot, 0) // state: not initialized yet
+ ot = objw.Uintptr(lsym, ot, uint64(len(deps)))
+ ot = objw.Uintptr(lsym, ot, uint64(len(fns)))
for _, d := range deps {
- ot = dsymptr(lsym, ot, d, 0)
+ ot = objw.SymPtr(lsym, ot, d, 0)
}
for _, f := range fns {
- ot = dsymptr(lsym, ot, f, 0)
+ ot = objw.SymPtr(lsym, ot, f, 0)
}
// An initTask has pointers, but none into the Go heap.
// It's not quite read only, the state field must be modifiable.
- ggloblsym(lsym, int32(ot), obj.NOPTR)
+ objw.Global(lsym, int32(ot), obj.NOPTR)
return task
}
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/bio"
if zerosize > 0 {
zero := ir.Pkgs.Map.Lookup("zero")
- ggloblsym(zero.Linksym(), int32(zerosize), obj.DUPOK|obj.RODATA)
+ objw.Global(zero.Linksym(), int32(zerosize), obj.DUPOK|obj.RODATA)
}
addGCLocals()
})
for _, s := range funcsyms {
sf := s.Pkg.Lookup(ir.FuncSymName(s)).Linksym()
- dsymptr(sf, 0, s.Linksym(), 0)
- ggloblsym(sf, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
+ objw.SymPtr(sf, 0, s.Linksym(), 0)
+ objw.Global(sf, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
}
}
}
for _, gcsym := range []*obj.LSym{fn.GCArgs, fn.GCLocals} {
if gcsym != nil && !gcsym.OnList() {
- ggloblsym(gcsym, int32(len(gcsym.P)), obj.RODATA|obj.DUPOK)
+ objw.Global(gcsym, int32(len(gcsym.P)), obj.RODATA|obj.DUPOK)
}
}
if x := fn.StackObjects; x != nil {
attr := int16(obj.RODATA)
- ggloblsym(x, int32(len(x.P)), attr)
+ objw.Global(x, int32(len(x.P)), attr)
x.Set(obj.AttrStatic, true)
}
if x := fn.OpenCodedDeferInfo; x != nil {
- ggloblsym(x, int32(len(x.P)), obj.RODATA|obj.DUPOK)
+ objw.Global(x, int32(len(x.P)), obj.RODATA|obj.DUPOK)
}
}
}
-func duintxx(s *obj.LSym, off int, v uint64, wid int) int {
- if off&(wid-1) != 0 {
- base.Fatalf("duintxxLSym: misaligned: v=%d wid=%d off=%d", v, wid, off)
- }
- s.WriteInt(base.Ctxt, int64(off), wid, int64(v))
- return off + wid
-}
-
-func duint8(s *obj.LSym, off int, v uint8) int {
- return duintxx(s, off, uint64(v), 1)
-}
-
-func duint16(s *obj.LSym, off int, v uint16) int {
- return duintxx(s, off, uint64(v), 2)
-}
-
-func duint32(s *obj.LSym, off int, v uint32) int {
- return duintxx(s, off, uint64(v), 4)
-}
-
-func duintptr(s *obj.LSym, off int, v uint64) int {
- return duintxx(s, off, v, types.PtrSize)
-}
-
-func dbvec(s *obj.LSym, off int, bv bvec) int {
- // Runtime reads the bitmaps as byte arrays. Oblige.
- for j := 0; int32(j) < bv.n; j += 8 {
- word := bv.b[j/32]
- off = duint8(s, off, uint8(word>>(uint(j)%32)))
- }
- return off
-}
-
const (
stringSymPrefix = "go.string."
stringSymPattern = ".gostring.%d.%x"
symdata := base.Ctxt.Lookup(stringSymPrefix + symname)
if !symdata.OnList() {
off := dstringdata(symdata, 0, s, pos, "string")
- ggloblsym(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL)
+ objw.Global(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL)
symdata.Set(obj.AttrContentAddressable, true)
}
info := symdata.NewFileInfo()
info.Name = file
info.Size = size
- ggloblsym(symdata, int32(size), obj.DUPOK|obj.RODATA|obj.LOCAL)
+ objw.Global(symdata, int32(size), obj.DUPOK|obj.RODATA|obj.LOCAL)
// Note: AttrContentAddressable cannot be set here,
// because the content-addressable-handling code
// does not know about file symbols.
lsym := sym.Linksym()
off := dstringdata(lsym, 0, s, pos, "slice")
- ggloblsym(lsym, int32(off), obj.NOPTR|obj.LOCAL)
+ objw.Global(lsym, int32(off), obj.NOPTR|obj.LOCAL)
return symnode
}
return off + len(t)
}
-func dsymptr(s *obj.LSym, off int, x *obj.LSym, xoff int) int {
- off = int(types.Rnd(int64(off), int64(types.PtrSize)))
- s.WriteAddr(base.Ctxt, int64(off), types.PtrSize, x, int64(xoff))
- off += types.PtrSize
- return off
-}
-
-func dsymptrOff(s *obj.LSym, off int, x *obj.LSym) int {
- s.WriteOff(base.Ctxt, int64(off), x, 0)
- off += 4
- return off
-}
-
-func dsymptrWeakOff(s *obj.LSym, off int, x *obj.LSym) int {
- s.WriteWeakOff(base.Ctxt, int64(off), x, 0)
- off += 4
- return off
-}
-
// slicesym writes a static slice symbol {&arr, lencap, lencap} to n+noff.
// slicesym does not modify n.
func slicesym(n *ir.Name, noff int64, arr *ir.Name, lencap int64) {
base.Fatalf("litsym unhandled OLITERAL %v", c)
}
}
+
+func ggloblnod(nam ir.Node) {
+ s := nam.Sym().Linksym()
+ s.Gotype = ngotype(nam).Linksym()
+ flags := 0
+ if nam.Name().Readonly() {
+ flags = obj.RODATA
+ }
+ if nam.Type() != nil && !nam.Type().HasPointers() {
+ flags |= obj.NOPTR
+ }
+ base.Ctxt.Globl(s, nam.Type().Width, flags)
+ if nam.Name().LibfuzzerExtraCounter() {
+ s.Type = objabi.SLIBFUZZER_EXTRA_COUNTER
+ }
+ if nam.Sym().Linkname != "" {
+ // Make sure linkname'd symbol is non-package. When a symbol is
+	// both imported and linkname'd, s.Pkg may not be set to "_" in
+ // types.Sym.Linksym because LSym already exists. Set it here.
+ s.Pkg = "_"
+ }
+}
import (
"cmd/compile/internal/base"
+ "cmd/compile/internal/bitvec"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
"cmd/compile/internal/ssa"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
}
lsym := base.Ctxt.Lookup(fn.LSym.Name + ".args_stackmap")
nptr := int(fn.Type().ArgWidth() / int64(types.PtrSize))
- bv := bvalloc(int32(nptr) * 2)
+ bv := bitvec.New(int32(nptr) * 2)
nbitmap := 1
if fn.Type().NumResults() > 0 {
nbitmap = 2
}
- off := duint32(lsym, 0, uint32(nbitmap))
- off = duint32(lsym, off, uint32(bv.n))
+ off := objw.Uint32(lsym, 0, uint32(nbitmap))
+ off = objw.Uint32(lsym, off, uint32(bv.N))
if ir.IsMethod(fn) {
onebitwalktype1(fn.Type().Recvs(), 0, bv)
if fn.Type().NumParams() > 0 {
onebitwalktype1(fn.Type().Params(), 0, bv)
}
- off = dbvec(lsym, off, bv)
+ off = objw.BitVec(lsym, off, bv)
if fn.Type().NumResults() > 0 {
onebitwalktype1(fn.Type().Results(), 0, bv)
- off = dbvec(lsym, off, bv)
+ off = objw.BitVec(lsym, off, bv)
}
- ggloblsym(lsym, int32(off), obj.RODATA|obj.LOCAL)
+ objw.Global(lsym, int32(off), obj.RODATA|obj.LOCAL)
}
// cmpstackvarlt reports whether the stack variable a sorts before b.
largeStackFramesMu.Unlock()
return
}
- pp := newProgs(fn, worker)
+ pp := objw.NewProgs(fn, worker)
defer pp.Free()
genssa(f, pp)
// Check frame size again.
import (
"cmd/compile/internal/base"
+ "cmd/compile/internal/bitvec"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/obj"
//
// uevar: upward exposed variables (used before set in block)
// varkill: killed variables (set in block)
- uevar bvec
- varkill bvec
+ uevar bitvec.BitVec
+ varkill bitvec.BitVec
// Computed during Liveness.solve using control flow information:
//
// livein: variables live at block entry
// liveout: variables live at block exit
- livein bvec
- liveout bvec
+ livein bitvec.BitVec
+ liveout bitvec.BitVec
}
// A collection of global state used by liveness analysis.
allUnsafe bool
// unsafePoints bit i is set if Value ID i is an unsafe-point
// (preemption is not allowed). Only valid if !allUnsafe.
- unsafePoints bvec
+ unsafePoints bitvec.BitVec
// An array with a bit vector for each safe point in the
// current Block during Liveness.epilogue. Indexed in Value
// order for that block. Additionally, for the entry block
// livevars[0] is the entry bitmap. Liveness.compact moves
// these to stackMaps.
- livevars []bvec
+ livevars []bitvec.BitVec
// livenessMap maps from safe points (i.e., CALLs) to their
// liveness map indexes.
livenessMap LivenessMap
stackMapSet bvecSet
- stackMaps []bvec
+ stackMaps []bitvec.BitVec
cache progeffectscache
}
// LivenessMap maps from *ssa.Value to LivenessIndex.
type LivenessMap struct {
- vals map[ssa.ID]LivenessIndex
+ vals map[ssa.ID]objw.LivenessIndex
// The set of live, pointer-containing variables at the deferreturn
// call (only set when open-coded defers are used).
- deferreturn LivenessIndex
+ deferreturn objw.LivenessIndex
}
func (m *LivenessMap) reset() {
if m.vals == nil {
- m.vals = make(map[ssa.ID]LivenessIndex)
+ m.vals = make(map[ssa.ID]objw.LivenessIndex)
} else {
for k := range m.vals {
delete(m.vals, k)
}
}
- m.deferreturn = LivenessDontCare
+ m.deferreturn = objw.LivenessDontCare
}
-func (m *LivenessMap) set(v *ssa.Value, i LivenessIndex) {
+func (m *LivenessMap) set(v *ssa.Value, i objw.LivenessIndex) {
m.vals[v.ID] = i
}
-func (m LivenessMap) Get(v *ssa.Value) LivenessIndex {
+func (m LivenessMap) Get(v *ssa.Value) objw.LivenessIndex {
// If v isn't in the map, then it's a "don't care" and not an
// unsafe-point.
if idx, ok := m.vals[v.ID]; ok {
return idx
}
- return LivenessIndex{StackMapDontCare, false}
-}
-
-// LivenessIndex stores the liveness map information for a Value.
-type LivenessIndex struct {
- stackMapIndex int
-
- // isUnsafePoint indicates that this is an unsafe-point.
- //
- // Note that it's possible for a call Value to have a stack
- // map while also being an unsafe-point. This means it cannot
- // be preempted at this instruction, but that a preemption or
- // stack growth may happen in the called function.
- isUnsafePoint bool
-}
-
-// LivenessDontCare indicates that the liveness information doesn't
-// matter. Currently it is used in deferreturn liveness when we don't
-// actually need it. It should never be emitted to the PCDATA stream.
-var LivenessDontCare = LivenessIndex{StackMapDontCare, true}
-
-// StackMapDontCare indicates that the stack map index at a Value
-// doesn't matter.
-//
-// This is a sentinel value that should never be emitted to the PCDATA
-// stream. We use -1000 because that's obviously never a valid stack
-// index (but -1 is).
-const StackMapDontCare = -1000
-
-func (idx LivenessIndex) StackMapValid() bool {
- return idx.stackMapIndex != StackMapDontCare
+ return objw.LivenessIndex{StackMapIndex: objw.StackMapDontCare, IsUnsafePoint: false}
}
type progeffectscache struct {
if cap(lc.be) >= f.NumBlocks() {
lv.be = lc.be[:f.NumBlocks()]
}
- lv.livenessMap = LivenessMap{vals: lc.livenessMap.vals, deferreturn: LivenessDontCare}
+ lv.livenessMap = LivenessMap{vals: lc.livenessMap.vals, deferreturn: objw.LivenessDontCare}
lc.livenessMap.vals = nil
}
if lv.be == nil {
nblocks := int32(len(f.Blocks))
nvars := int32(len(vars))
- bulk := bvbulkalloc(nvars, nblocks*7)
+ bulk := bitvec.NewBulk(nvars, nblocks*7)
for _, b := range f.Blocks {
be := lv.blockEffects(b)
- be.uevar = bulk.next()
- be.varkill = bulk.next()
- be.livein = bulk.next()
- be.liveout = bulk.next()
+ be.uevar = bulk.Next()
+ be.varkill = bulk.Next()
+ be.livein = bulk.Next()
+ be.liveout = bulk.Next()
}
lv.livenessMap.reset()
// NOTE: The bitmap for a specific type t could be cached in t after
// the first run and then simply copied into bv at the correct offset
// on future calls with the same type t.
-func onebitwalktype1(t *types.Type, off int64, bv bvec) {
+func onebitwalktype1(t *types.Type, off int64, bv bitvec.BitVec) {
if t.Align > 0 && off&int64(t.Align-1) != 0 {
base.Fatalf("onebitwalktype1: invalid initial alignment: type %v has alignment %d, but offset is %v", t, t.Align, off)
}
// Generates live pointer value maps for arguments and local variables. The
// this argument and the in arguments are always assumed live. The vars
// argument is a slice of *Nodes.
-func (lv *Liveness) pointerMap(liveout bvec, vars []*ir.Name, args, locals bvec) {
+func (lv *Liveness) pointerMap(liveout bitvec.BitVec, vars []*ir.Name, args, locals bitvec.BitVec) {
for i := int32(0); ; i++ {
i = liveout.Next(i)
if i < 0 {
return
}
- lv.unsafePoints = bvalloc(int32(lv.f.NumValues()))
+ lv.unsafePoints = bitvec.New(int32(lv.f.NumValues()))
// Mark architecture-specific unsafe points.
for _, b := range lv.f.Blocks {
// nice to only flood as far as the unsafe.Pointer -> uintptr
// conversion, but it's hard to know which argument of an Add
// or Sub to follow.
- var flooded bvec
+ var flooded bitvec.BitVec
var flood func(b *ssa.Block, vi int)
flood = func(b *ssa.Block, vi int) {
- if flooded.n == 0 {
- flooded = bvalloc(int32(lv.f.NumBlocks()))
+ if flooded.N == 0 {
+ flooded = bitvec.New(int32(lv.f.NumBlocks()))
}
if flooded.Get(int32(b.ID)) {
return
// These temporary bitvectors exist to avoid successive allocations and
// frees within the loop.
nvars := int32(len(lv.vars))
- newlivein := bvalloc(nvars)
- newliveout := bvalloc(nvars)
+ newlivein := bitvec.New(nvars)
+ newliveout := bitvec.New(nvars)
// Walk blocks in postorder ordering. This improves convergence.
po := lv.f.Postorder()
// variables at each safe point locations.
func (lv *Liveness) epilogue() {
nvars := int32(len(lv.vars))
- liveout := bvalloc(nvars)
- livedefer := bvalloc(nvars) // always-live variables
+ liveout := bitvec.New(nvars)
+ livedefer := bitvec.New(nvars) // always-live variables
// If there is a defer (that could recover), then all output
// parameters are live all the time. In addition, any locals
{
// Reserve an entry for function entry.
- live := bvalloc(nvars)
+ live := bitvec.New(nvars)
lv.livevars = append(lv.livevars, live)
}
continue
}
- live := bvalloc(nvars)
+ live := bitvec.New(nvars)
lv.livevars = append(lv.livevars, live)
}
// If we have an open-coded deferreturn call, make a liveness map for it.
if lv.fn.OpenCodedDeferDisallowed() {
- lv.livenessMap.deferreturn = LivenessDontCare
+ lv.livenessMap.deferreturn = objw.LivenessDontCare
} else {
- lv.livenessMap.deferreturn = LivenessIndex{
- stackMapIndex: lv.stackMapSet.add(livedefer),
- isUnsafePoint: false,
+ lv.livenessMap.deferreturn = objw.LivenessIndex{
+ StackMapIndex: lv.stackMapSet.add(livedefer),
+ IsUnsafePoint: false,
}
}
// Done compacting. Throw out the stack map set.
- lv.stackMaps = lv.stackMapSet.extractUniqe()
+ lv.stackMaps = lv.stackMapSet.extractUnique()
lv.stackMapSet = bvecSet{}
// Useful sanity check: on entry to the function,
for _, v := range b.Values {
hasStackMap := lv.hasStackMap(v)
isUnsafePoint := lv.allUnsafe || lv.unsafePoints.Get(int32(v.ID))
- idx := LivenessIndex{StackMapDontCare, isUnsafePoint}
+ idx := objw.LivenessIndex{StackMapIndex: objw.StackMapDontCare, IsUnsafePoint: isUnsafePoint}
if hasStackMap {
- idx.stackMapIndex = lv.stackMapSet.add(lv.livevars[pos])
+ idx.StackMapIndex = lv.stackMapSet.add(lv.livevars[pos])
pos++
}
if hasStackMap || isUnsafePoint {
lv.livevars = lv.livevars[:0]
}
-func (lv *Liveness) showlive(v *ssa.Value, live bvec) {
+func (lv *Liveness) showlive(v *ssa.Value, live bitvec.BitVec) {
if base.Flag.Live == 0 || ir.FuncName(lv.fn) == "init" || strings.HasPrefix(ir.FuncName(lv.fn), ".") {
return
}
base.WarnfAt(pos, s)
}
-func (lv *Liveness) printbvec(printed bool, name string, live bvec) bool {
+func (lv *Liveness) printbvec(printed bool, name string, live bitvec.BitVec) bool {
if live.IsEmpty() {
return printed
}
fmt.Printf("\tlive=")
printed = false
if pcdata.StackMapValid() {
- live := lv.stackMaps[pcdata.stackMapIndex]
+ live := lv.stackMaps[pcdata.StackMapIndex]
for j, n := range lv.vars {
if !live.Get(int32(j)) {
continue
fmt.Printf("\n")
}
- if pcdata.isUnsafePoint {
+ if pcdata.IsUnsafePoint {
fmt.Printf("\tunsafe-point\n")
}
}
// Temporary symbols for encoding bitmaps.
var argsSymTmp, liveSymTmp obj.LSym
- args := bvalloc(int32(maxArgs / int64(types.PtrSize)))
- aoff := duint32(&argsSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps
- aoff = duint32(&argsSymTmp, aoff, uint32(args.n)) // number of bits in each bitmap
+ args := bitvec.New(int32(maxArgs / int64(types.PtrSize)))
+ aoff := objw.Uint32(&argsSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps
+ aoff = objw.Uint32(&argsSymTmp, aoff, uint32(args.N)) // number of bits in each bitmap
- locals := bvalloc(int32(maxLocals / int64(types.PtrSize)))
- loff := duint32(&liveSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps
- loff = duint32(&liveSymTmp, loff, uint32(locals.n)) // number of bits in each bitmap
+ locals := bitvec.New(int32(maxLocals / int64(types.PtrSize)))
+ loff := objw.Uint32(&liveSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps
+ loff = objw.Uint32(&liveSymTmp, loff, uint32(locals.N)) // number of bits in each bitmap
for _, live := range lv.stackMaps {
args.Clear()
lv.pointerMap(live, lv.vars, args, locals)
- aoff = dbvec(&argsSymTmp, aoff, args)
- loff = dbvec(&liveSymTmp, loff, locals)
+ aoff = objw.BitVec(&argsSymTmp, aoff, args)
+ loff = objw.BitVec(&liveSymTmp, loff, locals)
}
// Give these LSyms content-addressable names,
// pointer variables in the function and emits a runtime data
// structure read by the garbage collector.
// Returns a map from GC safe points to their corresponding stack map index.
-func liveness(curfn *ir.Func, f *ssa.Func, stkptrsize int64, pp *Progs) LivenessMap {
+func liveness(curfn *ir.Func, f *ssa.Func, stkptrsize int64, pp *objw.Progs) LivenessMap {
// Construct the global liveness state.
vars, idx := getvariables(curfn)
lv := newliveness(curfn, f, vars, idx, stkptrsize)
for _, b := range f.Blocks {
for _, val := range b.Values {
if idx := lv.livenessMap.Get(val); idx.StackMapValid() {
- lv.showlive(val, lv.stackMaps[idx.stackMapIndex])
+ lv.showlive(val, lv.stackMaps[idx.StackMapIndex])
}
}
}
fninfo.GCArgs, fninfo.GCLocals = lv.emit()
p := pp.Prog(obj.AFUNCDATA)
- Addrconst(&p.From, objabi.FUNCDATA_ArgsPointerMaps)
+ p.From.SetConst(objabi.FUNCDATA_ArgsPointerMaps)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = fninfo.GCArgs
p = pp.Prog(obj.AFUNCDATA)
- Addrconst(&p.From, objabi.FUNCDATA_LocalsPointerMaps)
+ p.From.SetConst(objabi.FUNCDATA_LocalsPointerMaps)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = fninfo.GCLocals
import (
"cmd/compile/internal/base"
+ "cmd/compile/internal/bitvec"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/gcprog"
s := base.Ctxt.Lookup("type..importpath." + p.Prefix + ".")
ot := dnameData(s, 0, str, "", nil, false)
- ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA)
+ objw.Global(s, int32(ot), obj.DUPOK|obj.RODATA)
s.Set(obj.AttrContentAddressable, true)
p.Pathsym = s
}
func dgopkgpath(s *obj.LSym, ot int, pkg *types.Pkg) int {
if pkg == nil {
- return duintptr(s, ot, 0)
+ return objw.Uintptr(s, ot, 0)
}
if pkg == types.LocalPkg && base.Ctxt.Pkgpath == "" {
// Every package that imports this one directly defines the symbol.
// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
ns := base.Ctxt.Lookup(`type..importpath."".`)
- return dsymptr(s, ot, ns, 0)
+ return objw.SymPtr(s, ot, ns, 0)
}
dimportpath(pkg)
- return dsymptr(s, ot, pkg.Pathsym, 0)
+ return objw.SymPtr(s, ot, pkg.Pathsym, 0)
}
// dgopkgpathOff writes an offset relocation in s at offset ot to the pkg path symbol.
func dgopkgpathOff(s *obj.LSym, ot int, pkg *types.Pkg) int {
if pkg == nil {
- return duint32(s, ot, 0)
+ return objw.Uint32(s, ot, 0)
}
if pkg == types.LocalPkg && base.Ctxt.Pkgpath == "" {
// If we don't know the full import path of the package being compiled
// Every package that imports this one directly defines the symbol.
// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
ns := base.Ctxt.Lookup(`type..importpath."".`)
- return dsymptrOff(s, ot, ns)
+ return objw.SymPtrOff(s, ot, ns)
}
dimportpath(pkg)
- return dsymptrOff(s, ot, pkg.Pathsym)
+ return objw.SymPtrOff(s, ot, pkg.Pathsym)
}
// dnameField dumps a reflect.name for a struct field.
base.Fatalf("package mismatch for %v", ft.Sym)
}
nsym := dname(ft.Sym.Name, ft.Note, nil, types.IsExported(ft.Sym.Name))
- return dsymptr(lsym, ot, nsym, 0)
+ return objw.SymPtr(lsym, ot, nsym, 0)
}
// dnameData writes the contents of a reflect.name into s at offset ot.
return s
}
ot := dnameData(s, 0, name, tag, pkg, exported)
- ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA)
+ objw.Global(s, int32(ot), obj.DUPOK|obj.RODATA)
s.Set(obj.AttrContentAddressable, true)
return s
}
base.Fatalf("methods are too far away on %v: %d", t, dataAdd)
}
- ot = duint16(lsym, ot, uint16(mcount))
- ot = duint16(lsym, ot, uint16(xcount))
- ot = duint32(lsym, ot, uint32(dataAdd))
- ot = duint32(lsym, ot, 0)
+ ot = objw.Uint16(lsym, ot, uint16(mcount))
+ ot = objw.Uint16(lsym, ot, uint16(xcount))
+ ot = objw.Uint32(lsym, ot, uint32(dataAdd))
+ ot = objw.Uint32(lsym, ot, 0)
return ot
}
}
nsym := dname(a.name.Name, "", pkg, exported)
- ot = dsymptrOff(lsym, ot, nsym)
+ ot = objw.SymPtrOff(lsym, ot, nsym)
ot = dmethodptrOff(lsym, ot, dtypesym(a.mtype))
ot = dmethodptrOff(lsym, ot, a.isym.Linksym())
ot = dmethodptrOff(lsym, ot, a.tsym.Linksym())
}
func dmethodptrOff(s *obj.LSym, ot int, x *obj.LSym) int {
- duint32(s, ot, 0)
+ objw.Uint32(s, ot, 0)
r := obj.Addrel(s)
r.Off = int32(ot)
r.Siz = 4
// ptrToThis typeOff
// }
ot := 0
- ot = duintptr(lsym, ot, uint64(t.Width))
- ot = duintptr(lsym, ot, uint64(ptrdata))
- ot = duint32(lsym, ot, types.TypeHash(t))
+ ot = objw.Uintptr(lsym, ot, uint64(t.Width))
+ ot = objw.Uintptr(lsym, ot, uint64(ptrdata))
+ ot = objw.Uint32(lsym, ot, types.TypeHash(t))
var tflag uint8
if uncommonSize(t) != 0 {
}
}
- ot = duint8(lsym, ot, tflag)
+ ot = objw.Uint8(lsym, ot, tflag)
// runtime (and common sense) expects alignment to be a power of two.
i := int(t.Align)
if i&(i-1) != 0 {
base.Fatalf("invalid alignment %d for %v", t.Align, t)
}
- ot = duint8(lsym, ot, t.Align) // align
- ot = duint8(lsym, ot, t.Align) // fieldAlign
+ ot = objw.Uint8(lsym, ot, t.Align) // align
+ ot = objw.Uint8(lsym, ot, t.Align) // fieldAlign
i = kinds[t.Kind()]
if types.IsDirectIface(t) {
if useGCProg {
i |= objabi.KindGCProg
}
- ot = duint8(lsym, ot, uint8(i)) // kind
+ ot = objw.Uint8(lsym, ot, uint8(i)) // kind
if eqfunc != nil {
- ot = dsymptr(lsym, ot, eqfunc, 0) // equality function
+ ot = objw.SymPtr(lsym, ot, eqfunc, 0) // equality function
} else {
- ot = duintptr(lsym, ot, 0) // type we can't do == with
+ ot = objw.Uintptr(lsym, ot, 0) // type we can't do == with
}
- ot = dsymptr(lsym, ot, gcsym, 0) // gcdata
+ ot = objw.SymPtr(lsym, ot, gcsym, 0) // gcdata
nsym := dname(p, "", nil, exported)
- ot = dsymptrOff(lsym, ot, nsym) // str
+ ot = objw.SymPtrOff(lsym, ot, nsym) // str
// ptrToThis
if sptr == nil {
- ot = duint32(lsym, ot, 0)
+ ot = objw.Uint32(lsym, ot, 0)
} else if sptrWeak {
- ot = dsymptrWeakOff(lsym, ot, sptr)
+ ot = objw.SymPtrWeakOff(lsym, ot, sptr)
} else {
- ot = dsymptrOff(lsym, ot, sptr)
+ ot = objw.SymPtrOff(lsym, ot, sptr)
}
return ot
t2 := types.NewSlice(t.Elem())
s2 := dtypesym(t2)
ot = dcommontype(lsym, t)
- ot = dsymptr(lsym, ot, s1, 0)
- ot = dsymptr(lsym, ot, s2, 0)
- ot = duintptr(lsym, ot, uint64(t.NumElem()))
+ ot = objw.SymPtr(lsym, ot, s1, 0)
+ ot = objw.SymPtr(lsym, ot, s2, 0)
+ ot = objw.Uintptr(lsym, ot, uint64(t.NumElem()))
ot = dextratype(lsym, ot, t, 0)
case types.TSLICE:
// ../../../../runtime/type.go:/sliceType
s1 := dtypesym(t.Elem())
ot = dcommontype(lsym, t)
- ot = dsymptr(lsym, ot, s1, 0)
+ ot = objw.SymPtr(lsym, ot, s1, 0)
ot = dextratype(lsym, ot, t, 0)
case types.TCHAN:
// ../../../../runtime/type.go:/chanType
s1 := dtypesym(t.Elem())
ot = dcommontype(lsym, t)
- ot = dsymptr(lsym, ot, s1, 0)
- ot = duintptr(lsym, ot, uint64(t.ChanDir()))
+ ot = objw.SymPtr(lsym, ot, s1, 0)
+ ot = objw.Uintptr(lsym, ot, uint64(t.ChanDir()))
ot = dextratype(lsym, ot, t, 0)
case types.TFUNC:
if isddd {
outCount |= 1 << 15
}
- ot = duint16(lsym, ot, uint16(inCount))
- ot = duint16(lsym, ot, uint16(outCount))
+ ot = objw.Uint16(lsym, ot, uint16(inCount))
+ ot = objw.Uint16(lsym, ot, uint16(outCount))
if types.PtrSize == 8 {
ot += 4 // align for *rtype
}
// Array of rtype pointers follows funcType.
for _, t1 := range t.Recvs().Fields().Slice() {
- ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)
+ ot = objw.SymPtr(lsym, ot, dtypesym(t1.Type), 0)
}
for _, t1 := range t.Params().Fields().Slice() {
- ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)
+ ot = objw.SymPtr(lsym, ot, dtypesym(t1.Type), 0)
}
for _, t1 := range t.Results().Fields().Slice() {
- ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)
+ ot = objw.SymPtr(lsym, ot, dtypesym(t1.Type), 0)
}
case types.TINTER:
}
ot = dgopkgpath(lsym, ot, tpkg)
- ot = dsymptr(lsym, ot, lsym, ot+3*types.PtrSize+uncommonSize(t))
- ot = duintptr(lsym, ot, uint64(n))
- ot = duintptr(lsym, ot, uint64(n))
+ ot = objw.SymPtr(lsym, ot, lsym, ot+3*types.PtrSize+uncommonSize(t))
+ ot = objw.Uintptr(lsym, ot, uint64(n))
+ ot = objw.Uintptr(lsym, ot, uint64(n))
dataAdd := imethodSize() * n
ot = dextratype(lsym, ot, t, dataAdd)
}
nsym := dname(a.name.Name, "", pkg, exported)
- ot = dsymptrOff(lsym, ot, nsym)
- ot = dsymptrOff(lsym, ot, dtypesym(a.type_))
+ ot = objw.SymPtrOff(lsym, ot, nsym)
+ ot = objw.SymPtrOff(lsym, ot, dtypesym(a.type_))
}
// ../../../../runtime/type.go:/mapType
hasher := genhash(t.Key())
ot = dcommontype(lsym, t)
- ot = dsymptr(lsym, ot, s1, 0)
- ot = dsymptr(lsym, ot, s2, 0)
- ot = dsymptr(lsym, ot, s3, 0)
- ot = dsymptr(lsym, ot, hasher, 0)
+ ot = objw.SymPtr(lsym, ot, s1, 0)
+ ot = objw.SymPtr(lsym, ot, s2, 0)
+ ot = objw.SymPtr(lsym, ot, s3, 0)
+ ot = objw.SymPtr(lsym, ot, hasher, 0)
var flags uint32
// Note: flags must match maptype accessors in ../../../../runtime/type.go
// and maptype builder in ../../../../reflect/type.go:MapOf.
if t.Key().Width > MAXKEYSIZE {
- ot = duint8(lsym, ot, uint8(types.PtrSize))
+ ot = objw.Uint8(lsym, ot, uint8(types.PtrSize))
flags |= 1 // indirect key
} else {
- ot = duint8(lsym, ot, uint8(t.Key().Width))
+ ot = objw.Uint8(lsym, ot, uint8(t.Key().Width))
}
if t.Elem().Width > MAXELEMSIZE {
- ot = duint8(lsym, ot, uint8(types.PtrSize))
+ ot = objw.Uint8(lsym, ot, uint8(types.PtrSize))
flags |= 2 // indirect value
} else {
- ot = duint8(lsym, ot, uint8(t.Elem().Width))
+ ot = objw.Uint8(lsym, ot, uint8(t.Elem().Width))
}
- ot = duint16(lsym, ot, uint16(bmap(t).Width))
+ ot = objw.Uint16(lsym, ot, uint16(bmap(t).Width))
if types.IsReflexive(t.Key()) {
flags |= 4 // reflexive key
}
if hashMightPanic(t.Key()) {
flags |= 16 // hash might panic
}
- ot = duint32(lsym, ot, flags)
+ ot = objw.Uint32(lsym, ot, flags)
ot = dextratype(lsym, ot, t, 0)
case types.TPTR:
s1 := dtypesym(t.Elem())
ot = dcommontype(lsym, t)
- ot = dsymptr(lsym, ot, s1, 0)
+ ot = objw.SymPtr(lsym, ot, s1, 0)
ot = dextratype(lsym, ot, t, 0)
// ../../../../runtime/type.go:/structType
ot = dcommontype(lsym, t)
ot = dgopkgpath(lsym, ot, spkg)
- ot = dsymptr(lsym, ot, lsym, ot+3*types.PtrSize+uncommonSize(t))
- ot = duintptr(lsym, ot, uint64(len(fields)))
- ot = duintptr(lsym, ot, uint64(len(fields)))
+ ot = objw.SymPtr(lsym, ot, lsym, ot+3*types.PtrSize+uncommonSize(t))
+ ot = objw.Uintptr(lsym, ot, uint64(len(fields)))
+ ot = objw.Uintptr(lsym, ot, uint64(len(fields)))
dataAdd := len(fields) * structfieldSize()
ot = dextratype(lsym, ot, t, dataAdd)
for _, f := range fields {
// ../../../../runtime/type.go:/structField
ot = dnameField(lsym, ot, spkg, f)
- ot = dsymptr(lsym, ot, dtypesym(f.Type), 0)
+ ot = objw.SymPtr(lsym, ot, dtypesym(f.Type), 0)
offsetAnon := uint64(f.Offset) << 1
if offsetAnon>>1 != uint64(f.Offset) {
base.Fatalf("%v: bad field offset for %s", t, f.Sym.Name)
if f.Embedded != 0 {
offsetAnon |= 1
}
- ot = duintptr(lsym, ot, offsetAnon)
+ ot = objw.Uintptr(lsym, ot, offsetAnon)
}
}
ot = dextratypeData(lsym, ot, t)
- ggloblsym(lsym, int32(ot), int16(dupok|obj.RODATA))
+ objw.Global(lsym, int32(ot), int16(dupok|obj.RODATA))
// The linker will leave a table of all the typelinks for
// types in the binary, so the runtime can find them.
// _ [4]byte
// fun [1]uintptr // variable sized
// }
- o := dsymptr(i.lsym, 0, dtypesym(i.itype), 0)
- o = dsymptr(i.lsym, o, dtypesym(i.t), 0)
- o = duint32(i.lsym, o, types.TypeHash(i.t)) // copy of type hash
- o += 4 // skip unused field
+ o := objw.SymPtr(i.lsym, 0, dtypesym(i.itype), 0)
+ o = objw.SymPtr(i.lsym, o, dtypesym(i.t), 0)
+ o = objw.Uint32(i.lsym, o, types.TypeHash(i.t)) // copy of type hash
+ o += 4 // skip unused field
for _, fn := range genfun(i.t, i.itype) {
- o = dsymptr(i.lsym, o, fn, 0) // method pointer for each method
+ o = objw.SymPtr(i.lsym, o, fn, 0) // method pointer for each method
}
// Nothing writes static itabs, so they are read only.
- ggloblsym(i.lsym, int32(o), int16(obj.DUPOK|obj.RODATA))
+ objw.Global(i.lsym, int32(o), int16(obj.DUPOK|obj.RODATA))
i.lsym.Set(obj.AttrContentAddressable, true)
}
// }
nsym := dname(p.s.Name, "", nil, true)
tsym := dtypesym(p.t)
- ot = dsymptrOff(s, ot, nsym)
- ot = dsymptrOff(s, ot, tsym)
+ ot = objw.SymPtrOff(s, ot, nsym)
+ ot = objw.SymPtrOff(s, ot, tsym)
// Plugin exports symbols as interfaces. Mark their types
// as UsedInIface.
tsym.Set(obj.AttrUsedInIface, true)
}
- ggloblsym(s, int32(ot), int16(obj.RODATA))
+ objw.Global(s, int32(ot), int16(obj.RODATA))
ot = 0
s = base.Ctxt.Lookup("go.plugin.exports")
for _, p := range ptabs {
- ot = dsymptr(s, ot, p.s.Linksym(), 0)
+ ot = objw.SymPtr(s, ot, p.s.Linksym(), 0)
}
- ggloblsym(s, int32(ot), int16(obj.RODATA))
+ objw.Global(s, int32(ot), int16(obj.RODATA))
}
}
if !sym.Uniq() {
sym.SetUniq(true)
for i, x := range ptrmask {
- duint8(lsym, i, x)
+ objw.Uint8(lsym, i, x)
}
- ggloblsym(lsym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL)
+ objw.Global(lsym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL)
lsym.Set(obj.AttrContentAddressable, true)
}
return lsym
return
}
- vec := bvalloc(8 * int32(len(ptrmask)))
+ vec := bitvec.New(8 * int32(len(ptrmask)))
onebitwalktype1(t, 0, vec)
nptr := types.PtrDataSize(t) / int64(types.PtrSize)
}
func (p *GCProg) writeByte(x byte) {
- p.symoff = duint8(p.lsym, p.symoff, x)
+ p.symoff = objw.Uint8(p.lsym, p.symoff, x)
}
func (p *GCProg) end() {
p.w.End()
- duint32(p.lsym, 0, uint32(p.symoff-4))
- ggloblsym(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL)
+ objw.Uint32(p.lsym, 0, uint32(p.symoff-4))
+ objw.Global(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL)
if base.Debug.GCProg > 0 {
fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.lsym)
}
"bytes"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
"cmd/compile/internal/ssa"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
panic(fmt.Sprintf("dvarint: bad offset for funcdata - %v", v))
}
if v < 1<<7 {
- return duint8(x, off, uint8(v))
+ return objw.Uint8(x, off, uint8(v))
}
- off = duint8(x, off, uint8((v&127)|128))
+ off = objw.Uint8(x, off, uint8((v&127)|128))
if v < 1<<14 {
- return duint8(x, off, uint8(v>>7))
+ return objw.Uint8(x, off, uint8(v>>7))
}
- off = duint8(x, off, uint8(((v>>7)&127)|128))
+ off = objw.Uint8(x, off, uint8(((v>>7)&127)|128))
if v < 1<<21 {
- return duint8(x, off, uint8(v>>14))
+ return objw.Uint8(x, off, uint8(v>>14))
}
- off = duint8(x, off, uint8(((v>>14)&127)|128))
+ off = objw.Uint8(x, off, uint8(((v>>14)&127)|128))
if v < 1<<28 {
- return duint8(x, off, uint8(v>>21))
+ return objw.Uint8(x, off, uint8(v>>21))
}
- off = duint8(x, off, uint8(((v>>21)&127)|128))
- return duint8(x, off, uint8(v>>28))
+ off = objw.Uint8(x, off, uint8(((v>>21)&127)|128))
+ return objw.Uint8(x, off, uint8(v>>28))
}
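The encoding above is the standard little-endian base-128 varint: seven value bits per byte, low-order group first, with the high bit set on every byte except the last. A minimal standalone sketch (putVarint is a hypothetical helper; the layout matches encoding/binary's unsigned varint, so the standard decoder reads it back):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // putVarint mirrors dvarint's byte layout: 7 value bits per byte,
    // continuation bit set on all but the final byte.
    func putVarint(buf []byte, v uint64) int {
        n := 0
        for v >= 0x80 {
            buf[n] = byte(v&0x7f | 0x80)
            v >>= 7
            n++
        }
        buf[n] = byte(v)
        return n + 1
    }

    func main() {
        var buf [5]byte
        n := putVarint(buf[:], 300)
        v, _ := binary.Uvarint(buf[:n])
        fmt.Println(n, v) // 2 300
    }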
// emitOpenDeferInfo emits FUNCDATA information about the defers in a function
}
// Generate a disconnected call to a runtime routine and a return.
-func gencallret(pp *Progs, sym *obj.LSym) *obj.Prog {
+func gencallret(pp *objw.Progs, sym *obj.LSym) *obj.Prog {
p := pp.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
// SSAGenState contains state needed during Prog generation.
type SSAGenState struct {
- pp *Progs
+ pp *objw.Progs
// Branches remembers all the branch instructions we've seen
// and where they would like to go.
// Pc returns the current Prog.
func (s *SSAGenState) Pc() *obj.Prog {
- return s.pp.next
+ return s.pp.Next
}
// SetPos sets the current source position.
func (s *SSAGenState) SetPos(pos src.XPos) {
- s.pp.pos = pos
+ s.pp.Pos = pos
}
// Br emits a single branch instruction and returns the instruction.
}
s.SetPos(p)
} else {
- s.SetPos(s.pp.pos.WithNotStmt())
+ s.SetPos(s.pp.Pos.WithNotStmt())
}
}
}
func (s byXoffset) Less(i, j int) bool { return s[i].FrameOffset() < s[j].FrameOffset() }
func (s byXoffset) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func emitStackObjects(e *ssafn, pp *Progs) {
+func emitStackObjects(e *ssafn, pp *objw.Progs) {
var vars []*ir.Name
for _, n := range e.curfn.Dcl {
if livenessShouldTrack(n) && n.Addrtaken() {
// Format must match runtime/stack.go:stackObjectRecord.
x := e.curfn.LSym.Func().StackObjects
off := 0
- off = duintptr(x, off, uint64(len(vars)))
+ off = objw.Uintptr(x, off, uint64(len(vars)))
for _, v := range vars {
// Note: arguments and return values have non-negative Xoffset,
// in which case the offset is relative to argp.
// Locals have a negative Xoffset, in which case the offset is relative to varp.
- off = duintptr(x, off, uint64(v.FrameOffset()))
+ off = objw.Uintptr(x, off, uint64(v.FrameOffset()))
if !types.TypeSym(v.Type()).Siggen() {
e.Fatalf(v.Pos(), "stack object's type symbol not generated for type %s", v.Type())
}
- off = dsymptr(x, off, dtypesym(v.Type()), 0)
+ off = objw.SymPtr(x, off, dtypesym(v.Type()), 0)
}
// Emit a funcdata pointing at the stack object data.
p := pp.Prog(obj.AFUNCDATA)
- Addrconst(&p.From, objabi.FUNCDATA_StackObjects)
+ p.From.SetConst(objabi.FUNCDATA_StackObjects)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = x
}
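For reference, the funcdata emitted here starts with a uintptr count followed by one record per object, and must stay in sync with the runtime's reader. A hedged sketch of the matching definition, with field order inferred from the writes above (the authoritative version is runtime/stack.go:stackObjectRecord):

    // Sketch only; see runtime/stack.go for the real definition.
    type stackObjectRecord struct {
        // Offset of the object in the frame: negative means relative
        // to varp, non-negative means relative to argp.
        off int    // written above with objw.Uintptr
        typ *_type // written above as a pointer to the type descriptor
    }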
// genssa appends entries to pp for each instruction in f.
-func genssa(f *ssa.Func, pp *Progs) {
+func genssa(f *ssa.Func, pp *objw.Progs) {
var s SSAGenState
e := f.Frontend().(*ssafn)
// This function uses open-coded defers -- write out the funcdata
// info that we computed at the end of genssa.
p := pp.Prog(obj.AFUNCDATA)
- Addrconst(&p.From, objabi.FUNCDATA_OpenCodedDeferInfo)
+ p.From.SetConst(objabi.FUNCDATA_OpenCodedDeferInfo)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = openDeferInfo
progToValue = make(map[*obj.Prog]*ssa.Value, f.NumValues())
progToBlock = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
f.Logf("genssa %s\n", f.Name)
- progToBlock[s.pp.next] = f.Blocks[0]
+ progToBlock[s.pp.Next] = f.Blocks[0]
}
s.ScratchFpMem = e.scratchFpMem
// Emit basic blocks
for i, b := range f.Blocks {
- s.bstart[b.ID] = s.pp.next
+ s.bstart[b.ID] = s.pp.Next
s.lineRunStart = nil
// Attach a "default" liveness info. Normally this will be
// instruction. We won't use the actual liveness map on a
// control instruction. Just mark it something that is
// preemptible, unless this function is "all unsafe".
- s.pp.nextLive = LivenessIndex{-1, allUnsafe(f)}
+ s.pp.NextLive = objw.LivenessIndex{StackMapIndex: -1, IsUnsafePoint: allUnsafe(f)}
// Emit values in block
thearch.SSAMarkMoves(&s, b)
for _, v := range b.Values {
- x := s.pp.next
+ x := s.pp.Next
s.DebugFriendlySetPosFrom(v)
switch v.Op {
default:
// Attach this safe point to the next
// instruction.
- s.pp.nextLive = s.livenessMap.Get(v)
+ s.pp.NextLive = s.livenessMap.Get(v)
// Special case for first line in function; move it to the start.
if firstPos != src.NoXPos {
}
if base.Ctxt.Flag_locationlists {
- valueToProgAfter[v.ID] = s.pp.next
+ valueToProgAfter[v.ID] = s.pp.Next
}
if f.PrintOrHtmlSSA {
- for ; x != s.pp.next; x = x.Link {
+ for ; x != s.pp.Next; x = x.Link {
progToValue[x] = v
}
}
}
// If this is an empty infinite loop, stick a hardware NOP in there so that debuggers are less confused.
- if s.bstart[b.ID] == s.pp.next && len(b.Succs) == 1 && b.Succs[0].Block() == b {
+ if s.bstart[b.ID] == s.pp.Next && len(b.Succs) == 1 && b.Succs[0].Block() == b {
p := thearch.Ginsnop(s.pp)
p.Pos = p.Pos.WithIsStmt()
if b.Pos == src.NoXPos {
// line numbers for otherwise empty blocks.
next = f.Blocks[i+1]
}
- x := s.pp.next
+ x := s.pp.Next
s.SetPos(b.Pos)
thearch.SSAGenBlock(&s, b, next)
if f.PrintOrHtmlSSA {
- for ; x != s.pp.next; x = x.Link {
+ for ; x != s.pp.Next; x = x.Link {
progToBlock[x] = b
}
}
// When doing open-coded defers, generate a disconnected call to
// deferreturn and a return. This will be used during panic
// recovery to unwind the stack and return to the runtime.
- s.pp.nextLive = s.livenessMap.deferreturn
+ s.pp.NextLive = s.livenessMap.deferreturn
gencallret(pp, ir.Syms.Deferreturn)
}
// some of the inline marks.
// Use this instruction instead.
p.Pos = p.Pos.WithIsStmt() // promote position to a statement
- pp.curfn.LSym.Func().AddInlMark(p, inlMarks[m])
+ pp.CurFunc.LSym.Func().AddInlMark(p, inlMarks[m])
// Make the inline mark a real nop, so it doesn't generate any code.
m.As = obj.ANOP
m.Pos = src.NoXPos
// Any unmatched inline marks now need to be added to the inlining tree (and will generate a nop instruction).
for _, p := range inlMarkList {
if p.As != obj.ANOP {
- pp.curfn.LSym.Func().AddInlMark(p, inlMarks[p])
+ pp.CurFunc.LSym.Func().AddInlMark(p, inlMarks[p])
}
}
}
// Call returns a new CALL instruction for the SSA value v.
// It uses PrepareCall to prepare the call.
func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog {
- pPosIsStmt := s.pp.pos.IsStmt() // The statement-ness fo the call comes from ssaGenState
+ pPosIsStmt := s.pp.Pos.IsStmt() // The statement-ness of the call comes from SSAGenState
s.PrepareCall(v)
p := s.Prog(obj.ACALL)
// Record call graph information for nowritebarrierrec
// analysis.
if nowritebarrierrecCheck != nil {
- nowritebarrierrecCheck.recordCall(s.pp.curfn, call.Fn, v.Pos)
+ nowritebarrierrecCheck.recordCall(s.pp.CurFunc, call.Fn, v.Pos)
}
}
import (
"cmd/compile/internal/base"
- "cmd/compile/internal/gc"
+ "cmd/compile/internal/objw"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/mips"
)
// TODO(mips): implement DUFFZERO
-func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
if cnt == 0 {
return p
}
if cnt < int64(4*types.PtrSize) {
for i := int64(0); i < cnt; i += int64(types.PtrSize) {
- p = pp.Appendpp(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, base.Ctxt.FixedFrameSize()+off+i)
+ p = pp.Append(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, base.Ctxt.FixedFrameSize()+off+i)
}
} else {
// fmt.Printf("zerorange frame: %v, lo: %v, hi: %v\n", frame, lo, hi)
// MOVW R0, (Widthptr)r1
// ADD $Widthptr, r1
// BNE r1, r2, loop
- p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-4, obj.TYPE_REG, mips.REGRT1, 0)
+ p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-4, obj.TYPE_REG, mips.REGRT1, 0)
p.Reg = mips.REGSP
- p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
+ p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
p.Reg = mips.REGRT1
- p = pp.Appendpp(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(types.PtrSize))
+ p = pp.Append(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(types.PtrSize))
p1 := p
- p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, mips.REGRT1, 0)
- p = pp.Appendpp(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
+ p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, mips.REGRT1, 0)
+ p = pp.Append(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
p.Reg = mips.REGRT2
- gc.Patch(p, p1)
+ p.To.SetTarget(p1)
}
return p
}
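For a concrete large count, the loop commented above expands to roughly the following (illustrative only; assumes FixedFrameSize = 4, off = 0, cnt = 64):

    //      ADD  $0, SP, R1     // $(4+0-4): one word below the range
    //      ADD  $64, R1, R2    // end pointer
    // loop:
    //      MOVW R0, 4(R1)      // zero the word just above R1
    //      ADD  $4, R1
    //      BNE  R1, R2, loop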
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
p := pp.Prog(mips.ANOR)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REG_R0
p4.From.Reg = v.Args[1].Reg()
p4.Reg = mips.REG_R1
p4.To.Type = obj.TYPE_BRANCH
- gc.Patch(p4, p2)
+ p4.To.SetTarget(p2)
case ssa.OpMIPSLoweredMove:
// SUBU $4, R1
// MOVW 4(R1), Rtmp
p6.From.Reg = v.Args[2].Reg()
p6.Reg = mips.REG_R1
p6.To.Type = obj.TYPE_BRANCH
- gc.Patch(p6, p2)
+ p6.To.SetTarget(p2)
case ssa.OpMIPSCALLstatic, ssa.OpMIPSCALLclosure, ssa.OpMIPSCALLinter:
s.Call(v)
case ssa.OpMIPSLoweredWB:
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH
- gc.Patch(p3, p)
+ p3.To.SetTarget(p)
s.Prog(mips.ASYNC)
case ssa.OpMIPSLoweredAtomicAdd:
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH
- gc.Patch(p3, p)
+ p3.To.SetTarget(p)
s.Prog(mips.ASYNC)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH
- gc.Patch(p3, p)
+ p3.To.SetTarget(p)
s.Prog(mips.ASYNC)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH
- gc.Patch(p3, p)
+ p3.To.SetTarget(p)
s.Prog(mips.ASYNC)
p5.From.Type = obj.TYPE_REG
p5.From.Reg = v.Reg0()
p5.To.Type = obj.TYPE_BRANCH
- gc.Patch(p5, p1)
+ p5.To.SetTarget(p1)
s.Prog(mips.ASYNC)
p6 := s.Prog(obj.ANOP)
- gc.Patch(p2, p6)
+ p2.To.SetTarget(p6)
case ssa.OpMIPSLoweredNilCheck:
// Issue a load which will fault if arg is nil.
package mips64
import (
- "cmd/compile/internal/gc"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/mips"
)
-func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
if cnt == 0 {
return p
}
if cnt < int64(4*types.PtrSize) {
for i := int64(0); i < cnt; i += int64(types.PtrSize) {
- p = pp.Appendpp(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, 8+off+i)
+ p = pp.Append(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, 8+off+i)
}
} else if cnt <= int64(128*types.PtrSize) {
- p = pp.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0)
+ p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0)
p.Reg = mips.REGSP
- p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+ p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p.To.Name = obj.NAME_EXTERN
p.To.Sym = ir.Syms.Duffzero
p.To.Offset = 8 * (128 - cnt/int64(types.PtrSize))
// MOVV R0, (Widthptr)r1
// ADDV $Widthptr, r1
// BNE r1, r2, loop
- p = pp.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0)
+ p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0)
p.Reg = mips.REGSP
- p = pp.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
+ p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
p.Reg = mips.REGRT1
- p = pp.Appendpp(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(types.PtrSize))
+ p = pp.Append(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(types.PtrSize))
p1 := p
- p = pp.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, mips.REGRT1, 0)
- p = pp.Appendpp(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
+ p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, mips.REGRT1, 0)
+ p = pp.Append(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
p.Reg = mips.REGRT2
- gc.Patch(p, p1)
+ p.To.SetTarget(p1)
}
return p
}
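The DUFFZERO offset arithmetic above picks the entry point so that exactly cnt/PtrSize store/advance pairs execute. A worked example, assuming each unrolled step on this port is two 4-byte instructions (8 bytes of text):

    // cnt = 16, PtrSize = 8  =>  2 stores needed
    // offset = 8 * (128 - 16/8) = 8 * 126 = 1008
    // The jump lands 126 steps into duffzero, so only the final two
    // MOVV/ADDV pairs run before the routine returns.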
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
p := pp.Prog(mips.ANOR)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REG_R0
p4.From.Reg = v.Args[1].Reg()
p4.Reg = mips.REG_R1
p4.To.Type = obj.TYPE_BRANCH
- gc.Patch(p4, p2)
+ p4.To.SetTarget(p2)
case ssa.OpMIPS64DUFFCOPY:
p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_MEM
p6.From.Reg = v.Args[2].Reg()
p6.Reg = mips.REG_R1
p6.To.Type = obj.TYPE_BRANCH
- gc.Patch(p6, p2)
+ p6.To.SetTarget(p2)
case ssa.OpMIPS64CALLstatic, ssa.OpMIPS64CALLclosure, ssa.OpMIPS64CALLinter:
s.Call(v)
case ssa.OpMIPS64LoweredWB:
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH
- gc.Patch(p3, p)
+ p3.To.SetTarget(p)
s.Prog(mips.ASYNC)
case ssa.OpMIPS64LoweredAtomicAdd32, ssa.OpMIPS64LoweredAtomicAdd64:
// SYNC
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH
- gc.Patch(p3, p)
+ p3.To.SetTarget(p)
s.Prog(mips.ASYNC)
p4 := s.Prog(mips.AADDVU)
p4.From.Type = obj.TYPE_REG
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH
- gc.Patch(p3, p)
+ p3.To.SetTarget(p)
s.Prog(mips.ASYNC)
p4 := s.Prog(mips.AADDVU)
p4.From.Type = obj.TYPE_CONST
p5.From.Type = obj.TYPE_REG
p5.From.Reg = v.Reg0()
p5.To.Type = obj.TYPE_BRANCH
- gc.Patch(p5, p1)
+ p5.To.SetTarget(p1)
p6 := s.Prog(mips.ASYNC)
- gc.Patch(p2, p6)
+ p2.To.SetTarget(p6)
case ssa.OpMIPS64LoweredNilCheck:
// Issue a load which will fault if arg is nil.
p := s.Prog(mips.AMOVB)
p3.To.Type = obj.TYPE_REG
p3.To.Reg = v.Reg()
p4 := s.Prog(obj.ANOP) // not a machine instruction, for branch to land
- gc.Patch(p2, p4)
+ p2.To.SetTarget(p4)
case ssa.OpMIPS64LoweredGetClosurePtr:
// Closure pointer is R22 (mips.REGCTXT).
gc.CheckLoweredGetClosurePtr(v)
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package objw
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/bitvec"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+)
+
+func Uint8(s *obj.LSym, off int, v uint8) int {
+ return UintN(s, off, uint64(v), 1)
+}
+
+func Uint16(s *obj.LSym, off int, v uint16) int {
+ return UintN(s, off, uint64(v), 2)
+}
+
+func Uint32(s *obj.LSym, off int, v uint32) int {
+ return UintN(s, off, uint64(v), 4)
+}
+
+func Uintptr(s *obj.LSym, off int, v uint64) int {
+ return UintN(s, off, v, types.PtrSize)
+}
+
+func UintN(s *obj.LSym, off int, v uint64, wid int) int {
+ if off&(wid-1) != 0 {
+ base.Fatalf("duintxxLSym: misaligned: v=%d wid=%d off=%d", v, wid, off)
+ }
+ s.WriteInt(base.Ctxt, int64(off), wid, int64(v))
+ return off + wid
+}
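Each writer returns the offset just past what it wrote, so callers lay out a record by threading a single offset through a chain of calls, as the dextratype hunk earlier does. A minimal fragment (values hypothetical; this only runs inside the compiler, where base.Ctxt and lsym are initialized):

    ot := 0
    ot = objw.Uint16(lsym, ot, uint16(mcount))  // bytes 0-1
    ot = objw.Uint16(lsym, ot, uint16(xcount))  // bytes 2-3
    ot = objw.Uint32(lsym, ot, uint32(dataAdd)) // bytes 4-7; UintN rejects misaligned offsets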
+
+func SymPtr(s *obj.LSym, off int, x *obj.LSym, xoff int) int {
+ off = int(types.Rnd(int64(off), int64(types.PtrSize)))
+ s.WriteAddr(base.Ctxt, int64(off), types.PtrSize, x, int64(xoff))
+ off += types.PtrSize
+ return off
+}
+
+func SymPtrOff(s *obj.LSym, off int, x *obj.LSym) int {
+ s.WriteOff(base.Ctxt, int64(off), x, 0)
+ off += 4
+ return off
+}
+
+func SymPtrWeakOff(s *obj.LSym, off int, x *obj.LSym) int {
+ s.WriteWeakOff(base.Ctxt, int64(off), x, 0)
+ off += 4
+ return off
+}
+
+func Global(s *obj.LSym, width int32, flags int16) {
+ if flags&obj.LOCAL != 0 {
+ s.Set(obj.AttrLocal, true)
+ flags &^= obj.LOCAL
+ }
+ base.Ctxt.Globl(s, int64(width), int(flags))
+}
+
+func BitVec(s *obj.LSym, off int, bv bitvec.BitVec) int {
+ // Runtime reads the bitmaps as byte arrays. Oblige.
+ for j := 0; int32(j) < bv.N; j += 8 {
+ word := bv.B[j/32]
+ off = Uint8(s, off, uint8(word>>(uint(j)%32)))
+ }
+ return off
+}
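BitVec peels one byte at a time out of the little-endian 32-bit words of the bitmap. A standalone sketch of the same extraction:

    package main

    import "fmt"

    func main() {
        // Bits 0..31 packed into one word, as bitvec stores them.
        word := uint32(0xDDCCBBAA)
        for j := 0; j < 32; j += 8 {
            fmt.Printf("%#x ", uint8(word>>uint(j)))
        }
        fmt.Println() // Output: 0xaa 0xbb 0xcc 0xdd
    }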
--- /dev/null
+// Derived from Inferno utils/6c/txt.c
+// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6c/txt.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package objw
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/ssa"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+)
+
+var sharedProgArray = new([10000]obj.Prog) // *T instead of T to work around issue 19839
+
+// NewProgs returns a new Progs for fn.
+// worker indicates which of the backend workers will use the Progs.
+func NewProgs(fn *ir.Func, worker int) *Progs {
+ pp := new(Progs)
+ if base.Ctxt.CanReuseProgs() {
+ sz := len(sharedProgArray) / base.Flag.LowerC
+ pp.Cache = sharedProgArray[sz*worker : sz*(worker+1)]
+ }
+ pp.CurFunc = fn
+
+ // prime the pump
+ pp.Next = pp.NewProg()
+ pp.Clear(pp.Next)
+
+ pp.Pos = fn.Pos()
+ pp.SetText(fn)
+ // PCDATA tables implicitly start with index -1.
+ pp.PrevLive = LivenessIndex{-1, false}
+ pp.NextLive = pp.PrevLive
+ return pp
+}
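The Cache slicing hands each of the -c backend workers a disjoint slab of sharedProgArray, so Prog allocation needs no locking. A sketch of the arithmetic, assuming 4 workers:

    // sz := len(sharedProgArray) / base.Flag.LowerC = 10000 / 4 = 2500
    // worker 0: sharedProgArray[0:2500]
    // worker 1: sharedProgArray[2500:5000]
    // worker 2: sharedProgArray[5000:7500]
    // worker 3: sharedProgArray[7500:10000]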
+
+// Progs accumulates Progs for a function and converts them into machine code.
+type Progs struct {
+ Text *obj.Prog // ATEXT Prog for this function
+ Next *obj.Prog // next Prog
+ PC int64 // virtual PC; count of Progs
+ Pos src.XPos // position to use for new Progs
+ CurFunc *ir.Func // fn these Progs are for
+ Cache []obj.Prog // local progcache
+ CacheIndex int // first free element of progcache
+
+ NextLive LivenessIndex // liveness index for the next Prog
+ PrevLive LivenessIndex // last emitted liveness index
+}
+
+// LivenessIndex stores the liveness map information for a Value.
+type LivenessIndex struct {
+ StackMapIndex int
+
+ // IsUnsafePoint indicates that this is an unsafe-point.
+ //
+ // Note that it's possible for a call Value to have a stack
+ // map while also being an unsafe-point. This means it cannot
+ // be preempted at this instruction, but that a preemption or
+ // stack growth may happen in the called function.
+ IsUnsafePoint bool
+}
+
+// StackMapDontCare indicates that the stack map index at a Value
+// doesn't matter.
+//
+// This is a sentinel value that should never be emitted to the PCDATA
+// stream. We use -1000 because that's obviously never a valid stack
+// index (but -1 is).
+const StackMapDontCare = -1000
+
+// LivenessDontCare indicates that the liveness information doesn't
+// matter. Currently it is used in deferreturn liveness when we don't
+// actually need it. It should never be emitted to the PCDATA stream.
+var LivenessDontCare = LivenessIndex{StackMapDontCare, true}
+
+func (idx LivenessIndex) StackMapValid() bool {
+ return idx.StackMapIndex != StackMapDontCare
+}
+
+func (pp *Progs) NewProg() *obj.Prog {
+ var p *obj.Prog
+ if pp.CacheIndex < len(pp.Cache) {
+ p = &pp.Cache[pp.CacheIndex]
+ pp.CacheIndex++
+ } else {
+ p = new(obj.Prog)
+ }
+ p.Ctxt = base.Ctxt
+ return p
+}
+
+// Flush converts from pp to machine code.
+func (pp *Progs) Flush() {
+ plist := &obj.Plist{Firstpc: pp.Text, Curfn: pp.CurFunc}
+ obj.Flushplist(base.Ctxt, plist, pp.NewProg, base.Ctxt.Pkgpath)
+}
+
+// Free clears pp and any associated resources.
+func (pp *Progs) Free() {
+ if base.Ctxt.CanReuseProgs() {
+ // Clear progs to enable GC and avoid abuse.
+ s := pp.Cache[:pp.CacheIndex]
+ for i := range s {
+ s[i] = obj.Prog{}
+ }
+ }
+ // Clear pp to avoid abuse.
+ *pp = Progs{}
+}
+
+// Prog adds a Prog with instruction As to pp.
+func (pp *Progs) Prog(as obj.As) *obj.Prog {
+ if pp.NextLive.StackMapValid() && pp.NextLive.StackMapIndex != pp.PrevLive.StackMapIndex {
+ // Emit stack map index change.
+ idx := pp.NextLive.StackMapIndex
+ pp.PrevLive.StackMapIndex = idx
+ p := pp.Prog(obj.APCDATA)
+ p.From.SetConst(objabi.PCDATA_StackMapIndex)
+ p.To.SetConst(int64(idx))
+ }
+ if pp.NextLive.IsUnsafePoint != pp.PrevLive.IsUnsafePoint {
+ // Emit unsafe-point marker.
+ pp.PrevLive.IsUnsafePoint = pp.NextLive.IsUnsafePoint
+ p := pp.Prog(obj.APCDATA)
+ p.From.SetConst(objabi.PCDATA_UnsafePoint)
+ if pp.NextLive.IsUnsafePoint {
+ p.To.SetConst(objabi.PCDATA_UnsafePointUnsafe)
+ } else {
+ p.To.SetConst(objabi.PCDATA_UnsafePointSafe)
+ }
+ }
+
+ p := pp.Next
+ pp.Next = pp.NewProg()
+ pp.Clear(pp.Next)
+ p.Link = pp.Next
+
+ if !pp.Pos.IsKnown() && base.Flag.K != 0 {
+ base.Warn("prog: unknown position (line 0)")
+ }
+
+ p.As = as
+ p.Pos = pp.Pos
+ if pp.Pos.IsStmt() == src.PosIsStmt {
+ // Clear IsStmt for later Progs at this pos, provided that this opcode can carry the statement mark itself.
+ if ssa.LosesStmtMark(as) {
+ return p
+ }
+ pp.Pos = pp.Pos.WithNotStmt()
+ }
+ return p
+}
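In the emitted stream, liveness transitions therefore materialize as PCDATA pseudo-instructions immediately before the Prog that triggered them, along the lines of (illustrative values):

    // PCDATA  $PCDATA_StackMapIndex, $3   // stack map index changed since the last Prog
    // PCDATA  $PCDATA_UnsafePoint, $-1    // back to a safe point
    // CALL    runtime.typedmemmove(SB)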
+
+func (pp *Progs) Clear(p *obj.Prog) {
+ obj.Nopout(p)
+ p.As = obj.AEND
+ p.Pc = pp.PC
+ pp.PC++
+}
+
+func (pp *Progs) Append(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int16, foffset int64, ttype obj.AddrType, treg int16, toffset int64) *obj.Prog {
+ q := pp.NewProg()
+ pp.Clear(q)
+ q.As = as
+ q.Pos = p.Pos
+ q.From.Type = ftype
+ q.From.Reg = freg
+ q.From.Offset = foffset
+ q.To.Type = ttype
+ q.To.Reg = treg
+ q.To.Offset = toffset
+ q.Link = p.Link
+ p.Link = q
+ return q
+}
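Unlike Prog, Append splices the new instruction directly after an arbitrary p rather than at the stream tail, and returns it, which is what lets the zerorange implementations above grow their chains in order. A fragment (arguments illustrative; compiler-internal only):

    p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)       // MOVQ $0, AX
    p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, 8) // MOVQ AX, 8(SP)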
+
+func (pp *Progs) SetText(fn *ir.Func) {
+ if pp.Text != nil {
+ base.Fatalf("Progs.settext called twice")
+ }
+ ptxt := pp.Prog(obj.ATEXT)
+ pp.Text = ptxt
+
+ fn.LSym.Func().Text = ptxt
+ ptxt.From.Type = obj.TYPE_MEM
+ ptxt.From.Name = obj.NAME_EXTERN
+ ptxt.From.Sym = fn.LSym
+}
import (
"cmd/compile/internal/base"
- "cmd/compile/internal/gc"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/ppc64"
)
-func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
if cnt == 0 {
return p
}
if cnt < int64(4*types.PtrSize) {
for i := int64(0); i < cnt; i += int64(types.PtrSize) {
- p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, base.Ctxt.FixedFrameSize()+off+i)
+ p = pp.Append(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, base.Ctxt.FixedFrameSize()+off+i)
}
} else if cnt <= int64(128*types.PtrSize) {
- p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGRT1, 0)
+ p = pp.Append(p, ppc64.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGRT1, 0)
p.Reg = ppc64.REGSP
- p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+ p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p.To.Name = obj.NAME_EXTERN
p.To.Sym = ir.Syms.Duffzero
p.To.Offset = 4 * (128 - cnt/int64(types.PtrSize))
} else {
- p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGTMP, 0)
- p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
+ p = pp.Append(p, ppc64.AMOVD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGTMP, 0)
+ p = pp.Append(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
p.Reg = ppc64.REGSP
- p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
- p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
+ p = pp.Append(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
+ p = pp.Append(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
p.Reg = ppc64.REGRT1
- p = pp.Appendpp(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(types.PtrSize))
+ p = pp.Append(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(types.PtrSize))
p1 := p
- p = pp.Appendpp(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
- p = pp.Appendpp(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
- gc.Patch(p, p1)
+ p = pp.Append(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
+ p = pp.Append(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+ p.To.SetTarget(p1)
}
return p
}
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
p := pp.Prog(ppc64.AOR)
p.From.Type = obj.TYPE_REG
p.From.Reg = ppc64.REG_R0
return p
}
-func ginsnopdefer(pp *gc.Progs) *obj.Prog {
+func ginsnopdefer(pp *objw.Progs) *obj.Prog {
// On PPC64 two nops are required in the defer case.
//
// (see gc/cgen.go, gc/plive.go -- copy of comment below)
// BNE retry
p3 := s.Prog(ppc64.ABNE)
p3.To.Type = obj.TYPE_BRANCH
- gc.Patch(p3, p)
+ p3.To.SetTarget(p)
case ssa.OpPPC64LoweredAtomicAdd32,
ssa.OpPPC64LoweredAtomicAdd64:
// BNE retry
p4 := s.Prog(ppc64.ABNE)
p4.To.Type = obj.TYPE_BRANCH
- gc.Patch(p4, p)
+ p4.To.SetTarget(p)
// Ensure a 32 bit result
if v.Op == ssa.OpPPC64LoweredAtomicAdd32 {
// BNE retry
p2 := s.Prog(ppc64.ABNE)
p2.To.Type = obj.TYPE_BRANCH
- gc.Patch(p2, p)
+ p2.To.SetTarget(p)
// ISYNC
pisync := s.Prog(ppc64.AISYNC)
pisync.To.Type = obj.TYPE_NONE
// ISYNC
pisync := s.Prog(ppc64.AISYNC)
pisync.To.Type = obj.TYPE_NONE
- gc.Patch(p2, pisync)
+ p2.To.SetTarget(pisync)
case ssa.OpPPC64LoweredAtomicStore8,
ssa.OpPPC64LoweredAtomicStore32,
// BNE retry
p4 := s.Prog(ppc64.ABNE)
p4.To.Type = obj.TYPE_BRANCH
- gc.Patch(p4, p)
+ p4.To.SetTarget(p)
// LWSYNC - Assuming shared data not write-through-required nor
// caching-inhibited. See Appendix B.2.1.1 in the ISA 2.07b.
// If the operation is a CAS-Release, then synchronization is not necessary.
p7.From.Offset = 0
p7.To.Type = obj.TYPE_REG
p7.To.Reg = out
- gc.Patch(p2, p7)
+ p2.To.SetTarget(p7)
// done (label)
p8 := s.Prog(obj.ANOP)
- gc.Patch(p6, p8)
+ p6.To.SetTarget(p8)
case ssa.OpPPC64LoweredGetClosurePtr:
// Closure pointer is R11 (already)
p.To.Reg = r
p.From.Type = obj.TYPE_REG
p.From.Reg = r0
- gc.Patch(pbahead, p)
+ pbahead.To.SetTarget(p)
p = s.Prog(obj.ANOP)
- gc.Patch(pbover, p)
+ pbover.To.SetTarget(p)
case ssa.OpPPC64DIVW:
// word-width version of above
p.To.Reg = r
p.From.Type = obj.TYPE_REG
p.From.Reg = r0
- gc.Patch(pbahead, p)
+ pbahead.To.SetTarget(p)
p = s.Prog(obj.ANOP)
- gc.Patch(pbover, p)
+ pbover.To.SetTarget(p)
case ssa.OpPPC64CLRLSLWI:
r := v.Reg()
p.From.Offset = ppc64.BO_BCTR
p.Reg = ppc64.REG_R0
p.To.Type = obj.TYPE_BRANCH
- gc.Patch(p, top)
+ p.To.SetTarget(top)
}
// When ctr == 1 the loop was not generated but
// there are at least 64 bytes to clear, so add
p.From.Offset = ppc64.BO_BCTR
p.Reg = ppc64.REG_R0
p.To.Type = obj.TYPE_BRANCH
- gc.Patch(p, top)
+ p.To.SetTarget(top)
}
// when ctr == 1 the loop was not generated but
p.From.Offset = ppc64.BO_BCTR
p.Reg = ppc64.REG_R0
p.To.Type = obj.TYPE_BRANCH
- gc.Patch(p, top)
+ p.To.SetTarget(top)
// srcReg and dstReg were incremented in the loop, so
// later instructions start with offset 0.
p.From.Offset = ppc64.BO_BCTR
p.Reg = ppc64.REG_R0
p.To.Type = obj.TYPE_BRANCH
- gc.Patch(p, top)
+ p.To.SetTarget(top)
// srcReg and dstReg were incremented in the loop, so
// later instructions start with offset 0.
// NOP (so the BNE has somewhere to land)
nop := s.Prog(obj.ANOP)
- gc.Patch(p2, nop)
+ p2.To.SetTarget(nop)
} else {
// Issue a load which will fault if arg is nil.
import (
"cmd/compile/internal/base"
- "cmd/compile/internal/gc"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/riscv"
)
-func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+func zeroRange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
if cnt == 0 {
return p
}
if cnt < int64(4*types.PtrSize) {
for i := int64(0); i < cnt; i += int64(types.PtrSize) {
- p = pp.Appendpp(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_SP, off+i)
+ p = pp.Append(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_SP, off+i)
}
return p
}
if cnt <= int64(128*types.PtrSize) {
- p = pp.Appendpp(p, riscv.AADDI, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_A0, 0)
+ p = pp.Append(p, riscv.AADDI, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_A0, 0)
p.Reg = riscv.REG_SP
- p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+ p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p.To.Name = obj.NAME_EXTERN
p.To.Sym = ir.Syms.Duffzero
p.To.Offset = 8 * (128 - cnt/int64(types.PtrSize))
// MOV ZERO, (T0)
// ADD $Widthptr, T0
// BNE T0, T1, loop
- p = pp.Appendpp(p, riscv.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_T0, 0)
+ p = pp.Append(p, riscv.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_T0, 0)
p.Reg = riscv.REG_SP
- p = pp.Appendpp(p, riscv.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, riscv.REG_T1, 0)
+ p = pp.Append(p, riscv.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, riscv.REG_T1, 0)
p.Reg = riscv.REG_T0
- p = pp.Appendpp(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_T0, 0)
+ p = pp.Append(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_T0, 0)
loop := p
- p = pp.Appendpp(p, riscv.AADD, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, riscv.REG_T0, 0)
- p = pp.Appendpp(p, riscv.ABNE, obj.TYPE_REG, riscv.REG_T0, 0, obj.TYPE_BRANCH, 0, 0)
+ p = pp.Append(p, riscv.AADD, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, riscv.REG_T0, 0)
+ p = pp.Append(p, riscv.ABNE, obj.TYPE_REG, riscv.REG_T0, 0, obj.TYPE_BRANCH, 0, 0)
p.Reg = riscv.REG_T1
- gc.Patch(p, loop)
+ p.To.SetTarget(loop)
return p
}
package riscv64
import (
- "cmd/compile/internal/gc"
+ "cmd/compile/internal/objw"
"cmd/internal/obj"
"cmd/internal/obj/riscv"
)
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
// Hardware nop is ADD $0, ZERO
p := pp.Prog(riscv.AADD)
p.From.Type = obj.TYPE_CONST
p4.From.Reg = riscv.REG_TMP
p4.Reg = riscv.REG_ZERO
p4.To.Type = obj.TYPE_BRANCH
- gc.Patch(p4, p1)
+ p4.To.SetTarget(p1)
p5 := s.Prog(riscv.AMOV)
p5.From.Type = obj.TYPE_CONST
p5.To.Reg = out
p6 := s.Prog(obj.ANOP)
- gc.Patch(p2, p6)
+ p2.To.SetTarget(p6)
case ssa.OpRISCV64LoweredZero:
mov, sz := largestMove(v.AuxInt)
p3.Reg = v.Args[0].Reg()
p3.From.Type = obj.TYPE_REG
p3.From.Reg = v.Args[1].Reg()
- gc.Patch(p3, p)
+ p3.To.SetTarget(p)
case ssa.OpRISCV64LoweredMove:
mov, sz := largestMove(v.AuxInt)
p5.Reg = v.Args[1].Reg()
p5.From.Type = obj.TYPE_REG
p5.From.Reg = v.Args[2].Reg()
- gc.Patch(p5, p)
+ p5.To.SetTarget(p)
case ssa.OpRISCV64LoweredNilCheck:
// Issue a load which will fault if arg is nil.
import (
"cmd/compile/internal/base"
- "cmd/compile/internal/gc"
+ "cmd/compile/internal/objw"
"cmd/internal/obj"
"cmd/internal/obj/s390x"
)
const clearLoopCutoff = 1024
// zerorange clears the stack in the given range.
-func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
if cnt == 0 {
return p
}
// need to create a copy of the stack pointer that we can adjust.
// We also need to do this if we are going to loop.
if off < 0 || off > 4096-clearLoopCutoff || cnt > clearLoopCutoff {
- p = pp.Appendpp(p, s390x.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, s390x.REGRT1, 0)
+ p = pp.Append(p, s390x.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, s390x.REGRT1, 0)
p.Reg = int16(s390x.REGSP)
reg = s390x.REGRT1
off = 0
// Generate a loop of large clears.
if cnt > clearLoopCutoff {
ireg := int16(s390x.REGRT2) // register holds number of remaining loop iterations
- p = pp.Appendpp(p, s390x.AMOVD, obj.TYPE_CONST, 0, cnt/256, obj.TYPE_REG, ireg, 0)
- p = pp.Appendpp(p, s390x.ACLEAR, obj.TYPE_CONST, 0, 256, obj.TYPE_MEM, reg, off)
+ p = pp.Append(p, s390x.AMOVD, obj.TYPE_CONST, 0, cnt/256, obj.TYPE_REG, ireg, 0)
+ p = pp.Append(p, s390x.ACLEAR, obj.TYPE_CONST, 0, 256, obj.TYPE_MEM, reg, off)
pl := p
- p = pp.Appendpp(p, s390x.AADD, obj.TYPE_CONST, 0, 256, obj.TYPE_REG, reg, 0)
- p = pp.Appendpp(p, s390x.ABRCTG, obj.TYPE_REG, ireg, 0, obj.TYPE_BRANCH, 0, 0)
- gc.Patch(p, pl)
+ p = pp.Append(p, s390x.AADD, obj.TYPE_CONST, 0, 256, obj.TYPE_REG, reg, 0)
+ p = pp.Append(p, s390x.ABRCTG, obj.TYPE_REG, ireg, 0, obj.TYPE_BRANCH, 0, 0)
+ p.To.SetTarget(pl)
cnt = cnt % 256
}
case 2:
ins = s390x.AMOVH
}
- p = pp.Appendpp(p, ins, obj.TYPE_CONST, 0, 0, obj.TYPE_MEM, reg, off)
+ p = pp.Append(p, ins, obj.TYPE_CONST, 0, 0, obj.TYPE_MEM, reg, off)
// Handle clears that would require multiple move instructions with CLEAR (assembled as XC).
default:
- p = pp.Appendpp(p, s390x.ACLEAR, obj.TYPE_CONST, 0, n, obj.TYPE_MEM, reg, off)
+ p = pp.Append(p, s390x.ACLEAR, obj.TYPE_CONST, 0, n, obj.TYPE_MEM, reg, off)
}
cnt -= n
return p
}
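A worked example of the tiering in zerorange above: for cnt = 1300, the setup emits MOVD $5 into the iteration register (1300/256 = 5), BRCTG executes the CLEAR $256 body five times (1280 bytes), and the remaining cnt%256 = 20 bytes fall through to the tail switch, which finishes with a single CLEAR $20.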
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
return pp.Prog(s390x.ANOPH)
}
bne := s.Prog(s390x.ABLT)
bne.To.Type = obj.TYPE_BRANCH
- gc.Patch(bne, mvc)
+ bne.To.SetTarget(mvc)
if v.AuxInt > 0 {
mvc := s.Prog(s390x.AMVC)
bne := s.Prog(s390x.ABLT)
bne.To.Type = obj.TYPE_BRANCH
- gc.Patch(bne, clear)
+ bne.To.SetTarget(clear)
if v.AuxInt > 0 {
clear := s.Prog(s390x.ACLEAR)
// NOP (so the BNE has somewhere to land)
nop := s.Prog(obj.ANOP)
- gc.Patch(bne, nop)
+ bne.To.SetTarget(nop)
case ssa.OpS390XLoweredAtomicExchange32, ssa.OpS390XLoweredAtomicExchange64:
// Loop until the CS{,G} succeeds.
// MOV{WZ,D} arg0, ret
// BNE cs
bne := s.Prog(s390x.ABNE)
bne.To.Type = obj.TYPE_BRANCH
- gc.Patch(bne, cs)
+ bne.To.SetTarget(cs)
case ssa.OpS390XSYNC:
s.Prog(s390x.ASYNC)
case ssa.OpClobber:
"cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
+ "cmd/compile/internal/objw"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/obj"
arch.SSAGenBlock = ssaGenBlock
}
-func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
+func zeroRange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
if cnt == 0 {
return p
}
}
for i := int64(0); i < cnt; i += 8 {
- p = pp.Appendpp(p, wasm.AGet, obj.TYPE_REG, wasm.REG_SP, 0, 0, 0, 0)
- p = pp.Appendpp(p, wasm.AI64Const, obj.TYPE_CONST, 0, 0, 0, 0, 0)
- p = pp.Appendpp(p, wasm.AI64Store, 0, 0, 0, obj.TYPE_CONST, 0, off+i)
+ p = pp.Append(p, wasm.AGet, obj.TYPE_REG, wasm.REG_SP, 0, 0, 0, 0)
+ p = pp.Append(p, wasm.AI64Const, obj.TYPE_CONST, 0, 0, 0, 0, 0)
+ p = pp.Append(p, wasm.AI64Store, 0, 0, 0, obj.TYPE_CONST, 0, off+i)
}
return p
}
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
return pp.Prog(wasm.ANop)
}
package x86
import (
- "cmd/compile/internal/gc"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/x86"
)
-func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, ax *uint32) *obj.Prog {
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, ax *uint32) *obj.Prog {
if cnt == 0 {
return p
}
if *ax == 0 {
- p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
+ p = pp.Append(p, x86.AMOVL, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
*ax = 1
}
if cnt <= int64(4*types.RegSize) {
for i := int64(0); i < cnt; i += int64(types.RegSize) {
- p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off+i)
+ p = pp.Append(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off+i)
}
} else if cnt <= int64(128*types.RegSize) {
- p = pp.Appendpp(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
- p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, 1*(128-cnt/int64(types.RegSize)))
+ p = pp.Append(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
+ p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, 1*(128-cnt/int64(types.RegSize)))
p.To.Sym = ir.Syms.Duffzero
} else {
- p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_CONST, 0, cnt/int64(types.RegSize), obj.TYPE_REG, x86.REG_CX, 0)
- p = pp.Appendpp(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
- p = pp.Appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
- p = pp.Appendpp(p, x86.ASTOSL, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+ p = pp.Append(p, x86.AMOVL, obj.TYPE_CONST, 0, cnt/int64(types.RegSize), obj.TYPE_REG, x86.REG_CX, 0)
+ p = pp.Append(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
+ p = pp.Append(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+ p = pp.Append(p, x86.ASTOSL, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
}
return p
}
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
// See comment in ../amd64/ggen.go.
p := pp.Prog(x86.AXCHGL)
p.From.Type = obj.TYPE_REG