"[]cmd/compile/internal/types2.Type %s": "",
"cmd/compile/internal/arm.shift %d": "",
"cmd/compile/internal/gc.RegIndex %d": "",
- "cmd/compile/internal/gc.initKind %d": "",
"cmd/compile/internal/ir.Class %d": "",
"cmd/compile/internal/ir.Node %+v": "",
"cmd/compile/internal/ir.Node %L": "",
"cmd/compile/internal/types2.Object %s": "",
"cmd/compile/internal/types2.Type %s": "",
"cmd/compile/internal/types2.color %s": "",
+ "cmd/compile/internal/walk.initKind %d": "",
"go/constant.Value %#v": "",
"go/constant.Value %s": "",
"map[*cmd/compile/internal/types2.TypeParam]cmd/compile/internal/types2.Type %s": "",
package amd64
import (
- "cmd/compile/internal/gc"
+ "cmd/compile/internal/ssagen"
"cmd/internal/obj/x86"
)
var leaptr = x86.ALEAQ
-func Init(arch *gc.Arch) {
+func Init(arch *ssagen.ArchInfo) {
arch.LinkArch = &x86.Linkamd64
arch.REGSP = x86.REGSP
arch.MAXWIDTH = 1 << 50
import (
"cmd/compile/internal/base"
- "cmd/compile/internal/gc"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/x86"
"cmd/internal/objabi"
return -dzClearStep * (dzBlockLen - tailSteps)
}
-func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
const (
ax = 1 << iota
x0
return p
}
- if cnt%int64(gc.Widthreg) != 0 {
+ if cnt%int64(types.RegSize) != 0 {
// should only happen with nacl
- if cnt%int64(gc.Widthptr) != 0 {
+ if cnt%int64(types.PtrSize) != 0 {
base.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
}
if *state&ax == 0 {
- p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
+ p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
*state |= ax
}
- p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
- off += int64(gc.Widthptr)
- cnt -= int64(gc.Widthptr)
+ p = pp.Append(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
+ off += int64(types.PtrSize)
+ cnt -= int64(types.PtrSize)
}
if cnt == 8 {
if *state&ax == 0 {
- p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
+ p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
*state |= ax
}
- p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
- } else if !isPlan9 && cnt <= int64(8*gc.Widthreg) {
+ p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
+ } else if !isPlan9 && cnt <= int64(8*types.RegSize) {
if *state&x0 == 0 {
- p = pp.Appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
+ p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
*state |= x0
}
for i := int64(0); i < cnt/16; i++ {
- p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+i*16)
+ p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+i*16)
}
if cnt%16 != 0 {
- p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16))
+ p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16))
}
- } else if !isPlan9 && (cnt <= int64(128*gc.Widthreg)) {
+ } else if !isPlan9 && (cnt <= int64(128*types.RegSize)) {
if *state&x0 == 0 {
- p = pp.Appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
+ p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
*state |= x0
}
- p = pp.Appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off+dzDI(cnt), obj.TYPE_REG, x86.REG_DI, 0)
- p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(cnt))
- p.To.Sym = gc.Duffzero
+ p = pp.Append(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off+dzDI(cnt), obj.TYPE_REG, x86.REG_DI, 0)
+ p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(cnt))
+ p.To.Sym = ir.Syms.Duffzero
if cnt%16 != 0 {
- p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8))
+ p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8))
}
} else {
if *state&ax == 0 {
- p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
+ p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
*state |= ax
}
- p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(gc.Widthreg), obj.TYPE_REG, x86.REG_CX, 0)
- p = pp.Appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
- p = pp.Appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
- p = pp.Appendpp(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+ p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(types.RegSize), obj.TYPE_REG, x86.REG_CX, 0)
+ p = pp.Append(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
+ p = pp.Append(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+ p = pp.Append(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
}
return p
}
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
// This is a hardware nop (1-byte 0x90) instruction,
// even though we describe it as an explicit XCHGL here.
// Particularly, this does not zero the high 32 bits
"math"
"cmd/compile/internal/base"
- "cmd/compile/internal/gc"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/x86"
)
// markMoves marks any MOVXconst ops that need to avoid clobbering flags.
-func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
+func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
flive := b.FlagsLiveAtEnd
for _, c := range b.ControlValues() {
flive = c.Type.IsFlags() || flive
// dest := dest(To) op src(From)
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
-func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog {
+func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
p := s.Prog(op)
p.From.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG
return off, adj
}
-func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
switch v.Op {
case ssa.OpAMD64VFMADD231SD:
p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG
p.To.Reg = o
}
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
case ssa.OpAMD64LEAQ, ssa.OpAMD64LEAL, ssa.OpAMD64LEAW:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64CMPQ, ssa.OpAMD64CMPL, ssa.OpAMD64CMPW, ssa.OpAMD64CMPB,
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Args[1].Reg()
case ssa.OpAMD64CMPQconstload, ssa.OpAMD64CMPLconstload, ssa.OpAMD64CMPWconstload, ssa.OpAMD64CMPBconstload:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
- gc.AddAux2(&p.From, v, sc.Off())
+ ssagen.AddAux2(&p.From, v, sc.Off())
p.To.Type = obj.TYPE_CONST
p.To.Offset = sc.Val()
case ssa.OpAMD64CMPQloadidx8, ssa.OpAMD64CMPQloadidx1, ssa.OpAMD64CMPLloadidx4, ssa.OpAMD64CMPLloadidx1, ssa.OpAMD64CMPWloadidx2, ssa.OpAMD64CMPWloadidx1, ssa.OpAMD64CMPBloadidx1:
p := s.Prog(v.Op.Asm())
memIdx(&p.From, v)
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Args[2].Reg()
case ssa.OpAMD64CMPQconstloadidx8, ssa.OpAMD64CMPQconstloadidx1, ssa.OpAMD64CMPLconstloadidx4, ssa.OpAMD64CMPLconstloadidx1, ssa.OpAMD64CMPWconstloadidx2, ssa.OpAMD64CMPWconstloadidx1, ssa.OpAMD64CMPBconstloadidx1:
sc := v.AuxValAndOff()
p := s.Prog(v.Op.Asm())
memIdx(&p.From, v)
- gc.AddAux2(&p.From, v, sc.Off())
+ ssagen.AddAux2(&p.From, v, sc.Off())
p.To.Type = obj.TYPE_CONST
p.To.Offset = sc.Val()
case ssa.OpAMD64MOVLconst, ssa.OpAMD64MOVQconst:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64MOVBloadidx1, ssa.OpAMD64MOVWloadidx1, ssa.OpAMD64MOVLloadidx1, ssa.OpAMD64MOVQloadidx1, ssa.OpAMD64MOVSSloadidx1, ssa.OpAMD64MOVSDloadidx1,
ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8, ssa.OpAMD64MOVLloadidx8, ssa.OpAMD64MOVLloadidx4, ssa.OpAMD64MOVSSloadidx4, ssa.OpAMD64MOVWloadidx2:
p := s.Prog(v.Op.Asm())
memIdx(&p.From, v)
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64MOVQstore, ssa.OpAMD64MOVSSstore, ssa.OpAMD64MOVSDstore, ssa.OpAMD64MOVLstore, ssa.OpAMD64MOVWstore, ssa.OpAMD64MOVBstore, ssa.OpAMD64MOVOstore,
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
case ssa.OpAMD64MOVBstoreidx1, ssa.OpAMD64MOVWstoreidx1, ssa.OpAMD64MOVLstoreidx1, ssa.OpAMD64MOVQstoreidx1, ssa.OpAMD64MOVSSstoreidx1, ssa.OpAMD64MOVSDstoreidx1,
ssa.OpAMD64MOVQstoreidx8, ssa.OpAMD64MOVSDstoreidx8, ssa.OpAMD64MOVLstoreidx8, ssa.OpAMD64MOVSSstoreidx4, ssa.OpAMD64MOVLstoreidx4, ssa.OpAMD64MOVWstoreidx2,
ssa.OpAMD64ADDLmodifyidx1, ssa.OpAMD64ADDLmodifyidx4, ssa.OpAMD64ADDLmodifyidx8, ssa.OpAMD64ADDQmodifyidx1, ssa.OpAMD64ADDQmodifyidx8,
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
memIdx(&p.To, v)
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
case ssa.OpAMD64ADDQconstmodify, ssa.OpAMD64ADDLconstmodify:
sc := v.AuxValAndOff()
off := sc.Off()
p := s.Prog(asm)
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux2(&p.To, v, off)
+ ssagen.AddAux2(&p.To, v, off)
break
}
fallthrough
p.From.Offset = val
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux2(&p.To, v, off)
+ ssagen.AddAux2(&p.To, v, off)
case ssa.OpAMD64MOVQstoreconst, ssa.OpAMD64MOVLstoreconst, ssa.OpAMD64MOVWstoreconst, ssa.OpAMD64MOVBstoreconst:
p := s.Prog(v.Op.Asm())
p.From.Offset = sc.Val()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux2(&p.To, v, sc.Off())
+ ssagen.AddAux2(&p.To, v, sc.Off())
case ssa.OpAMD64MOVQstoreconstidx1, ssa.OpAMD64MOVQstoreconstidx8, ssa.OpAMD64MOVLstoreconstidx1, ssa.OpAMD64MOVLstoreconstidx4, ssa.OpAMD64MOVWstoreconstidx1, ssa.OpAMD64MOVWstoreconstidx2, ssa.OpAMD64MOVBstoreconstidx1,
ssa.OpAMD64ADDLconstmodifyidx1, ssa.OpAMD64ADDLconstmodifyidx4, ssa.OpAMD64ADDLconstmodifyidx8, ssa.OpAMD64ADDQconstmodifyidx1, ssa.OpAMD64ADDQconstmodifyidx8,
ssa.OpAMD64ANDLconstmodifyidx1, ssa.OpAMD64ANDLconstmodifyidx4, ssa.OpAMD64ANDLconstmodifyidx8, ssa.OpAMD64ANDQconstmodifyidx1, ssa.OpAMD64ANDQconstmodifyidx8,
p.From.Type = obj.TYPE_NONE
}
memIdx(&p.To, v)
- gc.AddAux2(&p.To, v, sc.Off())
+ ssagen.AddAux2(&p.To, v, sc.Off())
case ssa.OpAMD64MOVLQSX, ssa.OpAMD64MOVWQSX, ssa.OpAMD64MOVBQSX, ssa.OpAMD64MOVLQZX, ssa.OpAMD64MOVWQZX, ssa.OpAMD64MOVBQZX,
ssa.OpAMD64CVTTSS2SL, ssa.OpAMD64CVTTSD2SL, ssa.OpAMD64CVTTSS2SQ, ssa.OpAMD64CVTTSD2SQ,
ssa.OpAMD64CVTSS2SD, ssa.OpAMD64CVTSD2SS:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[1].Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
if v.Reg() != v.Args[0].Reg() {
p.From.Reg = r
p.From.Index = i
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
if v.Reg() != v.Args[0].Reg() {
}
p = s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_ADDR
- p.To.Sym = gc.Duffzero
+ p.To.Sym = ir.Syms.Duffzero
p.To.Offset = off
case ssa.OpAMD64MOVOconst:
if v.AuxInt != 0 {
case ssa.OpAMD64DUFFCOPY:
p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_ADDR
- p.To.Sym = gc.Duffcopy
+ p.To.Sym = ir.Syms.Duffcopy
if v.AuxInt%16 != 0 {
v.Fatalf("bad DUFFCOPY AuxInt %v", v.AuxInt)
}
return
}
p := s.Prog(loadByType(v.Type))
- gc.AddrAuto(&p.From, v.Args[0])
+ ssagen.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
- gc.AddrAuto(&p.To, v)
+ ssagen.AddrAuto(&p.To, v)
case ssa.OpAMD64LoweredHasCPUFeature:
p := s.Prog(x86.AMOVBQZX)
p.From.Type = obj.TYPE_MEM
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64LoweredGetClosurePtr:
// Closure pointer is DX.
- gc.CheckLoweredGetClosurePtr(v)
+ ssagen.CheckLoweredGetClosurePtr(v)
case ssa.OpAMD64LoweredGetG:
r := v.Reg()
// See the comments in cmd/internal/obj/x86/obj6.go
case ssa.OpAMD64LoweredGetCallerSP:
// caller's SP is the address of the first arg
mov := x86.AMOVQ
- if gc.Widthptr == 4 {
+ if types.PtrSize == 4 {
mov = x86.AMOVL
}
p := s.Prog(mov)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
// arg0 is in DI. Set sym to match where regalloc put arg1.
- p.To.Sym = gc.GCWriteBarrierReg[v.Args[1].Reg()]
+ p.To.Sym = ssagen.GCWriteBarrierReg[v.Args[1].Reg()]
case ssa.OpAMD64LoweredPanicBoundsA, ssa.OpAMD64LoweredPanicBoundsB, ssa.OpAMD64LoweredPanicBoundsC:
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
- s.UseArgs(int64(2 * gc.Widthptr)) // space used in callee args area by assembly stubs
+ p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
+ s.UseArgs(int64(2 * types.PtrSize)) // space used in callee args area by assembly stubs
case ssa.OpAMD64NEGQ, ssa.OpAMD64NEGL,
ssa.OpAMD64BSWAPQ, ssa.OpAMD64BSWAPL,
p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
case ssa.OpAMD64SETNEF:
p := s.Prog(v.Op.Asm())
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
case ssa.OpAMD64XCHGB, ssa.OpAMD64XCHGL, ssa.OpAMD64XCHGQ:
p.From.Reg = r
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[1].Reg()
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
case ssa.OpAMD64XADDLlock, ssa.OpAMD64XADDQlock:
r := v.Reg0()
if r != v.Args[0].Reg() {
p.From.Reg = r
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[1].Reg()
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
case ssa.OpAMD64CMPXCHGLlock, ssa.OpAMD64CMPXCHGQlock:
if v.Args[1].Reg() != x86.REG_AX {
v.Fatalf("input[1] not in AX %s", v.LongString())
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
p = s.Prog(x86.ASETEQ)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
case ssa.OpClobber:
p := s.Prog(x86.AMOVL)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 0xdeaddead
p.To.Type = obj.TYPE_MEM
p.To.Reg = x86.REG_SP
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
p = s.Prog(x86.AMOVL)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 0xdeaddead
p.To.Type = obj.TYPE_MEM
p.To.Reg = x86.REG_SP
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
p.To.Offset += 4
default:
v.Fatalf("genValue not implemented: %s", v.LongString())
ssa.BlockAMD64NAN: {x86.AJPS, x86.AJPC},
}
-var eqfJumps = [2][2]gc.IndexJump{
+var eqfJumps = [2][2]ssagen.IndexJump{
{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPS, Index: 1}}, // next == b.Succs[0]
{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPC, Index: 0}}, // next == b.Succs[1]
}
-var nefJumps = [2][2]gc.IndexJump{
+var nefJumps = [2][2]ssagen.IndexJump{
{{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPC, Index: 1}}, // next == b.Succs[0]
{{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPS, Index: 0}}, // next == b.Succs[1]
}
-func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockDefer:
// defer returns in rax:
p.To.Reg = x86.REG_AX
p = s.Prog(x86.AJNE)
p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
case ssa.BlockRet:
package arm
import (
- "cmd/compile/internal/gc"
"cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
"cmd/internal/obj/arm"
"cmd/internal/objabi"
)
-func Init(arch *gc.Arch) {
+func Init(arch *ssagen.ArchInfo) {
arch.LinkArch = &arm.Linkarm
arch.REGSP = arm.REGSP
arch.MAXWIDTH = (1 << 32) - 1
arch.Ginsnop = ginsnop
arch.Ginsnopdefer = ginsnop
- arch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
+ arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
arch.SSAGenValue = ssaGenValue
arch.SSAGenBlock = ssaGenBlock
}
package arm
import (
- "cmd/compile/internal/gc"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/arm"
)
-func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, r0 *uint32) *obj.Prog {
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, r0 *uint32) *obj.Prog {
if cnt == 0 {
return p
}
if *r0 == 0 {
- p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
+ p = pp.Append(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
*r0 = 1
}
- if cnt < int64(4*gc.Widthptr) {
- for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
- p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, 4+off+i)
+ if cnt < int64(4*types.PtrSize) {
+ for i := int64(0); i < cnt; i += int64(types.PtrSize) {
+ p = pp.Append(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, 4+off+i)
}
- } else if cnt <= int64(128*gc.Widthptr) {
- p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
+ } else if cnt <= int64(128*types.PtrSize) {
+ p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
p.Reg = arm.REGSP
- p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+ p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = gc.Duffzero
- p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
+ p.To.Sym = ir.Syms.Duffzero
+ p.To.Offset = 4 * (128 - cnt/int64(types.PtrSize))
} else {
- p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
+ p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
p.Reg = arm.REGSP
- p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, arm.REG_R2, 0)
+ p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, arm.REG_R2, 0)
p.Reg = arm.REG_R1
- p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
+ p = pp.Append(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
p1 := p
p.Scond |= arm.C_PBIT
- p = pp.Appendpp(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
+ p = pp.Append(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
p.Reg = arm.REG_R2
- p = pp.Appendpp(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
- gc.Patch(p, p1)
+ p = pp.Append(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+ p.To.SetTarget(p1)
}
return p
}
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
p := pp.Prog(arm.AAND)
p.From.Type = obj.TYPE_REG
p.From.Reg = arm.REG_R0
"math/bits"
"cmd/compile/internal/base"
- "cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/arm"
}
// genshift generates a Prog for r = r0 op (r1 shifted by n)
-func genshift(s *gc.SSAGenState, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
+func genshift(s *ssagen.State, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
p := s.Prog(as)
p.From.Type = obj.TYPE_SHIFT
p.From.Offset = int64(makeshift(r1, typ, n))
}
// genregshift generates a Prog for r = r0 op (r1 shifted by r2)
-func genregshift(s *gc.SSAGenState, as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
+func genregshift(s *ssagen.State, as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
p := s.Prog(as)
p.From.Type = obj.TYPE_SHIFT
p.From.Offset = int64(makeregshift(r1, typ, r2))
return 0xffffffff, 0
}
-func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
switch v.Op {
case ssa.OpCopy, ssa.OpARMMOVWreg:
if v.Type.IsMemory() {
return
}
p := s.Prog(loadByType(v.Type))
- gc.AddrAuto(&p.From, v.Args[0])
+ ssagen.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpStoreReg:
p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
- gc.AddrAuto(&p.To, v)
+ ssagen.AddrAuto(&p.To, v)
case ssa.OpARMADD,
ssa.OpARMADC,
ssa.OpARMSUB,
v.Fatalf("aux is of unknown type %T", v.Aux)
case *obj.LSym:
wantreg = "SB"
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
case *ir.Name:
wantreg = "SP"
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
case nil:
// No sym, just MOVW $off(SP), R
wantreg = "SP"
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpARMMOVBstore,
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
case ssa.OpARMMOVWloadidx, ssa.OpARMMOVBUloadidx, ssa.OpARMMOVBloadidx, ssa.OpARMMOVHUloadidx, ssa.OpARMMOVHloadidx:
// this is just shift 0 bits
fallthrough
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = gc.Udiv
+ p.To.Sym = ir.Syms.Udiv
case ssa.OpARMLoweredWB:
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
+ p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
s.UseArgs(8) // space used in callee args area by assembly stubs
case ssa.OpARMLoweredPanicExtendA, ssa.OpARMLoweredPanicExtendB, ssa.OpARMLoweredPanicExtendC:
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = gc.ExtendCheckFunc[v.AuxInt]
+ p.To.Sym = ssagen.ExtendCheckFunc[v.AuxInt]
s.UseArgs(12) // space used in callee args area by assembly stubs
case ssa.OpARMDUFFZERO:
p := s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = gc.Duffzero
+ p.To.Sym = ir.Syms.Duffzero
p.To.Offset = v.AuxInt
case ssa.OpARMDUFFCOPY:
p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = gc.Duffcopy
+ p.To.Sym = ir.Syms.Duffcopy
p.To.Offset = v.AuxInt
case ssa.OpARMLoweredNilCheck:
// Issue a load which will fault if arg is nil.
p := s.Prog(arm.AMOVB)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = arm.REGTMP
if logopt.Enabled() {
p2.Reg = arm.REG_R1
p3 := s.Prog(arm.ABLE)
p3.To.Type = obj.TYPE_BRANCH
- gc.Patch(p3, p)
+ p3.To.SetTarget(p)
case ssa.OpARMLoweredMove:
// MOVW.P 4(R1), Rtmp
// MOVW.P Rtmp, 4(R2)
p3.Reg = arm.REG_R1
p4 := s.Prog(arm.ABLE)
p4.To.Type = obj.TYPE_BRANCH
- gc.Patch(p4, p)
+ p4.To.SetTarget(p)
case ssa.OpARMEqual,
ssa.OpARMNotEqual,
ssa.OpARMLessThan,
p.To.Reg = v.Reg()
case ssa.OpARMLoweredGetClosurePtr:
// Closure pointer is R7 (arm.REGCTXT).
- gc.CheckLoweredGetClosurePtr(v)
+ ssagen.CheckLoweredGetClosurePtr(v)
case ssa.OpARMLoweredGetCallerSP:
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(arm.AMOVW)
}
// To model a 'LEnoov' ('<=' without overflow checking) branching
-var leJumps = [2][2]gc.IndexJump{
+var leJumps = [2][2]ssagen.IndexJump{
{{Jump: arm.ABEQ, Index: 0}, {Jump: arm.ABPL, Index: 1}}, // next == b.Succs[0]
{{Jump: arm.ABMI, Index: 0}, {Jump: arm.ABEQ, Index: 0}}, // next == b.Succs[1]
}
// To model a 'GTnoov' ('>' without overflow checking) branching
-var gtJumps = [2][2]gc.IndexJump{
+var gtJumps = [2][2]ssagen.IndexJump{
{{Jump: arm.ABMI, Index: 1}, {Jump: arm.ABEQ, Index: 1}}, // next == b.Succs[0]
{{Jump: arm.ABEQ, Index: 1}, {Jump: arm.ABPL, Index: 0}}, // next == b.Succs[1]
}
-func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockDefer:
p.Reg = arm.REG_R0
p = s.Prog(arm.ABNE)
p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
package arm64
import (
- "cmd/compile/internal/gc"
"cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
"cmd/internal/obj/arm64"
)
-func Init(arch *gc.Arch) {
+func Init(arch *ssagen.ArchInfo) {
arch.LinkArch = &arm64.Linkarm64
arch.REGSP = arm64.REGSP
arch.MAXWIDTH = 1 << 50
arch.Ginsnop = ginsnop
arch.Ginsnopdefer = ginsnop
- arch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
+ arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
arch.SSAGenValue = ssaGenValue
arch.SSAGenBlock = ssaGenBlock
}
package arm64
import (
- "cmd/compile/internal/gc"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/arm64"
"cmd/internal/objabi"
return frame
}
-func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
if cnt == 0 {
return p
}
- if cnt < int64(4*gc.Widthptr) {
- for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
- p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off+i)
+ if cnt < int64(4*types.PtrSize) {
+ for i := int64(0); i < cnt; i += int64(types.PtrSize) {
+ p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off+i)
}
- } else if cnt <= int64(128*gc.Widthptr) && !darwin { // darwin ld64 cannot handle BR26 reloc with non-zero addend
- if cnt%(2*int64(gc.Widthptr)) != 0 {
- p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off)
- off += int64(gc.Widthptr)
- cnt -= int64(gc.Widthptr)
+ } else if cnt <= int64(128*types.PtrSize) && !darwin { // darwin ld64 cannot handle BR26 reloc with non-zero addend
+ if cnt%(2*int64(types.PtrSize)) != 0 {
+ p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off)
+ off += int64(types.PtrSize)
+ cnt -= int64(types.PtrSize)
}
- p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REG_R20, 0)
- p = pp.Appendpp(p, arm64.AADD, obj.TYPE_CONST, 0, 8+off, obj.TYPE_REG, arm64.REG_R20, 0)
+ p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REG_R20, 0)
+ p = pp.Append(p, arm64.AADD, obj.TYPE_CONST, 0, 8+off, obj.TYPE_REG, arm64.REG_R20, 0)
p.Reg = arm64.REG_R20
- p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+ p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = gc.Duffzero
- p.To.Offset = 4 * (64 - cnt/(2*int64(gc.Widthptr)))
+ p.To.Sym = ir.Syms.Duffzero
+ p.To.Offset = 4 * (64 - cnt/(2*int64(types.PtrSize)))
} else {
// Not using REGTMP, so this is async preemptible (async preemption clobbers REGTMP).
// We are at the function entry, where no register is live, so it is okay to clobber
// other registers
const rtmp = arm64.REG_R20
- p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, rtmp, 0)
- p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
- p = pp.Appendpp(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT1, 0)
+ p = pp.Append(p, arm64.AMOVD, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, rtmp, 0)
+ p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
+ p = pp.Append(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT1, 0)
p.Reg = arm64.REGRT1
- p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, rtmp, 0)
- p = pp.Appendpp(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT2, 0)
+ p = pp.Append(p, arm64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, rtmp, 0)
+ p = pp.Append(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT2, 0)
p.Reg = arm64.REGRT1
- p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGRT1, int64(gc.Widthptr))
+ p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGRT1, int64(types.PtrSize))
p.Scond = arm64.C_XPRE
p1 := p
- p = pp.Appendpp(p, arm64.ACMP, obj.TYPE_REG, arm64.REGRT1, 0, obj.TYPE_NONE, 0, 0)
+ p = pp.Append(p, arm64.ACMP, obj.TYPE_REG, arm64.REGRT1, 0, obj.TYPE_NONE, 0, 0)
p.Reg = arm64.REGRT2
- p = pp.Appendpp(p, arm64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
- gc.Patch(p, p1)
+ p = pp.Append(p, arm64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+ p.To.SetTarget(p1)
}
return p
}
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
p := pp.Prog(arm64.AHINT)
p.From.Type = obj.TYPE_CONST
return p
"math"
"cmd/compile/internal/base"
- "cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/arm64"
}
// genshift generates a Prog for r = r0 op (r1 shifted by n)
-func genshift(s *gc.SSAGenState, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
+func genshift(s *ssagen.State, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
p := s.Prog(as)
p.From.Type = obj.TYPE_SHIFT
p.From.Offset = makeshift(r1, typ, n)
return mop
}
-func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
switch v.Op {
case ssa.OpCopy, ssa.OpARM64MOVDreg:
if v.Type.IsMemory() {
return
}
p := s.Prog(loadByType(v.Type))
- gc.AddrAuto(&p.From, v.Args[0])
+ ssagen.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpStoreReg:
p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
- gc.AddrAuto(&p.To, v)
+ ssagen.AddrAuto(&p.To, v)
case ssa.OpARM64ADD,
ssa.OpARM64SUB,
ssa.OpARM64AND,
v.Fatalf("aux is of unknown type %T", v.Aux)
case *obj.LSym:
wantreg = "SB"
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
case *ir.Name:
wantreg = "SP"
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
case nil:
// No sym, just MOVD $off(SP), R
wantreg = "SP"
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpARM64MOVBloadidx,
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
case ssa.OpARM64MOVBstore,
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
case ssa.OpARM64MOVBstoreidx,
ssa.OpARM64MOVHstoreidx,
ssa.OpARM64MOVWstoreidx,
p.From.Offset = int64(v.Args[2].Reg())
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
case ssa.OpARM64MOVBstorezero,
ssa.OpARM64MOVHstorezero,
ssa.OpARM64MOVWstorezero,
p.From.Reg = arm64.REGZERO
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
case ssa.OpARM64MOVBstorezeroidx,
ssa.OpARM64MOVHstorezeroidx,
ssa.OpARM64MOVWstorezeroidx,
p.From.Offset = int64(arm64.REGZERO)
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
case ssa.OpARM64BFI,
ssa.OpARM64BFXIL:
r := v.Reg()
p2.From.Type = obj.TYPE_REG
p2.From.Reg = arm64.REGTMP
p2.To.Type = obj.TYPE_BRANCH
- gc.Patch(p2, p)
+ p2.To.SetTarget(p)
case ssa.OpARM64LoweredAtomicExchange64Variant,
ssa.OpARM64LoweredAtomicExchange32Variant:
swap := arm64.ASWPALD
p3.From.Type = obj.TYPE_REG
p3.From.Reg = arm64.REGTMP
p3.To.Type = obj.TYPE_BRANCH
- gc.Patch(p3, p)
+ p3.To.SetTarget(p)
case ssa.OpARM64LoweredAtomicAdd64Variant,
ssa.OpARM64LoweredAtomicAdd32Variant:
// LDADDAL Rarg1, (Rarg0), Rout
p4.From.Type = obj.TYPE_REG
p4.From.Reg = arm64.REGTMP
p4.To.Type = obj.TYPE_BRANCH
- gc.Patch(p4, p)
+ p4.To.SetTarget(p)
p5 := s.Prog(arm64.ACSET)
p5.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
p5.From.Reg = arm64.COND_EQ
p5.To.Type = obj.TYPE_REG
p5.To.Reg = out
- gc.Patch(p2, p5)
+ p2.To.SetTarget(p5)
case ssa.OpARM64LoweredAtomicCas64Variant,
ssa.OpARM64LoweredAtomicCas32Variant:
// Rarg0: ptr
p3.From.Type = obj.TYPE_REG
p3.From.Reg = arm64.REGTMP
p3.To.Type = obj.TYPE_BRANCH
- gc.Patch(p3, p)
+ p3.To.SetTarget(p)
case ssa.OpARM64LoweredAtomicAnd8Variant,
ssa.OpARM64LoweredAtomicAnd32Variant:
atomic_clear := arm64.ALDCLRALW
p := s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = gc.Duffzero
+ p.To.Sym = ir.Syms.Duffzero
p.To.Offset = v.AuxInt
case ssa.OpARM64LoweredZero:
// STP.P (ZR,ZR), 16(R16)
p2.Reg = arm64.REG_R16
p3 := s.Prog(arm64.ABLE)
p3.To.Type = obj.TYPE_BRANCH
- gc.Patch(p3, p)
+ p3.To.SetTarget(p)
case ssa.OpARM64DUFFCOPY:
p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = gc.Duffcopy
+ p.To.Sym = ir.Syms.Duffcopy
p.To.Offset = v.AuxInt
case ssa.OpARM64LoweredMove:
// MOVD.P 8(R16), Rtmp
p3.Reg = arm64.REG_R16
p4 := s.Prog(arm64.ABLE)
p4.To.Type = obj.TYPE_BRANCH
- gc.Patch(p4, p)
+ p4.To.SetTarget(p)
case ssa.OpARM64CALLstatic, ssa.OpARM64CALLclosure, ssa.OpARM64CALLinter:
s.Call(v)
case ssa.OpARM64LoweredWB:
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
+ p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
s.UseArgs(16) // space used in callee args area by assembly stubs
case ssa.OpARM64LoweredNilCheck:
// Issue a load which will fault if arg is nil.
p := s.Prog(arm64.AMOVB)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = arm64.REGTMP
if logopt.Enabled() {
p.To.Reg = v.Reg()
case ssa.OpARM64LoweredGetClosurePtr:
// Closure pointer is R26 (arm64.REGCTXT).
- gc.CheckLoweredGetClosurePtr(v)
+ ssagen.CheckLoweredGetClosurePtr(v)
case ssa.OpARM64LoweredGetCallerSP:
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(arm64.AMOVD)
}
// To model a 'LEnoov' ('<=' without overflow checking) branching
-var leJumps = [2][2]gc.IndexJump{
+var leJumps = [2][2]ssagen.IndexJump{
{{Jump: arm64.ABEQ, Index: 0}, {Jump: arm64.ABPL, Index: 1}}, // next == b.Succs[0]
{{Jump: arm64.ABMI, Index: 0}, {Jump: arm64.ABEQ, Index: 0}}, // next == b.Succs[1]
}
// To model a 'GTnoov' ('>' without overflow checking) branching
-var gtJumps = [2][2]gc.IndexJump{
+var gtJumps = [2][2]ssagen.IndexJump{
{{Jump: arm64.ABMI, Index: 1}, {Jump: arm64.ABEQ, Index: 1}}, // next == b.Succs[0]
{{Jump: arm64.ABEQ, Index: 1}, {Jump: arm64.ABPL, Index: 0}}, // next == b.Succs[1]
}
-func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockDefer:
p.Reg = arm64.REG_R0
p = s.Prog(arm64.ABNE)
p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
}
os.Exit(code)
}
+
+// To enable tracing support (-t flag), set EnableTrace to true.
+const EnableTrace = false
+
+func Compiling(pkgs []string) bool {
+ if Ctxt.Pkgpath != "" {
+ for _, p := range pkgs {
+ if Ctxt.Pkgpath == p {
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
+// The racewalk pass is currently handled in three parts.
+//
+// First, for flag_race, it inserts calls to racefuncenter and
+// racefuncexit at the start and end (respectively) of each
+// function. This is handled below.
+//
+// Second, during buildssa, it inserts appropriate instrumentation
+// calls immediately before each memory load or store. This is handled
+// by the (*state).instrument method in ssa.go, so here we just set
+// the Func.InstrumentBody flag as needed. For background on why this
+// is done during SSA construction rather than a separate SSA pass,
+// see issue #19054.
+//
+// Third we remove calls to racefuncenter and racefuncexit, for leaf
+// functions without instrumented operations. This is done as part of
+// ssa opt pass via special rule.
+
+// TODO(dvyukov): do not instrument initialization as writes:
+// a := make([]int, 10)
+
+// Do not instrument the following packages at all,
+// at best instrumentation would cause infinite recursion.
+var NoInstrumentPkgs = []string{
+ "runtime/internal/atomic",
+ "runtime/internal/sys",
+ "runtime/internal/math",
+ "runtime",
+ "runtime/race",
+ "runtime/msan",
+ "internal/cpu",
+}
+
+// Don't insert racefuncenterfp/racefuncexit into the following packages.
+// Memory accesses in the packages are either uninteresting or will cause false positives.
+var NoRacePkgs = []string{"sync", "sync/atomic"}
ImportMap map[string]string // set by -importmap OR -importcfg
PackageFile map[string]string // set by -importcfg; nil means not in use
SpectreIndex bool // set by -spectre=index or -spectre=all
+ // Whether we are adding any sort of code instrumentation, such as
+ // when the race detector is enabled.
+ Instrumenting bool
}
}
// FlushErrors sorts errors seen so far by line number, prints them to stdout,
// and empties the errors array.
func FlushErrors() {
- Ctxt.Bso.Flush()
+ if Ctxt != nil && Ctxt.Bso != nil {
+ Ctxt.Bso.Flush()
+ }
if len(errorMsgs) == 0 {
return
}
ErrorExit()
}
}
+
+var AutogeneratedPos src.XPos
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package base
import (
"fmt"
"time"
)
-var timings Timings
+var Timer Timings
// Timings collects the execution times of labeled phases
// which are added trough a sequence of Start/Stop calls.
--- /dev/null
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bitvec
+
+import (
+ "math/bits"
+
+ "cmd/compile/internal/base"
+)
+
+const (
+ wordBits = 32
+ wordMask = wordBits - 1
+ wordShift = 5
+)
+
+// A BitVec is a bit vector.
+type BitVec struct {
+ N int32 // number of bits in vector
+ B []uint32 // words holding bits
+}
+
+func New(n int32) BitVec {
+ nword := (n + wordBits - 1) / wordBits
+ return BitVec{n, make([]uint32, nword)}
+}
+
+type Bulk struct {
+ words []uint32
+ nbit int32
+ nword int32
+}
+
+func NewBulk(nbit int32, count int32) Bulk {
+ nword := (nbit + wordBits - 1) / wordBits
+ size := int64(nword) * int64(count)
+ if int64(int32(size*4)) != size*4 {
+ base.Fatalf("bvbulkalloc too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size)
+ }
+ return Bulk{
+ words: make([]uint32, size),
+ nbit: nbit,
+ nword: nword,
+ }
+}
+
+func (b *Bulk) Next() BitVec {
+ out := BitVec{b.nbit, b.words[:b.nword]}
+ b.words = b.words[b.nword:]
+ return out
+}
+
+func (bv1 BitVec) Eq(bv2 BitVec) bool {
+ if bv1.N != bv2.N {
+ base.Fatalf("bvequal: lengths %d and %d are not equal", bv1.N, bv2.N)
+ }
+ for i, x := range bv1.B {
+ if x != bv2.B[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func (dst BitVec) Copy(src BitVec) {
+ copy(dst.B, src.B)
+}
+
+func (bv BitVec) Get(i int32) bool {
+ if i < 0 || i >= bv.N {
+ base.Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.N)
+ }
+ mask := uint32(1 << uint(i%wordBits))
+ return bv.B[i>>wordShift]&mask != 0
+}
+
+func (bv BitVec) Set(i int32) {
+ if i < 0 || i >= bv.N {
+ base.Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.N)
+ }
+ mask := uint32(1 << uint(i%wordBits))
+ bv.B[i/wordBits] |= mask
+}
+
+func (bv BitVec) Unset(i int32) {
+ if i < 0 || i >= bv.N {
+ base.Fatalf("bvunset: index %d is out of bounds with length %d\n", i, bv.N)
+ }
+ mask := uint32(1 << uint(i%wordBits))
+ bv.B[i/wordBits] &^= mask
+}
+
+// Next returns the smallest index i' >= i for which bv.Get(i') reports true.
+// If there is no such index, Next returns -1.
+func (bv BitVec) Next(i int32) int32 {
+ if i >= bv.N {
+ return -1
+ }
+
+ // Jump i ahead to next word with bits.
+ if bv.B[i>>wordShift]>>uint(i&wordMask) == 0 {
+ i &^= wordMask
+ i += wordBits
+ for i < bv.N && bv.B[i>>wordShift] == 0 {
+ i += wordBits
+ }
+ }
+
+ if i >= bv.N {
+ return -1
+ }
+
+ // Find 1 bit.
+ w := bv.B[i>>wordShift] >> uint(i&wordMask)
+ i += int32(bits.TrailingZeros32(w))
+
+ return i
+}
+
+func (bv BitVec) IsEmpty() bool {
+ for _, x := range bv.B {
+ if x != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+func (bv BitVec) Not() {
+ for i, x := range bv.B {
+ bv.B[i] = ^x
+ }
+}
+
+// Or sets dst to the union (bitwise OR) of src1 and src2.
+func (dst BitVec) Or(src1, src2 BitVec) {
+ if len(src1.B) == 0 {
+ return
+ }
+ _, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop
+
+ for i, x := range src1.B {
+ dst.B[i] = x | src2.B[i]
+ }
+}
+
+// And sets dst to the intersection (bitwise AND) of src1 and src2.
+func (dst BitVec) And(src1, src2 BitVec) {
+ if len(src1.B) == 0 {
+ return
+ }
+ _, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop
+
+ for i, x := range src1.B {
+ dst.B[i] = x & src2.B[i]
+ }
+}
+
+// AndNot sets dst to the difference (bitwise AND NOT) of src1 and src2.
+func (dst BitVec) AndNot(src1, src2 BitVec) {
+ if len(src1.B) == 0 {
+ return
+ }
+ _, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop
+
+ for i, x := range src1.B {
+ dst.B[i] = x &^ src2.B[i]
+ }
+}
+
+func (bv BitVec) String() string {
+ s := make([]byte, 2+bv.N)
+ copy(s, "#*")
+ for i := int32(0); i < bv.N; i++ {
+ ch := byte('0')
+ if bv.Get(i) {
+ ch = '1'
+ }
+ s[2+i] = ch
+ }
+ return string(s)
+}
+
+func (bv BitVec) Clear() {
+ for i := range bv.B {
+ bv.B[i] = 0
+ }
+}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package dwarfgen
import (
+ "bytes"
+ "flag"
+ "fmt"
+ "sort"
+
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
"cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/dwarf"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
- "cmd/internal/sys"
- "internal/race"
- "math/rand"
- "sort"
- "sync"
- "time"
-)
-
-// "Portable" code generation.
-
-var (
- compilequeue []*ir.Func // functions waiting to be compiled
)
-func emitptrargsmap(fn *ir.Func) {
- if ir.FuncName(fn) == "_" || fn.Sym().Linkname != "" {
- return
- }
- lsym := base.Ctxt.Lookup(fn.LSym.Name + ".args_stackmap")
- nptr := int(fn.Type().ArgWidth() / int64(Widthptr))
- bv := bvalloc(int32(nptr) * 2)
- nbitmap := 1
- if fn.Type().NumResults() > 0 {
- nbitmap = 2
- }
- off := duint32(lsym, 0, uint32(nbitmap))
- off = duint32(lsym, off, uint32(bv.n))
-
- if ir.IsMethod(fn) {
- onebitwalktype1(fn.Type().Recvs(), 0, bv)
- }
- if fn.Type().NumParams() > 0 {
- onebitwalktype1(fn.Type().Params(), 0, bv)
- }
- off = dbvec(lsym, off, bv)
-
- if fn.Type().NumResults() > 0 {
- onebitwalktype1(fn.Type().Results(), 0, bv)
- off = dbvec(lsym, off, bv)
- }
-
- ggloblsym(lsym, int32(off), obj.RODATA|obj.LOCAL)
-}
-
-// cmpstackvarlt reports whether the stack variable a sorts before b.
-//
-// Sort the list of stack variables. Autos after anything else,
-// within autos, unused after used, within used, things with
-// pointers first, zeroed things first, and then decreasing size.
-// Because autos are laid out in decreasing addresses
-// on the stack, pointers first, zeroed things first and decreasing size
-// really means, in memory, things with pointers needing zeroing at
-// the top of the stack and increasing in size.
-// Non-autos sort on offset.
-func cmpstackvarlt(a, b *ir.Name) bool {
- if (a.Class() == ir.PAUTO) != (b.Class() == ir.PAUTO) {
- return b.Class() == ir.PAUTO
- }
-
- if a.Class() != ir.PAUTO {
- return a.FrameOffset() < b.FrameOffset()
- }
-
- if a.Used() != b.Used() {
- return a.Used()
- }
-
- ap := a.Type().HasPointers()
- bp := b.Type().HasPointers()
- if ap != bp {
- return ap
- }
-
- ap = a.Needzero()
- bp = b.Needzero()
- if ap != bp {
- return ap
- }
-
- if a.Type().Width != b.Type().Width {
- return a.Type().Width > b.Type().Width
- }
-
- return a.Sym().Name < b.Sym().Name
-}
-
-// byStackvar implements sort.Interface for []*Node using cmpstackvarlt.
-type byStackVar []*ir.Name
-
-func (s byStackVar) Len() int { return len(s) }
-func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) }
-func (s byStackVar) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-func (s *ssafn) AllocFrame(f *ssa.Func) {
- s.stksize = 0
- s.stkptrsize = 0
- fn := s.curfn
-
- // Mark the PAUTO's unused.
- for _, ln := range fn.Dcl {
- if ln.Class() == ir.PAUTO {
- ln.SetUsed(false)
- }
- }
-
- for _, l := range f.RegAlloc {
- if ls, ok := l.(ssa.LocalSlot); ok {
- ls.N.Name().SetUsed(true)
- }
- }
-
- scratchUsed := false
- for _, b := range f.Blocks {
- for _, v := range b.Values {
- if n, ok := v.Aux.(*ir.Name); ok {
- switch n.Class() {
- case ir.PPARAM, ir.PPARAMOUT:
- // Don't modify nodfp; it is a global.
- if n != nodfp {
- n.Name().SetUsed(true)
- }
- case ir.PAUTO:
- n.Name().SetUsed(true)
- }
- }
- if !scratchUsed {
- scratchUsed = v.Op.UsesScratch()
- }
-
- }
- }
-
- if f.Config.NeedsFpScratch && scratchUsed {
- s.scratchFpMem = tempAt(src.NoXPos, s.curfn, types.Types[types.TUINT64])
- }
-
- sort.Sort(byStackVar(fn.Dcl))
-
- // Reassign stack offsets of the locals that are used.
- lastHasPtr := false
- for i, n := range fn.Dcl {
- if n.Op() != ir.ONAME || n.Class() != ir.PAUTO {
- continue
- }
- if !n.Used() {
- fn.Dcl = fn.Dcl[:i]
- break
- }
-
- dowidth(n.Type())
- w := n.Type().Width
- if w >= MaxWidth || w < 0 {
- base.Fatalf("bad width")
- }
- if w == 0 && lastHasPtr {
- // Pad between a pointer-containing object and a zero-sized object.
- // This prevents a pointer to the zero-sized object from being interpreted
- // as a pointer to the pointer-containing object (and causing it
- // to be scanned when it shouldn't be). See issue 24993.
- w = 1
- }
- s.stksize += w
- s.stksize = Rnd(s.stksize, int64(n.Type().Align))
- if n.Type().HasPointers() {
- s.stkptrsize = s.stksize
- lastHasPtr = true
- } else {
- lastHasPtr = false
- }
- if thearch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) {
- s.stksize = Rnd(s.stksize, int64(Widthptr))
- }
- n.SetFrameOffset(-s.stksize)
- }
-
- s.stksize = Rnd(s.stksize, int64(Widthreg))
- s.stkptrsize = Rnd(s.stkptrsize, int64(Widthreg))
-}
-
-func funccompile(fn *ir.Func) {
- if Curfn != nil {
- base.Fatalf("funccompile %v inside %v", fn.Sym(), Curfn.Sym())
- }
-
- if fn.Type() == nil {
- if base.Errors() == 0 {
- base.Fatalf("funccompile missing type")
- }
- return
- }
-
- // assign parameter offsets
- dowidth(fn.Type())
-
- if fn.Body().Len() == 0 {
- // Initialize ABI wrappers if necessary.
- initLSym(fn, false)
- emitptrargsmap(fn)
- return
- }
-
- dclcontext = ir.PAUTO
- Curfn = fn
- compile(fn)
- Curfn = nil
- dclcontext = ir.PEXTERN
-}
-
-func compile(fn *ir.Func) {
- // Set up the function's LSym early to avoid data races with the assemblers.
- // Do this before walk, as walk needs the LSym to set attributes/relocations
- // (e.g. in markTypeUsedInInterface).
- initLSym(fn, true)
-
- errorsBefore := base.Errors()
- walk(fn)
- if base.Errors() > errorsBefore {
- return
- }
-
- // From this point, there should be no uses of Curfn. Enforce that.
- Curfn = nil
-
- if ir.FuncName(fn) == "_" {
- // We don't need to generate code for this function, just report errors in its body.
- // At this point we've generated any errors needed.
- // (Beyond here we generate only non-spec errors, like "stack frame too large".)
- // See issue 29870.
- return
- }
-
- // Make sure type syms are declared for all types that might
- // be types of stack objects. We need to do this here
- // because symbols must be allocated before the parallel
- // phase of the compiler.
- for _, n := range fn.Dcl {
- switch n.Class() {
- case ir.PPARAM, ir.PPARAMOUT, ir.PAUTO:
- if livenessShouldTrack(n) && n.Addrtaken() {
- dtypesym(n.Type())
- // Also make sure we allocate a linker symbol
- // for the stack object data, for the same reason.
- if fn.LSym.Func().StackObjects == nil {
- fn.LSym.Func().StackObjects = base.Ctxt.Lookup(fn.LSym.Name + ".stkobj")
- }
- }
- }
- }
-
- if compilenow(fn) {
- compileSSA(fn, 0)
- } else {
- compilequeue = append(compilequeue, fn)
- }
-}
-
-// compilenow reports whether to compile immediately.
-// If functions are not compiled immediately,
-// they are enqueued in compilequeue,
-// which is drained by compileFunctions.
-func compilenow(fn *ir.Func) bool {
- // Issue 38068: if this function is a method AND an inline
- // candidate AND was not inlined (yet), put it onto the compile
- // queue instead of compiling it immediately. This is in case we
- // wind up inlining it into a method wrapper that is generated by
- // compiling a function later on in the Target.Decls list.
- if ir.IsMethod(fn) && isInlinableButNotInlined(fn) {
- return false
- }
- return base.Flag.LowerC == 1 && base.Debug.CompileLater == 0
-}
-
-// isInlinableButNotInlined returns true if 'fn' was marked as an
-// inline candidate but then never inlined (presumably because we
-// found no call sites).
-func isInlinableButNotInlined(fn *ir.Func) bool {
- if fn.Inl == nil {
- return false
- }
- if fn.Sym() == nil {
- return true
- }
- return !fn.Sym().Linksym().WasInlined()
-}
-
-const maxStackSize = 1 << 30
-
-// compileSSA builds an SSA backend function,
-// uses it to generate a plist,
-// and flushes that plist to machine code.
-// worker indicates which of the backend workers is doing the processing.
-func compileSSA(fn *ir.Func, worker int) {
- f := buildssa(fn, worker)
- // Note: check arg size to fix issue 25507.
- if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type().ArgWidth() >= maxStackSize {
- largeStackFramesMu.Lock()
- largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: fn.Type().ArgWidth(), pos: fn.Pos()})
- largeStackFramesMu.Unlock()
- return
- }
- pp := newProgs(fn, worker)
- defer pp.Free()
- genssa(f, pp)
- // Check frame size again.
- // The check above included only the space needed for local variables.
- // After genssa, the space needed includes local variables and the callee arg region.
- // We must do this check prior to calling pp.Flush.
- // If there are any oversized stack frames,
- // the assembler may emit inscrutable complaints about invalid instructions.
- if pp.Text.To.Offset >= maxStackSize {
- largeStackFramesMu.Lock()
- locals := f.Frontend().(*ssafn).stksize
- largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: fn.Type().ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos()})
- largeStackFramesMu.Unlock()
- return
- }
-
- pp.Flush() // assemble, fill in boilerplate, etc.
- // fieldtrack must be called after pp.Flush. See issue 20014.
- fieldtrack(pp.Text.From.Sym, fn.FieldTrack)
-}
-
-func init() {
- if race.Enabled {
- rand.Seed(time.Now().UnixNano())
- }
-}
-
-// compileFunctions compiles all functions in compilequeue.
-// It fans out nBackendWorkers to do the work
-// and waits for them to complete.
-func compileFunctions() {
- if len(compilequeue) != 0 {
- sizeCalculationDisabled = true // not safe to calculate sizes concurrently
- if race.Enabled {
- // Randomize compilation order to try to shake out races.
- tmp := make([]*ir.Func, len(compilequeue))
- perm := rand.Perm(len(compilequeue))
- for i, v := range perm {
- tmp[v] = compilequeue[i]
- }
- copy(compilequeue, tmp)
- } else {
- // Compile the longest functions first,
- // since they're most likely to be the slowest.
- // This helps avoid stragglers.
- sort.Slice(compilequeue, func(i, j int) bool {
- return compilequeue[i].Body().Len() > compilequeue[j].Body().Len()
- })
- }
- var wg sync.WaitGroup
- base.Ctxt.InParallel = true
- c := make(chan *ir.Func, base.Flag.LowerC)
- for i := 0; i < base.Flag.LowerC; i++ {
- wg.Add(1)
- go func(worker int) {
- for fn := range c {
- compileSSA(fn, worker)
- }
- wg.Done()
- }(i)
- }
- for _, fn := range compilequeue {
- c <- fn
- }
- close(c)
- compilequeue = nil
- wg.Wait()
- base.Ctxt.InParallel = false
- sizeCalculationDisabled = false
- }
-}
-
-func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) {
+func Info(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) {
fn := curfn.(*ir.Func)
if fn.Nname != nil {
if n.Op() != ir.ONAME { // might be OTYPE or OLITERAL
continue
}
- switch n.Class() {
+ switch n.Class_ {
case ir.PAUTO:
if !n.Used() {
// Text == nil -> generating abstract function
continue
}
apdecls = append(apdecls, n)
- fnsym.Func().RecordAutoType(ngotype(n).Linksym())
+ fnsym.Func().RecordAutoType(reflectdata.TypeSym(n.Type()).Linksym())
}
}
return decl.Pos()
}
-// createSimpleVars creates a DWARF entry for every variable declared in the
-// function, claiming that they are permanently on the stack.
-func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var, map[*ir.Name]bool) {
- var vars []*dwarf.Var
- var decls []*ir.Name
- selected := make(map[*ir.Name]bool)
- for _, n := range apDecls {
- if ir.IsAutoTmp(n) {
- continue
- }
-
- decls = append(decls, n)
- vars = append(vars, createSimpleVar(fnsym, n))
- selected[n] = true
- }
- return decls, vars, selected
-}
-
-func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var {
- var abbrev int
- var offs int64
-
- switch n.Class() {
- case ir.PAUTO:
- offs = n.FrameOffset()
- abbrev = dwarf.DW_ABRV_AUTO
- if base.Ctxt.FixedFrameSize() == 0 {
- offs -= int64(Widthptr)
- }
- if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
- // There is a word space for FP on ARM64 even if the frame pointer is disabled
- offs -= int64(Widthptr)
- }
-
- case ir.PPARAM, ir.PPARAMOUT:
- abbrev = dwarf.DW_ABRV_PARAM
- offs = n.FrameOffset() + base.Ctxt.FixedFrameSize()
- default:
- base.Fatalf("createSimpleVar unexpected class %v for node %v", n.Class(), n)
- }
-
- typename := dwarf.InfoPrefix + typesymname(n.Type())
- delete(fnsym.Func().Autot, ngotype(n).Linksym())
- inlIndex := 0
- if base.Flag.GenDwarfInl > 1 {
- if n.Name().InlFormal() || n.Name().InlLocal() {
- inlIndex = posInlIndex(n.Pos()) + 1
- if n.Name().InlFormal() {
- abbrev = dwarf.DW_ABRV_PARAM
- }
- }
- }
- declpos := base.Ctxt.InnermostPos(declPos(n))
- return &dwarf.Var{
- Name: n.Sym().Name,
- IsReturnValue: n.Class() == ir.PPARAMOUT,
- IsInlFormal: n.Name().InlFormal(),
- Abbrev: abbrev,
- StackOffset: int32(offs),
- Type: base.Ctxt.Lookup(typename),
- DeclFile: declpos.RelFilename(),
- DeclLine: declpos.RelLine(),
- DeclCol: declpos.Col(),
- InlIndex: int32(inlIndex),
- ChildIndex: -1,
- }
-}
-
-// createComplexVars creates recomposed DWARF vars with location lists,
-// suitable for describing optimized code.
-func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Name, []*dwarf.Var, map[*ir.Name]bool) {
- debugInfo := fn.DebugInfo.(*ssa.FuncDebug)
-
- // Produce a DWARF variable entry for each user variable.
- var decls []*ir.Name
- var vars []*dwarf.Var
- ssaVars := make(map[*ir.Name]bool)
-
- for varID, dvar := range debugInfo.Vars {
- n := dvar
- ssaVars[n] = true
- for _, slot := range debugInfo.VarSlots[varID] {
- ssaVars[debugInfo.Slots[slot].N] = true
- }
-
- if dvar := createComplexVar(fnsym, fn, ssa.VarID(varID)); dvar != nil {
- decls = append(decls, n)
- vars = append(vars, dvar)
- }
- }
-
- return decls, vars, ssaVars
-}
-
// createDwarfVars process fn, returning a list of DWARF variables and the
// Nodes they represent.
func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var) {
if c == '.' || n.Type().IsUntyped() {
continue
}
- if n.Class() == ir.PPARAM && !canSSAType(n.Type()) {
+ if n.Class_ == ir.PPARAM && !ssagen.TypeOK(n.Type()) {
// SSA-able args get location lists, and may move in and
// out of registers, so those are handled elsewhere.
// Autos and named output params seem to get handled
decls = append(decls, n)
continue
}
- typename := dwarf.InfoPrefix + typesymname(n.Type())
+ typename := dwarf.InfoPrefix + types.TypeSymName(n.Type())
decls = append(decls, n)
abbrev := dwarf.DW_ABRV_AUTO_LOCLIST
- isReturnValue := (n.Class() == ir.PPARAMOUT)
- if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT {
+ isReturnValue := (n.Class_ == ir.PPARAMOUT)
+ if n.Class_ == ir.PPARAM || n.Class_ == ir.PPARAMOUT {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
- } else if n.Class() == ir.PAUTOHEAP {
+ } else if n.Class_ == ir.PAUTOHEAP {
// If dcl in question has been promoted to heap, do a bit
// of extra work to recover original class (auto or param);
// see issue 30908. This insures that we get the proper
// and not stack).
// TODO(thanm): generate a better location expression
stackcopy := n.Name().Stackcopy
- if stackcopy != nil && (stackcopy.Class() == ir.PPARAM || stackcopy.Class() == ir.PPARAMOUT) {
+ if stackcopy != nil && (stackcopy.Class_ == ir.PPARAM || stackcopy.Class_ == ir.PPARAMOUT) {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
- isReturnValue = (stackcopy.Class() == ir.PPARAMOUT)
+ isReturnValue = (stackcopy.Class_ == ir.PPARAMOUT)
}
}
inlIndex := 0
ChildIndex: -1,
})
// Record go type of to insure that it gets emitted by the linker.
- fnsym.Func().RecordAutoType(ngotype(n).Linksym())
+ fnsym.Func().RecordAutoType(reflectdata.TypeSym(n.Type()).Linksym())
}
return decls, vars
return rdcl
}
-// stackOffset returns the stack location of a LocalSlot relative to the
-// stack pointer, suitable for use in a DWARF location entry. This has nothing
-// to do with its offset in the user variable.
-func stackOffset(slot ssa.LocalSlot) int32 {
- n := slot.N
- var off int64
- switch n.Class() {
+// createSimpleVars creates a DWARF entry for every variable declared in the
+// function, claiming that they are permanently on the stack.
+func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var, map[*ir.Name]bool) {
+ var vars []*dwarf.Var
+ var decls []*ir.Name
+ selected := make(map[*ir.Name]bool)
+ for _, n := range apDecls {
+ if ir.IsAutoTmp(n) {
+ continue
+ }
+
+ decls = append(decls, n)
+ vars = append(vars, createSimpleVar(fnsym, n))
+ selected[n] = true
+ }
+ return decls, vars, selected
+}
+
+func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var {
+ var abbrev int
+ var offs int64
+
+ switch n.Class_ {
case ir.PAUTO:
- off = n.FrameOffset()
+ offs = n.FrameOffset()
+ abbrev = dwarf.DW_ABRV_AUTO
if base.Ctxt.FixedFrameSize() == 0 {
- off -= int64(Widthptr)
+ offs -= int64(types.PtrSize)
}
if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
// There is a word space for FP on ARM64 even if the frame pointer is disabled
- off -= int64(Widthptr)
+ offs -= int64(types.PtrSize)
}
+
case ir.PPARAM, ir.PPARAMOUT:
- off = n.FrameOffset() + base.Ctxt.FixedFrameSize()
+ abbrev = dwarf.DW_ABRV_PARAM
+ offs = n.FrameOffset() + base.Ctxt.FixedFrameSize()
+ default:
+ base.Fatalf("createSimpleVar unexpected class %v for node %v", n.Class_, n)
+ }
+
+ typename := dwarf.InfoPrefix + types.TypeSymName(n.Type())
+ delete(fnsym.Func().Autot, reflectdata.TypeSym(n.Type()).Linksym())
+ inlIndex := 0
+ if base.Flag.GenDwarfInl > 1 {
+ if n.Name().InlFormal() || n.Name().InlLocal() {
+ inlIndex = posInlIndex(n.Pos()) + 1
+ if n.Name().InlFormal() {
+ abbrev = dwarf.DW_ABRV_PARAM
+ }
+ }
+ }
+ declpos := base.Ctxt.InnermostPos(declPos(n))
+ return &dwarf.Var{
+ Name: n.Sym().Name,
+ IsReturnValue: n.Class_ == ir.PPARAMOUT,
+ IsInlFormal: n.Name().InlFormal(),
+ Abbrev: abbrev,
+ StackOffset: int32(offs),
+ Type: base.Ctxt.Lookup(typename),
+ DeclFile: declpos.RelFilename(),
+ DeclLine: declpos.RelLine(),
+ DeclCol: declpos.Col(),
+ InlIndex: int32(inlIndex),
+ ChildIndex: -1,
+ }
+}
+
+// createComplexVars creates recomposed DWARF vars with location lists,
+// suitable for describing optimized code.
+func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Name, []*dwarf.Var, map[*ir.Name]bool) {
+ debugInfo := fn.DebugInfo.(*ssa.FuncDebug)
+
+ // Produce a DWARF variable entry for each user variable.
+ var decls []*ir.Name
+ var vars []*dwarf.Var
+ ssaVars := make(map[*ir.Name]bool)
+
+ for varID, dvar := range debugInfo.Vars {
+ n := dvar
+ ssaVars[n] = true
+ for _, slot := range debugInfo.VarSlots[varID] {
+ ssaVars[debugInfo.Slots[slot].N] = true
+ }
+
+ if dvar := createComplexVar(fnsym, fn, ssa.VarID(varID)); dvar != nil {
+ decls = append(decls, n)
+ vars = append(vars, dvar)
+ }
}
- return int32(off + slot.Off)
+
+ return decls, vars, ssaVars
}
// createComplexVar builds a single DWARF variable entry and location list.
n := debug.Vars[varID]
var abbrev int
- switch n.Class() {
+ switch n.Class_ {
case ir.PAUTO:
abbrev = dwarf.DW_ABRV_AUTO_LOCLIST
case ir.PPARAM, ir.PPARAMOUT:
return nil
}
- gotype := ngotype(n).Linksym()
+ gotype := reflectdata.TypeSym(n.Type()).Linksym()
delete(fnsym.Func().Autot, gotype)
typename := dwarf.InfoPrefix + gotype.Name[len("type."):]
inlIndex := 0
declpos := base.Ctxt.InnermostPos(n.Pos())
dvar := &dwarf.Var{
Name: n.Sym().Name,
- IsReturnValue: n.Class() == ir.PPARAMOUT,
+ IsReturnValue: n.Class_ == ir.PPARAMOUT,
IsInlFormal: n.Name().InlFormal(),
Abbrev: abbrev,
Type: base.Ctxt.Lookup(typename),
// variables just give it the first one. It's not used otherwise.
// This won't work well if the first slot hasn't been assigned a stack
// location, but it's not obvious how to do better.
- StackOffset: stackOffset(debug.Slots[debug.VarSlots[varID][0]]),
+ StackOffset: ssagen.StackOffset(debug.Slots[debug.VarSlots[varID][0]]),
DeclFile: declpos.RelFilename(),
DeclLine: declpos.RelLine(),
DeclCol: declpos.Col(),
return dvar
}
-// fieldtrack adds R_USEFIELD relocations to fnsym to record any
-// struct fields that it used.
-func fieldtrack(fnsym *obj.LSym, tracked map[*types.Sym]struct{}) {
- if fnsym == nil {
- return
- }
- if objabi.Fieldtrack_enabled == 0 || len(tracked) == 0 {
+// RecordFlags records the specified command-line flags to be placed
+// in the DWARF info.
+func RecordFlags(flags ...string) {
+ if base.Ctxt.Pkgpath == "" {
+ // We can't record the flags if we don't know what the
+ // package name is.
return
}
- trackSyms := make([]*types.Sym, 0, len(tracked))
- for sym := range tracked {
- trackSyms = append(trackSyms, sym)
+ type BoolFlag interface {
+ IsBoolFlag() bool
}
- sort.Sort(symByName(trackSyms))
- for _, sym := range trackSyms {
- r := obj.Addrel(fnsym)
- r.Sym = sym.Linksym()
- r.Type = objabi.R_USEFIELD
+ type CountFlag interface {
+ IsCountFlag() bool
+ }
+ var cmd bytes.Buffer
+ for _, name := range flags {
+ f := flag.Lookup(name)
+ if f == nil {
+ continue
+ }
+ getter := f.Value.(flag.Getter)
+ if getter.String() == f.DefValue {
+ // Flag has default value, so omit it.
+ continue
+ }
+ if bf, ok := f.Value.(BoolFlag); ok && bf.IsBoolFlag() {
+ val, ok := getter.Get().(bool)
+ if ok && val {
+ fmt.Fprintf(&cmd, " -%s", f.Name)
+ continue
+ }
+ }
+ if cf, ok := f.Value.(CountFlag); ok && cf.IsCountFlag() {
+ val, ok := getter.Get().(int)
+ if ok && val == 1 {
+ fmt.Fprintf(&cmd, " -%s", f.Name)
+ continue
+ }
+ }
+ fmt.Fprintf(&cmd, " -%s=%v", f.Name, getter.Get())
}
-}
-type symByName []*types.Sym
+ if cmd.Len() == 0 {
+ return
+ }
+ s := base.Ctxt.Lookup(dwarf.CUInfoPrefix + "producer." + base.Ctxt.Pkgpath)
+ s.Type = objabi.SDWARFCUINFO
+ // Sometimes (for example when building tests) we can link
+ // together two package main archives. So allow dups.
+ s.Set(obj.AttrDuplicateOK, true)
+ base.Ctxt.Data = append(base.Ctxt.Data, s)
+ s.P = cmd.Bytes()[1:]
+}
-func (a symByName) Len() int { return len(a) }
-func (a symByName) Less(i, j int) bool { return a[i].Name < a[j].Name }
-func (a symByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+// RecordPackageName records the name of the package being
+// compiled, so that the linker can save it in the compile unit's DIE.
+func RecordPackageName() {
+ s := base.Ctxt.Lookup(dwarf.CUInfoPrefix + "packagename." + base.Ctxt.Pkgpath)
+ s.Type = objabi.SDWARFCUINFO
+ // Sometimes (for example when building tests) we can link
+ // together two package main archives. So allow dups.
+ s.Set(obj.AttrDuplicateOK, true)
+ base.Ctxt.Data = append(base.Ctxt.Data, s)
+ s.P = []byte(types.LocalPkg.Name)
+}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package dwarfgen
import (
+ "fmt"
+ "strings"
+
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/internal/dwarf"
"cmd/internal/obj"
"cmd/internal/src"
- "fmt"
- "strings"
)
// To identify variables by original source position.
// late in the compilation when it is determined that we need an
// abstract function DIE for an inlined routine imported from a
// previously compiled package.
-func genAbstractFunc(fn *obj.LSym) {
+func AbstractFunc(fn *obj.LSym) {
ifn := base.Ctxt.DwFixups.GetPrecursorFunc(fn)
if ifn == nil {
base.Ctxt.Diag("failed to locate precursor fn for %v", fn)
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package dwarfgen
import (
+ "sort"
+
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/internal/dwarf"
"cmd/internal/obj"
"cmd/internal/src"
- "sort"
)
// See golang.org/issue/20390.
func assembleScopes(fnsym *obj.LSym, fn *ir.Func, dwarfVars []*dwarf.Var, varScopes []ir.ScopeID) []dwarf.Scope {
// Initialize the DWARF scope tree based on lexical scopes.
- dwarfScopes := make([]dwarf.Scope, 1+len(fn.Func().Parents))
- for i, parent := range fn.Func().Parents {
+ dwarfScopes := make([]dwarf.Scope, 1+len(fn.Parents))
+ for i, parent := range fn.Parents {
dwarfScopes[i+1].Parent = int32(parent)
}
scopeVariables(dwarfVars, varScopes, dwarfScopes)
- scopePCs(fnsym, fn.Func().Marks, dwarfScopes)
+ scopePCs(fnsym, fn.Marks, dwarfScopes)
return compactScopes(dwarfScopes)
}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc_test
+package dwarfgen
import (
- "cmd/internal/objfile"
"debug/dwarf"
"fmt"
"internal/testenv"
"strconv"
"strings"
"testing"
+
+ "cmd/internal/objfile"
)
type testline struct {
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package escape
import (
+ "fmt"
+ "math"
+ "strings"
+
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
+ "cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/src"
- "fmt"
- "math"
- "strings"
)
// Escape analysis.
// u[2], etc. However, we do record the implicit dereference involved
// in indexing a slice.
-type Escape struct {
- allLocs []*EscLocation
+type escape struct {
+ allLocs []*location
labels map[*types.Sym]labelState // known labels
curfn *ir.Func
// unstructured loop).
loopDepth int
- heapLoc EscLocation
- blankLoc EscLocation
+ heapLoc location
+ blankLoc location
}
-// An EscLocation represents an abstract location that stores a Go
+// A location represents an abstract location that stores a Go
// variable.
-type EscLocation struct {
- n ir.Node // represented variable or expression, if any
- curfn *ir.Func // enclosing function
- edges []EscEdge // incoming edges
- loopDepth int // loopDepth at declaration
+type location struct {
+ n ir.Node // represented variable or expression, if any
+ curfn *ir.Func // enclosing function
+ edges []edge // incoming edges
+ loopDepth int // loopDepth at declaration
// derefs and walkgen are used during walkOne to track the
// minimal dereferences from the walk root.
// dst and dstEdgeindex track the next immediate assignment
// destination location during walkone, along with the index
// of the edge pointing back to this location.
- dst *EscLocation
+ dst *location
dstEdgeIdx int
// queued is used by walkAll to track whether this location is
transient bool
// paramEsc records the represented parameter's leak set.
- paramEsc EscLeaks
+ paramEsc leaks
}
-// An EscEdge represents an assignment edge between two Go variables.
-type EscEdge struct {
- src *EscLocation
+// An edge represents an assignment edge between two Go variables.
+type edge struct {
+ src *location
derefs int // >= -1
- notes *EscNote
+ notes *note
}
-// escFmt is called from node printing to print information about escape analysis results.
-func escFmt(n ir.Node) string {
+// Fmt is called from node printing to print information about escape analysis results.
+func Fmt(n ir.Node) string {
text := ""
switch n.Esc() {
- case EscUnknown:
+ case ir.EscUnknown:
break
- case EscHeap:
+ case ir.EscHeap:
text = "esc(h)"
- case EscNone:
+ case ir.EscNone:
text = "esc(no)"
- case EscNever:
+ case ir.EscNever:
text = "esc(N)"
default:
text = fmt.Sprintf("esc(%d)", n.Esc())
}
- if e, ok := n.Opt().(*EscLocation); ok && e.loopDepth != 0 {
+ if e, ok := n.Opt().(*location); ok && e.loopDepth != 0 {
if text != "" {
text += " "
}
return text
}
-// escapeFuncs performs escape analysis on a minimal batch of
+// Batch performs escape analysis on a minimal batch of
// functions.
-func escapeFuncs(fns []*ir.Func, recursive bool) {
+func Batch(fns []*ir.Func, recursive bool) {
for _, fn := range fns {
if fn.Op() != ir.ODCLFUNC {
base.Fatalf("unexpected node: %v", fn)
}
}
- var e Escape
+ var e escape
e.heapLoc.escapes = true
// Construct data-flow graph from syntax trees.
e.finish(fns)
}
-func (e *Escape) initFunc(fn *ir.Func) {
- if fn.Esc() != EscFuncUnknown {
+func (e *escape) initFunc(fn *ir.Func) {
+ if fn.Esc() != escFuncUnknown {
base.Fatalf("unexpected node: %v", fn)
}
- fn.SetEsc(EscFuncPlanned)
+ fn.SetEsc(escFuncPlanned)
if base.Flag.LowerM > 3 {
ir.Dump("escAnalyze", fn)
}
}
}
-func (e *Escape) walkFunc(fn *ir.Func) {
- fn.SetEsc(EscFuncStarted)
+func (e *escape) walkFunc(fn *ir.Func) {
+ fn.SetEsc(escFuncStarted)
// Identify labels that mark the head of an unstructured loop.
ir.Visit(fn, func(n ir.Node) {
if e.labels == nil {
e.labels = make(map[*types.Sym]labelState)
}
- e.labels[n.Sym()] = nonlooping
+ e.labels[n.Label] = nonlooping
case ir.OGOTO:
// If we visited the label before the goto,
// then this is a looping label.
n := n.(*ir.BranchStmt)
- if e.labels[n.Sym()] == nonlooping {
- e.labels[n.Sym()] = looping
+ if e.labels[n.Label] == nonlooping {
+ e.labels[n.Label] = looping
}
}
})
e.curfn = fn
e.loopDepth = 1
- e.block(fn.Body())
+ e.block(fn.Body)
if len(e.labels) != 0 {
base.FatalfAt(fn.Pos(), "leftover labels after walkFunc")
// }
// stmt evaluates a single Go statement.
-func (e *Escape) stmt(n ir.Node) {
+func (e *escape) stmt(n ir.Node) {
if n == nil {
return
}
- lno := setlineno(n)
+ lno := ir.SetPos(n)
defer func() {
base.Pos = lno
}()
case ir.OBLOCK:
n := n.(*ir.BlockStmt)
- e.stmts(n.List())
+ e.stmts(n.List)
case ir.ODCL:
// Record loop depth at declaration.
n := n.(*ir.Decl)
- if !ir.IsBlank(n.Left()) {
- e.dcl(n.Left())
+ if !ir.IsBlank(n.X) {
+ e.dcl(n.X)
}
case ir.OLABEL:
n := n.(*ir.LabelStmt)
- switch e.labels[n.Sym()] {
+ switch e.labels[n.Label] {
case nonlooping:
if base.Flag.LowerM > 2 {
fmt.Printf("%v:%v non-looping label\n", base.FmtPos(base.Pos), n)
default:
base.Fatalf("label missing tag")
}
- delete(e.labels, n.Sym())
+ delete(e.labels, n.Label)
case ir.OIF:
n := n.(*ir.IfStmt)
- e.discard(n.Left())
- e.block(n.Body())
- e.block(n.Rlist())
+ e.discard(n.Cond)
+ e.block(n.Body)
+ e.block(n.Else)
case ir.OFOR, ir.OFORUNTIL:
n := n.(*ir.ForStmt)
e.loopDepth++
- e.discard(n.Left())
- e.stmt(n.Right())
- e.block(n.Body())
+ e.discard(n.Cond)
+ e.stmt(n.Post)
+ e.block(n.Body)
e.loopDepth--
case ir.ORANGE:
// for List = range Right { Nbody }
n := n.(*ir.RangeStmt)
e.loopDepth++
- ks := e.addrs(n.List())
- e.block(n.Body())
+ ks := e.addrs(n.Vars)
+ e.block(n.Body)
e.loopDepth--
// Right is evaluated outside the loop.
k := e.discardHole()
if len(ks) >= 2 {
- if n.Right().Type().IsArray() {
+ if n.X.Type().IsArray() {
k = ks[1].note(n, "range")
} else {
k = ks[1].deref(n, "range-deref")
}
}
- e.expr(e.later(k), n.Right())
+ e.expr(e.later(k), n.X)
case ir.OSWITCH:
n := n.(*ir.SwitchStmt)
- typesw := n.Left() != nil && n.Left().Op() == ir.OTYPESW
+ typesw := n.Tag != nil && n.Tag.Op() == ir.OTYPESW
- var ks []EscHole
- for _, cas := range n.List().Slice() { // cases
+ var ks []hole
+ for _, cas := range n.Cases { // cases
cas := cas.(*ir.CaseStmt)
- if typesw && n.Left().(*ir.TypeSwitchGuard).Left() != nil {
- cv := cas.Rlist().First()
+ if typesw && n.Tag.(*ir.TypeSwitchGuard).Tag != nil {
+ cv := cas.Vars[0]
k := e.dcl(cv) // type switch variables have no ODCL.
if cv.Type().HasPointers() {
ks = append(ks, k.dotType(cv.Type(), cas, "switch case"))
}
}
- e.discards(cas.List())
- e.block(cas.Body())
+ e.discards(cas.List)
+ e.block(cas.Body)
}
if typesw {
- e.expr(e.teeHole(ks...), n.Left().(*ir.TypeSwitchGuard).Right())
+ e.expr(e.teeHole(ks...), n.Tag.(*ir.TypeSwitchGuard).X)
} else {
- e.discard(n.Left())
+ e.discard(n.Tag)
}
case ir.OSELECT:
n := n.(*ir.SelectStmt)
- for _, cas := range n.List().Slice() {
+ for _, cas := range n.Cases {
cas := cas.(*ir.CaseStmt)
- e.stmt(cas.Left())
- e.block(cas.Body())
+ e.stmt(cas.Comm)
+ e.block(cas.Body)
}
case ir.OSELRECV2:
n := n.(*ir.AssignListStmt)
- e.assign(n.List().First(), n.Rlist().First(), "selrecv", n)
- e.assign(n.List().Second(), nil, "selrecv", n)
+ e.assign(n.Lhs[0], n.Rhs[0], "selrecv", n)
+ e.assign(n.Lhs[1], nil, "selrecv", n)
case ir.ORECV:
// TODO(mdempsky): Consider e.discard(n.Left).
n := n.(*ir.UnaryExpr)
e.exprSkipInit(e.discardHole(), n) // already visited n.Ninit
case ir.OSEND:
n := n.(*ir.SendStmt)
- e.discard(n.Left())
- e.assignHeap(n.Right(), "send", n)
+ e.discard(n.Chan)
+ e.assignHeap(n.Value, "send", n)
case ir.OAS:
n := n.(*ir.AssignStmt)
- e.assign(n.Left(), n.Right(), "assign", n)
+ e.assign(n.X, n.Y, "assign", n)
case ir.OASOP:
n := n.(*ir.AssignOpStmt)
- e.assign(n.Left(), n.Right(), "assign", n)
+ e.assign(n.X, n.Y, "assign", n)
case ir.OAS2:
n := n.(*ir.AssignListStmt)
- for i, nl := range n.List().Slice() {
- e.assign(nl, n.Rlist().Index(i), "assign-pair", n)
+ for i, nl := range n.Lhs {
+ e.assign(nl, n.Rhs[i], "assign-pair", n)
}
case ir.OAS2DOTTYPE: // v, ok = x.(type)
n := n.(*ir.AssignListStmt)
- e.assign(n.List().First(), n.Rlist().First(), "assign-pair-dot-type", n)
- e.assign(n.List().Second(), nil, "assign-pair-dot-type", n)
+ e.assign(n.Lhs[0], n.Rhs[0], "assign-pair-dot-type", n)
+ e.assign(n.Lhs[1], nil, "assign-pair-dot-type", n)
case ir.OAS2MAPR: // v, ok = m[k]
n := n.(*ir.AssignListStmt)
- e.assign(n.List().First(), n.Rlist().First(), "assign-pair-mapr", n)
- e.assign(n.List().Second(), nil, "assign-pair-mapr", n)
+ e.assign(n.Lhs[0], n.Rhs[0], "assign-pair-mapr", n)
+ e.assign(n.Lhs[1], nil, "assign-pair-mapr", n)
case ir.OAS2RECV: // v, ok = <-ch
n := n.(*ir.AssignListStmt)
- e.assign(n.List().First(), n.Rlist().First(), "assign-pair-receive", n)
- e.assign(n.List().Second(), nil, "assign-pair-receive", n)
+ e.assign(n.Lhs[0], n.Rhs[0], "assign-pair-receive", n)
+ e.assign(n.Lhs[1], nil, "assign-pair-receive", n)
case ir.OAS2FUNC:
n := n.(*ir.AssignListStmt)
- e.stmts(n.Rlist().First().Init())
- e.call(e.addrs(n.List()), n.Rlist().First(), nil)
+ e.stmts(n.Rhs[0].Init())
+ e.call(e.addrs(n.Lhs), n.Rhs[0], nil)
case ir.ORETURN:
n := n.(*ir.ReturnStmt)
results := e.curfn.Type().Results().FieldSlice()
- for i, v := range n.List().Slice() {
+ for i, v := range n.Results {
e.assign(ir.AsNode(results[i].Nname), v, "return", n)
}
case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OCLOSE, ir.OCOPY, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
e.call(nil, n, nil)
case ir.OGO, ir.ODEFER:
n := n.(*ir.GoDeferStmt)
- e.stmts(n.Left().Init())
- e.call(nil, n.Left(), n)
+ e.stmts(n.Call.Init())
+ e.call(nil, n.Call, n)
case ir.ORETJMP:
// TODO(mdempsky): What do? esc.go just ignores it.
}
}
-func (e *Escape) stmts(l ir.Nodes) {
- for _, n := range l.Slice() {
+func (e *escape) stmts(l ir.Nodes) {
+ for _, n := range l {
e.stmt(n)
}
}
// block is like stmts, but preserves loopDepth.
-func (e *Escape) block(l ir.Nodes) {
+func (e *escape) block(l ir.Nodes) {
old := e.loopDepth
e.stmts(l)
e.loopDepth = old
// expr models evaluating an expression n and flowing the result into
// hole k.
-func (e *Escape) expr(k EscHole, n ir.Node) {
+func (e *escape) expr(k hole, n ir.Node) {
if n == nil {
return
}
e.exprSkipInit(k, n)
}
-func (e *Escape) exprSkipInit(k EscHole, n ir.Node) {
+func (e *escape) exprSkipInit(k hole, n ir.Node) {
if n == nil {
return
}
- lno := setlineno(n)
+ lno := ir.SetPos(n)
defer func() {
base.Pos = lno
}()
uintptrEscapesHack := k.uintptrEscapesHack
k.uintptrEscapesHack = false
- if uintptrEscapesHack && n.Op() == ir.OCONVNOP && n.(*ir.ConvExpr).Left().Type().IsUnsafePtr() {
+ if uintptrEscapesHack && n.Op() == ir.OCONVNOP && n.(*ir.ConvExpr).X.Type().IsUnsafePtr() {
// nop
} else if k.derefs >= 0 && !n.Type().HasPointers() {
k = e.discardHole()
case ir.ONAME:
n := n.(*ir.Name)
- if n.Class() == ir.PFUNC || n.Class() == ir.PEXTERN {
+ if n.Class_ == ir.PFUNC || n.Class_ == ir.PEXTERN {
return
}
e.flow(k, e.oldLoc(n))
case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT:
n := n.(*ir.UnaryExpr)
- e.discard(n.Left())
+ e.discard(n.X)
case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
n := n.(*ir.BinaryExpr)
- e.discard(n.Left())
- e.discard(n.Right())
+ e.discard(n.X)
+ e.discard(n.Y)
case ir.OANDAND, ir.OOROR:
n := n.(*ir.LogicalExpr)
- e.discard(n.Left())
- e.discard(n.Right())
+ e.discard(n.X)
+ e.discard(n.Y)
case ir.OADDR:
n := n.(*ir.AddrExpr)
- e.expr(k.addr(n, "address-of"), n.Left()) // "address-of"
+ e.expr(k.addr(n, "address-of"), n.X) // "address-of"
case ir.ODEREF:
n := n.(*ir.StarExpr)
- e.expr(k.deref(n, "indirection"), n.Left()) // "indirection"
+ e.expr(k.deref(n, "indirection"), n.X) // "indirection"
case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER:
n := n.(*ir.SelectorExpr)
- e.expr(k.note(n, "dot"), n.Left())
+ e.expr(k.note(n, "dot"), n.X)
case ir.ODOTPTR:
n := n.(*ir.SelectorExpr)
- e.expr(k.deref(n, "dot of pointer"), n.Left()) // "dot of pointer"
+ e.expr(k.deref(n, "dot of pointer"), n.X) // "dot of pointer"
case ir.ODOTTYPE, ir.ODOTTYPE2:
n := n.(*ir.TypeAssertExpr)
- e.expr(k.dotType(n.Type(), n, "dot"), n.Left())
+ e.expr(k.dotType(n.Type(), n, "dot"), n.X)
case ir.OINDEX:
n := n.(*ir.IndexExpr)
- if n.Left().Type().IsArray() {
- e.expr(k.note(n, "fixed-array-index-of"), n.Left())
+ if n.X.Type().IsArray() {
+ e.expr(k.note(n, "fixed-array-index-of"), n.X)
} else {
// TODO(mdempsky): Fix why reason text.
- e.expr(k.deref(n, "dot of pointer"), n.Left())
+ e.expr(k.deref(n, "dot of pointer"), n.X)
}
- e.discard(n.Right())
+ e.discard(n.Index)
case ir.OINDEXMAP:
n := n.(*ir.IndexExpr)
- e.discard(n.Left())
- e.discard(n.Right())
+ e.discard(n.X)
+ e.discard(n.Index)
case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR:
n := n.(*ir.SliceExpr)
- e.expr(k.note(n, "slice"), n.Left())
+ e.expr(k.note(n, "slice"), n.X)
low, high, max := n.SliceBounds()
e.discard(low)
e.discard(high)
case ir.OCONV, ir.OCONVNOP:
n := n.(*ir.ConvExpr)
- if checkPtr(e.curfn, 2) && n.Type().IsUnsafePtr() && n.Left().Type().IsPtr() {
+ if ir.ShouldCheckPtr(e.curfn, 2) && n.Type().IsUnsafePtr() && n.X.Type().IsPtr() {
// When -d=checkptr=2 is enabled, treat
// conversions to unsafe.Pointer as an
// escaping operation. This allows better
// runtime instrumentation, since we can more
// easily detect object boundaries on the heap
// than the stack.
- e.assignHeap(n.Left(), "conversion to unsafe.Pointer", n)
- } else if n.Type().IsUnsafePtr() && n.Left().Type().IsUintptr() {
- e.unsafeValue(k, n.Left())
+ e.assignHeap(n.X, "conversion to unsafe.Pointer", n)
+ } else if n.Type().IsUnsafePtr() && n.X.Type().IsUintptr() {
+ e.unsafeValue(k, n.X)
} else {
- e.expr(k, n.Left())
+ e.expr(k, n.X)
}
case ir.OCONVIFACE:
n := n.(*ir.ConvExpr)
- if !n.Left().Type().IsInterface() && !isdirectiface(n.Left().Type()) {
+ if !n.X.Type().IsInterface() && !types.IsDirectIface(n.X.Type()) {
k = e.spill(k, n)
}
- e.expr(k.note(n, "interface-converted"), n.Left())
+ e.expr(k.note(n, "interface-converted"), n.X)
case ir.ORECV:
n := n.(*ir.UnaryExpr)
- e.discard(n.Left())
+ e.discard(n.X)
case ir.OCALLMETH, ir.OCALLFUNC, ir.OCALLINTER, ir.OLEN, ir.OCAP, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCOPY:
- e.call([]EscHole{k}, n, nil)
+ e.call([]hole{k}, n, nil)
case ir.ONEW:
n := n.(*ir.UnaryExpr)
case ir.OMAKESLICE:
n := n.(*ir.MakeExpr)
e.spill(k, n)
- e.discard(n.Left())
- e.discard(n.Right())
+ e.discard(n.Len)
+ e.discard(n.Cap)
case ir.OMAKECHAN:
n := n.(*ir.MakeExpr)
- e.discard(n.Left())
+ e.discard(n.Len)
case ir.OMAKEMAP:
n := n.(*ir.MakeExpr)
e.spill(k, n)
- e.discard(n.Left())
+ e.discard(n.Len)
case ir.ORECOVER:
// nop
n := n.(*ir.CallPartExpr)
closureK := e.spill(k, n)
- m := callpartMethod(n)
+ m := n.Method
// We don't know how the method value will be called
// later, so conservatively assume the result
//
// TODO(mdempsky): Change ks into a callback, so that
// we don't have to create this slice?
- var ks []EscHole
+ var ks []hole
for i := m.Type.NumResults(); i > 0; i-- {
ks = append(ks, e.heapHole())
}
name, _ := m.Nname.(*ir.Name)
paramK := e.tagHole(ks, name, m.Type.Recv())
- e.expr(e.teeHole(paramK, closureK), n.Left())
+ e.expr(e.teeHole(paramK, closureK), n.X)
case ir.OPTRLIT:
n := n.(*ir.AddrExpr)
- e.expr(e.spill(k, n), n.Left())
+ e.expr(e.spill(k, n), n.X)
case ir.OARRAYLIT:
n := n.(*ir.CompLitExpr)
- for _, elt := range n.List().Slice() {
+ for _, elt := range n.List {
if elt.Op() == ir.OKEY {
- elt = elt.(*ir.KeyExpr).Right()
+ elt = elt.(*ir.KeyExpr).Value
}
e.expr(k.note(n, "array literal element"), elt)
}
k = e.spill(k, n)
k.uintptrEscapesHack = uintptrEscapesHack // for ...uintptr parameters
- for _, elt := range n.List().Slice() {
+ for _, elt := range n.List {
if elt.Op() == ir.OKEY {
- elt = elt.(*ir.KeyExpr).Right()
+ elt = elt.(*ir.KeyExpr).Value
}
e.expr(k.note(n, "slice-literal-element"), elt)
}
case ir.OSTRUCTLIT:
n := n.(*ir.CompLitExpr)
- for _, elt := range n.List().Slice() {
- e.expr(k.note(n, "struct literal element"), elt.(*ir.StructKeyExpr).Left())
+ for _, elt := range n.List {
+ e.expr(k.note(n, "struct literal element"), elt.(*ir.StructKeyExpr).Value)
}
case ir.OMAPLIT:
e.spill(k, n)
// Map keys and values are always stored in the heap.
- for _, elt := range n.List().Slice() {
+ for _, elt := range n.List {
elt := elt.(*ir.KeyExpr)
- e.assignHeap(elt.Left(), "map literal key", n)
- e.assignHeap(elt.Right(), "map literal value", n)
+ e.assignHeap(elt.Key, "map literal key", n)
+ e.assignHeap(elt.Value, "map literal value", n)
}
case ir.OCLOSURE:
k = e.spill(k, n)
// Link addresses of captured variables to closure.
- for _, v := range n.Func().ClosureVars {
+ for _, v := range n.Func.ClosureVars {
k := k
if !v.Byval() {
k = k.addr(v, "reference")
case ir.ORUNES2STR, ir.OBYTES2STR, ir.OSTR2RUNES, ir.OSTR2BYTES, ir.ORUNESTR:
n := n.(*ir.ConvExpr)
e.spill(k, n)
- e.discard(n.Left())
+ e.discard(n.X)
case ir.OADDSTR:
n := n.(*ir.AddStringExpr)
// Arguments of OADDSTR never escape;
// runtime.concatstrings makes sure of that.
- e.discards(n.List())
+ e.discards(n.List)
}
}
// unsafeValue evaluates a uintptr-typed arithmetic expression looking
// for conversions from an unsafe.Pointer.
-func (e *Escape) unsafeValue(k EscHole, n ir.Node) {
+func (e *escape) unsafeValue(k hole, n ir.Node) {
if n.Type().Kind() != types.TUINTPTR {
base.Fatalf("unexpected type %v for %v", n.Type(), n)
}
switch n.Op() {
case ir.OCONV, ir.OCONVNOP:
n := n.(*ir.ConvExpr)
- if n.Left().Type().IsUnsafePtr() {
- e.expr(k, n.Left())
+ if n.X.Type().IsUnsafePtr() {
+ e.expr(k, n.X)
} else {
- e.discard(n.Left())
+ e.discard(n.X)
}
case ir.ODOTPTR:
n := n.(*ir.SelectorExpr)
- if isReflectHeaderDataField(n) {
- e.expr(k.deref(n, "reflect.Header.Data"), n.Left())
+ if ir.IsReflectHeaderDataField(n) {
+ e.expr(k.deref(n, "reflect.Header.Data"), n.X)
} else {
- e.discard(n.Left())
+ e.discard(n.X)
}
case ir.OPLUS, ir.ONEG, ir.OBITNOT:
n := n.(*ir.UnaryExpr)
- e.unsafeValue(k, n.Left())
+ e.unsafeValue(k, n.X)
case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OAND, ir.OANDNOT:
n := n.(*ir.BinaryExpr)
- e.unsafeValue(k, n.Left())
- e.unsafeValue(k, n.Right())
+ e.unsafeValue(k, n.X)
+ e.unsafeValue(k, n.Y)
case ir.OLSH, ir.ORSH:
n := n.(*ir.BinaryExpr)
- e.unsafeValue(k, n.Left())
+ e.unsafeValue(k, n.X)
// RHS need not be uintptr-typed (#32959) and can't meaningfully
// flow pointers anyway.
- e.discard(n.Right())
+ e.discard(n.Y)
default:
e.exprSkipInit(e.discardHole(), n)
}
// discard evaluates an expression n for side-effects, but discards
// its value.
-func (e *Escape) discard(n ir.Node) {
+func (e *escape) discard(n ir.Node) {
e.expr(e.discardHole(), n)
}
-func (e *Escape) discards(l ir.Nodes) {
- for _, n := range l.Slice() {
+func (e *escape) discards(l ir.Nodes) {
+ for _, n := range l {
e.discard(n)
}
}
// addr evaluates an addressable expression n and returns an EscHole
// that represents storing into the represented location.
-func (e *Escape) addr(n ir.Node) EscHole {
+func (e *escape) addr(n ir.Node) hole {
if n == nil || ir.IsBlank(n) {
// Can happen in select case, range, maybe others.
return e.discardHole()
base.Fatalf("unexpected addr: %v", n)
case ir.ONAME:
n := n.(*ir.Name)
- if n.Class() == ir.PEXTERN {
+ if n.Class_ == ir.PEXTERN {
break
}
k = e.oldLoc(n).asHole()
e.addr(n.Name_)
case ir.ODOT:
n := n.(*ir.SelectorExpr)
- k = e.addr(n.Left())
+ k = e.addr(n.X)
case ir.OINDEX:
n := n.(*ir.IndexExpr)
- e.discard(n.Right())
- if n.Left().Type().IsArray() {
- k = e.addr(n.Left())
+ e.discard(n.Index)
+ if n.X.Type().IsArray() {
+ k = e.addr(n.X)
} else {
- e.discard(n.Left())
+ e.discard(n.X)
}
case ir.ODEREF, ir.ODOTPTR:
e.discard(n)
case ir.OINDEXMAP:
n := n.(*ir.IndexExpr)
- e.discard(n.Left())
- e.assignHeap(n.Right(), "key of map put", n)
+ e.discard(n.X)
+ e.assignHeap(n.Index, "key of map put", n)
}
if !n.Type().HasPointers() {
return k
}
-func (e *Escape) addrs(l ir.Nodes) []EscHole {
- var ks []EscHole
- for _, n := range l.Slice() {
+func (e *escape) addrs(l ir.Nodes) []hole {
+ var ks []hole
+ for _, n := range l {
ks = append(ks, e.addr(n))
}
return ks
}
// assign evaluates the assignment dst = src.
-func (e *Escape) assign(dst, src ir.Node, why string, where ir.Node) {
+func (e *escape) assign(dst, src ir.Node, why string, where ir.Node) {
// Filter out some no-op assignments for escape analysis.
ignore := dst != nil && src != nil && isSelfAssign(dst, src)
if ignore && base.Flag.LowerM != 0 {
}
k := e.addr(dst)
- if dst != nil && dst.Op() == ir.ODOTPTR && isReflectHeaderDataField(dst) {
+ if dst != nil && dst.Op() == ir.ODOTPTR && ir.IsReflectHeaderDataField(dst) {
e.unsafeValue(e.heapHole().note(where, why), src)
} else {
if ignore {
}
}
-func (e *Escape) assignHeap(src ir.Node, why string, where ir.Node) {
+func (e *escape) assignHeap(src ir.Node, why string, where ir.Node) {
e.expr(e.heapHole().note(where, why), src)
}
// call evaluates a call expressions, including builtin calls. ks
// should contain the holes representing where the function callee's
// results flows; where is the OGO/ODEFER context of the call, if any.
-func (e *Escape) call(ks []EscHole, call, where ir.Node) {
+func (e *escape) call(ks []hole, call, where ir.Node) {
topLevelDefer := where != nil && where.Op() == ir.ODEFER && e.loopDepth == 1
if topLevelDefer {
// force stack allocation of defer record, unless
// open-coded defers are used (see ssa.go)
- where.SetEsc(EscNever)
+ where.SetEsc(ir.EscNever)
}
- argument := func(k EscHole, arg ir.Node) {
+ argument := func(k hole, arg ir.Node) {
if topLevelDefer {
// Top level defers arguments don't escape to
// heap, but they do need to last until end of
case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
call := call.(*ir.CallExpr)
- fixVariadicCall(call)
+ typecheck.FixVariadicCall(call)
// Pick out the function callee, if statically known.
var fn *ir.Name
switch call.Op() {
case ir.OCALLFUNC:
- switch v := staticValue(call.Left()); {
- case v.Op() == ir.ONAME && v.(*ir.Name).Class() == ir.PFUNC:
+ switch v := ir.StaticValue(call.X); {
+ case v.Op() == ir.ONAME && v.(*ir.Name).Class_ == ir.PFUNC:
fn = v.(*ir.Name)
case v.Op() == ir.OCLOSURE:
- fn = v.(*ir.ClosureExpr).Func().Nname
+ fn = v.(*ir.ClosureExpr).Func.Nname
}
case ir.OCALLMETH:
- fn = methodExprName(call.Left())
+ fn = ir.MethodExprName(call.X)
}
- fntype := call.Left().Type()
+ fntype := call.X.Type()
if fn != nil {
fntype = fn.Type()
}
}
if r := fntype.Recv(); r != nil {
- argument(e.tagHole(ks, fn, r), call.Left().(*ir.SelectorExpr).Left())
+ argument(e.tagHole(ks, fn, r), call.X.(*ir.SelectorExpr).X)
} else {
// Evaluate callee function expression.
- argument(e.discardHole(), call.Left())
+ argument(e.discardHole(), call.X)
}
- args := call.List().Slice()
+ args := call.Args
for i, param := range fntype.Params().FieldSlice() {
argument(e.tagHole(ks, fn, param), args[i])
}
case ir.OAPPEND:
call := call.(*ir.CallExpr)
- args := call.List().Slice()
+ args := call.Args
// Appendee slice may flow directly to the result, if
// it has enough capacity. Alternatively, a new heap
}
argument(appendeeK, args[0])
- if call.IsDDD() {
+ if call.IsDDD {
appendedK := e.discardHole()
if args[1].Type().IsSlice() && args[1].Type().Elem().HasPointers() {
appendedK = e.heapHole().deref(call, "appended slice...")
case ir.OCOPY:
call := call.(*ir.BinaryExpr)
- argument(e.discardHole(), call.Left())
+ argument(e.discardHole(), call.X)
copiedK := e.discardHole()
- if call.Right().Type().IsSlice() && call.Right().Type().Elem().HasPointers() {
+ if call.Y.Type().IsSlice() && call.Y.Type().Elem().HasPointers() {
copiedK = e.heapHole().deref(call, "copied slice")
}
- argument(copiedK, call.Right())
+ argument(copiedK, call.Y)
case ir.OPANIC:
call := call.(*ir.UnaryExpr)
- argument(e.heapHole(), call.Left())
+ argument(e.heapHole(), call.X)
case ir.OCOMPLEX:
call := call.(*ir.BinaryExpr)
- argument(e.discardHole(), call.Left())
- argument(e.discardHole(), call.Right())
+ argument(e.discardHole(), call.X)
+ argument(e.discardHole(), call.Y)
case ir.ODELETE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
call := call.(*ir.CallExpr)
- for _, arg := range call.List().Slice() {
+ for _, arg := range call.Args {
argument(e.discardHole(), arg)
}
case ir.OLEN, ir.OCAP, ir.OREAL, ir.OIMAG, ir.OCLOSE:
call := call.(*ir.UnaryExpr)
- argument(e.discardHole(), call.Left())
+ argument(e.discardHole(), call.X)
}
}
// ks should contain the holes representing where the function
// callee's results flows. fn is the statically-known callee function,
// if any.
-func (e *Escape) tagHole(ks []EscHole, fn *ir.Name, param *types.Field) EscHole {
+func (e *escape) tagHole(ks []hole, fn *ir.Name, param *types.Field) hole {
// If this is a dynamic call, we can't rely on param.Note.
if fn == nil {
return e.heapHole()
// Call to previously tagged function.
- if param.Note == uintptrEscapesTag {
+ if param.Note == UintptrEscapesNote {
k := e.heapHole()
k.uintptrEscapesHack = true
return k
}
- var tagKs []EscHole
+ var tagKs []hole
- esc := ParseLeaks(param.Note)
+ esc := parseLeaks(param.Note)
if x := esc.Heap(); x >= 0 {
tagKs = append(tagKs, e.heapHole().shift(x))
}
// fn has not yet been analyzed, so its parameters and results
// should be incorporated directly into the flow graph instead of
// relying on its escape analysis tagging.
-func (e *Escape) inMutualBatch(fn *ir.Name) bool {
- if fn.Defn != nil && fn.Defn.Esc() < EscFuncTagged {
- if fn.Defn.Esc() == EscFuncUnknown {
+func (e *escape) inMutualBatch(fn *ir.Name) bool {
+ if fn.Defn != nil && fn.Defn.Esc() < escFuncTagged {
+ if fn.Defn.Esc() == escFuncUnknown {
base.Fatalf("graph inconsistency")
}
return true
return false
}
-// An EscHole represents a context for evaluation a Go
+// A hole represents a context for evaluating a Go
// expression. E.g., when evaluating p in "x = **p", we'd have a hole
// with dst==x and derefs==2.
-type EscHole struct {
- dst *EscLocation
+type hole struct {
+ dst *location
derefs int // >= -1
- notes *EscNote
+ notes *note
// uintptrEscapesHack indicates this context is evaluating an
// argument for a //go:uintptrescapes function.
uintptrEscapesHack bool
}
-type EscNote struct {
- next *EscNote
+type note struct {
+ next *note
where ir.Node
why string
}
-func (k EscHole) note(where ir.Node, why string) EscHole {
+func (k hole) note(where ir.Node, why string) hole {
if where == nil || why == "" {
base.Fatalf("note: missing where/why")
}
if base.Flag.LowerM >= 2 || logopt.Enabled() {
- k.notes = &EscNote{
+ k.notes = ¬e{
next: k.notes,
where: where,
why: why,
return k
}
-func (k EscHole) shift(delta int) EscHole {
+func (k hole) shift(delta int) hole {
k.derefs += delta
if k.derefs < -1 {
base.Fatalf("derefs underflow: %v", k.derefs)
return k
}
-func (k EscHole) deref(where ir.Node, why string) EscHole { return k.shift(1).note(where, why) }
-func (k EscHole) addr(where ir.Node, why string) EscHole { return k.shift(-1).note(where, why) }
+func (k hole) deref(where ir.Node, why string) hole { return k.shift(1).note(where, why) }
+func (k hole) addr(where ir.Node, why string) hole { return k.shift(-1).note(where, why) }
-func (k EscHole) dotType(t *types.Type, where ir.Node, why string) EscHole {
- if !t.IsInterface() && !isdirectiface(t) {
+func (k hole) dotType(t *types.Type, where ir.Node, why string) hole {
+ if !t.IsInterface() && !types.IsDirectIface(t) {
k = k.shift(1)
}
return k.note(where, why)
// teeHole returns a new hole that flows into each hole of ks,
// similar to the Unix tee(1) command.
-func (e *Escape) teeHole(ks ...EscHole) EscHole {
+func (e *escape) teeHole(ks ...hole) hole {
if len(ks) == 0 {
return e.discardHole()
}
return loc.asHole()
}
-func (e *Escape) dcl(n ir.Node) EscHole {
+func (e *escape) dcl(n ir.Node) hole {
loc := e.oldLoc(n)
loc.loopDepth = e.loopDepth
return loc.asHole()
// spill allocates a new location associated with expression n, flows
// its address to k, and returns a hole that flows values to it. It's
// intended for use with most expressions that allocate storage.
-func (e *Escape) spill(k EscHole, n ir.Node) EscHole {
+func (e *escape) spill(k hole, n ir.Node) hole {
loc := e.newLoc(n, true)
e.flow(k.addr(n, "spill"), loc)
return loc.asHole()
// later returns a new hole that flows into k, but some time later.
// Its main effect is to prevent immediate reuse of temporary
// variables introduced during Order.
-func (e *Escape) later(k EscHole) EscHole {
+func (e *escape) later(k hole) hole {
loc := e.newLoc(nil, false)
e.flow(k, loc)
return loc.asHole()
return n
}
-func (e *Escape) newLoc(n ir.Node, transient bool) *EscLocation {
+func (e *escape) newLoc(n ir.Node, transient bool) *location {
if e.curfn == nil {
base.Fatalf("e.curfn isn't set")
}
}
n = canonicalNode(n)
- loc := &EscLocation{
+ loc := &location{
n: n,
curfn: e.curfn,
loopDepth: e.loopDepth,
}
n.SetOpt(loc)
- if why := heapAllocReason(n); why != "" {
+ if why := HeapAllocReason(n); why != "" {
e.flow(e.heapHole().addr(n, why), loc)
}
}
return loc
}
-func (e *Escape) oldLoc(n ir.Node) *EscLocation {
+func (e *escape) oldLoc(n ir.Node) *location {
n = canonicalNode(n)
- return n.Opt().(*EscLocation)
+ return n.Opt().(*location)
}
-func (l *EscLocation) asHole() EscHole {
- return EscHole{dst: l}
+func (l *location) asHole() hole {
+ return hole{dst: l}
}
-func (e *Escape) flow(k EscHole, src *EscLocation) {
+func (e *escape) flow(k hole, src *location) {
dst := k.dst
if dst == &e.blankLoc {
return
}
// TODO(mdempsky): Deduplicate edges?
- dst.edges = append(dst.edges, EscEdge{src: src, derefs: k.derefs, notes: k.notes})
+ dst.edges = append(dst.edges, edge{src: src, derefs: k.derefs, notes: k.notes})
}
-func (e *Escape) heapHole() EscHole { return e.heapLoc.asHole() }
-func (e *Escape) discardHole() EscHole { return e.blankLoc.asHole() }
+func (e *escape) heapHole() hole { return e.heapLoc.asHole() }
+func (e *escape) discardHole() hole { return e.blankLoc.asHole() }
// walkAll computes the minimal dereferences between all pairs of
// locations.
-func (e *Escape) walkAll() {
+func (e *escape) walkAll() {
// We use a work queue to keep track of locations that we need
// to visit, and repeatedly walk until we reach a fixed point.
//
// happen at most once. So we take Θ(len(e.allLocs)) walks.
// LIFO queue, has enough room for e.allLocs and e.heapLoc.
- todo := make([]*EscLocation, 0, len(e.allLocs)+1)
- enqueue := func(loc *EscLocation) {
+ todo := make([]*location, 0, len(e.allLocs)+1)
+ enqueue := func(loc *location) {
if !loc.queued {
todo = append(todo, loc)
loc.queued = true
// walkOne computes the minimal number of dereferences from root to
// all other locations.
-func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLocation)) {
+func (e *escape) walkOne(root *location, walkgen uint32, enqueue func(*location)) {
// The data flow graph has negative edges (from addressing
// operations), so we use the Bellman-Ford algorithm. However,
// we don't have to worry about infinite negative cycles since
root.derefs = 0
root.dst = nil
- todo := []*EscLocation{root} // LIFO queue
+ todo := []*location{root} // LIFO queue
for len(todo) > 0 {
l := todo[len(todo)-1]
todo = todo[:len(todo)-1]
}
// explainPath prints an explanation of how src flows to the walk root.
-func (e *Escape) explainPath(root, src *EscLocation) []*logopt.LoggedOpt {
- visited := make(map[*EscLocation]bool)
+func (e *escape) explainPath(root, src *location) []*logopt.LoggedOpt {
+ visited := make(map[*location]bool)
pos := base.FmtPos(src.n.Pos())
var explanation []*logopt.LoggedOpt
for {
return explanation
}
-func (e *Escape) explainFlow(pos string, dst, srcloc *EscLocation, derefs int, notes *EscNote, explanation []*logopt.LoggedOpt) []*logopt.LoggedOpt {
+func (e *escape) explainFlow(pos string, dst, srcloc *location, derefs int, notes *note, explanation []*logopt.LoggedOpt) []*logopt.LoggedOpt {
ops := "&"
if derefs >= 0 {
ops = strings.Repeat("*", derefs)
return explanation
}
-func (e *Escape) explainLoc(l *EscLocation) string {
+func (e *escape) explainLoc(l *location) string {
if l == &e.heapLoc {
return "{heap}"
}
// outlives reports whether values stored in l may survive beyond
// other's lifetime if stack allocated.
-func (e *Escape) outlives(l, other *EscLocation) bool {
+func (e *escape) outlives(l, other *location) bool {
// The heap outlives everything.
if l.escapes {
return true
}
// leak records that parameter l leaks to sink.
-func (l *EscLocation) leakTo(sink *EscLocation, derefs int) {
+func (l *location) leakTo(sink *location, derefs int) {
// If sink is a result parameter and we can fit return bits
// into the escape analysis tag, then record a return leak.
if sink.isName(ir.PPARAMOUT) && sink.curfn == l.curfn {
l.paramEsc.AddHeap(derefs)
}
-func (e *Escape) finish(fns []*ir.Func) {
+func (e *escape) finish(fns []*ir.Func) {
// Record parameter tags for package export data.
for _, fn := range fns {
- fn.SetEsc(EscFuncTagged)
+ fn.SetEsc(escFuncTagged)
narg := 0
for _, fs := range &types.RecvsParams {
logopt.LogOpt(n.Pos(), "escape", "escape", ir.FuncName(e.curfn))
}
}
- n.SetEsc(EscHeap)
+ n.SetEsc(ir.EscHeap)
addrescapes(n)
} else {
if base.Flag.LowerM != 0 && n.Op() != ir.ONAME {
base.WarnfAt(n.Pos(), "%v does not escape", n)
}
- n.SetEsc(EscNone)
+ n.SetEsc(ir.EscNone)
if loc.transient {
switch n.Op() {
case ir.OCLOSURE:
}
}
-func (l *EscLocation) isName(c ir.Class) bool {
- return l.n != nil && l.n.Op() == ir.ONAME && l.n.(*ir.Name).Class() == c
+func (l *location) isName(c ir.Class) bool {
+ return l.n != nil && l.n.Op() == ir.ONAME && l.n.(*ir.Name).Class_ == c
}
const numEscResults = 7
-// An EscLeaks represents a set of assignment flows from a parameter
+// A leaks represents a set of assignment flows from a parameter
// to the heap or to any of its function's (first numEscResults)
// result parameters.
-type EscLeaks [1 + numEscResults]uint8
+type leaks [1 + numEscResults]uint8
// Empty reports whether l is an empty set (i.e., no assignment flows).
-func (l EscLeaks) Empty() bool { return l == EscLeaks{} }
+func (l leaks) Empty() bool { return l == leaks{} }
// Heap returns the minimum deref count of any assignment flow from l
// to the heap. If no such flows exist, Heap returns -1.
-func (l EscLeaks) Heap() int { return l.get(0) }
+func (l leaks) Heap() int { return l.get(0) }
// Result returns the minimum deref count of any assignment flow from
// l to its function's i'th result parameter. If no such flows exist,
// Result returns -1.
-func (l EscLeaks) Result(i int) int { return l.get(1 + i) }
+func (l leaks) Result(i int) int { return l.get(1 + i) }
// AddHeap adds an assignment flow from l to the heap.
-func (l *EscLeaks) AddHeap(derefs int) { l.add(0, derefs) }
+func (l *leaks) AddHeap(derefs int) { l.add(0, derefs) }
// AddResult adds an assignment flow from l to its function's i'th
// result parameter.
-func (l *EscLeaks) AddResult(i, derefs int) { l.add(1+i, derefs) }
+func (l *leaks) AddResult(i, derefs int) { l.add(1+i, derefs) }
-func (l *EscLeaks) setResult(i, derefs int) { l.set(1+i, derefs) }
+func (l *leaks) setResult(i, derefs int) { l.set(1+i, derefs) }
-func (l EscLeaks) get(i int) int { return int(l[i]) - 1 }
+func (l leaks) get(i int) int { return int(l[i]) - 1 }
-func (l *EscLeaks) add(i, derefs int) {
+func (l *leaks) add(i, derefs int) {
if old := l.get(i); old < 0 || derefs < old {
l.set(i, derefs)
}
}
-func (l *EscLeaks) set(i, derefs int) {
+func (l *leaks) set(i, derefs int) {
v := derefs + 1
if v < 0 {
base.Fatalf("invalid derefs count: %v", derefs)
// Optimize removes result flow paths that are equal in length or
// longer than the shortest heap flow path.
-func (l *EscLeaks) Optimize() {
+func (l *leaks) Optimize() {
// If we have a path to the heap, then there's no use in
// keeping equal or longer paths elsewhere.
if x := l.Heap(); x >= 0 {
}
}
-var leakTagCache = map[EscLeaks]string{}
+var leakTagCache = map[leaks]string{}
// Encode converts l into a binary string for export data.
-func (l EscLeaks) Encode() string {
+func (l leaks) Encode() string {
if l.Heap() == 0 {
// Space optimization: empty string encodes more
// efficiently in export data.
return s
}
-// ParseLeaks parses a binary string representing an EscLeaks.
-func ParseLeaks(s string) EscLeaks {
- var l EscLeaks
+// parseLeaks parses a binary string representing a leaks.
+func parseLeaks(s string) leaks {
+ var l leaks
if !strings.HasPrefix(s, "esc:") {
l.AddHeap(0)
return l
return l
}
-func escapes(all []ir.Node) {
- visitBottomUp(all, escapeFuncs)
-}
-
-const (
- EscFuncUnknown = 0 + iota
- EscFuncPlanned
- EscFuncStarted
- EscFuncTagged
-)
-
-func min8(a, b int8) int8 {
- if a < b {
- return a
- }
- return b
-}
-
-func max8(a, b int8) int8 {
- if a > b {
- return a
- }
- return b
+func Funcs(all []ir.Node) {
+ ir.VisitFuncsBottomUp(all, Batch)
}
const (
- EscUnknown = iota
- EscNone // Does not escape to heap, result, or parameters.
- EscHeap // Reachable from the heap
- EscNever // By construction will not escape.
+ escFuncUnknown = 0 + iota
+ escFuncPlanned
+ escFuncStarted
+ escFuncTagged
)
// funcSym returns fn.Nname.Sym if no nils are encountered along the way.
return false
case ir.ODEREF:
dst := dst.(*ir.StarExpr)
- dstX = dst.Left()
+ dstX = dst.X
case ir.ODOTPTR:
dst := dst.(*ir.SelectorExpr)
- dstX = dst.Left()
+ dstX = dst.X
}
if dstX.Op() != ir.ONAME {
return false
// For slicing an array (not pointer to array), there is an implicit OADDR.
// We check that to determine non-pointer array slicing.
src := src.(*ir.SliceExpr)
- if src.Left().Op() == ir.OADDR {
+ if src.X.Op() == ir.OADDR {
return false
}
default:
}
// slice is applied to ONAME dereference.
var baseX ir.Node
- switch base := src.(*ir.SliceExpr).Left(); base.Op() {
+ switch base := src.(*ir.SliceExpr).X; base.Op() {
default:
return false
case ir.ODEREF:
base := base.(*ir.StarExpr)
- baseX = base.Left()
+ baseX = base.X
case ir.ODOTPTR:
base := base.(*ir.SelectorExpr)
- baseX = base.Left()
+ baseX = base.X
}
if baseX.Op() != ir.ONAME {
return false
// Safe trailing accessors that are permitted to differ.
dst := dst.(*ir.SelectorExpr)
src := src.(*ir.SelectorExpr)
- return samesafeexpr(dst.Left(), src.Left())
+ return ir.SameSafeExpr(dst.X, src.X)
case ir.OINDEX:
dst := dst.(*ir.IndexExpr)
src := src.(*ir.IndexExpr)
- if mayAffectMemory(dst.Right()) || mayAffectMemory(src.Right()) {
+ if mayAffectMemory(dst.Index) || mayAffectMemory(src.Index) {
return false
}
- return samesafeexpr(dst.Left(), src.Left())
+ return ir.SameSafeExpr(dst.X, src.X)
default:
return false
}
case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD:
n := n.(*ir.BinaryExpr)
- return mayAffectMemory(n.Left()) || mayAffectMemory(n.Right())
+ return mayAffectMemory(n.X) || mayAffectMemory(n.Y)
case ir.OINDEX:
n := n.(*ir.IndexExpr)
- return mayAffectMemory(n.Left()) || mayAffectMemory(n.Right())
+ return mayAffectMemory(n.X) || mayAffectMemory(n.Index)
case ir.OCONVNOP, ir.OCONV:
n := n.(*ir.ConvExpr)
- return mayAffectMemory(n.Left())
+ return mayAffectMemory(n.X)
case ir.OLEN, ir.OCAP, ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
n := n.(*ir.UnaryExpr)
- return mayAffectMemory(n.Left())
+ return mayAffectMemory(n.X)
case ir.ODOT, ir.ODOTPTR:
n := n.(*ir.SelectorExpr)
- return mayAffectMemory(n.Left())
+ return mayAffectMemory(n.X)
case ir.ODEREF:
n := n.(*ir.StarExpr)
- return mayAffectMemory(n.Left())
+ return mayAffectMemory(n.X)
default:
return true
}
}
-// heapAllocReason returns the reason the given Node must be heap
+// HeapAllocReason returns the reason the given Node must be heap
// allocated, or the empty string if it doesn't.
-func heapAllocReason(n ir.Node) string {
+func HeapAllocReason(n ir.Node) string {
if n.Type() == nil {
return ""
}
// Parameters are always passed via the stack.
if n.Op() == ir.ONAME {
n := n.(*ir.Name)
- if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT {
+ if n.Class_ == ir.PPARAM || n.Class_ == ir.PPARAMOUT {
return ""
}
}
- if n.Type().Width > maxStackVarSize {
+ if n.Type().Width > ir.MaxStackVarSize {
return "too large for stack"
}
- if (n.Op() == ir.ONEW || n.Op() == ir.OPTRLIT) && n.Type().Elem().Width >= maxImplicitStackVarSize {
+ if (n.Op() == ir.ONEW || n.Op() == ir.OPTRLIT) && n.Type().Elem().Width >= ir.MaxImplicitStackVarSize {
return "too large for stack"
}
- if n.Op() == ir.OCLOSURE && closureType(n.(*ir.ClosureExpr)).Size() >= maxImplicitStackVarSize {
+ if n.Op() == ir.OCLOSURE && typecheck.ClosureType(n.(*ir.ClosureExpr)).Size() >= ir.MaxImplicitStackVarSize {
return "too large for stack"
}
- if n.Op() == ir.OCALLPART && partialCallType(n.(*ir.CallPartExpr)).Size() >= maxImplicitStackVarSize {
+ if n.Op() == ir.OCALLPART && typecheck.PartialCallType(n.(*ir.CallPartExpr)).Size() >= ir.MaxImplicitStackVarSize {
return "too large for stack"
}
if n.Op() == ir.OMAKESLICE {
n := n.(*ir.MakeExpr)
- r := n.Right()
+ r := n.Cap
if r == nil {
- r = n.Left()
+ r = n.Len
}
- if !smallintconst(r) {
+ if !ir.IsSmallIntConst(r) {
return "non-constant size"
}
- if t := n.Type(); t.Elem().Width != 0 && ir.Int64Val(r) >= maxImplicitStackVarSize/t.Elem().Width {
+ if t := n.Type(); t.Elem().Width != 0 && ir.Int64Val(r) >= ir.MaxImplicitStackVarSize/t.Elem().Width {
return "too large for stack"
}
}
case ir.ONAME:
n := n.(*ir.Name)
- if n == nodfp {
+ if n == ir.RegFP {
break
}
// if this is a tmpname (PAUTO), it was tagged by tmpname as not escaping.
// on PPARAM it means something different.
- if n.Class() == ir.PAUTO && n.Esc() == EscNever {
+ if n.Class_ == ir.PAUTO && n.Esc() == ir.EscNever {
break
}
break
}
- if n.Class() != ir.PPARAM && n.Class() != ir.PPARAMOUT && n.Class() != ir.PAUTO {
+ if n.Class_ != ir.PPARAM && n.Class_ != ir.PPARAMOUT && n.Class_ != ir.PAUTO {
break
}
//
// then we're analyzing the inner closure but we need to move x to the
// heap in f, not in the inner closure. Flip over to f before calling moveToHeap.
- oldfn := Curfn
- Curfn = n.Curfn
+ oldfn := ir.CurFunc
+ ir.CurFunc = n.Curfn
ln := base.Pos
- base.Pos = Curfn.Pos()
+ base.Pos = ir.CurFunc.Pos()
moveToHeap(n)
- Curfn = oldfn
+ ir.CurFunc = oldfn
base.Pos = ln
// ODOTPTR has already been introduced,
// is always a heap pointer anyway.
case ir.ODOT:
n := n.(*ir.SelectorExpr)
- addrescapes(n.Left())
+ addrescapes(n.X)
case ir.OINDEX:
n := n.(*ir.IndexExpr)
- if !n.Left().Type().IsSlice() {
- addrescapes(n.Left())
+ if !n.X.Type().IsSlice() {
+ addrescapes(n.X)
}
case ir.OPAREN:
n := n.(*ir.ParenExpr)
- addrescapes(n.Left())
+ addrescapes(n.X)
case ir.OCONVNOP:
n := n.(*ir.ConvExpr)
- addrescapes(n.Left())
+ addrescapes(n.X)
}
}
if base.Flag.CompilingRuntime {
base.Errorf("%v escapes to heap, not allowed in runtime", n)
}
- if n.Class() == ir.PAUTOHEAP {
+ if n.Class_ == ir.PAUTOHEAP {
ir.Dump("n", n)
base.Fatalf("double move to heap")
}
// Allocate a local stack variable to hold the pointer to the heap copy.
// temp will add it to the function declaration list automatically.
- heapaddr := temp(types.NewPtr(n.Type()))
- heapaddr.SetSym(lookup("&" + n.Sym().Name))
+ heapaddr := typecheck.Temp(types.NewPtr(n.Type()))
+ heapaddr.SetSym(typecheck.Lookup("&" + n.Sym().Name))
heapaddr.SetPos(n.Pos())
// Unset AutoTemp to persist the &foo variable name through SSA to
// Parameters have a local stack copy used at function start/end
// in addition to the copy in the heap that may live longer than
// the function.
- if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT {
+ if n.Class_ == ir.PPARAM || n.Class_ == ir.PPARAMOUT {
if n.FrameOffset() == types.BADWIDTH {
base.Fatalf("addrescapes before param assignment")
}
// Preserve a copy so we can still write code referring to the original,
// and substitute that copy into the function declaration list
// so that analyses of the local (on-stack) variables use it.
- stackcopy := NewName(n.Sym())
+ stackcopy := typecheck.NewName(n.Sym())
stackcopy.SetType(n.Type())
stackcopy.SetFrameOffset(n.FrameOffset())
- stackcopy.SetClass(n.Class())
+ stackcopy.Class_ = n.Class_
stackcopy.Heapaddr = heapaddr
- if n.Class() == ir.PPARAMOUT {
+ if n.Class_ == ir.PPARAMOUT {
// Make sure the pointer to the heap copy is kept live throughout the function.
// The function could panic at any point, and then a defer could recover.
// Thus, we need the pointer to the heap copy always available so the
// liveness and other analyses use the underlying stack slot
// and not the now-pseudo-variable n.
found := false
- for i, d := range Curfn.Dcl {
+ for i, d := range ir.CurFunc.Dcl {
if d == n {
- Curfn.Dcl[i] = stackcopy
+ ir.CurFunc.Dcl[i] = stackcopy
found = true
break
}
// Parameters are before locals, so can stop early.
// This limits the search even in functions with many local variables.
- if d.Class() == ir.PAUTO {
+ if d.Class_ == ir.PAUTO {
break
}
}
if !found {
base.Fatalf("cannot find %v in local variable list", n)
}
- Curfn.Dcl = append(Curfn.Dcl, n)
+ ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n)
}
// Modify n in place so that uses of n now mean indirection of the heapaddr.
- n.SetClass(ir.PAUTOHEAP)
+ n.Class_ = ir.PAUTOHEAP
n.SetFrameOffset(0)
n.Heapaddr = heapaddr
- n.SetEsc(EscHeap)
+ n.SetEsc(ir.EscHeap)
if base.Flag.LowerM != 0 {
base.WarnfAt(n.Pos(), "moved to heap: %v", n)
}
// This special tag is applied to uintptr variables
// that we believe may hold unsafe.Pointers for
// calls into assembly functions.
-const unsafeUintptrTag = "unsafe-uintptr"
+const UnsafeUintptrNote = "unsafe-uintptr"
// This special tag is applied to uintptr parameters of functions
// marked go:uintptrescapes.
-const uintptrEscapesTag = "uintptr-escapes"
+const UintptrEscapesNote = "uintptr-escapes"
-func (e *Escape) paramTag(fn *ir.Func, narg int, f *types.Field) string {
+func (e *escape) paramTag(fn *ir.Func, narg int, f *types.Field) string {
name := func() string {
if f.Sym != nil {
return f.Sym.Name
return fmt.Sprintf("arg#%d", narg)
}
- if fn.Body().Len() == 0 {
+ if len(fn.Body) == 0 {
// Assume that uintptr arguments must be held live across the call.
// This is most important for syscall.Syscall.
// See golang.org/issue/13372.
if base.Flag.LowerM != 0 {
base.WarnfAt(f.Pos, "assuming %v is unsafe uintptr", name())
}
- return unsafeUintptrTag
+ return UnsafeUintptrNote
}
if !f.Type.HasPointers() { // don't bother tagging for scalars
return ""
}
- var esc EscLeaks
+ var esc leaks
// External functions are assumed unsafe, unless
// //go:noescape is given before the declaration.
- if fn.Func().Pragma&ir.Noescape != 0 {
+ if fn.Pragma&ir.Noescape != 0 {
if base.Flag.LowerM != 0 && f.Sym != nil {
base.WarnfAt(f.Pos, "%v does not escape", name())
}
return esc.Encode()
}
- if fn.Func().Pragma&ir.UintptrEscapes != 0 {
+ if fn.Pragma&ir.UintptrEscapes != 0 {
if f.Type.IsUintptr() {
if base.Flag.LowerM != 0 {
base.WarnfAt(f.Pos, "marking %v as escaping uintptr", name())
}
- return uintptrEscapesTag
+ return UintptrEscapesNote
}
if f.IsDDD() && f.Type.Elem().IsUintptr() {
// final argument is ...uintptr.
if base.Flag.LowerM != 0 {
base.WarnfAt(f.Pos, "marking %v as escaping ...uintptr", name())
}
- return uintptrEscapesTag
+ return UintptrEscapesNote
}
}
// Unnamed parameters are unused and therefore do not escape.
if f.Sym == nil || f.Sym.IsBlank() {
- var esc EscLeaks
+ var esc leaks
return esc.Encode()
}
result.inparams = append(result.inparams,
s.assignParamOrReturn(f.Type))
}
- s.stackOffset = Rnd(s.stackOffset, int64(Widthreg))
+ s.stackOffset = types.Rnd(s.stackOffset, int64(types.RegSize))
// Record number of spill slots needed.
result.intSpillSlots = s.rUsed.intRegs
// specified type.
func (state *assignState) stackSlot(t *types.Type) int64 {
if t.Align > 0 {
- state.stackOffset = Rnd(state.stackOffset, int64(t.Align))
+ state.stackOffset = types.Rnd(state.stackOffset, int64(t.Align))
}
rv := state.stackOffset
state.stackOffset += t.Width
// can register allocate, FALSE otherwise (and updates state
// accordingly).
func (state *assignState) regassignIntegral(t *types.Type) bool {
- regsNeeded := int(Rnd(t.Width, int64(Widthptr)) / int64(Widthptr))
+ regsNeeded := int(types.Rnd(t.Width, int64(types.PtrSize)) / int64(types.PtrSize))
// Floating point and complex.
if t.IsFloat() || t.IsComplex() {
import (
"bufio"
"cmd/compile/internal/base"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/x86"
}
func TestMain(m *testing.M) {
- thearch.LinkArch = &x86.Linkamd64
- thearch.REGSP = x86.REGSP
- thearch.MAXWIDTH = 1 << 50
- MaxWidth = thearch.MAXWIDTH
- base.Ctxt = obj.Linknew(thearch.LinkArch)
+ ssagen.Arch.LinkArch = &x86.Linkamd64
+ ssagen.Arch.REGSP = x86.REGSP
+ ssagen.Arch.MAXWIDTH = 1 << 50
+ types.MaxWidth = ssagen.Arch.MAXWIDTH
+ base.Ctxt = obj.Linknew(ssagen.Arch.LinkArch)
base.Ctxt.DiagFunc = base.Errorf
base.Ctxt.DiagFlush = base.FlushErrors
base.Ctxt.Bso = bufio.NewWriter(os.Stdout)
- Widthptr = thearch.LinkArch.PtrSize
- Widthreg = thearch.LinkArch.RegSize
+ types.PtrSize = ssagen.Arch.LinkArch.PtrSize
+ types.RegSize = ssagen.Arch.LinkArch.RegSize
types.TypeLinkSym = func(t *types.Type) *obj.LSym {
- return typenamesym(t).Linksym()
+ return reflectdata.TypeSym(t).Linksym()
}
types.TypeLinkSym = func(t *types.Type) *obj.LSym {
- return typenamesym(t).Linksym()
+ return reflectdata.TypeSym(t).Linksym()
}
- TypecheckInit()
+ typecheck.Init()
os.Exit(m.Run())
}
import (
"cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
func mkParamResultField(t *types.Type, s *types.Sym, which ir.Class) *types.Field {
field := types.NewField(src.NoXPos, s, t)
- n := NewName(s)
- n.SetClass(which)
+ n := typecheck.NewName(s)
+ n.Class_ = which
field.Nname = n
n.SetType(t)
return field
}
func mkFuncType(rcvr *types.Type, ins []*types.Type, outs []*types.Type) *types.Type {
- q := lookup("?")
+ q := typecheck.Lookup("?")
inf := []*types.Field{}
for _, it := range ins {
inf = append(inf, mkParamResultField(it, q, ir.PPARAM))
n := ir.AsNode(f.Nname).(*ir.Name)
if n.FrameOffset() != int64(r.Offset) {
t.Errorf("%s %d: got offset %d wanted %d t=%v",
- which, idx, r.Offset, n.Offset(), f.Type)
+ which, idx, r.Offset, n.Offset_, f.Type)
return 1
}
return 0
func abitest(t *testing.T, ft *types.Type, exp expectedDump) {
- dowidth(ft)
+ types.CalcSize(ft)
// Analyze with full set of registers.
regRes := ABIAnalyze(ft, configAMD64)
+++ /dev/null
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
- "cmd/compile/internal/ir"
- "cmd/compile/internal/types"
-)
-
-type exporter struct {
- marked map[*types.Type]bool // types already seen by markType
-}
-
-// markObject visits a reachable object.
-func (p *exporter) markObject(n ir.Node) {
- if n.Op() == ir.ONAME {
- n := n.(*ir.Name)
- if n.Class() == ir.PFUNC {
- inlFlood(n, exportsym)
- }
- }
-
- p.markType(n.Type())
-}
-
-// markType recursively visits types reachable from t to identify
-// functions whose inline bodies may be needed.
-func (p *exporter) markType(t *types.Type) {
- if p.marked[t] {
- return
- }
- p.marked[t] = true
-
- // If this is a named type, mark all of its associated
- // methods. Skip interface types because t.Methods contains
- // only their unexpanded method set (i.e., exclusive of
- // interface embeddings), and the switch statement below
- // handles their full method set.
- if t.Sym() != nil && t.Kind() != types.TINTER {
- for _, m := range t.Methods().Slice() {
- if types.IsExported(m.Sym.Name) {
- p.markObject(ir.AsNode(m.Nname))
- }
- }
- }
-
- // Recursively mark any types that can be produced given a
- // value of type t: dereferencing a pointer; indexing or
- // iterating over an array, slice, or map; receiving from a
- // channel; accessing a struct field or interface method; or
- // calling a function.
- //
- // Notably, we don't mark function parameter types, because
- // the user already needs some way to construct values of
- // those types.
- switch t.Kind() {
- case types.TPTR, types.TARRAY, types.TSLICE:
- p.markType(t.Elem())
-
- case types.TCHAN:
- if t.ChanDir().CanRecv() {
- p.markType(t.Elem())
- }
-
- case types.TMAP:
- p.markType(t.Key())
- p.markType(t.Elem())
-
- case types.TSTRUCT:
- for _, f := range t.FieldSlice() {
- if types.IsExported(f.Sym.Name) || f.Embedded != 0 {
- p.markType(f.Type)
- }
- }
-
- case types.TFUNC:
- for _, f := range t.Results().FieldSlice() {
- p.markType(f.Type)
- }
-
- case types.TINTER:
- for _, f := range t.FieldSlice() {
- if types.IsExported(f.Sym.Name) {
- p.markType(f.Type)
- }
- }
- }
-}
-
-// ----------------------------------------------------------------------------
-// Export format
-
-// Tags. Must be < 0.
-const (
- // Objects
- packageTag = -(iota + 1)
- constTag
- typeTag
- varTag
- funcTag
- endTag
-
- // Types
- namedTag
- arrayTag
- sliceTag
- dddTag
- structTag
- pointerTag
- signatureTag
- interfaceTag
- mapTag
- chanTag
-
- // Values
- falseTag
- trueTag
- int64Tag
- floatTag
- fractionTag // not used by gc
- complexTag
- stringTag
- nilTag
- unknownTag // not used by gc (only appears in packages with errors)
-
- // Type aliases
- aliasTag
-)
-
-var predecl []*types.Type // initialized lazily
-
-func predeclared() []*types.Type {
- if predecl == nil {
- // initialize lazily to be sure that all
- // elements have been initialized before
- predecl = []*types.Type{
- // basic types
- types.Types[types.TBOOL],
- types.Types[types.TINT],
- types.Types[types.TINT8],
- types.Types[types.TINT16],
- types.Types[types.TINT32],
- types.Types[types.TINT64],
- types.Types[types.TUINT],
- types.Types[types.TUINT8],
- types.Types[types.TUINT16],
- types.Types[types.TUINT32],
- types.Types[types.TUINT64],
- types.Types[types.TUINTPTR],
- types.Types[types.TFLOAT32],
- types.Types[types.TFLOAT64],
- types.Types[types.TCOMPLEX64],
- types.Types[types.TCOMPLEX128],
- types.Types[types.TSTRING],
-
- // basic type aliases
- types.ByteType,
- types.RuneType,
-
- // error
- types.ErrorType,
-
- // untyped types
- types.UntypedBool,
- types.UntypedInt,
- types.UntypedRune,
- types.UntypedFloat,
- types.UntypedComplex,
- types.UntypedString,
- types.Types[types.TNIL],
-
- // package unsafe
- types.Types[types.TUNSAFEPTR],
-
- // invalid type (package contains errors)
- types.Types[types.Txxx],
-
- // any type, for builtin export data
- types.Types[types.TANY],
- }
- }
- return predecl
-}
+++ /dev/null
-// Code generated by mkbuiltin.go. DO NOT EDIT.
-
-package gc
-
-import (
- "cmd/compile/internal/ir"
- "cmd/compile/internal/types"
-)
-
-var runtimeDecls = [...]struct {
- name string
- tag int
- typ int
-}{
- {"newobject", funcTag, 4},
- {"mallocgc", funcTag, 8},
- {"panicdivide", funcTag, 9},
- {"panicshift", funcTag, 9},
- {"panicmakeslicelen", funcTag, 9},
- {"panicmakeslicecap", funcTag, 9},
- {"throwinit", funcTag, 9},
- {"panicwrap", funcTag, 9},
- {"gopanic", funcTag, 11},
- {"gorecover", funcTag, 14},
- {"goschedguarded", funcTag, 9},
- {"goPanicIndex", funcTag, 16},
- {"goPanicIndexU", funcTag, 18},
- {"goPanicSliceAlen", funcTag, 16},
- {"goPanicSliceAlenU", funcTag, 18},
- {"goPanicSliceAcap", funcTag, 16},
- {"goPanicSliceAcapU", funcTag, 18},
- {"goPanicSliceB", funcTag, 16},
- {"goPanicSliceBU", funcTag, 18},
- {"goPanicSlice3Alen", funcTag, 16},
- {"goPanicSlice3AlenU", funcTag, 18},
- {"goPanicSlice3Acap", funcTag, 16},
- {"goPanicSlice3AcapU", funcTag, 18},
- {"goPanicSlice3B", funcTag, 16},
- {"goPanicSlice3BU", funcTag, 18},
- {"goPanicSlice3C", funcTag, 16},
- {"goPanicSlice3CU", funcTag, 18},
- {"printbool", funcTag, 19},
- {"printfloat", funcTag, 21},
- {"printint", funcTag, 23},
- {"printhex", funcTag, 25},
- {"printuint", funcTag, 25},
- {"printcomplex", funcTag, 27},
- {"printstring", funcTag, 29},
- {"printpointer", funcTag, 30},
- {"printuintptr", funcTag, 31},
- {"printiface", funcTag, 30},
- {"printeface", funcTag, 30},
- {"printslice", funcTag, 30},
- {"printnl", funcTag, 9},
- {"printsp", funcTag, 9},
- {"printlock", funcTag, 9},
- {"printunlock", funcTag, 9},
- {"concatstring2", funcTag, 34},
- {"concatstring3", funcTag, 35},
- {"concatstring4", funcTag, 36},
- {"concatstring5", funcTag, 37},
- {"concatstrings", funcTag, 39},
- {"cmpstring", funcTag, 40},
- {"intstring", funcTag, 43},
- {"slicebytetostring", funcTag, 44},
- {"slicebytetostringtmp", funcTag, 45},
- {"slicerunetostring", funcTag, 48},
- {"stringtoslicebyte", funcTag, 50},
- {"stringtoslicerune", funcTag, 53},
- {"slicecopy", funcTag, 54},
- {"decoderune", funcTag, 55},
- {"countrunes", funcTag, 56},
- {"convI2I", funcTag, 57},
- {"convT16", funcTag, 58},
- {"convT32", funcTag, 58},
- {"convT64", funcTag, 58},
- {"convTstring", funcTag, 58},
- {"convTslice", funcTag, 58},
- {"convT2E", funcTag, 59},
- {"convT2Enoptr", funcTag, 59},
- {"convT2I", funcTag, 59},
- {"convT2Inoptr", funcTag, 59},
- {"assertE2I", funcTag, 57},
- {"assertE2I2", funcTag, 60},
- {"assertI2I", funcTag, 57},
- {"assertI2I2", funcTag, 60},
- {"panicdottypeE", funcTag, 61},
- {"panicdottypeI", funcTag, 61},
- {"panicnildottype", funcTag, 62},
- {"ifaceeq", funcTag, 64},
- {"efaceeq", funcTag, 64},
- {"fastrand", funcTag, 66},
- {"makemap64", funcTag, 68},
- {"makemap", funcTag, 69},
- {"makemap_small", funcTag, 70},
- {"mapaccess1", funcTag, 71},
- {"mapaccess1_fast32", funcTag, 72},
- {"mapaccess1_fast64", funcTag, 72},
- {"mapaccess1_faststr", funcTag, 72},
- {"mapaccess1_fat", funcTag, 73},
- {"mapaccess2", funcTag, 74},
- {"mapaccess2_fast32", funcTag, 75},
- {"mapaccess2_fast64", funcTag, 75},
- {"mapaccess2_faststr", funcTag, 75},
- {"mapaccess2_fat", funcTag, 76},
- {"mapassign", funcTag, 71},
- {"mapassign_fast32", funcTag, 72},
- {"mapassign_fast32ptr", funcTag, 72},
- {"mapassign_fast64", funcTag, 72},
- {"mapassign_fast64ptr", funcTag, 72},
- {"mapassign_faststr", funcTag, 72},
- {"mapiterinit", funcTag, 77},
- {"mapdelete", funcTag, 77},
- {"mapdelete_fast32", funcTag, 78},
- {"mapdelete_fast64", funcTag, 78},
- {"mapdelete_faststr", funcTag, 78},
- {"mapiternext", funcTag, 79},
- {"mapclear", funcTag, 80},
- {"makechan64", funcTag, 82},
- {"makechan", funcTag, 83},
- {"chanrecv1", funcTag, 85},
- {"chanrecv2", funcTag, 86},
- {"chansend1", funcTag, 88},
- {"closechan", funcTag, 30},
- {"writeBarrier", varTag, 90},
- {"typedmemmove", funcTag, 91},
- {"typedmemclr", funcTag, 92},
- {"typedslicecopy", funcTag, 93},
- {"selectnbsend", funcTag, 94},
- {"selectnbrecv", funcTag, 95},
- {"selectnbrecv2", funcTag, 97},
- {"selectsetpc", funcTag, 98},
- {"selectgo", funcTag, 99},
- {"block", funcTag, 9},
- {"makeslice", funcTag, 100},
- {"makeslice64", funcTag, 101},
- {"makeslicecopy", funcTag, 102},
- {"growslice", funcTag, 104},
- {"memmove", funcTag, 105},
- {"memclrNoHeapPointers", funcTag, 106},
- {"memclrHasPointers", funcTag, 106},
- {"memequal", funcTag, 107},
- {"memequal0", funcTag, 108},
- {"memequal8", funcTag, 108},
- {"memequal16", funcTag, 108},
- {"memequal32", funcTag, 108},
- {"memequal64", funcTag, 108},
- {"memequal128", funcTag, 108},
- {"f32equal", funcTag, 109},
- {"f64equal", funcTag, 109},
- {"c64equal", funcTag, 109},
- {"c128equal", funcTag, 109},
- {"strequal", funcTag, 109},
- {"interequal", funcTag, 109},
- {"nilinterequal", funcTag, 109},
- {"memhash", funcTag, 110},
- {"memhash0", funcTag, 111},
- {"memhash8", funcTag, 111},
- {"memhash16", funcTag, 111},
- {"memhash32", funcTag, 111},
- {"memhash64", funcTag, 111},
- {"memhash128", funcTag, 111},
- {"f32hash", funcTag, 111},
- {"f64hash", funcTag, 111},
- {"c64hash", funcTag, 111},
- {"c128hash", funcTag, 111},
- {"strhash", funcTag, 111},
- {"interhash", funcTag, 111},
- {"nilinterhash", funcTag, 111},
- {"int64div", funcTag, 112},
- {"uint64div", funcTag, 113},
- {"int64mod", funcTag, 112},
- {"uint64mod", funcTag, 113},
- {"float64toint64", funcTag, 114},
- {"float64touint64", funcTag, 115},
- {"float64touint32", funcTag, 116},
- {"int64tofloat64", funcTag, 117},
- {"uint64tofloat64", funcTag, 118},
- {"uint32tofloat64", funcTag, 119},
- {"complex128div", funcTag, 120},
- {"racefuncenter", funcTag, 31},
- {"racefuncenterfp", funcTag, 9},
- {"racefuncexit", funcTag, 9},
- {"raceread", funcTag, 31},
- {"racewrite", funcTag, 31},
- {"racereadrange", funcTag, 121},
- {"racewriterange", funcTag, 121},
- {"msanread", funcTag, 121},
- {"msanwrite", funcTag, 121},
- {"msanmove", funcTag, 122},
- {"checkptrAlignment", funcTag, 123},
- {"checkptrArithmetic", funcTag, 125},
- {"libfuzzerTraceCmp1", funcTag, 127},
- {"libfuzzerTraceCmp2", funcTag, 129},
- {"libfuzzerTraceCmp4", funcTag, 130},
- {"libfuzzerTraceCmp8", funcTag, 131},
- {"libfuzzerTraceConstCmp1", funcTag, 127},
- {"libfuzzerTraceConstCmp2", funcTag, 129},
- {"libfuzzerTraceConstCmp4", funcTag, 130},
- {"libfuzzerTraceConstCmp8", funcTag, 131},
- {"x86HasPOPCNT", varTag, 6},
- {"x86HasSSE41", varTag, 6},
- {"x86HasFMA", varTag, 6},
- {"armHasVFPv4", varTag, 6},
- {"arm64HasATOMICS", varTag, 6},
-}
-
-func runtimeTypes() []*types.Type {
- var typs [132]*types.Type
- typs[0] = types.ByteType
- typs[1] = types.NewPtr(typs[0])
- typs[2] = types.Types[types.TANY]
- typs[3] = types.NewPtr(typs[2])
- typs[4] = functype(nil, []*ir.Field{anonfield(typs[1])}, []*ir.Field{anonfield(typs[3])})
- typs[5] = types.Types[types.TUINTPTR]
- typs[6] = types.Types[types.TBOOL]
- typs[7] = types.Types[types.TUNSAFEPTR]
- typs[8] = functype(nil, []*ir.Field{anonfield(typs[5]), anonfield(typs[1]), anonfield(typs[6])}, []*ir.Field{anonfield(typs[7])})
- typs[9] = functype(nil, nil, nil)
- typs[10] = types.Types[types.TINTER]
- typs[11] = functype(nil, []*ir.Field{anonfield(typs[10])}, nil)
- typs[12] = types.Types[types.TINT32]
- typs[13] = types.NewPtr(typs[12])
- typs[14] = functype(nil, []*ir.Field{anonfield(typs[13])}, []*ir.Field{anonfield(typs[10])})
- typs[15] = types.Types[types.TINT]
- typs[16] = functype(nil, []*ir.Field{anonfield(typs[15]), anonfield(typs[15])}, nil)
- typs[17] = types.Types[types.TUINT]
- typs[18] = functype(nil, []*ir.Field{anonfield(typs[17]), anonfield(typs[15])}, nil)
- typs[19] = functype(nil, []*ir.Field{anonfield(typs[6])}, nil)
- typs[20] = types.Types[types.TFLOAT64]
- typs[21] = functype(nil, []*ir.Field{anonfield(typs[20])}, nil)
- typs[22] = types.Types[types.TINT64]
- typs[23] = functype(nil, []*ir.Field{anonfield(typs[22])}, nil)
- typs[24] = types.Types[types.TUINT64]
- typs[25] = functype(nil, []*ir.Field{anonfield(typs[24])}, nil)
- typs[26] = types.Types[types.TCOMPLEX128]
- typs[27] = functype(nil, []*ir.Field{anonfield(typs[26])}, nil)
- typs[28] = types.Types[types.TSTRING]
- typs[29] = functype(nil, []*ir.Field{anonfield(typs[28])}, nil)
- typs[30] = functype(nil, []*ir.Field{anonfield(typs[2])}, nil)
- typs[31] = functype(nil, []*ir.Field{anonfield(typs[5])}, nil)
- typs[32] = types.NewArray(typs[0], 32)
- typs[33] = types.NewPtr(typs[32])
- typs[34] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[28])})
- typs[35] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[28])})
- typs[36] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[28])})
- typs[37] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[28])})
- typs[38] = types.NewSlice(typs[28])
- typs[39] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[38])}, []*ir.Field{anonfield(typs[28])})
- typs[40] = functype(nil, []*ir.Field{anonfield(typs[28]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[15])})
- typs[41] = types.NewArray(typs[0], 4)
- typs[42] = types.NewPtr(typs[41])
- typs[43] = functype(nil, []*ir.Field{anonfield(typs[42]), anonfield(typs[22])}, []*ir.Field{anonfield(typs[28])})
- typs[44] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[1]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[28])})
- typs[45] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[28])})
- typs[46] = types.RuneType
- typs[47] = types.NewSlice(typs[46])
- typs[48] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[47])}, []*ir.Field{anonfield(typs[28])})
- typs[49] = types.NewSlice(typs[0])
- typs[50] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[49])})
- typs[51] = types.NewArray(typs[46], 32)
- typs[52] = types.NewPtr(typs[51])
- typs[53] = functype(nil, []*ir.Field{anonfield(typs[52]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[47])})
- typs[54] = functype(nil, []*ir.Field{anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[5])}, []*ir.Field{anonfield(typs[15])})
- typs[55] = functype(nil, []*ir.Field{anonfield(typs[28]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[46]), anonfield(typs[15])})
- typs[56] = functype(nil, []*ir.Field{anonfield(typs[28])}, []*ir.Field{anonfield(typs[15])})
- typs[57] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[2])}, []*ir.Field{anonfield(typs[2])})
- typs[58] = functype(nil, []*ir.Field{anonfield(typs[2])}, []*ir.Field{anonfield(typs[7])})
- typs[59] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[2])})
- typs[60] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[2])}, []*ir.Field{anonfield(typs[2]), anonfield(typs[6])})
- typs[61] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil)
- typs[62] = functype(nil, []*ir.Field{anonfield(typs[1])}, nil)
- typs[63] = types.NewPtr(typs[5])
- typs[64] = functype(nil, []*ir.Field{anonfield(typs[63]), anonfield(typs[7]), anonfield(typs[7])}, []*ir.Field{anonfield(typs[6])})
- typs[65] = types.Types[types.TUINT32]
- typs[66] = functype(nil, nil, []*ir.Field{anonfield(typs[65])})
- typs[67] = types.NewMap(typs[2], typs[2])
- typs[68] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[67])})
- typs[69] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[67])})
- typs[70] = functype(nil, nil, []*ir.Field{anonfield(typs[67])})
- typs[71] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[3])})
- typs[72] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*ir.Field{anonfield(typs[3])})
- typs[73] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*ir.Field{anonfield(typs[3])})
- typs[74] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[3]), anonfield(typs[6])})
- typs[75] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*ir.Field{anonfield(typs[3]), anonfield(typs[6])})
- typs[76] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*ir.Field{anonfield(typs[3]), anonfield(typs[6])})
- typs[77] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, nil)
- typs[78] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, nil)
- typs[79] = functype(nil, []*ir.Field{anonfield(typs[3])}, nil)
- typs[80] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67])}, nil)
- typs[81] = types.NewChan(typs[2], types.Cboth)
- typs[82] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[22])}, []*ir.Field{anonfield(typs[81])})
- typs[83] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[81])})
- typs[84] = types.NewChan(typs[2], types.Crecv)
- typs[85] = functype(nil, []*ir.Field{anonfield(typs[84]), anonfield(typs[3])}, nil)
- typs[86] = functype(nil, []*ir.Field{anonfield(typs[84]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[6])})
- typs[87] = types.NewChan(typs[2], types.Csend)
- typs[88] = functype(nil, []*ir.Field{anonfield(typs[87]), anonfield(typs[3])}, nil)
- typs[89] = types.NewArray(typs[0], 3)
- typs[90] = tostruct([]*ir.Field{namedfield("enabled", typs[6]), namedfield("pad", typs[89]), namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])})
- typs[91] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil)
- typs[92] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[3])}, nil)
- typs[93] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[15])})
- typs[94] = functype(nil, []*ir.Field{anonfield(typs[87]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[6])})
- typs[95] = functype(nil, []*ir.Field{anonfield(typs[3]), anonfield(typs[84])}, []*ir.Field{anonfield(typs[6])})
- typs[96] = types.NewPtr(typs[6])
- typs[97] = functype(nil, []*ir.Field{anonfield(typs[3]), anonfield(typs[96]), anonfield(typs[84])}, []*ir.Field{anonfield(typs[6])})
- typs[98] = functype(nil, []*ir.Field{anonfield(typs[63])}, nil)
- typs[99] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[63]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[6])}, []*ir.Field{anonfield(typs[15]), anonfield(typs[6])})
- typs[100] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[7])})
- typs[101] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[22])}, []*ir.Field{anonfield(typs[7])})
- typs[102] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[7])}, []*ir.Field{anonfield(typs[7])})
- typs[103] = types.NewSlice(typs[2])
- typs[104] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[103]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[103])})
- typs[105] = functype(nil, []*ir.Field{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, nil)
- typs[106] = functype(nil, []*ir.Field{anonfield(typs[7]), anonfield(typs[5])}, nil)
- typs[107] = functype(nil, []*ir.Field{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, []*ir.Field{anonfield(typs[6])})
- typs[108] = functype(nil, []*ir.Field{anonfield(typs[3]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[6])})
- typs[109] = functype(nil, []*ir.Field{anonfield(typs[7]), anonfield(typs[7])}, []*ir.Field{anonfield(typs[6])})
- typs[110] = functype(nil, []*ir.Field{anonfield(typs[7]), anonfield(typs[5]), anonfield(typs[5])}, []*ir.Field{anonfield(typs[5])})
- typs[111] = functype(nil, []*ir.Field{anonfield(typs[7]), anonfield(typs[5])}, []*ir.Field{anonfield(typs[5])})
- typs[112] = functype(nil, []*ir.Field{anonfield(typs[22]), anonfield(typs[22])}, []*ir.Field{anonfield(typs[22])})
- typs[113] = functype(nil, []*ir.Field{anonfield(typs[24]), anonfield(typs[24])}, []*ir.Field{anonfield(typs[24])})
- typs[114] = functype(nil, []*ir.Field{anonfield(typs[20])}, []*ir.Field{anonfield(typs[22])})
- typs[115] = functype(nil, []*ir.Field{anonfield(typs[20])}, []*ir.Field{anonfield(typs[24])})
- typs[116] = functype(nil, []*ir.Field{anonfield(typs[20])}, []*ir.Field{anonfield(typs[65])})
- typs[117] = functype(nil, []*ir.Field{anonfield(typs[22])}, []*ir.Field{anonfield(typs[20])})
- typs[118] = functype(nil, []*ir.Field{anonfield(typs[24])}, []*ir.Field{anonfield(typs[20])})
- typs[119] = functype(nil, []*ir.Field{anonfield(typs[65])}, []*ir.Field{anonfield(typs[20])})
- typs[120] = functype(nil, []*ir.Field{anonfield(typs[26]), anonfield(typs[26])}, []*ir.Field{anonfield(typs[26])})
- typs[121] = functype(nil, []*ir.Field{anonfield(typs[5]), anonfield(typs[5])}, nil)
- typs[122] = functype(nil, []*ir.Field{anonfield(typs[5]), anonfield(typs[5]), anonfield(typs[5])}, nil)
- typs[123] = functype(nil, []*ir.Field{anonfield(typs[7]), anonfield(typs[1]), anonfield(typs[5])}, nil)
- typs[124] = types.NewSlice(typs[7])
- typs[125] = functype(nil, []*ir.Field{anonfield(typs[7]), anonfield(typs[124])}, nil)
- typs[126] = types.Types[types.TUINT8]
- typs[127] = functype(nil, []*ir.Field{anonfield(typs[126]), anonfield(typs[126])}, nil)
- typs[128] = types.Types[types.TUINT16]
- typs[129] = functype(nil, []*ir.Field{anonfield(typs[128]), anonfield(typs[128])}, nil)
- typs[130] = functype(nil, []*ir.Field{anonfield(typs[65]), anonfield(typs[65])}, nil)
- typs[131] = functype(nil, []*ir.Field{anonfield(typs[24]), anonfield(typs[24])}, nil)
- return typs[:]
-}
+++ /dev/null
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
- "math/bits"
-
- "cmd/compile/internal/base"
-)
-
-const (
- wordBits = 32
- wordMask = wordBits - 1
- wordShift = 5
-)
-
-// A bvec is a bit vector.
-type bvec struct {
- n int32 // number of bits in vector
- b []uint32 // words holding bits
-}
-
-func bvalloc(n int32) bvec {
- nword := (n + wordBits - 1) / wordBits
- return bvec{n, make([]uint32, nword)}
-}
-
-type bulkBvec struct {
- words []uint32
- nbit int32
- nword int32
-}
-
-func bvbulkalloc(nbit int32, count int32) bulkBvec {
- nword := (nbit + wordBits - 1) / wordBits
- size := int64(nword) * int64(count)
- if int64(int32(size*4)) != size*4 {
- base.Fatalf("bvbulkalloc too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size)
- }
- return bulkBvec{
- words: make([]uint32, size),
- nbit: nbit,
- nword: nword,
- }
-}
-
-func (b *bulkBvec) next() bvec {
- out := bvec{b.nbit, b.words[:b.nword]}
- b.words = b.words[b.nword:]
- return out
-}
-
-func (bv1 bvec) Eq(bv2 bvec) bool {
- if bv1.n != bv2.n {
- base.Fatalf("bvequal: lengths %d and %d are not equal", bv1.n, bv2.n)
- }
- for i, x := range bv1.b {
- if x != bv2.b[i] {
- return false
- }
- }
- return true
-}
-
-func (dst bvec) Copy(src bvec) {
- copy(dst.b, src.b)
-}
-
-func (bv bvec) Get(i int32) bool {
- if i < 0 || i >= bv.n {
- base.Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.n)
- }
- mask := uint32(1 << uint(i%wordBits))
- return bv.b[i>>wordShift]&mask != 0
-}
-
-func (bv bvec) Set(i int32) {
- if i < 0 || i >= bv.n {
- base.Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.n)
- }
- mask := uint32(1 << uint(i%wordBits))
- bv.b[i/wordBits] |= mask
-}
-
-func (bv bvec) Unset(i int32) {
- if i < 0 || i >= bv.n {
- base.Fatalf("bvunset: index %d is out of bounds with length %d\n", i, bv.n)
- }
- mask := uint32(1 << uint(i%wordBits))
- bv.b[i/wordBits] &^= mask
-}
-
-// bvnext returns the smallest index >= i for which bvget(bv, i) == 1.
-// If there is no such index, bvnext returns -1.
-func (bv bvec) Next(i int32) int32 {
- if i >= bv.n {
- return -1
- }
-
- // Jump i ahead to next word with bits.
- if bv.b[i>>wordShift]>>uint(i&wordMask) == 0 {
- i &^= wordMask
- i += wordBits
- for i < bv.n && bv.b[i>>wordShift] == 0 {
- i += wordBits
- }
- }
-
- if i >= bv.n {
- return -1
- }
-
- // Find 1 bit.
- w := bv.b[i>>wordShift] >> uint(i&wordMask)
- i += int32(bits.TrailingZeros32(w))
-
- return i
-}
-
-func (bv bvec) IsEmpty() bool {
- for _, x := range bv.b {
- if x != 0 {
- return false
- }
- }
- return true
-}
-
-func (bv bvec) Not() {
- for i, x := range bv.b {
- bv.b[i] = ^x
- }
-}
-
-// union
-func (dst bvec) Or(src1, src2 bvec) {
- if len(src1.b) == 0 {
- return
- }
- _, _ = dst.b[len(src1.b)-1], src2.b[len(src1.b)-1] // hoist bounds checks out of the loop
-
- for i, x := range src1.b {
- dst.b[i] = x | src2.b[i]
- }
-}
-
-// intersection
-func (dst bvec) And(src1, src2 bvec) {
- if len(src1.b) == 0 {
- return
- }
- _, _ = dst.b[len(src1.b)-1], src2.b[len(src1.b)-1] // hoist bounds checks out of the loop
-
- for i, x := range src1.b {
- dst.b[i] = x & src2.b[i]
- }
-}
-
-// difference
-func (dst bvec) AndNot(src1, src2 bvec) {
- if len(src1.b) == 0 {
- return
- }
- _, _ = dst.b[len(src1.b)-1], src2.b[len(src1.b)-1] // hoist bounds checks out of the loop
-
- for i, x := range src1.b {
- dst.b[i] = x &^ src2.b[i]
- }
-}
-
-func (bv bvec) String() string {
- s := make([]byte, 2+bv.n)
- copy(s, "#*")
- for i := int32(0); i < bv.n; i++ {
- ch := byte('0')
- if bv.Get(i) {
- ch = '1'
- }
- s[2+i] = ch
- }
- return string(s)
-}
-
-func (bv bvec) Clear() {
- for i := range bv.b {
- bv.b[i] = 0
- }
-}
-
-// FNV-1 hash function constants.
-const (
- H0 = 2166136261
- Hp = 16777619
-)
-
-func hashbitmap(h uint32, bv bvec) uint32 {
- n := int((bv.n + 31) / 32)
- for i := 0; i < n; i++ {
- w := bv.b[i]
- h = (h * Hp) ^ (w & 0xff)
- h = (h * Hp) ^ ((w >> 8) & 0xff)
- h = (h * Hp) ^ ((w >> 16) & 0xff)
- h = (h * Hp) ^ ((w >> 24) & 0xff)
- }
-
- return h
-}
-
-// bvecSet is a set of bvecs, in initial insertion order.
-type bvecSet struct {
- index []int // hash -> uniq index. -1 indicates empty slot.
- uniq []bvec // unique bvecs, in insertion order
-}
-
-func (m *bvecSet) grow() {
- // Allocate new index.
- n := len(m.index) * 2
- if n == 0 {
- n = 32
- }
- newIndex := make([]int, n)
- for i := range newIndex {
- newIndex[i] = -1
- }
-
- // Rehash into newIndex.
- for i, bv := range m.uniq {
- h := hashbitmap(H0, bv) % uint32(len(newIndex))
- for {
- j := newIndex[h]
- if j < 0 {
- newIndex[h] = i
- break
- }
- h++
- if h == uint32(len(newIndex)) {
- h = 0
- }
- }
- }
- m.index = newIndex
-}
-
-// add adds bv to the set and returns its index in m.extractUniqe.
-// The caller must not modify bv after this.
-func (m *bvecSet) add(bv bvec) int {
- if len(m.uniq)*4 >= len(m.index) {
- m.grow()
- }
-
- index := m.index
- h := hashbitmap(H0, bv) % uint32(len(index))
- for {
- j := index[h]
- if j < 0 {
- // New bvec.
- index[h] = len(m.uniq)
- m.uniq = append(m.uniq, bv)
- return len(m.uniq) - 1
- }
- jlive := m.uniq[j]
- if bv.Eq(jlive) {
- // Existing bvec.
- return j
- }
-
- h++
- if h == uint32(len(index)) {
- h = 0
- }
- }
-}
-
-// extractUniqe returns this slice of unique bit vectors in m, as
-// indexed by the result of bvecSet.add.
-func (m *bvecSet) extractUniqe() []bvec {
- return m.uniq
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
- "cmd/compile/internal/base"
- "cmd/compile/internal/ir"
- "cmd/compile/internal/syntax"
- "cmd/compile/internal/types"
- "cmd/internal/src"
- "fmt"
-)
-
-func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node {
- xtype := p.typeExpr(expr.Type)
- ntype := p.typeExpr(expr.Type)
-
- fn := ir.NewFunc(p.pos(expr))
- fn.SetIsHiddenClosure(Curfn != nil)
- fn.Nname = newFuncNameAt(p.pos(expr), ir.BlankNode.Sym(), fn) // filled in by typecheckclosure
- fn.Nname.Ntype = xtype
- fn.Nname.Defn = fn
-
- clo := ir.NewClosureExpr(p.pos(expr), fn)
- fn.ClosureType = ntype
- fn.OClosure = clo
-
- p.funcBody(fn, expr.Body)
-
- // closure-specific variables are hanging off the
- // ordinary ones in the symbol table; see oldname.
- // unhook them.
- // make the list of pointers for the closure call.
- for _, v := range fn.ClosureVars {
- // Unlink from v1; see comment in syntax.go type Param for these fields.
- v1 := v.Defn
- v1.Name().Innermost = v.Outer
-
- // If the closure usage of v is not dense,
- // we need to make it dense; now that we're out
- // of the function in which v appeared,
- // look up v.Sym in the enclosing function
- // and keep it around for use in the compiled code.
- //
- // That is, suppose we just finished parsing the innermost
- // closure f4 in this code:
- //
- // func f() {
- // v := 1
- // func() { // f2
- // use(v)
- // func() { // f3
- // func() { // f4
- // use(v)
- // }()
- // }()
- // }()
- // }
- //
- // At this point v.Outer is f2's v; there is no f3's v.
- // To construct the closure f4 from within f3,
- // we need to use f3's v and in this case we need to create f3's v.
- // We are now in the context of f3, so calling oldname(v.Sym)
- // obtains f3's v, creating it if necessary (as it is in the example).
- //
- // capturevars will decide whether to use v directly or &v.
- v.Outer = oldname(v.Sym()).(*ir.Name)
- }
-
- return clo
-}
-
-// typecheckclosure typechecks an OCLOSURE node. It also creates the named
-// function associated with the closure.
-// TODO: This creation of the named function should probably really be done in a
-// separate pass from type-checking.
-func typecheckclosure(clo *ir.ClosureExpr, top int) {
- fn := clo.Func()
- // Set current associated iota value, so iota can be used inside
- // function in ConstSpec, see issue #22344
- if x := getIotaValue(); x >= 0 {
- fn.SetIota(x)
- }
-
- fn.ClosureType = typecheck(fn.ClosureType, ctxType)
- clo.SetType(fn.ClosureType.Type())
- fn.SetClosureCalled(top&ctxCallee != 0)
-
- // Do not typecheck fn twice, otherwise, we will end up pushing
- // fn to Target.Decls multiple times, causing initLSym called twice.
- // See #30709
- if fn.Typecheck() == 1 {
- return
- }
-
- for _, ln := range fn.ClosureVars {
- n := ln.Defn
- if !n.Name().Captured() {
- n.Name().SetCaptured(true)
- if n.Name().Decldepth == 0 {
- base.Fatalf("typecheckclosure: var %v does not have decldepth assigned", n)
- }
-
- // Ignore assignments to the variable in straightline code
- // preceding the first capturing by a closure.
- if n.Name().Decldepth == decldepth {
- n.Name().SetAssigned(false)
- }
- }
- }
-
- fn.Nname.SetSym(closurename(Curfn))
- setNodeNameFunc(fn.Nname)
- typecheckFunc(fn)
-
- // Type check the body now, but only if we're inside a function.
- // At top level (in a variable initialization: curfn==nil) we're not
- // ready to type check code yet; we'll check it later, because the
- // underlying closure function we create is added to Target.Decls.
- if Curfn != nil && clo.Type() != nil {
- oldfn := Curfn
- Curfn = fn
- olddd := decldepth
- decldepth = 1
- typecheckslice(fn.Body().Slice(), ctxStmt)
- decldepth = olddd
- Curfn = oldfn
- }
-
- Target.Decls = append(Target.Decls, fn)
-}
-
-// globClosgen is like Func.Closgen, but for the global scope.
-var globClosgen int32
-
-// closurename generates a new unique name for a closure within
-// outerfunc.
-func closurename(outerfunc *ir.Func) *types.Sym {
- outer := "glob."
- prefix := "func"
- gen := &globClosgen
-
- if outerfunc != nil {
- if outerfunc.OClosure != nil {
- prefix = ""
- }
-
- outer = ir.FuncName(outerfunc)
-
- // There may be multiple functions named "_". In those
- // cases, we can't use their individual Closgens as it
- // would lead to name clashes.
- if !ir.IsBlank(outerfunc.Nname) {
- gen = &outerfunc.Closgen
- }
- }
-
- *gen++
- return lookup(fmt.Sprintf("%s.%s%d", outer, prefix, *gen))
-}
-
-// capturevarscomplete is set to true when the capturevars phase is done.
-var capturevarscomplete bool
-
-// capturevars is called in a separate phase after all typechecking is done.
-// It decides whether each variable captured by a closure should be captured
-// by value or by reference.
-// We use value capturing for values <= 128 bytes that are never reassigned
-// after capturing (effectively constant).
-func capturevars(fn *ir.Func) {
- lno := base.Pos
- base.Pos = fn.Pos()
- cvars := fn.ClosureVars
- out := cvars[:0]
- for _, v := range cvars {
- if v.Type() == nil {
- // If v.Type is nil, it means v looked like it
- // was going to be used in the closure, but
- // isn't. This happens in struct literals like
- // s{f: x} where we can't distinguish whether
- // f is a field identifier or expression until
- // resolving s.
- continue
- }
- out = append(out, v)
-
- // type check the & of closed variables outside the closure,
- // so that the outer frame also grabs them and knows they escape.
- dowidth(v.Type())
-
- var outer ir.Node
- outer = v.Outer
- outermost := v.Defn.(*ir.Name)
-
- // out parameters will be assigned to implicitly upon return.
- if outermost.Class() != ir.PPARAMOUT && !outermost.Name().Addrtaken() && !outermost.Name().Assigned() && v.Type().Width <= 128 {
- v.SetByval(true)
- } else {
- outermost.Name().SetAddrtaken(true)
- outer = nodAddr(outer)
- }
-
- if base.Flag.LowerM > 1 {
- var name *types.Sym
- if v.Curfn != nil && v.Curfn.Nname != nil {
- name = v.Curfn.Sym()
- }
- how := "ref"
- if v.Byval() {
- how = "value"
- }
- base.WarnfAt(v.Pos(), "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym(), outermost.Name().Addrtaken(), outermost.Name().Assigned(), int32(v.Type().Width))
- }
-
- outer = typecheck(outer, ctxExpr)
- fn.ClosureEnter.Append(outer)
- }
-
- fn.ClosureVars = out
- base.Pos = lno
-}
-
-// transformclosure is called in a separate phase after escape analysis.
-// It transform closure bodies to properly reference captured variables.
-func transformclosure(fn *ir.Func) {
- lno := base.Pos
- base.Pos = fn.Pos()
-
- if fn.ClosureCalled() {
- // If the closure is directly called, we transform it to a plain function call
- // with variables passed as args. This avoids allocation of a closure object.
- // Here we do only a part of the transformation. Walk of OCALLFUNC(OCLOSURE)
- // will complete the transformation later.
- // For illustration, the following closure:
- // func(a int) {
- // println(byval)
- // byref++
- // }(42)
- // becomes:
- // func(byval int, &byref *int, a int) {
- // println(byval)
- // (*&byref)++
- // }(byval, &byref, 42)
-
- // f is ONAME of the actual function.
- f := fn.Nname
-
- // We are going to insert captured variables before input args.
- var params []*types.Field
- var decls []*ir.Name
- for _, v := range fn.ClosureVars {
- if !v.Byval() {
- // If v of type T is captured by reference,
- // we introduce function param &v *T
- // and v remains PAUTOHEAP with &v heapaddr
- // (accesses will implicitly deref &v).
- addr := NewName(lookup("&" + v.Sym().Name))
- addr.SetType(types.NewPtr(v.Type()))
- v.Heapaddr = addr
- v = addr
- }
-
- v.SetClass(ir.PPARAM)
- decls = append(decls, v)
-
- fld := types.NewField(src.NoXPos, v.Sym(), v.Type())
- fld.Nname = v
- params = append(params, fld)
- }
-
- if len(params) > 0 {
- // Prepend params and decls.
- f.Type().Params().SetFields(append(params, f.Type().Params().FieldSlice()...))
- fn.Dcl = append(decls, fn.Dcl...)
- }
-
- dowidth(f.Type())
- fn.SetType(f.Type()) // update type of ODCLFUNC
- } else {
- // The closure is not called, so it is going to stay as closure.
- var body []ir.Node
- offset := int64(Widthptr)
- for _, v := range fn.ClosureVars {
- // cv refers to the field inside of closure OSTRUCTLIT.
- typ := v.Type()
- if !v.Byval() {
- typ = types.NewPtr(typ)
- }
- offset = Rnd(offset, int64(typ.Align))
- cr := ir.NewClosureRead(typ, offset)
- offset += typ.Width
-
- if v.Byval() && v.Type().Width <= int64(2*Widthptr) {
- // If it is a small variable captured by value, downgrade it to PAUTO.
- v.SetClass(ir.PAUTO)
- fn.Dcl = append(fn.Dcl, v)
- body = append(body, ir.Nod(ir.OAS, v, cr))
- } else {
- // Declare variable holding addresses taken from closure
- // and initialize in entry prologue.
- addr := NewName(lookup("&" + v.Sym().Name))
- addr.SetType(types.NewPtr(v.Type()))
- addr.SetClass(ir.PAUTO)
- addr.SetUsed(true)
- addr.Curfn = fn
- fn.Dcl = append(fn.Dcl, addr)
- v.Heapaddr = addr
- var src ir.Node = cr
- if v.Byval() {
- src = nodAddr(cr)
- }
- body = append(body, ir.Nod(ir.OAS, addr, src))
- }
- }
-
- if len(body) > 0 {
- typecheckslice(body, ctxStmt)
- fn.Enter.Set(body)
- fn.SetNeedctxt(true)
- }
- }
-
- base.Pos = lno
-}
-
-// hasemptycvars reports whether closure clo has an
-// empty list of captured vars.
-func hasemptycvars(clo *ir.ClosureExpr) bool {
- return len(clo.Func().ClosureVars) == 0
-}
-
-// closuredebugruntimecheck applies boilerplate checks for debug flags
-// and compiling runtime
-func closuredebugruntimecheck(clo *ir.ClosureExpr) {
- if base.Debug.Closure > 0 {
- if clo.Esc() == EscHeap {
- base.WarnfAt(clo.Pos(), "heap closure, captured vars = %v", clo.Func().ClosureVars)
- } else {
- base.WarnfAt(clo.Pos(), "stack closure, captured vars = %v", clo.Func().ClosureVars)
- }
- }
- if base.Flag.CompilingRuntime && clo.Esc() == EscHeap {
- base.ErrorfAt(clo.Pos(), "heap-allocated closure, not allowed in runtime")
- }
-}
-
-// closureType returns the struct type used to hold all the information
-// needed in the closure for clo (clo must be a OCLOSURE node).
-// The address of a variable of the returned type can be cast to a func.
-func closureType(clo *ir.ClosureExpr) *types.Type {
- // Create closure in the form of a composite literal.
- // supposing the closure captures an int i and a string s
- // and has one float64 argument and no results,
- // the generated code looks like:
- //
- // clos = &struct{.F uintptr; i *int; s *string}{func.1, &i, &s}
- //
- // The use of the struct provides type information to the garbage
- // collector so that it can walk the closure. We could use (in this case)
- // [3]unsafe.Pointer instead, but that would leave the gc in the dark.
- // The information appears in the binary in the form of type descriptors;
- // the struct is unnamed so that closures in multiple packages with the
- // same struct type can share the descriptor.
- fields := []*ir.Field{
- namedfield(".F", types.Types[types.TUINTPTR]),
- }
- for _, v := range clo.Func().ClosureVars {
- typ := v.Type()
- if !v.Byval() {
- typ = types.NewPtr(typ)
- }
- fields = append(fields, symfield(v.Sym(), typ))
- }
- typ := tostruct(fields)
- typ.SetNoalg(true)
- return typ
-}
-
-func walkclosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node {
- fn := clo.Func()
-
- // If no closure vars, don't bother wrapping.
- if hasemptycvars(clo) {
- if base.Debug.Closure > 0 {
- base.WarnfAt(clo.Pos(), "closure converted to global")
- }
- return fn.Nname
- }
- closuredebugruntimecheck(clo)
-
- typ := closureType(clo)
-
- clos := ir.Nod(ir.OCOMPLIT, nil, ir.TypeNode(typ))
- clos.SetEsc(clo.Esc())
- clos.PtrList().Set(append([]ir.Node{ir.Nod(ir.OCFUNC, fn.Nname, nil)}, fn.ClosureEnter.Slice()...))
-
- addr := nodAddr(clos)
- addr.SetEsc(clo.Esc())
-
- // Force type conversion from *struct to the func type.
- cfn := convnop(addr, clo.Type())
-
- // non-escaping temp to use, if any.
- if x := clo.Prealloc; x != nil {
- if !types.Identical(typ, x.Type()) {
- panic("closure type does not match order's assigned type")
- }
- addr.SetRight(x)
- clo.Prealloc = nil
- }
-
- return walkexpr(cfn, init)
-}
-
-func typecheckpartialcall(n ir.Node, sym *types.Sym) *ir.CallPartExpr {
- switch n.Op() {
- case ir.ODOTINTER, ir.ODOTMETH:
- break
-
- default:
- base.Fatalf("invalid typecheckpartialcall")
- }
- dot := n.(*ir.SelectorExpr)
-
- // Create top-level function.
- fn := makepartialcall(dot, dot.Type(), sym)
- fn.SetWrapper(true)
-
- return ir.NewCallPartExpr(dot.Pos(), dot.Left(), dot.Selection, fn)
-}
-
-// makepartialcall returns a DCLFUNC node representing the wrapper function (*-fm) needed
-// for partial calls.
-func makepartialcall(dot *ir.SelectorExpr, t0 *types.Type, meth *types.Sym) *ir.Func {
- rcvrtype := dot.Left().Type()
- sym := methodSymSuffix(rcvrtype, meth, "-fm")
-
- if sym.Uniq() {
- return sym.Def.(*ir.Func)
- }
- sym.SetUniq(true)
-
- savecurfn := Curfn
- saveLineNo := base.Pos
- Curfn = nil
-
- // Set line number equal to the line number where the method is declared.
- var m *types.Field
- if lookdot0(meth, rcvrtype, &m, false) == 1 && m.Pos.IsKnown() {
- base.Pos = m.Pos
- }
- // Note: !m.Pos.IsKnown() happens for method expressions where
- // the method is implicitly declared. The Error method of the
- // built-in error type is one such method. We leave the line
- // number at the use of the method expression in this
- // case. See issue 29389.
-
- tfn := ir.NewFuncType(base.Pos, nil,
- structargs(t0.Params(), true),
- structargs(t0.Results(), false))
-
- fn := dclfunc(sym, tfn)
- fn.SetDupok(true)
- fn.SetNeedctxt(true)
-
- // Declare and initialize variable holding receiver.
- cr := ir.NewClosureRead(rcvrtype, Rnd(int64(Widthptr), int64(rcvrtype.Align)))
- ptr := NewName(lookup(".this"))
- declare(ptr, ir.PAUTO)
- ptr.SetUsed(true)
- var body []ir.Node
- if rcvrtype.IsPtr() || rcvrtype.IsInterface() {
- ptr.SetType(rcvrtype)
- body = append(body, ir.Nod(ir.OAS, ptr, cr))
- } else {
- ptr.SetType(types.NewPtr(rcvrtype))
- body = append(body, ir.Nod(ir.OAS, ptr, nodAddr(cr)))
- }
-
- call := ir.Nod(ir.OCALL, nodSym(ir.OXDOT, ptr, meth), nil)
- call.PtrList().Set(paramNnames(tfn.Type()))
- call.SetIsDDD(tfn.Type().IsVariadic())
- if t0.NumResults() != 0 {
- ret := ir.Nod(ir.ORETURN, nil, nil)
- ret.PtrList().Set1(call)
- body = append(body, ret)
- } else {
- body = append(body, call)
- }
-
- fn.PtrBody().Set(body)
- funcbody()
-
- typecheckFunc(fn)
- // Need to typecheck the body of the just-generated wrapper.
- // typecheckslice() requires that Curfn is set when processing an ORETURN.
- Curfn = fn
- typecheckslice(fn.Body().Slice(), ctxStmt)
- sym.Def = fn
- Target.Decls = append(Target.Decls, fn)
- Curfn = savecurfn
- base.Pos = saveLineNo
-
- return fn
-}
-
-// partialCallType returns the struct type used to hold all the information
-// needed in the closure for n (n must be a OCALLPART node).
-// The address of a variable of the returned type can be cast to a func.
-func partialCallType(n *ir.CallPartExpr) *types.Type {
- t := tostruct([]*ir.Field{
- namedfield("F", types.Types[types.TUINTPTR]),
- namedfield("R", n.Left().Type()),
- })
- t.SetNoalg(true)
- return t
-}
-
-func walkpartialcall(n *ir.CallPartExpr, init *ir.Nodes) ir.Node {
- // Create closure in the form of a composite literal.
- // For x.M with receiver (x) type T, the generated code looks like:
- //
- // clos = &struct{F uintptr; R T}{T.M·f, x}
- //
- // Like walkclosure above.
-
- if n.Left().Type().IsInterface() {
- // Trigger panic for method on nil interface now.
- // Otherwise it happens in the wrapper and is confusing.
- n.SetLeft(cheapexpr(n.Left(), init))
- n.SetLeft(walkexpr(n.Left(), nil))
-
- tab := typecheck(ir.Nod(ir.OITAB, n.Left(), nil), ctxExpr)
-
- c := ir.Nod(ir.OCHECKNIL, tab, nil)
- c.SetTypecheck(1)
- init.Append(c)
- }
-
- typ := partialCallType(n)
-
- clos := ir.Nod(ir.OCOMPLIT, nil, ir.TypeNode(typ))
- clos.SetEsc(n.Esc())
- clos.PtrList().Set2(ir.Nod(ir.OCFUNC, n.Func().Nname, nil), n.Left())
-
- addr := nodAddr(clos)
- addr.SetEsc(n.Esc())
-
- // Force type conversion from *struct to the func type.
- cfn := convnop(addr, n.Type())
-
- // non-escaping temp to use, if any.
- if x := n.Prealloc; x != nil {
- if !types.Identical(typ, x.Type()) {
- panic("partial call type does not match order's assigned type")
- }
- addr.SetRight(x)
- n.Prealloc = nil
- }
-
- return walkexpr(cfn, init)
-}
-
-// callpartMethod returns the *types.Field representing the method
-// referenced by method value n.
-func callpartMethod(n ir.Node) *types.Field {
- return n.(*ir.CallPartExpr).Method
-}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "internal/race"
+ "math/rand"
+ "sort"
+ "sync"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/liveness"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/compile/internal/walk"
+)
+
+// "Portable" code generation.
+
+var (
+ compilequeue []*ir.Func // functions waiting to be compiled
+)
+
+func funccompile(fn *ir.Func) {
+ if ir.CurFunc != nil {
+ base.Fatalf("funccompile %v inside %v", fn.Sym(), ir.CurFunc.Sym())
+ }
+
+ if fn.Type() == nil {
+ if base.Errors() == 0 {
+ base.Fatalf("funccompile missing type")
+ }
+ return
+ }
+
+ // assign parameter offsets
+ types.CalcSize(fn.Type())
+
+ if len(fn.Body) == 0 {
+ // Initialize ABI wrappers if necessary.
+ ssagen.InitLSym(fn, false)
+ liveness.WriteFuncMap(fn)
+ return
+ }
+
+ typecheck.DeclContext = ir.PAUTO
+ ir.CurFunc = fn
+ compile(fn)
+ ir.CurFunc = nil
+ typecheck.DeclContext = ir.PEXTERN
+}
+
+func compile(fn *ir.Func) {
+ // Set up the function's LSym early to avoid data races with the assemblers.
+ // Do this before walk, as walk needs the LSym to set attributes/relocations
+ // (e.g. in markTypeUsedInInterface).
+ ssagen.InitLSym(fn, true)
+
+ errorsBefore := base.Errors()
+ walk.Walk(fn)
+ if base.Errors() > errorsBefore {
+ return
+ }
+
+ // From this point, there should be no uses of Curfn. Enforce that.
+ ir.CurFunc = nil
+
+ if ir.FuncName(fn) == "_" {
+ // We don't need to generate code for this function, just report errors in its body.
+ // At this point we've generated any errors needed.
+ // (Beyond here we generate only non-spec errors, like "stack frame too large".)
+ // See issue 29870.
+ return
+ }
+
+ // Make sure type syms are declared for all types that might
+ // be types of stack objects. We need to do this here
+ // because symbols must be allocated before the parallel
+ // phase of the compiler.
+ for _, n := range fn.Dcl {
+ switch n.Class_ {
+ case ir.PPARAM, ir.PPARAMOUT, ir.PAUTO:
+ if liveness.ShouldTrack(n) && n.Addrtaken() {
+ reflectdata.WriteType(n.Type())
+ // Also make sure we allocate a linker symbol
+ // for the stack object data, for the same reason.
+ if fn.LSym.Func().StackObjects == nil {
+ fn.LSym.Func().StackObjects = base.Ctxt.Lookup(fn.LSym.Name + ".stkobj")
+ }
+ }
+ }
+ }
+
+ if compilenow(fn) {
+ ssagen.Compile(fn, 0)
+ } else {
+ compilequeue = append(compilequeue, fn)
+ }
+}
+
+// compilenow reports whether to compile immediately.
+// If functions are not compiled immediately,
+// they are enqueued in compilequeue,
+// which is drained by compileFunctions.
+func compilenow(fn *ir.Func) bool {
+ // Issue 38068: if this function is a method AND an inline
+ // candidate AND was not inlined (yet), put it onto the compile
+ // queue instead of compiling it immediately. This is in case we
+ // wind up inlining it into a method wrapper that is generated by
+ // compiling a function later on in the Target.Decls list.
+ if ir.IsMethod(fn) && isInlinableButNotInlined(fn) {
+ return false
+ }
+ return base.Flag.LowerC == 1 && base.Debug.CompileLater == 0
+}
+
+// compileFunctions compiles all functions in compilequeue.
+// It fans out base.Flag.LowerC backend workers to do the work
+// and waits for them to complete.
+func compileFunctions() {
+ if len(compilequeue) != 0 {
+ types.CalcSizeDisabled = true // not safe to calculate sizes concurrently
+ if race.Enabled {
+ // Randomize compilation order to try to shake out races.
+ tmp := make([]*ir.Func, len(compilequeue))
+ perm := rand.Perm(len(compilequeue))
+ for i, v := range perm {
+ tmp[v] = compilequeue[i]
+ }
+ copy(compilequeue, tmp)
+ } else {
+ // Compile the longest functions first,
+ // since they're most likely to be the slowest.
+ // This helps avoid stragglers.
+ sort.Slice(compilequeue, func(i, j int) bool {
+ return len(compilequeue[i].Body) > len(compilequeue[j].Body)
+ })
+ }
+ var wg sync.WaitGroup
+ base.Ctxt.InParallel = true
+ c := make(chan *ir.Func, base.Flag.LowerC)
+ for i := 0; i < base.Flag.LowerC; i++ {
+ wg.Add(1)
+ go func(worker int) {
+ for fn := range c {
+ ssagen.Compile(fn, worker)
+ }
+ wg.Done()
+ }(i)
+ }
+ for _, fn := range compilequeue {
+ c <- fn
+ }
+ close(c)
+ compilequeue = nil
+ wg.Wait()
+ base.Ctxt.InParallel = false
+ types.CalcSizeDisabled = false
+ }
+}
+
+// isInlinableButNotInlined returns true if 'fn' was marked as an
+// inline candidate but then never inlined (presumably because we
+// found no call sites).
+func isInlinableButNotInlined(fn *ir.Func) bool {
+ if fn.Inl == nil {
+ return false
+ }
+ if fn.Sym() == nil {
+ return true
+ }
+ return !fn.Sym().Linksym().WasInlined()
+}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
- "bytes"
- "cmd/compile/internal/base"
- "cmd/compile/internal/ir"
- "cmd/compile/internal/types"
- "cmd/internal/obj"
- "cmd/internal/src"
- "fmt"
- "strings"
-)
-
-func EnableNoWriteBarrierRecCheck() {
- nowritebarrierrecCheck = newNowritebarrierrecChecker()
-}
-
-func NoWriteBarrierRecCheck() {
- // Write barriers are now known. Check the
- // call graph.
- nowritebarrierrecCheck.check()
- nowritebarrierrecCheck = nil
-}
-
-var nowritebarrierrecCheck *nowritebarrierrecChecker
-
-func testdclstack() {
- if !types.IsDclstackValid() {
- base.Fatalf("mark left on the dclstack")
- }
-}
-
-// redeclare emits a diagnostic about symbol s being redeclared at pos.
-func redeclare(pos src.XPos, s *types.Sym, where string) {
- if !s.Lastlineno.IsKnown() {
- pkgName := dotImportRefs[s.Def.(*ir.Ident)]
- base.ErrorfAt(pos, "%v redeclared %s\n"+
- "\t%v: previous declaration during import %q", s, where, base.FmtPos(pkgName.Pos()), pkgName.Pkg.Path)
- } else {
- prevPos := s.Lastlineno
-
- // When an import and a declaration collide in separate files,
- // present the import as the "redeclared", because the declaration
- // is visible where the import is, but not vice versa.
- // See issue 4510.
- if s.Def == nil {
- pos, prevPos = prevPos, pos
- }
-
- base.ErrorfAt(pos, "%v redeclared %s\n"+
- "\t%v: previous declaration", s, where, base.FmtPos(prevPos))
- }
-}
-
-var vargen int
-
-// declare individual names - var, typ, const
-
-var declare_typegen int
-
-// declare records that Node n declares symbol n.Sym in the specified
-// declaration context.
-func declare(n *ir.Name, ctxt ir.Class) {
- if ir.IsBlank(n) {
- return
- }
-
- s := n.Sym()
-
- // kludgy: typecheckok means we're past parsing. Eg genwrapper may declare out of package names later.
- if !inimport && !typecheckok && s.Pkg != types.LocalPkg {
- base.ErrorfAt(n.Pos(), "cannot declare name %v", s)
- }
-
- gen := 0
- if ctxt == ir.PEXTERN {
- if s.Name == "init" {
- base.ErrorfAt(n.Pos(), "cannot declare init - must be func")
- }
- if s.Name == "main" && s.Pkg.Name == "main" {
- base.ErrorfAt(n.Pos(), "cannot declare main - must be func")
- }
- Target.Externs = append(Target.Externs, n)
- } else {
- if Curfn == nil && ctxt == ir.PAUTO {
- base.Pos = n.Pos()
- base.Fatalf("automatic outside function")
- }
- if Curfn != nil && ctxt != ir.PFUNC && n.Op() == ir.ONAME {
- Curfn.Dcl = append(Curfn.Dcl, n)
- }
- if n.Op() == ir.OTYPE {
- declare_typegen++
- gen = declare_typegen
- } else if n.Op() == ir.ONAME && ctxt == ir.PAUTO && !strings.Contains(s.Name, "·") {
- vargen++
- gen = vargen
- }
- types.Pushdcl(s)
- n.Curfn = Curfn
- }
-
- if ctxt == ir.PAUTO {
- n.SetFrameOffset(0)
- }
-
- if s.Block == types.Block {
- // functype will print errors about duplicate function arguments.
- // Don't repeat the error here.
- if ctxt != ir.PPARAM && ctxt != ir.PPARAMOUT {
- redeclare(n.Pos(), s, "in this block")
- }
- }
-
- s.Block = types.Block
- s.Lastlineno = base.Pos
- s.Def = n
- n.Vargen = int32(gen)
- n.SetClass(ctxt)
- if ctxt == ir.PFUNC {
- n.Sym().SetFunc(true)
- }
-
- autoexport(n, ctxt)
-}
-
-// declare variables from grammar
-// new_name_list (type | [type] = expr_list)
-func variter(vl []*ir.Name, t ir.Ntype, el []ir.Node) []ir.Node {
- var init []ir.Node
- doexpr := len(el) > 0
-
- if len(el) == 1 && len(vl) > 1 {
- e := el[0]
- as2 := ir.Nod(ir.OAS2, nil, nil)
- as2.PtrRlist().Set1(e)
- for _, v := range vl {
- as2.PtrList().Append(v)
- declare(v, dclcontext)
- v.Ntype = t
- v.Defn = as2
- if Curfn != nil {
- init = append(init, ir.Nod(ir.ODCL, v, nil))
- }
- }
-
- return append(init, as2)
- }
-
- for i, v := range vl {
- var e ir.Node
- if doexpr {
- if i >= len(el) {
- base.Errorf("assignment mismatch: %d variables but %d values", len(vl), len(el))
- break
- }
- e = el[i]
- }
-
- declare(v, dclcontext)
- v.Ntype = t
-
- if e != nil || Curfn != nil || ir.IsBlank(v) {
- if Curfn != nil {
- init = append(init, ir.Nod(ir.ODCL, v, nil))
- }
- as := ir.Nod(ir.OAS, v, e)
- init = append(init, as)
- if e != nil {
- v.Defn = as
- }
- }
- }
-
- if len(el) > len(vl) {
- base.Errorf("assignment mismatch: %d variables but %d values", len(vl), len(el))
- }
- return init
-}
-
-// newFuncNameAt generates a new name node for a function or method.
-func newFuncNameAt(pos src.XPos, s *types.Sym, fn *ir.Func) *ir.Name {
- if fn.Nname != nil {
- base.Fatalf("newFuncName - already have name")
- }
- n := ir.NewNameAt(pos, s)
- n.SetFunc(fn)
- fn.Nname = n
- return n
-}
-
-func anonfield(typ *types.Type) *ir.Field {
- return symfield(nil, typ)
-}
-
-func namedfield(s string, typ *types.Type) *ir.Field {
- return symfield(lookup(s), typ)
-}
-
-func symfield(s *types.Sym, typ *types.Type) *ir.Field {
- return ir.NewField(base.Pos, s, nil, typ)
-}
-
-// oldname returns the Node that declares symbol s in the current scope.
-// If no such Node currently exists, an ONONAME Node is returned instead.
-// Automatically creates a new closure variable if the referenced symbol was
-// declared in a different (containing) function.
-func oldname(s *types.Sym) ir.Node {
- if s.Pkg != types.LocalPkg {
- return ir.NewIdent(base.Pos, s)
- }
-
- n := ir.AsNode(s.Def)
- if n == nil {
- // Maybe a top-level declaration will come along later to
- // define s. resolve will check s.Def again once all input
- // source has been processed.
- return ir.NewIdent(base.Pos, s)
- }
-
- if Curfn != nil && n.Op() == ir.ONAME && n.Name().Curfn != nil && n.Name().Curfn != Curfn {
- // Inner func is referring to var in outer func.
- //
- // TODO(rsc): If there is an outer variable x and we
- // are parsing x := 5 inside the closure, until we get to
- // the := it looks like a reference to the outer x so we'll
- // make x a closure variable unnecessarily.
- c := n.Name().Innermost
- if c == nil || c.Curfn != Curfn {
- // Do not have a closure var for the active closure yet; make one.
- c = NewName(s)
- c.SetClass(ir.PAUTOHEAP)
- c.SetIsClosureVar(true)
- c.SetIsDDD(n.IsDDD())
- c.Defn = n
-
- // Link into list of active closure variables.
- // Popped from list in func funcLit.
- c.Outer = n.Name().Innermost
- n.Name().Innermost = c
-
- Curfn.ClosureVars = append(Curfn.ClosureVars, c)
- }
-
- // return ref to closure var, not original
- return c
- }
-
- return n
-}
-
-// importName is like oldname,
-// but it reports an error if sym is from another package and not exported.
-func importName(sym *types.Sym) ir.Node {
- n := oldname(sym)
- if !types.IsExported(sym.Name) && sym.Pkg != types.LocalPkg {
- n.SetDiag(true)
- base.Errorf("cannot refer to unexported name %s.%s", sym.Pkg.Name, sym.Name)
- }
- return n
-}
-
-// := declarations
-func colasname(n ir.Node) bool {
- switch n.Op() {
- case ir.ONAME,
- ir.ONONAME,
- ir.OPACK,
- ir.OTYPE,
- ir.OLITERAL:
- return n.Sym() != nil
- }
-
- return false
-}
-
-func colasdefn(left []ir.Node, defn ir.Node) {
- for _, n := range left {
- if n.Sym() != nil {
- n.Sym().SetUniq(true)
- }
- }
-
- var nnew, nerr int
- for i, n := range left {
- if ir.IsBlank(n) {
- continue
- }
- if !colasname(n) {
- base.ErrorfAt(defn.Pos(), "non-name %v on left side of :=", n)
- nerr++
- continue
- }
-
- if !n.Sym().Uniq() {
- base.ErrorfAt(defn.Pos(), "%v repeated on left side of :=", n.Sym())
- n.SetDiag(true)
- nerr++
- continue
- }
-
- n.Sym().SetUniq(false)
- if n.Sym().Block == types.Block {
- continue
- }
-
- nnew++
- n := NewName(n.Sym())
- declare(n, dclcontext)
- n.Defn = defn
- defn.PtrInit().Append(ir.Nod(ir.ODCL, n, nil))
- left[i] = n
- }
-
- if nnew == 0 && nerr == 0 {
- base.ErrorfAt(defn.Pos(), "no new variables on left side of :=")
- }
-}
-
-// declare the function proper
-// and declare the arguments.
-// called in extern-declaration context
-// returns in auto-declaration context.
-func funchdr(fn *ir.Func) {
- // change the declaration context from extern to auto
- funcStack = append(funcStack, funcStackEnt{Curfn, dclcontext})
- Curfn = fn
- dclcontext = ir.PAUTO
-
- types.Markdcl()
-
- if fn.Nname.Ntype != nil {
- funcargs(fn.Nname.Ntype.(*ir.FuncType))
- } else {
- funcargs2(fn.Type())
- }
-}
-
-func funcargs(nt *ir.FuncType) {
- if nt.Op() != ir.OTFUNC {
- base.Fatalf("funcargs %v", nt.Op())
- }
-
- // re-start the variable generation number
- // we want to use small numbers for the return variables,
- // so let them have the chunk starting at 1.
- //
- // TODO(mdempsky): This is ugly, and only necessary because
- // esc.go uses Vargen to figure out result parameters' index
- // within the result tuple.
- vargen = len(nt.Results)
-
- // declare the receiver and in arguments.
- if nt.Recv != nil {
- funcarg(nt.Recv, ir.PPARAM)
- }
- for _, n := range nt.Params {
- funcarg(n, ir.PPARAM)
- }
-
- oldvargen := vargen
- vargen = 0
-
- // declare the out arguments.
- gen := len(nt.Params)
- for _, n := range nt.Results {
- if n.Sym == nil {
- // Name so that escape analysis can track it. ~r stands for 'result'.
- n.Sym = lookupN("~r", gen)
- gen++
- }
- if n.Sym.IsBlank() {
- // Give it a name so we can assign to it during return. ~b stands for 'blank'.
- // The name must be different from ~r above because if you have
- // func f() (_ int)
- // func g() int
- // f is allowed to use a plain 'return' with no arguments, while g is not.
- // So the two cases must be distinguished.
- n.Sym = lookupN("~b", gen)
- gen++
- }
-
- funcarg(n, ir.PPARAMOUT)
- }
-
- vargen = oldvargen
-}
-
-func funcarg(n *ir.Field, ctxt ir.Class) {
- if n.Sym == nil {
- return
- }
-
- name := ir.NewNameAt(n.Pos, n.Sym)
- n.Decl = name
- name.Ntype = n.Ntype
- name.SetIsDDD(n.IsDDD)
- declare(name, ctxt)
-
- vargen++
- n.Decl.Vargen = int32(vargen)
-}
-
-// Same as funcargs, except run over an already constructed TFUNC.
-// This happens during import, where the hidden_fndcl rule has
-// used functype directly to parse the function's type.
-func funcargs2(t *types.Type) {
- if t.Kind() != types.TFUNC {
- base.Fatalf("funcargs2 %v", t)
- }
-
- for _, f := range t.Recvs().Fields().Slice() {
- funcarg2(f, ir.PPARAM)
- }
- for _, f := range t.Params().Fields().Slice() {
- funcarg2(f, ir.PPARAM)
- }
- for _, f := range t.Results().Fields().Slice() {
- funcarg2(f, ir.PPARAMOUT)
- }
-}
-
-func funcarg2(f *types.Field, ctxt ir.Class) {
- if f.Sym == nil {
- return
- }
- n := ir.NewNameAt(f.Pos, f.Sym)
- f.Nname = n
- n.SetType(f.Type)
- n.SetIsDDD(f.IsDDD())
- declare(n, ctxt)
-}
-
-var funcStack []funcStackEnt // stack of previous values of Curfn/dclcontext
-
-type funcStackEnt struct {
- curfn *ir.Func
- dclcontext ir.Class
-}
-
-func CheckFuncStack() {
- if len(funcStack) != 0 {
- base.Fatalf("funcStack is non-empty: %v", len(funcStack))
- }
-}
-
-// finish the body.
-// called in auto-declaration context.
-// returns in extern-declaration context.
-func funcbody() {
- // change the declaration context from auto to previous context
- types.Popdcl()
- var e funcStackEnt
- funcStack, e = funcStack[:len(funcStack)-1], funcStack[len(funcStack)-1]
- Curfn, dclcontext = e.curfn, e.dclcontext
-}
-
-// structs, functions, and methods.
-// they don't belong here, but where do they belong?
-func checkembeddedtype(t *types.Type) {
- if t == nil {
- return
- }
-
- if t.Sym() == nil && t.IsPtr() {
- t = t.Elem()
- if t.IsInterface() {
- base.Errorf("embedded type cannot be a pointer to interface")
- }
- }
-
- if t.IsPtr() || t.IsUnsafePtr() {
- base.Errorf("embedded type cannot be a pointer")
- } else if t.Kind() == types.TFORW && !t.ForwardType().Embedlineno.IsKnown() {
- t.ForwardType().Embedlineno = base.Pos
- }
-}
-
-// checkdupfields emits errors for duplicately named fields or methods in
-// a list of struct or interface types.
-func checkdupfields(what string, fss ...[]*types.Field) {
- seen := make(map[*types.Sym]bool)
- for _, fs := range fss {
- for _, f := range fs {
- if f.Sym == nil || f.Sym.IsBlank() {
- continue
- }
- if seen[f.Sym] {
- base.ErrorfAt(f.Pos, "duplicate %s %s", what, f.Sym.Name)
- continue
- }
- seen[f.Sym] = true
- }
- }
-}
-
-// convert a parsed id/type list into
-// a type for struct/interface/arglist
-func tostruct(l []*ir.Field) *types.Type {
- lno := base.Pos
-
- fields := make([]*types.Field, len(l))
- for i, n := range l {
- base.Pos = n.Pos
-
- if n.Ntype != nil {
- n.Type = typecheckNtype(n.Ntype).Type()
- n.Ntype = nil
- }
- f := types.NewField(n.Pos, n.Sym, n.Type)
- if n.Embedded {
- checkembeddedtype(n.Type)
- f.Embedded = 1
- }
- f.Note = n.Note
- fields[i] = f
- }
- checkdupfields("field", fields)
-
- base.Pos = lno
- return types.NewStruct(types.LocalPkg, fields)
-}
-
-func tointerface(nmethods []*ir.Field) *types.Type {
- if len(nmethods) == 0 {
- return types.Types[types.TINTER]
- }
-
- lno := base.Pos
-
- methods := make([]*types.Field, len(nmethods))
- for i, n := range nmethods {
- base.Pos = n.Pos
- if n.Ntype != nil {
- n.Type = typecheckNtype(n.Ntype).Type()
- n.Ntype = nil
- }
- methods[i] = types.NewField(n.Pos, n.Sym, n.Type)
- }
-
- base.Pos = lno
- return types.NewInterface(types.LocalPkg, methods)
-}
-
-func fakeRecv() *ir.Field {
- return anonfield(types.FakeRecvType())
-}
-
-func fakeRecvField() *types.Field {
- return types.NewField(src.NoXPos, nil, types.FakeRecvType())
-}
-
-// isifacemethod reports whether (field) m is
-// an interface method. Such methods have the
-// special receiver type types.FakeRecvType().
-func isifacemethod(f *types.Type) bool {
- return f.Recv().Type == types.FakeRecvType()
-}
-
-// turn a parsed function declaration into a type
-func functype(nrecv *ir.Field, nparams, nresults []*ir.Field) *types.Type {
- funarg := func(n *ir.Field) *types.Field {
- lno := base.Pos
- base.Pos = n.Pos
-
- if n.Ntype != nil {
- n.Type = typecheckNtype(n.Ntype).Type()
- n.Ntype = nil
- }
-
- f := types.NewField(n.Pos, n.Sym, n.Type)
- f.SetIsDDD(n.IsDDD)
- if n.Decl != nil {
- n.Decl.SetType(f.Type)
- f.Nname = n.Decl
- }
-
- base.Pos = lno
- return f
- }
- funargs := func(nn []*ir.Field) []*types.Field {
- res := make([]*types.Field, len(nn))
- for i, n := range nn {
- res[i] = funarg(n)
- }
- return res
- }
-
- var recv *types.Field
- if nrecv != nil {
- recv = funarg(nrecv)
- }
-
- t := types.NewSignature(types.LocalPkg, recv, funargs(nparams), funargs(nresults))
- checkdupfields("argument", t.Recvs().FieldSlice(), t.Params().FieldSlice(), t.Results().FieldSlice())
- return t
-}
-
-func hasNamedResults(fn *ir.Func) bool {
- typ := fn.Type()
- return typ.NumResults() > 0 && types.OrigSym(typ.Results().Field(0).Sym) != nil
-}
-
-// methodSym returns the method symbol representing a method name
-// associated with a specific receiver type.
-//
-// Method symbols can be used to distinguish the same method appearing
-// in different method sets. For example, T.M and (*T).M have distinct
-// method symbols.
-//
-// The returned symbol will be marked as a function.
-func methodSym(recv *types.Type, msym *types.Sym) *types.Sym {
- sym := methodSymSuffix(recv, msym, "")
- sym.SetFunc(true)
- return sym
-}
-
-// methodSymSuffix is like methodsym, but allows attaching a
-// distinguisher suffix. To avoid collisions, the suffix must not
-// start with a letter, number, or period.
-func methodSymSuffix(recv *types.Type, msym *types.Sym, suffix string) *types.Sym {
- if msym.IsBlank() {
- base.Fatalf("blank method name")
- }
-
- rsym := recv.Sym()
- if recv.IsPtr() {
- if rsym != nil {
- base.Fatalf("declared pointer receiver type: %v", recv)
- }
- rsym = recv.Elem().Sym()
- }
-
- // Find the package the receiver type appeared in. For
- // anonymous receiver types (i.e., anonymous structs with
- // embedded fields), use the "go" pseudo-package instead.
- rpkg := gopkg
- if rsym != nil {
- rpkg = rsym.Pkg
- }
-
- var b bytes.Buffer
- if recv.IsPtr() {
- // The parentheses aren't really necessary, but
- // they're pretty traditional at this point.
- fmt.Fprintf(&b, "(%-S)", recv)
- } else {
- fmt.Fprintf(&b, "%-S", recv)
- }
-
- // A particular receiver type may have multiple non-exported
- // methods with the same name. To disambiguate them, include a
- // package qualifier for names that came from a different
- // package than the receiver type.
- if !types.IsExported(msym.Name) && msym.Pkg != rpkg {
- b.WriteString(".")
- b.WriteString(msym.Pkg.Prefix)
- }
-
- b.WriteString(".")
- b.WriteString(msym.Name)
- b.WriteString(suffix)
-
- return rpkg.LookupBytes(b.Bytes())
-}
-
-// Add a method, declared as a function.
-// - msym is the method symbol
-// - t is function type (with receiver)
-// Returns a pointer to the existing or added Field; or nil if there's an error.
-func addmethod(n *ir.Func, msym *types.Sym, t *types.Type, local, nointerface bool) *types.Field {
- if msym == nil {
- base.Fatalf("no method symbol")
- }
-
- // get parent type sym
- rf := t.Recv() // ptr to this structure
- if rf == nil {
- base.Errorf("missing receiver")
- return nil
- }
-
- mt := methtype(rf.Type)
- if mt == nil || mt.Sym() == nil {
- pa := rf.Type
- t := pa
- if t != nil && t.IsPtr() {
- if t.Sym() != nil {
- base.Errorf("invalid receiver type %v (%v is a pointer type)", pa, t)
- return nil
- }
- t = t.Elem()
- }
-
- switch {
- case t == nil || t.Broke():
- // rely on typecheck having complained before
- case t.Sym() == nil:
- base.Errorf("invalid receiver type %v (%v is not a defined type)", pa, t)
- case t.IsPtr():
- base.Errorf("invalid receiver type %v (%v is a pointer type)", pa, t)
- case t.IsInterface():
- base.Errorf("invalid receiver type %v (%v is an interface type)", pa, t)
- default:
- // Should have picked off all the reasons above,
- // but just in case, fall back to generic error.
- base.Errorf("invalid receiver type %v (%L / %L)", pa, pa, t)
- }
- return nil
- }
-
- if local && mt.Sym().Pkg != types.LocalPkg {
- base.Errorf("cannot define new methods on non-local type %v", mt)
- return nil
- }
-
- if msym.IsBlank() {
- return nil
- }
-
- if mt.IsStruct() {
- for _, f := range mt.Fields().Slice() {
- if f.Sym == msym {
- base.Errorf("type %v has both field and method named %v", mt, msym)
- f.SetBroke(true)
- return nil
- }
- }
- }
-
- for _, f := range mt.Methods().Slice() {
- if msym.Name != f.Sym.Name {
- continue
- }
- // types.Identical only checks that incoming and result parameters match,
- // so explicitly check that the receiver parameters match too.
- if !types.Identical(t, f.Type) || !types.Identical(t.Recv().Type, f.Type.Recv().Type) {
- base.Errorf("method redeclared: %v.%v\n\t%v\n\t%v", mt, msym, f.Type, t)
- }
- return f
- }
-
- f := types.NewField(base.Pos, msym, t)
- f.Nname = n.Nname
- f.SetNointerface(nointerface)
-
- mt.Methods().Append(f)
- return f
-}
-
-func funcsymname(s *types.Sym) string {
- return s.Name + "·f"
-}
-
-// funcsym returns s·f.
-func funcsym(s *types.Sym) *types.Sym {
- // funcsymsmu here serves to protect not just mutations of funcsyms (below),
- // but also the package lookup of the func sym name,
- // since this function gets called concurrently from the backend.
- // There are no other concurrent package lookups in the backend,
- // except for the types package, which is protected separately.
- // Reusing funcsymsmu to also cover this package lookup
- // avoids a general, broader, expensive package lookup mutex.
- // Note makefuncsym also does package look-up of func sym names,
- // but that it is only called serially, from the front end.
- funcsymsmu.Lock()
- sf, existed := s.Pkg.LookupOK(funcsymname(s))
- // Don't export s·f when compiling for dynamic linking.
- // When dynamically linking, the necessary function
- // symbols will be created explicitly with makefuncsym.
- // See the makefuncsym comment for details.
- if !base.Ctxt.Flag_dynlink && !existed {
- funcsyms = append(funcsyms, s)
- }
- funcsymsmu.Unlock()
- return sf
-}
-
-// makefuncsym ensures that s·f is exported.
-// It is only used with -dynlink.
-// When not compiling for dynamic linking,
-// the funcsyms are created as needed by
-// the packages that use them.
-// Normally we emit the s·f stubs as DUPOK syms,
-// but DUPOK doesn't work across shared library boundaries.
-// So instead, when dynamic linking, we only create
-// the s·f stubs in s's package.
-func makefuncsym(s *types.Sym) {
- if !base.Ctxt.Flag_dynlink {
- base.Fatalf("makefuncsym dynlink")
- }
- if s.IsBlank() {
- return
- }
- if base.Flag.CompilingRuntime && (s.Name == "getg" || s.Name == "getclosureptr" || s.Name == "getcallerpc" || s.Name == "getcallersp") {
- // runtime.getg(), getclosureptr(), getcallerpc(), and
- // getcallersp() are not real functions and so do not
- // get funcsyms.
- return
- }
- if _, existed := s.Pkg.LookupOK(funcsymname(s)); !existed {
- funcsyms = append(funcsyms, s)
- }
-}
-
-// setNodeNameFunc marks a node as a function.
-func setNodeNameFunc(n *ir.Name) {
- if n.Op() != ir.ONAME || n.Class() != ir.Pxxx {
- base.Fatalf("expected ONAME/Pxxx node, got %v", n)
- }
-
- n.SetClass(ir.PFUNC)
- n.Sym().SetFunc(true)
-}
-
-func dclfunc(sym *types.Sym, tfn ir.Ntype) *ir.Func {
- if tfn.Op() != ir.OTFUNC {
- base.Fatalf("expected OTFUNC node, got %v", tfn)
- }
-
- fn := ir.NewFunc(base.Pos)
- fn.Nname = newFuncNameAt(base.Pos, sym, fn)
- fn.Nname.Defn = fn
- fn.Nname.Ntype = tfn
- setNodeNameFunc(fn.Nname)
- funchdr(fn)
- fn.Nname.Ntype = typecheckNtype(fn.Nname.Ntype)
- return fn
-}
-
-type nowritebarrierrecChecker struct {
- // extraCalls contains extra function calls that may not be
- // visible during later analysis. It maps from the ODCLFUNC of
- // the caller to a list of callees.
- extraCalls map[*ir.Func][]nowritebarrierrecCall
-
- // curfn is the current function during AST walks.
- curfn *ir.Func
-}
-
-type nowritebarrierrecCall struct {
- target *ir.Func // caller or callee
- lineno src.XPos // line of call
-}
-
-// newNowritebarrierrecChecker creates a nowritebarrierrecChecker. It
-// must be called before transformclosure and walk.
-func newNowritebarrierrecChecker() *nowritebarrierrecChecker {
- c := &nowritebarrierrecChecker{
- extraCalls: make(map[*ir.Func][]nowritebarrierrecCall),
- }
-
- // Find all systemstack calls and record their targets. In
- // general, flow analysis can't see into systemstack, but it's
- // important to handle it for this check, so we model it
- // directly. This has to happen before transformclosure since
- // it's a lot harder to work out the argument after.
- for _, n := range Target.Decls {
- if n.Op() != ir.ODCLFUNC {
- continue
- }
- c.curfn = n.(*ir.Func)
- ir.Visit(n, c.findExtraCalls)
- }
- c.curfn = nil
- return c
-}
-
-func (c *nowritebarrierrecChecker) findExtraCalls(nn ir.Node) {
- if nn.Op() != ir.OCALLFUNC {
- return
- }
- n := nn.(*ir.CallExpr)
- if n.Left() == nil || n.Left().Op() != ir.ONAME {
- return
- }
- fn := n.Left().(*ir.Name)
- if fn.Class() != ir.PFUNC || fn.Name().Defn == nil {
- return
- }
- if !isRuntimePkg(fn.Sym().Pkg) || fn.Sym().Name != "systemstack" {
- return
- }
-
- var callee *ir.Func
- arg := n.List().First()
- switch arg.Op() {
- case ir.ONAME:
- callee = arg.Name().Defn.(*ir.Func)
- case ir.OCLOSURE:
- arg := arg.(*ir.ClosureExpr)
- callee = arg.Func()
- default:
- base.Fatalf("expected ONAME or OCLOSURE node, got %+v", arg)
- }
- if callee.Op() != ir.ODCLFUNC {
- base.Fatalf("expected ODCLFUNC node, got %+v", callee)
- }
- c.extraCalls[c.curfn] = append(c.extraCalls[c.curfn], nowritebarrierrecCall{callee, n.Pos()})
-}
-
-// recordCall records a call from ODCLFUNC node "from", to function
-// symbol "to" at position pos.
-//
-// This should be done as late as possible during compilation to
-// capture precise call graphs. The target of the call is an LSym
-// because that's all we know after we start SSA.
-//
-// This can be called concurrently for different from Nodes.
-func (c *nowritebarrierrecChecker) recordCall(fn *ir.Func, to *obj.LSym, pos src.XPos) {
- // We record this information on the *Func so this is concurrent-safe.
- if fn.NWBRCalls == nil {
- fn.NWBRCalls = new([]ir.SymAndPos)
- }
- *fn.NWBRCalls = append(*fn.NWBRCalls, ir.SymAndPos{Sym: to, Pos: pos})
-}
-
-func (c *nowritebarrierrecChecker) check() {
- // We walk the call graph as late as possible so we can
- // capture all calls created by lowering, but this means we
- // only get to see the obj.LSyms of calls. symToFunc lets us
- // get back to the ODCLFUNCs.
- symToFunc := make(map[*obj.LSym]*ir.Func)
- // funcs records the back-edges of the BFS call graph walk. It
- // maps from the ODCLFUNC of each function that must not have
- // write barriers to the call that inhibits them. Functions
- // that are directly marked go:nowritebarrierrec are in this
- // map with a zero-valued nowritebarrierrecCall. This also
- // acts as the set of marks for the BFS of the call graph.
- funcs := make(map[*ir.Func]nowritebarrierrecCall)
- // q is the queue of ODCLFUNC Nodes to visit in BFS order.
- var q ir.NameQueue
-
- for _, n := range Target.Decls {
- if n.Op() != ir.ODCLFUNC {
- continue
- }
- fn := n.(*ir.Func)
-
- symToFunc[fn.LSym] = fn
-
- // Make nowritebarrierrec functions BFS roots.
- if fn.Pragma&ir.Nowritebarrierrec != 0 {
- funcs[fn] = nowritebarrierrecCall{}
- q.PushRight(fn.Nname)
- }
- // Check go:nowritebarrier functions.
- if fn.Pragma&ir.Nowritebarrier != 0 && fn.WBPos.IsKnown() {
- base.ErrorfAt(fn.WBPos, "write barrier prohibited")
- }
- }
-
- // Perform a BFS of the call graph from all
- // go:nowritebarrierrec functions.
- enqueue := func(src, target *ir.Func, pos src.XPos) {
- if target.Pragma&ir.Yeswritebarrierrec != 0 {
- // Don't flow into this function.
- return
- }
- if _, ok := funcs[target]; ok {
- // Already found a path to target.
- return
- }
-
- // Record the path.
- funcs[target] = nowritebarrierrecCall{target: src, lineno: pos}
- q.PushRight(target.Nname)
- }
- for !q.Empty() {
- fn := q.PopLeft().Func()
-
- // Check fn.
- if fn.WBPos.IsKnown() {
- var err bytes.Buffer
- call := funcs[fn]
- for call.target != nil {
- fmt.Fprintf(&err, "\n\t%v: called by %v", base.FmtPos(call.lineno), call.target.Nname)
- call = funcs[call.target]
- }
- base.ErrorfAt(fn.WBPos, "write barrier prohibited by caller; %v%s", fn.Nname, err.String())
- continue
- }
-
- // Enqueue fn's calls.
- for _, callee := range c.extraCalls[fn] {
- enqueue(fn, callee.target, callee.lineno)
- }
- if fn.NWBRCalls == nil {
- continue
- }
- for _, callee := range *fn.NWBRCalls {
- target := symToFunc[callee.Sym]
- if target != nil {
- enqueue(fn, target, callee.Pos)
- }
- }
- }
-}
import (
"cmd/compile/internal/base"
+ "cmd/compile/internal/inline"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/bio"
- "cmd/internal/src"
"fmt"
"go/constant"
)
}
}
-// exportsym marks n for export (or reexport).
-func exportsym(n *ir.Name) {
- if n.Sym().OnExportList() {
- return
- }
- n.Sym().SetOnExportList(true)
-
- if base.Flag.E != 0 {
- fmt.Printf("export symbol %v\n", n.Sym())
- }
-
- Target.Exports = append(Target.Exports, n)
-}
-
-func initname(s string) bool {
- return s == "init"
-}
-
-func autoexport(n *ir.Name, ctxt ir.Class) {
- if n.Sym().Pkg != types.LocalPkg {
- return
- }
- if (ctxt != ir.PEXTERN && ctxt != ir.PFUNC) || dclcontext != ir.PEXTERN {
- return
- }
- if n.Type() != nil && n.Type().IsKind(types.TFUNC) && ir.IsMethod(n) {
- return
- }
-
- if types.IsExported(n.Sym().Name) || initname(n.Sym().Name) {
- exportsym(n)
- }
- if base.Flag.AsmHdr != "" && !n.Sym().Asm() {
- n.Sym().SetAsm(true)
- Target.Asms = append(Target.Asms, n)
- }
-}
-
func dumpexport(bout *bio.Writer) {
p := &exporter{marked: make(map[*types.Type]bool)}
- for _, n := range Target.Exports {
+ for _, n := range typecheck.Target.Exports {
p.markObject(n)
}
// The linker also looks for the $$ marker - use char after $$ to distinguish format.
exportf(bout, "\n$$B\n") // indicate binary export format
off := bout.Offset()
- iexport(bout.Writer)
+ typecheck.WriteExports(bout.Writer)
size := bout.Offset() - off
exportf(bout, "\n$$\n")
}
}
-func importsym(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class) *ir.Name {
- if n := s.PkgDef(); n != nil {
- base.Fatalf("importsym of symbol that already exists: %v", n)
- }
-
- n := ir.NewDeclNameAt(pos, op, s)
- n.SetClass(ctxt) // TODO(mdempsky): Move this into NewDeclNameAt too?
- s.SetPkgDef(n)
- s.Importdef = ipkg
- return n
-}
-
-// importtype returns the named type declared by symbol s.
-// If no such type has been declared yet, a forward declaration is returned.
-// ipkg is the package being imported
-func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *ir.Name {
- n := importsym(ipkg, pos, s, ir.OTYPE, ir.PEXTERN)
- n.SetType(types.NewNamed(n))
- return n
-}
-
-// importobj declares symbol s as an imported object representable by op.
-// ipkg is the package being imported
-func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class, t *types.Type) *ir.Name {
- n := importsym(ipkg, pos, s, op, ctxt)
- n.SetType(t)
- if ctxt == ir.PFUNC {
- n.Sym().SetFunc(true)
- }
- return n
-}
-
-// importconst declares symbol s as an imported constant with type t and value val.
-// ipkg is the package being imported
-func importconst(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type, val constant.Value) *ir.Name {
- n := importobj(ipkg, pos, s, ir.OLITERAL, ir.PEXTERN, t)
- n.SetVal(val)
- return n
-}
-
-// importfunc declares symbol s as an imported function with type t.
-// ipkg is the package being imported
-func importfunc(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
- n := importobj(ipkg, pos, s, ir.ONAME, ir.PFUNC, t)
-
- fn := ir.NewFunc(pos)
- fn.SetType(t)
- n.SetFunc(fn)
- fn.Nname = n
-
- return n
-}
-
-// importvar declares symbol s as an imported variable with type t.
-// ipkg is the package being imported
-func importvar(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
- return importobj(ipkg, pos, s, ir.ONAME, ir.PEXTERN, t)
-}
-
-// importalias declares symbol s as an imported type alias with type t.
-// ipkg is the package being imported
-func importalias(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
- return importobj(ipkg, pos, s, ir.OTYPE, ir.PEXTERN, t)
-}
-
func dumpasmhdr() {
b, err := bio.Create(base.Flag.AsmHdr)
if err != nil {
base.Fatalf("%v", err)
}
fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", types.LocalPkg.Name)
- for _, n := range Target.Asms {
+ for _, n := range typecheck.Target.Asms {
if n.Sym().IsBlank() {
continue
}
b.Close()
}
+
+type exporter struct {
+ marked map[*types.Type]bool // types already seen by markType
+}
+
+// markObject visits a reachable object.
+func (p *exporter) markObject(n ir.Node) {
+ if n.Op() == ir.ONAME {
+ n := n.(*ir.Name)
+ if n.Class_ == ir.PFUNC {
+ inline.Inline_Flood(n, typecheck.Export)
+ }
+ }
+
+ p.markType(n.Type())
+}
+
+// markType recursively visits types reachable from t to identify
+// functions whose inline bodies may be needed.
+func (p *exporter) markType(t *types.Type) {
+ if p.marked[t] {
+ return
+ }
+ p.marked[t] = true
+
+ // If this is a named type, mark all of its associated
+ // methods. Skip interface types because t.Methods contains
+ // only their unexpanded method set (i.e., exclusive of
+ // interface embeddings), and the switch statement below
+ // handles their full method set.
+ if t.Sym() != nil && t.Kind() != types.TINTER {
+ for _, m := range t.Methods().Slice() {
+ if types.IsExported(m.Sym.Name) {
+ p.markObject(ir.AsNode(m.Nname))
+ }
+ }
+ }
+
+ // Recursively mark any types that can be produced given a
+ // value of type t: dereferencing a pointer; indexing or
+ // iterating over an array, slice, or map; receiving from a
+ // channel; accessing a struct field or interface method; or
+ // calling a function.
+ //
+ // Notably, we don't mark function parameter types, because
+ // the user already needs some way to construct values of
+ // those types.
+ switch t.Kind() {
+ case types.TPTR, types.TARRAY, types.TSLICE:
+ p.markType(t.Elem())
+
+ case types.TCHAN:
+ if t.ChanDir().CanRecv() {
+ p.markType(t.Elem())
+ }
+
+ case types.TMAP:
+ p.markType(t.Key())
+ p.markType(t.Elem())
+
+ case types.TSTRUCT:
+ for _, f := range t.FieldSlice() {
+ if types.IsExported(f.Sym.Name) || f.Embedded != 0 {
+ p.markType(f.Type)
+ }
+ }
+
+ case types.TFUNC:
+ for _, f := range t.Results().FieldSlice() {
+ p.markType(f.Type)
+ }
+
+ case types.TINTER:
+ for _, f := range t.FieldSlice() {
+ if types.IsExported(f.Sym.Name) {
+ p.markType(f.Type)
+ }
+ }
+ }
+}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
- "cmd/compile/internal/base"
- "cmd/compile/internal/ir"
- "cmd/compile/internal/types"
- "cmd/internal/obj"
- "cmd/internal/src"
- "strconv"
-)
-
-// sysfunc looks up Go function name in package runtime. This function
-// must follow the internal calling convention.
-func sysfunc(name string) *obj.LSym {
- s := Runtimepkg.Lookup(name)
- s.SetFunc(true)
- return s.Linksym()
-}
-
-// sysvar looks up a variable (or assembly function) name in package
-// runtime. If this is a function, it may have a special calling
-// convention.
-func sysvar(name string) *obj.LSym {
- return Runtimepkg.Lookup(name).Linksym()
-}
-
-// isParamStackCopy reports whether this is the on-stack copy of a
-// function parameter that moved to the heap.
-func isParamStackCopy(n ir.Node) bool {
- if n.Op() != ir.ONAME {
- return false
- }
- name := n.(*ir.Name)
- return (name.Class() == ir.PPARAM || name.Class() == ir.PPARAMOUT) && name.Heapaddr != nil
-}
-
-// isParamHeapCopy reports whether this is the on-heap copy of
-// a function parameter that moved to the heap.
-func isParamHeapCopy(n ir.Node) bool {
- if n.Op() != ir.ONAME {
- return false
- }
- name := n.(*ir.Name)
- return name.Class() == ir.PAUTOHEAP && name.Name().Stackcopy != nil
-}
-
-// autotmpname returns the name for an autotmp variable numbered n.
-func autotmpname(n int) string {
- // Give each tmp a different name so that they can be registerized.
- // Add a preceding . to avoid clashing with legal names.
- const prefix = ".autotmp_"
- // Start with a buffer big enough to hold a large n.
- b := []byte(prefix + " ")[:len(prefix)]
- b = strconv.AppendInt(b, int64(n), 10)
- return types.InternString(b)
-}
-
-// make a new Node off the books
-func tempAt(pos src.XPos, curfn *ir.Func, t *types.Type) *ir.Name {
- if curfn == nil {
- base.Fatalf("no curfn for tempAt")
- }
- if curfn.Op() == ir.OCLOSURE {
- ir.Dump("tempAt", curfn)
- base.Fatalf("adding tempAt to wrong closure function")
- }
- if t == nil {
- base.Fatalf("tempAt called with nil type")
- }
-
- s := &types.Sym{
- Name: autotmpname(len(curfn.Dcl)),
- Pkg: types.LocalPkg,
- }
- n := ir.NewNameAt(pos, s)
- s.Def = n
- n.SetType(t)
- n.SetClass(ir.PAUTO)
- n.SetEsc(EscNever)
- n.Curfn = curfn
- n.SetUsed(true)
- n.SetAutoTemp(true)
- curfn.Dcl = append(curfn.Dcl, n)
-
- dowidth(t)
-
- return n
-}
-
-func temp(t *types.Type) *ir.Name {
- return tempAt(base.Pos, Curfn, t)
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
- "cmd/compile/internal/base"
- "cmd/compile/internal/ir"
- "cmd/compile/internal/ssa"
- "cmd/compile/internal/types"
- "cmd/internal/obj"
- "cmd/internal/src"
- "sync"
-)
-
-var (
- // maximum size variable which we will allocate on the stack.
- // This limit is for explicit variable declarations like "var x T" or "x := ...".
- // Note: the flag smallframes can update this value.
- maxStackVarSize = int64(10 * 1024 * 1024)
-
- // maximum size of implicit variables that we will allocate on the stack.
- // p := new(T) allocating T on the stack
- // p := &T{} allocating T on the stack
- // s := make([]T, n) allocating [n]T on the stack
- // s := []byte("...") allocating [n]byte on the stack
- // Note: the flag smallframes can update this value.
- maxImplicitStackVarSize = int64(64 * 1024)
-
- // smallArrayBytes is the maximum size of an array which is considered small.
- // Small arrays will be initialized directly with a sequence of constant stores.
- // Large arrays will be initialized by copying from a static temp.
- // 256 bytes was chosen to minimize generated code + statictmp size.
- smallArrayBytes = int64(256)
-)
-
-// isRuntimePkg reports whether p is package runtime.
-func isRuntimePkg(p *types.Pkg) bool {
- if base.Flag.CompilingRuntime && p == types.LocalPkg {
- return true
- }
- return p.Path == "runtime"
-}
-
-// isReflectPkg reports whether p is package reflect.
-func isReflectPkg(p *types.Pkg) bool {
- if p == types.LocalPkg {
- return base.Ctxt.Pkgpath == "reflect"
- }
- return p.Path == "reflect"
-}
-
-// Slices in the runtime are represented by three components:
-//
-// type slice struct {
-// ptr unsafe.Pointer
-// len int
-// cap int
-// }
-//
-// Strings in the runtime are represented by two components:
-//
-// type string struct {
-// ptr unsafe.Pointer
-// len int
-// }
-//
-// These variables are the offsets of fields and sizes of these structs.
-var (
- slicePtrOffset int64
- sliceLenOffset int64
- sliceCapOffset int64
-
- sizeofSlice int64
- sizeofString int64
-)
-
-var pragcgobuf [][]string
-
-var decldepth int32
-
-var nolocalimports bool
-
-var inimport bool // set during import
-
-var itabpkg *types.Pkg // fake pkg for itab entries
-
-var itablinkpkg *types.Pkg // fake package for runtime itab entries
-
-var Runtimepkg *types.Pkg // fake package runtime
-
-var racepkg *types.Pkg // package runtime/race
-
-var msanpkg *types.Pkg // package runtime/msan
-
-var unsafepkg *types.Pkg // package unsafe
-
-var trackpkg *types.Pkg // fake package for field tracking
-
-var mappkg *types.Pkg // fake package for map zero value
-
-var gopkg *types.Pkg // pseudo-package for method symbols on anonymous receiver types
-
-var zerosize int64
-
-var simtype [types.NTYPE]types.Kind
-
-var (
- isInt [types.NTYPE]bool
- isFloat [types.NTYPE]bool
- isComplex [types.NTYPE]bool
- issimple [types.NTYPE]bool
-)
-
-var (
- okforeq [types.NTYPE]bool
- okforadd [types.NTYPE]bool
- okforand [types.NTYPE]bool
- okfornone [types.NTYPE]bool
- okforbool [types.NTYPE]bool
- okforcap [types.NTYPE]bool
- okforlen [types.NTYPE]bool
- okforarith [types.NTYPE]bool
-)
-
-var okforcmp [types.NTYPE]bool
-
-var (
- okfor [ir.OEND][]bool
- iscmp [ir.OEND]bool
-)
-
-var (
- funcsymsmu sync.Mutex // protects funcsyms and associated package lookups (see func funcsym)
- funcsyms []*types.Sym
-)
-
-var dclcontext ir.Class // PEXTERN/PAUTO
-
-var Curfn *ir.Func
-
-var Widthptr int
-
-var Widthreg int
-
-var typecheckok bool
-
-// Whether we are adding any sort of code instrumentation, such as
-// when the race detector is enabled.
-var instrumenting bool
-
-var nodfp *ir.Name
-
-var autogeneratedPos src.XPos
-
-// interface to back end
-
-type Arch struct {
- LinkArch *obj.LinkArch
-
- REGSP int
- MAXWIDTH int64
- SoftFloat bool
-
- PadFrame func(int64) int64
-
- // ZeroRange zeroes a range of memory on stack. It is only inserted
- // at function entry, and it is ok to clobber registers.
- ZeroRange func(*Progs, *obj.Prog, int64, int64, *uint32) *obj.Prog
-
- Ginsnop func(*Progs) *obj.Prog
- Ginsnopdefer func(*Progs) *obj.Prog // special ginsnop for deferreturn
-
- // SSAMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
- SSAMarkMoves func(*SSAGenState, *ssa.Block)
-
- // SSAGenValue emits Prog(s) for the Value.
- SSAGenValue func(*SSAGenState, *ssa.Value)
-
- // SSAGenBlock emits end-of-block Progs. SSAGenValue should be called
- // for all values in the block before SSAGenBlock.
- SSAGenBlock func(s *SSAGenState, b, next *ssa.Block)
-}
-
-var thearch Arch
-
-var (
- staticuint64s *ir.Name
- zerobase *ir.Name
-
- assertE2I,
- assertE2I2,
- assertI2I,
- assertI2I2,
- deferproc,
- deferprocStack,
- Deferreturn,
- Duffcopy,
- Duffzero,
- gcWriteBarrier,
- goschedguarded,
- growslice,
- msanread,
- msanwrite,
- msanmove,
- newobject,
- newproc,
- panicdivide,
- panicshift,
- panicdottypeE,
- panicdottypeI,
- panicnildottype,
- panicoverflow,
- raceread,
- racereadrange,
- racewrite,
- racewriterange,
- x86HasPOPCNT,
- x86HasSSE41,
- x86HasFMA,
- armHasVFPv4,
- arm64HasATOMICS,
- typedmemclr,
- typedmemmove,
- Udiv,
- writeBarrier,
- zerobaseSym *obj.LSym
-
- BoundsCheckFunc [ssa.BoundsKindCount]*obj.LSym
- ExtendCheckFunc [ssa.BoundsKindCount]*obj.LSym
-
- // Wasm
- WasmMove,
- WasmZero,
- WasmDiv,
- WasmTruncS,
- WasmTruncU,
- SigPanic *obj.LSym
-)
-
-// GCWriteBarrierReg maps from registers to gcWriteBarrier implementation LSyms.
-var GCWriteBarrierReg map[int16]*obj.LSym
+++ /dev/null
-// Derived from Inferno utils/6c/txt.c
-// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6c/txt.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package gc
-
-import (
- "cmd/compile/internal/base"
- "cmd/compile/internal/ir"
- "cmd/compile/internal/ssa"
- "cmd/compile/internal/types"
- "cmd/internal/obj"
- "cmd/internal/objabi"
- "cmd/internal/src"
- "fmt"
- "os"
-)
-
-var sharedProgArray = new([10000]obj.Prog) // *T instead of T to work around issue 19839
-
-// Progs accumulates Progs for a function and converts them into machine code.
-type Progs struct {
- Text *obj.Prog // ATEXT Prog for this function
- next *obj.Prog // next Prog
- pc int64 // virtual PC; count of Progs
- pos src.XPos // position to use for new Progs
- curfn *ir.Func // fn these Progs are for
- progcache []obj.Prog // local progcache
- cacheidx int // first free element of progcache
-
- nextLive LivenessIndex // liveness index for the next Prog
- prevLive LivenessIndex // last emitted liveness index
-}
-
-// newProgs returns a new Progs for fn.
-// worker indicates which of the backend workers will use the Progs.
-func newProgs(fn *ir.Func, worker int) *Progs {
- pp := new(Progs)
- if base.Ctxt.CanReuseProgs() {
- sz := len(sharedProgArray) / base.Flag.LowerC
- pp.progcache = sharedProgArray[sz*worker : sz*(worker+1)]
- }
- pp.curfn = fn
-
- // prime the pump
- pp.next = pp.NewProg()
- pp.clearp(pp.next)
-
- pp.pos = fn.Pos()
- pp.settext(fn)
- // PCDATA tables implicitly start with index -1.
- pp.prevLive = LivenessIndex{-1, false}
- pp.nextLive = pp.prevLive
- return pp
-}
-
-func (pp *Progs) NewProg() *obj.Prog {
- var p *obj.Prog
- if pp.cacheidx < len(pp.progcache) {
- p = &pp.progcache[pp.cacheidx]
- pp.cacheidx++
- } else {
- p = new(obj.Prog)
- }
- p.Ctxt = base.Ctxt
- return p
-}
-
-// Flush converts from pp to machine code.
-func (pp *Progs) Flush() {
- plist := &obj.Plist{Firstpc: pp.Text, Curfn: pp.curfn}
- obj.Flushplist(base.Ctxt, plist, pp.NewProg, base.Ctxt.Pkgpath)
-}
-
-// Free clears pp and any associated resources.
-func (pp *Progs) Free() {
- if base.Ctxt.CanReuseProgs() {
- // Clear progs to enable GC and avoid abuse.
- s := pp.progcache[:pp.cacheidx]
- for i := range s {
- s[i] = obj.Prog{}
- }
- }
- // Clear pp to avoid abuse.
- *pp = Progs{}
-}
-
-// Prog adds a Prog with instruction As to pp.
-func (pp *Progs) Prog(as obj.As) *obj.Prog {
- if pp.nextLive.StackMapValid() && pp.nextLive.stackMapIndex != pp.prevLive.stackMapIndex {
- // Emit stack map index change.
- idx := pp.nextLive.stackMapIndex
- pp.prevLive.stackMapIndex = idx
- p := pp.Prog(obj.APCDATA)
- Addrconst(&p.From, objabi.PCDATA_StackMapIndex)
- Addrconst(&p.To, int64(idx))
- }
- if pp.nextLive.isUnsafePoint != pp.prevLive.isUnsafePoint {
- // Emit unsafe-point marker.
- pp.prevLive.isUnsafePoint = pp.nextLive.isUnsafePoint
- p := pp.Prog(obj.APCDATA)
- Addrconst(&p.From, objabi.PCDATA_UnsafePoint)
- if pp.nextLive.isUnsafePoint {
- Addrconst(&p.To, objabi.PCDATA_UnsafePointUnsafe)
- } else {
- Addrconst(&p.To, objabi.PCDATA_UnsafePointSafe)
- }
- }
-
- p := pp.next
- pp.next = pp.NewProg()
- pp.clearp(pp.next)
- p.Link = pp.next
-
- if !pp.pos.IsKnown() && base.Flag.K != 0 {
- base.Warn("prog: unknown position (line 0)")
- }
-
- p.As = as
- p.Pos = pp.pos
- if pp.pos.IsStmt() == src.PosIsStmt {
- // Clear IsStmt for later Progs at this pos provided that as can be marked as a stmt
- if ssa.LosesStmtMark(as) {
- return p
- }
- pp.pos = pp.pos.WithNotStmt()
- }
- return p
-}
-
-func (pp *Progs) clearp(p *obj.Prog) {
- obj.Nopout(p)
- p.As = obj.AEND
- p.Pc = pp.pc
- pp.pc++
-}
-
-func (pp *Progs) Appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int16, foffset int64, ttype obj.AddrType, treg int16, toffset int64) *obj.Prog {
- q := pp.NewProg()
- pp.clearp(q)
- q.As = as
- q.Pos = p.Pos
- q.From.Type = ftype
- q.From.Reg = freg
- q.From.Offset = foffset
- q.To.Type = ttype
- q.To.Reg = treg
- q.To.Offset = toffset
- q.Link = p.Link
- p.Link = q
- return q
-}
-
-func (pp *Progs) settext(fn *ir.Func) {
- if pp.Text != nil {
- base.Fatalf("Progs.settext called twice")
- }
- ptxt := pp.Prog(obj.ATEXT)
- pp.Text = ptxt
-
- fn.LSym.Func().Text = ptxt
- ptxt.From.Type = obj.TYPE_MEM
- ptxt.From.Name = obj.NAME_EXTERN
- ptxt.From.Sym = fn.LSym
-}
-
-// makeABIWrapper creates a new function that wraps a cross-ABI call
-// to "f". The wrapper is marked as an ABIWRAPPER.
-func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) {
-
- // Q: is this needed?
- savepos := base.Pos
- savedclcontext := dclcontext
- savedcurfn := Curfn
-
- base.Pos = autogeneratedPos
- dclcontext = ir.PEXTERN
-
- // At the moment we don't support wrapping a method, we'd need machinery
- // below to handle the receiver. Panic if we see this scenario.
- ft := f.Nname.Ntype.Type()
- if ft.NumRecvs() != 0 {
- panic("makeABIWrapper support for wrapping methods not implemented")
- }
-
- // Manufacture a new func type to use for the wrapper.
- var noReceiver *ir.Field
- tfn := ir.NewFuncType(base.Pos,
- noReceiver,
- structargs(ft.Params(), true),
- structargs(ft.Results(), false))
-
- // Reuse f's types.Sym to create a new ODCLFUNC/function.
- fn := dclfunc(f.Nname.Sym(), tfn)
- fn.SetDupok(true)
- fn.SetWrapper(true) // ignore frame for panic+recover matching
-
- // Select LSYM now.
- asym := base.Ctxt.LookupABI(f.LSym.Name, wrapperABI)
- asym.Type = objabi.STEXT
- if fn.LSym != nil {
- panic("unexpected")
- }
- fn.LSym = asym
-
- // ABI0-to-ABIInternal wrappers will be mainly loading params from
- // stack into registers (and/or storing stack locations back to
- // registers after the wrapped call); in most cases they won't
- // need to allocate stack space, so it should be OK to mark them
- // as NOSPLIT in these cases. In addition, my assumption is that
- // functions written in assembly are NOSPLIT in most (but not all)
- // cases. In the case of an ABIInternal target that has too many
- // parameters to fit into registers, the wrapper would need to
- // allocate stack space, but this seems like an unlikely scenario.
- // Hence: mark these wrappers NOSPLIT.
- //
- // ABIInternal-to-ABI0 wrappers on the other hand will be taking
- // things in registers and pushing them onto the stack prior to
- // the ABI0 call, meaning that they will always need to allocate
- // stack space. If the compiler marks them as NOSPLIT this seems
- // as though it could lead to situations where the the linker's
- // nosplit-overflow analysis would trigger a link failure. On the
- // other hand if they not tagged NOSPLIT then this could cause
- // problems when building the runtime (since there may be calls to
- // asm routine in cases where it's not safe to grow the stack). In
- // most cases the wrapper would be (in effect) inlined, but are
- // there (perhaps) indirect calls from the runtime that could run
- // into trouble here.
- // FIXME: at the moment all.bash does not pass when I leave out
- // NOSPLIT for these wrappers, so all are currently tagged with NOSPLIT.
- setupTextLSym(fn, obj.NOSPLIT|obj.ABIWRAPPER)
-
- // Generate call. Use tail call if no params and no returns,
- // but a regular call otherwise.
- //
- // Note: ideally we would be using a tail call in cases where
- // there are params but no returns for ABI0->ABIInternal wrappers,
- // provided that all params fit into registers (e.g. we don't have
- // to allocate any stack space). Doing this will require some
- // extra work in typecheck/walk/ssa, might want to add a new node
- // OTAILCALL or something to this effect.
- var tail ir.Node
- if tfn.Type().NumResults() == 0 && tfn.Type().NumParams() == 0 && tfn.Type().NumRecvs() == 0 {
- tail = nodSym(ir.ORETJMP, nil, f.Nname.Sym())
- } else {
- call := ir.Nod(ir.OCALL, f.Nname, nil)
- call.PtrList().Set(paramNnames(tfn.Type()))
- call.SetIsDDD(tfn.Type().IsVariadic())
- tail = call
- if tfn.Type().NumResults() > 0 {
- n := ir.Nod(ir.ORETURN, nil, nil)
- n.PtrList().Set1(call)
- tail = n
- }
- }
- fn.PtrBody().Append(tail)
-
- funcbody()
- if base.Debug.DclStack != 0 {
- testdclstack()
- }
-
- typecheckFunc(fn)
- Curfn = fn
- typecheckslice(fn.Body().Slice(), ctxStmt)
-
- escapeFuncs([]*ir.Func{fn}, false)
-
- Target.Decls = append(Target.Decls, fn)
-
- // Restore previous context.
- base.Pos = savepos
- dclcontext = savedclcontext
- Curfn = savedcurfn
-}
-
-// initLSym defines f's obj.LSym and initializes it based on the
-// properties of f. This includes setting the symbol flags and ABI and
-// creating and initializing related DWARF symbols.
-//
-// initLSym must be called exactly once per function and must be
-// called for both functions with bodies and functions without bodies.
-// For body-less functions, we only create the LSym; for functions
-// with bodies call a helper to setup up / populate the LSym.
-func initLSym(f *ir.Func, hasBody bool) {
- // FIXME: for new-style ABI wrappers, we set up the lsym at the
- // point the wrapper is created.
- if f.LSym != nil && base.Flag.ABIWrap {
- return
- }
- selectLSym(f, hasBody)
- if hasBody {
- setupTextLSym(f, 0)
- }
-}
-
-// selectLSym sets up the LSym for a given function, and
-// makes calls to helpers to create ABI wrappers if needed.
-func selectLSym(f *ir.Func, hasBody bool) {
- if f.LSym != nil {
- base.Fatalf("Func.initLSym called twice")
- }
-
- if nam := f.Nname; !ir.IsBlank(nam) {
-
- var wrapperABI obj.ABI
- needABIWrapper := false
- defABI, hasDefABI := symabiDefs[nam.Sym().LinksymName()]
- if hasDefABI && defABI == obj.ABI0 {
- // Symbol is defined as ABI0. Create an
- // Internal -> ABI0 wrapper.
- f.LSym = nam.Sym().LinksymABI0()
- needABIWrapper, wrapperABI = true, obj.ABIInternal
- } else {
- f.LSym = nam.Sym().Linksym()
- // No ABI override. Check that the symbol is
- // using the expected ABI.
- want := obj.ABIInternal
- if f.LSym.ABI() != want {
- base.Fatalf("function symbol %s has the wrong ABI %v, expected %v", f.LSym.Name, f.LSym.ABI(), want)
- }
- }
- if f.Pragma&ir.Systemstack != 0 {
- f.LSym.Set(obj.AttrCFunc, true)
- }
-
- isLinknameExported := nam.Sym().Linkname != "" && (hasBody || hasDefABI)
- if abi, ok := symabiRefs[f.LSym.Name]; (ok && abi == obj.ABI0) || isLinknameExported {
- // Either 1) this symbol is definitely
- // referenced as ABI0 from this package; or 2)
- // this symbol is defined in this package but
- // given a linkname, indicating that it may be
- // referenced from another package. Create an
- // ABI0 -> Internal wrapper so it can be
- // called as ABI0. In case 2, it's important
- // that we know it's defined in this package
- // since other packages may "pull" symbols
- // using linkname and we don't want to create
- // duplicate ABI wrappers.
- if f.LSym.ABI() != obj.ABI0 {
- needABIWrapper, wrapperABI = true, obj.ABI0
- }
- }
-
- if needABIWrapper {
- if !useABIWrapGen(f) {
- // Fallback: use alias instead. FIXME.
-
- // These LSyms have the same name as the
- // native function, so we create them directly
- // rather than looking them up. The uniqueness
- // of f.lsym ensures uniqueness of asym.
- asym := &obj.LSym{
- Name: f.LSym.Name,
- Type: objabi.SABIALIAS,
- R: []obj.Reloc{{Sym: f.LSym}}, // 0 size, so "informational"
- }
- asym.SetABI(wrapperABI)
- asym.Set(obj.AttrDuplicateOK, true)
- base.Ctxt.ABIAliases = append(base.Ctxt.ABIAliases, asym)
- } else {
- if base.Debug.ABIWrap != 0 {
- fmt.Fprintf(os.Stderr, "=-= %v to %v wrapper for %s.%s\n",
- wrapperABI, 1-wrapperABI, types.LocalPkg.Path, f.LSym.Name)
- }
- makeABIWrapper(f, wrapperABI)
- }
- }
- }
-}
-
-// setupTextLsym initializes the LSym for a with-body text symbol.
-func setupTextLSym(f *ir.Func, flag int) {
- if f.Dupok() {
- flag |= obj.DUPOK
- }
- if f.Wrapper() {
- flag |= obj.WRAPPER
- }
- if f.Needctxt() {
- flag |= obj.NEEDCTXT
- }
- if f.Pragma&ir.Nosplit != 0 {
- flag |= obj.NOSPLIT
- }
- if f.ReflectMethod() {
- flag |= obj.REFLECTMETHOD
- }
-
- // Clumsy but important.
- // See test/recover.go for test cases and src/reflect/value.go
- // for the actual functions being considered.
- if base.Ctxt.Pkgpath == "reflect" {
- switch f.Sym().Name {
- case "callReflect", "callMethod":
- flag |= obj.WRAPPER
- }
- }
-
- base.Ctxt.InitTextSym(f.LSym, flag)
-}
-
-func ggloblnod(nam ir.Node) {
- s := nam.Sym().Linksym()
- s.Gotype = ngotype(nam).Linksym()
- flags := 0
- if nam.Name().Readonly() {
- flags = obj.RODATA
- }
- if nam.Type() != nil && !nam.Type().HasPointers() {
- flags |= obj.NOPTR
- }
- base.Ctxt.Globl(s, nam.Type().Width, flags)
- if nam.Name().LibfuzzerExtraCounter() {
- s.Type = objabi.SLIBFUZZER_EXTRA_COUNTER
- }
- if nam.Sym().Linkname != "" {
- // Make sure linkname'd symbol is non-package. When a symbol is
- // both imported and linkname'd, s.Pkg may not set to "_" in
- // types.Sym.Linksym because LSym already exists. Set it here.
- s.Pkg = "_"
- }
-}
-
-func ggloblsym(s *obj.LSym, width int32, flags int16) {
- if flags&obj.LOCAL != 0 {
- s.Set(obj.AttrLocal, true)
- flags &^= obj.LOCAL
- }
- base.Ctxt.Globl(s, int64(width), int(flags))
-}
-
-func Addrconst(a *obj.Addr, v int64) {
- a.SetConst(v)
-}
-
-func Patch(p *obj.Prog, to *obj.Prog) {
- p.To.SetTarget(to)
-}
"bufio"
"bytes"
"cmd/compile/internal/base"
+ "cmd/compile/internal/dwarfgen"
+ "cmd/compile/internal/escape"
+ "cmd/compile/internal/inline"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
+ "cmd/compile/internal/noder"
+ "cmd/compile/internal/pkginit"
+ "cmd/compile/internal/reflectdata"
"cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/staticdata"
+ "cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
- "cmd/internal/bio"
+ "cmd/compile/internal/walk"
"cmd/internal/dwarf"
- "cmd/internal/goobj"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
"flag"
"fmt"
- "go/constant"
- "internal/goversion"
- "io"
- "io/ioutil"
"log"
"os"
- "path"
- "regexp"
"runtime"
- "sort"
- "strconv"
- "strings"
)
func hidePanic() {
}
}
-// Target is the package being compiled.
-var Target *ir.Package
-
// Main parses flags and Go source files specified in the command-line
// arguments, type-checks the parsed Go package, compiles functions to machine
// code, and finally writes the compiled package definition to disk.
-func Main(archInit func(*Arch)) {
- timings.Start("fe", "init")
+func Main(archInit func(*ssagen.ArchInfo)) {
+ base.Timer.Start("fe", "init")
defer hidePanic()
- archInit(&thearch)
+ archInit(&ssagen.Arch)
- base.Ctxt = obj.Linknew(thearch.LinkArch)
+ base.Ctxt = obj.Linknew(ssagen.Arch.LinkArch)
base.Ctxt.DiagFunc = base.Errorf
base.Ctxt.DiagFlush = base.FlushErrors
base.Ctxt.Bso = bufio.NewWriter(os.Stdout)
types.BuiltinPkg.Prefix = "go.builtin" // not go%2ebuiltin
// pseudo-package, accessed by import "unsafe"
- unsafepkg = types.NewPkg("unsafe", "unsafe")
+ ir.Pkgs.Unsafe = types.NewPkg("unsafe", "unsafe")
// Pseudo-package that contains the compiler's builtin
// declarations for package runtime. These are declared in a
// separate package to avoid conflicts with package runtime's
// actual declarations, which may differ intentionally but
// insignificantly.
- Runtimepkg = types.NewPkg("go.runtime", "runtime")
- Runtimepkg.Prefix = "runtime"
+ ir.Pkgs.Runtime = types.NewPkg("go.runtime", "runtime")
+ ir.Pkgs.Runtime.Prefix = "runtime"
// pseudo-packages used in symbol tables
- itabpkg = types.NewPkg("go.itab", "go.itab")
- itabpkg.Prefix = "go.itab" // not go%2eitab
+ ir.Pkgs.Itab = types.NewPkg("go.itab", "go.itab")
+ ir.Pkgs.Itab.Prefix = "go.itab" // not go%2eitab
- itablinkpkg = types.NewPkg("go.itablink", "go.itablink")
- itablinkpkg.Prefix = "go.itablink" // not go%2eitablink
+ ir.Pkgs.Itablink = types.NewPkg("go.itablink", "go.itablink")
+ ir.Pkgs.Itablink.Prefix = "go.itablink" // not go%2eitablink
- trackpkg = types.NewPkg("go.track", "go.track")
- trackpkg.Prefix = "go.track" // not go%2etrack
+ ir.Pkgs.Track = types.NewPkg("go.track", "go.track")
+ ir.Pkgs.Track.Prefix = "go.track" // not go%2etrack
// pseudo-package used for map zero values
- mappkg = types.NewPkg("go.map", "go.map")
- mappkg.Prefix = "go.map"
+ ir.Pkgs.Map = types.NewPkg("go.map", "go.map")
+ ir.Pkgs.Map.Prefix = "go.map"
// pseudo-package used for methods with anonymous receivers
- gopkg = types.NewPkg("go", "")
+ ir.Pkgs.Go = types.NewPkg("go", "")
base.DebugSSA = ssa.PhaseOption
base.ParseFlags()
// Record flags that affect the build result. (And don't
// record flags that don't, since that would cause spurious
// changes in the binary.)
- recordFlags("B", "N", "l", "msan", "race", "shared", "dynlink", "dwarflocationlists", "dwarfbasentries", "smallframes", "spectre")
+ dwarfgen.RecordFlags("B", "N", "l", "msan", "race", "shared", "dynlink", "dwarflocationlists", "dwarfbasentries", "smallframes", "spectre")
- if !enableTrace && base.Flag.LowerT {
+ if !base.EnableTrace && base.Flag.LowerT {
log.Fatalf("compiler not built with support for -t")
}
}
if base.Flag.SmallFrames {
- maxStackVarSize = 128 * 1024
- maxImplicitStackVarSize = 16 * 1024
+ ir.MaxStackVarSize = 128 * 1024
+ ir.MaxImplicitStackVarSize = 16 * 1024
}
if base.Flag.Dwarf {
- base.Ctxt.DebugInfo = debuginfo
- base.Ctxt.GenAbstractFunc = genAbstractFunc
+ base.Ctxt.DebugInfo = dwarfgen.Info
+ base.Ctxt.GenAbstractFunc = dwarfgen.AbstractFunc
base.Ctxt.DwFixups = obj.NewDwarfFixupTable(base.Ctxt)
} else {
// turn off inline generation if no dwarf at all
log.Fatalf("location lists requested but register mapping not available on %v", base.Ctxt.Arch.Name)
}
- checkLang()
+ types.ParseLangFlag()
if base.Flag.SymABIs != "" {
- readSymABIs(base.Flag.SymABIs, base.Ctxt.Pkgpath)
+ ssagen.ReadSymABIs(base.Flag.SymABIs, base.Ctxt.Pkgpath)
}
- if ispkgin(omit_pkgs) {
+ if base.Compiling(base.NoInstrumentPkgs) {
base.Flag.Race = false
base.Flag.MSan = false
}
- thearch.LinkArch.Init(base.Ctxt)
+ ssagen.Arch.LinkArch.Init(base.Ctxt)
startProfile()
if base.Flag.Race {
- racepkg = types.NewPkg("runtime/race", "")
+ ir.Pkgs.Race = types.NewPkg("runtime/race", "")
}
if base.Flag.MSan {
- msanpkg = types.NewPkg("runtime/msan", "")
+ ir.Pkgs.Msan = types.NewPkg("runtime/msan", "")
}
if base.Flag.Race || base.Flag.MSan {
- instrumenting = true
+ base.Flag.Cfg.Instrumenting = true
}
if base.Flag.Dwarf {
dwarf.EnableLogging(base.Debug.DwarfInl != 0)
}
if base.Debug.SoftFloat != 0 {
- thearch.SoftFloat = true
+ ssagen.Arch.SoftFloat = true
}
if base.Flag.JSON != "" { // parse version,destination from json logging optimization.
logopt.LogJsonOption(base.Flag.JSON)
}
- ir.EscFmt = escFmt
- IsIntrinsicCall = isIntrinsicCall
- SSADumpInline = ssaDumpInline
- initSSAEnv()
- initSSATables()
+ ir.EscFmt = escape.Fmt
+ ir.IsIntrinsicCall = ssagen.IsIntrinsicCall
+ inline.SSADumpInline = ssagen.DumpInline
+ ssagen.InitEnv()
+ ssagen.InitTables()
- Widthptr = thearch.LinkArch.PtrSize
- Widthreg = thearch.LinkArch.RegSize
- MaxWidth = thearch.MAXWIDTH
+ types.PtrSize = ssagen.Arch.LinkArch.PtrSize
+ types.RegSize = ssagen.Arch.LinkArch.RegSize
+ types.MaxWidth = ssagen.Arch.MAXWIDTH
types.TypeLinkSym = func(t *types.Type) *obj.LSym {
- return typenamesym(t).Linksym()
+ return reflectdata.TypeSym(t).Linksym()
}
- Target = new(ir.Package)
+ typecheck.Target = new(ir.Package)
- NeedFuncSym = makefuncsym
- NeedITab = func(t, iface *types.Type) { itabname(t, iface) }
- NeedRuntimeType = addsignat // TODO(rsc): typenamesym for lock?
+ typecheck.NeedFuncSym = staticdata.NeedFuncSym
+ typecheck.NeedITab = func(t, iface *types.Type) { reflectdata.ITabAddr(t, iface) }
+ typecheck.NeedRuntimeType = reflectdata.NeedRuntimeType // TODO(rsc): typenamesym for lock?
- autogeneratedPos = makePos(src.NewFileBase("<autogenerated>", "<autogenerated>"), 1, 0)
+ base.AutogeneratedPos = makePos(src.NewFileBase("<autogenerated>", "<autogenerated>"), 1, 0)
types.TypeLinkSym = func(t *types.Type) *obj.LSym {
- return typenamesym(t).Linksym()
+ return reflectdata.TypeSym(t).Linksym()
}
- TypecheckInit()
+ typecheck.Init()
// Parse input.
- timings.Start("fe", "parse")
- lines := parseFiles(flag.Args())
- cgoSymABIs()
- timings.Stop()
- timings.AddEvent(int64(lines), "lines")
+ base.Timer.Start("fe", "parse")
+ lines := noder.ParseFiles(flag.Args())
+ ssagen.CgoSymABIs()
+ base.Timer.Stop()
+ base.Timer.AddEvent(int64(lines), "lines")
if base.Flag.G != 0 && base.Flag.G < 3 {
// can only parse generic code for now
base.ExitIfErrors()
return
}
- recordPackageName()
+ dwarfgen.RecordPackageName()
// Typecheck.
- TypecheckPackage()
+ typecheck.Package()
// With all user code typechecked, it's now safe to verify unused dot imports.
- checkDotImports()
+ noder.CheckDotImports()
base.ExitIfErrors()
// Build init task.
- if initTask := fninit(); initTask != nil {
- exportsym(initTask)
+ if initTask := pkginit.Task(); initTask != nil {
+ typecheck.Export(initTask)
}
// Inlining
- timings.Start("fe", "inlining")
+ base.Timer.Start("fe", "inlining")
if base.Flag.LowerL != 0 {
- InlinePackage()
+ inline.InlinePackage()
}
// Devirtualize.
- for _, n := range Target.Decls {
+ for _, n := range typecheck.Target.Decls {
if n.Op() == ir.ODCLFUNC {
- devirtualize(n.(*ir.Func))
+ inline.Devirtualize(n.(*ir.Func))
}
}
- Curfn = nil
+ ir.CurFunc = nil
// Escape analysis.
// Required for moving heap allocations onto stack,
// or else the stack copier will not update it.
// Large values are also moved off stack in escape analysis;
// because large values may contain pointers, it must happen early.
- timings.Start("fe", "escapes")
- escapes(Target.Decls)
+ base.Timer.Start("fe", "escapes")
+ escape.Funcs(typecheck.Target.Decls)
// Collect information for go:nowritebarrierrec
// checking. This must happen before transformclosure.
// We'll do the final check after write barriers are
// inserted.
if base.Flag.CompilingRuntime {
- EnableNoWriteBarrierRecCheck()
+ ssagen.EnableNoWriteBarrierRecCheck()
}
// Transform closure bodies to properly reference captured variables.
// This needs to happen before walk, because closures must be transformed
// before walk reaches a call of a closure.
- timings.Start("fe", "xclosures")
- for _, n := range Target.Decls {
+ base.Timer.Start("fe", "xclosures")
+ for _, n := range typecheck.Target.Decls {
if n.Op() == ir.ODCLFUNC {
n := n.(*ir.Func)
- if n.Func().OClosure != nil {
- Curfn = n
- transformclosure(n)
+ if n.OClosure != nil {
+ ir.CurFunc = n
+ walk.Closure(n)
}
}
}
// Prepare for SSA compilation.
// This must be before peekitabs, because peekitabs
// can trigger function compilation.
- initssaconfig()
+ ssagen.InitConfig()
// Just before compilation, compile itabs found on
// the right side of OCONVIFACE so that methods
// can be de-virtualized during compilation.
- Curfn = nil
- peekitabs()
+ ir.CurFunc = nil
+ reflectdata.CompileITabs()
// Compile top level functions.
// Don't use range--walk can add functions to Target.Decls.
- timings.Start("be", "compilefuncs")
+ base.Timer.Start("be", "compilefuncs")
fcount := int64(0)
- for i := 0; i < len(Target.Decls); i++ {
- n := Target.Decls[i]
+ for i := 0; i < len(typecheck.Target.Decls); i++ {
+ n := typecheck.Target.Decls[i]
if n.Op() == ir.ODCLFUNC {
funccompile(n.(*ir.Func))
fcount++
}
}
- timings.AddEvent(fcount, "funcs")
+ base.Timer.AddEvent(fcount, "funcs")
compileFunctions()
if base.Flag.CompilingRuntime {
// Write barriers are now known. Check the call graph.
- NoWriteBarrierRecCheck()
+ ssagen.NoWriteBarrierRecCheck()
}
// Finalize DWARF inline routine DIEs, then explicitly turn off
}
// Write object data to disk.
- timings.Start("be", "dumpobj")
+ base.Timer.Start("be", "dumpobj")
dumpdata()
base.Ctxt.NumberSyms()
dumpobj()
dumpasmhdr()
}
- CheckLargeStacks()
- CheckFuncStack()
+ ssagen.CheckLargeStacks()
+ typecheck.CheckFuncStack()
if len(compilequeue) != 0 {
base.Fatalf("%d uncompiled functions", len(compilequeue))
base.ExitIfErrors()
base.FlushErrors()
- timings.Stop()
+ base.Timer.Stop()
if base.Flag.Bench != "" {
if err := writebench(base.Flag.Bench); err != nil {
}
}
-func CheckLargeStacks() {
- // Check whether any of the functions we have compiled have gigantic stack frames.
- sort.Slice(largeStackFrames, func(i, j int) bool {
- return largeStackFrames[i].pos.Before(largeStackFrames[j].pos)
- })
- for _, large := range largeStackFrames {
- if large.callee != 0 {
- base.ErrorfAt(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args + %d MB callee", large.locals>>20, large.args>>20, large.callee>>20)
- } else {
- base.ErrorfAt(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args", large.locals>>20, large.args>>20)
- }
- }
-}
-
-func cgoSymABIs() {
- // The linker expects an ABI0 wrapper for all cgo-exported
- // functions.
- for _, prag := range Target.CgoPragmas {
- switch prag[0] {
- case "cgo_export_static", "cgo_export_dynamic":
- if symabiRefs == nil {
- symabiRefs = make(map[string]obj.ABI)
- }
- symabiRefs[prag[1]] = obj.ABI0
- }
- }
-}
-
-// numNonClosures returns the number of functions in list which are not closures.
-func numNonClosures(list []*ir.Func) int {
- count := 0
- for _, fn := range list {
- if fn.OClosure == nil {
- count++
- }
- }
- return count
-}
-
func writebench(filename string) error {
f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
if err != nil {
fmt.Fprintln(&buf, "commit:", objabi.Version)
fmt.Fprintln(&buf, "goos:", runtime.GOOS)
fmt.Fprintln(&buf, "goarch:", runtime.GOARCH)
- timings.Write(&buf, "BenchmarkCompile:"+base.Ctxt.Pkgpath+":")
+ base.Timer.Write(&buf, "BenchmarkCompile:"+base.Ctxt.Pkgpath+":")
n, err := f.Write(buf.Bytes())
if err != nil {
return f.Close()
}
-// symabiDefs and symabiRefs record the defined and referenced ABIs of
-// symbols required by non-Go code. These are keyed by link symbol
-// name, where the local package prefix is always `"".`
-var symabiDefs, symabiRefs map[string]obj.ABI
-
-// readSymABIs reads a symabis file that specifies definitions and
-// references of text symbols by ABI.
-//
-// The symabis format is a set of lines, where each line is a sequence
-// of whitespace-separated fields. The first field is a verb and is
-// either "def" for defining a symbol ABI or "ref" for referencing a
-// symbol using an ABI. For both "def" and "ref", the second field is
-// the symbol name and the third field is the ABI name, as one of the
-// named cmd/internal/obj.ABI constants.
-func readSymABIs(file, myimportpath string) {
- data, err := ioutil.ReadFile(file)
- if err != nil {
- log.Fatalf("-symabis: %v", err)
- }
-
- symabiDefs = make(map[string]obj.ABI)
- symabiRefs = make(map[string]obj.ABI)
-
- localPrefix := ""
- if myimportpath != "" {
- // Symbols in this package may be written either as
- // "".X or with the package's import path already in
- // the symbol.
- localPrefix = objabi.PathToPrefix(myimportpath) + "."
- }
-
- for lineNum, line := range strings.Split(string(data), "\n") {
- lineNum++ // 1-based
- line = strings.TrimSpace(line)
- if line == "" || strings.HasPrefix(line, "#") {
- continue
- }
-
- parts := strings.Fields(line)
- switch parts[0] {
- case "def", "ref":
- // Parse line.
- if len(parts) != 3 {
- log.Fatalf(`%s:%d: invalid symabi: syntax is "%s sym abi"`, file, lineNum, parts[0])
- }
- sym, abistr := parts[1], parts[2]
- abi, valid := obj.ParseABI(abistr)
- if !valid {
- log.Fatalf(`%s:%d: invalid symabi: unknown abi "%s"`, file, lineNum, abistr)
- }
-
- // If the symbol is already prefixed with
- // myimportpath, rewrite it to start with ""
- // so it matches the compiler's internal
- // symbol names.
- if localPrefix != "" && strings.HasPrefix(sym, localPrefix) {
- sym = `"".` + sym[len(localPrefix):]
- }
-
- // Record for later.
- if parts[0] == "def" {
- symabiDefs[sym] = abi
- } else {
- symabiRefs[sym] = abi
- }
- default:
- log.Fatalf(`%s:%d: invalid symabi type "%s"`, file, lineNum, parts[0])
- }
- }
-}
-
-func arsize(b *bufio.Reader, name string) int {
- var buf [ArhdrSize]byte
- if _, err := io.ReadFull(b, buf[:]); err != nil {
- return -1
- }
- aname := strings.Trim(string(buf[0:16]), " ")
- if !strings.HasPrefix(aname, name) {
- return -1
- }
- asize := strings.Trim(string(buf[48:58]), " ")
- i, _ := strconv.Atoi(asize)
- return i
-}
-
-func isDriveLetter(b byte) bool {
- return 'a' <= b && b <= 'z' || 'A' <= b && b <= 'Z'
-}
-
-// is this path a local name? begins with ./ or ../ or /
-func islocalname(name string) bool {
- return strings.HasPrefix(name, "/") ||
- runtime.GOOS == "windows" && len(name) >= 3 && isDriveLetter(name[0]) && name[1] == ':' && name[2] == '/' ||
- strings.HasPrefix(name, "./") || name == "." ||
- strings.HasPrefix(name, "../") || name == ".."
-}
-
-func findpkg(name string) (file string, ok bool) {
- if islocalname(name) {
- if base.Flag.NoLocalImports {
- return "", false
- }
-
- if base.Flag.Cfg.PackageFile != nil {
- file, ok = base.Flag.Cfg.PackageFile[name]
- return file, ok
- }
-
- // try .a before .6. important for building libraries:
- // if there is an array.6 in the array.a library,
- // want to find all of array.a, not just array.6.
- file = fmt.Sprintf("%s.a", name)
- if _, err := os.Stat(file); err == nil {
- return file, true
- }
- file = fmt.Sprintf("%s.o", name)
- if _, err := os.Stat(file); err == nil {
- return file, true
- }
- return "", false
- }
-
- // local imports should be canonicalized already.
- // don't want to see "encoding/../encoding/base64"
- // as different from "encoding/base64".
- if q := path.Clean(name); q != name {
- base.Errorf("non-canonical import path %q (should be %q)", name, q)
- return "", false
- }
-
- if base.Flag.Cfg.PackageFile != nil {
- file, ok = base.Flag.Cfg.PackageFile[name]
- return file, ok
- }
-
- for _, dir := range base.Flag.Cfg.ImportDirs {
- file = fmt.Sprintf("%s/%s.a", dir, name)
- if _, err := os.Stat(file); err == nil {
- return file, true
- }
- file = fmt.Sprintf("%s/%s.o", dir, name)
- if _, err := os.Stat(file); err == nil {
- return file, true
- }
- }
-
- if objabi.GOROOT != "" {
- suffix := ""
- suffixsep := ""
- if base.Flag.InstallSuffix != "" {
- suffixsep = "_"
- suffix = base.Flag.InstallSuffix
- } else if base.Flag.Race {
- suffixsep = "_"
- suffix = "race"
- } else if base.Flag.MSan {
- suffixsep = "_"
- suffix = "msan"
- }
-
- file = fmt.Sprintf("%s/pkg/%s_%s%s%s/%s.a", objabi.GOROOT, objabi.GOOS, objabi.GOARCH, suffixsep, suffix, name)
- if _, err := os.Stat(file); err == nil {
- return file, true
- }
- file = fmt.Sprintf("%s/pkg/%s_%s%s%s/%s.o", objabi.GOROOT, objabi.GOOS, objabi.GOARCH, suffixsep, suffix, name)
- if _, err := os.Stat(file); err == nil {
- return file, true
- }
- }
-
- return "", false
-}
-
-// loadsys loads the definitions for the low-level runtime functions,
-// so that the compiler can generate calls to them,
-// but does not make them visible to user code.
-func loadsys() {
- types.Block = 1
-
- inimport = true
- typecheckok = true
-
- typs := runtimeTypes()
- for _, d := range &runtimeDecls {
- sym := Runtimepkg.Lookup(d.name)
- typ := typs[d.typ]
- switch d.tag {
- case funcTag:
- importfunc(Runtimepkg, src.NoXPos, sym, typ)
- case varTag:
- importvar(Runtimepkg, src.NoXPos, sym, typ)
- default:
- base.Fatalf("unhandled declaration tag %v", d.tag)
- }
- }
-
- typecheckok = false
- inimport = false
-}
-
-// myheight tracks the local package's height based on packages
-// imported so far.
-var myheight int
-
-func importfile(f constant.Value) *types.Pkg {
- if f.Kind() != constant.String {
- base.Errorf("import path must be a string")
- return nil
- }
-
- path_ := constant.StringVal(f)
- if len(path_) == 0 {
- base.Errorf("import path is empty")
- return nil
- }
-
- if isbadimport(path_, false) {
- return nil
- }
-
- // The package name main is no longer reserved,
- // but we reserve the import path "main" to identify
- // the main package, just as we reserve the import
- // path "math" to identify the standard math package.
- if path_ == "main" {
- base.Errorf("cannot import \"main\"")
- base.ErrorExit()
- }
-
- if base.Ctxt.Pkgpath != "" && path_ == base.Ctxt.Pkgpath {
- base.Errorf("import %q while compiling that package (import cycle)", path_)
- base.ErrorExit()
- }
-
- if mapped, ok := base.Flag.Cfg.ImportMap[path_]; ok {
- path_ = mapped
- }
-
- if path_ == "unsafe" {
- return unsafepkg
- }
-
- if islocalname(path_) {
- if path_[0] == '/' {
- base.Errorf("import path cannot be absolute path")
- return nil
- }
-
- prefix := base.Ctxt.Pathname
- if base.Flag.D != "" {
- prefix = base.Flag.D
- }
- path_ = path.Join(prefix, path_)
-
- if isbadimport(path_, true) {
- return nil
- }
- }
-
- file, found := findpkg(path_)
- if !found {
- base.Errorf("can't find import: %q", path_)
- base.ErrorExit()
- }
-
- importpkg := types.NewPkg(path_, "")
- if importpkg.Imported {
- return importpkg
- }
-
- importpkg.Imported = true
-
- imp, err := bio.Open(file)
- if err != nil {
- base.Errorf("can't open import: %q: %v", path_, err)
- base.ErrorExit()
- }
- defer imp.Close()
-
- // check object header
- p, err := imp.ReadString('\n')
- if err != nil {
- base.Errorf("import %s: reading input: %v", file, err)
- base.ErrorExit()
- }
-
- if p == "!<arch>\n" { // package archive
- // package export block should be first
- sz := arsize(imp.Reader, "__.PKGDEF")
- if sz <= 0 {
- base.Errorf("import %s: not a package file", file)
- base.ErrorExit()
- }
- p, err = imp.ReadString('\n')
- if err != nil {
- base.Errorf("import %s: reading input: %v", file, err)
- base.ErrorExit()
- }
- }
-
- if !strings.HasPrefix(p, "go object ") {
- base.Errorf("import %s: not a go object file: %s", file, p)
- base.ErrorExit()
- }
- q := fmt.Sprintf("%s %s %s %s\n", objabi.GOOS, objabi.GOARCH, objabi.Version, objabi.Expstring())
- if p[10:] != q {
- base.Errorf("import %s: object is [%s] expected [%s]", file, p[10:], q)
- base.ErrorExit()
- }
-
- // process header lines
- for {
- p, err = imp.ReadString('\n')
- if err != nil {
- base.Errorf("import %s: reading input: %v", file, err)
- base.ErrorExit()
- }
- if p == "\n" {
- break // header ends with blank line
- }
- }
-
- // Expect $$B\n to signal binary import format.
-
- // look for $$
- var c byte
- for {
- c, err = imp.ReadByte()
- if err != nil {
- break
- }
- if c == '$' {
- c, err = imp.ReadByte()
- if c == '$' || err != nil {
- break
- }
- }
- }
-
- // get character after $$
- if err == nil {
- c, _ = imp.ReadByte()
- }
-
- var fingerprint goobj.FingerprintType
- switch c {
- case '\n':
- base.Errorf("cannot import %s: old export format no longer supported (recompile library)", path_)
- return nil
-
- case 'B':
- if base.Debug.Export != 0 {
- fmt.Printf("importing %s (%s)\n", path_, file)
- }
- imp.ReadByte() // skip \n after $$B
-
- c, err = imp.ReadByte()
- if err != nil {
- base.Errorf("import %s: reading input: %v", file, err)
- base.ErrorExit()
- }
-
- // Indexed format is distinguished by an 'i' byte,
- // whereas previous export formats started with 'c', 'd', or 'v'.
- if c != 'i' {
- base.Errorf("import %s: unexpected package format byte: %v", file, c)
- base.ErrorExit()
- }
- fingerprint = iimport(importpkg, imp)
-
- default:
- base.Errorf("no import in %q", path_)
- base.ErrorExit()
- }
-
- // assume files move (get installed) so don't record the full path
- if base.Flag.Cfg.PackageFile != nil {
- // If using a packageFile map, assume path_ can be recorded directly.
- base.Ctxt.AddImport(path_, fingerprint)
- } else {
- // For file "/Users/foo/go/pkg/darwin_amd64/math.a" record "math.a".
- base.Ctxt.AddImport(file[len(file)-len(path_)-len(".a"):], fingerprint)
- }
-
- if importpkg.Height >= myheight {
- myheight = importpkg.Height + 1
- }
-
- return importpkg
-}
-
-func pkgnotused(lineno src.XPos, path string, name string) {
- // If the package was imported with a name other than the final
- // import path element, show it explicitly in the error message.
- // Note that this handles both renamed imports and imports of
- // packages containing unconventional package declarations.
- // Note that this uses / always, even on Windows, because Go import
- // paths always use forward slashes.
- elem := path
- if i := strings.LastIndex(elem, "/"); i >= 0 {
- elem = elem[i+1:]
- }
- if name == "" || elem == name {
- base.ErrorfAt(lineno, "imported and not used: %q", path)
- } else {
- base.ErrorfAt(lineno, "imported and not used: %q as %s", path, name)
- }
-}
-
-func mkpackage(pkgname string) {
- if types.LocalPkg.Name == "" {
- if pkgname == "_" {
- base.Errorf("invalid package name _")
- }
- types.LocalPkg.Name = pkgname
- } else {
- if pkgname != types.LocalPkg.Name {
- base.Errorf("package %s; expected %s", pkgname, types.LocalPkg.Name)
- }
- }
-}
-
-func clearImports() {
- type importedPkg struct {
- pos src.XPos
- path string
- name string
- }
- var unused []importedPkg
-
- for _, s := range types.LocalPkg.Syms {
- n := ir.AsNode(s.Def)
- if n == nil {
- continue
- }
- if n.Op() == ir.OPACK {
- // throw away top-level package name left over
- // from previous file.
- // leave s->block set to cause redeclaration
- // errors if a conflicting top-level name is
- // introduced by a different file.
- p := n.(*ir.PkgName)
- if !p.Used && base.SyntaxErrors() == 0 {
- unused = append(unused, importedPkg{p.Pos(), p.Pkg.Path, s.Name})
- }
- s.Def = nil
- continue
- }
- if IsAlias(s) {
- // throw away top-level name left over
- // from previous import . "x"
- // We'll report errors after type checking in checkDotImports.
- s.Def = nil
- continue
- }
- }
-
- sort.Slice(unused, func(i, j int) bool { return unused[i].pos.Before(unused[j].pos) })
- for _, pkg := range unused {
- pkgnotused(pkg.pos, pkg.path, pkg.name)
- }
-}
-
-func IsAlias(sym *types.Sym) bool {
- return sym.Def != nil && sym.Def.Sym() != sym
-}
-
-// recordFlags records the specified command-line flags to be placed
-// in the DWARF info.
-func recordFlags(flags ...string) {
- if base.Ctxt.Pkgpath == "" {
- // We can't record the flags if we don't know what the
- // package name is.
- return
- }
-
- type BoolFlag interface {
- IsBoolFlag() bool
- }
- type CountFlag interface {
- IsCountFlag() bool
- }
- var cmd bytes.Buffer
- for _, name := range flags {
- f := flag.Lookup(name)
- if f == nil {
- continue
- }
- getter := f.Value.(flag.Getter)
- if getter.String() == f.DefValue {
- // Flag has default value, so omit it.
- continue
- }
- if bf, ok := f.Value.(BoolFlag); ok && bf.IsBoolFlag() {
- val, ok := getter.Get().(bool)
- if ok && val {
- fmt.Fprintf(&cmd, " -%s", f.Name)
- continue
- }
- }
- if cf, ok := f.Value.(CountFlag); ok && cf.IsCountFlag() {
- val, ok := getter.Get().(int)
- if ok && val == 1 {
- fmt.Fprintf(&cmd, " -%s", f.Name)
- continue
- }
- }
- fmt.Fprintf(&cmd, " -%s=%v", f.Name, getter.Get())
- }
-
- if cmd.Len() == 0 {
- return
- }
- s := base.Ctxt.Lookup(dwarf.CUInfoPrefix + "producer." + base.Ctxt.Pkgpath)
- s.Type = objabi.SDWARFCUINFO
- // Sometimes (for example when building tests) we can link
- // together two package main archives. So allow dups.
- s.Set(obj.AttrDuplicateOK, true)
- base.Ctxt.Data = append(base.Ctxt.Data, s)
- s.P = cmd.Bytes()[1:]
-}
-
-// recordPackageName records the name of the package being
-// compiled, so that the linker can save it in the compile unit's DIE.
-func recordPackageName() {
- s := base.Ctxt.Lookup(dwarf.CUInfoPrefix + "packagename." + base.Ctxt.Pkgpath)
- s.Type = objabi.SDWARFCUINFO
- // Sometimes (for example when building tests) we can link
- // together two package main archives. So allow dups.
- s.Set(obj.AttrDuplicateOK, true)
- base.Ctxt.Data = append(base.Ctxt.Data, s)
- s.P = []byte(types.LocalPkg.Name)
-}
-
-// currentLang returns the current language version.
-func currentLang() string {
- return fmt.Sprintf("go1.%d", goversion.Version)
-}
-
-// goVersionRE is a regular expression that matches the valid
-// arguments to the -lang flag.
-var goVersionRE = regexp.MustCompile(`^go([1-9][0-9]*)\.(0|[1-9][0-9]*)$`)
-
-// A lang is a language version broken into major and minor numbers.
-type lang struct {
- major, minor int
-}
-
-// langWant is the desired language version set by the -lang flag.
-// If the -lang flag is not set, this is the zero value, meaning that
-// any language version is supported.
-var langWant lang
-
-// AllowsGoVersion reports whether a particular package
-// is allowed to use Go version major.minor.
-// We assume the imported packages have all been checked,
-// so we only have to check the local package against the -lang flag.
-func AllowsGoVersion(pkg *types.Pkg, major, minor int) bool {
- if pkg == nil {
- // TODO(mdempsky): Set Pkg for local types earlier.
- pkg = types.LocalPkg
- }
- if pkg != types.LocalPkg {
- // Assume imported packages passed type-checking.
- return true
- }
- if langWant.major == 0 && langWant.minor == 0 {
- return true
- }
- return langWant.major > major || (langWant.major == major && langWant.minor >= minor)
-}
-
-func langSupported(major, minor int, pkg *types.Pkg) bool {
- return AllowsGoVersion(pkg, major, minor)
-}
-
-// checkLang verifies that the -lang flag holds a valid value, and
-// exits if not. It initializes data used by langSupported.
-func checkLang() {
- if base.Flag.Lang == "" {
- return
- }
-
- var err error
- langWant, err = parseLang(base.Flag.Lang)
- if err != nil {
- log.Fatalf("invalid value %q for -lang: %v", base.Flag.Lang, err)
- }
-
- if def := currentLang(); base.Flag.Lang != def {
- defVers, err := parseLang(def)
- if err != nil {
- log.Fatalf("internal error parsing default lang %q: %v", def, err)
- }
- if langWant.major > defVers.major || (langWant.major == defVers.major && langWant.minor > defVers.minor) {
- log.Fatalf("invalid value %q for -lang: max known version is %q", base.Flag.Lang, def)
- }
- }
-}
-
-// parseLang parses a -lang option into a langVer.
-func parseLang(s string) (lang, error) {
- matches := goVersionRE.FindStringSubmatch(s)
- if matches == nil {
- return lang{}, fmt.Errorf(`should be something like "go1.12"`)
- }
- major, err := strconv.Atoi(matches[1])
- if err != nil {
- return lang{}, err
- }
- minor, err := strconv.Atoi(matches[2])
- if err != nil {
- return lang{}, err
- }
- return lang{major: major, minor: minor}, nil
-}
-
-// useNewABIWrapGen returns TRUE if the compiler should generate an
-// ABI wrapper for the function 'f'.
-func useABIWrapGen(f *ir.Func) bool {
- if !base.Flag.ABIWrap {
- return false
- }
-
- // Support limit option for bisecting.
- if base.Flag.ABIWrapLimit == 1 {
- return false
- }
- if base.Flag.ABIWrapLimit < 1 {
- return true
- }
- base.Flag.ABIWrapLimit--
- if base.Debug.ABIWrap != 0 && base.Flag.ABIWrapLimit == 1 {
- fmt.Fprintf(os.Stderr, "=-= limit reached after new wrapper for %s\n",
- f.LSym.Name)
- }
-
- return true
+func makePos(b *src.PosBase, line, col uint) src.XPos {
+ return base.Ctxt.PosTable.XPos(src.MakePos(b, line, col))
}
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/staticdata"
+ "cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
+ "cmd/internal/archive"
"cmd/internal/bio"
"cmd/internal/obj"
"cmd/internal/objabi"
- "cmd/internal/src"
- "crypto/sha256"
"encoding/json"
"fmt"
- "go/constant"
- "io"
- "io/ioutil"
- "os"
- "sort"
- "strconv"
)
-// architecture-independent object file output
-const ArhdrSize = 60
-
-func formathdr(arhdr []byte, name string, size int64) {
- copy(arhdr[:], fmt.Sprintf("%-16s%-12d%-6d%-6d%-8o%-10d`\n", name, 0, 0, 0, 0644, size))
-}
-
// These modes say which kind of object file to generate.
// The default use of the toolchain is to set both bits,
// generating a combined compiler+linker object, one that
}
func startArchiveEntry(bout *bio.Writer) int64 {
- var arhdr [ArhdrSize]byte
+ var arhdr [archive.HeaderSize]byte
bout.Write(arhdr[:])
return bout.Offset()
}
if size&1 != 0 {
bout.WriteByte(0)
}
- bout.MustSeek(start-ArhdrSize, 0)
+ bout.MustSeek(start-archive.HeaderSize, 0)
- var arhdr [ArhdrSize]byte
- formathdr(arhdr[:], name, size)
+ var arhdr [archive.HeaderSize]byte
+ archive.FormatHeader(arhdr[:], name, size)
bout.Write(arhdr[:])
bout.Flush()
bout.MustSeek(start+size+(size&1), 0)
}
func dumpdata() {
- numExterns := len(Target.Externs)
- numDecls := len(Target.Decls)
-
- dumpglobls(Target.Externs)
- dumpfuncsyms()
- addptabs()
- numExports := len(Target.Exports)
- addsignats(Target.Externs)
- dumpsignats()
- dumptabs()
- numPTabs, numITabs := CountTabs()
- dumpimportstrings()
- dumpbasictypes()
+ numExterns := len(typecheck.Target.Externs)
+ numDecls := len(typecheck.Target.Decls)
+
+ dumpglobls(typecheck.Target.Externs)
+ staticdata.WriteFuncSyms()
+ reflectdata.CollectPTabs()
+ numExports := len(typecheck.Target.Exports)
+ addsignats(typecheck.Target.Externs)
+ reflectdata.WriteRuntimeTypes()
+ reflectdata.WriteTabs()
+ numPTabs, numITabs := reflectdata.CountTabs()
+ reflectdata.WriteImportStrings()
+ reflectdata.WriteBasicTypes()
dumpembeds()
// Calls to dumpsignats can generate functions,
// In the typical case, we loop 0 or 1 times.
// It was not until issue 24761 that we found any code that required a loop at all.
for {
- for i := numDecls; i < len(Target.Decls); i++ {
- n := Target.Decls[i]
+ for i := numDecls; i < len(typecheck.Target.Decls); i++ {
+ n := typecheck.Target.Decls[i]
if n.Op() == ir.ODCLFUNC {
funccompile(n.(*ir.Func))
}
}
- numDecls = len(Target.Decls)
+ numDecls = len(typecheck.Target.Decls)
compileFunctions()
- dumpsignats()
- if numDecls == len(Target.Decls) {
+ reflectdata.WriteRuntimeTypes()
+ if numDecls == len(typecheck.Target.Decls) {
break
}
}
// Dump extra globals.
- dumpglobls(Target.Externs[numExterns:])
+ dumpglobls(typecheck.Target.Externs[numExterns:])
- if zerosize > 0 {
- zero := mappkg.Lookup("zero")
- ggloblsym(zero.Linksym(), int32(zerosize), obj.DUPOK|obj.RODATA)
+ if reflectdata.ZeroSize > 0 {
+ zero := ir.Pkgs.Map.Lookup("zero")
+ objw.Global(zero.Linksym(), int32(reflectdata.ZeroSize), obj.DUPOK|obj.RODATA)
}
addGCLocals()
- if numExports != len(Target.Exports) {
+ if numExports != len(typecheck.Target.Exports) {
base.Fatalf("Target.Exports changed after compile functions loop")
}
- newNumPTabs, newNumITabs := CountTabs()
+ newNumPTabs, newNumITabs := reflectdata.CountTabs()
if newNumPTabs != numPTabs {
base.Fatalf("ptabs changed after compile functions loop")
}
func dumpLinkerObj(bout *bio.Writer) {
printObjHeader(bout)
- if len(Target.CgoPragmas) != 0 {
+ if len(typecheck.Target.CgoPragmas) != 0 {
// write empty export section; must be before cgo section
fmt.Fprintf(bout, "\n$$\n\n$$\n\n")
fmt.Fprintf(bout, "\n$$ // cgo\n")
- if err := json.NewEncoder(bout).Encode(Target.CgoPragmas); err != nil {
+ if err := json.NewEncoder(bout).Encode(typecheck.Target.CgoPragmas); err != nil {
base.Fatalf("serializing pragcgobuf: %v", err)
}
fmt.Fprintf(bout, "\n$$\n\n")
obj.WriteObjFile(base.Ctxt, bout)
}
-func addptabs() {
- if !base.Ctxt.Flag_dynlink || types.LocalPkg.Name != "main" {
- return
- }
- for _, exportn := range Target.Exports {
- s := exportn.Sym()
- nn := ir.AsNode(s.Def)
- if nn == nil {
- continue
- }
- if nn.Op() != ir.ONAME {
- continue
- }
- n := nn.(*ir.Name)
- if !types.IsExported(s.Name) {
- continue
- }
- if s.Pkg.Name != "main" {
- continue
- }
- if n.Type().Kind() == types.TFUNC && n.Class() == ir.PFUNC {
- // function
- ptabs = append(ptabs, ptabEntry{s: s, t: s.Def.Type()})
- } else {
- // variable
- ptabs = append(ptabs, ptabEntry{s: s, t: types.NewPtr(s.Def.Type())})
- }
- }
-}
-
func dumpGlobal(n *ir.Name) {
if n.Type() == nil {
base.Fatalf("external %v nil type\n", n)
}
- if n.Class() == ir.PFUNC {
+ if n.Class_ == ir.PFUNC {
return
}
if n.Sym().Pkg != types.LocalPkg {
return
}
- dowidth(n.Type())
+ types.CalcSize(n.Type())
ggloblnod(n)
}
if t.IsUntyped() {
// Export untyped integers as int (if they fit).
t = types.Types[types.TINT]
- if doesoverflow(v, t) {
+ if ir.ConstOverflow(v, t) {
return
}
}
- base.Ctxt.DwarfIntConst(base.Ctxt.Pkgpath, n.Sym().Name, typesymname(t), ir.IntVal(t, v))
+ base.Ctxt.DwarfIntConst(base.Ctxt.Pkgpath, n.Sym().Name, types.TypeSymName(t), ir.IntVal(t, v))
}
func dumpglobls(externs []ir.Node) {
}
}
-func dumpfuncsyms() {
- sort.Slice(funcsyms, func(i, j int) bool {
- return funcsyms[i].LinksymName() < funcsyms[j].LinksymName()
- })
- for _, s := range funcsyms {
- sf := s.Pkg.Lookup(funcsymname(s)).Linksym()
- dsymptr(sf, 0, s.Linksym(), 0)
- ggloblsym(sf, int32(Widthptr), obj.DUPOK|obj.RODATA)
- }
-}
-
// addGCLocals adds gcargs, gclocals, gcregs, and stack object symbols to Ctxt.Data.
//
// This is done during the sequential phase after compilation, since
}
for _, gcsym := range []*obj.LSym{fn.GCArgs, fn.GCLocals} {
if gcsym != nil && !gcsym.OnList() {
- ggloblsym(gcsym, int32(len(gcsym.P)), obj.RODATA|obj.DUPOK)
+ objw.Global(gcsym, int32(len(gcsym.P)), obj.RODATA|obj.DUPOK)
}
}
if x := fn.StackObjects; x != nil {
attr := int16(obj.RODATA)
- ggloblsym(x, int32(len(x.P)), attr)
+ objw.Global(x, int32(len(x.P)), attr)
x.Set(obj.AttrStatic, true)
}
if x := fn.OpenCodedDeferInfo; x != nil {
- ggloblsym(x, int32(len(x.P)), obj.RODATA|obj.DUPOK)
- }
- }
-}
-
-func duintxx(s *obj.LSym, off int, v uint64, wid int) int {
- if off&(wid-1) != 0 {
- base.Fatalf("duintxxLSym: misaligned: v=%d wid=%d off=%d", v, wid, off)
- }
- s.WriteInt(base.Ctxt, int64(off), wid, int64(v))
- return off + wid
-}
-
-func duint8(s *obj.LSym, off int, v uint8) int {
- return duintxx(s, off, uint64(v), 1)
-}
-
-func duint16(s *obj.LSym, off int, v uint16) int {
- return duintxx(s, off, uint64(v), 2)
-}
-
-func duint32(s *obj.LSym, off int, v uint32) int {
- return duintxx(s, off, uint64(v), 4)
-}
-
-func duintptr(s *obj.LSym, off int, v uint64) int {
- return duintxx(s, off, v, Widthptr)
-}
-
-func dbvec(s *obj.LSym, off int, bv bvec) int {
- // Runtime reads the bitmaps as byte arrays. Oblige.
- for j := 0; int32(j) < bv.n; j += 8 {
- word := bv.b[j/32]
- off = duint8(s, off, uint8(word>>(uint(j)%32)))
- }
- return off
-}
-
-const (
- stringSymPrefix = "go.string."
- stringSymPattern = ".gostring.%d.%x"
-)
-
-// stringsym returns a symbol containing the string s.
-// The symbol contains the string data, not a string header.
-func stringsym(pos src.XPos, s string) (data *obj.LSym) {
- var symname string
- if len(s) > 100 {
- // Huge strings are hashed to avoid long names in object files.
- // Indulge in some paranoia by writing the length of s, too,
- // as protection against length extension attacks.
- // Same pattern is known to fileStringSym below.
- h := sha256.New()
- io.WriteString(h, s)
- symname = fmt.Sprintf(stringSymPattern, len(s), h.Sum(nil))
- } else {
- // Small strings get named directly by their contents.
- symname = strconv.Quote(s)
- }
-
- symdata := base.Ctxt.Lookup(stringSymPrefix + symname)
- if !symdata.OnList() {
- off := dstringdata(symdata, 0, s, pos, "string")
- ggloblsym(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL)
- symdata.Set(obj.AttrContentAddressable, true)
- }
-
- return symdata
-}
-
-// fileStringSym returns a symbol for the contents and the size of file.
-// If readonly is true, the symbol shares storage with any literal string
-// or other file with the same content and is placed in a read-only section.
-// If readonly is false, the symbol is a read-write copy separate from any other,
-// for use as the backing store of a []byte.
-// The content hash of file is copied into hash. (If hash is nil, nothing is copied.)
-// The returned symbol contains the data itself, not a string header.
-func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj.LSym, int64, error) {
- f, err := os.Open(file)
- if err != nil {
- return nil, 0, err
- }
- defer f.Close()
- info, err := f.Stat()
- if err != nil {
- return nil, 0, err
- }
- if !info.Mode().IsRegular() {
- return nil, 0, fmt.Errorf("not a regular file")
- }
- size := info.Size()
- if size <= 1*1024 {
- data, err := ioutil.ReadAll(f)
- if err != nil {
- return nil, 0, err
- }
- if int64(len(data)) != size {
- return nil, 0, fmt.Errorf("file changed between reads")
- }
- var sym *obj.LSym
- if readonly {
- sym = stringsym(pos, string(data))
- } else {
- sym = slicedata(pos, string(data)).Sym().Linksym()
- }
- if len(hash) > 0 {
- sum := sha256.Sum256(data)
- copy(hash, sum[:])
+ objw.Global(x, int32(len(x.P)), obj.RODATA|obj.DUPOK)
}
- return sym, size, nil
}
- if size > 2e9 {
- // ggloblsym takes an int32,
- // and probably the rest of the toolchain
- // can't handle such big symbols either.
- // See golang.org/issue/9862.
- return nil, 0, fmt.Errorf("file too large")
- }
-
- // File is too big to read and keep in memory.
- // Compute hash if needed for read-only content hashing or if the caller wants it.
- var sum []byte
- if readonly || len(hash) > 0 {
- h := sha256.New()
- n, err := io.Copy(h, f)
- if err != nil {
- return nil, 0, err
- }
- if n != size {
- return nil, 0, fmt.Errorf("file changed between reads")
- }
- sum = h.Sum(nil)
- copy(hash, sum)
- }
-
- var symdata *obj.LSym
- if readonly {
- symname := fmt.Sprintf(stringSymPattern, size, sum)
- symdata = base.Ctxt.Lookup(stringSymPrefix + symname)
- if !symdata.OnList() {
- info := symdata.NewFileInfo()
- info.Name = file
- info.Size = size
- ggloblsym(symdata, int32(size), obj.DUPOK|obj.RODATA|obj.LOCAL)
- // Note: AttrContentAddressable cannot be set here,
- // because the content-addressable-handling code
- // does not know about file symbols.
- }
- } else {
- // Emit a zero-length data symbol
- // and then fix up length and content to use file.
- symdata = slicedata(pos, "").Sym().Linksym()
- symdata.Size = size
- symdata.Type = objabi.SNOPTRDATA
- info := symdata.NewFileInfo()
- info.Name = file
- info.Size = size
- }
-
- return symdata, size, nil
-}
-
-var slicedataGen int
-
-func slicedata(pos src.XPos, s string) *ir.Name {
- slicedataGen++
- symname := fmt.Sprintf(".gobytes.%d", slicedataGen)
- sym := types.LocalPkg.Lookup(symname)
- symnode := NewName(sym)
- sym.Def = symnode
-
- lsym := sym.Linksym()
- off := dstringdata(lsym, 0, s, pos, "slice")
- ggloblsym(lsym, int32(off), obj.NOPTR|obj.LOCAL)
-
- return symnode
}
-func slicebytes(nam *ir.Name, off int64, s string) {
- if nam.Op() != ir.ONAME {
- base.Fatalf("slicebytes %v", nam)
+func ggloblnod(nam ir.Node) {
+ s := nam.Sym().Linksym()
+ s.Gotype = reflectdata.TypeSym(nam.Type()).Linksym()
+ flags := 0
+ if nam.Name().Readonly() {
+ flags = obj.RODATA
}
- slicesym(nam, off, slicedata(nam.Pos(), s), int64(len(s)))
-}
-
-func dstringdata(s *obj.LSym, off int, t string, pos src.XPos, what string) int {
- // Objects that are too large will cause the data section to overflow right away,
- // causing a cryptic error message by the linker. Check for oversize objects here
- // and provide a useful error message instead.
- if int64(len(t)) > 2e9 {
- base.ErrorfAt(pos, "%v with length %v is too big", what, len(t))
- return 0
+ if nam.Type() != nil && !nam.Type().HasPointers() {
+ flags |= obj.NOPTR
}
-
- s.WriteString(base.Ctxt, int64(off), len(t), t)
- return off + len(t)
-}
-
-func dsymptr(s *obj.LSym, off int, x *obj.LSym, xoff int) int {
- off = int(Rnd(int64(off), int64(Widthptr)))
- s.WriteAddr(base.Ctxt, int64(off), Widthptr, x, int64(xoff))
- off += Widthptr
- return off
-}
-
-func dsymptrOff(s *obj.LSym, off int, x *obj.LSym) int {
- s.WriteOff(base.Ctxt, int64(off), x, 0)
- off += 4
- return off
-}
-
-func dsymptrWeakOff(s *obj.LSym, off int, x *obj.LSym) int {
- s.WriteWeakOff(base.Ctxt, int64(off), x, 0)
- off += 4
- return off
-}
-
-// slicesym writes a static slice symbol {&arr, lencap, lencap} to n+noff.
-// slicesym does not modify n.
-func slicesym(n *ir.Name, noff int64, arr *ir.Name, lencap int64) {
- s := n.Sym().Linksym()
- if arr.Op() != ir.ONAME {
- base.Fatalf("slicesym non-name arr %v", arr)
- }
- s.WriteAddr(base.Ctxt, noff, Widthptr, arr.Sym().Linksym(), 0)
- s.WriteInt(base.Ctxt, noff+sliceLenOffset, Widthptr, lencap)
- s.WriteInt(base.Ctxt, noff+sliceCapOffset, Widthptr, lencap)
-}
-
-// addrsym writes the static address of a to n. a must be an ONAME.
-// Neither n nor a is modified.
-func addrsym(n *ir.Name, noff int64, a *ir.Name, aoff int64) {
- if n.Op() != ir.ONAME {
- base.Fatalf("addrsym n op %v", n.Op())
- }
- if n.Sym() == nil {
- base.Fatalf("addrsym nil n sym")
+ base.Ctxt.Globl(s, nam.Type().Width, flags)
+ if nam.Name().LibfuzzerExtraCounter() {
+ s.Type = objabi.SLIBFUZZER_EXTRA_COUNTER
}
- if a.Op() != ir.ONAME {
- base.Fatalf("addrsym a op %v", a.Op())
+ if nam.Sym().Linkname != "" {
+ // Make sure linkname'd symbol is non-package. When a symbol is
+ // both imported and linkname'd, s.Pkg may not be set to "_" in
+ // types.Sym.Linksym because LSym already exists. Set it here.
+ s.Pkg = "_"
}
- s := n.Sym().Linksym()
- s.WriteAddr(base.Ctxt, noff, Widthptr, a.Sym().Linksym(), aoff)
}
-// pfuncsym writes the static address of f to n. f must be a global function.
-// Neither n nor f is modified.
-func pfuncsym(n *ir.Name, noff int64, f *ir.Name) {
- if n.Op() != ir.ONAME {
- base.Fatalf("pfuncsym n op %v", n.Op())
- }
- if n.Sym() == nil {
- base.Fatalf("pfuncsym nil n sym")
+func dumpembeds() {
+ for _, v := range typecheck.Target.Embeds {
+ staticdata.WriteEmbed(v)
}
- if f.Class() != ir.PFUNC {
- base.Fatalf("pfuncsym class not PFUNC %d", f.Class())
- }
- s := n.Sym().Linksym()
- s.WriteAddr(base.Ctxt, noff, Widthptr, funcsym(f.Sym()).Linksym(), 0)
}
-// litsym writes the static literal c to n.
-// Neither n nor c is modified.
-func litsym(n *ir.Name, noff int64, c ir.Node, wid int) {
- if n.Op() != ir.ONAME {
- base.Fatalf("litsym n op %v", n.Op())
- }
- if n.Sym() == nil {
- base.Fatalf("litsym nil n sym")
- }
- if c.Op() == ir.ONIL {
- return
- }
- if c.Op() != ir.OLITERAL {
- base.Fatalf("litsym c op %v", c.Op())
- }
- s := n.Sym().Linksym()
- switch u := c.Val(); u.Kind() {
- case constant.Bool:
- i := int64(obj.Bool2int(constant.BoolVal(u)))
- s.WriteInt(base.Ctxt, noff, wid, i)
-
- case constant.Int:
- s.WriteInt(base.Ctxt, noff, wid, ir.IntVal(c.Type(), u))
-
- case constant.Float:
- f, _ := constant.Float64Val(u)
- switch c.Type().Kind() {
- case types.TFLOAT32:
- s.WriteFloat32(base.Ctxt, noff, float32(f))
- case types.TFLOAT64:
- s.WriteFloat64(base.Ctxt, noff, f)
- }
-
- case constant.Complex:
- re, _ := constant.Float64Val(constant.Real(u))
- im, _ := constant.Float64Val(constant.Imag(u))
- switch c.Type().Kind() {
- case types.TCOMPLEX64:
- s.WriteFloat32(base.Ctxt, noff, float32(re))
- s.WriteFloat32(base.Ctxt, noff+4, float32(im))
- case types.TCOMPLEX128:
- s.WriteFloat64(base.Ctxt, noff, re)
- s.WriteFloat64(base.Ctxt, noff+8, im)
+func addsignats(dcls []ir.Node) {
+ // copy types from dcl list to signatset
+ for _, n := range dcls {
+ if n.Op() == ir.OTYPE {
+ reflectdata.NeedRuntimeType(n.Type())
}
-
- case constant.String:
- i := constant.StringVal(u)
- symdata := stringsym(n.Pos(), i)
- s.WriteAddr(base.Ctxt, noff, Widthptr, symdata, 0)
- s.WriteInt(base.Ctxt, noff+int64(Widthptr), Widthptr, int64(len(i)))
-
- default:
- base.Fatalf("litsym unhandled OLITERAL %v", c)
}
}
+++ /dev/null
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
- "cmd/compile/internal/base"
- "cmd/compile/internal/ir"
- "cmd/compile/internal/types"
- "cmd/internal/src"
- "cmd/internal/sys"
-)
-
-// The racewalk pass is currently handled in three parts.
-//
-// First, for flag_race, it inserts calls to racefuncenter and
-// racefuncexit at the start and end (respectively) of each
-// function. This is handled below.
-//
-// Second, during buildssa, it inserts appropriate instrumentation
-// calls immediately before each memory load or store. This is handled
-// by the (*state).instrument method in ssa.go, so here we just set
-// the Func.InstrumentBody flag as needed. For background on why this
-// is done during SSA construction rather than a separate SSA pass,
-// see issue #19054.
-//
-// Third we remove calls to racefuncenter and racefuncexit, for leaf
-// functions without instrumented operations. This is done as part of
-// ssa opt pass via special rule.
-
-// TODO(dvyukov): do not instrument initialization as writes:
-// a := make([]int, 10)
-
-// Do not instrument the following packages at all,
-// at best instrumentation would cause infinite recursion.
-var omit_pkgs = []string{
- "runtime/internal/atomic",
- "runtime/internal/sys",
- "runtime/internal/math",
- "runtime",
- "runtime/race",
- "runtime/msan",
- "internal/cpu",
-}
-
-// Don't insert racefuncenterfp/racefuncexit into the following packages.
-// Memory accesses in the packages are either uninteresting or will cause false positives.
-var norace_inst_pkgs = []string{"sync", "sync/atomic"}
-
-func ispkgin(pkgs []string) bool {
- if base.Ctxt.Pkgpath != "" {
- for _, p := range pkgs {
- if base.Ctxt.Pkgpath == p {
- return true
- }
- }
- }
-
- return false
-}
-
-func instrument(fn *ir.Func) {
- if fn.Pragma&ir.Norace != 0 || (fn.Sym().Linksym() != nil && fn.Sym().Linksym().ABIWrapper()) {
- return
- }
-
- if !base.Flag.Race || !ispkgin(norace_inst_pkgs) {
- fn.SetInstrumentBody(true)
- }
-
- if base.Flag.Race {
- lno := base.Pos
- base.Pos = src.NoXPos
-
- if thearch.LinkArch.Arch.Family != sys.AMD64 {
- fn.Enter.Prepend(mkcall("racefuncenterfp", nil, nil))
- fn.Exit.Append(mkcall("racefuncexit", nil, nil))
- } else {
-
- // nodpc is the PC of the caller as extracted by
- // getcallerpc. We use -widthptr(FP) for x86.
- // This only works for amd64. This will not
- // work on arm or others that might support
- // race in the future.
- nodpc := nodfp.CloneName()
- nodpc.SetType(types.Types[types.TUINTPTR])
- nodpc.SetFrameOffset(int64(-Widthptr))
- fn.Dcl = append(fn.Dcl, nodpc)
- fn.Enter.Prepend(mkcall("racefuncenter", nil, nil, nodpc))
- fn.Exit.Append(mkcall("racefuncexit", nil, nil))
- }
- base.Pos = lno
- }
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
- "cmd/compile/internal/base"
- "cmd/compile/internal/ir"
- "cmd/compile/internal/types"
- "cmd/internal/sys"
- "unicode/utf8"
-)
-
-// range
-func typecheckrange(n *ir.RangeStmt) {
- // Typechecking order is important here:
- // 0. first typecheck range expression (slice/map/chan),
- // it is evaluated only once and so logically it is not part of the loop.
- // 1. typecheck produced values,
- // this part can declare new vars and so it must be typechecked before body,
- // because body can contain a closure that captures the vars.
- // 2. decldepth++ to denote loop body.
- // 3. typecheck body.
- // 4. decldepth--.
- typecheckrangeExpr(n)
-
- // second half of dance, the first half being typecheckrangeExpr
- n.SetTypecheck(1)
- ls := n.List().Slice()
- for i1, n1 := range ls {
- if n1.Typecheck() == 0 {
- ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign)
- }
- }
-
- decldepth++
- typecheckslice(n.Body().Slice(), ctxStmt)
- decldepth--
-}
-
-func typecheckrangeExpr(n *ir.RangeStmt) {
- n.SetRight(typecheck(n.Right(), ctxExpr))
-
- t := n.Right().Type()
- if t == nil {
- return
- }
- // delicate little dance. see typecheckas2
- ls := n.List().Slice()
- for i1, n1 := range ls {
- if !ir.DeclaredBy(n1, n) {
- ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign)
- }
- }
-
- if t.IsPtr() && t.Elem().IsArray() {
- t = t.Elem()
- }
- n.SetType(t)
-
- var t1, t2 *types.Type
- toomany := false
- switch t.Kind() {
- default:
- base.ErrorfAt(n.Pos(), "cannot range over %L", n.Right())
- return
-
- case types.TARRAY, types.TSLICE:
- t1 = types.Types[types.TINT]
- t2 = t.Elem()
-
- case types.TMAP:
- t1 = t.Key()
- t2 = t.Elem()
-
- case types.TCHAN:
- if !t.ChanDir().CanRecv() {
- base.ErrorfAt(n.Pos(), "invalid operation: range %v (receive from send-only type %v)", n.Right(), n.Right().Type())
- return
- }
-
- t1 = t.Elem()
- t2 = nil
- if n.List().Len() == 2 {
- toomany = true
- }
-
- case types.TSTRING:
- t1 = types.Types[types.TINT]
- t2 = types.RuneType
- }
-
- if n.List().Len() > 2 || toomany {
- base.ErrorfAt(n.Pos(), "too many variables in range")
- }
-
- var v1, v2 ir.Node
- if n.List().Len() != 0 {
- v1 = n.List().First()
- }
- if n.List().Len() > 1 {
- v2 = n.List().Second()
- }
-
- // this is not only an optimization but also a requirement in the spec.
- // "if the second iteration variable is the blank identifier, the range
- // clause is equivalent to the same clause with only the first variable
- // present."
- if ir.IsBlank(v2) {
- if v1 != nil {
- n.PtrList().Set1(v1)
- }
- v2 = nil
- }
-
- if v1 != nil {
- if ir.DeclaredBy(v1, n) {
- v1.SetType(t1)
- } else if v1.Type() != nil {
- if op, why := assignop(t1, v1.Type()); op == ir.OXXX {
- base.ErrorfAt(n.Pos(), "cannot assign type %v to %L in range%s", t1, v1, why)
- }
- }
- checkassign(n, v1)
- }
-
- if v2 != nil {
- if ir.DeclaredBy(v2, n) {
- v2.SetType(t2)
- } else if v2.Type() != nil {
- if op, why := assignop(t2, v2.Type()); op == ir.OXXX {
- base.ErrorfAt(n.Pos(), "cannot assign type %v to %L in range%s", t2, v2, why)
- }
- }
- checkassign(n, v2)
- }
-}
-
-func cheapComputableIndex(width int64) bool {
- switch thearch.LinkArch.Family {
- // MIPS does not have R+R addressing
- // Arm64 may lack ability to generate this code in our assembler,
- // but the architecture supports it.
- case sys.PPC64, sys.S390X:
- return width == 1
- case sys.AMD64, sys.I386, sys.ARM64, sys.ARM:
- switch width {
- case 1, 2, 4, 8:
- return true
- }
- }
- return false
-}
-
-// walkrange transforms various forms of ORANGE into
-// simpler forms. The result must be assigned back to n.
-// Node n may also be modified in place, and may also be
-// the returned node.
-func walkrange(nrange *ir.RangeStmt) ir.Node {
- if isMapClear(nrange) {
- m := nrange.Right()
- lno := setlineno(m)
- n := mapClear(m)
- base.Pos = lno
- return n
- }
-
- nfor := ir.NodAt(nrange.Pos(), ir.OFOR, nil, nil)
- nfor.SetInit(nrange.Init())
- nfor.SetSym(nrange.Sym())
-
- // variable name conventions:
- // ohv1, hv1, hv2: hidden (old) val 1, 2
- // ha, hit: hidden aggregate, iterator
- // hn, hp: hidden len, pointer
- // hb: hidden bool
- // a, v1, v2: not hidden aggregate, val 1, 2
-
- t := nrange.Type()
-
- a := nrange.Right()
- lno := setlineno(a)
-
- var v1, v2 ir.Node
- l := nrange.List().Len()
- if l > 0 {
- v1 = nrange.List().First()
- }
-
- if l > 1 {
- v2 = nrange.List().Second()
- }
-
- if ir.IsBlank(v2) {
- v2 = nil
- }
-
- if ir.IsBlank(v1) && v2 == nil {
- v1 = nil
- }
-
- if v1 == nil && v2 != nil {
- base.Fatalf("walkrange: v2 != nil while v1 == nil")
- }
-
- var ifGuard *ir.IfStmt
-
- var body []ir.Node
- var init []ir.Node
- switch t.Kind() {
- default:
- base.Fatalf("walkrange")
-
- case types.TARRAY, types.TSLICE:
- if nn := arrayClear(nrange, v1, v2, a); nn != nil {
- base.Pos = lno
- return nn
- }
-
- // order.stmt arranged for a copy of the array/slice variable if needed.
- ha := a
-
- hv1 := temp(types.Types[types.TINT])
- hn := temp(types.Types[types.TINT])
-
- init = append(init, ir.Nod(ir.OAS, hv1, nil))
- init = append(init, ir.Nod(ir.OAS, hn, ir.Nod(ir.OLEN, ha, nil)))
-
- nfor.SetLeft(ir.Nod(ir.OLT, hv1, hn))
- nfor.SetRight(ir.Nod(ir.OAS, hv1, ir.Nod(ir.OADD, hv1, nodintconst(1))))
-
- // for range ha { body }
- if v1 == nil {
- break
- }
-
- // for v1 := range ha { body }
- if v2 == nil {
- body = []ir.Node{ir.Nod(ir.OAS, v1, hv1)}
- break
- }
-
- // for v1, v2 := range ha { body }
- if cheapComputableIndex(nrange.Type().Elem().Width) {
- // v1, v2 = hv1, ha[hv1]
- tmp := ir.Nod(ir.OINDEX, ha, hv1)
- tmp.SetBounded(true)
- // Use OAS2 to correctly handle assignments
- // of the form "v1, a[v1] := range".
- a := ir.Nod(ir.OAS2, nil, nil)
- a.PtrList().Set2(v1, v2)
- a.PtrRlist().Set2(hv1, tmp)
- body = []ir.Node{a}
- break
- }
-
- // TODO(austin): OFORUNTIL is a strange beast, but is
- // necessary for expressing the control flow we need
- // while also making "break" and "continue" work. It
- // would be nice to just lower ORANGE during SSA, but
- // racewalk needs to see many of the operations
- // involved in ORANGE's implementation. If racewalk
- // moves into SSA, consider moving ORANGE into SSA and
- // eliminating OFORUNTIL.
-
- // TODO(austin): OFORUNTIL inhibits bounds-check
- // elimination on the index variable (see #20711).
- // Enhance the prove pass to understand this.
- ifGuard = ir.NewIfStmt(base.Pos, nil, nil, nil)
- ifGuard.SetLeft(ir.Nod(ir.OLT, hv1, hn))
- nfor.SetOp(ir.OFORUNTIL)
-
- hp := temp(types.NewPtr(nrange.Type().Elem()))
- tmp := ir.Nod(ir.OINDEX, ha, nodintconst(0))
- tmp.SetBounded(true)
- init = append(init, ir.Nod(ir.OAS, hp, nodAddr(tmp)))
-
- // Use OAS2 to correctly handle assignments
- // of the form "v1, a[v1] := range".
- a := ir.Nod(ir.OAS2, nil, nil)
- a.PtrList().Set2(v1, v2)
- a.PtrRlist().Set2(hv1, ir.Nod(ir.ODEREF, hp, nil))
- body = append(body, a)
-
- // Advance pointer as part of the late increment.
- //
- // This runs *after* the condition check, so we know
- // advancing the pointer is safe and won't go past the
- // end of the allocation.
- as := ir.Nod(ir.OAS, hp, addptr(hp, t.Elem().Width))
- nfor.PtrList().Set1(typecheck(as, ctxStmt))
-
- case types.TMAP:
- // order.stmt allocated the iterator for us.
- // we only use a once, so no copy needed.
- ha := a
-
- hit := nrange.Prealloc
- th := hit.Type()
- keysym := th.Field(0).Sym // depends on layout of iterator struct. See reflect.go:hiter
- elemsym := th.Field(1).Sym // ditto
-
- fn := syslook("mapiterinit")
-
- fn = substArgTypes(fn, t.Key(), t.Elem(), th)
- init = append(init, mkcall1(fn, nil, nil, typename(t), ha, nodAddr(hit)))
- nfor.SetLeft(ir.Nod(ir.ONE, nodSym(ir.ODOT, hit, keysym), nodnil()))
-
- fn = syslook("mapiternext")
- fn = substArgTypes(fn, th)
- nfor.SetRight(mkcall1(fn, nil, nil, nodAddr(hit)))
-
- key := ir.Nod(ir.ODEREF, nodSym(ir.ODOT, hit, keysym), nil)
- if v1 == nil {
- body = nil
- } else if v2 == nil {
- body = []ir.Node{ir.Nod(ir.OAS, v1, key)}
- } else {
- elem := ir.Nod(ir.ODEREF, nodSym(ir.ODOT, hit, elemsym), nil)
- a := ir.Nod(ir.OAS2, nil, nil)
- a.PtrList().Set2(v1, v2)
- a.PtrRlist().Set2(key, elem)
- body = []ir.Node{a}
- }
-
- case types.TCHAN:
- // order.stmt arranged for a copy of the channel variable.
- ha := a
-
- hv1 := temp(t.Elem())
- hv1.SetTypecheck(1)
- if t.Elem().HasPointers() {
- init = append(init, ir.Nod(ir.OAS, hv1, nil))
- }
- hb := temp(types.Types[types.TBOOL])
-
- nfor.SetLeft(ir.Nod(ir.ONE, hb, nodbool(false)))
- a := ir.Nod(ir.OAS2RECV, nil, nil)
- a.SetTypecheck(1)
- a.PtrList().Set2(hv1, hb)
- a.PtrRlist().Set1(ir.Nod(ir.ORECV, ha, nil))
- nfor.Left().PtrInit().Set1(a)
- if v1 == nil {
- body = nil
- } else {
- body = []ir.Node{ir.Nod(ir.OAS, v1, hv1)}
- }
- // Zero hv1. This prevents hv1 from being the sole, inaccessible
- // reference to an otherwise GC-able value during the next channel receive.
- // See issue 15281.
- body = append(body, ir.Nod(ir.OAS, hv1, nil))
-
- case types.TSTRING:
- // Transform string range statements like "for v1, v2 = range a" into
- //
- // ha := a
- // for hv1 := 0; hv1 < len(ha); {
- // hv1t := hv1
- // hv2 := rune(ha[hv1])
- // if hv2 < utf8.RuneSelf {
- // hv1++
- // } else {
- // hv2, hv1 = decoderune(ha, hv1)
- // }
- // v1, v2 = hv1t, hv2
- // // original body
- // }
-
- // order.stmt arranged for a copy of the string variable.
- ha := a
-
- hv1 := temp(types.Types[types.TINT])
- hv1t := temp(types.Types[types.TINT])
- hv2 := temp(types.RuneType)
-
- // hv1 := 0
- init = append(init, ir.Nod(ir.OAS, hv1, nil))
-
- // hv1 < len(ha)
- nfor.SetLeft(ir.Nod(ir.OLT, hv1, ir.Nod(ir.OLEN, ha, nil)))
-
- if v1 != nil {
- // hv1t = hv1
- body = append(body, ir.Nod(ir.OAS, hv1t, hv1))
- }
-
- // hv2 := rune(ha[hv1])
- nind := ir.Nod(ir.OINDEX, ha, hv1)
- nind.SetBounded(true)
- body = append(body, ir.Nod(ir.OAS, hv2, conv(nind, types.RuneType)))
-
- // if hv2 < utf8.RuneSelf
- nif := ir.Nod(ir.OIF, nil, nil)
- nif.SetLeft(ir.Nod(ir.OLT, hv2, nodintconst(utf8.RuneSelf)))
-
- // hv1++
- nif.PtrBody().Set1(ir.Nod(ir.OAS, hv1, ir.Nod(ir.OADD, hv1, nodintconst(1))))
-
- // } else {
- eif := ir.Nod(ir.OAS2, nil, nil)
- nif.PtrRlist().Set1(eif)
-
- // hv2, hv1 = decoderune(ha, hv1)
- eif.PtrList().Set2(hv2, hv1)
- fn := syslook("decoderune")
- eif.PtrRlist().Set1(mkcall1(fn, fn.Type().Results(), nil, ha, hv1))
-
- body = append(body, nif)
-
- if v1 != nil {
- if v2 != nil {
- // v1, v2 = hv1t, hv2
- a := ir.Nod(ir.OAS2, nil, nil)
- a.PtrList().Set2(v1, v2)
- a.PtrRlist().Set2(hv1t, hv2)
- body = append(body, a)
- } else {
- // v1 = hv1t
- body = append(body, ir.Nod(ir.OAS, v1, hv1t))
- }
- }
- }
-
- typecheckslice(init, ctxStmt)
-
- if ifGuard != nil {
- ifGuard.PtrInit().Append(init...)
- ifGuard = typecheck(ifGuard, ctxStmt).(*ir.IfStmt)
- } else {
- nfor.PtrInit().Append(init...)
- }
-
- typecheckslice(nfor.Left().Init().Slice(), ctxStmt)
-
- nfor.SetLeft(typecheck(nfor.Left(), ctxExpr))
- nfor.SetLeft(defaultlit(nfor.Left(), nil))
- nfor.SetRight(typecheck(nfor.Right(), ctxStmt))
- typecheckslice(body, ctxStmt)
- nfor.PtrBody().Append(body...)
- nfor.PtrBody().Append(nrange.Body().Slice()...)
-
- var n ir.Node = nfor
- if ifGuard != nil {
- ifGuard.PtrBody().Set1(n)
- n = ifGuard
- }
-
- n = walkstmt(n)
-
- base.Pos = lno
- return n
-}
-
-// isMapClear checks if n is of the form:
-//
-// for k := range m {
-// delete(m, k)
-// }
-//
-// where == for keys of map m is reflexive.
-func isMapClear(n *ir.RangeStmt) bool {
- if base.Flag.N != 0 || instrumenting {
- return false
- }
-
- if n.Op() != ir.ORANGE || n.Type().Kind() != types.TMAP || n.List().Len() != 1 {
- return false
- }
-
- k := n.List().First()
- if k == nil || ir.IsBlank(k) {
- return false
- }
-
- // Require k to be a new variable name.
- if !ir.DeclaredBy(k, n) {
- return false
- }
-
- if n.Body().Len() != 1 {
- return false
- }
-
- stmt := n.Body().First() // only stmt in body
- if stmt == nil || stmt.Op() != ir.ODELETE {
- return false
- }
-
- m := n.Right()
- if delete := stmt.(*ir.CallExpr); !samesafeexpr(delete.List().First(), m) || !samesafeexpr(delete.List().Second(), k) {
- return false
- }
-
- // Keys where equality is not reflexive can not be deleted from maps.
- if !isreflexive(m.Type().Key()) {
- return false
- }
-
- return true
-}
-
-// mapClear constructs a call to runtime.mapclear for the map m.
-func mapClear(m ir.Node) ir.Node {
- t := m.Type()
-
- // instantiate mapclear(typ *type, hmap map[any]any)
- fn := syslook("mapclear")
- fn = substArgTypes(fn, t.Key(), t.Elem())
- n := mkcall1(fn, nil, nil, typename(t), m)
- return walkstmt(typecheck(n, ctxStmt))
-}
-
-// Lower n into runtime·memclr if possible, for
-// fast zeroing of slices and arrays (issue 5373).
-// Look for instances of
-//
-// for i := range a {
-// a[i] = zero
-// }
-//
-// in which the evaluation of a is side-effect-free.
-//
-// Parameters are as in walkrange: "for v1, v2 = range a".
-func arrayClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node {
- if base.Flag.N != 0 || instrumenting {
- return nil
- }
-
- if v1 == nil || v2 != nil {
- return nil
- }
-
- if loop.Body().Len() != 1 || loop.Body().First() == nil {
- return nil
- }
-
- stmt1 := loop.Body().First() // only stmt in body
- if stmt1.Op() != ir.OAS {
- return nil
- }
- stmt := stmt1.(*ir.AssignStmt)
- if stmt.Left().Op() != ir.OINDEX {
- return nil
- }
- lhs := stmt.Left().(*ir.IndexExpr)
-
- if !samesafeexpr(lhs.Left(), a) || !samesafeexpr(lhs.Right(), v1) {
- return nil
- }
-
- elemsize := loop.Type().Elem().Width
- if elemsize <= 0 || !isZero(stmt.Right()) {
- return nil
- }
-
- // Convert to
- // if len(a) != 0 {
- // hp = &a[0]
- // hn = len(a)*sizeof(elem(a))
- // memclr{NoHeap,Has}Pointers(hp, hn)
- // i = len(a) - 1
- // }
- n := ir.Nod(ir.OIF, nil, nil)
- n.PtrBody().Set(nil)
- n.SetLeft(ir.Nod(ir.ONE, ir.Nod(ir.OLEN, a, nil), nodintconst(0)))
-
- // hp = &a[0]
- hp := temp(types.Types[types.TUNSAFEPTR])
-
- ix := ir.Nod(ir.OINDEX, a, nodintconst(0))
- ix.SetBounded(true)
- addr := convnop(nodAddr(ix), types.Types[types.TUNSAFEPTR])
- n.PtrBody().Append(ir.Nod(ir.OAS, hp, addr))
-
- // hn = len(a) * sizeof(elem(a))
- hn := temp(types.Types[types.TUINTPTR])
- mul := conv(ir.Nod(ir.OMUL, ir.Nod(ir.OLEN, a, nil), nodintconst(elemsize)), types.Types[types.TUINTPTR])
- n.PtrBody().Append(ir.Nod(ir.OAS, hn, mul))
-
- var fn ir.Node
- if a.Type().Elem().HasPointers() {
- // memclrHasPointers(hp, hn)
- Curfn.SetWBPos(stmt.Pos())
- fn = mkcall("memclrHasPointers", nil, nil, hp, hn)
- } else {
- // memclrNoHeapPointers(hp, hn)
- fn = mkcall("memclrNoHeapPointers", nil, nil, hp, hn)
- }
-
- n.PtrBody().Append(fn)
-
- // i = len(a) - 1
- v1 = ir.Nod(ir.OAS, v1, ir.Nod(ir.OSUB, ir.Nod(ir.OLEN, a, nil), nodintconst(1)))
-
- n.PtrBody().Append(v1)
-
- n.SetLeft(typecheck(n.Left(), ctxExpr))
- n.SetLeft(defaultlit(n.Left(), nil))
- typecheckslice(n.Body().Slice(), ctxStmt)
- return walkstmt(n)
-}
-
-// addptr returns (*T)(uintptr(p) + n).
-func addptr(p ir.Node, n int64) ir.Node {
- t := p.Type()
-
- p = ir.Nod(ir.OCONVNOP, p, nil)
- p.SetType(types.Types[types.TUINTPTR])
-
- p = ir.Nod(ir.OADD, p, nodintconst(n))
-
- p = ir.Nod(ir.OCONVNOP, p, nil)
- p.SetType(t)
-
- return p
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
- "cmd/compile/internal/base"
- "cmd/compile/internal/ir"
- "cmd/compile/internal/types"
-)
-
-// select
-func typecheckselect(sel *ir.SelectStmt) {
- var def ir.Node
- lno := setlineno(sel)
- typecheckslice(sel.Init().Slice(), ctxStmt)
- for _, ncase := range sel.List().Slice() {
- ncase := ncase.(*ir.CaseStmt)
-
- if ncase.List().Len() == 0 {
- // default
- if def != nil {
- base.ErrorfAt(ncase.Pos(), "multiple defaults in select (first at %v)", ir.Line(def))
- } else {
- def = ncase
- }
- } else if ncase.List().Len() > 1 {
- base.ErrorfAt(ncase.Pos(), "select cases cannot be lists")
- } else {
- ncase.List().SetFirst(typecheck(ncase.List().First(), ctxStmt))
- n := ncase.List().First()
- ncase.SetLeft(n)
- ncase.PtrList().Set(nil)
- oselrecv2 := func(dst, recv ir.Node, colas bool) {
- n := ir.NodAt(n.Pos(), ir.OSELRECV2, nil, nil)
- n.PtrList().Set2(dst, ir.BlankNode)
- n.PtrRlist().Set1(recv)
- n.SetColas(colas)
- n.SetTypecheck(1)
- ncase.SetLeft(n)
- }
- switch n.Op() {
- default:
- pos := n.Pos()
- if n.Op() == ir.ONAME {
- // We don't have the right position for ONAME nodes (see #15459 and
- // others). Using ncase.Pos for now as it will provide the correct
- // line number (assuming the expression follows the "case" keyword
- // on the same line). This matches the approach before 1.10.
- pos = ncase.Pos()
- }
- base.ErrorfAt(pos, "select case must be receive, send or assign recv")
-
- case ir.OAS:
- // convert x = <-c into x, _ = <-c
- // remove implicit conversions; the eventual assignment
- // will reintroduce them.
- if r := n.Right(); r.Op() == ir.OCONVNOP || r.Op() == ir.OCONVIFACE {
- if r.Implicit() {
- n.SetRight(r.Left())
- }
- }
- if n.Right().Op() != ir.ORECV {
- base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side")
- break
- }
- oselrecv2(n.Left(), n.Right(), n.Colas())
-
- case ir.OAS2RECV:
- if n.Rlist().First().Op() != ir.ORECV {
- base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side")
- break
- }
- n.SetOp(ir.OSELRECV2)
-
- case ir.ORECV:
- // convert <-c into _, _ = <-c
- oselrecv2(ir.BlankNode, n, false)
-
- case ir.OSEND:
- break
- }
- }
-
- typecheckslice(ncase.Body().Slice(), ctxStmt)
- }
-
- base.Pos = lno
-}
-
-func walkselect(sel *ir.SelectStmt) {
- lno := setlineno(sel)
- if sel.Body().Len() != 0 {
- base.Fatalf("double walkselect")
- }
-
- init := sel.Init().Slice()
- sel.PtrInit().Set(nil)
-
- init = append(init, walkselectcases(sel.List())...)
- sel.SetList(ir.Nodes{})
-
- sel.PtrBody().Set(init)
- walkstmtlist(sel.Body().Slice())
-
- base.Pos = lno
-}
-
-func walkselectcases(cases ir.Nodes) []ir.Node {
- ncas := cases.Len()
- sellineno := base.Pos
-
- // optimization: zero-case select
- if ncas == 0 {
- return []ir.Node{mkcall("block", nil, nil)}
- }
-
- // optimization: one-case select: single op.
- if ncas == 1 {
- cas := cases.First().(*ir.CaseStmt)
- setlineno(cas)
- l := cas.Init().Slice()
- if cas.Left() != nil { // not default:
- n := cas.Left()
- l = append(l, n.Init().Slice()...)
- n.PtrInit().Set(nil)
- switch n.Op() {
- default:
- base.Fatalf("select %v", n.Op())
-
- case ir.OSEND:
- // already ok
-
- case ir.OSELRECV2:
- r := n.(*ir.AssignListStmt)
- if ir.IsBlank(r.List().First()) && ir.IsBlank(r.List().Second()) {
- n = r.Rlist().First()
- break
- }
- r.SetOp(ir.OAS2RECV)
- }
-
- l = append(l, n)
- }
-
- l = append(l, cas.Body().Slice()...)
- l = append(l, ir.Nod(ir.OBREAK, nil, nil))
- return l
- }
-
- // convert case value arguments to addresses.
- // this rewrite is used by both the general code and the next optimization.
- var dflt *ir.CaseStmt
- for _, cas := range cases.Slice() {
- cas := cas.(*ir.CaseStmt)
- setlineno(cas)
- n := cas.Left()
- if n == nil {
- dflt = cas
- continue
- }
- switch n.Op() {
- case ir.OSEND:
- n.SetRight(nodAddr(n.Right()))
- n.SetRight(typecheck(n.Right(), ctxExpr))
-
- case ir.OSELRECV2:
- if !ir.IsBlank(n.List().First()) {
- n.List().SetIndex(0, nodAddr(n.List().First()))
- n.List().SetIndex(0, typecheck(n.List().First(), ctxExpr))
- }
- }
- }
-
- // optimization: two-case select but one is default: single non-blocking op.
- if ncas == 2 && dflt != nil {
- cas := cases.First().(*ir.CaseStmt)
- if cas == dflt {
- cas = cases.Second().(*ir.CaseStmt)
- }
-
- n := cas.Left()
- setlineno(n)
- r := ir.Nod(ir.OIF, nil, nil)
- r.PtrInit().Set(cas.Init().Slice())
- var call ir.Node
- switch n.Op() {
- default:
- base.Fatalf("select %v", n.Op())
-
- case ir.OSEND:
- // if selectnbsend(c, v) { body } else { default body }
- ch := n.Left()
- call = mkcall1(chanfn("selectnbsend", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), ch, n.Right())
-
- case ir.OSELRECV2:
- recv := n.Rlist().First().(*ir.UnaryExpr)
- ch := recv.Left()
- elem := n.List().First()
- if ir.IsBlank(elem) {
- elem = nodnil()
- }
- if ir.IsBlank(n.List().Second()) {
- // if selectnbrecv(&v, c) { body } else { default body }
- call = mkcall1(chanfn("selectnbrecv", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, ch)
- } else {
- // TODO(cuonglm): make this use selectnbrecv()
- // if selectnbrecv2(&v, &received, c) { body } else { default body }
- receivedp := typecheck(nodAddr(n.List().Second()), ctxExpr)
- call = mkcall1(chanfn("selectnbrecv2", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, receivedp, ch)
- }
- }
-
- r.SetLeft(typecheck(call, ctxExpr))
- r.PtrBody().Set(cas.Body().Slice())
- r.PtrRlist().Set(append(dflt.Init().Slice(), dflt.Body().Slice()...))
- return []ir.Node{r, ir.Nod(ir.OBREAK, nil, nil)}
- }
-
- if dflt != nil {
- ncas--
- }
- casorder := make([]*ir.CaseStmt, ncas)
- nsends, nrecvs := 0, 0
-
- var init []ir.Node
-
- // generate sel-struct
- base.Pos = sellineno
- selv := temp(types.NewArray(scasetype(), int64(ncas)))
- init = append(init, typecheck(ir.Nod(ir.OAS, selv, nil), ctxStmt))
-
- // No initialization for order; runtime.selectgo is responsible for that.
- order := temp(types.NewArray(types.Types[types.TUINT16], 2*int64(ncas)))
-
- var pc0, pcs ir.Node
- if base.Flag.Race {
- pcs = temp(types.NewArray(types.Types[types.TUINTPTR], int64(ncas)))
- pc0 = typecheck(nodAddr(ir.Nod(ir.OINDEX, pcs, nodintconst(0))), ctxExpr)
- } else {
- pc0 = nodnil()
- }
-
- // register cases
- for _, cas := range cases.Slice() {
- cas := cas.(*ir.CaseStmt)
- setlineno(cas)
-
- init = append(init, cas.Init().Slice()...)
- cas.PtrInit().Set(nil)
-
- n := cas.Left()
- if n == nil { // default:
- continue
- }
-
- var i int
- var c, elem ir.Node
- switch n.Op() {
- default:
- base.Fatalf("select %v", n.Op())
- case ir.OSEND:
- i = nsends
- nsends++
- c = n.Left()
- elem = n.Right()
- case ir.OSELRECV2:
- nrecvs++
- i = ncas - nrecvs
- recv := n.Rlist().First().(*ir.UnaryExpr)
- c = recv.Left()
- elem = n.List().First()
- }
-
- casorder[i] = cas
-
- setField := func(f string, val ir.Node) {
- r := ir.Nod(ir.OAS, nodSym(ir.ODOT, ir.Nod(ir.OINDEX, selv, nodintconst(int64(i))), lookup(f)), val)
- init = append(init, typecheck(r, ctxStmt))
- }
-
- c = convnop(c, types.Types[types.TUNSAFEPTR])
- setField("c", c)
- if !ir.IsBlank(elem) {
- elem = convnop(elem, types.Types[types.TUNSAFEPTR])
- setField("elem", elem)
- }
-
- // TODO(mdempsky): There should be a cleaner way to
- // handle this.
- if base.Flag.Race {
- r := mkcall("selectsetpc", nil, nil, nodAddr(ir.Nod(ir.OINDEX, pcs, nodintconst(int64(i)))))
- init = append(init, r)
- }
- }
- if nsends+nrecvs != ncas {
- base.Fatalf("walkselectcases: miscount: %v + %v != %v", nsends, nrecvs, ncas)
- }
-
- // run the select
- base.Pos = sellineno
- chosen := temp(types.Types[types.TINT])
- recvOK := temp(types.Types[types.TBOOL])
- r := ir.Nod(ir.OAS2, nil, nil)
- r.PtrList().Set2(chosen, recvOK)
- fn := syslook("selectgo")
- r.PtrRlist().Set1(mkcall1(fn, fn.Type().Results(), nil, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, nodintconst(int64(nsends)), nodintconst(int64(nrecvs)), nodbool(dflt == nil)))
- init = append(init, typecheck(r, ctxStmt))
-
- // selv and order are no longer alive after selectgo.
- init = append(init, ir.Nod(ir.OVARKILL, selv, nil))
- init = append(init, ir.Nod(ir.OVARKILL, order, nil))
- if base.Flag.Race {
- init = append(init, ir.Nod(ir.OVARKILL, pcs, nil))
- }
-
- // dispatch cases
- dispatch := func(cond ir.Node, cas *ir.CaseStmt) {
- cond = typecheck(cond, ctxExpr)
- cond = defaultlit(cond, nil)
-
- r := ir.Nod(ir.OIF, cond, nil)
-
- if n := cas.Left(); n != nil && n.Op() == ir.OSELRECV2 {
- if !ir.IsBlank(n.List().Second()) {
- x := ir.Nod(ir.OAS, n.List().Second(), recvOK)
- r.PtrBody().Append(typecheck(x, ctxStmt))
- }
- }
-
- r.PtrBody().AppendNodes(cas.PtrBody())
- r.PtrBody().Append(ir.Nod(ir.OBREAK, nil, nil))
- init = append(init, r)
- }
-
- if dflt != nil {
- setlineno(dflt)
- dispatch(ir.Nod(ir.OLT, chosen, nodintconst(0)), dflt)
- }
- for i, cas := range casorder {
- setlineno(cas)
- dispatch(ir.Nod(ir.OEQ, chosen, nodintconst(int64(i))), cas)
- }
-
- return init
-}
-
-// bytePtrToIndex returns a Node representing "(*byte)(&n[i])".
-func bytePtrToIndex(n ir.Node, i int64) ir.Node {
- s := nodAddr(ir.Nod(ir.OINDEX, n, nodintconst(i)))
- t := types.NewPtr(types.Types[types.TUINT8])
- return convnop(s, t)
-}
-
-var scase *types.Type
-
-// Keep in sync with src/runtime/select.go.
-func scasetype() *types.Type {
- if scase == nil {
- scase = tostruct([]*ir.Field{
- namedfield("c", types.Types[types.TUNSAFEPTR]),
- namedfield("elem", types.Types[types.TUNSAFEPTR]),
- })
- scase.SetNoalg(true)
- }
- return scase
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
- "cmd/compile/internal/base"
- "cmd/compile/internal/ir"
- "cmd/compile/internal/types"
- "cmd/internal/obj"
- "fmt"
- "go/constant"
-)
-
-type InitEntry struct {
- Xoffset int64 // struct, array only
- Expr ir.Node // bytes of run-time computed expressions
-}
-
-type InitPlan struct {
- E []InitEntry
-}
-
-// An InitSchedule is used to decompose assignment statements into
-// static and dynamic initialization parts. Static initializations are
-// handled by populating variables' linker symbol data, while dynamic
-// initializations are accumulated to be executed in order.
-type InitSchedule struct {
- // out is the ordered list of dynamic initialization
- // statements.
- out []ir.Node
-
- initplans map[ir.Node]*InitPlan
- inittemps map[ir.Node]*ir.Name
-}
-
-func (s *InitSchedule) append(n ir.Node) {
- s.out = append(s.out, n)
-}
-
-// staticInit adds an initialization statement n to the schedule.
-func (s *InitSchedule) staticInit(n ir.Node) {
- if !s.tryStaticInit(n) {
- if base.Flag.Percent != 0 {
- ir.Dump("nonstatic", n)
- }
- s.append(n)
- }
-}
-
-// tryStaticInit attempts to statically execute an initialization
-// statement and reports whether it succeeded.
-func (s *InitSchedule) tryStaticInit(nn ir.Node) bool {
- // Only worry about simple "l = r" assignments. Multiple
- // variable/expression OAS2 assignments have already been
- // replaced by multiple simple OAS assignments, and the other
- // OAS2* assignments mostly necessitate dynamic execution
- // anyway.
- if nn.Op() != ir.OAS {
- return false
- }
- n := nn.(*ir.AssignStmt)
- if ir.IsBlank(n.Left()) && !anySideEffects(n.Right()) {
- // Discard.
- return true
- }
- lno := setlineno(n)
- defer func() { base.Pos = lno }()
- nam := n.Left().(*ir.Name)
- return s.staticassign(nam, 0, n.Right(), nam.Type())
-}
-
-// like staticassign but we are copying an already
-// initialized value r.
-func (s *InitSchedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *types.Type) bool {
- if rn.Class() == ir.PFUNC {
- // TODO if roff != 0 { panic }
- pfuncsym(l, loff, rn)
- return true
- }
- if rn.Class() != ir.PEXTERN || rn.Sym().Pkg != types.LocalPkg {
- return false
- }
- if rn.Defn == nil { // probably zeroed but perhaps supplied externally and of unknown value
- return false
- }
- if rn.Defn.Op() != ir.OAS {
- return false
- }
- if rn.Type().IsString() { // perhaps overwritten by cmd/link -X (#34675)
- return false
- }
- orig := rn
- r := rn.Defn.(*ir.AssignStmt).Right()
-
- for r.Op() == ir.OCONVNOP && !types.Identical(r.Type(), typ) {
- r = r.(*ir.ConvExpr).Left()
- }
-
- switch r.Op() {
- case ir.OMETHEXPR:
- r = r.(*ir.MethodExpr).FuncName()
- fallthrough
- case ir.ONAME:
- r := r.(*ir.Name)
- if s.staticcopy(l, loff, r, typ) {
- return true
- }
- // We may have skipped past one or more OCONVNOPs, so
- // use conv to ensure r is assignable to l (#13263).
- dst := ir.Node(l)
- if loff != 0 || !types.Identical(typ, l.Type()) {
- dst = ir.NewNameOffsetExpr(base.Pos, l, loff, typ)
- }
- s.append(ir.Nod(ir.OAS, dst, conv(r, typ)))
- return true
-
- case ir.ONIL:
- return true
-
- case ir.OLITERAL:
- if isZero(r) {
- return true
- }
- litsym(l, loff, r, int(typ.Width))
- return true
-
- case ir.OADDR:
- if a := r.Left(); a.Op() == ir.ONAME {
- a := a.(*ir.Name)
- addrsym(l, loff, a, 0)
- return true
- }
-
- case ir.OPTRLIT:
- switch r.Left().Op() {
- case ir.OARRAYLIT, ir.OSLICELIT, ir.OSTRUCTLIT, ir.OMAPLIT:
- // copy pointer
- addrsym(l, loff, s.inittemps[r], 0)
- return true
- }
-
- case ir.OSLICELIT:
- r := r.(*ir.CompLitExpr)
- // copy slice
- slicesym(l, loff, s.inittemps[r], r.Len)
- return true
-
- case ir.OARRAYLIT, ir.OSTRUCTLIT:
- p := s.initplans[r]
- for i := range p.E {
- e := &p.E[i]
- typ := e.Expr.Type()
- if e.Expr.Op() == ir.OLITERAL || e.Expr.Op() == ir.ONIL {
- litsym(l, loff+e.Xoffset, e.Expr, int(typ.Width))
- continue
- }
- x := e.Expr
- if x.Op() == ir.OMETHEXPR {
- x = x.(*ir.MethodExpr).FuncName()
- }
- if x.Op() == ir.ONAME && s.staticcopy(l, loff+e.Xoffset, x.(*ir.Name), typ) {
- continue
- }
- // Requires computation, but we're
- // copying someone else's computation.
- ll := ir.NewNameOffsetExpr(base.Pos, l, loff+e.Xoffset, typ)
- rr := ir.NewNameOffsetExpr(base.Pos, orig, e.Xoffset, typ)
- setlineno(rr)
- s.append(ir.Nod(ir.OAS, ll, rr))
- }
-
- return true
- }
-
- return false
-}
-
-func (s *InitSchedule) staticassign(l *ir.Name, loff int64, r ir.Node, typ *types.Type) bool {
- for r.Op() == ir.OCONVNOP {
- r = r.(*ir.ConvExpr).Left()
- }
-
- switch r.Op() {
- case ir.ONAME:
- r := r.(*ir.Name)
- return s.staticcopy(l, loff, r, typ)
-
- case ir.OMETHEXPR:
- r := r.(*ir.MethodExpr)
- return s.staticcopy(l, loff, r.FuncName(), typ)
-
- case ir.ONIL:
- return true
-
- case ir.OLITERAL:
- if isZero(r) {
- return true
- }
- litsym(l, loff, r, int(typ.Width))
- return true
-
- case ir.OADDR:
- if name, offset, ok := stataddr(r.Left()); ok {
- addrsym(l, loff, name, offset)
- return true
- }
- fallthrough
-
- case ir.OPTRLIT:
- switch r.Left().Op() {
- case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT:
- // Init pointer.
- a := staticname(r.Left().Type())
-
- s.inittemps[r] = a
- addrsym(l, loff, a, 0)
-
- // Init underlying literal.
- if !s.staticassign(a, 0, r.Left(), a.Type()) {
- s.append(ir.Nod(ir.OAS, a, r.Left()))
- }
- return true
- }
- //dump("not static ptrlit", r);
-
- case ir.OSTR2BYTES:
- if l.Class() == ir.PEXTERN && r.Left().Op() == ir.OLITERAL {
- sval := ir.StringVal(r.Left())
- slicebytes(l, loff, sval)
- return true
- }
-
- case ir.OSLICELIT:
- r := r.(*ir.CompLitExpr)
- s.initplan(r)
- // Init slice.
- ta := types.NewArray(r.Type().Elem(), r.Len)
- ta.SetNoalg(true)
- a := staticname(ta)
- s.inittemps[r] = a
- slicesym(l, loff, a, r.Len)
- // Fall through to init underlying array.
- l = a
- loff = 0
- fallthrough
-
- case ir.OARRAYLIT, ir.OSTRUCTLIT:
- s.initplan(r)
-
- p := s.initplans[r]
- for i := range p.E {
- e := &p.E[i]
- if e.Expr.Op() == ir.OLITERAL || e.Expr.Op() == ir.ONIL {
- litsym(l, loff+e.Xoffset, e.Expr, int(e.Expr.Type().Width))
- continue
- }
- setlineno(e.Expr)
- if !s.staticassign(l, loff+e.Xoffset, e.Expr, e.Expr.Type()) {
- a := ir.NewNameOffsetExpr(base.Pos, l, loff+e.Xoffset, e.Expr.Type())
- s.append(ir.Nod(ir.OAS, a, e.Expr))
- }
- }
-
- return true
-
- case ir.OMAPLIT:
- break
-
- case ir.OCLOSURE:
- r := r.(*ir.ClosureExpr)
- if hasemptycvars(r) {
- if base.Debug.Closure > 0 {
- base.WarnfAt(r.Pos(), "closure converted to global")
- }
- // Closures with no captured variables are globals,
- // so the assignment can be done at link time.
- // TODO if roff != 0 { panic }
- pfuncsym(l, loff, r.Func().Nname)
- return true
- }
- closuredebugruntimecheck(r)
-
- case ir.OCONVIFACE:
- // This logic is mirrored in isStaticCompositeLiteral.
- // If you change something here, change it there, and vice versa.
-
- // Determine the underlying concrete type and value we are converting from.
- val := ir.Node(r)
- for val.Op() == ir.OCONVIFACE {
- val = val.(*ir.ConvExpr).Left()
- }
-
- if val.Type().IsInterface() {
- // val is an interface type.
- // If val is nil, we can statically initialize l;
- // both words are zero and so there no work to do, so report success.
- // If val is non-nil, we have no concrete type to record,
- // and we won't be able to statically initialize its value, so report failure.
- return val.Op() == ir.ONIL
- }
-
- markTypeUsedInInterface(val.Type(), l.Sym().Linksym())
-
- var itab *ir.AddrExpr
- if typ.IsEmptyInterface() {
- itab = typename(val.Type())
- } else {
- itab = itabname(val.Type(), typ)
- }
-
- // Create a copy of l to modify while we emit data.
-
- // Emit itab, advance offset.
- addrsym(l, loff, itab.Left().(*ir.Name), 0)
-
- // Emit data.
- if isdirectiface(val.Type()) {
- if val.Op() == ir.ONIL {
- // Nil is zero, nothing to do.
- return true
- }
- // Copy val directly into n.
- setlineno(val)
- if !s.staticassign(l, loff+int64(Widthptr), val, val.Type()) {
- a := ir.NewNameOffsetExpr(base.Pos, l, loff+int64(Widthptr), val.Type())
- s.append(ir.Nod(ir.OAS, a, val))
- }
- } else {
- // Construct temp to hold val, write pointer to temp into n.
- a := staticname(val.Type())
- s.inittemps[val] = a
- if !s.staticassign(a, 0, val, val.Type()) {
- s.append(ir.Nod(ir.OAS, a, val))
- }
- addrsym(l, loff+int64(Widthptr), a, 0)
- }
-
- return true
- }
-
- //dump("not static", r);
- return false
-}
-
-// initContext is the context in which static data is populated.
-// It is either in an init function or in any other function.
-// Static data populated in an init function will be written either
-// zero times (as a readonly, static data symbol) or
-// one time (during init function execution).
-// Either way, there is no opportunity for races or further modification,
-// so the data can be written to a (possibly readonly) data symbol.
-// Static data populated in any other function needs to be local to
-// that function to allow multiple instances of that function
-// to execute concurrently without clobbering each others' data.
-type initContext uint8
-
-const (
- inInitFunction initContext = iota
- inNonInitFunction
-)
-
-func (c initContext) String() string {
- if c == inInitFunction {
- return "inInitFunction"
- }
- return "inNonInitFunction"
-}
-
-// from here down is the walk analysis
-// of composite literals.
-// most of the work is to generate
-// data statements for the constant
-// part of the composite literal.
-
-var statuniqgen int // name generator for static temps
-
-// staticname returns a name backed by a (writable) static data symbol.
-// Use readonlystaticname for read-only node.
-func staticname(t *types.Type) *ir.Name {
- // Don't use lookupN; it interns the resulting string, but these are all unique.
- n := NewName(lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen)))
- statuniqgen++
- declare(n, ir.PEXTERN)
- n.SetType(t)
- n.Sym().Linksym().Set(obj.AttrLocal, true)
- return n
-}
-
-// readonlystaticname returns a name backed by a (writable) static data symbol.
-func readonlystaticname(t *types.Type) *ir.Name {
- n := staticname(t)
- n.MarkReadonly()
- n.Sym().Linksym().Set(obj.AttrContentAddressable, true)
- return n
-}
-
-func isSimpleName(nn ir.Node) bool {
- if nn.Op() != ir.ONAME {
- return false
- }
- n := nn.(*ir.Name)
- return n.Class() != ir.PAUTOHEAP && n.Class() != ir.PEXTERN
-}
-
-func litas(l ir.Node, r ir.Node, init *ir.Nodes) {
- appendWalkStmt(init, ir.Nod(ir.OAS, l, r))
-}
-
-// initGenType is a bitmap indicating the types of generation that will occur for a static value.
-type initGenType uint8
-
-const (
- initDynamic initGenType = 1 << iota // contains some dynamic values, for which init code will be generated
- initConst // contains some constant values, which may be written into data symbols
-)
-
-// getdyn calculates the initGenType for n.
-// If top is false, getdyn is recursing.
-func getdyn(n ir.Node, top bool) initGenType {
- switch n.Op() {
- default:
- if isGoConst(n) {
- return initConst
- }
- return initDynamic
-
- case ir.OSLICELIT:
- n := n.(*ir.CompLitExpr)
- if !top {
- return initDynamic
- }
- if n.Len/4 > int64(n.List().Len()) {
- // <25% of entries have explicit values.
- // Very rough estimation, it takes 4 bytes of instructions
- // to initialize 1 byte of result. So don't use a static
- // initializer if the dynamic initialization code would be
- // smaller than the static value.
- // See issue 23780.
- return initDynamic
- }
-
- case ir.OARRAYLIT, ir.OSTRUCTLIT:
- }
- lit := n.(*ir.CompLitExpr)
-
- var mode initGenType
- for _, n1 := range lit.List().Slice() {
- switch n1.Op() {
- case ir.OKEY:
- n1 = n1.(*ir.KeyExpr).Right()
- case ir.OSTRUCTKEY:
- n1 = n1.(*ir.StructKeyExpr).Left()
- }
- mode |= getdyn(n1, false)
- if mode == initDynamic|initConst {
- break
- }
- }
- return mode
-}
-
-// isStaticCompositeLiteral reports whether n is a compile-time constant.
-func isStaticCompositeLiteral(n ir.Node) bool {
- switch n.Op() {
- case ir.OSLICELIT:
- return false
- case ir.OARRAYLIT:
- for _, r := range n.List().Slice() {
- if r.Op() == ir.OKEY {
- r = r.(*ir.KeyExpr).Right()
- }
- if !isStaticCompositeLiteral(r) {
- return false
- }
- }
- return true
- case ir.OSTRUCTLIT:
- for _, r := range n.List().Slice() {
- r := r.(*ir.StructKeyExpr)
- if !isStaticCompositeLiteral(r.Left()) {
- return false
- }
- }
- return true
- case ir.OLITERAL, ir.ONIL:
- return true
- case ir.OCONVIFACE:
- // See staticassign's OCONVIFACE case for comments.
- val := ir.Node(n)
- for val.Op() == ir.OCONVIFACE {
- val = val.(*ir.ConvExpr).Left()
- }
- if val.Type().IsInterface() {
- return val.Op() == ir.ONIL
- }
- if isdirectiface(val.Type()) && val.Op() == ir.ONIL {
- return true
- }
- return isStaticCompositeLiteral(val)
- }
- return false
-}
-
-// initKind is a kind of static initialization: static, dynamic, or local.
-// Static initialization represents literals and
-// literal components of composite literals.
-// Dynamic initialization represents non-literals and
-// non-literal components of composite literals.
-// LocalCode initialization represents initialization
-// that occurs purely in generated code local to the function of use.
-// Initialization code is sometimes generated in passes,
-// first static then dynamic.
-type initKind uint8
-
-const (
- initKindStatic initKind = iota + 1
- initKindDynamic
- initKindLocalCode
-)
-
-// fixedlit handles struct, array, and slice literals.
-// TODO: expand documentation.
-func fixedlit(ctxt initContext, kind initKind, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) {
- isBlank := var_ == ir.BlankNode
- var splitnode func(ir.Node) (a ir.Node, value ir.Node)
- switch n.Op() {
- case ir.OARRAYLIT, ir.OSLICELIT:
- var k int64
- splitnode = func(r ir.Node) (ir.Node, ir.Node) {
- if r.Op() == ir.OKEY {
- kv := r.(*ir.KeyExpr)
- k = indexconst(kv.Left())
- if k < 0 {
- base.Fatalf("fixedlit: invalid index %v", kv.Left())
- }
- r = kv.Right()
- }
- a := ir.Nod(ir.OINDEX, var_, nodintconst(k))
- k++
- if isBlank {
- return ir.BlankNode, r
- }
- return a, r
- }
- case ir.OSTRUCTLIT:
- splitnode = func(rn ir.Node) (ir.Node, ir.Node) {
- r := rn.(*ir.StructKeyExpr)
- if r.Sym().IsBlank() || isBlank {
- return ir.BlankNode, r.Left()
- }
- setlineno(r)
- return nodSym(ir.ODOT, var_, r.Sym()), r.Left()
- }
- default:
- base.Fatalf("fixedlit bad op: %v", n.Op())
- }
-
- for _, r := range n.List().Slice() {
- a, value := splitnode(r)
- if a == ir.BlankNode && !anySideEffects(value) {
- // Discard.
- continue
- }
-
- switch value.Op() {
- case ir.OSLICELIT:
- value := value.(*ir.CompLitExpr)
- if (kind == initKindStatic && ctxt == inNonInitFunction) || (kind == initKindDynamic && ctxt == inInitFunction) {
- slicelit(ctxt, value, a, init)
- continue
- }
-
- case ir.OARRAYLIT, ir.OSTRUCTLIT:
- value := value.(*ir.CompLitExpr)
- fixedlit(ctxt, kind, value, a, init)
- continue
- }
-
- islit := isGoConst(value)
- if (kind == initKindStatic && !islit) || (kind == initKindDynamic && islit) {
- continue
- }
-
- // build list of assignments: var[index] = expr
- setlineno(a)
- as := ir.NewAssignStmt(base.Pos, a, value)
- as = typecheck(as, ctxStmt).(*ir.AssignStmt)
- switch kind {
- case initKindStatic:
- genAsStatic(as)
- case initKindDynamic, initKindLocalCode:
- a = orderStmtInPlace(as, map[string][]*ir.Name{})
- a = walkstmt(a)
- init.Append(a)
- default:
- base.Fatalf("fixedlit: bad kind %d", kind)
- }
-
- }
-}
-
-func isSmallSliceLit(n *ir.CompLitExpr) bool {
- if n.Op() != ir.OSLICELIT {
- return false
- }
-
- return n.Type().Elem().Width == 0 || n.Len <= smallArrayBytes/n.Type().Elem().Width
-}
-
-func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) {
- // make an array type corresponding the number of elements we have
- t := types.NewArray(n.Type().Elem(), n.Len)
- dowidth(t)
-
- if ctxt == inNonInitFunction {
- // put everything into static array
- vstat := staticname(t)
-
- fixedlit(ctxt, initKindStatic, n, vstat, init)
- fixedlit(ctxt, initKindDynamic, n, vstat, init)
-
- // copy static to slice
- var_ = typecheck(var_, ctxExpr|ctxAssign)
- name, offset, ok := stataddr(var_)
- if !ok || name.Class() != ir.PEXTERN {
- base.Fatalf("slicelit: %v", var_)
- }
- slicesym(name, offset, vstat, t.NumElem())
- return
- }
-
- // recipe for var = []t{...}
- // 1. make a static array
- // var vstat [...]t
- // 2. assign (data statements) the constant part
- // vstat = constpart{}
- // 3. make an auto pointer to array and allocate heap to it
- // var vauto *[...]t = new([...]t)
- // 4. copy the static array to the auto array
- // *vauto = vstat
- // 5. for each dynamic part assign to the array
- // vauto[i] = dynamic part
- // 6. assign slice of allocated heap to var
- // var = vauto[:]
- //
- // an optimization is done if there is no constant part
- // 3. var vauto *[...]t = new([...]t)
- // 5. vauto[i] = dynamic part
- // 6. var = vauto[:]
-
- // if the literal contains constants,
- // make static initialized array (1),(2)
- var vstat ir.Node
-
- mode := getdyn(n, true)
- if mode&initConst != 0 && !isSmallSliceLit(n) {
- if ctxt == inInitFunction {
- vstat = readonlystaticname(t)
- } else {
- vstat = staticname(t)
- }
- fixedlit(ctxt, initKindStatic, n, vstat, init)
- }
-
- // make new auto *array (3 declare)
- vauto := temp(types.NewPtr(t))
-
- // set auto to point at new temp or heap (3 assign)
- var a ir.Node
- if x := n.Prealloc; x != nil {
- // temp allocated during order.go for dddarg
- if !types.Identical(t, x.Type()) {
- panic("dotdotdot base type does not match order's assigned type")
- }
-
- if vstat == nil {
- a = ir.Nod(ir.OAS, x, nil)
- a = typecheck(a, ctxStmt)
- init.Append(a) // zero new temp
- } else {
- // Declare that we're about to initialize all of x.
- // (Which happens at the *vauto = vstat below.)
- init.Append(ir.Nod(ir.OVARDEF, x, nil))
- }
-
- a = nodAddr(x)
- } else if n.Esc() == EscNone {
- a = temp(t)
- if vstat == nil {
- a = ir.Nod(ir.OAS, temp(t), nil)
- a = typecheck(a, ctxStmt)
- init.Append(a) // zero new temp
- a = a.(*ir.AssignStmt).Left()
- } else {
- init.Append(ir.Nod(ir.OVARDEF, a, nil))
- }
-
- a = nodAddr(a)
- } else {
- a = ir.Nod(ir.ONEW, ir.TypeNode(t), nil)
- }
- appendWalkStmt(init, ir.Nod(ir.OAS, vauto, a))
-
- if vstat != nil {
- // copy static to heap (4)
- a = ir.Nod(ir.ODEREF, vauto, nil)
- appendWalkStmt(init, ir.Nod(ir.OAS, a, vstat))
- }
-
- // put dynamics into array (5)
- var index int64
- for _, value := range n.List().Slice() {
- if value.Op() == ir.OKEY {
- kv := value.(*ir.KeyExpr)
- index = indexconst(kv.Left())
- if index < 0 {
- base.Fatalf("slicelit: invalid index %v", kv.Left())
- }
- value = kv.Right()
- }
- a := ir.Nod(ir.OINDEX, vauto, nodintconst(index))
- a.SetBounded(true)
- index++
-
- // TODO need to check bounds?
-
- switch value.Op() {
- case ir.OSLICELIT:
- break
-
- case ir.OARRAYLIT, ir.OSTRUCTLIT:
- value := value.(*ir.CompLitExpr)
- k := initKindDynamic
- if vstat == nil {
- // Generate both static and dynamic initializations.
- // See issue #31987.
- k = initKindLocalCode
- }
- fixedlit(ctxt, k, value, a, init)
- continue
- }
-
- if vstat != nil && isGoConst(value) { // already set by copy from static value
- continue
- }
-
- // build list of vauto[c] = expr
- setlineno(value)
- as := typecheck(ir.Nod(ir.OAS, a, value), ctxStmt)
- as = orderStmtInPlace(as, map[string][]*ir.Name{})
- as = walkstmt(as)
- init.Append(as)
- }
-
- // make slice out of heap (6)
- a = ir.Nod(ir.OAS, var_, ir.Nod(ir.OSLICE, vauto, nil))
-
- a = typecheck(a, ctxStmt)
- a = orderStmtInPlace(a, map[string][]*ir.Name{})
- a = walkstmt(a)
- init.Append(a)
-}
-
-func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) {
- // make the map var
- a := ir.Nod(ir.OMAKE, nil, nil)
- a.SetEsc(n.Esc())
- a.PtrList().Set2(ir.TypeNode(n.Type()), nodintconst(int64(n.List().Len())))
- litas(m, a, init)
-
- entries := n.List().Slice()
-
- // The order pass already removed any dynamic (runtime-computed) entries.
- // All remaining entries are static. Double-check that.
- for _, r := range entries {
- r := r.(*ir.KeyExpr)
- if !isStaticCompositeLiteral(r.Left()) || !isStaticCompositeLiteral(r.Right()) {
- base.Fatalf("maplit: entry is not a literal: %v", r)
- }
- }
-
- if len(entries) > 25 {
- // For a large number of entries, put them in an array and loop.
-
- // build types [count]Tindex and [count]Tvalue
- tk := types.NewArray(n.Type().Key(), int64(len(entries)))
- te := types.NewArray(n.Type().Elem(), int64(len(entries)))
-
- tk.SetNoalg(true)
- te.SetNoalg(true)
-
- dowidth(tk)
- dowidth(te)
-
- // make and initialize static arrays
- vstatk := readonlystaticname(tk)
- vstate := readonlystaticname(te)
-
- datak := ir.NewCompLitExpr(base.Pos, ir.OARRAYLIT, nil, nil)
- datae := ir.NewCompLitExpr(base.Pos, ir.OARRAYLIT, nil, nil)
- for _, r := range entries {
- r := r.(*ir.KeyExpr)
- datak.PtrList().Append(r.Left())
- datae.PtrList().Append(r.Right())
- }
- fixedlit(inInitFunction, initKindStatic, datak, vstatk, init)
- fixedlit(inInitFunction, initKindStatic, datae, vstate, init)
-
- // loop adding structure elements to map
- // for i = 0; i < len(vstatk); i++ {
- // map[vstatk[i]] = vstate[i]
- // }
- i := temp(types.Types[types.TINT])
- rhs := ir.Nod(ir.OINDEX, vstate, i)
- rhs.SetBounded(true)
-
- kidx := ir.Nod(ir.OINDEX, vstatk, i)
- kidx.SetBounded(true)
- lhs := ir.Nod(ir.OINDEX, m, kidx)
-
- zero := ir.Nod(ir.OAS, i, nodintconst(0))
- cond := ir.Nod(ir.OLT, i, nodintconst(tk.NumElem()))
- incr := ir.Nod(ir.OAS, i, ir.Nod(ir.OADD, i, nodintconst(1)))
- body := ir.Nod(ir.OAS, lhs, rhs)
-
- loop := ir.Nod(ir.OFOR, cond, incr)
- loop.PtrBody().Set1(body)
- loop.PtrInit().Set1(zero)
-
- appendWalkStmt(init, loop)
- return
- }
- // For a small number of entries, just add them directly.
-
- // Build list of var[c] = expr.
- // Use temporaries so that mapassign1 can have addressable key, elem.
- // TODO(josharian): avoid map key temporaries for mapfast_* assignments with literal keys.
- tmpkey := temp(m.Type().Key())
- tmpelem := temp(m.Type().Elem())
-
- for _, r := range entries {
- r := r.(*ir.KeyExpr)
- index, elem := r.Left(), r.Right()
-
- setlineno(index)
- appendWalkStmt(init, ir.Nod(ir.OAS, tmpkey, index))
-
- setlineno(elem)
- appendWalkStmt(init, ir.Nod(ir.OAS, tmpelem, elem))
-
- setlineno(tmpelem)
- appendWalkStmt(init, ir.Nod(ir.OAS, ir.Nod(ir.OINDEX, m, tmpkey), tmpelem))
- }
-
- appendWalkStmt(init, ir.Nod(ir.OVARKILL, tmpkey, nil))
- appendWalkStmt(init, ir.Nod(ir.OVARKILL, tmpelem, nil))
-}
-
-func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) {
- t := n.Type()
- switch n.Op() {
- default:
- base.Fatalf("anylit: not lit, op=%v node=%v", n.Op(), n)
-
- case ir.ONAME:
- appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, n))
-
- case ir.OMETHEXPR:
- n := n.(*ir.MethodExpr)
- anylit(n.FuncName(), var_, init)
-
- case ir.OPTRLIT:
- if !t.IsPtr() {
- base.Fatalf("anylit: not ptr")
- }
-
- var r ir.Node
- if n.Right() != nil {
- // n.Right is stack temporary used as backing store.
- appendWalkStmt(init, ir.Nod(ir.OAS, n.Right(), nil)) // zero backing store, just in case (#18410)
- r = nodAddr(n.Right())
- } else {
- r = ir.Nod(ir.ONEW, ir.TypeNode(n.Left().Type()), nil)
- r.SetEsc(n.Esc())
- }
- appendWalkStmt(init, ir.Nod(ir.OAS, var_, r))
-
- var_ = ir.Nod(ir.ODEREF, var_, nil)
- var_ = typecheck(var_, ctxExpr|ctxAssign)
- anylit(n.Left(), var_, init)
-
- case ir.OSTRUCTLIT, ir.OARRAYLIT:
- n := n.(*ir.CompLitExpr)
- if !t.IsStruct() && !t.IsArray() {
- base.Fatalf("anylit: not struct/array")
- }
-
- if isSimpleName(var_) && n.List().Len() > 4 {
- // lay out static data
- vstat := readonlystaticname(t)
-
- ctxt := inInitFunction
- if n.Op() == ir.OARRAYLIT {
- ctxt = inNonInitFunction
- }
- fixedlit(ctxt, initKindStatic, n, vstat, init)
-
- // copy static to var
- appendWalkStmt(init, ir.Nod(ir.OAS, var_, vstat))
-
- // add expressions to automatic
- fixedlit(inInitFunction, initKindDynamic, n, var_, init)
- break
- }
-
- var components int64
- if n.Op() == ir.OARRAYLIT {
- components = t.NumElem()
- } else {
- components = int64(t.NumFields())
- }
- // initialization of an array or struct with unspecified components (missing fields or arrays)
- if isSimpleName(var_) || int64(n.List().Len()) < components {
- appendWalkStmt(init, ir.Nod(ir.OAS, var_, nil))
- }
-
- fixedlit(inInitFunction, initKindLocalCode, n, var_, init)
-
- case ir.OSLICELIT:
- n := n.(*ir.CompLitExpr)
- slicelit(inInitFunction, n, var_, init)
-
- case ir.OMAPLIT:
- n := n.(*ir.CompLitExpr)
- if !t.IsMap() {
- base.Fatalf("anylit: not map")
- }
- maplit(n, var_, init)
- }
-}
-
-// oaslit handles special composite literal assignments.
-// It returns true if n's effects have been added to init,
-// in which case n should be dropped from the program by the caller.
-func oaslit(n *ir.AssignStmt, init *ir.Nodes) bool {
- if n.Left() == nil || n.Right() == nil {
- // not a special composite literal assignment
- return false
- }
- if n.Left().Type() == nil || n.Right().Type() == nil {
- // not a special composite literal assignment
- return false
- }
- if !isSimpleName(n.Left()) {
- // not a special composite literal assignment
- return false
- }
- if !types.Identical(n.Left().Type(), n.Right().Type()) {
- // not a special composite literal assignment
- return false
- }
-
- switch n.Right().Op() {
- default:
- // not a special composite literal assignment
- return false
-
- case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT:
- if refersToCommonName(n.Left(), n.Right()) {
- // not a special composite literal assignment
- return false
- }
- anylit(n.Right(), n.Left(), init)
- }
-
- return true
-}
-
-func getlit(lit ir.Node) int {
- if smallintconst(lit) {
- return int(ir.Int64Val(lit))
- }
- return -1
-}
-
-// stataddr returns the static address of n, if n has one, or else nil.
-func stataddr(n ir.Node) (name *ir.Name, offset int64, ok bool) {
- if n == nil {
- return nil, 0, false
- }
-
- switch n.Op() {
- case ir.ONAME:
- n := n.(*ir.Name)
- return n, 0, true
-
- case ir.OMETHEXPR:
- n := n.(*ir.MethodExpr)
- return stataddr(n.FuncName())
-
- case ir.ODOT:
- if name, offset, ok = stataddr(n.Left()); !ok {
- break
- }
- offset += n.Offset()
- return name, offset, true
-
- case ir.OINDEX:
- if n.Left().Type().IsSlice() {
- break
- }
- if name, offset, ok = stataddr(n.Left()); !ok {
- break
- }
- l := getlit(n.Right())
- if l < 0 {
- break
- }
-
- // Check for overflow.
- if n.Type().Width != 0 && MaxWidth/n.Type().Width <= int64(l) {
- break
- }
- offset += int64(l) * n.Type().Width
- return name, offset, true
- }
-
- return nil, 0, false
-}
-
-func (s *InitSchedule) initplan(n ir.Node) {
- if s.initplans[n] != nil {
- return
- }
- p := new(InitPlan)
- s.initplans[n] = p
- switch n.Op() {
- default:
- base.Fatalf("initplan")
-
- case ir.OARRAYLIT, ir.OSLICELIT:
- var k int64
- for _, a := range n.List().Slice() {
- if a.Op() == ir.OKEY {
- kv := a.(*ir.KeyExpr)
- k = indexconst(kv.Left())
- if k < 0 {
- base.Fatalf("initplan arraylit: invalid index %v", kv.Left())
- }
- a = kv.Right()
- }
- s.addvalue(p, k*n.Type().Elem().Width, a)
- k++
- }
-
- case ir.OSTRUCTLIT:
- for _, a := range n.List().Slice() {
- if a.Op() != ir.OSTRUCTKEY {
- base.Fatalf("initplan structlit")
- }
- a := a.(*ir.StructKeyExpr)
- if a.Sym().IsBlank() {
- continue
- }
- s.addvalue(p, a.Offset(), a.Left())
- }
-
- case ir.OMAPLIT:
- for _, a := range n.List().Slice() {
- if a.Op() != ir.OKEY {
- base.Fatalf("initplan maplit")
- }
- a := a.(*ir.KeyExpr)
- s.addvalue(p, -1, a.Right())
- }
- }
-}
-
-func (s *InitSchedule) addvalue(p *InitPlan, xoffset int64, n ir.Node) {
- // special case: zero can be dropped entirely
- if isZero(n) {
- return
- }
-
- // special case: inline struct and array (not slice) literals
- if isvaluelit(n) {
- s.initplan(n)
- q := s.initplans[n]
- for _, qe := range q.E {
- // qe is a copy; we are not modifying entries in q.E
- qe.Xoffset += xoffset
- p.E = append(p.E, qe)
- }
- return
- }
-
- // add to plan
- p.E = append(p.E, InitEntry{Xoffset: xoffset, Expr: n})
-}
-
-func isZero(n ir.Node) bool {
- switch n.Op() {
- case ir.ONIL:
- return true
-
- case ir.OLITERAL:
- switch u := n.Val(); u.Kind() {
- case constant.String:
- return constant.StringVal(u) == ""
- case constant.Bool:
- return !constant.BoolVal(u)
- default:
- return constant.Sign(u) == 0
- }
-
- case ir.OARRAYLIT:
- for _, n1 := range n.List().Slice() {
- if n1.Op() == ir.OKEY {
- n1 = n1.(*ir.KeyExpr).Right()
- }
- if !isZero(n1) {
- return false
- }
- }
- return true
-
- case ir.OSTRUCTLIT:
- for _, n1 := range n.List().Slice() {
- n1 := n1.(*ir.StructKeyExpr)
- if !isZero(n1.Left()) {
- return false
- }
- }
- return true
- }
-
- return false
-}
-
-func isvaluelit(n ir.Node) bool {
- return n.Op() == ir.OARRAYLIT || n.Op() == ir.OSTRUCTLIT
-}
-
-func genAsStatic(as *ir.AssignStmt) {
- if as.Left().Type() == nil {
- base.Fatalf("genAsStatic as.Left not typechecked")
- }
-
- name, offset, ok := stataddr(as.Left())
- if !ok || (name.Class() != ir.PEXTERN && as.Left() != ir.BlankNode) {
- base.Fatalf("genAsStatic: lhs %v", as.Left())
- }
-
- switch r := as.Right(); r.Op() {
- case ir.OLITERAL:
- litsym(name, offset, r, int(r.Type().Width))
- return
- case ir.OMETHEXPR:
- r := r.(*ir.MethodExpr)
- pfuncsym(name, offset, r.FuncName())
- return
- case ir.ONAME:
- r := r.(*ir.Name)
- if r.Offset() != 0 {
- base.Fatalf("genAsStatic %+v", as)
- }
- if r.Class() == ir.PFUNC {
- pfuncsym(name, offset, r)
- return
- }
- }
- base.Fatalf("genAsStatic: rhs %v", as.Right())
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
- "cmd/compile/internal/base"
- "cmd/compile/internal/ir"
- "cmd/compile/internal/types"
- "cmd/internal/src"
- "crypto/md5"
- "encoding/binary"
- "fmt"
- "go/constant"
- "sort"
- "strconv"
- "strings"
- "sync"
- "unicode"
- "unicode/utf8"
-)
-
-// largeStack is info about a function whose stack frame is too large (rare).
-type largeStack struct {
- locals int64
- args int64
- callee int64
- pos src.XPos
-}
-
-var (
- largeStackFramesMu sync.Mutex // protects largeStackFrames
- largeStackFrames []largeStack
-)
-
-// hasUniquePos reports whether n has a unique position that can be
-// used for reporting error messages.
-//
-// It's primarily used to distinguish references to named objects,
-// whose Pos will point back to their declaration position rather than
-// their usage position.
-func hasUniquePos(n ir.Node) bool {
- switch n.Op() {
- case ir.ONAME, ir.OPACK:
- return false
- case ir.OLITERAL, ir.ONIL, ir.OTYPE:
- if n.Sym() != nil {
- return false
- }
- }
-
- if !n.Pos().IsKnown() {
- if base.Flag.K != 0 {
- base.Warn("setlineno: unknown position (line 0)")
- }
- return false
- }
-
- return true
-}
-
-func setlineno(n ir.Node) src.XPos {
- lno := base.Pos
- if n != nil && hasUniquePos(n) {
- base.Pos = n.Pos()
- }
- return lno
-}
-
-func lookup(name string) *types.Sym {
- return types.LocalPkg.Lookup(name)
-}
-
-// lookupN looks up the symbol starting with prefix and ending with
-// the decimal n. If prefix is too long, lookupN panics.
-func lookupN(prefix string, n int) *types.Sym {
- var buf [20]byte // plenty long enough for all current users
- copy(buf[:], prefix)
- b := strconv.AppendInt(buf[:len(prefix)], int64(n), 10)
- return types.LocalPkg.LookupBytes(b)
-}
-
-// autolabel generates a new Name node for use with
-// an automatically generated label.
-// prefix is a short mnemonic (e.g. ".s" for switch)
-// to help with debugging.
-// It should begin with "." to avoid conflicts with
-// user labels.
-func autolabel(prefix string) *types.Sym {
- if prefix[0] != '.' {
- base.Fatalf("autolabel prefix must start with '.', have %q", prefix)
- }
- fn := Curfn
- if Curfn == nil {
- base.Fatalf("autolabel outside function")
- }
- n := fn.Label
- fn.Label++
- return lookupN(prefix, int(n))
-}
-
-// dotImports tracks all PkgNames that have been dot-imported.
-var dotImports []*ir.PkgName
-
-// dotImportRefs maps idents introduced by importDot back to the
-// ir.PkgName they were dot-imported through.
-var dotImportRefs map[*ir.Ident]*ir.PkgName
-
-// find all the exported symbols in package referenced by PkgName,
-// and make them available in the current package
-func importDot(pack *ir.PkgName) {
- if dotImportRefs == nil {
- dotImportRefs = make(map[*ir.Ident]*ir.PkgName)
- }
-
- opkg := pack.Pkg
- for _, s := range opkg.Syms {
- if s.Def == nil {
- if _, ok := declImporter[s]; !ok {
- continue
- }
- }
- if !types.IsExported(s.Name) || strings.ContainsRune(s.Name, 0xb7) { // 0xb7 = center dot
- continue
- }
- s1 := lookup(s.Name)
- if s1.Def != nil {
- pkgerror := fmt.Sprintf("during import %q", opkg.Path)
- redeclare(base.Pos, s1, pkgerror)
- continue
- }
-
- id := ir.NewIdent(src.NoXPos, s)
- dotImportRefs[id] = pack
- s1.Def = id
- s1.Block = 1
- }
-
- dotImports = append(dotImports, pack)
-}
-
-// checkDotImports reports errors for any unused dot imports.
-func checkDotImports() {
- for _, pack := range dotImports {
- if !pack.Used {
- base.ErrorfAt(pack.Pos(), "imported and not used: %q", pack.Pkg.Path)
- }
- }
-
- // No longer needed; release memory.
- dotImports = nil
- dotImportRefs = nil
-}
-
-// nodAddr returns a node representing &n at base.Pos.
-func nodAddr(n ir.Node) *ir.AddrExpr {
- return nodAddrAt(base.Pos, n)
-}
-
-// nodAddrPos returns a node representing &n at position pos.
-func nodAddrAt(pos src.XPos, n ir.Node) *ir.AddrExpr {
- return ir.NewAddrExpr(pos, n)
-}
-
-// newname returns a new ONAME Node associated with symbol s.
-func NewName(s *types.Sym) *ir.Name {
- n := ir.NewNameAt(base.Pos, s)
- n.Curfn = Curfn
- return n
-}
-
-// nodSym makes a Node with Op op and with the Left field set to left
-// and the Sym field set to sym. This is for ODOT and friends.
-func nodSym(op ir.Op, left ir.Node, sym *types.Sym) ir.Node {
- return nodlSym(base.Pos, op, left, sym)
-}
-
-// nodlSym makes a Node with position Pos, with Op op, and with the Left field set to left
-// and the Sym field set to sym. This is for ODOT and friends.
-func nodlSym(pos src.XPos, op ir.Op, left ir.Node, sym *types.Sym) ir.Node {
- n := ir.NodAt(pos, op, left, nil)
- n.SetSym(sym)
- return n
-}
-
-// methcmp sorts methods by symbol.
-type methcmp []*types.Field
-
-func (x methcmp) Len() int { return len(x) }
-func (x methcmp) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-func (x methcmp) Less(i, j int) bool { return x[i].Sym.Less(x[j].Sym) }
-
-func nodintconst(v int64) ir.Node {
- return ir.NewLiteral(constant.MakeInt64(v))
-}
-
-func nodnil() ir.Node {
- n := ir.Nod(ir.ONIL, nil, nil)
- n.SetType(types.Types[types.TNIL])
- return n
-}
-
-func nodbool(b bool) ir.Node {
- return ir.NewLiteral(constant.MakeBool(b))
-}
-
-func nodstr(s string) ir.Node {
- return ir.NewLiteral(constant.MakeString(s))
-}
-
-func isptrto(t *types.Type, et types.Kind) bool {
- if t == nil {
- return false
- }
- if !t.IsPtr() {
- return false
- }
- t = t.Elem()
- if t == nil {
- return false
- }
- if t.Kind() != et {
- return false
- }
- return true
-}
-
-// methtype returns the underlying type, if any,
-// that owns methods with receiver parameter t.
-// The result is either a named type or an anonymous struct.
-func methtype(t *types.Type) *types.Type {
- if t == nil {
- return nil
- }
-
- // Strip away pointer if it's there.
- if t.IsPtr() {
- if t.Sym() != nil {
- return nil
- }
- t = t.Elem()
- if t == nil {
- return nil
- }
- }
-
- // Must be a named type or anonymous struct.
- if t.Sym() == nil && !t.IsStruct() {
- return nil
- }
-
- // Check types.
- if issimple[t.Kind()] {
- return t
- }
- switch t.Kind() {
- case types.TARRAY, types.TCHAN, types.TFUNC, types.TMAP, types.TSLICE, types.TSTRING, types.TSTRUCT:
- return t
- }
- return nil
-}
-
-// Is type src assignment compatible to type dst?
-// If so, return op code to use in conversion.
-// If not, return OXXX. In this case, the string return parameter may
-// hold a reason why. In all other cases, it'll be the empty string.
-func assignop(src, dst *types.Type) (ir.Op, string) {
- if src == dst {
- return ir.OCONVNOP, ""
- }
- if src == nil || dst == nil || src.Kind() == types.TFORW || dst.Kind() == types.TFORW || src.Underlying() == nil || dst.Underlying() == nil {
- return ir.OXXX, ""
- }
-
- // 1. src type is identical to dst.
- if types.Identical(src, dst) {
- return ir.OCONVNOP, ""
- }
-
- // 2. src and dst have identical underlying types
- // and either src or dst is not a named type or
- // both are empty interface types.
- // For assignable but different non-empty interface types,
- // we want to recompute the itab. Recomputing the itab ensures
- // that itabs are unique (thus an interface with a compile-time
- // type I has an itab with interface type I).
- if types.Identical(src.Underlying(), dst.Underlying()) {
- if src.IsEmptyInterface() {
- // Conversion between two empty interfaces
- // requires no code.
- return ir.OCONVNOP, ""
- }
- if (src.Sym() == nil || dst.Sym() == nil) && !src.IsInterface() {
- // Conversion between two types, at least one unnamed,
- // needs no conversion. The exception is nonempty interfaces
- // which need to have their itab updated.
- return ir.OCONVNOP, ""
- }
- }
-
- // 3. dst is an interface type and src implements dst.
- if dst.IsInterface() && src.Kind() != types.TNIL {
- var missing, have *types.Field
- var ptr int
- if implements(src, dst, &missing, &have, &ptr) {
- // Call itabname so that (src, dst)
- // gets added to itabs early, which allows
- // us to de-virtualize calls through this
- // type/interface pair later. See peekitabs in reflect.go
- if isdirectiface(src) && !dst.IsEmptyInterface() {
- NeedITab(src, dst)
- }
-
- return ir.OCONVIFACE, ""
- }
-
- // we'll have complained about this method anyway, suppress spurious messages.
- if have != nil && have.Sym == missing.Sym && (have.Type.Broke() || missing.Type.Broke()) {
- return ir.OCONVIFACE, ""
- }
-
- var why string
- if isptrto(src, types.TINTER) {
- why = fmt.Sprintf(":\n\t%v is pointer to interface, not interface", src)
- } else if have != nil && have.Sym == missing.Sym && have.Nointerface() {
- why = fmt.Sprintf(":\n\t%v does not implement %v (%v method is marked 'nointerface')", src, dst, missing.Sym)
- } else if have != nil && have.Sym == missing.Sym {
- why = fmt.Sprintf(":\n\t%v does not implement %v (wrong type for %v method)\n"+
- "\t\thave %v%S\n\t\twant %v%S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
- } else if ptr != 0 {
- why = fmt.Sprintf(":\n\t%v does not implement %v (%v method has pointer receiver)", src, dst, missing.Sym)
- } else if have != nil {
- why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)\n"+
- "\t\thave %v%S\n\t\twant %v%S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
- } else {
- why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)", src, dst, missing.Sym)
- }
-
- return ir.OXXX, why
- }
-
- if isptrto(dst, types.TINTER) {
- why := fmt.Sprintf(":\n\t%v is pointer to interface, not interface", dst)
- return ir.OXXX, why
- }
-
- if src.IsInterface() && dst.Kind() != types.TBLANK {
- var missing, have *types.Field
- var ptr int
- var why string
- if implements(dst, src, &missing, &have, &ptr) {
- why = ": need type assertion"
- }
- return ir.OXXX, why
- }
-
- // 4. src is a bidirectional channel value, dst is a channel type,
- // src and dst have identical element types, and
- // either src or dst is not a named type.
- if src.IsChan() && src.ChanDir() == types.Cboth && dst.IsChan() {
- if types.Identical(src.Elem(), dst.Elem()) && (src.Sym() == nil || dst.Sym() == nil) {
- return ir.OCONVNOP, ""
- }
- }
-
- // 5. src is the predeclared identifier nil and dst is a nillable type.
- if src.Kind() == types.TNIL {
- switch dst.Kind() {
- case types.TPTR,
- types.TFUNC,
- types.TMAP,
- types.TCHAN,
- types.TINTER,
- types.TSLICE:
- return ir.OCONVNOP, ""
- }
- }
-
- // 6. rule about untyped constants - already converted by defaultlit.
-
- // 7. Any typed value can be assigned to the blank identifier.
- if dst.Kind() == types.TBLANK {
- return ir.OCONVNOP, ""
- }
-
- return ir.OXXX, ""
-}
-
-// Can we convert a value of type src to a value of type dst?
-// If so, return op code to use in conversion (maybe OCONVNOP).
-// If not, return OXXX. In this case, the string return parameter may
-// hold a reason why. In all other cases, it'll be the empty string.
-// srcConstant indicates whether the value of type src is a constant.
-func convertop(srcConstant bool, src, dst *types.Type) (ir.Op, string) {
- if src == dst {
- return ir.OCONVNOP, ""
- }
- if src == nil || dst == nil {
- return ir.OXXX, ""
- }
-
- // Conversions from regular to go:notinheap are not allowed
- // (unless it's unsafe.Pointer). These are runtime-specific
- // rules.
- // (a) Disallow (*T) to (*U) where T is go:notinheap but U isn't.
- if src.IsPtr() && dst.IsPtr() && dst.Elem().NotInHeap() && !src.Elem().NotInHeap() {
- why := fmt.Sprintf(":\n\t%v is incomplete (or unallocatable), but %v is not", dst.Elem(), src.Elem())
- return ir.OXXX, why
- }
- // (b) Disallow string to []T where T is go:notinheap.
- if src.IsString() && dst.IsSlice() && dst.Elem().NotInHeap() && (dst.Elem().Kind() == types.ByteType.Kind() || dst.Elem().Kind() == types.RuneType.Kind()) {
- why := fmt.Sprintf(":\n\t%v is incomplete (or unallocatable)", dst.Elem())
- return ir.OXXX, why
- }
-
- // 1. src can be assigned to dst.
- op, why := assignop(src, dst)
- if op != ir.OXXX {
- return op, why
- }
-
- // The rules for interfaces are no different in conversions
- // than assignments. If interfaces are involved, stop now
- // with the good message from assignop.
- // Otherwise clear the error.
- if src.IsInterface() || dst.IsInterface() {
- return ir.OXXX, why
- }
-
- // 2. Ignoring struct tags, src and dst have identical underlying types.
- if types.IdenticalIgnoreTags(src.Underlying(), dst.Underlying()) {
- return ir.OCONVNOP, ""
- }
-
- // 3. src and dst are unnamed pointer types and, ignoring struct tags,
- // their base types have identical underlying types.
- if src.IsPtr() && dst.IsPtr() && src.Sym() == nil && dst.Sym() == nil {
- if types.IdenticalIgnoreTags(src.Elem().Underlying(), dst.Elem().Underlying()) {
- return ir.OCONVNOP, ""
- }
- }
-
- // 4. src and dst are both integer or floating point types.
- if (src.IsInteger() || src.IsFloat()) && (dst.IsInteger() || dst.IsFloat()) {
- if simtype[src.Kind()] == simtype[dst.Kind()] {
- return ir.OCONVNOP, ""
- }
- return ir.OCONV, ""
- }
-
- // 5. src and dst are both complex types.
- if src.IsComplex() && dst.IsComplex() {
- if simtype[src.Kind()] == simtype[dst.Kind()] {
- return ir.OCONVNOP, ""
- }
- return ir.OCONV, ""
- }
-
- // Special case for constant conversions: any numeric
- // conversion is potentially okay. We'll validate further
- // within evconst. See #38117.
- if srcConstant && (src.IsInteger() || src.IsFloat() || src.IsComplex()) && (dst.IsInteger() || dst.IsFloat() || dst.IsComplex()) {
- return ir.OCONV, ""
- }
-
- // 6. src is an integer or has type []byte or []rune
- // and dst is a string type.
- if src.IsInteger() && dst.IsString() {
- return ir.ORUNESTR, ""
- }
-
- if src.IsSlice() && dst.IsString() {
- if src.Elem().Kind() == types.ByteType.Kind() {
- return ir.OBYTES2STR, ""
- }
- if src.Elem().Kind() == types.RuneType.Kind() {
- return ir.ORUNES2STR, ""
- }
- }
-
- // 7. src is a string and dst is []byte or []rune.
- // String to slice.
- if src.IsString() && dst.IsSlice() {
- if dst.Elem().Kind() == types.ByteType.Kind() {
- return ir.OSTR2BYTES, ""
- }
- if dst.Elem().Kind() == types.RuneType.Kind() {
- return ir.OSTR2RUNES, ""
- }
- }
-
- // 8. src is a pointer or uintptr and dst is unsafe.Pointer.
- if (src.IsPtr() || src.IsUintptr()) && dst.IsUnsafePtr() {
- return ir.OCONVNOP, ""
- }
-
- // 9. src is unsafe.Pointer and dst is a pointer or uintptr.
- if src.IsUnsafePtr() && (dst.IsPtr() || dst.IsUintptr()) {
- return ir.OCONVNOP, ""
- }
-
- // src is map and dst is a pointer to corresponding hmap.
- // This rule is needed for the implementation detail that
- // go gc maps are implemented as a pointer to a hmap struct.
- if src.Kind() == types.TMAP && dst.IsPtr() &&
- src.MapType().Hmap == dst.Elem() {
- return ir.OCONVNOP, ""
- }
-
- return ir.OXXX, ""
-}
-
-func assignconv(n ir.Node, t *types.Type, context string) ir.Node {
- return assignconvfn(n, t, func() string { return context })
-}
-
-// Convert node n for assignment to type t.
-func assignconvfn(n ir.Node, t *types.Type, context func() string) ir.Node {
- if n == nil || n.Type() == nil || n.Type().Broke() {
- return n
- }
-
- if t.Kind() == types.TBLANK && n.Type().Kind() == types.TNIL {
- base.Errorf("use of untyped nil")
- }
-
- n = convlit1(n, t, false, context)
- if n.Type() == nil {
- return n
- }
- if t.Kind() == types.TBLANK {
- return n
- }
-
- // Convert ideal bool from comparison to plain bool
- // if the next step is non-bool (like interface{}).
- if n.Type() == types.UntypedBool && !t.IsBoolean() {
- if n.Op() == ir.ONAME || n.Op() == ir.OLITERAL {
- r := ir.Nod(ir.OCONVNOP, n, nil)
- r.SetType(types.Types[types.TBOOL])
- r.SetTypecheck(1)
- r.SetImplicit(true)
- n = r
- }
- }
-
- if types.Identical(n.Type(), t) {
- return n
- }
-
- op, why := assignop(n.Type(), t)
- if op == ir.OXXX {
- base.Errorf("cannot use %L as type %v in %s%s", n, t, context(), why)
- op = ir.OCONV
- }
-
- r := ir.NewConvExpr(base.Pos, op, t, n)
- r.SetTypecheck(1)
- r.SetImplicit(true)
- return r
-}
-
-// backingArrayPtrLen extracts the pointer and length from a slice or string.
-// This constructs two nodes referring to n, so n must be a cheapexpr.
-func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) {
- var init ir.Nodes
- c := cheapexpr(n, &init)
- if c != n || init.Len() != 0 {
- base.Fatalf("backingArrayPtrLen not cheap: %v", n)
- }
- ptr = ir.Nod(ir.OSPTR, n, nil)
- if n.Type().IsString() {
- ptr.SetType(types.Types[types.TUINT8].PtrTo())
- } else {
- ptr.SetType(n.Type().Elem().PtrTo())
- }
- length = ir.Nod(ir.OLEN, n, nil)
- length.SetType(types.Types[types.TINT])
- return ptr, length
-}
-
-func syslook(name string) *ir.Name {
- s := Runtimepkg.Lookup(name)
- if s == nil || s.Def == nil {
- base.Fatalf("syslook: can't find runtime.%s", name)
- }
- return ir.AsNode(s.Def).(*ir.Name)
-}
-
-// typehash computes a hash value for type t to use in type switch statements.
-func typehash(t *types.Type) uint32 {
- p := t.LongString()
-
- // Using MD5 is overkill, but reduces accidental collisions.
- h := md5.Sum([]byte(p))
- return binary.LittleEndian.Uint32(h[:4])
-}
-
-// updateHasCall checks whether expression n contains any function
-// calls and sets the n.HasCall flag if so.
-func updateHasCall(n ir.Node) {
- if n == nil {
- return
- }
- n.SetHasCall(calcHasCall(n))
-}
-
-func calcHasCall(n ir.Node) bool {
- if n.Init().Len() != 0 {
- // TODO(mdempsky): This seems overly conservative.
- return true
- }
-
- switch n.Op() {
- default:
- base.Fatalf("calcHasCall %+v", n)
- panic("unreachable")
-
- case ir.OLITERAL, ir.ONIL, ir.ONAME, ir.OTYPE, ir.ONAMEOFFSET:
- if n.HasCall() {
- base.Fatalf("OLITERAL/ONAME/OTYPE should never have calls: %+v", n)
- }
- return false
- case ir.OCALL, ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
- return true
- case ir.OANDAND, ir.OOROR:
- // hard with instrumented code
- if instrumenting {
- return true
- }
- return n.Left().HasCall() || n.Right().HasCall()
- case ir.OINDEX, ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR,
- ir.ODEREF, ir.ODOTPTR, ir.ODOTTYPE, ir.ODIV, ir.OMOD:
- // These ops might panic, make sure they are done
- // before we start marshaling args for a call. See issue 16760.
- return true
-
- // When using soft-float, these ops might be rewritten to function calls
- // so we ensure they are evaluated first.
- case ir.OADD, ir.OSUB, ir.OMUL:
- if thearch.SoftFloat && (isFloat[n.Type().Kind()] || isComplex[n.Type().Kind()]) {
- return true
- }
- return n.Left().HasCall() || n.Right().HasCall()
- case ir.ONEG:
- if thearch.SoftFloat && (isFloat[n.Type().Kind()] || isComplex[n.Type().Kind()]) {
- return true
- }
- return n.Left().HasCall()
- case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
- if thearch.SoftFloat && (isFloat[n.Left().Type().Kind()] || isComplex[n.Left().Type().Kind()]) {
- return true
- }
- return n.Left().HasCall() || n.Right().HasCall()
- case ir.OCONV:
- if thearch.SoftFloat && ((isFloat[n.Type().Kind()] || isComplex[n.Type().Kind()]) || (isFloat[n.Left().Type().Kind()] || isComplex[n.Left().Type().Kind()])) {
- return true
- }
- return n.Left().HasCall()
-
- case ir.OAND, ir.OANDNOT, ir.OLSH, ir.OOR, ir.ORSH, ir.OXOR, ir.OCOPY, ir.OCOMPLEX, ir.OEFACE:
- return n.Left().HasCall() || n.Right().HasCall()
-
- case ir.OAS:
- return n.Left().HasCall() || n.Right() != nil && n.Right().HasCall()
-
- case ir.OADDR:
- return n.Left().HasCall()
- case ir.OPAREN:
- return n.Left().HasCall()
- case ir.OBITNOT, ir.ONOT, ir.OPLUS, ir.ORECV,
- ir.OALIGNOF, ir.OCAP, ir.OCLOSE, ir.OIMAG, ir.OLEN, ir.ONEW,
- ir.OOFFSETOF, ir.OPANIC, ir.OREAL, ir.OSIZEOF,
- ir.OCHECKNIL, ir.OCFUNC, ir.OIDATA, ir.OITAB, ir.ONEWOBJ, ir.OSPTR, ir.OVARDEF, ir.OVARKILL, ir.OVARLIVE:
- return n.Left().HasCall()
- case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER:
- return n.Left().HasCall()
-
- case ir.OGETG, ir.OCLOSUREREAD, ir.OMETHEXPR:
- return false
-
- // TODO(rsc): These look wrong in various ways but are what calcHasCall has always done.
- case ir.OADDSTR:
- // TODO(rsc): This used to check left and right, which are not part of OADDSTR.
- return false
- case ir.OBLOCK:
- // TODO(rsc): Surely the block's statements matter.
- return false
- case ir.OCONVIFACE, ir.OCONVNOP, ir.OBYTES2STR, ir.OBYTES2STRTMP, ir.ORUNES2STR, ir.OSTR2BYTES, ir.OSTR2BYTESTMP, ir.OSTR2RUNES, ir.ORUNESTR:
- // TODO(rsc): Some conversions are themselves calls, no?
- return n.Left().HasCall()
- case ir.ODOTTYPE2:
- // TODO(rsc): Shouldn't this be up with ODOTTYPE above?
- return n.Left().HasCall()
- case ir.OSLICEHEADER:
- // TODO(rsc): What about len and cap?
- return n.Left().HasCall()
- case ir.OAS2DOTTYPE, ir.OAS2FUNC:
- // TODO(rsc): Surely we need to check List and Rlist.
- return false
- }
-}
-
-func badtype(op ir.Op, tl, tr *types.Type) {
- var s string
- if tl != nil {
- s += fmt.Sprintf("\n\t%v", tl)
- }
- if tr != nil {
- s += fmt.Sprintf("\n\t%v", tr)
- }
-
- // common mistake: *struct and *interface.
- if tl != nil && tr != nil && tl.IsPtr() && tr.IsPtr() {
- if tl.Elem().IsStruct() && tr.Elem().IsInterface() {
- s += "\n\t(*struct vs *interface)"
- } else if tl.Elem().IsInterface() && tr.Elem().IsStruct() {
- s += "\n\t(*interface vs *struct)"
- }
- }
-
- base.Errorf("illegal types for operand: %v%s", op, s)
-}
-
-// brcom returns !(op).
-// For example, brcom(==) is !=.
-func brcom(op ir.Op) ir.Op {
- switch op {
- case ir.OEQ:
- return ir.ONE
- case ir.ONE:
- return ir.OEQ
- case ir.OLT:
- return ir.OGE
- case ir.OGT:
- return ir.OLE
- case ir.OLE:
- return ir.OGT
- case ir.OGE:
- return ir.OLT
- }
- base.Fatalf("brcom: no com for %v\n", op)
- return op
-}
-
-// brrev returns reverse(op).
-// For example, Brrev(<) is >.
-func brrev(op ir.Op) ir.Op {
- switch op {
- case ir.OEQ:
- return ir.OEQ
- case ir.ONE:
- return ir.ONE
- case ir.OLT:
- return ir.OGT
- case ir.OGT:
- return ir.OLT
- case ir.OLE:
- return ir.OGE
- case ir.OGE:
- return ir.OLE
- }
- base.Fatalf("brrev: no rev for %v\n", op)
- return op
-}
-
-// return side effect-free n, appending side effects to init.
-// result is assignable if n is.
-func safeexpr(n ir.Node, init *ir.Nodes) ir.Node {
- if n == nil {
- return nil
- }
-
- if n.Init().Len() != 0 {
- walkstmtlist(n.Init().Slice())
- init.AppendNodes(n.PtrInit())
- }
-
- switch n.Op() {
- case ir.ONAME, ir.OLITERAL, ir.ONIL, ir.ONAMEOFFSET:
- return n
-
- case ir.OLEN, ir.OCAP:
- l := safeexpr(n.Left(), init)
- if l == n.Left() {
- return n
- }
- a := ir.Copy(n).(*ir.UnaryExpr)
- a.SetLeft(l)
- return walkexpr(typecheck(a, ctxExpr), init)
-
- case ir.ODOT, ir.ODOTPTR:
- l := safeexpr(n.Left(), init)
- if l == n.Left() {
- return n
- }
- a := ir.Copy(n).(*ir.SelectorExpr)
- a.SetLeft(l)
- return walkexpr(typecheck(a, ctxExpr), init)
-
- case ir.ODEREF:
- l := safeexpr(n.Left(), init)
- if l == n.Left() {
- return n
- }
- a := ir.Copy(n).(*ir.StarExpr)
- a.SetLeft(l)
- return walkexpr(typecheck(a, ctxExpr), init)
-
- case ir.OINDEX, ir.OINDEXMAP:
- l := safeexpr(n.Left(), init)
- r := safeexpr(n.Right(), init)
- if l == n.Left() && r == n.Right() {
- return n
- }
- a := ir.Copy(n).(*ir.IndexExpr)
- a.SetLeft(l)
- a.SetRight(r)
- return walkexpr(typecheck(a, ctxExpr), init)
-
- case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT:
- if isStaticCompositeLiteral(n) {
- return n
- }
- }
-
- // make a copy; must not be used as an lvalue
- if islvalue(n) {
- base.Fatalf("missing lvalue case in safeexpr: %v", n)
- }
- return cheapexpr(n, init)
-}
-
-func copyexpr(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node {
- l := temp(t)
- appendWalkStmt(init, ir.Nod(ir.OAS, l, n))
- return l
-}
-
-// return side-effect free and cheap n, appending side effects to init.
-// result may not be assignable.
-func cheapexpr(n ir.Node, init *ir.Nodes) ir.Node {
- switch n.Op() {
- case ir.ONAME, ir.OLITERAL, ir.ONIL:
- return n
- }
-
- return copyexpr(n, n.Type(), init)
-}
-
-// Code to resolve elided DOTs in embedded types.
-
-// A Dlist stores a pointer to a TFIELD Type embedded within
-// a TSTRUCT or TINTER Type.
-type Dlist struct {
- field *types.Field
-}
-
-// dotlist is used by adddot1 to record the path of embedded fields
-// used to access a target field or method.
-// Must be non-nil so that dotpath returns a non-nil slice even if d is zero.
-var dotlist = make([]Dlist, 10)
-
-// lookdot0 returns the number of fields or methods named s associated
-// with Type t. If exactly one exists, it will be returned in *save
-// (if save is not nil).
-func lookdot0(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) int {
- u := t
- if u.IsPtr() {
- u = u.Elem()
- }
-
- c := 0
- if u.IsStruct() || u.IsInterface() {
- for _, f := range u.Fields().Slice() {
- if f.Sym == s || (ignorecase && f.IsMethod() && strings.EqualFold(f.Sym.Name, s.Name)) {
- if save != nil {
- *save = f
- }
- c++
- }
- }
- }
-
- u = t
- if t.Sym() != nil && t.IsPtr() && !t.Elem().IsPtr() {
- // If t is a defined pointer type, then x.m is shorthand for (*x).m.
- u = t.Elem()
- }
- u = methtype(u)
- if u != nil {
- for _, f := range u.Methods().Slice() {
- if f.Embedded == 0 && (f.Sym == s || (ignorecase && strings.EqualFold(f.Sym.Name, s.Name))) {
- if save != nil {
- *save = f
- }
- c++
- }
- }
- }
-
- return c
-}
-
-// adddot1 returns the number of fields or methods named s at depth d in Type t.
-// If exactly one exists, it will be returned in *save (if save is not nil),
-// and dotlist will contain the path of embedded fields traversed to find it,
-// in reverse order. If none exist, more will indicate whether t contains any
-// embedded fields at depth d, so callers can decide whether to retry at
-// a greater depth.
-func adddot1(s *types.Sym, t *types.Type, d int, save **types.Field, ignorecase bool) (c int, more bool) {
- if t.Recur() {
- return
- }
- t.SetRecur(true)
- defer t.SetRecur(false)
-
- var u *types.Type
- d--
- if d < 0 {
- // We've reached our target depth. If t has any fields/methods
- // named s, then we're done. Otherwise, we still need to check
- // below for embedded fields.
- c = lookdot0(s, t, save, ignorecase)
- if c != 0 {
- return c, false
- }
- }
-
- u = t
- if u.IsPtr() {
- u = u.Elem()
- }
- if !u.IsStruct() && !u.IsInterface() {
- return c, false
- }
-
- for _, f := range u.Fields().Slice() {
- if f.Embedded == 0 || f.Sym == nil {
- continue
- }
- if d < 0 {
- // Found an embedded field at target depth.
- return c, true
- }
- a, more1 := adddot1(s, f.Type, d, save, ignorecase)
- if a != 0 && c == 0 {
- dotlist[d].field = f
- }
- c += a
- if more1 {
- more = true
- }
- }
-
- return c, more
-}
-
-// dotpath computes the unique shortest explicit selector path to fully qualify
-// a selection expression x.f, where x is of type t and f is the symbol s.
-// If no such path exists, dotpath returns nil.
-// If there are multiple shortest paths to the same depth, ambig is true.
-func dotpath(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) (path []Dlist, ambig bool) {
- // The embedding of types within structs imposes a tree structure onto
- // types: structs parent the types they embed, and types parent their
- // fields or methods. Our goal here is to find the shortest path to
- // a field or method named s in the subtree rooted at t. To accomplish
- // that, we iteratively perform depth-first searches of increasing depth
- // until we either find the named field/method or exhaust the tree.
- for d := 0; ; d++ {
- if d > len(dotlist) {
- dotlist = append(dotlist, Dlist{})
- }
- if c, more := adddot1(s, t, d, save, ignorecase); c == 1 {
- return dotlist[:d], false
- } else if c > 1 {
- return nil, true
- } else if !more {
- return nil, false
- }
- }
-}
-
-// in T.field
-// find missing fields that
-// will give shortest unique addressing.
-// modify the tree with missing type names.
-func adddot(n *ir.SelectorExpr) *ir.SelectorExpr {
- n.SetLeft(typecheck(n.Left(), ctxType|ctxExpr))
- if n.Left().Diag() {
- n.SetDiag(true)
- }
- t := n.Left().Type()
- if t == nil {
- return n
- }
-
- if n.Left().Op() == ir.OTYPE {
- return n
- }
-
- s := n.Sym()
- if s == nil {
- return n
- }
-
- switch path, ambig := dotpath(s, t, nil, false); {
- case path != nil:
- // rebuild elided dots
- for c := len(path) - 1; c >= 0; c-- {
- dot := nodSym(ir.ODOT, n.Left(), path[c].field.Sym)
- dot.SetImplicit(true)
- dot.SetType(path[c].field.Type)
- n.SetLeft(dot)
- }
- case ambig:
- base.Errorf("ambiguous selector %v", n)
- n.SetLeft(nil)
- }
-
- return n
-}
-
-// Code to help generate trampoline functions for methods on embedded
-// types. These are approx the same as the corresponding adddot
-// routines except that they expect to be called with unique tasks and
-// they return the actual methods.
-
-type Symlink struct {
- field *types.Field
-}
-
-var slist []Symlink
-
-func expand0(t *types.Type) {
- u := t
- if u.IsPtr() {
- u = u.Elem()
- }
-
- if u.IsInterface() {
- for _, f := range u.Fields().Slice() {
- if f.Sym.Uniq() {
- continue
- }
- f.Sym.SetUniq(true)
- slist = append(slist, Symlink{field: f})
- }
-
- return
- }
-
- u = methtype(t)
- if u != nil {
- for _, f := range u.Methods().Slice() {
- if f.Sym.Uniq() {
- continue
- }
- f.Sym.SetUniq(true)
- slist = append(slist, Symlink{field: f})
- }
- }
-}
-
-func expand1(t *types.Type, top bool) {
- if t.Recur() {
- return
- }
- t.SetRecur(true)
-
- if !top {
- expand0(t)
- }
-
- u := t
- if u.IsPtr() {
- u = u.Elem()
- }
-
- if u.IsStruct() || u.IsInterface() {
- for _, f := range u.Fields().Slice() {
- if f.Embedded == 0 {
- continue
- }
- if f.Sym == nil {
- continue
- }
- expand1(f.Type, false)
- }
- }
-
- t.SetRecur(false)
-}
-
-func expandmeth(t *types.Type) {
- if t == nil || t.AllMethods().Len() != 0 {
- return
- }
-
- // mark top-level method symbols
- // so that expand1 doesn't consider them.
- for _, f := range t.Methods().Slice() {
- f.Sym.SetUniq(true)
- }
-
- // generate all reachable methods
- slist = slist[:0]
- expand1(t, true)
-
- // check each method to be uniquely reachable
- var ms []*types.Field
- for i, sl := range slist {
- slist[i].field = nil
- sl.field.Sym.SetUniq(false)
-
- var f *types.Field
- path, _ := dotpath(sl.field.Sym, t, &f, false)
- if path == nil {
- continue
- }
-
- // dotpath may have dug out arbitrary fields, we only want methods.
- if !f.IsMethod() {
- continue
- }
-
- // add it to the base type method list
- f = f.Copy()
- f.Embedded = 1 // needs a trampoline
- for _, d := range path {
- if d.field.Type.IsPtr() {
- f.Embedded = 2
- break
- }
- }
- ms = append(ms, f)
- }
-
- for _, f := range t.Methods().Slice() {
- f.Sym.SetUniq(false)
- }
-
- ms = append(ms, t.Methods().Slice()...)
- sort.Sort(methcmp(ms))
- t.AllMethods().Set(ms)
-}
-
-// Given funarg struct list, return list of fn args.
-func structargs(tl *types.Type, mustname bool) []*ir.Field {
- var args []*ir.Field
- gen := 0
- for _, t := range tl.Fields().Slice() {
- s := t.Sym
- if mustname && (s == nil || s.Name == "_") {
- // invent a name so that we can refer to it in the trampoline
- s = lookupN(".anon", gen)
- gen++
- }
- a := symfield(s, t.Type)
- a.Pos = t.Pos
- a.IsDDD = t.IsDDD()
- args = append(args, a)
- }
-
- return args
-}
-
-// Generate a wrapper function to convert from
-// a receiver of type T to a receiver of type U.
-// That is,
-//
-// func (t T) M() {
-// ...
-// }
-//
-// already exists; this function generates
-//
-// func (u U) M() {
-// u.M()
-// }
-//
-// where the types T and U are such that u.M() is valid
-// and calls the T.M method.
-// The resulting function is for use in method tables.
-//
-// rcvr - U
-// method - M func (t T)(), a TFIELD type struct
-// newnam - the eventual mangled name of this function
-func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) {
- if false && base.Flag.LowerR != 0 {
- fmt.Printf("genwrapper rcvrtype=%v method=%v newnam=%v\n", rcvr, method, newnam)
- }
-
- // Only generate (*T).M wrappers for T.M in T's own package.
- if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type &&
- rcvr.Elem().Sym() != nil && rcvr.Elem().Sym().Pkg != types.LocalPkg {
- return
- }
-
- // Only generate I.M wrappers for I in I's own package
- // but keep doing it for error.Error (was issue #29304).
- if rcvr.IsInterface() && rcvr.Sym() != nil && rcvr.Sym().Pkg != types.LocalPkg && rcvr != types.ErrorType {
- return
- }
-
- base.Pos = autogeneratedPos
- dclcontext = ir.PEXTERN
-
- tfn := ir.NewFuncType(base.Pos,
- namedfield(".this", rcvr),
- structargs(method.Type.Params(), true),
- structargs(method.Type.Results(), false))
-
- fn := dclfunc(newnam, tfn)
- fn.SetDupok(true)
-
- nthis := ir.AsNode(tfn.Type().Recv().Nname)
-
- methodrcvr := method.Type.Recv().Type
-
- // generate nil pointer check for better error
- if rcvr.IsPtr() && rcvr.Elem() == methodrcvr {
- // generating wrapper from *T to T.
- n := ir.Nod(ir.OIF, nil, nil)
- n.SetLeft(ir.Nod(ir.OEQ, nthis, nodnil()))
- call := ir.Nod(ir.OCALL, syslook("panicwrap"), nil)
- n.PtrBody().Set1(call)
- fn.PtrBody().Append(n)
- }
-
- dot := adddot(ir.NewSelectorExpr(base.Pos, ir.OXDOT, nthis, method.Sym))
-
- // generate call
- // It's not possible to use a tail call when dynamic linking on ppc64le. The
- // bad scenario is when a local call is made to the wrapper: the wrapper will
- // call the implementation, which might be in a different module and so set
- // the TOC to the appropriate value for that module. But if it returns
- // directly to the wrapper's caller, nothing will reset it to the correct
- // value for that function.
- if !instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !isifacemethod(method.Type) && !(thearch.LinkArch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) {
- // generate tail call: adjust pointer receiver and jump to embedded method.
- left := dot.Left() // skip final .M
- if !left.Type().IsPtr() {
- left = nodAddr(left)
- }
- as := ir.Nod(ir.OAS, nthis, convnop(left, rcvr))
- fn.PtrBody().Append(as)
- fn.PtrBody().Append(nodSym(ir.ORETJMP, nil, methodSym(methodrcvr, method.Sym)))
- } else {
- fn.SetWrapper(true) // ignore frame for panic+recover matching
- call := ir.Nod(ir.OCALL, dot, nil)
- call.PtrList().Set(paramNnames(tfn.Type()))
- call.SetIsDDD(tfn.Type().IsVariadic())
- if method.Type.NumResults() > 0 {
- ret := ir.Nod(ir.ORETURN, nil, nil)
- ret.PtrList().Set1(call)
- fn.PtrBody().Append(ret)
- } else {
- fn.PtrBody().Append(call)
- }
- }
-
- if false && base.Flag.LowerR != 0 {
- ir.DumpList("genwrapper body", fn.Body())
- }
-
- funcbody()
- if base.Debug.DclStack != 0 {
- testdclstack()
- }
-
- typecheckFunc(fn)
- Curfn = fn
- typecheckslice(fn.Body().Slice(), ctxStmt)
-
- // Inline calls within (*T).M wrappers. This is safe because we only
- // generate those wrappers within the same compilation unit as (T).M.
- // TODO(mdempsky): Investigate why we can't enable this more generally.
- if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && rcvr.Elem().Sym() != nil {
- inlcalls(fn)
- }
- escapeFuncs([]*ir.Func{fn}, false)
-
- Curfn = nil
- Target.Decls = append(Target.Decls, fn)
-}
-
-func paramNnames(ft *types.Type) []ir.Node {
- args := make([]ir.Node, ft.NumParams())
- for i, f := range ft.Params().FieldSlice() {
- args[i] = ir.AsNode(f.Nname)
- }
- return args
-}
-
-func hashmem(t *types.Type) ir.Node {
- sym := Runtimepkg.Lookup("memhash")
-
- n := NewName(sym)
- setNodeNameFunc(n)
- n.SetType(functype(nil, []*ir.Field{
- anonfield(types.NewPtr(t)),
- anonfield(types.Types[types.TUINTPTR]),
- anonfield(types.Types[types.TUINTPTR]),
- }, []*ir.Field{
- anonfield(types.Types[types.TUINTPTR]),
- }))
- return n
-}
-
-func ifacelookdot(s *types.Sym, t *types.Type, ignorecase bool) (m *types.Field, followptr bool) {
- if t == nil {
- return nil, false
- }
-
- path, ambig := dotpath(s, t, &m, ignorecase)
- if path == nil {
- if ambig {
- base.Errorf("%v.%v is ambiguous", t, s)
- }
- return nil, false
- }
-
- for _, d := range path {
- if d.field.Type.IsPtr() {
- followptr = true
- break
- }
- }
-
- if !m.IsMethod() {
- base.Errorf("%v.%v is a field, not a method", t, s)
- return nil, followptr
- }
-
- return m, followptr
-}
-
-func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool {
- t0 := t
- if t == nil {
- return false
- }
-
- if t.IsInterface() {
- i := 0
- tms := t.Fields().Slice()
- for _, im := range iface.Fields().Slice() {
- for i < len(tms) && tms[i].Sym != im.Sym {
- i++
- }
- if i == len(tms) {
- *m = im
- *samename = nil
- *ptr = 0
- return false
- }
- tm := tms[i]
- if !types.Identical(tm.Type, im.Type) {
- *m = im
- *samename = tm
- *ptr = 0
- return false
- }
- }
-
- return true
- }
-
- t = methtype(t)
- var tms []*types.Field
- if t != nil {
- expandmeth(t)
- tms = t.AllMethods().Slice()
- }
- i := 0
- for _, im := range iface.Fields().Slice() {
- if im.Broke() {
- continue
- }
- for i < len(tms) && tms[i].Sym != im.Sym {
- i++
- }
- if i == len(tms) {
- *m = im
- *samename, _ = ifacelookdot(im.Sym, t, true)
- *ptr = 0
- return false
- }
- tm := tms[i]
- if tm.Nointerface() || !types.Identical(tm.Type, im.Type) {
- *m = im
- *samename = tm
- *ptr = 0
- return false
- }
- followptr := tm.Embedded == 2
-
- // if pointer receiver in method,
- // the method does not exist for value types.
- rcvr := tm.Type.Recv().Type
- if rcvr.IsPtr() && !t0.IsPtr() && !followptr && !isifacemethod(tm.Type) {
- if false && base.Flag.LowerR != 0 {
- base.Errorf("interface pointer mismatch")
- }
-
- *m = im
- *samename = nil
- *ptr = 1
- return false
- }
- }
-
- return true
-}
-
-func liststmt(l []ir.Node) ir.Node {
- n := ir.Nod(ir.OBLOCK, nil, nil)
- n.PtrList().Set(l)
- if len(l) != 0 {
- n.SetPos(l[0].Pos())
- }
- return n
-}
-
-func ngotype(n ir.Node) *types.Sym {
- if n.Type() != nil {
- return typenamesym(n.Type())
- }
- return nil
-}
-
-// The result of initExpr MUST be assigned back to n, e.g.
-// n.Left = initExpr(init, n.Left)
-func initExpr(init []ir.Node, n ir.Node) ir.Node {
- if len(init) == 0 {
- return n
- }
- if ir.MayBeShared(n) {
- // Introduce OCONVNOP to hold init list.
- old := n
- n = ir.Nod(ir.OCONVNOP, old, nil)
- n.SetType(old.Type())
- n.SetTypecheck(1)
- }
-
- n.PtrInit().Prepend(init...)
- n.SetHasCall(true)
- return n
-}
-
-// The linker uses the magic symbol prefixes "go." and "type."
-// Avoid potential confusion between import paths and symbols
-// by rejecting these reserved imports for now. Also, people
-// "can do weird things in GOPATH and we'd prefer they didn't
-// do _that_ weird thing" (per rsc). See also #4257.
-var reservedimports = []string{
- "go",
- "type",
-}
-
-func isbadimport(path string, allowSpace bool) bool {
- if strings.Contains(path, "\x00") {
- base.Errorf("import path contains NUL")
- return true
- }
-
- for _, ri := range reservedimports {
- if path == ri {
- base.Errorf("import path %q is reserved and cannot be used", path)
- return true
- }
- }
-
- for _, r := range path {
- if r == utf8.RuneError {
- base.Errorf("import path contains invalid UTF-8 sequence: %q", path)
- return true
- }
-
- if r < 0x20 || r == 0x7f {
- base.Errorf("import path contains control character: %q", path)
- return true
- }
-
- if r == '\\' {
- base.Errorf("import path contains backslash; use slash: %q", path)
- return true
- }
-
- if !allowSpace && unicode.IsSpace(r) {
- base.Errorf("import path contains space character: %q", path)
- return true
- }
-
- if strings.ContainsRune("!\"#$%&'()*,:;<=>?[]^`{|}", r) {
- base.Errorf("import path contains invalid character '%c': %q", r, path)
- return true
- }
- }
-
- return false
-}
-
-// Can this type be stored directly in an interface word?
-// Yes, if the representation is a single pointer.
-func isdirectiface(t *types.Type) bool {
- if t.Broke() {
- return false
- }
-
- switch t.Kind() {
- case types.TPTR:
- // Pointers to notinheap types must be stored indirectly. See issue 42076.
- return !t.Elem().NotInHeap()
- case types.TCHAN,
- types.TMAP,
- types.TFUNC,
- types.TUNSAFEPTR:
- return true
-
- case types.TARRAY:
- // Array of 1 direct iface type can be direct.
- return t.NumElem() == 1 && isdirectiface(t.Elem())
-
- case types.TSTRUCT:
- // Struct with 1 field of direct iface type can be direct.
- return t.NumFields() == 1 && isdirectiface(t.Field(0).Type)
- }
-
- return false
-}
-
-// itabType loads the _type field from a runtime.itab struct.
-func itabType(itab ir.Node) ir.Node {
- typ := nodSym(ir.ODOTPTR, itab, nil)
- typ.SetType(types.NewPtr(types.Types[types.TUINT8]))
- typ.SetTypecheck(1)
- typ.SetOffset(int64(Widthptr)) // offset of _type in runtime.itab
- typ.SetBounded(true) // guaranteed not to fault
- return typ
-}
-
-// ifaceData loads the data field from an interface.
-// The concrete type must be known to have type t.
-// It follows the pointer if !isdirectiface(t).
-func ifaceData(pos src.XPos, n ir.Node, t *types.Type) ir.Node {
- if t.IsInterface() {
- base.Fatalf("ifaceData interface: %v", t)
- }
- ptr := ir.NodAt(pos, ir.OIDATA, n, nil)
- if isdirectiface(t) {
- ptr.SetType(t)
- ptr.SetTypecheck(1)
- return ptr
- }
- ptr.SetType(types.NewPtr(t))
- ptr.SetTypecheck(1)
- ind := ir.NodAt(pos, ir.ODEREF, ptr, nil)
- ind.SetType(t)
- ind.SetTypecheck(1)
- ind.SetBounded(true)
- return ind
-}
-
-// typePos returns the position associated with t.
-// This is where t was declared or where it appeared as a type expression.
-func typePos(t *types.Type) src.XPos {
- if pos := t.Pos(); pos.IsKnown() {
- return pos
- }
- base.Fatalf("bad type: %v", t)
- panic("unreachable")
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
- "cmd/compile/internal/base"
- "cmd/compile/internal/ir"
- "cmd/compile/internal/types"
- "cmd/internal/src"
- "go/constant"
- "go/token"
- "sort"
-)
-
-// typecheckswitch typechecks a switch statement.
-func typecheckswitch(n *ir.SwitchStmt) {
- typecheckslice(n.Init().Slice(), ctxStmt)
- if n.Left() != nil && n.Left().Op() == ir.OTYPESW {
- typecheckTypeSwitch(n)
- } else {
- typecheckExprSwitch(n)
- }
-}
-
-func typecheckTypeSwitch(n *ir.SwitchStmt) {
- guard := n.Left().(*ir.TypeSwitchGuard)
- guard.SetRight(typecheck(guard.Right(), ctxExpr))
- t := guard.Right().Type()
- if t != nil && !t.IsInterface() {
- base.ErrorfAt(n.Pos(), "cannot type switch on non-interface value %L", guard.Right())
- t = nil
- }
-
- // We don't actually declare the type switch's guarded
- // declaration itself. So if there are no cases, we won't
- // notice that it went unused.
- if v := guard.Left(); v != nil && !ir.IsBlank(v) && n.List().Len() == 0 {
- base.ErrorfAt(v.Pos(), "%v declared but not used", v.Sym())
- }
-
- var defCase, nilCase ir.Node
- var ts typeSet
- for _, ncase := range n.List().Slice() {
- ncase := ncase.(*ir.CaseStmt)
- ls := ncase.List().Slice()
- if len(ls) == 0 { // default:
- if defCase != nil {
- base.ErrorfAt(ncase.Pos(), "multiple defaults in switch (first at %v)", ir.Line(defCase))
- } else {
- defCase = ncase
- }
- }
-
- for i := range ls {
- ls[i] = typecheck(ls[i], ctxExpr|ctxType)
- n1 := ls[i]
- if t == nil || n1.Type() == nil {
- continue
- }
-
- var missing, have *types.Field
- var ptr int
- if ir.IsNil(n1) { // case nil:
- if nilCase != nil {
- base.ErrorfAt(ncase.Pos(), "multiple nil cases in type switch (first at %v)", ir.Line(nilCase))
- } else {
- nilCase = ncase
- }
- continue
- }
- if n1.Op() != ir.OTYPE {
- base.ErrorfAt(ncase.Pos(), "%L is not a type", n1)
- continue
- }
- if !n1.Type().IsInterface() && !implements(n1.Type(), t, &missing, &have, &ptr) && !missing.Broke() {
- if have != nil && !have.Broke() {
- base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+
- " (wrong type for %v method)\n\thave %v%S\n\twant %v%S", guard.Right(), n1.Type(), missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
- } else if ptr != 0 {
- base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+
- " (%v method has pointer receiver)", guard.Right(), n1.Type(), missing.Sym)
- } else {
- base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+
- " (missing %v method)", guard.Right(), n1.Type(), missing.Sym)
- }
- continue
- }
-
- ts.add(ncase.Pos(), n1.Type())
- }
-
- if ncase.Rlist().Len() != 0 {
- // Assign the clause variable's type.
- vt := t
- if len(ls) == 1 {
- if ls[0].Op() == ir.OTYPE {
- vt = ls[0].Type()
- } else if !ir.IsNil(ls[0]) {
- // Invalid single-type case;
- // mark variable as broken.
- vt = nil
- }
- }
-
- nvar := ncase.Rlist().First()
- nvar.SetType(vt)
- if vt != nil {
- nvar = typecheck(nvar, ctxExpr|ctxAssign)
- } else {
- // Clause variable is broken; prevent typechecking.
- nvar.SetTypecheck(1)
- nvar.SetWalkdef(1)
- }
- ncase.Rlist().SetFirst(nvar)
- }
-
- typecheckslice(ncase.Body().Slice(), ctxStmt)
- }
-}
-
-type typeSet struct {
- m map[string][]typeSetEntry
-}
-
-type typeSetEntry struct {
- pos src.XPos
- typ *types.Type
-}
-
-func (s *typeSet) add(pos src.XPos, typ *types.Type) {
- if s.m == nil {
- s.m = make(map[string][]typeSetEntry)
- }
-
- // LongString does not uniquely identify types, so we need to
- // disambiguate collisions with types.Identical.
- // TODO(mdempsky): Add a method that *is* unique.
- ls := typ.LongString()
- prevs := s.m[ls]
- for _, prev := range prevs {
- if types.Identical(typ, prev.typ) {
- base.ErrorfAt(pos, "duplicate case %v in type switch\n\tprevious case at %s", typ, base.FmtPos(prev.pos))
- return
- }
- }
- s.m[ls] = append(prevs, typeSetEntry{pos, typ})
-}
-
-func typecheckExprSwitch(n *ir.SwitchStmt) {
- t := types.Types[types.TBOOL]
- if n.Left() != nil {
- n.SetLeft(typecheck(n.Left(), ctxExpr))
- n.SetLeft(defaultlit(n.Left(), nil))
- t = n.Left().Type()
- }
-
- var nilonly string
- if t != nil {
- switch {
- case t.IsMap():
- nilonly = "map"
- case t.Kind() == types.TFUNC:
- nilonly = "func"
- case t.IsSlice():
- nilonly = "slice"
-
- case !IsComparable(t):
- if t.IsStruct() {
- base.ErrorfAt(n.Pos(), "cannot switch on %L (struct containing %v cannot be compared)", n.Left(), IncomparableField(t).Type)
- } else {
- base.ErrorfAt(n.Pos(), "cannot switch on %L", n.Left())
- }
- t = nil
- }
- }
-
- var defCase ir.Node
- var cs constSet
- for _, ncase := range n.List().Slice() {
- ncase := ncase.(*ir.CaseStmt)
- ls := ncase.List().Slice()
- if len(ls) == 0 { // default:
- if defCase != nil {
- base.ErrorfAt(ncase.Pos(), "multiple defaults in switch (first at %v)", ir.Line(defCase))
- } else {
- defCase = ncase
- }
- }
-
- for i := range ls {
- setlineno(ncase)
- ls[i] = typecheck(ls[i], ctxExpr)
- ls[i] = defaultlit(ls[i], t)
- n1 := ls[i]
- if t == nil || n1.Type() == nil {
- continue
- }
-
- if nilonly != "" && !ir.IsNil(n1) {
- base.ErrorfAt(ncase.Pos(), "invalid case %v in switch (can only compare %s %v to nil)", n1, nilonly, n.Left())
- } else if t.IsInterface() && !n1.Type().IsInterface() && !IsComparable(n1.Type()) {
- base.ErrorfAt(ncase.Pos(), "invalid case %L in switch (incomparable type)", n1)
- } else {
- op1, _ := assignop(n1.Type(), t)
- op2, _ := assignop(t, n1.Type())
- if op1 == ir.OXXX && op2 == ir.OXXX {
- if n.Left() != nil {
- base.ErrorfAt(ncase.Pos(), "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Left(), n1.Type(), t)
- } else {
- base.ErrorfAt(ncase.Pos(), "invalid case %v in switch (mismatched types %v and bool)", n1, n1.Type())
- }
- }
- }
-
- // Don't check for duplicate bools. Although the spec allows it,
- // (1) the compiler hasn't checked it in the past, so compatibility mandates it, and
- // (2) it would disallow useful things like
- // case GOARCH == "arm" && GOARM == "5":
- // case GOARCH == "arm":
- // which would both evaluate to false for non-ARM compiles.
- if !n1.Type().IsBoolean() {
- cs.add(ncase.Pos(), n1, "case", "switch")
- }
- }
-
- typecheckslice(ncase.Body().Slice(), ctxStmt)
- }
-}
-
-// walkswitch walks a switch statement.
-func walkswitch(sw *ir.SwitchStmt) {
- // Guard against double walk, see #25776.
- if sw.List().Len() == 0 && sw.Body().Len() > 0 {
- return // Was fatal, but eliminating every possible source of double-walking is hard
- }
-
- if sw.Left() != nil && sw.Left().Op() == ir.OTYPESW {
- walkTypeSwitch(sw)
- } else {
- walkExprSwitch(sw)
- }
-}
-
-// walkExprSwitch generates an AST implementing sw. sw is an
-// expression switch.
-func walkExprSwitch(sw *ir.SwitchStmt) {
- lno := setlineno(sw)
-
- cond := sw.Left()
- sw.SetLeft(nil)
-
- // convert switch {...} to switch true {...}
- if cond == nil {
- cond = nodbool(true)
- cond = typecheck(cond, ctxExpr)
- cond = defaultlit(cond, nil)
- }
-
- // Given "switch string(byteslice)",
- // with all cases being side-effect free,
- // use a zero-cost alias of the byte slice.
- // Do this before calling walkexpr on cond,
- // because walkexpr will lower the string
- // conversion into a runtime call.
- // See issue 24937 for more discussion.
- if cond.Op() == ir.OBYTES2STR && allCaseExprsAreSideEffectFree(sw) {
- cond.SetOp(ir.OBYTES2STRTMP)
- }
-
- cond = walkexpr(cond, sw.PtrInit())
- if cond.Op() != ir.OLITERAL && cond.Op() != ir.ONIL {
- cond = copyexpr(cond, cond.Type(), sw.PtrBody())
- }
-
- base.Pos = lno
-
- s := exprSwitch{
- exprname: cond,
- }
-
- var defaultGoto ir.Node
- var body ir.Nodes
- for _, ncase := range sw.List().Slice() {
- ncase := ncase.(*ir.CaseStmt)
- label := autolabel(".s")
- jmp := npos(ncase.Pos(), nodSym(ir.OGOTO, nil, label))
-
- // Process case dispatch.
- if ncase.List().Len() == 0 {
- if defaultGoto != nil {
- base.Fatalf("duplicate default case not detected during typechecking")
- }
- defaultGoto = jmp
- }
-
- for _, n1 := range ncase.List().Slice() {
- s.Add(ncase.Pos(), n1, jmp)
- }
-
- // Process body.
- body.Append(npos(ncase.Pos(), nodSym(ir.OLABEL, nil, label)))
- body.Append(ncase.Body().Slice()...)
- if fall, pos := endsInFallthrough(ncase.Body().Slice()); !fall {
- br := ir.Nod(ir.OBREAK, nil, nil)
- br.SetPos(pos)
- body.Append(br)
- }
- }
- sw.PtrList().Set(nil)
-
- if defaultGoto == nil {
- br := ir.Nod(ir.OBREAK, nil, nil)
- br.SetPos(br.Pos().WithNotStmt())
- defaultGoto = br
- }
-
- s.Emit(sw.PtrBody())
- sw.PtrBody().Append(defaultGoto)
- sw.PtrBody().AppendNodes(&body)
- walkstmtlist(sw.Body().Slice())
-}
-
-// An exprSwitch walks an expression switch.
-type exprSwitch struct {
- exprname ir.Node // value being switched on
-
- done ir.Nodes
- clauses []exprClause
-}
-
-type exprClause struct {
- pos src.XPos
- lo, hi ir.Node
- jmp ir.Node
-}
-
-func (s *exprSwitch) Add(pos src.XPos, expr, jmp ir.Node) {
- c := exprClause{pos: pos, lo: expr, hi: expr, jmp: jmp}
- if okforcmp[s.exprname.Type().Kind()] && expr.Op() == ir.OLITERAL {
- s.clauses = append(s.clauses, c)
- return
- }
-
- s.flush()
- s.clauses = append(s.clauses, c)
- s.flush()
-}
-
-func (s *exprSwitch) Emit(out *ir.Nodes) {
- s.flush()
- out.AppendNodes(&s.done)
-}
-
-func (s *exprSwitch) flush() {
- cc := s.clauses
- s.clauses = nil
- if len(cc) == 0 {
- return
- }
-
- // Caution: If len(cc) == 1, then cc[0] might not an OLITERAL.
- // The code below is structured to implicitly handle this case
- // (e.g., sort.Slice doesn't need to invoke the less function
- // when there's only a single slice element).
-
- if s.exprname.Type().IsString() && len(cc) >= 2 {
- // Sort strings by length and then by value. It is
- // much cheaper to compare lengths than values, and
- // all we need here is consistency. We respect this
- // sorting below.
- sort.Slice(cc, func(i, j int) bool {
- si := ir.StringVal(cc[i].lo)
- sj := ir.StringVal(cc[j].lo)
- if len(si) != len(sj) {
- return len(si) < len(sj)
- }
- return si < sj
- })
-
- // runLen returns the string length associated with a
- // particular run of exprClauses.
- runLen := func(run []exprClause) int64 { return int64(len(ir.StringVal(run[0].lo))) }
-
- // Collapse runs of consecutive strings with the same length.
- var runs [][]exprClause
- start := 0
- for i := 1; i < len(cc); i++ {
- if runLen(cc[start:]) != runLen(cc[i:]) {
- runs = append(runs, cc[start:i])
- start = i
- }
- }
- runs = append(runs, cc[start:])
-
- // Perform two-level binary search.
- binarySearch(len(runs), &s.done,
- func(i int) ir.Node {
- return ir.Nod(ir.OLE, ir.Nod(ir.OLEN, s.exprname, nil), nodintconst(runLen(runs[i-1])))
- },
- func(i int, nif *ir.IfStmt) {
- run := runs[i]
- nif.SetLeft(ir.Nod(ir.OEQ, ir.Nod(ir.OLEN, s.exprname, nil), nodintconst(runLen(run))))
- s.search(run, nif.PtrBody())
- },
- )
- return
- }
-
- sort.Slice(cc, func(i, j int) bool {
- return constant.Compare(cc[i].lo.Val(), token.LSS, cc[j].lo.Val())
- })
-
- // Merge consecutive integer cases.
- if s.exprname.Type().IsInteger() {
- merged := cc[:1]
- for _, c := range cc[1:] {
- last := &merged[len(merged)-1]
- if last.jmp == c.jmp && ir.Int64Val(last.hi)+1 == ir.Int64Val(c.lo) {
- last.hi = c.lo
- } else {
- merged = append(merged, c)
- }
- }
- cc = merged
- }
-
- s.search(cc, &s.done)
-}
-
-func (s *exprSwitch) search(cc []exprClause, out *ir.Nodes) {
- binarySearch(len(cc), out,
- func(i int) ir.Node {
- return ir.Nod(ir.OLE, s.exprname, cc[i-1].hi)
- },
- func(i int, nif *ir.IfStmt) {
- c := &cc[i]
- nif.SetLeft(c.test(s.exprname))
- nif.PtrBody().Set1(c.jmp)
- },
- )
-}
-
-func (c *exprClause) test(exprname ir.Node) ir.Node {
- // Integer range.
- if c.hi != c.lo {
- low := ir.NodAt(c.pos, ir.OGE, exprname, c.lo)
- high := ir.NodAt(c.pos, ir.OLE, exprname, c.hi)
- return ir.NodAt(c.pos, ir.OANDAND, low, high)
- }
-
- // Optimize "switch true { ...}" and "switch false { ... }".
- if ir.IsConst(exprname, constant.Bool) && !c.lo.Type().IsInterface() {
- if ir.BoolVal(exprname) {
- return c.lo
- } else {
- return ir.NodAt(c.pos, ir.ONOT, c.lo, nil)
- }
- }
-
- return ir.NodAt(c.pos, ir.OEQ, exprname, c.lo)
-}
-
-func allCaseExprsAreSideEffectFree(sw *ir.SwitchStmt) bool {
- // In theory, we could be more aggressive, allowing any
- // side-effect-free expressions in cases, but it's a bit
- // tricky because some of that information is unavailable due
- // to the introduction of temporaries during order.
- // Restricting to constants is simple and probably powerful
- // enough.
-
- for _, ncase := range sw.List().Slice() {
- ncase := ncase.(*ir.CaseStmt)
- for _, v := range ncase.List().Slice() {
- if v.Op() != ir.OLITERAL {
- return false
- }
- }
- }
- return true
-}
-
-// endsInFallthrough reports whether stmts ends with a "fallthrough" statement.
-func endsInFallthrough(stmts []ir.Node) (bool, src.XPos) {
- // Search backwards for the index of the fallthrough
- // statement. Do not assume it'll be in the last
- // position, since in some cases (e.g. when the statement
- // list contains autotmp_ variables), one or more OVARKILL
- // nodes will be at the end of the list.
-
- i := len(stmts) - 1
- for i >= 0 && stmts[i].Op() == ir.OVARKILL {
- i--
- }
- if i < 0 {
- return false, src.NoXPos
- }
- return stmts[i].Op() == ir.OFALL, stmts[i].Pos()
-}
-
-// walkTypeSwitch generates an AST that implements sw, where sw is a
-// type switch.
-func walkTypeSwitch(sw *ir.SwitchStmt) {
- var s typeSwitch
- s.facename = sw.Left().(*ir.TypeSwitchGuard).Right()
- sw.SetLeft(nil)
-
- s.facename = walkexpr(s.facename, sw.PtrInit())
- s.facename = copyexpr(s.facename, s.facename.Type(), sw.PtrBody())
- s.okname = temp(types.Types[types.TBOOL])
-
- // Get interface descriptor word.
- // For empty interfaces this will be the type.
- // For non-empty interfaces this will be the itab.
- itab := ir.Nod(ir.OITAB, s.facename, nil)
-
- // For empty interfaces, do:
- // if e._type == nil {
- // do nil case if it exists, otherwise default
- // }
- // h := e._type.hash
- // Use a similar strategy for non-empty interfaces.
- ifNil := ir.Nod(ir.OIF, nil, nil)
- ifNil.SetLeft(ir.Nod(ir.OEQ, itab, nodnil()))
- base.Pos = base.Pos.WithNotStmt() // disable statement marks after the first check.
- ifNil.SetLeft(typecheck(ifNil.Left(), ctxExpr))
- ifNil.SetLeft(defaultlit(ifNil.Left(), nil))
- // ifNil.Nbody assigned at end.
- sw.PtrBody().Append(ifNil)
-
- // Load hash from type or itab.
- dotHash := nodSym(ir.ODOTPTR, itab, nil)
- dotHash.SetType(types.Types[types.TUINT32])
- dotHash.SetTypecheck(1)
- if s.facename.Type().IsEmptyInterface() {
- dotHash.SetOffset(int64(2 * Widthptr)) // offset of hash in runtime._type
- } else {
- dotHash.SetOffset(int64(2 * Widthptr)) // offset of hash in runtime.itab
- }
- dotHash.SetBounded(true) // guaranteed not to fault
- s.hashname = copyexpr(dotHash, dotHash.Type(), sw.PtrBody())
-
- br := ir.Nod(ir.OBREAK, nil, nil)
- var defaultGoto, nilGoto ir.Node
- var body ir.Nodes
- for _, ncase := range sw.List().Slice() {
- ncase := ncase.(*ir.CaseStmt)
- var caseVar ir.Node
- if ncase.Rlist().Len() != 0 {
- caseVar = ncase.Rlist().First()
- }
-
- // For single-type cases with an interface type,
- // we initialize the case variable as part of the type assertion.
- // In other cases, we initialize it in the body.
- var singleType *types.Type
- if ncase.List().Len() == 1 && ncase.List().First().Op() == ir.OTYPE {
- singleType = ncase.List().First().Type()
- }
- caseVarInitialized := false
-
- label := autolabel(".s")
- jmp := npos(ncase.Pos(), nodSym(ir.OGOTO, nil, label))
-
- if ncase.List().Len() == 0 { // default:
- if defaultGoto != nil {
- base.Fatalf("duplicate default case not detected during typechecking")
- }
- defaultGoto = jmp
- }
-
- for _, n1 := range ncase.List().Slice() {
- if ir.IsNil(n1) { // case nil:
- if nilGoto != nil {
- base.Fatalf("duplicate nil case not detected during typechecking")
- }
- nilGoto = jmp
- continue
- }
-
- if singleType != nil && singleType.IsInterface() {
- s.Add(ncase.Pos(), n1.Type(), caseVar, jmp)
- caseVarInitialized = true
- } else {
- s.Add(ncase.Pos(), n1.Type(), nil, jmp)
- }
- }
-
- body.Append(npos(ncase.Pos(), nodSym(ir.OLABEL, nil, label)))
- if caseVar != nil && !caseVarInitialized {
- val := s.facename
- if singleType != nil {
- // We have a single concrete type. Extract the data.
- if singleType.IsInterface() {
- base.Fatalf("singleType interface should have been handled in Add")
- }
- val = ifaceData(ncase.Pos(), s.facename, singleType)
- }
- l := []ir.Node{
- ir.NodAt(ncase.Pos(), ir.ODCL, caseVar, nil),
- ir.NodAt(ncase.Pos(), ir.OAS, caseVar, val),
- }
- typecheckslice(l, ctxStmt)
- body.Append(l...)
- }
- body.Append(ncase.Body().Slice()...)
- body.Append(br)
- }
- sw.PtrList().Set(nil)
-
- if defaultGoto == nil {
- defaultGoto = br
- }
- if nilGoto == nil {
- nilGoto = defaultGoto
- }
- ifNil.PtrBody().Set1(nilGoto)
-
- s.Emit(sw.PtrBody())
- sw.PtrBody().Append(defaultGoto)
- sw.PtrBody().AppendNodes(&body)
-
- walkstmtlist(sw.Body().Slice())
-}
-
-// A typeSwitch walks a type switch.
-type typeSwitch struct {
- // Temporary variables (i.e., ONAMEs) used by type switch dispatch logic:
- facename ir.Node // value being type-switched on
- hashname ir.Node // type hash of the value being type-switched on
- okname ir.Node // boolean used for comma-ok type assertions
-
- done ir.Nodes
- clauses []typeClause
-}
-
-type typeClause struct {
- hash uint32
- body ir.Nodes
-}
-
-func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp ir.Node) {
- var body ir.Nodes
- if caseVar != nil {
- l := []ir.Node{
- ir.NodAt(pos, ir.ODCL, caseVar, nil),
- ir.NodAt(pos, ir.OAS, caseVar, nil),
- }
- typecheckslice(l, ctxStmt)
- body.Append(l...)
- } else {
- caseVar = ir.BlankNode
- }
-
- // cv, ok = iface.(type)
- as := ir.NodAt(pos, ir.OAS2, nil, nil)
- as.PtrList().Set2(caseVar, s.okname) // cv, ok =
- dot := ir.NodAt(pos, ir.ODOTTYPE, s.facename, nil)
- dot.SetType(typ) // iface.(type)
- as.PtrRlist().Set1(dot)
- appendWalkStmt(&body, as)
-
- // if ok { goto label }
- nif := ir.NodAt(pos, ir.OIF, nil, nil)
- nif.SetLeft(s.okname)
- nif.PtrBody().Set1(jmp)
- body.Append(nif)
-
- if !typ.IsInterface() {
- s.clauses = append(s.clauses, typeClause{
- hash: typehash(typ),
- body: body,
- })
- return
- }
-
- s.flush()
- s.done.AppendNodes(&body)
-}
-
-func (s *typeSwitch) Emit(out *ir.Nodes) {
- s.flush()
- out.AppendNodes(&s.done)
-}
-
-func (s *typeSwitch) flush() {
- cc := s.clauses
- s.clauses = nil
- if len(cc) == 0 {
- return
- }
-
- sort.Slice(cc, func(i, j int) bool { return cc[i].hash < cc[j].hash })
-
- // Combine adjacent cases with the same hash.
- merged := cc[:1]
- for _, c := range cc[1:] {
- last := &merged[len(merged)-1]
- if last.hash == c.hash {
- last.body.AppendNodes(&c.body)
- } else {
- merged = append(merged, c)
- }
- }
- cc = merged
-
- binarySearch(len(cc), &s.done,
- func(i int) ir.Node {
- return ir.Nod(ir.OLE, s.hashname, nodintconst(int64(cc[i-1].hash)))
- },
- func(i int, nif *ir.IfStmt) {
- // TODO(mdempsky): Omit hash equality check if
- // there's only one type.
- c := cc[i]
- nif.SetLeft(ir.Nod(ir.OEQ, s.hashname, nodintconst(int64(c.hash))))
- nif.PtrBody().AppendNodes(&c.body)
- },
- )
-}
-
-// binarySearch constructs a binary search tree for handling n cases,
-// and appends it to out. It's used for efficiently implementing
-// switch statements.
-//
-// less(i) should return a boolean expression. If it evaluates true,
-// then cases before i will be tested; otherwise, cases i and later.
-//
-// leaf(i, nif) should setup nif (an OIF node) to test case i. In
-// particular, it should set nif.Left and nif.Nbody.
-func binarySearch(n int, out *ir.Nodes, less func(i int) ir.Node, leaf func(i int, nif *ir.IfStmt)) {
- const binarySearchMin = 4 // minimum number of cases for binary search
-
- var do func(lo, hi int, out *ir.Nodes)
- do = func(lo, hi int, out *ir.Nodes) {
- n := hi - lo
- if n < binarySearchMin {
- for i := lo; i < hi; i++ {
- nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
- leaf(i, nif)
- base.Pos = base.Pos.WithNotStmt()
- nif.SetLeft(typecheck(nif.Left(), ctxExpr))
- nif.SetLeft(defaultlit(nif.Left(), nil))
- out.Append(nif)
- out = nif.PtrRlist()
- }
- return
- }
-
- half := lo + n/2
- nif := ir.Nod(ir.OIF, nil, nil)
- nif.SetLeft(less(half))
- base.Pos = base.Pos.WithNotStmt()
- nif.SetLeft(typecheck(nif.Left(), ctxExpr))
- nif.SetLeft(defaultlit(nif.Left(), nil))
- do(lo, half, nif.PtrBody())
- do(half, hi, nif.PtrRlist())
- out.Append(nif)
- }
-
- do(0, n, out)
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
- "cmd/compile/internal/base"
- "cmd/compile/internal/ir"
- "cmd/compile/internal/types"
- "fmt"
- "go/constant"
- "go/token"
- "strings"
-)
-
-var (
- NeedFuncSym = func(*types.Sym) {}
- NeedITab = func(t, itype *types.Type) {}
- NeedRuntimeType = func(*types.Type) {}
-)
-
-func TypecheckInit() {
- types.Widthptr = Widthptr
- types.Dowidth = dowidth
- initUniverse()
- dclcontext = ir.PEXTERN
- timings.Start("fe", "loadsys")
- loadsys()
-}
-
-func TypecheckPackage() {
- finishUniverse()
-
- typecheckok = true
-
- // Process top-level declarations in phases.
-
- // Phase 1: const, type, and names and types of funcs.
- // This will gather all the information about types
- // and methods but doesn't depend on any of it.
- //
- // We also defer type alias declarations until phase 2
- // to avoid cycles like #18640.
- // TODO(gri) Remove this again once we have a fix for #25838.
-
- // Don't use range--typecheck can add closures to Target.Decls.
- timings.Start("fe", "typecheck", "top1")
- for i := 0; i < len(Target.Decls); i++ {
- n := Target.Decls[i]
- if op := n.Op(); op != ir.ODCL && op != ir.OAS && op != ir.OAS2 && (op != ir.ODCLTYPE || !n.(*ir.Decl).Left().Name().Alias()) {
- Target.Decls[i] = typecheck(n, ctxStmt)
- }
- }
-
- // Phase 2: Variable assignments.
- // To check interface assignments, depends on phase 1.
-
- // Don't use range--typecheck can add closures to Target.Decls.
- timings.Start("fe", "typecheck", "top2")
- for i := 0; i < len(Target.Decls); i++ {
- n := Target.Decls[i]
- if op := n.Op(); op == ir.ODCL || op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.(*ir.Decl).Left().Name().Alias() {
- Target.Decls[i] = typecheck(n, ctxStmt)
- }
- }
-
- // Phase 3: Type check function bodies.
- // Don't use range--typecheck can add closures to Target.Decls.
- timings.Start("fe", "typecheck", "func")
- var fcount int64
- for i := 0; i < len(Target.Decls); i++ {
- n := Target.Decls[i]
- if n.Op() == ir.ODCLFUNC {
- TypecheckFuncBody(n.(*ir.Func))
- fcount++
- }
- }
-
- // Phase 4: Check external declarations.
- // TODO(mdempsky): This should be handled when type checking their
- // corresponding ODCL nodes.
- timings.Start("fe", "typecheck", "externdcls")
- for i, n := range Target.Externs {
- if n.Op() == ir.ONAME {
- Target.Externs[i] = typecheck(Target.Externs[i], ctxExpr)
- }
- }
-
- // Phase 5: With all user code type-checked, it's now safe to verify map keys.
- checkMapKeys()
-
- // Phase 6: Decide how to capture closed variables.
- // This needs to run before escape analysis,
- // because variables captured by value do not escape.
- timings.Start("fe", "capturevars")
- for _, n := range Target.Decls {
- if n.Op() == ir.ODCLFUNC {
- n := n.(*ir.Func)
- if n.Func().OClosure != nil {
- Curfn = n
- capturevars(n)
- }
- }
- }
- capturevarscomplete = true
- Curfn = nil
-
- if base.Debug.TypecheckInl != 0 {
- // Typecheck imported function bodies if Debug.l > 1,
- // otherwise lazily when used or re-exported.
- TypecheckImports()
- }
-}
-
-func TypecheckAssignExpr(n ir.Node) ir.Node { return typecheck(n, ctxExpr|ctxAssign) }
-func TypecheckExpr(n ir.Node) ir.Node { return typecheck(n, ctxExpr) }
-func TypecheckStmt(n ir.Node) ir.Node { return typecheck(n, ctxStmt) }
-
-func TypecheckExprs(exprs []ir.Node) { typecheckslice(exprs, ctxExpr) }
-func TypecheckStmts(stmts []ir.Node) { typecheckslice(stmts, ctxStmt) }
-
-func TypecheckCall(call *ir.CallExpr) {
- t := call.X.Type()
- if t == nil {
- panic("misuse of Call")
- }
- ctx := ctxStmt
- if t.NumResults() > 0 {
- ctx = ctxExpr | ctxMultiOK
- }
- if typecheck(call, ctx) != call {
- panic("bad typecheck")
- }
-}
-
-func TypecheckCallee(n ir.Node) ir.Node {
- return typecheck(n, ctxExpr|ctxCallee)
-}
-
-func TypecheckFuncBody(n *ir.Func) {
- Curfn = n
- decldepth = 1
- errorsBefore := base.Errors()
- typecheckslice(n.Body(), ctxStmt)
- checkreturn(n)
- if base.Errors() > errorsBefore {
- n.PtrBody().Set(nil) // type errors; do not compile
- }
- // Now that we've checked whether n terminates,
- // we can eliminate some obviously dead code.
- deadcode(n)
-}
-
-var importlist []*ir.Func
-
-func TypecheckImports() {
- for _, n := range importlist {
- if n.Inl != nil {
- typecheckinl(n)
- }
- }
-}
-
-// To enable tracing support (-t flag), set enableTrace to true.
-const enableTrace = false
-
-var traceIndent []byte
-var skipDowidthForTracing bool
-
-func tracePrint(title string, n ir.Node) func(np *ir.Node) {
- indent := traceIndent
-
- // guard against nil
- var pos, op string
- var tc uint8
- if n != nil {
- pos = base.FmtPos(n.Pos())
- op = n.Op().String()
- tc = n.Typecheck()
- }
-
- skipDowidthForTracing = true
- defer func() { skipDowidthForTracing = false }()
- fmt.Printf("%s: %s%s %p %s %v tc=%d\n", pos, indent, title, n, op, n, tc)
- traceIndent = append(traceIndent, ". "...)
-
- return func(np *ir.Node) {
- traceIndent = traceIndent[:len(traceIndent)-2]
-
- // if we have a result, use that
- if np != nil {
- n = *np
- }
-
- // guard against nil
- // use outer pos, op so we don't get empty pos/op if n == nil (nicer output)
- var tc uint8
- var typ *types.Type
- if n != nil {
- pos = base.FmtPos(n.Pos())
- op = n.Op().String()
- tc = n.Typecheck()
- typ = n.Type()
- }
-
- skipDowidthForTracing = true
- defer func() { skipDowidthForTracing = false }()
- fmt.Printf("%s: %s=> %p %s %v tc=%d type=%L\n", pos, indent, n, op, n, tc, typ)
- }
-}
-
-const (
- ctxStmt = 1 << iota // evaluated at statement level
- ctxExpr // evaluated in value context
- ctxType // evaluated in type context
- ctxCallee // call-only expressions are ok
- ctxMultiOK // multivalue function returns are ok
- ctxAssign // assigning to expression
-)
-
-// type checks the whole tree of an expression.
-// calculates expression types.
-// evaluates compile time constants.
-// marks variables that escape the local frame.
-// rewrites n.Op to be more specific in some cases.
-
-var typecheckdefstack []ir.Node
-
-// resolve ONONAME to definition, if any.
-func resolve(n ir.Node) (res ir.Node) {
- if n == nil || n.Op() != ir.ONONAME {
- return n
- }
-
- // only trace if there's work to do
- if enableTrace && base.Flag.LowerT {
- defer tracePrint("resolve", n)(&res)
- }
-
- if sym := n.Sym(); sym.Pkg != types.LocalPkg {
- // We might have an ir.Ident from oldname or importDot.
- if id, ok := n.(*ir.Ident); ok {
- if pkgName := dotImportRefs[id]; pkgName != nil {
- pkgName.Used = true
- }
- }
-
- if inimport {
- base.Fatalf("recursive inimport")
- }
- inimport = true
- n = expandDecl(n)
- inimport = false
- return n
- }
-
- r := ir.AsNode(n.Sym().Def)
- if r == nil {
- return n
- }
-
- if r.Op() == ir.OIOTA {
- if x := getIotaValue(); x >= 0 {
- return nodintconst(x)
- }
- return n
- }
-
- return r
-}
-
-func typecheckslice(l []ir.Node, top int) {
- for i := range l {
- l[i] = typecheck(l[i], top)
- }
-}
-
-var _typekind = []string{
- types.TINT: "int",
- types.TUINT: "uint",
- types.TINT8: "int8",
- types.TUINT8: "uint8",
- types.TINT16: "int16",
- types.TUINT16: "uint16",
- types.TINT32: "int32",
- types.TUINT32: "uint32",
- types.TINT64: "int64",
- types.TUINT64: "uint64",
- types.TUINTPTR: "uintptr",
- types.TCOMPLEX64: "complex64",
- types.TCOMPLEX128: "complex128",
- types.TFLOAT32: "float32",
- types.TFLOAT64: "float64",
- types.TBOOL: "bool",
- types.TSTRING: "string",
- types.TPTR: "pointer",
- types.TUNSAFEPTR: "unsafe.Pointer",
- types.TSTRUCT: "struct",
- types.TINTER: "interface",
- types.TCHAN: "chan",
- types.TMAP: "map",
- types.TARRAY: "array",
- types.TSLICE: "slice",
- types.TFUNC: "func",
- types.TNIL: "nil",
- types.TIDEAL: "untyped number",
-}
-
-func typekind(t *types.Type) string {
- if t.IsUntyped() {
- return fmt.Sprintf("%v", t)
- }
- et := t.Kind()
- if int(et) < len(_typekind) {
- s := _typekind[et]
- if s != "" {
- return s
- }
- }
- return fmt.Sprintf("etype=%d", et)
-}
-
-func cycleFor(start ir.Node) []ir.Node {
- // Find the start node in typecheck_tcstack.
- // We know that it must exist because each time we mark
- // a node with n.SetTypecheck(2) we push it on the stack,
- // and each time we mark a node with n.SetTypecheck(2) we
- // pop it from the stack. We hit a cycle when we encounter
- // a node marked 2 in which case is must be on the stack.
- i := len(typecheck_tcstack) - 1
- for i > 0 && typecheck_tcstack[i] != start {
- i--
- }
-
- // collect all nodes with same Op
- var cycle []ir.Node
- for _, n := range typecheck_tcstack[i:] {
- if n.Op() == start.Op() {
- cycle = append(cycle, n)
- }
- }
-
- return cycle
-}
-
-func cycleTrace(cycle []ir.Node) string {
- var s string
- for i, n := range cycle {
- s += fmt.Sprintf("\n\t%v: %v uses %v", ir.Line(n), n, cycle[(i+1)%len(cycle)])
- }
- return s
-}
-
-var typecheck_tcstack []ir.Node
-
-func typecheckFunc(fn *ir.Func) {
- new := typecheck(fn, ctxStmt)
- if new != fn {
- base.Fatalf("typecheck changed func")
- }
-}
-
-func typecheckNtype(n ir.Ntype) ir.Ntype {
- return typecheck(n, ctxType).(ir.Ntype)
-}
-
-// typecheck type checks node n.
-// The result of typecheck MUST be assigned back to n, e.g.
-// n.Left = typecheck(n.Left, top)
-func typecheck(n ir.Node, top int) (res ir.Node) {
- // cannot type check until all the source has been parsed
- if !typecheckok {
- base.Fatalf("early typecheck")
- }
-
- if n == nil {
- return nil
- }
-
- // only trace if there's work to do
- if enableTrace && base.Flag.LowerT {
- defer tracePrint("typecheck", n)(&res)
- }
-
- lno := setlineno(n)
-
- // Skip over parens.
- for n.Op() == ir.OPAREN {
- n = n.(*ir.ParenExpr).Left()
- }
-
- // Resolve definition of name and value of iota lazily.
- n = resolve(n)
-
- // Skip typecheck if already done.
- // But re-typecheck ONAME/OTYPE/OLITERAL/OPACK node in case context has changed.
- if n.Typecheck() == 1 {
- switch n.Op() {
- case ir.ONAME, ir.OTYPE, ir.OLITERAL, ir.OPACK:
- break
-
- default:
- base.Pos = lno
- return n
- }
- }
-
- if n.Typecheck() == 2 {
- // Typechecking loop. Trying printing a meaningful message,
- // otherwise a stack trace of typechecking.
- switch n.Op() {
- // We can already diagnose variables used as types.
- case ir.ONAME:
- if top&(ctxExpr|ctxType) == ctxType {
- base.Errorf("%v is not a type", n)
- }
-
- case ir.OTYPE:
- // Only report a type cycle if we are expecting a type.
- // Otherwise let other code report an error.
- if top&ctxType == ctxType {
- // A cycle containing only alias types is an error
- // since it would expand indefinitely when aliases
- // are substituted.
- cycle := cycleFor(n)
- for _, n1 := range cycle {
- if n1.Name() != nil && !n1.Name().Alias() {
- // Cycle is ok. But if n is an alias type and doesn't
- // have a type yet, we have a recursive type declaration
- // with aliases that we can't handle properly yet.
- // Report an error rather than crashing later.
- if n.Name() != nil && n.Name().Alias() && n.Type() == nil {
- base.Pos = n.Pos()
- base.Fatalf("cannot handle alias type declaration (issue #25838): %v", n)
- }
- base.Pos = lno
- return n
- }
- }
- base.ErrorfAt(n.Pos(), "invalid recursive type alias %v%s", n, cycleTrace(cycle))
- }
-
- case ir.OLITERAL:
- if top&(ctxExpr|ctxType) == ctxType {
- base.Errorf("%v is not a type", n)
- break
- }
- base.ErrorfAt(n.Pos(), "constant definition loop%s", cycleTrace(cycleFor(n)))
- }
-
- if base.Errors() == 0 {
- var trace string
- for i := len(typecheck_tcstack) - 1; i >= 0; i-- {
- x := typecheck_tcstack[i]
- trace += fmt.Sprintf("\n\t%v %v", ir.Line(x), x)
- }
- base.Errorf("typechecking loop involving %v%s", n, trace)
- }
-
- base.Pos = lno
- return n
- }
-
- typecheck_tcstack = append(typecheck_tcstack, n)
-
- n.SetTypecheck(2)
- n = typecheck1(n, top)
- n.SetTypecheck(1)
-
- last := len(typecheck_tcstack) - 1
- typecheck_tcstack[last] = nil
- typecheck_tcstack = typecheck_tcstack[:last]
-
- _, isExpr := n.(ir.Expr)
- _, isStmt := n.(ir.Stmt)
- isMulti := false
- switch n.Op() {
- case ir.OCALLFUNC, ir.OCALLINTER, ir.OCALLMETH:
- if t := n.Left().Type(); t != nil && t.Kind() == types.TFUNC {
- nr := t.NumResults()
- isMulti = nr > 1
- if nr == 0 {
- isExpr = false
- }
- }
- case ir.OAPPEND:
- // Must be used (and not BinaryExpr/UnaryExpr).
- isStmt = false
- case ir.OCLOSE, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTN, ir.OVARKILL, ir.OVARLIVE:
- // Must not be used.
- isExpr = false
- isStmt = true
- case ir.OCOPY, ir.ORECOVER, ir.ORECV:
- // Can be used or not.
- isStmt = true
- }
-
- t := n.Type()
- if t != nil && !t.IsFuncArgStruct() && n.Op() != ir.OTYPE {
- switch t.Kind() {
- case types.TFUNC, // might have TANY; wait until it's called
- types.TANY, types.TFORW, types.TIDEAL, types.TNIL, types.TBLANK:
- break
-
- default:
- checkwidth(t)
- }
- }
- if t != nil {
- n = evalConst(n)
- t = n.Type()
- }
-
- // TODO(rsc): Lots of the complexity here is because typecheck can
- // see OTYPE, ONAME, and OLITERAL nodes multiple times.
- // Once we make the IR a proper tree, we should be able to simplify
- // this code a bit, especially the final case.
- switch {
- case top&(ctxStmt|ctxExpr) == ctxExpr && !isExpr && n.Op() != ir.OTYPE && !isMulti:
- if !n.Diag() {
- base.Errorf("%v used as value", n)
- n.SetDiag(true)
- }
- if t != nil {
- n.SetType(nil)
- }
-
- case top&ctxType == 0 && n.Op() == ir.OTYPE && t != nil:
- if !n.Type().Broke() {
- base.Errorf("type %v is not an expression", n.Type())
- }
- n.SetType(nil)
-
- case top&(ctxStmt|ctxExpr) == ctxStmt && !isStmt && t != nil:
- if !n.Diag() {
- base.Errorf("%v evaluated but not used", n)
- n.SetDiag(true)
- }
- n.SetType(nil)
-
- case top&(ctxType|ctxExpr) == ctxType && n.Op() != ir.OTYPE && n.Op() != ir.ONONAME && (t != nil || n.Op() == ir.ONAME):
- base.Errorf("%v is not a type", n)
- if t != nil {
- n.SetType(nil)
- }
-
- }
-
- base.Pos = lno
- return n
-}
-
-// indexlit implements typechecking of untyped values as
-// array/slice indexes. It is almost equivalent to defaultlit
-// but also accepts untyped numeric values representable as
-// value of type int (see also checkmake for comparison).
-// The result of indexlit MUST be assigned back to n, e.g.
-// n.Left = indexlit(n.Left)
-func indexlit(n ir.Node) ir.Node {
- if n != nil && n.Type() != nil && n.Type().Kind() == types.TIDEAL {
- return defaultlit(n, types.Types[types.TINT])
- }
- return n
-}
-
-// typecheck1 should ONLY be called from typecheck.
-func typecheck1(n ir.Node, top int) (res ir.Node) {
- if enableTrace && base.Flag.LowerT {
- defer tracePrint("typecheck1", n)(&res)
- }
-
- switch n.Op() {
- case ir.OLITERAL, ir.ONAME, ir.ONONAME, ir.OTYPE:
- if n.Sym() == nil {
- return n
- }
-
- if n.Op() == ir.ONAME {
- if n.SubOp() != 0 && top&ctxCallee == 0 {
- base.Errorf("use of builtin %v not in function call", n.Sym())
- n.SetType(nil)
- return n
- }
- }
-
- typecheckdef(n)
- if n.Op() == ir.ONONAME {
- n.SetType(nil)
- return n
- }
- }
-
- switch n.Op() {
- default:
- ir.Dump("typecheck", n)
- base.Fatalf("typecheck %v", n.Op())
- panic("unreachable")
-
- // names
- case ir.OLITERAL:
- if n.Type() == nil && n.Val().Kind() == constant.String {
- base.Fatalf("string literal missing type")
- }
- return n
-
- case ir.ONIL, ir.ONONAME:
- return n
-
- case ir.ONAME:
- if n.Name().Decldepth == 0 {
- n.Name().Decldepth = decldepth
- }
- if n.SubOp() != 0 {
- return n
- }
- if top&ctxAssign == 0 {
- // not a write to the variable
- if ir.IsBlank(n) {
- base.Errorf("cannot use _ as value")
- n.SetType(nil)
- return n
- }
- n.Name().SetUsed(true)
- }
- return n
-
- case ir.ONAMEOFFSET:
- // type already set
- return n
-
- case ir.OPACK:
- base.Errorf("use of package %v without selector", n.Sym())
- n.SetType(nil)
- return n
-
- // types (ODEREF is with exprs)
- case ir.OTYPE:
- if n.Type() == nil {
- return n
- }
- return n
-
- case ir.OTSLICE:
- n := n.(*ir.SliceType)
- n.Elem = typecheck(n.Elem, ctxType)
- if n.Elem.Type() == nil {
- return n
- }
- t := types.NewSlice(n.Elem.Type())
- n.SetOTYPE(t)
- checkwidth(t)
- return n
-
- case ir.OTARRAY:
- n := n.(*ir.ArrayType)
- n.Elem = typecheck(n.Elem, ctxType)
- if n.Elem.Type() == nil {
- return n
- }
- if n.Len == nil { // [...]T
- if !n.Diag() {
- n.SetDiag(true)
- base.Errorf("use of [...] array outside of array literal")
- }
- return n
- }
- n.Len = indexlit(typecheck(n.Len, ctxExpr))
- size := n.Len
- if ir.ConstType(size) != constant.Int {
- switch {
- case size.Type() == nil:
- // Error already reported elsewhere.
- case size.Type().IsInteger() && size.Op() != ir.OLITERAL:
- base.Errorf("non-constant array bound %v", size)
- default:
- base.Errorf("invalid array bound %v", size)
- }
- return n
- }
-
- v := size.Val()
- if doesoverflow(v, types.Types[types.TINT]) {
- base.Errorf("array bound is too large")
- return n
- }
-
- if constant.Sign(v) < 0 {
- base.Errorf("array bound must be non-negative")
- return n
- }
-
- bound, _ := constant.Int64Val(v)
- t := types.NewArray(n.Elem.Type(), bound)
- n.SetOTYPE(t)
- checkwidth(t)
- return n
-
- case ir.OTMAP:
- n := n.(*ir.MapType)
- n.Key = typecheck(n.Key, ctxType)
- n.Elem = typecheck(n.Elem, ctxType)
- l := n.Key
- r := n.Elem
- if l.Type() == nil || r.Type() == nil {
- return n
- }
- if l.Type().NotInHeap() {
- base.Errorf("incomplete (or unallocatable) map key not allowed")
- }
- if r.Type().NotInHeap() {
- base.Errorf("incomplete (or unallocatable) map value not allowed")
- }
- n.SetOTYPE(types.NewMap(l.Type(), r.Type()))
- mapqueue = append(mapqueue, n) // check map keys when all types are settled
- return n
-
- case ir.OTCHAN:
- n := n.(*ir.ChanType)
- n.Elem = typecheck(n.Elem, ctxType)
- l := n.Elem
- if l.Type() == nil {
- return n
- }
- if l.Type().NotInHeap() {
- base.Errorf("chan of incomplete (or unallocatable) type not allowed")
- }
- n.SetOTYPE(types.NewChan(l.Type(), n.Dir))
- return n
-
- case ir.OTSTRUCT:
- n := n.(*ir.StructType)
- n.SetOTYPE(tostruct(n.Fields))
- return n
-
- case ir.OTINTER:
- n := n.(*ir.InterfaceType)
- n.SetOTYPE(tointerface(n.Methods))
- return n
-
- case ir.OTFUNC:
- n := n.(*ir.FuncType)
- n.SetOTYPE(functype(n.Recv, n.Params, n.Results))
- return n
-
- // type or expr
- case ir.ODEREF:
- n := n.(*ir.StarExpr)
- n.X = typecheck(n.X, ctxExpr|ctxType)
- l := n.X
- t := l.Type()
- if t == nil {
- n.SetType(nil)
- return n
- }
- if l.Op() == ir.OTYPE {
- n.SetOTYPE(types.NewPtr(l.Type()))
- // Ensure l.Type gets dowidth'd for the backend. Issue 20174.
- checkwidth(l.Type())
- return n
- }
-
- if !t.IsPtr() {
- if top&(ctxExpr|ctxStmt) != 0 {
- base.Errorf("invalid indirect of %L", n.Left())
- n.SetType(nil)
- return n
- }
- base.Errorf("%v is not a type", l)
- return n
- }
-
- n.SetType(t.Elem())
- return n
-
- // arithmetic exprs
- case ir.OASOP,
- ir.OADD,
- ir.OAND,
- ir.OANDAND,
- ir.OANDNOT,
- ir.ODIV,
- ir.OEQ,
- ir.OGE,
- ir.OGT,
- ir.OLE,
- ir.OLT,
- ir.OLSH,
- ir.ORSH,
- ir.OMOD,
- ir.OMUL,
- ir.ONE,
- ir.OOR,
- ir.OOROR,
- ir.OSUB,
- ir.OXOR:
- var l, r ir.Node
- var setLR func()
- switch n := n.(type) {
- case *ir.AssignOpStmt:
- l, r = n.Left(), n.Right()
- setLR = func() { n.SetLeft(l); n.SetRight(r) }
- case *ir.BinaryExpr:
- l, r = n.Left(), n.Right()
- setLR = func() { n.SetLeft(l); n.SetRight(r) }
- case *ir.LogicalExpr:
- l, r = n.Left(), n.Right()
- setLR = func() { n.SetLeft(l); n.SetRight(r) }
- }
- l = typecheck(l, ctxExpr)
- r = typecheck(r, ctxExpr)
- setLR()
- if l.Type() == nil || r.Type() == nil {
- n.SetType(nil)
- return n
- }
- op := n.Op()
- if n.Op() == ir.OASOP {
- checkassign(n, l)
- if n.Implicit() && !okforarith[l.Type().Kind()] {
- base.Errorf("invalid operation: %v (non-numeric type %v)", n, l.Type())
- n.SetType(nil)
- return n
- }
- // TODO(marvin): Fix Node.EType type union.
- op = n.SubOp()
- }
- if op == ir.OLSH || op == ir.ORSH {
- r = defaultlit(r, types.Types[types.TUINT])
- setLR()
- t := r.Type()
- if !t.IsInteger() {
- base.Errorf("invalid operation: %v (shift count type %v, must be integer)", n, r.Type())
- n.SetType(nil)
- return n
- }
- if t.IsSigned() && !langSupported(1, 13, curpkg()) {
- base.ErrorfVers("go1.13", "invalid operation: %v (signed shift count type %v)", n, r.Type())
- n.SetType(nil)
- return n
- }
- t = l.Type()
- if t != nil && t.Kind() != types.TIDEAL && !t.IsInteger() {
- base.Errorf("invalid operation: %v (shift of type %v)", n, t)
- n.SetType(nil)
- return n
- }
-
- // no defaultlit for left
- // the outer context gives the type
- n.SetType(l.Type())
- if (l.Type() == types.UntypedFloat || l.Type() == types.UntypedComplex) && r.Op() == ir.OLITERAL {
- n.SetType(types.UntypedInt)
- }
- return n
- }
-
- // For "x == x && len(s)", it's better to report that "len(s)" (type int)
- // can't be used with "&&" than to report that "x == x" (type untyped bool)
- // can't be converted to int (see issue #41500).
- if n.Op() == ir.OANDAND || n.Op() == ir.OOROR {
- if !n.Left().Type().IsBoolean() {
- base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(n.Left().Type()))
- n.SetType(nil)
- return n
- }
- if !n.Right().Type().IsBoolean() {
- base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(n.Right().Type()))
- n.SetType(nil)
- return n
- }
- }
-
- // ideal mixed with non-ideal
- l, r = defaultlit2(l, r, false)
- setLR()
-
- if l.Type() == nil || r.Type() == nil {
- n.SetType(nil)
- return n
- }
- t := l.Type()
- if t.Kind() == types.TIDEAL {
- t = r.Type()
- }
- et := t.Kind()
- if et == types.TIDEAL {
- et = types.TINT
- }
- aop := ir.OXXX
- if iscmp[n.Op()] && t.Kind() != types.TIDEAL && !types.Identical(l.Type(), r.Type()) {
- // comparison is okay as long as one side is
- // assignable to the other. convert so they have
- // the same type.
- //
- // the only conversion that isn't a no-op is concrete == interface.
- // in that case, check comparability of the concrete type.
- // The conversion allocates, so only do it if the concrete type is huge.
- converted := false
- if r.Type().Kind() != types.TBLANK {
- aop, _ = assignop(l.Type(), r.Type())
- if aop != ir.OXXX {
- if r.Type().IsInterface() && !l.Type().IsInterface() && !IsComparable(l.Type()) {
- base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(l.Type()))
- n.SetType(nil)
- return n
- }
-
- dowidth(l.Type())
- if r.Type().IsInterface() == l.Type().IsInterface() || l.Type().Width >= 1<<16 {
- l = ir.NewConvExpr(base.Pos, aop, r.Type(), l)
- l.SetTypecheck(1)
- setLR()
- }
-
- t = r.Type()
- converted = true
- }
- }
-
- if !converted && l.Type().Kind() != types.TBLANK {
- aop, _ = assignop(r.Type(), l.Type())
- if aop != ir.OXXX {
- if l.Type().IsInterface() && !r.Type().IsInterface() && !IsComparable(r.Type()) {
- base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(r.Type()))
- n.SetType(nil)
- return n
- }
-
- dowidth(r.Type())
- if r.Type().IsInterface() == l.Type().IsInterface() || r.Type().Width >= 1<<16 {
- r = ir.NewConvExpr(base.Pos, aop, l.Type(), r)
- r.SetTypecheck(1)
- setLR()
- }
-
- t = l.Type()
- }
- }
-
- et = t.Kind()
- }
-
- if t.Kind() != types.TIDEAL && !types.Identical(l.Type(), r.Type()) {
- l, r = defaultlit2(l, r, true)
- if l.Type() == nil || r.Type() == nil {
- n.SetType(nil)
- return n
- }
- if l.Type().IsInterface() == r.Type().IsInterface() || aop == 0 {
- base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type(), r.Type())
- n.SetType(nil)
- return n
- }
- }
-
- if t.Kind() == types.TIDEAL {
- t = mixUntyped(l.Type(), r.Type())
- }
- if dt := defaultType(t); !okfor[op][dt.Kind()] {
- base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(t))
- n.SetType(nil)
- return n
- }
-
- // okfor allows any array == array, map == map, func == func.
- // restrict to slice/map/func == nil and nil == slice/map/func.
- if l.Type().IsArray() && !IsComparable(l.Type()) {
- base.Errorf("invalid operation: %v (%v cannot be compared)", n, l.Type())
- n.SetType(nil)
- return n
- }
-
- if l.Type().IsSlice() && !ir.IsNil(l) && !ir.IsNil(r) {
- base.Errorf("invalid operation: %v (slice can only be compared to nil)", n)
- n.SetType(nil)
- return n
- }
-
- if l.Type().IsMap() && !ir.IsNil(l) && !ir.IsNil(r) {
- base.Errorf("invalid operation: %v (map can only be compared to nil)", n)
- n.SetType(nil)
- return n
- }
-
- if l.Type().Kind() == types.TFUNC && !ir.IsNil(l) && !ir.IsNil(r) {
- base.Errorf("invalid operation: %v (func can only be compared to nil)", n)
- n.SetType(nil)
- return n
- }
-
- if l.Type().IsStruct() {
- if f := IncomparableField(l.Type()); f != nil {
- base.Errorf("invalid operation: %v (struct containing %v cannot be compared)", n, f.Type)
- n.SetType(nil)
- return n
- }
- }
-
- if iscmp[n.Op()] {
- t = types.UntypedBool
- n.SetType(t)
- if con := evalConst(n); con.Op() == ir.OLITERAL {
- return con
- }
- l, r = defaultlit2(l, r, true)
- setLR()
- return n
- }
-
- if et == types.TSTRING && n.Op() == ir.OADD {
- // create or update OADDSTR node with list of strings in x + y + z + (w + v) + ...
- var add *ir.AddStringExpr
- if l.Op() == ir.OADDSTR {
- add = l.(*ir.AddStringExpr)
- add.SetPos(n.Pos())
- } else {
- add = ir.NewAddStringExpr(n.Pos(), []ir.Node{l})
- }
- if r.Op() == ir.OADDSTR {
- add.PtrList().AppendNodes(r.PtrList())
- } else {
- add.PtrList().Append(r)
- }
- add.SetType(t)
- return add
- }
-
- if (op == ir.ODIV || op == ir.OMOD) && ir.IsConst(r, constant.Int) {
- if constant.Sign(r.Val()) == 0 {
- base.Errorf("division by zero")
- n.SetType(nil)
- return n
- }
- }
-
- n.SetType(t)
- return n
-
- case ir.OBITNOT, ir.ONEG, ir.ONOT, ir.OPLUS:
- n.SetLeft(typecheck(n.Left(), ctxExpr))
- l := n.Left()
- t := l.Type()
- if t == nil {
- n.SetType(nil)
- return n
- }
- if !okfor[n.Op()][defaultType(t).Kind()] {
- base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(t))
- n.SetType(nil)
- return n
- }
-
- n.SetType(t)
- return n
-
- // exprs
- case ir.OADDR:
- n.SetLeft(typecheck(n.Left(), ctxExpr))
- if n.Left().Type() == nil {
- n.SetType(nil)
- return n
- }
-
- switch n.Left().Op() {
- case ir.OARRAYLIT, ir.OMAPLIT, ir.OSLICELIT, ir.OSTRUCTLIT:
- n.SetOp(ir.OPTRLIT)
-
- default:
- checklvalue(n.Left(), "take the address of")
- r := outervalue(n.Left())
- if r.Op() == ir.ONAME {
- if ir.Orig(r) != r {
- base.Fatalf("found non-orig name node %v", r) // TODO(mdempsky): What does this mean?
- }
- r.Name().SetAddrtaken(true)
- if r.Name().IsClosureVar() && !capturevarscomplete {
- // Mark the original variable as Addrtaken so that capturevars
- // knows not to pass it by value.
- // But if the capturevars phase is complete, don't touch it,
- // in case l.Name's containing function has not yet been compiled.
- r.Name().Defn.Name().SetAddrtaken(true)
- }
- }
- n.SetLeft(defaultlit(n.Left(), nil))
- if n.Left().Type() == nil {
- n.SetType(nil)
- return n
- }
- }
-
- n.SetType(types.NewPtr(n.Left().Type()))
- return n
-
- case ir.OCOMPLIT:
- return typecheckcomplit(n.(*ir.CompLitExpr))
-
- case ir.OXDOT, ir.ODOT:
- n := n.(*ir.SelectorExpr)
- if n.Op() == ir.OXDOT {
- n = adddot(n)
- n.SetOp(ir.ODOT)
- if n.Left() == nil {
- n.SetType(nil)
- return n
- }
- }
-
- n.SetLeft(typecheck(n.Left(), ctxExpr|ctxType))
-
- n.SetLeft(defaultlit(n.Left(), nil))
-
- t := n.Left().Type()
- if t == nil {
- base.UpdateErrorDot(ir.Line(n), fmt.Sprint(n.Left()), fmt.Sprint(n))
- n.SetType(nil)
- return n
- }
-
- s := n.Sym()
-
- if n.Left().Op() == ir.OTYPE {
- return typecheckMethodExpr(n)
- }
-
- if t.IsPtr() && !t.Elem().IsInterface() {
- t = t.Elem()
- if t == nil {
- n.SetType(nil)
- return n
- }
- n.SetOp(ir.ODOTPTR)
- checkwidth(t)
- }
-
- if n.Sym().IsBlank() {
- base.Errorf("cannot refer to blank field or method")
- n.SetType(nil)
- return n
- }
-
- if lookdot(n, t, 0) == nil {
- // Legitimate field or method lookup failed, try to explain the error
- switch {
- case t.IsEmptyInterface():
- base.Errorf("%v undefined (type %v is interface with no methods)", n, n.Left().Type())
-
- case t.IsPtr() && t.Elem().IsInterface():
- // Pointer to interface is almost always a mistake.
- base.Errorf("%v undefined (type %v is pointer to interface, not interface)", n, n.Left().Type())
-
- case lookdot(n, t, 1) != nil:
- // Field or method matches by name, but it is not exported.
- base.Errorf("%v undefined (cannot refer to unexported field or method %v)", n, n.Sym())
-
- default:
- if mt := lookdot(n, t, 2); mt != nil && visible(mt.Sym) { // Case-insensitive lookup.
- base.Errorf("%v undefined (type %v has no field or method %v, but does have %v)", n, n.Left().Type(), n.Sym(), mt.Sym)
- } else {
- base.Errorf("%v undefined (type %v has no field or method %v)", n, n.Left().Type(), n.Sym())
- }
- }
- n.SetType(nil)
- return n
- }
-
- if (n.Op() == ir.ODOTINTER || n.Op() == ir.ODOTMETH) && top&ctxCallee == 0 {
- return typecheckpartialcall(n, s)
- }
- return n
-
- case ir.ODOTTYPE:
- n.SetLeft(typecheck(n.Left(), ctxExpr))
- n.SetLeft(defaultlit(n.Left(), nil))
- l := n.Left()
- t := l.Type()
- if t == nil {
- n.SetType(nil)
- return n
- }
- if !t.IsInterface() {
- base.Errorf("invalid type assertion: %v (non-interface type %v on left)", n, t)
- n.SetType(nil)
- return n
- }
-
- if n.Right() != nil {
- n.SetRight(typecheck(n.Right(), ctxType))
- n.SetType(n.Right().Type())
- n.SetRight(nil)
- if n.Type() == nil {
- return n
- }
- }
-
- if n.Type() != nil && !n.Type().IsInterface() {
- var missing, have *types.Field
- var ptr int
- if !implements(n.Type(), t, &missing, &have, &ptr) {
- if have != nil && have.Sym == missing.Sym {
- base.Errorf("impossible type assertion:\n\t%v does not implement %v (wrong type for %v method)\n"+
- "\t\thave %v%S\n\t\twant %v%S", n.Type(), t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
- } else if ptr != 0 {
- base.Errorf("impossible type assertion:\n\t%v does not implement %v (%v method has pointer receiver)", n.Type(), t, missing.Sym)
- } else if have != nil {
- base.Errorf("impossible type assertion:\n\t%v does not implement %v (missing %v method)\n"+
- "\t\thave %v%S\n\t\twant %v%S", n.Type(), t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
- } else {
- base.Errorf("impossible type assertion:\n\t%v does not implement %v (missing %v method)", n.Type(), t, missing.Sym)
- }
- n.SetType(nil)
- return n
- }
- }
- return n
-
- case ir.OINDEX:
- n.SetLeft(typecheck(n.Left(), ctxExpr))
- n.SetLeft(defaultlit(n.Left(), nil))
- n.SetLeft(implicitstar(n.Left()))
- l := n.Left()
- n.SetRight(typecheck(n.Right(), ctxExpr))
- r := n.Right()
- t := l.Type()
- if t == nil || r.Type() == nil {
- n.SetType(nil)
- return n
- }
- switch t.Kind() {
- default:
- base.Errorf("invalid operation: %v (type %v does not support indexing)", n, t)
- n.SetType(nil)
- return n
-
- case types.TSTRING, types.TARRAY, types.TSLICE:
- n.SetRight(indexlit(n.Right()))
- if t.IsString() {
- n.SetType(types.ByteType)
- } else {
- n.SetType(t.Elem())
- }
- why := "string"
- if t.IsArray() {
- why = "array"
- } else if t.IsSlice() {
- why = "slice"
- }
-
- if n.Right().Type() != nil && !n.Right().Type().IsInteger() {
- base.Errorf("non-integer %s index %v", why, n.Right())
- return n
- }
-
- if !n.Bounded() && ir.IsConst(n.Right(), constant.Int) {
- x := n.Right().Val()
- if constant.Sign(x) < 0 {
- base.Errorf("invalid %s index %v (index must be non-negative)", why, n.Right())
- } else if t.IsArray() && constant.Compare(x, token.GEQ, constant.MakeInt64(t.NumElem())) {
- base.Errorf("invalid array index %v (out of bounds for %d-element array)", n.Right(), t.NumElem())
- } else if ir.IsConst(n.Left(), constant.String) && constant.Compare(x, token.GEQ, constant.MakeInt64(int64(len(ir.StringVal(n.Left()))))) {
- base.Errorf("invalid string index %v (out of bounds for %d-byte string)", n.Right(), len(ir.StringVal(n.Left())))
- } else if doesoverflow(x, types.Types[types.TINT]) {
- base.Errorf("invalid %s index %v (index too large)", why, n.Right())
- }
- }
-
- case types.TMAP:
- n.SetRight(assignconv(n.Right(), t.Key(), "map index"))
- n.SetType(t.Elem())
- n.SetOp(ir.OINDEXMAP)
- n.SetIndexMapLValue(false)
- }
- return n
-
- case ir.ORECV:
- n.SetLeft(typecheck(n.Left(), ctxExpr))
- n.SetLeft(defaultlit(n.Left(), nil))
- l := n.Left()
- t := l.Type()
- if t == nil {
- n.SetType(nil)
- return n
- }
- if !t.IsChan() {
- base.Errorf("invalid operation: %v (receive from non-chan type %v)", n, t)
- n.SetType(nil)
- return n
- }
-
- if !t.ChanDir().CanRecv() {
- base.Errorf("invalid operation: %v (receive from send-only type %v)", n, t)
- n.SetType(nil)
- return n
- }
-
- n.SetType(t.Elem())
- return n
-
- case ir.OSEND:
- n.SetLeft(typecheck(n.Left(), ctxExpr))
- n.SetRight(typecheck(n.Right(), ctxExpr))
- n.SetLeft(defaultlit(n.Left(), nil))
- t := n.Left().Type()
- if t == nil {
- return n
- }
- if !t.IsChan() {
- base.Errorf("invalid operation: %v (send to non-chan type %v)", n, t)
- return n
- }
-
- if !t.ChanDir().CanSend() {
- base.Errorf("invalid operation: %v (send to receive-only type %v)", n, t)
- return n
- }
-
- n.SetRight(assignconv(n.Right(), t.Elem(), "send"))
- if n.Right().Type() == nil {
- return n
- }
- return n
-
- case ir.OSLICEHEADER:
- // Errors here are Fatalf instead of Errorf because only the compiler
- // can construct an OSLICEHEADER node.
- // Components used in OSLICEHEADER that are supplied by parsed source code
- // have already been typechecked in e.g. OMAKESLICE earlier.
- t := n.Type()
- if t == nil {
- base.Fatalf("no type specified for OSLICEHEADER")
- }
-
- if !t.IsSlice() {
- base.Fatalf("invalid type %v for OSLICEHEADER", n.Type())
- }
-
- if n.Left() == nil || n.Left().Type() == nil || !n.Left().Type().IsUnsafePtr() {
- base.Fatalf("need unsafe.Pointer for OSLICEHEADER")
- }
-
- if x := n.List().Len(); x != 2 {
- base.Fatalf("expected 2 params (len, cap) for OSLICEHEADER, got %d", x)
- }
-
- n.SetLeft(typecheck(n.Left(), ctxExpr))
- l := typecheck(n.List().First(), ctxExpr)
- c := typecheck(n.List().Second(), ctxExpr)
- l = defaultlit(l, types.Types[types.TINT])
- c = defaultlit(c, types.Types[types.TINT])
-
- if ir.IsConst(l, constant.Int) && ir.Int64Val(l) < 0 {
- base.Fatalf("len for OSLICEHEADER must be non-negative")
- }
-
- if ir.IsConst(c, constant.Int) && ir.Int64Val(c) < 0 {
- base.Fatalf("cap for OSLICEHEADER must be non-negative")
- }
-
- if ir.IsConst(l, constant.Int) && ir.IsConst(c, constant.Int) && constant.Compare(l.Val(), token.GTR, c.Val()) {
- base.Fatalf("len larger than cap for OSLICEHEADER")
- }
-
- n.List().SetFirst(l)
- n.List().SetSecond(c)
- return n
-
- case ir.OMAKESLICECOPY:
- // Errors here are Fatalf instead of Errorf because only the compiler
- // can construct an OMAKESLICECOPY node.
- // Components used in OMAKESCLICECOPY that are supplied by parsed source code
- // have already been typechecked in OMAKE and OCOPY earlier.
- t := n.Type()
-
- if t == nil {
- base.Fatalf("no type specified for OMAKESLICECOPY")
- }
-
- if !t.IsSlice() {
- base.Fatalf("invalid type %v for OMAKESLICECOPY", n.Type())
- }
-
- if n.Left() == nil {
- base.Fatalf("missing len argument for OMAKESLICECOPY")
- }
-
- if n.Right() == nil {
- base.Fatalf("missing slice argument to copy for OMAKESLICECOPY")
- }
-
- n.SetLeft(typecheck(n.Left(), ctxExpr))
- n.SetRight(typecheck(n.Right(), ctxExpr))
-
- n.SetLeft(defaultlit(n.Left(), types.Types[types.TINT]))
-
- if !n.Left().Type().IsInteger() && n.Type().Kind() != types.TIDEAL {
- base.Errorf("non-integer len argument in OMAKESLICECOPY")
- }
-
- if ir.IsConst(n.Left(), constant.Int) {
- if doesoverflow(n.Left().Val(), types.Types[types.TINT]) {
- base.Fatalf("len for OMAKESLICECOPY too large")
- }
- if constant.Sign(n.Left().Val()) < 0 {
- base.Fatalf("len for OMAKESLICECOPY must be non-negative")
- }
- }
- return n
-
- case ir.OSLICE, ir.OSLICE3:
- n.SetLeft(typecheck(n.Left(), ctxExpr))
- low, high, max := n.SliceBounds()
- hasmax := n.Op().IsSlice3()
- low = typecheck(low, ctxExpr)
- high = typecheck(high, ctxExpr)
- max = typecheck(max, ctxExpr)
- n.SetLeft(defaultlit(n.Left(), nil))
- low = indexlit(low)
- high = indexlit(high)
- max = indexlit(max)
- n.SetSliceBounds(low, high, max)
- l := n.Left()
- if l.Type() == nil {
- n.SetType(nil)
- return n
- }
- if l.Type().IsArray() {
- if !islvalue(n.Left()) {
- base.Errorf("invalid operation %v (slice of unaddressable value)", n)
- n.SetType(nil)
- return n
- }
-
- addr := nodAddr(n.Left())
- addr.SetImplicit(true)
- n.SetLeft(typecheck(addr, ctxExpr))
- l = n.Left()
- }
- t := l.Type()
- var tp *types.Type
- if t.IsString() {
- if hasmax {
- base.Errorf("invalid operation %v (3-index slice of string)", n)
- n.SetType(nil)
- return n
- }
- n.SetType(t)
- n.SetOp(ir.OSLICESTR)
- } else if t.IsPtr() && t.Elem().IsArray() {
- tp = t.Elem()
- n.SetType(types.NewSlice(tp.Elem()))
- dowidth(n.Type())
- if hasmax {
- n.SetOp(ir.OSLICE3ARR)
- } else {
- n.SetOp(ir.OSLICEARR)
- }
- } else if t.IsSlice() {
- n.SetType(t)
- } else {
- base.Errorf("cannot slice %v (type %v)", l, t)
- n.SetType(nil)
- return n
- }
-
- if low != nil && !checksliceindex(l, low, tp) {
- n.SetType(nil)
- return n
- }
- if high != nil && !checksliceindex(l, high, tp) {
- n.SetType(nil)
- return n
- }
- if max != nil && !checksliceindex(l, max, tp) {
- n.SetType(nil)
- return n
- }
- if !checksliceconst(low, high) || !checksliceconst(low, max) || !checksliceconst(high, max) {
- n.SetType(nil)
- return n
- }
- return n
-
- // call and call like
- case ir.OCALL:
- n := n.(*ir.CallExpr)
- n.Use = ir.CallUseExpr
- if top == ctxStmt {
- n.Use = ir.CallUseStmt
- }
- typecheckslice(n.Init().Slice(), ctxStmt) // imported rewritten f(g()) calls (#30907)
- n.SetLeft(typecheck(n.Left(), ctxExpr|ctxType|ctxCallee))
- if n.Left().Diag() {
- n.SetDiag(true)
- }
-
- l := n.Left()
-
- if l.Op() == ir.ONAME && l.(*ir.Name).SubOp() != 0 {
- if n.IsDDD() && l.SubOp() != ir.OAPPEND {
- base.Errorf("invalid use of ... with builtin %v", l)
- }
-
- // builtin: OLEN, OCAP, etc.
- switch l.SubOp() {
- default:
- base.Fatalf("unknown builtin %v", l)
-
- case ir.OAPPEND, ir.ODELETE, ir.OMAKE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
- n.SetOp(l.SubOp())
- n.SetLeft(nil)
- n.SetTypecheck(0) // re-typechecking new op is OK, not a loop
- return typecheck(n, top)
-
- case ir.OCAP, ir.OCLOSE, ir.OIMAG, ir.OLEN, ir.OPANIC, ir.OREAL:
- typecheckargs(n)
- fallthrough
- case ir.ONEW, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
- arg, ok := needOneArg(n, "%v", n.Op())
- if !ok {
- n.SetType(nil)
- return n
- }
- u := ir.NewUnaryExpr(n.Pos(), l.SubOp(), arg)
- return typecheck(initExpr(n.Init().Slice(), u), top) // typecheckargs can add to old.Init
-
- case ir.OCOMPLEX, ir.OCOPY:
- typecheckargs(n)
- arg1, arg2, ok := needTwoArgs(n)
- if !ok {
- n.SetType(nil)
- return n
- }
- b := ir.NewBinaryExpr(n.Pos(), l.SubOp(), arg1, arg2)
- return typecheck(initExpr(n.Init().Slice(), b), top) // typecheckargs can add to old.Init
- }
- panic("unreachable")
- }
-
- n.SetLeft(defaultlit(n.Left(), nil))
- l = n.Left()
- if l.Op() == ir.OTYPE {
- if n.IsDDD() {
- if !l.Type().Broke() {
- base.Errorf("invalid use of ... in type conversion to %v", l.Type())
- }
- n.SetDiag(true)
- }
-
- // pick off before type-checking arguments
- arg, ok := needOneArg(n, "conversion to %v", l.Type())
- if !ok {
- n.SetType(nil)
- return n
- }
-
- n := ir.NodAt(n.Pos(), ir.OCONV, arg, nil)
- n.SetType(l.Type())
- return typecheck1(n, top)
- }
-
- typecheckargs(n)
- t := l.Type()
- if t == nil {
- n.SetType(nil)
- return n
- }
- checkwidth(t)
-
- switch l.Op() {
- case ir.ODOTINTER:
- n.SetOp(ir.OCALLINTER)
-
- case ir.ODOTMETH:
- n.SetOp(ir.OCALLMETH)
-
- // typecheckaste was used here but there wasn't enough
- // information further down the call chain to know if we
- // were testing a method receiver for unexported fields.
- // It isn't necessary, so just do a sanity check.
- tp := t.Recv().Type
-
- if l.Left() == nil || !types.Identical(l.Left().Type(), tp) {
- base.Fatalf("method receiver")
- }
-
- default:
- n.SetOp(ir.OCALLFUNC)
- if t.Kind() != types.TFUNC {
- // TODO(mdempsky): Remove "o.Sym() != nil" once we stop
- // using ir.Name for numeric literals.
- if o := ir.Orig(l); o.Name() != nil && o.Sym() != nil && types.BuiltinPkg.Lookup(o.Sym().Name).Def != nil {
- // be more specific when the non-function
- // name matches a predeclared function
- base.Errorf("cannot call non-function %L, declared at %s",
- l, base.FmtPos(o.Name().Pos()))
- } else {
- base.Errorf("cannot call non-function %L", l)
- }
- n.SetType(nil)
- return n
- }
- }
-
- typecheckaste(ir.OCALL, n.Left(), n.IsDDD(), t.Params(), n.List(), func() string { return fmt.Sprintf("argument to %v", n.Left()) })
- if t.NumResults() == 0 {
- return n
- }
- if t.NumResults() == 1 {
- n.SetType(l.Type().Results().Field(0).Type)
-
- if n.Op() == ir.OCALLFUNC && n.Left().Op() == ir.ONAME {
- if sym := n.Left().(*ir.Name).Sym(); isRuntimePkg(sym.Pkg) && sym.Name == "getg" {
- // Emit code for runtime.getg() directly instead of calling function.
- // Most such rewrites (for example the similar one for math.Sqrt) should be done in walk,
- // so that the ordering pass can make sure to preserve the semantics of the original code
- // (in particular, the exact time of the function call) by introducing temporaries.
- // In this case, we know getg() always returns the same result within a given function
- // and we want to avoid the temporaries, so we do the rewrite earlier than is typical.
- n.SetOp(ir.OGETG)
- }
- }
- return n
- }
-
- // multiple return
- if top&(ctxMultiOK|ctxStmt) == 0 {
- base.Errorf("multiple-value %v() in single-value context", l)
- return n
- }
-
- n.SetType(l.Type().Results())
- return n
-
- case ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
- n.SetType(types.Types[types.TUINTPTR])
- return n
-
- case ir.OCAP, ir.OLEN:
- n.SetLeft(typecheck(n.Left(), ctxExpr))
- n.SetLeft(defaultlit(n.Left(), nil))
- n.SetLeft(implicitstar(n.Left()))
- l := n.Left()
- t := l.Type()
- if t == nil {
- n.SetType(nil)
- return n
- }
-
- var ok bool
- if n.Op() == ir.OLEN {
- ok = okforlen[t.Kind()]
- } else {
- ok = okforcap[t.Kind()]
- }
- if !ok {
- base.Errorf("invalid argument %L for %v", l, n.Op())
- n.SetType(nil)
- return n
- }
-
- n.SetType(types.Types[types.TINT])
- return n
-
- case ir.OREAL, ir.OIMAG:
- n.SetLeft(typecheck(n.Left(), ctxExpr))
- l := n.Left()
- t := l.Type()
- if t == nil {
- n.SetType(nil)
- return n
- }
-
- // Determine result type.
- switch t.Kind() {
- case types.TIDEAL:
- n.SetType(types.UntypedFloat)
- case types.TCOMPLEX64:
- n.SetType(types.Types[types.TFLOAT32])
- case types.TCOMPLEX128:
- n.SetType(types.Types[types.TFLOAT64])
- default:
- base.Errorf("invalid argument %L for %v", l, n.Op())
- n.SetType(nil)
- return n
- }
- return n
-
- case ir.OCOMPLEX:
- l := typecheck(n.Left(), ctxExpr)
- r := typecheck(n.Right(), ctxExpr)
- if l.Type() == nil || r.Type() == nil {
- n.SetType(nil)
- return n
- }
- l, r = defaultlit2(l, r, false)
- if l.Type() == nil || r.Type() == nil {
- n.SetType(nil)
- return n
- }
- n.SetLeft(l)
- n.SetRight(r)
-
- if !types.Identical(l.Type(), r.Type()) {
- base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type(), r.Type())
- n.SetType(nil)
- return n
- }
-
- var t *types.Type
- switch l.Type().Kind() {
- default:
- base.Errorf("invalid operation: %v (arguments have type %v, expected floating-point)", n, l.Type())
- n.SetType(nil)
- return n
-
- case types.TIDEAL:
- t = types.UntypedComplex
-
- case types.TFLOAT32:
- t = types.Types[types.TCOMPLEX64]
-
- case types.TFLOAT64:
- t = types.Types[types.TCOMPLEX128]
- }
- n.SetType(t)
- return n
-
- case ir.OCLOSE:
- n.SetLeft(typecheck(n.Left(), ctxExpr))
- n.SetLeft(defaultlit(n.Left(), nil))
- l := n.Left()
- t := l.Type()
- if t == nil {
- n.SetType(nil)
- return n
- }
- if !t.IsChan() {
- base.Errorf("invalid operation: %v (non-chan type %v)", n, t)
- n.SetType(nil)
- return n
- }
-
- if !t.ChanDir().CanSend() {
- base.Errorf("invalid operation: %v (cannot close receive-only channel)", n)
- n.SetType(nil)
- return n
- }
- return n
-
- case ir.ODELETE:
- typecheckargs(n)
- args := n.List()
- if args.Len() == 0 {
- base.Errorf("missing arguments to delete")
- n.SetType(nil)
- return n
- }
-
- if args.Len() == 1 {
- base.Errorf("missing second (key) argument to delete")
- n.SetType(nil)
- return n
- }
-
- if args.Len() != 2 {
- base.Errorf("too many arguments to delete")
- n.SetType(nil)
- return n
- }
-
- l := args.First()
- r := args.Second()
- if l.Type() != nil && !l.Type().IsMap() {
- base.Errorf("first argument to delete must be map; have %L", l.Type())
- n.SetType(nil)
- return n
- }
-
- args.SetSecond(assignconv(r, l.Type().Key(), "delete"))
- return n
-
- case ir.OAPPEND:
- typecheckargs(n)
- args := n.List()
- if args.Len() == 0 {
- base.Errorf("missing arguments to append")
- n.SetType(nil)
- return n
- }
-
- t := args.First().Type()
- if t == nil {
- n.SetType(nil)
- return n
- }
-
- n.SetType(t)
- if !t.IsSlice() {
- if ir.IsNil(args.First()) {
- base.Errorf("first argument to append must be typed slice; have untyped nil")
- n.SetType(nil)
- return n
- }
-
- base.Errorf("first argument to append must be slice; have %L", t)
- n.SetType(nil)
- return n
- }
-
- if n.IsDDD() {
- if args.Len() == 1 {
- base.Errorf("cannot use ... on first argument to append")
- n.SetType(nil)
- return n
- }
-
- if args.Len() != 2 {
- base.Errorf("too many arguments to append")
- n.SetType(nil)
- return n
- }
-
- if t.Elem().IsKind(types.TUINT8) && args.Second().Type().IsString() {
- args.SetSecond(defaultlit(args.Second(), types.Types[types.TSTRING]))
- return n
- }
-
- args.SetSecond(assignconv(args.Second(), t.Underlying(), "append"))
- return n
- }
-
- as := args.Slice()[1:]
- for i, n := range as {
- if n.Type() == nil {
- continue
- }
- as[i] = assignconv(n, t.Elem(), "append")
- checkwidth(as[i].Type()) // ensure width is calculated for backend
- }
- return n
-
- case ir.OCOPY:
- n.SetType(types.Types[types.TINT])
- n.SetLeft(typecheck(n.Left(), ctxExpr))
- n.SetLeft(defaultlit(n.Left(), nil))
- n.SetRight(typecheck(n.Right(), ctxExpr))
- n.SetRight(defaultlit(n.Right(), nil))
- if n.Left().Type() == nil || n.Right().Type() == nil {
- n.SetType(nil)
- return n
- }
-
- // copy([]byte, string)
- if n.Left().Type().IsSlice() && n.Right().Type().IsString() {
- if types.Identical(n.Left().Type().Elem(), types.ByteType) {
- return n
- }
- base.Errorf("arguments to copy have different element types: %L and string", n.Left().Type())
- n.SetType(nil)
- return n
- }
-
- if !n.Left().Type().IsSlice() || !n.Right().Type().IsSlice() {
- if !n.Left().Type().IsSlice() && !n.Right().Type().IsSlice() {
- base.Errorf("arguments to copy must be slices; have %L, %L", n.Left().Type(), n.Right().Type())
- } else if !n.Left().Type().IsSlice() {
- base.Errorf("first argument to copy should be slice; have %L", n.Left().Type())
- } else {
- base.Errorf("second argument to copy should be slice or string; have %L", n.Right().Type())
- }
- n.SetType(nil)
- return n
- }
-
- if !types.Identical(n.Left().Type().Elem(), n.Right().Type().Elem()) {
- base.Errorf("arguments to copy have different element types: %L and %L", n.Left().Type(), n.Right().Type())
- n.SetType(nil)
- return n
- }
- return n
-
- case ir.OCONV:
- n := n.(*ir.ConvExpr)
- checkwidth(n.Type()) // ensure width is calculated for backend
- n.SetLeft(typecheck(n.Left(), ctxExpr))
- n.SetLeft(convlit1(n.Left(), n.Type(), true, nil))
- t := n.Left().Type()
- if t == nil || n.Type() == nil {
- n.SetType(nil)
- return n
- }
- op, why := convertop(n.Left().Op() == ir.OLITERAL, t, n.Type())
- if op == ir.OXXX {
- if !n.Diag() && !n.Type().Broke() && !n.Left().Diag() {
- base.Errorf("cannot convert %L to type %v%s", n.Left(), n.Type(), why)
- n.SetDiag(true)
- }
- n.SetOp(ir.OCONV)
- n.SetType(nil)
- return n
- }
-
- n.SetOp(op)
- switch n.Op() {
- case ir.OCONVNOP:
- if t.Kind() == n.Type().Kind() {
- switch t.Kind() {
- case types.TFLOAT32, types.TFLOAT64, types.TCOMPLEX64, types.TCOMPLEX128:
- // Floating point casts imply rounding and
- // so the conversion must be kept.
- n.SetOp(ir.OCONV)
- }
- }
-
- // do not convert to []byte literal. See CL 125796.
- // generated code and compiler memory footprint is better without it.
- case ir.OSTR2BYTES:
- // ok
-
- case ir.OSTR2RUNES:
- if n.Left().Op() == ir.OLITERAL {
- return stringtoruneslit(n)
- }
- }
- return n
-
- case ir.OMAKE:
- args := n.List().Slice()
- if len(args) == 0 {
- base.Errorf("missing argument to make")
- n.SetType(nil)
- return n
- }
-
- n.PtrList().Set(nil)
- l := args[0]
- l = typecheck(l, ctxType)
- t := l.Type()
- if t == nil {
- n.SetType(nil)
- return n
- }
-
- i := 1
- var nn ir.Node
- switch t.Kind() {
- default:
- base.Errorf("cannot make type %v", t)
- n.SetType(nil)
- return n
-
- case types.TSLICE:
- if i >= len(args) {
- base.Errorf("missing len argument to make(%v)", t)
- n.SetType(nil)
- return n
- }
-
- l = args[i]
- i++
- l = typecheck(l, ctxExpr)
- var r ir.Node
- if i < len(args) {
- r = args[i]
- i++
- r = typecheck(r, ctxExpr)
- }
-
- if l.Type() == nil || (r != nil && r.Type() == nil) {
- n.SetType(nil)
- return n
- }
- if !checkmake(t, "len", &l) || r != nil && !checkmake(t, "cap", &r) {
- n.SetType(nil)
- return n
- }
- if ir.IsConst(l, constant.Int) && r != nil && ir.IsConst(r, constant.Int) && constant.Compare(l.Val(), token.GTR, r.Val()) {
- base.Errorf("len larger than cap in make(%v)", t)
- n.SetType(nil)
- return n
- }
- nn = ir.NodAt(n.Pos(), ir.OMAKESLICE, l, r)
-
- case types.TMAP:
- if i < len(args) {
- l = args[i]
- i++
- l = typecheck(l, ctxExpr)
- l = defaultlit(l, types.Types[types.TINT])
- if l.Type() == nil {
- n.SetType(nil)
- return n
- }
- if !checkmake(t, "size", &l) {
- n.SetType(nil)
- return n
- }
- } else {
- l = nodintconst(0)
- }
- nn = ir.NodAt(n.Pos(), ir.OMAKEMAP, l, nil)
- nn.SetEsc(n.Esc())
-
- case types.TCHAN:
- l = nil
- if i < len(args) {
- l = args[i]
- i++
- l = typecheck(l, ctxExpr)
- l = defaultlit(l, types.Types[types.TINT])
- if l.Type() == nil {
- n.SetType(nil)
- return n
- }
- if !checkmake(t, "buffer", &l) {
- n.SetType(nil)
- return n
- }
- } else {
- l = nodintconst(0)
- }
- nn = ir.NodAt(n.Pos(), ir.OMAKECHAN, l, nil)
- }
-
- if i < len(args) {
- base.Errorf("too many arguments to make(%v)", t)
- n.SetType(nil)
- return n
- }
-
- nn.SetType(t)
- return nn
-
- case ir.ONEW:
- if n.Left() == nil {
- // Fatalf because the OCALL above checked for us,
- // so this must be an internally-generated mistake.
- base.Fatalf("missing argument to new")
- }
- l := n.Left()
- l = typecheck(l, ctxType)
- t := l.Type()
- if t == nil {
- n.SetType(nil)
- return n
- }
- n.SetLeft(l)
- n.SetType(types.NewPtr(t))
- return n
-
- case ir.OPRINT, ir.OPRINTN:
- typecheckargs(n)
- ls := n.List().Slice()
- for i1, n1 := range ls {
- // Special case for print: int constant is int64, not int.
- if ir.IsConst(n1, constant.Int) {
- ls[i1] = defaultlit(ls[i1], types.Types[types.TINT64])
- } else {
- ls[i1] = defaultlit(ls[i1], nil)
- }
- }
- return n
-
- case ir.OPANIC:
- n.SetLeft(typecheck(n.Left(), ctxExpr))
- n.SetLeft(defaultlit(n.Left(), types.Types[types.TINTER]))
- if n.Left().Type() == nil {
- n.SetType(nil)
- return n
- }
- return n
-
- case ir.ORECOVER:
- if n.List().Len() != 0 {
- base.Errorf("too many arguments to recover")
- n.SetType(nil)
- return n
- }
-
- n.SetType(types.Types[types.TINTER])
- return n
-
- case ir.OCLOSURE:
- n := n.(*ir.ClosureExpr)
- typecheckclosure(n, top)
- if n.Type() == nil {
- return n
- }
- return n
-
- case ir.OITAB:
- n.SetLeft(typecheck(n.Left(), ctxExpr))
- t := n.Left().Type()
- if t == nil {
- n.SetType(nil)
- return n
- }
- if !t.IsInterface() {
- base.Fatalf("OITAB of %v", t)
- }
- n.SetType(types.NewPtr(types.Types[types.TUINTPTR]))
- return n
-
- case ir.OIDATA:
- // Whoever creates the OIDATA node must know a priori the concrete type at that moment,
- // usually by just having checked the OITAB.
- base.Fatalf("cannot typecheck interface data %v", n)
- panic("unreachable")
-
- case ir.OSPTR:
- n.SetLeft(typecheck(n.Left(), ctxExpr))
- t := n.Left().Type()
- if t == nil {
- n.SetType(nil)
- return n
- }
- if !t.IsSlice() && !t.IsString() {
- base.Fatalf("OSPTR of %v", t)
- }
- if t.IsString() {
- n.SetType(types.NewPtr(types.Types[types.TUINT8]))
- } else {
- n.SetType(types.NewPtr(t.Elem()))
- }
- return n
-
- case ir.OCLOSUREREAD:
- return n
-
- case ir.OCFUNC:
- n.SetLeft(typecheck(n.Left(), ctxExpr))
- n.SetType(types.Types[types.TUINTPTR])
- return n
-
- case ir.OCONVNOP:
- n.SetLeft(typecheck(n.Left(), ctxExpr))
- return n
-
- // statements
- case ir.OAS:
- n := n.(*ir.AssignStmt)
- typecheckas(n)
-
- // Code that creates temps does not bother to set defn, so do it here.
- if n.Left().Op() == ir.ONAME && ir.IsAutoTmp(n.Left()) {
- n.Left().Name().Defn = n
- }
- return n
-
- case ir.OAS2:
- typecheckas2(n.(*ir.AssignListStmt))
- return n
-
- case ir.OBREAK,
- ir.OCONTINUE,
- ir.ODCL,
- ir.OGOTO,
- ir.OFALL,
- ir.OVARKILL,
- ir.OVARLIVE:
- return n
-
- case ir.OBLOCK:
- typecheckslice(n.List().Slice(), ctxStmt)
- return n
-
- case ir.OLABEL:
- decldepth++
- if n.Sym().IsBlank() {
- // Empty identifier is valid but useless.
- // Eliminate now to simplify life later.
- // See issues 7538, 11589, 11593.
- n = ir.NodAt(n.Pos(), ir.OBLOCK, nil, nil)
- }
- return n
-
- case ir.ODEFER, ir.OGO:
- n := n.(*ir.GoDeferStmt)
- n.SetLeft(typecheck(n.Left(), ctxStmt|ctxExpr))
- if !n.Left().Diag() {
- checkdefergo(n)
- }
- return n
-
- case ir.OFOR, ir.OFORUNTIL:
- typecheckslice(n.Init().Slice(), ctxStmt)
- decldepth++
- n.SetLeft(typecheck(n.Left(), ctxExpr))
- n.SetLeft(defaultlit(n.Left(), nil))
- if n.Left() != nil {
- t := n.Left().Type()
- if t != nil && !t.IsBoolean() {
- base.Errorf("non-bool %L used as for condition", n.Left())
- }
- }
- n.SetRight(typecheck(n.Right(), ctxStmt))
- if n.Op() == ir.OFORUNTIL {
- typecheckslice(n.List().Slice(), ctxStmt)
- }
- typecheckslice(n.Body().Slice(), ctxStmt)
- decldepth--
- return n
-
- case ir.OIF:
- typecheckslice(n.Init().Slice(), ctxStmt)
- n.SetLeft(typecheck(n.Left(), ctxExpr))
- n.SetLeft(defaultlit(n.Left(), nil))
- if n.Left() != nil {
- t := n.Left().Type()
- if t != nil && !t.IsBoolean() {
- base.Errorf("non-bool %L used as if condition", n.Left())
- }
- }
- typecheckslice(n.Body().Slice(), ctxStmt)
- typecheckslice(n.Rlist().Slice(), ctxStmt)
- return n
-
- case ir.ORETURN:
- typecheckargs(n)
- if Curfn == nil {
- base.Errorf("return outside function")
- n.SetType(nil)
- return n
- }
-
- if hasNamedResults(Curfn) && n.List().Len() == 0 {
- return n
- }
- typecheckaste(ir.ORETURN, nil, false, Curfn.Type().Results(), n.List(), func() string { return "return argument" })
- return n
-
- case ir.ORETJMP:
- return n
-
- case ir.OSELECT:
- typecheckselect(n.(*ir.SelectStmt))
- return n
-
- case ir.OSWITCH:
- typecheckswitch(n.(*ir.SwitchStmt))
- return n
-
- case ir.ORANGE:
- typecheckrange(n.(*ir.RangeStmt))
- return n
-
- case ir.OTYPESW:
- base.Errorf("use of .(type) outside type switch")
- n.SetType(nil)
- return n
-
- case ir.ODCLFUNC:
- typecheckfunc(n.(*ir.Func))
- return n
-
- case ir.ODCLCONST:
- n.SetLeft(typecheck(n.Left(), ctxExpr))
- return n
-
- case ir.ODCLTYPE:
- n.SetLeft(typecheck(n.Left(), ctxType))
- checkwidth(n.Left().Type())
- return n
- }
-
- // No return n here!
- // Individual cases can type-assert n, introducing a new one.
- // Each must execute its own return n.
-}
-
-func typecheckargs(n ir.Node) {
- var list []ir.Node
- switch n := n.(type) {
- default:
- base.Fatalf("typecheckargs %+v", n.Op())
- case *ir.CallExpr:
- list = n.List().Slice()
- if n.IsDDD() {
- typecheckslice(list, ctxExpr)
- return
- }
- case *ir.ReturnStmt:
- list = n.List().Slice()
- }
- if len(list) != 1 {
- typecheckslice(list, ctxExpr)
- return
- }
-
- typecheckslice(list, ctxExpr|ctxMultiOK)
- t := list[0].Type()
- if t == nil || !t.IsFuncArgStruct() {
- return
- }
-
- // Rewrite f(g()) into t1, t2, ... = g(); f(t1, t2, ...).
-
- // Save n as n.Orig for fmt.go.
- if ir.Orig(n) == n {
- n.(ir.OrigNode).SetOrig(ir.SepCopy(n))
- }
-
- as := ir.Nod(ir.OAS2, nil, nil)
- as.PtrRlist().Append(list...)
-
- // If we're outside of function context, then this call will
- // be executed during the generated init function. However,
- // init.go hasn't yet created it. Instead, associate the
- // temporary variables with initTodo for now, and init.go
- // will reassociate them later when it's appropriate.
- static := Curfn == nil
- if static {
- Curfn = initTodo
- }
- list = nil
- for _, f := range t.FieldSlice() {
- t := temp(f.Type)
- as.PtrInit().Append(ir.Nod(ir.ODCL, t, nil))
- as.PtrList().Append(t)
- list = append(list, t)
- }
- if static {
- Curfn = nil
- }
-
- switch n := n.(type) {
- case *ir.CallExpr:
- n.PtrList().Set(list)
- case *ir.ReturnStmt:
- n.PtrList().Set(list)
- }
-
- n.PtrInit().Append(typecheck(as, ctxStmt))
-}
-
-func checksliceindex(l ir.Node, r ir.Node, tp *types.Type) bool {
- t := r.Type()
- if t == nil {
- return false
- }
- if !t.IsInteger() {
- base.Errorf("invalid slice index %v (type %v)", r, t)
- return false
- }
-
- if r.Op() == ir.OLITERAL {
- x := r.Val()
- if constant.Sign(x) < 0 {
- base.Errorf("invalid slice index %v (index must be non-negative)", r)
- return false
- } else if tp != nil && tp.NumElem() >= 0 && constant.Compare(x, token.GTR, constant.MakeInt64(tp.NumElem())) {
- base.Errorf("invalid slice index %v (out of bounds for %d-element array)", r, tp.NumElem())
- return false
- } else if ir.IsConst(l, constant.String) && constant.Compare(x, token.GTR, constant.MakeInt64(int64(len(ir.StringVal(l))))) {
- base.Errorf("invalid slice index %v (out of bounds for %d-byte string)", r, len(ir.StringVal(l)))
- return false
- } else if doesoverflow(x, types.Types[types.TINT]) {
- base.Errorf("invalid slice index %v (index too large)", r)
- return false
- }
- }
-
- return true
-}
-
-func checksliceconst(lo ir.Node, hi ir.Node) bool {
- if lo != nil && hi != nil && lo.Op() == ir.OLITERAL && hi.Op() == ir.OLITERAL && constant.Compare(lo.Val(), token.GTR, hi.Val()) {
- base.Errorf("invalid slice index: %v > %v", lo, hi)
- return false
- }
-
- return true
-}
-
-func checkdefergo(n *ir.GoDeferStmt) {
- what := "defer"
- if n.Op() == ir.OGO {
- what = "go"
- }
-
- switch n.Left().Op() {
- // ok
- case ir.OCALLINTER,
- ir.OCALLMETH,
- ir.OCALLFUNC,
- ir.OCLOSE,
- ir.OCOPY,
- ir.ODELETE,
- ir.OPANIC,
- ir.OPRINT,
- ir.OPRINTN,
- ir.ORECOVER:
- return
-
- case ir.OAPPEND,
- ir.OCAP,
- ir.OCOMPLEX,
- ir.OIMAG,
- ir.OLEN,
- ir.OMAKE,
- ir.OMAKESLICE,
- ir.OMAKECHAN,
- ir.OMAKEMAP,
- ir.ONEW,
- ir.OREAL,
- ir.OLITERAL: // conversion or unsafe.Alignof, Offsetof, Sizeof
- if orig := ir.Orig(n.Left()); orig.Op() == ir.OCONV {
- break
- }
- base.ErrorfAt(n.Pos(), "%s discards result of %v", what, n.Left())
- return
- }
-
- // type is broken or missing, most likely a method call on a broken type
- // we will warn about the broken type elsewhere. no need to emit a potentially confusing error
- if n.Left().Type() == nil || n.Left().Type().Broke() {
- return
- }
-
- if !n.Diag() {
- // The syntax made sure it was a call, so this must be
- // a conversion.
- n.SetDiag(true)
- base.ErrorfAt(n.Pos(), "%s requires function call, not conversion", what)
- }
-}
-
-// The result of implicitstar MUST be assigned back to n, e.g.
-// n.Left = implicitstar(n.Left)
-func implicitstar(n ir.Node) ir.Node {
- // insert implicit * if needed for fixed array
- t := n.Type()
- if t == nil || !t.IsPtr() {
- return n
- }
- t = t.Elem()
- if t == nil {
- return n
- }
- if !t.IsArray() {
- return n
- }
- star := ir.Nod(ir.ODEREF, n, nil)
- star.SetImplicit(true)
- return typecheck(star, ctxExpr)
-}
-
-func needOneArg(n *ir.CallExpr, f string, args ...interface{}) (ir.Node, bool) {
- if n.List().Len() == 0 {
- p := fmt.Sprintf(f, args...)
- base.Errorf("missing argument to %s: %v", p, n)
- return nil, false
- }
-
- if n.List().Len() > 1 {
- p := fmt.Sprintf(f, args...)
- base.Errorf("too many arguments to %s: %v", p, n)
- return n.List().First(), false
- }
-
- return n.List().First(), true
-}
-
-func needTwoArgs(n *ir.CallExpr) (ir.Node, ir.Node, bool) {
- if n.List().Len() != 2 {
- if n.List().Len() < 2 {
- base.Errorf("not enough arguments in call to %v", n)
- } else {
- base.Errorf("too many arguments in call to %v", n)
- }
- return nil, nil, false
- }
- return n.List().First(), n.List().Second(), true
-}
-
-func lookdot1(errnode ir.Node, s *types.Sym, t *types.Type, fs *types.Fields, dostrcmp int) *types.Field {
- var r *types.Field
- for _, f := range fs.Slice() {
- if dostrcmp != 0 && f.Sym.Name == s.Name {
- return f
- }
- if dostrcmp == 2 && strings.EqualFold(f.Sym.Name, s.Name) {
- return f
- }
- if f.Sym != s {
- continue
- }
- if r != nil {
- if errnode != nil {
- base.Errorf("ambiguous selector %v", errnode)
- } else if t.IsPtr() {
- base.Errorf("ambiguous selector (%v).%v", t, s)
- } else {
- base.Errorf("ambiguous selector %v.%v", t, s)
- }
- break
- }
-
- r = f
- }
-
- return r
-}
-
-// typecheckMethodExpr checks selector expressions (ODOT) where the
-// base expression is a type expression (OTYPE).
-func typecheckMethodExpr(n *ir.SelectorExpr) (res ir.Node) {
- if enableTrace && base.Flag.LowerT {
- defer tracePrint("typecheckMethodExpr", n)(&res)
- }
-
- t := n.Left().Type()
-
- // Compute the method set for t.
- var ms *types.Fields
- if t.IsInterface() {
- ms = t.Fields()
- } else {
- mt := methtype(t)
- if mt == nil {
- base.Errorf("%v undefined (type %v has no method %v)", n, t, n.Sym())
- n.SetType(nil)
- return n
- }
- expandmeth(mt)
- ms = mt.AllMethods()
-
- // The method expression T.m requires a wrapper when T
- // is different from m's declared receiver type. We
- // normally generate these wrappers while writing out
- // runtime type descriptors, which is always done for
- // types declared at package scope. However, we need
- // to make sure to generate wrappers for anonymous
- // receiver types too.
- if mt.Sym() == nil {
- NeedRuntimeType(t)
- }
- }
-
- s := n.Sym()
- m := lookdot1(n, s, t, ms, 0)
- if m == nil {
- if lookdot1(n, s, t, ms, 1) != nil {
- base.Errorf("%v undefined (cannot refer to unexported method %v)", n, s)
- } else if _, ambig := dotpath(s, t, nil, false); ambig {
- base.Errorf("%v undefined (ambiguous selector)", n) // method or field
- } else {
- base.Errorf("%v undefined (type %v has no method %v)", n, t, s)
- }
- n.SetType(nil)
- return n
- }
-
- if !isMethodApplicable(t, m) {
- base.Errorf("invalid method expression %v (needs pointer receiver: (*%v).%S)", n, t, s)
- n.SetType(nil)
- return n
- }
-
- me := ir.NewMethodExpr(n.Pos(), n.Left().Type(), m)
- me.SetType(methodfunc(m.Type, n.Left().Type()))
- f := NewName(methodSym(t, m.Sym))
- f.SetClass(ir.PFUNC)
- f.SetType(me.Type())
- me.FuncName_ = f
-
- // Issue 25065. Make sure that we emit the symbol for a local method.
- if base.Ctxt.Flag_dynlink && !inimport && (t.Sym() == nil || t.Sym().Pkg == types.LocalPkg) {
- NeedFuncSym(me.FuncName_.Sym())
- }
-
- return me
-}
-
-// isMethodApplicable reports whether method m can be called on a
-// value of type t. This is necessary because we compute a single
-// method set for both T and *T, but some *T methods are not
-// applicable to T receivers.
-func isMethodApplicable(t *types.Type, m *types.Field) bool {
- return t.IsPtr() || !m.Type.Recv().Type.IsPtr() || isifacemethod(m.Type) || m.Embedded == 2
-}
-
-func derefall(t *types.Type) *types.Type {
- for t != nil && t.IsPtr() {
- t = t.Elem()
- }
- return t
-}
-
-func lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field {
- s := n.Sym()
-
- dowidth(t)
- var f1 *types.Field
- if t.IsStruct() || t.IsInterface() {
- f1 = lookdot1(n, s, t, t.Fields(), dostrcmp)
- }
-
- var f2 *types.Field
- if n.Left().Type() == t || n.Left().Type().Sym() == nil {
- mt := methtype(t)
- if mt != nil {
- f2 = lookdot1(n, s, mt, mt.Methods(), dostrcmp)
- }
- }
-
- if f1 != nil {
- if dostrcmp > 1 || f1.Broke() {
- // Already in the process of diagnosing an error.
- return f1
- }
- if f2 != nil {
- base.Errorf("%v is both field and method", n.Sym())
- }
- if f1.Offset == types.BADWIDTH {
- base.Fatalf("lookdot badwidth %v %p", f1, f1)
- }
- n.SetOffset(f1.Offset)
- n.SetType(f1.Type)
- if t.IsInterface() {
- if n.Left().Type().IsPtr() {
- star := ir.Nod(ir.ODEREF, n.Left(), nil)
- star.SetImplicit(true)
- n.SetLeft(typecheck(star, ctxExpr))
- }
-
- n.SetOp(ir.ODOTINTER)
- }
- n.Selection = f1
- return f1
- }
-
- if f2 != nil {
- if dostrcmp > 1 {
- // Already in the process of diagnosing an error.
- return f2
- }
- tt := n.Left().Type()
- dowidth(tt)
- rcvr := f2.Type.Recv().Type
- if !types.Identical(rcvr, tt) {
- if rcvr.IsPtr() && types.Identical(rcvr.Elem(), tt) {
- checklvalue(n.Left(), "call pointer method on")
- addr := nodAddr(n.Left())
- addr.SetImplicit(true)
- n.SetLeft(typecheck(addr, ctxType|ctxExpr))
- } else if tt.IsPtr() && (!rcvr.IsPtr() || rcvr.IsPtr() && rcvr.Elem().NotInHeap()) && types.Identical(tt.Elem(), rcvr) {
- star := ir.Nod(ir.ODEREF, n.Left(), nil)
- star.SetImplicit(true)
- n.SetLeft(typecheck(star, ctxType|ctxExpr))
- } else if tt.IsPtr() && tt.Elem().IsPtr() && types.Identical(derefall(tt), derefall(rcvr)) {
- base.Errorf("calling method %v with receiver %L requires explicit dereference", n.Sym(), n.Left())
- for tt.IsPtr() {
- // Stop one level early for method with pointer receiver.
- if rcvr.IsPtr() && !tt.Elem().IsPtr() {
- break
- }
- star := ir.Nod(ir.ODEREF, n.Left(), nil)
- star.SetImplicit(true)
- n.SetLeft(typecheck(star, ctxType|ctxExpr))
- tt = tt.Elem()
- }
- } else {
- base.Fatalf("method mismatch: %v for %v", rcvr, tt)
- }
- }
-
- implicit, ll := n.Implicit(), n.Left()
- for ll != nil && (ll.Op() == ir.ODOT || ll.Op() == ir.ODOTPTR || ll.Op() == ir.ODEREF) {
- switch l := ll.(type) {
- case *ir.SelectorExpr:
- implicit, ll = l.Implicit(), l.Left()
- case *ir.StarExpr:
- implicit, ll = l.Implicit(), l.Left()
- }
- }
- if implicit && ll.Type().IsPtr() && ll.Type().Sym() != nil && ll.Type().Sym().Def != nil && ir.AsNode(ll.Type().Sym().Def).Op() == ir.OTYPE {
- // It is invalid to automatically dereference a named pointer type when selecting a method.
- // Make n.Left == ll to clarify error message.
- n.SetLeft(ll)
- return nil
- }
-
- n.SetSym(methodSym(n.Left().Type(), f2.Sym))
- n.SetOffset(f2.Offset)
- n.SetType(f2.Type)
- n.SetOp(ir.ODOTMETH)
- n.Selection = f2
-
- return f2
- }
-
- return nil
-}
-
-func nokeys(l ir.Nodes) bool {
- for _, n := range l.Slice() {
- if n.Op() == ir.OKEY || n.Op() == ir.OSTRUCTKEY {
- return false
- }
- }
- return true
-}
-
-func hasddd(t *types.Type) bool {
- for _, tl := range t.Fields().Slice() {
- if tl.IsDDD() {
- return true
- }
- }
-
- return false
-}
-
-// typecheck assignment: type list = expression list
-func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl ir.Nodes, desc func() string) {
- var t *types.Type
- var i int
-
- lno := base.Pos
- defer func() { base.Pos = lno }()
-
- if tstruct.Broke() {
- return
- }
-
- var n ir.Node
- if nl.Len() == 1 {
- n = nl.First()
- }
-
- n1 := tstruct.NumFields()
- n2 := nl.Len()
- if !hasddd(tstruct) {
- if n2 > n1 {
- goto toomany
- }
- if n2 < n1 {
- goto notenough
- }
- } else {
- if !isddd {
- if n2 < n1-1 {
- goto notenough
- }
- } else {
- if n2 > n1 {
- goto toomany
- }
- if n2 < n1 {
- goto notenough
- }
- }
- }
-
- i = 0
- for _, tl := range tstruct.Fields().Slice() {
- t = tl.Type
- if tl.IsDDD() {
- if isddd {
- if i >= nl.Len() {
- goto notenough
- }
- if nl.Len()-i > 1 {
- goto toomany
- }
- n = nl.Index(i)
- setlineno(n)
- if n.Type() != nil {
- nl.SetIndex(i, assignconvfn(n, t, desc))
- }
- return
- }
-
- // TODO(mdempsky): Make into ... call with implicit slice.
- for ; i < nl.Len(); i++ {
- n = nl.Index(i)
- setlineno(n)
- if n.Type() != nil {
- nl.SetIndex(i, assignconvfn(n, t.Elem(), desc))
- }
- }
- return
- }
-
- if i >= nl.Len() {
- goto notenough
- }
- n = nl.Index(i)
- setlineno(n)
- if n.Type() != nil {
- nl.SetIndex(i, assignconvfn(n, t, desc))
- }
- i++
- }
-
- if i < nl.Len() {
- goto toomany
- }
- if isddd {
- if call != nil {
- base.Errorf("invalid use of ... in call to %v", call)
- } else {
- base.Errorf("invalid use of ... in %v", op)
- }
- }
- return
-
-notenough:
- if n == nil || (!n.Diag() && n.Type() != nil) {
- details := errorDetails(nl, tstruct, isddd)
- if call != nil {
- // call is the expression being called, not the overall call.
- // Method expressions have the form T.M, and the compiler has
- // rewritten those to ONAME nodes but left T in Left.
- if call.Op() == ir.OMETHEXPR {
- base.Errorf("not enough arguments in call to method expression %v%s", call, details)
- } else {
- base.Errorf("not enough arguments in call to %v%s", call, details)
- }
- } else {
- base.Errorf("not enough arguments to %v%s", op, details)
- }
- if n != nil {
- n.SetDiag(true)
- }
- }
- return
-
-toomany:
- details := errorDetails(nl, tstruct, isddd)
- if call != nil {
- base.Errorf("too many arguments in call to %v%s", call, details)
- } else {
- base.Errorf("too many arguments to %v%s", op, details)
- }
-}
-
-func errorDetails(nl ir.Nodes, tstruct *types.Type, isddd bool) string {
- // If we don't know any type at a call site, let's suppress any return
- // message signatures. See Issue https://golang.org/issues/19012.
- if tstruct == nil {
- return ""
- }
- // If any node has an unknown type, suppress it as well
- for _, n := range nl.Slice() {
- if n.Type() == nil {
- return ""
- }
- }
- return fmt.Sprintf("\n\thave %s\n\twant %v", fmtSignature(nl, isddd), tstruct)
-}
-
-// sigrepr is a type's representation to the outside world,
-// in string representations of return signatures
-// e.g in error messages about wrong arguments to return.
-func sigrepr(t *types.Type, isddd bool) string {
- switch t {
- case types.UntypedString:
- return "string"
- case types.UntypedBool:
- return "bool"
- }
-
- if t.Kind() == types.TIDEAL {
- // "untyped number" is not commonly used
- // outside of the compiler, so let's use "number".
- // TODO(mdempsky): Revisit this.
- return "number"
- }
-
- // Turn []T... argument to ...T for clearer error message.
- if isddd {
- if !t.IsSlice() {
- base.Fatalf("bad type for ... argument: %v", t)
- }
- return "..." + t.Elem().String()
- }
- return t.String()
-}
-
-// sigerr returns the signature of the types at the call or return.
-func fmtSignature(nl ir.Nodes, isddd bool) string {
- if nl.Len() < 1 {
- return "()"
- }
-
- var typeStrings []string
- for i, n := range nl.Slice() {
- isdddArg := isddd && i == nl.Len()-1
- typeStrings = append(typeStrings, sigrepr(n.Type(), isdddArg))
- }
-
- return fmt.Sprintf("(%s)", strings.Join(typeStrings, ", "))
-}
-
-// type check composite
-func fielddup(name string, hash map[string]bool) {
- if hash[name] {
- base.Errorf("duplicate field name in struct literal: %s", name)
- return
- }
- hash[name] = true
-}
-
-// iscomptype reports whether type t is a composite literal type.
-func iscomptype(t *types.Type) bool {
- switch t.Kind() {
- case types.TARRAY, types.TSLICE, types.TSTRUCT, types.TMAP:
- return true
- default:
- return false
- }
-}
-
-// pushtype adds elided type information for composite literals if
-// appropriate, and returns the resulting expression.
-func pushtype(nn ir.Node, t *types.Type) ir.Node {
- if nn == nil || nn.Op() != ir.OCOMPLIT {
- return nn
- }
- n := nn.(*ir.CompLitExpr)
- if n.Right() != nil {
- return n
- }
-
- switch {
- case iscomptype(t):
- // For T, return T{...}.
- n.SetRight(ir.TypeNode(t))
-
- case t.IsPtr() && iscomptype(t.Elem()):
- // For *T, return &T{...}.
- n.SetRight(ir.TypeNode(t.Elem()))
-
- addr := nodAddrAt(n.Pos(), n)
- addr.SetImplicit(true)
- return addr
- }
- return n
-}
-
-// The result of typecheckcomplit MUST be assigned back to n, e.g.
-// n.Left = typecheckcomplit(n.Left)
-func typecheckcomplit(n *ir.CompLitExpr) (res ir.Node) {
- if enableTrace && base.Flag.LowerT {
- defer tracePrint("typecheckcomplit", n)(&res)
- }
-
- lno := base.Pos
- defer func() {
- base.Pos = lno
- }()
-
- if n.Right() == nil {
- base.ErrorfAt(n.Pos(), "missing type in composite literal")
- n.SetType(nil)
- return n
- }
-
- // Save original node (including n.Right)
- n.SetOrig(ir.Copy(n))
-
- setlineno(n.Right())
-
- // Need to handle [...]T arrays specially.
- if array, ok := n.Right().(*ir.ArrayType); ok && array.Elem != nil && array.Len == nil {
- array.Elem = typecheck(array.Elem, ctxType)
- elemType := array.Elem.Type()
- if elemType == nil {
- n.SetType(nil)
- return n
- }
- length := typecheckarraylit(elemType, -1, n.List().Slice(), "array literal")
- n.SetOp(ir.OARRAYLIT)
- n.SetType(types.NewArray(elemType, length))
- n.SetRight(nil)
- return n
- }
-
- n.SetRight(typecheck(n.Right(), ctxType))
- t := n.Right().Type()
- if t == nil {
- n.SetType(nil)
- return n
- }
- n.SetType(t)
-
- switch t.Kind() {
- default:
- base.Errorf("invalid composite literal type %v", t)
- n.SetType(nil)
-
- case types.TARRAY:
- typecheckarraylit(t.Elem(), t.NumElem(), n.List().Slice(), "array literal")
- n.SetOp(ir.OARRAYLIT)
- n.SetRight(nil)
-
- case types.TSLICE:
- length := typecheckarraylit(t.Elem(), -1, n.List().Slice(), "slice literal")
- n.SetOp(ir.OSLICELIT)
- n.SetRight(nil)
- n.Len = length
-
- case types.TMAP:
- var cs constSet
- for i3, l := range n.List().Slice() {
- setlineno(l)
- if l.Op() != ir.OKEY {
- n.List().SetIndex(i3, typecheck(l, ctxExpr))
- base.Errorf("missing key in map literal")
- continue
- }
- l := l.(*ir.KeyExpr)
-
- r := l.Left()
- r = pushtype(r, t.Key())
- r = typecheck(r, ctxExpr)
- l.SetLeft(assignconv(r, t.Key(), "map key"))
- cs.add(base.Pos, l.Left(), "key", "map literal")
-
- r = l.Right()
- r = pushtype(r, t.Elem())
- r = typecheck(r, ctxExpr)
- l.SetRight(assignconv(r, t.Elem(), "map value"))
- }
-
- n.SetOp(ir.OMAPLIT)
- n.SetRight(nil)
-
- case types.TSTRUCT:
- // Need valid field offsets for Xoffset below.
- dowidth(t)
-
- errored := false
- if n.List().Len() != 0 && nokeys(n.List()) {
- // simple list of variables
- ls := n.List().Slice()
- for i, n1 := range ls {
- setlineno(n1)
- n1 = typecheck(n1, ctxExpr)
- ls[i] = n1
- if i >= t.NumFields() {
- if !errored {
- base.Errorf("too many values in %v", n)
- errored = true
- }
- continue
- }
-
- f := t.Field(i)
- s := f.Sym
- if s != nil && !types.IsExported(s.Name) && s.Pkg != types.LocalPkg {
- base.Errorf("implicit assignment of unexported field '%s' in %v literal", s.Name, t)
- }
- // No pushtype allowed here. Must name fields for that.
- n1 = assignconv(n1, f.Type, "field value")
- sk := nodSym(ir.OSTRUCTKEY, n1, f.Sym)
- sk.SetOffset(f.Offset)
- ls[i] = sk
- }
- if len(ls) < t.NumFields() {
- base.Errorf("too few values in %v", n)
- }
- } else {
- hash := make(map[string]bool)
-
- // keyed list
- ls := n.List().Slice()
- for i, l := range ls {
- setlineno(l)
-
- if l.Op() == ir.OKEY {
- kv := l.(*ir.KeyExpr)
- key := kv.Left()
-
- // Sym might have resolved to name in other top-level
- // package, because of import dot. Redirect to correct sym
- // before we do the lookup.
- s := key.Sym()
- if id, ok := key.(*ir.Ident); ok && dotImportRefs[id] != nil {
- s = lookup(s.Name)
- }
-
- // An OXDOT uses the Sym field to hold
- // the field to the right of the dot,
- // so s will be non-nil, but an OXDOT
- // is never a valid struct literal key.
- if s == nil || s.Pkg != types.LocalPkg || key.Op() == ir.OXDOT || s.IsBlank() {
- base.Errorf("invalid field name %v in struct initializer", key)
- continue
- }
-
- l = ir.NewStructKeyExpr(l.Pos(), s, kv.Right())
- ls[i] = l
- }
-
- if l.Op() != ir.OSTRUCTKEY {
- if !errored {
- base.Errorf("mixture of field:value and value initializers")
- errored = true
- }
- ls[i] = typecheck(ls[i], ctxExpr)
- continue
- }
- l := l.(*ir.StructKeyExpr)
-
- f := lookdot1(nil, l.Sym(), t, t.Fields(), 0)
- if f == nil {
- if ci := lookdot1(nil, l.Sym(), t, t.Fields(), 2); ci != nil { // Case-insensitive lookup.
- if visible(ci.Sym) {
- base.Errorf("unknown field '%v' in struct literal of type %v (but does have %v)", l.Sym(), t, ci.Sym)
- } else if nonexported(l.Sym()) && l.Sym().Name == ci.Sym.Name { // Ensure exactness before the suggestion.
- base.Errorf("cannot refer to unexported field '%v' in struct literal of type %v", l.Sym(), t)
- } else {
- base.Errorf("unknown field '%v' in struct literal of type %v", l.Sym(), t)
- }
- continue
- }
- var f *types.Field
- p, _ := dotpath(l.Sym(), t, &f, true)
- if p == nil || f.IsMethod() {
- base.Errorf("unknown field '%v' in struct literal of type %v", l.Sym(), t)
- continue
- }
- // dotpath returns the parent embedded types in reverse order.
- var ep []string
- for ei := len(p) - 1; ei >= 0; ei-- {
- ep = append(ep, p[ei].field.Sym.Name)
- }
- ep = append(ep, l.Sym().Name)
- base.Errorf("cannot use promoted field %v in struct literal of type %v", strings.Join(ep, "."), t)
- continue
- }
- fielddup(f.Sym.Name, hash)
- l.SetOffset(f.Offset)
-
- // No pushtype allowed here. Tried and rejected.
- l.SetLeft(typecheck(l.Left(), ctxExpr))
- l.SetLeft(assignconv(l.Left(), f.Type, "field value"))
- }
- }
-
- n.SetOp(ir.OSTRUCTLIT)
- n.SetRight(nil)
- }
-
- return n
-}
-
-// typecheckarraylit type-checks a sequence of slice/array literal elements.
-func typecheckarraylit(elemType *types.Type, bound int64, elts []ir.Node, ctx string) int64 {
- // If there are key/value pairs, create a map to keep seen
- // keys so we can check for duplicate indices.
- var indices map[int64]bool
- for _, elt := range elts {
- if elt.Op() == ir.OKEY {
- indices = make(map[int64]bool)
- break
- }
- }
-
- var key, length int64
- for i, elt := range elts {
- setlineno(elt)
- r := elts[i]
- var kv *ir.KeyExpr
- if elt.Op() == ir.OKEY {
- elt := elt.(*ir.KeyExpr)
- elt.SetLeft(typecheck(elt.Left(), ctxExpr))
- key = indexconst(elt.Left())
- if key < 0 {
- if !elt.Left().Diag() {
- if key == -2 {
- base.Errorf("index too large")
- } else {
- base.Errorf("index must be non-negative integer constant")
- }
- elt.Left().SetDiag(true)
- }
- key = -(1 << 30) // stay negative for a while
- }
- kv = elt
- r = elt.Right()
- }
-
- r = pushtype(r, elemType)
- r = typecheck(r, ctxExpr)
- r = assignconv(r, elemType, ctx)
- if kv != nil {
- kv.SetRight(r)
- } else {
- elts[i] = r
- }
-
- if key >= 0 {
- if indices != nil {
- if indices[key] {
- base.Errorf("duplicate index in %s: %d", ctx, key)
- } else {
- indices[key] = true
- }
- }
-
- if bound >= 0 && key >= bound {
- base.Errorf("array index %d out of bounds [0:%d]", key, bound)
- bound = -1
- }
- }
-
- key++
- if key > length {
- length = key
- }
- }
-
- return length
-}
-
-// visible reports whether sym is exported or locally defined.
-func visible(sym *types.Sym) bool {
- return sym != nil && (types.IsExported(sym.Name) || sym.Pkg == types.LocalPkg)
-}
-
-// nonexported reports whether sym is an unexported field.
-func nonexported(sym *types.Sym) bool {
- return sym != nil && !types.IsExported(sym.Name)
-}
-
-// lvalue etc
-func islvalue(n ir.Node) bool {
- switch n.Op() {
- case ir.OINDEX:
- if n.Left().Type() != nil && n.Left().Type().IsArray() {
- return islvalue(n.Left())
- }
- if n.Left().Type() != nil && n.Left().Type().IsString() {
- return false
- }
- fallthrough
- case ir.ODEREF, ir.ODOTPTR, ir.OCLOSUREREAD:
- return true
-
- case ir.ODOT:
- return islvalue(n.Left())
-
- case ir.ONAME:
- if n.Class() == ir.PFUNC {
- return false
- }
- return true
-
- case ir.ONAMEOFFSET:
- return true
- }
-
- return false
-}
-
-func checklvalue(n ir.Node, verb string) {
- if !islvalue(n) {
- base.Errorf("cannot %s %v", verb, n)
- }
-}
-
-func checkassign(stmt ir.Node, n ir.Node) {
- // Variables declared in ORANGE are assigned on every iteration.
- if !ir.DeclaredBy(n, stmt) || stmt.Op() == ir.ORANGE {
- r := outervalue(n)
- if r.Op() == ir.ONAME {
- r.Name().SetAssigned(true)
- if r.Name().IsClosureVar() {
- r.Name().Defn.Name().SetAssigned(true)
- }
- }
- }
-
- if islvalue(n) {
- return
- }
- if n.Op() == ir.OINDEXMAP {
- n.SetIndexMapLValue(true)
- return
- }
-
- // have already complained about n being invalid
- if n.Type() == nil {
- return
- }
-
- switch {
- case n.Op() == ir.ODOT && n.(*ir.SelectorExpr).Left().Op() == ir.OINDEXMAP:
- base.Errorf("cannot assign to struct field %v in map", n)
- case (n.Op() == ir.OINDEX && n.(*ir.IndexExpr).Left().Type().IsString()) || n.Op() == ir.OSLICESTR:
- base.Errorf("cannot assign to %v (strings are immutable)", n)
- case n.Op() == ir.OLITERAL && n.Sym() != nil && isGoConst(n):
- base.Errorf("cannot assign to %v (declared const)", n)
- default:
- base.Errorf("cannot assign to %v", n)
- }
- n.SetType(nil)
-}
-
-func checkassignlist(stmt ir.Node, l ir.Nodes) {
- for _, n := range l.Slice() {
- checkassign(stmt, n)
- }
-}
-
-// samesafeexpr checks whether it is safe to reuse one of l and r
-// instead of computing both. samesafeexpr assumes that l and r are
-// used in the same statement or expression. In order for it to be
-// safe to reuse l or r, they must:
-// * be the same expression
-// * not have side-effects (no function calls, no channel ops);
-// however, panics are ok
-// * not cause inappropriate aliasing; e.g. two string to []byte
-// conversions, must result in two distinct slices
-//
-// The handling of OINDEXMAP is subtle. OINDEXMAP can occur both
-// as an lvalue (map assignment) and an rvalue (map access). This is
-// currently OK, since the only place samesafeexpr gets used on an
-// lvalue expression is for OSLICE and OAPPEND optimizations, and it
-// is correct in those settings.
-func samesafeexpr(l ir.Node, r ir.Node) bool {
- if l.Op() != r.Op() || !types.Identical(l.Type(), r.Type()) {
- return false
- }
-
- switch l.Op() {
- case ir.ONAME, ir.OCLOSUREREAD:
- return l == r
-
- case ir.ODOT, ir.ODOTPTR:
- l := l.(*ir.SelectorExpr)
- r := r.(*ir.SelectorExpr)
- return l.Sym() != nil && r.Sym() != nil && l.Sym() == r.Sym() && samesafeexpr(l.Left(), r.Left())
-
- case ir.ODEREF:
- l := l.(*ir.StarExpr)
- r := r.(*ir.StarExpr)
- return samesafeexpr(l.Left(), r.Left())
-
- case ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG:
- l := l.(*ir.UnaryExpr)
- r := r.(*ir.UnaryExpr)
- return samesafeexpr(l.Left(), r.Left())
-
- case ir.OCONVNOP:
- l := l.(*ir.ConvExpr)
- r := r.(*ir.ConvExpr)
- return samesafeexpr(l.Left(), r.Left())
-
- case ir.OCONV:
- l := l.(*ir.ConvExpr)
- r := r.(*ir.ConvExpr)
- // Some conversions can't be reused, such as []byte(str).
- // Allow only numeric-ish types. This is a bit conservative.
- return issimple[l.Type().Kind()] && samesafeexpr(l.Left(), r.Left())
-
- case ir.OINDEX, ir.OINDEXMAP:
- l := l.(*ir.IndexExpr)
- r := r.(*ir.IndexExpr)
- return samesafeexpr(l.Left(), r.Left()) && samesafeexpr(l.Right(), r.Right())
-
- case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD:
- l := l.(*ir.BinaryExpr)
- r := r.(*ir.BinaryExpr)
- return samesafeexpr(l.Left(), r.Left()) && samesafeexpr(l.Right(), r.Right())
-
- case ir.OLITERAL:
- return constant.Compare(l.Val(), token.EQL, r.Val())
-
- case ir.ONIL:
- return true
- }
-
- return false
-}
-
-// type check assignment.
-// if this assignment is the definition of a var on the left side,
-// fill in the var's type.
-func typecheckas(n *ir.AssignStmt) {
- if enableTrace && base.Flag.LowerT {
- defer tracePrint("typecheckas", n)(nil)
- }
-
- // delicate little dance.
- // the definition of n may refer to this assignment
- // as its definition, in which case it will call typecheckas.
- // in that case, do not call typecheck back, or it will cycle.
- // if the variable has a type (ntype) then typechecking
- // will not look at defn, so it is okay (and desirable,
- // so that the conversion below happens).
- n.SetLeft(resolve(n.Left()))
-
- if !ir.DeclaredBy(n.Left(), n) || n.Left().Name().Ntype != nil {
- n.SetLeft(typecheck(n.Left(), ctxExpr|ctxAssign))
- }
-
- // Use ctxMultiOK so we can emit an "N variables but M values" error
- // to be consistent with typecheckas2 (#26616).
- n.SetRight(typecheck(n.Right(), ctxExpr|ctxMultiOK))
- checkassign(n, n.Left())
- if n.Right() != nil && n.Right().Type() != nil {
- if n.Right().Type().IsFuncArgStruct() {
- base.Errorf("assignment mismatch: 1 variable but %v returns %d values", n.Right().(*ir.CallExpr).Left(), n.Right().Type().NumFields())
- // Multi-value RHS isn't actually valid for OAS; nil out
- // to indicate failed typechecking.
- n.Right().SetType(nil)
- } else if n.Left().Type() != nil {
- n.SetRight(assignconv(n.Right(), n.Left().Type(), "assignment"))
- }
- }
-
- if ir.DeclaredBy(n.Left(), n) && n.Left().Name().Ntype == nil {
- n.SetRight(defaultlit(n.Right(), nil))
- n.Left().SetType(n.Right().Type())
- }
-
- // second half of dance.
- // now that right is done, typecheck the left
- // just to get it over with. see dance above.
- n.SetTypecheck(1)
-
- if n.Left().Typecheck() == 0 {
- n.SetLeft(typecheck(n.Left(), ctxExpr|ctxAssign))
- }
- if !ir.IsBlank(n.Left()) {
- checkwidth(n.Left().Type()) // ensure width is calculated for backend
- }
-}
-
-func checkassignto(src *types.Type, dst ir.Node) {
- if op, why := assignop(src, dst.Type()); op == ir.OXXX {
- base.Errorf("cannot assign %v to %L in multiple assignment%s", src, dst, why)
- return
- }
-}
-
-func typecheckas2(n *ir.AssignListStmt) {
- if enableTrace && base.Flag.LowerT {
- defer tracePrint("typecheckas2", n)(nil)
- }
-
- ls := n.List().Slice()
- for i1, n1 := range ls {
- // delicate little dance.
- n1 = resolve(n1)
- ls[i1] = n1
-
- if !ir.DeclaredBy(n1, n) || n1.Name().Ntype != nil {
- ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign)
- }
- }
-
- cl := n.List().Len()
- cr := n.Rlist().Len()
- if cl > 1 && cr == 1 {
- n.Rlist().SetFirst(typecheck(n.Rlist().First(), ctxExpr|ctxMultiOK))
- } else {
- typecheckslice(n.Rlist().Slice(), ctxExpr)
- }
- checkassignlist(n, n.List())
-
- var l ir.Node
- var r ir.Node
- if cl == cr {
- // easy
- ls := n.List().Slice()
- rs := n.Rlist().Slice()
- for il, nl := range ls {
- nr := rs[il]
- if nl.Type() != nil && nr.Type() != nil {
- rs[il] = assignconv(nr, nl.Type(), "assignment")
- }
- if ir.DeclaredBy(nl, n) && nl.Name().Ntype == nil {
- rs[il] = defaultlit(rs[il], nil)
- nl.SetType(rs[il].Type())
- }
- }
-
- goto out
- }
-
- l = n.List().First()
- r = n.Rlist().First()
-
- // x,y,z = f()
- if cr == 1 {
- if r.Type() == nil {
- goto out
- }
- switch r.Op() {
- case ir.OCALLMETH, ir.OCALLINTER, ir.OCALLFUNC:
- if !r.Type().IsFuncArgStruct() {
- break
- }
- cr = r.Type().NumFields()
- if cr != cl {
- goto mismatch
- }
- r.(*ir.CallExpr).Use = ir.CallUseList
- n.SetOp(ir.OAS2FUNC)
- for i, l := range n.List().Slice() {
- f := r.Type().Field(i)
- if f.Type != nil && l.Type() != nil {
- checkassignto(f.Type, l)
- }
- if ir.DeclaredBy(l, n) && l.Name().Ntype == nil {
- l.SetType(f.Type)
- }
- }
- goto out
- }
- }
-
- // x, ok = y
- if cl == 2 && cr == 1 {
- if r.Type() == nil {
- goto out
- }
- switch r.Op() {
- case ir.OINDEXMAP, ir.ORECV, ir.ODOTTYPE:
- switch r.Op() {
- case ir.OINDEXMAP:
- n.SetOp(ir.OAS2MAPR)
- case ir.ORECV:
- n.SetOp(ir.OAS2RECV)
- case ir.ODOTTYPE:
- n.SetOp(ir.OAS2DOTTYPE)
- r.SetOp(ir.ODOTTYPE2)
- }
- if l.Type() != nil {
- checkassignto(r.Type(), l)
- }
- if ir.DeclaredBy(l, n) {
- l.SetType(r.Type())
- }
- l := n.List().Second()
- if l.Type() != nil && !l.Type().IsBoolean() {
- checkassignto(types.Types[types.TBOOL], l)
- }
- if ir.DeclaredBy(l, n) && l.Name().Ntype == nil {
- l.SetType(types.Types[types.TBOOL])
- }
- goto out
- }
- }
-
-mismatch:
- switch r.Op() {
- default:
- base.Errorf("assignment mismatch: %d variables but %d values", cl, cr)
- case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
- base.Errorf("assignment mismatch: %d variables but %v returns %d values", cl, r.Left(), cr)
- }
-
- // second half of dance
-out:
- n.SetTypecheck(1)
- ls = n.List().Slice()
- for i1, n1 := range ls {
- if n1.Typecheck() == 0 {
- ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign)
- }
- }
-}
-
-// type check function definition
-// To be called by typecheck, not directly.
-// (Call typecheckFunc instead.)
-func typecheckfunc(n *ir.Func) {
- if enableTrace && base.Flag.LowerT {
- defer tracePrint("typecheckfunc", n)(nil)
- }
-
- for _, ln := range n.Dcl {
- if ln.Op() == ir.ONAME && (ln.Class() == ir.PPARAM || ln.Class() == ir.PPARAMOUT) {
- ln.Decldepth = 1
- }
- }
-
- n.Nname = typecheck(n.Nname, ctxExpr|ctxAssign).(*ir.Name)
- t := n.Nname.Type()
- if t == nil {
- return
- }
- n.SetType(t)
- rcvr := t.Recv()
- if rcvr != nil && n.Shortname != nil {
- m := addmethod(n, n.Shortname, t, true, n.Pragma&ir.Nointerface != 0)
- if m == nil {
- return
- }
-
- n.Nname.SetSym(methodSym(rcvr.Type, n.Shortname))
- declare(n.Nname, ir.PFUNC)
- }
-
- if base.Ctxt.Flag_dynlink && !inimport && n.Nname != nil {
- NeedFuncSym(n.Sym())
- }
-}
-
-// The result of stringtoruneslit MUST be assigned back to n, e.g.
-// n.Left = stringtoruneslit(n.Left)
-func stringtoruneslit(n *ir.ConvExpr) ir.Node {
- if n.Left().Op() != ir.OLITERAL || n.Left().Val().Kind() != constant.String {
- base.Fatalf("stringtoarraylit %v", n)
- }
-
- var l []ir.Node
- i := 0
- for _, r := range ir.StringVal(n.Left()) {
- l = append(l, ir.Nod(ir.OKEY, nodintconst(int64(i)), nodintconst(int64(r))))
- i++
- }
-
- nn := ir.Nod(ir.OCOMPLIT, nil, ir.TypeNode(n.Type()))
- nn.PtrList().Set(l)
- return typecheck(nn, ctxExpr)
-}
-
-var mapqueue []*ir.MapType
-
-func checkMapKeys() {
- for _, n := range mapqueue {
- k := n.Type().MapType().Key
- if !k.Broke() && !IsComparable(k) {
- base.ErrorfAt(n.Pos(), "invalid map key type %v", k)
- }
- }
- mapqueue = nil
-}
-
-func typecheckdeftype(n *ir.Name) {
- if enableTrace && base.Flag.LowerT {
- defer tracePrint("typecheckdeftype", n)(nil)
- }
-
- t := types.NewNamed(n)
- t.Vargen = n.Vargen
- if n.Pragma()&ir.NotInHeap != 0 {
- t.SetNotInHeap(true)
- }
-
- n.SetType(t)
- n.SetTypecheck(1)
- n.SetWalkdef(1)
-
- defercheckwidth()
- errorsBefore := base.Errors()
- n.Ntype = typecheckNtype(n.Ntype)
- if underlying := n.Ntype.Type(); underlying != nil {
- t.SetUnderlying(underlying)
- } else {
- n.SetDiag(true)
- n.SetType(nil)
- }
- if t.Kind() == types.TFORW && base.Errors() > errorsBefore {
- // Something went wrong during type-checking,
- // but it was reported. Silence future errors.
- t.SetBroke(true)
- }
- resumecheckwidth()
-}
-
-func typecheckdef(n ir.Node) {
- if enableTrace && base.Flag.LowerT {
- defer tracePrint("typecheckdef", n)(nil)
- }
-
- lno := setlineno(n)
-
- if n.Op() == ir.ONONAME {
- if !n.Diag() {
- n.SetDiag(true)
-
- // Note: adderrorname looks for this string and
- // adds context about the outer expression
- base.ErrorfAt(base.Pos, "undefined: %v", n.Sym())
- }
- base.Pos = lno
- return
- }
-
- if n.Walkdef() == 1 {
- base.Pos = lno
- return
- }
-
- typecheckdefstack = append(typecheckdefstack, n)
- if n.Walkdef() == 2 {
- base.FlushErrors()
- fmt.Printf("typecheckdef loop:")
- for i := len(typecheckdefstack) - 1; i >= 0; i-- {
- n := typecheckdefstack[i]
- fmt.Printf(" %v", n.Sym())
- }
- fmt.Printf("\n")
- base.Fatalf("typecheckdef loop")
- }
-
- n.SetWalkdef(2)
-
- if n.Type() != nil || n.Sym() == nil { // builtin or no name
- goto ret
- }
-
- switch n.Op() {
- default:
- base.Fatalf("typecheckdef %v", n.Op())
-
- case ir.OLITERAL:
- if n.Name().Ntype != nil {
- n.Name().Ntype = typecheckNtype(n.Name().Ntype)
- n.SetType(n.Name().Ntype.Type())
- n.Name().Ntype = nil
- if n.Type() == nil {
- n.SetDiag(true)
- goto ret
- }
- }
-
- e := n.Name().Defn
- n.Name().Defn = nil
- if e == nil {
- ir.Dump("typecheckdef nil defn", n)
- base.ErrorfAt(n.Pos(), "xxx")
- }
-
- e = typecheck(e, ctxExpr)
- if e.Type() == nil {
- goto ret
- }
- if !isGoConst(e) {
- if !e.Diag() {
- if e.Op() == ir.ONIL {
- base.ErrorfAt(n.Pos(), "const initializer cannot be nil")
- } else {
- base.ErrorfAt(n.Pos(), "const initializer %v is not a constant", e)
- }
- e.SetDiag(true)
- }
- goto ret
- }
-
- t := n.Type()
- if t != nil {
- if !ir.OKForConst[t.Kind()] {
- base.ErrorfAt(n.Pos(), "invalid constant type %v", t)
- goto ret
- }
-
- if !e.Type().IsUntyped() && !types.Identical(t, e.Type()) {
- base.ErrorfAt(n.Pos(), "cannot use %L as type %v in const initializer", e, t)
- goto ret
- }
-
- e = convlit(e, t)
- }
-
- n.SetType(e.Type())
- if n.Type() != nil {
- n.SetVal(e.Val())
- }
-
- case ir.ONAME:
- if n.Name().Ntype != nil {
- n.Name().Ntype = typecheckNtype(n.Name().Ntype)
- n.SetType(n.Name().Ntype.Type())
- if n.Type() == nil {
- n.SetDiag(true)
- goto ret
- }
- }
-
- if n.Type() != nil {
- break
- }
- if n.Name().Defn == nil {
- if n.SubOp() != 0 { // like OPRINTN
- break
- }
- if base.Errors() > 0 {
- // Can have undefined variables in x := foo
- // that make x have an n.name.Defn == nil.
- // If there are other errors anyway, don't
- // bother adding to the noise.
- break
- }
-
- base.Fatalf("var without type, init: %v", n.Sym())
- }
-
- if n.Name().Defn.Op() == ir.ONAME {
- n.Name().Defn = typecheck(n.Name().Defn, ctxExpr)
- n.SetType(n.Name().Defn.Type())
- break
- }
-
- n.Name().Defn = typecheck(n.Name().Defn, ctxStmt) // fills in n.Type
-
- case ir.OTYPE:
- n := n.(*ir.Name)
- if n.Alias() {
- // Type alias declaration: Simply use the rhs type - no need
- // to create a new type.
- // If we have a syntax error, name.Ntype may be nil.
- if n.Ntype != nil {
- n.Ntype = typecheckNtype(n.Ntype)
- n.SetType(n.Ntype.Type())
- if n.Type() == nil {
- n.SetDiag(true)
- goto ret
- }
- // For package-level type aliases, set n.Sym.Def so we can identify
- // it as a type alias during export. See also #31959.
- if n.Curfn == nil {
- n.Sym().Def = n.Ntype
- }
- }
- break
- }
-
- // regular type declaration
- typecheckdeftype(n)
- }
-
-ret:
- if n.Op() != ir.OLITERAL && n.Type() != nil && n.Type().IsUntyped() {
- base.Fatalf("got %v for %v", n.Type(), n)
- }
- last := len(typecheckdefstack) - 1
- if typecheckdefstack[last] != n {
- base.Fatalf("typecheckdefstack mismatch")
- }
- typecheckdefstack[last] = nil
- typecheckdefstack = typecheckdefstack[:last]
-
- base.Pos = lno
- n.SetWalkdef(1)
-}
-
-func checkmake(t *types.Type, arg string, np *ir.Node) bool {
- n := *np
- if !n.Type().IsInteger() && n.Type().Kind() != types.TIDEAL {
- base.Errorf("non-integer %s argument in make(%v) - %v", arg, t, n.Type())
- return false
- }
-
- // Do range checks for constants before defaultlit
- // to avoid redundant "constant NNN overflows int" errors.
- if n.Op() == ir.OLITERAL {
- v := toint(n.Val())
- if constant.Sign(v) < 0 {
- base.Errorf("negative %s argument in make(%v)", arg, t)
- return false
- }
- if doesoverflow(v, types.Types[types.TINT]) {
- base.Errorf("%s argument too large in make(%v)", arg, t)
- return false
- }
- }
-
- // defaultlit is necessary for non-constants too: n might be 1.1<<k.
- // TODO(gri) The length argument requirements for (array/slice) make
- // are the same as for index expressions. Factor the code better;
- // for instance, indexlit might be called here and incorporate some
- // of the bounds checks done for make.
- n = defaultlit(n, types.Types[types.TINT])
- *np = n
-
- return true
-}
-
-// markBreak marks control statements containing break statements with SetHasBreak(true).
-func markBreak(fn *ir.Func) {
- var labels map[*types.Sym]ir.Node
- var implicit ir.Node
-
- var mark func(ir.Node) error
- mark = func(n ir.Node) error {
- switch n.Op() {
- default:
- ir.DoChildren(n, mark)
-
- case ir.OBREAK:
- if n.Sym() == nil {
- setHasBreak(implicit)
- } else {
- setHasBreak(labels[n.Sym()])
- }
-
- case ir.OFOR, ir.OFORUNTIL, ir.OSWITCH, ir.OSELECT, ir.ORANGE:
- old := implicit
- implicit = n
- var sym *types.Sym
- switch n := n.(type) {
- case *ir.ForStmt:
- sym = n.Sym()
- case *ir.RangeStmt:
- sym = n.Sym()
- case *ir.SelectStmt:
- sym = n.Sym()
- case *ir.SwitchStmt:
- sym = n.Sym()
- }
- if sym != nil {
- if labels == nil {
- // Map creation delayed until we need it - most functions don't.
- labels = make(map[*types.Sym]ir.Node)
- }
- labels[sym] = n
- }
- ir.DoChildren(n, mark)
- if sym != nil {
- delete(labels, sym)
- }
- implicit = old
- }
- return nil
- }
-
- mark(fn)
-}
-
-func controlLabel(n ir.Node) *types.Sym {
- switch n := n.(type) {
- default:
- base.Fatalf("controlLabel %+v", n.Op())
- return nil
- case *ir.ForStmt:
- return n.Sym()
- case *ir.RangeStmt:
- return n.Sym()
- case *ir.SelectStmt:
- return n.Sym()
- case *ir.SwitchStmt:
- return n.Sym()
- }
-}
-
-func setHasBreak(n ir.Node) {
- switch n := n.(type) {
- default:
- base.Fatalf("setHasBreak %+v", n.Op())
- case nil:
- // ignore
- case *ir.ForStmt:
- n.SetHasBreak(true)
- case *ir.RangeStmt:
- n.SetHasBreak(true)
- case *ir.SelectStmt:
- n.SetHasBreak(true)
- case *ir.SwitchStmt:
- n.SetHasBreak(true)
- }
-}
-
-// isTermNodes reports whether the Nodes list ends with a terminating statement.
-func isTermNodes(l ir.Nodes) bool {
- s := l.Slice()
- c := len(s)
- if c == 0 {
- return false
- }
- return isTermNode(s[c-1])
-}
-
-// isTermNode reports whether the node n, the last one in a
-// statement list, is a terminating statement.
-func isTermNode(n ir.Node) bool {
- switch n.Op() {
- // NOTE: OLABEL is treated as a separate statement,
- // not a separate prefix, so skipping to the last statement
- // in the block handles the labeled statement case by
- // skipping over the label. No case OLABEL here.
-
- case ir.OBLOCK:
- return isTermNodes(n.List())
-
- case ir.OGOTO, ir.ORETURN, ir.ORETJMP, ir.OPANIC, ir.OFALL:
- return true
-
- case ir.OFOR, ir.OFORUNTIL:
- if n.Left() != nil {
- return false
- }
- if n.HasBreak() {
- return false
- }
- return true
-
- case ir.OIF:
- return isTermNodes(n.Body()) && isTermNodes(n.Rlist())
-
- case ir.OSWITCH:
- if n.HasBreak() {
- return false
- }
- def := false
- for _, cas := range n.List().Slice() {
- cas := cas.(*ir.CaseStmt)
- if !isTermNodes(cas.Body()) {
- return false
- }
- if cas.List().Len() == 0 { // default
- def = true
- }
- }
- return def
-
- case ir.OSELECT:
- if n.HasBreak() {
- return false
- }
- for _, cas := range n.List().Slice() {
- cas := cas.(*ir.CaseStmt)
- if !isTermNodes(cas.Body()) {
- return false
- }
- }
- return true
- }
-
- return false
-}
-
-// checkreturn makes sure that fn terminates appropriately.
-func checkreturn(fn *ir.Func) {
- if fn.Type().NumResults() != 0 && fn.Body().Len() != 0 {
- markBreak(fn)
- if !isTermNodes(fn.Body()) {
- base.ErrorfAt(fn.Endlineno, "missing return at end of function")
- }
- }
-}
-
-func deadcode(fn *ir.Func) {
- deadcodeslice(fn.PtrBody())
-
- if fn.Body().Len() == 0 {
- return
- }
-
- for _, n := range fn.Body().Slice() {
- if n.Init().Len() > 0 {
- return
- }
- switch n.Op() {
- case ir.OIF:
- if !ir.IsConst(n.Left(), constant.Bool) || n.Body().Len() > 0 || n.Rlist().Len() > 0 {
- return
- }
- case ir.OFOR:
- if !ir.IsConst(n.Left(), constant.Bool) || ir.BoolVal(n.Left()) {
- return
- }
- default:
- return
- }
- }
-
- fn.PtrBody().Set([]ir.Node{ir.Nod(ir.OBLOCK, nil, nil)})
-}
-
-func deadcodeslice(nn *ir.Nodes) {
- var lastLabel = -1
- for i, n := range nn.Slice() {
- if n != nil && n.Op() == ir.OLABEL {
- lastLabel = i
- }
- }
- for i, n := range nn.Slice() {
- // Cut is set to true when all nodes after i'th position
- // should be removed.
- // In other words, it marks whole slice "tail" as dead.
- cut := false
- if n == nil {
- continue
- }
- if n.Op() == ir.OIF {
- n.SetLeft(deadcodeexpr(n.Left()))
- if ir.IsConst(n.Left(), constant.Bool) {
- var body ir.Nodes
- if ir.BoolVal(n.Left()) {
- n.SetRlist(ir.Nodes{})
- body = n.Body()
- } else {
- n.SetBody(ir.Nodes{})
- body = n.Rlist()
- }
- // If "then" or "else" branch ends with panic or return statement,
- // it is safe to remove all statements after this node.
- // isterminating is not used to avoid goto-related complications.
- // We must be careful not to deadcode-remove labels, as they
- // might be the target of a goto. See issue 28616.
- if body := body.Slice(); len(body) != 0 {
- switch body[(len(body) - 1)].Op() {
- case ir.ORETURN, ir.ORETJMP, ir.OPANIC:
- if i > lastLabel {
- cut = true
- }
- }
- }
- }
- }
-
- deadcodeslice(n.PtrInit())
- switch n.Op() {
- case ir.OBLOCK:
- deadcodeslice(n.PtrList())
- case ir.OCASE:
- deadcodeslice(n.PtrBody())
- case ir.OFOR:
- deadcodeslice(n.PtrBody())
- case ir.OIF:
- deadcodeslice(n.PtrBody())
- deadcodeslice(n.PtrRlist())
- case ir.ORANGE:
- deadcodeslice(n.PtrBody())
- case ir.OSELECT:
- deadcodeslice(n.PtrList())
- case ir.OSWITCH:
- deadcodeslice(n.PtrList())
- }
-
- if cut {
- nn.Set(nn.Slice()[:i+1])
- break
- }
- }
-}
-
-func deadcodeexpr(n ir.Node) ir.Node {
- // Perform dead-code elimination on short-circuited boolean
- // expressions involving constants with the intent of
- // producing a constant 'if' condition.
- switch n.Op() {
- case ir.OANDAND:
- n.SetLeft(deadcodeexpr(n.Left()))
- n.SetRight(deadcodeexpr(n.Right()))
- if ir.IsConst(n.Left(), constant.Bool) {
- if ir.BoolVal(n.Left()) {
- return n.Right() // true && x => x
- } else {
- return n.Left() // false && x => false
- }
- }
- case ir.OOROR:
- n.SetLeft(deadcodeexpr(n.Left()))
- n.SetRight(deadcodeexpr(n.Right()))
- if ir.IsConst(n.Left(), constant.Bool) {
- if ir.BoolVal(n.Left()) {
- return n.Left() // true || x => true
- } else {
- return n.Right() // false || x => x
- }
- }
- }
- return n
-}
-
-// getIotaValue returns the current value for "iota",
-// or -1 if not within a ConstSpec.
-func getIotaValue() int64 {
- if i := len(typecheckdefstack); i > 0 {
- if x := typecheckdefstack[i-1]; x.Op() == ir.OLITERAL {
- return x.(*ir.Name).Iota()
- }
- }
-
- if Curfn != nil && Curfn.Iota() >= 0 {
- return Curfn.Iota()
- }
-
- return -1
-}
-
-// curpkg returns the current package, based on Curfn.
-func curpkg() *types.Pkg {
- fn := Curfn
- if fn == nil {
- // Initialization expressions for package-scope variables.
- return types.LocalPkg
- }
- return fnpkg(fn.Nname)
-}
-
-// MethodName returns the ONAME representing the method
-// referenced by expression n, which must be a method selector,
-// method expression, or method value.
-func methodExprName(n ir.Node) *ir.Name {
- name, _ := methodExprFunc(n).Nname.(*ir.Name)
- return name
-}
-
-// MethodFunc is like MethodName, but returns the types.Field instead.
-func methodExprFunc(n ir.Node) *types.Field {
- switch n.Op() {
- case ir.ODOTMETH:
- return n.(*ir.SelectorExpr).Selection
- case ir.OMETHEXPR:
- return n.(*ir.MethodExpr).Method
- case ir.OCALLPART:
- return callpartMethod(n)
- }
- base.Fatalf("unexpected node: %v (%v)", n, n.Op())
- panic("unreachable")
-}
+++ /dev/null
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements convertions between *types.Node and *Node.
-// TODO(gri) try to eliminate these soon
-
-package gc
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
- "cmd/compile/internal/base"
- "cmd/compile/internal/ir"
-)
-
-// evalunsafe evaluates a package unsafe operation and returns the result.
-func evalunsafe(n ir.Node) int64 {
- switch n.Op() {
- case ir.OALIGNOF, ir.OSIZEOF:
- n.SetLeft(typecheck(n.Left(), ctxExpr))
- n.SetLeft(defaultlit(n.Left(), nil))
- tr := n.Left().Type()
- if tr == nil {
- return 0
- }
- dowidth(tr)
- if n.Op() == ir.OALIGNOF {
- return int64(tr.Align)
- }
- return tr.Width
-
- case ir.OOFFSETOF:
- // must be a selector.
- if n.Left().Op() != ir.OXDOT {
- base.Errorf("invalid expression %v", n)
- return 0
- }
- sel := n.Left().(*ir.SelectorExpr)
-
- // Remember base of selector to find it back after dot insertion.
- // Since r->left may be mutated by typechecking, check it explicitly
- // first to track it correctly.
- sel.SetLeft(typecheck(sel.Left(), ctxExpr))
- sbase := sel.Left()
-
- tsel := typecheck(sel, ctxExpr)
- n.SetLeft(tsel)
- if tsel.Type() == nil {
- return 0
- }
- switch tsel.Op() {
- case ir.ODOT, ir.ODOTPTR:
- break
- case ir.OCALLPART:
- base.Errorf("invalid expression %v: argument is a method value", n)
- return 0
- default:
- base.Errorf("invalid expression %v", n)
- return 0
- }
-
- // Sum offsets for dots until we reach sbase.
- var v int64
- var next ir.Node
- for r := tsel; r != sbase; r = next {
- switch r.Op() {
- case ir.ODOTPTR:
- // For Offsetof(s.f), s may itself be a pointer,
- // but accessing f must not otherwise involve
- // indirection via embedded pointer types.
- if r.Left() != sbase {
- base.Errorf("invalid expression %v: selector implies indirection of embedded %v", n, r.Left())
- return 0
- }
- fallthrough
- case ir.ODOT:
- v += r.Offset()
- next = r.Left()
- default:
- ir.Dump("unsafenmagic", tsel)
- base.Fatalf("impossible %v node after dot insertion", r.Op())
- }
- }
- return v
- }
-
- base.Fatalf("unexpected op %v", n.Op())
- return 0
-}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
- "cmd/compile/internal/base"
- "cmd/compile/internal/ir"
- "cmd/compile/internal/types"
- "cmd/internal/obj"
- "cmd/internal/objabi"
- "cmd/internal/sys"
- "encoding/binary"
- "errors"
- "fmt"
- "go/constant"
- "go/token"
- "strings"
-)
-
-// The constant is known to runtime.
-const tmpstringbufsize = 32
-const zeroValSize = 1024 // must match value of runtime/map.go:maxZero
-
-func walk(fn *ir.Func) {
- Curfn = fn
- errorsBefore := base.Errors()
- order(fn)
- if base.Errors() > errorsBefore {
- return
- }
-
- if base.Flag.W != 0 {
- s := fmt.Sprintf("\nbefore walk %v", Curfn.Sym())
- ir.DumpList(s, Curfn.Body())
- }
-
- lno := base.Pos
-
- // Final typecheck for any unused variables.
- for i, ln := range fn.Dcl {
- if ln.Op() == ir.ONAME && (ln.Class() == ir.PAUTO || ln.Class() == ir.PAUTOHEAP) {
- ln = typecheck(ln, ctxExpr|ctxAssign).(*ir.Name)
- fn.Dcl[i] = ln
- }
- }
-
- // Propagate the used flag for typeswitch variables up to the NONAME in its definition.
- for _, ln := range fn.Dcl {
- if ln.Op() == ir.ONAME && (ln.Class() == ir.PAUTO || ln.Class() == ir.PAUTOHEAP) && ln.Defn != nil && ln.Defn.Op() == ir.OTYPESW && ln.Used() {
- ln.Defn.(*ir.TypeSwitchGuard).Used = true
- }
- }
-
- for _, ln := range fn.Dcl {
- if ln.Op() != ir.ONAME || (ln.Class() != ir.PAUTO && ln.Class() != ir.PAUTOHEAP) || ln.Sym().Name[0] == '&' || ln.Used() {
- continue
- }
- if defn, ok := ln.Defn.(*ir.TypeSwitchGuard); ok {
- if defn.Used {
- continue
- }
- base.ErrorfAt(defn.Tag.Pos(), "%v declared but not used", ln.Sym())
- defn.Used = true // suppress repeats
- } else {
- base.ErrorfAt(ln.Pos(), "%v declared but not used", ln.Sym())
- }
- }
-
- base.Pos = lno
- if base.Errors() > errorsBefore {
- return
- }
- walkstmtlist(Curfn.Body().Slice())
- if base.Flag.W != 0 {
- s := fmt.Sprintf("after walk %v", Curfn.Sym())
- ir.DumpList(s, Curfn.Body())
- }
-
- zeroResults()
- heapmoves()
- if base.Flag.W != 0 && Curfn.Enter.Len() > 0 {
- s := fmt.Sprintf("enter %v", Curfn.Sym())
- ir.DumpList(s, Curfn.Enter)
- }
-
- if instrumenting {
- instrument(fn)
- }
-}
-
-func walkstmtlist(s []ir.Node) {
- for i := range s {
- s[i] = walkstmt(s[i])
- }
-}
-
-func paramoutheap(fn *ir.Func) bool {
- for _, ln := range fn.Dcl {
- switch ln.Class() {
- case ir.PPARAMOUT:
- if isParamStackCopy(ln) || ln.Addrtaken() {
- return true
- }
-
- case ir.PAUTO:
- // stop early - parameters are over
- return false
- }
- }
-
- return false
-}
-
-// The result of walkstmt MUST be assigned back to n, e.g.
-// n.Left = walkstmt(n.Left)
-func walkstmt(n ir.Node) ir.Node {
- if n == nil {
- return n
- }
-
- setlineno(n)
-
- walkstmtlist(n.Init().Slice())
-
- switch n.Op() {
- default:
- if n.Op() == ir.ONAME {
- base.Errorf("%v is not a top level statement", n.Sym())
- } else {
- base.Errorf("%v is not a top level statement", n.Op())
- }
- ir.Dump("nottop", n)
- return n
-
- case ir.OAS,
- ir.OASOP,
- ir.OAS2,
- ir.OAS2DOTTYPE,
- ir.OAS2RECV,
- ir.OAS2FUNC,
- ir.OAS2MAPR,
- ir.OCLOSE,
- ir.OCOPY,
- ir.OCALLMETH,
- ir.OCALLINTER,
- ir.OCALL,
- ir.OCALLFUNC,
- ir.ODELETE,
- ir.OSEND,
- ir.OPRINT,
- ir.OPRINTN,
- ir.OPANIC,
- ir.ORECOVER,
- ir.OGETG:
- if n.Typecheck() == 0 {
- base.Fatalf("missing typecheck: %+v", n)
- }
- init := n.Init()
- n.PtrInit().Set(nil)
- n = walkexpr(n, &init)
- if n.Op() == ir.ONAME {
- // copy rewrote to a statement list and a temp for the length.
- // Throw away the temp to avoid plain values as statements.
- n = ir.NewBlockStmt(n.Pos(), init.Slice())
- init.Set(nil)
- }
- if init.Len() > 0 {
- switch n.Op() {
- case ir.OAS, ir.OAS2, ir.OBLOCK:
- n.PtrInit().Prepend(init.Slice()...)
-
- default:
- init.Append(n)
- n = ir.NewBlockStmt(n.Pos(), init.Slice())
- }
- }
- return n
-
- // special case for a receive where we throw away
- // the value received.
- case ir.ORECV:
- if n.Typecheck() == 0 {
- base.Fatalf("missing typecheck: %+v", n)
- }
- init := n.Init()
- n.PtrInit().Set(nil)
-
- n.SetLeft(walkexpr(n.Left(), &init))
- call := walkexpr(mkcall1(chanfn("chanrecv1", 2, n.Left().Type()), nil, &init, n.Left(), nodnil()), &init)
- return initExpr(init.Slice(), call)
-
- case ir.OBREAK,
- ir.OCONTINUE,
- ir.OFALL,
- ir.OGOTO,
- ir.OLABEL,
- ir.ODCLCONST,
- ir.ODCLTYPE,
- ir.OCHECKNIL,
- ir.OVARDEF,
- ir.OVARKILL,
- ir.OVARLIVE:
- return n
-
- case ir.ODCL:
- v := n.Left().(*ir.Name)
- if v.Class() == ir.PAUTOHEAP {
- if base.Flag.CompilingRuntime {
- base.Errorf("%v escapes to heap, not allowed in runtime", v)
- }
- nn := ir.Nod(ir.OAS, v.Name().Heapaddr, callnew(v.Type()))
- nn.SetColas(true)
- return walkstmt(typecheck(nn, ctxStmt))
- }
- return n
-
- case ir.OBLOCK:
- walkstmtlist(n.List().Slice())
- return n
-
- case ir.OCASE:
- base.Errorf("case statement out of place")
- panic("unreachable")
-
- case ir.ODEFER:
- Curfn.SetHasDefer(true)
- Curfn.NumDefers++
- if Curfn.NumDefers > maxOpenDefers {
- // Don't allow open-coded defers if there are more than
- // 8 defers in the function, since we use a single
- // byte to record active defers.
- Curfn.SetOpenCodedDeferDisallowed(true)
- }
- if n.Esc() != EscNever {
- // If n.Esc is not EscNever, then this defer occurs in a loop,
- // so open-coded defers cannot be used in this function.
- Curfn.SetOpenCodedDeferDisallowed(true)
- }
- fallthrough
- case ir.OGO:
- var init ir.Nodes
- switch call := n.Left(); call.Op() {
- case ir.OPRINT, ir.OPRINTN:
- call := call.(*ir.CallExpr)
- n.SetLeft(wrapCall(call, &init))
-
- case ir.ODELETE:
- call := call.(*ir.CallExpr)
- if mapfast(call.List().First().Type()) == mapslow {
- n.SetLeft(wrapCall(call, &init))
- } else {
- n.SetLeft(walkexpr(call, &init))
- }
-
- case ir.OCOPY:
- call := call.(*ir.BinaryExpr)
- n.SetLeft(copyany(call, &init, true))
-
- case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
- call := call.(*ir.CallExpr)
- if call.Body().Len() > 0 {
- n.SetLeft(wrapCall(call, &init))
- } else {
- n.SetLeft(walkexpr(call, &init))
- }
-
- default:
- n.SetLeft(walkexpr(call, &init))
- }
- if init.Len() > 0 {
- init.Append(n)
- return ir.NewBlockStmt(n.Pos(), init.Slice())
- }
- return n
-
- case ir.OFOR, ir.OFORUNTIL:
- if n.Left() != nil {
- walkstmtlist(n.Left().Init().Slice())
- init := n.Left().Init()
- n.Left().PtrInit().Set(nil)
- n.SetLeft(walkexpr(n.Left(), &init))
- n.SetLeft(initExpr(init.Slice(), n.Left()))
- }
-
- n.SetRight(walkstmt(n.Right()))
- if n.Op() == ir.OFORUNTIL {
- walkstmtlist(n.List().Slice())
- }
- walkstmtlist(n.Body().Slice())
- return n
-
- case ir.OIF:
- n.SetLeft(walkexpr(n.Left(), n.PtrInit()))
- walkstmtlist(n.Body().Slice())
- walkstmtlist(n.Rlist().Slice())
- return n
-
- case ir.ORETURN:
- Curfn.NumReturns++
- if n.List().Len() == 0 {
- return n
- }
- if (hasNamedResults(Curfn) && n.List().Len() > 1) || paramoutheap(Curfn) {
- // assign to the function out parameters,
- // so that ascompatee can fix up conflicts
- var rl []ir.Node
-
- for _, ln := range Curfn.Dcl {
- cl := ln.Class()
- if cl == ir.PAUTO || cl == ir.PAUTOHEAP {
- break
- }
- if cl == ir.PPARAMOUT {
- var ln ir.Node = ln
- if isParamStackCopy(ln) {
- ln = walkexpr(typecheck(ir.Nod(ir.ODEREF, ln.Name().Heapaddr, nil), ctxExpr), nil)
- }
- rl = append(rl, ln)
- }
- }
-
- if got, want := n.List().Len(), len(rl); got != want {
- // order should have rewritten multi-value function calls
- // with explicit OAS2FUNC nodes.
- base.Fatalf("expected %v return arguments, have %v", want, got)
- }
-
- // move function calls out, to make ascompatee's job easier.
- walkexprlistsafe(n.List().Slice(), n.PtrInit())
-
- n.PtrList().Set(ascompatee(n.Op(), rl, n.List().Slice(), n.PtrInit()))
- return n
- }
- walkexprlist(n.List().Slice(), n.PtrInit())
-
- // For each return parameter (lhs), assign the corresponding result (rhs).
- lhs := Curfn.Type().Results()
- rhs := n.List().Slice()
- res := make([]ir.Node, lhs.NumFields())
- for i, nl := range lhs.FieldSlice() {
- nname := ir.AsNode(nl.Nname)
- if isParamHeapCopy(nname) {
- nname = nname.Name().Stackcopy
- }
- a := ir.NewAssignStmt(base.Pos, nname, rhs[i])
- res[i] = convas(a, n.PtrInit())
- }
- n.PtrList().Set(res)
- return n
-
- case ir.ORETJMP:
- return n
-
- case ir.OINLMARK:
- return n
-
- case ir.OSELECT:
- n := n.(*ir.SelectStmt)
- walkselect(n)
- return n
-
- case ir.OSWITCH:
- n := n.(*ir.SwitchStmt)
- walkswitch(n)
- return n
-
- case ir.ORANGE:
- n := n.(*ir.RangeStmt)
- return walkrange(n)
- }
-
- // No return! Each case must return (or panic),
- // to avoid confusion about what gets returned
- // in the presence of type assertions.
-}
-
-// walk the whole tree of the body of an
-// expression or simple statement.
-// the types expressions are calculated.
-// compile-time constants are evaluated.
-// complex side effects like statements are appended to init
-func walkexprlist(s []ir.Node, init *ir.Nodes) {
- for i := range s {
- s[i] = walkexpr(s[i], init)
- }
-}
-
-func walkexprlistsafe(s []ir.Node, init *ir.Nodes) {
- for i, n := range s {
- s[i] = safeexpr(n, init)
- s[i] = walkexpr(s[i], init)
- }
-}
-
-func walkexprlistcheap(s []ir.Node, init *ir.Nodes) {
- for i, n := range s {
- s[i] = cheapexpr(n, init)
- s[i] = walkexpr(s[i], init)
- }
-}
-
-// convFuncName builds the runtime function name for interface conversion.
-// It also reports whether the function expects the data by address.
-// Not all names are possible. For example, we never generate convE2E or convE2I.
-func convFuncName(from, to *types.Type) (fnname string, needsaddr bool) {
- tkind := to.Tie()
- switch from.Tie() {
- case 'I':
- if tkind == 'I' {
- return "convI2I", false
- }
- case 'T':
- switch {
- case from.Size() == 2 && from.Align == 2:
- return "convT16", false
- case from.Size() == 4 && from.Align == 4 && !from.HasPointers():
- return "convT32", false
- case from.Size() == 8 && from.Align == types.Types[types.TUINT64].Align && !from.HasPointers():
- return "convT64", false
- }
- if sc := from.SoleComponent(); sc != nil {
- switch {
- case sc.IsString():
- return "convTstring", false
- case sc.IsSlice():
- return "convTslice", false
- }
- }
-
- switch tkind {
- case 'E':
- if !from.HasPointers() {
- return "convT2Enoptr", true
- }
- return "convT2E", true
- case 'I':
- if !from.HasPointers() {
- return "convT2Inoptr", true
- }
- return "convT2I", true
- }
- }
- base.Fatalf("unknown conv func %c2%c", from.Tie(), to.Tie())
- panic("unreachable")
-}
-
-// The result of walkexpr MUST be assigned back to n, e.g.
-// n.Left = walkexpr(n.Left, init)
-func walkexpr(n ir.Node, init *ir.Nodes) ir.Node {
- if n == nil {
- return n
- }
-
- // Eagerly checkwidth all expressions for the back end.
- if n.Type() != nil && !n.Type().WidthCalculated() {
- switch n.Type().Kind() {
- case types.TBLANK, types.TNIL, types.TIDEAL:
- default:
- checkwidth(n.Type())
- }
- }
-
- if init == n.PtrInit() {
- // not okay to use n->ninit when walking n,
- // because we might replace n with some other node
- // and would lose the init list.
- base.Fatalf("walkexpr init == &n->ninit")
- }
-
- if n.Init().Len() != 0 {
- walkstmtlist(n.Init().Slice())
- init.AppendNodes(n.PtrInit())
- }
-
- lno := setlineno(n)
-
- if base.Flag.LowerW > 1 {
- ir.Dump("before walk expr", n)
- }
-
- if n.Typecheck() != 1 {
- base.Fatalf("missed typecheck: %+v", n)
- }
-
- if n.Type().IsUntyped() {
- base.Fatalf("expression has untyped type: %+v", n)
- }
-
- if n.Op() == ir.ONAME && n.(*ir.Name).Class() == ir.PAUTOHEAP {
- nn := ir.Nod(ir.ODEREF, n.Name().Heapaddr, nil)
- nn.Left().MarkNonNil()
- return walkexpr(typecheck(nn, ctxExpr), init)
- }
-
- n = walkexpr1(n, init)
-
- // Expressions that are constant at run time but not
- // considered const by the language spec are not turned into
- // constants until walk. For example, if n is y%1 == 0, the
- // walk of y%1 may have replaced it by 0.
- // Check whether n with its updated args is itself now a constant.
- t := n.Type()
- n = evalConst(n)
- if n.Type() != t {
- base.Fatalf("evconst changed Type: %v had type %v, now %v", n, t, n.Type())
- }
- if n.Op() == ir.OLITERAL {
- n = typecheck(n, ctxExpr)
- // Emit string symbol now to avoid emitting
- // any concurrently during the backend.
- if v := n.Val(); v.Kind() == constant.String {
- _ = stringsym(n.Pos(), constant.StringVal(v))
- }
- }
-
- updateHasCall(n)
-
- if base.Flag.LowerW != 0 && n != nil {
- ir.Dump("after walk expr", n)
- }
-
- base.Pos = lno
- return n
-}
-
-func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node {
- switch n.Op() {
- default:
- ir.Dump("walk", n)
- base.Fatalf("walkexpr: switch 1 unknown op %+v", n.Op())
- panic("unreachable")
-
- case ir.ONONAME, ir.OGETG, ir.ONEWOBJ, ir.OMETHEXPR:
- return n
-
- case ir.OTYPE, ir.ONAME, ir.OLITERAL, ir.ONIL, ir.ONAMEOFFSET:
- // TODO(mdempsky): Just return n; see discussion on CL 38655.
- // Perhaps refactor to use Node.mayBeShared for these instead.
- // If these return early, make sure to still call
- // stringsym for constant strings.
- return n
-
- case ir.ONOT, ir.ONEG, ir.OPLUS, ir.OBITNOT, ir.OREAL, ir.OIMAG, ir.OSPTR, ir.OITAB, ir.OIDATA:
- n.SetLeft(walkexpr(n.Left(), init))
- return n
-
- case ir.ODOTMETH, ir.ODOTINTER:
- n.SetLeft(walkexpr(n.Left(), init))
- return n
-
- case ir.OADDR:
- n.SetLeft(walkexpr(n.Left(), init))
- return n
-
- case ir.ODEREF:
- n.SetLeft(walkexpr(n.Left(), init))
- return n
-
- case ir.OEFACE, ir.OAND, ir.OANDNOT, ir.OSUB, ir.OMUL, ir.OADD, ir.OOR, ir.OXOR, ir.OLSH, ir.ORSH:
- n.SetLeft(walkexpr(n.Left(), init))
- n.SetRight(walkexpr(n.Right(), init))
- return n
-
- case ir.ODOT, ir.ODOTPTR:
- n := n.(*ir.SelectorExpr)
- usefield(n)
- n.SetLeft(walkexpr(n.Left(), init))
- return n
-
- case ir.ODOTTYPE, ir.ODOTTYPE2:
- n.SetLeft(walkexpr(n.Left(), init))
- // Set up interface type addresses for back end.
- n.SetRight(typename(n.Type()))
- if n.Op() == ir.ODOTTYPE {
- n.Right().(*ir.AddrExpr).SetRight(typename(n.Left().Type()))
- }
- if !n.Type().IsInterface() && !n.Left().Type().IsEmptyInterface() {
- n.PtrList().Set1(itabname(n.Type(), n.Left().Type()))
- }
- return n
-
- case ir.OLEN, ir.OCAP:
- if isRuneCount(n) {
- // Replace len([]rune(string)) with runtime.countrunes(string).
- return mkcall("countrunes", n.Type(), init, conv(n.Left().(*ir.ConvExpr).Left(), types.Types[types.TSTRING]))
- }
-
- n.SetLeft(walkexpr(n.Left(), init))
-
- // replace len(*[10]int) with 10.
- // delayed until now to preserve side effects.
- t := n.Left().Type()
-
- if t.IsPtr() {
- t = t.Elem()
- }
- if t.IsArray() {
- safeexpr(n.Left(), init)
- con := origIntConst(n, t.NumElem())
- con.SetTypecheck(1)
- return con
- }
- return n
-
- case ir.OCOMPLEX:
- n.SetLeft(walkexpr(n.Left(), init))
- n.SetRight(walkexpr(n.Right(), init))
- return n
-
- case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
- n := n.(*ir.BinaryExpr)
- return walkcompare(n, init)
-
- case ir.OANDAND, ir.OOROR:
- n.SetLeft(walkexpr(n.Left(), init))
-
- // cannot put side effects from n.Right on init,
- // because they cannot run before n.Left is checked.
- // save elsewhere and store on the eventual n.Right.
- var ll ir.Nodes
-
- n.SetRight(walkexpr(n.Right(), &ll))
- n.SetRight(initExpr(ll.Slice(), n.Right()))
- return n
-
- case ir.OPRINT, ir.OPRINTN:
- return walkprint(n.(*ir.CallExpr), init)
-
- case ir.OPANIC:
- return mkcall("gopanic", nil, init, n.Left())
-
- case ir.ORECOVER:
- return mkcall("gorecover", n.Type(), init, nodAddr(nodfp))
-
- case ir.OCLOSUREREAD, ir.OCFUNC:
- return n
-
- case ir.OCALLINTER, ir.OCALLFUNC, ir.OCALLMETH:
- n := n.(*ir.CallExpr)
- if n.Op() == ir.OCALLINTER {
- usemethod(n)
- markUsedIfaceMethod(n)
- }
-
- if n.Op() == ir.OCALLFUNC && n.Left().Op() == ir.OCLOSURE {
- // Transform direct call of a closure to call of a normal function.
- // transformclosure already did all preparation work.
-
- // Prepend captured variables to argument list.
- clo := n.Left().(*ir.ClosureExpr)
- n.PtrList().Prepend(clo.Func().ClosureEnter.Slice()...)
- clo.Func().ClosureEnter.Set(nil)
-
- // Replace OCLOSURE with ONAME/PFUNC.
- n.SetLeft(clo.Func().Nname)
-
- // Update type of OCALLFUNC node.
- // Output arguments had not changed, but their offsets could.
- if n.Left().Type().NumResults() == 1 {
- n.SetType(n.Left().Type().Results().Field(0).Type)
- } else {
- n.SetType(n.Left().Type().Results())
- }
- }
-
- walkCall(n, init)
- return n
-
- case ir.OAS, ir.OASOP:
- init.AppendNodes(n.PtrInit())
-
- var left, right ir.Node
- switch n.Op() {
- case ir.OAS:
- left, right = n.Left(), n.Right()
- case ir.OASOP:
- left, right = n.Left(), n.Right()
- }
-
- // Recognize m[k] = append(m[k], ...) so we can reuse
- // the mapassign call.
- var mapAppend *ir.CallExpr
- if left.Op() == ir.OINDEXMAP && right.Op() == ir.OAPPEND {
- mapAppend = right.(*ir.CallExpr)
- if !samesafeexpr(left, mapAppend.List().First()) {
- base.Fatalf("not same expressions: %v != %v", left, mapAppend.List().First())
- }
- }
-
- left = walkexpr(left, init)
- left = safeexpr(left, init)
- if mapAppend != nil {
- mapAppend.List().SetFirst(left)
- }
-
- if n.Op() == ir.OASOP {
- // Rewrite x op= y into x = x op y.
- n = ir.Nod(ir.OAS, left,
- typecheck(ir.NewBinaryExpr(base.Pos, n.(*ir.AssignOpStmt).SubOp(), left, right), ctxExpr))
- } else {
- n.(*ir.AssignStmt).SetLeft(left)
- }
- as := n.(*ir.AssignStmt)
-
- if oaslit(as, init) {
- return ir.NodAt(as.Pos(), ir.OBLOCK, nil, nil)
- }
-
- if as.Right() == nil {
- // TODO(austin): Check all "implicit zeroing"
- return as
- }
-
- if !instrumenting && isZero(as.Right()) {
- return as
- }
-
- switch as.Right().Op() {
- default:
- as.SetRight(walkexpr(as.Right(), init))
-
- case ir.ORECV:
- // x = <-c; as.Left is x, as.Right.Left is c.
- // order.stmt made sure x is addressable.
- recv := as.Right().(*ir.UnaryExpr)
- recv.SetLeft(walkexpr(recv.Left(), init))
-
- n1 := nodAddr(as.Left())
- r := recv.Left() // the channel
- return mkcall1(chanfn("chanrecv1", 2, r.Type()), nil, init, r, n1)
-
- case ir.OAPPEND:
- // x = append(...)
- call := as.Right().(*ir.CallExpr)
- if call.Type().Elem().NotInHeap() {
- base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", call.Type().Elem())
- }
- var r ir.Node
- switch {
- case isAppendOfMake(call):
- // x = append(y, make([]T, y)...)
- r = extendslice(call, init)
- case call.IsDDD():
- r = appendslice(call, init) // also works for append(slice, string).
- default:
- r = walkappend(call, init, as)
- }
- as.SetRight(r)
- if r.Op() == ir.OAPPEND {
- // Left in place for back end.
- // Do not add a new write barrier.
- // Set up address of type for back end.
- r.(*ir.CallExpr).SetLeft(typename(r.Type().Elem()))
- return as
- }
- // Otherwise, lowered for race detector.
- // Treat as ordinary assignment.
- }
-
- if as.Left() != nil && as.Right() != nil {
- return convas(as, init)
- }
- return as
-
- case ir.OAS2:
- init.AppendNodes(n.PtrInit())
- walkexprlistsafe(n.List().Slice(), init)
- walkexprlistsafe(n.Rlist().Slice(), init)
- return liststmt(ascompatee(ir.OAS, n.List().Slice(), n.Rlist().Slice(), init))
-
- // a,b,... = fn()
- case ir.OAS2FUNC:
- init.AppendNodes(n.PtrInit())
-
- r := n.Rlist().First()
- walkexprlistsafe(n.List().Slice(), init)
- r = walkexpr(r, init)
-
- if IsIntrinsicCall(r.(*ir.CallExpr)) {
- n.PtrRlist().Set1(r)
- return n
- }
- init.Append(r)
-
- ll := ascompatet(n.List(), r.Type())
- return liststmt(ll)
-
- // x, y = <-c
- // order.stmt made sure x is addressable or blank.
- case ir.OAS2RECV:
- init.AppendNodes(n.PtrInit())
-
- r := n.Rlist().First().(*ir.UnaryExpr) // recv
- walkexprlistsafe(n.List().Slice(), init)
- r.SetLeft(walkexpr(r.Left(), init))
- var n1 ir.Node
- if ir.IsBlank(n.List().First()) {
- n1 = nodnil()
- } else {
- n1 = nodAddr(n.List().First())
- }
- fn := chanfn("chanrecv2", 2, r.Left().Type())
- ok := n.List().Second()
- call := mkcall1(fn, types.Types[types.TBOOL], init, r.Left(), n1)
- return typecheck(ir.Nod(ir.OAS, ok, call), ctxStmt)
-
- // a,b = m[i]
- case ir.OAS2MAPR:
- init.AppendNodes(n.PtrInit())
-
- r := n.Rlist().First().(*ir.IndexExpr)
- walkexprlistsafe(n.List().Slice(), init)
- r.SetLeft(walkexpr(r.Left(), init))
- r.SetRight(walkexpr(r.Right(), init))
- t := r.Left().Type()
-
- fast := mapfast(t)
- var key ir.Node
- if fast != mapslow {
- // fast versions take key by value
- key = r.Right()
- } else {
- // standard version takes key by reference
- // order.expr made sure key is addressable.
- key = nodAddr(r.Right())
- }
-
- // from:
- // a,b = m[i]
- // to:
- // var,b = mapaccess2*(t, m, i)
- // a = *var
- a := n.List().First()
-
- var call *ir.CallExpr
- if w := t.Elem().Width; w <= zeroValSize {
- fn := mapfn(mapaccess2[fast], t)
- call = mkcall1(fn, fn.Type().Results(), init, typename(t), r.Left(), key)
- } else {
- fn := mapfn("mapaccess2_fat", t)
- z := zeroaddr(w)
- call = mkcall1(fn, fn.Type().Results(), init, typename(t), r.Left(), key, z)
- }
-
- // mapaccess2* returns a typed bool, but due to spec changes,
- // the boolean result of i.(T) is now untyped so we make it the
- // same type as the variable on the lhs.
- if ok := n.List().Second(); !ir.IsBlank(ok) && ok.Type().IsBoolean() {
- call.Type().Field(1).Type = ok.Type()
- }
- n.PtrRlist().Set1(call)
- n.SetOp(ir.OAS2FUNC)
-
- // don't generate a = *var if a is _
- if ir.IsBlank(a) {
- return walkexpr(typecheck(n, ctxStmt), init)
- }
-
- var_ := temp(types.NewPtr(t.Elem()))
- var_.SetTypecheck(1)
- var_.MarkNonNil() // mapaccess always returns a non-nil pointer
-
- n.List().SetFirst(var_)
- init.Append(walkexpr(n, init))
-
- as := ir.Nod(ir.OAS, a, ir.Nod(ir.ODEREF, var_, nil))
- return walkexpr(typecheck(as, ctxStmt), init)
-
- case ir.ODELETE:
- init.AppendNodes(n.PtrInit())
- map_ := n.List().First()
- key := n.List().Second()
- map_ = walkexpr(map_, init)
- key = walkexpr(key, init)
-
- t := map_.Type()
- fast := mapfast(t)
- if fast == mapslow {
- // order.stmt made sure key is addressable.
- key = nodAddr(key)
- }
- return mkcall1(mapfndel(mapdelete[fast], t), nil, init, typename(t), map_, key)
-
- case ir.OAS2DOTTYPE:
- walkexprlistsafe(n.List().Slice(), init)
- n.PtrRlist().SetIndex(0, walkexpr(n.Rlist().First(), init))
- return n
-
- case ir.OCONVIFACE:
- n.SetLeft(walkexpr(n.Left(), init))
-
- fromType := n.Left().Type()
- toType := n.Type()
-
- if !fromType.IsInterface() && !ir.IsBlank(Curfn.Nname) { // skip unnamed functions (func _())
- markTypeUsedInInterface(fromType, Curfn.LSym)
- }
-
- // typeword generates the type word of the interface value.
- typeword := func() ir.Node {
- if toType.IsEmptyInterface() {
- return typename(fromType)
- }
- return itabname(fromType, toType)
- }
-
- // Optimize convT2E or convT2I as a two-word copy when T is pointer-shaped.
- if isdirectiface(fromType) {
- l := ir.Nod(ir.OEFACE, typeword(), n.Left())
- l.SetType(toType)
- l.SetTypecheck(n.Typecheck())
- return l
- }
-
- if staticuint64s == nil {
- staticuint64s = NewName(Runtimepkg.Lookup("staticuint64s"))
- staticuint64s.SetClass(ir.PEXTERN)
- // The actual type is [256]uint64, but we use [256*8]uint8 so we can address
- // individual bytes.
- staticuint64s.SetType(types.NewArray(types.Types[types.TUINT8], 256*8))
- zerobase = NewName(Runtimepkg.Lookup("zerobase"))
- zerobase.SetClass(ir.PEXTERN)
- zerobase.SetType(types.Types[types.TUINTPTR])
- }
-
- // Optimize convT2{E,I} for many cases in which T is not pointer-shaped,
- // by using an existing addressable value identical to n.Left
- // or creating one on the stack.
- var value ir.Node
- switch {
- case fromType.Size() == 0:
- // n.Left is zero-sized. Use zerobase.
- cheapexpr(n.Left(), init) // Evaluate n.Left for side-effects. See issue 19246.
- value = zerobase
- case fromType.IsBoolean() || (fromType.Size() == 1 && fromType.IsInteger()):
- // n.Left is a bool/byte. Use staticuint64s[n.Left * 8] on little-endian
- // and staticuint64s[n.Left * 8 + 7] on big-endian.
- n.SetLeft(cheapexpr(n.Left(), init))
- // byteindex widens n.Left so that the multiplication doesn't overflow.
- index := ir.Nod(ir.OLSH, byteindex(n.Left()), nodintconst(3))
- if thearch.LinkArch.ByteOrder == binary.BigEndian {
- index = ir.Nod(ir.OADD, index, nodintconst(7))
- }
- xe := ir.Nod(ir.OINDEX, staticuint64s, index)
- xe.SetBounded(true)
- value = xe
- case n.Left().Op() == ir.ONAME && n.Left().(*ir.Name).Class() == ir.PEXTERN && n.Left().(*ir.Name).Readonly():
- // n.Left is a readonly global; use it directly.
- value = n.Left()
- case !fromType.IsInterface() && n.Esc() == EscNone && fromType.Width <= 1024:
- // n.Left does not escape. Use a stack temporary initialized to n.Left.
- value = temp(fromType)
- init.Append(typecheck(ir.Nod(ir.OAS, value, n.Left()), ctxStmt))
- }
-
- if value != nil {
- // Value is identical to n.Left.
- // Construct the interface directly: {type/itab, &value}.
- l := ir.Nod(ir.OEFACE, typeword(), typecheck(nodAddr(value), ctxExpr))
- l.SetType(toType)
- l.SetTypecheck(n.Typecheck())
- return l
- }
-
- // Implement interface to empty interface conversion.
- // tmp = i.itab
- // if tmp != nil {
- // tmp = tmp.type
- // }
- // e = iface{tmp, i.data}
- if toType.IsEmptyInterface() && fromType.IsInterface() && !fromType.IsEmptyInterface() {
- // Evaluate the input interface.
- c := temp(fromType)
- init.Append(ir.Nod(ir.OAS, c, n.Left()))
-
- // Get the itab out of the interface.
- tmp := temp(types.NewPtr(types.Types[types.TUINT8]))
- init.Append(ir.Nod(ir.OAS, tmp, typecheck(ir.Nod(ir.OITAB, c, nil), ctxExpr)))
-
- // Get the type out of the itab.
- nif := ir.Nod(ir.OIF, typecheck(ir.Nod(ir.ONE, tmp, nodnil()), ctxExpr), nil)
- nif.PtrBody().Set1(ir.Nod(ir.OAS, tmp, itabType(tmp)))
- init.Append(nif)
-
- // Build the result.
- e := ir.Nod(ir.OEFACE, tmp, ifaceData(n.Pos(), c, types.NewPtr(types.Types[types.TUINT8])))
- e.SetType(toType) // assign type manually, typecheck doesn't understand OEFACE.
- e.SetTypecheck(1)
- return e
- }
-
- fnname, needsaddr := convFuncName(fromType, toType)
-
- if !needsaddr && !fromType.IsInterface() {
- // Use a specialized conversion routine that only returns a data pointer.
- // ptr = convT2X(val)
- // e = iface{typ/tab, ptr}
- fn := syslook(fnname)
- dowidth(fromType)
- fn = substArgTypes(fn, fromType)
- dowidth(fn.Type())
- call := ir.Nod(ir.OCALL, fn, nil)
- call.PtrList().Set1(n.Left())
- e := ir.Nod(ir.OEFACE, typeword(), safeexpr(walkexpr(typecheck(call, ctxExpr), init), init))
- e.SetType(toType)
- e.SetTypecheck(1)
- return e
- }
-
- var tab ir.Node
- if fromType.IsInterface() {
- // convI2I
- tab = typename(toType)
- } else {
- // convT2x
- tab = typeword()
- }
-
- v := n.Left()
- if needsaddr {
- // Types of large or unknown size are passed by reference.
- // Orderexpr arranged for n.Left to be a temporary for all
- // the conversions it could see. Comparison of an interface
- // with a non-interface, especially in a switch on interface value
- // with non-interface cases, is not visible to order.stmt, so we
- // have to fall back on allocating a temp here.
- if !islvalue(v) {
- v = copyexpr(v, v.Type(), init)
- }
- v = nodAddr(v)
- }
-
- dowidth(fromType)
- fn := syslook(fnname)
- fn = substArgTypes(fn, fromType, toType)
- dowidth(fn.Type())
- call := ir.Nod(ir.OCALL, fn, nil)
- call.PtrList().Set2(tab, v)
- return walkexpr(typecheck(call, ctxExpr), init)
-
- case ir.OCONV, ir.OCONVNOP:
- n := n.(*ir.ConvExpr)
- n.SetLeft(walkexpr(n.Left(), init))
- if n.Op() == ir.OCONVNOP && n.Type() == n.Left().Type() {
- return n.Left()
- }
- if n.Op() == ir.OCONVNOP && checkPtr(Curfn, 1) {
- if n.Type().IsPtr() && n.Left().Type().IsUnsafePtr() { // unsafe.Pointer to *T
- return walkCheckPtrAlignment(n, init, nil)
- }
- if n.Type().IsUnsafePtr() && n.Left().Type().IsUintptr() { // uintptr to unsafe.Pointer
- return walkCheckPtrArithmetic(n, init)
- }
- }
- param, result := rtconvfn(n.Left().Type(), n.Type())
- if param == types.Txxx {
- return n
- }
- fn := types.BasicTypeNames[param] + "to" + types.BasicTypeNames[result]
- return conv(mkcall(fn, types.Types[result], init, conv(n.Left(), types.Types[param])), n.Type())
-
- case ir.ODIV, ir.OMOD:
- n.SetLeft(walkexpr(n.Left(), init))
- n.SetRight(walkexpr(n.Right(), init))
-
- // rewrite complex div into function call.
- et := n.Left().Type().Kind()
-
- if isComplex[et] && n.Op() == ir.ODIV {
- t := n.Type()
- call := mkcall("complex128div", types.Types[types.TCOMPLEX128], init, conv(n.Left(), types.Types[types.TCOMPLEX128]), conv(n.Right(), types.Types[types.TCOMPLEX128]))
- return conv(call, t)
- }
-
- // Nothing to do for float divisions.
- if isFloat[et] {
- return n
- }
-
- // rewrite 64-bit div and mod on 32-bit architectures.
- // TODO: Remove this code once we can introduce
- // runtime calls late in SSA processing.
- if Widthreg < 8 && (et == types.TINT64 || et == types.TUINT64) {
- if n.Right().Op() == ir.OLITERAL {
- // Leave div/mod by constant powers of 2 or small 16-bit constants.
- // The SSA backend will handle those.
- switch et {
- case types.TINT64:
- c := ir.Int64Val(n.Right())
- if c < 0 {
- c = -c
- }
- if c != 0 && c&(c-1) == 0 {
- return n
- }
- case types.TUINT64:
- c := ir.Uint64Val(n.Right())
- if c < 1<<16 {
- return n
- }
- if c != 0 && c&(c-1) == 0 {
- return n
- }
- }
- }
- var fn string
- if et == types.TINT64 {
- fn = "int64"
- } else {
- fn = "uint64"
- }
- if n.Op() == ir.ODIV {
- fn += "div"
- } else {
- fn += "mod"
- }
- return mkcall(fn, n.Type(), init, conv(n.Left(), types.Types[et]), conv(n.Right(), types.Types[et]))
- }
- return n
-
- case ir.OINDEX:
- n.SetLeft(walkexpr(n.Left(), init))
-
- // save the original node for bounds checking elision.
- // If it was a ODIV/OMOD walk might rewrite it.
- r := n.Right()
-
- n.SetRight(walkexpr(n.Right(), init))
-
- // if range of type cannot exceed static array bound,
- // disable bounds check.
- if n.Bounded() {
- return n
- }
- t := n.Left().Type()
- if t != nil && t.IsPtr() {
- t = t.Elem()
- }
- if t.IsArray() {
- n.SetBounded(bounded(r, t.NumElem()))
- if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Right(), constant.Int) {
- base.Warn("index bounds check elided")
- }
- if smallintconst(n.Right()) && !n.Bounded() {
- base.Errorf("index out of bounds")
- }
- } else if ir.IsConst(n.Left(), constant.String) {
- n.SetBounded(bounded(r, int64(len(ir.StringVal(n.Left())))))
- if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Right(), constant.Int) {
- base.Warn("index bounds check elided")
- }
- if smallintconst(n.Right()) && !n.Bounded() {
- base.Errorf("index out of bounds")
- }
- }
-
- if ir.IsConst(n.Right(), constant.Int) {
- if v := n.Right().Val(); constant.Sign(v) < 0 || doesoverflow(v, types.Types[types.TINT]) {
- base.Errorf("index out of bounds")
- }
- }
- return n
-
- case ir.OINDEXMAP:
- // Replace m[k] with *map{access1,assign}(maptype, m, &k)
- n.SetLeft(walkexpr(n.Left(), init))
- n.SetRight(walkexpr(n.Right(), init))
- map_ := n.Left()
- key := n.Right()
- t := map_.Type()
- var call *ir.CallExpr
- if n.IndexMapLValue() {
- // This m[k] expression is on the left-hand side of an assignment.
- fast := mapfast(t)
- if fast == mapslow {
- // standard version takes key by reference.
- // order.expr made sure key is addressable.
- key = nodAddr(key)
- }
- call = mkcall1(mapfn(mapassign[fast], t), nil, init, typename(t), map_, key)
- } else {
- // m[k] is not the target of an assignment.
- fast := mapfast(t)
- if fast == mapslow {
- // standard version takes key by reference.
- // order.expr made sure key is addressable.
- key = nodAddr(key)
- }
-
- if w := t.Elem().Width; w <= zeroValSize {
- call = mkcall1(mapfn(mapaccess1[fast], t), types.NewPtr(t.Elem()), init, typename(t), map_, key)
- } else {
- z := zeroaddr(w)
- call = mkcall1(mapfn("mapaccess1_fat", t), types.NewPtr(t.Elem()), init, typename(t), map_, key, z)
- }
- }
- call.SetType(types.NewPtr(t.Elem()))
- call.MarkNonNil() // mapaccess1* and mapassign always return non-nil pointers.
- star := ir.Nod(ir.ODEREF, call, nil)
- star.SetType(t.Elem())
- star.SetTypecheck(1)
- return star
-
- case ir.ORECV:
- base.Fatalf("walkexpr ORECV") // should see inside OAS only
- panic("unreachable")
-
- case ir.OSLICEHEADER:
- n.SetLeft(walkexpr(n.Left(), init))
- n.List().SetFirst(walkexpr(n.List().First(), init))
- n.List().SetSecond(walkexpr(n.List().Second(), init))
- return n
-
- case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR:
- n := n.(*ir.SliceExpr)
-
- checkSlice := checkPtr(Curfn, 1) && n.Op() == ir.OSLICE3ARR && n.Left().Op() == ir.OCONVNOP && n.Left().(*ir.ConvExpr).Left().Type().IsUnsafePtr()
- if checkSlice {
- conv := n.Left().(*ir.ConvExpr)
- conv.SetLeft(walkexpr(conv.Left(), init))
- } else {
- n.SetLeft(walkexpr(n.Left(), init))
- }
-
- low, high, max := n.SliceBounds()
- low = walkexpr(low, init)
- if low != nil && isZero(low) {
- // Reduce x[0:j] to x[:j] and x[0:j:k] to x[:j:k].
- low = nil
- }
- high = walkexpr(high, init)
- max = walkexpr(max, init)
- n.SetSliceBounds(low, high, max)
- if checkSlice {
- n.SetLeft(walkCheckPtrAlignment(n.Left().(*ir.ConvExpr), init, max))
- }
-
- if n.Op().IsSlice3() {
- if max != nil && max.Op() == ir.OCAP && samesafeexpr(n.Left(), max.(*ir.UnaryExpr).Left()) {
- // Reduce x[i:j:cap(x)] to x[i:j].
- if n.Op() == ir.OSLICE3 {
- n.SetOp(ir.OSLICE)
- } else {
- n.SetOp(ir.OSLICEARR)
- }
- return reduceSlice(n)
- }
- return n
- }
- return reduceSlice(n)
-
- case ir.ONEW:
- if n.Type().Elem().NotInHeap() {
- base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type().Elem())
- }
- if n.Esc() == EscNone {
- if n.Type().Elem().Width >= maxImplicitStackVarSize {
- base.Fatalf("large ONEW with EscNone: %v", n)
- }
- r := temp(n.Type().Elem())
- init.Append(typecheck(ir.Nod(ir.OAS, r, nil), ctxStmt)) // zero temp
- return typecheck(nodAddr(r), ctxExpr)
- }
- return callnew(n.Type().Elem())
-
- case ir.OADDSTR:
- return addstr(n.(*ir.AddStringExpr), init)
-
- case ir.OAPPEND:
- // order should make sure we only see OAS(node, OAPPEND), which we handle above.
- base.Fatalf("append outside assignment")
- panic("unreachable")
-
- case ir.OCOPY:
- return copyany(n.(*ir.BinaryExpr), init, instrumenting && !base.Flag.CompilingRuntime)
-
- case ir.OCLOSE:
- // cannot use chanfn - closechan takes any, not chan any
- fn := syslook("closechan")
- fn = substArgTypes(fn, n.Left().Type())
- return mkcall1(fn, nil, init, n.Left())
-
- case ir.OMAKECHAN:
- // When size fits into int, use makechan instead of
- // makechan64, which is faster and shorter on 32 bit platforms.
- size := n.Left()
- fnname := "makechan64"
- argtype := types.Types[types.TINT64]
-
- // Type checking guarantees that TIDEAL size is positive and fits in an int.
- // The case of size overflow when converting TUINT or TUINTPTR to TINT
- // will be handled by the negative range checks in makechan during runtime.
- if size.Type().IsKind(types.TIDEAL) || size.Type().Size() <= types.Types[types.TUINT].Size() {
- fnname = "makechan"
- argtype = types.Types[types.TINT]
- }
-
- return mkcall1(chanfn(fnname, 1, n.Type()), n.Type(), init, typename(n.Type()), conv(size, argtype))
-
- case ir.OMAKEMAP:
- t := n.Type()
- hmapType := hmap(t)
- hint := n.Left()
-
- // var h *hmap
- var h ir.Node
- if n.Esc() == EscNone {
- // Allocate hmap on stack.
-
- // var hv hmap
- hv := temp(hmapType)
- init.Append(typecheck(ir.Nod(ir.OAS, hv, nil), ctxStmt))
- // h = &hv
- h = nodAddr(hv)
-
- // Allocate one bucket pointed to by hmap.buckets on stack if hint
- // is not larger than BUCKETSIZE. In case hint is larger than
- // BUCKETSIZE runtime.makemap will allocate the buckets on the heap.
- // Maximum key and elem size is 128 bytes, larger objects
- // are stored with an indirection. So max bucket size is 2048+eps.
- if !ir.IsConst(hint, constant.Int) ||
- constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(BUCKETSIZE)) {
-
- // In case hint is larger than BUCKETSIZE runtime.makemap
- // will allocate the buckets on the heap, see #20184
- //
- // if hint <= BUCKETSIZE {
- // var bv bmap
- // b = &bv
- // h.buckets = b
- // }
-
- nif := ir.Nod(ir.OIF, ir.Nod(ir.OLE, hint, nodintconst(BUCKETSIZE)), nil)
- nif.SetLikely(true)
-
- // var bv bmap
- bv := temp(bmap(t))
- nif.PtrBody().Append(ir.Nod(ir.OAS, bv, nil))
-
- // b = &bv
- b := nodAddr(bv)
-
- // h.buckets = b
- bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap
- na := ir.Nod(ir.OAS, nodSym(ir.ODOT, h, bsym), b)
- nif.PtrBody().Append(na)
- appendWalkStmt(init, nif)
- }
- }
-
- if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(BUCKETSIZE)) {
- // Handling make(map[any]any) and
- // make(map[any]any, hint) where hint <= BUCKETSIZE
- // special allows for faster map initialization and
- // improves binary size by using calls with fewer arguments.
- // For hint <= BUCKETSIZE overLoadFactor(hint, 0) is false
- // and no buckets will be allocated by makemap. Therefore,
- // no buckets need to be allocated in this code path.
- if n.Esc() == EscNone {
- // Only need to initialize h.hash0 since
- // hmap h has been allocated on the stack already.
- // h.hash0 = fastrand()
- rand := mkcall("fastrand", types.Types[types.TUINT32], init)
- hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap
- appendWalkStmt(init, ir.Nod(ir.OAS, nodSym(ir.ODOT, h, hashsym), rand))
- return convnop(h, t)
- }
- // Call runtime.makehmap to allocate an
- // hmap on the heap and initialize hmap's hash0 field.
- fn := syslook("makemap_small")
- fn = substArgTypes(fn, t.Key(), t.Elem())
- return mkcall1(fn, n.Type(), init)
- }
-
- if n.Esc() != EscNone {
- h = nodnil()
- }
- // Map initialization with a variable or large hint is
- // more complicated. We therefore generate a call to
- // runtime.makemap to initialize hmap and allocate the
- // map buckets.
-
- // When hint fits into int, use makemap instead of
- // makemap64, which is faster and shorter on 32 bit platforms.
- fnname := "makemap64"
- argtype := types.Types[types.TINT64]
-
- // Type checking guarantees that TIDEAL hint is positive and fits in an int.
- // See checkmake call in TMAP case of OMAKE case in OpSwitch in typecheck1 function.
- // The case of hint overflow when converting TUINT or TUINTPTR to TINT
- // will be handled by the negative range checks in makemap during runtime.
- if hint.Type().IsKind(types.TIDEAL) || hint.Type().Size() <= types.Types[types.TUINT].Size() {
- fnname = "makemap"
- argtype = types.Types[types.TINT]
- }
-
- fn := syslook(fnname)
- fn = substArgTypes(fn, hmapType, t.Key(), t.Elem())
- return mkcall1(fn, n.Type(), init, typename(n.Type()), conv(hint, argtype), h)
-
- case ir.OMAKESLICE:
- l := n.Left()
- r := n.Right()
- if r == nil {
- r = safeexpr(l, init)
- l = r
- }
- t := n.Type()
- if t.Elem().NotInHeap() {
- base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
- }
- if n.Esc() == EscNone {
- if why := heapAllocReason(n); why != "" {
- base.Fatalf("%v has EscNone, but %v", n, why)
- }
- // var arr [r]T
- // n = arr[:l]
- i := indexconst(r)
- if i < 0 {
- base.Fatalf("walkexpr: invalid index %v", r)
- }
-
- // cap is constrained to [0,2^31) or [0,2^63) depending on whether
- // we're in 32-bit or 64-bit systems. So it's safe to do:
- //
- // if uint64(len) > cap {
- // if len < 0 { panicmakeslicelen() }
- // panicmakeslicecap()
- // }
- nif := ir.Nod(ir.OIF, ir.Nod(ir.OGT, conv(l, types.Types[types.TUINT64]), nodintconst(i)), nil)
- niflen := ir.Nod(ir.OIF, ir.Nod(ir.OLT, l, nodintconst(0)), nil)
- niflen.PtrBody().Set1(mkcall("panicmakeslicelen", nil, init))
- nif.PtrBody().Append(niflen, mkcall("panicmakeslicecap", nil, init))
- init.Append(typecheck(nif, ctxStmt))
-
- t = types.NewArray(t.Elem(), i) // [r]T
- var_ := temp(t)
- appendWalkStmt(init, ir.Nod(ir.OAS, var_, nil)) // zero temp
- r := ir.Nod(ir.OSLICE, var_, nil) // arr[:l]
- r.SetSliceBounds(nil, l, nil)
- // The conv is necessary in case n.Type is named.
- return walkexpr(typecheck(conv(r, n.Type()), ctxExpr), init)
- }
-
- // n escapes; set up a call to makeslice.
- // When len and cap can fit into int, use makeslice instead of
- // makeslice64, which is faster and shorter on 32 bit platforms.
-
- len, cap := l, r
-
- fnname := "makeslice64"
- argtype := types.Types[types.TINT64]
-
- // Type checking guarantees that TIDEAL len/cap are positive and fit in an int.
- // The case of len or cap overflow when converting TUINT or TUINTPTR to TINT
- // will be handled by the negative range checks in makeslice during runtime.
- if (len.Type().IsKind(types.TIDEAL) || len.Type().Size() <= types.Types[types.TUINT].Size()) &&
- (cap.Type().IsKind(types.TIDEAL) || cap.Type().Size() <= types.Types[types.TUINT].Size()) {
- fnname = "makeslice"
- argtype = types.Types[types.TINT]
- }
-
- m := ir.Nod(ir.OSLICEHEADER, nil, nil)
- m.SetType(t)
-
- fn := syslook(fnname)
- m.SetLeft(mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), conv(len, argtype), conv(cap, argtype)))
- m.Left().MarkNonNil()
- m.PtrList().Set2(conv(len, types.Types[types.TINT]), conv(cap, types.Types[types.TINT]))
- return walkexpr(typecheck(m, ctxExpr), init)
-
- case ir.OMAKESLICECOPY:
- if n.Esc() == EscNone {
- base.Fatalf("OMAKESLICECOPY with EscNone: %v", n)
- }
-
- t := n.Type()
- if t.Elem().NotInHeap() {
- base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
- }
-
- length := conv(n.Left(), types.Types[types.TINT])
- copylen := ir.Nod(ir.OLEN, n.Right(), nil)
- copyptr := ir.Nod(ir.OSPTR, n.Right(), nil)
-
- if !t.Elem().HasPointers() && n.Bounded() {
- // When len(to)==len(from) and elements have no pointers:
- // replace make+copy with runtime.mallocgc+runtime.memmove.
-
- // We do not check for overflow of len(to)*elem.Width here
- // since len(from) is an existing checked slice capacity
- // with same elem.Width for the from slice.
- size := ir.Nod(ir.OMUL, conv(length, types.Types[types.TUINTPTR]), conv(nodintconst(t.Elem().Width), types.Types[types.TUINTPTR]))
-
- // instantiate mallocgc(size uintptr, typ *byte, needszero bool) unsafe.Pointer
- fn := syslook("mallocgc")
- sh := ir.Nod(ir.OSLICEHEADER, nil, nil)
- sh.SetLeft(mkcall1(fn, types.Types[types.TUNSAFEPTR], init, size, nodnil(), nodbool(false)))
- sh.Left().MarkNonNil()
- sh.PtrList().Set2(length, length)
- sh.SetType(t)
-
- s := temp(t)
- r := typecheck(ir.Nod(ir.OAS, s, sh), ctxStmt)
- r = walkexpr(r, init)
- init.Append(r)
-
- // instantiate memmove(to *any, frm *any, size uintptr)
- fn = syslook("memmove")
- fn = substArgTypes(fn, t.Elem(), t.Elem())
- ncopy := mkcall1(fn, nil, init, ir.Nod(ir.OSPTR, s, nil), copyptr, size)
- init.Append(walkexpr(typecheck(ncopy, ctxStmt), init))
-
- return s
- }
- // Replace make+copy with runtime.makeslicecopy.
- // instantiate makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer
- fn := syslook("makeslicecopy")
- s := ir.Nod(ir.OSLICEHEADER, nil, nil)
- s.SetLeft(mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), length, copylen, conv(copyptr, types.Types[types.TUNSAFEPTR])))
- s.Left().MarkNonNil()
- s.PtrList().Set2(length, length)
- s.SetType(t)
- return walkexpr(typecheck(s, ctxExpr), init)
-
- case ir.ORUNESTR:
- a := nodnil()
- if n.Esc() == EscNone {
- t := types.NewArray(types.Types[types.TUINT8], 4)
- a = nodAddr(temp(t))
- }
- // intstring(*[4]byte, rune)
- return mkcall("intstring", n.Type(), init, a, conv(n.Left(), types.Types[types.TINT64]))
-
- case ir.OBYTES2STR, ir.ORUNES2STR:
- a := nodnil()
- if n.Esc() == EscNone {
- // Create temporary buffer for string on stack.
- t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize)
- a = nodAddr(temp(t))
- }
- if n.Op() == ir.ORUNES2STR {
- // slicerunetostring(*[32]byte, []rune) string
- return mkcall("slicerunetostring", n.Type(), init, a, n.Left())
- }
- // slicebytetostring(*[32]byte, ptr *byte, n int) string
- n.SetLeft(cheapexpr(n.Left(), init))
- ptr, len := backingArrayPtrLen(n.Left())
- return mkcall("slicebytetostring", n.Type(), init, a, ptr, len)
-
- case ir.OBYTES2STRTMP:
- n.SetLeft(walkexpr(n.Left(), init))
- if !instrumenting {
- // Let the backend handle OBYTES2STRTMP directly
- // to avoid a function call to slicebytetostringtmp.
- return n
- }
- // slicebytetostringtmp(ptr *byte, n int) string
- n.SetLeft(cheapexpr(n.Left(), init))
- ptr, len := backingArrayPtrLen(n.Left())
- return mkcall("slicebytetostringtmp", n.Type(), init, ptr, len)
-
- case ir.OSTR2BYTES:
- s := n.Left()
- if ir.IsConst(s, constant.String) {
- sc := ir.StringVal(s)
-
- // Allocate a [n]byte of the right size.
- t := types.NewArray(types.Types[types.TUINT8], int64(len(sc)))
- var a ir.Node
- if n.Esc() == EscNone && len(sc) <= int(maxImplicitStackVarSize) {
- a = nodAddr(temp(t))
- } else {
- a = callnew(t)
- }
- p := temp(t.PtrTo()) // *[n]byte
- init.Append(typecheck(ir.Nod(ir.OAS, p, a), ctxStmt))
-
- // Copy from the static string data to the [n]byte.
- if len(sc) > 0 {
- as := ir.Nod(ir.OAS,
- ir.Nod(ir.ODEREF, p, nil),
- ir.Nod(ir.ODEREF, convnop(ir.Nod(ir.OSPTR, s, nil), t.PtrTo()), nil))
- appendWalkStmt(init, as)
- }
-
- // Slice the [n]byte to a []byte.
- slice := ir.NodAt(n.Pos(), ir.OSLICEARR, p, nil)
- slice.SetType(n.Type())
- slice.SetTypecheck(1)
- return walkexpr(slice, init)
- }
-
- a := nodnil()
- if n.Esc() == EscNone {
- // Create temporary buffer for slice on stack.
- t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize)
- a = nodAddr(temp(t))
- }
- // stringtoslicebyte(*32[byte], string) []byte
- return mkcall("stringtoslicebyte", n.Type(), init, a, conv(s, types.Types[types.TSTRING]))
-
- case ir.OSTR2BYTESTMP:
- // []byte(string) conversion that creates a slice
- // referring to the actual string bytes.
- // This conversion is handled later by the backend and
- // is only for use by internal compiler optimizations
- // that know that the slice won't be mutated.
- // The only such case today is:
- // for i, c := range []byte(string)
- n.SetLeft(walkexpr(n.Left(), init))
- return n
-
- case ir.OSTR2RUNES:
- a := nodnil()
- if n.Esc() == EscNone {
- // Create temporary buffer for slice on stack.
- t := types.NewArray(types.Types[types.TINT32], tmpstringbufsize)
- a = nodAddr(temp(t))
- }
- // stringtoslicerune(*[32]rune, string) []rune
- return mkcall("stringtoslicerune", n.Type(), init, a, conv(n.Left(), types.Types[types.TSTRING]))
-
- case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT, ir.OPTRLIT:
- if isStaticCompositeLiteral(n) && !canSSAType(n.Type()) {
- n := n.(*ir.CompLitExpr) // not OPTRLIT
- // n can be directly represented in the read-only data section.
- // Make direct reference to the static data. See issue 12841.
- vstat := readonlystaticname(n.Type())
- fixedlit(inInitFunction, initKindStatic, n, vstat, init)
- return typecheck(vstat, ctxExpr)
- }
- var_ := temp(n.Type())
- anylit(n, var_, init)
- return var_
-
- case ir.OSEND:
- n1 := n.Right()
- n1 = assignconv(n1, n.Left().Type().Elem(), "chan send")
- n1 = walkexpr(n1, init)
- n1 = nodAddr(n1)
- return mkcall1(chanfn("chansend1", 2, n.Left().Type()), nil, init, n.Left(), n1)
-
- case ir.OCLOSURE:
- return walkclosure(n.(*ir.ClosureExpr), init)
-
- case ir.OCALLPART:
- return walkpartialcall(n.(*ir.CallPartExpr), init)
- }
-
- // No return! Each case must return (or panic),
- // to avoid confusion about what gets returned
- // in the presence of type assertions.
-}
-
-// markTypeUsedInInterface marks that type t is converted to an interface.
-// This information is used in the linker in dead method elimination.
-func markTypeUsedInInterface(t *types.Type, from *obj.LSym) {
- tsym := typenamesym(t).Linksym()
- // Emit a marker relocation. The linker will know the type is converted
- // to an interface if "from" is reachable.
- r := obj.Addrel(from)
- r.Sym = tsym
- r.Type = objabi.R_USEIFACE
-}
-
-// markUsedIfaceMethod marks that an interface method is used in the current
-// function. n is OCALLINTER node.
-func markUsedIfaceMethod(n *ir.CallExpr) {
- dot := n.Left().(*ir.SelectorExpr)
- ityp := dot.Left().Type()
- tsym := typenamesym(ityp).Linksym()
- r := obj.Addrel(Curfn.LSym)
- r.Sym = tsym
- // dot.Xoffset is the method index * Widthptr (the offset of code pointer
- // in itab).
- midx := dot.Offset() / int64(Widthptr)
- r.Add = ifaceMethodOffset(ityp, midx)
- r.Type = objabi.R_USEIFACEMETHOD
-}
-
-// rtconvfn returns the parameter and result types that will be used by a
-// runtime function to convert from type src to type dst. The runtime function
-// name can be derived from the names of the returned types.
-//
-// If no such function is necessary, it returns (Txxx, Txxx).
-func rtconvfn(src, dst *types.Type) (param, result types.Kind) {
- if thearch.SoftFloat {
- return types.Txxx, types.Txxx
- }
-
- switch thearch.LinkArch.Family {
- case sys.ARM, sys.MIPS:
- if src.IsFloat() {
- switch dst.Kind() {
- case types.TINT64, types.TUINT64:
- return types.TFLOAT64, dst.Kind()
- }
- }
- if dst.IsFloat() {
- switch src.Kind() {
- case types.TINT64, types.TUINT64:
- return src.Kind(), types.TFLOAT64
- }
- }
-
- case sys.I386:
- if src.IsFloat() {
- switch dst.Kind() {
- case types.TINT64, types.TUINT64:
- return types.TFLOAT64, dst.Kind()
- case types.TUINT32, types.TUINT, types.TUINTPTR:
- return types.TFLOAT64, types.TUINT32
- }
- }
- if dst.IsFloat() {
- switch src.Kind() {
- case types.TINT64, types.TUINT64:
- return src.Kind(), types.TFLOAT64
- case types.TUINT32, types.TUINT, types.TUINTPTR:
- return types.TUINT32, types.TFLOAT64
- }
- }
- }
- return types.Txxx, types.Txxx
-}
-
-// TODO(josharian): combine this with its caller and simplify
-func reduceSlice(n *ir.SliceExpr) ir.Node {
- low, high, max := n.SliceBounds()
- if high != nil && high.Op() == ir.OLEN && samesafeexpr(n.Left(), high.(*ir.UnaryExpr).Left()) {
- // Reduce x[i:len(x)] to x[i:].
- high = nil
- }
- n.SetSliceBounds(low, high, max)
- if (n.Op() == ir.OSLICE || n.Op() == ir.OSLICESTR) && low == nil && high == nil {
- // Reduce x[:] to x.
- if base.Debug.Slice > 0 {
- base.Warn("slice: omit slice operation")
- }
- return n.Left()
- }
- return n
-}
-
-func ascompatee1(l ir.Node, r ir.Node, init *ir.Nodes) *ir.AssignStmt {
- // convas will turn map assigns into function calls,
- // making it impossible for reorder3 to work.
- n := ir.NewAssignStmt(base.Pos, l, r)
-
- if l.Op() == ir.OINDEXMAP {
- return n
- }
-
- return convas(n, init)
-}
-
-func ascompatee(op ir.Op, nl, nr []ir.Node, init *ir.Nodes) []ir.Node {
- // check assign expression list to
- // an expression list. called in
- // expr-list = expr-list
-
- // ensure order of evaluation for function calls
- for i := range nl {
- nl[i] = safeexpr(nl[i], init)
- }
- for i1 := range nr {
- nr[i1] = safeexpr(nr[i1], init)
- }
-
- var nn []*ir.AssignStmt
- i := 0
- for ; i < len(nl); i++ {
- if i >= len(nr) {
- break
- }
- // Do not generate 'x = x' during return. See issue 4014.
- if op == ir.ORETURN && samesafeexpr(nl[i], nr[i]) {
- continue
- }
- nn = append(nn, ascompatee1(nl[i], nr[i], init))
- }
-
- // cannot happen: caller checked that lists had same length
- if i < len(nl) || i < len(nr) {
- var nln, nrn ir.Nodes
- nln.Set(nl)
- nrn.Set(nr)
- base.Fatalf("error in shape across %+v %v %+v / %d %d [%s]", nln, op, nrn, len(nl), len(nr), ir.FuncName(Curfn))
- }
- return reorder3(nn)
-}
-
-// fncall reports whether assigning an rvalue of type rt to an lvalue l might involve a function call.
-func fncall(l ir.Node, rt *types.Type) bool {
- if l.HasCall() || l.Op() == ir.OINDEXMAP {
- return true
- }
- if types.Identical(l.Type(), rt) {
- return false
- }
- // There might be a conversion required, which might involve a runtime call.
- return true
-}
-
-// check assign type list to
-// an expression list. called in
-// expr-list = func()
-func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node {
- if nl.Len() != nr.NumFields() {
- base.Fatalf("ascompatet: assignment count mismatch: %d = %d", nl.Len(), nr.NumFields())
- }
-
- var nn, mm ir.Nodes
- for i, l := range nl.Slice() {
- if ir.IsBlank(l) {
- continue
- }
- r := nr.Field(i)
-
- // Any assignment to an lvalue that might cause a function call must be
- // deferred until all the returned values have been read.
- if fncall(l, r.Type) {
- tmp := ir.Node(temp(r.Type))
- tmp = typecheck(tmp, ctxExpr)
- a := convas(ir.NewAssignStmt(base.Pos, l, tmp), &mm)
- mm.Append(a)
- l = tmp
- }
-
- res := ir.Nod(ir.ORESULT, nil, nil)
- res.SetOffset(base.Ctxt.FixedFrameSize() + r.Offset)
- res.SetType(r.Type)
- res.SetTypecheck(1)
-
- a := convas(ir.NewAssignStmt(base.Pos, l, res), &nn)
- updateHasCall(a)
- if a.HasCall() {
- ir.Dump("ascompatet ucount", a)
- base.Fatalf("ascompatet: too many function calls evaluating parameters")
- }
-
- nn.Append(a)
- }
- return append(nn.Slice(), mm.Slice()...)
-}
-
-// package all the arguments that match a ... T parameter into a []T.
-func mkdotargslice(typ *types.Type, args []ir.Node) ir.Node {
- var n ir.Node
- if len(args) == 0 {
- n = nodnil()
- n.SetType(typ)
- } else {
- lit := ir.Nod(ir.OCOMPLIT, nil, ir.TypeNode(typ))
- lit.PtrList().Append(args...)
- lit.SetImplicit(true)
- n = lit
- }
-
- n = typecheck(n, ctxExpr)
- if n.Type() == nil {
- base.Fatalf("mkdotargslice: typecheck failed")
- }
- return n
-}
-
-// fixVariadicCall rewrites calls to variadic functions to use an
-// explicit ... argument if one is not already present.
-func fixVariadicCall(call *ir.CallExpr) {
- fntype := call.Left().Type()
- if !fntype.IsVariadic() || call.IsDDD() {
- return
- }
-
- vi := fntype.NumParams() - 1
- vt := fntype.Params().Field(vi).Type
-
- args := call.List().Slice()
- extra := args[vi:]
- slice := mkdotargslice(vt, extra)
- for i := range extra {
- extra[i] = nil // allow GC
- }
-
- call.PtrList().Set(append(args[:vi], slice))
- call.SetIsDDD(true)
-}
-
-func walkCall(n *ir.CallExpr, init *ir.Nodes) {
- if n.Rlist().Len() != 0 {
- return // already walked
- }
-
- params := n.Left().Type().Params()
- args := n.List().Slice()
-
- n.SetLeft(walkexpr(n.Left(), init))
- walkexprlist(args, init)
-
- // If this is a method call, add the receiver at the beginning of the args.
- if n.Op() == ir.OCALLMETH {
- withRecv := make([]ir.Node, len(args)+1)
- dot := n.Left().(*ir.SelectorExpr)
- withRecv[0] = dot.Left()
- dot.SetLeft(nil)
- copy(withRecv[1:], args)
- args = withRecv
- }
-
- // For any argument whose evaluation might require a function call,
- // store that argument into a temporary variable,
- // to prevent that calls from clobbering arguments already on the stack.
- // When instrumenting, all arguments might require function calls.
- var tempAssigns []ir.Node
- for i, arg := range args {
- updateHasCall(arg)
- // Determine param type.
- var t *types.Type
- if n.Op() == ir.OCALLMETH {
- if i == 0 {
- t = n.Left().Type().Recv().Type
- } else {
- t = params.Field(i - 1).Type
- }
- } else {
- t = params.Field(i).Type
- }
- if instrumenting || fncall(arg, t) {
- // make assignment of fncall to tempAt
- tmp := temp(t)
- a := convas(ir.NewAssignStmt(base.Pos, tmp, arg), init)
- tempAssigns = append(tempAssigns, a)
- // replace arg with temp
- args[i] = tmp
- }
- }
-
- n.PtrList().Set(tempAssigns)
- n.PtrRlist().Set(args)
-}
-
-// generate code for print
-func walkprint(nn *ir.CallExpr, init *ir.Nodes) ir.Node {
- // Hoist all the argument evaluation up before the lock.
- walkexprlistcheap(nn.List().Slice(), init)
-
- // For println, add " " between elements and "\n" at the end.
- if nn.Op() == ir.OPRINTN {
- s := nn.List().Slice()
- t := make([]ir.Node, 0, len(s)*2)
- for i, n := range s {
- if i != 0 {
- t = append(t, nodstr(" "))
- }
- t = append(t, n)
- }
- t = append(t, nodstr("\n"))
- nn.PtrList().Set(t)
- }
-
- // Collapse runs of constant strings.
- s := nn.List().Slice()
- t := make([]ir.Node, 0, len(s))
- for i := 0; i < len(s); {
- var strs []string
- for i < len(s) && ir.IsConst(s[i], constant.String) {
- strs = append(strs, ir.StringVal(s[i]))
- i++
- }
- if len(strs) > 0 {
- t = append(t, nodstr(strings.Join(strs, "")))
- }
- if i < len(s) {
- t = append(t, s[i])
- i++
- }
- }
- nn.PtrList().Set(t)
-
- calls := []ir.Node{mkcall("printlock", nil, init)}
- for i, n := range nn.List().Slice() {
- if n.Op() == ir.OLITERAL {
- if n.Type() == types.UntypedRune {
- n = defaultlit(n, types.RuneType)
- }
-
- switch n.Val().Kind() {
- case constant.Int:
- n = defaultlit(n, types.Types[types.TINT64])
-
- case constant.Float:
- n = defaultlit(n, types.Types[types.TFLOAT64])
- }
- }
-
- if n.Op() != ir.OLITERAL && n.Type() != nil && n.Type().Kind() == types.TIDEAL {
- n = defaultlit(n, types.Types[types.TINT64])
- }
- n = defaultlit(n, nil)
- nn.List().SetIndex(i, n)
- if n.Type() == nil || n.Type().Kind() == types.TFORW {
- continue
- }
-
- var on *ir.Name
- switch n.Type().Kind() {
- case types.TINTER:
- if n.Type().IsEmptyInterface() {
- on = syslook("printeface")
- } else {
- on = syslook("printiface")
- }
- on = substArgTypes(on, n.Type()) // any-1
- case types.TPTR:
- if n.Type().Elem().NotInHeap() {
- on = syslook("printuintptr")
- n = ir.Nod(ir.OCONV, n, nil)
- n.SetType(types.Types[types.TUNSAFEPTR])
- n = ir.Nod(ir.OCONV, n, nil)
- n.SetType(types.Types[types.TUINTPTR])
- break
- }
- fallthrough
- case types.TCHAN, types.TMAP, types.TFUNC, types.TUNSAFEPTR:
- on = syslook("printpointer")
- on = substArgTypes(on, n.Type()) // any-1
- case types.TSLICE:
- on = syslook("printslice")
- on = substArgTypes(on, n.Type()) // any-1
- case types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINTPTR:
- if isRuntimePkg(n.Type().Sym().Pkg) && n.Type().Sym().Name == "hex" {
- on = syslook("printhex")
- } else {
- on = syslook("printuint")
- }
- case types.TINT, types.TINT8, types.TINT16, types.TINT32, types.TINT64:
- on = syslook("printint")
- case types.TFLOAT32, types.TFLOAT64:
- on = syslook("printfloat")
- case types.TCOMPLEX64, types.TCOMPLEX128:
- on = syslook("printcomplex")
- case types.TBOOL:
- on = syslook("printbool")
- case types.TSTRING:
- cs := ""
- if ir.IsConst(n, constant.String) {
- cs = ir.StringVal(n)
- }
- switch cs {
- case " ":
- on = syslook("printsp")
- case "\n":
- on = syslook("printnl")
- default:
- on = syslook("printstring")
- }
- default:
- badtype(ir.OPRINT, n.Type(), nil)
- continue
- }
-
- r := ir.Nod(ir.OCALL, on, nil)
- if params := on.Type().Params().FieldSlice(); len(params) > 0 {
- t := params[0].Type
- if !types.Identical(t, n.Type()) {
- n = ir.Nod(ir.OCONV, n, nil)
- n.SetType(t)
- }
- r.PtrList().Append(n)
- }
- calls = append(calls, r)
- }
-
- calls = append(calls, mkcall("printunlock", nil, init))
-
- typecheckslice(calls, ctxStmt)
- walkexprlist(calls, init)
-
- r := ir.Nod(ir.OBLOCK, nil, nil)
- r.PtrList().Set(calls)
- return walkstmt(typecheck(r, ctxStmt))
-}
-
-func callnew(t *types.Type) ir.Node {
- dowidth(t)
- n := ir.Nod(ir.ONEWOBJ, typename(t), nil)
- n.SetType(types.NewPtr(t))
- n.SetTypecheck(1)
- n.MarkNonNil()
- return n
-}
-
-// isReflectHeaderDataField reports whether l is an expression p.Data
-// where p has type reflect.SliceHeader or reflect.StringHeader.
-func isReflectHeaderDataField(l ir.Node) bool {
- if l.Type() != types.Types[types.TUINTPTR] {
- return false
- }
-
- var tsym *types.Sym
- switch l.Op() {
- case ir.ODOT:
- tsym = l.Left().Type().Sym()
- case ir.ODOTPTR:
- tsym = l.Left().Type().Elem().Sym()
- default:
- return false
- }
-
- if tsym == nil || l.Sym().Name != "Data" || tsym.Pkg.Path != "reflect" {
- return false
- }
- return tsym.Name == "SliceHeader" || tsym.Name == "StringHeader"
-}
-
-func convas(n *ir.AssignStmt, init *ir.Nodes) *ir.AssignStmt {
- if n.Op() != ir.OAS {
- base.Fatalf("convas: not OAS %v", n.Op())
- }
- defer updateHasCall(n)
-
- n.SetTypecheck(1)
-
- if n.Left() == nil || n.Right() == nil {
- return n
- }
-
- lt := n.Left().Type()
- rt := n.Right().Type()
- if lt == nil || rt == nil {
- return n
- }
-
- if ir.IsBlank(n.Left()) {
- n.SetRight(defaultlit(n.Right(), nil))
- return n
- }
-
- if !types.Identical(lt, rt) {
- n.SetRight(assignconv(n.Right(), lt, "assignment"))
- n.SetRight(walkexpr(n.Right(), init))
- }
- dowidth(n.Right().Type())
-
- return n
-}
-
-// reorder3
-// from ascompatee
-// a,b = c,d
-// simultaneous assignment. there cannot
-// be later use of an earlier lvalue.
-//
-// function calls have been removed.
-func reorder3(all []*ir.AssignStmt) []ir.Node {
- // If a needed expression may be affected by an
- // earlier assignment, make an early copy of that
- // expression and use the copy instead.
- var early []ir.Node
-
- var mapinit ir.Nodes
- for i, n := range all {
- l := n.Left()
-
- // Save subexpressions needed on left side.
- // Drill through non-dereferences.
- for {
- switch ll := l; ll.Op() {
- case ir.ODOT:
- l = ll.Left()
- continue
- case ir.OPAREN:
- l = ll.Left()
- continue
- case ir.OINDEX:
- if ll.Left().Type().IsArray() {
- ll.SetRight(reorder3save(ll.Right(), all, i, &early))
- l = ll.Left()
- continue
- }
- }
- break
- }
-
- switch l.Op() {
- default:
- base.Fatalf("reorder3 unexpected lvalue %v", l.Op())
-
- case ir.ONAME:
- break
-
- case ir.OINDEX, ir.OINDEXMAP:
- l.SetLeft(reorder3save(l.Left(), all, i, &early))
- l.SetRight(reorder3save(l.Right(), all, i, &early))
- if l.Op() == ir.OINDEXMAP {
- all[i] = convas(all[i], &mapinit)
- }
-
- case ir.ODEREF:
- l.SetLeft(reorder3save(l.Left(), all, i, &early))
- case ir.ODOTPTR:
- l.SetLeft(reorder3save(l.Left(), all, i, &early))
- }
-
- // Save expression on right side.
- all[i].SetRight(reorder3save(all[i].Right(), all, i, &early))
- }
-
- early = append(mapinit.Slice(), early...)
- for _, as := range all {
- early = append(early, as)
- }
- return early
-}
-
-// if the evaluation of *np would be affected by the
-// assignments in all up to but not including the ith assignment,
-// copy into a temporary during *early and
-// replace *np with that temp.
-// The result of reorder3save MUST be assigned back to n, e.g.
-// n.Left = reorder3save(n.Left, all, i, early)
-func reorder3save(n ir.Node, all []*ir.AssignStmt, i int, early *[]ir.Node) ir.Node {
- if !aliased(n, all[:i]) {
- return n
- }
-
- q := ir.Node(temp(n.Type()))
- as := typecheck(ir.Nod(ir.OAS, q, n), ctxStmt)
- *early = append(*early, as)
- return q
-}
-
-// what's the outer value that a write to n affects?
-// outer value means containing struct or array.
-func outervalue(n ir.Node) ir.Node {
- for {
- switch nn := n; nn.Op() {
- case ir.OXDOT:
- base.Fatalf("OXDOT in walk")
- case ir.ODOT:
- n = nn.Left()
- continue
- case ir.OPAREN:
- n = nn.Left()
- continue
- case ir.OCONVNOP:
- n = nn.Left()
- continue
- case ir.OINDEX:
- if nn.Left().Type() != nil && nn.Left().Type().IsArray() {
- n = nn.Left()
- continue
- }
- }
-
- return n
- }
-}
-
-// Is it possible that the computation of r might be
-// affected by assignments in all?
-func aliased(r ir.Node, all []*ir.AssignStmt) bool {
- if r == nil {
- return false
- }
-
- // Treat all fields of a struct as referring to the whole struct.
- // We could do better but we would have to keep track of the fields.
- for r.Op() == ir.ODOT {
- r = r.(*ir.SelectorExpr).Left()
- }
-
- // Look for obvious aliasing: a variable being assigned
- // during the all list and appearing in n.
- // Also record whether there are any writes to addressable
- // memory (either main memory or variables whose addresses
- // have been taken).
- memwrite := false
- for _, as := range all {
- // We can ignore assignments to blank.
- if ir.IsBlank(as.Left()) {
- continue
- }
-
- lv := outervalue(as.Left())
- if lv.Op() != ir.ONAME {
- memwrite = true
- continue
- }
- l := lv.(*ir.Name)
-
- switch l.Class() {
- default:
- base.Fatalf("unexpected class: %v, %v", l, l.Class())
-
- case ir.PAUTOHEAP, ir.PEXTERN:
- memwrite = true
- continue
-
- case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT:
- if l.Name().Addrtaken() {
- memwrite = true
- continue
- }
-
- if refersToName(l, r) {
- // Direct hit: l appears in r.
- return true
- }
- }
- }
-
- // The variables being written do not appear in r.
- // However, r might refer to computed addresses
- // that are being written.
-
- // If no computed addresses are affected by the writes, no aliasing.
- if !memwrite {
- return false
- }
-
- // If r does not refer to any variables whose addresses have been taken,
- // then the only possible writes to r would be directly to the variables,
- // and we checked those above, so no aliasing problems.
- if !anyAddrTaken(r) {
- return false
- }
-
- // Otherwise, both the writes and r refer to computed memory addresses.
- // Assume that they might conflict.
- return true
-}
-
-// anyAddrTaken reports whether the evaluation n,
-// which appears on the left side of an assignment,
-// may refer to variables whose addresses have been taken.
-func anyAddrTaken(n ir.Node) bool {
- return ir.Any(n, func(n ir.Node) bool {
- switch n.Op() {
- case ir.ONAME:
- return n.Class() == ir.PEXTERN || n.Class() == ir.PAUTOHEAP || n.Name().Addrtaken()
-
- case ir.ODOT: // but not ODOTPTR - should have been handled in aliased.
- base.Fatalf("anyAddrTaken unexpected ODOT")
-
- case ir.OADD,
- ir.OAND,
- ir.OANDAND,
- ir.OANDNOT,
- ir.OBITNOT,
- ir.OCONV,
- ir.OCONVIFACE,
- ir.OCONVNOP,
- ir.ODIV,
- ir.ODOTTYPE,
- ir.OLITERAL,
- ir.OLSH,
- ir.OMOD,
- ir.OMUL,
- ir.ONEG,
- ir.ONIL,
- ir.OOR,
- ir.OOROR,
- ir.OPAREN,
- ir.OPLUS,
- ir.ORSH,
- ir.OSUB,
- ir.OXOR:
- return false
- }
- // Be conservative.
- return true
- })
-}
-
-// refersToName reports whether r refers to name.
-func refersToName(name *ir.Name, r ir.Node) bool {
- return ir.Any(r, func(r ir.Node) bool {
- return r.Op() == ir.ONAME && r == name
- })
-}
-
-var stop = errors.New("stop")
-
-// refersToCommonName reports whether any name
-// appears in common between l and r.
-// This is called from sinit.go.
-func refersToCommonName(l ir.Node, r ir.Node) bool {
- if l == nil || r == nil {
- return false
- }
-
- // This could be written elegantly as a Find nested inside a Find:
- //
- // found := ir.Find(l, func(l ir.Node) interface{} {
- // if l.Op() == ir.ONAME {
- // return ir.Find(r, func(r ir.Node) interface{} {
- // if r.Op() == ir.ONAME && l.Name() == r.Name() {
- // return r
- // }
- // return nil
- // })
- // }
- // return nil
- // })
- // return found != nil
- //
- // But that would allocate a new closure for the inner Find
- // for each name found on the left side.
- // It may not matter at all, but the below way of writing it
- // only allocates two closures, not O(|L|) closures.
-
- var doL, doR func(ir.Node) error
- var targetL *ir.Name
- doR = func(r ir.Node) error {
- if r.Op() == ir.ONAME && r.Name() == targetL {
- return stop
- }
- return ir.DoChildren(r, doR)
- }
- doL = func(l ir.Node) error {
- if l.Op() == ir.ONAME {
- targetL = l.Name()
- if doR(r) == stop {
- return stop
- }
- }
- return ir.DoChildren(l, doL)
- }
- return doL(l) == stop
-}
-
-// paramstoheap returns code to allocate memory for heap-escaped parameters
-// and to copy non-result parameters' values from the stack.
-func paramstoheap(params *types.Type) []ir.Node {
- var nn []ir.Node
- for _, t := range params.Fields().Slice() {
- v := ir.AsNode(t.Nname)
- if v != nil && v.Sym() != nil && strings.HasPrefix(v.Sym().Name, "~r") { // unnamed result
- v = nil
- }
- if v == nil {
- continue
- }
-
- if stackcopy := v.Name().Stackcopy; stackcopy != nil {
- nn = append(nn, walkstmt(ir.Nod(ir.ODCL, v, nil)))
- if stackcopy.Class() == ir.PPARAM {
- nn = append(nn, walkstmt(typecheck(ir.Nod(ir.OAS, v, stackcopy), ctxStmt)))
- }
- }
- }
-
- return nn
-}
-
-// zeroResults zeros the return values at the start of the function.
-// We need to do this very early in the function. Defer might stop a
-// panic and show the return values as they exist at the time of
-// panic. For precise stacks, the garbage collector assumes results
-// are always live, so we need to zero them before any allocations,
-// even allocations to move params/results to the heap.
-// The generated code is added to Curfn's Enter list.
-func zeroResults() {
- for _, f := range Curfn.Type().Results().Fields().Slice() {
- v := ir.AsNode(f.Nname)
- if v != nil && v.Name().Heapaddr != nil {
- // The local which points to the return value is the
- // thing that needs zeroing. This is already handled
- // by a Needzero annotation in plive.go:livenessepilogue.
- continue
- }
- if isParamHeapCopy(v) {
- // TODO(josharian/khr): Investigate whether we can switch to "continue" here,
- // and document more in either case.
- // In the review of CL 114797, Keith wrote (roughly):
- // I don't think the zeroing below matters.
- // The stack return value will never be marked as live anywhere in the function.
- // It is not written to until deferreturn returns.
- v = v.Name().Stackcopy
- }
- // Zero the stack location containing f.
- Curfn.Enter.Append(ir.NodAt(Curfn.Pos(), ir.OAS, v, nil))
- }
-}
-
-// returnsfromheap returns code to copy values for heap-escaped parameters
-// back to the stack.
-func returnsfromheap(params *types.Type) []ir.Node {
- var nn []ir.Node
- for _, t := range params.Fields().Slice() {
- v := ir.AsNode(t.Nname)
- if v == nil {
- continue
- }
- if stackcopy := v.Name().Stackcopy; stackcopy != nil && stackcopy.Class() == ir.PPARAMOUT {
- nn = append(nn, walkstmt(typecheck(ir.Nod(ir.OAS, stackcopy, v), ctxStmt)))
- }
- }
-
- return nn
-}
-
-// heapmoves generates code to handle migrating heap-escaped parameters
-// between the stack and the heap. The generated code is added to Curfn's
-// Enter and Exit lists.
-func heapmoves() {
- lno := base.Pos
- base.Pos = Curfn.Pos()
- nn := paramstoheap(Curfn.Type().Recvs())
- nn = append(nn, paramstoheap(Curfn.Type().Params())...)
- nn = append(nn, paramstoheap(Curfn.Type().Results())...)
- Curfn.Enter.Append(nn...)
- base.Pos = Curfn.Endlineno
- Curfn.Exit.Append(returnsfromheap(Curfn.Type().Results())...)
- base.Pos = lno
-}
-
-func vmkcall(fn ir.Node, t *types.Type, init *ir.Nodes, va []ir.Node) *ir.CallExpr {
- if fn.Type() == nil || fn.Type().Kind() != types.TFUNC {
- base.Fatalf("mkcall %v %v", fn, fn.Type())
- }
-
- n := fn.Type().NumParams()
- if n != len(va) {
- base.Fatalf("vmkcall %v needs %v args got %v", fn, n, len(va))
- }
-
- call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, va)
- TypecheckCall(call)
- call.SetType(t)
- return walkexpr(call, init).(*ir.CallExpr)
-}
-
-func mkcall(name string, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.CallExpr {
- return vmkcall(syslook(name), t, init, args)
-}
-
-func mkcall1(fn ir.Node, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.CallExpr {
- return vmkcall(fn, t, init, args)
-}
-
-func conv(n ir.Node, t *types.Type) ir.Node {
- if types.Identical(n.Type(), t) {
- return n
- }
- n = ir.Nod(ir.OCONV, n, nil)
- n.SetType(t)
- n = typecheck(n, ctxExpr)
- return n
-}
-
-// convnop converts node n to type t using the OCONVNOP op
-// and typechecks the result with ctxExpr.
-func convnop(n ir.Node, t *types.Type) ir.Node {
- if types.Identical(n.Type(), t) {
- return n
- }
- n = ir.Nod(ir.OCONVNOP, n, nil)
- n.SetType(t)
- n = typecheck(n, ctxExpr)
- return n
-}
-
-// byteindex converts n, which is byte-sized, to an int used to index into an array.
-// We cannot use conv, because we allow converting bool to int here,
-// which is forbidden in user code.
-func byteindex(n ir.Node) ir.Node {
- // We cannot convert from bool to int directly.
- // While converting from int8 to int is possible, it would yield
- // the wrong result for negative values.
- // Reinterpreting the value as an unsigned byte solves both cases.
- if !types.Identical(n.Type(), types.Types[types.TUINT8]) {
- n = ir.Nod(ir.OCONV, n, nil)
- n.SetType(types.Types[types.TUINT8])
- n.SetTypecheck(1)
- }
- n = ir.Nod(ir.OCONV, n, nil)
- n.SetType(types.Types[types.TINT])
- n.SetTypecheck(1)
- return n
-}
-
-func chanfn(name string, n int, t *types.Type) ir.Node {
- if !t.IsChan() {
- base.Fatalf("chanfn %v", t)
- }
- fn := syslook(name)
- switch n {
- default:
- base.Fatalf("chanfn %d", n)
- case 1:
- fn = substArgTypes(fn, t.Elem())
- case 2:
- fn = substArgTypes(fn, t.Elem(), t.Elem())
- }
- return fn
-}
-
-func mapfn(name string, t *types.Type) ir.Node {
- if !t.IsMap() {
- base.Fatalf("mapfn %v", t)
- }
- fn := syslook(name)
- fn = substArgTypes(fn, t.Key(), t.Elem(), t.Key(), t.Elem())
- return fn
-}
-
-func mapfndel(name string, t *types.Type) ir.Node {
- if !t.IsMap() {
- base.Fatalf("mapfn %v", t)
- }
- fn := syslook(name)
- fn = substArgTypes(fn, t.Key(), t.Elem(), t.Key())
- return fn
-}
-
-const (
- mapslow = iota
- mapfast32
- mapfast32ptr
- mapfast64
- mapfast64ptr
- mapfaststr
- nmapfast
-)
-
-type mapnames [nmapfast]string
-
-func mkmapnames(base string, ptr string) mapnames {
- return mapnames{base, base + "_fast32", base + "_fast32" + ptr, base + "_fast64", base + "_fast64" + ptr, base + "_faststr"}
-}
-
-var mapaccess1 = mkmapnames("mapaccess1", "")
-var mapaccess2 = mkmapnames("mapaccess2", "")
-var mapassign = mkmapnames("mapassign", "ptr")
-var mapdelete = mkmapnames("mapdelete", "")
-
-func mapfast(t *types.Type) int {
- // Check runtime/map.go:maxElemSize before changing.
- if t.Elem().Width > 128 {
- return mapslow
- }
- switch algtype(t.Key()) {
- case AMEM32:
- if !t.Key().HasPointers() {
- return mapfast32
- }
- if Widthptr == 4 {
- return mapfast32ptr
- }
- base.Fatalf("small pointer %v", t.Key())
- case AMEM64:
- if !t.Key().HasPointers() {
- return mapfast64
- }
- if Widthptr == 8 {
- return mapfast64ptr
- }
- // Two-word object, at least one of which is a pointer.
- // Use the slow path.
- case ASTRING:
- return mapfaststr
- }
- return mapslow
-}
-
-func writebarrierfn(name string, l *types.Type, r *types.Type) ir.Node {
- fn := syslook(name)
- fn = substArgTypes(fn, l, r)
- return fn
-}
-
-func addstr(n *ir.AddStringExpr, init *ir.Nodes) ir.Node {
- c := n.List().Len()
-
- if c < 2 {
- base.Fatalf("addstr count %d too small", c)
- }
-
- buf := nodnil()
- if n.Esc() == EscNone {
- sz := int64(0)
- for _, n1 := range n.List().Slice() {
- if n1.Op() == ir.OLITERAL {
- sz += int64(len(ir.StringVal(n1)))
- }
- }
-
- // Don't allocate the buffer if the result won't fit.
- if sz < tmpstringbufsize {
- // Create temporary buffer for result string on stack.
- t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize)
- buf = nodAddr(temp(t))
- }
- }
-
- // build list of string arguments
- args := []ir.Node{buf}
- for _, n2 := range n.List().Slice() {
- args = append(args, conv(n2, types.Types[types.TSTRING]))
- }
-
- var fn string
- if c <= 5 {
- // small numbers of strings use direct runtime helpers.
- // note: order.expr knows this cutoff too.
- fn = fmt.Sprintf("concatstring%d", c)
- } else {
- // large numbers of strings are passed to the runtime as a slice.
- fn = "concatstrings"
-
- t := types.NewSlice(types.Types[types.TSTRING])
- // args[1:] to skip buf arg
- slice := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(t), args[1:])
- slice.Prealloc = n.Prealloc
- args = []ir.Node{buf, slice}
- slice.SetEsc(EscNone)
- }
-
- cat := syslook(fn)
- r := ir.Nod(ir.OCALL, cat, nil)
- r.PtrList().Set(args)
- r1 := typecheck(r, ctxExpr)
- r1 = walkexpr(r1, init)
- r1.SetType(n.Type())
-
- return r1
-}
-
-func walkAppendArgs(n *ir.CallExpr, init *ir.Nodes) {
- walkexprlistsafe(n.List().Slice(), init)
-
- // walkexprlistsafe will leave OINDEX (s[n]) alone if both s
- // and n are name or literal, but those may index the slice we're
- // modifying here. Fix explicitly.
- ls := n.List().Slice()
- for i1, n1 := range ls {
- ls[i1] = cheapexpr(n1, init)
- }
-}
-
-// expand append(l1, l2...) to
-// init {
-// s := l1
-// n := len(s) + len(l2)
-// // Compare as uint so growslice can panic on overflow.
-// if uint(n) > uint(cap(s)) {
-// s = growslice(s, n)
-// }
-// s = s[:n]
-// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
-// }
-// s
-//
-// l2 is allowed to be a string.
-func appendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
- walkAppendArgs(n, init)
-
- l1 := n.List().First()
- l2 := n.List().Second()
- l2 = cheapexpr(l2, init)
- n.List().SetSecond(l2)
-
- var nodes ir.Nodes
-
- // var s []T
- s := temp(l1.Type())
- nodes.Append(ir.Nod(ir.OAS, s, l1)) // s = l1
-
- elemtype := s.Type().Elem()
-
- // n := len(s) + len(l2)
- nn := temp(types.Types[types.TINT])
- nodes.Append(ir.Nod(ir.OAS, nn, ir.Nod(ir.OADD, ir.Nod(ir.OLEN, s, nil), ir.Nod(ir.OLEN, l2, nil))))
-
- // if uint(n) > uint(cap(s))
- nif := ir.Nod(ir.OIF, nil, nil)
- nuint := conv(nn, types.Types[types.TUINT])
- scapuint := conv(ir.Nod(ir.OCAP, s, nil), types.Types[types.TUINT])
- nif.SetLeft(ir.Nod(ir.OGT, nuint, scapuint))
-
- // instantiate growslice(typ *type, []any, int) []any
- fn := syslook("growslice")
- fn = substArgTypes(fn, elemtype, elemtype)
-
- // s = growslice(T, s, n)
- nif.PtrBody().Set1(ir.Nod(ir.OAS, s, mkcall1(fn, s.Type(), nif.PtrInit(), typename(elemtype), s, nn)))
- nodes.Append(nif)
-
- // s = s[:n]
- nt := ir.Nod(ir.OSLICE, s, nil)
- nt.SetSliceBounds(nil, nn, nil)
- nt.SetBounded(true)
- nodes.Append(ir.Nod(ir.OAS, s, nt))
-
- var ncopy ir.Node
- if elemtype.HasPointers() {
- // copy(s[len(l1):], l2)
- slice := ir.Nod(ir.OSLICE, s, nil)
- slice.SetType(s.Type())
- slice.SetSliceBounds(ir.Nod(ir.OLEN, l1, nil), nil, nil)
-
- Curfn.SetWBPos(n.Pos())
-
- // instantiate typedslicecopy(typ *type, dstPtr *any, dstLen int, srcPtr *any, srcLen int) int
- fn := syslook("typedslicecopy")
- fn = substArgTypes(fn, l1.Type().Elem(), l2.Type().Elem())
- ptr1, len1 := backingArrayPtrLen(cheapexpr(slice, &nodes))
- ptr2, len2 := backingArrayPtrLen(l2)
- ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, typename(elemtype), ptr1, len1, ptr2, len2)
- } else if instrumenting && !base.Flag.CompilingRuntime {
- // rely on runtime to instrument:
- // copy(s[len(l1):], l2)
- // l2 can be a slice or string.
- slice := ir.Nod(ir.OSLICE, s, nil)
- slice.SetType(s.Type())
- slice.SetSliceBounds(ir.Nod(ir.OLEN, l1, nil), nil, nil)
-
- ptr1, len1 := backingArrayPtrLen(cheapexpr(slice, &nodes))
- ptr2, len2 := backingArrayPtrLen(l2)
-
- fn := syslook("slicecopy")
- fn = substArgTypes(fn, ptr1.Type().Elem(), ptr2.Type().Elem())
- ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, ptr1, len1, ptr2, len2, nodintconst(elemtype.Width))
- } else {
- // memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
- ix := ir.Nod(ir.OINDEX, s, ir.Nod(ir.OLEN, l1, nil))
- ix.SetBounded(true)
- addr := nodAddr(ix)
-
- sptr := ir.Nod(ir.OSPTR, l2, nil)
-
- nwid := cheapexpr(conv(ir.Nod(ir.OLEN, l2, nil), types.Types[types.TUINTPTR]), &nodes)
- nwid = ir.Nod(ir.OMUL, nwid, nodintconst(elemtype.Width))
-
- // instantiate func memmove(to *any, frm *any, length uintptr)
- fn := syslook("memmove")
- fn = substArgTypes(fn, elemtype, elemtype)
- ncopy = mkcall1(fn, nil, &nodes, addr, sptr, nwid)
- }
- ln := append(nodes.Slice(), ncopy)
-
- typecheckslice(ln, ctxStmt)
- walkstmtlist(ln)
- init.Append(ln...)
- return s
-}
-
-// isAppendOfMake reports whether n is of the form append(x , make([]T, y)...).
-// isAppendOfMake assumes n has already been typechecked.
-func isAppendOfMake(n ir.Node) bool {
- if base.Flag.N != 0 || instrumenting {
- return false
- }
-
- if n.Typecheck() == 0 {
- base.Fatalf("missing typecheck: %+v", n)
- }
-
- if n.Op() != ir.OAPPEND {
- return false
- }
- call := n.(*ir.CallExpr)
- if !call.IsDDD() || call.List().Len() != 2 || call.List().Second().Op() != ir.OMAKESLICE {
- return false
- }
-
- mk := call.List().Second().(*ir.MakeExpr)
- if mk.Right() != nil {
- return false
- }
-
- // y must be either an integer constant or the largest possible positive value
- // of variable y needs to fit into an uint.
-
- // typecheck made sure that constant arguments to make are not negative and fit into an int.
-
- // The care of overflow of the len argument to make will be handled by an explicit check of int(len) < 0 during runtime.
- y := mk.Left()
- if !ir.IsConst(y, constant.Int) && y.Type().Size() > types.Types[types.TUINT].Size() {
- return false
- }
-
- return true
-}
-
-// extendslice rewrites append(l1, make([]T, l2)...) to
-// init {
-// if l2 >= 0 { // Empty if block here for more meaningful node.SetLikely(true)
-// } else {
-// panicmakeslicelen()
-// }
-// s := l1
-// n := len(s) + l2
-// // Compare n and s as uint so growslice can panic on overflow of len(s) + l2.
-// // cap is a positive int and n can become negative when len(s) + l2
-// // overflows int. Interpreting n when negative as uint makes it larger
-// // than cap(s). growslice will check the int n arg and panic if n is
-// // negative. This prevents the overflow from being undetected.
-// if uint(n) > uint(cap(s)) {
-// s = growslice(T, s, n)
-// }
-// s = s[:n]
-// lptr := &l1[0]
-// sptr := &s[0]
-// if lptr == sptr || !T.HasPointers() {
-// // growslice did not clear the whole underlying array (or did not get called)
-// hp := &s[len(l1)]
-// hn := l2 * sizeof(T)
-// memclr(hp, hn)
-// }
-// }
-// s
-func extendslice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
- // isAppendOfMake made sure all possible positive values of l2 fit into an uint.
- // The case of l2 overflow when converting from e.g. uint to int is handled by an explicit
- // check of l2 < 0 at runtime which is generated below.
- l2 := conv(n.List().Second().(*ir.MakeExpr).Left(), types.Types[types.TINT])
- l2 = typecheck(l2, ctxExpr)
- n.List().SetSecond(l2) // walkAppendArgs expects l2 in n.List.Second().
-
- walkAppendArgs(n, init)
-
- l1 := n.List().First()
- l2 = n.List().Second() // re-read l2, as it may have been updated by walkAppendArgs
-
- var nodes []ir.Node
-
- // if l2 >= 0 (likely happens), do nothing
- nifneg := ir.Nod(ir.OIF, ir.Nod(ir.OGE, l2, nodintconst(0)), nil)
- nifneg.SetLikely(true)
-
- // else panicmakeslicelen()
- nifneg.PtrRlist().Set1(mkcall("panicmakeslicelen", nil, init))
- nodes = append(nodes, nifneg)
-
- // s := l1
- s := temp(l1.Type())
- nodes = append(nodes, ir.Nod(ir.OAS, s, l1))
-
- elemtype := s.Type().Elem()
-
- // n := len(s) + l2
- nn := temp(types.Types[types.TINT])
- nodes = append(nodes, ir.Nod(ir.OAS, nn, ir.Nod(ir.OADD, ir.Nod(ir.OLEN, s, nil), l2)))
-
- // if uint(n) > uint(cap(s))
- nuint := conv(nn, types.Types[types.TUINT])
- capuint := conv(ir.Nod(ir.OCAP, s, nil), types.Types[types.TUINT])
- nif := ir.Nod(ir.OIF, ir.Nod(ir.OGT, nuint, capuint), nil)
-
- // instantiate growslice(typ *type, old []any, newcap int) []any
- fn := syslook("growslice")
- fn = substArgTypes(fn, elemtype, elemtype)
-
- // s = growslice(T, s, n)
- nif.PtrBody().Set1(ir.Nod(ir.OAS, s, mkcall1(fn, s.Type(), nif.PtrInit(), typename(elemtype), s, nn)))
- nodes = append(nodes, nif)
-
- // s = s[:n]
- nt := ir.Nod(ir.OSLICE, s, nil)
- nt.SetSliceBounds(nil, nn, nil)
- nt.SetBounded(true)
- nodes = append(nodes, ir.Nod(ir.OAS, s, nt))
-
- // lptr := &l1[0]
- l1ptr := temp(l1.Type().Elem().PtrTo())
- tmp := ir.Nod(ir.OSPTR, l1, nil)
- nodes = append(nodes, ir.Nod(ir.OAS, l1ptr, tmp))
-
- // sptr := &s[0]
- sptr := temp(elemtype.PtrTo())
- tmp = ir.Nod(ir.OSPTR, s, nil)
- nodes = append(nodes, ir.Nod(ir.OAS, sptr, tmp))
-
- // hp := &s[len(l1)]
- ix := ir.Nod(ir.OINDEX, s, ir.Nod(ir.OLEN, l1, nil))
- ix.SetBounded(true)
- hp := convnop(nodAddr(ix), types.Types[types.TUNSAFEPTR])
-
- // hn := l2 * sizeof(elem(s))
- hn := conv(ir.Nod(ir.OMUL, l2, nodintconst(elemtype.Width)), types.Types[types.TUINTPTR])
-
- clrname := "memclrNoHeapPointers"
- hasPointers := elemtype.HasPointers()
- if hasPointers {
- clrname = "memclrHasPointers"
- Curfn.SetWBPos(n.Pos())
- }
-
- var clr ir.Nodes
- clrfn := mkcall(clrname, nil, &clr, hp, hn)
- clr.Append(clrfn)
-
- if hasPointers {
- // if l1ptr == sptr
- nifclr := ir.Nod(ir.OIF, ir.Nod(ir.OEQ, l1ptr, sptr), nil)
- nifclr.SetBody(clr)
- nodes = append(nodes, nifclr)
- } else {
- nodes = append(nodes, clr.Slice()...)
- }
-
- typecheckslice(nodes, ctxStmt)
- walkstmtlist(nodes)
- init.Append(nodes...)
- return s
-}
-
-// Rewrite append(src, x, y, z) so that any side effects in
-// x, y, z (including runtime panics) are evaluated in
-// initialization statements before the append.
-// For normal code generation, stop there and leave the
-// rest to cgen_append.
-//
-// For race detector, expand append(src, a [, b]* ) to
-//
-// init {
-// s := src
-// const argc = len(args) - 1
-// if cap(s) - len(s) < argc {
-// s = growslice(s, len(s)+argc)
-// }
-// n := len(s)
-// s = s[:n+argc]
-// s[n] = a
-// s[n+1] = b
-// ...
-// }
-// s
-func walkappend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node {
- if !samesafeexpr(dst, n.List().First()) {
- n.List().SetFirst(safeexpr(n.List().First(), init))
- n.List().SetFirst(walkexpr(n.List().First(), init))
- }
- walkexprlistsafe(n.List().Slice()[1:], init)
-
- nsrc := n.List().First()
-
- // walkexprlistsafe will leave OINDEX (s[n]) alone if both s
- // and n are name or literal, but those may index the slice we're
- // modifying here. Fix explicitly.
- // Using cheapexpr also makes sure that the evaluation
- // of all arguments (and especially any panics) happen
- // before we begin to modify the slice in a visible way.
- ls := n.List().Slice()[1:]
- for i, n := range ls {
- n = cheapexpr(n, init)
- if !types.Identical(n.Type(), nsrc.Type().Elem()) {
- n = assignconv(n, nsrc.Type().Elem(), "append")
- n = walkexpr(n, init)
- }
- ls[i] = n
- }
-
- argc := n.List().Len() - 1
- if argc < 1 {
- return nsrc
- }
-
- // General case, with no function calls left as arguments.
- // Leave for gen, except that instrumentation requires old form.
- if !instrumenting || base.Flag.CompilingRuntime {
- return n
- }
-
- var l []ir.Node
-
- ns := temp(nsrc.Type())
- l = append(l, ir.Nod(ir.OAS, ns, nsrc)) // s = src
-
- na := nodintconst(int64(argc)) // const argc
- nif := ir.Nod(ir.OIF, nil, nil) // if cap(s) - len(s) < argc
- nif.SetLeft(ir.Nod(ir.OLT, ir.Nod(ir.OSUB, ir.Nod(ir.OCAP, ns, nil), ir.Nod(ir.OLEN, ns, nil)), na))
-
- fn := syslook("growslice") // growslice(<type>, old []T, mincap int) (ret []T)
- fn = substArgTypes(fn, ns.Type().Elem(), ns.Type().Elem())
-
- nif.PtrBody().Set1(ir.Nod(ir.OAS, ns,
- mkcall1(fn, ns.Type(), nif.PtrInit(), typename(ns.Type().Elem()), ns,
- ir.Nod(ir.OADD, ir.Nod(ir.OLEN, ns, nil), na))))
-
- l = append(l, nif)
-
- nn := temp(types.Types[types.TINT])
- l = append(l, ir.Nod(ir.OAS, nn, ir.Nod(ir.OLEN, ns, nil))) // n = len(s)
-
- slice := ir.Nod(ir.OSLICE, ns, nil) // ...s[:n+argc]
- slice.SetSliceBounds(nil, ir.Nod(ir.OADD, nn, na), nil)
- slice.SetBounded(true)
- l = append(l, ir.Nod(ir.OAS, ns, slice)) // s = s[:n+argc]
-
- ls = n.List().Slice()[1:]
- for i, n := range ls {
- ix := ir.Nod(ir.OINDEX, ns, nn) // s[n] ...
- ix.SetBounded(true)
- l = append(l, ir.Nod(ir.OAS, ix, n)) // s[n] = arg
- if i+1 < len(ls) {
- l = append(l, ir.Nod(ir.OAS, nn, ir.Nod(ir.OADD, nn, nodintconst(1)))) // n = n + 1
- }
- }
-
- typecheckslice(l, ctxStmt)
- walkstmtlist(l)
- init.Append(l...)
- return ns
-}
-
-// Lower copy(a, b) to a memmove call or a runtime call.
-//
-// init {
-// n := len(a)
-// if n > len(b) { n = len(b) }
-// if a.ptr != b.ptr { memmove(a.ptr, b.ptr, n*sizeof(elem(a))) }
-// }
-// n;
-//
-// Also works if b is a string.
-//
-func copyany(n *ir.BinaryExpr, init *ir.Nodes, runtimecall bool) ir.Node {
- if n.Left().Type().Elem().HasPointers() {
- Curfn.SetWBPos(n.Pos())
- fn := writebarrierfn("typedslicecopy", n.Left().Type().Elem(), n.Right().Type().Elem())
- n.SetLeft(cheapexpr(n.Left(), init))
- ptrL, lenL := backingArrayPtrLen(n.Left())
- n.SetRight(cheapexpr(n.Right(), init))
- ptrR, lenR := backingArrayPtrLen(n.Right())
- return mkcall1(fn, n.Type(), init, typename(n.Left().Type().Elem()), ptrL, lenL, ptrR, lenR)
- }
-
- if runtimecall {
- // rely on runtime to instrument:
- // copy(n.Left, n.Right)
- // n.Right can be a slice or string.
-
- n.SetLeft(cheapexpr(n.Left(), init))
- ptrL, lenL := backingArrayPtrLen(n.Left())
- n.SetRight(cheapexpr(n.Right(), init))
- ptrR, lenR := backingArrayPtrLen(n.Right())
-
- fn := syslook("slicecopy")
- fn = substArgTypes(fn, ptrL.Type().Elem(), ptrR.Type().Elem())
-
- return mkcall1(fn, n.Type(), init, ptrL, lenL, ptrR, lenR, nodintconst(n.Left().Type().Elem().Width))
- }
-
- n.SetLeft(walkexpr(n.Left(), init))
- n.SetRight(walkexpr(n.Right(), init))
- nl := temp(n.Left().Type())
- nr := temp(n.Right().Type())
- var l []ir.Node
- l = append(l, ir.Nod(ir.OAS, nl, n.Left()))
- l = append(l, ir.Nod(ir.OAS, nr, n.Right()))
-
- nfrm := ir.Nod(ir.OSPTR, nr, nil)
- nto := ir.Nod(ir.OSPTR, nl, nil)
-
- nlen := temp(types.Types[types.TINT])
-
- // n = len(to)
- l = append(l, ir.Nod(ir.OAS, nlen, ir.Nod(ir.OLEN, nl, nil)))
-
- // if n > len(frm) { n = len(frm) }
- nif := ir.Nod(ir.OIF, nil, nil)
-
- nif.SetLeft(ir.Nod(ir.OGT, nlen, ir.Nod(ir.OLEN, nr, nil)))
- nif.PtrBody().Append(ir.Nod(ir.OAS, nlen, ir.Nod(ir.OLEN, nr, nil)))
- l = append(l, nif)
-
- // if to.ptr != frm.ptr { memmove( ... ) }
- ne := ir.Nod(ir.OIF, ir.Nod(ir.ONE, nto, nfrm), nil)
- ne.SetLikely(true)
- l = append(l, ne)
-
- fn := syslook("memmove")
- fn = substArgTypes(fn, nl.Type().Elem(), nl.Type().Elem())
- nwid := ir.Node(temp(types.Types[types.TUINTPTR]))
- setwid := ir.Nod(ir.OAS, nwid, conv(nlen, types.Types[types.TUINTPTR]))
- ne.PtrBody().Append(setwid)
- nwid = ir.Nod(ir.OMUL, nwid, nodintconst(nl.Type().Elem().Width))
- call := mkcall1(fn, nil, init, nto, nfrm, nwid)
- ne.PtrBody().Append(call)
-
- typecheckslice(l, ctxStmt)
- walkstmtlist(l)
- init.Append(l...)
- return nlen
-}
-
-func eqfor(t *types.Type) (n ir.Node, needsize bool) {
- // Should only arrive here with large memory or
- // a struct/array containing a non-memory field/element.
- // Small memory is handled inline, and single non-memory
- // is handled by walkcompare.
- switch a, _ := algtype1(t); a {
- case AMEM:
- n := syslook("memequal")
- n = substArgTypes(n, t, t)
- return n, true
- case ASPECIAL:
- sym := typesymprefix(".eq", t)
- n := NewName(sym)
- setNodeNameFunc(n)
- n.SetType(functype(nil, []*ir.Field{
- anonfield(types.NewPtr(t)),
- anonfield(types.NewPtr(t)),
- }, []*ir.Field{
- anonfield(types.Types[types.TBOOL]),
- }))
- return n, false
- }
- base.Fatalf("eqfor %v", t)
- return nil, false
-}
-
-// The result of walkcompare MUST be assigned back to n, e.g.
-// n.Left = walkcompare(n.Left, init)
-func walkcompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
- if n.Left().Type().IsInterface() && n.Right().Type().IsInterface() && n.Left().Op() != ir.ONIL && n.Right().Op() != ir.ONIL {
- return walkcompareInterface(n, init)
- }
-
- if n.Left().Type().IsString() && n.Right().Type().IsString() {
- return walkcompareString(n, init)
- }
-
- n.SetLeft(walkexpr(n.Left(), init))
- n.SetRight(walkexpr(n.Right(), init))
-
- // Given mixed interface/concrete comparison,
- // rewrite into types-equal && data-equal.
- // This is efficient, avoids allocations, and avoids runtime calls.
- if n.Left().Type().IsInterface() != n.Right().Type().IsInterface() {
- // Preserve side-effects in case of short-circuiting; see #32187.
- l := cheapexpr(n.Left(), init)
- r := cheapexpr(n.Right(), init)
- // Swap so that l is the interface value and r is the concrete value.
- if n.Right().Type().IsInterface() {
- l, r = r, l
- }
-
- // Handle both == and !=.
- eq := n.Op()
- andor := ir.OOROR
- if eq == ir.OEQ {
- andor = ir.OANDAND
- }
- // Check for types equal.
- // For empty interface, this is:
- // l.tab == type(r)
- // For non-empty interface, this is:
- // l.tab != nil && l.tab._type == type(r)
- var eqtype ir.Node
- tab := ir.Nod(ir.OITAB, l, nil)
- rtyp := typename(r.Type())
- if l.Type().IsEmptyInterface() {
- tab.SetType(types.NewPtr(types.Types[types.TUINT8]))
- tab.SetTypecheck(1)
- eqtype = ir.NewBinaryExpr(base.Pos, eq, tab, rtyp)
- } else {
- nonnil := ir.NewBinaryExpr(base.Pos, brcom(eq), nodnil(), tab)
- match := ir.NewBinaryExpr(base.Pos, eq, itabType(tab), rtyp)
- eqtype = ir.NewLogicalExpr(base.Pos, andor, nonnil, match)
- }
- // Check for data equal.
- eqdata := ir.NewBinaryExpr(base.Pos, eq, ifaceData(n.Pos(), l, r.Type()), r)
- // Put it all together.
- expr := ir.NewLogicalExpr(base.Pos, andor, eqtype, eqdata)
- return finishcompare(n, expr, init)
- }
-
- // Must be comparison of array or struct.
- // Otherwise back end handles it.
- // While we're here, decide whether to
- // inline or call an eq alg.
- t := n.Left().Type()
- var inline bool
-
- maxcmpsize := int64(4)
- unalignedLoad := canMergeLoads()
- if unalignedLoad {
- // Keep this low enough to generate less code than a function call.
- maxcmpsize = 2 * int64(thearch.LinkArch.RegSize)
- }
-
- switch t.Kind() {
- default:
- if base.Debug.Libfuzzer != 0 && t.IsInteger() {
- n.SetLeft(cheapexpr(n.Left(), init))
- n.SetRight(cheapexpr(n.Right(), init))
-
- // If exactly one comparison operand is
- // constant, invoke the constcmp functions
- // instead, and arrange for the constant
- // operand to be the first argument.
- l, r := n.Left(), n.Right()
- if r.Op() == ir.OLITERAL {
- l, r = r, l
- }
- constcmp := l.Op() == ir.OLITERAL && r.Op() != ir.OLITERAL
-
- var fn string
- var paramType *types.Type
- switch t.Size() {
- case 1:
- fn = "libfuzzerTraceCmp1"
- if constcmp {
- fn = "libfuzzerTraceConstCmp1"
- }
- paramType = types.Types[types.TUINT8]
- case 2:
- fn = "libfuzzerTraceCmp2"
- if constcmp {
- fn = "libfuzzerTraceConstCmp2"
- }
- paramType = types.Types[types.TUINT16]
- case 4:
- fn = "libfuzzerTraceCmp4"
- if constcmp {
- fn = "libfuzzerTraceConstCmp4"
- }
- paramType = types.Types[types.TUINT32]
- case 8:
- fn = "libfuzzerTraceCmp8"
- if constcmp {
- fn = "libfuzzerTraceConstCmp8"
- }
- paramType = types.Types[types.TUINT64]
- default:
- base.Fatalf("unexpected integer size %d for %v", t.Size(), t)
- }
- init.Append(mkcall(fn, nil, init, tracecmpArg(l, paramType, init), tracecmpArg(r, paramType, init)))
- }
- return n
- case types.TARRAY:
- // We can compare several elements at once with 2/4/8 byte integer compares
- inline = t.NumElem() <= 1 || (issimple[t.Elem().Kind()] && (t.NumElem() <= 4 || t.Elem().Width*t.NumElem() <= maxcmpsize))
- case types.TSTRUCT:
- inline = t.NumComponents(types.IgnoreBlankFields) <= 4
- }
-
- cmpl := n.Left()
- for cmpl != nil && cmpl.Op() == ir.OCONVNOP {
- cmpl = cmpl.(*ir.ConvExpr).Left()
- }
- cmpr := n.Right()
- for cmpr != nil && cmpr.Op() == ir.OCONVNOP {
- cmpr = cmpr.(*ir.ConvExpr).Left()
- }
-
- // Chose not to inline. Call equality function directly.
- if !inline {
- // eq algs take pointers; cmpl and cmpr must be addressable
- if !islvalue(cmpl) || !islvalue(cmpr) {
- base.Fatalf("arguments of comparison must be lvalues - %v %v", cmpl, cmpr)
- }
-
- fn, needsize := eqfor(t)
- call := ir.Nod(ir.OCALL, fn, nil)
- call.PtrList().Append(nodAddr(cmpl))
- call.PtrList().Append(nodAddr(cmpr))
- if needsize {
- call.PtrList().Append(nodintconst(t.Width))
- }
- res := ir.Node(call)
- if n.Op() != ir.OEQ {
- res = ir.Nod(ir.ONOT, res, nil)
- }
- return finishcompare(n, res, init)
- }
-
- // inline: build boolean expression comparing element by element
- andor := ir.OANDAND
- if n.Op() == ir.ONE {
- andor = ir.OOROR
- }
- var expr ir.Node
- compare := func(el, er ir.Node) {
- a := ir.NewBinaryExpr(base.Pos, n.Op(), el, er)
- if expr == nil {
- expr = a
- } else {
- expr = ir.NewLogicalExpr(base.Pos, andor, expr, a)
- }
- }
- cmpl = safeexpr(cmpl, init)
- cmpr = safeexpr(cmpr, init)
- if t.IsStruct() {
- for _, f := range t.Fields().Slice() {
- sym := f.Sym
- if sym.IsBlank() {
- continue
- }
- compare(
- nodSym(ir.OXDOT, cmpl, sym),
- nodSym(ir.OXDOT, cmpr, sym),
- )
- }
- } else {
- step := int64(1)
- remains := t.NumElem() * t.Elem().Width
- combine64bit := unalignedLoad && Widthreg == 8 && t.Elem().Width <= 4 && t.Elem().IsInteger()
- combine32bit := unalignedLoad && t.Elem().Width <= 2 && t.Elem().IsInteger()
- combine16bit := unalignedLoad && t.Elem().Width == 1 && t.Elem().IsInteger()
- for i := int64(0); remains > 0; {
- var convType *types.Type
- switch {
- case remains >= 8 && combine64bit:
- convType = types.Types[types.TINT64]
- step = 8 / t.Elem().Width
- case remains >= 4 && combine32bit:
- convType = types.Types[types.TUINT32]
- step = 4 / t.Elem().Width
- case remains >= 2 && combine16bit:
- convType = types.Types[types.TUINT16]
- step = 2 / t.Elem().Width
- default:
- step = 1
- }
- if step == 1 {
- compare(
- ir.Nod(ir.OINDEX, cmpl, nodintconst(i)),
- ir.Nod(ir.OINDEX, cmpr, nodintconst(i)),
- )
- i++
- remains -= t.Elem().Width
- } else {
- elemType := t.Elem().ToUnsigned()
- cmplw := ir.Node(ir.Nod(ir.OINDEX, cmpl, nodintconst(i)))
- cmplw = conv(cmplw, elemType) // convert to unsigned
- cmplw = conv(cmplw, convType) // widen
- cmprw := ir.Node(ir.Nod(ir.OINDEX, cmpr, nodintconst(i)))
- cmprw = conv(cmprw, elemType)
- cmprw = conv(cmprw, convType)
- // For code like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ...
- // ssa will generate a single large load.
- for offset := int64(1); offset < step; offset++ {
- lb := ir.Node(ir.Nod(ir.OINDEX, cmpl, nodintconst(i+offset)))
- lb = conv(lb, elemType)
- lb = conv(lb, convType)
- lb = ir.Nod(ir.OLSH, lb, nodintconst(8*t.Elem().Width*offset))
- cmplw = ir.Nod(ir.OOR, cmplw, lb)
- rb := ir.Node(ir.Nod(ir.OINDEX, cmpr, nodintconst(i+offset)))
- rb = conv(rb, elemType)
- rb = conv(rb, convType)
- rb = ir.Nod(ir.OLSH, rb, nodintconst(8*t.Elem().Width*offset))
- cmprw = ir.Nod(ir.OOR, cmprw, rb)
- }
- compare(cmplw, cmprw)
- i += step
- remains -= step * t.Elem().Width
- }
- }
- }
- if expr == nil {
- expr = nodbool(n.Op() == ir.OEQ)
- // We still need to use cmpl and cmpr, in case they contain
- // an expression which might panic. See issue 23837.
- t := temp(cmpl.Type())
- a1 := typecheck(ir.Nod(ir.OAS, t, cmpl), ctxStmt)
- a2 := typecheck(ir.Nod(ir.OAS, t, cmpr), ctxStmt)
- init.Append(a1, a2)
- }
- return finishcompare(n, expr, init)
-}
-
-func tracecmpArg(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node {
- // Ugly hack to avoid "constant -1 overflows uintptr" errors, etc.
- if n.Op() == ir.OLITERAL && n.Type().IsSigned() && ir.Int64Val(n) < 0 {
- n = copyexpr(n, n.Type(), init)
- }
-
- return conv(n, t)
-}
-
-func walkcompareInterface(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
- n.SetRight(cheapexpr(n.Right(), init))
- n.SetLeft(cheapexpr(n.Left(), init))
- eqtab, eqdata := eqinterface(n.Left(), n.Right())
- var cmp ir.Node
- if n.Op() == ir.OEQ {
- cmp = ir.Nod(ir.OANDAND, eqtab, eqdata)
- } else {
- eqtab.SetOp(ir.ONE)
- cmp = ir.Nod(ir.OOROR, eqtab, ir.Nod(ir.ONOT, eqdata, nil))
- }
- return finishcompare(n, cmp, init)
-}
-
-func walkcompareString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
- // Rewrite comparisons to short constant strings as length+byte-wise comparisons.
- var cs, ncs ir.Node // const string, non-const string
- switch {
- case ir.IsConst(n.Left(), constant.String) && ir.IsConst(n.Right(), constant.String):
- // ignore; will be constant evaluated
- case ir.IsConst(n.Left(), constant.String):
- cs = n.Left()
- ncs = n.Right()
- case ir.IsConst(n.Right(), constant.String):
- cs = n.Right()
- ncs = n.Left()
- }
- if cs != nil {
- cmp := n.Op()
- // Our comparison below assumes that the non-constant string
- // is on the left hand side, so rewrite "" cmp x to x cmp "".
- // See issue 24817.
- if ir.IsConst(n.Left(), constant.String) {
- cmp = brrev(cmp)
- }
-
- // maxRewriteLen was chosen empirically.
- // It is the value that minimizes cmd/go file size
- // across most architectures.
- // See the commit description for CL 26758 for details.
- maxRewriteLen := 6
- // Some architectures can load unaligned byte sequence as 1 word.
- // So we can cover longer strings with the same amount of code.
- canCombineLoads := canMergeLoads()
- combine64bit := false
- if canCombineLoads {
- // Keep this low enough to generate less code than a function call.
- maxRewriteLen = 2 * thearch.LinkArch.RegSize
- combine64bit = thearch.LinkArch.RegSize >= 8
- }
-
- var and ir.Op
- switch cmp {
- case ir.OEQ:
- and = ir.OANDAND
- case ir.ONE:
- and = ir.OOROR
- default:
- // Don't do byte-wise comparisons for <, <=, etc.
- // They're fairly complicated.
- // Length-only checks are ok, though.
- maxRewriteLen = 0
- }
- if s := ir.StringVal(cs); len(s) <= maxRewriteLen {
- if len(s) > 0 {
- ncs = safeexpr(ncs, init)
- }
- r := ir.Node(ir.NewBinaryExpr(base.Pos, cmp, ir.Nod(ir.OLEN, ncs, nil), nodintconst(int64(len(s)))))
- remains := len(s)
- for i := 0; remains > 0; {
- if remains == 1 || !canCombineLoads {
- cb := nodintconst(int64(s[i]))
- ncb := ir.Nod(ir.OINDEX, ncs, nodintconst(int64(i)))
- r = ir.NewLogicalExpr(base.Pos, and, r, ir.NewBinaryExpr(base.Pos, cmp, ncb, cb))
- remains--
- i++
- continue
- }
- var step int
- var convType *types.Type
- switch {
- case remains >= 8 && combine64bit:
- convType = types.Types[types.TINT64]
- step = 8
- case remains >= 4:
- convType = types.Types[types.TUINT32]
- step = 4
- case remains >= 2:
- convType = types.Types[types.TUINT16]
- step = 2
- }
- ncsubstr := conv(ir.Nod(ir.OINDEX, ncs, nodintconst(int64(i))), convType)
- csubstr := int64(s[i])
- // Calculate large constant from bytes as sequence of shifts and ors.
- // Like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ...
- // ssa will combine this into a single large load.
- for offset := 1; offset < step; offset++ {
- b := conv(ir.Nod(ir.OINDEX, ncs, nodintconst(int64(i+offset))), convType)
- b = ir.Nod(ir.OLSH, b, nodintconst(int64(8*offset)))
- ncsubstr = ir.Nod(ir.OOR, ncsubstr, b)
- csubstr |= int64(s[i+offset]) << uint8(8*offset)
- }
- csubstrPart := nodintconst(csubstr)
- // Compare "step" bytes as once
- r = ir.NewLogicalExpr(base.Pos, and, r, ir.NewBinaryExpr(base.Pos, cmp, csubstrPart, ncsubstr))
- remains -= step
- i += step
- }
- return finishcompare(n, r, init)
- }
- }
-
- var r ir.Node
- if n.Op() == ir.OEQ || n.Op() == ir.ONE {
- // prepare for rewrite below
- n.SetLeft(cheapexpr(n.Left(), init))
- n.SetRight(cheapexpr(n.Right(), init))
- eqlen, eqmem := eqstring(n.Left(), n.Right())
- // quick check of len before full compare for == or !=.
- // memequal then tests equality up to length len.
- if n.Op() == ir.OEQ {
- // len(left) == len(right) && memequal(left, right, len)
- r = ir.Nod(ir.OANDAND, eqlen, eqmem)
- } else {
- // len(left) != len(right) || !memequal(left, right, len)
- eqlen.SetOp(ir.ONE)
- r = ir.Nod(ir.OOROR, eqlen, ir.Nod(ir.ONOT, eqmem, nil))
- }
- } else {
- // sys_cmpstring(s1, s2) :: 0
- r = mkcall("cmpstring", types.Types[types.TINT], init, conv(n.Left(), types.Types[types.TSTRING]), conv(n.Right(), types.Types[types.TSTRING]))
- r = ir.NewBinaryExpr(base.Pos, n.Op(), r, nodintconst(0))
- }
-
- return finishcompare(n, r, init)
-}
-
-// The result of finishcompare MUST be assigned back to n, e.g.
-// n.Left = finishcompare(n.Left, x, r, init)
-func finishcompare(n *ir.BinaryExpr, r ir.Node, init *ir.Nodes) ir.Node {
- r = typecheck(r, ctxExpr)
- r = conv(r, n.Type())
- r = walkexpr(r, init)
- return r
-}
-
-// return 1 if integer n must be in range [0, max), 0 otherwise
-func bounded(n ir.Node, max int64) bool {
- if n.Type() == nil || !n.Type().IsInteger() {
- return false
- }
-
- sign := n.Type().IsSigned()
- bits := int32(8 * n.Type().Width)
-
- if smallintconst(n) {
- v := ir.Int64Val(n)
- return 0 <= v && v < max
- }
-
- switch n.Op() {
- case ir.OAND, ir.OANDNOT:
- v := int64(-1)
- switch {
- case smallintconst(n.Left()):
- v = ir.Int64Val(n.Left())
- case smallintconst(n.Right()):
- v = ir.Int64Val(n.Right())
- if n.Op() == ir.OANDNOT {
- v = ^v
- if !sign {
- v &= 1<<uint(bits) - 1
- }
- }
- }
- if 0 <= v && v < max {
- return true
- }
-
- case ir.OMOD:
- if !sign && smallintconst(n.Right()) {
- v := ir.Int64Val(n.Right())
- if 0 <= v && v <= max {
- return true
- }
- }
-
- case ir.ODIV:
- if !sign && smallintconst(n.Right()) {
- v := ir.Int64Val(n.Right())
- for bits > 0 && v >= 2 {
- bits--
- v >>= 1
- }
- }
-
- case ir.ORSH:
- if !sign && smallintconst(n.Right()) {
- v := ir.Int64Val(n.Right())
- if v > int64(bits) {
- return true
- }
- bits -= int32(v)
- }
- }
-
- if !sign && bits <= 62 && 1<<uint(bits) <= max {
- return true
- }
-
- return false
-}
-
-// usemethod checks interface method calls for uses of reflect.Type.Method.
-func usemethod(n *ir.CallExpr) {
- t := n.Left().Type()
-
- // Looking for either of:
- // Method(int) reflect.Method
- // MethodByName(string) (reflect.Method, bool)
- //
- // TODO(crawshaw): improve precision of match by working out
- // how to check the method name.
- if n := t.NumParams(); n != 1 {
- return
- }
- if n := t.NumResults(); n != 1 && n != 2 {
- return
- }
- p0 := t.Params().Field(0)
- res0 := t.Results().Field(0)
- var res1 *types.Field
- if t.NumResults() == 2 {
- res1 = t.Results().Field(1)
- }
-
- if res1 == nil {
- if p0.Type.Kind() != types.TINT {
- return
- }
- } else {
- if !p0.Type.IsString() {
- return
- }
- if !res1.Type.IsBoolean() {
- return
- }
- }
-
- // Note: Don't rely on res0.Type.String() since its formatting depends on multiple factors
- // (including global variables such as numImports - was issue #19028).
- // Also need to check for reflect package itself (see Issue #38515).
- if s := res0.Type.Sym(); s != nil && s.Name == "Method" && isReflectPkg(s.Pkg) {
- Curfn.SetReflectMethod(true)
- // The LSym is initialized at this point. We need to set the attribute on the LSym.
- Curfn.LSym.Set(obj.AttrReflectMethod, true)
- }
-}
-
-func usefield(n *ir.SelectorExpr) {
- if objabi.Fieldtrack_enabled == 0 {
- return
- }
-
- switch n.Op() {
- default:
- base.Fatalf("usefield %v", n.Op())
-
- case ir.ODOT, ir.ODOTPTR:
- break
- }
- if n.Sym() == nil {
- // No field name. This DOTPTR was built by the compiler for access
- // to runtime data structures. Ignore.
- return
- }
-
- t := n.Left().Type()
- if t.IsPtr() {
- t = t.Elem()
- }
- field := n.Selection
- if field == nil {
- base.Fatalf("usefield %v %v without paramfld", n.Left().Type(), n.Sym())
- }
- if field.Sym != n.Sym() || field.Offset != n.Offset() {
- base.Fatalf("field inconsistency: %v,%v != %v,%v", field.Sym, field.Offset, n.Sym(), n.Offset())
- }
- if !strings.Contains(field.Note, "go:\"track\"") {
- return
- }
-
- outer := n.Left().Type()
- if outer.IsPtr() {
- outer = outer.Elem()
- }
- if outer.Sym() == nil {
- base.Errorf("tracked field must be in named struct type")
- }
- if !types.IsExported(field.Sym.Name) {
- base.Errorf("tracked field must be exported (upper case)")
- }
-
- sym := tracksym(outer, field)
- if Curfn.FieldTrack == nil {
- Curfn.FieldTrack = make(map[*types.Sym]struct{})
- }
- Curfn.FieldTrack[sym] = struct{}{}
-}
-
-// anySideEffects reports whether n contains any operations that could have observable side effects.
-func anySideEffects(n ir.Node) bool {
- return ir.Any(n, func(n ir.Node) bool {
- switch n.Op() {
- // Assume side effects unless we know otherwise.
- default:
- return true
-
- // No side effects here (arguments are checked separately).
- case ir.ONAME,
- ir.ONONAME,
- ir.OTYPE,
- ir.OPACK,
- ir.OLITERAL,
- ir.ONIL,
- ir.OADD,
- ir.OSUB,
- ir.OOR,
- ir.OXOR,
- ir.OADDSTR,
- ir.OADDR,
- ir.OANDAND,
- ir.OBYTES2STR,
- ir.ORUNES2STR,
- ir.OSTR2BYTES,
- ir.OSTR2RUNES,
- ir.OCAP,
- ir.OCOMPLIT,
- ir.OMAPLIT,
- ir.OSTRUCTLIT,
- ir.OARRAYLIT,
- ir.OSLICELIT,
- ir.OPTRLIT,
- ir.OCONV,
- ir.OCONVIFACE,
- ir.OCONVNOP,
- ir.ODOT,
- ir.OEQ,
- ir.ONE,
- ir.OLT,
- ir.OLE,
- ir.OGT,
- ir.OGE,
- ir.OKEY,
- ir.OSTRUCTKEY,
- ir.OLEN,
- ir.OMUL,
- ir.OLSH,
- ir.ORSH,
- ir.OAND,
- ir.OANDNOT,
- ir.ONEW,
- ir.ONOT,
- ir.OBITNOT,
- ir.OPLUS,
- ir.ONEG,
- ir.OOROR,
- ir.OPAREN,
- ir.ORUNESTR,
- ir.OREAL,
- ir.OIMAG,
- ir.OCOMPLEX:
- return false
-
- // Only possible side effect is division by zero.
- case ir.ODIV, ir.OMOD:
- if n.Right().Op() != ir.OLITERAL || constant.Sign(n.Right().Val()) == 0 {
- return true
- }
-
- // Only possible side effect is panic on invalid size,
- // but many makechan and makemap use size zero, which is definitely OK.
- case ir.OMAKECHAN, ir.OMAKEMAP:
- if !ir.IsConst(n.Left(), constant.Int) || constant.Sign(n.Left().Val()) != 0 {
- return true
- }
-
- // Only possible side effect is panic on invalid size.
- // TODO(rsc): Merge with previous case (probably breaks toolstash -cmp).
- case ir.OMAKESLICE, ir.OMAKESLICECOPY:
- return true
- }
- return false
- })
-}
-
-// Rewrite
-// go builtin(x, y, z)
-// into
-// go func(a1, a2, a3) {
-// builtin(a1, a2, a3)
-// }(x, y, z)
-// for print, println, and delete.
-//
-// Rewrite
-// go f(x, y, uintptr(unsafe.Pointer(z)))
-// into
-// go func(a1, a2, a3) {
-// builtin(a1, a2, uintptr(a3))
-// }(x, y, unsafe.Pointer(z))
-// for function contains unsafe-uintptr arguments.
-
-var wrapCall_prgen int
-
-// The result of wrapCall MUST be assigned back to n, e.g.
-// n.Left = wrapCall(n.Left, init)
-func wrapCall(n *ir.CallExpr, init *ir.Nodes) ir.Node {
- if n.Init().Len() != 0 {
- walkstmtlist(n.Init().Slice())
- init.AppendNodes(n.PtrInit())
- }
-
- isBuiltinCall := n.Op() != ir.OCALLFUNC && n.Op() != ir.OCALLMETH && n.Op() != ir.OCALLINTER
-
- // Turn f(a, b, []T{c, d, e}...) back into f(a, b, c, d, e).
- if !isBuiltinCall && n.IsDDD() {
- last := n.List().Len() - 1
- if va := n.List().Index(last); va.Op() == ir.OSLICELIT {
- n.PtrList().Set(append(n.List().Slice()[:last], va.List().Slice()...))
- n.SetIsDDD(false)
- }
- }
-
- // origArgs keeps track of what argument is uintptr-unsafe/unsafe-uintptr conversion.
- origArgs := make([]ir.Node, n.List().Len())
- var funcArgs []*ir.Field
- for i, arg := range n.List().Slice() {
- s := lookupN("a", i)
- if !isBuiltinCall && arg.Op() == ir.OCONVNOP && arg.Type().IsUintptr() && arg.(*ir.ConvExpr).Left().Type().IsUnsafePtr() {
- origArgs[i] = arg
- arg = arg.(*ir.ConvExpr).Left()
- n.List().SetIndex(i, arg)
- }
- funcArgs = append(funcArgs, symfield(s, arg.Type()))
- }
- t := ir.NewFuncType(base.Pos, nil, funcArgs, nil)
-
- wrapCall_prgen++
- sym := lookupN("wrap·", wrapCall_prgen)
- fn := dclfunc(sym, t)
-
- args := paramNnames(t.Type())
- for i, origArg := range origArgs {
- if origArg == nil {
- continue
- }
- args[i] = ir.NewConvExpr(base.Pos, origArg.Op(), origArg.Type(), args[i])
- }
- call := ir.NewCallExpr(base.Pos, n.Op(), n.Left(), args)
- if !isBuiltinCall {
- call.SetOp(ir.OCALL)
- call.SetIsDDD(n.IsDDD())
- }
- fn.PtrBody().Set1(call)
-
- funcbody()
-
- typecheckFunc(fn)
- typecheckslice(fn.Body().Slice(), ctxStmt)
- Target.Decls = append(Target.Decls, fn)
-
- call = ir.NewCallExpr(base.Pos, ir.OCALL, fn.Nname, n.List().Slice())
- return walkexpr(typecheck(call, ctxStmt), init)
-}
-
-// substArgTypes substitutes the given list of types for
-// successive occurrences of the "any" placeholder in the
-// type syntax expression n.Type.
-// The result of substArgTypes MUST be assigned back to old, e.g.
-// n.Left = substArgTypes(n.Left, t1, t2)
-func substArgTypes(old *ir.Name, types_ ...*types.Type) *ir.Name {
- n := old.CloneName()
-
- for _, t := range types_ {
- dowidth(t)
- }
- n.SetType(types.SubstAny(n.Type(), &types_))
- if len(types_) > 0 {
- base.Fatalf("substArgTypes: too many argument types")
- }
- return n
-}
-
-// canMergeLoads reports whether the backend optimization passes for
-// the current architecture can combine adjacent loads into a single
-// larger, possibly unaligned, load. Note that currently the
-// optimizations must be able to handle little endian byte order.
-func canMergeLoads() bool {
- switch thearch.LinkArch.Family {
- case sys.ARM64, sys.AMD64, sys.I386, sys.S390X:
- return true
- case sys.PPC64:
- // Load combining only supported on ppc64le.
- return thearch.LinkArch.ByteOrder == binary.LittleEndian
- }
- return false
-}
-
-// isRuneCount reports whether n is of the form len([]rune(string)).
-// These are optimized into a call to runtime.countrunes.
-func isRuneCount(n ir.Node) bool {
- return base.Flag.N == 0 && !instrumenting && n.Op() == ir.OLEN && n.(*ir.UnaryExpr).Left().Op() == ir.OSTR2RUNES
-}
-
-func walkCheckPtrAlignment(n *ir.ConvExpr, init *ir.Nodes, count ir.Node) ir.Node {
- if !n.Type().IsPtr() {
- base.Fatalf("expected pointer type: %v", n.Type())
- }
- elem := n.Type().Elem()
- if count != nil {
- if !elem.IsArray() {
- base.Fatalf("expected array type: %v", elem)
- }
- elem = elem.Elem()
- }
-
- size := elem.Size()
- if elem.Alignment() == 1 && (size == 0 || size == 1 && count == nil) {
- return n
- }
-
- if count == nil {
- count = nodintconst(1)
- }
-
- n.SetLeft(cheapexpr(n.Left(), init))
- init.Append(mkcall("checkptrAlignment", nil, init, convnop(n.Left(), types.Types[types.TUNSAFEPTR]), typename(elem), conv(count, types.Types[types.TUINTPTR])))
- return n
-}
-
-var walkCheckPtrArithmeticMarker byte
-
-func walkCheckPtrArithmetic(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
- // Calling cheapexpr(n, init) below leads to a recursive call
- // to walkexpr, which leads us back here again. Use n.Opt to
- // prevent infinite loops.
- if opt := n.Opt(); opt == &walkCheckPtrArithmeticMarker {
- return n
- } else if opt != nil {
- // We use n.Opt() here because today it's not used for OCONVNOP. If that changes,
- // there's no guarantee that temporarily replacing it is safe, so just hard fail here.
- base.Fatalf("unexpected Opt: %v", opt)
- }
- n.SetOpt(&walkCheckPtrArithmeticMarker)
- defer n.SetOpt(nil)
-
- // TODO(mdempsky): Make stricter. We only need to exempt
- // reflect.Value.Pointer and reflect.Value.UnsafeAddr.
- switch n.Left().Op() {
- case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
- return n
- }
-
- if n.Left().Op() == ir.ODOTPTR && isReflectHeaderDataField(n.Left()) {
- return n
- }
-
- // Find original unsafe.Pointer operands involved in this
- // arithmetic expression.
- //
- // "It is valid both to add and to subtract offsets from a
- // pointer in this way. It is also valid to use &^ to round
- // pointers, usually for alignment."
- var originals []ir.Node
- var walk func(n ir.Node)
- walk = func(n ir.Node) {
- switch n.Op() {
- case ir.OADD:
- walk(n.Left())
- walk(n.Right())
- case ir.OSUB, ir.OANDNOT:
- walk(n.Left())
- case ir.OCONVNOP:
- if n.Left().Type().IsUnsafePtr() {
- n.SetLeft(cheapexpr(n.Left(), init))
- originals = append(originals, convnop(n.Left(), types.Types[types.TUNSAFEPTR]))
- }
- }
- }
- walk(n.Left())
-
- cheap := cheapexpr(n, init)
-
- slice := mkdotargslice(types.NewSlice(types.Types[types.TUNSAFEPTR]), originals)
- slice.SetEsc(EscNone)
-
- init.Append(mkcall("checkptrArithmetic", nil, init, convnop(cheap, types.Types[types.TUNSAFEPTR]), slice))
- // TODO(khr): Mark backing store of slice as dead. This will allow us to reuse
- // the backing store for multiple calls to checkptrArithmetic.
-
- return cheap
-}
-
-// checkPtr reports whether pointer checking should be enabled for
-// function fn at a given level. See debugHelpFooter for defined
-// levels.
-func checkPtr(fn *ir.Func, level int) bool {
- return base.Debug.Checkptr >= level && fn.Pragma&ir.NoCheckPtr == 0
-}
-
-// appendWalkStmt typechecks and walks stmt and then appends it to init.
-func appendWalkStmt(init *ir.Nodes, stmt ir.Node) {
- op := stmt.Op()
- n := typecheck(stmt, ctxStmt)
- if op == ir.OAS || op == ir.OAS2 {
- // If the assignment has side effects, walkexpr will append them
- // directly to init for us, while walkstmt will wrap it in an OBLOCK.
- // We need to append them directly.
- // TODO(rsc): Clean this up.
- n = walkexpr(n, init)
- } else {
- n = walkstmt(n)
- }
- init.Append(n)
-}
// The Debug.m flag enables diagnostic output. a single -m is useful for verifying
// which calls get inlined or not, more is for debugging, and may go away at any point.
-package gc
+package inline
import (
+ "errors"
+ "fmt"
+ "go/constant"
+ "strings"
+
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
+ "cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
- "errors"
- "fmt"
- "go/constant"
- "strings"
)
-// IsIntrinsicCall reports whether the compiler back end will treat the call as an intrinsic operation.
-var IsIntrinsicCall = func(*ir.CallExpr) bool { return false }
-
// Inlining budget parameters, gathered in one place
const (
inlineMaxBudget = 80
func InlinePackage() {
// Find functions that can be inlined and clone them before walk expands them.
- visitBottomUp(Target.Decls, func(list []*ir.Func, recursive bool) {
+ ir.VisitFuncsBottomUp(typecheck.Target.Decls, func(list []*ir.Func, recursive bool) {
numfns := numNonClosures(list)
for _, n := range list {
if !recursive || numfns > 1 {
// We allow inlining if there is no
// recursion, or the recursion cycle is
// across more than one function.
- caninl(n)
+ CanInline(n)
} else {
if base.Flag.LowerM > 1 {
fmt.Printf("%v: cannot inline %v: recursive\n", ir.Line(n), n.Nname)
}
}
- inlcalls(n)
+ InlineCalls(n)
}
})
}
-// Get the function's package. For ordinary functions it's on the ->sym, but for imported methods
-// the ->sym can be re-used in the local package, so peel it off the receiver's type.
-func fnpkg(fn *ir.Name) *types.Pkg {
- if ir.IsMethod(fn) {
- // method
- rcvr := fn.Type().Recv().Type
-
- if rcvr.IsPtr() {
- rcvr = rcvr.Elem()
- }
- if rcvr.Sym() == nil {
- base.Fatalf("receiver with no sym: [%v] %L (%v)", fn.Sym(), fn, rcvr)
- }
- return rcvr.Sym().Pkg
- }
-
- // non-method
- return fn.Sym().Pkg
-}
-
-// Lazy typechecking of imported bodies. For local functions, caninl will set ->typecheck
-// because they're a copy of an already checked body.
-func typecheckinl(fn *ir.Func) {
- lno := setlineno(fn.Nname)
-
- expandInline(fn)
-
- // typecheckinl is only for imported functions;
- // their bodies may refer to unsafe as long as the package
- // was marked safe during import (which was checked then).
- // the ->inl of a local function has been typechecked before caninl copied it.
- pkg := fnpkg(fn.Nname)
-
- if pkg == types.LocalPkg || pkg == nil {
- return // typecheckinl on local function
- }
-
- if base.Flag.LowerM > 2 || base.Debug.Export != 0 {
- fmt.Printf("typecheck import [%v] %L { %v }\n", fn.Sym(), fn, ir.AsNodes(fn.Inl.Body))
- }
-
- savefn := Curfn
- Curfn = fn
- typecheckslice(fn.Inl.Body, ctxStmt)
- Curfn = savefn
-
- // During expandInline (which imports fn.Func.Inl.Body),
- // declarations are added to fn.Func.Dcl by funcHdr(). Move them
- // to fn.Func.Inl.Dcl for consistency with how local functions
- // behave. (Append because typecheckinl may be called multiple
- // times.)
- fn.Inl.Dcl = append(fn.Inl.Dcl, fn.Dcl...)
- fn.Dcl = nil
-
- base.Pos = lno
-}
-
-// Caninl determines whether fn is inlineable.
-// If so, caninl saves fn->nbody in fn->inl and substitutes it with a copy.
+// CanInline determines whether fn is inlineable.
+// If so, CanInline saves fn->nbody in fn->inl and substitutes it with a copy.
// fn and ->nbody will already have been typechecked.
-func caninl(fn *ir.Func) {
+func CanInline(fn *ir.Func) {
if fn.Nname == nil {
base.Fatalf("caninl no nname %+v", fn)
}
}
// If fn has no body (is defined outside of Go), cannot inline it.
- if fn.Body().Len() == 0 {
+ if len(fn.Body) == 0 {
reason = "no function body"
return
}
}
n := fn.Nname
- if n.Func().InlinabilityChecked() {
+ if n.Func.InlinabilityChecked() {
return
}
- defer n.Func().SetInlinabilityChecked(true)
+ defer n.Func.SetInlinabilityChecked(true)
cc := int32(inlineExtraCallCost)
if base.Flag.LowerL == 4 {
return
}
- n.Func().Inl = &ir.Inline{
+ n.Func.Inl = &ir.Inline{
Cost: inlineMaxBudget - visitor.budget,
- Dcl: pruneUnusedAutos(n.Defn.(*ir.Func).Func().Dcl, &visitor),
- Body: ir.DeepCopyList(src.NoXPos, fn.Body().Slice()),
+ Dcl: pruneUnusedAutos(n.Defn.(*ir.Func).Dcl, &visitor),
+ Body: ir.DeepCopyList(src.NoXPos, fn.Body),
}
if base.Flag.LowerM > 1 {
- fmt.Printf("%v: can inline %v with cost %d as: %v { %v }\n", ir.Line(fn), n, inlineMaxBudget-visitor.budget, fn.Type(), ir.AsNodes(n.Func().Inl.Body))
+ fmt.Printf("%v: can inline %v with cost %d as: %v { %v }\n", ir.Line(fn), n, inlineMaxBudget-visitor.budget, fn.Type(), ir.Nodes(n.Func.Inl.Body))
} else if base.Flag.LowerM != 0 {
fmt.Printf("%v: can inline %v\n", ir.Line(fn), n)
}
}
}
-// inlFlood marks n's inline body for export and recursively ensures
+// Inline_Flood marks n's inline body for export and recursively ensures
// all called functions are marked too.
-func inlFlood(n *ir.Name, exportsym func(*ir.Name)) {
+func Inline_Flood(n *ir.Name, exportsym func(*ir.Name)) {
if n == nil {
return
}
- if n.Op() != ir.ONAME || n.Class() != ir.PFUNC {
- base.Fatalf("inlFlood: unexpected %v, %v, %v", n, n.Op(), n.Class())
+ if n.Op() != ir.ONAME || n.Class_ != ir.PFUNC {
+ base.Fatalf("inlFlood: unexpected %v, %v, %v", n, n.Op(), n.Class_)
}
- fn := n.Func()
+ fn := n.Func
if fn == nil {
base.Fatalf("inlFlood: missing Func on %v", n)
}
}
fn.SetExportInline(true)
- typecheckinl(fn)
+ typecheck.ImportedBody(fn)
// Recursively identify all referenced functions for
// reexport. We want to include even non-called functions,
// because after inlining they might be callable.
- ir.VisitList(ir.AsNodes(fn.Inl.Body), func(n ir.Node) {
+ ir.VisitList(ir.Nodes(fn.Inl.Body), func(n ir.Node) {
switch n.Op() {
case ir.OMETHEXPR, ir.ODOTMETH:
- inlFlood(methodExprName(n), exportsym)
+ Inline_Flood(ir.MethodExprName(n), exportsym)
case ir.ONAME:
n := n.(*ir.Name)
- switch n.Class() {
+ switch n.Class_ {
case ir.PFUNC:
- inlFlood(n, exportsym)
+ Inline_Flood(n, exportsym)
exportsym(n)
case ir.PEXTERN:
exportsym(n)
// because getcaller{pc,sp} expect a pointer to the caller's first argument.
//
// runtime.throw is a "cheap call" like panic in normal code.
- if n.Left().Op() == ir.ONAME {
- name := n.Left().(*ir.Name)
- if name.Class() == ir.PFUNC && isRuntimePkg(name.Sym().Pkg) {
+ if n.X.Op() == ir.ONAME {
+ name := n.X.(*ir.Name)
+ if name.Class_ == ir.PFUNC && types.IsRuntimePkg(name.Sym().Pkg) {
fn := name.Sym().Name
if fn == "getcallerpc" || fn == "getcallersp" {
return errors.New("call to " + fn)
}
}
- if IsIntrinsicCall(n) {
+ if ir.IsIntrinsicCall(n) {
// Treat like any other node.
break
}
- if fn := inlCallee(n.Left()); fn != nil && fn.Inl != nil {
+ if fn := inlCallee(n.X); fn != nil && fn.Inl != nil {
v.budget -= fn.Inl.Cost
break
}
// Call is okay if inlinable and we have the budget for the body.
case ir.OCALLMETH:
- t := n.Left().Type()
+ n := n.(*ir.CallExpr)
+ t := n.X.Type()
if t == nil {
- base.Fatalf("no function type for [%p] %+v\n", n.Left(), n.Left())
+ base.Fatalf("no function type for [%p] %+v\n", n.X, n.X)
}
- if isRuntimePkg(n.Left().Sym().Pkg) {
- fn := n.Left().Sym().Name
+ if types.IsRuntimePkg(n.X.Sym().Pkg) {
+ fn := n.X.Sym().Name
if fn == "heapBits.nextArena" {
// Special case: explicitly allow
// mid-stack inlining of
break
}
}
- if inlfn := methodExprName(n.Left()).Func(); inlfn.Inl != nil {
+ if inlfn := ir.MethodExprName(n.X).Func; inlfn.Inl != nil {
v.budget -= inlfn.Inl.Cost
break
}
return nil
case ir.OFOR, ir.OFORUNTIL:
- if n.Sym() != nil {
+ n := n.(*ir.ForStmt)
+ if n.Label != nil {
return errors.New("labeled control")
}
case ir.OSWITCH:
- if n.Sym() != nil {
+ n := n.(*ir.SwitchStmt)
+ if n.Label != nil {
return errors.New("labeled control")
}
// case ir.ORANGE, ir.OSELECT in "unhandled" above
case ir.OBREAK, ir.OCONTINUE:
- if n.Sym() != nil {
+ n := n.(*ir.BranchStmt)
+ if n.Label != nil {
// Should have short-circuited due to labeled control error above.
base.Fatalf("unexpected labeled break/continue: %v", n)
}
case ir.OIF:
- if ir.IsConst(n.Left(), constant.Bool) {
+ n := n.(*ir.IfStmt)
+ if ir.IsConst(n.Cond, constant.Bool) {
// This if and the condition cost nothing.
// TODO(rsc): It seems strange that we visit the dead branch.
if err := ir.DoList(n.Init(), v.do); err != nil {
return err
}
- if err := ir.DoList(n.Body(), v.do); err != nil {
+ if err := ir.DoList(n.Body, v.do); err != nil {
return err
}
- if err := ir.DoList(n.Rlist(), v.do); err != nil {
+ if err := ir.DoList(n.Else, v.do); err != nil {
return err
}
return nil
case ir.ONAME:
n := n.(*ir.Name)
- if n.Class() == ir.PAUTO {
+ if n.Class_ == ir.PAUTO {
v.usedLocals[n] = true
}
-// Inlcalls/nodelist/node walks fn's statements and expressions and substitutes any
-// calls made to inlineable functions. This is the external entry point.
+// InlineCalls/inlnode walks fn's statements and expressions and substitutes any
+// calls made to inlineable functions. This is the external entry point.
-func inlcalls(fn *ir.Func) {
- savefn := Curfn
- Curfn = fn
+func InlineCalls(fn *ir.Func) {
+ savefn := ir.CurFunc
+ ir.CurFunc = fn
maxCost := int32(inlineMaxBudget)
if isBigFunc(fn) {
maxCost = inlineBigFunctionMaxCost
return inlnode(n, maxCost, inlMap, edit)
}
ir.EditChildren(fn, edit)
- Curfn = savefn
+ ir.CurFunc = savefn
}
// Turn an OINLCALL into a statement.
func inlconv2stmt(inlcall *ir.InlinedCallExpr) ir.Node {
- n := ir.NodAt(inlcall.Pos(), ir.OBLOCK, nil, nil)
- n.SetList(inlcall.Init())
- n.PtrList().AppendNodes(inlcall.PtrBody())
+ n := ir.NewBlockStmt(inlcall.Pos(), nil)
+ n.List = inlcall.Init()
+ n.List.Append(inlcall.Body.Take()...)
return n
}
// The result of inlconv2expr MUST be assigned back to n, e.g.
// n.Left = inlconv2expr(n.Left)
func inlconv2expr(n *ir.InlinedCallExpr) ir.Node {
- r := n.Rlist().First()
- return initExpr(append(n.Init().Slice(), n.Body().Slice()...), r)
+ r := n.ReturnVars[0]
+ return ir.InitExpr(append(n.Init(), n.Body...), r)
}
// Turn the rlist (with the return values) of the OINLCALL in
// order will be preserved. Used in return, oas2func and call
// statements.
func inlconv2list(n *ir.InlinedCallExpr) []ir.Node {
- if n.Op() != ir.OINLCALL || n.Rlist().Len() == 0 {
+ if n.Op() != ir.OINLCALL || len(n.ReturnVars) == 0 {
base.Fatalf("inlconv2list %+v\n", n)
}
- s := n.Rlist().Slice()
- s[0] = initExpr(append(n.Init().Slice(), n.Body().Slice()...), s[0])
+ s := n.ReturnVars
+ s[0] = ir.InitExpr(append(n.Init(), n.Body...), s[0])
return s
}
switch n.Op() {
case ir.ODEFER, ir.OGO:
- switch call := n.Left(); call.Op() {
+ n := n.(*ir.GoDeferStmt)
+ switch call := n.Call; call.Op() {
case ir.OCALLFUNC, ir.OCALLMETH:
- call.SetNoInline(true)
+ call := call.(*ir.CallExpr)
+ call.NoInline = true
}
// TODO do them here (or earlier),
case ir.OCALLMETH:
// Prevent inlining some reflect.Value methods when using checkptr,
// even when package reflect was compiled without it (#35073).
- if s := n.Left().Sym(); base.Debug.Checkptr != 0 && isReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") {
+ n := n.(*ir.CallExpr)
+ if s := n.X.Sym(); base.Debug.Checkptr != 0 && types.IsReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") {
return n
}
}
- lno := setlineno(n)
+ lno := ir.SetPos(n)
ir.EditChildren(n, edit)
if as := n; as.Op() == ir.OAS2FUNC {
- if as.Rlist().First().Op() == ir.OINLCALL {
- as.PtrRlist().Set(inlconv2list(as.Rlist().First().(*ir.InlinedCallExpr)))
+ as := as.(*ir.AssignListStmt)
+ if as.Rhs[0].Op() == ir.OINLCALL {
+ as.Rhs.Set(inlconv2list(as.Rhs[0].(*ir.InlinedCallExpr)))
as.SetOp(ir.OAS2)
as.SetTypecheck(0)
- n = typecheck(as, ctxStmt)
+ n = typecheck.Stmt(as)
}
}
// switch at the top of this function.
switch n.Op() {
case ir.OCALLFUNC, ir.OCALLMETH:
- if n.NoInline() {
+ n := n.(*ir.CallExpr)
+ if n.NoInline {
return n
}
}
case ir.OCALLFUNC:
call = n.(*ir.CallExpr)
if base.Flag.LowerM > 3 {
- fmt.Printf("%v:call to func %+v\n", ir.Line(n), call.Left())
+ fmt.Printf("%v:call to func %+v\n", ir.Line(n), call.X)
}
- if IsIntrinsicCall(call) {
+ if ir.IsIntrinsicCall(call) {
break
}
- if fn := inlCallee(call.Left()); fn != nil && fn.Inl != nil {
+ if fn := inlCallee(call.X); fn != nil && fn.Inl != nil {
n = mkinlcall(call, fn, maxCost, inlMap, edit)
}
case ir.OCALLMETH:
call = n.(*ir.CallExpr)
if base.Flag.LowerM > 3 {
- fmt.Printf("%v:call to meth %v\n", ir.Line(n), call.Left().(*ir.SelectorExpr).Sel)
+ fmt.Printf("%v:call to meth %v\n", ir.Line(n), call.X.(*ir.SelectorExpr).Sel)
}
// typecheck should have resolved ODOTMETH->type, whose nname points to the actual function.
- if call.Left().Type() == nil {
- base.Fatalf("no function type for [%p] %+v\n", call.Left(), call.Left())
+ if call.X.Type() == nil {
+ base.Fatalf("no function type for [%p] %+v\n", call.X, call.X)
}
- n = mkinlcall(call, methodExprName(call.Left()).Func(), maxCost, inlMap, edit)
+ n = mkinlcall(call, ir.MethodExprName(call.X).Func, maxCost, inlMap, edit)
}
base.Pos = lno
// inlCallee takes a function-typed expression and returns the underlying function ONAME
// that it refers to if statically known. Otherwise, it returns nil.
func inlCallee(fn ir.Node) *ir.Func {
- fn = staticValue(fn)
+ fn = ir.StaticValue(fn)
switch fn.Op() {
case ir.OMETHEXPR:
fn := fn.(*ir.MethodExpr)
- n := methodExprName(fn)
+ n := ir.MethodExprName(fn)
// Check that receiver type matches fn.Left.
// TODO(mdempsky): Handle implicit dereference
// of pointer receiver argument?
if n == nil || !types.Identical(n.Type().Recv().Type, fn.T) {
return nil
}
- return n.Func()
+ return n.Func
case ir.ONAME:
- if fn.Class() == ir.PFUNC {
- return fn.Func()
+ fn := fn.(*ir.Name)
+ if fn.Class_ == ir.PFUNC {
+ return fn.Func
}
case ir.OCLOSURE:
fn := fn.(*ir.ClosureExpr)
- c := fn.Func()
- caninl(c)
+ c := fn.Func
+ CanInline(c)
return c
}
return nil
}
-func staticValue(n ir.Node) ir.Node {
- for {
- if n.Op() == ir.OCONVNOP {
- n = n.(*ir.ConvExpr).Left()
- continue
- }
-
- n1 := staticValue1(n)
- if n1 == nil {
- return n
- }
- n = n1
- }
-}
-
-// staticValue1 implements a simple SSA-like optimization. If n is a local variable
-// that is initialized and never reassigned, staticValue1 returns the initializer
-// expression. Otherwise, it returns nil.
-func staticValue1(nn ir.Node) ir.Node {
- if nn.Op() != ir.ONAME {
- return nil
- }
- n := nn.(*ir.Name)
- if n.Class() != ir.PAUTO || n.Name().Addrtaken() {
- return nil
- }
-
- defn := n.Name().Defn
- if defn == nil {
- return nil
- }
-
- var rhs ir.Node
-FindRHS:
- switch defn.Op() {
- case ir.OAS:
- rhs = defn.Right()
- case ir.OAS2:
- for i, lhs := range defn.List().Slice() {
- if lhs == n {
- rhs = defn.Rlist().Index(i)
- break FindRHS
- }
- }
- base.Fatalf("%v missing from LHS of %v", n, defn)
- default:
- return nil
- }
- if rhs == nil {
- base.Fatalf("RHS is nil: %v", defn)
- }
-
- if reassigned(n) {
- return nil
- }
-
- return rhs
-}
-
-// reassigned takes an ONAME node, walks the function in which it is defined, and returns a boolean
-// indicating whether the name has any assignments other than its declaration.
-// The second return value is the first such assignment encountered in the walk, if any. It is mostly
-// useful for -m output documenting the reason for inhibited optimizations.
-// NB: global variables are always considered to be re-assigned.
-// TODO: handle initial declaration not including an assignment and followed by a single assignment?
-func reassigned(name *ir.Name) bool {
- if name.Op() != ir.ONAME {
- base.Fatalf("reassigned %v", name)
- }
- // no way to reliably check for no-reassignment of globals, assume it can be
- if name.Curfn == nil {
- return true
- }
- return ir.Any(name.Curfn, func(n ir.Node) bool {
- switch n.Op() {
- case ir.OAS:
- if n.Left() == name && n != name.Defn {
- return true
- }
- case ir.OAS2, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2DOTTYPE, ir.OAS2RECV, ir.OSELRECV2:
- for _, p := range n.List().Slice() {
- if p == name && n != name.Defn {
- return true
- }
- }
- }
- return false
- })
-}
-
func inlParam(t *types.Field, as ir.Node, inlvars map[*ir.Name]ir.Node) ir.Node {
n := ir.AsNode(t.Nname)
if n == nil || ir.IsBlank(n) {
if inlvar == nil {
base.Fatalf("missing inlvar for %v", n)
}
- as.PtrInit().Append(ir.Nod(ir.ODCL, inlvar, nil))
+ as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, inlvar))
inlvar.Name().Defn = as
return inlvar
}
func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.Node) ir.Node) ir.Node {
if fn.Inl == nil {
if logopt.Enabled() {
- logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(Curfn),
+ logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(ir.CurFunc),
fmt.Sprintf("%s cannot be inlined", ir.PkgFuncName(fn)))
}
return n
// The inlined function body is too big. Typically we use this check to restrict
// inlining into very big functions. See issue 26546 and 17566.
if logopt.Enabled() {
- logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(Curfn),
+ logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(ir.CurFunc),
fmt.Sprintf("cost %d of %s exceeds max large caller cost %d", fn.Inl.Cost, ir.PkgFuncName(fn), maxCost))
}
return n
}
- if fn == Curfn {
+ if fn == ir.CurFunc {
// Can't recursively inline a function into itself.
if logopt.Enabled() {
- logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", fmt.Sprintf("recursive call to %s", ir.FuncName(Curfn)))
+ logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", fmt.Sprintf("recursive call to %s", ir.FuncName(ir.CurFunc)))
}
return n
}
- if instrumenting && isRuntimePkg(fn.Sym().Pkg) {
+ if base.Flag.Cfg.Instrumenting && types.IsRuntimePkg(fn.Sym().Pkg) {
// Runtime package must not be instrumented.
// Instrument skips runtime package. However, some runtime code can be
// inlined into other packages and instrumented there. To avoid this,
if inlMap[fn] {
if base.Flag.LowerM > 1 {
- fmt.Printf("%v: cannot inline %v into %v: repeated recursive cycle\n", ir.Line(n), fn, ir.FuncName(Curfn))
+ fmt.Printf("%v: cannot inline %v into %v: repeated recursive cycle\n", ir.Line(n), fn, ir.FuncName(ir.CurFunc))
}
return n
}
inlMap[fn] = false
}()
if base.Debug.TypecheckInl == 0 {
- typecheckinl(fn)
+ typecheck.ImportedBody(fn)
}
// We have a function node, and it has an inlineable body.
if base.Flag.LowerM > 1 {
- fmt.Printf("%v: inlining call to %v %v { %v }\n", ir.Line(n), fn.Sym(), fn.Type(), ir.AsNodes(fn.Inl.Body))
+ fmt.Printf("%v: inlining call to %v %v { %v }\n", ir.Line(n), fn.Sym(), fn.Type(), ir.Nodes(fn.Inl.Body))
} else if base.Flag.LowerM != 0 {
fmt.Printf("%v: inlining call to %v\n", ir.Line(n), fn)
}
// inlconv2expr or inlconv2list). Make sure to preserve these,
// if necessary (#42703).
if n.Op() == ir.OCALLFUNC {
- callee := n.Left()
+ callee := n.X
for callee.Op() == ir.OCONVNOP {
conv := callee.(*ir.ConvExpr)
- ninit.AppendNodes(conv.PtrInit())
- callee = conv.Left()
+ ninit.Append(conv.PtrInit().Take()...)
+ callee = conv.X
}
if callee.Op() != ir.ONAME && callee.Op() != ir.OCLOSURE && callee.Op() != ir.OMETHEXPR {
base.Fatalf("unexpected callee expression: %v", callee)
// NB: if we enabled inlining of functions containing OCLOSURE or refined
// the reassigned check via some sort of copy propagation this would most
// likely need to be changed to a loop to walk up to the correct Param
- if o == nil || o.Curfn != Curfn {
+ if o == nil || o.Curfn != ir.CurFunc {
base.Fatalf("%v: unresolvable capture %v %v\n", ir.Line(n), fn, v)
}
if v.Byval() {
- iv := typecheck(inlvar(v), ctxExpr)
- ninit.Append(ir.Nod(ir.ODCL, iv, nil))
- ninit.Append(typecheck(ir.Nod(ir.OAS, iv, o), ctxStmt))
+ iv := typecheck.Expr(inlvar(v))
+ ninit.Append(ir.NewDecl(base.Pos, ir.ODCL, iv))
+ ninit.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, iv, o)))
inlvars[v] = iv
} else {
- addr := NewName(lookup("&" + v.Sym().Name))
+ addr := typecheck.NewName(typecheck.Lookup("&" + v.Sym().Name))
addr.SetType(types.NewPtr(v.Type()))
- ia := typecheck(inlvar(addr), ctxExpr)
- ninit.Append(ir.Nod(ir.ODCL, ia, nil))
- ninit.Append(typecheck(ir.Nod(ir.OAS, ia, nodAddr(o)), ctxStmt))
+ ia := typecheck.Expr(inlvar(addr))
+ ninit.Append(ir.NewDecl(base.Pos, ir.ODCL, ia))
+ ninit.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, ia, typecheck.NodAddr(o))))
inlvars[addr] = ia
// When capturing by reference, all occurrence of the captured var
// must be substituted with dereference of the temporary address
- inlvars[v] = typecheck(ir.Nod(ir.ODEREF, ia, nil), ctxExpr)
+ inlvars[v] = typecheck.Expr(ir.NewStarExpr(base.Pos, ia))
}
}
}
if ln.Op() != ir.ONAME {
continue
}
- if ln.Class() == ir.PPARAMOUT { // return values handled below.
+ if ln.Class_ == ir.PPARAMOUT { // return values handled below.
continue
}
- if isParamStackCopy(ln) { // ignore the on-stack copy of a parameter that moved to the heap
+ if ir.IsParamStackCopy(ln) { // ignore the on-stack copy of a parameter that moved to the heap
// TODO(mdempsky): Remove once I'm confident
// this never actually happens. We currently
// perform inlining before escape analysis, so
// nothing should have moved to the heap yet.
base.Fatalf("impossible: %v", ln)
}
- inlf := typecheck(inlvar(ln), ctxExpr)
+ inlf := typecheck.Expr(inlvar(ln))
inlvars[ln] = inlf
if base.Flag.GenDwarfInl > 0 {
- if ln.Class() == ir.PPARAM {
+ if ln.Class_ == ir.PPARAM {
inlf.Name().SetInlFormal(true)
} else {
inlf.Name().SetInlLocal(true)
}
nreturns := 0
- ir.VisitList(ir.AsNodes(fn.Inl.Body), func(n ir.Node) {
+ ir.VisitList(ir.Nodes(fn.Inl.Body), func(n ir.Node) {
if n != nil && n.Op() == ir.ORETURN {
nreturns++
}
if n := ir.AsNode(t.Nname); n != nil && !ir.IsBlank(n) && !strings.HasPrefix(n.Sym().Name, "~r") {
n := n.(*ir.Name)
m = inlvar(n)
- m = typecheck(m, ctxExpr)
+ m = typecheck.Expr(m)
inlvars[n] = m
delayretvars = false // found a named result parameter
} else {
}
// Assign arguments to the parameters' temp names.
- as := ir.Nod(ir.OAS2, nil, nil)
- as.SetColas(true)
+ as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
+ as.Def = true
if n.Op() == ir.OCALLMETH {
- sel := n.Left().(*ir.SelectorExpr)
- if sel.Left() == nil {
+ sel := n.X.(*ir.SelectorExpr)
+ if sel.X == nil {
base.Fatalf("method call without receiver: %+v", n)
}
- as.PtrRlist().Append(sel.Left())
+ as.Rhs.Append(sel.X)
}
- as.PtrRlist().Append(n.List().Slice()...)
+ as.Rhs.Append(n.Args...)
// For non-dotted calls to variadic functions, we assign the
// variadic parameter's temp name separately.
var vas *ir.AssignStmt
if recv := fn.Type().Recv(); recv != nil {
- as.PtrList().Append(inlParam(recv, as, inlvars))
+ as.Lhs.Append(inlParam(recv, as, inlvars))
}
for _, param := range fn.Type().Params().Fields().Slice() {
// For ordinary parameters or variadic parameters in
// dotted calls, just add the variable to the
// assignment list, and we're done.
- if !param.IsDDD() || n.IsDDD() {
- as.PtrList().Append(inlParam(param, as, inlvars))
+ if !param.IsDDD() || n.IsDDD {
+ as.Lhs.Append(inlParam(param, as, inlvars))
continue
}
// Otherwise, we need to collect the remaining values
// to pass as a slice.
- x := as.List().Len()
- for as.List().Len() < as.Rlist().Len() {
- as.PtrList().Append(argvar(param.Type, as.List().Len()))
+ x := len(as.Lhs)
+ for len(as.Lhs) < len(as.Rhs) {
+ as.Lhs.Append(argvar(param.Type, len(as.Lhs)))
}
- varargs := as.List().Slice()[x:]
+ varargs := as.Lhs[x:]
vas = ir.NewAssignStmt(base.Pos, nil, nil)
- vas.SetLeft(inlParam(param, vas, inlvars))
+ vas.X = inlParam(param, vas, inlvars)
if len(varargs) == 0 {
- vas.SetRight(nodnil())
- vas.Right().SetType(param.Type)
+ vas.Y = typecheck.NodNil()
+ vas.Y.SetType(param.Type)
} else {
- lit := ir.Nod(ir.OCOMPLIT, nil, ir.TypeNode(param.Type))
- lit.PtrList().Set(varargs)
- vas.SetRight(lit)
+ lit := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(param.Type).(ir.Ntype), nil)
+ lit.List.Set(varargs)
+ vas.Y = lit
}
}
- if as.Rlist().Len() != 0 {
- ninit.Append(typecheck(as, ctxStmt))
+ if len(as.Rhs) != 0 {
+ ninit.Append(typecheck.Stmt(as))
}
if vas != nil {
- ninit.Append(typecheck(vas, ctxStmt))
+ ninit.Append(typecheck.Stmt(vas))
}
if !delayretvars {
// Zero the return parameters.
for _, n := range retvars {
- ninit.Append(ir.Nod(ir.ODCL, n, nil))
- ras := ir.Nod(ir.OAS, n, nil)
- ninit.Append(typecheck(ras, ctxStmt))
+ ninit.Append(ir.NewDecl(base.Pos, ir.ODCL, n))
+ ras := ir.NewAssignStmt(base.Pos, n, nil)
+ ninit.Append(typecheck.Stmt(ras))
}
}
- retlabel := autolabel(".i")
+ retlabel := typecheck.AutoLabel(".i")
inlgen++
// to put a breakpoint. Not sure if that's really necessary or not
// (in which case it could go at the end of the function instead).
// Note issue 28603.
- inlMark := ir.Nod(ir.OINLMARK, nil, nil)
+ inlMark := ir.NewInlineMarkStmt(base.Pos, types.BADWIDTH)
inlMark.SetPos(n.Pos().WithIsStmt())
- inlMark.SetOffset(int64(newIndex))
+ inlMark.Index = int64(newIndex)
ninit.Append(inlMark)
if base.Flag.GenDwarfInl > 0 {
}
subst.edit = subst.node
- body := subst.list(ir.AsNodes(fn.Inl.Body))
+ body := subst.list(ir.Nodes(fn.Inl.Body))
- lab := nodSym(ir.OLABEL, nil, retlabel)
+ lab := ir.NewLabelStmt(base.Pos, retlabel)
body = append(body, lab)
- typecheckslice(body, ctxStmt)
+ typecheck.Stmts(body)
if base.Flag.GenDwarfInl > 0 {
for _, v := range inlfvars {
//dumplist("ninit post", ninit);
- call := ir.Nod(ir.OINLCALL, nil, nil)
- call.PtrInit().Set(ninit.Slice())
- call.PtrBody().Set(body)
- call.PtrRlist().Set(retvars)
+ call := ir.NewInlinedCallExpr(base.Pos, nil, nil)
+ call.PtrInit().Set(ninit)
+ call.Body.Set(body)
+ call.ReturnVars.Set(retvars)
call.SetType(n.Type())
call.SetTypecheck(1)
fmt.Printf("inlvar %+v\n", var_)
}
- n := NewName(var_.Sym())
+ n := typecheck.NewName(var_.Sym())
n.SetType(var_.Type())
- n.SetClass(ir.PAUTO)
+ n.Class_ = ir.PAUTO
n.SetUsed(true)
- n.Curfn = Curfn // the calling function, not the called one
+ n.Curfn = ir.CurFunc // the calling function, not the called one
n.SetAddrtaken(var_.Name().Addrtaken())
- Curfn.Dcl = append(Curfn.Dcl, n)
+ ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n)
return n
}
// Synthesize a variable to store the inlined function's results in.
func retvar(t *types.Field, i int) ir.Node {
- n := NewName(lookupN("~R", i))
+ n := typecheck.NewName(typecheck.LookupNum("~R", i))
n.SetType(t.Type)
- n.SetClass(ir.PAUTO)
+ n.Class_ = ir.PAUTO
n.SetUsed(true)
- n.Curfn = Curfn // the calling function, not the called one
- Curfn.Dcl = append(Curfn.Dcl, n)
+ n.Curfn = ir.CurFunc // the calling function, not the called one
+ ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n)
return n
}
// Synthesize a variable to store the inlined function's arguments
// when they come from a multiple return call.
func argvar(t *types.Type, i int) ir.Node {
- n := NewName(lookupN("~arg", i))
+ n := typecheck.NewName(typecheck.LookupNum("~arg", i))
n.SetType(t.Elem())
- n.SetClass(ir.PAUTO)
+ n.Class_ = ir.PAUTO
n.SetUsed(true)
- n.Curfn = Curfn // the calling function, not the called one
- Curfn.Dcl = append(Curfn.Dcl, n)
+ n.Curfn = ir.CurFunc // the calling function, not the called one
+ ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n)
return n
}
// list inlines a list of nodes.
func (subst *inlsubst) list(ll ir.Nodes) []ir.Node {
- s := make([]ir.Node, 0, ll.Len())
- for _, n := range ll.Slice() {
+ s := make([]ir.Node, 0, len(ll))
+ for _, n := range ll {
s = append(s, subst.node(n))
}
return s
return n
case ir.OMETHEXPR:
+ n := n.(*ir.MethodExpr)
return n
case ir.OLITERAL, ir.ONIL, ir.OTYPE:
case ir.ORETURN:
// Since we don't handle bodies with closures,
// this return is guaranteed to belong to the current inlined function.
+ n := n.(*ir.ReturnStmt)
init := subst.list(n.Init())
- if len(subst.retvars) != 0 && n.List().Len() != 0 {
- as := ir.Nod(ir.OAS2, nil, nil)
+ if len(subst.retvars) != 0 && len(n.Results) != 0 {
+ as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
// Make a shallow copy of retvars.
// Otherwise OINLCALL.Rlist will be the same list,
// and later walk and typecheck may clobber it.
for _, n := range subst.retvars {
- as.PtrList().Append(n)
+ as.Lhs.Append(n)
}
- as.PtrRlist().Set(subst.list(n.List()))
+ as.Rhs.Set(subst.list(n.Results))
if subst.delayretvars {
- for _, n := range as.List().Slice() {
- as.PtrInit().Append(ir.Nod(ir.ODCL, n, nil))
+ for _, n := range as.Lhs {
+ as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, n))
n.Name().Defn = as
}
}
- init = append(init, typecheck(as, ctxStmt))
+ init = append(init, typecheck.Stmt(as))
}
- init = append(init, nodSym(ir.OGOTO, nil, subst.retlabel))
- typecheckslice(init, ctxStmt)
+ init = append(init, ir.NewBranchStmt(base.Pos, ir.OGOTO, subst.retlabel))
+ typecheck.Stmts(init)
return ir.NewBlockStmt(base.Pos, init)
case ir.OGOTO:
+ n := n.(*ir.BranchStmt)
m := ir.Copy(n).(*ir.BranchStmt)
m.SetPos(subst.updatedPos(m.Pos()))
m.PtrInit().Set(nil)
- p := fmt.Sprintf("%s·%d", n.Sym().Name, inlgen)
- m.SetSym(lookup(p))
+ p := fmt.Sprintf("%s·%d", n.Label.Name, inlgen)
+ m.Label = typecheck.Lookup(p)
return m
case ir.OLABEL:
+ n := n.(*ir.LabelStmt)
m := ir.Copy(n).(*ir.LabelStmt)
m.SetPos(subst.updatedPos(m.Pos()))
m.PtrInit().Set(nil)
- p := fmt.Sprintf("%s·%d", n.Sym().Name, inlgen)
- m.SetSym(lookup(p))
+ p := fmt.Sprintf("%s·%d", n.Label.Name, inlgen)
+ m.Label = typecheck.Lookup(p)
return m
}
func pruneUnusedAutos(ll []*ir.Name, vis *hairyVisitor) []*ir.Name {
s := make([]*ir.Name, 0, len(ll))
for _, n := range ll {
- if n.Class() == ir.PAUTO {
+ if n.Class_ == ir.PAUTO {
if _, found := vis.usedLocals[n]; !found {
continue
}
return s
}
-// devirtualize replaces interface method calls within fn with direct
+// Devirtualize replaces interface method calls within fn with direct
// concrete-type method calls where applicable.
-func devirtualize(fn *ir.Func) {
- Curfn = fn
- ir.VisitList(fn.Body(), func(n ir.Node) {
+func Devirtualize(fn *ir.Func) {
+ ir.CurFunc = fn
+ ir.VisitList(fn.Body, func(n ir.Node) {
if n.Op() == ir.OCALLINTER {
devirtualizeCall(n.(*ir.CallExpr))
}
}
func devirtualizeCall(call *ir.CallExpr) {
- sel := call.Left().(*ir.SelectorExpr)
- r := staticValue(sel.Left())
+ sel := call.X.(*ir.SelectorExpr)
+ r := ir.StaticValue(sel.X)
if r.Op() != ir.OCONVIFACE {
return
}
recv := r.(*ir.ConvExpr)
- typ := recv.Left().Type()
+ typ := recv.X.Type()
if typ.IsInterface() {
return
}
- dt := ir.NodAt(sel.Pos(), ir.ODOTTYPE, sel.Left(), nil)
+ dt := ir.NewTypeAssertExpr(sel.Pos(), sel.X, nil)
dt.SetType(typ)
- x := typecheck(nodlSym(sel.Pos(), ir.OXDOT, dt, sel.Sym()), ctxExpr|ctxCallee)
+ x := typecheck.Callee(ir.NewSelectorExpr(sel.Pos(), ir.OXDOT, dt, sel.Sel))
switch x.Op() {
case ir.ODOTMETH:
+ x := x.(*ir.SelectorExpr)
if base.Flag.LowerM != 0 {
base.WarnfAt(call.Pos(), "devirtualizing %v to %v", sel, typ)
}
call.SetOp(ir.OCALLMETH)
- call.SetLeft(x)
+ call.X = x
case ir.ODOTINTER:
// Promoted method from embedded interface-typed field (#42279).
+ x := x.(*ir.SelectorExpr)
if base.Flag.LowerM != 0 {
base.WarnfAt(call.Pos(), "partially devirtualizing %v to %v", sel, typ)
}
call.SetOp(ir.OCALLINTER)
- call.SetLeft(x)
+ call.X = x
default:
// TODO(mdempsky): Turn back into Fatalf after more testing.
if base.Flag.LowerM != 0 {
// Receiver parameter size may have changed; need to update
// call.Type to get correct stack offsets for result
// parameters.
- checkwidth(x.Type())
+ types.CheckSize(x.Type())
switch ft := x.Type(); ft.NumResults() {
case 0:
case 1:
call.SetType(ft.Results())
}
}
+
+// numNonClosures returns the number of functions in list which are not closures.
+func numNonClosures(list []*ir.Func) int {
+ count := 0
+ for _, fn := range list {
+ if fn.OClosure == nil {
+ count++
+ }
+ }
+ return count
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+var (
+ // MaxStackVarSize is the maximum size of a variable which we will allocate on the stack.
+ // This limit is for explicit variable declarations like "var x T" or "x := ...".
+ // Note: the flag smallframes can update this value.
+ MaxStackVarSize = int64(10 * 1024 * 1024)
+
+ // MaxImplicitStackVarSize is the maximum size of implicit variables that we will allocate on the stack.
+ // p := new(T) allocating T on the stack
+ // p := &T{} allocating T on the stack
+ // s := make([]T, n) allocating [n]T on the stack
+ // s := []byte("...") allocating [n]byte on the stack
+ // Note: the flag smallframes can update this value.
+ MaxImplicitStackVarSize = int64(64 * 1024)
+
+ // MaxSmallArraySize is the maximum size of an array which is considered small.
+ // Small arrays will be initialized directly with a sequence of constant stores.
+ // Large arrays will be initialized by copying from a static temp.
+ // 256 bytes was chosen to minimize generated code + statictmp size.
+ MaxSmallArraySize = int64(256)
+)
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "go/constant"
+ "math"
+ "math/big"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+)
+
+func NewBool(b bool) Node {
+ return NewLiteral(constant.MakeBool(b))
+}
+
+func NewInt(v int64) Node {
+ return NewLiteral(constant.MakeInt64(v))
+}
+
+func NewString(s string) Node {
+ return NewLiteral(constant.MakeString(s))
+}
+
+const (
+ // Maximum size in bits for big.Ints before signalling
+ // overflow and also mantissa precision for big.Floats.
+ ConstPrec = 512
+)
+
+func BigFloat(v constant.Value) *big.Float {
+ f := new(big.Float)
+ f.SetPrec(ConstPrec)
+ switch u := constant.Val(v).(type) {
+ case int64:
+ f.SetInt64(u)
+ case *big.Int:
+ f.SetInt(u)
+ case *big.Float:
+ f.Set(u)
+ case *big.Rat:
+ f.SetRat(u)
+ default:
+ base.Fatalf("unexpected: %v", u)
+ }
+ return f
+}
+
+// ConstOverflow reports whether constant value v is too large
+// to represent with type t.
+func ConstOverflow(v constant.Value, t *types.Type) bool {
+ switch {
+ case t.IsInteger():
+ bits := uint(8 * t.Size())
+ if t.IsUnsigned() {
+ x, ok := constant.Uint64Val(v)
+ return !ok || x>>bits != 0
+ }
+ x, ok := constant.Int64Val(v)
+ if x < 0 {
+ x = ^x
+ }
+ return !ok || x>>(bits-1) != 0
+ case t.IsFloat():
+ switch t.Size() {
+ case 4:
+ f, _ := constant.Float32Val(v)
+ return math.IsInf(float64(f), 0)
+ case 8:
+ f, _ := constant.Float64Val(v)
+ return math.IsInf(f, 0)
+ }
+ case t.IsComplex():
+ ft := types.FloatForComplex(t)
+ return ConstOverflow(constant.Real(v), ft) || ConstOverflow(constant.Imag(v), ft)
+ }
+ base.Fatalf("doesoverflow: %v, %v", v, t)
+ panic("unreachable")
+}
+
+// IsConstNode reports whether n is a Go language constant (as opposed to a
+// compile-time constant).
+//
+// Expressions derived from nil, like string([]byte(nil)), while they
+// may be known at compile time, are not Go language constants.
+func IsConstNode(n Node) bool {
+ return n.Op() == OLITERAL
+}
+
+func IsSmallIntConst(n Node) bool {
+ if n.Op() == OLITERAL {
+ v, ok := constant.Int64Val(n.Val())
+ return ok && int64(int32(v)) == v
+ }
+ return false
+}
omitted = true
continue // exclude zero-valued fields
}
- if n, ok := x.Interface().(Nodes); ok && n.Len() == 0 {
+ if n, ok := x.Interface().(Nodes); ok && len(n) == 0 {
omitted = true
continue // exclude empty Nodes slices
}
package ir
import (
+ "bytes"
"cmd/compile/internal/base"
"cmd/compile/internal/types"
"cmd/internal/src"
+ "fmt"
"go/constant"
+ "go/token"
)
func maybeDo(x Node, err error, do func(Node) error) error {
// An AddStringExpr is a string concatenation Expr[0] + Exprs[1] + ... + Expr[len(Expr)-1].
type AddStringExpr struct {
miniExpr
- List_ Nodes
+ List Nodes
Prealloc *Name
}
n := &AddStringExpr{}
n.pos = pos
n.op = OADDSTR
- n.List_.Set(list)
+ n.List.Set(list)
return n
}
-func (n *AddStringExpr) List() Nodes { return n.List_ }
-func (n *AddStringExpr) PtrList() *Nodes { return &n.List_ }
-func (n *AddStringExpr) SetList(x Nodes) { n.List_ = x }
-
// An AddrExpr is an address-of expression &X.
// It may end up being a normal address-of or an allocation of a composite literal.
type AddrExpr struct {
return n
}
-func (n *AddrExpr) Left() Node { return n.X }
-func (n *AddrExpr) SetLeft(x Node) { n.X = x }
-func (n *AddrExpr) Right() Node { return n.Alloc }
-func (n *AddrExpr) SetRight(x Node) { n.Alloc = x }
func (n *AddrExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
func (n *AddrExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
return n
}
-func (n *BinaryExpr) Left() Node { return n.X }
-func (n *BinaryExpr) SetLeft(x Node) { n.X = x }
-func (n *BinaryExpr) Right() Node { return n.Y }
-func (n *BinaryExpr) SetRight(y Node) { n.Y = y }
-
func (n *BinaryExpr) SetOp(op Op) {
switch op {
default:
// A CallExpr is a function call X(Args).
type CallExpr struct {
miniExpr
- orig Node
- X Node
- Args Nodes
- Rargs Nodes // TODO(rsc): Delete.
- Body_ Nodes // TODO(rsc): Delete.
- DDD bool
- Use CallUse
- NoInline_ bool
+ orig Node
+ X Node
+ Args Nodes
+ Rargs Nodes // TODO(rsc): Delete.
+ Body Nodes // TODO(rsc): Delete.
+ IsDDD bool
+ Use CallUse
+ NoInline bool
}
func NewCallExpr(pos src.XPos, op Op, fun Node, args []Node) *CallExpr {
func (*CallExpr) isStmt() {}
-func (n *CallExpr) Orig() Node { return n.orig }
-func (n *CallExpr) SetOrig(x Node) { n.orig = x }
-func (n *CallExpr) Left() Node { return n.X }
-func (n *CallExpr) SetLeft(x Node) { n.X = x }
-func (n *CallExpr) List() Nodes { return n.Args }
-func (n *CallExpr) PtrList() *Nodes { return &n.Args }
-func (n *CallExpr) SetList(x Nodes) { n.Args = x }
-func (n *CallExpr) Rlist() Nodes { return n.Rargs }
-func (n *CallExpr) PtrRlist() *Nodes { return &n.Rargs }
-func (n *CallExpr) SetRlist(x Nodes) { n.Rargs = x }
-func (n *CallExpr) IsDDD() bool { return n.DDD }
-func (n *CallExpr) SetIsDDD(x bool) { n.DDD = x }
-func (n *CallExpr) NoInline() bool { return n.NoInline_ }
-func (n *CallExpr) SetNoInline(x bool) { n.NoInline_ = x }
-func (n *CallExpr) Body() Nodes { return n.Body_ }
-func (n *CallExpr) PtrBody() *Nodes { return &n.Body_ }
-func (n *CallExpr) SetBody(x Nodes) { n.Body_ = x }
+func (n *CallExpr) Orig() Node { return n.orig }
+func (n *CallExpr) SetOrig(x Node) { n.orig = x }
func (n *CallExpr) SetOp(op Op) {
switch op {
// A CallPartExpr is a method expression X.Method (uncalled).
type CallPartExpr struct {
miniExpr
- Func_ *Func
+ Func *Func
X Node
Method *types.Field
Prealloc *Name
}
func NewCallPartExpr(pos src.XPos, x Node, method *types.Field, fn *Func) *CallPartExpr {
- n := &CallPartExpr{Func_: fn, X: x, Method: method}
+ n := &CallPartExpr{Func: fn, X: x, Method: method}
n.op = OCALLPART
n.pos = pos
n.typ = fn.Type()
- n.Func_ = fn
+ n.Func = fn
return n
}
-func (n *CallPartExpr) Func() *Func { return n.Func_ }
-func (n *CallPartExpr) Left() Node { return n.X }
func (n *CallPartExpr) Sym() *types.Sym { return n.Method.Sym }
-func (n *CallPartExpr) SetLeft(x Node) { n.X = x }
// A ClosureExpr is a function literal expression.
type ClosureExpr struct {
miniExpr
- Func_ *Func
+ Func *Func
Prealloc *Name
}
func NewClosureExpr(pos src.XPos, fn *Func) *ClosureExpr {
- n := &ClosureExpr{Func_: fn}
+ n := &ClosureExpr{Func: fn}
n.op = OCLOSURE
n.pos = pos
return n
}
-func (n *ClosureExpr) Func() *Func { return n.Func_ }
-
// A ClosureRead denotes reading a variable stored within a closure struct.
type ClosureReadExpr struct {
miniExpr
- Offset_ int64
+ Offset int64
}
func NewClosureRead(typ *types.Type, offset int64) *ClosureReadExpr {
- n := &ClosureReadExpr{Offset_: offset}
+ n := &ClosureReadExpr{Offset: offset}
n.typ = typ
n.op = OCLOSUREREAD
return n
}
-func (n *ClosureReadExpr) Type() *types.Type { return n.typ }
-func (n *ClosureReadExpr) Offset() int64 { return n.Offset_ }
-
// A CompLitExpr is a composite literal Type{Vals}.
// Before type-checking, the type is Ntype.
type CompLitExpr struct {
miniExpr
orig Node
Ntype Ntype
- List_ Nodes // initialized values
+ List Nodes // initialized values
Prealloc *Name
Len int64 // backing array length for OSLICELIT
}
n := &CompLitExpr{Ntype: typ}
n.pos = pos
n.SetOp(op)
- n.List_.Set(list)
+ n.List.Set(list)
n.orig = n
return n
}
func (n *CompLitExpr) Orig() Node { return n.orig }
func (n *CompLitExpr) SetOrig(x Node) { n.orig = x }
-func (n *CompLitExpr) Right() Node { return n.Ntype }
-func (n *CompLitExpr) SetRight(x Node) { n.Ntype = toNtype(x) }
-func (n *CompLitExpr) List() Nodes { return n.List_ }
-func (n *CompLitExpr) PtrList() *Nodes { return &n.List_ }
-func (n *CompLitExpr) SetList(x Nodes) { n.List_ = x }
func (n *CompLitExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
func (n *CompLitExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
return n
}
-func (n *ConvExpr) Left() Node { return n.X }
-func (n *ConvExpr) SetLeft(x Node) { n.X = x }
func (n *ConvExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
func (n *ConvExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
return n
}
-func (n *IndexExpr) Left() Node { return n.X }
-func (n *IndexExpr) SetLeft(x Node) { n.X = x }
-func (n *IndexExpr) Right() Node { return n.Index }
-func (n *IndexExpr) SetRight(y Node) { n.Index = y }
-func (n *IndexExpr) IndexMapLValue() bool { return n.Assigned }
-func (n *IndexExpr) SetIndexMapLValue(x bool) { n.Assigned = x }
-
func (n *IndexExpr) SetOp(op Op) {
switch op {
default:
return n
}
-func (n *KeyExpr) Left() Node { return n.Key }
-func (n *KeyExpr) SetLeft(x Node) { n.Key = x }
-func (n *KeyExpr) Right() Node { return n.Value }
-func (n *KeyExpr) SetRight(y Node) { n.Value = y }
-
// A StructKeyExpr is an Field: Value composite literal key.
type StructKeyExpr struct {
miniExpr
- Field *types.Sym
- Value Node
- Offset_ int64
+ Field *types.Sym
+ Value Node
+ Offset int64
}
func NewStructKeyExpr(pos src.XPos, field *types.Sym, value Node) *StructKeyExpr {
n := &StructKeyExpr{Field: field, Value: value}
n.pos = pos
n.op = OSTRUCTKEY
- n.Offset_ = types.BADWIDTH
+ n.Offset = types.BADWIDTH
return n
}
-func (n *StructKeyExpr) Sym() *types.Sym { return n.Field }
-func (n *StructKeyExpr) SetSym(x *types.Sym) { n.Field = x }
-func (n *StructKeyExpr) Left() Node { return n.Value }
-func (n *StructKeyExpr) SetLeft(x Node) { n.Value = x }
-func (n *StructKeyExpr) Offset() int64 { return n.Offset_ }
-func (n *StructKeyExpr) SetOffset(x int64) { n.Offset_ = x }
+func (n *StructKeyExpr) Sym() *types.Sym { return n.Field }
// An InlinedCallExpr is an inlined function call.
type InlinedCallExpr struct {
miniExpr
- Body_ Nodes
+ Body Nodes
ReturnVars Nodes
}
n := &InlinedCallExpr{}
n.pos = pos
n.op = OINLCALL
- n.Body_.Set(body)
+ n.Body.Set(body)
n.ReturnVars.Set(retvars)
return n
}
-func (n *InlinedCallExpr) Body() Nodes { return n.Body_ }
-func (n *InlinedCallExpr) PtrBody() *Nodes { return &n.Body_ }
-func (n *InlinedCallExpr) SetBody(x Nodes) { n.Body_ = x }
-func (n *InlinedCallExpr) Rlist() Nodes { return n.ReturnVars }
-func (n *InlinedCallExpr) PtrRlist() *Nodes { return &n.ReturnVars }
-func (n *InlinedCallExpr) SetRlist(x Nodes) { n.ReturnVars = x }
-
// A LogicalExpr is a expression X Op Y where Op is && or ||.
// It is separate from BinaryExpr to make room for statements
// that must be executed before Y but after X.
return n
}
-func (n *LogicalExpr) Left() Node { return n.X }
-func (n *LogicalExpr) SetLeft(x Node) { n.X = x }
-func (n *LogicalExpr) Right() Node { return n.Y }
-func (n *LogicalExpr) SetRight(y Node) { n.Y = y }
-
func (n *LogicalExpr) SetOp(op Op) {
switch op {
default:
return n
}
-func (n *MakeExpr) Left() Node { return n.Len }
-func (n *MakeExpr) SetLeft(x Node) { n.Len = x }
-func (n *MakeExpr) Right() Node { return n.Cap }
-func (n *MakeExpr) SetRight(x Node) { n.Cap = x }
-
func (n *MakeExpr) SetOp(op Op) {
switch op {
default:
return n
}
-func (n *MethodExpr) FuncName() *Name { return n.FuncName_ }
-func (n *MethodExpr) Left() Node { panic("MethodExpr.Left") }
-func (n *MethodExpr) SetLeft(x Node) { panic("MethodExpr.SetLeft") }
-func (n *MethodExpr) Right() Node { panic("MethodExpr.Right") }
-func (n *MethodExpr) SetRight(x Node) { panic("MethodExpr.SetRight") }
-func (n *MethodExpr) Sym() *types.Sym { panic("MethodExpr.Sym") }
-func (n *MethodExpr) Offset() int64 { panic("MethodExpr.Offset") }
-func (n *MethodExpr) SetOffset(x int64) { panic("MethodExpr.SetOffset") }
-func (n *MethodExpr) Class() Class { panic("MethodExpr.Class") }
-func (n *MethodExpr) SetClass(x Class) { panic("MethodExpr.SetClass") }
+func (n *MethodExpr) FuncName() *Name { return n.FuncName_ }
+func (n *MethodExpr) Sym() *types.Sym { panic("MethodExpr.Sym") }
// A NilExpr represents the predefined untyped constant nil.
// (It may be copied and assigned a type, though.)
return n
}
-func (n *ParenExpr) Left() Node { return n.X }
-func (n *ParenExpr) SetLeft(x Node) { n.X = x }
func (n *ParenExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
func (n *ParenExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
// A ResultExpr represents a direct access to a result slot on the stack frame.
type ResultExpr struct {
miniExpr
- Offset_ int64
+ Offset int64
}
func NewResultExpr(pos src.XPos, typ *types.Type, offset int64) *ResultExpr {
- n := &ResultExpr{Offset_: offset}
+ n := &ResultExpr{Offset: offset}
n.pos = pos
n.op = ORESULT
n.typ = typ
return n
}
-func (n *ResultExpr) Offset() int64 { return n.Offset_ }
-func (n *ResultExpr) SetOffset(x int64) { n.Offset_ = x }
-
// A NameOffsetExpr refers to an offset within a variable.
// It is like a SelectorExpr but without the field name.
type NameOffsetExpr struct {
miniExpr
X Node
Sel *types.Sym
- Offset_ int64
+ Offset int64
Selection *types.Field
}
func NewSelectorExpr(pos src.XPos, op Op, x Node, sel *types.Sym) *SelectorExpr {
n := &SelectorExpr{X: x, Sel: sel}
n.pos = pos
- n.Offset_ = types.BADWIDTH
+ n.Offset = types.BADWIDTH
n.SetOp(op)
return n
}
}
}
-func (n *SelectorExpr) Left() Node { return n.X }
-func (n *SelectorExpr) SetLeft(x Node) { n.X = x }
-func (n *SelectorExpr) Sym() *types.Sym { return n.Sel }
-func (n *SelectorExpr) SetSym(x *types.Sym) { n.Sel = x }
-func (n *SelectorExpr) Offset() int64 { return n.Offset_ }
-func (n *SelectorExpr) SetOffset(x int64) { n.Offset_ = x }
-func (n *SelectorExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
-func (n *SelectorExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
+func (n *SelectorExpr) Sym() *types.Sym { return n.Sel }
+func (n *SelectorExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
+func (n *SelectorExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
// Before type-checking, bytes.Buffer is a SelectorExpr.
// After type-checking it becomes a Name.
// A SliceExpr is a slice expression X[Low:High] or X[Low:High:Max].
type SliceExpr struct {
miniExpr
- X Node
- List_ Nodes // TODO(rsc): Use separate Nodes
+ X Node
+ List Nodes // TODO(rsc): Use separate Nodes
}
func NewSliceExpr(pos src.XPos, op Op, x Node) *SliceExpr {
return n
}
-func (n *SliceExpr) Left() Node { return n.X }
-func (n *SliceExpr) SetLeft(x Node) { n.X = x }
-func (n *SliceExpr) List() Nodes { return n.List_ }
-func (n *SliceExpr) PtrList() *Nodes { return &n.List_ }
-func (n *SliceExpr) SetList(x Nodes) { n.List_ = x }
-
func (n *SliceExpr) SetOp(op Op) {
switch op {
default:
// SliceBounds returns n's slice bounds: low, high, and max in expr[low:high:max].
// n must be a slice expression. max is nil if n is a simple slice expression.
func (n *SliceExpr) SliceBounds() (low, high, max Node) {
- if n.List_.Len() == 0 {
+ if len(n.List) == 0 {
return nil, nil, nil
}
switch n.Op() {
case OSLICE, OSLICEARR, OSLICESTR:
- s := n.List_.Slice()
+ s := n.List
return s[0], s[1], nil
case OSLICE3, OSLICE3ARR:
- s := n.List_.Slice()
+ s := n.List
return s[0], s[1], s[2]
}
base.Fatalf("SliceBounds op %v: %v", n.Op(), n)
if max != nil {
base.Fatalf("SetSliceBounds %v given three bounds", n.Op())
}
- s := n.List_.Slice()
+ s := n.List
if s == nil {
if low == nil && high == nil {
return
}
- n.List_.Set2(low, high)
+ n.List = []Node{low, high}
return
}
s[0] = low
s[1] = high
return
case OSLICE3, OSLICE3ARR:
- s := n.List_.Slice()
+ s := n.List
if s == nil {
if low == nil && high == nil && max == nil {
return
}
- n.List_.Set3(low, high, max)
+ n.List = []Node{low, high, max}
return
}
s[0] = low
// A SliceHeader expression constructs a slice header from its parts.
type SliceHeaderExpr struct {
miniExpr
- Ptr Node
- LenCap_ Nodes // TODO(rsc): Split into two Node fields
+ Ptr Node
+ LenCap Nodes // TODO(rsc): Split into two Node fields
}
func NewSliceHeaderExpr(pos src.XPos, typ *types.Type, ptr, len, cap Node) *SliceHeaderExpr {
n.pos = pos
n.op = OSLICEHEADER
n.typ = typ
- n.LenCap_.Set2(len, cap)
+ n.LenCap = []Node{len, cap}
return n
}
-func (n *SliceHeaderExpr) Left() Node { return n.Ptr }
-func (n *SliceHeaderExpr) SetLeft(x Node) { n.Ptr = x }
-func (n *SliceHeaderExpr) List() Nodes { return n.LenCap_ }
-func (n *SliceHeaderExpr) PtrList() *Nodes { return &n.LenCap_ }
-func (n *SliceHeaderExpr) SetList(x Nodes) { n.LenCap_ = x }
-
// A StarExpr is a dereference expression *X.
// It may end up being a value or a type.
type StarExpr struct {
return n
}
-func (n *StarExpr) Left() Node { return n.X }
-func (n *StarExpr) SetLeft(x Node) { n.X = x }
func (n *StarExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
func (n *StarExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
return n
}
-func (n *TypeAssertExpr) Left() Node { return n.X }
-func (n *TypeAssertExpr) SetLeft(x Node) { n.X = x }
-func (n *TypeAssertExpr) Right() Node { return n.Ntype }
-func (n *TypeAssertExpr) SetRight(x Node) { n.Ntype = x } // TODO: toNtype(x)
-func (n *TypeAssertExpr) List() Nodes { return n.Itab }
-func (n *TypeAssertExpr) PtrList() *Nodes { return &n.Itab }
-func (n *TypeAssertExpr) SetList(x Nodes) { n.Itab = x }
-
func (n *TypeAssertExpr) SetOp(op Op) {
switch op {
default:
return n
}
-func (n *UnaryExpr) Left() Node { return n.X }
-func (n *UnaryExpr) SetLeft(x Node) { n.X = x }
-
func (n *UnaryExpr) SetOp(op Op) {
switch op {
default:
n.op = op
}
}
+
+func IsZero(n Node) bool {
+ switch n.Op() {
+ case ONIL:
+ return true
+
+ case OLITERAL:
+ switch u := n.Val(); u.Kind() {
+ case constant.String:
+ return constant.StringVal(u) == ""
+ case constant.Bool:
+ return !constant.BoolVal(u)
+ default:
+ return constant.Sign(u) == 0
+ }
+
+ case OARRAYLIT:
+ n := n.(*CompLitExpr)
+ for _, n1 := range n.List {
+ if n1.Op() == OKEY {
+ n1 = n1.(*KeyExpr).Value
+ }
+ if !IsZero(n1) {
+ return false
+ }
+ }
+ return true
+
+ case OSTRUCTLIT:
+ n := n.(*CompLitExpr)
+ for _, n1 := range n.List {
+ n1 := n1.(*StructKeyExpr)
+ if !IsZero(n1.Value) {
+ return false
+ }
+ }
+ return true
+ }
+
+ return false
+}
+
+// lvalue etc
+func IsAssignable(n Node) bool {
+ switch n.Op() {
+ case OINDEX:
+ n := n.(*IndexExpr)
+ if n.X.Type() != nil && n.X.Type().IsArray() {
+ return IsAssignable(n.X)
+ }
+ if n.X.Type() != nil && n.X.Type().IsString() {
+ return false
+ }
+ fallthrough
+ case ODEREF, ODOTPTR, OCLOSUREREAD:
+ return true
+
+ case ODOT:
+ n := n.(*SelectorExpr)
+ return IsAssignable(n.X)
+
+ case ONAME:
+ n := n.(*Name)
+ if n.Class_ == PFUNC {
+ return false
+ }
+ return true
+
+ case ONAMEOFFSET:
+ return true
+ }
+
+ return false
+}
+
+func StaticValue(n Node) Node {
+ for {
+ if n.Op() == OCONVNOP {
+ n = n.(*ConvExpr).X
+ continue
+ }
+
+ n1 := staticValue1(n)
+ if n1 == nil {
+ return n
+ }
+ n = n1
+ }
+}
+
+// staticValue1 implements a simple SSA-like optimization. If n is a local variable
+// that is initialized and never reassigned, staticValue1 returns the initializer
+// expression. Otherwise, it returns nil.
+func staticValue1(nn Node) Node {
+ if nn.Op() != ONAME {
+ return nil
+ }
+ n := nn.(*Name)
+ if n.Class_ != PAUTO || n.Name().Addrtaken() {
+ return nil
+ }
+
+ defn := n.Name().Defn
+ if defn == nil {
+ return nil
+ }
+
+ var rhs Node
+FindRHS:
+ switch defn.Op() {
+ case OAS:
+ defn := defn.(*AssignStmt)
+ rhs = defn.Y
+ case OAS2:
+ defn := defn.(*AssignListStmt)
+ for i, lhs := range defn.Lhs {
+ if lhs == n {
+ rhs = defn.Rhs[i]
+ break FindRHS
+ }
+ }
+ base.Fatalf("%v missing from LHS of %v", n, defn)
+ default:
+ return nil
+ }
+ if rhs == nil {
+ base.Fatalf("RHS is nil: %v", defn)
+ }
+
+ if reassigned(n) {
+ return nil
+ }
+
+ return rhs
+}
+
+// reassigned takes an ONAME node, walks the function in which it is defined, and returns a
+// boolean indicating whether the name has any assignments other than its declaration.
+// NB: global variables are always considered to be re-assigned.
+// TODO: handle initial declaration not including an assignment and followed by a single assignment?
+func reassigned(name *Name) bool {
+ if name.Op() != ONAME {
+ base.Fatalf("reassigned %v", name)
+ }
+ // no way to reliably check for no-reassignment of globals, assume it can be
+ if name.Curfn == nil {
+ return true
+ }
+ return Any(name.Curfn, func(n Node) bool {
+ switch n.Op() {
+ case OAS:
+ n := n.(*AssignStmt)
+ if n.X == name && n != name.Defn {
+ return true
+ }
+ case OAS2, OAS2FUNC, OAS2MAPR, OAS2DOTTYPE, OAS2RECV, OSELRECV2:
+ n := n.(*AssignListStmt)
+ for _, p := range n.Lhs {
+ if p == name && n != name.Defn {
+ return true
+ }
+ }
+ }
+ return false
+ })
+}
+
+// IsIntrinsicCall reports whether the compiler back end will treat the call as an intrinsic operation.
+var IsIntrinsicCall = func(*CallExpr) bool { return false }
+
+// SameSafeExpr checks whether it is safe to reuse one of l and r
+// instead of computing both. SameSafeExpr assumes that l and r are
+// used in the same statement or expression. In order for it to be
+// safe to reuse l or r, they must:
+// * be the same expression
+// * not have side-effects (no function calls, no channel ops);
+// however, panics are ok
+// * not cause inappropriate aliasing; e.g. two string to []byte
+// conversions, must result in two distinct slices
+//
+// The handling of OINDEXMAP is subtle. OINDEXMAP can occur both
+// as an lvalue (map assignment) and an rvalue (map access). This is
+// currently OK, since the only place SameSafeExpr gets used on an
+// lvalue expression is for OSLICE and OAPPEND optimizations, and it
+// is correct in those settings.
+func SameSafeExpr(l Node, r Node) bool {
+ if l.Op() != r.Op() || !types.Identical(l.Type(), r.Type()) {
+ return false
+ }
+
+ switch l.Op() {
+ case ONAME, OCLOSUREREAD:
+ return l == r
+
+ case ODOT, ODOTPTR:
+ l := l.(*SelectorExpr)
+ r := r.(*SelectorExpr)
+ return l.Sel != nil && r.Sel != nil && l.Sel == r.Sel && SameSafeExpr(l.X, r.X)
+
+ case ODEREF:
+ l := l.(*StarExpr)
+ r := r.(*StarExpr)
+ return SameSafeExpr(l.X, r.X)
+
+ case ONOT, OBITNOT, OPLUS, ONEG:
+ l := l.(*UnaryExpr)
+ r := r.(*UnaryExpr)
+ return SameSafeExpr(l.X, r.X)
+
+ case OCONVNOP:
+ l := l.(*ConvExpr)
+ r := r.(*ConvExpr)
+ return SameSafeExpr(l.X, r.X)
+
+ case OCONV:
+ l := l.(*ConvExpr)
+ r := r.(*ConvExpr)
+ // Some conversions can't be reused, such as []byte(str).
+ // Allow only numeric-ish types. This is a bit conservative.
+ return types.IsSimple[l.Type().Kind()] && SameSafeExpr(l.X, r.X)
+
+ case OINDEX, OINDEXMAP:
+ l := l.(*IndexExpr)
+ r := r.(*IndexExpr)
+ return SameSafeExpr(l.X, r.X) && SameSafeExpr(l.Index, r.Index)
+
+ case OADD, OSUB, OOR, OXOR, OMUL, OLSH, ORSH, OAND, OANDNOT, ODIV, OMOD:
+ l := l.(*BinaryExpr)
+ r := r.(*BinaryExpr)
+ return SameSafeExpr(l.X, r.X) && SameSafeExpr(l.Y, r.Y)
+
+ case OLITERAL:
+ return constant.Compare(l.Val(), token.EQL, r.Val())
+
+ case ONIL:
+ return true
+ }
+
+ return false
+}
+
+// ShouldCheckPtr reports whether pointer checking should be enabled for
+// function fn at a given level. See debugHelpFooter for defined
+// levels.
+func ShouldCheckPtr(fn *Func, level int) bool {
+ return base.Debug.Checkptr >= level && fn.Pragma&NoCheckPtr == 0
+}
+
+// IsReflectHeaderDataField reports whether l is an expression p.Data
+// where p has type reflect.SliceHeader or reflect.StringHeader.
+func IsReflectHeaderDataField(l Node) bool {
+ if l.Type() != types.Types[types.TUINTPTR] {
+ return false
+ }
+
+ var tsym *types.Sym
+ switch l.Op() {
+ case ODOT:
+ l := l.(*SelectorExpr)
+ tsym = l.X.Type().Sym()
+ case ODOTPTR:
+ l := l.(*SelectorExpr)
+ tsym = l.X.Type().Elem().Sym()
+ default:
+ return false
+ }
+
+ if tsym == nil || l.Sym().Name != "Data" || tsym.Pkg.Path != "reflect" {
+ return false
+ }
+ return tsym.Name == "SliceHeader" || tsym.Name == "StringHeader"
+}
+
+func ParamNames(ft *types.Type) []Node {
+ args := make([]Node, ft.NumParams())
+ for i, f := range ft.Params().FieldSlice() {
+ args[i] = AsNode(f.Nname)
+ }
+ return args
+}
+
+// MethodSym returns the method symbol representing a method name
+// associated with a specific receiver type.
+//
+// Method symbols can be used to distinguish the same method appearing
+// in different method sets. For example, T.M and (*T).M have distinct
+// method symbols.
+//
+// The returned symbol will be marked as a function.
+func MethodSym(recv *types.Type, msym *types.Sym) *types.Sym {
+ sym := MethodSymSuffix(recv, msym, "")
+ sym.SetFunc(true)
+ return sym
+}
+
+// MethodSymSuffix is like methodsym, but allows attaching a
+// distinguisher suffix. To avoid collisions, the suffix must not
+// start with a letter, number, or period.
+func MethodSymSuffix(recv *types.Type, msym *types.Sym, suffix string) *types.Sym {
+ if msym.IsBlank() {
+ base.Fatalf("blank method name")
+ }
+
+ rsym := recv.Sym()
+ if recv.IsPtr() {
+ if rsym != nil {
+ base.Fatalf("declared pointer receiver type: %v", recv)
+ }
+ rsym = recv.Elem().Sym()
+ }
+
+ // Find the package the receiver type appeared in. For
+ // anonymous receiver types (i.e., anonymous structs with
+ // embedded fields), use the "go" pseudo-package instead.
+ rpkg := Pkgs.Go
+ if rsym != nil {
+ rpkg = rsym.Pkg
+ }
+
+ var b bytes.Buffer
+ if recv.IsPtr() {
+ // The parentheses aren't really necessary, but
+ // they're pretty traditional at this point.
+ fmt.Fprintf(&b, "(%-S)", recv)
+ } else {
+ fmt.Fprintf(&b, "%-S", recv)
+ }
+
+ // A particular receiver type may have multiple non-exported
+ // methods with the same name. To disambiguate them, include a
+ // package qualifier for names that came from a different
+ // package than the receiver type.
+ if !types.IsExported(msym.Name) && msym.Pkg != rpkg {
+ b.WriteString(".")
+ b.WriteString(msym.Pkg.Prefix)
+ }
+
+ b.WriteString(".")
+ b.WriteString(msym.Name)
+ b.WriteString(suffix)
+
+ return rpkg.LookupBytes(b.Bytes())
+}
+
+// MethodExprName returns the ONAME representing the method
+// referenced by expression n, which must be a method selector,
+// method expression, or method value.
+func MethodExprName(n Node) *Name {
+ name, _ := MethodExprFunc(n).Nname.(*Name)
+ return name
+}
+
+// MethodExprFunc is like MethodExprName, but returns the types.Field instead.
+func MethodExprFunc(n Node) *types.Field {
+ switch n.Op() {
+ case ODOTMETH:
+ return n.(*SelectorExpr).Selection
+ case OMETHEXPR:
+ return n.(*MethodExpr).Method
+ case OCALLPART:
+ n := n.(*CallPartExpr)
+ return n.Method
+ }
+ base.Fatalf("unexpected node: %v (%v)", n, n.Op())
+ panic("unreachable")
+}
// block starting with the init statements.
// if we can just say "for" n->ninit; ... then do so
- simpleinit := n.Init().Len() == 1 && n.Init().First().Init().Len() == 0 && StmtWithInit(n.Op())
+ simpleinit := len(n.Init()) == 1 && len(n.Init()[0].Init()) == 0 && StmtWithInit(n.Op())
// otherwise, print the inits as separate statements
- complexinit := n.Init().Len() != 0 && !simpleinit && exportFormat
+ complexinit := len(n.Init()) != 0 && !simpleinit && exportFormat
// but if it was for if/for/switch, put in an extra surrounding block to limit the scope
extrablock := complexinit && StmtWithInit(n.Op())
switch n.Op() {
case ODCL:
n := n.(*Decl)
- fmt.Fprintf(s, "var %v %v", n.Left().Sym(), n.Left().Type())
+ fmt.Fprintf(s, "var %v %v", n.X.Sym(), n.X.Type())
// Don't export "v = <N>" initializing statements, hope they're always
// preceded by the DCL which will be re-parsed and typechecked to reproduce
// the "v = <N>" again.
case OAS:
n := n.(*AssignStmt)
- if n.Colas() && !complexinit {
- fmt.Fprintf(s, "%v := %v", n.Left(), n.Right())
+ if n.Def && !complexinit {
+ fmt.Fprintf(s, "%v := %v", n.X, n.Y)
} else {
- fmt.Fprintf(s, "%v = %v", n.Left(), n.Right())
+ fmt.Fprintf(s, "%v = %v", n.X, n.Y)
}
case OASOP:
n := n.(*AssignOpStmt)
- if n.Implicit() {
- if n.SubOp() == OADD {
- fmt.Fprintf(s, "%v++", n.Left())
+ if n.IncDec {
+ if n.AsOp == OADD {
+ fmt.Fprintf(s, "%v++", n.X)
} else {
- fmt.Fprintf(s, "%v--", n.Left())
+ fmt.Fprintf(s, "%v--", n.X)
}
break
}
- fmt.Fprintf(s, "%v %v= %v", n.Left(), n.SubOp(), n.Right())
+ fmt.Fprintf(s, "%v %v= %v", n.X, n.AsOp, n.Y)
case OAS2, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
n := n.(*AssignListStmt)
- if n.Colas() && !complexinit {
- fmt.Fprintf(s, "%.v := %.v", n.List(), n.Rlist())
+ if n.Def && !complexinit {
+ fmt.Fprintf(s, "%.v := %.v", n.Lhs, n.Rhs)
} else {
- fmt.Fprintf(s, "%.v = %.v", n.List(), n.Rlist())
+ fmt.Fprintf(s, "%.v = %.v", n.Lhs, n.Rhs)
}
case OBLOCK:
n := n.(*BlockStmt)
- if n.List().Len() != 0 {
- fmt.Fprintf(s, "%v", n.List())
+ if len(n.List) != 0 {
+ fmt.Fprintf(s, "%v", n.List)
}
case ORETURN:
n := n.(*ReturnStmt)
- fmt.Fprintf(s, "return %.v", n.List())
+ fmt.Fprintf(s, "return %.v", n.Results)
case ORETJMP:
n := n.(*BranchStmt)
- fmt.Fprintf(s, "retjmp %v", n.Sym())
+ fmt.Fprintf(s, "retjmp %v", n.Label)
case OINLMARK:
n := n.(*InlineMarkStmt)
- fmt.Fprintf(s, "inlmark %d", n.Offset())
+ fmt.Fprintf(s, "inlmark %d", n.Index)
case OGO:
n := n.(*GoDeferStmt)
- fmt.Fprintf(s, "go %v", n.Left())
+ fmt.Fprintf(s, "go %v", n.Call)
case ODEFER:
n := n.(*GoDeferStmt)
- fmt.Fprintf(s, "defer %v", n.Left())
+ fmt.Fprintf(s, "defer %v", n.Call)
case OIF:
n := n.(*IfStmt)
if simpleinit {
- fmt.Fprintf(s, "if %v; %v { %v }", n.Init().First(), n.Left(), n.Body())
+ fmt.Fprintf(s, "if %v; %v { %v }", n.Init()[0], n.Cond, n.Body)
} else {
- fmt.Fprintf(s, "if %v { %v }", n.Left(), n.Body())
+ fmt.Fprintf(s, "if %v { %v }", n.Cond, n.Body)
}
- if n.Rlist().Len() != 0 {
- fmt.Fprintf(s, " else { %v }", n.Rlist())
+ if len(n.Else) != 0 {
+ fmt.Fprintf(s, " else { %v }", n.Else)
}
case OFOR, OFORUNTIL:
fmt.Fprint(s, opname)
if simpleinit {
- fmt.Fprintf(s, " %v;", n.Init().First())
- } else if n.Right() != nil {
+ fmt.Fprintf(s, " %v;", n.Init()[0])
+ } else if n.Post != nil {
fmt.Fprint(s, " ;")
}
- if n.Left() != nil {
- fmt.Fprintf(s, " %v", n.Left())
+ if n.Cond != nil {
+ fmt.Fprintf(s, " %v", n.Cond)
}
- if n.Right() != nil {
- fmt.Fprintf(s, "; %v", n.Right())
+ if n.Post != nil {
+ fmt.Fprintf(s, "; %v", n.Post)
} else if simpleinit {
fmt.Fprint(s, ";")
}
- if n.Op() == OFORUNTIL && n.List().Len() != 0 {
- fmt.Fprintf(s, "; %v", n.List())
+ if n.Op() == OFORUNTIL && len(n.Late) != 0 {
+ fmt.Fprintf(s, "; %v", n.Late)
}
- fmt.Fprintf(s, " { %v }", n.Body())
+ fmt.Fprintf(s, " { %v }", n.Body)
case ORANGE:
n := n.(*RangeStmt)
break
}
- if n.List().Len() == 0 {
- fmt.Fprintf(s, "for range %v { %v }", n.Right(), n.Body())
+ if len(n.Vars) == 0 {
+ fmt.Fprintf(s, "for range %v { %v }", n.X, n.Body)
break
}
- fmt.Fprintf(s, "for %.v = range %v { %v }", n.List(), n.Right(), n.Body())
+ fmt.Fprintf(s, "for %.v = range %v { %v }", n.Vars, n.X, n.Body)
case OSELECT:
n := n.(*SelectStmt)
fmt.Fprintf(s, "%v statement", n.Op())
break
}
- fmt.Fprintf(s, "select { %v }", n.List())
+ fmt.Fprintf(s, "select { %v }", n.Cases)
case OSWITCH:
n := n.(*SwitchStmt)
}
fmt.Fprintf(s, "switch")
if simpleinit {
- fmt.Fprintf(s, " %v;", n.Init().First())
+ fmt.Fprintf(s, " %v;", n.Init()[0])
}
- if n.Left() != nil {
- fmt.Fprintf(s, " %v ", n.Left())
+ if n.Tag != nil {
+ fmt.Fprintf(s, " %v ", n.Tag)
}
- fmt.Fprintf(s, " { %v }", n.List())
+ fmt.Fprintf(s, " { %v }", n.Cases)
case OCASE:
n := n.(*CaseStmt)
- if n.List().Len() != 0 {
- fmt.Fprintf(s, "case %.v", n.List())
+ if len(n.List) != 0 {
+ fmt.Fprintf(s, "case %.v", n.List)
} else {
fmt.Fprint(s, "default")
}
- fmt.Fprintf(s, ": %v", n.Body())
+ fmt.Fprintf(s, ": %v", n.Body)
case OBREAK, OCONTINUE, OGOTO, OFALL:
n := n.(*BranchStmt)
- if n.Sym() != nil {
- fmt.Fprintf(s, "%v %v", n.Op(), n.Sym())
+ if n.Label != nil {
+ fmt.Fprintf(s, "%v %v", n.Op(), n.Label)
} else {
fmt.Fprintf(s, "%v", n.Op())
}
case OLABEL:
n := n.(*LabelStmt)
- fmt.Fprintf(s, "%v: ", n.Sym())
+ fmt.Fprintf(s, "%v: ", n.Label)
}
if extrablock {
case OADDR:
nn := nn.(*AddrExpr)
if nn.Implicit() {
- n = nn.Left()
+ n = nn.X
continue
}
case ODEREF:
nn := nn.(*StarExpr)
if nn.Implicit() {
- n = nn.Left()
+ n = nn.X
continue
}
case OCONV, OCONVNOP, OCONVIFACE:
nn := nn.(*ConvExpr)
if nn.Implicit() {
- n = nn.Left()
+ n = nn.X
continue
}
}
switch n.Op() {
case OPAREN:
n := n.(*ParenExpr)
- fmt.Fprintf(s, "(%v)", n.Left())
+ fmt.Fprintf(s, "(%v)", n.X)
case ONIL:
fmt.Fprint(s, "nil")
fmt.Fprint(s, "func literal")
return
}
- fmt.Fprintf(s, "%v { %v }", n.Type(), n.Func().Body())
+ fmt.Fprintf(s, "%v { %v }", n.Type(), n.Func.Body)
case OCOMPLIT:
n := n.(*CompLitExpr)
fmt.Fprintf(s, "... argument")
return
}
- if n.Right() != nil {
- fmt.Fprintf(s, "%v{%s}", n.Right(), ellipsisIf(n.List().Len() != 0))
+ if n.Ntype != nil {
+ fmt.Fprintf(s, "%v{%s}", n.Ntype, ellipsisIf(len(n.List) != 0))
return
}
fmt.Fprint(s, "composite literal")
return
}
- fmt.Fprintf(s, "(%v{ %.v })", n.Right(), n.List())
+ fmt.Fprintf(s, "(%v{ %.v })", n.Ntype, n.List)
case OPTRLIT:
n := n.(*AddrExpr)
- fmt.Fprintf(s, "&%v", n.Left())
+ fmt.Fprintf(s, "&%v", n.X)
case OSTRUCTLIT, OARRAYLIT, OSLICELIT, OMAPLIT:
n := n.(*CompLitExpr)
if !exportFormat {
- fmt.Fprintf(s, "%v{%s}", n.Type(), ellipsisIf(n.List().Len() != 0))
+ fmt.Fprintf(s, "%v{%s}", n.Type(), ellipsisIf(len(n.List) != 0))
return
}
- fmt.Fprintf(s, "(%v{ %.v })", n.Type(), n.List())
+ fmt.Fprintf(s, "(%v{ %.v })", n.Type(), n.List)
case OKEY:
n := n.(*KeyExpr)
- if n.Left() != nil && n.Right() != nil {
- fmt.Fprintf(s, "%v:%v", n.Left(), n.Right())
+ if n.Key != nil && n.Value != nil {
+ fmt.Fprintf(s, "%v:%v", n.Key, n.Value)
return
}
- if n.Left() == nil && n.Right() != nil {
- fmt.Fprintf(s, ":%v", n.Right())
+ if n.Key == nil && n.Value != nil {
+ fmt.Fprintf(s, ":%v", n.Value)
return
}
- if n.Left() != nil && n.Right() == nil {
- fmt.Fprintf(s, "%v:", n.Left())
+ if n.Key != nil && n.Value == nil {
+ fmt.Fprintf(s, "%v:", n.Key)
return
}
fmt.Fprint(s, ":")
case OSTRUCTKEY:
n := n.(*StructKeyExpr)
- fmt.Fprintf(s, "%v:%v", n.Sym(), n.Left())
+ fmt.Fprintf(s, "%v:%v", n.Field, n.Value)
case OCALLPART:
n := n.(*CallPartExpr)
- exprFmt(n.Left(), s, nprec)
- if n.Sym() == nil {
+ exprFmt(n.X, s, nprec)
+ if n.Method.Sym == nil {
fmt.Fprint(s, ".<nil>")
return
}
- fmt.Fprintf(s, ".%s", types.SymMethodName(n.Sym()))
+ fmt.Fprintf(s, ".%s", types.SymMethodName(n.Method.Sym))
case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH:
n := n.(*SelectorExpr)
- exprFmt(n.Left(), s, nprec)
- if n.Sym() == nil {
+ exprFmt(n.X, s, nprec)
+ if n.Sel == nil {
fmt.Fprint(s, ".<nil>")
return
}
- fmt.Fprintf(s, ".%s", types.SymMethodName(n.Sym()))
+ fmt.Fprintf(s, ".%s", types.SymMethodName(n.Sel))
case ODOTTYPE, ODOTTYPE2:
n := n.(*TypeAssertExpr)
- exprFmt(n.Left(), s, nprec)
- if n.Right() != nil {
- fmt.Fprintf(s, ".(%v)", n.Right())
+ exprFmt(n.X, s, nprec)
+ if n.Ntype != nil {
+ fmt.Fprintf(s, ".(%v)", n.Ntype)
return
}
fmt.Fprintf(s, ".(%v)", n.Type())
case OINDEX, OINDEXMAP:
n := n.(*IndexExpr)
- exprFmt(n.Left(), s, nprec)
- fmt.Fprintf(s, "[%v]", n.Right())
+ exprFmt(n.X, s, nprec)
+ fmt.Fprintf(s, "[%v]", n.Index)
case OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR:
n := n.(*SliceExpr)
- exprFmt(n.Left(), s, nprec)
+ exprFmt(n.X, s, nprec)
fmt.Fprint(s, "[")
low, high, max := n.SliceBounds()
if low != nil {
case OSLICEHEADER:
n := n.(*SliceHeaderExpr)
- if n.List().Len() != 2 {
- base.Fatalf("bad OSLICEHEADER list length %d", n.List().Len())
+ if len(n.LenCap) != 2 {
+ base.Fatalf("bad OSLICEHEADER list length %d", len(n.LenCap))
}
- fmt.Fprintf(s, "sliceheader{%v,%v,%v}", n.Left(), n.List().First(), n.List().Second())
+ fmt.Fprintf(s, "sliceheader{%v,%v,%v}", n.Ptr, n.LenCap[0], n.LenCap[1])
case OCOMPLEX, OCOPY:
n := n.(*BinaryExpr)
- fmt.Fprintf(s, "%v(%v, %v)", n.Op(), n.Left(), n.Right())
+ fmt.Fprintf(s, "%v(%v, %v)", n.Op(), n.X, n.Y)
case OCONV,
OCONVIFACE,
} else {
fmt.Fprintf(s, "%v", n.Type())
}
- fmt.Fprintf(s, "(%v)", n.Left())
+ fmt.Fprintf(s, "(%v)", n.X)
case OREAL,
OIMAG,
OOFFSETOF,
OSIZEOF:
n := n.(*UnaryExpr)
- fmt.Fprintf(s, "%v(%v)", n.Op(), n.Left())
+ fmt.Fprintf(s, "%v(%v)", n.Op(), n.X)
case OAPPEND,
ODELETE,
OPRINT,
OPRINTN:
n := n.(*CallExpr)
- if n.IsDDD() {
- fmt.Fprintf(s, "%v(%.v...)", n.Op(), n.List())
+ if n.IsDDD {
+ fmt.Fprintf(s, "%v(%.v...)", n.Op(), n.Args)
return
}
- fmt.Fprintf(s, "%v(%.v)", n.Op(), n.List())
+ fmt.Fprintf(s, "%v(%.v)", n.Op(), n.Args)
case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH, OGETG:
n := n.(*CallExpr)
- exprFmt(n.Left(), s, nprec)
- if n.IsDDD() {
- fmt.Fprintf(s, "(%.v...)", n.List())
+ exprFmt(n.X, s, nprec)
+ if n.IsDDD {
+ fmt.Fprintf(s, "(%.v...)", n.Args)
return
}
- fmt.Fprintf(s, "(%.v)", n.List())
+ fmt.Fprintf(s, "(%.v)", n.Args)
case OMAKEMAP, OMAKECHAN, OMAKESLICE:
n := n.(*MakeExpr)
- if n.Right() != nil {
- fmt.Fprintf(s, "make(%v, %v, %v)", n.Type(), n.Left(), n.Right())
+ if n.Cap != nil {
+ fmt.Fprintf(s, "make(%v, %v, %v)", n.Type(), n.Len, n.Cap)
return
}
- if n.Left() != nil && (n.Op() == OMAKESLICE || !n.Left().Type().IsUntyped()) {
- fmt.Fprintf(s, "make(%v, %v)", n.Type(), n.Left())
+ if n.Len != nil && (n.Op() == OMAKESLICE || !n.Len.Type().IsUntyped()) {
+ fmt.Fprintf(s, "make(%v, %v)", n.Type(), n.Len)
return
}
fmt.Fprintf(s, "make(%v)", n.Type())
case OMAKESLICECOPY:
n := n.(*MakeExpr)
- fmt.Fprintf(s, "makeslicecopy(%v, %v, %v)", n.Type(), n.Left(), n.Right())
+ fmt.Fprintf(s, "makeslicecopy(%v, %v, %v)", n.Type(), n.Len, n.Cap)
case OPLUS, ONEG, OBITNOT, ONOT, ORECV:
// Unary
n := n.(*UnaryExpr)
fmt.Fprintf(s, "%v", n.Op())
- if n.Left() != nil && n.Left().Op() == n.Op() {
+ if n.X != nil && n.X.Op() == n.Op() {
fmt.Fprint(s, " ")
}
- exprFmt(n.Left(), s, nprec+1)
+ exprFmt(n.X, s, nprec+1)
case OADDR:
n := n.(*AddrExpr)
fmt.Fprintf(s, "%v", n.Op())
- if n.Left() != nil && n.Left().Op() == n.Op() {
+ if n.X != nil && n.X.Op() == n.Op() {
fmt.Fprint(s, " ")
}
- exprFmt(n.Left(), s, nprec+1)
+ exprFmt(n.X, s, nprec+1)
case ODEREF:
n := n.(*StarExpr)
fmt.Fprintf(s, "%v", n.Op())
- exprFmt(n.Left(), s, nprec+1)
+ exprFmt(n.X, s, nprec+1)
// Binary
case OADD,
OSUB,
OXOR:
n := n.(*BinaryExpr)
- exprFmt(n.Left(), s, nprec)
+ exprFmt(n.X, s, nprec)
fmt.Fprintf(s, " %v ", n.Op())
- exprFmt(n.Right(), s, nprec+1)
+ exprFmt(n.Y, s, nprec+1)
case OANDAND,
OOROR:
n := n.(*LogicalExpr)
- exprFmt(n.Left(), s, nprec)
+ exprFmt(n.X, s, nprec)
fmt.Fprintf(s, " %v ", n.Op())
- exprFmt(n.Right(), s, nprec+1)
+ exprFmt(n.Y, s, nprec+1)
case OSEND:
n := n.(*SendStmt)
- exprFmt(n.Left(), s, nprec)
+ exprFmt(n.Chan, s, nprec)
fmt.Fprintf(s, " <- ")
- exprFmt(n.Right(), s, nprec+1)
+ exprFmt(n.Value, s, nprec+1)
case OADDSTR:
n := n.(*AddStringExpr)
- for i, n1 := range n.List().Slice() {
+ for i, n1 := range n.List {
if i != 0 {
fmt.Fprint(s, " + ")
}
sep = ", "
}
- for i, n := range l.Slice() {
+ for i, n := range l {
fmt.Fprint(s, n)
- if i+1 < l.Len() {
+ if i+1 < len(l) {
fmt.Fprint(s, sep)
}
}
if n.Op() == OCLOSURE {
n := n.(*ClosureExpr)
- if fn := n.Func(); fn != nil && fn.Nname.Sym() != nil {
+ if fn := n.Func; fn != nil && fn.Nname.Sym() != nil {
fmt.Fprintf(w, " fnName(%+v)", fn.Nname.Sym())
}
}
return
}
- if n.Init().Len() != 0 {
+ if len(n.Init()) != 0 {
fmt.Fprintf(w, "%+v-init", n.Op())
dumpNodes(w, n.Init(), depth+1)
indent(w, depth)
case OASOP:
n := n.(*AssignOpStmt)
- fmt.Fprintf(w, "%+v-%+v", n.Op(), n.SubOp())
+ fmt.Fprintf(w, "%+v-%+v", n.Op(), n.AsOp)
dumpNodeHeader(w, n)
case OTYPE:
n := n.(*Func)
fmt.Fprintf(w, "%+v", n.Op())
dumpNodeHeader(w, n)
- fn := n.Func()
+ fn := n
if len(fn.Dcl) > 0 {
indent(w, depth)
fmt.Fprintf(w, "%+v-Dcl", n.Op())
- for _, dcl := range n.Func().Dcl {
+ for _, dcl := range n.Dcl {
dumpNode(w, dcl, depth+1)
}
}
- if fn.Body().Len() > 0 {
+ if len(fn.Body) > 0 {
indent(w, depth)
fmt.Fprintf(w, "%+v-body", n.Op())
- dumpNodes(w, fn.Body(), depth+1)
+ dumpNodes(w, fn.Body, depth+1)
}
return
}
}
dumpNode(w, val, depth+1)
case Nodes:
- if val.Len() == 0 {
+ if len(val) == 0 {
continue
}
if name != "" {
}
func dumpNodes(w io.Writer, list Nodes, depth int) {
- if list.Len() == 0 {
+ if len(list) == 0 {
fmt.Fprintf(w, " <nil>")
return
}
- for _, n := range list.Slice() {
+ for _, n := range list {
dumpNode(w, n, depth)
}
}
// pointer from the Func back to the OCALLPART.
type Func struct {
miniNode
- typ *types.Type
- Body_ Nodes
- iota int64
+ typ *types.Type
+ Body Nodes
+ Iota int64
Nname *Name // ONAME node
OClosure *ClosureExpr // OCLOSURE node
f := new(Func)
f.pos = pos
f.op = ODCLFUNC
- f.iota = -1
+ f.Iota = -1
return f
}
func (f *Func) isStmt() {}
-func (f *Func) Func() *Func { return f }
-func (f *Func) Body() Nodes { return f.Body_ }
-func (f *Func) PtrBody() *Nodes { return &f.Body_ }
-func (f *Func) SetBody(x Nodes) { f.Body_ = x }
func (f *Func) Type() *types.Type { return f.typ }
func (f *Func) SetType(x *types.Type) { f.typ = x }
-func (f *Func) Iota() int64 { return f.iota }
-func (f *Func) SetIota(x int64) { f.iota = x }
func (f *Func) Sym() *types.Sym {
if f.Nname != nil {
case *Func:
f = n
case *Name:
- f = n.Func()
+ f = n.Func
case *CallPartExpr:
- f = n.Func()
+ f = n.Func
case *ClosureExpr:
- f = n.Func()
+ f = n.Func
}
if f == nil || f.Nname == nil {
return "<nil>"
var f *Func
switch n := n.(type) {
case *CallPartExpr:
- f = n.Func()
+ f = n.Func
case *ClosureExpr:
- f = n.Func()
+ f = n.Func
case *Func:
f = n
}
}
return p + "." + s.Name
}
+
+var CurFunc *Func
+
+func FuncSymName(s *types.Sym) string {
+ return s.Name + "·f"
+}
+
+// NewFuncNameAt generates a new name node for a function or method.
+func NewFuncNameAt(pos src.XPos, s *types.Sym, fn *Func) *Name {
+ if fn.Nname != nil {
+ base.Fatalf("newFuncName - already have name")
+ }
+ n := NewNameAt(pos, s)
+ n.SetFunc(fn)
+ fn.Nname = n
+ return n
+}
+
+// MarkFunc marks a node as a function.
+func MarkFunc(n *Name) {
+ if n.Op() != ONAME || n.Class_ != Pxxx {
+ base.Fatalf("expected ONAME/Pxxx node, got %v", n)
+ }
+
+ n.Class_ = PFUNC
+ n.Sym().SetFunc(true)
+}
+
+// ClosureDebugRuntimeCheck applies boilerplate checks for debug flags
+// and compiling runtime
+func ClosureDebugRuntimeCheck(clo *ClosureExpr) {
+ if base.Debug.Closure > 0 {
+ if clo.Esc() == EscHeap {
+ base.WarnfAt(clo.Pos(), "heap closure, captured vars = %v", clo.Func.ClosureVars)
+ } else {
+ base.WarnfAt(clo.Pos(), "stack closure, captured vars = %v", clo.Func.ClosureVars)
+ }
+ }
+ if base.Flag.CompilingRuntime && clo.Esc() == EscHeap {
+ base.ErrorfAt(clo.Pos(), "heap-allocated closure, not allowed in runtime")
+ }
+}
+
+// IsTrivialClosure reports whether closure clo has an
+// empty list of captured vars.
+func IsTrivialClosure(clo *ClosureExpr) bool {
+ return len(clo.Func.ClosureVars) == 0
+}
esc uint16
}
-func (n *miniNode) Format(s fmt.State, verb rune) { panic(1) }
-func (n *miniNode) copy() Node { panic(1) }
-func (n *miniNode) doChildren(do func(Node) error) error { panic(1) }
-func (n *miniNode) editChildren(edit func(Node) Node) { panic(1) }
-
// posOr returns pos if known, or else n.pos.
// For use in DeepCopy.
func (n *miniNode) posOr(pos src.XPos) src.XPos {
// Empty, immutable graph structure.
-func (n *miniNode) Left() Node { return nil }
-func (n *miniNode) Right() Node { return nil }
-func (n *miniNode) Init() Nodes { return Nodes{} }
-func (n *miniNode) PtrInit() *Nodes { return &immutableEmptyNodes }
-func (n *miniNode) Body() Nodes { return Nodes{} }
-func (n *miniNode) PtrBody() *Nodes { return &immutableEmptyNodes }
-func (n *miniNode) List() Nodes { return Nodes{} }
-func (n *miniNode) PtrList() *Nodes { return &immutableEmptyNodes }
-func (n *miniNode) Rlist() Nodes { return Nodes{} }
-func (n *miniNode) PtrRlist() *Nodes { return &immutableEmptyNodes }
-func (n *miniNode) SetLeft(x Node) {
- if x != nil {
- panic(n.no("SetLeft"))
- }
-}
-func (n *miniNode) SetRight(x Node) {
- if x != nil {
- panic(n.no("SetRight"))
- }
-}
+func (n *miniNode) Init() Nodes { return Nodes{} }
+func (n *miniNode) PtrInit() *Nodes { return &immutableEmptyNodes }
func (n *miniNode) SetInit(x Nodes) {
if x != nil {
panic(n.no("SetInit"))
}
}
-func (n *miniNode) SetBody(x Nodes) {
- if x != nil {
- panic(n.no("SetBody"))
- }
-}
-func (n *miniNode) SetList(x Nodes) {
- if x != nil {
- panic(n.no("SetList"))
- }
-}
-func (n *miniNode) SetRlist(x Nodes) {
- if x != nil {
- panic(n.no("SetRlist"))
- }
-}
// Additional functionality unavailable.
func (n *miniNode) no(name string) string { return "cannot " + name + " on " + n.op.String() }
-func (n *miniNode) SetOp(Op) { panic(n.no("SetOp")) }
-func (n *miniNode) SubOp() Op { panic(n.no("SubOp")) }
-func (n *miniNode) SetSubOp(Op) { panic(n.no("SetSubOp")) }
-func (n *miniNode) Type() *types.Type { return nil }
-func (n *miniNode) SetType(*types.Type) { panic(n.no("SetType")) }
-func (n *miniNode) Func() *Func { return nil }
-func (n *miniNode) Name() *Name { return nil }
-func (n *miniNode) Sym() *types.Sym { return nil }
-func (n *miniNode) SetSym(*types.Sym) { panic(n.no("SetSym")) }
-func (n *miniNode) Offset() int64 { return types.BADWIDTH }
-func (n *miniNode) SetOffset(x int64) { panic(n.no("SetOffset")) }
-func (n *miniNode) Class() Class { return Pxxx }
-func (n *miniNode) SetClass(Class) { panic(n.no("SetClass")) }
-func (n *miniNode) Likely() bool { panic(n.no("Likely")) }
-func (n *miniNode) SetLikely(bool) { panic(n.no("SetLikely")) }
-func (n *miniNode) SliceBounds() (low, high, max Node) {
- panic(n.no("SliceBounds"))
-}
-func (n *miniNode) SetSliceBounds(low, high, max Node) {
- panic(n.no("SetSliceBounds"))
-}
-func (n *miniNode) Iota() int64 { panic(n.no("Iota")) }
-func (n *miniNode) SetIota(int64) { panic(n.no("SetIota")) }
-func (n *miniNode) Colas() bool { return false }
-func (n *miniNode) SetColas(bool) { panic(n.no("SetColas")) }
-func (n *miniNode) NoInline() bool { panic(n.no("NoInline")) }
-func (n *miniNode) SetNoInline(bool) { panic(n.no("SetNoInline")) }
-func (n *miniNode) Transient() bool { panic(n.no("Transient")) }
-func (n *miniNode) SetTransient(bool) { panic(n.no("SetTransient")) }
-func (n *miniNode) Implicit() bool { return false }
-func (n *miniNode) SetImplicit(bool) { panic(n.no("SetImplicit")) }
-func (n *miniNode) IsDDD() bool { return false }
-func (n *miniNode) SetIsDDD(bool) { panic(n.no("SetIsDDD")) }
-func (n *miniNode) Embedded() bool { return false }
-func (n *miniNode) SetEmbedded(bool) { panic(n.no("SetEmbedded")) }
-func (n *miniNode) IndexMapLValue() bool { panic(n.no("IndexMapLValue")) }
-func (n *miniNode) SetIndexMapLValue(bool) { panic(n.no("SetIndexMapLValue")) }
-func (n *miniNode) ResetAux() { panic(n.no("ResetAux")) }
-func (n *miniNode) HasBreak() bool { panic(n.no("HasBreak")) }
-func (n *miniNode) SetHasBreak(bool) { panic(n.no("SetHasBreak")) }
-func (n *miniNode) Val() constant.Value { panic(n.no("Val")) }
-func (n *miniNode) SetVal(v constant.Value) { panic(n.no("SetVal")) }
-func (n *miniNode) Int64Val() int64 { panic(n.no("Int64Val")) }
-func (n *miniNode) Uint64Val() uint64 { panic(n.no("Uint64Val")) }
-func (n *miniNode) CanInt64() bool { panic(n.no("CanInt64")) }
-func (n *miniNode) BoolVal() bool { panic(n.no("BoolVal")) }
-func (n *miniNode) StringVal() string { panic(n.no("StringVal")) }
-func (n *miniNode) HasCall() bool { return false }
-func (n *miniNode) SetHasCall(bool) { panic(n.no("SetHasCall")) }
-func (n *miniNode) NonNil() bool { return false }
-func (n *miniNode) MarkNonNil() { panic(n.no("MarkNonNil")) }
-func (n *miniNode) Bounded() bool { return false }
-func (n *miniNode) SetBounded(bool) { panic(n.no("SetBounded")) }
-func (n *miniNode) Opt() interface{} { return nil }
-func (n *miniNode) SetOpt(interface{}) { panic(n.no("SetOpt")) }
-func (n *miniNode) MarkReadonly() { panic(n.no("MarkReadonly")) }
-func (n *miniNode) TChanDir() types.ChanDir { panic(n.no("TChanDir")) }
-func (n *miniNode) SetTChanDir(types.ChanDir) { panic(n.no("SetTChanDir")) }
+func (n *miniNode) Type() *types.Type { return nil }
+func (n *miniNode) SetType(*types.Type) { panic(n.no("SetType")) }
+func (n *miniNode) Name() *Name { return nil }
+func (n *miniNode) Sym() *types.Sym { return nil }
+func (n *miniNode) Val() constant.Value { panic(n.no("Val")) }
+func (n *miniNode) SetVal(v constant.Value) { panic(n.no("SetVal")) }
+func (n *miniNode) HasCall() bool { return false }
+func (n *miniNode) SetHasCall(bool) { panic(n.no("SetHasCall")) }
+func (n *miniNode) NonNil() bool { return false }
+func (n *miniNode) MarkNonNil() { panic(n.no("MarkNonNil")) }
+func (n *miniNode) Opt() interface{} { return nil }
+func (n *miniNode) SetOpt(interface{}) { panic(n.no("SetOpt")) }
flags bitset16
pragma PragmaFlag // int16
sym *types.Sym
- fn *Func
+ Func *Func
Offset_ int64
val constant.Value
orig Node
func (n *Name) SetSubOp(x Op) { n.BuiltinOp = x }
func (n *Name) Class() Class { return n.Class_ }
func (n *Name) SetClass(x Class) { n.Class_ = x }
-func (n *Name) Func() *Func { return n.fn }
-func (n *Name) SetFunc(x *Func) { n.fn = x }
+func (n *Name) SetFunc(x *Func) { n.Func = x }
func (n *Name) Offset() int64 { panic("Name.Offset") }
func (n *Name) SetOffset(x int64) {
if x != 0 {
p.pos = pos
return p
}
+
+// IsParamStackCopy reports whether this is the on-stack copy of a
+// function parameter that moved to the heap.
+func IsParamStackCopy(n Node) bool {
+ if n.Op() != ONAME {
+ return false
+ }
+ name := n.(*Name)
+ return (name.Class_ == PPARAM || name.Class_ == PPARAMOUT) && name.Heapaddr != nil
+}
+
+// IsParamHeapCopy reports whether this is the on-heap copy of
+// a function parameter that moved to the heap.
+func IsParamHeapCopy(n Node) bool {
+ if n.Op() != ONAME {
+ return false
+ }
+ name := n.(*Name)
+ return name.Class_ == PAUTOHEAP && name.Name().Stackcopy != nil
+}
+
+var RegFP *Name
// Abstract graph structure, for generic traversals.
Op() Op
- SetOp(x Op)
- SubOp() Op
- SetSubOp(x Op)
- Left() Node
- SetLeft(x Node)
- Right() Node
- SetRight(x Node)
Init() Nodes
PtrInit() *Nodes
SetInit(x Nodes)
- Body() Nodes
- PtrBody() *Nodes
- SetBody(x Nodes)
- List() Nodes
- SetList(x Nodes)
- PtrList() *Nodes
- Rlist() Nodes
- SetRlist(x Nodes)
- PtrRlist() *Nodes
// Fields specific to certain Ops only.
Type() *types.Type
SetType(t *types.Type)
- Func() *Func
Name() *Name
Sym() *types.Sym
- SetSym(x *types.Sym)
- Offset() int64
- SetOffset(x int64)
- Class() Class
- SetClass(x Class)
- Likely() bool
- SetLikely(x bool)
- SliceBounds() (low, high, max Node)
- SetSliceBounds(low, high, max Node)
- Iota() int64
- SetIota(x int64)
- Colas() bool
- SetColas(x bool)
- NoInline() bool
- SetNoInline(x bool)
- Transient() bool
- SetTransient(x bool)
- Implicit() bool
- SetImplicit(x bool)
- IsDDD() bool
- SetIsDDD(x bool)
- IndexMapLValue() bool
- SetIndexMapLValue(x bool)
- ResetAux()
- HasBreak() bool
- SetHasBreak(x bool)
- MarkReadonly()
Val() constant.Value
SetVal(v constant.Value)
SetOpt(x interface{})
Diag() bool
SetDiag(x bool)
- Bounded() bool
- SetBounded(x bool)
Typecheck() uint8
SetTypecheck(x uint8)
NonNil() bool
// The methods that would modify it panic instead.
var immutableEmptyNodes = Nodes{}
-// asNodes returns a slice of *Node as a Nodes value.
-func AsNodes(s []Node) Nodes {
- return s
-}
-
-// Slice returns the entries in Nodes as a slice.
-// Changes to the slice entries (as in s[i] = n) will be reflected in
-// the Nodes.
-func (n Nodes) Slice() []Node {
- return n
-}
-
-// Len returns the number of entries in Nodes.
-func (n Nodes) Len() int {
- return len(n)
-}
-
-// Index returns the i'th element of Nodes.
-// It panics if n does not have at least i+1 elements.
-func (n Nodes) Index(i int) Node {
- return n[i]
-}
-
-// First returns the first element of Nodes (same as n.Index(0)).
-// It panics if n has no elements.
-func (n Nodes) First() Node {
- return n[0]
-}
-
-// Second returns the second element of Nodes (same as n.Index(1)).
-// It panics if n has fewer than two elements.
-func (n Nodes) Second() Node {
- return n[1]
-}
-
func (n *Nodes) mutate() {
if n == &immutableEmptyNodes {
panic("immutable Nodes.Set")
*n = s
}
-// Set1 sets n to a slice containing a single node.
-func (n *Nodes) Set1(n1 Node) {
- n.mutate()
- *n = []Node{n1}
-}
-
-// Set2 sets n to a slice containing two nodes.
-func (n *Nodes) Set2(n1, n2 Node) {
- n.mutate()
- *n = []Node{n1, n2}
-}
-
-// Set3 sets n to a slice containing three nodes.
-func (n *Nodes) Set3(n1, n2, n3 Node) {
- n.mutate()
- *n = []Node{n1, n2, n3}
-}
-
-// MoveNodes sets n to the contents of n2, then clears n2.
-func (n *Nodes) MoveNodes(n2 *Nodes) {
- n.mutate()
- *n = *n2
- *n2 = nil
-}
-
-// SetIndex sets the i'th element of Nodes to node.
-// It panics if n does not have at least i+1 elements.
-func (n Nodes) SetIndex(i int, node Node) {
- n[i] = node
-}
-
-// SetFirst sets the first element of Nodes to node.
-// It panics if n does not have at least one elements.
-func (n Nodes) SetFirst(node Node) {
- n[0] = node
-}
-
-// SetSecond sets the second element of Nodes to node.
-// It panics if n does not have at least two elements.
-func (n Nodes) SetSecond(node Node) {
- n[1] = node
-}
-
-// Addr returns the address of the i'th element of Nodes.
-// It panics if n does not have at least i+1 elements.
-func (n Nodes) Addr(i int) *Node {
- return &n[i]
-}
-
// Append appends entries to Nodes.
func (n *Nodes) Append(a ...Node) {
if len(a) == 0 {
return ret
}
-// AppendNodes appends the contents of *n2 to n, then clears n2.
-func (n *Nodes) AppendNodes(n2 *Nodes) {
- n.mutate()
- *n = append(*n, n2.Take()...)
-}
-
// Copy returns a copy of the content of the slice.
func (n Nodes) Copy() Nodes {
if n == nil {
return nil
}
- c := make(Nodes, n.Len())
+ c := make(Nodes, len(n))
copy(c, n)
return c
}
return n.Type().Recv() != nil
}
-func Nod(op Op, nleft, nright Node) Node {
- return NodAt(base.Pos, op, nleft, nright)
+func HasNamedResults(fn *Func) bool {
+ typ := fn.Type()
+ return typ.NumResults() > 0 && types.OrigSym(typ.Results().Field(0).Sym) != nil
}
-func NodAt(pos src.XPos, op Op, nleft, nright Node) Node {
- switch op {
- default:
- panic("NodAt " + op.String())
- case OADD, OAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE,
- OLSH, OLT, OMOD, OMUL, ONE, OOR, ORSH, OSUB, OXOR,
- OCOPY, OCOMPLEX,
- OEFACE:
- return NewBinaryExpr(pos, op, nleft, nright)
- case OADDR:
- return NewAddrExpr(pos, nleft)
- case OADDSTR:
- return NewAddStringExpr(pos, nil)
- case OANDAND, OOROR:
- return NewLogicalExpr(pos, op, nleft, nright)
- case OARRAYLIT, OCOMPLIT, OMAPLIT, OSTRUCTLIT, OSLICELIT:
- var typ Ntype
- if nright != nil {
- typ = nright.(Ntype)
- }
- return NewCompLitExpr(pos, op, typ, nil)
- case OAS:
- return NewAssignStmt(pos, nleft, nright)
- case OAS2, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV, OSELRECV2:
- n := NewAssignListStmt(pos, op, nil, nil)
- return n
- case OASOP:
- return NewAssignOpStmt(pos, OXXX, nleft, nright)
- case OBITNOT, ONEG, ONOT, OPLUS, ORECV,
- OALIGNOF, OCAP, OCLOSE, OIMAG, OLEN, ONEW, ONEWOBJ,
- OOFFSETOF, OPANIC, OREAL, OSIZEOF,
- OCHECKNIL, OCFUNC, OIDATA, OITAB, OSPTR, OVARDEF, OVARKILL, OVARLIVE:
- if nright != nil {
- panic("unary nright")
- }
- return NewUnaryExpr(pos, op, nleft)
- case OBLOCK:
- return NewBlockStmt(pos, nil)
- case OBREAK, OCONTINUE, OFALL, OGOTO, ORETJMP:
- return NewBranchStmt(pos, op, nil)
- case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH,
- OAPPEND, ODELETE, OGETG, OMAKE, OPRINT, OPRINTN, ORECOVER:
- return NewCallExpr(pos, op, nleft, nil)
- case OCASE:
- return NewCaseStmt(pos, nil, nil)
- case OCONV, OCONVIFACE, OCONVNOP, ORUNESTR:
- return NewConvExpr(pos, op, nil, nleft)
- case ODCL, ODCLCONST, ODCLTYPE:
- return NewDecl(pos, op, nleft)
- case ODCLFUNC:
- return NewFunc(pos)
- case ODEFER, OGO:
- return NewGoDeferStmt(pos, op, nleft)
- case ODEREF:
- return NewStarExpr(pos, nleft)
- case ODOT, ODOTPTR, ODOTMETH, ODOTINTER, OXDOT:
- return NewSelectorExpr(pos, op, nleft, nil)
- case ODOTTYPE, ODOTTYPE2:
- var typ Ntype
- if nright != nil {
- typ = nright.(Ntype)
+// HasUniquePos reports whether n has a unique position that can be
+// used for reporting error messages.
+//
+// It's primarily used to distinguish references to named objects,
+// whose Pos will point back to their declaration position rather than
+// their usage position.
+func HasUniquePos(n Node) bool {
+ switch n.Op() {
+ case ONAME, OPACK:
+ return false
+ case OLITERAL, ONIL, OTYPE:
+ if n.Sym() != nil {
+ return false
}
- n := NewTypeAssertExpr(pos, nleft, typ)
- if op != ODOTTYPE {
- n.SetOp(op)
+ }
+
+ if !n.Pos().IsKnown() {
+ if base.Flag.K != 0 {
+ base.Warn("setlineno: unknown position (line 0)")
}
+ return false
+ }
+
+ return true
+}
+
+func SetPos(n Node) src.XPos {
+ lno := base.Pos
+ if n != nil && HasUniquePos(n) {
+ base.Pos = n.Pos()
+ }
+ return lno
+}
+
+// The result of InitExpr MUST be assigned back to n, e.g.
+// n.Left = InitExpr(init, n.Left)
+func InitExpr(init []Node, n Node) Node {
+ if len(init) == 0 {
return n
- case OFOR:
- return NewForStmt(pos, nil, nleft, nright, nil)
- case OIF:
- return NewIfStmt(pos, nleft, nil, nil)
- case OINDEX, OINDEXMAP:
- n := NewIndexExpr(pos, nleft, nright)
- if op != OINDEX {
- n.SetOp(op)
+ }
+ if MayBeShared(n) {
+ // Introduce OCONVNOP to hold init list.
+ old := n
+ n = NewConvExpr(base.Pos, OCONVNOP, nil, old)
+ n.SetType(old.Type())
+ n.SetTypecheck(1)
+ }
+
+ n.PtrInit().Prepend(init...)
+ n.SetHasCall(true)
+ return n
+}
+
+// what's the outer value that a write to n affects?
+// outer value means containing struct or array.
+func OuterValue(n Node) Node {
+ for {
+ switch nn := n; nn.Op() {
+ case OXDOT:
+ base.Fatalf("OXDOT in walk")
+ case ODOT:
+ nn := nn.(*SelectorExpr)
+ n = nn.X
+ continue
+ case OPAREN:
+ nn := nn.(*ParenExpr)
+ n = nn.X
+ continue
+ case OCONVNOP:
+ nn := nn.(*ConvExpr)
+ n = nn.X
+ continue
+ case OINDEX:
+ nn := nn.(*IndexExpr)
+ if nn.X.Type() != nil && nn.X.Type().IsArray() {
+ n = nn.X
+ continue
+ }
}
+
return n
- case OINLMARK:
- return NewInlineMarkStmt(pos, types.BADWIDTH)
- case OKEY:
- return NewKeyExpr(pos, nleft, nright)
- case OSTRUCTKEY:
- return NewStructKeyExpr(pos, nil, nleft)
- case OLABEL:
- return NewLabelStmt(pos, nil)
- case OLITERAL, OTYPE, OIOTA:
- return newNameAt(pos, op, nil)
- case OMAKECHAN, OMAKEMAP, OMAKESLICE, OMAKESLICECOPY:
- return NewMakeExpr(pos, op, nleft, nright)
- case ONIL:
- return NewNilExpr(pos)
- case OPACK:
- return NewPkgName(pos, nil, nil)
- case OPAREN:
- return NewParenExpr(pos, nleft)
- case ORANGE:
- return NewRangeStmt(pos, nil, nright, nil)
- case ORESULT:
- return NewResultExpr(pos, nil, types.BADWIDTH)
- case ORETURN:
- return NewReturnStmt(pos, nil)
- case OSELECT:
- return NewSelectStmt(pos, nil)
- case OSEND:
- return NewSendStmt(pos, nleft, nright)
- case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR:
- return NewSliceExpr(pos, op, nleft)
- case OSLICEHEADER:
- return NewSliceHeaderExpr(pos, nil, nleft, nil, nil)
- case OSWITCH:
- return NewSwitchStmt(pos, nleft, nil)
- case OINLCALL:
- return NewInlinedCallExpr(pos, nil, nil)
}
}
+
+const (
+ EscUnknown = iota
+ EscNone // Does not escape to heap, result, or parameters.
+ EscHeap // Reachable from the heap
+ EscNever // By construction will not escape.
+)
func (n *AddStringExpr) copy() Node {
c := *n
c.init = c.init.Copy()
- c.List_ = c.List_.Copy()
+ c.List = c.List.Copy()
return &c
}
func (n *AddStringExpr) doChildren(do func(Node) error) error {
var err error
err = maybeDoList(n.init, err, do)
- err = maybeDoList(n.List_, err, do)
+ err = maybeDoList(n.List, err, do)
return err
}
func (n *AddStringExpr) editChildren(edit func(Node) Node) {
editList(n.init, edit)
- editList(n.List_, edit)
+ editList(n.List, edit)
}
func (n *AddrExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
func (n *BlockStmt) copy() Node {
c := *n
c.init = c.init.Copy()
- c.List_ = c.List_.Copy()
+ c.List = c.List.Copy()
return &c
}
func (n *BlockStmt) doChildren(do func(Node) error) error {
var err error
err = maybeDoList(n.init, err, do)
- err = maybeDoList(n.List_, err, do)
+ err = maybeDoList(n.List, err, do)
return err
}
func (n *BlockStmt) editChildren(edit func(Node) Node) {
editList(n.init, edit)
- editList(n.List_, edit)
+ editList(n.List, edit)
}
func (n *BranchStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
c.init = c.init.Copy()
c.Args = c.Args.Copy()
c.Rargs = c.Rargs.Copy()
- c.Body_ = c.Body_.Copy()
+ c.Body = c.Body.Copy()
return &c
}
func (n *CallExpr) doChildren(do func(Node) error) error {
err = maybeDo(n.X, err, do)
err = maybeDoList(n.Args, err, do)
err = maybeDoList(n.Rargs, err, do)
- err = maybeDoList(n.Body_, err, do)
+ err = maybeDoList(n.Body, err, do)
return err
}
func (n *CallExpr) editChildren(edit func(Node) Node) {
n.X = maybeEdit(n.X, edit)
editList(n.Args, edit)
editList(n.Rargs, edit)
- editList(n.Body_, edit)
+ editList(n.Body, edit)
}
func (n *CallPartExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
c := *n
c.init = c.init.Copy()
c.Vars = c.Vars.Copy()
- c.List_ = c.List_.Copy()
- c.Body_ = c.Body_.Copy()
+ c.List = c.List.Copy()
+ c.Body = c.Body.Copy()
return &c
}
func (n *CaseStmt) doChildren(do func(Node) error) error {
var err error
err = maybeDoList(n.init, err, do)
err = maybeDoList(n.Vars, err, do)
- err = maybeDoList(n.List_, err, do)
+ err = maybeDoList(n.List, err, do)
err = maybeDo(n.Comm, err, do)
- err = maybeDoList(n.Body_, err, do)
+ err = maybeDoList(n.Body, err, do)
return err
}
func (n *CaseStmt) editChildren(edit func(Node) Node) {
editList(n.init, edit)
editList(n.Vars, edit)
- editList(n.List_, edit)
+ editList(n.List, edit)
n.Comm = maybeEdit(n.Comm, edit)
- editList(n.Body_, edit)
+ editList(n.Body, edit)
}
func (n *ChanType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
func (n *CompLitExpr) copy() Node {
c := *n
c.init = c.init.Copy()
- c.List_ = c.List_.Copy()
+ c.List = c.List.Copy()
return &c
}
func (n *CompLitExpr) doChildren(do func(Node) error) error {
var err error
err = maybeDoList(n.init, err, do)
err = maybeDo(n.Ntype, err, do)
- err = maybeDoList(n.List_, err, do)
+ err = maybeDoList(n.List, err, do)
return err
}
func (n *CompLitExpr) editChildren(edit func(Node) Node) {
editList(n.init, edit)
n.Ntype = toNtype(maybeEdit(n.Ntype, edit))
- editList(n.List_, edit)
+ editList(n.List, edit)
}
func (n *ConstExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
c := *n
c.init = c.init.Copy()
c.Late = c.Late.Copy()
- c.Body_ = c.Body_.Copy()
+ c.Body = c.Body.Copy()
return &c
}
func (n *ForStmt) doChildren(do func(Node) error) error {
err = maybeDo(n.Cond, err, do)
err = maybeDoList(n.Late, err, do)
err = maybeDo(n.Post, err, do)
- err = maybeDoList(n.Body_, err, do)
+ err = maybeDoList(n.Body, err, do)
return err
}
func (n *ForStmt) editChildren(edit func(Node) Node) {
n.Cond = maybeEdit(n.Cond, edit)
editList(n.Late, edit)
n.Post = maybeEdit(n.Post, edit)
- editList(n.Body_, edit)
+ editList(n.Body, edit)
}
func (n *Func) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
func (n *Func) copy() Node {
c := *n
- c.Body_ = c.Body_.Copy()
+ c.Body = c.Body.Copy()
return &c
}
func (n *Func) doChildren(do func(Node) error) error {
var err error
- err = maybeDoList(n.Body_, err, do)
+ err = maybeDoList(n.Body, err, do)
return err
}
func (n *Func) editChildren(edit func(Node) Node) {
- editList(n.Body_, edit)
+ editList(n.Body, edit)
}
func (n *FuncType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
func (n *IfStmt) copy() Node {
c := *n
c.init = c.init.Copy()
- c.Body_ = c.Body_.Copy()
+ c.Body = c.Body.Copy()
c.Else = c.Else.Copy()
return &c
}
var err error
err = maybeDoList(n.init, err, do)
err = maybeDo(n.Cond, err, do)
- err = maybeDoList(n.Body_, err, do)
+ err = maybeDoList(n.Body, err, do)
err = maybeDoList(n.Else, err, do)
return err
}
func (n *IfStmt) editChildren(edit func(Node) Node) {
editList(n.init, edit)
n.Cond = maybeEdit(n.Cond, edit)
- editList(n.Body_, edit)
+ editList(n.Body, edit)
editList(n.Else, edit)
}
func (n *InlinedCallExpr) copy() Node {
c := *n
c.init = c.init.Copy()
- c.Body_ = c.Body_.Copy()
+ c.Body = c.Body.Copy()
c.ReturnVars = c.ReturnVars.Copy()
return &c
}
func (n *InlinedCallExpr) doChildren(do func(Node) error) error {
var err error
err = maybeDoList(n.init, err, do)
- err = maybeDoList(n.Body_, err, do)
+ err = maybeDoList(n.Body, err, do)
err = maybeDoList(n.ReturnVars, err, do)
return err
}
func (n *InlinedCallExpr) editChildren(edit func(Node) Node) {
editList(n.init, edit)
- editList(n.Body_, edit)
+ editList(n.Body, edit)
editList(n.ReturnVars, edit)
}
c := *n
c.init = c.init.Copy()
c.Vars = c.Vars.Copy()
- c.Body_ = c.Body_.Copy()
+ c.Body = c.Body.Copy()
return &c
}
func (n *RangeStmt) doChildren(do func(Node) error) error {
err = maybeDoList(n.init, err, do)
err = maybeDoList(n.Vars, err, do)
err = maybeDo(n.X, err, do)
- err = maybeDoList(n.Body_, err, do)
+ err = maybeDoList(n.Body, err, do)
return err
}
func (n *RangeStmt) editChildren(edit func(Node) Node) {
editList(n.init, edit)
editList(n.Vars, edit)
n.X = maybeEdit(n.X, edit)
- editList(n.Body_, edit)
+ editList(n.Body, edit)
}
func (n *ResultExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
func (n *SliceExpr) copy() Node {
c := *n
c.init = c.init.Copy()
- c.List_ = c.List_.Copy()
+ c.List = c.List.Copy()
return &c
}
func (n *SliceExpr) doChildren(do func(Node) error) error {
var err error
err = maybeDoList(n.init, err, do)
err = maybeDo(n.X, err, do)
- err = maybeDoList(n.List_, err, do)
+ err = maybeDoList(n.List, err, do)
return err
}
func (n *SliceExpr) editChildren(edit func(Node) Node) {
editList(n.init, edit)
n.X = maybeEdit(n.X, edit)
- editList(n.List_, edit)
+ editList(n.List, edit)
}
func (n *SliceHeaderExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
func (n *SliceHeaderExpr) copy() Node {
c := *n
c.init = c.init.Copy()
- c.LenCap_ = c.LenCap_.Copy()
+ c.LenCap = c.LenCap.Copy()
return &c
}
func (n *SliceHeaderExpr) doChildren(do func(Node) error) error {
var err error
err = maybeDoList(n.init, err, do)
err = maybeDo(n.Ptr, err, do)
- err = maybeDoList(n.LenCap_, err, do)
+ err = maybeDoList(n.LenCap, err, do)
return err
}
func (n *SliceHeaderExpr) editChildren(edit func(Node) Node) {
editList(n.init, edit)
n.Ptr = maybeEdit(n.Ptr, edit)
- editList(n.LenCap_, edit)
+ editList(n.LenCap, edit)
}
func (n *SliceType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
-
-import "cmd/compile/internal/ir"
+package ir
// Strongly connected components.
//
// when analyzing a set of mutually recursive functions.
type bottomUpVisitor struct {
- analyze func([]*ir.Func, bool)
+ analyze func([]*Func, bool)
visitgen uint32
- nodeID map[*ir.Func]uint32
- stack []*ir.Func
+ nodeID map[*Func]uint32
+ stack []*Func
}
-// visitBottomUp invokes analyze on the ODCLFUNC nodes listed in list.
+// VisitFuncsBottomUp invokes analyze on the ODCLFUNC nodes listed in list.
// It calls analyze with successive groups of functions, working from
// the bottom of the call graph upward. Each time analyze is called with
// a list of functions, every function on that list only calls other functions
// If recursive is false, the list consists of only a single function and its closures.
// If recursive is true, the list may still contain only a single function,
// if that function is itself recursive.
-func visitBottomUp(list []ir.Node, analyze func(list []*ir.Func, recursive bool)) {
+func VisitFuncsBottomUp(list []Node, analyze func(list []*Func, recursive bool)) {
var v bottomUpVisitor
v.analyze = analyze
- v.nodeID = make(map[*ir.Func]uint32)
+ v.nodeID = make(map[*Func]uint32)
for _, n := range list {
- if n.Op() == ir.ODCLFUNC {
- n := n.(*ir.Func)
- if !n.Func().IsHiddenClosure() {
+ if n.Op() == ODCLFUNC {
+ n := n.(*Func)
+ if !n.IsHiddenClosure() {
v.visit(n)
}
}
}
}
-func (v *bottomUpVisitor) visit(n *ir.Func) uint32 {
+func (v *bottomUpVisitor) visit(n *Func) uint32 {
if id := v.nodeID[n]; id > 0 {
// already visited
return id
min := v.visitgen
v.stack = append(v.stack, n)
- ir.Visit(n, func(n ir.Node) {
+ Visit(n, func(n Node) {
switch n.Op() {
- case ir.ONAME:
- if n.Class() == ir.PFUNC {
+ case ONAME:
+ n := n.(*Name)
+ if n.Class_ == PFUNC {
if n != nil && n.Name().Defn != nil {
- if m := v.visit(n.Name().Defn.(*ir.Func)); m < min {
+ if m := v.visit(n.Name().Defn.(*Func)); m < min {
min = m
}
}
}
- case ir.OMETHEXPR:
- fn := methodExprName(n)
+ case OMETHEXPR:
+ n := n.(*MethodExpr)
+ fn := MethodExprName(n)
if fn != nil && fn.Defn != nil {
- if m := v.visit(fn.Defn.(*ir.Func)); m < min {
+ if m := v.visit(fn.Defn.(*Func)); m < min {
min = m
}
}
- case ir.ODOTMETH:
- fn := methodExprName(n)
- if fn != nil && fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC && fn.Defn != nil {
- if m := v.visit(fn.Defn.(*ir.Func)); m < min {
+ case ODOTMETH:
+ n := n.(*SelectorExpr)
+ fn := MethodExprName(n)
+ if fn != nil && fn.Op() == ONAME && fn.Class_ == PFUNC && fn.Defn != nil {
+ if m := v.visit(fn.Defn.(*Func)); m < min {
min = m
}
}
- case ir.OCALLPART:
- fn := ir.AsNode(callpartMethod(n).Nname)
- if fn != nil && fn.Op() == ir.ONAME {
- if fn := fn.(*ir.Name); fn.Class() == ir.PFUNC && fn.Name().Defn != nil {
- if m := v.visit(fn.Name().Defn.(*ir.Func)); m < min {
+ case OCALLPART:
+ n := n.(*CallPartExpr)
+ fn := AsNode(n.Method.Nname)
+ if fn != nil && fn.Op() == ONAME {
+ if fn := fn.(*Name); fn.Class_ == PFUNC && fn.Name().Defn != nil {
+ if m := v.visit(fn.Name().Defn.(*Func)); m < min {
min = m
}
}
}
- case ir.OCLOSURE:
- n := n.(*ir.ClosureExpr)
- if m := v.visit(n.Func()); m < min {
+ case OCLOSURE:
+ n := n.(*ClosureExpr)
+ if m := v.visit(n.Func); m < min {
min = m
}
}
func (*Decl) isStmt() {}
-func (n *Decl) Left() Node { return n.X }
-func (n *Decl) SetLeft(x Node) { n.X = x }
-
// A Stmt is a Node that can appear as a statement.
// This includes statement-like expressions such as f().
//
return n
}
-func (n *AssignListStmt) List() Nodes { return n.Lhs }
-func (n *AssignListStmt) PtrList() *Nodes { return &n.Lhs }
-func (n *AssignListStmt) SetList(x Nodes) { n.Lhs = x }
-func (n *AssignListStmt) Rlist() Nodes { return n.Rhs }
-func (n *AssignListStmt) PtrRlist() *Nodes { return &n.Rhs }
-func (n *AssignListStmt) SetRlist(x Nodes) { n.Rhs = x }
-func (n *AssignListStmt) Colas() bool { return n.Def }
-func (n *AssignListStmt) SetColas(x bool) { n.Def = x }
-
func (n *AssignListStmt) SetOp(op Op) {
switch op {
default:
return n
}
-func (n *AssignStmt) Left() Node { return n.X }
-func (n *AssignStmt) SetLeft(x Node) { n.X = x }
-func (n *AssignStmt) Right() Node { return n.Y }
-func (n *AssignStmt) SetRight(y Node) { n.Y = y }
-func (n *AssignStmt) Colas() bool { return n.Def }
-func (n *AssignStmt) SetColas(x bool) { n.Def = x }
-
func (n *AssignStmt) SetOp(op Op) {
switch op {
default:
return n
}
-func (n *AssignOpStmt) Left() Node { return n.X }
-func (n *AssignOpStmt) SetLeft(x Node) { n.X = x }
-func (n *AssignOpStmt) Right() Node { return n.Y }
-func (n *AssignOpStmt) SetRight(y Node) { n.Y = y }
-func (n *AssignOpStmt) SubOp() Op { return n.AsOp }
-func (n *AssignOpStmt) SetSubOp(x Op) { n.AsOp = x }
-func (n *AssignOpStmt) Implicit() bool { return n.IncDec }
-func (n *AssignOpStmt) SetImplicit(b bool) { n.IncDec = b }
func (n *AssignOpStmt) Type() *types.Type { return n.typ }
func (n *AssignOpStmt) SetType(x *types.Type) { n.typ = x }
// A BlockStmt is a block: { List }.
type BlockStmt struct {
miniStmt
- List_ Nodes
+ List Nodes
}
func NewBlockStmt(pos src.XPos, list []Node) *BlockStmt {
}
}
n.op = OBLOCK
- n.List_.Set(list)
+ n.List.Set(list)
return n
}
-func (n *BlockStmt) List() Nodes { return n.List_ }
-func (n *BlockStmt) PtrList() *Nodes { return &n.List_ }
-func (n *BlockStmt) SetList(x Nodes) { n.List_ = x }
-
// A BranchStmt is a break, continue, fallthrough, or goto statement.
//
// For back-end code generation, Op may also be RETJMP (return+jump),
return n
}
-func (n *BranchStmt) Sym() *types.Sym { return n.Label }
-func (n *BranchStmt) SetSym(sym *types.Sym) { n.Label = sym }
+func (n *BranchStmt) Sym() *types.Sym { return n.Label }
// A CaseStmt is a case statement in a switch or select: case List: Body.
type CaseStmt struct {
miniStmt
- Vars Nodes // declared variable for this case in type switch
- List_ Nodes // list of expressions for switch, early select
- Comm Node // communication case (Exprs[0]) after select is type-checked
- Body_ Nodes
+ Vars Nodes // declared variable for this case in type switch
+ List Nodes // list of expressions for switch, early select
+ Comm Node // communication case (Exprs[0]) after select is type-checked
+ Body Nodes
}
func NewCaseStmt(pos src.XPos, list, body []Node) *CaseStmt {
n := &CaseStmt{}
n.pos = pos
n.op = OCASE
- n.List_.Set(list)
- n.Body_.Set(body)
+ n.List.Set(list)
+ n.Body.Set(body)
return n
}
-func (n *CaseStmt) List() Nodes { return n.List_ }
-func (n *CaseStmt) PtrList() *Nodes { return &n.List_ }
-func (n *CaseStmt) SetList(x Nodes) { n.List_ = x }
-func (n *CaseStmt) Body() Nodes { return n.Body_ }
-func (n *CaseStmt) PtrBody() *Nodes { return &n.Body_ }
-func (n *CaseStmt) SetBody(x Nodes) { n.Body_ = x }
-func (n *CaseStmt) Rlist() Nodes { return n.Vars }
-func (n *CaseStmt) PtrRlist() *Nodes { return &n.Vars }
-func (n *CaseStmt) SetRlist(x Nodes) { n.Vars = x }
-func (n *CaseStmt) Left() Node { return n.Comm }
-func (n *CaseStmt) SetLeft(x Node) { n.Comm = x }
-
// A ForStmt is a non-range for loop: for Init; Cond; Post { Body }
// Op can be OFOR or OFORUNTIL (!Cond).
type ForStmt struct {
miniStmt
- Label *types.Sym
- Cond Node
- Late Nodes
- Post Node
- Body_ Nodes
- HasBreak_ bool
+ Label *types.Sym
+ Cond Node
+ Late Nodes
+ Post Node
+ Body Nodes
+ HasBreak bool
}
func NewForStmt(pos src.XPos, init []Node, cond, post Node, body []Node) *ForStmt {
n.pos = pos
n.op = OFOR
n.init.Set(init)
- n.Body_.Set(body)
+ n.Body.Set(body)
return n
}
-func (n *ForStmt) Sym() *types.Sym { return n.Label }
-func (n *ForStmt) SetSym(x *types.Sym) { n.Label = x }
-func (n *ForStmt) Left() Node { return n.Cond }
-func (n *ForStmt) SetLeft(x Node) { n.Cond = x }
-func (n *ForStmt) Right() Node { return n.Post }
-func (n *ForStmt) SetRight(x Node) { n.Post = x }
-func (n *ForStmt) Body() Nodes { return n.Body_ }
-func (n *ForStmt) PtrBody() *Nodes { return &n.Body_ }
-func (n *ForStmt) SetBody(x Nodes) { n.Body_ = x }
-func (n *ForStmt) List() Nodes { return n.Late }
-func (n *ForStmt) PtrList() *Nodes { return &n.Late }
-func (n *ForStmt) SetList(x Nodes) { n.Late = x }
-func (n *ForStmt) HasBreak() bool { return n.HasBreak_ }
-func (n *ForStmt) SetHasBreak(b bool) { n.HasBreak_ = b }
-
func (n *ForStmt) SetOp(op Op) {
if op != OFOR && op != OFORUNTIL {
panic(n.no("SetOp " + op.String()))
return n
}
-func (n *GoDeferStmt) Left() Node { return n.Call }
-func (n *GoDeferStmt) SetLeft(x Node) { n.Call = x }
-
// An IfStmt is an if statement: if Init; Cond { Then } else { Else }.
type IfStmt struct {
miniStmt
- Cond Node
- Body_ Nodes
- Else Nodes
- Likely_ bool // code layout hint
+ Cond Node
+ Body Nodes
+ Else Nodes
+ Likely bool // code layout hint
}
func NewIfStmt(pos src.XPos, cond Node, body, els []Node) *IfStmt {
n := &IfStmt{Cond: cond}
n.pos = pos
n.op = OIF
- n.Body_.Set(body)
+ n.Body.Set(body)
n.Else.Set(els)
return n
}
-func (n *IfStmt) Left() Node { return n.Cond }
-func (n *IfStmt) SetLeft(x Node) { n.Cond = x }
-func (n *IfStmt) Body() Nodes { return n.Body_ }
-func (n *IfStmt) PtrBody() *Nodes { return &n.Body_ }
-func (n *IfStmt) SetBody(x Nodes) { n.Body_ = x }
-func (n *IfStmt) Rlist() Nodes { return n.Else }
-func (n *IfStmt) PtrRlist() *Nodes { return &n.Else }
-func (n *IfStmt) SetRlist(x Nodes) { n.Else = x }
-func (n *IfStmt) Likely() bool { return n.Likely_ }
-func (n *IfStmt) SetLikely(x bool) { n.Likely_ = x }
-
// An InlineMarkStmt is a marker placed just before an inlined body.
type InlineMarkStmt struct {
miniStmt
return n
}
-func (n *LabelStmt) Sym() *types.Sym { return n.Label }
-func (n *LabelStmt) SetSym(x *types.Sym) { n.Label = x }
+func (n *LabelStmt) Sym() *types.Sym { return n.Label }
// A RangeStmt is a range loop: for Vars = range X { Stmts }
// Op is ORANGE.
type RangeStmt struct {
miniStmt
- Label *types.Sym
- Vars Nodes // TODO(rsc): Replace with Key, Value Node
- Def bool
- X Node
- Body_ Nodes
- HasBreak_ bool
- typ *types.Type // TODO(rsc): Remove - use X.Type() instead
- Prealloc *Name
+ Label *types.Sym
+ Vars Nodes // TODO(rsc): Replace with Key, Value Node
+ Def bool
+ X Node
+ Body Nodes
+ HasBreak bool
+ typ *types.Type // TODO(rsc): Remove - use X.Type() instead
+ Prealloc *Name
}
func NewRangeStmt(pos src.XPos, vars []Node, x Node, body []Node) *RangeStmt {
n.pos = pos
n.op = ORANGE
n.Vars.Set(vars)
- n.Body_.Set(body)
+ n.Body.Set(body)
return n
}
-func (n *RangeStmt) Sym() *types.Sym { return n.Label }
-func (n *RangeStmt) SetSym(x *types.Sym) { n.Label = x }
-func (n *RangeStmt) Right() Node { return n.X }
-func (n *RangeStmt) SetRight(x Node) { n.X = x }
-func (n *RangeStmt) Body() Nodes { return n.Body_ }
-func (n *RangeStmt) PtrBody() *Nodes { return &n.Body_ }
-func (n *RangeStmt) SetBody(x Nodes) { n.Body_ = x }
-func (n *RangeStmt) List() Nodes { return n.Vars }
-func (n *RangeStmt) PtrList() *Nodes { return &n.Vars }
-func (n *RangeStmt) SetList(x Nodes) { n.Vars = x }
-func (n *RangeStmt) HasBreak() bool { return n.HasBreak_ }
-func (n *RangeStmt) SetHasBreak(b bool) { n.HasBreak_ = b }
-func (n *RangeStmt) Colas() bool { return n.Def }
-func (n *RangeStmt) SetColas(b bool) { n.Def = b }
func (n *RangeStmt) Type() *types.Type { return n.typ }
func (n *RangeStmt) SetType(x *types.Type) { n.typ = x }
return n
}
-func (n *ReturnStmt) Orig() Node { return n.orig }
-func (n *ReturnStmt) SetOrig(x Node) { n.orig = x }
-func (n *ReturnStmt) List() Nodes { return n.Results }
-func (n *ReturnStmt) PtrList() *Nodes { return &n.Results }
-func (n *ReturnStmt) SetList(x Nodes) { n.Results = x }
-func (n *ReturnStmt) IsDDD() bool { return false } // typecheckargs asks
+func (n *ReturnStmt) Orig() Node { return n.orig }
+func (n *ReturnStmt) SetOrig(x Node) { n.orig = x }
// A SelectStmt is a select statement: select { Cases }.
type SelectStmt struct {
miniStmt
- Label *types.Sym
- Cases Nodes
- HasBreak_ bool
+ Label *types.Sym
+ Cases Nodes
+ HasBreak bool
// TODO(rsc): Instead of recording here, replace with a block?
Compiled Nodes // compiled form, after walkswitch
return n
}
-func (n *SelectStmt) List() Nodes { return n.Cases }
-func (n *SelectStmt) PtrList() *Nodes { return &n.Cases }
-func (n *SelectStmt) SetList(x Nodes) { n.Cases = x }
-func (n *SelectStmt) Sym() *types.Sym { return n.Label }
-func (n *SelectStmt) SetSym(x *types.Sym) { n.Label = x }
-func (n *SelectStmt) HasBreak() bool { return n.HasBreak_ }
-func (n *SelectStmt) SetHasBreak(x bool) { n.HasBreak_ = x }
-func (n *SelectStmt) Body() Nodes { return n.Compiled }
-func (n *SelectStmt) PtrBody() *Nodes { return &n.Compiled }
-func (n *SelectStmt) SetBody(x Nodes) { n.Compiled = x }
-
// A SendStmt is a send statement: X <- Y.
type SendStmt struct {
miniStmt
return n
}
-func (n *SendStmt) Left() Node { return n.Chan }
-func (n *SendStmt) SetLeft(x Node) { n.Chan = x }
-func (n *SendStmt) Right() Node { return n.Value }
-func (n *SendStmt) SetRight(y Node) { n.Value = y }
-
// A SwitchStmt is a switch statement: switch Init; Expr { Cases }.
type SwitchStmt struct {
miniStmt
- Tag Node
- Cases Nodes // list of *CaseStmt
- Label *types.Sym
- HasBreak_ bool
+ Tag Node
+ Cases Nodes // list of *CaseStmt
+ Label *types.Sym
+ HasBreak bool
// TODO(rsc): Instead of recording here, replace with a block?
Compiled Nodes // compiled form, after walkswitch
return n
}
-func (n *SwitchStmt) Left() Node { return n.Tag }
-func (n *SwitchStmt) SetLeft(x Node) { n.Tag = x }
-func (n *SwitchStmt) List() Nodes { return n.Cases }
-func (n *SwitchStmt) PtrList() *Nodes { return &n.Cases }
-func (n *SwitchStmt) SetList(x Nodes) { n.Cases = x }
-func (n *SwitchStmt) Body() Nodes { return n.Compiled }
-func (n *SwitchStmt) PtrBody() *Nodes { return &n.Compiled }
-func (n *SwitchStmt) SetBody(x Nodes) { n.Compiled = x }
-func (n *SwitchStmt) Sym() *types.Sym { return n.Label }
-func (n *SwitchStmt) SetSym(x *types.Sym) { n.Label = x }
-func (n *SwitchStmt) HasBreak() bool { return n.HasBreak_ }
-func (n *SwitchStmt) SetHasBreak(x bool) { n.HasBreak_ = x }
-
// A TypeSwitchGuard is the [Name :=] X.(type) in a type switch.
type TypeSwitchGuard struct {
miniNode
n.op = OTYPESW
return n
}
-
-func (n *TypeSwitchGuard) Left() Node {
- if n.Tag == nil {
- return nil
- }
- return n.Tag
-}
-func (n *TypeSwitchGuard) SetLeft(x Node) {
- if x == nil {
- n.Tag = nil
- return
- }
- n.Tag = x.(*Ident)
-}
-func (n *TypeSwitchGuard) Right() Node { return n.X }
-func (n *TypeSwitchGuard) SetRight(x Node) { n.X = x }
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+)
+
+// Names holds known names.
+var Names struct {
+ Staticuint64s *Name
+ Zerobase *Name
+}
+
+// Syms holds known symbols.
+var Syms struct {
+ AssertE2I *obj.LSym
+ AssertE2I2 *obj.LSym
+ AssertI2I *obj.LSym
+ AssertI2I2 *obj.LSym
+ Deferproc *obj.LSym
+ DeferprocStack *obj.LSym
+ Deferreturn *obj.LSym
+ Duffcopy *obj.LSym
+ Duffzero *obj.LSym
+ GCWriteBarrier *obj.LSym
+ Goschedguarded *obj.LSym
+ Growslice *obj.LSym
+ Msanread *obj.LSym
+ Msanwrite *obj.LSym
+ Msanmove *obj.LSym
+ Newobject *obj.LSym
+ Newproc *obj.LSym
+ Panicdivide *obj.LSym
+ Panicshift *obj.LSym
+ PanicdottypeE *obj.LSym
+ PanicdottypeI *obj.LSym
+ Panicnildottype *obj.LSym
+ Panicoverflow *obj.LSym
+ Raceread *obj.LSym
+ Racereadrange *obj.LSym
+ Racewrite *obj.LSym
+ Racewriterange *obj.LSym
+ // Wasm
+ SigPanic *obj.LSym
+ Typedmemclr *obj.LSym
+ Typedmemmove *obj.LSym
+ Udiv *obj.LSym
+ WriteBarrier *obj.LSym
+ Zerobase *obj.LSym
+ ARM64HasATOMICS *obj.LSym
+ ARMHasVFPv4 *obj.LSym
+ X86HasFMA *obj.LSym
+ X86HasPOPCNT *obj.LSym
+ X86HasSSE41 *obj.LSym
+ // Wasm
+ WasmDiv *obj.LSym
+ // Wasm
+ WasmMove *obj.LSym
+ // Wasm
+ WasmZero *obj.LSym
+ // Wasm
+ WasmTruncS *obj.LSym
+ // Wasm
+ WasmTruncU *obj.LSym
+}
+
+// Pkgs holds known packages.
+var Pkgs struct {
+ Go *types.Pkg
+ Itab *types.Pkg
+ Itablink *types.Pkg
+ Map *types.Pkg
+ Msan *types.Pkg
+ Race *types.Pkg
+ Runtime *types.Pkg
+ Track *types.Pkg
+ Unsafe *types.Pkg
+}
// Note that DoList only calls do on the nodes in the list, not their children.
// If x's children should be processed, do(x) must call DoChildren(x, do) itself.
func DoList(list Nodes, do func(Node) error) error {
- for _, x := range list.Slice() {
+ for _, x := range list {
if x != nil {
if err := do(x); err != nil {
return err
// VisitList calls Visit(x, visit) for each node x in the list.
func VisitList(list Nodes, visit func(Node)) {
- for _, x := range list.Slice() {
+ for _, x := range list {
Visit(x, visit)
}
}
// Otherwise, AnyList returns false after calling Any(x, cond)
// for every x in the list.
func AnyList(list Nodes, cond func(Node) bool) bool {
- for _, x := range list.Slice() {
+ for _, x := range list {
if Any(x, cond) {
return true
}
// Note that editList only calls edit on the nodes in the list, not their children.
// If x's children should be processed, edit(x) must call EditChildren(x, edit) itself.
func editList(list Nodes, edit func(Node) Node) {
- s := list.Slice()
- for i, x := range list.Slice() {
+ s := list
+ for i, x := range list {
if x != nil {
s[i] = edit(x)
}
--- /dev/null
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package liveness
+
+import "cmd/compile/internal/bitvec"
+
+// FNV-1 hash function constants.
+const (
+ h0 = 2166136261
+ hp = 16777619
+)
+
+// bvecSet is a set of bvecs, in initial insertion order.
+type bvecSet struct {
+ index []int // hash -> uniq index. -1 indicates empty slot.
+ uniq []bitvec.BitVec // unique bvecs, in insertion order
+}
+
+func (m *bvecSet) grow() {
+ // Allocate new index.
+ n := len(m.index) * 2
+ if n == 0 {
+ n = 32
+ }
+ newIndex := make([]int, n)
+ for i := range newIndex {
+ newIndex[i] = -1
+ }
+
+ // Rehash into newIndex.
+ for i, bv := range m.uniq {
+ h := hashbitmap(h0, bv) % uint32(len(newIndex))
+ for {
+ j := newIndex[h]
+ if j < 0 {
+ newIndex[h] = i
+ break
+ }
+ h++
+ if h == uint32(len(newIndex)) {
+ h = 0
+ }
+ }
+ }
+ m.index = newIndex
+}
+
+// add adds bv to the set and returns its index in m.extractUnique.
+// The caller must not modify bv after this.
+func (m *bvecSet) add(bv bitvec.BitVec) int {
+ if len(m.uniq)*4 >= len(m.index) {
+ m.grow()
+ }
+
+ index := m.index
+ h := hashbitmap(h0, bv) % uint32(len(index))
+ for {
+ j := index[h]
+ if j < 0 {
+ // New bvec.
+ index[h] = len(m.uniq)
+ m.uniq = append(m.uniq, bv)
+ return len(m.uniq) - 1
+ }
+ jlive := m.uniq[j]
+ if bv.Eq(jlive) {
+ // Existing bvec.
+ return j
+ }
+
+ h++
+ if h == uint32(len(index)) {
+ h = 0
+ }
+ }
+}
+
+// extractUnique returns the slice of unique bit vectors in m, as
+// indexed by the result of bvecSet.add.
+func (m *bvecSet) extractUnique() []bitvec.BitVec {
+ return m.uniq
+}
+
+func hashbitmap(h uint32, bv bitvec.BitVec) uint32 {
+ n := int((bv.N + 31) / 32)
+ for i := 0; i < n; i++ {
+ w := bv.B[i]
+ h = (h * hp) ^ (w & 0xff)
+ h = (h * hp) ^ ((w >> 8) & 0xff)
+ h = (h * hp) ^ ((w >> 16) & 0xff)
+ h = (h * hp) ^ ((w >> 24) & 0xff)
+ }
+
+ return h
+}
//
// Each level includes the earlier output as well.
-package gc
+package liveness
import (
+ "crypto/md5"
+ "fmt"
+ "strings"
+
"cmd/compile/internal/base"
+ "cmd/compile/internal/bitvec"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/objabi"
- "crypto/md5"
- "fmt"
- "strings"
)
// OpVarDef is an annotation for the liveness analysis, marking a place
// so the compiler can allocate two temps to the same location. Here it's now
// useless, since the implementation of stack objects.
-// BlockEffects summarizes the liveness effects on an SSA block.
-type BlockEffects struct {
+// blockEffects summarizes the liveness effects on an SSA block.
+type blockEffects struct {
// Computed during Liveness.prologue using only the content of
// individual blocks:
//
// uevar: upward exposed variables (used before set in block)
// varkill: killed variables (set in block)
- uevar bvec
- varkill bvec
+ uevar bitvec.BitVec
+ varkill bitvec.BitVec
// Computed during Liveness.solve using control flow information:
//
// livein: variables live at block entry
// liveout: variables live at block exit
- livein bvec
- liveout bvec
+ livein bitvec.BitVec
+ liveout bitvec.BitVec
}
// A collection of global state used by liveness analysis.
-type Liveness struct {
+type liveness struct {
fn *ir.Func
f *ssa.Func
vars []*ir.Name
idx map[*ir.Name]int32
stkptrsize int64
- be []BlockEffects
+ be []blockEffects
// allUnsafe indicates that all points in this function are
// unsafe-points.
allUnsafe bool
// unsafePoints bit i is set if Value ID i is an unsafe-point
// (preemption is not allowed). Only valid if !allUnsafe.
- unsafePoints bvec
+ unsafePoints bitvec.BitVec
// An array with a bit vector for each safe point in the
// current Block during Liveness.epilogue. Indexed in Value
// order for that block. Additionally, for the entry block
// livevars[0] is the entry bitmap. Liveness.compact moves
// these to stackMaps.
- livevars []bvec
+ livevars []bitvec.BitVec
// livenessMap maps from safe points (i.e., CALLs) to their
// liveness map indexes.
- livenessMap LivenessMap
+ livenessMap Map
stackMapSet bvecSet
- stackMaps []bvec
+ stackMaps []bitvec.BitVec
cache progeffectscache
}
-// LivenessMap maps from *ssa.Value to LivenessIndex.
-type LivenessMap struct {
- vals map[ssa.ID]LivenessIndex
- // The set of live, pointer-containing variables at the deferreturn
+// Map maps from *ssa.Value to LivenessIndex.
+type Map struct {
+ Vals map[ssa.ID]objw.LivenessIndex
+ // The set of live, pointer-containing variables at the DeferReturn
// call (only set when open-coded defers are used).
- deferreturn LivenessIndex
+ DeferReturn objw.LivenessIndex
}
-func (m *LivenessMap) reset() {
- if m.vals == nil {
- m.vals = make(map[ssa.ID]LivenessIndex)
+func (m *Map) reset() {
+ if m.Vals == nil {
+ m.Vals = make(map[ssa.ID]objw.LivenessIndex)
} else {
- for k := range m.vals {
- delete(m.vals, k)
+ for k := range m.Vals {
+ delete(m.Vals, k)
}
}
- m.deferreturn = LivenessDontCare
+ m.DeferReturn = objw.LivenessDontCare
}
-func (m *LivenessMap) set(v *ssa.Value, i LivenessIndex) {
- m.vals[v.ID] = i
+func (m *Map) set(v *ssa.Value, i objw.LivenessIndex) {
+ m.Vals[v.ID] = i
}
-func (m LivenessMap) Get(v *ssa.Value) LivenessIndex {
+func (m Map) Get(v *ssa.Value) objw.LivenessIndex {
// If v isn't in the map, then it's a "don't care" and not an
// unsafe-point.
- if idx, ok := m.vals[v.ID]; ok {
+ if idx, ok := m.Vals[v.ID]; ok {
return idx
}
- return LivenessIndex{StackMapDontCare, false}
-}
-
-// LivenessIndex stores the liveness map information for a Value.
-type LivenessIndex struct {
- stackMapIndex int
-
- // isUnsafePoint indicates that this is an unsafe-point.
- //
- // Note that it's possible for a call Value to have a stack
- // map while also being an unsafe-point. This means it cannot
- // be preempted at this instruction, but that a preemption or
- // stack growth may happen in the called function.
- isUnsafePoint bool
-}
-
-// LivenessDontCare indicates that the liveness information doesn't
-// matter. Currently it is used in deferreturn liveness when we don't
-// actually need it. It should never be emitted to the PCDATA stream.
-var LivenessDontCare = LivenessIndex{StackMapDontCare, true}
-
-// StackMapDontCare indicates that the stack map index at a Value
-// doesn't matter.
-//
-// This is a sentinel value that should never be emitted to the PCDATA
-// stream. We use -1000 because that's obviously never a valid stack
-// index (but -1 is).
-const StackMapDontCare = -1000
-
-func (idx LivenessIndex) StackMapValid() bool {
- return idx.stackMapIndex != StackMapDontCare
+ return objw.LivenessIndex{StackMapIndex: objw.StackMapDontCare, IsUnsafePoint: false}
}
type progeffectscache struct {
initialized bool
}
-// livenessShouldTrack reports whether the liveness analysis
+// ShouldTrack reports whether the liveness analysis
// should track the variable n.
// We don't care about variables that have no pointers,
// nor do we care about non-local variables,
// nor do we care about empty structs (handled by the pointer check),
// nor do we care about the fake PAUTOHEAP variables.
-func livenessShouldTrack(nn ir.Node) bool {
+func ShouldTrack(nn ir.Node) bool {
if nn.Op() != ir.ONAME {
return false
}
n := nn.(*ir.Name)
- return (n.Class() == ir.PAUTO || n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) && n.Type().HasPointers()
+ return (n.Class_ == ir.PAUTO || n.Class_ == ir.PPARAM || n.Class_ == ir.PPARAMOUT) && n.Type().HasPointers()
}
// getvariables returns the list of on-stack variables that we need to track
func getvariables(fn *ir.Func) ([]*ir.Name, map[*ir.Name]int32) {
var vars []*ir.Name
for _, n := range fn.Dcl {
- if livenessShouldTrack(n) {
+ if ShouldTrack(n) {
vars = append(vars, n)
}
}
return vars, idx
}
-func (lv *Liveness) initcache() {
+func (lv *liveness) initcache() {
if lv.cache.initialized {
base.Fatalf("liveness cache initialized twice")
return
lv.cache.initialized = true
for i, node := range lv.vars {
- switch node.Class() {
+ switch node.Class_ {
case ir.PPARAM:
// A return instruction with a p.to is a tail return, which brings
// the stack pointer back up (if it ever went down) and then jumps
// valueEffects returns the index of a variable in lv.vars and the
// liveness effects v has on that variable.
// If v does not affect any tracked variables, it returns -1, 0.
-func (lv *Liveness) valueEffects(v *ssa.Value) (int32, liveEffect) {
+func (lv *liveness) valueEffects(v *ssa.Value) (int32, liveEffect) {
n, e := affectedNode(v)
if e == 0 || n == nil || n.Op() != ir.ONAME { // cheapest checks first
return -1, 0
// Special cases.
switch v.Op {
case ssa.OpLoadReg:
- n, _ := AutoVar(v.Args[0])
+ n, _ := ssa.AutoVar(v.Args[0])
return n, ssa.SymRead
case ssa.OpStoreReg:
- n, _ := AutoVar(v)
+ n, _ := ssa.AutoVar(v)
return n, ssa.SymWrite
case ssa.OpVarLive:
case ssa.OpVarDef, ssa.OpVarKill:
return v.Aux.(*ir.Name), ssa.SymWrite
case ssa.OpKeepAlive:
- n, _ := AutoVar(v.Args[0])
+ n, _ := ssa.AutoVar(v.Args[0])
return n, ssa.SymRead
}
}
type livenessFuncCache struct {
- be []BlockEffects
- livenessMap LivenessMap
+ be []blockEffects
+ livenessMap Map
}
// Constructs a new liveness structure used to hold the global state of the
// liveness computation. The cfg argument is a slice of *BasicBlocks and the
// vars argument is a slice of *Nodes.
-func newliveness(fn *ir.Func, f *ssa.Func, vars []*ir.Name, idx map[*ir.Name]int32, stkptrsize int64) *Liveness {
- lv := &Liveness{
+func newliveness(fn *ir.Func, f *ssa.Func, vars []*ir.Name, idx map[*ir.Name]int32, stkptrsize int64) *liveness {
+ lv := &liveness{
fn: fn,
f: f,
vars: vars,
if cap(lc.be) >= f.NumBlocks() {
lv.be = lc.be[:f.NumBlocks()]
}
- lv.livenessMap = LivenessMap{vals: lc.livenessMap.vals, deferreturn: LivenessDontCare}
- lc.livenessMap.vals = nil
+ lv.livenessMap = Map{Vals: lc.livenessMap.Vals, DeferReturn: objw.LivenessDontCare}
+ lc.livenessMap.Vals = nil
}
if lv.be == nil {
- lv.be = make([]BlockEffects, f.NumBlocks())
+ lv.be = make([]blockEffects, f.NumBlocks())
}
nblocks := int32(len(f.Blocks))
nvars := int32(len(vars))
- bulk := bvbulkalloc(nvars, nblocks*7)
+ bulk := bitvec.NewBulk(nvars, nblocks*7)
for _, b := range f.Blocks {
be := lv.blockEffects(b)
- be.uevar = bulk.next()
- be.varkill = bulk.next()
- be.livein = bulk.next()
- be.liveout = bulk.next()
+ be.uevar = bulk.Next()
+ be.varkill = bulk.Next()
+ be.livein = bulk.Next()
+ be.liveout = bulk.Next()
}
lv.livenessMap.reset()
return lv
}
-func (lv *Liveness) blockEffects(b *ssa.Block) *BlockEffects {
+func (lv *liveness) blockEffects(b *ssa.Block) *blockEffects {
return &lv.be[b.ID]
}
// NOTE: The bitmap for a specific type t could be cached in t after
// the first run and then simply copied into bv at the correct offset
// on future calls with the same type t.
-func onebitwalktype1(t *types.Type, off int64, bv bvec) {
+func SetTypeBits(t *types.Type, off int64, bv bitvec.BitVec) {
if t.Align > 0 && off&int64(t.Align-1) != 0 {
base.Fatalf("onebitwalktype1: invalid initial alignment: type %v has alignment %d, but offset is %v", t, t.Align, off)
}
switch t.Kind() {
case types.TPTR, types.TUNSAFEPTR, types.TFUNC, types.TCHAN, types.TMAP:
- if off&int64(Widthptr-1) != 0 {
+ if off&int64(types.PtrSize-1) != 0 {
base.Fatalf("onebitwalktype1: invalid alignment, %v", t)
}
- bv.Set(int32(off / int64(Widthptr))) // pointer
+ bv.Set(int32(off / int64(types.PtrSize))) // pointer
case types.TSTRING:
// struct { byte *str; intgo len; }
- if off&int64(Widthptr-1) != 0 {
+ if off&int64(types.PtrSize-1) != 0 {
base.Fatalf("onebitwalktype1: invalid alignment, %v", t)
}
- bv.Set(int32(off / int64(Widthptr))) //pointer in first slot
+ bv.Set(int32(off / int64(types.PtrSize))) //pointer in first slot
case types.TINTER:
// struct { Itab *tab; void *data; }
// or, when isnilinter(t)==true:
// struct { Type *type; void *data; }
- if off&int64(Widthptr-1) != 0 {
+ if off&int64(types.PtrSize-1) != 0 {
base.Fatalf("onebitwalktype1: invalid alignment, %v", t)
}
// The first word of an interface is a pointer, but we don't
// the underlying type so it won't be GCd.
// If we ever have a moving GC, we need to change this for 2b (as
// well as scan itabs to update their itab._type fields).
- bv.Set(int32(off/int64(Widthptr) + 1)) // pointer in second slot
+ bv.Set(int32(off/int64(types.PtrSize) + 1)) // pointer in second slot
case types.TSLICE:
// struct { byte *array; uintgo len; uintgo cap; }
- if off&int64(Widthptr-1) != 0 {
+ if off&int64(types.PtrSize-1) != 0 {
base.Fatalf("onebitwalktype1: invalid TARRAY alignment, %v", t)
}
- bv.Set(int32(off / int64(Widthptr))) // pointer in first slot (BitsPointer)
+ bv.Set(int32(off / int64(types.PtrSize))) // pointer in first slot (BitsPointer)
case types.TARRAY:
elt := t.Elem()
break
}
for i := int64(0); i < t.NumElem(); i++ {
- onebitwalktype1(elt, off, bv)
+ SetTypeBits(elt, off, bv)
off += elt.Width
}
case types.TSTRUCT:
for _, f := range t.Fields().Slice() {
- onebitwalktype1(f.Type, off+f.Offset, bv)
+ SetTypeBits(f.Type, off+f.Offset, bv)
}
default:
// Generates live pointer value maps for arguments and local variables. The
// this argument and the in arguments are always assumed live. The vars
// argument is a slice of *Nodes.
-func (lv *Liveness) pointerMap(liveout bvec, vars []*ir.Name, args, locals bvec) {
+func (lv *liveness) pointerMap(liveout bitvec.BitVec, vars []*ir.Name, args, locals bitvec.BitVec) {
for i := int32(0); ; i++ {
i = liveout.Next(i)
if i < 0 {
break
}
node := vars[i]
- switch node.Class() {
+ switch node.Class_ {
case ir.PAUTO:
- onebitwalktype1(node.Type(), node.FrameOffset()+lv.stkptrsize, locals)
+ SetTypeBits(node.Type(), node.FrameOffset()+lv.stkptrsize, locals)
case ir.PPARAM, ir.PPARAMOUT:
- onebitwalktype1(node.Type(), node.FrameOffset(), args)
+ SetTypeBits(node.Type(), node.FrameOffset(), args)
}
}
}
-// allUnsafe indicates that all points in this function are
+// IsUnsafe indicates that all points in this function are
// unsafe-points.
-func allUnsafe(f *ssa.Func) bool {
+func IsUnsafe(f *ssa.Func) bool {
// The runtime assumes the only safe-points are function
// prologues (because that's how it used to be). We could and
// should improve that, but for now keep consider all points
}
// markUnsafePoints finds unsafe points and computes lv.unsafePoints.
-func (lv *Liveness) markUnsafePoints() {
- if allUnsafe(lv.f) {
+func (lv *liveness) markUnsafePoints() {
+ if IsUnsafe(lv.f) {
// No complex analysis necessary.
lv.allUnsafe = true
return
}
- lv.unsafePoints = bvalloc(int32(lv.f.NumValues()))
+ lv.unsafePoints = bitvec.New(int32(lv.f.NumValues()))
// Mark architecture-specific unsafe points.
for _, b := range lv.f.Blocks {
var load *ssa.Value
v := wbBlock.Controls[0]
for {
- if sym, ok := v.Aux.(*obj.LSym); ok && sym == writeBarrier {
+ if sym, ok := v.Aux.(*obj.LSym); ok && sym == ir.Syms.WriteBarrier {
load = v
break
}
// nice to only flood as far as the unsafe.Pointer -> uintptr
// conversion, but it's hard to know which argument of an Add
// or Sub to follow.
- var flooded bvec
+ var flooded bitvec.BitVec
var flood func(b *ssa.Block, vi int)
flood = func(b *ssa.Block, vi int) {
- if flooded.n == 0 {
- flooded = bvalloc(int32(lv.f.NumBlocks()))
+ if flooded.N == 0 {
+ flooded = bitvec.New(int32(lv.f.NumBlocks()))
}
if flooded.Get(int32(b.ID)) {
return
// This does not necessarily mean the instruction is a safe-point. In
// particular, call Values can have a stack map in case the callee
// grows the stack, but not themselves be a safe-point.
-func (lv *Liveness) hasStackMap(v *ssa.Value) bool {
+func (lv *liveness) hasStackMap(v *ssa.Value) bool {
if !v.Op.IsCall() {
return false
}
// typedmemclr and typedmemmove are write barriers and
// deeply non-preemptible. They are unsafe points and
// hence should not have liveness maps.
- if sym, ok := v.Aux.(*ssa.AuxCall); ok && (sym.Fn == typedmemclr || sym.Fn == typedmemmove) {
+ if sym, ok := v.Aux.(*ssa.AuxCall); ok && (sym.Fn == ir.Syms.Typedmemclr || sym.Fn == ir.Syms.Typedmemmove) {
return false
}
return true
// Initializes the sets for solving the live variables. Visits all the
// instructions in each basic block to summarizes the information at each basic
// block
-func (lv *Liveness) prologue() {
+func (lv *liveness) prologue() {
lv.initcache()
for _, b := range lv.f.Blocks {
}
// Solve the liveness dataflow equations.
-func (lv *Liveness) solve() {
+func (lv *liveness) solve() {
// These temporary bitvectors exist to avoid successive allocations and
// frees within the loop.
nvars := int32(len(lv.vars))
- newlivein := bvalloc(nvars)
- newliveout := bvalloc(nvars)
+ newlivein := bitvec.New(nvars)
+ newliveout := bitvec.New(nvars)
// Walk blocks in postorder ordering. This improves convergence.
po := lv.f.Postorder()
// Visits all instructions in a basic block and computes a bit vector of live
// variables at each safe point locations.
-func (lv *Liveness) epilogue() {
+func (lv *liveness) epilogue() {
nvars := int32(len(lv.vars))
- liveout := bvalloc(nvars)
- livedefer := bvalloc(nvars) // always-live variables
+ liveout := bitvec.New(nvars)
+ livedefer := bitvec.New(nvars) // always-live variables
// If there is a defer (that could recover), then all output
// parameters are live all the time. In addition, any locals
// don't need to keep the stack copy live?
if lv.fn.HasDefer() {
for i, n := range lv.vars {
- if n.Class() == ir.PPARAMOUT {
+ if n.Class_ == ir.PPARAMOUT {
if n.Name().IsOutputParamHeapAddr() {
// Just to be paranoid. Heap addresses are PAUTOs.
base.Fatalf("variable %v both output param and heap output param", n)
{
// Reserve an entry for function entry.
- live := bvalloc(nvars)
+ live := bitvec.New(nvars)
lv.livevars = append(lv.livevars, live)
}
continue
}
- live := bvalloc(nvars)
+ live := bitvec.New(nvars)
lv.livevars = append(lv.livevars, live)
}
if !liveout.Get(int32(i)) {
continue
}
- if n.Class() == ir.PPARAM {
+ if n.Class_ == ir.PPARAM {
continue // ok
}
base.Fatalf("bad live variable at entry of %v: %L", lv.fn.Nname, n)
// If we have an open-coded deferreturn call, make a liveness map for it.
if lv.fn.OpenCodedDeferDisallowed() {
- lv.livenessMap.deferreturn = LivenessDontCare
+ lv.livenessMap.DeferReturn = objw.LivenessDontCare
} else {
- lv.livenessMap.deferreturn = LivenessIndex{
- stackMapIndex: lv.stackMapSet.add(livedefer),
- isUnsafePoint: false,
+ lv.livenessMap.DeferReturn = objw.LivenessIndex{
+ StackMapIndex: lv.stackMapSet.add(livedefer),
+ IsUnsafePoint: false,
}
}
// Done compacting. Throw out the stack map set.
- lv.stackMaps = lv.stackMapSet.extractUniqe()
+ lv.stackMaps = lv.stackMapSet.extractUnique()
lv.stackMapSet = bvecSet{}
// Useful sanity check: on entry to the function,
// the only things that can possibly be live are the
// input parameters.
for j, n := range lv.vars {
- if n.Class() != ir.PPARAM && lv.stackMaps[0].Get(int32(j)) {
+ if n.Class_ != ir.PPARAM && lv.stackMaps[0].Get(int32(j)) {
lv.f.Fatalf("%v %L recorded as live on entry", lv.fn.Nname, n)
}
}
// is actually a net loss: we save about 50k of argument bitmaps but the new
// PCDATA tables cost about 100k. So for now we keep using a single index for
// both bitmap lists.
-func (lv *Liveness) compact(b *ssa.Block) {
+func (lv *liveness) compact(b *ssa.Block) {
pos := 0
if b == lv.f.Entry {
// Handle entry stack map.
for _, v := range b.Values {
hasStackMap := lv.hasStackMap(v)
isUnsafePoint := lv.allUnsafe || lv.unsafePoints.Get(int32(v.ID))
- idx := LivenessIndex{StackMapDontCare, isUnsafePoint}
+ idx := objw.LivenessIndex{StackMapIndex: objw.StackMapDontCare, IsUnsafePoint: isUnsafePoint}
if hasStackMap {
- idx.stackMapIndex = lv.stackMapSet.add(lv.livevars[pos])
+ idx.StackMapIndex = lv.stackMapSet.add(lv.livevars[pos])
pos++
}
if hasStackMap || isUnsafePoint {
lv.livevars = lv.livevars[:0]
}
-func (lv *Liveness) showlive(v *ssa.Value, live bvec) {
+func (lv *liveness) showlive(v *ssa.Value, live bitvec.BitVec) {
if base.Flag.Live == 0 || ir.FuncName(lv.fn) == "init" || strings.HasPrefix(ir.FuncName(lv.fn), ".") {
return
}
base.WarnfAt(pos, s)
}
-func (lv *Liveness) printbvec(printed bool, name string, live bvec) bool {
+func (lv *liveness) printbvec(printed bool, name string, live bitvec.BitVec) bool {
if live.IsEmpty() {
return printed
}
}
// printeffect is like printbvec, but for valueEffects.
-func (lv *Liveness) printeffect(printed bool, name string, pos int32, x bool) bool {
+func (lv *liveness) printeffect(printed bool, name string, pos int32, x bool) bool {
if !x {
return printed
}
// Prints the computed liveness information and inputs, for debugging.
// This format synthesizes the information used during the multiple passes
// into a single presentation.
-func (lv *Liveness) printDebug() {
+func (lv *liveness) printDebug() {
fmt.Printf("liveness: %s\n", ir.FuncName(lv.fn))
for i, b := range lv.f.Blocks {
fmt.Printf("\tlive=")
printed = false
if pcdata.StackMapValid() {
- live := lv.stackMaps[pcdata.stackMapIndex]
+ live := lv.stackMaps[pcdata.StackMapIndex]
for j, n := range lv.vars {
if !live.Get(int32(j)) {
continue
fmt.Printf("\n")
}
- if pcdata.isUnsafePoint {
+ if pcdata.IsUnsafePoint {
fmt.Printf("\tunsafe-point\n")
}
}
// first word dumped is the total number of bitmaps. The second word is the
// length of the bitmaps. All bitmaps are assumed to be of equal length. The
// remaining bytes are the raw bitmaps.
-func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) {
+func (lv *liveness) emit() (argsSym, liveSym *obj.LSym) {
// Size args bitmaps to be just large enough to hold the largest pointer.
// First, find the largest Xoffset node we care about.
// (Nodes without pointers aren't in lv.vars; see livenessShouldTrack.)
var maxArgNode *ir.Name
for _, n := range lv.vars {
- switch n.Class() {
+ switch n.Class_ {
case ir.PPARAM, ir.PPARAMOUT:
if maxArgNode == nil || n.FrameOffset() > maxArgNode.FrameOffset() {
maxArgNode = n
// Next, find the offset of the largest pointer in the largest node.
var maxArgs int64
if maxArgNode != nil {
- maxArgs = maxArgNode.FrameOffset() + typeptrdata(maxArgNode.Type())
+ maxArgs = maxArgNode.FrameOffset() + types.PtrDataSize(maxArgNode.Type())
}
// Size locals bitmaps to be stkptrsize sized.
// Temporary symbols for encoding bitmaps.
var argsSymTmp, liveSymTmp obj.LSym
- args := bvalloc(int32(maxArgs / int64(Widthptr)))
- aoff := duint32(&argsSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps
- aoff = duint32(&argsSymTmp, aoff, uint32(args.n)) // number of bits in each bitmap
+ args := bitvec.New(int32(maxArgs / int64(types.PtrSize)))
+ aoff := objw.Uint32(&argsSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps
+ aoff = objw.Uint32(&argsSymTmp, aoff, uint32(args.N)) // number of bits in each bitmap
- locals := bvalloc(int32(maxLocals / int64(Widthptr)))
- loff := duint32(&liveSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps
- loff = duint32(&liveSymTmp, loff, uint32(locals.n)) // number of bits in each bitmap
+ locals := bitvec.New(int32(maxLocals / int64(types.PtrSize)))
+ loff := objw.Uint32(&liveSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps
+ loff = objw.Uint32(&liveSymTmp, loff, uint32(locals.N)) // number of bits in each bitmap
for _, live := range lv.stackMaps {
args.Clear()
lv.pointerMap(live, lv.vars, args, locals)
- aoff = dbvec(&argsSymTmp, aoff, args)
- loff = dbvec(&liveSymTmp, loff, locals)
+ aoff = objw.BitVec(&argsSymTmp, aoff, args)
+ loff = objw.BitVec(&liveSymTmp, loff, locals)
}
// Give these LSyms content-addressable names,
return makeSym(&argsSymTmp), makeSym(&liveSymTmp)
}
-// Entry pointer for liveness analysis. Solves for the liveness of
+// Compute is the entry pointer for liveness analysis. It solves for the liveness of
// pointer variables in the function and emits a runtime data
// structure read by the garbage collector.
// Returns a map from GC safe points to their corresponding stack map index.
-func liveness(curfn *ir.Func, f *ssa.Func, stkptrsize int64, pp *Progs) LivenessMap {
+func Compute(curfn *ir.Func, f *ssa.Func, stkptrsize int64, pp *objw.Progs) Map {
// Construct the global liveness state.
vars, idx := getvariables(curfn)
lv := newliveness(curfn, f, vars, idx, stkptrsize)
for _, b := range f.Blocks {
for _, val := range b.Values {
if idx := lv.livenessMap.Get(val); idx.StackMapValid() {
- lv.showlive(val, lv.stackMaps[idx.stackMapIndex])
+ lv.showlive(val, lv.stackMaps[idx.StackMapIndex])
}
}
}
cache := f.Cache.Liveness.(*livenessFuncCache)
if cap(lv.be) < 2000 { // Threshold from ssa.Cache slices.
for i := range lv.be {
- lv.be[i] = BlockEffects{}
+ lv.be[i] = blockEffects{}
}
cache.be = lv.be
}
- if len(lv.livenessMap.vals) < 2000 {
+ if len(lv.livenessMap.Vals) < 2000 {
cache.livenessMap = lv.livenessMap
}
}
fninfo.GCArgs, fninfo.GCLocals = lv.emit()
p := pp.Prog(obj.AFUNCDATA)
- Addrconst(&p.From, objabi.FUNCDATA_ArgsPointerMaps)
+ p.From.SetConst(objabi.FUNCDATA_ArgsPointerMaps)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = fninfo.GCArgs
p = pp.Prog(obj.AFUNCDATA)
- Addrconst(&p.From, objabi.FUNCDATA_LocalsPointerMaps)
+ p.From.SetConst(objabi.FUNCDATA_LocalsPointerMaps)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = fninfo.GCLocals
return false
}
+
+// WriteFuncMap writes the pointer bitmaps for the arguments of function fn
+// as the content of the symbol <fn>.args_stackmap, for use by assembly
+// implementations that lack their own liveness information. Functions whose
+// name is "_" or that carry a linkname are skipped.
+func WriteFuncMap(fn *ir.Func) {
+	if ir.FuncName(fn) == "_" || fn.Sym().Linkname != "" {
+		return
+	}
+	lsym := base.Ctxt.Lookup(fn.LSym.Name + ".args_stackmap")
+	// One bit per pointer-sized word of the argument area; doubled so the
+	// same vector can be reused for the results bitmap below.
+	nptr := int(fn.Type().ArgWidth() / int64(types.PtrSize))
+	bv := bitvec.New(int32(nptr) * 2)
+	nbitmap := 1
+	if fn.Type().NumResults() > 0 {
+		nbitmap = 2
+	}
+	// Header: number of bitmaps, then the bit length of each bitmap.
+	off := objw.Uint32(lsym, 0, uint32(nbitmap))
+	off = objw.Uint32(lsym, off, uint32(bv.N))
+
+	// First bitmap: receiver (for methods) plus input parameters.
+	if ir.IsMethod(fn) {
+		SetTypeBits(fn.Type().Recvs(), 0, bv)
+	}
+	if fn.Type().NumParams() > 0 {
+		SetTypeBits(fn.Type().Params(), 0, bv)
+	}
+	off = objw.BitVec(lsym, off, bv)
+
+	// Second bitmap (if any): result parameters. Note: bits from the first
+	// bitmap are not cleared, matching the original accumulation behavior.
+	if fn.Type().NumResults() > 0 {
+		SetTypeBits(fn.Type().Results(), 0, bv)
+		off = objw.BitVec(lsym, off, bv)
+	}
+
+	objw.Global(lsym, int32(off), obj.RODATA|obj.LOCAL)
+}
package mips
import (
- "cmd/compile/internal/gc"
"cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
"cmd/internal/obj/mips"
"cmd/internal/objabi"
)
-func Init(arch *gc.Arch) {
+func Init(arch *ssagen.ArchInfo) {
arch.LinkArch = &mips.Linkmips
if objabi.GOARCH == "mipsle" {
arch.LinkArch = &mips.Linkmipsle
arch.ZeroRange = zerorange
arch.Ginsnop = ginsnop
arch.Ginsnopdefer = ginsnop
- arch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
+ arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
arch.SSAGenValue = ssaGenValue
arch.SSAGenBlock = ssaGenBlock
}
import (
"cmd/compile/internal/base"
- "cmd/compile/internal/gc"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/mips"
)
// TODO(mips): implement DUFFZERO
-func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
if cnt == 0 {
return p
}
- if cnt < int64(4*gc.Widthptr) {
- for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
- p = pp.Appendpp(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, base.Ctxt.FixedFrameSize()+off+i)
+ if cnt < int64(4*types.PtrSize) {
+ for i := int64(0); i < cnt; i += int64(types.PtrSize) {
+ p = pp.Append(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, base.Ctxt.FixedFrameSize()+off+i)
}
} else {
//fmt.Printf("zerorange frame:%v, lo: %v, hi:%v \n", frame ,lo, hi)
// MOVW R0, (Widthptr)r1
// ADD $Widthptr, r1
// BNE r1, r2, loop
- p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-4, obj.TYPE_REG, mips.REGRT1, 0)
+ p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-4, obj.TYPE_REG, mips.REGRT1, 0)
p.Reg = mips.REGSP
- p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
+ p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
p.Reg = mips.REGRT1
- p = pp.Appendpp(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(gc.Widthptr))
+ p = pp.Append(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(types.PtrSize))
p1 := p
- p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, int64(gc.Widthptr), obj.TYPE_REG, mips.REGRT1, 0)
- p = pp.Appendpp(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
+ p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, mips.REGRT1, 0)
+ p = pp.Append(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
p.Reg = mips.REGRT2
- gc.Patch(p, p1)
+ p.To.SetTarget(p1)
}
return p
}
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
p := pp.Prog(mips.ANOR)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REG_R0
"math"
"cmd/compile/internal/base"
- "cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/mips"
panic("bad store type")
}
-func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
switch v.Op {
case ssa.OpCopy, ssa.OpMIPSMOVWreg:
t := v.Type
}
r := v.Reg()
p := s.Prog(loadByType(v.Type, r))
- gc.AddrAuto(&p.From, v.Args[0])
+ ssagen.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = r
if isHILO(r) {
p := s.Prog(storeByType(v.Type, r))
p.From.Type = obj.TYPE_REG
p.From.Reg = r
- gc.AddrAuto(&p.To, v)
+ ssagen.AddrAuto(&p.To, v)
case ssa.OpMIPSADD,
ssa.OpMIPSSUB,
ssa.OpMIPSAND,
v.Fatalf("aux is of unknown type %T", v.Aux)
case *obj.LSym:
wantreg = "SB"
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
case *ir.Name:
wantreg = "SP"
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
case nil:
// No sym, just MOVW $off(SP), R
wantreg = "SP"
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpMIPSMOVBstore,
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
case ssa.OpMIPSMOVBstorezero,
ssa.OpMIPSMOVHstorezero,
ssa.OpMIPSMOVWstorezero:
p.From.Reg = mips.REGZERO
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
case ssa.OpMIPSMOVBreg,
ssa.OpMIPSMOVBUreg,
ssa.OpMIPSMOVHreg,
p4.From.Reg = v.Args[1].Reg()
p4.Reg = mips.REG_R1
p4.To.Type = obj.TYPE_BRANCH
- gc.Patch(p4, p2)
+ p4.To.SetTarget(p2)
case ssa.OpMIPSLoweredMove:
// SUBU $4, R1
// MOVW 4(R1), Rtmp
p6.From.Reg = v.Args[2].Reg()
p6.Reg = mips.REG_R1
p6.To.Type = obj.TYPE_BRANCH
- gc.Patch(p6, p2)
+ p6.To.SetTarget(p2)
case ssa.OpMIPSCALLstatic, ssa.OpMIPSCALLclosure, ssa.OpMIPSCALLinter:
s.Call(v)
case ssa.OpMIPSLoweredWB:
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
+ p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
s.UseArgs(8) // space used in callee args area by assembly stubs
case ssa.OpMIPSLoweredPanicExtendA, ssa.OpMIPSLoweredPanicExtendB, ssa.OpMIPSLoweredPanicExtendC:
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = gc.ExtendCheckFunc[v.AuxInt]
+ p.To.Sym = ssagen.ExtendCheckFunc[v.AuxInt]
s.UseArgs(12) // space used in callee args area by assembly stubs
case ssa.OpMIPSLoweredAtomicLoad8,
ssa.OpMIPSLoweredAtomicLoad32:
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH
- gc.Patch(p3, p)
+ p3.To.SetTarget(p)
s.Prog(mips.ASYNC)
case ssa.OpMIPSLoweredAtomicAdd:
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH
- gc.Patch(p3, p)
+ p3.To.SetTarget(p)
s.Prog(mips.ASYNC)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH
- gc.Patch(p3, p)
+ p3.To.SetTarget(p)
s.Prog(mips.ASYNC)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH
- gc.Patch(p3, p)
+ p3.To.SetTarget(p)
s.Prog(mips.ASYNC)
p5.From.Type = obj.TYPE_REG
p5.From.Reg = v.Reg0()
p5.To.Type = obj.TYPE_BRANCH
- gc.Patch(p5, p1)
+ p5.To.SetTarget(p1)
s.Prog(mips.ASYNC)
p6 := s.Prog(obj.ANOP)
- gc.Patch(p2, p6)
+ p2.To.SetTarget(p6)
case ssa.OpMIPSLoweredNilCheck:
// Issue a load which will fault if arg is nil.
p := s.Prog(mips.AMOVB)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REGTMP
if logopt.Enabled() {
case ssa.OpMIPSLoweredGetClosurePtr:
// Closure pointer is R22 (mips.REGCTXT).
- gc.CheckLoweredGetClosurePtr(v)
+ ssagen.CheckLoweredGetClosurePtr(v)
case ssa.OpMIPSLoweredGetCallerSP:
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(mips.AMOVW)
ssa.BlockMIPSFPF: {mips.ABFPF, mips.ABFPT},
}
-func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockDefer:
// defer returns in R1:
p.From.Reg = mips.REGZERO
p.Reg = mips.REG_R1
p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
case ssa.BlockRet:
package mips64
import (
- "cmd/compile/internal/gc"
"cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
"cmd/internal/obj/mips"
"cmd/internal/objabi"
)
-func Init(arch *gc.Arch) {
+func Init(arch *ssagen.ArchInfo) {
arch.LinkArch = &mips.Linkmips64
if objabi.GOARCH == "mips64le" {
arch.LinkArch = &mips.Linkmips64le
arch.Ginsnop = ginsnop
arch.Ginsnopdefer = ginsnop
- arch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
+ arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
arch.SSAGenValue = ssaGenValue
arch.SSAGenBlock = ssaGenBlock
}
package mips64
import (
- "cmd/compile/internal/gc"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/mips"
)
-func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
if cnt == 0 {
return p
}
- if cnt < int64(4*gc.Widthptr) {
- for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
- p = pp.Appendpp(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, 8+off+i)
+ if cnt < int64(4*types.PtrSize) {
+ for i := int64(0); i < cnt; i += int64(types.PtrSize) {
+ p = pp.Append(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, 8+off+i)
}
- } else if cnt <= int64(128*gc.Widthptr) {
- p = pp.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0)
+ } else if cnt <= int64(128*types.PtrSize) {
+ p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0)
p.Reg = mips.REGSP
- p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+ p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = gc.Duffzero
- p.To.Offset = 8 * (128 - cnt/int64(gc.Widthptr))
+ p.To.Sym = ir.Syms.Duffzero
+ p.To.Offset = 8 * (128 - cnt/int64(types.PtrSize))
} else {
// ADDV $(8+frame+lo-8), SP, r1
// ADDV $cnt, r1, r2
// MOVV R0, (Widthptr)r1
// ADDV $Widthptr, r1
// BNE r1, r2, loop
- p = pp.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0)
+ p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0)
p.Reg = mips.REGSP
- p = pp.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
+ p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
p.Reg = mips.REGRT1
- p = pp.Appendpp(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(gc.Widthptr))
+ p = pp.Append(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(types.PtrSize))
p1 := p
- p = pp.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, int64(gc.Widthptr), obj.TYPE_REG, mips.REGRT1, 0)
- p = pp.Appendpp(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
+ p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, mips.REGRT1, 0)
+ p = pp.Append(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
p.Reg = mips.REGRT2
- gc.Patch(p, p1)
+ p.To.SetTarget(p1)
}
return p
}
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
p := pp.Prog(mips.ANOR)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REG_R0
"math"
"cmd/compile/internal/base"
- "cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/mips"
panic("bad store type")
}
-func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
switch v.Op {
case ssa.OpCopy, ssa.OpMIPS64MOVVreg:
if v.Type.IsMemory() {
}
r := v.Reg()
p := s.Prog(loadByType(v.Type, r))
- gc.AddrAuto(&p.From, v.Args[0])
+ ssagen.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = r
if isHILO(r) {
p := s.Prog(storeByType(v.Type, r))
p.From.Type = obj.TYPE_REG
p.From.Reg = r
- gc.AddrAuto(&p.To, v)
+ ssagen.AddrAuto(&p.To, v)
case ssa.OpMIPS64ADDV,
ssa.OpMIPS64SUBV,
ssa.OpMIPS64AND,
v.Fatalf("aux is of unknown type %T", v.Aux)
case *obj.LSym:
wantreg = "SB"
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
case *ir.Name:
wantreg = "SP"
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
case nil:
// No sym, just MOVV $off(SP), R
wantreg = "SP"
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpMIPS64MOVBstore,
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
case ssa.OpMIPS64MOVBstorezero,
ssa.OpMIPS64MOVHstorezero,
ssa.OpMIPS64MOVWstorezero,
p.From.Reg = mips.REGZERO
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
case ssa.OpMIPS64MOVBreg,
ssa.OpMIPS64MOVBUreg,
ssa.OpMIPS64MOVHreg,
p = s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = gc.Duffzero
+ p.To.Sym = ir.Syms.Duffzero
p.To.Offset = v.AuxInt
case ssa.OpMIPS64LoweredZero:
// SUBV $8, R1
p4.From.Reg = v.Args[1].Reg()
p4.Reg = mips.REG_R1
p4.To.Type = obj.TYPE_BRANCH
- gc.Patch(p4, p2)
+ p4.To.SetTarget(p2)
case ssa.OpMIPS64DUFFCOPY:
p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = gc.Duffcopy
+ p.To.Sym = ir.Syms.Duffcopy
p.To.Offset = v.AuxInt
case ssa.OpMIPS64LoweredMove:
// SUBV $8, R1
p6.From.Reg = v.Args[2].Reg()
p6.Reg = mips.REG_R1
p6.To.Type = obj.TYPE_BRANCH
- gc.Patch(p6, p2)
+ p6.To.SetTarget(p2)
case ssa.OpMIPS64CALLstatic, ssa.OpMIPS64CALLclosure, ssa.OpMIPS64CALLinter:
s.Call(v)
case ssa.OpMIPS64LoweredWB:
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
+ p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
s.UseArgs(16) // space used in callee args area by assembly stubs
case ssa.OpMIPS64LoweredAtomicLoad8, ssa.OpMIPS64LoweredAtomicLoad32, ssa.OpMIPS64LoweredAtomicLoad64:
as := mips.AMOVV
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH
- gc.Patch(p3, p)
+ p3.To.SetTarget(p)
s.Prog(mips.ASYNC)
case ssa.OpMIPS64LoweredAtomicAdd32, ssa.OpMIPS64LoweredAtomicAdd64:
// SYNC
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH
- gc.Patch(p3, p)
+ p3.To.SetTarget(p)
s.Prog(mips.ASYNC)
p4 := s.Prog(mips.AADDVU)
p4.From.Type = obj.TYPE_REG
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH
- gc.Patch(p3, p)
+ p3.To.SetTarget(p)
s.Prog(mips.ASYNC)
p4 := s.Prog(mips.AADDVU)
p4.From.Type = obj.TYPE_CONST
p5.From.Type = obj.TYPE_REG
p5.From.Reg = v.Reg0()
p5.To.Type = obj.TYPE_BRANCH
- gc.Patch(p5, p1)
+ p5.To.SetTarget(p1)
p6 := s.Prog(mips.ASYNC)
- gc.Patch(p2, p6)
+ p2.To.SetTarget(p6)
case ssa.OpMIPS64LoweredNilCheck:
// Issue a load which will fault if arg is nil.
p := s.Prog(mips.AMOVB)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REGTMP
if logopt.Enabled() {
p3.To.Type = obj.TYPE_REG
p3.To.Reg = v.Reg()
p4 := s.Prog(obj.ANOP) // not a machine instruction, for branch to land
- gc.Patch(p2, p4)
+ p2.To.SetTarget(p4)
case ssa.OpMIPS64LoweredGetClosurePtr:
// Closure pointer is R22 (mips.REGCTXT).
- gc.CheckLoweredGetClosurePtr(v)
+ ssagen.CheckLoweredGetClosurePtr(v)
case ssa.OpMIPS64LoweredGetCallerSP:
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(mips.AMOVV)
ssa.BlockMIPS64FPF: {mips.ABFPF, mips.ABFPT},
}
-func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockDefer:
// defer returns in R1:
p.From.Reg = mips.REGZERO
p.Reg = mips.REG_R1
p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
case ssa.BlockRet:
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run mkbuiltin.go
+
+package noder
+
+import (
+ "fmt"
+ "go/constant"
+ "os"
+ "path"
+ "runtime"
+ "sort"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/archive"
+ "cmd/internal/bio"
+ "cmd/internal/goobj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+)
+
+// isDriveLetter reports whether b is an ASCII letter, i.e. a plausible
+// Windows drive letter for paths like "C:/foo".
+func isDriveLetter(b byte) bool {
+ return 'a' <= b && b <= 'z' || 'A' <= b && b <= 'Z'
+}
+
+// islocalname reports whether this import path is a local name:
+// it begins with ./ or ../ or /, is exactly "." or "..",
+// or (on Windows) starts with a drive letter like "C:/".
+func islocalname(name string) bool {
+ return strings.HasPrefix(name, "/") ||
+ runtime.GOOS == "windows" && len(name) >= 3 && isDriveLetter(name[0]) && name[1] == ':' && name[2] == '/' ||
+ strings.HasPrefix(name, "./") || name == "." ||
+ strings.HasPrefix(name, "../") || name == ".."
+}
+
+// findpkg returns the file path of the compiled package to load for
+// import path name, searching (in order): the -importcfg package map,
+// the current directory for local (relative) imports, the -I import
+// directories, and finally GOROOT/pkg. ok reports whether a file was
+// found.
+func findpkg(name string) (file string, ok bool) {
+ if islocalname(name) {
+ if base.Flag.NoLocalImports {
+ return "", false
+ }
+
+ if base.Flag.Cfg.PackageFile != nil {
+ file, ok = base.Flag.Cfg.PackageFile[name]
+ return file, ok
+ }
+
+ // try .a before .o. important for building libraries:
+ // if there is an array.o in the array.a library,
+ // want to find all of array.a, not just array.o.
+ file = fmt.Sprintf("%s.a", name)
+ if _, err := os.Stat(file); err == nil {
+ return file, true
+ }
+ file = fmt.Sprintf("%s.o", name)
+ if _, err := os.Stat(file); err == nil {
+ return file, true
+ }
+ return "", false
+ }
+
+ // local imports should be canonicalized already.
+ // don't want to see "encoding/../encoding/base64"
+ // as different from "encoding/base64".
+ if q := path.Clean(name); q != name {
+ base.Errorf("non-canonical import path %q (should be %q)", name, q)
+ return "", false
+ }
+
+ if base.Flag.Cfg.PackageFile != nil {
+ file, ok = base.Flag.Cfg.PackageFile[name]
+ return file, ok
+ }
+
+ for _, dir := range base.Flag.Cfg.ImportDirs {
+ file = fmt.Sprintf("%s/%s.a", dir, name)
+ if _, err := os.Stat(file); err == nil {
+ return file, true
+ }
+ file = fmt.Sprintf("%s/%s.o", dir, name)
+ if _, err := os.Stat(file); err == nil {
+ return file, true
+ }
+ }
+
+ if objabi.GOROOT != "" {
+ // The install suffix selects a variant package tree
+ // (e.g. linux_amd64_race) for -installsuffix, -race, or -msan builds.
+ suffix := ""
+ suffixsep := ""
+ if base.Flag.InstallSuffix != "" {
+ suffixsep = "_"
+ suffix = base.Flag.InstallSuffix
+ } else if base.Flag.Race {
+ suffixsep = "_"
+ suffix = "race"
+ } else if base.Flag.MSan {
+ suffixsep = "_"
+ suffix = "msan"
+ }
+
+ file = fmt.Sprintf("%s/pkg/%s_%s%s%s/%s.a", objabi.GOROOT, objabi.GOOS, objabi.GOARCH, suffixsep, suffix, name)
+ if _, err := os.Stat(file); err == nil {
+ return file, true
+ }
+ file = fmt.Sprintf("%s/pkg/%s_%s%s%s/%s.o", objabi.GOROOT, objabi.GOOS, objabi.GOARCH, suffixsep, suffix, name)
+ if _, err := os.Stat(file); err == nil {
+ return file, true
+ }
+ }
+
+ return "", false
+}
+
+// myheight tracks the local package's height based on packages
+// imported so far.
+var myheight int
+
+// importfile locates and reads the export data for the package named
+// by the string constant f, returning the resulting *types.Pkg.
+// It returns nil (after reporting an error) for invalid paths, and
+// exits the compiler for unreadable or malformed package files.
+// A package that was already imported is returned without re-reading
+// its export data.
+func importfile(f constant.Value) *types.Pkg {
+ if f.Kind() != constant.String {
+ base.Errorf("import path must be a string")
+ return nil
+ }
+
+ path_ := constant.StringVal(f)
+ if len(path_) == 0 {
+ base.Errorf("import path is empty")
+ return nil
+ }
+
+ if isbadimport(path_, false) {
+ return nil
+ }
+
+ // The package name main is no longer reserved,
+ // but we reserve the import path "main" to identify
+ // the main package, just as we reserve the import
+ // path "math" to identify the standard math package.
+ if path_ == "main" {
+ base.Errorf("cannot import \"main\"")
+ base.ErrorExit()
+ }
+
+ if base.Ctxt.Pkgpath != "" && path_ == base.Ctxt.Pkgpath {
+ base.Errorf("import %q while compiling that package (import cycle)", path_)
+ base.ErrorExit()
+ }
+
+ // Apply any -importmap substitution before resolving the path.
+ if mapped, ok := base.Flag.Cfg.ImportMap[path_]; ok {
+ path_ = mapped
+ }
+
+ // "unsafe" is a pseudo-package with no export data on disk.
+ if path_ == "unsafe" {
+ return ir.Pkgs.Unsafe
+ }
+
+ if islocalname(path_) {
+ if path_[0] == '/' {
+ base.Errorf("import path cannot be absolute path")
+ return nil
+ }
+
+ prefix := base.Ctxt.Pathname
+ if base.Flag.D != "" {
+ prefix = base.Flag.D
+ }
+ path_ = path.Join(prefix, path_)
+
+ // allowSpace=true: the joined directory prefix may contain spaces.
+ if isbadimport(path_, true) {
+ return nil
+ }
+ }
+
+ file, found := findpkg(path_)
+ if !found {
+ base.Errorf("can't find import: %q", path_)
+ base.ErrorExit()
+ }
+
+ importpkg := types.NewPkg(path_, "")
+ if importpkg.Imported {
+ return importpkg
+ }
+
+ importpkg.Imported = true
+
+ imp, err := bio.Open(file)
+ if err != nil {
+ base.Errorf("can't open import: %q: %v", path_, err)
+ base.ErrorExit()
+ }
+ defer imp.Close()
+
+ // check object header
+ p, err := imp.ReadString('\n')
+ if err != nil {
+ base.Errorf("import %s: reading input: %v", file, err)
+ base.ErrorExit()
+ }
+
+ if p == "!<arch>\n" { // package archive
+ // package export block should be first
+ sz := archive.ReadHeader(imp.Reader, "__.PKGDEF")
+ if sz <= 0 {
+ base.Errorf("import %s: not a package file", file)
+ base.ErrorExit()
+ }
+ p, err = imp.ReadString('\n')
+ if err != nil {
+ base.Errorf("import %s: reading input: %v", file, err)
+ base.ErrorExit()
+ }
+ }
+
+ if !strings.HasPrefix(p, "go object ") {
+ base.Errorf("import %s: not a go object file: %s", file, p)
+ base.ErrorExit()
+ }
+ // The rest of the header line must match this toolchain's
+ // OS/arch/version/experiment string exactly.
+ q := fmt.Sprintf("%s %s %s %s\n", objabi.GOOS, objabi.GOARCH, objabi.Version, objabi.Expstring())
+ if p[10:] != q {
+ base.Errorf("import %s: object is [%s] expected [%s]", file, p[10:], q)
+ base.ErrorExit()
+ }
+
+ // process header lines
+ for {
+ p, err = imp.ReadString('\n')
+ if err != nil {
+ base.Errorf("import %s: reading input: %v", file, err)
+ base.ErrorExit()
+ }
+ if p == "\n" {
+ break // header ends with blank line
+ }
+ }
+
+ // Expect $$B\n to signal binary import format.
+
+ // look for $$
+ var c byte
+ for {
+ c, err = imp.ReadByte()
+ if err != nil {
+ break
+ }
+ if c == '$' {
+ c, err = imp.ReadByte()
+ if c == '$' || err != nil {
+ break
+ }
+ }
+ }
+
+ // get character after $$
+ if err == nil {
+ c, _ = imp.ReadByte()
+ }
+
+ var fingerprint goobj.FingerprintType
+ switch c {
+ case '\n':
+ base.Errorf("cannot import %s: old export format no longer supported (recompile library)", path_)
+ return nil
+
+ case 'B':
+ if base.Debug.Export != 0 {
+ fmt.Printf("importing %s (%s)\n", path_, file)
+ }
+ imp.ReadByte() // skip \n after $$B
+
+ c, err = imp.ReadByte()
+ if err != nil {
+ base.Errorf("import %s: reading input: %v", file, err)
+ base.ErrorExit()
+ }
+
+ // Indexed format is distinguished by an 'i' byte,
+ // whereas previous export formats started with 'c', 'd', or 'v'.
+ if c != 'i' {
+ base.Errorf("import %s: unexpected package format byte: %v", file, c)
+ base.ErrorExit()
+ }
+ fingerprint = typecheck.ReadImports(importpkg, imp)
+
+ default:
+ base.Errorf("no import in %q", path_)
+ base.ErrorExit()
+ }
+
+ // assume files move (get installed) so don't record the full path
+ if base.Flag.Cfg.PackageFile != nil {
+ // If using a packageFile map, assume path_ can be recorded directly.
+ base.Ctxt.AddImport(path_, fingerprint)
+ } else {
+ // For file "/Users/foo/go/pkg/darwin_amd64/math.a" record "math.a".
+ base.Ctxt.AddImport(file[len(file)-len(path_)-len(".a"):], fingerprint)
+ }
+
+ // Track the local package's height as one more than the tallest
+ // package imported so far.
+ if importpkg.Height >= myheight {
+ myheight = importpkg.Height + 1
+ }
+
+ return importpkg
+}
+
+// The linker uses the magic symbol prefixes "go." and "type."
+// Avoid potential confusion between import paths and symbols
+// by rejecting these reserved imports for now. Also, people
+// "can do weird things in GOPATH and we'd prefer they didn't
+// do _that_ weird thing" (per rsc). See also #4257.
+var reservedimports = []string{
+ "go",
+ "type",
+}
+
+// isbadimport reports whether path is not a legal import path,
+// reporting an error for the first problem found: NUL bytes, reserved
+// names, invalid UTF-8, control characters, backslashes, spaces
+// (unless allowSpace is set; importfile passes true for local paths
+// joined with a directory prefix), or other disallowed punctuation.
+func isbadimport(path string, allowSpace bool) bool {
+ if strings.Contains(path, "\x00") {
+ base.Errorf("import path contains NUL")
+ return true
+ }
+
+ for _, ri := range reservedimports {
+ if path == ri {
+ base.Errorf("import path %q is reserved and cannot be used", path)
+ return true
+ }
+ }
+
+ for _, r := range path {
+ if r == utf8.RuneError {
+ base.Errorf("import path contains invalid UTF-8 sequence: %q", path)
+ return true
+ }
+
+ if r < 0x20 || r == 0x7f {
+ base.Errorf("import path contains control character: %q", path)
+ return true
+ }
+
+ if r == '\\' {
+ base.Errorf("import path contains backslash; use slash: %q", path)
+ return true
+ }
+
+ if !allowSpace && unicode.IsSpace(r) {
+ base.Errorf("import path contains space character: %q", path)
+ return true
+ }
+
+ if strings.ContainsRune("!\"#$%&'()*,:;<=>?[]^`{|}", r) {
+ base.Errorf("import path contains invalid character '%c': %q", r, path)
+ return true
+ }
+ }
+
+ return false
+}
+
+// pkgnotused reports an "imported and not used" error at lineno for
+// the package with the given import path, imported under name.
+func pkgnotused(lineno src.XPos, path string, name string) {
+ // If the package was imported with a name other than the final
+ // import path element, show it explicitly in the error message.
+ // Note that this handles both renamed imports and imports of
+ // packages containing unconventional package declarations.
+ // Note that this uses / always, even on Windows, because Go import
+ // paths always use forward slashes.
+ elem := path
+ if i := strings.LastIndex(elem, "/"); i >= 0 {
+ elem = elem[i+1:]
+ }
+ if name == "" || elem == name {
+ base.ErrorfAt(lineno, "imported and not used: %q", path)
+ } else {
+ base.ErrorfAt(lineno, "imported and not used: %q as %s", path, name)
+ }
+}
+
+// mkpackage records pkgname as the local package name, rejecting the
+// blank name "_" and complaining if a later file in the same
+// compilation declares a different package name.
+func mkpackage(pkgname string) {
+ if types.LocalPkg.Name == "" {
+ if pkgname == "_" {
+ base.Errorf("invalid package name _")
+ }
+ types.LocalPkg.Name = pkgname
+ } else {
+ if pkgname != types.LocalPkg.Name {
+ base.Errorf("package %s; expected %s", pkgname, types.LocalPkg.Name)
+ }
+ }
+}
+
+// clearImports resets per-file import state: it reports unused
+// (non-dot) package imports in source order and clears their symbol
+// definitions, and clears dot-import aliases (whose unused-import
+// errors are reported later, after type checking).
+func clearImports() {
+ type importedPkg struct {
+ pos src.XPos
+ path string
+ name string
+ }
+ var unused []importedPkg
+
+ for _, s := range types.LocalPkg.Syms {
+ n := ir.AsNode(s.Def)
+ if n == nil {
+ continue
+ }
+ if n.Op() == ir.OPACK {
+ // throw away top-level package name left over
+ // from previous file.
+ // leave s->block set to cause redeclaration
+ // errors if a conflicting top-level name is
+ // introduced by a different file.
+ p := n.(*ir.PkgName)
+ if !p.Used && base.SyntaxErrors() == 0 {
+ unused = append(unused, importedPkg{p.Pos(), p.Pkg.Path, s.Name})
+ }
+ s.Def = nil
+ continue
+ }
+ if types.IsDotAlias(s) {
+ // throw away top-level name left over
+ // from previous import . "x"
+ // We'll report errors after type checking in checkDotImports.
+ s.Def = nil
+ continue
+ }
+ }
+
+ // Report unused imports in source order for deterministic output.
+ sort.Slice(unused, func(i, j int) bool { return unused[i].pos.Before(unused[j].pos) })
+ for _, pkg := range unused {
+ pkgnotused(pkg.pos, pkg.path, pkg.name)
+ }
+}
+
+// CheckDotImports reports errors for any unused dot imports, then
+// releases the dot-import tracking state.
+func CheckDotImports() {
+ for _, pack := range dotImports {
+ if !pack.Used {
+ base.ErrorfAt(pack.Pos(), "imported and not used: %q", pack.Pkg.Path)
+ }
+ }
+
+ // No longer needed; release memory.
+ dotImports = nil
+ typecheck.DotImportRefs = nil
+}
+
+// dotImports tracks all PkgNames that have been dot-imported.
+var dotImports []*ir.PkgName
+
+// importDot finds all the exported symbols in the package referenced
+// by pack (an import . "x"), and makes them available in the current
+// package as Ident nodes recorded in typecheck.DotImportRefs, so that
+// later references can be resolved back to the dot-imported package.
+func importDot(pack *ir.PkgName) {
+ if typecheck.DotImportRefs == nil {
+ typecheck.DotImportRefs = make(map[*ir.Ident]*ir.PkgName)
+ }
+
+ opkg := pack.Pkg
+ for _, s := range opkg.Syms {
+ if s.Def == nil {
+ // Skip symbols with neither a definition nor
+ // importable declaration data.
+ if _, ok := typecheck.DeclImporter[s]; !ok {
+ continue
+ }
+ }
+ if !types.IsExported(s.Name) || strings.ContainsRune(s.Name, 0xb7) { // 0xb7 = center dot
+ continue
+ }
+ s1 := typecheck.Lookup(s.Name)
+ if s1.Def != nil {
+ pkgerror := fmt.Sprintf("during import %q", opkg.Path)
+ typecheck.Redeclared(base.Pos, s1, pkgerror)
+ continue
+ }
+
+ id := ir.NewIdent(src.NoXPos, s)
+ typecheck.DotImportRefs[id] = pack
+ s1.Def = id
+ s1.Block = 1
+ }
+
+ dotImports = append(dotImports, pack)
+}
+
+// importName is like oldname,
+// but it reports an error if sym is from another package and not exported.
+// The returned node is marked Diag so callers suppress follow-on errors.
+func importName(sym *types.Sym) ir.Node {
+ n := oldname(sym)
+ if !types.IsExported(sym.Name) && sym.Pkg != types.LocalPkg {
+ n.SetDiag(true)
+ base.Errorf("cannot refer to unexported name %s.%s", sym.Pkg.Name, sym.Name)
+ }
+ return n
+}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package noder
import (
- "cmd/compile/internal/base"
+ "fmt"
+ "strings"
+
"cmd/compile/internal/ir"
"cmd/compile/internal/syntax"
"cmd/internal/objabi"
- "cmd/internal/src"
- "fmt"
- "strings"
)
-func makePos(b *src.PosBase, line, col uint) src.XPos {
- return base.Ctxt.PosTable.XPos(src.MakePos(b, line, col))
-}
-
func isSpace(c rune) bool {
return c == ' ' || c == '\t' || c == '\n' || c == '\r'
}
}
const (
- FuncPragmas = ir.Nointerface |
+ funcPragmas = ir.Nointerface |
ir.Noescape |
ir.Norace |
ir.Nosplit |
ir.Nowritebarrierrec |
ir.Yeswritebarrierrec
- TypePragmas = ir.NotInHeap
+ typePragmas = ir.NotInHeap
)
func pragmaFlag(verb string) ir.PragmaFlag {
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package noder
import (
- "cmd/compile/internal/syntax"
"reflect"
"runtime"
"testing"
+
+ "cmd/compile/internal/syntax"
)
func eq(a, b []string) bool {
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package noder
import (
"fmt"
"cmd/compile/internal/importer"
"cmd/compile/internal/ir"
"cmd/compile/internal/syntax"
+ "cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/compile/internal/types2"
"cmd/internal/objabi"
"cmd/internal/src"
)
-// parseFiles concurrently parses files into *syntax.File structures.
+// ParseFiles concurrently parses files into *syntax.File structures.
// Each declaration in every *syntax.File is converted to a syntax tree
// and its root represented by *Node is appended to Target.Decls.
// Returns the total count of parsed lines.
-func parseFiles(filenames []string) (lines uint) {
+func ParseFiles(filenames []string) (lines uint) {
noders := make([]*noder, 0, len(filenames))
// Limit the number of simultaneously open files.
sem := make(chan struct{}, runtime.GOMAXPROCS(0)+10)
base.ExitIfErrors()
// Always run testdclstack here, even when debug_dclstack is not set, as a sanity measure.
- testdclstack()
+ types.CheckDclstack()
}
types.LocalPkg.Height = myheight
}
// Always run testdclstack here, even when debug_dclstack is not set, as a sanity measure.
- testdclstack()
+ types.CheckDclstack()
}
for _, p := range noders {
func (p *noder) funcBody(fn *ir.Func, block *syntax.BlockStmt) {
oldScope := p.scope
p.scope = 0
- funchdr(fn)
+ typecheck.StartFuncBody(fn)
if block != nil {
body := p.stmts(block.List)
if body == nil {
- body = []ir.Node{ir.Nod(ir.OBLOCK, nil, nil)}
+ body = []ir.Node{ir.NewBlockStmt(base.Pos, nil)}
}
- fn.PtrBody().Set(body)
+ fn.Body.Set(body)
base.Pos = p.makeXPos(block.Rbrace)
fn.Endlineno = base.Pos
}
- funcbody()
+ typecheck.FinishFuncBody()
p.scope = oldScope
}
types.Markdcl()
if p.trackScopes {
- Curfn.Parents = append(Curfn.Parents, p.scope)
- p.scopeVars = append(p.scopeVars, len(Curfn.Dcl))
- p.scope = ir.ScopeID(len(Curfn.Parents))
+ ir.CurFunc.Parents = append(ir.CurFunc.Parents, p.scope)
+ p.scopeVars = append(p.scopeVars, len(ir.CurFunc.Dcl))
+ p.scope = ir.ScopeID(len(ir.CurFunc.Parents))
p.markScope(pos)
}
if p.trackScopes {
scopeVars := p.scopeVars[len(p.scopeVars)-1]
p.scopeVars = p.scopeVars[:len(p.scopeVars)-1]
- if scopeVars == len(Curfn.Dcl) {
+ if scopeVars == len(ir.CurFunc.Dcl) {
// no variables were declared in this scope, so we can retract it.
- if int(p.scope) != len(Curfn.Parents) {
+ if int(p.scope) != len(ir.CurFunc.Parents) {
base.Fatalf("scope tracking inconsistency, no variables declared but scopes were not retracted")
}
- p.scope = Curfn.Parents[p.scope-1]
- Curfn.Parents = Curfn.Parents[:len(Curfn.Parents)-1]
+ p.scope = ir.CurFunc.Parents[p.scope-1]
+ ir.CurFunc.Parents = ir.CurFunc.Parents[:len(ir.CurFunc.Parents)-1]
- nmarks := len(Curfn.Marks)
- Curfn.Marks[nmarks-1].Scope = p.scope
+ nmarks := len(ir.CurFunc.Marks)
+ ir.CurFunc.Marks[nmarks-1].Scope = p.scope
prevScope := ir.ScopeID(0)
if nmarks >= 2 {
- prevScope = Curfn.Marks[nmarks-2].Scope
+ prevScope = ir.CurFunc.Marks[nmarks-2].Scope
}
- if Curfn.Marks[nmarks-1].Scope == prevScope {
- Curfn.Marks = Curfn.Marks[:nmarks-1]
+ if ir.CurFunc.Marks[nmarks-1].Scope == prevScope {
+ ir.CurFunc.Marks = ir.CurFunc.Marks[:nmarks-1]
}
return
}
- p.scope = Curfn.Parents[p.scope-1]
+ p.scope = ir.CurFunc.Parents[p.scope-1]
p.markScope(pos)
}
func (p *noder) markScope(pos syntax.Pos) {
xpos := p.makeXPos(pos)
- if i := len(Curfn.Marks); i > 0 && Curfn.Marks[i-1].Pos == xpos {
- Curfn.Marks[i-1].Scope = p.scope
+ if i := len(ir.CurFunc.Marks); i > 0 && ir.CurFunc.Marks[i-1].Pos == xpos {
+ ir.CurFunc.Marks[i-1].Scope = p.scope
} else {
- Curfn.Marks = append(Curfn.Marks, ir.Mark{Pos: xpos, Scope: p.scope})
+ ir.CurFunc.Marks = append(ir.CurFunc.Marks, ir.Mark{Pos: xpos, Scope: p.scope})
}
}
p.setlineno(p.file.PkgName)
mkpackage(p.file.PkgName.Value)
- if pragma, ok := p.file.Pragma.(*Pragma); ok {
+ if pragma, ok := p.file.Pragma.(*pragmas); ok {
pragma.Flag &^= ir.GoBuildPragma
p.checkUnused(pragma)
}
- Target.Decls = append(Target.Decls, p.decls(p.file.DeclList)...)
+ typecheck.Target.Decls = append(typecheck.Target.Decls, p.decls(p.file.DeclList)...)
base.Pos = src.NoXPos
clearImports()
p.errorAt(l.pos, "//go:linkname only allowed in Go files that import \"unsafe\"")
continue
}
- n := ir.AsNode(lookup(l.local).Def)
+ n := ir.AsNode(typecheck.Lookup(l.local).Def)
if n == nil || n.Op() != ir.ONAME {
// TODO(mdempsky): Change to p.errorAt before Go 1.17 release.
// base.WarnfAt(p.makeXPos(l.pos), "//go:linkname must refer to declared function or variable (will be an error in Go 1.17)")
}
n.Sym().Linkname = l.remote
}
- Target.CgoPragmas = append(Target.CgoPragmas, p.pragcgobuf...)
+ typecheck.Target.CgoPragmas = append(typecheck.Target.CgoPragmas, p.pragcgobuf...)
}
func (p *noder) decls(decls []syntax.Decl) (l []ir.Node) {
return // avoid follow-on errors if there was a syntax error
}
- if pragma, ok := imp.Pragma.(*Pragma); ok {
+ if pragma, ok := imp.Pragma.(*pragmas); ok {
p.checkUnused(pragma)
}
return
}
- if ipkg == unsafepkg {
+ if ipkg == ir.Pkgs.Unsafe {
p.importedUnsafe = true
}
if ipkg.Path == "embed" {
}
if !ipkg.Direct {
- Target.Imports = append(Target.Imports, ipkg)
+ typecheck.Target.Imports = append(typecheck.Target.Imports, ipkg)
}
ipkg.Direct = true
if imp.LocalPkgName != nil {
my = p.name(imp.LocalPkgName)
} else {
- my = lookup(ipkg.Name)
+ my = typecheck.Lookup(ipkg.Name)
}
pack := ir.NewPkgName(p.pos(imp), my, ipkg)
return
}
if my.Def != nil {
- redeclare(pack.Pos(), my, "as imported package name")
+ typecheck.Redeclared(pack.Pos(), my, "as imported package name")
}
my.Def = pack
my.Lastlineno = pack.Pos()
exprs = p.exprList(decl.Values)
}
- if pragma, ok := decl.Pragma.(*Pragma); ok {
+ if pragma, ok := decl.Pragma.(*pragmas); ok {
if len(pragma.Embeds) > 0 {
if !p.importedEmbed {
// This check can't be done when building the list pragma.Embeds
}
p.setlineno(decl)
- return variter(names, typ, exprs)
+ return typecheck.DeclVars(names, typ, exprs)
}
// constState tracks state between constant specifiers within a
}
}
- if pragma, ok := decl.Pragma.(*Pragma); ok {
+ if pragma, ok := decl.Pragma.(*pragmas); ok {
p.checkUnused(pragma)
}
if decl.Values == nil {
v = ir.DeepCopy(n.Pos(), v)
}
- declare(n, dclcontext)
+ typecheck.Declare(n, typecheck.DeclContext)
n.Ntype = typ
n.Defn = v
n.SetIota(cs.iota)
- nn = append(nn, p.nod(decl, ir.ODCLCONST, n, nil))
+ nn = append(nn, ir.NewDecl(p.pos(decl), ir.ODCLCONST, n))
}
if len(values) > len(names) {
func (p *noder) typeDecl(decl *syntax.TypeDecl) ir.Node {
n := p.declName(ir.OTYPE, decl.Name)
- declare(n, dclcontext)
+ typecheck.Declare(n, typecheck.DeclContext)
// decl.Type may be nil but in that case we got a syntax error during parsing
typ := p.typeExprOrNil(decl.Type)
n.Ntype = typ
n.SetAlias(decl.Alias)
- if pragma, ok := decl.Pragma.(*Pragma); ok {
+ if pragma, ok := decl.Pragma.(*pragmas); ok {
if !decl.Alias {
- n.SetPragma(pragma.Flag & TypePragmas)
- pragma.Flag &^= TypePragmas
+ n.SetPragma(pragma.Flag & typePragmas)
+ pragma.Flag &^= typePragmas
}
p.checkUnused(pragma)
}
- nod := p.nod(decl, ir.ODCLTYPE, n, nil)
- if n.Alias() && !langSupported(1, 9, types.LocalPkg) {
+ nod := ir.NewDecl(p.pos(decl), ir.ODCLTYPE, n)
+ if n.Alias() && !types.AllowsGoVersion(types.LocalPkg, 1, 9) {
base.ErrorfAt(nod.Pos(), "type aliases only supported as of -lang=go1.9")
}
return nod
if len(t.Params) > 0 || len(t.Results) > 0 {
base.ErrorfAt(f.Pos(), "func init must have no arguments and no return values")
}
- Target.Inits = append(Target.Inits, f)
+ typecheck.Target.Inits = append(typecheck.Target.Inits, f)
}
if types.LocalPkg.Name == "main" && name.Name == "main" {
name = ir.BlankNode.Sym() // filled in by typecheckfunc
}
- f.Nname = newFuncNameAt(p.pos(fun.Name), name, f)
+ f.Nname = ir.NewFuncNameAt(p.pos(fun.Name), name, f)
f.Nname.Defn = f
f.Nname.Ntype = t
- if pragma, ok := fun.Pragma.(*Pragma); ok {
- f.Pragma = pragma.Flag & FuncPragmas
+ if pragma, ok := fun.Pragma.(*pragmas); ok {
+ f.Pragma = pragma.Flag & funcPragmas
if pragma.Flag&ir.Systemstack != 0 && pragma.Flag&ir.Nosplit != 0 {
base.ErrorfAt(f.Pos(), "go:nosplit and go:systemstack cannot be combined")
}
- pragma.Flag &^= FuncPragmas
+ pragma.Flag &^= funcPragmas
p.checkUnused(pragma)
}
if fun.Recv == nil {
- declare(f.Nname, ir.PFUNC)
+ typecheck.Declare(f.Nname, ir.PFUNC)
}
p.funcBody(f, fun.Body)
n.SetDiag(expr.Bad) // avoid follow-on errors if there was a syntax error
return n
case *syntax.CompositeLit:
- n := p.nod(expr, ir.OCOMPLIT, nil, nil)
+ n := ir.NewCompLitExpr(p.pos(expr), ir.OCOMPLIT, nil, nil)
if expr.Type != nil {
- n.SetRight(p.expr(expr.Type))
+ n.Ntype = ir.Node(p.expr(expr.Type)).(ir.Ntype)
}
l := p.exprs(expr.ElemList)
for i, e := range l {
l[i] = p.wrapname(expr.ElemList[i], e)
}
- n.PtrList().Set(l)
+ n.List.Set(l)
base.Pos = p.makeXPos(expr.Rbrace)
return n
case *syntax.KeyValueExpr:
// use position of expr.Key rather than of expr (which has position of ':')
- return p.nod(expr.Key, ir.OKEY, p.expr(expr.Key), p.wrapname(expr.Value, p.expr(expr.Value)))
+ return ir.NewKeyExpr(p.pos(expr.Key), p.expr(expr.Key), p.wrapname(expr.Value, p.expr(expr.Value)))
case *syntax.FuncLit:
return p.funcLit(expr)
case *syntax.ParenExpr:
- return p.nod(expr, ir.OPAREN, p.expr(expr.X), nil)
+ return ir.NewParenExpr(p.pos(expr), p.expr(expr.X))
case *syntax.SelectorExpr:
// parser.new_dotname
obj := p.expr(expr.X)
pack.Used = true
return importName(pack.Pkg.Lookup(expr.Sel.Value))
}
- n := nodSym(ir.OXDOT, obj, p.name(expr.Sel))
+ n := ir.NewSelectorExpr(base.Pos, ir.OXDOT, obj, p.name(expr.Sel))
n.SetPos(p.pos(expr)) // lineno may have been changed by p.expr(expr.X)
return n
case *syntax.IndexExpr:
- return p.nod(expr, ir.OINDEX, p.expr(expr.X), p.expr(expr.Index))
+ return ir.NewIndexExpr(p.pos(expr), p.expr(expr.X), p.expr(expr.Index))
case *syntax.SliceExpr:
op := ir.OSLICE
if expr.Full {
n.SetSliceBounds(index[0], index[1], index[2])
return n
case *syntax.AssertExpr:
- return p.nod(expr, ir.ODOTTYPE, p.expr(expr.X), p.typeExpr(expr.Type))
+ return ir.NewTypeAssertExpr(p.pos(expr), p.expr(expr.X), p.typeExpr(expr.Type).(ir.Ntype))
case *syntax.Operation:
if expr.Op == syntax.Add && expr.Y != nil {
return p.sum(expr)
pos, op := p.pos(expr), p.unOp(expr.Op)
switch op {
case ir.OADDR:
- return nodAddrAt(pos, x)
+ return typecheck.NodAddrAt(pos, x)
case ir.ODEREF:
return ir.NewStarExpr(pos, x)
}
}
return ir.NewBinaryExpr(pos, op, x, y)
case *syntax.CallExpr:
- n := p.nod(expr, ir.OCALL, p.expr(expr.Fun), nil)
- n.PtrList().Set(p.exprs(expr.ArgList))
- n.SetIsDDD(expr.HasDots)
+ n := ir.NewCallExpr(p.pos(expr), ir.OCALL, p.expr(expr.Fun), nil)
+ n.Args.Set(p.exprs(expr.ArgList))
+ n.IsDDD = expr.HasDots
return n
case *syntax.ArrayType:
nstr = nil
chunks = chunks[:0]
}
- n = p.nod(add, ir.OADD, n, r)
+ n = ir.NewBinaryExpr(p.pos(add), ir.OADD, n, r)
}
if len(chunks) > 1 {
nstr.SetVal(constant.MakeString(strings.Join(chunks, "")))
}
sym := p.packname(typ)
- n := ir.NewField(p.pos(typ), lookup(sym.Name), importName(sym).(ir.Ntype), nil)
+ n := ir.NewField(p.pos(typ), typecheck.Lookup(sym.Name), importName(sym).(ir.Ntype), nil)
n.Embedded = true
if isStar {
for i, stmt := range stmts {
s := p.stmtFall(stmt, fallOK && i+1 == len(stmts))
if s == nil {
- } else if s.Op() == ir.OBLOCK && s.(*ir.BlockStmt).List().Len() > 0 {
+ } else if s.Op() == ir.OBLOCK && len(s.(*ir.BlockStmt).List) > 0 {
// Inline non-empty block.
// Empty blocks must be preserved for checkreturn.
- nodes = append(nodes, s.(*ir.BlockStmt).List().Slice()...)
+ nodes = append(nodes, s.(*ir.BlockStmt).List...)
} else {
nodes = append(nodes, s)
}
l := p.blockStmt(stmt)
if len(l) == 0 {
// TODO(mdempsky): Line number?
- return ir.Nod(ir.OBLOCK, nil, nil)
+ return ir.NewBlockStmt(base.Pos, nil)
}
- return liststmt(l)
+ return ir.NewBlockStmt(src.NoXPos, l)
case *syntax.ExprStmt:
return p.wrapname(stmt, p.expr(stmt.X))
case *syntax.SendStmt:
- return p.nod(stmt, ir.OSEND, p.expr(stmt.Chan), p.expr(stmt.Value))
+ return ir.NewSendStmt(p.pos(stmt), p.expr(stmt.Chan), p.expr(stmt.Value))
case *syntax.DeclStmt:
- return liststmt(p.decls(stmt.DeclList))
+ return ir.NewBlockStmt(src.NoXPos, p.decls(stmt.DeclList))
case *syntax.AssignStmt:
if stmt.Op != 0 && stmt.Op != syntax.Def {
n := ir.NewAssignOpStmt(p.pos(stmt), p.binOp(stmt.Op), p.expr(stmt.Lhs), p.expr(stmt.Rhs))
- n.SetImplicit(stmt.Rhs == syntax.ImplicitOne)
+ n.IncDec = stmt.Rhs == syntax.ImplicitOne
return n
}
rhs := p.exprList(stmt.Rhs)
if list, ok := stmt.Lhs.(*syntax.ListExpr); ok && len(list.ElemList) != 1 || len(rhs) != 1 {
- n := p.nod(stmt, ir.OAS2, nil, nil)
- n.SetColas(stmt.Op == syntax.Def)
- n.PtrList().Set(p.assignList(stmt.Lhs, n, n.Colas()))
- n.PtrRlist().Set(rhs)
+ n := ir.NewAssignListStmt(p.pos(stmt), ir.OAS2, nil, nil)
+ n.Def = stmt.Op == syntax.Def
+ n.Lhs.Set(p.assignList(stmt.Lhs, n, n.Def))
+ n.Rhs.Set(rhs)
return n
}
- n := p.nod(stmt, ir.OAS, nil, nil)
- n.SetColas(stmt.Op == syntax.Def)
- n.SetLeft(p.assignList(stmt.Lhs, n, n.Colas())[0])
- n.SetRight(rhs[0])
+ n := ir.NewAssignStmt(p.pos(stmt), nil, nil)
+ n.Def = stmt.Op == syntax.Def
+ n.X = p.assignList(stmt.Lhs, n, n.Def)[0]
+ n.Y = rhs[0]
return n
case *syntax.BranchStmt:
if stmt.Results != nil {
results = p.exprList(stmt.Results)
}
- n := p.nod(stmt, ir.ORETURN, nil, nil)
- n.PtrList().Set(results)
- if n.List().Len() == 0 && Curfn != nil {
- for _, ln := range Curfn.Dcl {
- if ln.Class() == ir.PPARAM {
+ n := ir.NewReturnStmt(p.pos(stmt), nil)
+ n.Results.Set(results)
+ if len(n.Results) == 0 && ir.CurFunc != nil {
+ for _, ln := range ir.CurFunc.Dcl {
+ if ln.Class_ == ir.PPARAM {
continue
}
- if ln.Class() != ir.PPARAMOUT {
+ if ln.Class_ != ir.PPARAMOUT {
break
}
if ln.Sym().Def != ln {
}
newOrErr = true
- n := NewName(sym)
- declare(n, dclcontext)
+ n := typecheck.NewName(sym)
+ typecheck.Declare(n, typecheck.DeclContext)
n.Defn = defn
- defn.PtrInit().Append(ir.Nod(ir.ODCL, n, nil))
+ defn.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, n))
res[i] = n
}
func (p *noder) ifStmt(stmt *syntax.IfStmt) ir.Node {
p.openScope(stmt.Pos())
- n := p.nod(stmt, ir.OIF, nil, nil)
+ n := ir.NewIfStmt(p.pos(stmt), nil, nil, nil)
if stmt.Init != nil {
- n.PtrInit().Set1(p.stmt(stmt.Init))
+ *n.PtrInit() = []ir.Node{p.stmt(stmt.Init)}
}
if stmt.Cond != nil {
- n.SetLeft(p.expr(stmt.Cond))
+ n.Cond = p.expr(stmt.Cond)
}
- n.PtrBody().Set(p.blockStmt(stmt.Then))
+ n.Body.Set(p.blockStmt(stmt.Then))
if stmt.Else != nil {
e := p.stmt(stmt.Else)
if e.Op() == ir.OBLOCK {
- n.PtrRlist().Set(e.List().Slice())
+ e := e.(*ir.BlockStmt)
+ n.Else.Set(e.List)
} else {
- n.PtrRlist().Set1(e)
+ n.Else = []ir.Node{e}
}
}
p.closeAnotherScope()
panic("unexpected RangeClause")
}
- n := p.nod(r, ir.ORANGE, nil, p.expr(r.X))
+ n := ir.NewRangeStmt(p.pos(r), nil, p.expr(r.X), nil)
if r.Lhs != nil {
- n.SetColas(r.Def)
- n.PtrList().Set(p.assignList(r.Lhs, n, n.Colas()))
+ n.Def = r.Def
+ n.Vars.Set(p.assignList(r.Lhs, n, n.Def))
}
- n.PtrBody().Set(p.blockStmt(stmt.Body))
+ n.Body.Set(p.blockStmt(stmt.Body))
p.closeAnotherScope()
return n
}
- n := p.nod(stmt, ir.OFOR, nil, nil)
+ n := ir.NewForStmt(p.pos(stmt), nil, nil, nil, nil)
if stmt.Init != nil {
- n.PtrInit().Set1(p.stmt(stmt.Init))
+ *n.PtrInit() = []ir.Node{p.stmt(stmt.Init)}
}
if stmt.Cond != nil {
- n.SetLeft(p.expr(stmt.Cond))
+ n.Cond = p.expr(stmt.Cond)
}
if stmt.Post != nil {
- n.SetRight(p.stmt(stmt.Post))
+ n.Post = p.stmt(stmt.Post)
}
- n.PtrBody().Set(p.blockStmt(stmt.Body))
+ n.Body.Set(p.blockStmt(stmt.Body))
p.closeAnotherScope()
return n
}
func (p *noder) switchStmt(stmt *syntax.SwitchStmt) ir.Node {
p.openScope(stmt.Pos())
- n := p.nod(stmt, ir.OSWITCH, nil, nil)
+ n := ir.NewSwitchStmt(p.pos(stmt), nil, nil)
if stmt.Init != nil {
- n.PtrInit().Set1(p.stmt(stmt.Init))
+ *n.PtrInit() = []ir.Node{p.stmt(stmt.Init)}
}
if stmt.Tag != nil {
- n.SetLeft(p.expr(stmt.Tag))
+ n.Tag = p.expr(stmt.Tag)
}
var tswitch *ir.TypeSwitchGuard
- if l := n.Left(); l != nil && l.Op() == ir.OTYPESW {
+ if l := n.Tag; l != nil && l.Op() == ir.OTYPESW {
tswitch = l.(*ir.TypeSwitchGuard)
}
- n.PtrList().Set(p.caseClauses(stmt.Body, tswitch, stmt.Rbrace))
+ n.Cases.Set(p.caseClauses(stmt.Body, tswitch, stmt.Rbrace))
p.closeScope(stmt.Rbrace)
return n
}
p.openScope(clause.Pos())
- n := p.nod(clause, ir.OCASE, nil, nil)
+ n := ir.NewCaseStmt(p.pos(clause), nil, nil)
if clause.Cases != nil {
- n.PtrList().Set(p.exprList(clause.Cases))
+ n.List.Set(p.exprList(clause.Cases))
}
- if tswitch != nil && tswitch.Left() != nil {
- nn := NewName(tswitch.Left().Sym())
- declare(nn, dclcontext)
- n.PtrRlist().Set1(nn)
+ if tswitch != nil && tswitch.Tag != nil {
+ nn := typecheck.NewName(tswitch.Tag.Sym())
+ typecheck.Declare(nn, typecheck.DeclContext)
+ n.Vars = []ir.Node{nn}
// keep track of the instances for reporting unused
nn.Defn = tswitch
}
body = body[:len(body)-1]
}
- n.PtrBody().Set(p.stmtsFall(body, true))
- if l := n.Body().Len(); l > 0 && n.Body().Index(l-1).Op() == ir.OFALL {
+ n.Body.Set(p.stmtsFall(body, true))
+ if l := len(n.Body); l > 0 && n.Body[l-1].Op() == ir.OFALL {
if tswitch != nil {
base.Errorf("cannot fallthrough in type switch")
}
}
func (p *noder) selectStmt(stmt *syntax.SelectStmt) ir.Node {
- n := p.nod(stmt, ir.OSELECT, nil, nil)
- n.PtrList().Set(p.commClauses(stmt.Body, stmt.Rbrace))
+ n := ir.NewSelectStmt(p.pos(stmt), nil)
+ n.Cases.Set(p.commClauses(stmt.Body, stmt.Rbrace))
return n
}
}
p.openScope(clause.Pos())
- n := p.nod(clause, ir.OCASE, nil, nil)
+ n := ir.NewCaseStmt(p.pos(clause), nil, nil)
if clause.Comm != nil {
- n.PtrList().Set1(p.stmt(clause.Comm))
+ n.List = []ir.Node{p.stmt(clause.Comm)}
}
- n.PtrBody().Set(p.stmts(clause.Body))
+ n.Body.Set(p.stmts(clause.Body))
nodes = append(nodes, n)
}
if len(clauses) > 0 {
func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) ir.Node {
sym := p.name(label.Label)
- lhs := p.nodSym(label, ir.OLABEL, nil, sym)
+ lhs := ir.NewLabelStmt(p.pos(label), sym)
var ls ir.Node
if label.Stmt != nil { // TODO(mdempsky): Should always be present.
if ls != nil {
switch ls.Op() {
case ir.OFOR:
- ls.SetSym(sym)
+ ls := ls.(*ir.ForStmt)
+ ls.Label = sym
case ir.ORANGE:
- ls.SetSym(sym)
+ ls := ls.(*ir.RangeStmt)
+ ls.Label = sym
case ir.OSWITCH:
- ls.SetSym(sym)
+ ls := ls.(*ir.SwitchStmt)
+ ls.Label = sym
case ir.OSELECT:
- ls.SetSym(sym)
+ ls := ls.(*ir.SelectStmt)
+ ls.Label = sym
}
}
}
l := []ir.Node{lhs}
if ls != nil {
if ls.Op() == ir.OBLOCK {
- l = append(l, ls.List().Slice()...)
+ ls := ls.(*ir.BlockStmt)
+ l = append(l, ls.List...)
} else {
l = append(l, ls)
}
}
- return liststmt(l)
+ return ir.NewBlockStmt(src.NoXPos, l)
}
var unOps = [...]ir.Op{
// literal is not compatible with the current language version.
func checkLangCompat(lit *syntax.BasicLit) {
s := lit.Value
- if len(s) <= 2 || langSupported(1, 13, types.LocalPkg) {
+ if len(s) <= 2 || types.AllowsGoVersion(types.LocalPkg, 1, 13) {
return
}
// len(s) > 2
// to big.Float to match cmd/compile's historical precision.
// TODO(mdempsky): Remove.
if v.Kind() == constant.Float {
- v = constant.Make(bigFloatVal(v))
+ v = constant.Make(ir.BigFloat(v))
}
return v
}
func (p *noder) name(name *syntax.Name) *types.Sym {
- return lookup(name.Value)
+ return typecheck.Lookup(name.Value)
}
func (p *noder) mkname(name *syntax.Name) ir.Node {
}
fallthrough
case ir.ONAME, ir.ONONAME, ir.OPACK:
- p := p.nod(n, ir.OPAREN, x, nil)
+ p := ir.NewParenExpr(p.pos(n), x)
p.SetImplicit(true)
return p
}
return x
}
-func (p *noder) nod(orig syntax.Node, op ir.Op, left, right ir.Node) ir.Node {
- return ir.NodAt(p.pos(orig), op, left, right)
-}
-
-func (p *noder) nodSym(orig syntax.Node, op ir.Op, left ir.Node, sym *types.Sym) ir.Node {
- n := nodSym(op, left, sym)
- n.SetPos(p.pos(orig))
- return n
-}
-
func (p *noder) pos(n syntax.Node) src.XPos {
// TODO(gri): orig.Pos() should always be known - fix package syntax
xpos := base.Pos
"go:generate": true,
}
-// *Pragma is the value stored in a syntax.Pragma during parsing.
-type Pragma struct {
+// *pragmas is the value stored in a syntax.pragmas during parsing.
+type pragmas struct {
Flag ir.PragmaFlag // collected bits
- Pos []PragmaPos // position of each individual flag
- Embeds []PragmaEmbed
+ Pos []pragmaPos // position of each individual flag
+ Embeds []pragmaEmbed
}
-type PragmaPos struct {
+type pragmaPos struct {
Flag ir.PragmaFlag
Pos syntax.Pos
}
-type PragmaEmbed struct {
+type pragmaEmbed struct {
Pos syntax.Pos
Patterns []string
}
-func (p *noder) checkUnused(pragma *Pragma) {
+func (p *noder) checkUnused(pragma *pragmas) {
for _, pos := range pragma.Pos {
if pos.Flag&pragma.Flag != 0 {
p.errorAt(pos.Pos, "misplaced compiler directive")
}
}
-func (p *noder) checkUnusedDuringParse(pragma *Pragma) {
+func (p *noder) checkUnusedDuringParse(pragma *pragmas) {
for _, pos := range pragma.Pos {
if pos.Flag&pragma.Flag != 0 {
p.error(syntax.Error{Pos: pos.Pos, Msg: "misplaced compiler directive"})
// pragma is called concurrently if files are parsed concurrently.
func (p *noder) pragma(pos syntax.Pos, blankLine bool, text string, old syntax.Pragma) syntax.Pragma {
- pragma, _ := old.(*Pragma)
+ pragma, _ := old.(*pragmas)
if pragma == nil {
- pragma = new(Pragma)
+ pragma = new(pragmas)
}
if text == "" {
p.error(syntax.Error{Pos: pos, Msg: "usage: //go:embed pattern..."})
break
}
- pragma.Embeds = append(pragma.Embeds, PragmaEmbed{pos, args})
+ pragma.Embeds = append(pragma.Embeds, pragmaEmbed{pos, args})
case strings.HasPrefix(text, "go:cgo_import_dynamic "):
// This is permitted for general use because Solaris
p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s is not allowed in the standard library", verb)})
}
pragma.Flag |= flag
- pragma.Pos = append(pragma.Pos, PragmaPos{flag, pos})
+ pragma.Pos = append(pragma.Pos, pragmaPos{flag, pos})
}
return pragma
}
return list, nil
}
+
+// fakeRecv returns an anonymous *ir.Field whose type is
+// types.FakeRecvType(), for use as a placeholder method receiver.
+func fakeRecv() *ir.Field {
+ return ir.NewField(base.Pos, nil, nil, types.FakeRecvType())
+}
+
+// funcLit builds the ir.Func and wrapping ClosureExpr for the
+// function literal expr, nodes its body, and converts references to
+// variables of enclosing functions into closure variables.
+func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node {
+ // Note: the literal's type is noded twice; one copy becomes the
+ // function name's Ntype, the other the closure's ClosureType.
+ xtype := p.typeExpr(expr.Type)
+ ntype := p.typeExpr(expr.Type)
+
+ fn := ir.NewFunc(p.pos(expr))
+ fn.SetIsHiddenClosure(ir.CurFunc != nil)
+ fn.Nname = ir.NewFuncNameAt(p.pos(expr), ir.BlankNode.Sym(), fn) // filled in by typecheckclosure
+ fn.Nname.Ntype = xtype
+ fn.Nname.Defn = fn
+
+ clo := ir.NewClosureExpr(p.pos(expr), fn)
+ fn.ClosureType = ntype
+ fn.OClosure = clo
+
+ p.funcBody(fn, expr.Body)
+
+ // closure-specific variables are hanging off the
+ // ordinary ones in the symbol table; see oldname.
+ // unhook them.
+ // make the list of pointers for the closure call.
+ for _, v := range fn.ClosureVars {
+ // Unlink from v1; see comment in syntax.go type Param for these fields.
+ v1 := v.Defn
+ v1.Name().Innermost = v.Outer
+
+ // If the closure usage of v is not dense,
+ // we need to make it dense; now that we're out
+ // of the function in which v appeared,
+ // look up v.Sym in the enclosing function
+ // and keep it around for use in the compiled code.
+ //
+ // That is, suppose we just finished parsing the innermost
+ // closure f4 in this code:
+ //
+ // func f() {
+ // v := 1
+ // func() { // f2
+ // use(v)
+ // func() { // f3
+ // func() { // f4
+ // use(v)
+ // }()
+ // }()
+ // }()
+ // }
+ //
+ // At this point v.Outer is f2's v; there is no f3's v.
+ // To construct the closure f4 from within f3,
+ // we need to use f3's v and in this case we need to create f3's v.
+ // We are now in the context of f3, so calling oldname(v.Sym)
+ // obtains f3's v, creating it if necessary (as it is in the example).
+ //
+ // capturevars will decide whether to use v directly or &v.
+ v.Outer = oldname(v.Sym()).(*ir.Name)
+ }
+
+ return clo
+}
+
+// A function named init is a special case.
+// It is called by the initialization before main is run.
+// To make it unique within a package and also uncallable,
+// the name, normally "pkg.init", is altered to "pkg.init.0".
+var renameinitgen int
+
+// renameinit returns a fresh "init.N" symbol, incrementing the
+// package-level counter so each user init function gets a unique name.
+func renameinit() *types.Sym {
+ s := typecheck.LookupNum("init.", renameinitgen)
+ renameinitgen++
+ return s
+}
+
+// oldname returns the Node that declares symbol s in the current scope.
+// If no such Node currently exists, an ONONAME Node is returned instead.
+// Automatically creates a new closure variable if the referenced symbol was
+// declared in a different (containing) function.
+func oldname(s *types.Sym) ir.Node {
+ if s.Pkg != types.LocalPkg {
+ // Symbol from another package: wrap in an Ident and leave
+ // resolution to later phases.
+ return ir.NewIdent(base.Pos, s)
+ }
+
+ n := ir.AsNode(s.Def)
+ if n == nil {
+ // Maybe a top-level declaration will come along later to
+ // define s. resolve will check s.Def again once all input
+ // source has been processed.
+ return ir.NewIdent(base.Pos, s)
+ }
+
+ if ir.CurFunc != nil && n.Op() == ir.ONAME && n.Name().Curfn != nil && n.Name().Curfn != ir.CurFunc {
+ // Inner func is referring to var in outer func.
+ //
+ // TODO(rsc): If there is an outer variable x and we
+ // are parsing x := 5 inside the closure, until we get to
+ // the := it looks like a reference to the outer x so we'll
+ // make x a closure variable unnecessarily.
+ n := n.(*ir.Name)
+ c := n.Name().Innermost
+ if c == nil || c.Curfn != ir.CurFunc {
+ // Do not have a closure var for the active closure yet; make one.
+ c = typecheck.NewName(s)
+ c.Class_ = ir.PAUTOHEAP
+ c.SetIsClosureVar(true)
+ c.SetIsDDD(n.IsDDD())
+ c.Defn = n
+
+ // Link into list of active closure variables.
+ // Popped from list in func funcLit.
+ c.Outer = n.Name().Innermost
+ n.Name().Innermost = c
+
+ ir.CurFunc.ClosureVars = append(ir.CurFunc.ClosureVars, c)
+ }
+
+ // return ref to closure var, not original
+ return c
+ }
+
+ return n
+}
+
+// varEmbed applies the //go:embed directives in embeds to the single
+// variable declared by names/typ/exprs. On success the embed patterns
+// are recorded on the variable and it is added to
+// typecheck.Target.Embeds; the initializer list exprs is always
+// returned unchanged. Misuse is reported as an error at the first
+// directive's position.
+func varEmbed(p *noder, names []*ir.Name, typ ir.Ntype, exprs []ir.Node, embeds []pragmaEmbed) (newExprs []ir.Node) {
+ // //go:embed is only legal in a file that imports "embed".
+ haveEmbed := false
+ for _, decl := range p.file.DeclList {
+ imp, ok := decl.(*syntax.ImportDecl)
+ if !ok {
+ // imports always come first
+ break
+ }
+ path, _ := strconv.Unquote(imp.Path.Value)
+ if path == "embed" {
+ haveEmbed = true
+ break
+ }
+ }
+
+ pos := embeds[0].Pos
+ if !haveEmbed {
+ p.errorAt(pos, "invalid go:embed: missing import \"embed\"")
+ return exprs
+ }
+ if base.Flag.Cfg.Embed.Patterns == nil {
+ p.errorAt(pos, "invalid go:embed: build system did not supply embed configuration")
+ return exprs
+ }
+ if len(names) > 1 {
+ p.errorAt(pos, "go:embed cannot apply to multiple vars")
+ return exprs
+ }
+ if len(exprs) > 0 {
+ p.errorAt(pos, "go:embed cannot apply to var with initializer")
+ return exprs
+ }
+ if typ == nil {
+ // Should not happen, since len(exprs) == 0 now.
+ p.errorAt(pos, "go:embed cannot apply to var without type")
+ return exprs
+ }
+ if typecheck.DeclContext != ir.PEXTERN {
+ p.errorAt(pos, "go:embed cannot apply to var inside func")
+ return exprs
+ }
+
+ // Valid usage: record each directive's patterns on the variable.
+ v := names[0]
+ typecheck.Target.Embeds = append(typecheck.Target.Embeds, v)
+ v.Embed = new([]ir.Embed)
+ for _, e := range embeds {
+ *v.Embed = append(*v.Embed, ir.Embed{Pos: p.makeXPos(e.Pos), Patterns: e.Patterns})
+ }
+ return exprs
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package objw
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/bitvec"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+)
+
+// Uint8 writes v into s at offset off as a 1-byte value and
+// returns the offset just past the written data.
+func Uint8(s *obj.LSym, off int, v uint8) int {
+ return UintN(s, off, uint64(v), 1)
+}
+
+// Uint16 writes v into s at offset off as a 2-byte value and
+// returns the offset just past the written data.
+func Uint16(s *obj.LSym, off int, v uint16) int {
+ return UintN(s, off, uint64(v), 2)
+}
+
+// Uint32 writes v into s at offset off as a 4-byte value and
+// returns the offset just past the written data.
+func Uint32(s *obj.LSym, off int, v uint32) int {
+ return UintN(s, off, uint64(v), 4)
+}
+
+// Uintptr writes v into s at offset off as a pointer-sized value
+// and returns the offset just past the written data.
+func Uintptr(s *obj.LSym, off int, v uint64) int {
+ return UintN(s, off, v, types.PtrSize)
+}
+
+// UintN writes v into s at offset off as a wid-byte integer and
+// returns the offset just past the written data.
+// off must be wid-aligned; a misaligned offset is a fatal error.
+func UintN(s *obj.LSym, off int, v uint64, wid int) int {
+ if off&(wid-1) != 0 {
+ base.Fatalf("duintxxLSym: misaligned: v=%d wid=%d off=%d", v, wid, off)
+ }
+ s.WriteInt(base.Ctxt, int64(off), wid, int64(v))
+ return off + wid
+}
+
+// SymPtr writes a pointer-sized address of x+xoff into s, first
+// rounding off up to pointer alignment, and returns the offset just
+// past the written address.
+func SymPtr(s *obj.LSym, off int, x *obj.LSym, xoff int) int {
+ off = int(types.Rnd(int64(off), int64(types.PtrSize)))
+ s.WriteAddr(base.Ctxt, int64(off), types.PtrSize, x, int64(xoff))
+ off += types.PtrSize
+ return off
+}
+
+// SymPtrOff writes a 4-byte offset-based reference to x into s at off
+// and returns the offset just past it.
+func SymPtrOff(s *obj.LSym, off int, x *obj.LSym) int {
+ s.WriteOff(base.Ctxt, int64(off), x, 0)
+ off += 4
+ return off
+}
+
+// SymPtrWeakOff is like SymPtrOff, but the reference to x is weak.
+func SymPtrWeakOff(s *obj.LSym, off int, x *obj.LSym) int {
+ s.WriteWeakOff(base.Ctxt, int64(off), x, 0)
+ off += 4
+ return off
+}
+
+// Global declares s as a global symbol of the given width and flags.
+// An obj.LOCAL bit in flags is applied as the AttrLocal attribute
+// rather than being passed through to Globl.
+func Global(s *obj.LSym, width int32, flags int16) {
+ if flags&obj.LOCAL != 0 {
+ s.Set(obj.AttrLocal, true)
+ flags &^= obj.LOCAL
+ }
+ base.Ctxt.Globl(s, int64(width), int(flags))
+}
+
+// BitVec writes the bits of bv into s one byte at a time, starting at
+// offset off, and returns the offset just past the written data.
+func BitVec(s *obj.LSym, off int, bv bitvec.BitVec) int {
+ // Runtime reads the bitmaps as byte arrays. Oblige.
+ for j := 0; int32(j) < bv.N; j += 8 {
+ word := bv.B[j/32]
+ off = Uint8(s, off, uint8(word>>(uint(j)%32)))
+ }
+ return off
+}
--- /dev/null
+// Derived from Inferno utils/6c/txt.c
+// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6c/txt.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package objw
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/ssa"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+)
+
+var sharedProgArray = new([10000]obj.Prog) // *T instead of T to work around issue 19839
+
+// NewProgs returns a new Progs for fn.
+// worker indicates which of the backend workers will use the Progs.
+func NewProgs(fn *ir.Func, worker int) *Progs {
+ pp := new(Progs)
+ if base.Ctxt.CanReuseProgs() {
+ // Give each worker its own disjoint slice of the shared
+ // Prog array, so workers never hand out the same entry.
+ sz := len(sharedProgArray) / base.Flag.LowerC
+ pp.Cache = sharedProgArray[sz*worker : sz*(worker+1)]
+ }
+ pp.CurFunc = fn
+
+ // prime the pump
+ pp.Next = pp.NewProg()
+ pp.Clear(pp.Next)
+
+ pp.Pos = fn.Pos()
+ pp.SetText(fn)
+ // PCDATA tables implicitly start with index -1.
+ pp.PrevLive = LivenessIndex{-1, false}
+ pp.NextLive = pp.PrevLive
+ return pp
+}
+
+// Progs accumulates Progs for a function and converts them into machine code.
+// The zero value is not ready for use; construct one with NewProgs,
+// which primes Next and emits the function's ATEXT Prog.
+type Progs struct {
+ Text *obj.Prog // ATEXT Prog for this function
+ Next *obj.Prog // next Prog
+ PC int64 // virtual PC; count of Progs
+ Pos src.XPos // position to use for new Progs
+ CurFunc *ir.Func // fn these Progs are for
+ Cache []obj.Prog // local progcache
+ CacheIndex int // first free element of progcache
+
+ NextLive LivenessIndex // liveness index for the next Prog
+ PrevLive LivenessIndex // last emitted liveness index
+}
+
+// LivenessIndex stores the liveness map information for a Value.
+type LivenessIndex struct {
+ // StackMapIndex is the stack map index to emit as PCDATA
+ // (see Progs.Prog), or StackMapDontCare if none applies.
+ StackMapIndex int
+
+ // IsUnsafePoint indicates that this is an unsafe-point.
+ //
+ // Note that it's possible for a call Value to have a stack
+ // map while also being an unsafe-point. This means it cannot
+ // be preempted at this instruction, but that a preemption or
+ // stack growth may happen in the called function.
+ IsUnsafePoint bool
+}
+
+// StackMapDontCare indicates that the stack map index at a Value
+// doesn't matter.
+//
+// This is a sentinel value that should never be emitted to the PCDATA
+// stream. We use -1000 because that's obviously never a valid stack
+// index (but -1 is).
+const StackMapDontCare = -1000
+
+// LivenessDontCare indicates that the liveness information doesn't
+// matter. Currently it is used in deferreturn liveness when we don't
+// actually need it. It should never be emitted to the PCDATA stream.
+var LivenessDontCare = LivenessIndex{StackMapDontCare, true}
+
+// StackMapValid reports whether idx carries a real stack map index,
+// i.e. one that is not the StackMapDontCare sentinel.
+func (idx LivenessIndex) StackMapValid() bool {
+ return idx.StackMapIndex != StackMapDontCare
+}
+
+// NewProg returns a fresh *obj.Prog with Ctxt set, drawing from pp's
+// local cache while entries remain and heap-allocating otherwise.
+func (pp *Progs) NewProg() *obj.Prog {
+ var p *obj.Prog
+ if pp.CacheIndex < len(pp.Cache) {
+ p = &pp.Cache[pp.CacheIndex]
+ pp.CacheIndex++
+ } else {
+ p = new(obj.Prog)
+ }
+ p.Ctxt = base.Ctxt
+ return p
+}
+
+// Flush converts from pp to machine code.
+// The accumulated Prog list (starting at pp.Text) is handed to the
+// assembler backend; pp.NewProg is passed along as the allocator for
+// any Progs the backend needs to create.
+func (pp *Progs) Flush() {
+ plist := &obj.Plist{Firstpc: pp.Text, Curfn: pp.CurFunc}
+ obj.Flushplist(base.Ctxt, plist, pp.NewProg, base.Ctxt.Pkgpath)
+}
+
+// Free clears pp and any associated resources.
+// When Prog reuse is enabled, the cache entries handed out so far are
+// zeroed so they no longer retain pointers; pp itself is then reset
+// to the zero value to prevent further use.
+func (pp *Progs) Free() {
+ if base.Ctxt.CanReuseProgs() {
+ // Clear progs to enable GC and avoid abuse.
+ s := pp.Cache[:pp.CacheIndex]
+ for i := range s {
+ s[i] = obj.Prog{}
+ }
+ }
+ // Clear pp to avoid abuse.
+ *pp = Progs{}
+}
+
+// Prog adds a Prog with instruction As to pp.
+// If the liveness information in pp.NextLive differs from the last
+// emitted state, PCDATA Progs recording the change are emitted first.
+func (pp *Progs) Prog(as obj.As) *obj.Prog {
+ if pp.NextLive.StackMapValid() && pp.NextLive.StackMapIndex != pp.PrevLive.StackMapIndex {
+ // Emit stack map index change.
+ // PrevLive is updated before the recursive Prog call below
+ // so the recursion terminates.
+ idx := pp.NextLive.StackMapIndex
+ pp.PrevLive.StackMapIndex = idx
+ p := pp.Prog(obj.APCDATA)
+ p.From.SetConst(objabi.PCDATA_StackMapIndex)
+ p.To.SetConst(int64(idx))
+ }
+ if pp.NextLive.IsUnsafePoint != pp.PrevLive.IsUnsafePoint {
+ // Emit unsafe-point marker.
+ // As above, PrevLive is updated first to bound the recursion.
+ pp.PrevLive.IsUnsafePoint = pp.NextLive.IsUnsafePoint
+ p := pp.Prog(obj.APCDATA)
+ p.From.SetConst(objabi.PCDATA_UnsafePoint)
+ if pp.NextLive.IsUnsafePoint {
+ p.To.SetConst(objabi.PCDATA_UnsafePointUnsafe)
+ } else {
+ p.To.SetConst(objabi.PCDATA_UnsafePointSafe)
+ }
+ }
+
+ // Take the primed Next as the new Prog and prime a fresh one.
+ p := pp.Next
+ pp.Next = pp.NewProg()
+ pp.Clear(pp.Next)
+ p.Link = pp.Next
+
+ if !pp.Pos.IsKnown() && base.Flag.K != 0 {
+ base.Warn("prog: unknown position (line 0)")
+ }
+
+ p.As = as
+ p.Pos = pp.Pos
+ if pp.Pos.IsStmt() == src.PosIsStmt {
+ // Clear IsStmt for later Progs at this pos provided that as can be marked as a stmt
+ if ssa.LosesStmtMark(as) {
+ return p
+ }
+ pp.Pos = pp.Pos.WithNotStmt()
+ }
+ return p
+}
+
+// Clear resets p to a harmless AEND instruction and stamps it with
+// the next virtual PC, advancing pp.PC.
+func (pp *Progs) Clear(p *obj.Prog) {
+ obj.Nopout(p)
+ p.As = obj.AEND
+ p.Pc = pp.PC
+ pp.PC++
+}
+
+// Append adds a new Prog immediately after p, linking it into the
+// list, and returns it. The new Prog uses instruction as with the
+// given from/to operand types, registers, and offsets, and inherits
+// p's source position.
+func (pp *Progs) Append(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int16, foffset int64, ttype obj.AddrType, treg int16, toffset int64) *obj.Prog {
+ q := pp.NewProg()
+ pp.Clear(q)
+ q.As = as
+ q.Pos = p.Pos
+ q.From.Type = ftype
+ q.From.Reg = freg
+ q.From.Offset = foffset
+ q.To.Type = ttype
+ q.To.Reg = treg
+ q.To.Offset = toffset
+ q.Link = p.Link
+ p.Link = q
+ return q
+}
+
+// SetText emits the ATEXT Prog that begins fn's body and records it
+// in both pp.Text and fn's linker symbol.
+// It must be called at most once per Progs; a second call is fatal.
+func (pp *Progs) SetText(fn *ir.Func) {
+ if pp.Text != nil {
+ base.Fatalf("Progs.settext called twice")
+ }
+ ptxt := pp.Prog(obj.ATEXT)
+ pp.Text = ptxt
+
+ fn.LSym.Func().Text = ptxt
+ ptxt.From.Type = obj.TYPE_MEM
+ ptxt.From.Name = obj.NAME_EXTERN
+ ptxt.From.Sym = fn.LSym
+}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package pkginit
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/obj"
)
-// A function named init is a special case.
-// It is called by the initialization before main is run.
-// To make it unique within a package and also uncallable,
-// the name, normally "pkg.init", is altered to "pkg.init.0".
-var renameinitgen int
-
-// Function collecting autotmps generated during typechecking,
-// to be included in the package-level init function.
-var initTodo = ir.NewFunc(base.Pos)
-
-func renameinit() *types.Sym {
- s := lookupN("init.", renameinitgen)
- renameinitgen++
- return s
-}
-
-// fninit makes and returns an initialization record for the package.
+// Task makes and returns an initialization record for the package.
// See runtime/proc.go:initTask for its layout.
// The 3 tasks for initialization are:
// 1) Initialize all of the packages the current package depends on.
// 2) Initialize all the variables that have initializers.
// 3) Run any init functions.
-func fninit() *ir.Name {
- nf := initOrder(Target.Decls)
+func Task() *ir.Name {
+ nf := initOrder(typecheck.Target.Decls)
var deps []*obj.LSym // initTask records for packages the current package depends on
var fns []*obj.LSym // functions to call for package initialization
// Find imported packages with init tasks.
- for _, pkg := range Target.Imports {
- n := resolve(ir.NewIdent(base.Pos, pkg.Lookup(".inittask")))
+ for _, pkg := range typecheck.Target.Imports {
+ n := typecheck.Resolve(ir.NewIdent(base.Pos, pkg.Lookup(".inittask")))
if n.Op() == ir.ONONAME {
continue
}
- if n.Op() != ir.ONAME || n.(*ir.Name).Class() != ir.PEXTERN {
+ if n.Op() != ir.ONAME || n.(*ir.Name).Class_ != ir.PEXTERN {
base.Fatalf("bad inittask: %v", n)
}
deps = append(deps, n.(*ir.Name).Sym().Linksym())
// Make a function that contains all the initialization statements.
if len(nf) > 0 {
base.Pos = nf[0].Pos() // prolog/epilog gets line number of first init stmt
- initializers := lookup("init")
- fn := dclfunc(initializers, ir.NewFuncType(base.Pos, nil, nil, nil))
- for _, dcl := range initTodo.Dcl {
+ initializers := typecheck.Lookup("init")
+ fn := typecheck.DeclFunc(initializers, ir.NewFuncType(base.Pos, nil, nil, nil))
+ for _, dcl := range typecheck.InitTodoFunc.Dcl {
dcl.Curfn = fn
}
- fn.Dcl = append(fn.Dcl, initTodo.Dcl...)
- initTodo.Dcl = nil
+ fn.Dcl = append(fn.Dcl, typecheck.InitTodoFunc.Dcl...)
+ typecheck.InitTodoFunc.Dcl = nil
- fn.PtrBody().Set(nf)
- funcbody()
+ fn.Body.Set(nf)
+ typecheck.FinishFuncBody()
- typecheckFunc(fn)
- Curfn = fn
- typecheckslice(nf, ctxStmt)
- Curfn = nil
- Target.Decls = append(Target.Decls, fn)
+ typecheck.Func(fn)
+ ir.CurFunc = fn
+ typecheck.Stmts(nf)
+ ir.CurFunc = nil
+ typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
fns = append(fns, initializers.Linksym())
}
- if initTodo.Dcl != nil {
+ if typecheck.InitTodoFunc.Dcl != nil {
// We only generate temps using initTodo if there
// are package-scope initialization statements, so
// something's weird if we get here.
base.Fatalf("initTodo still has declarations")
}
- initTodo = nil
+ typecheck.InitTodoFunc = nil
// Record user init functions.
- for _, fn := range Target.Inits {
+ for _, fn := range typecheck.Target.Inits {
// Skip init functions with empty bodies.
- if fn.Body().Len() == 1 {
- if stmt := fn.Body().First(); stmt.Op() == ir.OBLOCK && stmt.(*ir.BlockStmt).List().Len() == 0 {
+ if len(fn.Body) == 1 {
+ if stmt := fn.Body[0]; stmt.Op() == ir.OBLOCK && len(stmt.(*ir.BlockStmt).List) == 0 {
continue
}
}
}
// Make an .inittask structure.
- sym := lookup(".inittask")
- task := NewName(sym)
+ sym := typecheck.Lookup(".inittask")
+ task := typecheck.NewName(sym)
task.SetType(types.Types[types.TUINT8]) // fake type
- task.SetClass(ir.PEXTERN)
+ task.Class_ = ir.PEXTERN
sym.Def = task
lsym := sym.Linksym()
ot := 0
- ot = duintptr(lsym, ot, 0) // state: not initialized yet
- ot = duintptr(lsym, ot, uint64(len(deps)))
- ot = duintptr(lsym, ot, uint64(len(fns)))
+ ot = objw.Uintptr(lsym, ot, 0) // state: not initialized yet
+ ot = objw.Uintptr(lsym, ot, uint64(len(deps)))
+ ot = objw.Uintptr(lsym, ot, uint64(len(fns)))
for _, d := range deps {
- ot = dsymptr(lsym, ot, d, 0)
+ ot = objw.SymPtr(lsym, ot, d, 0)
}
for _, f := range fns {
- ot = dsymptr(lsym, ot, f, 0)
+ ot = objw.SymPtr(lsym, ot, f, 0)
}
// An initTask has pointers, but none into the Go heap.
// It's not quite read only, the state field must be modifiable.
- ggloblsym(lsym, int32(ot), obj.NOPTR)
+ objw.Global(lsym, int32(ot), obj.NOPTR)
return task
}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package pkginit
import (
"bytes"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/staticinit"
)
// Package initialization
// corresponding list of statements to include in the init() function
// body.
func initOrder(l []ir.Node) []ir.Node {
- s := InitSchedule{
- initplans: make(map[ir.Node]*InitPlan),
- inittemps: make(map[ir.Node]*ir.Name),
+ s := staticinit.Schedule{
+ Plans: make(map[ir.Node]*staticinit.Plan),
+ Temps: make(map[ir.Node]*ir.Name),
}
o := InitOrder{
blocking: make(map[ir.Node][]ir.Node),
switch n.Op() {
case ir.OAS, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
o.processAssign(n)
- o.flushReady(s.staticInit)
+ o.flushReady(s.StaticInit)
case ir.ODCLCONST, ir.ODCLFUNC, ir.ODCLTYPE:
// nop
default:
base.Fatalf("expected empty map: %v", o.blocking)
}
- return s.out
+ return s.Out
}
func (o *InitOrder) processAssign(n ir.Node) {
defn := dep.Defn
// Skip dependencies on functions (PFUNC) and
// variables already initialized (InitDone).
- if dep.Class() != ir.PEXTERN || o.order[defn] == orderDone {
+ if dep.Class_ != ir.PEXTERN || o.order[defn] == orderDone {
continue
}
o.order[n]++
*path = append(*path, n)
for _, ref := range refers {
// Short-circuit variables that were initialized.
- if ref.Class() == ir.PEXTERN && o.order[ref.Defn] == orderDone {
+ if ref.Class_ == ir.PEXTERN && o.order[ref.Defn] == orderDone {
continue
}
// the start.
i := -1
for j, n := range l {
- if n.Class() == ir.PEXTERN && (i == -1 || n.Pos().Before(l[i].Pos())) {
+ if n.Class_ == ir.PEXTERN && (i == -1 || n.Pos().Before(l[i].Pos())) {
i = j
}
}
d := initDeps{transitive: transitive}
switch n.Op() {
case ir.OAS:
- d.inspect(n.Right())
+ n := n.(*ir.AssignStmt)
+ d.inspect(n.Y)
case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
- d.inspect(n.Rlist().First())
+ n := n.(*ir.AssignListStmt)
+ d.inspect(n.Rhs[0])
case ir.ODCLFUNC:
- d.inspectList(n.Body())
+ n := n.(*ir.Func)
+ d.inspectList(n.Body)
default:
base.Fatalf("unexpected Op: %v", n.Op())
}
func (d *initDeps) visit(n ir.Node) {
switch n.Op() {
case ir.OMETHEXPR:
- d.foundDep(methodExprName(n))
+ n := n.(*ir.MethodExpr)
+ d.foundDep(ir.MethodExprName(n))
case ir.ONAME:
n := n.(*ir.Name)
- switch n.Class() {
+ switch n.Class_ {
case ir.PEXTERN, ir.PFUNC:
d.foundDep(n)
}
case ir.OCLOSURE:
n := n.(*ir.ClosureExpr)
- d.inspectList(n.Func().Body())
+ d.inspectList(n.Func.Body)
case ir.ODOTMETH, ir.OCALLPART:
- d.foundDep(methodExprName(n))
+ d.foundDep(ir.MethodExprName(n))
}
}
return
}
d.seen.Add(n)
- if d.transitive && n.Class() == ir.PFUNC {
- d.inspectList(n.Defn.(*ir.Func).Body())
+ if d.transitive && n.Class_ == ir.PFUNC {
+ d.inspectList(n.Defn.(*ir.Func).Body)
}
}
func firstLHS(n ir.Node) *ir.Name {
switch n.Op() {
case ir.OAS:
- return n.Left().Name()
+ n := n.(*ir.AssignStmt)
+ return n.X.Name()
case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2RECV, ir.OAS2MAPR:
- return n.List().First().Name()
+ n := n.(*ir.AssignListStmt)
+ return n.Lhs[0].Name()
}
base.Fatalf("unexpected Op: %v", n.Op())
package ppc64
import (
- "cmd/compile/internal/gc"
+ "cmd/compile/internal/ssagen"
"cmd/internal/obj/ppc64"
"cmd/internal/objabi"
)
-func Init(arch *gc.Arch) {
+func Init(arch *ssagen.ArchInfo) {
arch.LinkArch = &ppc64.Linkppc64
if objabi.GOARCH == "ppc64le" {
arch.LinkArch = &ppc64.Linkppc64le
import (
"cmd/compile/internal/base"
- "cmd/compile/internal/gc"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/ppc64"
)
-func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
if cnt == 0 {
return p
}
- if cnt < int64(4*gc.Widthptr) {
- for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
- p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, base.Ctxt.FixedFrameSize()+off+i)
+ if cnt < int64(4*types.PtrSize) {
+ for i := int64(0); i < cnt; i += int64(types.PtrSize) {
+ p = pp.Append(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, base.Ctxt.FixedFrameSize()+off+i)
}
- } else if cnt <= int64(128*gc.Widthptr) {
- p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGRT1, 0)
+ } else if cnt <= int64(128*types.PtrSize) {
+ p = pp.Append(p, ppc64.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGRT1, 0)
p.Reg = ppc64.REGSP
- p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+ p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = gc.Duffzero
- p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
+ p.To.Sym = ir.Syms.Duffzero
+ p.To.Offset = 4 * (128 - cnt/int64(types.PtrSize))
} else {
- p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGTMP, 0)
- p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
+ p = pp.Append(p, ppc64.AMOVD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGTMP, 0)
+ p = pp.Append(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
p.Reg = ppc64.REGSP
- p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
- p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
+ p = pp.Append(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
+ p = pp.Append(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
p.Reg = ppc64.REGRT1
- p = pp.Appendpp(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(gc.Widthptr))
+ p = pp.Append(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(types.PtrSize))
p1 := p
- p = pp.Appendpp(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
- p = pp.Appendpp(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
- gc.Patch(p, p1)
+ p = pp.Append(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
+ p = pp.Append(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+ p.To.SetTarget(p1)
}
return p
}
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
p := pp.Prog(ppc64.AOR)
p.From.Type = obj.TYPE_REG
p.From.Reg = ppc64.REG_R0
return p
}
-func ginsnopdefer(pp *gc.Progs) *obj.Prog {
+func ginsnopdefer(pp *objw.Progs) *obj.Prog {
// On PPC64 two nops are required in the defer case.
//
// (see gc/cgen.go, gc/plive.go -- copy of comment below)
import (
"cmd/compile/internal/base"
- "cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/ppc64"
)
// markMoves marks any MOVXconst ops that need to avoid clobbering flags.
-func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
+func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
// flive := b.FlagsLiveAtEnd
// if b.Control != nil && b.Control.Type.IsFlags() {
// flive = true
panic("bad store type")
}
-func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
switch v.Op {
case ssa.OpCopy:
t := v.Type
// BNE retry
p3 := s.Prog(ppc64.ABNE)
p3.To.Type = obj.TYPE_BRANCH
- gc.Patch(p3, p)
+ p3.To.SetTarget(p)
case ssa.OpPPC64LoweredAtomicAdd32,
ssa.OpPPC64LoweredAtomicAdd64:
// BNE retry
p4 := s.Prog(ppc64.ABNE)
p4.To.Type = obj.TYPE_BRANCH
- gc.Patch(p4, p)
+ p4.To.SetTarget(p)
// Ensure a 32 bit result
if v.Op == ssa.OpPPC64LoweredAtomicAdd32 {
// BNE retry
p2 := s.Prog(ppc64.ABNE)
p2.To.Type = obj.TYPE_BRANCH
- gc.Patch(p2, p)
+ p2.To.SetTarget(p)
// ISYNC
pisync := s.Prog(ppc64.AISYNC)
pisync.To.Type = obj.TYPE_NONE
// ISYNC
pisync := s.Prog(ppc64.AISYNC)
pisync.To.Type = obj.TYPE_NONE
- gc.Patch(p2, pisync)
+ p2.To.SetTarget(pisync)
case ssa.OpPPC64LoweredAtomicStore8,
ssa.OpPPC64LoweredAtomicStore32,
// BNE retry
p4 := s.Prog(ppc64.ABNE)
p4.To.Type = obj.TYPE_BRANCH
- gc.Patch(p4, p)
+ p4.To.SetTarget(p)
// LWSYNC - Assuming shared data not write-through-required nor
// caching-inhibited. See Appendix B.2.1.1 in the ISA 2.07b.
// If the operation is a CAS-Release, then synchronization is not necessary.
p7.From.Offset = 0
p7.To.Type = obj.TYPE_REG
p7.To.Reg = out
- gc.Patch(p2, p7)
+ p2.To.SetTarget(p7)
// done (label)
p8 := s.Prog(obj.ANOP)
- gc.Patch(p6, p8)
+ p6.To.SetTarget(p8)
case ssa.OpPPC64LoweredGetClosurePtr:
// Closure pointer is R11 (already)
- gc.CheckLoweredGetClosurePtr(v)
+ ssagen.CheckLoweredGetClosurePtr(v)
case ssa.OpPPC64LoweredGetCallerSP:
// caller's SP is FixedFrameSize below the address of the first arg
case ssa.OpLoadReg:
loadOp := loadByType(v.Type)
p := s.Prog(loadOp)
- gc.AddrAuto(&p.From, v.Args[0])
+ ssagen.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
p := s.Prog(storeOp)
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
- gc.AddrAuto(&p.To, v)
+ ssagen.AddrAuto(&p.To, v)
case ssa.OpPPC64DIVD:
// For now,
p.To.Reg = r
p.From.Type = obj.TYPE_REG
p.From.Reg = r0
- gc.Patch(pbahead, p)
+ pbahead.To.SetTarget(p)
p = s.Prog(obj.ANOP)
- gc.Patch(pbover, p)
+ pbover.To.SetTarget(p)
case ssa.OpPPC64DIVW:
// word-width version of above
p.To.Reg = r
p.From.Type = obj.TYPE_REG
p.From.Reg = r0
- gc.Patch(pbahead, p)
+ pbahead.To.SetTarget(p)
p = s.Prog(obj.ANOP)
- gc.Patch(pbover, p)
+ pbover.To.SetTarget(p)
case ssa.OpPPC64CLRLSLWI:
r := v.Reg()
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
}
p := s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_ADDR
p.From.Reg = v.Args[0].Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
// Load go.string using 0 offset
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
p.From.Reg = ppc64.REGZERO
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
case ssa.OpPPC64MOVDstore, ssa.OpPPC64MOVWstore, ssa.OpPPC64MOVHstore, ssa.OpPPC64MOVBstore, ssa.OpPPC64FMOVDstore, ssa.OpPPC64FMOVSstore:
p := s.Prog(v.Op.Asm())
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
case ssa.OpPPC64MOVDstoreidx, ssa.OpPPC64MOVWstoreidx, ssa.OpPPC64MOVHstoreidx, ssa.OpPPC64MOVBstoreidx,
ssa.OpPPC64FMOVDstoreidx, ssa.OpPPC64FMOVSstoreidx, ssa.OpPPC64MOVDBRstoreidx, ssa.OpPPC64MOVWBRstoreidx,
p.From.Offset = ppc64.BO_BCTR
p.Reg = ppc64.REG_R0
p.To.Type = obj.TYPE_BRANCH
- gc.Patch(p, top)
+ p.To.SetTarget(top)
}
// When ctr == 1 the loop was not generated but
// there are at least 64 bytes to clear, so add
p.From.Offset = ppc64.BO_BCTR
p.Reg = ppc64.REG_R0
p.To.Type = obj.TYPE_BRANCH
- gc.Patch(p, top)
+ p.To.SetTarget(top)
}
// when ctr == 1 the loop was not generated but
p.From.Offset = ppc64.BO_BCTR
p.Reg = ppc64.REG_R0
p.To.Type = obj.TYPE_BRANCH
- gc.Patch(p, top)
+ p.To.SetTarget(top)
// srcReg and dstReg were incremented in the loop, so
// later instructions start with offset 0.
p.From.Offset = ppc64.BO_BCTR
p.Reg = ppc64.REG_R0
p.To.Type = obj.TYPE_BRANCH
- gc.Patch(p, top)
+ p.To.SetTarget(top)
// srcReg and dstReg were incremented in the loop, so
// later instructions start with offset 0.
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
+ p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
s.UseArgs(16) // space used in callee args area by assembly stubs
case ssa.OpPPC64LoweredNilCheck:
// NOP (so the BNE has somewhere to land)
nop := s.Prog(obj.ANOP)
- gc.Patch(p2, nop)
+ p2.To.SetTarget(nop)
} else {
// Issue a load which will fault if arg is nil.
p := s.Prog(ppc64.AMOVBZ)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = ppc64.REGTMP
}
ssa.BlockPPC64FGT: {ppc64.ABGT, ppc64.ABLE, false, false},
}
-func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockDefer:
// defer returns in R3:
p = s.Prog(ppc64.ABNE)
p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
case ssa.BlockRet:
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package reflectdata
import (
+ "fmt"
+ "sort"
+
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/obj"
- "fmt"
- "sort"
-)
-
-// AlgKind describes the kind of algorithms used for comparing and
-// hashing a Type.
-type AlgKind int
-
-//go:generate stringer -type AlgKind -trimprefix A
-
-const (
- // These values are known by runtime.
- ANOEQ AlgKind = iota
- AMEM0
- AMEM8
- AMEM16
- AMEM32
- AMEM64
- AMEM128
- ASTRING
- AINTER
- ANILINTER
- AFLOAT32
- AFLOAT64
- ACPLX64
- ACPLX128
-
- // Type can be compared/hashed as regular memory.
- AMEM AlgKind = 100
-
- // Type needs special comparison/hashing functions.
- ASPECIAL AlgKind = -1
)
-// IsComparable reports whether t is a comparable type.
-func IsComparable(t *types.Type) bool {
- a, _ := algtype1(t)
- return a != ANOEQ
+// isRegularMemory reports whether t can be compared/hashed as regular memory.
+func isRegularMemory(t *types.Type) bool {
+ a, _ := types.AlgType(t)
+ return a == types.AMEM
}
-// IsRegularMemory reports whether t can be compared/hashed as regular memory.
-func IsRegularMemory(t *types.Type) bool {
- a, _ := algtype1(t)
- return a == AMEM
-}
-
-// IncomparableField returns an incomparable Field of struct Type t, if any.
-func IncomparableField(t *types.Type) *types.Field {
- for _, f := range t.FieldSlice() {
- if !IsComparable(f.Type) {
- return f
- }
- }
- return nil
-}
-
-// EqCanPanic reports whether == on type t could panic (has an interface somewhere).
+// eqCanPanic reports whether == on type t could panic (has an interface somewhere).
// t must be comparable.
-func EqCanPanic(t *types.Type) bool {
+func eqCanPanic(t *types.Type) bool {
switch t.Kind() {
default:
return false
case types.TINTER:
return true
case types.TARRAY:
- return EqCanPanic(t.Elem())
+ return eqCanPanic(t.Elem())
case types.TSTRUCT:
for _, f := range t.FieldSlice() {
- if !f.Sym.IsBlank() && EqCanPanic(f.Type) {
+ if !f.Sym.IsBlank() && eqCanPanic(f.Type) {
return true
}
}
}
}
-// algtype is like algtype1, except it returns the fixed-width AMEMxx variants
+// AlgType is like types.AlgType, except it returns the fixed-width AMEMxx variants
// instead of the general AMEM kind when possible.
-func algtype(t *types.Type) AlgKind {
- a, _ := algtype1(t)
- if a == AMEM {
+func AlgType(t *types.Type) types.AlgKind {
+ a, _ := types.AlgType(t)
+ if a == types.AMEM {
switch t.Width {
case 0:
- return AMEM0
+ return types.AMEM0
case 1:
- return AMEM8
+ return types.AMEM8
case 2:
- return AMEM16
+ return types.AMEM16
case 4:
- return AMEM32
+ return types.AMEM32
case 8:
- return AMEM64
+ return types.AMEM64
case 16:
- return AMEM128
+ return types.AMEM128
}
}
return a
}
-// algtype1 returns the AlgKind used for comparing and hashing Type t.
-// If it returns ANOEQ, it also returns the component type of t that
-// makes it incomparable.
-func algtype1(t *types.Type) (AlgKind, *types.Type) {
- if t.Broke() {
- return AMEM, nil
- }
- if t.Noalg() {
- return ANOEQ, t
- }
-
- switch t.Kind() {
- case types.TANY, types.TFORW:
- // will be defined later.
- return ANOEQ, t
-
- case types.TINT8, types.TUINT8, types.TINT16, types.TUINT16,
- types.TINT32, types.TUINT32, types.TINT64, types.TUINT64,
- types.TINT, types.TUINT, types.TUINTPTR,
- types.TBOOL, types.TPTR,
- types.TCHAN, types.TUNSAFEPTR:
- return AMEM, nil
-
- case types.TFUNC, types.TMAP:
- return ANOEQ, t
-
- case types.TFLOAT32:
- return AFLOAT32, nil
-
- case types.TFLOAT64:
- return AFLOAT64, nil
-
- case types.TCOMPLEX64:
- return ACPLX64, nil
-
- case types.TCOMPLEX128:
- return ACPLX128, nil
-
- case types.TSTRING:
- return ASTRING, nil
-
- case types.TINTER:
- if t.IsEmptyInterface() {
- return ANILINTER, nil
- }
- return AINTER, nil
-
- case types.TSLICE:
- return ANOEQ, t
-
- case types.TARRAY:
- a, bad := algtype1(t.Elem())
- switch a {
- case AMEM:
- return AMEM, nil
- case ANOEQ:
- return ANOEQ, bad
- }
-
- switch t.NumElem() {
- case 0:
- // We checked above that the element type is comparable.
- return AMEM, nil
- case 1:
- // Single-element array is same as its lone element.
- return a, nil
- }
-
- return ASPECIAL, nil
-
- case types.TSTRUCT:
- fields := t.FieldSlice()
-
- // One-field struct is same as that one field alone.
- if len(fields) == 1 && !fields[0].Sym.IsBlank() {
- return algtype1(fields[0].Type)
- }
-
- ret := AMEM
- for i, f := range fields {
- // All fields must be comparable.
- a, bad := algtype1(f.Type)
- if a == ANOEQ {
- return ANOEQ, bad
- }
-
- // Blank fields, padded fields, fields with non-memory
- // equality need special compare.
- if a != AMEM || f.Sym.IsBlank() || ispaddedfield(t, i) {
- ret = ASPECIAL
- }
- }
-
- return ret, nil
- }
-
- base.Fatalf("algtype1: unexpected type %v", t)
- return 0, nil
-}
-
// genhash returns a symbol which is the closure used to compute
// the hash of a value of type t.
// Note: the generated function must match runtime.typehash exactly.
func genhash(t *types.Type) *obj.LSym {
- switch algtype(t) {
+ switch AlgType(t) {
default:
// genhash is only called for types that have equality
base.Fatalf("genhash %v", t)
- case AMEM0:
+ case types.AMEM0:
return sysClosure("memhash0")
- case AMEM8:
+ case types.AMEM8:
return sysClosure("memhash8")
- case AMEM16:
+ case types.AMEM16:
return sysClosure("memhash16")
- case AMEM32:
+ case types.AMEM32:
return sysClosure("memhash32")
- case AMEM64:
+ case types.AMEM64:
return sysClosure("memhash64")
- case AMEM128:
+ case types.AMEM128:
return sysClosure("memhash128")
- case ASTRING:
+ case types.ASTRING:
return sysClosure("strhash")
- case AINTER:
+ case types.AINTER:
return sysClosure("interhash")
- case ANILINTER:
+ case types.ANILINTER:
return sysClosure("nilinterhash")
- case AFLOAT32:
+ case types.AFLOAT32:
return sysClosure("f32hash")
- case AFLOAT64:
+ case types.AFLOAT64:
return sysClosure("f64hash")
- case ACPLX64:
+ case types.ACPLX64:
return sysClosure("c64hash")
- case ACPLX128:
+ case types.ACPLX128:
return sysClosure("c128hash")
- case AMEM:
+ case types.AMEM:
// For other sizes of plain memory, we build a closure
// that calls memhash_varlen. The size of the memory is
// encoded in the first slot of the closure.
- closure := typeLookup(fmt.Sprintf(".hashfunc%d", t.Width)).Linksym()
+ closure := types.TypeSymLookup(fmt.Sprintf(".hashfunc%d", t.Width)).Linksym()
if len(closure.P) > 0 { // already generated
return closure
}
if memhashvarlen == nil {
- memhashvarlen = sysfunc("memhash_varlen")
+ memhashvarlen = typecheck.LookupRuntimeFunc("memhash_varlen")
}
ot := 0
- ot = dsymptr(closure, ot, memhashvarlen, 0)
- ot = duintptr(closure, ot, uint64(t.Width)) // size encoded in closure
- ggloblsym(closure, int32(ot), obj.DUPOK|obj.RODATA)
+ ot = objw.SymPtr(closure, ot, memhashvarlen, 0)
+ ot = objw.Uintptr(closure, ot, uint64(t.Width)) // size encoded in closure
+ objw.Global(closure, int32(ot), obj.DUPOK|obj.RODATA)
return closure
- case ASPECIAL:
+ case types.ASPECIAL:
break
}
- closure := typesymprefix(".hashfunc", t).Linksym()
+ closure := TypeSymPrefix(".hashfunc", t).Linksym()
if len(closure.P) > 0 { // already generated
return closure
}
}
}
- sym := typesymprefix(".hash", t)
+ sym := TypeSymPrefix(".hash", t)
if base.Flag.LowerR != 0 {
fmt.Printf("genhash %v %v %v\n", closure, sym, t)
}
- base.Pos = autogeneratedPos // less confusing than end of input
- dclcontext = ir.PEXTERN
+ base.Pos = base.AutogeneratedPos // less confusing than end of input
+ typecheck.DeclContext = ir.PEXTERN
// func sym(p *T, h uintptr) uintptr
args := []*ir.Field{
- namedfield("p", types.NewPtr(t)),
- namedfield("h", types.Types[types.TUINTPTR]),
+ ir.NewField(base.Pos, typecheck.Lookup("p"), nil, types.NewPtr(t)),
+ ir.NewField(base.Pos, typecheck.Lookup("h"), nil, types.Types[types.TUINTPTR]),
}
- results := []*ir.Field{anonfield(types.Types[types.TUINTPTR])}
+ results := []*ir.Field{ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR])}
tfn := ir.NewFuncType(base.Pos, nil, args, results)
- fn := dclfunc(sym, tfn)
+ fn := typecheck.DeclFunc(sym, tfn)
np := ir.AsNode(tfn.Type().Params().Field(0).Nname)
nh := ir.AsNode(tfn.Type().Params().Field(1).Nname)
hashel := hashfor(t.Elem())
// for i := 0; i < nelem; i++
- ni := temp(types.Types[types.TINT])
- init := ir.Nod(ir.OAS, ni, nodintconst(0))
- cond := ir.Nod(ir.OLT, ni, nodintconst(t.NumElem()))
- post := ir.Nod(ir.OAS, ni, ir.Nod(ir.OADD, ni, nodintconst(1)))
- loop := ir.Nod(ir.OFOR, cond, post)
+ ni := typecheck.Temp(types.Types[types.TINT])
+ init := ir.NewAssignStmt(base.Pos, ni, ir.NewInt(0))
+ cond := ir.NewBinaryExpr(base.Pos, ir.OLT, ni, ir.NewInt(t.NumElem()))
+ post := ir.NewAssignStmt(base.Pos, ni, ir.NewBinaryExpr(base.Pos, ir.OADD, ni, ir.NewInt(1)))
+ loop := ir.NewForStmt(base.Pos, nil, cond, post, nil)
loop.PtrInit().Append(init)
// h = hashel(&p[i], h)
- call := ir.Nod(ir.OCALL, hashel, nil)
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil)
- nx := ir.Nod(ir.OINDEX, np, ni)
+ nx := ir.NewIndexExpr(base.Pos, np, ni)
nx.SetBounded(true)
- na := nodAddr(nx)
- call.PtrList().Append(na)
- call.PtrList().Append(nh)
- loop.PtrBody().Append(ir.Nod(ir.OAS, nh, call))
+ na := typecheck.NodAddr(nx)
+ call.Args.Append(na)
+ call.Args.Append(nh)
+ loop.Body.Append(ir.NewAssignStmt(base.Pos, nh, call))
- fn.PtrBody().Append(loop)
+ fn.Body.Append(loop)
case types.TSTRUCT:
// Walk the struct using memhash for runs of AMEM
}
// Hash non-memory fields with appropriate hash function.
- if !IsRegularMemory(f.Type) {
+ if !isRegularMemory(f.Type) {
hashel := hashfor(f.Type)
- call := ir.Nod(ir.OCALL, hashel, nil)
- nx := nodSym(ir.OXDOT, np, f.Sym) // TODO: fields from other packages?
- na := nodAddr(nx)
- call.PtrList().Append(na)
- call.PtrList().Append(nh)
- fn.PtrBody().Append(ir.Nod(ir.OAS, nh, call))
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil)
+ nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, np, f.Sym) // TODO: fields from other packages?
+ na := typecheck.NodAddr(nx)
+ call.Args.Append(na)
+ call.Args.Append(nh)
+ fn.Body.Append(ir.NewAssignStmt(base.Pos, nh, call))
i++
continue
}
// h = hashel(&p.first, size, h)
hashel := hashmem(f.Type)
- call := ir.Nod(ir.OCALL, hashel, nil)
- nx := nodSym(ir.OXDOT, np, f.Sym) // TODO: fields from other packages?
- na := nodAddr(nx)
- call.PtrList().Append(na)
- call.PtrList().Append(nh)
- call.PtrList().Append(nodintconst(size))
- fn.PtrBody().Append(ir.Nod(ir.OAS, nh, call))
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil)
+ nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, np, f.Sym) // TODO: fields from other packages?
+ na := typecheck.NodAddr(nx)
+ call.Args.Append(na)
+ call.Args.Append(nh)
+ call.Args.Append(ir.NewInt(size))
+ fn.Body.Append(ir.NewAssignStmt(base.Pos, nh, call))
i = next
}
}
- r := ir.Nod(ir.ORETURN, nil, nil)
- r.PtrList().Append(nh)
- fn.PtrBody().Append(r)
+ r := ir.NewReturnStmt(base.Pos, nil)
+ r.Results.Append(nh)
+ fn.Body.Append(r)
if base.Flag.LowerR != 0 {
- ir.DumpList("genhash body", fn.Body())
+ ir.DumpList("genhash body", fn.Body)
}
- funcbody()
+ typecheck.FinishFuncBody()
fn.SetDupok(true)
- typecheckFunc(fn)
+ typecheck.Func(fn)
- Curfn = fn
- typecheckslice(fn.Body().Slice(), ctxStmt)
- Curfn = nil
+ ir.CurFunc = fn
+ typecheck.Stmts(fn.Body)
+ ir.CurFunc = nil
if base.Debug.DclStack != 0 {
- testdclstack()
+ types.CheckDclstack()
}
fn.SetNilCheckDisabled(true)
- Target.Decls = append(Target.Decls, fn)
+ typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
// Build closure. It doesn't close over any variables, so
// it contains just the function pointer.
- dsymptr(closure, 0, sym.Linksym(), 0)
- ggloblsym(closure, int32(Widthptr), obj.DUPOK|obj.RODATA)
+ objw.SymPtr(closure, 0, sym.Linksym(), 0)
+ objw.Global(closure, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
return closure
}
func hashfor(t *types.Type) ir.Node {
var sym *types.Sym
- switch a, _ := algtype1(t); a {
- case AMEM:
+ switch a, _ := types.AlgType(t); a {
+ case types.AMEM:
base.Fatalf("hashfor with AMEM type")
- case AINTER:
- sym = Runtimepkg.Lookup("interhash")
- case ANILINTER:
- sym = Runtimepkg.Lookup("nilinterhash")
- case ASTRING:
- sym = Runtimepkg.Lookup("strhash")
- case AFLOAT32:
- sym = Runtimepkg.Lookup("f32hash")
- case AFLOAT64:
- sym = Runtimepkg.Lookup("f64hash")
- case ACPLX64:
- sym = Runtimepkg.Lookup("c64hash")
- case ACPLX128:
- sym = Runtimepkg.Lookup("c128hash")
+ case types.AINTER:
+ sym = ir.Pkgs.Runtime.Lookup("interhash")
+ case types.ANILINTER:
+ sym = ir.Pkgs.Runtime.Lookup("nilinterhash")
+ case types.ASTRING:
+ sym = ir.Pkgs.Runtime.Lookup("strhash")
+ case types.AFLOAT32:
+ sym = ir.Pkgs.Runtime.Lookup("f32hash")
+ case types.AFLOAT64:
+ sym = ir.Pkgs.Runtime.Lookup("f64hash")
+ case types.ACPLX64:
+ sym = ir.Pkgs.Runtime.Lookup("c64hash")
+ case types.ACPLX128:
+ sym = ir.Pkgs.Runtime.Lookup("c128hash")
default:
// Note: the caller of hashfor ensured that this symbol
// exists and has a body by calling genhash for t.
- sym = typesymprefix(".hash", t)
+ sym = TypeSymPrefix(".hash", t)
}
- n := NewName(sym)
- setNodeNameFunc(n)
- n.SetType(functype(nil, []*ir.Field{
- anonfield(types.NewPtr(t)),
- anonfield(types.Types[types.TUINTPTR]),
+ n := typecheck.NewName(sym)
+ ir.MarkFunc(n)
+ n.SetType(typecheck.NewFuncType(nil, []*ir.Field{
+ ir.NewField(base.Pos, nil, nil, types.NewPtr(t)),
+ ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]),
}, []*ir.Field{
- anonfield(types.Types[types.TUINTPTR]),
+ ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]),
}))
return n
}
// sysClosure returns a closure which will call the
// given runtime function (with no closed-over variables).
func sysClosure(name string) *obj.LSym {
- s := sysvar(name + "·f")
+ s := typecheck.LookupRuntimeVar(name + "·f")
if len(s.P) == 0 {
- f := sysfunc(name)
- dsymptr(s, 0, f, 0)
- ggloblsym(s, int32(Widthptr), obj.DUPOK|obj.RODATA)
+ f := typecheck.LookupRuntimeFunc(name)
+ objw.SymPtr(s, 0, f, 0)
+ objw.Global(s, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
}
return s
}
// geneq returns a symbol which is the closure used to compute
// equality for two objects of type t.
func geneq(t *types.Type) *obj.LSym {
- switch algtype(t) {
- case ANOEQ:
+ switch AlgType(t) {
+ case types.ANOEQ:
// The runtime will panic if it tries to compare
// a type with a nil equality function.
return nil
- case AMEM0:
+ case types.AMEM0:
return sysClosure("memequal0")
- case AMEM8:
+ case types.AMEM8:
return sysClosure("memequal8")
- case AMEM16:
+ case types.AMEM16:
return sysClosure("memequal16")
- case AMEM32:
+ case types.AMEM32:
return sysClosure("memequal32")
- case AMEM64:
+ case types.AMEM64:
return sysClosure("memequal64")
- case AMEM128:
+ case types.AMEM128:
return sysClosure("memequal128")
- case ASTRING:
+ case types.ASTRING:
return sysClosure("strequal")
- case AINTER:
+ case types.AINTER:
return sysClosure("interequal")
- case ANILINTER:
+ case types.ANILINTER:
return sysClosure("nilinterequal")
- case AFLOAT32:
+ case types.AFLOAT32:
return sysClosure("f32equal")
- case AFLOAT64:
+ case types.AFLOAT64:
return sysClosure("f64equal")
- case ACPLX64:
+ case types.ACPLX64:
return sysClosure("c64equal")
- case ACPLX128:
+ case types.ACPLX128:
return sysClosure("c128equal")
- case AMEM:
+ case types.AMEM:
// make equality closure. The size of the type
// is encoded in the closure.
- closure := typeLookup(fmt.Sprintf(".eqfunc%d", t.Width)).Linksym()
+ closure := types.TypeSymLookup(fmt.Sprintf(".eqfunc%d", t.Width)).Linksym()
if len(closure.P) != 0 {
return closure
}
if memequalvarlen == nil {
- memequalvarlen = sysvar("memequal_varlen") // asm func
+ memequalvarlen = typecheck.LookupRuntimeVar("memequal_varlen") // asm func
}
ot := 0
- ot = dsymptr(closure, ot, memequalvarlen, 0)
- ot = duintptr(closure, ot, uint64(t.Width))
- ggloblsym(closure, int32(ot), obj.DUPOK|obj.RODATA)
+ ot = objw.SymPtr(closure, ot, memequalvarlen, 0)
+ ot = objw.Uintptr(closure, ot, uint64(t.Width))
+ objw.Global(closure, int32(ot), obj.DUPOK|obj.RODATA)
return closure
- case ASPECIAL:
+ case types.ASPECIAL:
break
}
- closure := typesymprefix(".eqfunc", t).Linksym()
+ closure := TypeSymPrefix(".eqfunc", t).Linksym()
if len(closure.P) > 0 { // already generated
return closure
}
- sym := typesymprefix(".eq", t)
+ sym := TypeSymPrefix(".eq", t)
if base.Flag.LowerR != 0 {
fmt.Printf("geneq %v\n", t)
}
// Autogenerate code for equality of structs and arrays.
- base.Pos = autogeneratedPos // less confusing than end of input
- dclcontext = ir.PEXTERN
+ base.Pos = base.AutogeneratedPos // less confusing than end of input
+ typecheck.DeclContext = ir.PEXTERN
// func sym(p, q *T) bool
tfn := ir.NewFuncType(base.Pos, nil,
- []*ir.Field{namedfield("p", types.NewPtr(t)), namedfield("q", types.NewPtr(t))},
- []*ir.Field{namedfield("r", types.Types[types.TBOOL])})
+ []*ir.Field{ir.NewField(base.Pos, typecheck.Lookup("p"), nil, types.NewPtr(t)), ir.NewField(base.Pos, typecheck.Lookup("q"), nil, types.NewPtr(t))},
+ []*ir.Field{ir.NewField(base.Pos, typecheck.Lookup("r"), nil, types.Types[types.TBOOL])})
- fn := dclfunc(sym, tfn)
+ fn := typecheck.DeclFunc(sym, tfn)
np := ir.AsNode(tfn.Type().Params().Field(0).Nname)
nq := ir.AsNode(tfn.Type().Params().Field(1).Nname)
nr := ir.AsNode(tfn.Type().Results().Field(0).Nname)
// Label to jump to if an equality test fails.
- neq := autolabel(".neq")
+ neq := typecheck.AutoLabel(".neq")
// We reach here only for types that have equality but
// cannot be handled by the standard algorithms,
// checkIdx generates a node to check for equality at index i.
checkIdx := func(i ir.Node) ir.Node {
// pi := p[i]
- pi := ir.Nod(ir.OINDEX, np, i)
+ pi := ir.NewIndexExpr(base.Pos, np, i)
pi.SetBounded(true)
pi.SetType(t.Elem())
// qi := q[i]
- qi := ir.Nod(ir.OINDEX, nq, i)
+ qi := ir.NewIndexExpr(base.Pos, nq, i)
qi.SetBounded(true)
qi.SetType(t.Elem())
return eq(pi, qi)
// Generate a series of checks.
for i := int64(0); i < nelem; i++ {
// if check {} else { goto neq }
- nif := ir.Nod(ir.OIF, checkIdx(nodintconst(i)), nil)
- nif.PtrRlist().Append(nodSym(ir.OGOTO, nil, neq))
- fn.PtrBody().Append(nif)
+ nif := ir.NewIfStmt(base.Pos, checkIdx(ir.NewInt(i)), nil, nil)
+ nif.Else.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
+ fn.Body.Append(nif)
}
if last {
- fn.PtrBody().Append(ir.Nod(ir.OAS, nr, checkIdx(nodintconst(nelem))))
+ fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, checkIdx(ir.NewInt(nelem))))
}
} else {
// Generate a for loop.
// for i := 0; i < nelem; i++
- i := temp(types.Types[types.TINT])
- init := ir.Nod(ir.OAS, i, nodintconst(0))
- cond := ir.Nod(ir.OLT, i, nodintconst(nelem))
- post := ir.Nod(ir.OAS, i, ir.Nod(ir.OADD, i, nodintconst(1)))
- loop := ir.Nod(ir.OFOR, cond, post)
+ i := typecheck.Temp(types.Types[types.TINT])
+ init := ir.NewAssignStmt(base.Pos, i, ir.NewInt(0))
+ cond := ir.NewBinaryExpr(base.Pos, ir.OLT, i, ir.NewInt(nelem))
+ post := ir.NewAssignStmt(base.Pos, i, ir.NewBinaryExpr(base.Pos, ir.OADD, i, ir.NewInt(1)))
+ loop := ir.NewForStmt(base.Pos, nil, cond, post, nil)
loop.PtrInit().Append(init)
// if eq(pi, qi) {} else { goto neq }
- nif := ir.Nod(ir.OIF, checkIdx(i), nil)
- nif.PtrRlist().Append(nodSym(ir.OGOTO, nil, neq))
- loop.PtrBody().Append(nif)
- fn.PtrBody().Append(loop)
+ nif := ir.NewIfStmt(base.Pos, checkIdx(i), nil, nil)
+ nif.Else.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
+ loop.Body.Append(nif)
+ fn.Body.Append(loop)
if last {
- fn.PtrBody().Append(ir.Nod(ir.OAS, nr, nodbool(true)))
+ fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, ir.NewBool(true)))
}
}
}
// TODO: when the array size is small, unroll the length match checks.
checkAll(3, false, func(pi, qi ir.Node) ir.Node {
// Compare lengths.
- eqlen, _ := eqstring(pi, qi)
+ eqlen, _ := EqString(pi, qi)
return eqlen
})
checkAll(1, true, func(pi, qi ir.Node) ir.Node {
// Compare contents.
- _, eqmem := eqstring(pi, qi)
+ _, eqmem := EqString(pi, qi)
return eqmem
})
case types.TFLOAT32, types.TFLOAT64:
checkAll(2, true, func(pi, qi ir.Node) ir.Node {
// p[i] == q[i]
- return ir.Nod(ir.OEQ, pi, qi)
+ return ir.NewBinaryExpr(base.Pos, ir.OEQ, pi, qi)
})
// TODO: pick apart structs, do them piecemeal too
default:
checkAll(1, true, func(pi, qi ir.Node) ir.Node {
// p[i] == q[i]
- return ir.Nod(ir.OEQ, pi, qi)
+ return ir.NewBinaryExpr(base.Pos, ir.OEQ, pi, qi)
})
}
}
// Compare non-memory fields with field equality.
- if !IsRegularMemory(f.Type) {
- if EqCanPanic(f.Type) {
+ if !isRegularMemory(f.Type) {
+ if eqCanPanic(f.Type) {
// Enforce ordering by starting a new set of reorderable conditions.
conds = append(conds, []ir.Node{})
}
- p := nodSym(ir.OXDOT, np, f.Sym)
- q := nodSym(ir.OXDOT, nq, f.Sym)
+ p := ir.NewSelectorExpr(base.Pos, ir.OXDOT, np, f.Sym)
+ q := ir.NewSelectorExpr(base.Pos, ir.OXDOT, nq, f.Sym)
switch {
case f.Type.IsString():
- eqlen, eqmem := eqstring(p, q)
+ eqlen, eqmem := EqString(p, q)
and(eqlen)
and(eqmem)
default:
- and(ir.Nod(ir.OEQ, p, q))
+ and(ir.NewBinaryExpr(base.Pos, ir.OEQ, p, q))
}
- if EqCanPanic(f.Type) {
+ if eqCanPanic(f.Type) {
// Also enforce ordering after something that can panic.
conds = append(conds, []ir.Node{})
}
}
if len(flatConds) == 0 {
- fn.PtrBody().Append(ir.Nod(ir.OAS, nr, nodbool(true)))
+ fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, ir.NewBool(true)))
} else {
for _, c := range flatConds[:len(flatConds)-1] {
// if cond {} else { goto neq }
- n := ir.Nod(ir.OIF, c, nil)
- n.PtrRlist().Append(nodSym(ir.OGOTO, nil, neq))
- fn.PtrBody().Append(n)
+ n := ir.NewIfStmt(base.Pos, c, nil, nil)
+ n.Else.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
+ fn.Body.Append(n)
}
- fn.PtrBody().Append(ir.Nod(ir.OAS, nr, flatConds[len(flatConds)-1]))
+ fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, flatConds[len(flatConds)-1]))
}
}
// ret:
// return
- ret := autolabel(".ret")
- fn.PtrBody().Append(nodSym(ir.OLABEL, nil, ret))
- fn.PtrBody().Append(ir.Nod(ir.ORETURN, nil, nil))
+ ret := typecheck.AutoLabel(".ret")
+ fn.Body.Append(ir.NewLabelStmt(base.Pos, ret))
+ fn.Body.Append(ir.NewReturnStmt(base.Pos, nil))
// neq:
// r = false
// return (or goto ret)
- fn.PtrBody().Append(nodSym(ir.OLABEL, nil, neq))
- fn.PtrBody().Append(ir.Nod(ir.OAS, nr, nodbool(false)))
- if EqCanPanic(t) || anyCall(fn) {
+ fn.Body.Append(ir.NewLabelStmt(base.Pos, neq))
+ fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, ir.NewBool(false)))
+ if eqCanPanic(t) || anyCall(fn) {
// Epilogue is large, so share it with the equal case.
- fn.PtrBody().Append(nodSym(ir.OGOTO, nil, ret))
+ fn.Body.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, ret))
} else {
// Epilogue is small, so don't bother sharing.
- fn.PtrBody().Append(ir.Nod(ir.ORETURN, nil, nil))
+ fn.Body.Append(ir.NewReturnStmt(base.Pos, nil))
}
// TODO(khr): the epilogue size detection condition above isn't perfect.
// We should really do a generic CL that shares epilogues across
// the board. See #24936.
if base.Flag.LowerR != 0 {
- ir.DumpList("geneq body", fn.Body())
+ ir.DumpList("geneq body", fn.Body)
}
- funcbody()
+ typecheck.FinishFuncBody()
fn.SetDupok(true)
- typecheckFunc(fn)
+ typecheck.Func(fn)
- Curfn = fn
- typecheckslice(fn.Body().Slice(), ctxStmt)
- Curfn = nil
+ ir.CurFunc = fn
+ typecheck.Stmts(fn.Body)
+ ir.CurFunc = nil
if base.Debug.DclStack != 0 {
- testdclstack()
+ types.CheckDclstack()
}
// Disable checknils while compiling this code.
// neither of which can be nil, and our comparisons
// are shallow.
fn.SetNilCheckDisabled(true)
- Target.Decls = append(Target.Decls, fn)
+ typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
// Generate a closure which points at the function we just generated.
- dsymptr(closure, 0, sym.Linksym(), 0)
- ggloblsym(closure, int32(Widthptr), obj.DUPOK|obj.RODATA)
+ objw.SymPtr(closure, 0, sym.Linksym(), 0)
+ objw.Global(closure, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
return closure
}
// eqfield returns the node
// p.field == q.field
func eqfield(p ir.Node, q ir.Node, field *types.Sym) ir.Node {
- nx := nodSym(ir.OXDOT, p, field)
- ny := nodSym(ir.OXDOT, q, field)
- ne := ir.Nod(ir.OEQ, nx, ny)
+ nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, p, field)
+ ny := ir.NewSelectorExpr(base.Pos, ir.OXDOT, q, field)
+ ne := ir.NewBinaryExpr(base.Pos, ir.OEQ, nx, ny)
return ne
}
-// eqstring returns the nodes
+// EqString returns the nodes
// len(s) == len(t)
// and
// memequal(s.ptr, t.ptr, len(s))
// which can be used to construct string equality comparison.
// eqlen must be evaluated before eqmem, and shortcircuiting is required.
-func eqstring(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) {
- s = conv(s, types.Types[types.TSTRING])
- t = conv(t, types.Types[types.TSTRING])
- sptr := ir.Nod(ir.OSPTR, s, nil)
- tptr := ir.Nod(ir.OSPTR, t, nil)
- slen := conv(ir.Nod(ir.OLEN, s, nil), types.Types[types.TUINTPTR])
- tlen := conv(ir.Nod(ir.OLEN, t, nil), types.Types[types.TUINTPTR])
-
- fn := syslook("memequal")
- fn = substArgTypes(fn, types.Types[types.TUINT8], types.Types[types.TUINT8])
+func EqString(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) {
+ s = typecheck.Conv(s, types.Types[types.TSTRING])
+ t = typecheck.Conv(t, types.Types[types.TSTRING])
+ sptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, s)
+ tptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, t)
+ slen := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, s), types.Types[types.TUINTPTR])
+ tlen := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, t), types.Types[types.TUINTPTR])
+
+ fn := typecheck.LookupRuntime("memequal")
+ fn = typecheck.SubstArgTypes(fn, types.Types[types.TUINT8], types.Types[types.TUINT8])
call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, []ir.Node{sptr, tptr, ir.Copy(slen)})
- TypecheckCall(call)
+ typecheck.Call(call)
cmp := ir.NewBinaryExpr(base.Pos, ir.OEQ, slen, tlen)
- cmp = typecheck(cmp, ctxExpr).(*ir.BinaryExpr)
+ cmp = typecheck.Expr(cmp).(*ir.BinaryExpr)
cmp.SetType(types.Types[types.TBOOL])
return cmp, call
}
-// eqinterface returns the nodes
+// EqInterface returns the nodes
// s.tab == t.tab (or s.typ == t.typ, as appropriate)
// and
// ifaceeq(s.tab, s.data, t.data) (or efaceeq(s.typ, s.data, t.data), as appropriate)
// which can be used to construct interface equality comparison.
// eqtab must be evaluated before eqdata, and shortcircuiting is required.
-func eqinterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) {
+func EqInterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) {
if !types.Identical(s.Type(), t.Type()) {
base.Fatalf("eqinterface %v %v", s.Type(), t.Type())
}
// func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool)
var fn ir.Node
if s.Type().IsEmptyInterface() {
- fn = syslook("efaceeq")
+ fn = typecheck.LookupRuntime("efaceeq")
} else {
- fn = syslook("ifaceeq")
+ fn = typecheck.LookupRuntime("ifaceeq")
}
- stab := ir.Nod(ir.OITAB, s, nil)
- ttab := ir.Nod(ir.OITAB, t, nil)
- sdata := ir.Nod(ir.OIDATA, s, nil)
- tdata := ir.Nod(ir.OIDATA, t, nil)
+ stab := ir.NewUnaryExpr(base.Pos, ir.OITAB, s)
+ ttab := ir.NewUnaryExpr(base.Pos, ir.OITAB, t)
+ sdata := ir.NewUnaryExpr(base.Pos, ir.OIDATA, s)
+ tdata := ir.NewUnaryExpr(base.Pos, ir.OIDATA, t)
sdata.SetType(types.Types[types.TUNSAFEPTR])
tdata.SetType(types.Types[types.TUNSAFEPTR])
sdata.SetTypecheck(1)
tdata.SetTypecheck(1)
call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, []ir.Node{stab, sdata, tdata})
- TypecheckCall(call)
+ typecheck.Call(call)
cmp := ir.NewBinaryExpr(base.Pos, ir.OEQ, stab, ttab)
- cmp = typecheck(cmp, ctxExpr).(*ir.BinaryExpr)
+ cmp = typecheck.Expr(cmp).(*ir.BinaryExpr)
cmp.SetType(types.Types[types.TBOOL])
return cmp, call
}
// eqmem returns the node
// memequal(&p.field, &q.field [, size])
func eqmem(p ir.Node, q ir.Node, field *types.Sym, size int64) ir.Node {
- nx := typecheck(nodAddr(nodSym(ir.OXDOT, p, field)), ctxExpr)
- ny := typecheck(nodAddr(nodSym(ir.OXDOT, q, field)), ctxExpr)
+ nx := typecheck.Expr(typecheck.NodAddr(ir.NewSelectorExpr(base.Pos, ir.OXDOT, p, field)))
+ ny := typecheck.Expr(typecheck.NodAddr(ir.NewSelectorExpr(base.Pos, ir.OXDOT, q, field)))
fn, needsize := eqmemfunc(size, nx.Type().Elem())
- call := ir.Nod(ir.OCALL, fn, nil)
- call.PtrList().Append(nx)
- call.PtrList().Append(ny)
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
+ call.Args.Append(nx)
+ call.Args.Append(ny)
if needsize {
- call.PtrList().Append(nodintconst(size))
+ call.Args.Append(ir.NewInt(size))
}
return call
func eqmemfunc(size int64, t *types.Type) (fn *ir.Name, needsize bool) {
switch size {
default:
- fn = syslook("memequal")
+ fn = typecheck.LookupRuntime("memequal")
needsize = true
case 1, 2, 4, 8, 16:
buf := fmt.Sprintf("memequal%d", int(size)*8)
- fn = syslook(buf)
+ fn = typecheck.LookupRuntime(buf)
}
- fn = substArgTypes(fn, t, t)
+ fn = typecheck.SubstArgTypes(fn, t, t)
return fn, needsize
}
break
}
// Stop run after a padded field.
- if ispaddedfield(t, next-1) {
+ if types.IsPaddedField(t, next-1) {
break
}
// Also, stop before a blank or non-memory field.
- if f := t.Field(next); f.Sym.IsBlank() || !IsRegularMemory(f.Type) {
+ if f := t.Field(next); f.Sym.IsBlank() || !isRegularMemory(f.Type) {
break
}
}
return t.Field(next-1).End() - t.Field(start).Offset, next
}
-// ispaddedfield reports whether the i'th field of struct type t is followed
-// by padding.
-func ispaddedfield(t *types.Type, i int) bool {
- if !t.IsStruct() {
- base.Fatalf("ispaddedfield called non-struct %v", t)
- }
- end := t.Width
- if i+1 < t.NumFields() {
- end = t.Field(i + 1).Offset
- }
- return t.Field(i).End() != end
+func hashmem(t *types.Type) ir.Node {
+ sym := ir.Pkgs.Runtime.Lookup("memhash")
+
+ n := typecheck.NewName(sym)
+ ir.MarkFunc(n)
+ n.SetType(typecheck.NewFuncType(nil, []*ir.Field{
+ ir.NewField(base.Pos, nil, nil, types.NewPtr(t)),
+ ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]),
+ ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]),
+ }, []*ir.Field{
+ ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]),
+ }))
+ return n
}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package reflectdata
import (
+ "fmt"
+ "os"
+ "sort"
+ "strings"
+ "sync"
+
"cmd/compile/internal/base"
+ "cmd/compile/internal/bitvec"
+ "cmd/compile/internal/escape"
+ "cmd/compile/internal/inline"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/liveness"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/gcprog"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
- "fmt"
- "os"
- "sort"
- "strings"
- "sync"
)
type itabEntry struct {
ptabs []ptabEntry
)
-type Sig struct {
+type typeSig struct {
name *types.Sym
isym *types.Sym
tsym *types.Sym
MAXELEMSIZE = 128
)
-func structfieldSize() int { return 3 * Widthptr } // Sizeof(runtime.structfield{})
-func imethodSize() int { return 4 + 4 } // Sizeof(runtime.imethod{})
-func commonSize() int { return 4*Widthptr + 8 + 8 } // Sizeof(runtime._type{})
+func structfieldSize() int { return 3 * types.PtrSize } // Sizeof(runtime.structfield{})
+func imethodSize() int { return 4 + 4 } // Sizeof(runtime.imethod{})
+func commonSize() int { return 4*types.PtrSize + 8 + 8 } // Sizeof(runtime._type{})
func uncommonSize(t *types.Type) int { // Sizeof(runtime.uncommontype{})
if t.Sym() == nil && len(methods(t)) == 0 {
return types.NewField(src.NoXPos, sym, t)
}
-// bmap makes the map bucket type given the type of the map.
-func bmap(t *types.Type) *types.Type {
+// MapBucketType makes the map bucket type given the type of the map.
+func MapBucketType(t *types.Type) *types.Type {
if t.MapType().Bucket != nil {
return t.MapType().Bucket
}
keytype := t.Key()
elemtype := t.Elem()
- dowidth(keytype)
- dowidth(elemtype)
+ types.CalcSize(keytype)
+ types.CalcSize(elemtype)
if keytype.Width > MAXKEYSIZE {
keytype = types.NewPtr(keytype)
}
// link up fields
bucket := types.NewStruct(types.NoPkg, field[:])
bucket.SetNoalg(true)
- dowidth(bucket)
+ types.CalcSize(bucket)
// Check invariants that map code depends on.
- if !IsComparable(t.Key()) {
+ if !types.IsComparable(t.Key()) {
base.Fatalf("unsupported map key type for %v", t)
}
if BUCKETSIZE < 8 {
// Double-check that overflow field is final memory in struct,
// with no padding at end.
- if overflow.Offset != bucket.Width-int64(Widthptr) {
+ if overflow.Offset != bucket.Width-int64(types.PtrSize) {
base.Fatalf("bad offset of overflow in bmap for %v", t)
}
return bucket
}
-// hmap builds a type representing a Hmap structure for the given map type.
+// MapType builds a type representing a Hmap structure for the given map type.
// Make sure this stays in sync with runtime/map.go.
-func hmap(t *types.Type) *types.Type {
+func MapType(t *types.Type) *types.Type {
if t.MapType().Hmap != nil {
return t.MapType().Hmap
}
- bmap := bmap(t)
+ bmap := MapBucketType(t)
// build a struct:
// type hmap struct {
hmap := types.NewStruct(types.NoPkg, fields)
hmap.SetNoalg(true)
- dowidth(hmap)
+ types.CalcSize(hmap)
// The size of hmap should be 48 bytes on 64 bit
// and 28 bytes on 32 bit platforms.
- if size := int64(8 + 5*Widthptr); hmap.Width != size {
+ if size := int64(8 + 5*types.PtrSize); hmap.Width != size {
base.Fatalf("hmap size not correct: got %d, want %d", hmap.Width, size)
}
return hmap
}
-// hiter builds a type representing an Hiter structure for the given map type.
+// MapIterType builds a type representing an Hiter structure for the given map type.
// Make sure this stays in sync with runtime/map.go.
-func hiter(t *types.Type) *types.Type {
+func MapIterType(t *types.Type) *types.Type {
if t.MapType().Hiter != nil {
return t.MapType().Hiter
}
- hmap := hmap(t)
- bmap := bmap(t)
+ hmap := MapType(t)
+ bmap := MapBucketType(t)
// build a struct:
// type hiter struct {
// build iterator struct holding the above fields
hiter := types.NewStruct(types.NoPkg, fields)
hiter.SetNoalg(true)
- dowidth(hiter)
- if hiter.Width != int64(12*Widthptr) {
- base.Fatalf("hash_iter size not correct %d %d", hiter.Width, 12*Widthptr)
+ types.CalcSize(hiter)
+ if hiter.Width != int64(12*types.PtrSize) {
+ base.Fatalf("hash_iter size not correct %d %d", hiter.Width, 12*types.PtrSize)
}
t.MapType().Hiter = hiter
hiter.StructType().Map = t
return hiter
}
-// deferstruct makes a runtime._defer structure, with additional space for
-// stksize bytes of args.
-func deferstruct(stksize int64) *types.Type {
- makefield := func(name string, typ *types.Type) *types.Field {
- // Unlike the global makefield function, this one needs to set Pkg
- // because these types might be compared (in SSA CSE sorting).
- // TODO: unify this makefield and the global one above.
- sym := &types.Sym{Name: name, Pkg: types.LocalPkg}
- return types.NewField(src.NoXPos, sym, typ)
- }
- argtype := types.NewArray(types.Types[types.TUINT8], stksize)
- argtype.Width = stksize
- argtype.Align = 1
- // These fields must match the ones in runtime/runtime2.go:_defer and
- // cmd/compile/internal/gc/ssa.go:(*state).call.
- fields := []*types.Field{
- makefield("siz", types.Types[types.TUINT32]),
- makefield("started", types.Types[types.TBOOL]),
- makefield("heap", types.Types[types.TBOOL]),
- makefield("openDefer", types.Types[types.TBOOL]),
- makefield("sp", types.Types[types.TUINTPTR]),
- makefield("pc", types.Types[types.TUINTPTR]),
- // Note: the types here don't really matter. Defer structures
- // are always scanned explicitly during stack copying and GC,
- // so we make them uintptr type even though they are real pointers.
- makefield("fn", types.Types[types.TUINTPTR]),
- makefield("_panic", types.Types[types.TUINTPTR]),
- makefield("link", types.Types[types.TUINTPTR]),
- makefield("framepc", types.Types[types.TUINTPTR]),
- makefield("varp", types.Types[types.TUINTPTR]),
- makefield("fd", types.Types[types.TUINTPTR]),
- makefield("args", argtype),
- }
-
- // build struct holding the above fields
- s := types.NewStruct(types.NoPkg, fields)
- s.SetNoalg(true)
- CalcStructSize(s)
- return s
-}
-
-// f is method type, with receiver.
-// return function type, receiver as first argument (or not).
-func methodfunc(f *types.Type, receiver *types.Type) *types.Type {
- inLen := f.Params().Fields().Len()
- if receiver != nil {
- inLen++
- }
- in := make([]*ir.Field, 0, inLen)
-
- if receiver != nil {
- d := anonfield(receiver)
- in = append(in, d)
- }
-
- for _, t := range f.Params().Fields().Slice() {
- d := anonfield(t.Type)
- d.IsDDD = t.IsDDD()
- in = append(in, d)
- }
-
- outLen := f.Results().Fields().Len()
- out := make([]*ir.Field, 0, outLen)
- for _, t := range f.Results().Fields().Slice() {
- d := anonfield(t.Type)
- out = append(out, d)
- }
-
- return functype(nil, in, out)
-}
-
// methods returns the methods of the non-interface type t, sorted by name.
// Generates stub functions as needed.
-func methods(t *types.Type) []*Sig {
+func methods(t *types.Type) []*typeSig {
// method type
- mt := methtype(t)
+ mt := types.ReceiverBaseType(t)
if mt == nil {
return nil
}
- expandmeth(mt)
+ typecheck.CalcMethods(mt)
// type stored in interface word
it := t
- if !isdirectiface(it) {
+ if !types.IsDirectIface(it) {
it = types.NewPtr(t)
}
// make list of methods for t,
// generating code if necessary.
- var ms []*Sig
+ var ms []*typeSig
for _, f := range mt.AllMethods().Slice() {
if !f.IsMethod() {
base.Fatalf("non-method on %v method %v %v\n", mt, f.Sym, f)
// if pointer receiver but non-pointer t and
// this is not an embedded pointer inside a struct,
// method does not apply.
- if !isMethodApplicable(t, f) {
+ if !types.IsMethodApplicable(t, f) {
continue
}
- sig := &Sig{
+ sig := &typeSig{
name: method,
- isym: methodSym(it, method),
- tsym: methodSym(t, method),
- type_: methodfunc(f.Type, t),
- mtype: methodfunc(f.Type, nil),
+ isym: ir.MethodSym(it, method),
+ tsym: ir.MethodSym(t, method),
+ type_: typecheck.NewMethodType(f.Type, t),
+ mtype: typecheck.NewMethodType(f.Type, nil),
}
ms = append(ms, sig)
}
// imethods returns the methods of the interface type t, sorted by name.
-func imethods(t *types.Type) []*Sig {
- var methods []*Sig
+func imethods(t *types.Type) []*typeSig {
+ var methods []*typeSig
for _, f := range t.Fields().Slice() {
if f.Type.Kind() != types.TFUNC || f.Sym == nil {
continue
}
}
- sig := &Sig{
+ sig := &typeSig{
name: f.Sym,
mtype: f.Type,
- type_: methodfunc(f.Type, nil),
+ type_: typecheck.NewMethodType(f.Type, nil),
}
methods = append(methods, sig)
// IfaceType.Method is not in the reflect data.
// Generate the method body, so that compiled
// code can refer to it.
- isym := methodSym(t, f.Sym)
+ isym := ir.MethodSym(t, f.Sym)
if !isym.Siggen() {
isym.SetSiggen(true)
genwrapper(t, f, isym)
// If we are compiling the runtime package, there are two runtime packages around
// -- localpkg and Runtimepkg. We don't want to produce import path symbols for
// both of them, so just produce one for localpkg.
- if base.Ctxt.Pkgpath == "runtime" && p == Runtimepkg {
+ if base.Ctxt.Pkgpath == "runtime" && p == ir.Pkgs.Runtime {
return
}
s := base.Ctxt.Lookup("type..importpath." + p.Prefix + ".")
ot := dnameData(s, 0, str, "", nil, false)
- ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA)
+ objw.Global(s, int32(ot), obj.DUPOK|obj.RODATA)
s.Set(obj.AttrContentAddressable, true)
p.Pathsym = s
}
func dgopkgpath(s *obj.LSym, ot int, pkg *types.Pkg) int {
if pkg == nil {
- return duintptr(s, ot, 0)
+ return objw.Uintptr(s, ot, 0)
}
if pkg == types.LocalPkg && base.Ctxt.Pkgpath == "" {
// Every package that imports this one directly defines the symbol.
// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
ns := base.Ctxt.Lookup(`type..importpath."".`)
- return dsymptr(s, ot, ns, 0)
+ return objw.SymPtr(s, ot, ns, 0)
}
dimportpath(pkg)
- return dsymptr(s, ot, pkg.Pathsym, 0)
+ return objw.SymPtr(s, ot, pkg.Pathsym, 0)
}
// dgopkgpathOff writes an offset relocation in s at offset ot to the pkg path symbol.
func dgopkgpathOff(s *obj.LSym, ot int, pkg *types.Pkg) int {
if pkg == nil {
- return duint32(s, ot, 0)
+ return objw.Uint32(s, ot, 0)
}
if pkg == types.LocalPkg && base.Ctxt.Pkgpath == "" {
// If we don't know the full import path of the package being compiled
// Every package that imports this one directly defines the symbol.
// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
ns := base.Ctxt.Lookup(`type..importpath."".`)
- return dsymptrOff(s, ot, ns)
+ return objw.SymPtrOff(s, ot, ns)
}
dimportpath(pkg)
- return dsymptrOff(s, ot, pkg.Pathsym)
+ return objw.SymPtrOff(s, ot, pkg.Pathsym)
}
// dnameField dumps a reflect.name for a struct field.
base.Fatalf("package mismatch for %v", ft.Sym)
}
nsym := dname(ft.Sym.Name, ft.Note, nil, types.IsExported(ft.Sym.Name))
- return dsymptr(lsym, ot, nsym, 0)
+ return objw.SymPtr(lsym, ot, nsym, 0)
}
// dnameData writes the contents of a reflect.name into s at offset ot.
return s
}
ot := dnameData(s, 0, name, tag, pkg, exported)
- ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA)
+ objw.Global(s, int32(ot), obj.DUPOK|obj.RODATA)
s.Set(obj.AttrContentAddressable, true)
return s
}
if t.Sym() == nil && len(m) == 0 {
return ot
}
- noff := int(Rnd(int64(ot), int64(Widthptr)))
+ noff := int(types.Rnd(int64(ot), int64(types.PtrSize)))
if noff != ot {
base.Fatalf("unexpected alignment in dextratype for %v", t)
}
for _, a := range m {
- dtypesym(a.type_)
+ WriteType(a.type_)
}
ot = dgopkgpathOff(lsym, ot, typePkg(t))
base.Fatalf("methods are too far away on %v: %d", t, dataAdd)
}
- ot = duint16(lsym, ot, uint16(mcount))
- ot = duint16(lsym, ot, uint16(xcount))
- ot = duint32(lsym, ot, uint32(dataAdd))
- ot = duint32(lsym, ot, 0)
+ ot = objw.Uint16(lsym, ot, uint16(mcount))
+ ot = objw.Uint16(lsym, ot, uint16(xcount))
+ ot = objw.Uint32(lsym, ot, uint32(dataAdd))
+ ot = objw.Uint32(lsym, ot, 0)
return ot
}
}
nsym := dname(a.name.Name, "", pkg, exported)
- ot = dsymptrOff(lsym, ot, nsym)
- ot = dmethodptrOff(lsym, ot, dtypesym(a.mtype))
+ ot = objw.SymPtrOff(lsym, ot, nsym)
+ ot = dmethodptrOff(lsym, ot, WriteType(a.mtype))
ot = dmethodptrOff(lsym, ot, a.isym.Linksym())
ot = dmethodptrOff(lsym, ot, a.tsym.Linksym())
}
}
func dmethodptrOff(s *obj.LSym, ot int, x *obj.LSym) int {
- duint32(s, ot, 0)
+ objw.Uint32(s, ot, 0)
r := obj.Addrel(s)
r.Off = int32(ot)
r.Siz = 4
types.TUNSAFEPTR: objabi.KindUnsafePointer,
}
-// typeptrdata returns the length in bytes of the prefix of t
-// containing pointer data. Anything after this offset is scalar data.
-func typeptrdata(t *types.Type) int64 {
- if !t.HasPointers() {
- return 0
- }
-
- switch t.Kind() {
- case types.TPTR,
- types.TUNSAFEPTR,
- types.TFUNC,
- types.TCHAN,
- types.TMAP:
- return int64(Widthptr)
-
- case types.TSTRING:
- // struct { byte *str; intgo len; }
- return int64(Widthptr)
-
- case types.TINTER:
- // struct { Itab *tab; void *data; } or
- // struct { Type *type; void *data; }
- // Note: see comment in plive.go:onebitwalktype1.
- return 2 * int64(Widthptr)
-
- case types.TSLICE:
- // struct { byte *array; uintgo len; uintgo cap; }
- return int64(Widthptr)
-
- case types.TARRAY:
- // haspointers already eliminated t.NumElem() == 0.
- return (t.NumElem()-1)*t.Elem().Width + typeptrdata(t.Elem())
-
- case types.TSTRUCT:
- // Find the last field that has pointers.
- var lastPtrField *types.Field
- for _, t1 := range t.Fields().Slice() {
- if t1.Type.HasPointers() {
- lastPtrField = t1
- }
- }
- return lastPtrField.Offset + typeptrdata(lastPtrField.Type)
-
- default:
- base.Fatalf("typeptrdata: unexpected type, %v", t)
- return 0
- }
-}
-
// tflag is documented in reflect/type.go.
//
// tflag values must be kept in sync with copies in:
// dcommontype dumps the contents of a reflect.rtype (runtime._type).
func dcommontype(lsym *obj.LSym, t *types.Type) int {
- dowidth(t)
+ types.CalcSize(t)
eqfunc := geneq(t)
sptrWeak := true
if t.Sym() != nil || methods(tptr) != nil {
sptrWeak = false
}
- sptr = dtypesym(tptr)
+ sptr = WriteType(tptr)
}
gcsym, useGCProg, ptrdata := dgcsym(t)
// ptrToThis typeOff
// }
ot := 0
- ot = duintptr(lsym, ot, uint64(t.Width))
- ot = duintptr(lsym, ot, uint64(ptrdata))
- ot = duint32(lsym, ot, typehash(t))
+ ot = objw.Uintptr(lsym, ot, uint64(t.Width))
+ ot = objw.Uintptr(lsym, ot, uint64(ptrdata))
+ ot = objw.Uint32(lsym, ot, types.TypeHash(t))
var tflag uint8
if uncommonSize(t) != 0 {
if t.Sym() != nil && t.Sym().Name != "" {
tflag |= tflagNamed
}
- if IsRegularMemory(t) {
+ if isRegularMemory(t) {
tflag |= tflagRegularMemory
}
}
}
- ot = duint8(lsym, ot, tflag)
+ ot = objw.Uint8(lsym, ot, tflag)
// runtime (and common sense) expects alignment to be a power of two.
i := int(t.Align)
if i&(i-1) != 0 {
base.Fatalf("invalid alignment %d for %v", t.Align, t)
}
- ot = duint8(lsym, ot, t.Align) // align
- ot = duint8(lsym, ot, t.Align) // fieldAlign
+ ot = objw.Uint8(lsym, ot, t.Align) // align
+ ot = objw.Uint8(lsym, ot, t.Align) // fieldAlign
i = kinds[t.Kind()]
- if isdirectiface(t) {
+ if types.IsDirectIface(t) {
i |= objabi.KindDirectIface
}
if useGCProg {
i |= objabi.KindGCProg
}
- ot = duint8(lsym, ot, uint8(i)) // kind
+ ot = objw.Uint8(lsym, ot, uint8(i)) // kind
if eqfunc != nil {
- ot = dsymptr(lsym, ot, eqfunc, 0) // equality function
+ ot = objw.SymPtr(lsym, ot, eqfunc, 0) // equality function
} else {
- ot = duintptr(lsym, ot, 0) // type we can't do == with
+ ot = objw.Uintptr(lsym, ot, 0) // type we can't do == with
}
- ot = dsymptr(lsym, ot, gcsym, 0) // gcdata
+ ot = objw.SymPtr(lsym, ot, gcsym, 0) // gcdata
nsym := dname(p, "", nil, exported)
- ot = dsymptrOff(lsym, ot, nsym) // str
+ ot = objw.SymPtrOff(lsym, ot, nsym) // str
// ptrToThis
if sptr == nil {
- ot = duint32(lsym, ot, 0)
+ ot = objw.Uint32(lsym, ot, 0)
} else if sptrWeak {
- ot = dsymptrWeakOff(lsym, ot, sptr)
+ ot = objw.SymPtrWeakOff(lsym, ot, sptr)
} else {
- ot = dsymptrOff(lsym, ot, sptr)
+ ot = objw.SymPtrOff(lsym, ot, sptr)
}
return ot
}
-// typeHasNoAlg reports whether t does not have any associated hash/eq
-// algorithms because t, or some component of t, is marked Noalg.
-func typeHasNoAlg(t *types.Type) bool {
- a, bad := algtype1(t)
- return a == ANOEQ && bad.Noalg()
-}
-
-func typesymname(t *types.Type) string {
- name := t.ShortString()
- // Use a separate symbol name for Noalg types for #17752.
- if typeHasNoAlg(t) {
- name = "noalg." + name
- }
- return name
-}
-
-// Fake package for runtime type info (headers)
-// Don't access directly, use typeLookup below.
-var (
- typepkgmu sync.Mutex // protects typepkg lookups
- typepkg = types.NewPkg("type", "type")
-)
-
-func typeLookup(name string) *types.Sym {
- typepkgmu.Lock()
- s := typepkg.Lookup(name)
- typepkgmu.Unlock()
- return s
-}
-
-func typesym(t *types.Type) *types.Sym {
- return typeLookup(typesymname(t))
-}
-
-// tracksym returns the symbol for tracking use of field/method f, assumed
+// TrackSym returns the symbol for tracking use of field/method f, assumed
// to be a member of struct/interface type t.
-func tracksym(t *types.Type, f *types.Field) *types.Sym {
- return trackpkg.Lookup(t.ShortString() + "." + f.Sym.Name)
+func TrackSym(t *types.Type, f *types.Field) *types.Sym {
+ return ir.Pkgs.Track.Lookup(t.ShortString() + "." + f.Sym.Name)
}
-func typesymprefix(prefix string, t *types.Type) *types.Sym {
+func TypeSymPrefix(prefix string, t *types.Type) *types.Sym {
p := prefix + "." + t.ShortString()
- s := typeLookup(p)
+ s := types.TypeSymLookup(p)
// This function is for looking up type-related generated functions
// (e.g. eq and hash). Make sure they are indeed generated.
signatmu.Lock()
- addsignat(t)
+ NeedRuntimeType(t)
signatmu.Unlock()
//print("algsym: %s -> %+S\n", p, s);
return s
}
-func typenamesym(t *types.Type) *types.Sym {
+func TypeSym(t *types.Type) *types.Sym {
if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() {
base.Fatalf("typenamesym %v", t)
}
- s := typesym(t)
+ s := types.TypeSym(t)
signatmu.Lock()
- addsignat(t)
+ NeedRuntimeType(t)
signatmu.Unlock()
return s
}
-func typename(t *types.Type) *ir.AddrExpr {
- s := typenamesym(t)
+func TypePtr(t *types.Type) *ir.AddrExpr {
+ s := TypeSym(t)
if s.Def == nil {
n := ir.NewNameAt(src.NoXPos, s)
n.SetType(types.Types[types.TUINT8])
- n.SetClass(ir.PEXTERN)
+ n.Class_ = ir.PEXTERN
n.SetTypecheck(1)
s.Def = n
}
- n := nodAddr(ir.AsNode(s.Def))
+ n := typecheck.NodAddr(ir.AsNode(s.Def))
n.SetType(types.NewPtr(s.Def.Type()))
n.SetTypecheck(1)
return n
}
-func itabname(t, itype *types.Type) *ir.AddrExpr {
+func ITabAddr(t, itype *types.Type) *ir.AddrExpr {
if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() {
base.Fatalf("itabname(%v, %v)", t, itype)
}
- s := itabpkg.Lookup(t.ShortString() + "," + itype.ShortString())
+ s := ir.Pkgs.Itab.Lookup(t.ShortString() + "," + itype.ShortString())
if s.Def == nil {
- n := NewName(s)
+ n := typecheck.NewName(s)
n.SetType(types.Types[types.TUINT8])
- n.SetClass(ir.PEXTERN)
+ n.Class_ = ir.PEXTERN
n.SetTypecheck(1)
s.Def = n
itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: s.Linksym()})
}
- n := nodAddr(ir.AsNode(s.Def))
+ n := typecheck.NodAddr(ir.AsNode(s.Def))
n.SetType(types.NewPtr(s.Def.Type()))
n.SetTypecheck(1)
return n
}
-// isreflexive reports whether t has a reflexive equality operator.
-// That is, if x==x for all x of type t.
-func isreflexive(t *types.Type) bool {
- switch t.Kind() {
- case types.TBOOL,
- types.TINT,
- types.TUINT,
- types.TINT8,
- types.TUINT8,
- types.TINT16,
- types.TUINT16,
- types.TINT32,
- types.TUINT32,
- types.TINT64,
- types.TUINT64,
- types.TUINTPTR,
- types.TPTR,
- types.TUNSAFEPTR,
- types.TSTRING,
- types.TCHAN:
- return true
-
- case types.TFLOAT32,
- types.TFLOAT64,
- types.TCOMPLEX64,
- types.TCOMPLEX128,
- types.TINTER:
- return false
-
- case types.TARRAY:
- return isreflexive(t.Elem())
-
- case types.TSTRUCT:
- for _, t1 := range t.Fields().Slice() {
- if !isreflexive(t1.Type) {
- return false
- }
- }
- return true
-
- default:
- base.Fatalf("bad type for map key: %v", t)
- return false
- }
-}
-
// needkeyupdate reports whether map updates with t as a key
// need the key to be updated.
func needkeyupdate(t *types.Type) bool {
return t
}
-func dtypesym(t *types.Type) *obj.LSym {
+func WriteType(t *types.Type) *obj.LSym {
t = formalType(t)
if t.IsUntyped() {
base.Fatalf("dtypesym %v", t)
}
- s := typesym(t)
+ s := types.TypeSym(t)
lsym := s.Linksym()
if s.Siggen() {
return lsym
if base.Ctxt.Pkgpath != "runtime" || (tbase != types.Types[tbase.Kind()] && tbase != types.ByteType && tbase != types.RuneType && tbase != types.ErrorType) { // int, float, etc
// named types from other files are defined only by those files
if tbase.Sym() != nil && tbase.Sym().Pkg != types.LocalPkg {
- if i := BaseTypeIndex(t); i >= 0 {
+ if i := typecheck.BaseTypeIndex(t); i >= 0 {
lsym.Pkg = tbase.Sym().Pkg.Prefix
lsym.SymIdx = int32(i)
lsym.Set(obj.AttrIndexed, true)
case types.TARRAY:
// ../../../../runtime/type.go:/arrayType
- s1 := dtypesym(t.Elem())
+ s1 := WriteType(t.Elem())
t2 := types.NewSlice(t.Elem())
- s2 := dtypesym(t2)
+ s2 := WriteType(t2)
ot = dcommontype(lsym, t)
- ot = dsymptr(lsym, ot, s1, 0)
- ot = dsymptr(lsym, ot, s2, 0)
- ot = duintptr(lsym, ot, uint64(t.NumElem()))
+ ot = objw.SymPtr(lsym, ot, s1, 0)
+ ot = objw.SymPtr(lsym, ot, s2, 0)
+ ot = objw.Uintptr(lsym, ot, uint64(t.NumElem()))
ot = dextratype(lsym, ot, t, 0)
case types.TSLICE:
// ../../../../runtime/type.go:/sliceType
- s1 := dtypesym(t.Elem())
+ s1 := WriteType(t.Elem())
ot = dcommontype(lsym, t)
- ot = dsymptr(lsym, ot, s1, 0)
+ ot = objw.SymPtr(lsym, ot, s1, 0)
ot = dextratype(lsym, ot, t, 0)
case types.TCHAN:
// ../../../../runtime/type.go:/chanType
- s1 := dtypesym(t.Elem())
+ s1 := WriteType(t.Elem())
ot = dcommontype(lsym, t)
- ot = dsymptr(lsym, ot, s1, 0)
- ot = duintptr(lsym, ot, uint64(t.ChanDir()))
+ ot = objw.SymPtr(lsym, ot, s1, 0)
+ ot = objw.Uintptr(lsym, ot, uint64(t.ChanDir()))
ot = dextratype(lsym, ot, t, 0)
case types.TFUNC:
for _, t1 := range t.Recvs().Fields().Slice() {
- dtypesym(t1.Type)
+ WriteType(t1.Type)
}
isddd := false
for _, t1 := range t.Params().Fields().Slice() {
isddd = t1.IsDDD()
- dtypesym(t1.Type)
+ WriteType(t1.Type)
}
for _, t1 := range t.Results().Fields().Slice() {
- dtypesym(t1.Type)
+ WriteType(t1.Type)
}
ot = dcommontype(lsym, t)
if isddd {
outCount |= 1 << 15
}
- ot = duint16(lsym, ot, uint16(inCount))
- ot = duint16(lsym, ot, uint16(outCount))
- if Widthptr == 8 {
+ ot = objw.Uint16(lsym, ot, uint16(inCount))
+ ot = objw.Uint16(lsym, ot, uint16(outCount))
+ if types.PtrSize == 8 {
ot += 4 // align for *rtype
}
- dataAdd := (inCount + t.NumResults()) * Widthptr
+ dataAdd := (inCount + t.NumResults()) * types.PtrSize
ot = dextratype(lsym, ot, t, dataAdd)
// Array of rtype pointers follows funcType.
for _, t1 := range t.Recvs().Fields().Slice() {
- ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)
+ ot = objw.SymPtr(lsym, ot, WriteType(t1.Type), 0)
}
for _, t1 := range t.Params().Fields().Slice() {
- ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)
+ ot = objw.SymPtr(lsym, ot, WriteType(t1.Type), 0)
}
for _, t1 := range t.Results().Fields().Slice() {
- ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)
+ ot = objw.SymPtr(lsym, ot, WriteType(t1.Type), 0)
}
case types.TINTER:
m := imethods(t)
n := len(m)
for _, a := range m {
- dtypesym(a.type_)
+ WriteType(a.type_)
}
// ../../../../runtime/type.go:/interfaceType
}
ot = dgopkgpath(lsym, ot, tpkg)
- ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t))
- ot = duintptr(lsym, ot, uint64(n))
- ot = duintptr(lsym, ot, uint64(n))
+ ot = objw.SymPtr(lsym, ot, lsym, ot+3*types.PtrSize+uncommonSize(t))
+ ot = objw.Uintptr(lsym, ot, uint64(n))
+ ot = objw.Uintptr(lsym, ot, uint64(n))
dataAdd := imethodSize() * n
ot = dextratype(lsym, ot, t, dataAdd)
}
nsym := dname(a.name.Name, "", pkg, exported)
- ot = dsymptrOff(lsym, ot, nsym)
- ot = dsymptrOff(lsym, ot, dtypesym(a.type_))
+ ot = objw.SymPtrOff(lsym, ot, nsym)
+ ot = objw.SymPtrOff(lsym, ot, WriteType(a.type_))
}
// ../../../../runtime/type.go:/mapType
case types.TMAP:
- s1 := dtypesym(t.Key())
- s2 := dtypesym(t.Elem())
- s3 := dtypesym(bmap(t))
+ s1 := WriteType(t.Key())
+ s2 := WriteType(t.Elem())
+ s3 := WriteType(MapBucketType(t))
hasher := genhash(t.Key())
ot = dcommontype(lsym, t)
- ot = dsymptr(lsym, ot, s1, 0)
- ot = dsymptr(lsym, ot, s2, 0)
- ot = dsymptr(lsym, ot, s3, 0)
- ot = dsymptr(lsym, ot, hasher, 0)
+ ot = objw.SymPtr(lsym, ot, s1, 0)
+ ot = objw.SymPtr(lsym, ot, s2, 0)
+ ot = objw.SymPtr(lsym, ot, s3, 0)
+ ot = objw.SymPtr(lsym, ot, hasher, 0)
var flags uint32
// Note: flags must match maptype accessors in ../../../../runtime/type.go
// and maptype builder in ../../../../reflect/type.go:MapOf.
if t.Key().Width > MAXKEYSIZE {
- ot = duint8(lsym, ot, uint8(Widthptr))
+ ot = objw.Uint8(lsym, ot, uint8(types.PtrSize))
flags |= 1 // indirect key
} else {
- ot = duint8(lsym, ot, uint8(t.Key().Width))
+ ot = objw.Uint8(lsym, ot, uint8(t.Key().Width))
}
if t.Elem().Width > MAXELEMSIZE {
- ot = duint8(lsym, ot, uint8(Widthptr))
+ ot = objw.Uint8(lsym, ot, uint8(types.PtrSize))
flags |= 2 // indirect value
} else {
- ot = duint8(lsym, ot, uint8(t.Elem().Width))
+ ot = objw.Uint8(lsym, ot, uint8(t.Elem().Width))
}
- ot = duint16(lsym, ot, uint16(bmap(t).Width))
- if isreflexive(t.Key()) {
+ ot = objw.Uint16(lsym, ot, uint16(MapBucketType(t).Width))
+ if types.IsReflexive(t.Key()) {
flags |= 4 // reflexive key
}
if needkeyupdate(t.Key()) {
if hashMightPanic(t.Key()) {
flags |= 16 // hash might panic
}
- ot = duint32(lsym, ot, flags)
+ ot = objw.Uint32(lsym, ot, flags)
ot = dextratype(lsym, ot, t, 0)
case types.TPTR:
}
// ../../../../runtime/type.go:/ptrType
- s1 := dtypesym(t.Elem())
+ s1 := WriteType(t.Elem())
ot = dcommontype(lsym, t)
- ot = dsymptr(lsym, ot, s1, 0)
+ ot = objw.SymPtr(lsym, ot, s1, 0)
ot = dextratype(lsym, ot, t, 0)
// ../../../../runtime/type.go:/structType
case types.TSTRUCT:
fields := t.Fields().Slice()
for _, t1 := range fields {
- dtypesym(t1.Type)
+ WriteType(t1.Type)
}
// All non-exported struct field names within a struct
ot = dcommontype(lsym, t)
ot = dgopkgpath(lsym, ot, spkg)
- ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t))
- ot = duintptr(lsym, ot, uint64(len(fields)))
- ot = duintptr(lsym, ot, uint64(len(fields)))
+ ot = objw.SymPtr(lsym, ot, lsym, ot+3*types.PtrSize+uncommonSize(t))
+ ot = objw.Uintptr(lsym, ot, uint64(len(fields)))
+ ot = objw.Uintptr(lsym, ot, uint64(len(fields)))
dataAdd := len(fields) * structfieldSize()
ot = dextratype(lsym, ot, t, dataAdd)
for _, f := range fields {
// ../../../../runtime/type.go:/structField
ot = dnameField(lsym, ot, spkg, f)
- ot = dsymptr(lsym, ot, dtypesym(f.Type), 0)
+ ot = objw.SymPtr(lsym, ot, WriteType(f.Type), 0)
offsetAnon := uint64(f.Offset) << 1
if offsetAnon>>1 != uint64(f.Offset) {
base.Fatalf("%v: bad field offset for %s", t, f.Sym.Name)
if f.Embedded != 0 {
offsetAnon |= 1
}
- ot = duintptr(lsym, ot, offsetAnon)
+ ot = objw.Uintptr(lsym, ot, offsetAnon)
}
}
ot = dextratypeData(lsym, ot, t)
- ggloblsym(lsym, int32(ot), int16(dupok|obj.RODATA))
+ objw.Global(lsym, int32(ot), int16(dupok|obj.RODATA))
// The linker will leave a table of all the typelinks for
// types in the binary, so the runtime can find them.
}
}
// Do not put Noalg types in typelinks. See issue #22605.
- if typeHasNoAlg(t) {
+ if types.TypeHasNoAlg(t) {
keep = false
}
lsym.Set(obj.AttrMakeTypelink, keep)
return lsym
}
-// ifaceMethodOffset returns the offset of the i-th method in the interface
+// InterfaceMethodOffset returns the offset of the i-th method in the interface
// type descriptor, ityp.
-func ifaceMethodOffset(ityp *types.Type, i int64) int64 {
+func InterfaceMethodOffset(ityp *types.Type, i int64) int64 {
// interface type descriptor layout is struct {
// _type // commonSize
// pkgpath // 1 word
// [...]imethod
// }
// The size of imethod is 8.
- return int64(commonSize()+4*Widthptr+uncommonSize(ityp)) + i*8
+ return int64(commonSize()+4*types.PtrSize+uncommonSize(ityp)) + i*8
}
// for each itabEntry, gather the methods on
// the concrete type that implement the interface
-func peekitabs() {
+func CompileITabs() {
for i := range itabs {
tab := &itabs[i]
methods := genfun(tab.t, tab.itype)
return out
}
-// itabsym uses the information gathered in
+// ITabSym uses the information gathered in
// peekitabs to de-virtualize interface methods.
// Since this is called by the SSA backend, it shouldn't
// generate additional Nodes, Syms, etc.
-func itabsym(it *obj.LSym, offset int64) *obj.LSym {
+func ITabSym(it *obj.LSym, offset int64) *obj.LSym {
var syms []*obj.LSym
if it == nil {
return nil
}
// keep this arithmetic in sync with *itab layout
- methodnum := int((offset - 2*int64(Widthptr) - 8) / int64(Widthptr))
+ methodnum := int((offset - 2*int64(types.PtrSize) - 8) / int64(types.PtrSize))
if methodnum >= len(syms) {
return nil
}
return syms[methodnum]
}
-// addsignat ensures that a runtime type descriptor is emitted for t.
-func addsignat(t *types.Type) {
+// NeedRuntimeType ensures that a runtime type descriptor is emitted for t.
+func NeedRuntimeType(t *types.Type) {
if _, ok := signatset[t]; !ok {
signatset[t] = struct{}{}
signatslice = append(signatslice, t)
}
}
-func addsignats(dcls []ir.Node) {
- // copy types from dcl list to signatset
- for _, n := range dcls {
- if n.Op() == ir.OTYPE {
- addsignat(n.Type())
- }
- }
-}
-
-func dumpsignats() {
+func WriteRuntimeTypes() {
// Process signatset. Use a loop, as dtypesym adds
// entries to signatset while it is being processed.
signats := make([]typeAndStr, len(signatslice))
signats = signats[:0]
// Transfer entries to a slice and sort, for reproducible builds.
for _, t := range signatslice {
- signats = append(signats, typeAndStr{t: t, short: typesymname(t), regular: t.String()})
+ signats = append(signats, typeAndStr{t: t, short: types.TypeSymName(t), regular: t.String()})
delete(signatset, t)
}
signatslice = signatslice[:0]
sort.Sort(typesByString(signats))
for _, ts := range signats {
t := ts.t
- dtypesym(t)
+ WriteType(t)
if t.Sym() != nil {
- dtypesym(types.NewPtr(t))
+ WriteType(types.NewPtr(t))
}
}
}
}
-func dumptabs() {
+func WriteTabs() {
// process itabs
for _, i := range itabs {
// dump empty itab symbol into i.sym
// _ [4]byte
// fun [1]uintptr // variable sized
// }
- o := dsymptr(i.lsym, 0, dtypesym(i.itype), 0)
- o = dsymptr(i.lsym, o, dtypesym(i.t), 0)
- o = duint32(i.lsym, o, typehash(i.t)) // copy of type hash
- o += 4 // skip unused field
+ o := objw.SymPtr(i.lsym, 0, WriteType(i.itype), 0)
+ o = objw.SymPtr(i.lsym, o, WriteType(i.t), 0)
+ o = objw.Uint32(i.lsym, o, types.TypeHash(i.t)) // copy of type hash
+ o += 4 // skip unused field
for _, fn := range genfun(i.t, i.itype) {
- o = dsymptr(i.lsym, o, fn, 0) // method pointer for each method
+ o = objw.SymPtr(i.lsym, o, fn, 0) // method pointer for each method
}
// Nothing writes static itabs, so they are read only.
- ggloblsym(i.lsym, int32(o), int16(obj.DUPOK|obj.RODATA))
+ objw.Global(i.lsym, int32(o), int16(obj.DUPOK|obj.RODATA))
i.lsym.Set(obj.AttrContentAddressable, true)
}
// typ typeOff // pointer to symbol
// }
nsym := dname(p.s.Name, "", nil, true)
- tsym := dtypesym(p.t)
- ot = dsymptrOff(s, ot, nsym)
- ot = dsymptrOff(s, ot, tsym)
+ tsym := WriteType(p.t)
+ ot = objw.SymPtrOff(s, ot, nsym)
+ ot = objw.SymPtrOff(s, ot, tsym)
// Plugin exports symbols as interfaces. Mark their types
// as UsedInIface.
tsym.Set(obj.AttrUsedInIface, true)
}
- ggloblsym(s, int32(ot), int16(obj.RODATA))
+ objw.Global(s, int32(ot), int16(obj.RODATA))
ot = 0
s = base.Ctxt.Lookup("go.plugin.exports")
for _, p := range ptabs {
- ot = dsymptr(s, ot, p.s.Linksym(), 0)
+ ot = objw.SymPtr(s, ot, p.s.Linksym(), 0)
}
- ggloblsym(s, int32(ot), int16(obj.RODATA))
+ objw.Global(s, int32(ot), int16(obj.RODATA))
}
}
-func dumpimportstrings() {
+func WriteImportStrings() {
// generate import strings for imported packages
for _, p := range types.ImportedPkgList() {
dimportpath(p)
}
}
-func dumpbasictypes() {
+func WriteBasicTypes() {
// do basic types if compiling package runtime.
// they have to be in at least one package,
// and runtime is always loaded implicitly,
// but using runtime means fewer copies in object files.
if base.Ctxt.Pkgpath == "runtime" {
for i := types.Kind(1); i <= types.TBOOL; i++ {
- dtypesym(types.NewPtr(types.Types[i]))
+ WriteType(types.NewPtr(types.Types[i]))
}
- dtypesym(types.NewPtr(types.Types[types.TSTRING]))
- dtypesym(types.NewPtr(types.Types[types.TUNSAFEPTR]))
+ WriteType(types.NewPtr(types.Types[types.TSTRING]))
+ WriteType(types.NewPtr(types.Types[types.TUNSAFEPTR]))
// emit type structs for error and func(error) string.
// The latter is the type of an auto-generated wrapper.
- dtypesym(types.NewPtr(types.ErrorType))
+ WriteType(types.NewPtr(types.ErrorType))
- dtypesym(functype(nil, []*ir.Field{anonfield(types.ErrorType)}, []*ir.Field{anonfield(types.Types[types.TSTRING])}))
+ WriteType(typecheck.NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, types.ErrorType)}, []*ir.Field{ir.NewField(base.Pos, nil, nil, types.Types[types.TSTRING])}))
// add paths for runtime and main, which 6l imports implicitly.
- dimportpath(Runtimepkg)
+ dimportpath(ir.Pkgs.Runtime)
if base.Flag.Race {
- dimportpath(racepkg)
+ dimportpath(ir.Pkgs.Race)
}
if base.Flag.MSan {
- dimportpath(msanpkg)
+ dimportpath(ir.Pkgs.Msan)
}
dimportpath(types.NewPkg("main", ""))
}
// along with a boolean reporting whether the UseGCProg bit should be set in
// the type kind, and the ptrdata field to record in the reflect type information.
func dgcsym(t *types.Type) (lsym *obj.LSym, useGCProg bool, ptrdata int64) {
- ptrdata = typeptrdata(t)
- if ptrdata/int64(Widthptr) <= maxPtrmaskBytes*8 {
+ ptrdata = types.PtrDataSize(t)
+ if ptrdata/int64(types.PtrSize) <= maxPtrmaskBytes*8 {
lsym = dgcptrmask(t)
return
}
// dgcptrmask emits and returns the symbol containing a pointer mask for type t.
func dgcptrmask(t *types.Type) *obj.LSym {
- ptrmask := make([]byte, (typeptrdata(t)/int64(Widthptr)+7)/8)
+ ptrmask := make([]byte, (types.PtrDataSize(t)/int64(types.PtrSize)+7)/8)
fillptrmask(t, ptrmask)
p := fmt.Sprintf("gcbits.%x", ptrmask)
- sym := Runtimepkg.Lookup(p)
+ sym := ir.Pkgs.Runtime.Lookup(p)
lsym := sym.Linksym()
if !sym.Uniq() {
sym.SetUniq(true)
for i, x := range ptrmask {
- duint8(lsym, i, x)
+ objw.Uint8(lsym, i, x)
}
- ggloblsym(lsym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL)
+ objw.Global(lsym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL)
lsym.Set(obj.AttrContentAddressable, true)
}
return lsym
return
}
- vec := bvalloc(8 * int32(len(ptrmask)))
- onebitwalktype1(t, 0, vec)
+ vec := bitvec.New(8 * int32(len(ptrmask)))
+ liveness.SetTypeBits(t, 0, vec)
- nptr := typeptrdata(t) / int64(Widthptr)
+ nptr := types.PtrDataSize(t) / int64(types.PtrSize)
for i := int64(0); i < nptr; i++ {
if vec.Get(int32(i)) {
ptrmask[i/8] |= 1 << (uint(i) % 8)
// In practice, the size is typeptrdata(t) except for non-trivial arrays.
// For non-trivial arrays, the program describes the full t.Width size.
func dgcprog(t *types.Type) (*obj.LSym, int64) {
- dowidth(t)
+ types.CalcSize(t)
if t.Width == types.BADWIDTH {
base.Fatalf("dgcprog: %v badwidth", t)
}
- lsym := typesymprefix(".gcprog", t).Linksym()
- var p GCProg
+ lsym := TypeSymPrefix(".gcprog", t).Linksym()
+ var p gcProg
p.init(lsym)
p.emit(t, 0)
- offset := p.w.BitIndex() * int64(Widthptr)
+ offset := p.w.BitIndex() * int64(types.PtrSize)
p.end()
- if ptrdata := typeptrdata(t); offset < ptrdata || offset > t.Width {
+ if ptrdata := types.PtrDataSize(t); offset < ptrdata || offset > t.Width {
base.Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Width)
}
return lsym, offset
}
-type GCProg struct {
+type gcProg struct {
lsym *obj.LSym
symoff int
w gcprog.Writer
}
-func (p *GCProg) init(lsym *obj.LSym) {
+func (p *gcProg) init(lsym *obj.LSym) {
p.lsym = lsym
p.symoff = 4 // first 4 bytes hold program length
p.w.Init(p.writeByte)
}
}
-func (p *GCProg) writeByte(x byte) {
- p.symoff = duint8(p.lsym, p.symoff, x)
+func (p *gcProg) writeByte(x byte) {
+ p.symoff = objw.Uint8(p.lsym, p.symoff, x)
}
-func (p *GCProg) end() {
+func (p *gcProg) end() {
p.w.End()
- duint32(p.lsym, 0, uint32(p.symoff-4))
- ggloblsym(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL)
+ objw.Uint32(p.lsym, 0, uint32(p.symoff-4))
+ objw.Global(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL)
if base.Debug.GCProg > 0 {
fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.lsym)
}
}
-func (p *GCProg) emit(t *types.Type, offset int64) {
- dowidth(t)
+func (p *gcProg) emit(t *types.Type, offset int64) {
+ types.CalcSize(t)
if !t.HasPointers() {
return
}
- if t.Width == int64(Widthptr) {
- p.w.Ptr(offset / int64(Widthptr))
+ if t.Width == int64(types.PtrSize) {
+ p.w.Ptr(offset / int64(types.PtrSize))
return
}
switch t.Kind() {
base.Fatalf("GCProg.emit: unexpected type %v", t)
case types.TSTRING:
- p.w.Ptr(offset / int64(Widthptr))
+ p.w.Ptr(offset / int64(types.PtrSize))
case types.TINTER:
// Note: the first word isn't a pointer. See comment in plive.go:onebitwalktype1.
- p.w.Ptr(offset/int64(Widthptr) + 1)
+ p.w.Ptr(offset/int64(types.PtrSize) + 1)
case types.TSLICE:
- p.w.Ptr(offset / int64(Widthptr))
+ p.w.Ptr(offset / int64(types.PtrSize))
case types.TARRAY:
if t.NumElem() == 0 {
elem = elem.Elem()
}
- if !p.w.ShouldRepeat(elem.Width/int64(Widthptr), count) {
+ if !p.w.ShouldRepeat(elem.Width/int64(types.PtrSize), count) {
// Cheaper to just emit the bits.
for i := int64(0); i < count; i++ {
p.emit(elem, offset+i*elem.Width)
return
}
p.emit(elem, offset)
- p.w.ZeroUntil((offset + elem.Width) / int64(Widthptr))
- p.w.Repeat(elem.Width/int64(Widthptr), count-1)
+ p.w.ZeroUntil((offset + elem.Width) / int64(types.PtrSize))
+ p.w.Repeat(elem.Width/int64(types.PtrSize), count-1)
case types.TSTRUCT:
for _, t1 := range t.Fields().Slice() {
}
}
-// zeroaddr returns the address of a symbol with at least
+// ZeroAddr returns the address of a symbol with at least
// size bytes of zeros.
-func zeroaddr(size int64) ir.Node {
+func ZeroAddr(size int64) ir.Node {
if size >= 1<<31 {
base.Fatalf("map elem too big %d", size)
}
- if zerosize < size {
- zerosize = size
+ if ZeroSize < size {
+ ZeroSize = size
}
- s := mappkg.Lookup("zero")
+ s := ir.Pkgs.Map.Lookup("zero")
if s.Def == nil {
- x := NewName(s)
+ x := typecheck.NewName(s)
x.SetType(types.Types[types.TUINT8])
- x.SetClass(ir.PEXTERN)
+ x.Class_ = ir.PEXTERN
x.SetTypecheck(1)
s.Def = x
}
- z := nodAddr(ir.AsNode(s.Def))
+ z := typecheck.NodAddr(ir.AsNode(s.Def))
z.SetType(types.NewPtr(types.Types[types.TUINT8]))
z.SetTypecheck(1)
return z
}
+
+// CollectPTabs records the exported symbols of package main in ptabs,
+// from which the plugin export tables ("go.plugin.tabs" and
+// "go.plugin.exports") are later emitted. It does nothing unless we are
+// dynamically linking and compiling package main.
+func CollectPTabs() {
+ if !base.Ctxt.Flag_dynlink || types.LocalPkg.Name != "main" {
+ return
+ }
+ for _, exportn := range typecheck.Target.Exports {
+ s := exportn.Sym()
+ nn := ir.AsNode(s.Def)
+ if nn == nil {
+ continue
+ }
+ if nn.Op() != ir.ONAME {
+ continue
+ }
+ n := nn.(*ir.Name)
+ // Only exported names declared in package main are included.
+ if !types.IsExported(s.Name) {
+ continue
+ }
+ if s.Pkg.Name != "main" {
+ continue
+ }
+ if n.Type().Kind() == types.TFUNC && n.Class_ == ir.PFUNC {
+ // function
+ ptabs = append(ptabs, ptabEntry{s: s, t: s.Def.Type()})
+ } else {
+ // variable: record a pointer to its type.
+ ptabs = append(ptabs, ptabEntry{s: s, t: types.NewPtr(s.Def.Type())})
+ }
+ }
+}
+
+// Generate a wrapper function to convert from
+// a receiver of type T to a receiver of type U.
+// That is,
+//
+// func (t T) M() {
+// ...
+// }
+//
+// already exists; this function generates
+//
+// func (u U) M() {
+// u.M()
+// }
+//
+// where the types T and U are such that u.M() is valid
+// and calls the T.M method.
+// The resulting function is for use in method tables.
+//
+// rcvr - U
+// method - M func (t T)(), a TFIELD type struct
+// newnam - the eventual mangled name of this function
+func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) {
+ if false && base.Flag.LowerR != 0 {
+ fmt.Printf("genwrapper rcvrtype=%v method=%v newnam=%v\n", rcvr, method, newnam)
+ }
+
+ // Only generate (*T).M wrappers for T.M in T's own package.
+ if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type &&
+ rcvr.Elem().Sym() != nil && rcvr.Elem().Sym().Pkg != types.LocalPkg {
+ return
+ }
+
+ // Only generate I.M wrappers for I in I's own package
+ // but keep doing it for error.Error (was issue #29304).
+ if rcvr.IsInterface() && rcvr.Sym() != nil && rcvr.Sym().Pkg != types.LocalPkg && rcvr != types.ErrorType {
+ return
+ }
+
+ // The wrapper is compiler-generated: position it at the autogenerated
+ // location and declare it at package scope.
+ base.Pos = base.AutogeneratedPos
+ typecheck.DeclContext = ir.PEXTERN
+
+ // Build the wrapper signature: receiver ".this" of type rcvr, plus the
+ // wrapped method's parameters and results.
+ tfn := ir.NewFuncType(base.Pos,
+ ir.NewField(base.Pos, typecheck.Lookup(".this"), nil, rcvr),
+ typecheck.NewFuncParams(method.Type.Params(), true),
+ typecheck.NewFuncParams(method.Type.Results(), false))
+
+ fn := typecheck.DeclFunc(newnam, tfn)
+ fn.SetDupok(true)
+
+ // nthis is the wrapper's receiver parameter (.this).
+ nthis := ir.AsNode(tfn.Type().Recv().Nname)
+
+ methodrcvr := method.Type.Recv().Type
+
+ // generate nil pointer check for better error
+ if rcvr.IsPtr() && rcvr.Elem() == methodrcvr {
+ // generating wrapper from *T to T.
+ n := ir.NewIfStmt(base.Pos, nil, nil, nil)
+ n.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, nthis, typecheck.NodNil())
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, typecheck.LookupRuntime("panicwrap"), nil)
+ n.Body = []ir.Node{call}
+ fn.Body.Append(n)
+ }
+
+ dot := typecheck.AddImplicitDots(ir.NewSelectorExpr(base.Pos, ir.OXDOT, nthis, method.Sym))
+
+ // generate call
+ // It's not possible to use a tail call when dynamic linking on ppc64le. The
+ // bad scenario is when a local call is made to the wrapper: the wrapper will
+ // call the implementation, which might be in a different module and so set
+ // the TOC to the appropriate value for that module. But if it returns
+ // directly to the wrapper's caller, nothing will reset it to the correct
+ // value for that function.
+ if !base.Flag.Cfg.Instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !types.IsInterfaceMethod(method.Type) && !(base.Ctxt.Arch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) {
+ // generate tail call: adjust pointer receiver and jump to embedded method.
+ left := dot.X // skip final .M
+ if !left.Type().IsPtr() {
+ left = typecheck.NodAddr(left)
+ }
+ as := ir.NewAssignStmt(base.Pos, nthis, typecheck.ConvNop(left, rcvr))
+ fn.Body.Append(as)
+ fn.Body.Append(ir.NewBranchStmt(base.Pos, ir.ORETJMP, ir.MethodSym(methodrcvr, method.Sym)))
+ } else {
+ fn.SetWrapper(true) // ignore frame for panic+recover matching
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, dot, nil)
+ call.Args.Set(ir.ParamNames(tfn.Type()))
+ call.IsDDD = tfn.Type().IsVariadic()
+ if method.Type.NumResults() > 0 {
+ ret := ir.NewReturnStmt(base.Pos, nil)
+ ret.Results = []ir.Node{call}
+ fn.Body.Append(ret)
+ } else {
+ fn.Body.Append(call)
+ }
+ }
+
+ if false && base.Flag.LowerR != 0 {
+ ir.DumpList("genwrapper body", fn.Body)
+ }
+
+ typecheck.FinishFuncBody()
+ if base.Debug.DclStack != 0 {
+ types.CheckDclstack()
+ }
+
+ typecheck.Func(fn)
+ ir.CurFunc = fn
+ typecheck.Stmts(fn.Body)
+
+ // Inline calls within (*T).M wrappers. This is safe because we only
+ // generate those wrappers within the same compilation unit as (T).M.
+ // TODO(mdempsky): Investigate why we can't enable this more generally.
+ if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && rcvr.Elem().Sym() != nil {
+ inline.InlineCalls(fn)
+ }
+ // Run escape analysis on the completed wrapper.
+ escape.Batch([]*ir.Func{fn}, false)
+
+ ir.CurFunc = nil
+ typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
+}
+
+// ZeroSize is the largest size (in bytes) requested from ZeroAddr so far.
+// NOTE(review): presumably read elsewhere to size the backing "zero"
+// symbol whose address ZeroAddr returns — confirm at the use site.
+var ZeroSize int64
+
+// MarkTypeUsedInInterface marks that type t is converted to an interface.
+// This information is used in the linker in dead method elimination.
+func MarkTypeUsedInInterface(t *types.Type, from *obj.LSym) {
+ // tsym is the linker symbol of t's runtime type descriptor.
+ tsym := TypeSym(t).Linksym()
+ // Emit a marker relocation. The linker will know the type is converted
+ // to an interface if "from" is reachable. Only Sym and Type are set:
+ // the relocation records the use, it patches nothing.
+ r := obj.Addrel(from)
+ r.Sym = tsym
+ r.Type = objabi.R_USEIFACE
+}
+
+// MarkUsedIfaceMethod marks that an interface method is used in the current
+// function. n is an OCALLINTER node. Like MarkTypeUsedInInterface, this
+// emits a marker relocation for the linker.
+func MarkUsedIfaceMethod(n *ir.CallExpr) {
+ dot := n.X.(*ir.SelectorExpr)
+ ityp := dot.X.Type()
+ // tsym is the linker symbol of the interface type's descriptor.
+ tsym := TypeSym(ityp).Linksym()
+ r := obj.Addrel(ir.CurFunc.LSym)
+ r.Sym = tsym
+ // dot.Offset is the method index * PtrSize (the offset of the code
+ // pointer in the itab).
+ midx := dot.Offset / int64(types.PtrSize)
+ r.Add = InterfaceMethodOffset(ityp, midx)
+ r.Type = objabi.R_USEIFACEMETHOD
+}
package riscv64
import (
- "cmd/compile/internal/gc"
+ "cmd/compile/internal/ssagen"
"cmd/internal/obj/riscv"
)
-func Init(arch *gc.Arch) {
+func Init(arch *ssagen.ArchInfo) {
arch.LinkArch = &riscv.LinkRISCV64
arch.REGSP = riscv.REG_SP
import (
"cmd/compile/internal/base"
- "cmd/compile/internal/gc"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/riscv"
)
-func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+func zeroRange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
if cnt == 0 {
return p
}
// Adjust the frame to account for LR.
off += base.Ctxt.FixedFrameSize()
- if cnt < int64(4*gc.Widthptr) {
- for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
- p = pp.Appendpp(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_SP, off+i)
+ if cnt < int64(4*types.PtrSize) {
+ for i := int64(0); i < cnt; i += int64(types.PtrSize) {
+ p = pp.Append(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_SP, off+i)
}
return p
}
- if cnt <= int64(128*gc.Widthptr) {
- p = pp.Appendpp(p, riscv.AADDI, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_A0, 0)
+ if cnt <= int64(128*types.PtrSize) {
+ p = pp.Append(p, riscv.AADDI, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_A0, 0)
p.Reg = riscv.REG_SP
- p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+ p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = gc.Duffzero
- p.To.Offset = 8 * (128 - cnt/int64(gc.Widthptr))
+ p.To.Sym = ir.Syms.Duffzero
+ p.To.Offset = 8 * (128 - cnt/int64(types.PtrSize))
return p
}
// MOV ZERO, (T0)
// ADD $Widthptr, T0
// BNE T0, T1, loop
- p = pp.Appendpp(p, riscv.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_T0, 0)
+ p = pp.Append(p, riscv.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_T0, 0)
p.Reg = riscv.REG_SP
- p = pp.Appendpp(p, riscv.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, riscv.REG_T1, 0)
+ p = pp.Append(p, riscv.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, riscv.REG_T1, 0)
p.Reg = riscv.REG_T0
- p = pp.Appendpp(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_T0, 0)
+ p = pp.Append(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_T0, 0)
loop := p
- p = pp.Appendpp(p, riscv.AADD, obj.TYPE_CONST, 0, int64(gc.Widthptr), obj.TYPE_REG, riscv.REG_T0, 0)
- p = pp.Appendpp(p, riscv.ABNE, obj.TYPE_REG, riscv.REG_T0, 0, obj.TYPE_BRANCH, 0, 0)
+ p = pp.Append(p, riscv.AADD, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, riscv.REG_T0, 0)
+ p = pp.Append(p, riscv.ABNE, obj.TYPE_REG, riscv.REG_T0, 0, obj.TYPE_BRANCH, 0, 0)
p.Reg = riscv.REG_T1
- gc.Patch(p, loop)
+ p.To.SetTarget(loop)
return p
}
package riscv64
import (
- "cmd/compile/internal/gc"
+ "cmd/compile/internal/objw"
"cmd/internal/obj"
"cmd/internal/obj/riscv"
)
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
// Hardware nop is ADD $0, ZERO
p := pp.Prog(riscv.AADD)
p.From.Type = obj.TYPE_CONST
import (
"cmd/compile/internal/base"
- "cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/riscv"
// markMoves marks any MOVXconst ops that need to avoid clobbering flags.
// RISC-V has no flags, so this is a no-op.
-func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {}
+func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {}
-func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
s.SetPos(v.Pos)
switch v.Op {
case ssa.OpArg:
// input args need no code
case ssa.OpPhi:
- gc.CheckLoweredPhi(v)
+ ssagen.CheckLoweredPhi(v)
case ssa.OpCopy, ssa.OpRISCV64MOVconvert, ssa.OpRISCV64MOVDreg:
if v.Type.IsMemory() {
return
return
}
p := s.Prog(loadByType(v.Type))
- gc.AddrAuto(&p.From, v.Args[0])
+ ssagen.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpStoreReg:
p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
- gc.AddrAuto(&p.To, v)
+ ssagen.AddrAuto(&p.To, v)
case ssa.OpSP, ssa.OpSB, ssa.OpGetG:
// nothing to do
case ssa.OpRISCV64MOVBreg, ssa.OpRISCV64MOVHreg, ssa.OpRISCV64MOVWreg,
v.Fatalf("aux is of unknown type %T", v.Aux)
case *obj.LSym:
wantreg = "SB"
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
case *ir.Name:
wantreg = "SP"
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
case nil:
// No sym, just MOVW $off(SP), R
wantreg = "SP"
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpRISCV64MOVBstore, ssa.OpRISCV64MOVHstore, ssa.OpRISCV64MOVWstore, ssa.OpRISCV64MOVDstore,
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
case ssa.OpRISCV64MOVBstorezero, ssa.OpRISCV64MOVHstorezero, ssa.OpRISCV64MOVWstorezero, ssa.OpRISCV64MOVDstorezero:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = riscv.REG_ZERO
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
case ssa.OpRISCV64SEQZ, ssa.OpRISCV64SNEZ:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
+ p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
s.UseArgs(16) // space used in callee args area by assembly stubs
case ssa.OpRISCV64LoweredAtomicLoad8:
p4.From.Reg = riscv.REG_TMP
p4.Reg = riscv.REG_ZERO
p4.To.Type = obj.TYPE_BRANCH
- gc.Patch(p4, p1)
+ p4.To.SetTarget(p1)
p5 := s.Prog(riscv.AMOV)
p5.From.Type = obj.TYPE_CONST
p5.To.Reg = out
p6 := s.Prog(obj.ANOP)
- gc.Patch(p2, p6)
+ p2.To.SetTarget(p6)
case ssa.OpRISCV64LoweredZero:
mov, sz := largestMove(v.AuxInt)
p3.Reg = v.Args[0].Reg()
p3.From.Type = obj.TYPE_REG
p3.From.Reg = v.Args[1].Reg()
- gc.Patch(p3, p)
+ p3.To.SetTarget(p)
case ssa.OpRISCV64LoweredMove:
mov, sz := largestMove(v.AuxInt)
p5.Reg = v.Args[1].Reg()
p5.From.Type = obj.TYPE_REG
p5.From.Reg = v.Args[2].Reg()
- gc.Patch(p5, p)
+ p5.To.SetTarget(p)
case ssa.OpRISCV64LoweredNilCheck:
// Issue a load which will fault if arg is nil.
p := s.Prog(riscv.AMOVB)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = riscv.REG_ZERO
if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos == 1 in generated wrappers
case ssa.OpRISCV64LoweredGetClosurePtr:
// Closure pointer is S4 (riscv.REG_CTXT).
- gc.CheckLoweredGetClosurePtr(v)
+ ssagen.CheckLoweredGetClosurePtr(v)
case ssa.OpRISCV64LoweredGetCallerSP:
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = gc.Duffzero
+ p.To.Sym = ir.Syms.Duffzero
p.To.Offset = v.AuxInt
case ssa.OpRISCV64DUFFCOPY:
p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = gc.Duffcopy
+ p.To.Sym = ir.Syms.Duffcopy
p.To.Offset = v.AuxInt
default:
ssa.BlockRISCV64BNEZ: riscv.ABNEZ,
}
-func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
s.SetPos(b.Pos)
switch b.Kind {
p.From.Type = obj.TYPE_REG
p.From.Reg = riscv.REG_ZERO
p.Reg = riscv.REG_A0
- s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
case ssa.BlockRet:
package s390x
import (
- "cmd/compile/internal/gc"
+ "cmd/compile/internal/ssagen"
"cmd/internal/obj/s390x"
)
-func Init(arch *gc.Arch) {
+func Init(arch *ssagen.ArchInfo) {
arch.LinkArch = &s390x.Links390x
arch.REGSP = s390x.REGSP
arch.MAXWIDTH = 1 << 50
import (
"cmd/compile/internal/base"
- "cmd/compile/internal/gc"
+ "cmd/compile/internal/objw"
"cmd/internal/obj"
"cmd/internal/obj/s390x"
)
const clearLoopCutoff = 1024
// zerorange clears the stack in the given range.
-func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
if cnt == 0 {
return p
}
// need to create a copy of the stack pointer that we can adjust.
// We also need to do this if we are going to loop.
if off < 0 || off > 4096-clearLoopCutoff || cnt > clearLoopCutoff {
- p = pp.Appendpp(p, s390x.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, s390x.REGRT1, 0)
+ p = pp.Append(p, s390x.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, s390x.REGRT1, 0)
p.Reg = int16(s390x.REGSP)
reg = s390x.REGRT1
off = 0
// Generate a loop of large clears.
if cnt > clearLoopCutoff {
ireg := int16(s390x.REGRT2) // register holds number of remaining loop iterations
- p = pp.Appendpp(p, s390x.AMOVD, obj.TYPE_CONST, 0, cnt/256, obj.TYPE_REG, ireg, 0)
- p = pp.Appendpp(p, s390x.ACLEAR, obj.TYPE_CONST, 0, 256, obj.TYPE_MEM, reg, off)
+ p = pp.Append(p, s390x.AMOVD, obj.TYPE_CONST, 0, cnt/256, obj.TYPE_REG, ireg, 0)
+ p = pp.Append(p, s390x.ACLEAR, obj.TYPE_CONST, 0, 256, obj.TYPE_MEM, reg, off)
pl := p
- p = pp.Appendpp(p, s390x.AADD, obj.TYPE_CONST, 0, 256, obj.TYPE_REG, reg, 0)
- p = pp.Appendpp(p, s390x.ABRCTG, obj.TYPE_REG, ireg, 0, obj.TYPE_BRANCH, 0, 0)
- gc.Patch(p, pl)
+ p = pp.Append(p, s390x.AADD, obj.TYPE_CONST, 0, 256, obj.TYPE_REG, reg, 0)
+ p = pp.Append(p, s390x.ABRCTG, obj.TYPE_REG, ireg, 0, obj.TYPE_BRANCH, 0, 0)
+ p.To.SetTarget(pl)
cnt = cnt % 256
}
case 2:
ins = s390x.AMOVH
}
- p = pp.Appendpp(p, ins, obj.TYPE_CONST, 0, 0, obj.TYPE_MEM, reg, off)
+ p = pp.Append(p, ins, obj.TYPE_CONST, 0, 0, obj.TYPE_MEM, reg, off)
// Handle clears that would require multiple move instructions with CLEAR (assembled as XC).
default:
- p = pp.Appendpp(p, s390x.ACLEAR, obj.TYPE_CONST, 0, n, obj.TYPE_MEM, reg, off)
+ p = pp.Append(p, s390x.ACLEAR, obj.TYPE_CONST, 0, n, obj.TYPE_MEM, reg, off)
}
cnt -= n
return p
}
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
return pp.Prog(s390x.ANOPH)
}
"math"
"cmd/compile/internal/base"
- "cmd/compile/internal/gc"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/s390x"
)
// markMoves marks any MOVXconst ops that need to avoid clobbering flags.
-func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
+func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
flive := b.FlagsLiveAtEnd
for _, c := range b.ControlValues() {
flive = c.Type.IsFlags() || flive
// dest := dest(To) op src(From)
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
-func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog {
+func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
p := s.Prog(op)
p.From.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG
// dest := src(From) op off
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
-func opregregimm(s *gc.SSAGenState, op obj.As, dest, src int16, off int64) *obj.Prog {
+func opregregimm(s *ssagen.State, op obj.As, dest, src int16, off int64) *obj.Prog {
p := s.Prog(op)
p.From.Type = obj.TYPE_CONST
p.From.Offset = off
return p
}
-func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
switch v.Op {
case ssa.OpS390XSLD, ssa.OpS390XSLW,
ssa.OpS390XSRD, ssa.OpS390XSRW,
p.From.Type = obj.TYPE_ADDR
p.From.Reg = r
p.From.Index = i
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpS390XMOVDaddr:
p := s.Prog(s390x.AMOVD)
p.From.Type = obj.TYPE_ADDR
p.From.Reg = v.Args[0].Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpS390XCMP, ssa.OpS390XCMPW, ssa.OpS390XCMPU, ssa.OpS390XCMPWU:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[1].Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case ssa.OpS390XMOVDload,
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpS390XMOVBZloadidx, ssa.OpS390XMOVHZloadidx, ssa.OpS390XMOVWZloadidx,
p.From.Reg = r
p.From.Scale = 1
p.From.Index = i
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpS390XMOVBstore, ssa.OpS390XMOVHstore, ssa.OpS390XMOVWstore, ssa.OpS390XMOVDstore,
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
case ssa.OpS390XMOVBstoreidx, ssa.OpS390XMOVHstoreidx, ssa.OpS390XMOVWstoreidx, ssa.OpS390XMOVDstoreidx,
ssa.OpS390XMOVHBRstoreidx, ssa.OpS390XMOVWBRstoreidx, ssa.OpS390XMOVDBRstoreidx,
ssa.OpS390XFMOVSstoreidx, ssa.OpS390XFMOVDstoreidx:
p.To.Reg = r
p.To.Scale = 1
p.To.Index = i
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
case ssa.OpS390XMOVDstoreconst, ssa.OpS390XMOVWstoreconst, ssa.OpS390XMOVHstoreconst, ssa.OpS390XMOVBstoreconst:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = sc.Val()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux2(&p.To, v, sc.Off())
+ ssagen.AddAux2(&p.To, v, sc.Off())
case ssa.OpS390XMOVBreg, ssa.OpS390XMOVHreg, ssa.OpS390XMOVWreg,
ssa.OpS390XMOVBZreg, ssa.OpS390XMOVHZreg, ssa.OpS390XMOVWZreg,
ssa.OpS390XLDGR, ssa.OpS390XLGDR,
p.From.Offset = sc.Val()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux2(&p.To, v, sc.Off())
+ ssagen.AddAux2(&p.To, v, sc.Off())
case ssa.OpCopy:
if v.Type.IsMemory() {
return
return
}
p := s.Prog(loadByType(v.Type))
- gc.AddrAuto(&p.From, v.Args[0])
+ ssagen.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpStoreReg:
p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
- gc.AddrAuto(&p.To, v)
+ ssagen.AddrAuto(&p.To, v)
case ssa.OpS390XLoweredGetClosurePtr:
// Closure pointer is R12 (already)
- gc.CheckLoweredGetClosurePtr(v)
+ ssagen.CheckLoweredGetClosurePtr(v)
case ssa.OpS390XLoweredRound32F, ssa.OpS390XLoweredRound64F:
// input is already rounded
case ssa.OpS390XLoweredGetG:
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
+ p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
s.UseArgs(16) // space used in callee args area by assembly stubs
case ssa.OpS390XFLOGR, ssa.OpS390XPOPCNT,
ssa.OpS390XNEG, ssa.OpS390XNEGW,
p := s.Prog(s390x.AMOVBZ)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = s390x.REGTMP
if logopt.Enabled() {
p.Reg = v.Args[len(v.Args)-2].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
case ssa.OpS390XLoweredMove:
// Inputs must be valid pointers to memory,
// so adjust arg0 and arg1 as part of the expansion.
bne := s.Prog(s390x.ABLT)
bne.To.Type = obj.TYPE_BRANCH
- gc.Patch(bne, mvc)
+ bne.To.SetTarget(mvc)
if v.AuxInt > 0 {
mvc := s.Prog(s390x.AMVC)
bne := s.Prog(s390x.ABLT)
bne.To.Type = obj.TYPE_BRANCH
- gc.Patch(bne, clear)
+ bne.To.SetTarget(clear)
if v.AuxInt > 0 {
clear := s.Prog(s390x.ACLEAR)
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
case ssa.OpS390XMOVBatomicstore, ssa.OpS390XMOVWatomicstore, ssa.OpS390XMOVDatomicstore:
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
case ssa.OpS390XLAN, ssa.OpS390XLAO:
// LA(N|O) Ry, TMP, 0(Rx)
op := s.Prog(v.Op.Asm())
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
case ssa.OpS390XLoweredAtomicCas32, ssa.OpS390XLoweredAtomicCas64:
// Convert the flags output of CS{,G} into a bool.
// CS{,G} arg1, arg2, arg0
cs.Reg = v.Args[2].Reg() // new
cs.To.Type = obj.TYPE_MEM
cs.To.Reg = v.Args[0].Reg()
- gc.AddAux(&cs.To, v)
+ ssagen.AddAux(&cs.To, v)
// MOVD $0, ret
movd := s.Prog(s390x.AMOVD)
// NOP (so the BNE has somewhere to land)
nop := s.Prog(obj.ANOP)
- gc.Patch(bne, nop)
+ bne.To.SetTarget(nop)
case ssa.OpS390XLoweredAtomicExchange32, ssa.OpS390XLoweredAtomicExchange64:
// Loop until the CS{,G} succeeds.
// MOV{WZ,D} arg0, ret
load.From.Reg = v.Args[0].Reg()
load.To.Type = obj.TYPE_REG
load.To.Reg = v.Reg0()
- gc.AddAux(&load.From, v)
+ ssagen.AddAux(&load.From, v)
// CS{,G} ret, arg1, arg0
cs := s.Prog(v.Op.Asm())
cs.Reg = v.Args[1].Reg() // new
cs.To.Type = obj.TYPE_MEM
cs.To.Reg = v.Args[0].Reg()
- gc.AddAux(&cs.To, v)
+ ssagen.AddAux(&cs.To, v)
// BNE cs
bne := s.Prog(s390x.ABNE)
bne.To.Type = obj.TYPE_BRANCH
- gc.Patch(bne, cs)
+ bne.To.SetTarget(cs)
case ssa.OpS390XSYNC:
s.Prog(s390x.ASYNC)
case ssa.OpClobber:
panic("unreachable")
}
-func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
// Handle generic blocks first.
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(s390x.ABR)
p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
return
case ssa.BlockDefer:
// Initialize just enough of the universe and the types package to make our tests function.
// TODO(josharian): move universe initialization to the types package,
// so this test setup can share it.
- types.Dowidth = func(t *types.Type) {}
for _, typ := range [...]struct {
width int64
package ssa
import (
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
// TODO(mdempsky): Shouldn't be necessary; see discussion at golang.org/cl/275756
func (*Value) CanBeAnSSAAux() {}
+
+// AutoVar returns a *Name and int64 representing the auto variable and offset within it
+// where v should be spilled.
+func AutoVar(v *Value) (*ir.Name, int64) {
+ loc := v.Block.Func.RegAlloc[v.ID].(LocalSlot)
+ if v.Type.Size() > loc.Type.Size() {
+ v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type)
+ }
+ return loc.N, loc.Off
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run mkbuiltin.go
+
+package ssagen
+
+import (
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/escape"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+)
+
+// useABIWrapGen returns TRUE if the compiler should generate an
+// ABI wrapper for the function 'f'.
+func useABIWrapGen(f *ir.Func) bool {
+	if !base.Flag.ABIWrap {
+		return false
+	}
+
+	// Support limit option for bisecting.
+	// A limit of exactly 1 means the budget is exhausted: stop wrapping.
+	// A limit below 1 means no limit is in effect.
+	if base.Flag.ABIWrapLimit == 1 {
+		return false
+	}
+	if base.Flag.ABIWrapLimit < 1 {
+		return true
+	}
+	// Consume one unit of budget for this wrapper.
+	base.Flag.ABIWrapLimit--
+	if base.Debug.ABIWrap != 0 && base.Flag.ABIWrapLimit == 1 {
+		fmt.Fprintf(os.Stderr, "=-= limit reached after new wrapper for %s\n",
+			f.LSym.Name)
+	}
+
+	return true
+}
+
+// symabiDefs and symabiRefs record the defined and referenced ABIs of
+// symbols required by non-Go code. These are keyed by link symbol
+// name, where the local package prefix is always `"".`
+var symabiDefs, symabiRefs map[string]obj.ABI
+
+// CgoSymABIs records an ABI0 reference for every cgo-exported function,
+// since the linker expects an ABI0 wrapper for all such functions.
+func CgoSymABIs() {
+	// The linker expects an ABI0 wrapper for all cgo-exported
+	// functions.
+	for _, prag := range typecheck.Target.CgoPragmas {
+		switch prag[0] {
+		case "cgo_export_static", "cgo_export_dynamic":
+			if symabiRefs == nil {
+				symabiRefs = make(map[string]obj.ABI)
+			}
+			// prag[1] is the exported symbol name.
+			symabiRefs[prag[1]] = obj.ABI0
+		}
+	}
+}
+
+// ReadSymABIs reads a symabis file that specifies definitions and
+// references of text symbols by ABI.
+//
+// The symabis format is a set of lines, where each line is a sequence
+// of whitespace-separated fields. The first field is a verb and is
+// either "def" for defining a symbol ABI or "ref" for referencing a
+// symbol using an ABI. For both "def" and "ref", the second field is
+// the symbol name and the third field is the ABI name, as one of the
+// named cmd/internal/obj.ABI constants.
+//
+// Results are stored in the package-level symabiDefs and symabiRefs
+// maps; any I/O or parse error is fatal.
+func ReadSymABIs(file, myimportpath string) {
+	data, err := ioutil.ReadFile(file)
+	if err != nil {
+		log.Fatalf("-symabis: %v", err)
+	}
+
+	symabiDefs = make(map[string]obj.ABI)
+	symabiRefs = make(map[string]obj.ABI)
+
+	localPrefix := ""
+	if myimportpath != "" {
+		// Symbols in this package may be written either as
+		// "".X or with the package's import path already in
+		// the symbol.
+		localPrefix = objabi.PathToPrefix(myimportpath) + "."
+	}
+
+	for lineNum, line := range strings.Split(string(data), "\n") {
+		lineNum++ // 1-based
+		line = strings.TrimSpace(line)
+		// Skip blank lines and '#' comments.
+		if line == "" || strings.HasPrefix(line, "#") {
+			continue
+		}
+
+		parts := strings.Fields(line)
+		switch parts[0] {
+		case "def", "ref":
+			// Parse line.
+			if len(parts) != 3 {
+				log.Fatalf(`%s:%d: invalid symabi: syntax is "%s sym abi"`, file, lineNum, parts[0])
+			}
+			sym, abistr := parts[1], parts[2]
+			abi, valid := obj.ParseABI(abistr)
+			if !valid {
+				log.Fatalf(`%s:%d: invalid symabi: unknown abi "%s"`, file, lineNum, abistr)
+			}
+
+			// If the symbol is already prefixed with
+			// myimportpath, rewrite it to start with ""
+			// so it matches the compiler's internal
+			// symbol names.
+			if localPrefix != "" && strings.HasPrefix(sym, localPrefix) {
+				sym = `"".` + sym[len(localPrefix):]
+			}
+
+			// Record for later.
+			if parts[0] == "def" {
+				symabiDefs[sym] = abi
+			} else {
+				symabiRefs[sym] = abi
+			}
+		default:
+			log.Fatalf(`%s:%d: invalid symabi type "%s"`, file, lineNum, parts[0])
+		}
+	}
+}
+
+// InitLSym defines f's obj.LSym and initializes it based on the
+// properties of f. This includes setting the symbol flags and ABI and
+// creating and initializing related DWARF symbols.
+//
+// InitLSym must be called exactly once per function and must be
+// called for both functions with bodies and functions without bodies.
+// For body-less functions, we only create the LSym; for functions
+// with bodies call a helper to setup up / populate the LSym.
+func InitLSym(f *ir.Func, hasBody bool) {
+	// FIXME: for new-style ABI wrappers, we set up the lsym at the
+	// point the wrapper is created.
+	if f.LSym != nil && base.Flag.ABIWrap {
+		return
+	}
+	selectLSym(f, hasBody)
+	if hasBody {
+		// Flag 0: no special text-symbol attributes beyond those
+		// derived from f itself (see setupTextLSym).
+		setupTextLSym(f, 0)
+	}
+}
+
+// selectLSym sets up the LSym for a given function, and
+// makes calls to helpers to create ABI wrappers if needed.
+// Blank-named functions get no LSym at all.
+func selectLSym(f *ir.Func, hasBody bool) {
+	if f.LSym != nil {
+		base.Fatalf("Func.initLSym called twice")
+	}
+
+	if nam := f.Nname; !ir.IsBlank(nam) {
+
+		var wrapperABI obj.ABI
+		needABIWrapper := false
+		defABI, hasDefABI := symabiDefs[nam.Sym().LinksymName()]
+		if hasDefABI && defABI == obj.ABI0 {
+			// Symbol is defined as ABI0. Create an
+			// Internal -> ABI0 wrapper.
+			f.LSym = nam.Sym().LinksymABI0()
+			needABIWrapper, wrapperABI = true, obj.ABIInternal
+		} else {
+			f.LSym = nam.Sym().Linksym()
+			// No ABI override. Check that the symbol is
+			// using the expected ABI.
+			want := obj.ABIInternal
+			if f.LSym.ABI() != want {
+				base.Fatalf("function symbol %s has the wrong ABI %v, expected %v", f.LSym.Name, f.LSym.ABI(), want)
+			}
+		}
+		if f.Pragma&ir.Systemstack != 0 {
+			// go:systemstack functions are marked as C functions.
+			f.LSym.Set(obj.AttrCFunc, true)
+		}
+
+		isLinknameExported := nam.Sym().Linkname != "" && (hasBody || hasDefABI)
+		if abi, ok := symabiRefs[f.LSym.Name]; (ok && abi == obj.ABI0) || isLinknameExported {
+			// Either 1) this symbol is definitely
+			// referenced as ABI0 from this package; or 2)
+			// this symbol is defined in this package but
+			// given a linkname, indicating that it may be
+			// referenced from another package. Create an
+			// ABI0 -> Internal wrapper so it can be
+			// called as ABI0. In case 2, it's important
+			// that we know it's defined in this package
+			// since other packages may "pull" symbols
+			// using linkname and we don't want to create
+			// duplicate ABI wrappers.
+			if f.LSym.ABI() != obj.ABI0 {
+				needABIWrapper, wrapperABI = true, obj.ABI0
+			}
+		}
+
+		if needABIWrapper {
+			if !useABIWrapGen(f) {
+				// Fallback: use alias instead. FIXME.
+
+				// These LSyms have the same name as the
+				// native function, so we create them directly
+				// rather than looking them up. The uniqueness
+				// of f.lsym ensures uniqueness of asym.
+				asym := &obj.LSym{
+					Name: f.LSym.Name,
+					Type: objabi.SABIALIAS,
+					R:    []obj.Reloc{{Sym: f.LSym}}, // 0 size, so "informational"
+				}
+				asym.SetABI(wrapperABI)
+				asym.Set(obj.AttrDuplicateOK, true)
+				base.Ctxt.ABIAliases = append(base.Ctxt.ABIAliases, asym)
+			} else {
+				if base.Debug.ABIWrap != 0 {
+					fmt.Fprintf(os.Stderr, "=-= %v to %v wrapper for %s.%s\n",
+						wrapperABI, 1-wrapperABI, types.LocalPkg.Path, f.LSym.Name)
+				}
+				makeABIWrapper(f, wrapperABI)
+			}
+		}
+	}
+}
+
+// makeABIWrapper creates a new function that wraps a cross-ABI call
+// to "f". The wrapper is marked as an ABIWRAPPER.
+func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) {
+
+	// Q: is this needed?
+	// Save and restore the global typechecking context around the
+	// synthesized declaration below.
+	savepos := base.Pos
+	savedclcontext := typecheck.DeclContext
+	savedcurfn := ir.CurFunc
+
+	base.Pos = base.AutogeneratedPos
+	typecheck.DeclContext = ir.PEXTERN
+
+	// At the moment we don't support wrapping a method, we'd need machinery
+	// below to handle the receiver. Panic if we see this scenario.
+	ft := f.Nname.Ntype.Type()
+	if ft.NumRecvs() != 0 {
+		panic("makeABIWrapper support for wrapping methods not implemented")
+	}
+
+	// Manufacture a new func type to use for the wrapper.
+	var noReceiver *ir.Field
+	tfn := ir.NewFuncType(base.Pos,
+		noReceiver,
+		typecheck.NewFuncParams(ft.Params(), true),
+		typecheck.NewFuncParams(ft.Results(), false))
+
+	// Reuse f's types.Sym to create a new ODCLFUNC/function.
+	fn := typecheck.DeclFunc(f.Nname.Sym(), tfn)
+	fn.SetDupok(true)
+	fn.SetWrapper(true) // ignore frame for panic+recover matching
+
+	// Select LSYM now.
+	asym := base.Ctxt.LookupABI(f.LSym.Name, wrapperABI)
+	asym.Type = objabi.STEXT
+	if fn.LSym != nil {
+		panic("unexpected")
+	}
+	fn.LSym = asym
+
+	// ABI0-to-ABIInternal wrappers will be mainly loading params from
+	// stack into registers (and/or storing stack locations back to
+	// registers after the wrapped call); in most cases they won't
+	// need to allocate stack space, so it should be OK to mark them
+	// as NOSPLIT in these cases. In addition, my assumption is that
+	// functions written in assembly are NOSPLIT in most (but not all)
+	// cases. In the case of an ABIInternal target that has too many
+	// parameters to fit into registers, the wrapper would need to
+	// allocate stack space, but this seems like an unlikely scenario.
+	// Hence: mark these wrappers NOSPLIT.
+	//
+	// ABIInternal-to-ABI0 wrappers on the other hand will be taking
+	// things in registers and pushing them onto the stack prior to
+	// the ABI0 call, meaning that they will always need to allocate
+	// stack space. If the compiler marks them as NOSPLIT this seems
+	// as though it could lead to situations where the linker's
+	// nosplit-overflow analysis would trigger a link failure. On the
+	// other hand if they are not tagged NOSPLIT then this could cause
+	// problems when building the runtime (since there may be calls to
+	// asm routine in cases where it's not safe to grow the stack). In
+	// most cases the wrapper would be (in effect) inlined, but are
+	// there (perhaps) indirect calls from the runtime that could run
+	// into trouble here.
+	// FIXME: at the moment all.bash does not pass when I leave out
+	// NOSPLIT for these wrappers, so all are currently tagged with NOSPLIT.
+	setupTextLSym(fn, obj.NOSPLIT|obj.ABIWRAPPER)
+
+	// Generate call. Use tail call if no params and no returns,
+	// but a regular call otherwise.
+	//
+	// Note: ideally we would be using a tail call in cases where
+	// there are params but no returns for ABI0->ABIInternal wrappers,
+	// provided that all params fit into registers (e.g. we don't have
+	// to allocate any stack space). Doing this will require some
+	// extra work in typecheck/walk/ssa, might want to add a new node
+	// OTAILCALL or something to this effect.
+	var tail ir.Node
+	if tfn.Type().NumResults() == 0 && tfn.Type().NumParams() == 0 && tfn.Type().NumRecvs() == 0 {
+		tail = ir.NewBranchStmt(base.Pos, ir.ORETJMP, f.Nname.Sym())
+	} else {
+		call := ir.NewCallExpr(base.Pos, ir.OCALL, f.Nname, nil)
+		call.Args.Set(ir.ParamNames(tfn.Type()))
+		call.IsDDD = tfn.Type().IsVariadic()
+		tail = call
+		if tfn.Type().NumResults() > 0 {
+			n := ir.NewReturnStmt(base.Pos, nil)
+			n.Results = []ir.Node{call}
+			tail = n
+		}
+	}
+	fn.Body.Append(tail)
+
+	typecheck.FinishFuncBody()
+	if base.Debug.DclStack != 0 {
+		types.CheckDclstack()
+	}
+
+	// Typecheck and escape-analyze the synthesized body, then queue
+	// the wrapper for compilation alongside normal declarations.
+	typecheck.Func(fn)
+	ir.CurFunc = fn
+	typecheck.Stmts(fn.Body)
+
+	escape.Batch([]*ir.Func{fn}, false)
+
+	typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
+
+	// Restore previous context.
+	base.Pos = savepos
+	typecheck.DeclContext = savedclcontext
+	ir.CurFunc = savedcurfn
+}
+
+// setupTextLSym initializes the LSym for a with-body text symbol,
+// translating the function's IR-level attributes (dupok, wrapper,
+// needctxt, nosplit, reflect-method) into obj text flags before
+// registering the symbol with the link context.
+func setupTextLSym(f *ir.Func, flag int) {
+	if f.Dupok() {
+		flag |= obj.DUPOK
+	}
+	if f.Wrapper() {
+		flag |= obj.WRAPPER
+	}
+	if f.Needctxt() {
+		flag |= obj.NEEDCTXT
+	}
+	if f.Pragma&ir.Nosplit != 0 {
+		flag |= obj.NOSPLIT
+	}
+	if f.ReflectMethod() {
+		flag |= obj.REFLECTMETHOD
+	}
+
+	// Clumsy but important.
+	// See test/recover.go for test cases and src/reflect/value.go
+	// for the actual functions being considered.
+	if base.Ctxt.Pkgpath == "reflect" {
+		switch f.Sym().Name {
+		case "callReflect", "callMethod":
+			flag |= obj.WRAPPER
+		}
+	}
+
+	base.Ctxt.InitTextSym(f.LSym, flag)
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssagen
+
+import (
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/ssa"
+ "cmd/internal/obj"
+)
+
+// Arch is the architecture backend in use, populated by the
+// per-architecture Init functions.
+var Arch ArchInfo
+
+// interface to back end
+
+// ArchInfo holds the hooks and parameters an architecture backend
+// registers with the SSA code generator.
+type ArchInfo struct {
+	LinkArch *obj.LinkArch
+
+	// REGSP is the stack pointer register number — TODO confirm
+	// against each backend's Init.
+	REGSP int
+	// MAXWIDTH is presumably the largest object size the backend
+	// supports — verify against callers.
+	MAXWIDTH int64
+	SoftFloat bool
+
+	PadFrame func(int64) int64
+
+	// ZeroRange zeroes a range of memory on stack. It is only inserted
+	// at function entry, and it is ok to clobber registers.
+	ZeroRange func(*objw.Progs, *obj.Prog, int64, int64, *uint32) *obj.Prog
+
+	Ginsnop      func(*objw.Progs) *obj.Prog
+	Ginsnopdefer func(*objw.Progs) *obj.Prog // special ginsnop for deferreturn
+
+	// SSAMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
+	SSAMarkMoves func(*State, *ssa.Block)
+
+	// SSAGenValue emits Prog(s) for the Value.
+	SSAGenValue func(*State, *ssa.Value)
+
+	// SSAGenBlock emits end-of-block Progs. SSAGenValue should be called
+	// for all values in the block before SSAGenBlock.
+	SSAGenBlock func(s *State, b, next *ssa.Block)
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssagen
+
+import (
+ "bytes"
+ "fmt"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+)
+
+// EnableNoWriteBarrierRecCheck creates the global
+// nowritebarrierrec checker.
+func EnableNoWriteBarrierRecCheck() {
+	nowritebarrierrecCheck = newNowritebarrierrecChecker()
+}
+
+// NoWriteBarrierRecCheck runs the deferred nowritebarrierrec check
+// and releases the checker.
+func NoWriteBarrierRecCheck() {
+	// Write barriers are now known. Check the
+	// call graph.
+	nowritebarrierrecCheck.check()
+	nowritebarrierrecCheck = nil
+}
+
+var nowritebarrierrecCheck *nowritebarrierrecChecker
+
+type nowritebarrierrecChecker struct {
+	// extraCalls contains extra function calls that may not be
+	// visible during later analysis. It maps from the ODCLFUNC of
+	// the caller to a list of callees.
+	extraCalls map[*ir.Func][]nowritebarrierrecCall
+
+	// curfn is the current function during AST walks.
+	curfn *ir.Func
+}
+
+// nowritebarrierrecCall records one edge of the call graph.
+type nowritebarrierrecCall struct {
+	target *ir.Func // caller or callee
+	lineno src.XPos // line of call
+}
+
+// newNowritebarrierrecChecker creates a nowritebarrierrecChecker. It
+// must be called before transformclosure and walk.
+func newNowritebarrierrecChecker() *nowritebarrierrecChecker {
+	c := &nowritebarrierrecChecker{
+		extraCalls: make(map[*ir.Func][]nowritebarrierrecCall),
+	}
+
+	// Find all systemstack calls and record their targets. In
+	// general, flow analysis can't see into systemstack, but it's
+	// important to handle it for this check, so we model it
+	// directly. This has to happen before transformclosure since
+	// it's a lot harder to work out the argument after.
+	for _, n := range typecheck.Target.Decls {
+		if n.Op() != ir.ODCLFUNC {
+			continue
+		}
+		c.curfn = n.(*ir.Func)
+		ir.Visit(n, c.findExtraCalls)
+	}
+	c.curfn = nil
+	return c
+}
+
+// findExtraCalls records, for each call to runtime.systemstack, the
+// function passed as its argument (either a named function or a
+// closure) as an extra call edge from c.curfn.
+func (c *nowritebarrierrecChecker) findExtraCalls(nn ir.Node) {
+	if nn.Op() != ir.OCALLFUNC {
+		return
+	}
+	n := nn.(*ir.CallExpr)
+	if n.X == nil || n.X.Op() != ir.ONAME {
+		return
+	}
+	fn := n.X.(*ir.Name)
+	if fn.Class_ != ir.PFUNC || fn.Name().Defn == nil {
+		return
+	}
+	// Only calls to runtime.systemstack are of interest.
+	if !types.IsRuntimePkg(fn.Sym().Pkg) || fn.Sym().Name != "systemstack" {
+		return
+	}
+
+	var callee *ir.Func
+	arg := n.Args[0]
+	switch arg.Op() {
+	case ir.ONAME:
+		arg := arg.(*ir.Name)
+		callee = arg.Name().Defn.(*ir.Func)
+	case ir.OCLOSURE:
+		arg := arg.(*ir.ClosureExpr)
+		callee = arg.Func
+	default:
+		base.Fatalf("expected ONAME or OCLOSURE node, got %+v", arg)
+	}
+	if callee.Op() != ir.ODCLFUNC {
+		base.Fatalf("expected ODCLFUNC node, got %+v", callee)
+	}
+	c.extraCalls[c.curfn] = append(c.extraCalls[c.curfn], nowritebarrierrecCall{callee, n.Pos()})
+}
+
+// recordCall records a call from ODCLFUNC node "from", to function
+// symbol "to" at position pos.
+//
+// This should be done as late as possible during compilation to
+// capture precise call graphs. The target of the call is an LSym
+// because that's all we know after we start SSA.
+//
+// This can be called concurrently for different from Nodes.
+func (c *nowritebarrierrecChecker) recordCall(fn *ir.Func, to *obj.LSym, pos src.XPos) {
+	// We record this information on the *Func so this is concurrent-safe.
+	if fn.NWBRCalls == nil {
+		fn.NWBRCalls = new([]ir.SymAndPos)
+	}
+	*fn.NWBRCalls = append(*fn.NWBRCalls, ir.SymAndPos{Sym: to, Pos: pos})
+}
+
+// check performs a BFS over the call graph starting from every
+// go:nowritebarrierrec function and reports an error for any reachable
+// function that contains a write barrier (fn.WBPos is known),
+// printing the call path that led there.
+func (c *nowritebarrierrecChecker) check() {
+	// We walk the call graph as late as possible so we can
+	// capture all calls created by lowering, but this means we
+	// only get to see the obj.LSyms of calls. symToFunc lets us
+	// get back to the ODCLFUNCs.
+	symToFunc := make(map[*obj.LSym]*ir.Func)
+	// funcs records the back-edges of the BFS call graph walk. It
+	// maps from the ODCLFUNC of each function that must not have
+	// write barriers to the call that inhibits them. Functions
+	// that are directly marked go:nowritebarrierrec are in this
+	// map with a zero-valued nowritebarrierrecCall. This also
+	// acts as the set of marks for the BFS of the call graph.
+	funcs := make(map[*ir.Func]nowritebarrierrecCall)
+	// q is the queue of ODCLFUNC Nodes to visit in BFS order.
+	var q ir.NameQueue
+
+	for _, n := range typecheck.Target.Decls {
+		if n.Op() != ir.ODCLFUNC {
+			continue
+		}
+		fn := n.(*ir.Func)
+
+		symToFunc[fn.LSym] = fn
+
+		// Make nowritebarrierrec functions BFS roots.
+		if fn.Pragma&ir.Nowritebarrierrec != 0 {
+			funcs[fn] = nowritebarrierrecCall{}
+			q.PushRight(fn.Nname)
+		}
+		// Check go:nowritebarrier functions.
+		if fn.Pragma&ir.Nowritebarrier != 0 && fn.WBPos.IsKnown() {
+			base.ErrorfAt(fn.WBPos, "write barrier prohibited")
+		}
+	}
+
+	// Perform a BFS of the call graph from all
+	// go:nowritebarrierrec functions.
+	enqueue := func(src, target *ir.Func, pos src.XPos) {
+		if target.Pragma&ir.Yeswritebarrierrec != 0 {
+			// Don't flow into this function.
+			return
+		}
+		if _, ok := funcs[target]; ok {
+			// Already found a path to target.
+			return
+		}
+
+		// Record the path.
+		funcs[target] = nowritebarrierrecCall{target: src, lineno: pos}
+		q.PushRight(target.Nname)
+	}
+	for !q.Empty() {
+		fn := q.PopLeft().Func
+
+		// Check fn.
+		if fn.WBPos.IsKnown() {
+			// Reconstruct and report the offending call chain
+			// by following the recorded back-edges.
+			var err bytes.Buffer
+			call := funcs[fn]
+			for call.target != nil {
+				fmt.Fprintf(&err, "\n\t%v: called by %v", base.FmtPos(call.lineno), call.target.Nname)
+				call = funcs[call.target]
+			}
+			base.ErrorfAt(fn.WBPos, "write barrier prohibited by caller; %v%s", fn.Nname, err.String())
+			continue
+		}
+
+		// Enqueue fn's calls.
+		for _, callee := range c.extraCalls[fn] {
+			enqueue(fn, callee.target, callee.lineno)
+		}
+		if fn.NWBRCalls == nil {
+			continue
+		}
+		for _, callee := range *fn.NWBRCalls {
+			target := symToFunc[callee.Sym]
+			if target != nil {
+				enqueue(fn, target, callee.Pos)
+			}
+		}
+	}
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssagen
+
+import (
+ "internal/race"
+ "math/rand"
+ "sort"
+ "sync"
+ "time"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+ "cmd/internal/sys"
+)
+
+// cmpstackvarlt reports whether the stack variable a sorts before b.
+//
+// Sort the list of stack variables. Autos after anything else,
+// within autos, unused after used, within used, things with
+// pointers first, zeroed things first, and then decreasing size.
+// Because autos are laid out in decreasing addresses
+// on the stack, pointers first, zeroed things first and decreasing size
+// really means, in memory, things with pointers needing zeroing at
+// the top of the stack and increasing in size.
+// Non-autos sort on offset.
+func cmpstackvarlt(a, b *ir.Name) bool {
+	// Autos sort after all other classes.
+	if (a.Class_ == ir.PAUTO) != (b.Class_ == ir.PAUTO) {
+		return b.Class_ == ir.PAUTO
+	}
+
+	// Non-autos: order by frame offset.
+	if a.Class_ != ir.PAUTO {
+		return a.FrameOffset() < b.FrameOffset()
+	}
+
+	// Used before unused.
+	if a.Used() != b.Used() {
+		return a.Used()
+	}
+
+	// Pointer-containing before pointer-free.
+	ap := a.Type().HasPointers()
+	bp := b.Type().HasPointers()
+	if ap != bp {
+		return ap
+	}
+
+	// Needs-zeroing before not.
+	ap = a.Needzero()
+	bp = b.Needzero()
+	if ap != bp {
+		return ap
+	}
+
+	// Larger before smaller.
+	if a.Type().Width != b.Type().Width {
+		return a.Type().Width > b.Type().Width
+	}
+
+	// Final tie-break: name, for determinism.
+	return a.Sym().Name < b.Sym().Name
+}
+
+// byStackVar implements sort.Interface for []*Node using cmpstackvarlt.
+type byStackVar []*ir.Name
+
+func (s byStackVar) Len() int           { return len(s) }
+func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) }
+func (s byStackVar) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+// AllocFrame computes the stack frame layout for f: it marks which
+// PAUTO locals are actually used, sorts fn.Dcl with cmpstackvarlt,
+// assigns frame offsets to the used autos, and records the total
+// frame size (stksize) and the pointer-containing prefix (stkptrsize).
+func (s *ssafn) AllocFrame(f *ssa.Func) {
+	s.stksize = 0
+	s.stkptrsize = 0
+	fn := s.curfn
+
+	// Mark the PAUTO's unused.
+	for _, ln := range fn.Dcl {
+		if ln.Class_ == ir.PAUTO {
+			ln.SetUsed(false)
+		}
+	}
+
+	// Anything assigned a register-allocator stack slot is used.
+	for _, l := range f.RegAlloc {
+		if ls, ok := l.(ssa.LocalSlot); ok {
+			ls.N.Name().SetUsed(true)
+		}
+	}
+
+	// Anything referenced as a value aux is used.
+	scratchUsed := false
+	for _, b := range f.Blocks {
+		for _, v := range b.Values {
+			if n, ok := v.Aux.(*ir.Name); ok {
+				switch n.Class_ {
+				case ir.PPARAM, ir.PPARAMOUT:
+					// Don't modify nodfp; it is a global.
+					if n != ir.RegFP {
+						n.Name().SetUsed(true)
+					}
+				case ir.PAUTO:
+					n.Name().SetUsed(true)
+				}
+			}
+			if !scratchUsed {
+				scratchUsed = v.Op.UsesScratch()
+			}
+
+		}
+	}
+
+	if f.Config.NeedsFpScratch && scratchUsed {
+		s.scratchFpMem = typecheck.TempAt(src.NoXPos, s.curfn, types.Types[types.TUINT64])
+	}
+
+	sort.Sort(byStackVar(fn.Dcl))
+
+	// Reassign stack offsets of the locals that are used.
+	lastHasPtr := false
+	for i, n := range fn.Dcl {
+		if n.Op() != ir.ONAME || n.Class_ != ir.PAUTO {
+			continue
+		}
+		if !n.Used() {
+			// The sort put all unused autos at the tail; drop them.
+			fn.Dcl = fn.Dcl[:i]
+			break
+		}
+
+		types.CalcSize(n.Type())
+		w := n.Type().Width
+		if w >= types.MaxWidth || w < 0 {
+			base.Fatalf("bad width")
+		}
+		if w == 0 && lastHasPtr {
+			// Pad between a pointer-containing object and a zero-sized object.
+			// This prevents a pointer to the zero-sized object from being interpreted
+			// as a pointer to the pointer-containing object (and causing it
+			// to be scanned when it shouldn't be). See issue 24993.
+			w = 1
+		}
+		s.stksize += w
+		s.stksize = types.Rnd(s.stksize, int64(n.Type().Align))
+		if n.Type().HasPointers() {
+			s.stkptrsize = s.stksize
+			lastHasPtr = true
+		} else {
+			lastHasPtr = false
+		}
+		if Arch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) {
+			s.stksize = types.Rnd(s.stksize, int64(types.PtrSize))
+		}
+		n.SetFrameOffset(-s.stksize)
+	}
+
+	s.stksize = types.Rnd(s.stksize, int64(types.RegSize))
+	s.stkptrsize = types.Rnd(s.stkptrsize, int64(types.RegSize))
+}
+
+const maxStackSize = 1 << 30
+
+// Compile builds an SSA backend function,
+// uses it to generate a plist,
+// and flushes that plist to machine code.
+// worker indicates which of the backend workers is doing the processing.
+func Compile(fn *ir.Func, worker int) {
+	f := buildssa(fn, worker)
+	// Note: check arg size to fix issue 25507.
+	if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type().ArgWidth() >= maxStackSize {
+		// Too large: record for a deferred diagnostic and bail out.
+		largeStackFramesMu.Lock()
+		largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: fn.Type().ArgWidth(), pos: fn.Pos()})
+		largeStackFramesMu.Unlock()
+		return
+	}
+	pp := objw.NewProgs(fn, worker)
+	defer pp.Free()
+	genssa(f, pp)
+	// Check frame size again.
+	// The check above included only the space needed for local variables.
+	// After genssa, the space needed includes local variables and the callee arg region.
+	// We must do this check prior to calling pp.Flush.
+	// If there are any oversized stack frames,
+	// the assembler may emit inscrutable complaints about invalid instructions.
+	if pp.Text.To.Offset >= maxStackSize {
+		largeStackFramesMu.Lock()
+		locals := f.Frontend().(*ssafn).stksize
+		largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: fn.Type().ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos()})
+		largeStackFramesMu.Unlock()
+		return
+	}
+
+	pp.Flush() // assemble, fill in boilerplate, etc.
+	// fieldtrack must be called after pp.Flush. See issue 20014.
+	fieldtrack(pp.Text.From.Sym, fn.FieldTrack)
+}
+
+// NOTE(review): rand is seeded only under the race detector —
+// presumably to randomize some race-mode-only behavior elsewhere in
+// the package; confirm against the callers of math/rand here.
+func init() {
+	if race.Enabled {
+		rand.Seed(time.Now().UnixNano())
+	}
+}
+
+// StackOffset returns the stack location of a LocalSlot relative to the
+// stack pointer, suitable for use in a DWARF location entry. This has nothing
+// to do with its offset in the user variable.
+func StackOffset(slot ssa.LocalSlot) int32 {
+	n := slot.N
+	var off int64
+	switch n.Class_ {
+	case ir.PAUTO:
+		off = n.FrameOffset()
+		// Adjust for the saved return address / frame pointer words
+		// that sit between SP and the locals.
+		if base.Ctxt.FixedFrameSize() == 0 {
+			off -= int64(types.PtrSize)
+		}
+		if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
+			// There is a word space for FP on ARM64 even if the frame pointer is disabled
+			off -= int64(types.PtrSize)
+		}
+	case ir.PPARAM, ir.PPARAMOUT:
+		off = n.FrameOffset() + base.Ctxt.FixedFrameSize()
+	}
+	return int32(off + slot.Off)
+}
+
+// fieldtrack adds R_USEFIELD relocations to fnsym to record any
+// struct fields that it used.
+func fieldtrack(fnsym *obj.LSym, tracked map[*types.Sym]struct{}) {
+	if fnsym == nil {
+		return
+	}
+	if objabi.Fieldtrack_enabled == 0 || len(tracked) == 0 {
+		return
+	}
+
+	// Sort the symbols by name so the relocations are deterministic.
+	trackSyms := make([]*types.Sym, 0, len(tracked))
+	for sym := range tracked {
+		trackSyms = append(trackSyms, sym)
+	}
+	sort.Sort(symByName(trackSyms))
+	for _, sym := range trackSyms {
+		r := obj.Addrel(fnsym)
+		r.Sym = sym.Linksym()
+		r.Type = objabi.R_USEFIELD
+	}
+}
+
+// symByName implements sort.Interface for []*types.Sym by name.
+type symByName []*types.Sym
+
+func (a symByName) Len() int           { return len(a) }
+func (a symByName) Less(i, j int) bool { return a[i].Name < a[j].Name }
+func (a symByName) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+
+// largeStack is info about a function whose stack frame is too large (rare).
+type largeStack struct {
+	locals int64
+	args   int64
+	callee int64
+	pos    src.XPos
+}
+
+var (
+	largeStackFramesMu sync.Mutex // protects largeStackFrames
+	largeStackFrames   []largeStack
+)
+
+// CheckLargeStacks reports an error, in source-position order, for
+// every function recorded (by Compile) as having a >1GB stack frame.
+func CheckLargeStacks() {
+	// Check whether any of the functions we have compiled have gigantic stack frames.
+	sort.Slice(largeStackFrames, func(i, j int) bool {
+		return largeStackFrames[i].pos.Before(largeStackFrames[j].pos)
+	})
+	for _, large := range largeStackFrames {
+		if large.callee != 0 {
+			base.ErrorfAt(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args + %d MB callee", large.locals>>20, large.args>>20, large.callee>>20)
+		} else {
+			base.ErrorfAt(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args", large.locals>>20, large.args>>20)
+		}
+	}
+}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package ssagen
import (
- "cmd/compile/internal/ir"
- "cmd/compile/internal/types"
- "cmd/internal/src"
"reflect"
"sort"
"testing"
+
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
)
func typeWithoutPointers() *types.Type {
if s == nil {
s = &types.Sym{Name: "."}
}
- n := NewName(s)
+ n := typecheck.NewName(s)
n.SetType(t)
n.SetFrameOffset(xoffset)
- n.SetClass(cl)
+ n.Class_ = cl
return n
}
testdata := []struct {
func TestStackvarSort(t *testing.T) {
nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) *ir.Name {
- n := NewName(s)
+ n := typecheck.NewName(s)
n.SetType(t)
n.SetFrameOffset(xoffset)
- n.SetClass(cl)
+ n.Class_ = cl
return n
}
inp := []*ir.Name{
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package ssagen
import (
+ "container/heap"
+ "fmt"
+
"cmd/compile/internal/ir"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/src"
- "container/heap"
- "fmt"
)
// This file contains the algorithm to place phi nodes in a function.
const debugPhi = false
-// FwdRefAux wraps an arbitrary ir.Node as an ssa.Aux for use with OpFwdref.
-type FwdRefAux struct {
+// fwdRefAux wraps an arbitrary ir.Node as an ssa.Aux for use with OpFwdref.
+type fwdRefAux struct {
_ [0]func() // ensure ir.Node isn't compared for equality
N ir.Node
}
-func (FwdRefAux) CanBeAnSSAAux() {}
+func (fwdRefAux) CanBeAnSSAAux() {}
// insertPhis finds all the places in the function where a phi is
// necessary and inserts them.
if v.Op != ssa.OpFwdRef {
continue
}
- var_ := v.Aux.(FwdRefAux).N
+ var_ := v.Aux.(fwdRefAux).N
// Optimization: look back 1 block for the definition.
if len(b.Preds) == 1 {
if v.Op != ssa.OpFwdRef {
continue
}
- n := s.varnum[v.Aux.(FwdRefAux).N]
+ n := s.varnum[v.Aux.(fwdRefAux).N]
v.Op = ssa.OpCopy
v.Aux = nil
v.AddArg(values[n])
continue
}
s.fwdrefs = append(s.fwdrefs, v)
- var_ := v.Aux.(FwdRefAux).N
+ var_ := v.Aux.(fwdRefAux).N
if _, ok := s.defvars[b.ID][var_]; !ok {
s.defvars[b.ID][var_] = v // treat FwdDefs as definitions.
}
v := s.fwdrefs[len(s.fwdrefs)-1]
s.fwdrefs = s.fwdrefs[:len(s.fwdrefs)-1]
b := v.Block
- var_ := v.Aux.(FwdRefAux).N
+ var_ := v.Aux.(fwdRefAux).N
if b == s.f.Entry {
// No variable should be live at entry.
s.s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, var_, v)
}
}
// Generate a FwdRef for the variable and return that.
- v := b.NewValue0A(line, ssa.OpFwdRef, t, FwdRefAux{N: var_})
+ v := b.NewValue0A(line, ssa.OpFwdRef, t, fwdRefAux{N: var_})
s.defvars[b.ID][var_] = v
if var_.Op() == ir.ONAME {
s.s.addNamedValue(var_.(*ir.Name), v)
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package ssagen
import (
+ "bufio"
+ "bytes"
"encoding/binary"
"fmt"
"go/constant"
"sort"
"strings"
- "bufio"
- "bytes"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/liveness"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/reflectdata"
"cmd/compile/internal/ssa"
+ "cmd/compile/internal/staticdata"
+ "cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/x86"
var ssaDumpCFG string // generate CFGs for these phases
const ssaDumpFile = "ssa.html"
-// The max number of defers in a function using open-coded defers. We enforce this
-// limit because the deferBits bitmask is currently a single byte (to minimize code size)
-const maxOpenDefers = 8
-
// ssaDumpInlined holds all inlined functions when ssaDump contains a function name.
var ssaDumpInlined []*ir.Func
-func ssaDumpInline(fn *ir.Func) {
+func DumpInline(fn *ir.Func) {
if ssaDump != "" && ssaDump == ir.FuncName(fn) {
ssaDumpInlined = append(ssaDumpInlined, fn)
}
}
-func initSSAEnv() {
+func InitEnv() {
ssaDump = os.Getenv("GOSSAFUNC")
ssaDir = os.Getenv("GOSSADIR")
if ssaDump != "" {
}
}
-func initssaconfig() {
+func InitConfig() {
types_ := ssa.NewTypes()
- if thearch.SoftFloat {
+ if Arch.SoftFloat {
softfloatInit()
}
_ = types.NewPtr(types.Types[types.TINT64]) // *int64
_ = types.NewPtr(types.ErrorType) // *error
types.NewPtrCacheEnabled = false
- ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, *types_, base.Ctxt, base.Flag.N == 0)
- ssaConfig.SoftFloat = thearch.SoftFloat
+ ssaConfig = ssa.NewConfig(base.Ctxt.Arch.Name, *types_, base.Ctxt, base.Flag.N == 0)
+ ssaConfig.SoftFloat = Arch.SoftFloat
ssaConfig.Race = base.Flag.Race
ssaCaches = make([]ssa.Cache, base.Flag.LowerC)
// Set up some runtime functions we'll need to call.
- assertE2I = sysfunc("assertE2I")
- assertE2I2 = sysfunc("assertE2I2")
- assertI2I = sysfunc("assertI2I")
- assertI2I2 = sysfunc("assertI2I2")
- deferproc = sysfunc("deferproc")
- deferprocStack = sysfunc("deferprocStack")
- Deferreturn = sysfunc("deferreturn")
- Duffcopy = sysfunc("duffcopy")
- Duffzero = sysfunc("duffzero")
- gcWriteBarrier = sysfunc("gcWriteBarrier")
- goschedguarded = sysfunc("goschedguarded")
- growslice = sysfunc("growslice")
- msanread = sysfunc("msanread")
- msanwrite = sysfunc("msanwrite")
- msanmove = sysfunc("msanmove")
- newobject = sysfunc("newobject")
- newproc = sysfunc("newproc")
- panicdivide = sysfunc("panicdivide")
- panicdottypeE = sysfunc("panicdottypeE")
- panicdottypeI = sysfunc("panicdottypeI")
- panicnildottype = sysfunc("panicnildottype")
- panicoverflow = sysfunc("panicoverflow")
- panicshift = sysfunc("panicshift")
- raceread = sysfunc("raceread")
- racereadrange = sysfunc("racereadrange")
- racewrite = sysfunc("racewrite")
- racewriterange = sysfunc("racewriterange")
- x86HasPOPCNT = sysvar("x86HasPOPCNT") // bool
- x86HasSSE41 = sysvar("x86HasSSE41") // bool
- x86HasFMA = sysvar("x86HasFMA") // bool
- armHasVFPv4 = sysvar("armHasVFPv4") // bool
- arm64HasATOMICS = sysvar("arm64HasATOMICS") // bool
- typedmemclr = sysfunc("typedmemclr")
- typedmemmove = sysfunc("typedmemmove")
- Udiv = sysvar("udiv") // asm func with special ABI
- writeBarrier = sysvar("writeBarrier") // struct { bool; ... }
- zerobaseSym = sysvar("zerobase")
+ ir.Syms.AssertE2I = typecheck.LookupRuntimeFunc("assertE2I")
+ ir.Syms.AssertE2I2 = typecheck.LookupRuntimeFunc("assertE2I2")
+ ir.Syms.AssertI2I = typecheck.LookupRuntimeFunc("assertI2I")
+ ir.Syms.AssertI2I2 = typecheck.LookupRuntimeFunc("assertI2I2")
+ ir.Syms.Deferproc = typecheck.LookupRuntimeFunc("deferproc")
+ ir.Syms.DeferprocStack = typecheck.LookupRuntimeFunc("deferprocStack")
+ ir.Syms.Deferreturn = typecheck.LookupRuntimeFunc("deferreturn")
+ ir.Syms.Duffcopy = typecheck.LookupRuntimeFunc("duffcopy")
+ ir.Syms.Duffzero = typecheck.LookupRuntimeFunc("duffzero")
+ ir.Syms.GCWriteBarrier = typecheck.LookupRuntimeFunc("gcWriteBarrier")
+ ir.Syms.Goschedguarded = typecheck.LookupRuntimeFunc("goschedguarded")
+ ir.Syms.Growslice = typecheck.LookupRuntimeFunc("growslice")
+ ir.Syms.Msanread = typecheck.LookupRuntimeFunc("msanread")
+ ir.Syms.Msanwrite = typecheck.LookupRuntimeFunc("msanwrite")
+ ir.Syms.Msanmove = typecheck.LookupRuntimeFunc("msanmove")
+ ir.Syms.Newobject = typecheck.LookupRuntimeFunc("newobject")
+ ir.Syms.Newproc = typecheck.LookupRuntimeFunc("newproc")
+ ir.Syms.Panicdivide = typecheck.LookupRuntimeFunc("panicdivide")
+ ir.Syms.PanicdottypeE = typecheck.LookupRuntimeFunc("panicdottypeE")
+ ir.Syms.PanicdottypeI = typecheck.LookupRuntimeFunc("panicdottypeI")
+ ir.Syms.Panicnildottype = typecheck.LookupRuntimeFunc("panicnildottype")
+ ir.Syms.Panicoverflow = typecheck.LookupRuntimeFunc("panicoverflow")
+ ir.Syms.Panicshift = typecheck.LookupRuntimeFunc("panicshift")
+ ir.Syms.Raceread = typecheck.LookupRuntimeFunc("raceread")
+ ir.Syms.Racereadrange = typecheck.LookupRuntimeFunc("racereadrange")
+ ir.Syms.Racewrite = typecheck.LookupRuntimeFunc("racewrite")
+ ir.Syms.Racewriterange = typecheck.LookupRuntimeFunc("racewriterange")
+ ir.Syms.X86HasPOPCNT = typecheck.LookupRuntimeVar("x86HasPOPCNT") // bool
+ ir.Syms.X86HasSSE41 = typecheck.LookupRuntimeVar("x86HasSSE41") // bool
+ ir.Syms.X86HasFMA = typecheck.LookupRuntimeVar("x86HasFMA") // bool
+ ir.Syms.ARMHasVFPv4 = typecheck.LookupRuntimeVar("armHasVFPv4") // bool
+ ir.Syms.ARM64HasATOMICS = typecheck.LookupRuntimeVar("arm64HasATOMICS") // bool
+ ir.Syms.Typedmemclr = typecheck.LookupRuntimeFunc("typedmemclr")
+ ir.Syms.Typedmemmove = typecheck.LookupRuntimeFunc("typedmemmove")
+ ir.Syms.Udiv = typecheck.LookupRuntimeVar("udiv") // asm func with special ABI
+ ir.Syms.WriteBarrier = typecheck.LookupRuntimeVar("writeBarrier") // struct { bool; ... }
+ ir.Syms.Zerobase = typecheck.LookupRuntimeVar("zerobase")
// asm funcs with special ABI
- if thearch.LinkArch.Name == "amd64" {
+ if base.Ctxt.Arch.Name == "amd64" {
GCWriteBarrierReg = map[int16]*obj.LSym{
- x86.REG_AX: sysfunc("gcWriteBarrier"),
- x86.REG_CX: sysfunc("gcWriteBarrierCX"),
- x86.REG_DX: sysfunc("gcWriteBarrierDX"),
- x86.REG_BX: sysfunc("gcWriteBarrierBX"),
- x86.REG_BP: sysfunc("gcWriteBarrierBP"),
- x86.REG_SI: sysfunc("gcWriteBarrierSI"),
- x86.REG_R8: sysfunc("gcWriteBarrierR8"),
- x86.REG_R9: sysfunc("gcWriteBarrierR9"),
- }
- }
-
- if thearch.LinkArch.Family == sys.Wasm {
- BoundsCheckFunc[ssa.BoundsIndex] = sysfunc("goPanicIndex")
- BoundsCheckFunc[ssa.BoundsIndexU] = sysfunc("goPanicIndexU")
- BoundsCheckFunc[ssa.BoundsSliceAlen] = sysfunc("goPanicSliceAlen")
- BoundsCheckFunc[ssa.BoundsSliceAlenU] = sysfunc("goPanicSliceAlenU")
- BoundsCheckFunc[ssa.BoundsSliceAcap] = sysfunc("goPanicSliceAcap")
- BoundsCheckFunc[ssa.BoundsSliceAcapU] = sysfunc("goPanicSliceAcapU")
- BoundsCheckFunc[ssa.BoundsSliceB] = sysfunc("goPanicSliceB")
- BoundsCheckFunc[ssa.BoundsSliceBU] = sysfunc("goPanicSliceBU")
- BoundsCheckFunc[ssa.BoundsSlice3Alen] = sysfunc("goPanicSlice3Alen")
- BoundsCheckFunc[ssa.BoundsSlice3AlenU] = sysfunc("goPanicSlice3AlenU")
- BoundsCheckFunc[ssa.BoundsSlice3Acap] = sysfunc("goPanicSlice3Acap")
- BoundsCheckFunc[ssa.BoundsSlice3AcapU] = sysfunc("goPanicSlice3AcapU")
- BoundsCheckFunc[ssa.BoundsSlice3B] = sysfunc("goPanicSlice3B")
- BoundsCheckFunc[ssa.BoundsSlice3BU] = sysfunc("goPanicSlice3BU")
- BoundsCheckFunc[ssa.BoundsSlice3C] = sysfunc("goPanicSlice3C")
- BoundsCheckFunc[ssa.BoundsSlice3CU] = sysfunc("goPanicSlice3CU")
+ x86.REG_AX: typecheck.LookupRuntimeFunc("gcWriteBarrier"),
+ x86.REG_CX: typecheck.LookupRuntimeFunc("gcWriteBarrierCX"),
+ x86.REG_DX: typecheck.LookupRuntimeFunc("gcWriteBarrierDX"),
+ x86.REG_BX: typecheck.LookupRuntimeFunc("gcWriteBarrierBX"),
+ x86.REG_BP: typecheck.LookupRuntimeFunc("gcWriteBarrierBP"),
+ x86.REG_SI: typecheck.LookupRuntimeFunc("gcWriteBarrierSI"),
+ x86.REG_R8: typecheck.LookupRuntimeFunc("gcWriteBarrierR8"),
+ x86.REG_R9: typecheck.LookupRuntimeFunc("gcWriteBarrierR9"),
+ }
+ }
+
+ if Arch.LinkArch.Family == sys.Wasm {
+ BoundsCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeFunc("goPanicIndex")
+ BoundsCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeFunc("goPanicIndexU")
+ BoundsCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeFunc("goPanicSliceAlen")
+ BoundsCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeFunc("goPanicSliceAlenU")
+ BoundsCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeFunc("goPanicSliceAcap")
+ BoundsCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeFunc("goPanicSliceAcapU")
+ BoundsCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeFunc("goPanicSliceB")
+ BoundsCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeFunc("goPanicSliceBU")
+ BoundsCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeFunc("goPanicSlice3Alen")
+ BoundsCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeFunc("goPanicSlice3AlenU")
+ BoundsCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeFunc("goPanicSlice3Acap")
+ BoundsCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeFunc("goPanicSlice3AcapU")
+ BoundsCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeFunc("goPanicSlice3B")
+ BoundsCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeFunc("goPanicSlice3BU")
+ BoundsCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeFunc("goPanicSlice3C")
+ BoundsCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeFunc("goPanicSlice3CU")
} else {
- BoundsCheckFunc[ssa.BoundsIndex] = sysfunc("panicIndex")
- BoundsCheckFunc[ssa.BoundsIndexU] = sysfunc("panicIndexU")
- BoundsCheckFunc[ssa.BoundsSliceAlen] = sysfunc("panicSliceAlen")
- BoundsCheckFunc[ssa.BoundsSliceAlenU] = sysfunc("panicSliceAlenU")
- BoundsCheckFunc[ssa.BoundsSliceAcap] = sysfunc("panicSliceAcap")
- BoundsCheckFunc[ssa.BoundsSliceAcapU] = sysfunc("panicSliceAcapU")
- BoundsCheckFunc[ssa.BoundsSliceB] = sysfunc("panicSliceB")
- BoundsCheckFunc[ssa.BoundsSliceBU] = sysfunc("panicSliceBU")
- BoundsCheckFunc[ssa.BoundsSlice3Alen] = sysfunc("panicSlice3Alen")
- BoundsCheckFunc[ssa.BoundsSlice3AlenU] = sysfunc("panicSlice3AlenU")
- BoundsCheckFunc[ssa.BoundsSlice3Acap] = sysfunc("panicSlice3Acap")
- BoundsCheckFunc[ssa.BoundsSlice3AcapU] = sysfunc("panicSlice3AcapU")
- BoundsCheckFunc[ssa.BoundsSlice3B] = sysfunc("panicSlice3B")
- BoundsCheckFunc[ssa.BoundsSlice3BU] = sysfunc("panicSlice3BU")
- BoundsCheckFunc[ssa.BoundsSlice3C] = sysfunc("panicSlice3C")
- BoundsCheckFunc[ssa.BoundsSlice3CU] = sysfunc("panicSlice3CU")
- }
- if thearch.LinkArch.PtrSize == 4 {
- ExtendCheckFunc[ssa.BoundsIndex] = sysvar("panicExtendIndex")
- ExtendCheckFunc[ssa.BoundsIndexU] = sysvar("panicExtendIndexU")
- ExtendCheckFunc[ssa.BoundsSliceAlen] = sysvar("panicExtendSliceAlen")
- ExtendCheckFunc[ssa.BoundsSliceAlenU] = sysvar("panicExtendSliceAlenU")
- ExtendCheckFunc[ssa.BoundsSliceAcap] = sysvar("panicExtendSliceAcap")
- ExtendCheckFunc[ssa.BoundsSliceAcapU] = sysvar("panicExtendSliceAcapU")
- ExtendCheckFunc[ssa.BoundsSliceB] = sysvar("panicExtendSliceB")
- ExtendCheckFunc[ssa.BoundsSliceBU] = sysvar("panicExtendSliceBU")
- ExtendCheckFunc[ssa.BoundsSlice3Alen] = sysvar("panicExtendSlice3Alen")
- ExtendCheckFunc[ssa.BoundsSlice3AlenU] = sysvar("panicExtendSlice3AlenU")
- ExtendCheckFunc[ssa.BoundsSlice3Acap] = sysvar("panicExtendSlice3Acap")
- ExtendCheckFunc[ssa.BoundsSlice3AcapU] = sysvar("panicExtendSlice3AcapU")
- ExtendCheckFunc[ssa.BoundsSlice3B] = sysvar("panicExtendSlice3B")
- ExtendCheckFunc[ssa.BoundsSlice3BU] = sysvar("panicExtendSlice3BU")
- ExtendCheckFunc[ssa.BoundsSlice3C] = sysvar("panicExtendSlice3C")
- ExtendCheckFunc[ssa.BoundsSlice3CU] = sysvar("panicExtendSlice3CU")
+ BoundsCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeFunc("panicIndex")
+ BoundsCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeFunc("panicIndexU")
+ BoundsCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeFunc("panicSliceAlen")
+ BoundsCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeFunc("panicSliceAlenU")
+ BoundsCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeFunc("panicSliceAcap")
+ BoundsCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeFunc("panicSliceAcapU")
+ BoundsCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeFunc("panicSliceB")
+ BoundsCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeFunc("panicSliceBU")
+ BoundsCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeFunc("panicSlice3Alen")
+ BoundsCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeFunc("panicSlice3AlenU")
+ BoundsCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeFunc("panicSlice3Acap")
+ BoundsCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeFunc("panicSlice3AcapU")
+ BoundsCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeFunc("panicSlice3B")
+ BoundsCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeFunc("panicSlice3BU")
+ BoundsCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeFunc("panicSlice3C")
+ BoundsCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeFunc("panicSlice3CU")
+ }
+ if Arch.LinkArch.PtrSize == 4 {
+ ExtendCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeVar("panicExtendIndex")
+ ExtendCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeVar("panicExtendIndexU")
+ ExtendCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeVar("panicExtendSliceAlen")
+ ExtendCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeVar("panicExtendSliceAlenU")
+ ExtendCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeVar("panicExtendSliceAcap")
+ ExtendCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeVar("panicExtendSliceAcapU")
+ ExtendCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeVar("panicExtendSliceB")
+ ExtendCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeVar("panicExtendSliceBU")
+ ExtendCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeVar("panicExtendSlice3Alen")
+ ExtendCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeVar("panicExtendSlice3AlenU")
+ ExtendCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeVar("panicExtendSlice3Acap")
+ ExtendCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeVar("panicExtendSlice3AcapU")
+ ExtendCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeVar("panicExtendSlice3B")
+ ExtendCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeVar("panicExtendSlice3BU")
+ ExtendCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeVar("panicExtendSlice3C")
+ ExtendCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeVar("panicExtendSlice3CU")
}
// Wasm (all asm funcs with special ABIs)
- WasmMove = sysvar("wasmMove")
- WasmZero = sysvar("wasmZero")
- WasmDiv = sysvar("wasmDiv")
- WasmTruncS = sysvar("wasmTruncS")
- WasmTruncU = sysvar("wasmTruncU")
- SigPanic = sysfunc("sigpanic")
+ ir.Syms.WasmMove = typecheck.LookupRuntimeVar("wasmMove")
+ ir.Syms.WasmZero = typecheck.LookupRuntimeVar("wasmZero")
+ ir.Syms.WasmDiv = typecheck.LookupRuntimeVar("wasmDiv")
+ ir.Syms.WasmTruncS = typecheck.LookupRuntimeVar("wasmTruncS")
+ ir.Syms.WasmTruncU = typecheck.LookupRuntimeVar("wasmTruncU")
+ ir.Syms.SigPanic = typecheck.LookupRuntimeFunc("sigpanic")
}
// getParam returns the Field of ith param of node n (which is a
// considered as the 0th parameter. This does not include the receiver of an
// interface call.
func getParam(n *ir.CallExpr, i int) *types.Field {
- t := n.Left().Type()
+ t := n.X.Type()
if n.Op() == ir.OCALLMETH {
if i == 0 {
return t.Recv()
panic(fmt.Sprintf("dvarint: bad offset for funcdata - %v", v))
}
if v < 1<<7 {
- return duint8(x, off, uint8(v))
+ return objw.Uint8(x, off, uint8(v))
}
- off = duint8(x, off, uint8((v&127)|128))
+ off = objw.Uint8(x, off, uint8((v&127)|128))
if v < 1<<14 {
- return duint8(x, off, uint8(v>>7))
+ return objw.Uint8(x, off, uint8(v>>7))
}
- off = duint8(x, off, uint8(((v>>7)&127)|128))
+ off = objw.Uint8(x, off, uint8(((v>>7)&127)|128))
if v < 1<<21 {
- return duint8(x, off, uint8(v>>14))
+ return objw.Uint8(x, off, uint8(v>>14))
}
- off = duint8(x, off, uint8(((v>>14)&127)|128))
+ off = objw.Uint8(x, off, uint8(((v>>14)&127)|128))
if v < 1<<28 {
- return duint8(x, off, uint8(v>>21))
+ return objw.Uint8(x, off, uint8(v>>21))
}
- off = duint8(x, off, uint8(((v>>21)&127)|128))
- return duint8(x, off, uint8(v>>28))
+ off = objw.Uint8(x, off, uint8(((v>>21)&127)|128))
+ return objw.Uint8(x, off, uint8(v>>28))
}
// emitOpenDeferInfo emits FUNCDATA information about the defers in a function
var maxargsize int64
for i := len(s.openDefers) - 1; i >= 0; i-- {
r := s.openDefers[i]
- argsize := r.n.Left().Type().ArgWidth()
+ argsize := r.n.X.Type().ArgWidth()
if argsize > maxargsize {
maxargsize = argsize
}
// Write in reverse-order, for ease of running in that order at runtime
for i := len(s.openDefers) - 1; i >= 0; i-- {
r := s.openDefers[i]
- off = dvarint(x, off, r.n.Left().Type().ArgWidth())
+ off = dvarint(x, off, r.n.X.Type().ArgWidth())
off = dvarint(x, off, -r.closureNode.FrameOffset())
numArgs := len(r.argNodes)
if r.rcvrNode != nil {
if printssa {
astBuf = &bytes.Buffer{}
ir.FDumpList(astBuf, "buildssa-enter", fn.Enter)
- ir.FDumpList(astBuf, "buildssa-body", fn.Body())
+ ir.FDumpList(astBuf, "buildssa-body", fn.Body)
ir.FDumpList(astBuf, "buildssa-exit", fn.Exit)
if ssaDumpStdout {
fmt.Println("generating SSA for", name)
s.hasOpenDefers = base.Flag.N == 0 && s.hasdefer && !s.curfn.OpenCodedDeferDisallowed()
switch {
- case s.hasOpenDefers && (base.Ctxt.Flag_shared || base.Ctxt.Flag_dynlink) && thearch.LinkArch.Name == "386":
+ case s.hasOpenDefers && (base.Ctxt.Flag_shared || base.Ctxt.Flag_dynlink) && base.Ctxt.Arch.Name == "386":
// Don't support open-coded defers for 386 ONLY when using shared
// libraries, because there is extra code (added by rewriteToUseGot())
// preceding the deferreturn/ret code that is generated by gencallret()
// that we don't track correctly.
s.hasOpenDefers = false
}
- if s.hasOpenDefers && s.curfn.Exit.Len() > 0 {
+ if s.hasOpenDefers && len(s.curfn.Exit) > 0 {
// Skip doing open defers if there is any extra exit code (likely
// copying heap-allocated return values or race detection), since
// we will not generate that code in the case of the extra
// Create the deferBits variable and stack slot. deferBits is a
// bitmask showing which of the open-coded defers in this function
// have been activated.
- deferBitsTemp := tempAt(src.NoXPos, s.curfn, types.Types[types.TUINT8])
+ deferBitsTemp := typecheck.TempAt(src.NoXPos, s.curfn, types.Types[types.TUINT8])
s.deferBitsTemp = deferBitsTemp
// For this value, AuxInt is initialized to zero by default
startDeferBits := s.entryNewValue0(ssa.OpConst8, types.Types[types.TUINT8])
var args []ssa.Param
var results []ssa.Param
for _, n := range fn.Dcl {
- switch n.Class() {
+ switch n.Class_ {
case ir.PPARAM:
s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem)
args = append(args, ssa.Param{Type: n.Type(), Offset: int32(n.FrameOffset())})
case ir.PFUNC:
// local function - already handled by frontend
default:
- s.Fatalf("local variable with class %v unimplemented", n.Class())
+ s.Fatalf("local variable with class %v unimplemented", n.Class_)
}
}
// Populate SSAable arguments.
for _, n := range fn.Dcl {
- if n.Class() == ir.PPARAM && s.canSSA(n) {
+ if n.Class_ == ir.PPARAM && s.canSSA(n) {
v := s.newValue0A(ssa.OpArg, n.Type(), n)
s.vars[n] = v
s.addNamedValue(n, v) // This helps with debugging information, not needed for compilation itself.
// Convert the AST-based IR to the SSA-based IR
s.stmtList(fn.Enter)
- s.stmtList(fn.Body())
+ s.stmtList(fn.Body)
// fallthrough to exit
if s.curBlock != nil {
func (s *state) Debug_checknil() bool { return s.f.Frontend().Debug_checknil() }
func ssaMarker(name string) *ir.Name {
- return NewName(&types.Sym{Name: name})
+ return typecheck.NewName(&types.Sym{Name: name})
}
var (
}
func (s *state) instrument2(t *types.Type, addr, addr2 *ssa.Value, kind instrumentKind) {
- if !s.curfn.Func().InstrumentBody() {
+ if !s.curfn.InstrumentBody() {
return
}
if base.Flag.MSan {
switch kind {
case instrumentRead:
- fn = msanread
+ fn = ir.Syms.Msanread
case instrumentWrite:
- fn = msanwrite
+ fn = ir.Syms.Msanwrite
case instrumentMove:
- fn = msanmove
+ fn = ir.Syms.Msanmove
default:
panic("unreachable")
}
// composites with only one element don't have subobjects, though.
switch kind {
case instrumentRead:
- fn = racereadrange
+ fn = ir.Syms.Racereadrange
case instrumentWrite:
- fn = racewriterange
+ fn = ir.Syms.Racewriterange
default:
panic("unreachable")
}
// address, as any write must write the first byte.
switch kind {
case instrumentRead:
- fn = raceread
+ fn = ir.Syms.Raceread
case instrumentWrite:
- fn = racewrite
+ fn = ir.Syms.Racewrite
default:
panic("unreachable")
}
// stmtList converts the statement list n to SSA and adds it to s.
func (s *state) stmtList(l ir.Nodes) {
- for _, n := range l.Slice() {
+ for _, n := range l {
s.stmt(n)
}
}
switch n.Op() {
case ir.OBLOCK:
- s.stmtList(n.List())
+ n := n.(*ir.BlockStmt)
+ s.stmtList(n.List)
// No-ops
case ir.ODCLCONST, ir.ODCLTYPE, ir.OFALL:
// Expression statements
case ir.OCALLFUNC:
n := n.(*ir.CallExpr)
- if IsIntrinsicCall(n) {
+ if ir.IsIntrinsicCall(n) {
s.intrinsicCall(n)
return
}
case ir.OCALLMETH, ir.OCALLINTER:
n := n.(*ir.CallExpr)
s.callResult(n, callNormal)
- if n.Op() == ir.OCALLFUNC && n.Left().Op() == ir.ONAME && n.Left().(*ir.Name).Class() == ir.PFUNC {
- if fn := n.Left().Sym().Name; base.Flag.CompilingRuntime && fn == "throw" ||
- n.Left().Sym().Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap") {
+ if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.ONAME && n.X.(*ir.Name).Class_ == ir.PFUNC {
+ if fn := n.X.Sym().Name; base.Flag.CompilingRuntime && fn == "throw" ||
+ n.X.Sym().Pkg == ir.Pkgs.Runtime && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap") {
m := s.mem()
b := s.endBlock()
b.Kind = ssa.BlockExit
}
}
case ir.ODEFER:
+ n := n.(*ir.GoDeferStmt)
if base.Debug.Defer > 0 {
var defertype string
if s.hasOpenDefers {
defertype = "open-coded"
- } else if n.Esc() == EscNever {
+ } else if n.Esc() == ir.EscNever {
defertype = "stack-allocated"
} else {
defertype = "heap-allocated"
base.WarnfAt(n.Pos(), "%s defer", defertype)
}
if s.hasOpenDefers {
- s.openDeferRecord(n.Left().(*ir.CallExpr))
+ s.openDeferRecord(n.Call.(*ir.CallExpr))
} else {
d := callDefer
- if n.Esc() == EscNever {
+ if n.Esc() == ir.EscNever {
d = callDeferStack
}
- s.callResult(n.Left().(*ir.CallExpr), d)
+ s.callResult(n.Call.(*ir.CallExpr), d)
}
case ir.OGO:
- s.callResult(n.Left().(*ir.CallExpr), callGo)
+ n := n.(*ir.GoDeferStmt)
+ s.callResult(n.Call.(*ir.CallExpr), callGo)
case ir.OAS2DOTTYPE:
- res, resok := s.dottype(n.Rlist().First().(*ir.TypeAssertExpr), true)
+ n := n.(*ir.AssignListStmt)
+ res, resok := s.dottype(n.Rhs[0].(*ir.TypeAssertExpr), true)
deref := false
- if !canSSAType(n.Rlist().First().Type()) {
+ if !TypeOK(n.Rhs[0].Type()) {
if res.Op != ssa.OpLoad {
s.Fatalf("dottype of non-load")
}
deref = true
res = res.Args[0]
}
- s.assign(n.List().First(), res, deref, 0)
- s.assign(n.List().Second(), resok, false, 0)
+ s.assign(n.Lhs[0], res, deref, 0)
+ s.assign(n.Lhs[1], resok, false, 0)
return
case ir.OAS2FUNC:
// We come here only when it is an intrinsic call returning two values.
- call := n.Rlist().First().(*ir.CallExpr)
- if !IsIntrinsicCall(call) {
+ n := n.(*ir.AssignListStmt)
+ call := n.Rhs[0].(*ir.CallExpr)
+ if !ir.IsIntrinsicCall(call) {
s.Fatalf("non-intrinsic AS2FUNC not expanded %v", call)
}
v := s.intrinsicCall(call)
- v1 := s.newValue1(ssa.OpSelect0, n.List().First().Type(), v)
- v2 := s.newValue1(ssa.OpSelect1, n.List().Second().Type(), v)
- s.assign(n.List().First(), v1, false, 0)
- s.assign(n.List().Second(), v2, false, 0)
+ v1 := s.newValue1(ssa.OpSelect0, n.Lhs[0].Type(), v)
+ v2 := s.newValue1(ssa.OpSelect1, n.Lhs[1].Type(), v)
+ s.assign(n.Lhs[0], v1, false, 0)
+ s.assign(n.Lhs[1], v2, false, 0)
return
case ir.ODCL:
- if n.Left().(*ir.Name).Class() == ir.PAUTOHEAP {
+ n := n.(*ir.Decl)
+ if n.X.(*ir.Name).Class_ == ir.PAUTOHEAP {
s.Fatalf("DCL %v", n)
}
case ir.OLABEL:
- sym := n.Sym()
+ n := n.(*ir.LabelStmt)
+ sym := n.Label
lab := s.label(sym)
// The label might already have a target block via a goto.
s.startBlock(lab.target)
case ir.OGOTO:
- sym := n.Sym()
+ n := n.(*ir.BranchStmt)
+ sym := n.Label
lab := s.label(sym)
if lab.target == nil {
b.AddEdgeTo(lab.target)
case ir.OAS:
- if n.Left() == n.Right() && n.Left().Op() == ir.ONAME {
+ n := n.(*ir.AssignStmt)
+ if n.X == n.Y && n.X.Op() == ir.ONAME {
// An x=x assignment. No point in doing anything
// here. In addition, skipping this assignment
// prevents generating:
}
// Evaluate RHS.
- rhs := n.Right()
+ rhs := n.Y
if rhs != nil {
switch rhs.Op() {
case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT:
// All literals with nonzero fields have already been
// rewritten during walk. Any that remain are just T{}
// or equivalents. Use the zero value.
- if !isZero(rhs) {
+ if !ir.IsZero(rhs) {
s.Fatalf("literal with nonzero value in SSA: %v", rhs)
}
rhs = nil
// Check whether we're writing the result of an append back to the same slice.
// If so, we handle it specially to avoid write barriers on the fast
// (non-growth) path.
- if !samesafeexpr(n.Left(), rhs.List().First()) || base.Flag.N != 0 {
+ if !ir.SameSafeExpr(n.X, rhs.Args[0]) || base.Flag.N != 0 {
break
}
// If the slice can be SSA'd, it'll be on the stack,
// so there will be no write barriers,
// so there's no need to attempt to prevent them.
- if s.canSSA(n.Left()) {
+ if s.canSSA(n.X) {
if base.Debug.Append > 0 { // replicating old diagnostic message
base.WarnfAt(n.Pos(), "append: len-only update (in local slice)")
}
}
}
- if ir.IsBlank(n.Left()) {
+ if ir.IsBlank(n.X) {
// _ = rhs
// Just evaluate rhs for side-effects.
if rhs != nil {
}
var t *types.Type
- if n.Right() != nil {
- t = n.Right().Type()
+ if n.Y != nil {
+ t = n.Y.Type()
} else {
- t = n.Left().Type()
+ t = n.X.Type()
}
var r *ssa.Value
- deref := !canSSAType(t)
+ deref := !TypeOK(t)
if deref {
if rhs == nil {
r = nil // Signal assign to use OpZero.
}
var skip skipMask
- if rhs != nil && (rhs.Op() == ir.OSLICE || rhs.Op() == ir.OSLICE3 || rhs.Op() == ir.OSLICESTR) && samesafeexpr(rhs.(*ir.SliceExpr).Left(), n.Left()) {
+ if rhs != nil && (rhs.Op() == ir.OSLICE || rhs.Op() == ir.OSLICE3 || rhs.Op() == ir.OSLICESTR) && ir.SameSafeExpr(rhs.(*ir.SliceExpr).X, n.X) {
// We're assigning a slicing operation back to its source.
// Don't write back fields we aren't changing. See issue #14855.
+ rhs := rhs.(*ir.SliceExpr)
i, j, k := rhs.SliceBounds()
if i != nil && (i.Op() == ir.OLITERAL && i.Val().Kind() == constant.Int && ir.Int64Val(i) == 0) {
// [0:...] is the same as [:...]
}
}
- s.assign(n.Left(), r, deref, skip)
+ s.assign(n.X, r, deref, skip)
case ir.OIF:
- if ir.IsConst(n.Left(), constant.Bool) {
- s.stmtList(n.Left().Init())
- if ir.BoolVal(n.Left()) {
- s.stmtList(n.Body())
+ n := n.(*ir.IfStmt)
+ if ir.IsConst(n.Cond, constant.Bool) {
+ s.stmtList(n.Cond.Init())
+ if ir.BoolVal(n.Cond) {
+ s.stmtList(n.Body)
} else {
- s.stmtList(n.Rlist())
+ s.stmtList(n.Else)
}
break
}
bEnd := s.f.NewBlock(ssa.BlockPlain)
var likely int8
- if n.Likely() {
+ if n.Likely {
likely = 1
}
var bThen *ssa.Block
- if n.Body().Len() != 0 {
+ if len(n.Body) != 0 {
bThen = s.f.NewBlock(ssa.BlockPlain)
} else {
bThen = bEnd
}
var bElse *ssa.Block
- if n.Rlist().Len() != 0 {
+ if len(n.Else) != 0 {
bElse = s.f.NewBlock(ssa.BlockPlain)
} else {
bElse = bEnd
}
- s.condBranch(n.Left(), bThen, bElse, likely)
+ s.condBranch(n.Cond, bThen, bElse, likely)
- if n.Body().Len() != 0 {
+ if len(n.Body) != 0 {
s.startBlock(bThen)
- s.stmtList(n.Body())
+ s.stmtList(n.Body)
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bEnd)
}
}
- if n.Rlist().Len() != 0 {
+ if len(n.Else) != 0 {
s.startBlock(bElse)
- s.stmtList(n.Rlist())
+ s.stmtList(n.Else)
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bEnd)
}
s.startBlock(bEnd)
case ir.ORETURN:
- s.stmtList(n.List())
+ n := n.(*ir.ReturnStmt)
+ s.stmtList(n.Results)
b := s.exit()
b.Pos = s.lastPos.WithIsStmt()
case ir.ORETJMP:
+ n := n.(*ir.BranchStmt)
b := s.exit()
b.Kind = ssa.BlockRetJmp // override BlockRet
- b.Aux = callTargetLSym(n.Sym(), s.curfn.LSym)
+ b.Aux = callTargetLSym(n.Label, s.curfn.LSym)
case ir.OCONTINUE, ir.OBREAK:
+ n := n.(*ir.BranchStmt)
var to *ssa.Block
- if n.Sym() == nil {
+ if n.Label == nil {
// plain break/continue
switch n.Op() {
case ir.OCONTINUE:
}
} else {
// labeled break/continue; look up the target
- sym := n.Sym()
+ sym := n.Label
lab := s.label(sym)
switch n.Op() {
case ir.OCONTINUE:
//
// OFORUNTIL: for Ninit; Left; Right; List { Nbody }
// => body: { Nbody }; incr: Right; if Left { lateincr: List; goto body }; end:
+ n := n.(*ir.ForStmt)
bCond := s.f.NewBlock(ssa.BlockPlain)
bBody := s.f.NewBlock(ssa.BlockPlain)
bIncr := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bCond)
// generate code to test condition
s.startBlock(bCond)
- if n.Left() != nil {
- s.condBranch(n.Left(), bBody, bEnd, 1)
+ if n.Cond != nil {
+ s.condBranch(n.Cond, bBody, bEnd, 1)
} else {
b := s.endBlock()
b.Kind = ssa.BlockPlain
s.continueTo = bIncr
s.breakTo = bEnd
var lab *ssaLabel
- if sym := n.Sym(); sym != nil {
+ if sym := n.Label; sym != nil {
// labeled for loop
lab = s.label(sym)
lab.continueTarget = bIncr
// generate body
s.startBlock(bBody)
- s.stmtList(n.Body())
+ s.stmtList(n.Body)
// tear down continue/break
s.continueTo = prevContinue
// generate incr (and, for OFORUNTIL, condition)
s.startBlock(bIncr)
- if n.Right() != nil {
- s.stmt(n.Right())
+ if n.Post != nil {
+ s.stmt(n.Post)
}
if n.Op() == ir.OFOR {
if b := s.endBlock(); b != nil {
// bCond is unused in OFORUNTIL, so repurpose it.
bLateIncr := bCond
// test condition
- s.condBranch(n.Left(), bLateIncr, bEnd, 1)
+ s.condBranch(n.Cond, bLateIncr, bEnd, 1)
// generate late increment
s.startBlock(bLateIncr)
- s.stmtList(n.List())
+ s.stmtList(n.Late)
s.endBlock().AddEdgeTo(bBody)
}
var body ir.Nodes
if n.Op() == ir.OSWITCH {
n := n.(*ir.SwitchStmt)
- sym = n.Sym()
- body = n.Body()
+ sym = n.Label
+ body = n.Compiled
} else {
n := n.(*ir.SelectStmt)
- sym = n.Sym()
- body = n.Body()
+ sym = n.Label
+ body = n.Compiled
}
var lab *ssaLabel
s.startBlock(bEnd)
case ir.OVARDEF:
- if !s.canSSA(n.Left()) {
- s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.Left().(*ir.Name), s.mem(), false)
+ n := n.(*ir.UnaryExpr)
+ if !s.canSSA(n.X) {
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.X.(*ir.Name), s.mem(), false)
}
case ir.OVARKILL:
// Insert a varkill op to record that a variable is no longer live.
// We only care about liveness info at call sites, so putting the
// varkill in the store chain is enough to keep it correctly ordered
// with respect to call ops.
- if !s.canSSA(n.Left()) {
- s.vars[memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.Left().(*ir.Name), s.mem(), false)
+ n := n.(*ir.UnaryExpr)
+ if !s.canSSA(n.X) {
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.X.(*ir.Name), s.mem(), false)
}
case ir.OVARLIVE:
// Insert a varlive op to record that a variable is still live.
- v := n.Left().(*ir.Name)
+ n := n.(*ir.UnaryExpr)
+ v := n.X.(*ir.Name)
if !v.Addrtaken() {
s.Fatalf("VARLIVE variable %v must have Addrtaken set", v)
}
- switch v.Class() {
+ switch v.Class_ {
case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT:
default:
s.Fatalf("VARLIVE variable %v must be Auto or Arg", v)
s.vars[memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, v, s.mem())
case ir.OCHECKNIL:
- p := s.expr(n.Left())
+ n := n.(*ir.UnaryExpr)
+ p := s.expr(n.X)
s.nilCheck(p)
case ir.OINLMARK:
- s.newValue1I(ssa.OpInlMark, types.TypeVoid, n.Offset(), s.mem())
+ n := n.(*ir.InlineMarkStmt)
+ s.newValue1I(ssa.OpInlMark, types.TypeVoid, n.Index, s.mem())
default:
s.Fatalf("unhandled stmt %v", n.Op())
}
s.openDeferExit()
} else {
- s.rtcall(Deferreturn, true, nil)
+ s.rtcall(ir.Syms.Deferreturn, true, nil)
}
}
return x
}
-func floatForComplex(t *types.Type) *types.Type {
- switch t.Kind() {
- case types.TCOMPLEX64:
- return types.Types[types.TFLOAT32]
- case types.TCOMPLEX128:
- return types.Types[types.TFLOAT64]
- }
- base.Fatalf("unexpected type: %v", t)
- return nil
-}
-
-func complexForFloat(t *types.Type) *types.Type {
- switch t.Kind() {
- case types.TFLOAT32:
- return types.Types[types.TCOMPLEX64]
- case types.TFLOAT64:
- return types.Types[types.TCOMPLEX128]
- }
- base.Fatalf("unexpected type: %v", t)
- return nil
-}
-
type opAndTwoTypes struct {
op ir.Op
etype1 types.Kind
// expr converts the expression n to ssa, adds it to s and returns the ssa result.
func (s *state) expr(n ir.Node) *ssa.Value {
- if hasUniquePos(n) {
+ if ir.HasUniquePos(n) {
// ONAMEs and named OLITERALs have the line number
// of the decl, not the use. See issue 14742.
s.pushLine(n.Pos())
s.stmtList(n.Init())
switch n.Op() {
case ir.OBYTES2STRTMP:
- slice := s.expr(n.Left())
+ n := n.(*ir.ConvExpr)
+ slice := s.expr(n.X)
ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice)
return s.newValue2(ssa.OpStringMake, n.Type(), ptr, len)
case ir.OSTR2BYTESTMP:
- str := s.expr(n.Left())
+ n := n.(*ir.ConvExpr)
+ str := s.expr(n.X)
ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str)
len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], str)
return s.newValue3(ssa.OpSliceMake, n.Type(), ptr, len, len)
case ir.OCFUNC:
- aux := n.Left().Sym().Linksym()
+ n := n.(*ir.UnaryExpr)
+ aux := n.X.Sym().Linksym()
return s.entryNewValue1A(ssa.OpAddr, n.Type(), aux, s.sb)
case ir.OMETHEXPR:
n := n.(*ir.MethodExpr)
- sym := funcsym(n.FuncName().Sym()).Linksym()
+ sym := staticdata.FuncSym(n.FuncName().Sym()).Linksym()
return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type()), sym, s.sb)
case ir.ONAME:
- if n.Class() == ir.PFUNC {
+ n := n.(*ir.Name)
+ if n.Class_ == ir.PFUNC {
// "value" of a function is the address of the function's closure
- sym := funcsym(n.Sym()).Linksym()
+ sym := staticdata.FuncSym(n.Sym()).Linksym()
return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type()), sym, s.sb)
}
if s.canSSA(n) {
return s.load(n.Type(), addr)
case ir.ONAMEOFFSET:
n := n.(*ir.NameOffsetExpr)
- if s.canSSAName(n.Name_) && canSSAType(n.Type()) {
+ if s.canSSAName(n.Name_) && TypeOK(n.Type()) {
return s.variable(n, n.Type())
}
addr := s.addr(n)
addr := s.addr(n)
return s.load(n.Type(), addr)
case ir.ONIL:
+ n := n.(*ir.NilExpr)
t := n.Type()
switch {
case t.IsSlice():
return nil
}
case ir.OCONVNOP:
+ n := n.(*ir.ConvExpr)
to := n.Type()
- from := n.Left().Type()
+ from := n.X.Type()
// Assume everything will work out, so set up our return value.
// Anything interesting that happens from here is a fatal.
- x := s.expr(n.Left())
+ x := s.expr(n.X)
if to == from {
return x
}
return v
}
- dowidth(from)
- dowidth(to)
+ types.CalcSize(from)
+ types.CalcSize(to)
if from.Width != to.Width {
s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width)
return nil
return nil
}
- if instrumenting {
+ if base.Flag.Cfg.Instrumenting {
// These appear to be fine, but they fail the
// integer constraint below, so okay them here.
// Sample non-integer conversion: map[string]string -> *uint8
return v
case ir.OCONV:
- x := s.expr(n.Left())
- ft := n.Left().Type() // from type
- tt := n.Type() // to type
+ n := n.(*ir.ConvExpr)
+ x := s.expr(n.X)
+ ft := n.X.Type() // from type
+ tt := n.Type() // to type
if ft.IsBoolean() && tt.IsKind(types.TUINT8) {
// Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
return s.newValue1(ssa.OpCopy, n.Type(), x)
if ft.IsFloat() || tt.IsFloat() {
conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
- if s.config.RegSize == 4 && thearch.LinkArch.Family != sys.MIPS && !s.softFloat {
+ if s.config.RegSize == 4 && Arch.LinkArch.Family != sys.MIPS && !s.softFloat {
if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
conv = conv1
}
}
- if thearch.LinkArch.Family == sys.ARM64 || thearch.LinkArch.Family == sys.Wasm || thearch.LinkArch.Family == sys.S390X || s.softFloat {
+ if Arch.LinkArch.Family == sys.ARM64 || Arch.LinkArch.Family == sys.Wasm || Arch.LinkArch.Family == sys.S390X || s.softFloat {
if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
conv = conv1
}
}
- if thearch.LinkArch.Family == sys.MIPS && !s.softFloat {
+ if Arch.LinkArch.Family == sys.MIPS && !s.softFloat {
if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {
// tt is float32 or float64, and ft is also unsigned
if tt.Size() == 4 {
} else {
s.Fatalf("weird complex conversion %v -> %v", ft, tt)
}
- ftp := floatForComplex(ft)
- ttp := floatForComplex(tt)
+ ftp := types.FloatForComplex(ft)
+ ttp := types.FloatForComplex(tt)
return s.newValue2(ssa.OpComplexMake, tt,
s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)),
s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x)))
}
- s.Fatalf("unhandled OCONV %s -> %s", n.Left().Type().Kind(), n.Type().Kind())
+ s.Fatalf("unhandled OCONV %s -> %s", n.X.Type().Kind(), n.Type().Kind())
return nil
case ir.ODOTTYPE:
// binary ops
case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
- a := s.expr(n.Left())
- b := s.expr(n.Right())
- if n.Left().Type().IsComplex() {
- pt := floatForComplex(n.Left().Type())
+ n := n.(*ir.BinaryExpr)
+ a := s.expr(n.X)
+ b := s.expr(n.Y)
+ if n.X.Type().IsComplex() {
+ pt := types.FloatForComplex(n.X.Type())
op := s.ssaOp(ir.OEQ, pt)
r := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
i := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
case ir.OGT:
op, a, b = ir.OLT, b, a
}
- if n.Left().Type().IsFloat() {
+ if n.X.Type().IsFloat() {
// float comparison
- return s.newValueOrSfCall2(s.ssaOp(op, n.Left().Type()), types.Types[types.TBOOL], a, b)
+ return s.newValueOrSfCall2(s.ssaOp(op, n.X.Type()), types.Types[types.TBOOL], a, b)
}
// integer comparison
- return s.newValue2(s.ssaOp(op, n.Left().Type()), types.Types[types.TBOOL], a, b)
+ return s.newValue2(s.ssaOp(op, n.X.Type()), types.Types[types.TBOOL], a, b)
case ir.OMUL:
- a := s.expr(n.Left())
- b := s.expr(n.Right())
+ n := n.(*ir.BinaryExpr)
+ a := s.expr(n.X)
+ b := s.expr(n.Y)
if n.Type().IsComplex() {
mulop := ssa.OpMul64F
addop := ssa.OpAdd64F
subop := ssa.OpSub64F
- pt := floatForComplex(n.Type()) // Could be Float32 or Float64
- wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error
+ pt := types.FloatForComplex(n.Type()) // Could be Float32 or Float64
+ wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error
areal := s.newValue1(ssa.OpComplexReal, pt, a)
breal := s.newValue1(ssa.OpComplexReal, pt, b)
return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
case ir.ODIV:
- a := s.expr(n.Left())
- b := s.expr(n.Right())
+ n := n.(*ir.BinaryExpr)
+ a := s.expr(n.X)
+ b := s.expr(n.Y)
if n.Type().IsComplex() {
// TODO this is not executed because the front-end substitutes a runtime call.
// That probably ought to change; with modest optimization the widen/narrow
addop := ssa.OpAdd64F
subop := ssa.OpSub64F
divop := ssa.OpDiv64F
- pt := floatForComplex(n.Type()) // Could be Float32 or Float64
- wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error
+ pt := types.FloatForComplex(n.Type()) // Could be Float32 or Float64
+ wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error
areal := s.newValue1(ssa.OpComplexReal, pt, a)
breal := s.newValue1(ssa.OpComplexReal, pt, b)
}
return s.intDivide(n, a, b)
case ir.OMOD:
- a := s.expr(n.Left())
- b := s.expr(n.Right())
+ n := n.(*ir.BinaryExpr)
+ a := s.expr(n.X)
+ b := s.expr(n.Y)
return s.intDivide(n, a, b)
case ir.OADD, ir.OSUB:
- a := s.expr(n.Left())
- b := s.expr(n.Right())
+ n := n.(*ir.BinaryExpr)
+ a := s.expr(n.X)
+ b := s.expr(n.Y)
if n.Type().IsComplex() {
- pt := floatForComplex(n.Type())
+ pt := types.FloatForComplex(n.Type())
op := s.ssaOp(n.Op(), pt)
return s.newValue2(ssa.OpComplexMake, n.Type(),
s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)),
}
return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
case ir.OAND, ir.OOR, ir.OXOR:
- a := s.expr(n.Left())
- b := s.expr(n.Right())
+ n := n.(*ir.BinaryExpr)
+ a := s.expr(n.X)
+ b := s.expr(n.Y)
return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
case ir.OANDNOT:
- a := s.expr(n.Left())
- b := s.expr(n.Right())
+ n := n.(*ir.BinaryExpr)
+ a := s.expr(n.X)
+ b := s.expr(n.Y)
b = s.newValue1(s.ssaOp(ir.OBITNOT, b.Type), b.Type, b)
return s.newValue2(s.ssaOp(ir.OAND, n.Type()), a.Type, a, b)
case ir.OLSH, ir.ORSH:
- a := s.expr(n.Left())
- b := s.expr(n.Right())
+ n := n.(*ir.BinaryExpr)
+ a := s.expr(n.X)
+ b := s.expr(n.Y)
bt := b.Type
if bt.IsSigned() {
cmp := s.newValue2(s.ssaOp(ir.OLE, bt), types.Types[types.TBOOL], s.zeroVal(bt), b)
- s.check(cmp, panicshift)
+ s.check(cmp, ir.Syms.Panicshift)
bt = bt.ToUnsigned()
}
return s.newValue2(s.ssaShiftOp(n.Op(), n.Type(), bt), a.Type, a, b)
// }
// Using var in the subsequent block introduces the
// necessary phi variable.
- el := s.expr(n.Left())
+ n := n.(*ir.LogicalExpr)
+ el := s.expr(n.X)
s.vars[n] = el
b := s.endBlock()
}
s.startBlock(bRight)
- er := s.expr(n.Right())
+ er := s.expr(n.Y)
s.vars[n] = er
b = s.endBlock()
s.startBlock(bResult)
return s.variable(n, types.Types[types.TBOOL])
case ir.OCOMPLEX:
- r := s.expr(n.Left())
- i := s.expr(n.Right())
+ n := n.(*ir.BinaryExpr)
+ r := s.expr(n.X)
+ i := s.expr(n.Y)
return s.newValue2(ssa.OpComplexMake, n.Type(), r, i)
// unary ops
case ir.ONEG:
- a := s.expr(n.Left())
+ n := n.(*ir.UnaryExpr)
+ a := s.expr(n.X)
if n.Type().IsComplex() {
- tp := floatForComplex(n.Type())
+ tp := types.FloatForComplex(n.Type())
negop := s.ssaOp(n.Op(), tp)
return s.newValue2(ssa.OpComplexMake, n.Type(),
s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)),
}
return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a)
case ir.ONOT, ir.OBITNOT:
- a := s.expr(n.Left())
+ n := n.(*ir.UnaryExpr)
+ a := s.expr(n.X)
return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a)
case ir.OIMAG, ir.OREAL:
- a := s.expr(n.Left())
- return s.newValue1(s.ssaOp(n.Op(), n.Left().Type()), n.Type(), a)
+ n := n.(*ir.UnaryExpr)
+ a := s.expr(n.X)
+ return s.newValue1(s.ssaOp(n.Op(), n.X.Type()), n.Type(), a)
case ir.OPLUS:
- return s.expr(n.Left())
+ n := n.(*ir.UnaryExpr)
+ return s.expr(n.X)
case ir.OADDR:
- return s.addr(n.Left())
+ n := n.(*ir.AddrExpr)
+ return s.addr(n.X)
case ir.ORESULT:
+ n := n.(*ir.ResultExpr)
if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall {
// Do the old thing
- addr := s.constOffPtrSP(types.NewPtr(n.Type()), n.Offset())
+ addr := s.constOffPtrSP(types.NewPtr(n.Type()), n.Offset)
return s.rawLoad(n.Type(), addr)
}
- which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Offset())
+ which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Offset)
if which == -1 {
// Do the old thing // TODO: Panic instead.
- addr := s.constOffPtrSP(types.NewPtr(n.Type()), n.Offset())
+ addr := s.constOffPtrSP(types.NewPtr(n.Type()), n.Offset)
return s.rawLoad(n.Type(), addr)
}
- if canSSAType(n.Type()) {
+ if TypeOK(n.Type()) {
return s.newValue1I(ssa.OpSelectN, n.Type(), which, s.prevCall)
} else {
addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(n.Type()), which, s.prevCall)
}
case ir.ODEREF:
- p := s.exprPtr(n.Left(), n.Bounded(), n.Pos())
+ n := n.(*ir.StarExpr)
+ p := s.exprPtr(n.X, n.Bounded(), n.Pos())
return s.load(n.Type(), p)
case ir.ODOT:
n := n.(*ir.SelectorExpr)
- if n.Left().Op() == ir.OSTRUCTLIT {
+ if n.X.Op() == ir.OSTRUCTLIT {
// All literals with nonzero fields have already been
// rewritten during walk. Any that remain are just T{}
// or equivalents. Use the zero value.
- if !isZero(n.Left()) {
- s.Fatalf("literal with nonzero value in SSA: %v", n.Left())
+ if !ir.IsZero(n.X) {
+ s.Fatalf("literal with nonzero value in SSA: %v", n.X)
}
return s.zeroVal(n.Type())
}
// SSA, then load just the selected field. This
// prevents false memory dependencies in race/msan
// instrumentation.
- if islvalue(n) && !s.canSSA(n) {
+ if ir.IsAssignable(n) && !s.canSSA(n) {
p := s.addr(n)
return s.load(n.Type(), p)
}
- v := s.expr(n.Left())
+ v := s.expr(n.X)
return s.newValue1I(ssa.OpStructSelect, n.Type(), int64(fieldIdx(n)), v)
case ir.ODOTPTR:
- p := s.exprPtr(n.Left(), n.Bounded(), n.Pos())
- p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type()), n.Offset(), p)
+ n := n.(*ir.SelectorExpr)
+ p := s.exprPtr(n.X, n.Bounded(), n.Pos())
+ p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type()), n.Offset, p)
return s.load(n.Type(), p)
case ir.OINDEX:
+ n := n.(*ir.IndexExpr)
switch {
- case n.Left().Type().IsString():
- if n.Bounded() && ir.IsConst(n.Left(), constant.String) && ir.IsConst(n.Right(), constant.Int) {
+ case n.X.Type().IsString():
+ if n.Bounded() && ir.IsConst(n.X, constant.String) && ir.IsConst(n.Index, constant.Int) {
// Replace "abc"[1] with 'b'.
// Delayed until now because "abc"[1] is not an ideal constant.
// See test/fixedbugs/issue11370.go.
- return s.newValue0I(ssa.OpConst8, types.Types[types.TUINT8], int64(int8(ir.StringVal(n.Left())[ir.Int64Val(n.Right())])))
+ return s.newValue0I(ssa.OpConst8, types.Types[types.TUINT8], int64(int8(ir.StringVal(n.X)[ir.Int64Val(n.Index)])))
}
- a := s.expr(n.Left())
- i := s.expr(n.Right())
+ a := s.expr(n.X)
+ i := s.expr(n.Index)
len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], a)
i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
ptrtyp := s.f.Config.Types.BytePtr
ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
- if ir.IsConst(n.Right(), constant.Int) {
- ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, ir.Int64Val(n.Right()), ptr)
+ if ir.IsConst(n.Index, constant.Int) {
+ ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, ir.Int64Val(n.Index), ptr)
} else {
ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
}
return s.load(types.Types[types.TUINT8], ptr)
- case n.Left().Type().IsSlice():
+ case n.X.Type().IsSlice():
p := s.addr(n)
- return s.load(n.Left().Type().Elem(), p)
- case n.Left().Type().IsArray():
- if canSSAType(n.Left().Type()) {
+ return s.load(n.X.Type().Elem(), p)
+ case n.X.Type().IsArray():
+ if TypeOK(n.X.Type()) {
// SSA can handle arrays of length at most 1.
- bound := n.Left().Type().NumElem()
- a := s.expr(n.Left())
- i := s.expr(n.Right())
+ bound := n.X.Type().NumElem()
+ a := s.expr(n.X)
+ i := s.expr(n.Index)
if bound == 0 {
// Bounds check will never succeed. Might as well
// use constants for the bounds check.
return s.newValue1I(ssa.OpArraySelect, n.Type(), 0, a)
}
p := s.addr(n)
- return s.load(n.Left().Type().Elem(), p)
+ return s.load(n.X.Type().Elem(), p)
default:
- s.Fatalf("bad type for index %v", n.Left().Type())
+ s.Fatalf("bad type for index %v", n.X.Type())
return nil
}
case ir.OLEN, ir.OCAP:
n := n.(*ir.UnaryExpr)
switch {
- case n.Left().Type().IsSlice():
+ case n.X.Type().IsSlice():
op := ssa.OpSliceLen
if n.Op() == ir.OCAP {
op = ssa.OpSliceCap
}
- return s.newValue1(op, types.Types[types.TINT], s.expr(n.Left()))
- case n.Left().Type().IsString(): // string; not reachable for OCAP
- return s.newValue1(ssa.OpStringLen, types.Types[types.TINT], s.expr(n.Left()))
- case n.Left().Type().IsMap(), n.Left().Type().IsChan():
- return s.referenceTypeBuiltin(n, s.expr(n.Left()))
+ return s.newValue1(op, types.Types[types.TINT], s.expr(n.X))
+ case n.X.Type().IsString(): // string; not reachable for OCAP
+ return s.newValue1(ssa.OpStringLen, types.Types[types.TINT], s.expr(n.X))
+ case n.X.Type().IsMap(), n.X.Type().IsChan():
+ return s.referenceTypeBuiltin(n, s.expr(n.X))
default: // array
- return s.constInt(types.Types[types.TINT], n.Left().Type().NumElem())
+ return s.constInt(types.Types[types.TINT], n.X.Type().NumElem())
}
case ir.OSPTR:
- a := s.expr(n.Left())
- if n.Left().Type().IsSlice() {
+ n := n.(*ir.UnaryExpr)
+ a := s.expr(n.X)
+ if n.X.Type().IsSlice() {
return s.newValue1(ssa.OpSlicePtr, n.Type(), a)
} else {
return s.newValue1(ssa.OpStringPtr, n.Type(), a)
}
case ir.OITAB:
- a := s.expr(n.Left())
+ n := n.(*ir.UnaryExpr)
+ a := s.expr(n.X)
return s.newValue1(ssa.OpITab, n.Type(), a)
case ir.OIDATA:
- a := s.expr(n.Left())
+ n := n.(*ir.UnaryExpr)
+ a := s.expr(n.X)
return s.newValue1(ssa.OpIData, n.Type(), a)
case ir.OEFACE:
- tab := s.expr(n.Left())
- data := s.expr(n.Right())
+ n := n.(*ir.BinaryExpr)
+ tab := s.expr(n.X)
+ data := s.expr(n.Y)
return s.newValue2(ssa.OpIMake, n.Type(), tab, data)
case ir.OSLICEHEADER:
- p := s.expr(n.Left())
- l := s.expr(n.List().First())
- c := s.expr(n.List().Second())
+ n := n.(*ir.SliceHeaderExpr)
+ p := s.expr(n.Ptr)
+ l := s.expr(n.LenCap[0])
+ c := s.expr(n.LenCap[1])
return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c)
case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR:
- v := s.expr(n.Left())
+ n := n.(*ir.SliceExpr)
+ v := s.expr(n.X)
var i, j, k *ssa.Value
low, high, max := n.SliceBounds()
if low != nil {
return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c)
case ir.OSLICESTR:
- v := s.expr(n.Left())
+ n := n.(*ir.SliceExpr)
+ v := s.expr(n.X)
var i, j *ssa.Value
low, high, _ := n.SliceBounds()
if low != nil {
case ir.OCALLFUNC:
n := n.(*ir.CallExpr)
- if IsIntrinsicCall(n) {
+ if ir.IsIntrinsicCall(n) {
return s.intrinsicCall(n)
}
fallthrough
return s.callResult(n, callNormal)
case ir.OGETG:
+ n := n.(*ir.CallExpr)
return s.newValue1(ssa.OpGetG, n.Type(), s.mem())
case ir.OAPPEND:
// All literals with nonzero fields have already been
// rewritten during walk. Any that remain are just T{}
// or equivalents. Use the zero value.
- if !isZero(n) {
+ n := n.(*ir.CompLitExpr)
+ if !ir.IsZero(n) {
s.Fatalf("literal with nonzero value in SSA: %v", n)
}
return s.zeroVal(n.Type())
case ir.ONEWOBJ:
+ n := n.(*ir.UnaryExpr)
if n.Type().Elem().Size() == 0 {
- return s.newValue1A(ssa.OpAddr, n.Type(), zerobaseSym, s.sb)
+ return s.newValue1A(ssa.OpAddr, n.Type(), ir.Syms.Zerobase, s.sb)
}
- typ := s.expr(n.Left())
- vv := s.rtcall(newobject, true, []*types.Type{n.Type()}, typ)
+ typ := s.expr(n.X)
+ vv := s.rtcall(ir.Syms.Newobject, true, []*types.Type{n.Type()}, typ)
return vv[0]
default:
pt := types.NewPtr(et)
// Evaluate slice
- sn := n.List().First() // the slice node is the first in the list
+ sn := n.Args[0] // the slice node is the first in the list
var slice, addr *ssa.Value
if inplace {
assign := s.f.NewBlock(ssa.BlockPlain)
// Decide if we need to grow
- nargs := int64(n.List().Len() - 1)
+ nargs := int64(len(n.Args) - 1)
p := s.newValue1(ssa.OpSlicePtr, pt, slice)
l := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice)
c := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], slice)
// Call growslice
s.startBlock(grow)
- taddr := s.expr(n.Left())
- r := s.rtcall(growslice, true, []*types.Type{pt, types.Types[types.TINT], types.Types[types.TINT]}, taddr, p, l, c, nl)
+ taddr := s.expr(n.X)
+ r := s.rtcall(ir.Syms.Growslice, true, []*types.Type{pt, types.Types[types.TINT], types.Types[types.TINT]}, taddr, p, l, c, nl)
if inplace {
if sn.Op() == ir.ONAME {
sn := sn.(*ir.Name)
- if sn.Class() != ir.PEXTERN {
+ if sn.Class_ != ir.PEXTERN {
// Tell liveness we're about to build a new slice
s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem())
}
}
- capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, sliceCapOffset, addr)
+ capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, types.SliceCapOffset, addr)
s.store(types.Types[types.TINT], capaddr, r[2])
s.store(pt, addr, r[0])
// load the value we just stored to avoid having to spill it
if inplace {
l = s.variable(lenVar, types.Types[types.TINT]) // generates phi for len
nl = s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], l, s.constInt(types.Types[types.TINT], nargs))
- lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, sliceLenOffset, addr)
+ lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, types.SliceLenOffset, addr)
s.store(types.Types[types.TINT], lenaddr, nl)
}
store bool
}
args := make([]argRec, 0, nargs)
- for _, n := range n.List().Slice()[1:] {
- if canSSAType(n.Type()) {
+ for _, n := range n.Args[1:] {
+ if TypeOK(n.Type()) {
args = append(args, argRec{v: s.expr(n), store: true})
} else {
v := s.addr(n)
func (s *state) condBranch(cond ir.Node, yes, no *ssa.Block, likely int8) {
switch cond.Op() {
case ir.OANDAND:
+ cond := cond.(*ir.LogicalExpr)
mid := s.f.NewBlock(ssa.BlockPlain)
s.stmtList(cond.Init())
- s.condBranch(cond.Left(), mid, no, max8(likely, 0))
+ s.condBranch(cond.X, mid, no, max8(likely, 0))
s.startBlock(mid)
- s.condBranch(cond.Right(), yes, no, likely)
+ s.condBranch(cond.Y, yes, no, likely)
return
// Note: if likely==1, then both recursive calls pass 1.
// If likely==-1, then we don't have enough information to decide
// TODO: have the frontend give us branch prediction hints for
// OANDAND and OOROR nodes (if it ever has such info).
case ir.OOROR:
+ cond := cond.(*ir.LogicalExpr)
mid := s.f.NewBlock(ssa.BlockPlain)
s.stmtList(cond.Init())
- s.condBranch(cond.Left(), yes, mid, min8(likely, 0))
+ s.condBranch(cond.X, yes, mid, min8(likely, 0))
s.startBlock(mid)
- s.condBranch(cond.Right(), yes, no, likely)
+ s.condBranch(cond.Y, yes, no, likely)
return
// Note: if likely==-1, then both recursive calls pass -1.
// If likely==1, then we don't have enough info to decide
// the likelihood of the first branch.
case ir.ONOT:
+ cond := cond.(*ir.UnaryExpr)
s.stmtList(cond.Init())
- s.condBranch(cond.Left(), no, yes, -likely)
+ s.condBranch(cond.X, no, yes, -likely)
return
case ir.OCONVNOP:
+ cond := cond.(*ir.ConvExpr)
s.stmtList(cond.Init())
- s.condBranch(cond.Left(), yes, no, likely)
+ s.condBranch(cond.X, yes, no, likely)
return
}
c := s.expr(cond)
return
}
t := left.Type()
- dowidth(t)
+ types.CalcSize(t)
if s.canSSA(left) {
if deref {
s.Fatalf("can SSA LHS %v but not RHS %s", left, right)
// Grab information about the structure type.
left := left.(*ir.SelectorExpr)
- t := left.Left().Type()
+ t := left.X.Type()
nf := t.NumFields()
idx := fieldIdx(left)
// Grab old value of structure.
- old := s.expr(left.Left())
+ old := s.expr(left.X)
// Make new structure.
new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t)
}
// Recursively assign the new value we've made to the base of the dot op.
- s.assign(left.Left(), new, false, 0)
+ s.assign(left.X, new, false, 0)
// TODO: do we need to update named values here?
return
}
- if left.Op() == ir.OINDEX && left.(*ir.IndexExpr).Left().Type().IsArray() {
+ if left.Op() == ir.OINDEX && left.(*ir.IndexExpr).X.Type().IsArray() {
+ left := left.(*ir.IndexExpr)
s.pushLine(left.Pos())
defer s.popLine()
// We're assigning to an element of an ssa-able array.
// a[i] = v
- t := left.Left().Type()
+ t := left.X.Type()
n := t.NumElem()
- i := s.expr(left.Right()) // index
+ i := s.expr(left.Index) // index
if n == 0 {
// The bounds check must fail. Might as well
// ignore the actual index and just use zeros.
len := s.constInt(types.Types[types.TINT], 1)
s.boundsCheck(i, len, ssa.BoundsIndex, false) // checks i == 0
v := s.newValue1(ssa.OpArrayMake1, t, right)
- s.assign(left.Left(), v, false, 0)
+ s.assign(left.X, v, false, 0)
return
}
left := left.(*ir.Name)
// If this assignment clobbers an entire local variable, then emit
// OpVarDef so liveness analysis knows the variable is redefined.
- if base := clobberBase(left); base.Op() == ir.ONAME && base.(*ir.Name).Class() != ir.PEXTERN && skip == 0 {
+ if base := clobberBase(left); base.Op() == ir.ONAME && base.(*ir.Name).Class_ != ir.PEXTERN && skip == 0 {
s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base.(*ir.Name), s.mem(), !ir.IsAutoTmp(base))
}
// Left is not ssa-able. Compute its address.
addr := s.addr(left)
- if isReflectHeaderDataField(left) {
+ if ir.IsReflectHeaderDataField(left) {
// Package unsafe's documentation says storing pointers into
// reflect.SliceHeader and reflect.StringHeader's Data fields
// is valid, even though they have type uintptr (#19168).
func softfloatInit() {
// Some of these operations get transformed by sfcall.
softFloatOps = map[ssa.Op]sfRtCallDef{
- ssa.OpAdd32F: sfRtCallDef{sysfunc("fadd32"), types.TFLOAT32},
- ssa.OpAdd64F: sfRtCallDef{sysfunc("fadd64"), types.TFLOAT64},
- ssa.OpSub32F: sfRtCallDef{sysfunc("fadd32"), types.TFLOAT32},
- ssa.OpSub64F: sfRtCallDef{sysfunc("fadd64"), types.TFLOAT64},
- ssa.OpMul32F: sfRtCallDef{sysfunc("fmul32"), types.TFLOAT32},
- ssa.OpMul64F: sfRtCallDef{sysfunc("fmul64"), types.TFLOAT64},
- ssa.OpDiv32F: sfRtCallDef{sysfunc("fdiv32"), types.TFLOAT32},
- ssa.OpDiv64F: sfRtCallDef{sysfunc("fdiv64"), types.TFLOAT64},
-
- ssa.OpEq64F: sfRtCallDef{sysfunc("feq64"), types.TBOOL},
- ssa.OpEq32F: sfRtCallDef{sysfunc("feq32"), types.TBOOL},
- ssa.OpNeq64F: sfRtCallDef{sysfunc("feq64"), types.TBOOL},
- ssa.OpNeq32F: sfRtCallDef{sysfunc("feq32"), types.TBOOL},
- ssa.OpLess64F: sfRtCallDef{sysfunc("fgt64"), types.TBOOL},
- ssa.OpLess32F: sfRtCallDef{sysfunc("fgt32"), types.TBOOL},
- ssa.OpLeq64F: sfRtCallDef{sysfunc("fge64"), types.TBOOL},
- ssa.OpLeq32F: sfRtCallDef{sysfunc("fge32"), types.TBOOL},
-
- ssa.OpCvt32to32F: sfRtCallDef{sysfunc("fint32to32"), types.TFLOAT32},
- ssa.OpCvt32Fto32: sfRtCallDef{sysfunc("f32toint32"), types.TINT32},
- ssa.OpCvt64to32F: sfRtCallDef{sysfunc("fint64to32"), types.TFLOAT32},
- ssa.OpCvt32Fto64: sfRtCallDef{sysfunc("f32toint64"), types.TINT64},
- ssa.OpCvt64Uto32F: sfRtCallDef{sysfunc("fuint64to32"), types.TFLOAT32},
- ssa.OpCvt32Fto64U: sfRtCallDef{sysfunc("f32touint64"), types.TUINT64},
- ssa.OpCvt32to64F: sfRtCallDef{sysfunc("fint32to64"), types.TFLOAT64},
- ssa.OpCvt64Fto32: sfRtCallDef{sysfunc("f64toint32"), types.TINT32},
- ssa.OpCvt64to64F: sfRtCallDef{sysfunc("fint64to64"), types.TFLOAT64},
- ssa.OpCvt64Fto64: sfRtCallDef{sysfunc("f64toint64"), types.TINT64},
- ssa.OpCvt64Uto64F: sfRtCallDef{sysfunc("fuint64to64"), types.TFLOAT64},
- ssa.OpCvt64Fto64U: sfRtCallDef{sysfunc("f64touint64"), types.TUINT64},
- ssa.OpCvt32Fto64F: sfRtCallDef{sysfunc("f32to64"), types.TFLOAT64},
- ssa.OpCvt64Fto32F: sfRtCallDef{sysfunc("f64to32"), types.TFLOAT32},
+ ssa.OpAdd32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd32"), types.TFLOAT32},
+ ssa.OpAdd64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd64"), types.TFLOAT64},
+ ssa.OpSub32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd32"), types.TFLOAT32},
+ ssa.OpSub64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd64"), types.TFLOAT64},
+ ssa.OpMul32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fmul32"), types.TFLOAT32},
+ ssa.OpMul64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fmul64"), types.TFLOAT64},
+ ssa.OpDiv32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fdiv32"), types.TFLOAT32},
+ ssa.OpDiv64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fdiv64"), types.TFLOAT64},
+
+ ssa.OpEq64F: sfRtCallDef{typecheck.LookupRuntimeFunc("feq64"), types.TBOOL},
+ ssa.OpEq32F: sfRtCallDef{typecheck.LookupRuntimeFunc("feq32"), types.TBOOL},
+ ssa.OpNeq64F: sfRtCallDef{typecheck.LookupRuntimeFunc("feq64"), types.TBOOL},
+ ssa.OpNeq32F: sfRtCallDef{typecheck.LookupRuntimeFunc("feq32"), types.TBOOL},
+ ssa.OpLess64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fgt64"), types.TBOOL},
+ ssa.OpLess32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fgt32"), types.TBOOL},
+ ssa.OpLeq64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fge64"), types.TBOOL},
+ ssa.OpLeq32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fge32"), types.TBOOL},
+
+ ssa.OpCvt32to32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fint32to32"), types.TFLOAT32},
+ ssa.OpCvt32Fto32: sfRtCallDef{typecheck.LookupRuntimeFunc("f32toint32"), types.TINT32},
+ ssa.OpCvt64to32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fint64to32"), types.TFLOAT32},
+ ssa.OpCvt32Fto64: sfRtCallDef{typecheck.LookupRuntimeFunc("f32toint64"), types.TINT64},
+ ssa.OpCvt64Uto32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fuint64to32"), types.TFLOAT32},
+ ssa.OpCvt32Fto64U: sfRtCallDef{typecheck.LookupRuntimeFunc("f32touint64"), types.TUINT64},
+ ssa.OpCvt32to64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fint32to64"), types.TFLOAT64},
+ ssa.OpCvt64Fto32: sfRtCallDef{typecheck.LookupRuntimeFunc("f64toint32"), types.TINT32},
+ ssa.OpCvt64to64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fint64to64"), types.TFLOAT64},
+ ssa.OpCvt64Fto64: sfRtCallDef{typecheck.LookupRuntimeFunc("f64toint64"), types.TINT64},
+ ssa.OpCvt64Uto64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fuint64to64"), types.TFLOAT64},
+ ssa.OpCvt64Fto64U: sfRtCallDef{typecheck.LookupRuntimeFunc("f64touint64"), types.TUINT64},
+ ssa.OpCvt32Fto64F: sfRtCallDef{typecheck.LookupRuntimeFunc("f32to64"), types.TFLOAT64},
+ ssa.OpCvt64Fto32F: sfRtCallDef{typecheck.LookupRuntimeFunc("f64to32"), types.TFLOAT32},
}
}
fn string
}
-func initSSATables() {
+func InitTables() {
intrinsics = map[intrinsicKey]intrinsicBuilder{}
var all []*sys.Arch
}
/******** runtime ********/
- if !instrumenting {
+ if !base.Flag.Cfg.Instrumenting {
add("runtime", "slicebytetostringtmp",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
// Compiler frontend optimizations emit OBYTES2STRTMP nodes
return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
// Target Atomic feature is identified by dynamic detection
- addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), arm64HasATOMICS, s.sb)
+ addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), ir.Syms.ARM64HasATOMICS, s.sb)
v := s.load(types.Types[types.TBOOL], addr)
b := s.endBlock()
b.Kind = ssa.BlockIf
s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
return s.variable(n, types.Types[types.TFLOAT64])
}
- v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], x86HasFMA)
+ v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasFMA)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
return s.variable(n, types.Types[types.TFLOAT64])
}
- addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), armHasVFPv4, s.sb)
+ addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), ir.Syms.ARMHasVFPv4, s.sb)
v := s.load(types.Types[types.TBOOL], addr)
b := s.endBlock()
b.Kind = ssa.BlockIf
makeRoundAMD64 := func(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
- v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], x86HasSSE41)
+ v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasSSE41)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
- v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], x86HasPOPCNT)
+ v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasPOPCNT)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
// check for divide-by-zero/overflow and panic with appropriate message
cmpZero := s.newValue2(s.ssaOp(ir.ONE, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[2], s.zeroVal(types.Types[types.TUINT64]))
- s.check(cmpZero, panicdivide)
+ s.check(cmpZero, ir.Syms.Panicdivide)
cmpOverflow := s.newValue2(s.ssaOp(ir.OLT, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[0], args[2])
- s.check(cmpOverflow, panicoverflow)
+ s.check(cmpOverflow, ir.Syms.Panicoverflow)
return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
},
sys.AMD64)
}
// Skip intrinsifying math functions (which may contain hard-float
// instructions) when soft-float
- if thearch.SoftFloat && pkg == "math" {
+ if Arch.SoftFloat && pkg == "math" {
return nil
}
return nil
}
}
- return intrinsics[intrinsicKey{thearch.LinkArch.Arch, pkg, fn}]
+ return intrinsics[intrinsicKey{Arch.LinkArch.Arch, pkg, fn}]
}
-func isIntrinsicCall(n *ir.CallExpr) bool {
+func IsIntrinsicCall(n *ir.CallExpr) bool {
if n == nil {
return false
}
- name, ok := n.Left().(*ir.Name)
+ name, ok := n.X.(*ir.Name)
if !ok {
return false
}
// intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation.
func (s *state) intrinsicCall(n *ir.CallExpr) *ssa.Value {
- v := findIntrinsic(n.Left().Sym())(s, n, s.intrinsicArgs(n))
+ v := findIntrinsic(n.X.Sym())(s, n, s.intrinsicArgs(n))
if ssa.IntrinsicsDebug > 0 {
x := v
if x == nil {
if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 {
x = x.Args[0]
}
- base.WarnfAt(n.Pos(), "intrinsic substitution for %v with %s", n.Left().Sym().Name, x.LongString())
+ base.WarnfAt(n.Pos(), "intrinsic substitution for %v with %s", n.X.Sym().Name, x.LongString())
}
return v
}
func (s *state) intrinsicArgs(n *ir.CallExpr) []*ssa.Value {
// Construct map of temps; see comments in s.call about the structure of n.
temps := map[ir.Node]*ssa.Value{}
- for _, a := range n.List().Slice() {
+ for _, a := range n.Args {
if a.Op() != ir.OAS {
s.Fatalf("non-assignment as a temp function argument %v", a.Op())
}
a := a.(*ir.AssignStmt)
- l, r := a.Left(), a.Right()
+ l, r := a.X, a.Y
if l.Op() != ir.ONAME {
s.Fatalf("non-ONAME temp function argument %v", a.Op())
}
// Walk ensures these temporaries are dead outside of n.
temps[l] = s.expr(r)
}
- args := make([]*ssa.Value, n.Rlist().Len())
- for i, n := range n.Rlist().Slice() {
+ args := make([]*ssa.Value, len(n.Rargs))
+ for i, n := range n.Rargs {
// Store a value to an argument slot.
if x, ok := temps[n]; ok {
// This is a previously computed temporary.
// once.mutex'. Such a statement will create a mapping in s.vars[] from
// the autotmp name to the evaluated SSA arg value, but won't do any
// stores to the stack.
- s.stmtList(n.List())
+ s.stmtList(n.Args)
var args []*ssa.Value
var argNodes []*ir.Name
opendefer := &openDeferInfo{
n: n,
}
- fn := n.Left()
+ fn := n.X
if n.Op() == ir.OCALLFUNC {
// We must always store the function value in a stack slot for the
// runtime panic code to use. But in the defer exit code, we will
closureVal := s.expr(fn)
closure := s.openDeferSave(nil, fn.Type(), closureVal)
opendefer.closureNode = closure.Aux.(*ir.Name)
- if !(fn.Op() == ir.ONAME && fn.(*ir.Name).Class() == ir.PFUNC) {
+ if !(fn.Op() == ir.ONAME && fn.(*ir.Name).Class_ == ir.PFUNC) {
opendefer.closure = closure
}
} else if n.Op() == ir.OCALLMETH {
opendefer.closureNode = opendefer.closure.Aux.(*ir.Name)
opendefer.rcvrNode = opendefer.rcvr.Aux.(*ir.Name)
}
- for _, argn := range n.Rlist().Slice() {
+ for _, argn := range n.Rargs {
var v *ssa.Value
- if canSSAType(argn.Type()) {
+ if TypeOK(argn.Type()) {
v = s.openDeferSave(nil, argn.Type(), s.expr(argn))
} else {
v = s.openDeferSave(argn, argn.Type(), nil)
// evaluated (via s.addr() below) to get the value that is to be stored. The
// function returns an SSA value representing a pointer to the autotmp location.
func (s *state) openDeferSave(n ir.Node, t *types.Type, val *ssa.Value) *ssa.Value {
- canSSA := canSSAType(t)
+ canSSA := TypeOK(t)
var pos src.XPos
if canSSA {
pos = val.Pos
} else {
pos = n.Pos()
}
- argTemp := tempAt(pos.WithNotStmt(), s.curfn, t)
+ argTemp := typecheck.TempAt(pos.WithNotStmt(), s.curfn, t)
argTemp.SetOpenDeferSlot(true)
var addrArgTemp *ssa.Value
// Use OpVarLive to make sure stack slots for the args, etc. are not
// closure/receiver/args that were stored in argtmps at the point
// of the defer statement.
argStart := base.Ctxt.FixedFrameSize()
- fn := r.n.Left()
+ fn := r.n.X
stksize := fn.Type().ArgWidth()
var ACArgs []ssa.Param
var ACResults []ssa.Param
ACArgs = append(ACArgs, ssa.Param{Type: f.Type, Offset: int32(argStart + f.Offset)})
if testLateExpansion {
var a *ssa.Value
- if !canSSAType(f.Type) {
+ if !TypeOK(f.Type) {
a = s.newValue2(ssa.OpDereference, f.Type, argAddrVal, s.mem())
} else {
a = s.load(f.Type, argAddrVal)
callArgs = append(callArgs, a)
} else {
addr := s.constOffPtrSP(pt, argStart+f.Offset)
- if !canSSAType(f.Type) {
+ if !TypeOK(f.Type) {
s.move(f.Type, addr, argAddrVal)
} else {
argVal := s.load(f.Type, argAddrVal)
var closure *ssa.Value // ptr to closure to run (if dynamic)
var codeptr *ssa.Value // ptr to target code (if dynamic)
var rcvr *ssa.Value // receiver to set
- fn := n.Left()
+ fn := n.X
var ACArgs []ssa.Param
var ACResults []ssa.Param
var callArgs []*ssa.Value
- res := n.Left().Type().Results()
+ res := n.X.Type().Results()
if k == callNormal {
nf := res.NumFields()
for i := 0; i < nf; i++ {
switch n.Op() {
case ir.OCALLFUNC:
testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f)
- if k == callNormal && fn.Op() == ir.ONAME && fn.(*ir.Name).Class() == ir.PFUNC {
+ if k == callNormal && fn.Op() == ir.ONAME && fn.(*ir.Name).Class_ == ir.PFUNC {
+ fn := fn.(*ir.Name)
sym = fn.Sym()
break
}
fn := fn.(*ir.SelectorExpr)
testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f)
if k == callNormal {
- sym = fn.Sym()
+ sym = fn.Sel
break
}
closure = s.getMethodClosure(fn)
closure = iclosure
}
}
- dowidth(fn.Type())
+ types.CalcSize(fn.Type())
stksize := fn.Type().ArgWidth() // includes receiver, args, and results
// Run all assignments of temps.
// The temps are introduced to avoid overwriting argument
// slots when arguments themselves require function calls.
- s.stmtList(n.List())
+ s.stmtList(n.Args)
var call *ssa.Value
if k == callDeferStack {
testLateExpansion = ssa.LateCallExpansionEnabledWithin(s.f)
// Make a defer struct d on the stack.
t := deferstruct(stksize)
- d := tempAt(n.Pos(), s.curfn, t)
+ d := typecheck.TempAt(n.Pos(), s.curfn, t)
s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, d, s.mem())
addr := s.addr(d)
// Then, store all the arguments of the defer call.
ft := fn.Type()
off := t.FieldOff(12)
- args := n.Rlist().Slice()
+ args := n.Rargs
// Set receiver (for interface calls). Always a pointer.
if rcvr != nil {
// Call runtime.deferprocStack with pointer to _defer record.
ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(base.Ctxt.FixedFrameSize())})
- aux := ssa.StaticAuxCall(deferprocStack, ACArgs, ACResults)
+ aux := ssa.StaticAuxCall(ir.Syms.DeferprocStack, ACArgs, ACResults)
if testLateExpansion {
callArgs = append(callArgs, addr, s.mem())
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
s.store(types.Types[types.TUINTPTR], arg0, addr)
call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem())
}
- if stksize < int64(Widthptr) {
+ if stksize < int64(types.PtrSize) {
// We need room for both the call to deferprocStack and the call to
// the deferred function.
// TODO Revisit this if/when we pass args in registers.
- stksize = int64(Widthptr)
+ stksize = int64(types.PtrSize)
}
call.AuxInt = stksize
} else {
addr := s.constOffPtrSP(s.f.Config.Types.UInt32Ptr, argStart)
s.store(types.Types[types.TUINT32], addr, argsize)
}
- ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(argStart) + int32(Widthptr)})
+ ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(argStart) + int32(types.PtrSize)})
if testLateExpansion {
callArgs = append(callArgs, closure)
} else {
- addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart+int64(Widthptr))
+ addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart+int64(types.PtrSize))
s.store(types.Types[types.TUINTPTR], addr, closure)
}
- stksize += 2 * int64(Widthptr)
- argStart += 2 * int64(Widthptr)
+ stksize += 2 * int64(types.PtrSize)
+ argStart += 2 * int64(types.PtrSize)
}
// Set receiver (for interface calls).
}
// Write args.
- t := n.Left().Type()
- args := n.Rlist().Slice()
+ t := n.X.Type()
+ args := n.Rargs
if n.Op() == ir.OCALLMETH {
f := t.Recv()
ACArg, arg := s.putArg(args[0], f.Type, argStart+f.Offset, testLateExpansion)
// call target
switch {
case k == callDefer:
- aux := ssa.StaticAuxCall(deferproc, ACArgs, ACResults)
+ aux := ssa.StaticAuxCall(ir.Syms.Deferproc, ACArgs, ACResults)
if testLateExpansion {
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
call.AddArgs(callArgs...)
call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem())
}
case k == callGo:
- aux := ssa.StaticAuxCall(newproc, ACArgs, ACResults)
+ aux := ssa.StaticAuxCall(ir.Syms.Newproc, ACArgs, ACResults)
if testLateExpansion {
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
call.AddArgs(callArgs...)
s.vars[memVar] = call
}
// Insert OVARLIVE nodes
- s.stmtList(n.Body())
+ s.stmtList(n.Body)
// Finish block for defers
if k == callDefer || k == callDeferStack {
// maybeNilCheckClosure checks if a nil check of a closure is needed in some
// architecture-dependent situations and, if so, emits the nil check.
func (s *state) maybeNilCheckClosure(closure *ssa.Value, k callKind) {
- if thearch.LinkArch.Family == sys.Wasm || objabi.GOOS == "aix" && k != callGo {
+ if Arch.LinkArch.Family == sys.Wasm || objabi.GOOS == "aix" && k != callGo {
// On AIX, the closure needs to be verified as fn can be nil, except if it's a call go. This needs to be handled by the runtime to have the "go of nil func value" error.
// TODO(neelance): On other architectures this should be eliminated by the optimization steps
s.nilCheck(closure)
// Make a PFUNC node out of that, then evaluate it.
// We get back an SSA value representing &sync.(*Mutex).Unlock·f.
// We can then pass that to defer or go.
- n2 := ir.NewNameAt(fn.Pos(), fn.Sym())
+ n2 := ir.NewNameAt(fn.Pos(), fn.Sel)
n2.Curfn = s.curfn
- n2.SetClass(ir.PFUNC)
+ n2.Class_ = ir.PFUNC
// n2.Sym already existed, so it's already marked as a function.
n2.SetPos(fn.Pos())
n2.SetType(types.Types[types.TUINT8]) // fake type for a static closure. Could use runtime.funcval if we had it.
// getClosureAndRcvr returns values for the appropriate closure and receiver of an
// interface call
func (s *state) getClosureAndRcvr(fn *ir.SelectorExpr) (*ssa.Value, *ssa.Value) {
- i := s.expr(fn.Left())
+ i := s.expr(fn.X)
itab := s.newValue1(ssa.OpITab, types.Types[types.TUINTPTR], i)
s.nilCheck(itab)
- itabidx := fn.Offset() + 2*int64(Widthptr) + 8 // offset of fun field in runtime.itab
+ itabidx := fn.Offset + 2*int64(types.PtrSize) + 8 // offset of fun field in runtime.itab
closure := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab)
rcvr := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, i)
return closure, rcvr
fallthrough
case ir.ONAME:
n := n.(*ir.Name)
- switch n.Class() {
+ switch n.Class_ {
case ir.PEXTERN:
// global variable
v := s.entryNewValue1A(ssa.OpAddr, t, n.Sym().Linksym(), s.sb)
if v != nil {
return v
}
- if n == nodfp {
+ if n == ir.RegFP {
// Special arg that points to the frame pointer (Used by ORECOVER).
return s.entryNewValue2A(ssa.OpLocalAddr, t, n, s.sp, s.startmem)
}
// that cse works on their addresses
return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), true)
default:
- s.Fatalf("variable address class %v not implemented", n.Class())
+ s.Fatalf("variable address class %v not implemented", n.Class_)
return nil
}
case ir.ORESULT:
// load return from callee
+ n := n.(*ir.ResultExpr)
if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall {
- return s.constOffPtrSP(t, n.Offset())
+ return s.constOffPtrSP(t, n.Offset)
}
- which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Offset())
+ which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Offset)
if which == -1 {
// Do the old thing // TODO: Panic instead.
- return s.constOffPtrSP(t, n.Offset())
+ return s.constOffPtrSP(t, n.Offset)
}
x := s.newValue1I(ssa.OpSelectNAddr, t, which, s.prevCall)
return x
case ir.OINDEX:
- if n.Left().Type().IsSlice() {
- a := s.expr(n.Left())
- i := s.expr(n.Right())
+ n := n.(*ir.IndexExpr)
+ if n.X.Type().IsSlice() {
+ a := s.expr(n.X)
+ i := s.expr(n.Index)
len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], a)
i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
p := s.newValue1(ssa.OpSlicePtr, t, a)
return s.newValue2(ssa.OpPtrIndex, t, p, i)
} else { // array
- a := s.addr(n.Left())
- i := s.expr(n.Right())
- len := s.constInt(types.Types[types.TINT], n.Left().Type().NumElem())
+ a := s.addr(n.X)
+ i := s.expr(n.Index)
+ len := s.constInt(types.Types[types.TINT], n.X.Type().NumElem())
i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
- return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.Left().Type().Elem()), a, i)
+ return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.X.Type().Elem()), a, i)
}
case ir.ODEREF:
- return s.exprPtr(n.Left(), n.Bounded(), n.Pos())
+ n := n.(*ir.StarExpr)
+ return s.exprPtr(n.X, n.Bounded(), n.Pos())
case ir.ODOT:
- p := s.addr(n.Left())
- return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p)
+ n := n.(*ir.SelectorExpr)
+ p := s.addr(n.X)
+ return s.newValue1I(ssa.OpOffPtr, t, n.Offset, p)
case ir.ODOTPTR:
- p := s.exprPtr(n.Left(), n.Bounded(), n.Pos())
- return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p)
+ n := n.(*ir.SelectorExpr)
+ p := s.exprPtr(n.X, n.Bounded(), n.Pos())
+ return s.newValue1I(ssa.OpOffPtr, t, n.Offset, p)
case ir.OCLOSUREREAD:
n := n.(*ir.ClosureReadExpr)
- return s.newValue1I(ssa.OpOffPtr, t, n.Offset(),
+ return s.newValue1I(ssa.OpOffPtr, t, n.Offset,
s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr))
case ir.OCONVNOP:
- if n.Type() == n.Left().Type() {
- return s.addr(n.Left())
+ n := n.(*ir.ConvExpr)
+ if n.Type() == n.X.Type() {
+ return s.addr(n.X)
}
- addr := s.addr(n.Left())
+ addr := s.addr(n.X)
return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type
case ir.OCALLFUNC, ir.OCALLINTER, ir.OCALLMETH:
n := n.(*ir.CallExpr)
for {
nn := n
if nn.Op() == ir.ODOT {
- n = nn.Left()
+ nn := nn.(*ir.SelectorExpr)
+ n = nn.X
continue
}
if nn.Op() == ir.OINDEX {
- if nn.Left().Type().IsArray() {
- n = nn.Left()
+ nn := nn.(*ir.IndexExpr)
+ if nn.X.Type().IsArray() {
+ n = nn.X
continue
}
}
if n.Op() != ir.ONAME {
return false
}
- return s.canSSAName(n.(*ir.Name)) && canSSAType(n.Type())
+ return s.canSSAName(n.(*ir.Name)) && TypeOK(n.Type())
}
func (s *state) canSSAName(name *ir.Name) bool {
if name.Addrtaken() {
return false
}
- if isParamHeapCopy(name) {
+ if ir.IsParamHeapCopy(name) {
return false
}
- if name.Class() == ir.PAUTOHEAP {
+ if name.Class_ == ir.PAUTOHEAP {
s.Fatalf("canSSA of PAUTOHEAP %v", name)
}
- switch name.Class() {
+ switch name.Class_ {
case ir.PEXTERN:
return false
case ir.PPARAMOUT:
return false
}
}
- if name.Class() == ir.PPARAM && name.Sym() != nil && name.Sym().Name == ".this" {
+ if name.Class_ == ir.PPARAM && name.Sym() != nil && name.Sym().Name == ".this" {
// wrappers generated by genwrapper need to update
// the .this pointer in place.
// TODO: treat as a PPARAMOUT?
}
// canSSA reports whether variables of type t are SSA-able.
-func canSSAType(t *types.Type) bool {
- dowidth(t)
- if t.Width > int64(4*Widthptr) {
+func TypeOK(t *types.Type) bool {
+ types.CalcSize(t)
+ if t.Width > int64(4*types.PtrSize) {
// 4*Widthptr is an arbitrary constant. We want it
// to be at least 3*Widthptr so slices can be registerized.
// Too big and we'll introduce too much register pressure.
// not supported on SSA variables.
// TODO: allow if all indexes are constant.
if t.NumElem() <= 1 {
- return canSSAType(t.Elem())
+ return TypeOK(t.Elem())
}
return false
case types.TSTRUCT:
return false
}
for _, t1 := range t.Fields().Slice() {
- if !canSSAType(t1.Type) {
+ if !TypeOK(t1.Type) {
return false
}
}
b.AddEdgeTo(bPanic)
s.startBlock(bPanic)
- if thearch.LinkArch.Family == sys.Wasm {
+ if Arch.LinkArch.Family == sys.Wasm {
// TODO(khr): figure out how to do "register" based calling convention for bounds checks.
// Should be similar to gcWriteBarrier, but I can't make it work.
s.rtcall(BoundsCheckFunc[kind], false, nil, idx, len)
if needcheck {
// do a size-appropriate check for zero
cmp := s.newValue2(s.ssaOp(ir.ONE, n.Type()), types.Types[types.TBOOL], b, s.zeroVal(n.Type()))
- s.check(cmp, panicdivide)
+ s.check(cmp, ir.Syms.Panicdivide)
}
return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
}
for _, arg := range args {
t := arg.Type
- off = Rnd(off, t.Alignment())
+ off = types.Rnd(off, t.Alignment())
size := t.Size()
ACArgs = append(ACArgs, ssa.Param{Type: t, Offset: int32(off)})
if testLateExpansion {
}
off += size
}
- off = Rnd(off, int64(Widthreg))
+ off = types.Rnd(off, int64(types.RegSize))
// Accumulate results types and offsets
offR := off
for _, t := range results {
- offR = Rnd(offR, t.Alignment())
+ offR = types.Rnd(offR, t.Alignment())
ACResults = append(ACResults, ssa.Param{Type: t, Offset: int32(offR)})
offR += t.Size()
}
res := make([]*ssa.Value, len(results))
if testLateExpansion {
for i, t := range results {
- off = Rnd(off, t.Alignment())
- if canSSAType(t) {
+ off = types.Rnd(off, t.Alignment())
+ if TypeOK(t) {
res[i] = s.newValue1I(ssa.OpSelectN, t, int64(i), call)
} else {
addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), int64(i), call)
}
} else {
for i, t := range results {
- off = Rnd(off, t.Alignment())
+ off = types.Rnd(off, t.Alignment())
ptr := s.constOffPtrSP(types.NewPtr(t), off)
res[i] = s.load(t, ptr)
off += t.Size()
}
}
- off = Rnd(off, int64(Widthptr))
+ off = types.Rnd(off, int64(types.PtrSize))
// Remember how much callee stack space we needed.
call.AuxInt = off
func (s *state) putArg(n ir.Node, t *types.Type, off int64, forLateExpandedCall bool) (ssa.Param, *ssa.Value) {
var a *ssa.Value
if forLateExpandedCall {
- if !canSSAType(t) {
+ if !TypeOK(t) {
a = s.newValue2(ssa.OpDereference, t, s.addr(n), s.mem())
} else {
a = s.expr(n)
addr = s.newValue1I(ssa.OpOffPtr, pt, off, base)
}
- if !canSSAType(t) {
+ if !TypeOK(t) {
a := s.addr(n)
s.move(t, addr, a)
return
// referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
func (s *state) referenceTypeBuiltin(n *ir.UnaryExpr, x *ssa.Value) *ssa.Value {
- if !n.Left().Type().IsMap() && !n.Left().Type().IsChan() {
+ if !n.X.Type().IsMap() && !n.X.Type().IsChan() {
s.Fatalf("node must be a map or a channel")
}
// if n == nil {
// commaok indicates whether to panic or return a bool.
// If commaok is false, resok will be nil.
func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Value) {
- iface := s.expr(n.Left()) // input interface
- target := s.expr(n.Right()) // target type
+ iface := s.expr(n.X) // input interface
+ target := s.expr(n.Ntype) // target type
byteptr := s.f.Config.Types.BytePtr
if n.Type().IsInterface() {
// Conversion succeeds iff that field is not nil.
cond := s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], itab, s.constNil(byteptr))
- if n.Left().Type().IsEmptyInterface() && commaok {
+ if n.X.Type().IsEmptyInterface() && commaok {
// Converting empty interface to empty interface with ,ok is just a nil check.
return iface, cond
}
if !commaok {
// On failure, panic by calling panicnildottype.
s.startBlock(bFail)
- s.rtcall(panicnildottype, false, nil, target)
+ s.rtcall(ir.Syms.Panicnildottype, false, nil, target)
// On success, return (perhaps modified) input interface.
s.startBlock(bOk)
- if n.Left().Type().IsEmptyInterface() {
+ if n.X.Type().IsEmptyInterface() {
res = iface // Use input interface unchanged.
return
}
// Load type out of itab, build interface with existing idata.
- off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab)
+ off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab)
typ := s.load(byteptr, off)
idata := s.newValue1(ssa.OpIData, byteptr, iface)
res = s.newValue2(ssa.OpIMake, n.Type(), typ, idata)
s.startBlock(bOk)
// nonempty -> empty
// Need to load type from itab
- off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab)
+ off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab)
s.vars[typVar] = s.load(byteptr, off)
s.endBlock()
if base.Debug.TypeAssert > 0 {
base.WarnfAt(n.Pos(), "type assertion not inlined")
}
- if n.Left().Type().IsEmptyInterface() {
+ if n.X.Type().IsEmptyInterface() {
if commaok {
- call := s.rtcall(assertE2I2, true, []*types.Type{n.Type(), types.Types[types.TBOOL]}, target, iface)
+ call := s.rtcall(ir.Syms.AssertE2I2, true, []*types.Type{n.Type(), types.Types[types.TBOOL]}, target, iface)
return call[0], call[1]
}
- return s.rtcall(assertE2I, true, []*types.Type{n.Type()}, target, iface)[0], nil
+ return s.rtcall(ir.Syms.AssertE2I, true, []*types.Type{n.Type()}, target, iface)[0], nil
}
if commaok {
- call := s.rtcall(assertI2I2, true, []*types.Type{n.Type(), types.Types[types.TBOOL]}, target, iface)
+ call := s.rtcall(ir.Syms.AssertI2I2, true, []*types.Type{n.Type(), types.Types[types.TBOOL]}, target, iface)
return call[0], call[1]
}
- return s.rtcall(assertI2I, true, []*types.Type{n.Type()}, target, iface)[0], nil
+ return s.rtcall(ir.Syms.AssertI2I, true, []*types.Type{n.Type()}, target, iface)[0], nil
}
if base.Debug.TypeAssert > 0 {
}
// Converting to a concrete type.
- direct := isdirectiface(n.Type())
+ direct := types.IsDirectIface(n.Type())
itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface
if base.Debug.TypeAssert > 0 {
base.WarnfAt(n.Pos(), "type assertion inlined")
}
var targetITab *ssa.Value
- if n.Left().Type().IsEmptyInterface() {
+ if n.X.Type().IsEmptyInterface() {
// Looking for pointer to target type.
targetITab = target
} else {
// Looking for pointer to itab for target type and source interface.
- targetITab = s.expr(n.List().First())
+ targetITab = s.expr(n.Itab[0])
}
var tmp ir.Node // temporary for use with large types
var addr *ssa.Value // address of tmp
- if commaok && !canSSAType(n.Type()) {
+ if commaok && !TypeOK(n.Type()) {
// unSSAable type, use temporary.
// TODO: get rid of some of these temporaries.
- tmp = tempAt(n.Pos(), s.curfn, n.Type())
+ tmp = typecheck.TempAt(n.Pos(), s.curfn, n.Type())
s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp.(*ir.Name), s.mem())
addr = s.addr(tmp)
}
if !commaok {
// on failure, panic by calling panicdottype
s.startBlock(bFail)
- taddr := s.expr(n.Right().(*ir.AddrExpr).Right())
- if n.Left().Type().IsEmptyInterface() {
- s.rtcall(panicdottypeE, false, nil, itab, target, taddr)
+ taddr := s.expr(n.Ntype.(*ir.AddrExpr).Alloc)
+ if n.X.Type().IsEmptyInterface() {
+ s.rtcall(ir.Syms.PanicdottypeE, false, nil, itab, target, taddr)
} else {
- s.rtcall(panicdottypeI, false, nil, itab, target, taddr)
+ s.rtcall(ir.Syms.PanicdottypeI, false, nil, itab, target, taddr)
}
// on success, return data from interface
}
// Make a FwdRef, which records a value that's live on block input.
// We'll find the matching definition as part of insertPhis.
- v = s.newValue0A(ssa.OpFwdRef, t, FwdRefAux{N: n})
+ v = s.newValue0A(ssa.OpFwdRef, t, fwdRefAux{N: n})
s.fwdVars[n] = v
if n.Op() == ir.ONAME {
s.addNamedValue(n.(*ir.Name), v)
}
func (s *state) addNamedValue(n *ir.Name, v *ssa.Value) {
- if n.Class() == ir.Pxxx {
+ if n.Class_ == ir.Pxxx {
// Don't track our marker nodes (memVar etc.).
return
}
// Don't track temporary variables.
return
}
- if n.Class() == ir.PPARAMOUT {
+ if n.Class_ == ir.PPARAMOUT {
// Don't track named output values. This prevents return values
// from being assigned too early. See #14591 and #14762. TODO: allow this.
return
}
// Generate a disconnected call to a runtime routine and a return.
-func gencallret(pp *Progs, sym *obj.LSym) *obj.Prog {
+func gencallret(pp *objw.Progs, sym *obj.LSym) *obj.Prog {
p := pp.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
B *ssa.Block // target
}
-// SSAGenState contains state needed during Prog generation.
-type SSAGenState struct {
- pp *Progs
+// State contains state needed during Prog generation.
+type State struct {
+ pp *objw.Progs
// Branches remembers all the branch instructions we've seen
// and where they would like to go.
// Map from GC safe points to liveness index, generated by
// liveness analysis.
- livenessMap LivenessMap
+ livenessMap liveness.Map
// lineRunStart records the beginning of the current run of instructions
// within a single block sharing the same line number
}
// Prog appends a new Prog.
-func (s *SSAGenState) Prog(as obj.As) *obj.Prog {
+func (s *State) Prog(as obj.As) *obj.Prog {
p := s.pp.Prog(as)
if ssa.LosesStmtMark(as) {
return p
}
// Pc returns the current Prog.
-func (s *SSAGenState) Pc() *obj.Prog {
- return s.pp.next
+func (s *State) Pc() *obj.Prog {
+ return s.pp.Next
}
// SetPos sets the current source position.
-func (s *SSAGenState) SetPos(pos src.XPos) {
- s.pp.pos = pos
+func (s *State) SetPos(pos src.XPos) {
+ s.pp.Pos = pos
}
// Br emits a single branch instruction and returns the instruction.
// Not all architectures need the returned instruction, but otherwise
// the boilerplate is common to all.
-func (s *SSAGenState) Br(op obj.As, target *ssa.Block) *obj.Prog {
+func (s *State) Br(op obj.As, target *ssa.Block) *obj.Prog {
p := s.Prog(op)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, Branch{P: p, B: target})
// Spill/fill/copy instructions from the register allocator,
// phi functions, and instructions with a no-pos position
// are examples of instructions that can cause churn.
-func (s *SSAGenState) DebugFriendlySetPosFrom(v *ssa.Value) {
+func (s *State) DebugFriendlySetPosFrom(v *ssa.Value) {
switch v.Op {
case ssa.OpPhi, ssa.OpCopy, ssa.OpLoadReg, ssa.OpStoreReg:
// These are not statements
}
s.SetPos(p)
} else {
- s.SetPos(s.pp.pos.WithNotStmt())
+ s.SetPos(s.pp.Pos.WithNotStmt())
}
}
}
func (s byXoffset) Less(i, j int) bool { return s[i].FrameOffset() < s[j].FrameOffset() }
func (s byXoffset) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func emitStackObjects(e *ssafn, pp *Progs) {
+func emitStackObjects(e *ssafn, pp *objw.Progs) {
var vars []*ir.Name
for _, n := range e.curfn.Dcl {
- if livenessShouldTrack(n) && n.Addrtaken() {
+ if liveness.ShouldTrack(n) && n.Addrtaken() {
vars = append(vars, n)
}
}
// Format must match runtime/stack.go:stackObjectRecord.
x := e.curfn.LSym.Func().StackObjects
off := 0
- off = duintptr(x, off, uint64(len(vars)))
+ off = objw.Uintptr(x, off, uint64(len(vars)))
for _, v := range vars {
// Note: arguments and return values have non-negative Xoffset,
// in which case the offset is relative to argp.
// Locals have a negative Xoffset, in which case the offset is relative to varp.
- off = duintptr(x, off, uint64(v.FrameOffset()))
- if !typesym(v.Type()).Siggen() {
+ off = objw.Uintptr(x, off, uint64(v.FrameOffset()))
+ if !types.TypeSym(v.Type()).Siggen() {
e.Fatalf(v.Pos(), "stack object's type symbol not generated for type %s", v.Type())
}
- off = dsymptr(x, off, dtypesym(v.Type()), 0)
+ off = objw.SymPtr(x, off, reflectdata.WriteType(v.Type()), 0)
}
// Emit a funcdata pointing at the stack object data.
p := pp.Prog(obj.AFUNCDATA)
- Addrconst(&p.From, objabi.FUNCDATA_StackObjects)
+ p.From.SetConst(objabi.FUNCDATA_StackObjects)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = x
}
// genssa appends entries to pp for each instruction in f.
-func genssa(f *ssa.Func, pp *Progs) {
- var s SSAGenState
+func genssa(f *ssa.Func, pp *objw.Progs) {
+ var s State
e := f.Frontend().(*ssafn)
- s.livenessMap = liveness(e.curfn, f, e.stkptrsize, pp)
+ s.livenessMap = liveness.Compute(e.curfn, f, e.stkptrsize, pp)
emitStackObjects(e, pp)
openDeferInfo := e.curfn.LSym.Func().OpenCodedDeferInfo
// This function uses open-coded defers -- write out the funcdata
// info that we computed at the end of genssa.
p := pp.Prog(obj.AFUNCDATA)
- Addrconst(&p.From, objabi.FUNCDATA_OpenCodedDeferInfo)
+ p.From.SetConst(objabi.FUNCDATA_OpenCodedDeferInfo)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = openDeferInfo
progToValue = make(map[*obj.Prog]*ssa.Value, f.NumValues())
progToBlock = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
f.Logf("genssa %s\n", f.Name)
- progToBlock[s.pp.next] = f.Blocks[0]
+ progToBlock[s.pp.Next] = f.Blocks[0]
}
s.ScratchFpMem = e.scratchFpMem
// Emit basic blocks
for i, b := range f.Blocks {
- s.bstart[b.ID] = s.pp.next
+ s.bstart[b.ID] = s.pp.Next
s.lineRunStart = nil
// Attach a "default" liveness info. Normally this will be
// instruction. We won't use the actual liveness map on a
// control instruction. Just mark it something that is
// preemptible, unless this function is "all unsafe".
- s.pp.nextLive = LivenessIndex{-1, allUnsafe(f)}
+ s.pp.NextLive = objw.LivenessIndex{StackMapIndex: -1, IsUnsafePoint: liveness.IsUnsafe(f)}
// Emit values in block
- thearch.SSAMarkMoves(&s, b)
+ Arch.SSAMarkMoves(&s, b)
for _, v := range b.Values {
- x := s.pp.next
+ x := s.pp.Next
s.DebugFriendlySetPosFrom(v)
switch v.Op {
v.Fatalf("OpConvert should be a no-op: %s; %s", v.Args[0].LongString(), v.LongString())
}
case ssa.OpInlMark:
- p := thearch.Ginsnop(s.pp)
+ p := Arch.Ginsnop(s.pp)
if inlMarks == nil {
inlMarks = map[*obj.Prog]int32{}
inlMarksByPos = map[src.XPos][]*obj.Prog{}
default:
// Attach this safe point to the next
// instruction.
- s.pp.nextLive = s.livenessMap.Get(v)
+ s.pp.NextLive = s.livenessMap.Get(v)
// Special case for first line in function; move it to the start.
if firstPos != src.NoXPos {
firstPos = src.NoXPos
}
// let the backend handle it
- thearch.SSAGenValue(&s, v)
+ Arch.SSAGenValue(&s, v)
}
if base.Ctxt.Flag_locationlists {
- valueToProgAfter[v.ID] = s.pp.next
+ valueToProgAfter[v.ID] = s.pp.Next
}
if f.PrintOrHtmlSSA {
- for ; x != s.pp.next; x = x.Link {
+ for ; x != s.pp.Next; x = x.Link {
progToValue[x] = v
}
}
}
// If this is an empty infinite loop, stick a hardware NOP in there so that debuggers are less confused.
- if s.bstart[b.ID] == s.pp.next && len(b.Succs) == 1 && b.Succs[0].Block() == b {
- p := thearch.Ginsnop(s.pp)
+ if s.bstart[b.ID] == s.pp.Next && len(b.Succs) == 1 && b.Succs[0].Block() == b {
+ p := Arch.Ginsnop(s.pp)
p.Pos = p.Pos.WithIsStmt()
if b.Pos == src.NoXPos {
b.Pos = p.Pos // It needs a file, otherwise a no-file non-zero line causes confusion. See #35652.
// line numbers for otherwise empty blocks.
next = f.Blocks[i+1]
}
- x := s.pp.next
+ x := s.pp.Next
s.SetPos(b.Pos)
- thearch.SSAGenBlock(&s, b, next)
+ Arch.SSAGenBlock(&s, b, next)
if f.PrintOrHtmlSSA {
- for ; x != s.pp.next; x = x.Link {
+ for ; x != s.pp.Next; x = x.Link {
progToBlock[x] = b
}
}
// still be inside the function in question. So if
// it ends in a call which doesn't return, add a
// nop (which will never execute) after the call.
- thearch.Ginsnop(pp)
+ Arch.Ginsnop(pp)
}
if openDeferInfo != nil {
// When doing open-coded defers, generate a disconnected call to
// deferreturn and a return. This will be used to during panic
// recovery to unwind the stack and return back to the runtime.
- s.pp.nextLive = s.livenessMap.deferreturn
- gencallret(pp, Deferreturn)
+ s.pp.NextLive = s.livenessMap.DeferReturn
+ gencallret(pp, ir.Syms.Deferreturn)
}
if inlMarks != nil {
// going to emit anyway, and use those instructions instead of the
// inline marks.
for p := pp.Text; p != nil; p = p.Link {
- if p.As == obj.ANOP || p.As == obj.AFUNCDATA || p.As == obj.APCDATA || p.As == obj.ATEXT || p.As == obj.APCALIGN || thearch.LinkArch.Family == sys.Wasm {
+ if p.As == obj.ANOP || p.As == obj.AFUNCDATA || p.As == obj.APCDATA || p.As == obj.ATEXT || p.As == obj.APCALIGN || Arch.LinkArch.Family == sys.Wasm {
// Don't use 0-sized instructions as inline marks, because we need
// to identify inline mark instructions by pc offset.
// (Some of these instructions are sometimes zero-sized, sometimes not.
// some of the inline marks.
// Use this instruction instead.
p.Pos = p.Pos.WithIsStmt() // promote position to a statement
- pp.curfn.LSym.Func().AddInlMark(p, inlMarks[m])
+ pp.CurFunc.LSym.Func().AddInlMark(p, inlMarks[m])
// Make the inline mark a real nop, so it doesn't generate any code.
m.As = obj.ANOP
m.Pos = src.NoXPos
// Any unmatched inline marks now need to be added to the inlining tree (and will generate a nop instruction).
for _, p := range inlMarkList {
if p.As != obj.ANOP {
- pp.curfn.LSym.Func().AddInlMark(p, inlMarks[p])
+ pp.CurFunc.LSym.Func().AddInlMark(p, inlMarks[p])
}
}
}
if base.Ctxt.Flag_locationlists {
- debugInfo := ssa.BuildFuncDebug(base.Ctxt, f, base.Debug.LocationLists > 1, stackOffset)
+ debugInfo := ssa.BuildFuncDebug(base.Ctxt, f, base.Debug.LocationLists > 1, StackOffset)
e.curfn.DebugInfo = debugInfo
bstart := s.bstart
// Note that at this moment, Prog.Pc is a sequence number; it's
f.HTMLWriter = nil
}
-func defframe(s *SSAGenState, e *ssafn) {
+// defframe fills in the frame and argument sizes on the function's TEXT
+// instruction and emits code to zero ambiguously live stack variables.
+func defframe(s *State, e *ssafn) {
pp := s.pp
- frame := Rnd(s.maxarg+e.stksize, int64(Widthreg))
- if thearch.PadFrame != nil {
- frame = thearch.PadFrame(frame)
+ // Total frame: e.stksize plus s.maxarg (callee argument space recorded
+ // via UseArgs), rounded up to a register-size multiple.
+ frame := types.Rnd(s.maxarg+e.stksize, int64(types.RegSize))
+ if Arch.PadFrame != nil {
+ frame = Arch.PadFrame(frame)
}
// Fill in argument and frame size.
pp.Text.To.Type = obj.TYPE_TEXTSIZE
- pp.Text.To.Val = int32(Rnd(e.curfn.Type().ArgWidth(), int64(Widthreg)))
+ pp.Text.To.Val = int32(types.Rnd(e.curfn.Type().ArgWidth(), int64(types.RegSize)))
pp.Text.To.Offset = frame
// Insert code to zero ambiguously live variables so that the
if !n.Needzero() {
continue
}
- if n.Class() != ir.PAUTO {
- e.Fatalf(n.Pos(), "needzero class %d", n.Class())
+ if n.Class_ != ir.PAUTO {
+ e.Fatalf(n.Pos(), "needzero class %d", n.Class_)
}
- if n.Type().Size()%int64(Widthptr) != 0 || n.FrameOffset()%int64(Widthptr) != 0 || n.Type().Size() == 0 {
- e.Fatalf(n.Pos(), "var %L has size %d offset %d", n, n.Type().Size(), n.Offset())
+ // Zeroed vars must be pointer-aligned and pointer-size multiples.
+ if n.Type().Size()%int64(types.PtrSize) != 0 || n.FrameOffset()%int64(types.PtrSize) != 0 || n.Type().Size() == 0 {
+ e.Fatalf(n.Pos(), "var %L has size %d offset %d", n, n.Type().Size(), n.Offset_)
}
- if lo != hi && n.FrameOffset()+n.Type().Size() >= lo-int64(2*Widthreg) {
+ // Tolerate a small gap (two registers) when merging zero ranges.
+ if lo != hi && n.FrameOffset()+n.Type().Size() >= lo-int64(2*types.RegSize) {
// Merge with range we already have.
lo = n.FrameOffset()
continue
}
// Zero old range
- p = thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
+ p = Arch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
// Set new range.
lo = n.FrameOffset()
}
// Zero final range.
- thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
+ Arch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
}
// For generating consecutive jump instructions to model a specific branching
Index int
}
-func (s *SSAGenState) oneJump(b *ssa.Block, jump *IndexJump) {
+// oneJump emits a single conditional branch for block b, targeting the
+// successor selected by jump.Index, and stamps it with b's position.
+func (s *State) oneJump(b *ssa.Block, jump *IndexJump) {
p := s.Br(jump.Jump, b.Succs[jump.Index].Block())
p.Pos = b.Pos
}
// CombJump generates combinational instructions (2 at present) for a block jump,
// thereby the behaviour of non-standard condition codes could be simulated
-func (s *SSAGenState) CombJump(b, next *ssa.Block, jumps *[2][2]IndexJump) {
+func (s *State) CombJump(b, next *ssa.Block, jumps *[2][2]IndexJump) {
switch next {
case b.Succs[0].Block():
s.oneJump(b, &jumps[0][0])
a.Name = obj.NAME_EXTERN
a.Sym = n
case *ir.Name:
- if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT {
+ if n.Class_ == ir.PPARAM || n.Class_ == ir.PPARAMOUT {
a.Name = obj.NAME_PARAM
a.Sym = ir.Orig(n).Sym().Linksym()
a.Offset += n.FrameOffset()
}
}
-// AutoVar returns a *Name and int64 representing the auto variable and offset within it
-// where v should be spilled.
-func AutoVar(v *ssa.Value) (*ir.Name, int64) {
- loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot)
- if v.Type.Size() > loc.Type.Size() {
- v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type)
- }
- return loc.N, loc.Off
-}
-
+// AddrAuto sets a to refer to the stack slot where v is spilled:
+// an SP-relative memory operand at the slot's frame offset.
func AddrAuto(a *obj.Addr, v *ssa.Value) {
- n, off := AutoVar(v)
+ n, off := ssa.AutoVar(v)
a.Type = obj.TYPE_MEM
a.Sym = n.Sym().Linksym()
- a.Reg = int16(thearch.REGSP)
+ a.Reg = int16(Arch.REGSP)
a.Offset = n.FrameOffset() + off
- if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT {
+ // Parameters (in or out) use the argument-area name space; all other
+ // slots are frame autos.
+ if n.Class_ == ir.PPARAM || n.Class_ == ir.PPARAMOUT {
a.Name = obj.NAME_PARAM
} else {
a.Name = obj.NAME_AUTO
}
}
-func (s *SSAGenState) AddrScratch(a *obj.Addr) {
+// AddrScratch sets a to refer to the scratch memory slot (ScratchFpMem),
+// which exists only for Ops that declare usesScratch.
+func (s *State) AddrScratch(a *obj.Addr) {
if s.ScratchFpMem == nil {
panic("no scratch memory available; forgot to declare usesScratch for Op?")
}
a.Type = obj.TYPE_MEM
a.Name = obj.NAME_AUTO
a.Sym = s.ScratchFpMem.Sym().Linksym()
- a.Reg = int16(thearch.REGSP)
- a.Offset = s.ScratchFpMem.Offset()
+ a.Reg = int16(Arch.REGSP)
+ a.Offset = s.ScratchFpMem.Offset_
}
// Call returns a new CALL instruction for the SSA value v.
// It uses PrepareCall to prepare the call.
-func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog {
- pPosIsStmt := s.pp.pos.IsStmt() // The statement-ness fo the call comes from ssaGenState
+func (s *State) Call(v *ssa.Value) *obj.Prog {
+ pPosIsStmt := s.pp.Pos.IsStmt() // The statement-ness fo the call comes from ssaGenState
s.PrepareCall(v)
p := s.Prog(obj.ACALL)
p.To.Sym = sym.Fn
} else {
// TODO(mdempsky): Can these differences be eliminated?
- switch thearch.LinkArch.Family {
+ switch Arch.LinkArch.Family {
case sys.AMD64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm:
p.To.Type = obj.TYPE_REG
case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64:
// PrepareCall prepares to emit a CALL instruction for v and does call-related bookkeeping.
// It must be called immediately before emitting the actual CALL instruction,
// since it emits PCDATA for the stack map at the call (calls are safe points).
-func (s *SSAGenState) PrepareCall(v *ssa.Value) {
+func (s *State) PrepareCall(v *ssa.Value) {
idx := s.livenessMap.Get(v)
if !idx.StackMapValid() {
// See Liveness.hasStackMap.
- if sym, ok := v.Aux.(*ssa.AuxCall); !ok || !(sym.Fn == typedmemclr || sym.Fn == typedmemmove) {
+ if sym, ok := v.Aux.(*ssa.AuxCall); !ok || !(sym.Fn == ir.Syms.Typedmemclr || sym.Fn == ir.Syms.Typedmemmove) {
base.Fatalf("missing stack map index for %v", v.LongString())
}
}
call, ok := v.Aux.(*ssa.AuxCall)
- if ok && call.Fn == Deferreturn {
+ if ok && call.Fn == ir.Syms.Deferreturn {
// Deferred calls will appear to be returning to
// the CALL deferreturn(SB) that we are about to emit.
// However, the stack trace code will show the line
// insert an actual hardware NOP that will have the right line number.
// This is different from obj.ANOP, which is a virtual no-op
// that doesn't make it into the instruction stream.
- thearch.Ginsnopdefer(s.pp)
+ Arch.Ginsnopdefer(s.pp)
}
if ok {
// Record call graph information for nowritebarrierrec
// analysis.
if nowritebarrierrecCheck != nil {
- nowritebarrierrecCheck.recordCall(s.pp.curfn, call.Fn, v.Pos)
+ nowritebarrierrecCheck.recordCall(s.pp.CurFunc, call.Fn, v.Pos)
}
}
// UseArgs records the fact that an instruction needs a certain amount of
// callee args space for its use.
-func (s *SSAGenState) UseArgs(n int64) {
+func (s *State) UseArgs(n int64) {
if s.maxarg < n {
s.maxarg = n
}
// fieldIdx finds the index of the field referred to by the ODOT node n.
func fieldIdx(n *ir.SelectorExpr) int {
- t := n.Left().Type()
- f := n.Sym()
+ t := n.X.Type()
+ f := n.Sel
if !t.IsStruct() {
panic("ODOT's LHS is not a struct")
}
i++
continue
}
- if t1.Offset != n.Offset() {
+ if t1.Offset != n.Offset {
panic("field offset doesn't match")
}
return i
if e.strings == nil {
e.strings = make(map[string]*obj.LSym)
}
- data := stringsym(e.curfn.Pos(), s)
+ data := staticdata.StringSym(e.curfn.Pos(), s)
e.strings[s] = data
return data
}
func (e *ssafn) Auto(pos src.XPos, t *types.Type) *ir.Name {
- return tempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list
+ return typecheck.TempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list
}
func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
} else {
t = types.Types[types.TUINT32]
}
- if thearch.LinkArch.ByteOrder == binary.BigEndian {
+ if Arch.LinkArch.ByteOrder == binary.BigEndian {
return e.SplitSlot(&name, ".hi", 0, t), e.SplitSlot(&name, ".lo", t.Size(), types.Types[types.TUINT32])
}
return e.SplitSlot(&name, ".hi", t.Size(), t), e.SplitSlot(&name, ".lo", 0, types.Types[types.TUINT32])
}
func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym {
- return itabsym(it, offset)
+ return reflectdata.ITabSym(it, offset)
}
// SplitSlot returns a slot representing the data of parent starting at offset.
func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
node := parent.N
- if node.Class() != ir.PAUTO || node.Name().Addrtaken() {
+ if node.Class_ != ir.PAUTO || node.Name().Addrtaken() {
// addressed things and non-autos retain their parents (i.e., cannot truly be split)
return ssa.LocalSlot{N: node, Type: t, Off: parent.Off + offset}
}
s.Def = n
ir.AsNode(s.Def).Name().SetUsed(true)
n.SetType(t)
- n.SetClass(ir.PAUTO)
- n.SetEsc(EscNever)
+ n.Class_ = ir.PAUTO
+ n.SetEsc(ir.EscNever)
n.Curfn = e.curfn
e.curfn.Dcl = append(e.curfn.Dcl, n)
- dowidth(t)
+ types.CalcSize(t)
return ssa.LocalSlot{N: n, Type: t, Off: 0, SplitOf: parent, SplitOffset: offset}
}
func (e *ssafn) CanSSA(t *types.Type) bool {
- return canSSAType(t)
+ return TypeOK(t)
}
func (e *ssafn) Line(pos src.XPos) string {
func (e *ssafn) Syslook(name string) *obj.LSym {
switch name {
case "goschedguarded":
- return goschedguarded
+ return ir.Syms.Goschedguarded
case "writeBarrier":
- return writeBarrier
+ return ir.Syms.WriteBarrier
case "gcWriteBarrier":
- return gcWriteBarrier
+ return ir.Syms.GCWriteBarrier
case "typedmemmove":
- return typedmemmove
+ return ir.Syms.Typedmemmove
case "typedmemclr":
- return typedmemclr
+ return ir.Syms.Typedmemclr
}
e.Fatalf(src.NoXPos, "unknown Syslook func %v", name)
return nil
+// clobberBase unwraps n through single-field struct selectors (ODOT) and
+// single-element array indexing (OINDEX), returning the innermost base:
+// in those cases the selected component covers the entire base value.
func clobberBase(n ir.Node) ir.Node {
if n.Op() == ir.ODOT {
- if n.Left().Type().NumFields() == 1 {
- return clobberBase(n.Left())
+ n := n.(*ir.SelectorExpr)
+ if n.X.Type().NumFields() == 1 {
+ return clobberBase(n.X)
}
}
if n.Op() == ir.OINDEX {
- if n.Left().Type().IsArray() && n.Left().Type().NumElem() == 1 {
- return clobberBase(n.Left())
+ n := n.(*ir.IndexExpr)
+ if n.X.Type().IsArray() && n.X.Type().NumElem() == 1 {
+ return clobberBase(n.X)
}
}
return n
}
return lsym
}
+
+// min8 returns the smaller of a and b.
+func min8(a, b int8) int8 {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+// max8 returns the larger of a and b.
+func max8(a, b int8) int8 {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+// deferstruct makes a runtime._defer structure, with additional space for
+// stksize bytes of args.
+func deferstruct(stksize int64) *types.Type {
+ makefield := func(name string, typ *types.Type) *types.Field {
+ // Unlike the global makefield function, this one needs to set Pkg
+ // because these types might be compared (in SSA CSE sorting).
+ // TODO: unify this makefield and the global one above.
+ sym := &types.Sym{Name: name, Pkg: types.LocalPkg}
+ return types.NewField(src.NoXPos, sym, typ)
+ }
+ // The trailing args area is a raw byte array; size/align it by hand.
+ argtype := types.NewArray(types.Types[types.TUINT8], stksize)
+ argtype.Width = stksize
+ argtype.Align = 1
+ // These fields must match the ones in runtime/runtime2.go:_defer and
+ // cmd/compile/internal/gc/ssa.go:(*state).call.
+ fields := []*types.Field{
+ makefield("siz", types.Types[types.TUINT32]),
+ makefield("started", types.Types[types.TBOOL]),
+ makefield("heap", types.Types[types.TBOOL]),
+ makefield("openDefer", types.Types[types.TBOOL]),
+ makefield("sp", types.Types[types.TUINTPTR]),
+ makefield("pc", types.Types[types.TUINTPTR]),
+ // Note: the types here don't really matter. Defer structures
+ // are always scanned explicitly during stack copying and GC,
+ // so we make them uintptr type even though they are real pointers.
+ makefield("fn", types.Types[types.TUINTPTR]),
+ makefield("_panic", types.Types[types.TUINTPTR]),
+ makefield("link", types.Types[types.TUINTPTR]),
+ makefield("framepc", types.Types[types.TUINTPTR]),
+ makefield("varp", types.Types[types.TUINTPTR]),
+ makefield("fd", types.Types[types.TUINTPTR]),
+ makefield("args", argtype),
+ }
+
+ // build struct holding the above fields
+ s := types.NewStruct(types.NoPkg, fields)
+ s.SetNoalg(true)
+ types.CalcStructSize(s)
+ return s
+}
+
+var (
+ // BoundsCheckFunc and ExtendCheckFunc hold one linker symbol per
+ // ssa.BoundsKind, indexed by the bounds-check kind.
+ BoundsCheckFunc [ssa.BoundsKindCount]*obj.LSym
+ ExtendCheckFunc [ssa.BoundsKindCount]*obj.LSym
+)
+
+// GCWriteBarrierReg maps from registers to gcWriteBarrier implementation LSyms.
+var GCWriteBarrierReg map[int16]*obj.LSym
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package staticdata
+
+import (
+ "crypto/sha256"
+ "fmt"
+ "go/constant"
+ "io"
+ "io/ioutil"
+ "os"
+ "sort"
+ "strconv"
+ "sync"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+)
+
+// InitAddr writes the static address of a to n. a must be an ONAME.
+// Neither n nor a is modified.
+func InitAddr(n *ir.Name, noff int64, a *ir.Name, aoff int64) {
+ if n.Op() != ir.ONAME {
+ base.Fatalf("addrsym n op %v", n.Op())
+ }
+ if n.Sym() == nil {
+ base.Fatalf("addrsym nil n sym")
+ }
+ if a.Op() != ir.ONAME {
+ base.Fatalf("addrsym a op %v", a.Op())
+ }
+ s := n.Sym().Linksym()
+ // Emit a pointer-sized address relocation at n+noff pointing to a+aoff.
+ s.WriteAddr(base.Ctxt, noff, types.PtrSize, a.Sym().Linksym(), aoff)
+}
+
+// InitFunc writes the static address of f to n. f must be a global function.
+// Neither n nor f is modified.
+func InitFunc(n *ir.Name, noff int64, f *ir.Name) {
+ if n.Op() != ir.ONAME {
+ base.Fatalf("pfuncsym n op %v", n.Op())
+ }
+ if n.Sym() == nil {
+ base.Fatalf("pfuncsym nil n sym")
+ }
+ if f.Class_ != ir.PFUNC {
+ base.Fatalf("pfuncsym class not PFUNC %d", f.Class_)
+ }
+ s := n.Sym().Linksym()
+ // The stored value is the address of f's funcsym (s·f), not f itself.
+ s.WriteAddr(base.Ctxt, noff, types.PtrSize, FuncSym(f.Sym()).Linksym(), 0)
+}
+
+// InitSlice writes a static slice symbol {&arr, lencap, lencap} to n+noff.
+// InitSlice does not modify n.
+func InitSlice(n *ir.Name, noff int64, arr *ir.Name, lencap int64) {
+ s := n.Sym().Linksym()
+ if arr.Op() != ir.ONAME {
+ base.Fatalf("slicesym non-name arr %v", arr)
+ }
+ // Slice header layout: data pointer, then len, then cap (len == cap here).
+ s.WriteAddr(base.Ctxt, noff, types.PtrSize, arr.Sym().Linksym(), 0)
+ s.WriteInt(base.Ctxt, noff+types.SliceLenOffset, types.PtrSize, lencap)
+ s.WriteInt(base.Ctxt, noff+types.SliceCapOffset, types.PtrSize, lencap)
+}
+
+// InitSliceBytes writes to nam+off a static slice of the bytes of s,
+// backed by a fresh data symbol (see slicedata).
+func InitSliceBytes(nam *ir.Name, off int64, s string) {
+ if nam.Op() != ir.ONAME {
+ base.Fatalf("slicebytes %v", nam)
+ }
+ InitSlice(nam, off, slicedata(nam.Pos(), s), int64(len(s)))
+}
+
+// Naming scheme for string-data symbols: short strings are named by their
+// quoted contents; long strings use stringSymPattern (length plus hash).
+const (
+ stringSymPrefix = "go.string."
+ stringSymPattern = ".gostring.%d.%x"
+)
+
+// StringSym returns a symbol containing the string s.
+// The symbol contains the string data, not a string header.
+func StringSym(pos src.XPos, s string) (data *obj.LSym) {
+ var symname string
+ if len(s) > 100 {
+ // Huge strings are hashed to avoid long names in object files.
+ // Indulge in some paranoia by writing the length of s, too,
+ // as protection against length extension attacks.
+ // Same pattern is known to fileStringSym below.
+ h := sha256.New()
+ io.WriteString(h, s)
+ symname = fmt.Sprintf(stringSymPattern, len(s), h.Sum(nil))
+ } else {
+ // Small strings get named directly by their contents.
+ symname = strconv.Quote(s)
+ }
+
+ symdata := base.Ctxt.Lookup(stringSymPrefix + symname)
+ if !symdata.OnList() {
+ // First time this content-derived name is seen: emit the data once;
+ // subsequent lookups of the same name reuse this symbol.
+ off := dstringdata(symdata, 0, s, pos, "string")
+ objw.Global(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL)
+ symdata.Set(obj.AttrContentAddressable, true)
+ }
+
+ return symdata
+}
+
+// fileStringSym returns a symbol for the contents and the size of file.
+// If readonly is true, the symbol shares storage with any literal string
+// or other file with the same content and is placed in a read-only section.
+// If readonly is false, the symbol is a read-write copy separate from any other,
+// for use as the backing store of a []byte.
+// The content hash of file is copied into hash. (If hash is nil, nothing is copied.)
+// The returned symbol contains the data itself, not a string header.
+func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj.LSym, int64, error) {
+ f, err := os.Open(file)
+ if err != nil {
+ return nil, 0, err
+ }
+ defer f.Close()
+ info, err := f.Stat()
+ if err != nil {
+ return nil, 0, err
+ }
+ if !info.Mode().IsRegular() {
+ return nil, 0, fmt.Errorf("not a regular file")
+ }
+ size := info.Size()
+ if size <= 1*1024 {
+ // Small file: read it fully into memory and handle it like a literal.
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return nil, 0, err
+ }
+ if int64(len(data)) != size {
+ return nil, 0, fmt.Errorf("file changed between reads")
+ }
+ var sym *obj.LSym
+ if readonly {
+ sym = StringSym(pos, string(data))
+ } else {
+ sym = slicedata(pos, string(data)).Sym().Linksym()
+ }
+ if len(hash) > 0 {
+ sum := sha256.Sum256(data)
+ copy(hash, sum[:])
+ }
+ return sym, size, nil
+ }
+ if size > 2e9 {
+ // ggloblsym takes an int32,
+ // and probably the rest of the toolchain
+ // can't handle such big symbols either.
+ // See golang.org/issue/9862.
+ return nil, 0, fmt.Errorf("file too large")
+ }
+
+ // File is too big to read and keep in memory.
+ // Compute hash if needed for read-only content hashing or if the caller wants it.
+ var sum []byte
+ if readonly || len(hash) > 0 {
+ h := sha256.New()
+ n, err := io.Copy(h, f)
+ if err != nil {
+ return nil, 0, err
+ }
+ if n != size {
+ return nil, 0, fmt.Errorf("file changed between reads")
+ }
+ sum = h.Sum(nil)
+ copy(hash, sum)
+ }
+
+ var symdata *obj.LSym
+ if readonly {
+ // Same naming scheme as StringSym's long-string case, so identical
+ // file contents share a symbol with equal string literals.
+ symname := fmt.Sprintf(stringSymPattern, size, sum)
+ symdata = base.Ctxt.Lookup(stringSymPrefix + symname)
+ if !symdata.OnList() {
+ info := symdata.NewFileInfo()
+ info.Name = file
+ info.Size = size
+ objw.Global(symdata, int32(size), obj.DUPOK|obj.RODATA|obj.LOCAL)
+ // Note: AttrContentAddressable cannot be set here,
+ // because the content-addressable-handling code
+ // does not know about file symbols.
+ }
+ } else {
+ // Emit a zero-length data symbol
+ // and then fix up length and content to use file.
+ symdata = slicedata(pos, "").Sym().Linksym()
+ symdata.Size = size
+ symdata.Type = objabi.SNOPTRDATA
+ info := symdata.NewFileInfo()
+ info.Name = file
+ info.Size = size
+ }
+
+ return symdata, size, nil
+}
+
+// slicedataGen numbers the .gobytes.N symbols emitted by slicedata.
+var slicedataGen int
+
+// slicedata emits the bytes of s as a fresh NOPTR|LOCAL global symbol
+// named .gobytes.N and returns an ONAME node referring to it.
+func slicedata(pos src.XPos, s string) *ir.Name {
+ slicedataGen++
+ symname := fmt.Sprintf(".gobytes.%d", slicedataGen)
+ sym := types.LocalPkg.Lookup(symname)
+ symnode := typecheck.NewName(sym)
+ sym.Def = symnode
+
+ lsym := sym.Linksym()
+ off := dstringdata(lsym, 0, s, pos, "slice")
+ objw.Global(lsym, int32(off), obj.NOPTR|obj.LOCAL)
+
+ return symnode
+}
+
+// dstringdata writes the bytes of t into symbol s starting at off and
+// returns the offset just past the written data. what names the kind of
+// object being emitted ("string" or "slice") for error messages.
+func dstringdata(s *obj.LSym, off int, t string, pos src.XPos, what string) int {
+ // Objects that are too large will cause the data section to overflow right away,
+ // causing a cryptic error message by the linker. Check for oversize objects here
+ // and provide a useful error message instead.
+ if int64(len(t)) > 2e9 {
+ base.ErrorfAt(pos, "%v with length %v is too big", what, len(t))
+ return 0
+ }
+
+ s.WriteString(base.Ctxt, int64(off), len(t), t)
+ return off + len(t)
+}
+
+var (
+ funcsymsmu sync.Mutex // protects funcsyms and associated package lookups (see func funcsym)
+ funcsyms []*types.Sym
+)
+
+// FuncSym returns s·f.
+func FuncSym(s *types.Sym) *types.Sym {
+ // funcsymsmu here serves to protect not just mutations of funcsyms (below),
+ // but also the package lookup of the func sym name,
+ // since this function gets called concurrently from the backend.
+ // There are no other concurrent package lookups in the backend,
+ // except for the types package, which is protected separately.
+ // Reusing funcsymsmu to also cover this package lookup
+ // avoids a general, broader, expensive package lookup mutex.
+ // Note makefuncsym also does package look-up of func sym names,
+ // but that it is only called serially, from the front end.
+ funcsymsmu.Lock()
+ sf, existed := s.Pkg.LookupOK(ir.FuncSymName(s))
+ // Don't export s·f when compiling for dynamic linking.
+ // When dynamically linking, the necessary function
+ // symbols will be created explicitly with makefuncsym.
+ // See the makefuncsym comment for details.
+ if !base.Ctxt.Flag_dynlink && !existed {
+ // First time this symbol is seen: queue it for WriteFuncSyms.
+ funcsyms = append(funcsyms, s)
+ }
+ funcsymsmu.Unlock()
+ return sf
+}
+
+// NeedFuncSym ensures that s·f is exported.
+// It is only used with -dynlink.
+// When not compiling for dynamic linking,
+// the funcsyms are created as needed by
+// the packages that use them.
+// Normally we emit the s·f stubs as DUPOK syms,
+// but DUPOK doesn't work across shared library boundaries.
+// So instead, when dynamic linking, we only create
+// the s·f stubs in s's package.
+func NeedFuncSym(s *types.Sym) {
+ if !base.Ctxt.Flag_dynlink {
+ base.Fatalf("makefuncsym dynlink")
+ }
+ if s.IsBlank() {
+ return
+ }
+ if base.Flag.CompilingRuntime && (s.Name == "getg" || s.Name == "getclosureptr" || s.Name == "getcallerpc" || s.Name == "getcallersp") {
+ // runtime.getg(), getclosureptr(), getcallerpc(), and
+ // getcallersp() are not real functions and so do not
+ // get funcsyms.
+ return
+ }
+ // No lock is taken here: per the note in FuncSym, this path is only
+ // called serially, from the front end.
+ if _, existed := s.Pkg.LookupOK(ir.FuncSymName(s)); !existed {
+ funcsyms = append(funcsyms, s)
+ }
+}
+
+// WriteFuncSyms emits all accumulated s·f symbols, sorted by linker name
+// for deterministic output. Each is a read-only DUPOK pointer to the
+// function's own symbol.
+func WriteFuncSyms() {
+ sort.Slice(funcsyms, func(i, j int) bool {
+ return funcsyms[i].LinksymName() < funcsyms[j].LinksymName()
+ })
+ for _, s := range funcsyms {
+ sf := s.Pkg.Lookup(ir.FuncSymName(s)).Linksym()
+ objw.SymPtr(sf, 0, s.Linksym(), 0)
+ objw.Global(sf, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
+ }
+}
+
+// InitConst writes the static literal c to n.
+// Neither n nor c is modified.
+func InitConst(n *ir.Name, noff int64, c ir.Node, wid int) {
+ if n.Op() != ir.ONAME {
+ base.Fatalf("litsym n op %v", n.Op())
+ }
+ if n.Sym() == nil {
+ base.Fatalf("litsym nil n sym")
+ }
+ if c.Op() == ir.ONIL {
+ // Nothing to write for a nil literal.
+ return
+ }
+ if c.Op() != ir.OLITERAL {
+ base.Fatalf("litsym c op %v", c.Op())
+ }
+ s := n.Sym().Linksym()
+ switch u := c.Val(); u.Kind() {
+ case constant.Bool:
+ i := int64(obj.Bool2int(constant.BoolVal(u)))
+ s.WriteInt(base.Ctxt, noff, wid, i)
+
+ case constant.Int:
+ s.WriteInt(base.Ctxt, noff, wid, ir.IntVal(c.Type(), u))
+
+ case constant.Float:
+ f, _ := constant.Float64Val(u)
+ switch c.Type().Kind() {
+ case types.TFLOAT32:
+ s.WriteFloat32(base.Ctxt, noff, float32(f))
+ case types.TFLOAT64:
+ s.WriteFloat64(base.Ctxt, noff, f)
+ }
+
+ case constant.Complex:
+ // Real part first, then imaginary, each in half the value's width.
+ re, _ := constant.Float64Val(constant.Real(u))
+ im, _ := constant.Float64Val(constant.Imag(u))
+ switch c.Type().Kind() {
+ case types.TCOMPLEX64:
+ s.WriteFloat32(base.Ctxt, noff, float32(re))
+ s.WriteFloat32(base.Ctxt, noff+4, float32(im))
+ case types.TCOMPLEX128:
+ s.WriteFloat64(base.Ctxt, noff, re)
+ s.WriteFloat64(base.Ctxt, noff+8, im)
+ }
+
+ case constant.String:
+ // Strings are written as a header: data pointer, then length.
+ i := constant.StringVal(u)
+ symdata := StringSym(n.Pos(), i)
+ s.WriteAddr(base.Ctxt, noff, types.PtrSize, symdata, 0)
+ s.WriteInt(base.Ctxt, noff+int64(types.PtrSize), types.PtrSize, int64(len(i)))
+
+ default:
+ base.Fatalf("litsym unhandled OLITERAL %v", c)
+ }
+}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package staticdata
import (
+ "path"
+ "sort"
+ "strings"
+
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
- "cmd/compile/internal/syntax"
+ "cmd/compile/internal/objw"
"cmd/compile/internal/types"
"cmd/internal/obj"
-
- "path"
- "sort"
- "strconv"
- "strings"
)
const (
embedFiles
)
-func varEmbed(p *noder, names []*ir.Name, typ ir.Ntype, exprs []ir.Node, embeds []PragmaEmbed) (newExprs []ir.Node) {
- haveEmbed := false
- for _, decl := range p.file.DeclList {
- imp, ok := decl.(*syntax.ImportDecl)
- if !ok {
- // imports always come first
- break
- }
- path, _ := strconv.Unquote(imp.Path.Value)
- if path == "embed" {
- haveEmbed = true
- break
- }
- }
-
- pos := embeds[0].Pos
- if !haveEmbed {
- p.errorAt(pos, "invalid go:embed: missing import \"embed\"")
- return exprs
- }
- if base.Flag.Cfg.Embed.Patterns == nil {
- p.errorAt(pos, "invalid go:embed: build system did not supply embed configuration")
- return exprs
- }
- if len(names) > 1 {
- p.errorAt(pos, "go:embed cannot apply to multiple vars")
- return exprs
- }
- if len(exprs) > 0 {
- p.errorAt(pos, "go:embed cannot apply to var with initializer")
- return exprs
- }
- if typ == nil {
- // Should not happen, since len(exprs) == 0 now.
- p.errorAt(pos, "go:embed cannot apply to var without type")
- return exprs
- }
- if dclcontext != ir.PEXTERN {
- p.errorAt(pos, "go:embed cannot apply to var inside func")
- return exprs
- }
-
- v := names[0]
- Target.Embeds = append(Target.Embeds, v)
- v.Embed = new([]ir.Embed)
- for _, e := range embeds {
- *v.Embed = append(*v.Embed, ir.Embed{Pos: p.makeXPos(e.Pos), Patterns: e.Patterns})
- }
- return exprs
-}
-
func embedFileList(v *ir.Name) []string {
kind := embedKind(v.Type())
if kind == embedUnknown {
return xdir < ydir || xdir == ydir && xelem < yelem
}
-func dumpembeds() {
- for _, v := range Target.Embeds {
- initEmbed(v)
- }
-}
-
-// initEmbed emits the init data for a //go:embed variable,
+// WriteEmbed emits the init data for a //go:embed variable,
// which is either a string, a []byte, or an embed.FS.
-func initEmbed(v *ir.Name) {
+func WriteEmbed(v *ir.Name) {
files := embedFileList(v)
switch kind := embedKind(v.Type()); kind {
case embedUnknown:
}
sym := v.Sym().Linksym()
off := 0
- off = dsymptr(sym, off, fsym, 0) // data string
- off = duintptr(sym, off, uint64(size)) // len
+ off = objw.SymPtr(sym, off, fsym, 0) // data string
+ off = objw.Uintptr(sym, off, uint64(size)) // len
if kind == embedBytes {
- duintptr(sym, off, uint64(size)) // cap for slice
+ objw.Uintptr(sym, off, uint64(size)) // cap for slice
}
case embedFiles:
slicedata := base.Ctxt.Lookup(`"".` + v.Sym().Name + `.files`)
off := 0
// []files pointed at by Files
- off = dsymptr(slicedata, off, slicedata, 3*Widthptr) // []file, pointing just past slice
- off = duintptr(slicedata, off, uint64(len(files)))
- off = duintptr(slicedata, off, uint64(len(files)))
+ off = objw.SymPtr(slicedata, off, slicedata, 3*types.PtrSize) // []file, pointing just past slice
+ off = objw.Uintptr(slicedata, off, uint64(len(files)))
+ off = objw.Uintptr(slicedata, off, uint64(len(files)))
// embed/embed.go type file is:
// name string
const hashSize = 16
hash := make([]byte, hashSize)
for _, file := range files {
- off = dsymptr(slicedata, off, stringsym(v.Pos(), file), 0) // file string
- off = duintptr(slicedata, off, uint64(len(file)))
+ off = objw.SymPtr(slicedata, off, StringSym(v.Pos(), file), 0) // file string
+ off = objw.Uintptr(slicedata, off, uint64(len(file)))
if strings.HasSuffix(file, "/") {
// entry for directory - no data
- off = duintptr(slicedata, off, 0)
- off = duintptr(slicedata, off, 0)
+ off = objw.Uintptr(slicedata, off, 0)
+ off = objw.Uintptr(slicedata, off, 0)
off += hashSize
} else {
fsym, size, err := fileStringSym(v.Pos(), base.Flag.Cfg.Embed.Files[file], true, hash)
if err != nil {
base.ErrorfAt(v.Pos(), "embed %s: %v", file, err)
}
- off = dsymptr(slicedata, off, fsym, 0) // data string
- off = duintptr(slicedata, off, uint64(size))
+ off = objw.SymPtr(slicedata, off, fsym, 0) // data string
+ off = objw.Uintptr(slicedata, off, uint64(size))
off = int(slicedata.WriteBytes(base.Ctxt, int64(off), hash))
}
}
- ggloblsym(slicedata, int32(off), obj.RODATA|obj.LOCAL)
+ objw.Global(slicedata, int32(off), obj.RODATA|obj.LOCAL)
sym := v.Sym().Linksym()
- dsymptr(sym, 0, slicedata, 0)
+ objw.SymPtr(sym, 0, slicedata, 0)
}
}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package staticinit
+
+import (
+ "fmt"
+ "go/constant"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/staticdata"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+)
+
+// An Entry is one element of an initialization Plan: a value to be
+// written at a fixed byte offset within a composite value.
+type Entry struct {
+ Xoffset int64 // struct, array only
+ Expr ir.Node // bytes of run-time computed expressions
+}
+
+// A Plan is the flattened initialization plan for a composite
+// literal: the list of (offset, value) entries needed to build it.
+type Plan struct {
+ E []Entry
+}
+
+// A Schedule is used to decompose assignment statements into
+// static and dynamic initialization parts. Static initializations are
+// handled by populating variables' linker symbol data, while dynamic
+// initializations are accumulated to be executed in order.
+type Schedule struct {
+ // Out is the ordered list of dynamic initialization
+ // statements.
+ Out []ir.Node
+
+ // Plans caches the initialization plan computed for each
+ // composite literal; Temps records the static temporary
+ // created for each subexpression that needed one.
+ Plans map[ir.Node]*Plan
+ Temps map[ir.Node]*ir.Name
+}
+
+// append adds n to the ordered list of dynamic initialization
+// statements to be executed at startup.
+func (s *Schedule) append(n ir.Node) {
+ s.Out = append(s.Out, n)
+}
+
+// StaticInit adds an initialization statement n to the schedule.
+// If n can be executed entirely at compile time (by writing into the
+// variable's linker symbol data), it is; otherwise it is appended to
+// Out to run dynamically at program startup.
+func (s *Schedule) StaticInit(n ir.Node) {
+ if !s.tryStaticInit(n) {
+ if base.Flag.Percent != 0 {
+ // -% flag: report initializers that could not be done statically.
+ ir.Dump("nonstatic", n)
+ }
+ s.append(n)
+ }
+}
+
+// tryStaticInit attempts to statically execute an initialization
+// statement and reports whether it succeeded.
+func (s *Schedule) tryStaticInit(nn ir.Node) bool {
+ // Only worry about simple "l = r" assignments. Multiple
+ // variable/expression OAS2 assignments have already been
+ // replaced by multiple simple OAS assignments, and the other
+ // OAS2* assignments mostly necessitate dynamic execution
+ // anyway.
+ if nn.Op() != ir.OAS {
+ return false
+ }
+ n := nn.(*ir.AssignStmt)
+ if ir.IsBlank(n.X) && !AnySideEffects(n.Y) {
+ // Discard.
+ return true
+ }
+ // Temporarily switch base.Pos to n's position; the deferred
+ // call restores it when we return.
+ lno := ir.SetPos(n)
+ defer func() { base.Pos = lno }()
+ nam := n.X.(*ir.Name)
+ return s.StaticAssign(nam, 0, n.Y, nam.Type())
+}
+
+// staticcopy is like StaticAssign, but the source is another
+// already-initialized package-level variable rn: it attempts to copy
+// rn's static initializer directly into l at offset loff (as type
+// typ) instead of re-evaluating it. It reports whether it succeeded;
+// entries it cannot copy statically are appended as dynamic
+// assignments.
+func (s *Schedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *types.Type) bool {
+ if rn.Class_ == ir.PFUNC {
+ // A function reference is a linker symbol address.
+ // TODO if roff != 0 { panic }
+ staticdata.InitFunc(l, loff, rn)
+ return true
+ }
+ if rn.Class_ != ir.PEXTERN || rn.Sym().Pkg != types.LocalPkg {
+ return false
+ }
+ if rn.Defn == nil { // probably zeroed but perhaps supplied externally and of unknown value
+ return false
+ }
+ if rn.Defn.Op() != ir.OAS {
+ return false
+ }
+ if rn.Type().IsString() { // perhaps overwritten by cmd/link -X (#34675)
+ return false
+ }
+ orig := rn
+ r := rn.Defn.(*ir.AssignStmt).Y
+
+ // Strip no-op conversions until the types line up.
+ for r.Op() == ir.OCONVNOP && !types.Identical(r.Type(), typ) {
+ r = r.(*ir.ConvExpr).X
+ }
+
+ switch r.Op() {
+ case ir.OMETHEXPR:
+ r = r.(*ir.MethodExpr).FuncName()
+ fallthrough
+ case ir.ONAME:
+ r := r.(*ir.Name)
+ if s.staticcopy(l, loff, r, typ) {
+ return true
+ }
+ // We may have skipped past one or more OCONVNOPs, so
+ // use conv to ensure r is assignable to l (#13263).
+ dst := ir.Node(l)
+ if loff != 0 || !types.Identical(typ, l.Type()) {
+ dst = ir.NewNameOffsetExpr(base.Pos, l, loff, typ)
+ }
+ s.append(ir.NewAssignStmt(base.Pos, dst, typecheck.Conv(r, typ)))
+ return true
+
+ case ir.ONIL:
+ // Destination memory is already zeroed.
+ return true
+
+ case ir.OLITERAL:
+ if ir.IsZero(r) {
+ // Destination memory is already zeroed.
+ return true
+ }
+ staticdata.InitConst(l, loff, r, int(typ.Width))
+ return true
+
+ case ir.OADDR:
+ r := r.(*ir.AddrExpr)
+ if a := r.X; a.Op() == ir.ONAME {
+ a := a.(*ir.Name)
+ staticdata.InitAddr(l, loff, a, 0)
+ return true
+ }
+
+ case ir.OPTRLIT:
+ r := r.(*ir.AddrExpr)
+ switch r.X.Op() {
+ case ir.OARRAYLIT, ir.OSLICELIT, ir.OSTRUCTLIT, ir.OMAPLIT:
+ // copy pointer
+ staticdata.InitAddr(l, loff, s.Temps[r], 0)
+ return true
+ }
+
+ case ir.OSLICELIT:
+ r := r.(*ir.CompLitExpr)
+ // copy slice
+ staticdata.InitSlice(l, loff, s.Temps[r], r.Len)
+ return true
+
+ case ir.OARRAYLIT, ir.OSTRUCTLIT:
+ r := r.(*ir.CompLitExpr)
+ p := s.Plans[r]
+ // Replay rn's initialization plan entry by entry into l.
+ for i := range p.E {
+ e := &p.E[i]
+ typ := e.Expr.Type()
+ if e.Expr.Op() == ir.OLITERAL || e.Expr.Op() == ir.ONIL {
+ staticdata.InitConst(l, loff+e.Xoffset, e.Expr, int(typ.Width))
+ continue
+ }
+ x := e.Expr
+ if x.Op() == ir.OMETHEXPR {
+ x = x.(*ir.MethodExpr).FuncName()
+ }
+ if x.Op() == ir.ONAME && s.staticcopy(l, loff+e.Xoffset, x.(*ir.Name), typ) {
+ continue
+ }
+ // Requires computation, but we're
+ // copying someone else's computation.
+ ll := ir.NewNameOffsetExpr(base.Pos, l, loff+e.Xoffset, typ)
+ rr := ir.NewNameOffsetExpr(base.Pos, orig, e.Xoffset, typ)
+ ir.SetPos(rr)
+ s.append(ir.NewAssignStmt(base.Pos, ll, rr))
+ }
+
+ return true
+ }
+
+ return false
+}
+
+// StaticAssign attempts to statically initialize l at offset loff
+// (as type typ) with the value of expression r, writing directly
+// into l's linker symbol data. It reports whether it fully
+// succeeded; subexpressions that cannot be initialized statically
+// are appended to s.Out as dynamic assignments, and a false result
+// means the caller must emit the whole assignment dynamically.
+func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Type) bool {
+ for r.Op() == ir.OCONVNOP {
+ r = r.(*ir.ConvExpr).X
+ }
+
+ switch r.Op() {
+ case ir.ONAME:
+ r := r.(*ir.Name)
+ return s.staticcopy(l, loff, r, typ)
+
+ case ir.OMETHEXPR:
+ r := r.(*ir.MethodExpr)
+ return s.staticcopy(l, loff, r.FuncName(), typ)
+
+ case ir.ONIL:
+ // Destination memory is already zeroed.
+ return true
+
+ case ir.OLITERAL:
+ if ir.IsZero(r) {
+ // Destination memory is already zeroed.
+ return true
+ }
+ staticdata.InitConst(l, loff, r, int(typ.Width))
+ return true
+
+ case ir.OADDR:
+ r := r.(*ir.AddrExpr)
+ if name, offset, ok := StaticLoc(r.X); ok {
+ staticdata.InitAddr(l, loff, name, offset)
+ return true
+ }
+ fallthrough
+
+ case ir.OPTRLIT:
+ r := r.(*ir.AddrExpr)
+ switch r.X.Op() {
+ case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT:
+ // Init pointer.
+ a := StaticName(r.X.Type())
+
+ s.Temps[r] = a
+ staticdata.InitAddr(l, loff, a, 0)
+
+ // Init underlying literal.
+ if !s.StaticAssign(a, 0, r.X, a.Type()) {
+ s.append(ir.NewAssignStmt(base.Pos, a, r.X))
+ }
+ return true
+ }
+ //dump("not static ptrlit", r);
+
+ case ir.OSTR2BYTES:
+ r := r.(*ir.ConvExpr)
+ if l.Class_ == ir.PEXTERN && r.X.Op() == ir.OLITERAL {
+ sval := ir.StringVal(r.X)
+ staticdata.InitSliceBytes(l, loff, sval)
+ return true
+ }
+
+ case ir.OSLICELIT:
+ r := r.(*ir.CompLitExpr)
+ s.initplan(r)
+ // Init slice.
+ ta := types.NewArray(r.Type().Elem(), r.Len)
+ ta.SetNoalg(true)
+ a := StaticName(ta)
+ s.Temps[r] = a
+ staticdata.InitSlice(l, loff, a, r.Len)
+ // Fall through to init underlying array.
+ l = a
+ loff = 0
+ fallthrough
+
+ case ir.OARRAYLIT, ir.OSTRUCTLIT:
+ r := r.(*ir.CompLitExpr)
+ s.initplan(r)
+
+ p := s.Plans[r]
+ for i := range p.E {
+ e := &p.E[i]
+ if e.Expr.Op() == ir.OLITERAL || e.Expr.Op() == ir.ONIL {
+ staticdata.InitConst(l, loff+e.Xoffset, e.Expr, int(e.Expr.Type().Width))
+ continue
+ }
+ ir.SetPos(e.Expr)
+ if !s.StaticAssign(l, loff+e.Xoffset, e.Expr, e.Expr.Type()) {
+ a := ir.NewNameOffsetExpr(base.Pos, l, loff+e.Xoffset, e.Expr.Type())
+ s.append(ir.NewAssignStmt(base.Pos, a, e.Expr))
+ }
+ }
+
+ return true
+
+ case ir.OMAPLIT:
+ break
+
+ case ir.OCLOSURE:
+ r := r.(*ir.ClosureExpr)
+ if ir.IsTrivialClosure(r) {
+ if base.Debug.Closure > 0 {
+ base.WarnfAt(r.Pos(), "closure converted to global")
+ }
+ // Closures with no captured variables are globals,
+ // so the assignment can be done at link time.
+ // TODO if roff != 0 { panic }
+ staticdata.InitFunc(l, loff, r.Func.Nname)
+ return true
+ }
+ ir.ClosureDebugRuntimeCheck(r)
+
+ case ir.OCONVIFACE:
+ // This logic is mirrored in isStaticCompositeLiteral.
+ // If you change something here, change it there, and vice versa.
+
+ // Determine the underlying concrete type and value we are converting from.
+ r := r.(*ir.ConvExpr)
+ val := ir.Node(r)
+ for val.Op() == ir.OCONVIFACE {
+ val = val.(*ir.ConvExpr).X
+ }
+
+ if val.Type().IsInterface() {
+ // val is an interface type.
+ // If val is nil, we can statically initialize l;
+ // both words are zero and so there no work to do, so report success.
+ // If val is non-nil, we have no concrete type to record,
+ // and we won't be able to statically initialize its value, so report failure.
+ return val.Op() == ir.ONIL
+ }
+
+ reflectdata.MarkTypeUsedInInterface(val.Type(), l.Sym().Linksym())
+
+ var itab *ir.AddrExpr
+ if typ.IsEmptyInterface() {
+ itab = reflectdata.TypePtr(val.Type())
+ } else {
+ itab = reflectdata.ITabAddr(val.Type(), typ)
+ }
+
+ // Create a copy of l to modify while we emit data.
+
+ // Emit itab, advance offset.
+ staticdata.InitAddr(l, loff, itab.X.(*ir.Name), 0)
+
+ // Emit data.
+ if types.IsDirectIface(val.Type()) {
+ if val.Op() == ir.ONIL {
+ // Nil is zero, nothing to do.
+ return true
+ }
+ // Copy val directly into n.
+ ir.SetPos(val)
+ if !s.StaticAssign(l, loff+int64(types.PtrSize), val, val.Type()) {
+ a := ir.NewNameOffsetExpr(base.Pos, l, loff+int64(types.PtrSize), val.Type())
+ s.append(ir.NewAssignStmt(base.Pos, a, val))
+ }
+ } else {
+ // Construct temp to hold val, write pointer to temp into n.
+ a := StaticName(val.Type())
+ s.Temps[val] = a
+ if !s.StaticAssign(a, 0, val, val.Type()) {
+ s.append(ir.NewAssignStmt(base.Pos, a, val))
+ }
+ staticdata.InitAddr(l, loff+int64(types.PtrSize), a, 0)
+ }
+
+ return true
+ }
+
+ //dump("not static", r);
+ return false
+}
+
+// initplan builds (and caches in s.Plans) the initialization plan
+// for composite literal n: the flat list of (offset, value) entries
+// that must be written to construct it. It panics (Fatalf) if n is
+// not an array, slice, struct, or map literal.
+func (s *Schedule) initplan(n ir.Node) {
+ if s.Plans[n] != nil {
+ // Already planned.
+ return
+ }
+ p := new(Plan)
+ s.Plans[n] = p
+ switch n.Op() {
+ default:
+ base.Fatalf("initplan")
+
+ case ir.OARRAYLIT, ir.OSLICELIT:
+ n := n.(*ir.CompLitExpr)
+ // k tracks the implicit element index; an OKEY entry resets it.
+ var k int64
+ for _, a := range n.List {
+ if a.Op() == ir.OKEY {
+ kv := a.(*ir.KeyExpr)
+ k = typecheck.IndexConst(kv.Key)
+ if k < 0 {
+ base.Fatalf("initplan arraylit: invalid index %v", kv.Key)
+ }
+ a = kv.Value
+ }
+ s.addvalue(p, k*n.Type().Elem().Width, a)
+ k++
+ }
+
+ case ir.OSTRUCTLIT:
+ n := n.(*ir.CompLitExpr)
+ for _, a := range n.List {
+ if a.Op() != ir.OSTRUCTKEY {
+ base.Fatalf("initplan structlit")
+ }
+ a := a.(*ir.StructKeyExpr)
+ if a.Field.IsBlank() {
+ // Blank fields are never initialized.
+ continue
+ }
+ s.addvalue(p, a.Offset, a.Value)
+ }
+
+ case ir.OMAPLIT:
+ n := n.(*ir.CompLitExpr)
+ for _, a := range n.List {
+ if a.Op() != ir.OKEY {
+ base.Fatalf("initplan maplit")
+ }
+ a := a.(*ir.KeyExpr)
+ // Map entries have no fixed offset; use -1.
+ s.addvalue(p, -1, a.Value)
+ }
+ }
+}
+
+// addvalue records in plan p that value n is to be written at byte
+// offset xoffset. Zero values are dropped (static memory starts
+// zeroed), and nested struct/array literals are flattened into their
+// individual entries.
+func (s *Schedule) addvalue(p *Plan, xoffset int64, n ir.Node) {
+ // special case: zero can be dropped entirely
+ if ir.IsZero(n) {
+ return
+ }
+
+ // special case: inline struct and array (not slice) literals
+ if isvaluelit(n) {
+ s.initplan(n)
+ q := s.Plans[n]
+ for _, qe := range q.E {
+ // qe is a copy; we are not modifying entries in q.E
+ qe.Xoffset += xoffset
+ p.E = append(p.E, qe)
+ }
+ return
+ }
+
+ // add to plan
+ p.E = append(p.E, Entry{Xoffset: xoffset, Expr: n})
+}
+
+// from here down is the walk analysis
+// of composite literals.
+// most of the work is to generate
+// data statements for the constant
+// part of the composite literal.
+
+var statuniqgen int // name generator for static temps
+
+// StaticName returns a name backed by a (writable) static data symbol.
+// The returned name is declared as a package-level (PEXTERN) variable
+// of type t with a fresh, unique symbol marked local to the package.
+// Use readonlystaticname for read-only node.
+// (NOTE(review): the read-only counterpart's exact name may have
+// changed in this refactor — verify it still exists.)
+func StaticName(t *types.Type) *ir.Name {
+ // Don't use lookupN; it interns the resulting string, but these are all unique.
+ n := typecheck.NewName(typecheck.Lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen)))
+ statuniqgen++
+ typecheck.Declare(n, ir.PEXTERN)
+ n.SetType(t)
+ n.Sym().Linksym().Set(obj.AttrLocal, true)
+ return n
+}
+
+// StaticLoc returns the static location of expression n, if it has
+// one: the base name and the constant byte offset within it. ok
+// reports whether such a location exists; when it is false, name is
+// nil and offset is 0. (The original comment said "or else nil",
+// which did not match the three-value return.)
+func StaticLoc(n ir.Node) (name *ir.Name, offset int64, ok bool) {
+ if n == nil {
+ return nil, 0, false
+ }
+
+ switch n.Op() {
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ return n, 0, true
+
+ case ir.OMETHEXPR:
+ n := n.(*ir.MethodExpr)
+ return StaticLoc(n.FuncName())
+
+ case ir.ODOT:
+ // Field selection: add the field's offset to the base location.
+ n := n.(*ir.SelectorExpr)
+ if name, offset, ok = StaticLoc(n.X); !ok {
+ break
+ }
+ offset += n.Offset
+ return name, offset, true
+
+ case ir.OINDEX:
+ n := n.(*ir.IndexExpr)
+ if n.X.Type().IsSlice() {
+ // Slice elements live behind a pointer; not static.
+ break
+ }
+ if name, offset, ok = StaticLoc(n.X); !ok {
+ break
+ }
+ l := getlit(n.Index)
+ if l < 0 {
+ break
+ }
+
+ // Check for overflow.
+ if n.Type().Width != 0 && types.MaxWidth/n.Type().Width <= int64(l) {
+ break
+ }
+ offset += int64(l) * n.Type().Width
+ return name, offset, true
+ }
+
+ return nil, 0, false
+}
+
+// AnySideEffects reports whether n contains any operations that could have observable side effects.
+// It walks the whole expression tree; any op not on the explicit
+// safe-list below is conservatively treated as having side effects.
+func AnySideEffects(n ir.Node) bool {
+ return ir.Any(n, func(n ir.Node) bool {
+ switch n.Op() {
+ // Assume side effects unless we know otherwise.
+ default:
+ return true
+
+ // No side effects here (arguments are checked separately).
+ case ir.ONAME,
+ ir.ONONAME,
+ ir.OTYPE,
+ ir.OPACK,
+ ir.OLITERAL,
+ ir.ONIL,
+ ir.OADD,
+ ir.OSUB,
+ ir.OOR,
+ ir.OXOR,
+ ir.OADDSTR,
+ ir.OADDR,
+ ir.OANDAND,
+ ir.OBYTES2STR,
+ ir.ORUNES2STR,
+ ir.OSTR2BYTES,
+ ir.OSTR2RUNES,
+ ir.OCAP,
+ ir.OCOMPLIT,
+ ir.OMAPLIT,
+ ir.OSTRUCTLIT,
+ ir.OARRAYLIT,
+ ir.OSLICELIT,
+ ir.OPTRLIT,
+ ir.OCONV,
+ ir.OCONVIFACE,
+ ir.OCONVNOP,
+ ir.ODOT,
+ ir.OEQ,
+ ir.ONE,
+ ir.OLT,
+ ir.OLE,
+ ir.OGT,
+ ir.OGE,
+ ir.OKEY,
+ ir.OSTRUCTKEY,
+ ir.OLEN,
+ ir.OMUL,
+ ir.OLSH,
+ ir.ORSH,
+ ir.OAND,
+ ir.OANDNOT,
+ ir.ONEW,
+ ir.ONOT,
+ ir.OBITNOT,
+ ir.OPLUS,
+ ir.ONEG,
+ ir.OOROR,
+ ir.OPAREN,
+ ir.ORUNESTR,
+ ir.OREAL,
+ ir.OIMAG,
+ ir.OCOMPLEX:
+ return false
+
+ // Only possible side effect is division by zero.
+ case ir.ODIV, ir.OMOD:
+ n := n.(*ir.BinaryExpr)
+ if n.Y.Op() != ir.OLITERAL || constant.Sign(n.Y.Val()) == 0 {
+ return true
+ }
+
+ // Only possible side effect is panic on invalid size,
+ // but many makechan and makemap use size zero, which is definitely OK.
+ case ir.OMAKECHAN, ir.OMAKEMAP:
+ n := n.(*ir.MakeExpr)
+ if !ir.IsConst(n.Len, constant.Int) || constant.Sign(n.Len.Val()) != 0 {
+ return true
+ }
+
+ // Only possible side effect is panic on invalid size.
+ // TODO(rsc): Merge with previous case (probably breaks toolstash -cmp).
+ case ir.OMAKESLICE, ir.OMAKESLICECOPY:
+ return true
+ }
+ return false
+ })
+}
+
+// getlit returns the value of lit when it is a small integer
+// constant, or -1 otherwise.
+func getlit(lit ir.Node) int {
+ if ir.IsSmallIntConst(lit) {
+ return int(ir.Int64Val(lit))
+ }
+ return -1
+}
+
+// isvaluelit reports whether n is a composite literal with value
+// semantics (array or struct), i.e. one whose entries can be
+// flattened inline into an enclosing plan.
+func isvaluelit(n ir.Node) bool {
+ return n.Op() == ir.OARRAYLIT || n.Op() == ir.OSTRUCTLIT
+}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package test
import "testing"
// run
// Code generated by gen/constFoldGen.go. DO NOT EDIT.
-package gc
+package test
import "testing"
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package test
import (
"internal/testenv"
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package test
import (
"internal/testenv"
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package test
import (
"math"
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package test
import (
"bytes"
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package test
+
+import "testing"
// Test to make sure we make copies of the values we
// put in interfaces.
-import (
- "testing"
-)
-
var x int
func TestEfaceConv1(t *testing.T) {
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package test
import (
"bufio"
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package test
import (
"internal/testenv"
-package gc
+package test
import "testing"
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc_test
+package test
import (
"bytes"
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package test
import (
"reflect"
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package test
import (
"bytes"
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package test
import "testing"
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package test
-import (
- "testing"
-)
+import "testing"
var glob = 3
var globp *int64
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import "cmd/compile/internal/types"
+
+// ----------------------------------------------------------------------------
+// Export format
+
+// Tags. Must be < 0.
+// (NOTE(review): presumably negative so tags cannot collide with
+// non-negative type indexes in the export data — confirm against the
+// corresponding importer.)
+const (
+ // Objects
+ packageTag = -(iota + 1)
+ constTag
+ typeTag
+ varTag
+ funcTag
+ endTag
+
+ // Types
+ namedTag
+ arrayTag
+ sliceTag
+ dddTag
+ structTag
+ pointerTag
+ signatureTag
+ interfaceTag
+ mapTag
+ chanTag
+
+ // Values
+ falseTag
+ trueTag
+ int64Tag
+ floatTag
+ fractionTag // not used by gc
+ complexTag
+ stringTag
+ nilTag
+ unknownTag // not used by gc (only appears in packages with errors)
+
+ // Type aliases
+ aliasTag
+)
+
+var predecl []*types.Type // initialized lazily
+
+// predeclared returns the list of predeclared types, built lazily on
+// first use so that the types package has finished initializing.
+// NOTE(review): the slice order appears to be significant (it looks
+// like an index-based encoding shared with the importer) — confirm
+// before reordering.
+func predeclared() []*types.Type {
+ if predecl == nil {
+ // initialize lazily to be sure that all
+ // elements have been initialized before
+ predecl = []*types.Type{
+ // basic types
+ types.Types[types.TBOOL],
+ types.Types[types.TINT],
+ types.Types[types.TINT8],
+ types.Types[types.TINT16],
+ types.Types[types.TINT32],
+ types.Types[types.TINT64],
+ types.Types[types.TUINT],
+ types.Types[types.TUINT8],
+ types.Types[types.TUINT16],
+ types.Types[types.TUINT32],
+ types.Types[types.TUINT64],
+ types.Types[types.TUINTPTR],
+ types.Types[types.TFLOAT32],
+ types.Types[types.TFLOAT64],
+ types.Types[types.TCOMPLEX64],
+ types.Types[types.TCOMPLEX128],
+ types.Types[types.TSTRING],
+
+ // basic type aliases
+ types.ByteType,
+ types.RuneType,
+
+ // error
+ types.ErrorType,
+
+ // untyped types
+ types.UntypedBool,
+ types.UntypedInt,
+ types.UntypedRune,
+ types.UntypedFloat,
+ types.UntypedComplex,
+ types.UntypedString,
+ types.Types[types.TNIL],
+
+ // package unsafe
+ types.Types[types.TUNSAFEPTR],
+
+ // invalid type (package contains errors)
+ types.Types[types.Txxx],
+
+ // any type, for builtin export data
+ types.Types[types.TANY],
+ }
+ }
+ return predecl
+}
--- /dev/null
+// Code generated by mkbuiltin.go. DO NOT EDIT.
+
+package typecheck
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+)
+
+var runtimeDecls = [...]struct {
+ name string
+ tag int
+ typ int
+}{
+ {"newobject", funcTag, 4},
+ {"mallocgc", funcTag, 8},
+ {"panicdivide", funcTag, 9},
+ {"panicshift", funcTag, 9},
+ {"panicmakeslicelen", funcTag, 9},
+ {"panicmakeslicecap", funcTag, 9},
+ {"throwinit", funcTag, 9},
+ {"panicwrap", funcTag, 9},
+ {"gopanic", funcTag, 11},
+ {"gorecover", funcTag, 14},
+ {"goschedguarded", funcTag, 9},
+ {"goPanicIndex", funcTag, 16},
+ {"goPanicIndexU", funcTag, 18},
+ {"goPanicSliceAlen", funcTag, 16},
+ {"goPanicSliceAlenU", funcTag, 18},
+ {"goPanicSliceAcap", funcTag, 16},
+ {"goPanicSliceAcapU", funcTag, 18},
+ {"goPanicSliceB", funcTag, 16},
+ {"goPanicSliceBU", funcTag, 18},
+ {"goPanicSlice3Alen", funcTag, 16},
+ {"goPanicSlice3AlenU", funcTag, 18},
+ {"goPanicSlice3Acap", funcTag, 16},
+ {"goPanicSlice3AcapU", funcTag, 18},
+ {"goPanicSlice3B", funcTag, 16},
+ {"goPanicSlice3BU", funcTag, 18},
+ {"goPanicSlice3C", funcTag, 16},
+ {"goPanicSlice3CU", funcTag, 18},
+ {"printbool", funcTag, 19},
+ {"printfloat", funcTag, 21},
+ {"printint", funcTag, 23},
+ {"printhex", funcTag, 25},
+ {"printuint", funcTag, 25},
+ {"printcomplex", funcTag, 27},
+ {"printstring", funcTag, 29},
+ {"printpointer", funcTag, 30},
+ {"printuintptr", funcTag, 31},
+ {"printiface", funcTag, 30},
+ {"printeface", funcTag, 30},
+ {"printslice", funcTag, 30},
+ {"printnl", funcTag, 9},
+ {"printsp", funcTag, 9},
+ {"printlock", funcTag, 9},
+ {"printunlock", funcTag, 9},
+ {"concatstring2", funcTag, 34},
+ {"concatstring3", funcTag, 35},
+ {"concatstring4", funcTag, 36},
+ {"concatstring5", funcTag, 37},
+ {"concatstrings", funcTag, 39},
+ {"cmpstring", funcTag, 40},
+ {"intstring", funcTag, 43},
+ {"slicebytetostring", funcTag, 44},
+ {"slicebytetostringtmp", funcTag, 45},
+ {"slicerunetostring", funcTag, 48},
+ {"stringtoslicebyte", funcTag, 50},
+ {"stringtoslicerune", funcTag, 53},
+ {"slicecopy", funcTag, 54},
+ {"decoderune", funcTag, 55},
+ {"countrunes", funcTag, 56},
+ {"convI2I", funcTag, 57},
+ {"convT16", funcTag, 58},
+ {"convT32", funcTag, 58},
+ {"convT64", funcTag, 58},
+ {"convTstring", funcTag, 58},
+ {"convTslice", funcTag, 58},
+ {"convT2E", funcTag, 59},
+ {"convT2Enoptr", funcTag, 59},
+ {"convT2I", funcTag, 59},
+ {"convT2Inoptr", funcTag, 59},
+ {"assertE2I", funcTag, 57},
+ {"assertE2I2", funcTag, 60},
+ {"assertI2I", funcTag, 57},
+ {"assertI2I2", funcTag, 60},
+ {"panicdottypeE", funcTag, 61},
+ {"panicdottypeI", funcTag, 61},
+ {"panicnildottype", funcTag, 62},
+ {"ifaceeq", funcTag, 64},
+ {"efaceeq", funcTag, 64},
+ {"fastrand", funcTag, 66},
+ {"makemap64", funcTag, 68},
+ {"makemap", funcTag, 69},
+ {"makemap_small", funcTag, 70},
+ {"mapaccess1", funcTag, 71},
+ {"mapaccess1_fast32", funcTag, 72},
+ {"mapaccess1_fast64", funcTag, 72},
+ {"mapaccess1_faststr", funcTag, 72},
+ {"mapaccess1_fat", funcTag, 73},
+ {"mapaccess2", funcTag, 74},
+ {"mapaccess2_fast32", funcTag, 75},
+ {"mapaccess2_fast64", funcTag, 75},
+ {"mapaccess2_faststr", funcTag, 75},
+ {"mapaccess2_fat", funcTag, 76},
+ {"mapassign", funcTag, 71},
+ {"mapassign_fast32", funcTag, 72},
+ {"mapassign_fast32ptr", funcTag, 72},
+ {"mapassign_fast64", funcTag, 72},
+ {"mapassign_fast64ptr", funcTag, 72},
+ {"mapassign_faststr", funcTag, 72},
+ {"mapiterinit", funcTag, 77},
+ {"mapdelete", funcTag, 77},
+ {"mapdelete_fast32", funcTag, 78},
+ {"mapdelete_fast64", funcTag, 78},
+ {"mapdelete_faststr", funcTag, 78},
+ {"mapiternext", funcTag, 79},
+ {"mapclear", funcTag, 80},
+ {"makechan64", funcTag, 82},
+ {"makechan", funcTag, 83},
+ {"chanrecv1", funcTag, 85},
+ {"chanrecv2", funcTag, 86},
+ {"chansend1", funcTag, 88},
+ {"closechan", funcTag, 30},
+ {"writeBarrier", varTag, 90},
+ {"typedmemmove", funcTag, 91},
+ {"typedmemclr", funcTag, 92},
+ {"typedslicecopy", funcTag, 93},
+ {"selectnbsend", funcTag, 94},
+ {"selectnbrecv", funcTag, 95},
+ {"selectnbrecv2", funcTag, 97},
+ {"selectsetpc", funcTag, 98},
+ {"selectgo", funcTag, 99},
+ {"block", funcTag, 9},
+ {"makeslice", funcTag, 100},
+ {"makeslice64", funcTag, 101},
+ {"makeslicecopy", funcTag, 102},
+ {"growslice", funcTag, 104},
+ {"memmove", funcTag, 105},
+ {"memclrNoHeapPointers", funcTag, 106},
+ {"memclrHasPointers", funcTag, 106},
+ {"memequal", funcTag, 107},
+ {"memequal0", funcTag, 108},
+ {"memequal8", funcTag, 108},
+ {"memequal16", funcTag, 108},
+ {"memequal32", funcTag, 108},
+ {"memequal64", funcTag, 108},
+ {"memequal128", funcTag, 108},
+ {"f32equal", funcTag, 109},
+ {"f64equal", funcTag, 109},
+ {"c64equal", funcTag, 109},
+ {"c128equal", funcTag, 109},
+ {"strequal", funcTag, 109},
+ {"interequal", funcTag, 109},
+ {"nilinterequal", funcTag, 109},
+ {"memhash", funcTag, 110},
+ {"memhash0", funcTag, 111},
+ {"memhash8", funcTag, 111},
+ {"memhash16", funcTag, 111},
+ {"memhash32", funcTag, 111},
+ {"memhash64", funcTag, 111},
+ {"memhash128", funcTag, 111},
+ {"f32hash", funcTag, 111},
+ {"f64hash", funcTag, 111},
+ {"c64hash", funcTag, 111},
+ {"c128hash", funcTag, 111},
+ {"strhash", funcTag, 111},
+ {"interhash", funcTag, 111},
+ {"nilinterhash", funcTag, 111},
+ {"int64div", funcTag, 112},
+ {"uint64div", funcTag, 113},
+ {"int64mod", funcTag, 112},
+ {"uint64mod", funcTag, 113},
+ {"float64toint64", funcTag, 114},
+ {"float64touint64", funcTag, 115},
+ {"float64touint32", funcTag, 116},
+ {"int64tofloat64", funcTag, 117},
+ {"uint64tofloat64", funcTag, 118},
+ {"uint32tofloat64", funcTag, 119},
+ {"complex128div", funcTag, 120},
+ {"racefuncenter", funcTag, 31},
+ {"racefuncenterfp", funcTag, 9},
+ {"racefuncexit", funcTag, 9},
+ {"raceread", funcTag, 31},
+ {"racewrite", funcTag, 31},
+ {"racereadrange", funcTag, 121},
+ {"racewriterange", funcTag, 121},
+ {"msanread", funcTag, 121},
+ {"msanwrite", funcTag, 121},
+ {"msanmove", funcTag, 122},
+ {"checkptrAlignment", funcTag, 123},
+ {"checkptrArithmetic", funcTag, 125},
+ {"libfuzzerTraceCmp1", funcTag, 127},
+ {"libfuzzerTraceCmp2", funcTag, 129},
+ {"libfuzzerTraceCmp4", funcTag, 130},
+ {"libfuzzerTraceCmp8", funcTag, 131},
+ {"libfuzzerTraceConstCmp1", funcTag, 127},
+ {"libfuzzerTraceConstCmp2", funcTag, 129},
+ {"libfuzzerTraceConstCmp4", funcTag, 130},
+ {"libfuzzerTraceConstCmp8", funcTag, 131},
+ {"x86HasPOPCNT", varTag, 6},
+ {"x86HasSSE41", varTag, 6},
+ {"x86HasFMA", varTag, 6},
+ {"armHasVFPv4", varTag, 6},
+ {"arm64HasATOMICS", varTag, 6},
+}
+
+func runtimeTypes() []*types.Type {
+ var typs [132]*types.Type
+ typs[0] = types.ByteType
+ typs[1] = types.NewPtr(typs[0])
+ typs[2] = types.Types[types.TANY]
+ typs[3] = types.NewPtr(typs[2])
+ typs[4] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3])})
+ typs[5] = types.Types[types.TUINTPTR]
+ typs[6] = types.Types[types.TBOOL]
+ typs[7] = types.Types[types.TUNSAFEPTR]
+ typs[8] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[5]), ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[6])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7])})
+ typs[9] = NewFuncType(nil, nil, nil)
+ typs[10] = types.Types[types.TINTER]
+ typs[11] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[10])}, nil)
+ typs[12] = types.Types[types.TINT32]
+ typs[13] = types.NewPtr(typs[12])
+ typs[14] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[13])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[10])})
+ typs[15] = types.Types[types.TINT]
+ typs[16] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[15])}, nil)
+ typs[17] = types.Types[types.TUINT]
+ typs[18] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[17]), ir.NewField(base.Pos, nil, nil, typs[15])}, nil)
+ typs[19] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])}, nil)
+ typs[20] = types.Types[types.TFLOAT64]
+ typs[21] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])}, nil)
+ typs[22] = types.Types[types.TINT64]
+ typs[23] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[22])}, nil)
+ typs[24] = types.Types[types.TUINT64]
+ typs[25] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[24])}, nil)
+ typs[26] = types.Types[types.TCOMPLEX128]
+ typs[27] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[26])}, nil)
+ typs[28] = types.Types[types.TSTRING]
+ typs[29] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}, nil)
+ typs[30] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[2])}, nil)
+ typs[31] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[5])}, nil)
+ typs[32] = types.NewArray(typs[0], 32)
+ typs[33] = types.NewPtr(typs[32])
+ typs[34] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])})
+ typs[35] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])})
+ typs[36] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])})
+ typs[37] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])})
+ typs[38] = types.NewSlice(typs[28])
+ typs[39] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[38])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])})
+ typs[40] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[15])})
+ typs[41] = types.NewArray(typs[0], 4)
+ typs[42] = types.NewPtr(typs[41])
+ typs[43] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[42]), ir.NewField(base.Pos, nil, nil, typs[22])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])})
+ typs[44] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])})
+ typs[45] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])})
+ typs[46] = types.RuneType
+ typs[47] = types.NewSlice(typs[46])
+ typs[48] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[47])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])})
+ typs[49] = types.NewSlice(typs[0])
+ typs[50] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[33]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[49])})
+ typs[51] = types.NewArray(typs[46], 32)
+ typs[52] = types.NewPtr(typs[51])
+ typs[53] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[52]), ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[47])})
+ typs[54] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[5])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[15])})
+ typs[55] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[46]), ir.NewField(base.Pos, nil, nil, typs[15])})
+ typs[56] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[28])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[15])})
+ typs[57] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[2])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[2])})
+ typs[58] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[2])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7])})
+ typs[59] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[2])})
+ typs[60] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[2])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[2]), ir.NewField(base.Pos, nil, nil, typs[6])})
+ typs[61] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[1])}, nil)
+ typs[62] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1])}, nil)
+ typs[63] = types.NewPtr(typs[5])
+ typs[64] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[63]), ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[7])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])})
+ typs[65] = types.Types[types.TUINT32]
+ typs[66] = NewFuncType(nil, nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[65])})
+ typs[67] = types.NewMap(typs[2], typs[2])
+ typs[68] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[22]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[67])})
+ typs[69] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[67])})
+ typs[70] = NewFuncType(nil, nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[67])})
+ typs[71] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3])})
+ typs[72] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[2])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3])})
+ typs[73] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[1])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3])})
+ typs[74] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[6])})
+ typs[75] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[2])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[6])})
+ typs[76] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[1])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[6])})
+ typs[77] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[3])}, nil)
+ typs[78] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67]), ir.NewField(base.Pos, nil, nil, typs[2])}, nil)
+ typs[79] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3])}, nil)
+ typs[80] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[67])}, nil)
+ typs[81] = types.NewChan(typs[2], types.Cboth)
+ typs[82] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[22])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[81])})
+ typs[83] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[81])})
+ typs[84] = types.NewChan(typs[2], types.Crecv)
+ typs[85] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[84]), ir.NewField(base.Pos, nil, nil, typs[3])}, nil)
+ typs[86] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[84]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])})
+ typs[87] = types.NewChan(typs[2], types.Csend)
+ typs[88] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[87]), ir.NewField(base.Pos, nil, nil, typs[3])}, nil)
+ typs[89] = types.NewArray(typs[0], 3)
+ typs[90] = NewStructType([]*ir.Field{ir.NewField(base.Pos, Lookup("enabled"), nil, typs[6]), ir.NewField(base.Pos, Lookup("pad"), nil, typs[89]), ir.NewField(base.Pos, Lookup("needed"), nil, typs[6]), ir.NewField(base.Pos, Lookup("cgo"), nil, typs[6]), ir.NewField(base.Pos, Lookup("alignme"), nil, typs[24])})
+ typs[91] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[3])}, nil)
+ typs[92] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[3])}, nil)
+ typs[93] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[15])})
+ typs[94] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[87]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])})
+ typs[95] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[84])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])})
+ typs[96] = types.NewPtr(typs[6])
+ typs[97] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[96]), ir.NewField(base.Pos, nil, nil, typs[84])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])})
+ typs[98] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[63])}, nil)
+ typs[99] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[63]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[6])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[6])})
+ typs[100] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7])})
+ typs[101] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[22]), ir.NewField(base.Pos, nil, nil, typs[22])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7])})
+ typs[102] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[15]), ir.NewField(base.Pos, nil, nil, typs[7])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7])})
+ typs[103] = types.NewSlice(typs[2])
+ typs[104] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[103]), ir.NewField(base.Pos, nil, nil, typs[15])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[103])})
+ typs[105] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[5])}, nil)
+ typs[106] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[5])}, nil)
+ typs[107] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[5])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])})
+ typs[108] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[3]), ir.NewField(base.Pos, nil, nil, typs[3])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])})
+ typs[109] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[7])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[6])})
+ typs[110] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[5]), ir.NewField(base.Pos, nil, nil, typs[5])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[5])})
+ typs[111] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[5])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[5])})
+ typs[112] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[22]), ir.NewField(base.Pos, nil, nil, typs[22])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[22])})
+ typs[113] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[24]), ir.NewField(base.Pos, nil, nil, typs[24])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[24])})
+ typs[114] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[22])})
+ typs[115] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[24])})
+ typs[116] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[65])})
+ typs[117] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[22])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])})
+ typs[118] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[24])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])})
+ typs[119] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[65])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[20])})
+ typs[120] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[26]), ir.NewField(base.Pos, nil, nil, typs[26])}, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[26])})
+ typs[121] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[5]), ir.NewField(base.Pos, nil, nil, typs[5])}, nil)
+ typs[122] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[5]), ir.NewField(base.Pos, nil, nil, typs[5]), ir.NewField(base.Pos, nil, nil, typs[5])}, nil)
+ typs[123] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[1]), ir.NewField(base.Pos, nil, nil, typs[5])}, nil)
+ typs[124] = types.NewSlice(typs[7])
+ typs[125] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[7]), ir.NewField(base.Pos, nil, nil, typs[124])}, nil)
+ typs[126] = types.Types[types.TUINT8]
+ typs[127] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[126]), ir.NewField(base.Pos, nil, nil, typs[126])}, nil)
+ typs[128] = types.Types[types.TUINT16]
+ typs[129] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[128]), ir.NewField(base.Pos, nil, nil, typs[128])}, nil)
+ typs[130] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[65]), ir.NewField(base.Pos, nil, nil, typs[65])}, nil)
+ typs[131] = NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, typs[24]), ir.NewField(base.Pos, nil, nil, typs[24])}, nil)
+ return typs[:]
+}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc_test
+package typecheck
import (
"bytes"
)
func TestBuiltin(t *testing.T) {
+ t.Skip("mkbuiltin needs fixing")
testenv.MustHaveGoRun(t)
t.Parallel()
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package typecheck
import (
- "cmd/compile/internal/base"
- "cmd/compile/internal/ir"
- "cmd/compile/internal/types"
- "cmd/internal/src"
"fmt"
"go/constant"
"go/token"
"math/big"
"strings"
"unicode"
-)
-const (
- // Maximum size in bits for big.Ints before signalling
- // overflow and also mantissa precision for big.Floats.
- Mpprec = 512
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
)
-func bigFloatVal(v constant.Value) *big.Float {
- f := new(big.Float)
- f.SetPrec(Mpprec)
- switch u := constant.Val(v).(type) {
- case int64:
- f.SetInt64(u)
- case *big.Int:
- f.SetInt(u)
- case *big.Float:
- f.Set(u)
- case *big.Rat:
- f.SetRat(u)
- default:
- base.Fatalf("unexpected: %v", u)
- }
- return f
-}
-
func roundFloat(v constant.Value, sz int64) constant.Value {
switch sz {
case 4:
// TODO(mdempsky): Replace these with better APIs.
func convlit(n ir.Node, t *types.Type) ir.Node { return convlit1(n, t, false, nil) }
-func defaultlit(n ir.Node, t *types.Type) ir.Node { return convlit1(n, t, false, nil) }
+func DefaultLit(n ir.Node, t *types.Type) ir.Node { return convlit1(n, t, false, nil) }
// convlit1 converts an untyped expression n to type t. If n already
// has a type, convlit1 has no effect.
case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT, ir.OREAL, ir.OIMAG:
ot := operandType(n.Op(), t)
if ot == nil {
- n = defaultlit(n, nil)
+ n = DefaultLit(n, nil)
break
}
n := n.(*ir.UnaryExpr)
- n.SetLeft(convlit(n.Left(), ot))
- if n.Left().Type() == nil {
+ n.X = convlit(n.X, ot)
+ if n.X.Type() == nil {
n.SetType(nil)
return n
}
case ir.OADD, ir.OSUB, ir.OMUL, ir.ODIV, ir.OMOD, ir.OOR, ir.OXOR, ir.OAND, ir.OANDNOT, ir.OOROR, ir.OANDAND, ir.OCOMPLEX:
ot := operandType(n.Op(), t)
if ot == nil {
- n = defaultlit(n, nil)
+ n = DefaultLit(n, nil)
break
}
var l, r ir.Node
switch n := n.(type) {
case *ir.BinaryExpr:
- n.SetLeft(convlit(n.Left(), ot))
- n.SetRight(convlit(n.Right(), ot))
- l, r = n.Left(), n.Right()
+ n.X = convlit(n.X, ot)
+ n.Y = convlit(n.Y, ot)
+ l, r = n.X, n.Y
case *ir.LogicalExpr:
- n.SetLeft(convlit(n.Left(), ot))
- n.SetRight(convlit(n.Right(), ot))
- l, r = n.Left(), n.Right()
+ n.X = convlit(n.X, ot)
+ n.Y = convlit(n.Y, ot)
+ l, r = n.X, n.Y
}
if l.Type() == nil || r.Type() == nil {
return n
case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
+ n := n.(*ir.BinaryExpr)
if !t.IsBoolean() {
break
}
return n
case ir.OLSH, ir.ORSH:
- n.SetLeft(convlit1(n.Left(), t, explicit, nil))
- n.SetType(n.Left().Type())
+ n := n.(*ir.BinaryExpr)
+ n.X = convlit1(n.X, t, explicit, nil)
+ n.SetType(n.X.Type())
if n.Type() != nil && !n.Type().IsInteger() {
base.Errorf("invalid operation: %v (shift of type %v)", n, n.Type())
n.SetType(nil)
switch op {
case ir.OCOMPLEX:
if t.IsComplex() {
- return floatForComplex(t)
+ return types.FloatForComplex(t)
}
case ir.OREAL, ir.OIMAG:
if t.IsFloat() {
- return complexForFloat(t)
+ return types.ComplexForFloat(t)
}
default:
if okfor[op][t.Kind()] {
// something that looks like an integer we omit the
// value from the error message.
// (See issue #11371).
- f := bigFloatVal(v)
- if f.MantExp(nil) > 2*Mpprec {
+ f := ir.BigFloat(v)
+ if f.MantExp(nil) > 2*ir.ConstPrec {
base.Errorf("integer too large")
} else {
var t big.Float
return constant.MakeInt64(1)
}
-// doesoverflow reports whether constant value v is too large
-// to represent with type t.
-func doesoverflow(v constant.Value, t *types.Type) bool {
- switch {
- case t.IsInteger():
- bits := uint(8 * t.Size())
- if t.IsUnsigned() {
- x, ok := constant.Uint64Val(v)
- return !ok || x>>bits != 0
- }
- x, ok := constant.Int64Val(v)
- if x < 0 {
- x = ^x
- }
- return !ok || x>>(bits-1) != 0
- case t.IsFloat():
- switch t.Size() {
- case 4:
- f, _ := constant.Float32Val(v)
- return math.IsInf(float64(f), 0)
- case 8:
- f, _ := constant.Float64Val(v)
- return math.IsInf(f, 0)
- }
- case t.IsComplex():
- ft := floatForComplex(t)
- return doesoverflow(constant.Real(v), ft) || doesoverflow(constant.Imag(v), ft)
- }
- base.Fatalf("doesoverflow: %v, %v", v, t)
- panic("unreachable")
-}
-
// overflow reports whether constant value v is too large
// to represent with type t, and emits an error message if so.
func overflow(v constant.Value, t *types.Type) bool {
if t.IsUntyped() {
return false
}
- if v.Kind() == constant.Int && constant.BitLen(v) > Mpprec {
+ if v.Kind() == constant.Int && constant.BitLen(v) > ir.ConstPrec {
base.Errorf("integer too large")
return true
}
- if doesoverflow(v, t) {
+ if ir.ConstOverflow(v, t) {
base.Errorf("constant %v overflows %v", types.FmtConst(v, false), t)
return true
}
ir.ORSH: token.SHR,
}
-// evalConst returns a constant-evaluated expression equivalent to n.
-// If n is not a constant, evalConst returns n.
-// Otherwise, evalConst returns a new OLITERAL with the same value as n,
+// EvalConst returns a constant-evaluated expression equivalent to n.
+// If n is not a constant, EvalConst returns n.
+// Otherwise, EvalConst returns a new OLITERAL with the same value as n,
// and with .Orig pointing back to n.
-func evalConst(n ir.Node) ir.Node {
+func EvalConst(n ir.Node) ir.Node {
// Pick off just the opcodes that can be constant evaluated.
switch n.Op() {
case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT:
- nl := n.Left()
+ n := n.(*ir.UnaryExpr)
+ nl := n.X
if nl.Op() == ir.OLITERAL {
var prec uint
if n.Type().IsUnsigned() {
prec = uint(n.Type().Size() * 8)
}
- return origConst(n, constant.UnaryOp(tokenForOp[n.Op()], nl.Val(), prec))
+ return OrigConst(n, constant.UnaryOp(tokenForOp[n.Op()], nl.Val(), prec))
}
case ir.OADD, ir.OSUB, ir.OMUL, ir.ODIV, ir.OMOD, ir.OOR, ir.OXOR, ir.OAND, ir.OANDNOT:
- nl, nr := n.Left(), n.Right()
+ n := n.(*ir.BinaryExpr)
+ nl, nr := n.X, n.Y
if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
rval := nr.Val()
if n.Op() == ir.ODIV && n.Type().IsInteger() {
tok = token.QUO_ASSIGN // integer division
}
- return origConst(n, constant.BinaryOp(nl.Val(), tok, rval))
+ return OrigConst(n, constant.BinaryOp(nl.Val(), tok, rval))
}
case ir.OOROR, ir.OANDAND:
- nl, nr := n.Left(), n.Right()
+ n := n.(*ir.LogicalExpr)
+ nl, nr := n.X, n.Y
if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
- return origConst(n, constant.BinaryOp(nl.Val(), tokenForOp[n.Op()], nr.Val()))
+ return OrigConst(n, constant.BinaryOp(nl.Val(), tokenForOp[n.Op()], nr.Val()))
}
case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
- nl, nr := n.Left(), n.Right()
+ n := n.(*ir.BinaryExpr)
+ nl, nr := n.X, n.Y
if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
- return origBoolConst(n, constant.Compare(nl.Val(), tokenForOp[n.Op()], nr.Val()))
+ return OrigBool(n, constant.Compare(nl.Val(), tokenForOp[n.Op()], nr.Val()))
}
case ir.OLSH, ir.ORSH:
- nl, nr := n.Left(), n.Right()
+ n := n.(*ir.BinaryExpr)
+ nl, nr := n.X, n.Y
if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
// shiftBound from go/types; "so we can express smallestFloat64"
const shiftBound = 1023 - 1 + 52
n.SetType(nil)
break
}
- return origConst(n, constant.Shift(toint(nl.Val()), tokenForOp[n.Op()], uint(s)))
+ return OrigConst(n, constant.Shift(toint(nl.Val()), tokenForOp[n.Op()], uint(s)))
}
case ir.OCONV, ir.ORUNESTR:
- nl := n.Left()
+ n := n.(*ir.ConvExpr)
+ nl := n.X
if ir.OKForConst[n.Type().Kind()] && nl.Op() == ir.OLITERAL {
- return origConst(n, convertVal(nl.Val(), n.Type(), true))
+ return OrigConst(n, convertVal(nl.Val(), n.Type(), true))
}
case ir.OCONVNOP:
- nl := n.Left()
+ n := n.(*ir.ConvExpr)
+ nl := n.X
if ir.OKForConst[n.Type().Kind()] && nl.Op() == ir.OLITERAL {
// set so n.Orig gets OCONV instead of OCONVNOP
n.SetOp(ir.OCONV)
- return origConst(n, nl.Val())
+ return OrigConst(n, nl.Val())
}
case ir.OADDSTR:
// Merge adjacent constants in the argument list.
- s := n.List().Slice()
+ n := n.(*ir.AddStringExpr)
+ s := n.List
need := 0
for i := 0; i < len(s); i++ {
if i == 0 || !ir.IsConst(s[i-1], constant.String) || !ir.IsConst(s[i], constant.String) {
for _, c := range s {
strs = append(strs, ir.StringVal(c))
}
- return origConst(n, constant.MakeString(strings.Join(strs, "")))
+ return OrigConst(n, constant.MakeString(strings.Join(strs, "")))
}
newList := make([]ir.Node, 0, need)
for i := 0; i < len(s); i++ {
}
nl := ir.Copy(n).(*ir.AddStringExpr)
- nl.PtrList().Set(s[i:i2])
- newList = append(newList, origConst(nl, constant.MakeString(strings.Join(strs, ""))))
+ nl.List.Set(s[i:i2])
+ newList = append(newList, OrigConst(nl, constant.MakeString(strings.Join(strs, ""))))
i = i2 - 1
} else {
newList = append(newList, s[i])
}
nn := ir.Copy(n).(*ir.AddStringExpr)
- nn.PtrList().Set(newList)
+ nn.List.Set(newList)
return nn
case ir.OCAP, ir.OLEN:
- nl := n.Left()
+ n := n.(*ir.UnaryExpr)
+ nl := n.X
switch nl.Type().Kind() {
case types.TSTRING:
if ir.IsConst(nl, constant.String) {
- return origIntConst(n, int64(len(ir.StringVal(nl))))
+ return OrigInt(n, int64(len(ir.StringVal(nl))))
}
case types.TARRAY:
if !anyCallOrChan(nl) {
- return origIntConst(n, nl.Type().NumElem())
+ return OrigInt(n, nl.Type().NumElem())
}
}
case ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
- return origIntConst(n, evalunsafe(n))
+ n := n.(*ir.UnaryExpr)
+ return OrigInt(n, evalunsafe(n))
case ir.OREAL:
- nl := n.Left()
+ n := n.(*ir.UnaryExpr)
+ nl := n.X
if nl.Op() == ir.OLITERAL {
- return origConst(n, constant.Real(nl.Val()))
+ return OrigConst(n, constant.Real(nl.Val()))
}
case ir.OIMAG:
- nl := n.Left()
+ n := n.(*ir.UnaryExpr)
+ nl := n.X
if nl.Op() == ir.OLITERAL {
- return origConst(n, constant.Imag(nl.Val()))
+ return OrigConst(n, constant.Imag(nl.Val()))
}
case ir.OCOMPLEX:
- nl, nr := n.Left(), n.Right()
+ n := n.(*ir.BinaryExpr)
+ nl, nr := n.X, n.Y
if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
- return origConst(n, makeComplex(nl.Val(), nr.Val()))
+ return OrigConst(n, makeComplex(nl.Val(), nr.Val()))
}
}
ir.OBITNOT: "bitwise complement",
}
-// origConst returns an OLITERAL with orig n and value v.
-func origConst(n ir.Node, v constant.Value) ir.Node {
- lno := setlineno(n)
+// OrigConst returns an OLITERAL with orig n and value v.
+func OrigConst(n ir.Node, v constant.Value) ir.Node {
+ lno := ir.SetPos(n)
v = convertVal(v, n.Type(), false)
base.Pos = lno
switch v.Kind() {
case constant.Int:
- if constant.BitLen(v) <= Mpprec {
+ if constant.BitLen(v) <= ir.ConstPrec {
break
}
fallthrough
return ir.NewConstExpr(v, n)
}
-func origBoolConst(n ir.Node, v bool) ir.Node {
- return origConst(n, constant.MakeBool(v))
+func OrigBool(n ir.Node, v bool) ir.Node {
+ return OrigConst(n, constant.MakeBool(v))
}
-func origIntConst(n ir.Node, v int64) ir.Node {
- return origConst(n, constant.MakeInt64(v))
+func OrigInt(n ir.Node, v int64) ir.Node {
+ return OrigConst(n, constant.MakeInt64(v))
}
// defaultlit on both nodes simultaneously;
return nil
}
-func smallintconst(n ir.Node) bool {
- if n.Op() == ir.OLITERAL {
- v, ok := constant.Int64Val(n.Val())
- return ok && int64(int32(v)) == v
- }
- return false
-}
-
-// indexconst checks if Node n contains a constant expression
+// IndexConst checks if Node n contains a constant expression
// representable as a non-negative int and returns its value.
// If n is not a constant expression, not representable as an
// integer, or negative, it returns -1. If n is too large, it
// returns -2.
-func indexconst(n ir.Node) int64 {
+func IndexConst(n ir.Node) int64 {
if n.Op() != ir.OLITERAL {
return -1
}
if v.Kind() != constant.Int || constant.Sign(v) < 0 {
return -1
}
- if doesoverflow(v, types.Types[types.TINT]) {
+ if ir.ConstOverflow(v, types.Types[types.TINT]) {
return -2
}
return ir.IntVal(types.Types[types.TINT], v)
}
-// isGoConst reports whether n is a Go language constant (as opposed to a
-// compile-time constant).
-//
-// Expressions derived from nil, like string([]byte(nil)), while they
-// may be known at compile time, are not Go language constants.
-func isGoConst(n ir.Node) bool {
- return n.Op() == ir.OLITERAL
-}
-
// anyCallOrChan reports whether n contains any calls or channel operations.
func anyCallOrChan(n ir.Node) bool {
return ir.Any(n, func(n ir.Node) bool {
// n must not be an untyped constant.
func (s *constSet) add(pos src.XPos, n ir.Node, what, where string) {
if conv := n; conv.Op() == ir.OCONVIFACE {
+ conv := conv.(*ir.ConvExpr)
if conv.Implicit() {
- n = conv.Left()
+ n = conv.X
}
}
- if !isGoConst(n) {
+ if !ir.IsConstNode(n) {
return
}
if n.Type().IsUntyped() {
}
k := constSetKey{typ, ir.ConstValue(n)}
- if hasUniquePos(n) {
+ if ir.HasUniquePos(n) {
pos = n.Pos()
}
}
return show
}
+
+// evalunsafe evaluates a package unsafe operation and returns the result.
+// n must be an OALIGNOF, OOFFSETOF, or OSIZEOF node; any other op is a
+// fatal internal error. On invalid user expressions it reports an error
+// via base.Errorf and returns 0.
+func evalunsafe(n ir.Node) int64 {
+	switch n.Op() {
+	case ir.OALIGNOF, ir.OSIZEOF:
+		n := n.(*ir.UnaryExpr)
+		n.X = Expr(n.X)
+		n.X = DefaultLit(n.X, nil)
+		tr := n.X.Type()
+		if tr == nil {
+			return 0
+		}
+		// Force size/alignment computation before reading Align/Width.
+		types.CalcSize(tr)
+		if n.Op() == ir.OALIGNOF {
+			return int64(tr.Align)
+		}
+		return tr.Width
+
+	case ir.OOFFSETOF:
+		// must be a selector.
+		n := n.(*ir.UnaryExpr)
+		if n.X.Op() != ir.OXDOT {
+			base.Errorf("invalid expression %v", n)
+			return 0
+		}
+		sel := n.X.(*ir.SelectorExpr)
+
+		// Remember base of selector to find it back after dot insertion.
+		// Since r->left may be mutated by typechecking, check it explicitly
+		// first to track it correctly.
+		sel.X = Expr(sel.X)
+		sbase := sel.X
+
+		tsel := Expr(sel)
+		n.X = tsel
+		if tsel.Type() == nil {
+			return 0
+		}
+		// Typechecking rewrote the OXDOT into a concrete selector;
+		// only plain field accesses are legal operands of Offsetof.
+		switch tsel.Op() {
+		case ir.ODOT, ir.ODOTPTR:
+			break
+		case ir.OCALLPART:
+			base.Errorf("invalid expression %v: argument is a method value", n)
+			return 0
+		default:
+			base.Errorf("invalid expression %v", n)
+			return 0
+		}
+
+		// Sum offsets for dots until we reach sbase.
+		var v int64
+		var next ir.Node
+		for r := tsel; r != sbase; r = next {
+			switch r.Op() {
+			case ir.ODOTPTR:
+				// For Offsetof(s.f), s may itself be a pointer,
+				// but accessing f must not otherwise involve
+				// indirection via embedded pointer types.
+				r := r.(*ir.SelectorExpr)
+				if r.X != sbase {
+					base.Errorf("invalid expression %v: selector implies indirection of embedded %v", n, r.X)
+					return 0
+				}
+				fallthrough
+			case ir.ODOT:
+				r := r.(*ir.SelectorExpr)
+				v += r.Offset
+				next = r.X
+			default:
+				ir.Dump("unsafenmagic", tsel)
+				base.Fatalf("impossible %v node after dot insertion", r.Op())
+			}
+		}
+		return v
+	}
+
+	base.Fatalf("unexpected op %v", n.Op())
+	return 0
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+var DeclContext ir.Class // PEXTERN/PAUTO
+
+// AssignDefn resolves the left-hand side of a := declaration (defn):
+// names not yet declared in the current block are declared as new
+// variables (and left[i] is replaced by the new ONAME), while names
+// already declared in this block are left as plain assignments.
+// It reports errors for non-names, names repeated on the left side,
+// and the case where no new variables appear at all.
+func AssignDefn(left []ir.Node, defn ir.Node) {
+	// Mark every named operand so repeats on the same line can be detected.
+	for _, n := range left {
+		if n.Sym() != nil {
+			n.Sym().SetUniq(true)
+		}
+	}
+
+	var nnew, nerr int
+	for i, n := range left {
+		if ir.IsBlank(n) {
+			continue
+		}
+		if !assignableName(n) {
+			base.ErrorfAt(defn.Pos(), "non-name %v on left side of :=", n)
+			nerr++
+			continue
+		}
+
+		// Uniq was cleared by an earlier occurrence: duplicate name.
+		if !n.Sym().Uniq() {
+			base.ErrorfAt(defn.Pos(), "%v repeated on left side of :=", n.Sym())
+			n.SetDiag(true)
+			nerr++
+			continue
+		}
+
+		n.Sym().SetUniq(false)
+		// Already declared in this exact block: plain assignment, not new.
+		if n.Sym().Block == types.Block {
+			continue
+		}
+
+		nnew++
+		n := NewName(n.Sym())
+		Declare(n, DeclContext)
+		n.Defn = defn
+		defn.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, n))
+		left[i] = n
+	}
+
+	// Only complain when there were no other errors on this statement.
+	if nnew == 0 && nerr == 0 {
+		base.ErrorfAt(defn.Pos(), "no new variables on left side of :=")
+	}
+}
+
+// assignableName reports whether n is a named entity that may appear
+// on the left side of a := declaration (it has a symbol and one of the
+// name-like ops below).
+func assignableName(n ir.Node) bool {
+	switch n.Op() {
+	case ir.ONAME,
+		ir.ONONAME,
+		ir.OPACK,
+		ir.OTYPE,
+		ir.OLITERAL:
+		return n.Sym() != nil
+	}
+
+	return false
+}
+
+// DeclFunc declares the function named sym with the type expression
+// tfn (which must be an OTFUNC node), opens its body scope via
+// StartFuncBody, typechecks the signature, and returns the new Func.
+// The caller must later close the scope with FinishFuncBody.
+func DeclFunc(sym *types.Sym, tfn ir.Ntype) *ir.Func {
+	if tfn.Op() != ir.OTFUNC {
+		base.Fatalf("expected OTFUNC node, got %v", tfn)
+	}
+
+	fn := ir.NewFunc(base.Pos)
+	fn.Nname = ir.NewFuncNameAt(base.Pos, sym, fn)
+	fn.Nname.Defn = fn
+	fn.Nname.Ntype = tfn
+	ir.MarkFunc(fn.Nname)
+	StartFuncBody(fn)
+	fn.Nname.Ntype = typecheckNtype(fn.Nname.Ntype)
+	return fn
+}
+
+// DeclVars declares variables from the grammar production
+//	new_name_list (type | [type] = expr_list)
+// declaring each name in vl with optional type t and initializer
+// expressions el, and returns the statements (ODCL and assignments)
+// needed to initialize them.
+func DeclVars(vl []*ir.Name, t ir.Ntype, el []ir.Node) []ir.Node {
+	var init []ir.Node
+	doexpr := len(el) > 0
+
+	// Special case: one multi-value expression assigned to several
+	// variables becomes a single OAS2 statement.
+	if len(el) == 1 && len(vl) > 1 {
+		e := el[0]
+		as2 := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
+		as2.Rhs = []ir.Node{e}
+		for _, v := range vl {
+			as2.Lhs.Append(v)
+			Declare(v, DeclContext)
+			v.Ntype = t
+			v.Defn = as2
+			if ir.CurFunc != nil {
+				init = append(init, ir.NewDecl(base.Pos, ir.ODCL, v))
+			}
+		}
+
+		return append(init, as2)
+	}
+
+	// General case: pair each variable with its expression, if any.
+	for i, v := range vl {
+		var e ir.Node
+		if doexpr {
+			if i >= len(el) {
+				base.Errorf("assignment mismatch: %d variables but %d values", len(vl), len(el))
+				break
+			}
+			e = el[i]
+		}
+
+		Declare(v, DeclContext)
+		v.Ntype = t
+
+		if e != nil || ir.CurFunc != nil || ir.IsBlank(v) {
+			if ir.CurFunc != nil {
+				init = append(init, ir.NewDecl(base.Pos, ir.ODCL, v))
+			}
+			as := ir.NewAssignStmt(base.Pos, v, e)
+			init = append(init, as)
+			if e != nil {
+				v.Defn = as
+			}
+		}
+	}
+
+	if len(el) > len(vl) {
+		base.Errorf("assignment mismatch: %d variables but %d values", len(vl), len(el))
+	}
+	return init
+}
+
+// Declare records that Node n declares symbol n.Sym in the specified
+// declaration context (PEXTERN for package scope, PAUTO/PPARAM/... for
+// function scope). It updates the symbol's block/def bookkeeping,
+// appends the declaration to the appropriate list (Target.Externs or
+// CurFunc.Dcl), and reports redeclaration errors.
+func Declare(n *ir.Name, ctxt ir.Class) {
+	if ir.IsBlank(n) {
+		return
+	}
+
+	s := n.Sym()
+
+	// kludgy: typecheckok means we're past parsing. Eg genwrapper may declare out of package names later.
+	if !inimport && !TypecheckAllowed && s.Pkg != types.LocalPkg {
+		base.ErrorfAt(n.Pos(), "cannot declare name %v", s)
+	}
+
+	gen := 0
+	if ctxt == ir.PEXTERN {
+		if s.Name == "init" {
+			base.ErrorfAt(n.Pos(), "cannot declare init - must be func")
+		}
+		if s.Name == "main" && s.Pkg.Name == "main" {
+			base.ErrorfAt(n.Pos(), "cannot declare main - must be func")
+		}
+		Target.Externs = append(Target.Externs, n)
+	} else {
+		if ir.CurFunc == nil && ctxt == ir.PAUTO {
+			base.Pos = n.Pos()
+			base.Fatalf("automatic outside function")
+		}
+		if ir.CurFunc != nil && ctxt != ir.PFUNC && n.Op() == ir.ONAME {
+			ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n)
+		}
+		// Assign a generation number used to disambiguate identically
+		// named declarations; presumably "·" names are compiler-generated
+		// and don't need one — TODO confirm.
+		if n.Op() == ir.OTYPE {
+			declare_typegen++
+			gen = declare_typegen
+		} else if n.Op() == ir.ONAME && ctxt == ir.PAUTO && !strings.Contains(s.Name, "·") {
+			vargen++
+			gen = vargen
+		}
+		types.Pushdcl(s)
+		n.Curfn = ir.CurFunc
+	}
+
+	if ctxt == ir.PAUTO {
+		n.SetFrameOffset(0)
+	}
+
+	if s.Block == types.Block {
+		// functype will print errors about duplicate function arguments.
+		// Don't repeat the error here.
+		if ctxt != ir.PPARAM && ctxt != ir.PPARAMOUT {
+			Redeclared(n.Pos(), s, "in this block")
+		}
+	}
+
+	s.Block = types.Block
+	s.Lastlineno = base.Pos
+	s.Def = n
+	n.Vargen = int32(gen)
+	n.Class_ = ctxt
+	if ctxt == ir.PFUNC {
+		n.Sym().SetFunc(true)
+	}
+
+	autoexport(n, ctxt)
+}
+
+// Export marks n for export (or reexport). It is idempotent: a symbol
+// already on the export list is left alone. With the -E debug flag set
+// it also prints the exported symbol.
+func Export(n *ir.Name) {
+	if n.Sym().OnExportList() {
+		return
+	}
+	n.Sym().SetOnExportList(true)
+
+	if base.Flag.E != 0 {
+		fmt.Printf("export symbol %v\n", n.Sym())
+	}
+
+	Target.Exports = append(Target.Exports, n)
+}
+
+// Redeclared emits a diagnostic about symbol s being redeclared at pos,
+// pointing at the previous declaration (or at the dot-import that first
+// introduced the name when no previous position is recorded).
+func Redeclared(pos src.XPos, s *types.Sym, where string) {
+	if !s.Lastlineno.IsKnown() {
+		// No recorded position: the name came in via a dot import.
+		pkgName := DotImportRefs[s.Def.(*ir.Ident)]
+		base.ErrorfAt(pos, "%v redeclared %s\n"+
+			"\t%v: previous declaration during import %q", s, where, base.FmtPos(pkgName.Pos()), pkgName.Pkg.Path)
+	} else {
+		prevPos := s.Lastlineno
+
+		// When an import and a declaration collide in separate files,
+		// present the import as the "redeclared", because the declaration
+		// is visible where the import is, but not vice versa.
+		// See issue 4510.
+		if s.Def == nil {
+			pos, prevPos = prevPos, pos
+		}
+
+		base.ErrorfAt(pos, "%v redeclared %s\n"+
+			"\t%v: previous declaration", s, where, base.FmtPos(prevPos))
+	}
+}
+
+// declare the function proper
+// and declare the arguments.
+// called in extern-declaration context
+// returns in auto-declaration context.
+func StartFuncBody(fn *ir.Func) {
+ // change the declaration context from extern to auto
+ funcStack = append(funcStack, funcStackEnt{ir.CurFunc, DeclContext})
+ ir.CurFunc = fn
+ DeclContext = ir.PAUTO
+
+ types.Markdcl()
+
+ if fn.Nname.Ntype != nil {
+ funcargs(fn.Nname.Ntype.(*ir.FuncType))
+ } else {
+ funcargs2(fn.Type())
+ }
+}
+
+// finish the body.
+// called in auto-declaration context.
+// returns in extern-declaration context.
+func FinishFuncBody() {
+ // change the declaration context from auto to previous context
+ types.Popdcl()
+ var e funcStackEnt
+ funcStack, e = funcStack[:len(funcStack)-1], funcStack[len(funcStack)-1]
+ ir.CurFunc, DeclContext = e.curfn, e.dclcontext
+}
+
+func CheckFuncStack() {
+ if len(funcStack) != 0 {
+ base.Fatalf("funcStack is non-empty: %v", len(funcStack))
+ }
+}
+
+// turn a parsed function declaration into a type
+func NewFuncType(nrecv *ir.Field, nparams, nresults []*ir.Field) *types.Type {
+ funarg := func(n *ir.Field) *types.Field {
+ lno := base.Pos
+ base.Pos = n.Pos
+
+ if n.Ntype != nil {
+ n.Type = typecheckNtype(n.Ntype).Type()
+ n.Ntype = nil
+ }
+
+ f := types.NewField(n.Pos, n.Sym, n.Type)
+ f.SetIsDDD(n.IsDDD)
+ if n.Decl != nil {
+ n.Decl.SetType(f.Type)
+ f.Nname = n.Decl
+ }
+
+ base.Pos = lno
+ return f
+ }
+ funargs := func(nn []*ir.Field) []*types.Field {
+ res := make([]*types.Field, len(nn))
+ for i, n := range nn {
+ res[i] = funarg(n)
+ }
+ return res
+ }
+
+ var recv *types.Field
+ if nrecv != nil {
+ recv = funarg(nrecv)
+ }
+
+ t := types.NewSignature(types.LocalPkg, recv, funargs(nparams), funargs(nresults))
+ checkdupfields("argument", t.Recvs().FieldSlice(), t.Params().FieldSlice(), t.Results().FieldSlice())
+ return t
+}
+
+// convert a parsed id/type list into
+// a type for struct/interface/arglist
+func NewStructType(l []*ir.Field) *types.Type {
+ lno := base.Pos
+
+ fields := make([]*types.Field, len(l))
+ for i, n := range l {
+ base.Pos = n.Pos
+
+ if n.Ntype != nil {
+ n.Type = typecheckNtype(n.Ntype).Type()
+ n.Ntype = nil
+ }
+ f := types.NewField(n.Pos, n.Sym, n.Type)
+ if n.Embedded {
+ checkembeddedtype(n.Type)
+ f.Embedded = 1
+ }
+ f.Note = n.Note
+ fields[i] = f
+ }
+ checkdupfields("field", fields)
+
+ base.Pos = lno
+ return types.NewStruct(types.LocalPkg, fields)
+}
+
+// Add a method, declared as a function.
+// - msym is the method symbol
+// - t is function type (with receiver)
+// Returns a pointer to the existing or added Field; or nil if there's an error.
+func addmethod(n *ir.Func, msym *types.Sym, t *types.Type, local, nointerface bool) *types.Field {
+ if msym == nil {
+ base.Fatalf("no method symbol")
+ }
+
+ // get parent type sym
+ rf := t.Recv() // ptr to this structure
+ if rf == nil {
+ base.Errorf("missing receiver")
+ return nil
+ }
+
+ mt := types.ReceiverBaseType(rf.Type)
+ if mt == nil || mt.Sym() == nil {
+ pa := rf.Type
+ t := pa
+ if t != nil && t.IsPtr() {
+ if t.Sym() != nil {
+ base.Errorf("invalid receiver type %v (%v is a pointer type)", pa, t)
+ return nil
+ }
+ t = t.Elem()
+ }
+
+ switch {
+ case t == nil || t.Broke():
+ // rely on typecheck having complained before
+ case t.Sym() == nil:
+ base.Errorf("invalid receiver type %v (%v is not a defined type)", pa, t)
+ case t.IsPtr():
+ base.Errorf("invalid receiver type %v (%v is a pointer type)", pa, t)
+ case t.IsInterface():
+ base.Errorf("invalid receiver type %v (%v is an interface type)", pa, t)
+ default:
+ // Should have picked off all the reasons above,
+ // but just in case, fall back to generic error.
+ base.Errorf("invalid receiver type %v (%L / %L)", pa, pa, t)
+ }
+ return nil
+ }
+
+ if local && mt.Sym().Pkg != types.LocalPkg {
+ base.Errorf("cannot define new methods on non-local type %v", mt)
+ return nil
+ }
+
+ if msym.IsBlank() {
+ return nil
+ }
+
+ if mt.IsStruct() {
+ for _, f := range mt.Fields().Slice() {
+ if f.Sym == msym {
+ base.Errorf("type %v has both field and method named %v", mt, msym)
+ f.SetBroke(true)
+ return nil
+ }
+ }
+ }
+
+ for _, f := range mt.Methods().Slice() {
+ if msym.Name != f.Sym.Name {
+ continue
+ }
+ // types.Identical only checks that incoming and result parameters match,
+ // so explicitly check that the receiver parameters match too.
+ if !types.Identical(t, f.Type) || !types.Identical(t.Recv().Type, f.Type.Recv().Type) {
+ base.Errorf("method redeclared: %v.%v\n\t%v\n\t%v", mt, msym, f.Type, t)
+ }
+ return f
+ }
+
+ f := types.NewField(base.Pos, msym, t)
+ f.Nname = n.Nname
+ f.SetNointerface(nointerface)
+
+ mt.Methods().Append(f)
+ return f
+}
+
+func autoexport(n *ir.Name, ctxt ir.Class) {
+ if n.Sym().Pkg != types.LocalPkg {
+ return
+ }
+ if (ctxt != ir.PEXTERN && ctxt != ir.PFUNC) || DeclContext != ir.PEXTERN {
+ return
+ }
+ if n.Type() != nil && n.Type().IsKind(types.TFUNC) && ir.IsMethod(n) {
+ return
+ }
+
+ if types.IsExported(n.Sym().Name) || initname(n.Sym().Name) {
+ Export(n)
+ }
+ if base.Flag.AsmHdr != "" && !n.Sym().Asm() {
+ n.Sym().SetAsm(true)
+ Target.Asms = append(Target.Asms, n)
+ }
+}
+
+// checkdupfields emits errors for duplicately named fields or methods in
+// a list of struct or interface types.
+func checkdupfields(what string, fss ...[]*types.Field) {
+ seen := make(map[*types.Sym]bool)
+ for _, fs := range fss {
+ for _, f := range fs {
+ if f.Sym == nil || f.Sym.IsBlank() {
+ continue
+ }
+ if seen[f.Sym] {
+ base.ErrorfAt(f.Pos, "duplicate %s %s", what, f.Sym.Name)
+ continue
+ }
+ seen[f.Sym] = true
+ }
+ }
+}
+
+// structs, functions, and methods.
+// they don't belong here, but where do they belong?
+func checkembeddedtype(t *types.Type) {
+ if t == nil {
+ return
+ }
+
+ if t.Sym() == nil && t.IsPtr() {
+ t = t.Elem()
+ if t.IsInterface() {
+ base.Errorf("embedded type cannot be a pointer to interface")
+ }
+ }
+
+ if t.IsPtr() || t.IsUnsafePtr() {
+ base.Errorf("embedded type cannot be a pointer")
+ } else if t.Kind() == types.TFORW && !t.ForwardType().Embedlineno.IsKnown() {
+ t.ForwardType().Embedlineno = base.Pos
+ }
+}
+
+// declare individual names - var, typ, const
+
+var declare_typegen int
+
+func fakeRecvField() *types.Field {
+ return types.NewField(src.NoXPos, nil, types.FakeRecvType())
+}
+
+var funcStack []funcStackEnt // stack of previous values of Curfn/dclcontext
+
+type funcStackEnt struct {
+ curfn *ir.Func
+ dclcontext ir.Class
+}
+
+func funcarg(n *ir.Field, ctxt ir.Class) {
+ if n.Sym == nil {
+ return
+ }
+
+ name := ir.NewNameAt(n.Pos, n.Sym)
+ n.Decl = name
+ name.Ntype = n.Ntype
+ name.SetIsDDD(n.IsDDD)
+ Declare(name, ctxt)
+
+ vargen++
+ n.Decl.Vargen = int32(vargen)
+}
+
+func funcarg2(f *types.Field, ctxt ir.Class) {
+ if f.Sym == nil {
+ return
+ }
+ n := ir.NewNameAt(f.Pos, f.Sym)
+ f.Nname = n
+ n.SetType(f.Type)
+ n.SetIsDDD(f.IsDDD())
+ Declare(n, ctxt)
+}
+
+func funcargs(nt *ir.FuncType) {
+ if nt.Op() != ir.OTFUNC {
+ base.Fatalf("funcargs %v", nt.Op())
+ }
+
+ // re-start the variable generation number
+ // we want to use small numbers for the return variables,
+ // so let them have the chunk starting at 1.
+ //
+ // TODO(mdempsky): This is ugly, and only necessary because
+ // esc.go uses Vargen to figure out result parameters' index
+ // within the result tuple.
+ vargen = len(nt.Results)
+
+ // declare the receiver and in arguments.
+ if nt.Recv != nil {
+ funcarg(nt.Recv, ir.PPARAM)
+ }
+ for _, n := range nt.Params {
+ funcarg(n, ir.PPARAM)
+ }
+
+ oldvargen := vargen
+ vargen = 0
+
+ // declare the out arguments.
+ gen := len(nt.Params)
+ for _, n := range nt.Results {
+ if n.Sym == nil {
+ // Name so that escape analysis can track it. ~r stands for 'result'.
+ n.Sym = LookupNum("~r", gen)
+ gen++
+ }
+ if n.Sym.IsBlank() {
+ // Give it a name so we can assign to it during return. ~b stands for 'blank'.
+ // The name must be different from ~r above because if you have
+ // func f() (_ int)
+ // func g() int
+ // f is allowed to use a plain 'return' with no arguments, while g is not.
+ // So the two cases must be distinguished.
+ n.Sym = LookupNum("~b", gen)
+ gen++
+ }
+
+ funcarg(n, ir.PPARAMOUT)
+ }
+
+ vargen = oldvargen
+}
+
+// Same as funcargs, except run over an already constructed TFUNC.
+// This happens during import, where the hidden_fndcl rule has
+// used functype directly to parse the function's type.
+func funcargs2(t *types.Type) {
+ if t.Kind() != types.TFUNC {
+ base.Fatalf("funcargs2 %v", t)
+ }
+
+ for _, f := range t.Recvs().Fields().Slice() {
+ funcarg2(f, ir.PPARAM)
+ }
+ for _, f := range t.Params().Fields().Slice() {
+ funcarg2(f, ir.PPARAM)
+ }
+ for _, f := range t.Results().Fields().Slice() {
+ funcarg2(f, ir.PPARAMOUT)
+ }
+}
+
+func initname(s string) bool {
+ return s == "init"
+}
+
+func tointerface(nmethods []*ir.Field) *types.Type {
+ if len(nmethods) == 0 {
+ return types.Types[types.TINTER]
+ }
+
+ lno := base.Pos
+
+ methods := make([]*types.Field, len(nmethods))
+ for i, n := range nmethods {
+ base.Pos = n.Pos
+ if n.Ntype != nil {
+ n.Type = typecheckNtype(n.Ntype).Type()
+ n.Ntype = nil
+ }
+ methods[i] = types.NewField(n.Pos, n.Sym, n.Type)
+ }
+
+ base.Pos = lno
+ return types.NewInterface(types.LocalPkg, methods)
+}
+
+var vargen int
+
+func Temp(t *types.Type) *ir.Name {
+ return TempAt(base.Pos, ir.CurFunc, t)
+}
+
+// make a new Node off the books
+func TempAt(pos src.XPos, curfn *ir.Func, t *types.Type) *ir.Name {
+ if curfn == nil {
+ base.Fatalf("no curfn for tempAt")
+ }
+ if curfn.Op() == ir.OCLOSURE {
+ ir.Dump("tempAt", curfn)
+ base.Fatalf("adding tempAt to wrong closure function")
+ }
+ if t == nil {
+ base.Fatalf("tempAt called with nil type")
+ }
+
+ s := &types.Sym{
+ Name: autotmpname(len(curfn.Dcl)),
+ Pkg: types.LocalPkg,
+ }
+ n := ir.NewNameAt(pos, s)
+ s.Def = n
+ n.SetType(t)
+ n.Class_ = ir.PAUTO
+ n.SetEsc(ir.EscNever)
+ n.Curfn = curfn
+ n.SetUsed(true)
+ n.SetAutoTemp(true)
+ curfn.Dcl = append(curfn.Dcl, n)
+
+ types.CalcSize(t)
+
+ return n
+}
+
+// autotmpname returns the name for an autotmp variable numbered n.
+func autotmpname(n int) string {
+ // Give each tmp a different name so that they can be registerized.
+ // Add a preceding . to avoid clashing with legal names.
+ const prefix = ".autotmp_"
+ // Start with a buffer big enough to hold a large n.
+ b := []byte(prefix + " ")[:len(prefix)]
+ b = strconv.AppendInt(b, int64(n), 10)
+ return types.InternString(b)
+}
+
+// f is method type, with receiver.
+// return function type, receiver as first argument (or not).
+func NewMethodType(f *types.Type, receiver *types.Type) *types.Type {
+ inLen := f.Params().Fields().Len()
+ if receiver != nil {
+ inLen++
+ }
+ in := make([]*ir.Field, 0, inLen)
+
+ if receiver != nil {
+ d := ir.NewField(base.Pos, nil, nil, receiver)
+ in = append(in, d)
+ }
+
+ for _, t := range f.Params().Fields().Slice() {
+ d := ir.NewField(base.Pos, nil, nil, t.Type)
+ d.IsDDD = t.IsDDD()
+ in = append(in, d)
+ }
+
+ outLen := f.Results().Fields().Len()
+ out := make([]*ir.Field, 0, outLen)
+ for _, t := range f.Results().Fields().Slice() {
+ d := ir.NewField(base.Pos, nil, nil, t.Type)
+ out = append(out, d)
+ }
+
+ return NewFuncType(nil, in, out)
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "go/constant"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// importalias declares symbol s as an imported type alias with type t.
+// ipkg is the package being imported
+func importalias(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
+ return importobj(ipkg, pos, s, ir.OTYPE, ir.PEXTERN, t)
+}
+
+// importconst declares symbol s as an imported constant with type t and value val.
+// ipkg is the package being imported
+func importconst(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type, val constant.Value) *ir.Name {
+ n := importobj(ipkg, pos, s, ir.OLITERAL, ir.PEXTERN, t)
+ n.SetVal(val)
+ return n
+}
+
+// importfunc declares symbol s as an imported function with type t.
+// ipkg is the package being imported
+func importfunc(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
+ n := importobj(ipkg, pos, s, ir.ONAME, ir.PFUNC, t)
+
+ fn := ir.NewFunc(pos)
+ fn.SetType(t)
+ n.SetFunc(fn)
+ fn.Nname = n
+
+ return n
+}
+
+// importobj declares symbol s as an imported object representable by op.
+// ipkg is the package being imported
+func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class, t *types.Type) *ir.Name {
+ n := importsym(ipkg, pos, s, op, ctxt)
+ n.SetType(t)
+ if ctxt == ir.PFUNC {
+ n.Sym().SetFunc(true)
+ }
+ return n
+}
+
+func importsym(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class) *ir.Name {
+ if n := s.PkgDef(); n != nil {
+ base.Fatalf("importsym of symbol that already exists: %v", n)
+ }
+
+ n := ir.NewDeclNameAt(pos, op, s)
+ n.Class_ = ctxt // TODO(mdempsky): Move this into NewDeclNameAt too?
+ s.SetPkgDef(n)
+ s.Importdef = ipkg
+ return n
+}
+
+// importtype returns the named type declared by symbol s.
+// If no such type has been declared yet, a forward declaration is returned.
+// ipkg is the package being imported
+func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *ir.Name {
+ n := importsym(ipkg, pos, s, ir.OTYPE, ir.PEXTERN)
+ n.SetType(types.NewNamed(n))
+ return n
+}
+
+// importvar declares symbol s as an imported variable with type t.
+// ipkg is the package being imported
+func importvar(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
+ return importobj(ipkg, pos, s, ir.ONAME, ir.PEXTERN, t)
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "fmt"
+ "go/constant"
+ "go/token"
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+)
+
+// tcAddr typechecks an OADDR node.
+func tcAddr(n *ir.AddrExpr) ir.Node {
+ n.X = Expr(n.X)
+ if n.X.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+
+ switch n.X.Op() {
+ case ir.OARRAYLIT, ir.OMAPLIT, ir.OSLICELIT, ir.OSTRUCTLIT:
+ n.SetOp(ir.OPTRLIT)
+
+ default:
+ checklvalue(n.X, "take the address of")
+ r := ir.OuterValue(n.X)
+ if r.Op() == ir.ONAME {
+ r := r.(*ir.Name)
+ if ir.Orig(r) != r {
+ base.Fatalf("found non-orig name node %v", r) // TODO(mdempsky): What does this mean?
+ }
+ r.Name().SetAddrtaken(true)
+ if r.Name().IsClosureVar() && !CaptureVarsComplete {
+ // Mark the original variable as Addrtaken so that capturevars
+ // knows not to pass it by value.
+ // But if the capturevars phase is complete, don't touch it,
+ // in case l.Name's containing function has not yet been compiled.
+ r.Name().Defn.Name().SetAddrtaken(true)
+ }
+ }
+ n.X = DefaultLit(n.X, nil)
+ if n.X.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ }
+
+ n.SetType(types.NewPtr(n.X.Type()))
+ return n
+}
+
+// tcArith typechecks a binary arithmetic expression.
+func tcArith(n ir.Node) ir.Node {
+ var l, r ir.Node
+ var setLR func()
+ switch n := n.(type) {
+ case *ir.AssignOpStmt:
+ l, r = n.X, n.Y
+ setLR = func() { n.X = l; n.Y = r }
+ case *ir.BinaryExpr:
+ l, r = n.X, n.Y
+ setLR = func() { n.X = l; n.Y = r }
+ case *ir.LogicalExpr:
+ l, r = n.X, n.Y
+ setLR = func() { n.X = l; n.Y = r }
+ }
+ l = Expr(l)
+ r = Expr(r)
+ setLR()
+ if l.Type() == nil || r.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ op := n.Op()
+ if n.Op() == ir.OASOP {
+ n := n.(*ir.AssignOpStmt)
+ checkassign(n, l)
+ if n.IncDec && !okforarith[l.Type().Kind()] {
+ base.Errorf("invalid operation: %v (non-numeric type %v)", n, l.Type())
+ n.SetType(nil)
+ return n
+ }
+ // TODO(marvin): Fix Node.EType type union.
+ op = n.AsOp
+ }
+ if op == ir.OLSH || op == ir.ORSH {
+ r = DefaultLit(r, types.Types[types.TUINT])
+ setLR()
+ t := r.Type()
+ if !t.IsInteger() {
+ base.Errorf("invalid operation: %v (shift count type %v, must be integer)", n, r.Type())
+ n.SetType(nil)
+ return n
+ }
+ if t.IsSigned() && !types.AllowsGoVersion(curpkg(), 1, 13) {
+ base.ErrorfVers("go1.13", "invalid operation: %v (signed shift count type %v)", n, r.Type())
+ n.SetType(nil)
+ return n
+ }
+ t = l.Type()
+ if t != nil && t.Kind() != types.TIDEAL && !t.IsInteger() {
+ base.Errorf("invalid operation: %v (shift of type %v)", n, t)
+ n.SetType(nil)
+ return n
+ }
+
+ // no defaultlit for left
+ // the outer context gives the type
+ n.SetType(l.Type())
+ if (l.Type() == types.UntypedFloat || l.Type() == types.UntypedComplex) && r.Op() == ir.OLITERAL {
+ n.SetType(types.UntypedInt)
+ }
+ return n
+ }
+
+ // For "x == x && len(s)", it's better to report that "len(s)" (type int)
+ // can't be used with "&&" than to report that "x == x" (type untyped bool)
+ // can't be converted to int (see issue #41500).
+ if n.Op() == ir.OANDAND || n.Op() == ir.OOROR {
+ n := n.(*ir.LogicalExpr)
+ if !n.X.Type().IsBoolean() {
+ base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(n.X.Type()))
+ n.SetType(nil)
+ return n
+ }
+ if !n.Y.Type().IsBoolean() {
+ base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(n.Y.Type()))
+ n.SetType(nil)
+ return n
+ }
+ }
+
+ // ideal mixed with non-ideal
+ l, r = defaultlit2(l, r, false)
+ setLR()
+
+ if l.Type() == nil || r.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ t := l.Type()
+ if t.Kind() == types.TIDEAL {
+ t = r.Type()
+ }
+ et := t.Kind()
+ if et == types.TIDEAL {
+ et = types.TINT
+ }
+ aop := ir.OXXX
+ if iscmp[n.Op()] && t.Kind() != types.TIDEAL && !types.Identical(l.Type(), r.Type()) {
+ // comparison is okay as long as one side is
+ // assignable to the other. convert so they have
+ // the same type.
+ //
+ // the only conversion that isn't a no-op is concrete == interface.
+ // in that case, check comparability of the concrete type.
+ // The conversion allocates, so only do it if the concrete type is huge.
+ converted := false
+ if r.Type().Kind() != types.TBLANK {
+ aop, _ = assignop(l.Type(), r.Type())
+ if aop != ir.OXXX {
+ if r.Type().IsInterface() && !l.Type().IsInterface() && !types.IsComparable(l.Type()) {
+ base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(l.Type()))
+ n.SetType(nil)
+ return n
+ }
+
+ types.CalcSize(l.Type())
+ if r.Type().IsInterface() == l.Type().IsInterface() || l.Type().Width >= 1<<16 {
+ l = ir.NewConvExpr(base.Pos, aop, r.Type(), l)
+ l.SetTypecheck(1)
+ setLR()
+ }
+
+ t = r.Type()
+ converted = true
+ }
+ }
+
+ if !converted && l.Type().Kind() != types.TBLANK {
+ aop, _ = assignop(r.Type(), l.Type())
+ if aop != ir.OXXX {
+ if l.Type().IsInterface() && !r.Type().IsInterface() && !types.IsComparable(r.Type()) {
+ base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(r.Type()))
+ n.SetType(nil)
+ return n
+ }
+
+ types.CalcSize(r.Type())
+ if r.Type().IsInterface() == l.Type().IsInterface() || r.Type().Width >= 1<<16 {
+ r = ir.NewConvExpr(base.Pos, aop, l.Type(), r)
+ r.SetTypecheck(1)
+ setLR()
+ }
+
+ t = l.Type()
+ }
+ }
+
+ et = t.Kind()
+ }
+
+ if t.Kind() != types.TIDEAL && !types.Identical(l.Type(), r.Type()) {
+ l, r = defaultlit2(l, r, true)
+ if l.Type() == nil || r.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ if l.Type().IsInterface() == r.Type().IsInterface() || aop == 0 {
+ base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type(), r.Type())
+ n.SetType(nil)
+ return n
+ }
+ }
+
+ if t.Kind() == types.TIDEAL {
+ t = mixUntyped(l.Type(), r.Type())
+ }
+ if dt := defaultType(t); !okfor[op][dt.Kind()] {
+ base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(t))
+ n.SetType(nil)
+ return n
+ }
+
+ // okfor allows any array == array, map == map, func == func.
+ // restrict to slice/map/func == nil and nil == slice/map/func.
+ if l.Type().IsArray() && !types.IsComparable(l.Type()) {
+ base.Errorf("invalid operation: %v (%v cannot be compared)", n, l.Type())
+ n.SetType(nil)
+ return n
+ }
+
+ if l.Type().IsSlice() && !ir.IsNil(l) && !ir.IsNil(r) {
+ base.Errorf("invalid operation: %v (slice can only be compared to nil)", n)
+ n.SetType(nil)
+ return n
+ }
+
+ if l.Type().IsMap() && !ir.IsNil(l) && !ir.IsNil(r) {
+ base.Errorf("invalid operation: %v (map can only be compared to nil)", n)
+ n.SetType(nil)
+ return n
+ }
+
+ if l.Type().Kind() == types.TFUNC && !ir.IsNil(l) && !ir.IsNil(r) {
+ base.Errorf("invalid operation: %v (func can only be compared to nil)", n)
+ n.SetType(nil)
+ return n
+ }
+
+ if l.Type().IsStruct() {
+ if f := types.IncomparableField(l.Type()); f != nil {
+ base.Errorf("invalid operation: %v (struct containing %v cannot be compared)", n, f.Type)
+ n.SetType(nil)
+ return n
+ }
+ }
+
+ if iscmp[n.Op()] {
+ t = types.UntypedBool
+ n.SetType(t)
+ if con := EvalConst(n); con.Op() == ir.OLITERAL {
+ return con
+ }
+ l, r = defaultlit2(l, r, true)
+ setLR()
+ return n
+ }
+
+ if et == types.TSTRING && n.Op() == ir.OADD {
+ // create or update OADDSTR node with list of strings in x + y + z + (w + v) + ...
+ n := n.(*ir.BinaryExpr)
+ var add *ir.AddStringExpr
+ if l.Op() == ir.OADDSTR {
+ add = l.(*ir.AddStringExpr)
+ add.SetPos(n.Pos())
+ } else {
+ add = ir.NewAddStringExpr(n.Pos(), []ir.Node{l})
+ }
+ if r.Op() == ir.OADDSTR {
+ r := r.(*ir.AddStringExpr)
+ add.List.Append(r.List.Take()...)
+ } else {
+ add.List.Append(r)
+ }
+ add.SetType(t)
+ return add
+ }
+
+ if (op == ir.ODIV || op == ir.OMOD) && ir.IsConst(r, constant.Int) {
+ if constant.Sign(r.Val()) == 0 {
+ base.Errorf("division by zero")
+ n.SetType(nil)
+ return n
+ }
+ }
+
+ n.SetType(t)
+ return n
+}
+
+// The result of tcCompLit MUST be assigned back to n, e.g.
+// n.Left = tcCompLit(n.Left)
+func tcCompLit(n *ir.CompLitExpr) (res ir.Node) {
+ if base.EnableTrace && base.Flag.LowerT {
+ defer tracePrint("typecheckcomplit", n)(&res)
+ }
+
+ lno := base.Pos
+ defer func() {
+ base.Pos = lno
+ }()
+
+ if n.Ntype == nil {
+ base.ErrorfAt(n.Pos(), "missing type in composite literal")
+ n.SetType(nil)
+ return n
+ }
+
+ // Save original node (including n.Right)
+ n.SetOrig(ir.Copy(n))
+
+ ir.SetPos(n.Ntype)
+
+ // Need to handle [...]T arrays specially.
+ if array, ok := n.Ntype.(*ir.ArrayType); ok && array.Elem != nil && array.Len == nil {
+ array.Elem = typecheck(array.Elem, ctxType)
+ elemType := array.Elem.Type()
+ if elemType == nil {
+ n.SetType(nil)
+ return n
+ }
+ length := typecheckarraylit(elemType, -1, n.List, "array literal")
+ n.SetOp(ir.OARRAYLIT)
+ n.SetType(types.NewArray(elemType, length))
+ n.Ntype = nil
+ return n
+ }
+
+ n.Ntype = ir.Node(typecheck(n.Ntype, ctxType)).(ir.Ntype)
+ t := n.Ntype.Type()
+ if t == nil {
+ n.SetType(nil)
+ return n
+ }
+ n.SetType(t)
+
+ switch t.Kind() {
+ default:
+ base.Errorf("invalid composite literal type %v", t)
+ n.SetType(nil)
+
+ case types.TARRAY:
+ typecheckarraylit(t.Elem(), t.NumElem(), n.List, "array literal")
+ n.SetOp(ir.OARRAYLIT)
+ n.Ntype = nil
+
+ case types.TSLICE:
+ length := typecheckarraylit(t.Elem(), -1, n.List, "slice literal")
+ n.SetOp(ir.OSLICELIT)
+ n.Ntype = nil
+ n.Len = length
+
+ case types.TMAP:
+ var cs constSet
+ for i3, l := range n.List {
+ ir.SetPos(l)
+ if l.Op() != ir.OKEY {
+ n.List[i3] = Expr(l)
+ base.Errorf("missing key in map literal")
+ continue
+ }
+ l := l.(*ir.KeyExpr)
+
+ r := l.Key
+ r = pushtype(r, t.Key())
+ r = Expr(r)
+ l.Key = AssignConv(r, t.Key(), "map key")
+ cs.add(base.Pos, l.Key, "key", "map literal")
+
+ r = l.Value
+ r = pushtype(r, t.Elem())
+ r = Expr(r)
+ l.Value = AssignConv(r, t.Elem(), "map value")
+ }
+
+ n.SetOp(ir.OMAPLIT)
+ n.Ntype = nil
+
+ case types.TSTRUCT:
+ // Need valid field offsets for Xoffset below.
+ types.CalcSize(t)
+
+ errored := false
+ if len(n.List) != 0 && nokeys(n.List) {
+ // simple list of variables
+ ls := n.List
+ for i, n1 := range ls {
+ ir.SetPos(n1)
+ n1 = Expr(n1)
+ ls[i] = n1
+ if i >= t.NumFields() {
+ if !errored {
+ base.Errorf("too many values in %v", n)
+ errored = true
+ }
+ continue
+ }
+
+ f := t.Field(i)
+ s := f.Sym
+ if s != nil && !types.IsExported(s.Name) && s.Pkg != types.LocalPkg {
+ base.Errorf("implicit assignment of unexported field '%s' in %v literal", s.Name, t)
+ }
+ // No pushtype allowed here. Must name fields for that.
+ n1 = AssignConv(n1, f.Type, "field value")
+ sk := ir.NewStructKeyExpr(base.Pos, f.Sym, n1)
+ sk.Offset = f.Offset
+ ls[i] = sk
+ }
+ if len(ls) < t.NumFields() {
+ base.Errorf("too few values in %v", n)
+ }
+ } else {
+ hash := make(map[string]bool)
+
+ // keyed list
+ ls := n.List
+ for i, l := range ls {
+ ir.SetPos(l)
+
+ if l.Op() == ir.OKEY {
+ kv := l.(*ir.KeyExpr)
+ key := kv.Key
+
+ // Sym might have resolved to name in other top-level
+ // package, because of import dot. Redirect to correct sym
+ // before we do the lookup.
+ s := key.Sym()
+ if id, ok := key.(*ir.Ident); ok && DotImportRefs[id] != nil {
+ s = Lookup(s.Name)
+ }
+
+ // An OXDOT uses the Sym field to hold
+ // the field to the right of the dot,
+ // so s will be non-nil, but an OXDOT
+ // is never a valid struct literal key.
+ if s == nil || s.Pkg != types.LocalPkg || key.Op() == ir.OXDOT || s.IsBlank() {
+ base.Errorf("invalid field name %v in struct initializer", key)
+ continue
+ }
+
+ l = ir.NewStructKeyExpr(l.Pos(), s, kv.Value)
+ ls[i] = l
+ }
+
+ if l.Op() != ir.OSTRUCTKEY {
+ if !errored {
+ base.Errorf("mixture of field:value and value initializers")
+ errored = true
+ }
+ ls[i] = Expr(ls[i])
+ continue
+ }
+ l := l.(*ir.StructKeyExpr)
+
+ f := lookdot1(nil, l.Field, t, t.Fields(), 0)
+ if f == nil {
+ if ci := lookdot1(nil, l.Field, t, t.Fields(), 2); ci != nil { // Case-insensitive lookup.
+ if visible(ci.Sym) {
+ base.Errorf("unknown field '%v' in struct literal of type %v (but does have %v)", l.Field, t, ci.Sym)
+ } else if nonexported(l.Field) && l.Field.Name == ci.Sym.Name { // Ensure exactness before the suggestion.
+ base.Errorf("cannot refer to unexported field '%v' in struct literal of type %v", l.Field, t)
+ } else {
+ base.Errorf("unknown field '%v' in struct literal of type %v", l.Field, t)
+ }
+ continue
+ }
+ var f *types.Field
+ p, _ := dotpath(l.Field, t, &f, true)
+ if p == nil || f.IsMethod() {
+ base.Errorf("unknown field '%v' in struct literal of type %v", l.Field, t)
+ continue
+ }
+ // dotpath returns the parent embedded types in reverse order.
+ var ep []string
+ for ei := len(p) - 1; ei >= 0; ei-- {
+ ep = append(ep, p[ei].field.Sym.Name)
+ }
+ ep = append(ep, l.Field.Name)
+ base.Errorf("cannot use promoted field %v in struct literal of type %v", strings.Join(ep, "."), t)
+ continue
+ }
+ fielddup(f.Sym.Name, hash)
+ l.Offset = f.Offset
+
+ // No pushtype allowed here. Tried and rejected.
+ l.Value = Expr(l.Value)
+ l.Value = AssignConv(l.Value, f.Type, "field value")
+ }
+ }
+
+ n.SetOp(ir.OSTRUCTLIT)
+ n.Ntype = nil
+ }
+
+ return n
+}
+
+// tcConv typechecks an OCONV node.
+func tcConv(n *ir.ConvExpr) ir.Node {
+ types.CheckSize(n.Type()) // ensure width is calculated for backend
+ n.X = Expr(n.X)
+ n.X = convlit1(n.X, n.Type(), true, nil)
+ t := n.X.Type()
+ if t == nil || n.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ op, why := convertop(n.X.Op() == ir.OLITERAL, t, n.Type())
+ if op == ir.OXXX {
+ if !n.Diag() && !n.Type().Broke() && !n.X.Diag() {
+ base.Errorf("cannot convert %L to type %v%s", n.X, n.Type(), why)
+ n.SetDiag(true)
+ }
+ n.SetOp(ir.OCONV)
+ n.SetType(nil)
+ return n
+ }
+
+ n.SetOp(op)
+ switch n.Op() {
+ case ir.OCONVNOP:
+ if t.Kind() == n.Type().Kind() {
+ switch t.Kind() {
+ case types.TFLOAT32, types.TFLOAT64, types.TCOMPLEX64, types.TCOMPLEX128:
+ // Floating point casts imply rounding and
+ // so the conversion must be kept.
+ n.SetOp(ir.OCONV)
+ }
+ }
+
+ // do not convert to []byte literal. See CL 125796.
+ // generated code and compiler memory footprint is better without it.
+ case ir.OSTR2BYTES:
+ // ok
+
+ case ir.OSTR2RUNES:
+ if n.X.Op() == ir.OLITERAL {
+ return stringtoruneslit(n)
+ }
+ }
+ return n
+}
+
+// tcDot typechecks an OXDOT or ODOT node.
+func tcDot(n *ir.SelectorExpr, top int) ir.Node {
+ if n.Op() == ir.OXDOT {
+ n = AddImplicitDots(n)
+ n.SetOp(ir.ODOT)
+ if n.X == nil {
+ n.SetType(nil)
+ return n
+ }
+ }
+
+ n.X = typecheck(n.X, ctxExpr|ctxType)
+
+ n.X = DefaultLit(n.X, nil)
+
+ t := n.X.Type()
+ if t == nil {
+ base.UpdateErrorDot(ir.Line(n), fmt.Sprint(n.X), fmt.Sprint(n))
+ n.SetType(nil)
+ return n
+ }
+
+ s := n.Sel
+
+ if n.X.Op() == ir.OTYPE {
+ return typecheckMethodExpr(n)
+ }
+
+ if t.IsPtr() && !t.Elem().IsInterface() {
+ t = t.Elem()
+ if t == nil {
+ n.SetType(nil)
+ return n
+ }
+ n.SetOp(ir.ODOTPTR)
+ types.CheckSize(t)
+ }
+
+ if n.Sel.IsBlank() {
+ base.Errorf("cannot refer to blank field or method")
+ n.SetType(nil)
+ return n
+ }
+
+ if lookdot(n, t, 0) == nil {
+ // Legitimate field or method lookup failed, try to explain the error
+ switch {
+ case t.IsEmptyInterface():
+ base.Errorf("%v undefined (type %v is interface with no methods)", n, n.X.Type())
+
+ case t.IsPtr() && t.Elem().IsInterface():
+ // Pointer to interface is almost always a mistake.
+ base.Errorf("%v undefined (type %v is pointer to interface, not interface)", n, n.X.Type())
+
+ case lookdot(n, t, 1) != nil:
+ // Field or method matches by name, but it is not exported.
+ base.Errorf("%v undefined (cannot refer to unexported field or method %v)", n, n.Sel)
+
+ default:
+ if mt := lookdot(n, t, 2); mt != nil && visible(mt.Sym) { // Case-insensitive lookup.
+ base.Errorf("%v undefined (type %v has no field or method %v, but does have %v)", n, n.X.Type(), n.Sel, mt.Sym)
+ } else {
+ base.Errorf("%v undefined (type %v has no field or method %v)", n, n.X.Type(), n.Sel)
+ }
+ }
+ n.SetType(nil)
+ return n
+ }
+
+ if (n.Op() == ir.ODOTINTER || n.Op() == ir.ODOTMETH) && top&ctxCallee == 0 {
+ return tcCallPart(n, s)
+ }
+ return n
+}
+
+// tcDotType typechecks an ODOTTYPE node (a type assertion x.(T)).
+// The operand must have interface type; when the asserted type T is
+// concrete, it must implement that interface, otherwise the assertion
+// is reported as impossible. On any error the node's type is cleared.
+func tcDotType(n *ir.TypeAssertExpr) ir.Node {
+	n.X = Expr(n.X)
+	n.X = DefaultLit(n.X, nil)
+	l := n.X
+	t := l.Type()
+	if t == nil {
+		n.SetType(nil)
+		return n
+	}
+	if !t.IsInterface() {
+		base.Errorf("invalid type assertion: %v (non-interface type %v on left)", n, t)
+		n.SetType(nil)
+		return n
+	}
+
+	// Resolve the asserted type syntax node (if still present) into
+	// the node's type, then drop the syntax node.
+	if n.Ntype != nil {
+		n.Ntype = typecheck(n.Ntype, ctxType)
+		n.SetType(n.Ntype.Type())
+		n.Ntype = nil
+		if n.Type() == nil {
+			return n
+		}
+	}
+
+	// For a concrete asserted type, verify it can possibly implement
+	// the interface, and tailor the error to the failure mode:
+	// wrong method type, pointer-receiver-only method, or missing method.
+	if n.Type() != nil && !n.Type().IsInterface() {
+		var missing, have *types.Field
+		var ptr int
+		if !implements(n.Type(), t, &missing, &have, &ptr) {
+			if have != nil && have.Sym == missing.Sym {
+				base.Errorf("impossible type assertion:\n\t%v does not implement %v (wrong type for %v method)\n"+
+					"\t\thave %v%S\n\t\twant %v%S", n.Type(), t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+			} else if ptr != 0 {
+				base.Errorf("impossible type assertion:\n\t%v does not implement %v (%v method has pointer receiver)", n.Type(), t, missing.Sym)
+			} else if have != nil {
+				base.Errorf("impossible type assertion:\n\t%v does not implement %v (missing %v method)\n"+
+					"\t\thave %v%S\n\t\twant %v%S", n.Type(), t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+			} else {
+				base.Errorf("impossible type assertion:\n\t%v does not implement %v (missing %v method)", n.Type(), t, missing.Sym)
+			}
+			n.SetType(nil)
+			return n
+		}
+	}
+	return n
+}
+
+// tcITab typechecks an OITAB node.
+// OITAB is compiler-generated (it extracts the type/itab word of an
+// interface value), so a non-interface operand is an internal error
+// (Fatalf), not a user diagnostic. The result is typed *uintptr.
+func tcITab(n *ir.UnaryExpr) ir.Node {
+	n.X = Expr(n.X)
+	t := n.X.Type()
+	if t == nil {
+		n.SetType(nil)
+		return n
+	}
+	if !t.IsInterface() {
+		base.Fatalf("OITAB of %v", t)
+	}
+	n.SetType(types.NewPtr(types.Types[types.TUINTPTR]))
+	return n
+}
+
+// tcIndex typechecks an OINDEX node (x[i]).
+// Strings index to bytes, arrays/slices to their element type, and maps
+// are rewritten to OINDEXMAP with the key converted to the map's key
+// type. Constant indices are range-checked here when possible.
+func tcIndex(n *ir.IndexExpr) ir.Node {
+	n.X = Expr(n.X)
+	n.X = DefaultLit(n.X, nil)
+	n.X = implicitstar(n.X) // auto-dereference a pointer-to-array operand
+	l := n.X
+	n.Index = Expr(n.Index)
+	r := n.Index
+	t := l.Type()
+	if t == nil || r.Type() == nil {
+		n.SetType(nil)
+		return n
+	}
+	switch t.Kind() {
+	default:
+		base.Errorf("invalid operation: %v (type %v does not support indexing)", n, t)
+		n.SetType(nil)
+		return n
+
+	case types.TSTRING, types.TARRAY, types.TSLICE:
+		n.Index = indexlit(n.Index)
+		if t.IsString() {
+			n.SetType(types.ByteType)
+		} else {
+			n.SetType(t.Elem())
+		}
+		// "why" names the kind of operand purely for error messages.
+		why := "string"
+		if t.IsArray() {
+			why = "array"
+		} else if t.IsSlice() {
+			why = "slice"
+		}
+
+		if n.Index.Type() != nil && !n.Index.Type().IsInteger() {
+			base.Errorf("non-integer %s index %v", why, n.Index)
+			return n
+		}
+
+		// Constant-index bounds checks, skipped when the compiler has
+		// already marked the access as in-bounds (n.Bounded()).
+		if !n.Bounded() && ir.IsConst(n.Index, constant.Int) {
+			x := n.Index.Val()
+			if constant.Sign(x) < 0 {
+				base.Errorf("invalid %s index %v (index must be non-negative)", why, n.Index)
+			} else if t.IsArray() && constant.Compare(x, token.GEQ, constant.MakeInt64(t.NumElem())) {
+				base.Errorf("invalid array index %v (out of bounds for %d-element array)", n.Index, t.NumElem())
+			} else if ir.IsConst(n.X, constant.String) && constant.Compare(x, token.GEQ, constant.MakeInt64(int64(len(ir.StringVal(n.X))))) {
+				base.Errorf("invalid string index %v (out of bounds for %d-byte string)", n.Index, len(ir.StringVal(n.X)))
+			} else if ir.ConstOverflow(x, types.Types[types.TINT]) {
+				base.Errorf("invalid %s index %v (index too large)", why, n.Index)
+			}
+		}
+
+	case types.TMAP:
+		n.Index = AssignConv(n.Index, t.Key(), "map index")
+		n.SetType(t.Elem())
+		n.SetOp(ir.OINDEXMAP)
+		n.Assigned = false
+	}
+	return n
+}
+
+// tcLenCap typechecks an OLEN or OCAP node.
+// Validity of the operand kind is table-driven (okforlen/okforcap);
+// the result is always int.
+func tcLenCap(n *ir.UnaryExpr) ir.Node {
+	n.X = Expr(n.X)
+	n.X = DefaultLit(n.X, nil)
+	n.X = implicitstar(n.X) // len/cap of *[N]T operates on the array
+	l := n.X
+	t := l.Type()
+	if t == nil {
+		n.SetType(nil)
+		return n
+	}
+
+	var ok bool
+	if n.Op() == ir.OLEN {
+		ok = okforlen[t.Kind()]
+	} else {
+		ok = okforcap[t.Kind()]
+	}
+	if !ok {
+		base.Errorf("invalid argument %L for %v", l, n.Op())
+		n.SetType(nil)
+		return n
+	}
+
+	n.SetType(types.Types[types.TINT])
+	return n
+}
+
+// tcRecv typechecks an ORECV node (<-ch).
+// The operand must be a channel whose direction permits receiving;
+// the result type is the channel's element type.
+func tcRecv(n *ir.UnaryExpr) ir.Node {
+	n.X = Expr(n.X)
+	n.X = DefaultLit(n.X, nil)
+	l := n.X
+	t := l.Type()
+	if t == nil {
+		n.SetType(nil)
+		return n
+	}
+	if !t.IsChan() {
+		base.Errorf("invalid operation: %v (receive from non-chan type %v)", n, t)
+		n.SetType(nil)
+		return n
+	}
+
+	if !t.ChanDir().CanRecv() {
+		base.Errorf("invalid operation: %v (receive from send-only type %v)", n, t)
+		n.SetType(nil)
+		return n
+	}
+
+	n.SetType(t.Elem())
+	return n
+}
+
+// tcSPtr typechecks an OSPTR node.
+// OSPTR is compiler-generated (it extracts the data pointer of a slice
+// or string), so other operand kinds are internal errors. Strings
+// yield *uint8; slices yield a pointer to their element type.
+func tcSPtr(n *ir.UnaryExpr) ir.Node {
+	n.X = Expr(n.X)
+	t := n.X.Type()
+	if t == nil {
+		n.SetType(nil)
+		return n
+	}
+	if !t.IsSlice() && !t.IsString() {
+		base.Fatalf("OSPTR of %v", t)
+	}
+	if t.IsString() {
+		n.SetType(types.NewPtr(types.Types[types.TUINT8]))
+	} else {
+		n.SetType(types.NewPtr(t.Elem()))
+	}
+	return n
+}
+
+// tcSlice typechecks an OSLICE or OSLICE3 node (x[lo:hi] / x[lo:hi:max]).
+// It classifies the operand (string, array, pointer-to-array, slice),
+// rewrites the op accordingly (OSLICESTR, OSLICEARR, OSLICE3ARR), and
+// validates each bound, including constant-order checks (lo <= hi <= max).
+func tcSlice(n *ir.SliceExpr) ir.Node {
+	n.X = Expr(n.X)
+	low, high, max := n.SliceBounds()
+	hasmax := n.Op().IsSlice3()
+	low = Expr(low)
+	high = Expr(high)
+	max = Expr(max)
+	n.X = DefaultLit(n.X, nil)
+	low = indexlit(low)
+	high = indexlit(high)
+	max = indexlit(max)
+	n.SetSliceBounds(low, high, max)
+	l := n.X
+	if l.Type() == nil {
+		n.SetType(nil)
+		return n
+	}
+	if l.Type().IsArray() {
+		// Slicing an array requires an addressable operand; take its
+		// address implicitly so the result can alias the array.
+		if !ir.IsAssignable(n.X) {
+			base.Errorf("invalid operation %v (slice of unaddressable value)", n)
+			n.SetType(nil)
+			return n
+		}
+
+		addr := NodAddr(n.X)
+		addr.SetImplicit(true)
+		n.X = Expr(addr)
+		l = n.X
+	}
+	t := l.Type()
+	var tp *types.Type // underlying array type for pointer-to-array operands
+	if t.IsString() {
+		if hasmax {
+			base.Errorf("invalid operation %v (3-index slice of string)", n)
+			n.SetType(nil)
+			return n
+		}
+		n.SetType(t)
+		n.SetOp(ir.OSLICESTR)
+	} else if t.IsPtr() && t.Elem().IsArray() {
+		tp = t.Elem()
+		n.SetType(types.NewSlice(tp.Elem()))
+		types.CalcSize(n.Type())
+		if hasmax {
+			n.SetOp(ir.OSLICE3ARR)
+		} else {
+			n.SetOp(ir.OSLICEARR)
+		}
+	} else if t.IsSlice() {
+		n.SetType(t)
+	} else {
+		base.Errorf("cannot slice %v (type %v)", l, t)
+		n.SetType(nil)
+		return n
+	}
+
+	// Per-bound validity, then pairwise constant ordering checks.
+	if low != nil && !checksliceindex(l, low, tp) {
+		n.SetType(nil)
+		return n
+	}
+	if high != nil && !checksliceindex(l, high, tp) {
+		n.SetType(nil)
+		return n
+	}
+	if max != nil && !checksliceindex(l, max, tp) {
+		n.SetType(nil)
+		return n
+	}
+	if !checksliceconst(low, high) || !checksliceconst(low, max) || !checksliceconst(high, max) {
+		n.SetType(nil)
+		return n
+	}
+	return n
+}
+
+// tcSliceHeader typechecks an OSLICEHEADER node.
+func tcSliceHeader(n *ir.SliceHeaderExpr) ir.Node {
+	// Errors here are Fatalf instead of Errorf because only the compiler
+	// can construct an OSLICEHEADER node.
+	// Components used in OSLICEHEADER that are supplied by parsed source code
+	// have already been typechecked in e.g. OMAKESLICE earlier.
+	t := n.Type()
+	if t == nil {
+		base.Fatalf("no type specified for OSLICEHEADER")
+	}
+
+	if !t.IsSlice() {
+		base.Fatalf("invalid type %v for OSLICEHEADER", n.Type())
+	}
+
+	if n.Ptr == nil || n.Ptr.Type() == nil || !n.Ptr.Type().IsUnsafePtr() {
+		base.Fatalf("need unsafe.Pointer for OSLICEHEADER")
+	}
+
+	if x := len(n.LenCap); x != 2 {
+		base.Fatalf("expected 2 params (len, cap) for OSLICEHEADER, got %d", x)
+	}
+
+	n.Ptr = Expr(n.Ptr)
+	l := Expr(n.LenCap[0])
+	c := Expr(n.LenCap[1])
+	l = DefaultLit(l, types.Types[types.TINT])
+	c = DefaultLit(c, types.Types[types.TINT])
+
+	// Sanity-check constant len/cap: both non-negative and len <= cap.
+	if ir.IsConst(l, constant.Int) && ir.Int64Val(l) < 0 {
+		base.Fatalf("len for OSLICEHEADER must be non-negative")
+	}
+
+	if ir.IsConst(c, constant.Int) && ir.Int64Val(c) < 0 {
+		base.Fatalf("cap for OSLICEHEADER must be non-negative")
+	}
+
+	if ir.IsConst(l, constant.Int) && ir.IsConst(c, constant.Int) && constant.Compare(l.Val(), token.GTR, c.Val()) {
+		base.Fatalf("len larger than cap for OSLICEHEADER")
+	}
+
+	n.LenCap[0] = l
+	n.LenCap[1] = c
+	return n
+}
+
+// tcStar typechecks an ODEREF node, which may be an expression or a type.
+// `*T` where T is a type becomes the pointer type *T (OTYPE); otherwise
+// the operand must be a pointer and the result is its element type.
+func tcStar(n *ir.StarExpr, top int) ir.Node {
+	n.X = typecheck(n.X, ctxExpr|ctxType)
+	l := n.X
+	t := l.Type()
+	if t == nil {
+		n.SetType(nil)
+		return n
+	}
+	if l.Op() == ir.OTYPE {
+		n.SetOTYPE(types.NewPtr(l.Type()))
+		// Ensure l.Type gets dowidth'd for the backend. Issue 20174.
+		types.CheckSize(l.Type())
+		return n
+	}
+
+	if !t.IsPtr() {
+		// The diagnostic depends on context: in expression/statement
+		// position this is a bad indirect; otherwise the operand was
+		// expected to be a type.
+		if top&(ctxExpr|ctxStmt) != 0 {
+			base.Errorf("invalid indirect of %L", n.X)
+			n.SetType(nil)
+			return n
+		}
+		base.Errorf("%v is not a type", l)
+		return n
+	}
+
+	n.SetType(t.Elem())
+	return n
+}
+
+// tcUnaryArith typechecks a unary arithmetic expression (e.g. -x, ^x).
+// Operator/operand-kind validity is table-driven via okfor; the result
+// has the operand's type.
+func tcUnaryArith(n *ir.UnaryExpr) ir.Node {
+	n.X = Expr(n.X)
+	l := n.X
+	t := l.Type()
+	if t == nil {
+		n.SetType(nil)
+		return n
+	}
+	if !okfor[n.Op()][defaultType(t).Kind()] {
+		base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(t))
+		n.SetType(nil)
+		return n
+	}
+
+	n.SetType(t)
+	return n
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+
+ "fmt"
+ "go/constant"
+ "go/token"
+)
+
+// MakeDotArgs packages the arguments that match a ... T parameter into
+// a []T. With no arguments it produces a typed nil; otherwise it builds
+// an implicit composite literal. The result is typechecked before
+// being returned.
+func MakeDotArgs(typ *types.Type, args []ir.Node) ir.Node {
+	var n ir.Node
+	if len(args) == 0 {
+		n = NodNil()
+		n.SetType(typ)
+	} else {
+		lit := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ).(ir.Ntype), nil)
+		lit.List.Append(args...)
+		lit.SetImplicit(true)
+		n = lit
+	}
+
+	n = Expr(n)
+	if n.Type() == nil {
+		base.Fatalf("mkdotargslice: typecheck failed")
+	}
+	return n
+}
+
+// FixVariadicCall rewrites calls to variadic functions to use an
+// explicit ... argument if one is not already present: the trailing
+// arguments are collected into a single slice via MakeDotArgs and the
+// call is marked IsDDD. Calls that already use ... are left alone.
+func FixVariadicCall(call *ir.CallExpr) {
+	fntype := call.X.Type()
+	if !fntype.IsVariadic() || call.IsDDD {
+		return
+	}
+
+	vi := fntype.NumParams() - 1
+	vt := fntype.Params().Field(vi).Type
+
+	args := call.Args
+	extra := args[vi:]
+	slice := MakeDotArgs(vt, extra)
+	for i := range extra {
+		extra[i] = nil // allow GC
+	}
+
+	call.Args.Set(append(args[:vi], slice))
+	call.IsDDD = true
+}
+
+// ClosureType returns the struct type used to hold all the information
+// needed in the closure for clo (clo must be a OCLOSURE node).
+// The address of a variable of the returned type can be cast to a func.
+func ClosureType(clo *ir.ClosureExpr) *types.Type {
+	// Create closure in the form of a composite literal.
+	// supposing the closure captures an int i and a string s
+	// and has one float64 argument and no results,
+	// the generated code looks like:
+	//
+	//	clos = &struct{.F uintptr; i *int; s *string}{func.1, &i, &s}
+	//
+	// The use of the struct provides type information to the garbage
+	// collector so that it can walk the closure. We could use (in this case)
+	// [3]unsafe.Pointer instead, but that would leave the gc in the dark.
+	// The information appears in the binary in the form of type descriptors;
+	// the struct is unnamed so that closures in multiple packages with the
+	// same struct type can share the descriptor.
+	fields := []*ir.Field{
+		ir.NewField(base.Pos, Lookup(".F"), nil, types.Types[types.TUINTPTR]),
+	}
+	for _, v := range clo.Func.ClosureVars {
+		// By-reference captures store a pointer to the variable.
+		typ := v.Type()
+		if !v.Byval() {
+			typ = types.NewPtr(typ)
+		}
+		fields = append(fields, ir.NewField(base.Pos, v.Sym(), nil, typ))
+	}
+	typ := NewStructType(fields)
+	typ.SetNoalg(true) // no == / hash support needed for closure records
+	return typ
+}
+
+// PartialCallType returns the struct type used to hold all the information
+// needed in the closure for n (n must be a OCALLPART node): the wrapper
+// function pointer F and the bound receiver R.
+// The address of a variable of the returned type can be cast to a func.
+func PartialCallType(n *ir.CallPartExpr) *types.Type {
+	t := NewStructType([]*ir.Field{
+		ir.NewField(base.Pos, Lookup("F"), nil, types.Types[types.TUINTPTR]),
+		ir.NewField(base.Pos, Lookup("R"), nil, n.X.Type()),
+	})
+	t.SetNoalg(true)
+	return t
+}
+
+// CaptureVars is called in a separate phase after all typechecking is done.
+// It decides whether each variable captured by a closure should be captured
+// by value or by reference.
+// We use value capturing for values <= 128 bytes that are never reassigned
+// after capturing (effectively constant).
+func CaptureVars(fn *ir.Func) {
+	lno := base.Pos
+	base.Pos = fn.Pos()
+	cvars := fn.ClosureVars
+	out := cvars[:0] // filter in place, reusing the backing array
+	for _, v := range cvars {
+		if v.Type() == nil {
+			// If v.Type is nil, it means v looked like it
+			// was going to be used in the closure, but
+			// isn't. This happens in struct literals like
+			// s{f: x} where we can't distinguish whether
+			// f is a field identifier or expression until
+			// resolving s.
+			continue
+		}
+		out = append(out, v)
+
+		// type check the & of closed variables outside the closure,
+		// so that the outer frame also grabs them and knows they escape.
+		types.CalcSize(v.Type())
+
+		var outer ir.Node
+		outer = v.Outer
+		outermost := v.Defn.(*ir.Name)
+
+		// out parameters will be assigned to implicitly upon return.
+		if outermost.Class_ != ir.PPARAMOUT && !outermost.Name().Addrtaken() && !outermost.Name().Assigned() && v.Type().Width <= 128 {
+			v.SetByval(true)
+		} else {
+			// Capture by reference: the variable's address escapes.
+			outermost.Name().SetAddrtaken(true)
+			outer = NodAddr(outer)
+		}
+
+		// Under -m -m, report the capture decision for diagnostics.
+		if base.Flag.LowerM > 1 {
+			var name *types.Sym
+			if v.Curfn != nil && v.Curfn.Nname != nil {
+				name = v.Curfn.Sym()
+			}
+			how := "ref"
+			if v.Byval() {
+				how = "value"
+			}
+			base.WarnfAt(v.Pos(), "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym(), outermost.Name().Addrtaken(), outermost.Name().Assigned(), int32(v.Type().Width))
+		}
+
+		outer = Expr(outer)
+		fn.ClosureEnter.Append(outer)
+	}
+
+	fn.ClosureVars = out
+	base.Pos = lno
+}
+
+// ImportedBody performs lazy typechecking of imported function bodies.
+// For local functions, caninl will set ->typecheck because they're a
+// copy of an already checked body; this routine returns early for them.
+func ImportedBody(fn *ir.Func) {
+	lno := ir.SetPos(fn.Nname)
+
+	ImportBody(fn)
+
+	// typecheckinl is only for imported functions;
+	// their bodies may refer to unsafe as long as the package
+	// was marked safe during import (which was checked then).
+	// the ->inl of a local function has been typechecked before caninl copied it.
+	pkg := fnpkg(fn.Nname)
+
+	if pkg == types.LocalPkg || pkg == nil {
+		return // typecheckinl on local function
+	}
+
+	if base.Flag.LowerM > 2 || base.Debug.Export != 0 {
+		fmt.Printf("typecheck import [%v] %L { %v }\n", fn.Sym(), fn, ir.Nodes(fn.Inl.Body))
+	}
+
+	// Typecheck the inline body with fn as the current function, then
+	// restore the previous one.
+	savefn := ir.CurFunc
+	ir.CurFunc = fn
+	Stmts(fn.Inl.Body)
+	ir.CurFunc = savefn
+
+	// During expandInline (which imports fn.Func.Inl.Body),
+	// declarations are added to fn.Func.Dcl by funcHdr(). Move them
+	// to fn.Func.Inl.Dcl for consistency with how local functions
+	// behave. (Append because typecheckinl may be called multiple
+	// times.)
+	fn.Inl.Dcl = append(fn.Inl.Dcl, fn.Dcl...)
+	fn.Dcl = nil
+
+	base.Pos = lno
+}
+
+// fnpkg returns the function's package. For ordinary functions it's on
+// the ->sym, but for imported methods the ->sym can be re-used in the
+// local package, so peel it off the receiver's type instead.
+func fnpkg(fn *ir.Name) *types.Pkg {
+	if ir.IsMethod(fn) {
+		// method: use the (possibly pointer-stripped) receiver type's package.
+		rcvr := fn.Type().Recv().Type
+
+		if rcvr.IsPtr() {
+			rcvr = rcvr.Elem()
+		}
+		if rcvr.Sym() == nil {
+			base.Fatalf("receiver with no sym: [%v] %L  (%v)", fn.Sym(), fn, rcvr)
+		}
+		return rcvr.Sym().Pkg
+	}
+
+	// non-method
+	return fn.Sym().Pkg
+}
+
+// CaptureVarsComplete is set to true when the capturevars phase is done,
+// so later passes can assert that capture decisions have been made.
+var CaptureVarsComplete bool
+
+// closurename generates a new unique name for a closure within
+// outerfunc, of the form <outer>.func<N> (or <outer>.<N> for a closure
+// nested inside another closure). Top-level closures use the "glob."
+// prefix and a global counter.
+func closurename(outerfunc *ir.Func) *types.Sym {
+	outer := "glob."
+	prefix := "func"
+	gen := &globClosgen
+
+	if outerfunc != nil {
+		if outerfunc.OClosure != nil {
+			prefix = ""
+		}
+
+		outer = ir.FuncName(outerfunc)
+
+		// There may be multiple functions named "_". In those
+		// cases, we can't use their individual Closgens as it
+		// would lead to name clashes.
+		if !ir.IsBlank(outerfunc.Nname) {
+			gen = &outerfunc.Closgen
+		}
+	}
+
+	*gen++
+	return Lookup(fmt.Sprintf("%s.%s%d", outer, prefix, *gen))
+}
+
+// globClosgen is like Func.Closgen, but for the global scope:
+// it numbers closures declared outside any (named) function.
+var globClosgen int32
+
+// makepartialcall returns a DCLFUNC node representing the wrapper function (*-fm) needed
+// for partial calls (method values). The wrapper loads the bound
+// receiver from the closure record and forwards all arguments to the
+// method. Wrappers are deduplicated per receiver-type/method via the
+// symbol's Uniq flag.
+func makepartialcall(dot *ir.SelectorExpr, t0 *types.Type, meth *types.Sym) *ir.Func {
+	rcvrtype := dot.X.Type()
+	sym := ir.MethodSymSuffix(rcvrtype, meth, "-fm")
+
+	if sym.Uniq() {
+		return sym.Def.(*ir.Func)
+	}
+	sym.SetUniq(true)
+
+	savecurfn := ir.CurFunc
+	saveLineNo := base.Pos
+	ir.CurFunc = nil
+
+	// Set line number equal to the line number where the method is declared.
+	var m *types.Field
+	if lookdot0(meth, rcvrtype, &m, false) == 1 && m.Pos.IsKnown() {
+		base.Pos = m.Pos
+	}
+	// Note: !m.Pos.IsKnown() happens for method expressions where
+	// the method is implicitly declared. The Error method of the
+	// built-in error type is one such method. We leave the line
+	// number at the use of the method expression in this
+	// case. See issue 29389.
+
+	// The wrapper has the method's parameter/result types, but no receiver.
+	tfn := ir.NewFuncType(base.Pos, nil,
+		NewFuncParams(t0.Params(), true),
+		NewFuncParams(t0.Results(), false))
+
+	fn := DeclFunc(sym, tfn)
+	fn.SetDupok(true)      // identical wrappers may be emitted by multiple packages
+	fn.SetNeedctxt(true)   // reads the receiver out of the closure context
+
+	// Declare and initialize variable holding receiver.
+	cr := ir.NewClosureRead(rcvrtype, types.Rnd(int64(types.PtrSize), int64(rcvrtype.Align)))
+	ptr := NewName(Lookup(".this"))
+	Declare(ptr, ir.PAUTO)
+	ptr.SetUsed(true)
+	var body []ir.Node
+	if rcvrtype.IsPtr() || rcvrtype.IsInterface() {
+		ptr.SetType(rcvrtype)
+		body = append(body, ir.NewAssignStmt(base.Pos, ptr, cr))
+	} else {
+		ptr.SetType(types.NewPtr(rcvrtype))
+		body = append(body, ir.NewAssignStmt(base.Pos, ptr, NodAddr(cr)))
+	}
+
+	// Forward the wrapper's parameters to the underlying method call.
+	call := ir.NewCallExpr(base.Pos, ir.OCALL, ir.NewSelectorExpr(base.Pos, ir.OXDOT, ptr, meth), nil)
+	call.Args.Set(ir.ParamNames(tfn.Type()))
+	call.IsDDD = tfn.Type().IsVariadic()
+	if t0.NumResults() != 0 {
+		ret := ir.NewReturnStmt(base.Pos, nil)
+		ret.Results = []ir.Node{call}
+		body = append(body, ret)
+	} else {
+		body = append(body, call)
+	}
+
+	fn.Body.Set(body)
+	FinishFuncBody()
+
+	Func(fn)
+	// Need to typecheck the body of the just-generated wrapper.
+	// typecheckslice() requires that Curfn is set when processing an ORETURN.
+	ir.CurFunc = fn
+	Stmts(fn.Body)
+	sym.Def = fn
+	Target.Decls = append(Target.Decls, fn)
+	ir.CurFunc = savecurfn
+	base.Pos = saveLineNo
+
+	return fn
+}
+
+// tcClosure typechecks an OCLOSURE node. It also creates the named
+// function associated with the closure.
+// TODO: This creation of the named function should probably really be done in a
+// separate pass from type-checking.
+func tcClosure(clo *ir.ClosureExpr, top int) {
+	fn := clo.Func
+	// Set current associated iota value, so iota can be used inside
+	// function in ConstSpec, see issue #22344
+	if x := getIotaValue(); x >= 0 {
+		fn.Iota = x
+	}
+
+	fn.ClosureType = typecheck(fn.ClosureType, ctxType)
+	clo.SetType(fn.ClosureType.Type())
+	fn.SetClosureCalled(top&ctxCallee != 0)
+
+	// Do not typecheck fn twice, otherwise, we will end up pushing
+	// fn to Target.Decls multiple times, causing initLSym called twice.
+	// See #30709
+	if fn.Typecheck() == 1 {
+		return
+	}
+
+	// Mark captured variables and reset their "assigned" status for
+	// straightline code preceding the first capture.
+	for _, ln := range fn.ClosureVars {
+		n := ln.Defn
+		if !n.Name().Captured() {
+			n.Name().SetCaptured(true)
+			if n.Name().Decldepth == 0 {
+				base.Fatalf("typecheckclosure: var %v does not have decldepth assigned", n)
+			}
+
+			// Ignore assignments to the variable in straightline code
+			// preceding the first capturing by a closure.
+			if n.Name().Decldepth == decldepth {
+				n.Name().SetAssigned(false)
+			}
+		}
+	}
+
+	fn.Nname.SetSym(closurename(ir.CurFunc))
+	ir.MarkFunc(fn.Nname)
+	Func(fn)
+
+	// Type check the body now, but only if we're inside a function.
+	// At top level (in a variable initialization: curfn==nil) we're not
+	// ready to type check code yet; we'll check it later, because the
+	// underlying closure function we create is added to Target.Decls.
+	if ir.CurFunc != nil && clo.Type() != nil {
+		oldfn := ir.CurFunc
+		ir.CurFunc = fn
+		olddd := decldepth
+		decldepth = 1
+		Stmts(fn.Body)
+		decldepth = olddd
+		ir.CurFunc = oldfn
+	}
+
+	Target.Decls = append(Target.Decls, fn)
+}
+
+// tcCallPart turns a method selection (ODOTINTER/ODOTMETH) used as a
+// value into an OCALLPART node, creating (or reusing) the top-level
+// *-fm wrapper function for it. Any other op is an internal error.
+func tcCallPart(n ir.Node, sym *types.Sym) *ir.CallPartExpr {
+	switch n.Op() {
+	case ir.ODOTINTER, ir.ODOTMETH:
+		break
+
+	default:
+		base.Fatalf("invalid typecheckpartialcall")
+	}
+	dot := n.(*ir.SelectorExpr)
+
+	// Create top-level function.
+	fn := makepartialcall(dot, dot.Type(), sym)
+	fn.SetWrapper(true)
+
+	return ir.NewCallPartExpr(dot.Pos(), dot.X, dot.Selection, fn)
+}
+
+// tcFunc type checks a function definition.
+// To be called by typecheck, not directly.
+// (Call typecheckFunc instead.)
+func tcFunc(n *ir.Func) {
+	if base.EnableTrace && base.Flag.LowerT {
+		defer tracePrint("typecheckfunc", n)(nil)
+	}
+
+	// Parameters live at declaration depth 1 (the function's top scope).
+	for _, ln := range n.Dcl {
+		if ln.Op() == ir.ONAME && (ln.Class_ == ir.PPARAM || ln.Class_ == ir.PPARAMOUT) {
+			ln.Decldepth = 1
+		}
+	}
+
+	n.Nname = AssignExpr(n.Nname).(*ir.Name)
+	t := n.Nname.Type()
+	if t == nil {
+		return
+	}
+	n.SetType(t)
+	rcvr := t.Recv()
+	if rcvr != nil && n.Shortname != nil {
+		// A method: register it on the receiver type and rename the
+		// function to its mangled method symbol.
+		m := addmethod(n, n.Shortname, t, true, n.Pragma&ir.Nointerface != 0)
+		if m == nil {
+			return
+		}
+
+		n.Nname.SetSym(ir.MethodSym(rcvr.Type, n.Shortname))
+		Declare(n.Nname, ir.PFUNC)
+	}
+
+	if base.Ctxt.Flag_dynlink && !inimport && n.Nname != nil {
+		NeedFuncSym(n.Sym())
+	}
+}
+
+// tcCall typechecks an OCALL node. Depending on what the callee turns
+// out to be, the node is rewritten to a builtin op, a conversion
+// (OCONV), or a concrete call op (OCALLINTER/OCALLMETH/OCALLFUNC),
+// and its result type is set from the callee's signature.
+func tcCall(n *ir.CallExpr, top int) ir.Node {
+	n.Use = ir.CallUseExpr
+	if top == ctxStmt {
+		n.Use = ir.CallUseStmt
+	}
+	Stmts(n.Init()) // imported rewritten f(g()) calls (#30907)
+	n.X = typecheck(n.X, ctxExpr|ctxType|ctxCallee)
+	if n.X.Diag() {
+		n.SetDiag(true)
+	}
+
+	l := n.X
+
+	// Case 1: the callee names a builtin. Rewrite to the builtin's op
+	// and re-typecheck under the new op.
+	if l.Op() == ir.ONAME && l.(*ir.Name).BuiltinOp != 0 {
+		l := l.(*ir.Name)
+		if n.IsDDD && l.BuiltinOp != ir.OAPPEND {
+			base.Errorf("invalid use of ... with builtin %v", l)
+		}
+
+		// builtin: OLEN, OCAP, etc.
+		switch l.BuiltinOp {
+		default:
+			base.Fatalf("unknown builtin %v", l)
+
+		case ir.OAPPEND, ir.ODELETE, ir.OMAKE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
+			n.SetOp(l.BuiltinOp)
+			n.X = nil
+			n.SetTypecheck(0) // re-typechecking new op is OK, not a loop
+			return typecheck(n, top)
+
+		case ir.OCAP, ir.OCLOSE, ir.OIMAG, ir.OLEN, ir.OPANIC, ir.OREAL:
+			typecheckargs(n)
+			fallthrough
+		case ir.ONEW, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
+			arg, ok := needOneArg(n, "%v", n.Op())
+			if !ok {
+				n.SetType(nil)
+				return n
+			}
+			u := ir.NewUnaryExpr(n.Pos(), l.BuiltinOp, arg)
+			return typecheck(ir.InitExpr(n.Init(), u), top) // typecheckargs can add to old.Init
+
+		case ir.OCOMPLEX, ir.OCOPY:
+			typecheckargs(n)
+			arg1, arg2, ok := needTwoArgs(n)
+			if !ok {
+				n.SetType(nil)
+				return n
+			}
+			b := ir.NewBinaryExpr(n.Pos(), l.BuiltinOp, arg1, arg2)
+			return typecheck(ir.InitExpr(n.Init(), b), top) // typecheckargs can add to old.Init
+		}
+		panic("unreachable")
+	}
+
+	n.X = DefaultLit(n.X, nil)
+	l = n.X
+	// Case 2: the "callee" is a type — this is a conversion T(x).
+	if l.Op() == ir.OTYPE {
+		if n.IsDDD {
+			if !l.Type().Broke() {
+				base.Errorf("invalid use of ... in type conversion to %v", l.Type())
+			}
+			n.SetDiag(true)
+		}
+
+		// pick off before type-checking arguments
+		arg, ok := needOneArg(n, "conversion to %v", l.Type())
+		if !ok {
+			n.SetType(nil)
+			return n
+		}
+
+		n := ir.NewConvExpr(n.Pos(), ir.OCONV, nil, arg)
+		n.SetType(l.Type())
+		return typecheck1(n, top)
+	}
+
+	// Case 3: an ordinary function, method, or interface-method call.
+	typecheckargs(n)
+	t := l.Type()
+	if t == nil {
+		n.SetType(nil)
+		return n
+	}
+	types.CheckSize(t)
+
+	switch l.Op() {
+	case ir.ODOTINTER:
+		n.SetOp(ir.OCALLINTER)
+
+	case ir.ODOTMETH:
+		l := l.(*ir.SelectorExpr)
+		n.SetOp(ir.OCALLMETH)
+
+		// typecheckaste was used here but there wasn't enough
+		// information further down the call chain to know if we
+		// were testing a method receiver for unexported fields.
+		// It isn't necessary, so just do a sanity check.
+		tp := t.Recv().Type
+
+		if l.X == nil || !types.Identical(l.X.Type(), tp) {
+			base.Fatalf("method receiver")
+		}
+
+	default:
+		n.SetOp(ir.OCALLFUNC)
+		if t.Kind() != types.TFUNC {
+			// TODO(mdempsky): Remove "o.Sym() != nil" once we stop
+			// using ir.Name for numeric literals.
+			if o := ir.Orig(l); o.Name() != nil && o.Sym() != nil && types.BuiltinPkg.Lookup(o.Sym().Name).Def != nil {
+				// be more specific when the non-function
+				// name matches a predeclared function
+				base.Errorf("cannot call non-function %L, declared at %s",
+					l, base.FmtPos(o.Name().Pos()))
+			} else {
+				base.Errorf("cannot call non-function %L", l)
+			}
+			n.SetType(nil)
+			return n
+		}
+	}
+
+	// Check arguments against the signature, then set the result type.
+	typecheckaste(ir.OCALL, n.X, n.IsDDD, t.Params(), n.Args, func() string { return fmt.Sprintf("argument to %v", n.X) })
+	if t.NumResults() == 0 {
+		return n
+	}
+	if t.NumResults() == 1 {
+		n.SetType(l.Type().Results().Field(0).Type)
+
+		if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.ONAME {
+			if sym := n.X.(*ir.Name).Sym(); types.IsRuntimePkg(sym.Pkg) && sym.Name == "getg" {
+				// Emit code for runtime.getg() directly instead of calling function.
+				// Most such rewrites (for example the similar one for math.Sqrt) should be done in walk,
+				// so that the ordering pass can make sure to preserve the semantics of the original code
+				// (in particular, the exact time of the function call) by introducing temporaries.
+				// In this case, we know getg() always returns the same result within a given function
+				// and we want to avoid the temporaries, so we do the rewrite earlier than is typical.
+				n.SetOp(ir.OGETG)
+			}
+		}
+		return n
+	}
+
+	// multiple return
+	if top&(ctxMultiOK|ctxStmt) == 0 {
+		base.Errorf("multiple-value %v() in single-value context", l)
+		return n
+	}
+
+	n.SetType(l.Type().Results())
+	return n
+}
+
+// tcAppend typechecks an OAPPEND node. The first argument must be a
+// typed slice; with `...` exactly one spread argument is allowed
+// (a slice of the element type, or a string when appending to []byte),
+// otherwise each extra argument is converted to the element type.
+func tcAppend(n *ir.CallExpr) ir.Node {
+	typecheckargs(n)
+	args := n.Args
+	if len(args) == 0 {
+		base.Errorf("missing arguments to append")
+		n.SetType(nil)
+		return n
+	}
+
+	t := args[0].Type()
+	if t == nil {
+		n.SetType(nil)
+		return n
+	}
+
+	n.SetType(t)
+	if !t.IsSlice() {
+		// Distinguish untyped nil from other non-slice first arguments.
+		if ir.IsNil(args[0]) {
+			base.Errorf("first argument to append must be typed slice; have untyped nil")
+			n.SetType(nil)
+			return n
+		}
+
+		base.Errorf("first argument to append must be slice; have %L", t)
+		n.SetType(nil)
+		return n
+	}
+
+	if n.IsDDD {
+		if len(args) == 1 {
+			base.Errorf("cannot use ... on first argument to append")
+			n.SetType(nil)
+			return n
+		}
+
+		if len(args) != 2 {
+			base.Errorf("too many arguments to append")
+			n.SetType(nil)
+			return n
+		}
+
+		// Special case: append([]byte, string...).
+		if t.Elem().IsKind(types.TUINT8) && args[1].Type().IsString() {
+			args[1] = DefaultLit(args[1], types.Types[types.TSTRING])
+			return n
+		}
+
+		args[1] = AssignConv(args[1], t.Underlying(), "append")
+		return n
+	}
+
+	as := args[1:]
+	for i, n := range as {
+		if n.Type() == nil {
+			continue
+		}
+		as[i] = AssignConv(n, t.Elem(), "append")
+		types.CheckSize(as[i].Type()) // ensure width is calculated for backend
+	}
+	return n
+}
+
+// tcClose typechecks an OCLOSE node.
+// The operand must be a channel whose direction permits sending
+// (closing a receive-only channel is rejected). close has no result,
+// so no type is set on success.
+func tcClose(n *ir.UnaryExpr) ir.Node {
+	n.X = Expr(n.X)
+	n.X = DefaultLit(n.X, nil)
+	l := n.X
+	t := l.Type()
+	if t == nil {
+		n.SetType(nil)
+		return n
+	}
+	if !t.IsChan() {
+		base.Errorf("invalid operation: %v (non-chan type %v)", n, t)
+		n.SetType(nil)
+		return n
+	}
+
+	if !t.ChanDir().CanSend() {
+		base.Errorf("invalid operation: %v (cannot close receive-only channel)", n)
+		n.SetType(nil)
+		return n
+	}
+	return n
+}
+
+// tcComplex typechecks an OCOMPLEX node.
+// Both operands must have identical floating-point (or untyped
+// constant) types; the result is the corresponding complex type.
+func tcComplex(n *ir.BinaryExpr) ir.Node {
+	l := Expr(n.X)
+	r := Expr(n.Y)
+	if l.Type() == nil || r.Type() == nil {
+		n.SetType(nil)
+		return n
+	}
+	// Let untyped constants converge on a common type before comparing.
+	l, r = defaultlit2(l, r, false)
+	if l.Type() == nil || r.Type() == nil {
+		n.SetType(nil)
+		return n
+	}
+	n.X = l
+	n.Y = r
+
+	if !types.Identical(l.Type(), r.Type()) {
+		base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type(), r.Type())
+		n.SetType(nil)
+		return n
+	}
+
+	// Map the operand type to the result complex type.
+	var t *types.Type
+	switch l.Type().Kind() {
+	default:
+		base.Errorf("invalid operation: %v (arguments have type %v, expected floating-point)", n, l.Type())
+		n.SetType(nil)
+		return n
+
+	case types.TIDEAL:
+		t = types.UntypedComplex
+
+	case types.TFLOAT32:
+		t = types.Types[types.TCOMPLEX64]
+
+	case types.TFLOAT64:
+		t = types.Types[types.TCOMPLEX128]
+	}
+	n.SetType(t)
+	return n
+}
+
+// tcCopy typechecks an OCOPY node.
+// Allowed forms: copy(dstSlice, srcSlice) with identical element types,
+// and the special case copy([]byte, string). The result is int.
+func tcCopy(n *ir.BinaryExpr) ir.Node {
+	n.SetType(types.Types[types.TINT])
+	n.X = Expr(n.X)
+	n.X = DefaultLit(n.X, nil)
+	n.Y = Expr(n.Y)
+	n.Y = DefaultLit(n.Y, nil)
+	if n.X.Type() == nil || n.Y.Type() == nil {
+		n.SetType(nil)
+		return n
+	}
+
+	// copy([]byte, string)
+	if n.X.Type().IsSlice() && n.Y.Type().IsString() {
+		if types.Identical(n.X.Type().Elem(), types.ByteType) {
+			return n
+		}
+		base.Errorf("arguments to copy have different element types: %L and string", n.X.Type())
+		n.SetType(nil)
+		return n
+	}
+
+	// Report the most specific error for non-slice arguments.
+	if !n.X.Type().IsSlice() || !n.Y.Type().IsSlice() {
+		if !n.X.Type().IsSlice() && !n.Y.Type().IsSlice() {
+			base.Errorf("arguments to copy must be slices; have %L, %L", n.X.Type(), n.Y.Type())
+		} else if !n.X.Type().IsSlice() {
+			base.Errorf("first argument to copy should be slice; have %L", n.X.Type())
+		} else {
+			base.Errorf("second argument to copy should be slice or string; have %L", n.Y.Type())
+		}
+		n.SetType(nil)
+		return n
+	}
+
+	if !types.Identical(n.X.Type().Elem(), n.Y.Type().Elem()) {
+		base.Errorf("arguments to copy have different element types: %L and %L", n.X.Type(), n.Y.Type())
+		n.SetType(nil)
+		return n
+	}
+	return n
+}
+
+// tcDelete typechecks an ODELETE node.
+// Exactly two arguments are required: a map and a key assignable to
+// the map's key type.
+func tcDelete(n *ir.CallExpr) ir.Node {
+	typecheckargs(n)
+	args := n.Args
+	if len(args) == 0 {
+		base.Errorf("missing arguments to delete")
+		n.SetType(nil)
+		return n
+	}
+
+	if len(args) == 1 {
+		base.Errorf("missing second (key) argument to delete")
+		n.SetType(nil)
+		return n
+	}
+
+	if len(args) != 2 {
+		base.Errorf("too many arguments to delete")
+		n.SetType(nil)
+		return n
+	}
+
+	l := args[0]
+	r := args[1]
+	if l.Type() != nil && !l.Type().IsMap() {
+		base.Errorf("first argument to delete must be map; have %L", l.Type())
+		n.SetType(nil)
+		return n
+	}
+
+	// Convert the key to the map's key type.
+	args[1] = AssignConv(r, l.Type().Key(), "delete")
+	return n
+}
+
+// tcMake typechecks an OMAKE node and rewrites it to the concrete op
+// for the made type: OMAKESLICE (len required, cap optional),
+// OMAKEMAP (optional size hint), or OMAKECHAN (optional buffer size).
+// Returns a fresh MakeExpr node typed as the made type.
+func tcMake(n *ir.CallExpr) ir.Node {
+	args := n.Args
+	if len(args) == 0 {
+		base.Errorf("missing argument to make")
+		n.SetType(nil)
+		return n
+	}
+
+	n.Args.Set(nil)
+	l := args[0]
+	l = typecheck(l, ctxType)
+	t := l.Type()
+	if t == nil {
+		n.SetType(nil)
+		return n
+	}
+
+	// i walks the remaining (size) arguments; any left over at the end
+	// means too many arguments were supplied.
+	i := 1
+	var nn ir.Node
+	switch t.Kind() {
+	default:
+		base.Errorf("cannot make type %v", t)
+		n.SetType(nil)
+		return n
+
+	case types.TSLICE:
+		if i >= len(args) {
+			base.Errorf("missing len argument to make(%v)", t)
+			n.SetType(nil)
+			return n
+		}
+
+		l = args[i]
+		i++
+		l = Expr(l)
+		var r ir.Node
+		if i < len(args) {
+			r = args[i]
+			i++
+			r = Expr(r)
+		}
+
+		if l.Type() == nil || (r != nil && r.Type() == nil) {
+			n.SetType(nil)
+			return n
+		}
+		if !checkmake(t, "len", &l) || r != nil && !checkmake(t, "cap", &r) {
+			n.SetType(nil)
+			return n
+		}
+		// Constant len must not exceed constant cap.
+		if ir.IsConst(l, constant.Int) && r != nil && ir.IsConst(r, constant.Int) && constant.Compare(l.Val(), token.GTR, r.Val()) {
+			base.Errorf("len larger than cap in make(%v)", t)
+			n.SetType(nil)
+			return n
+		}
+		nn = ir.NewMakeExpr(n.Pos(), ir.OMAKESLICE, l, r)
+
+	case types.TMAP:
+		if i < len(args) {
+			l = args[i]
+			i++
+			l = Expr(l)
+			l = DefaultLit(l, types.Types[types.TINT])
+			if l.Type() == nil {
+				n.SetType(nil)
+				return n
+			}
+			if !checkmake(t, "size", &l) {
+				n.SetType(nil)
+				return n
+			}
+		} else {
+			l = ir.NewInt(0) // default size hint
+		}
+		nn = ir.NewMakeExpr(n.Pos(), ir.OMAKEMAP, l, nil)
+		nn.SetEsc(n.Esc())
+
+	case types.TCHAN:
+		l = nil
+		if i < len(args) {
+			l = args[i]
+			i++
+			l = Expr(l)
+			l = DefaultLit(l, types.Types[types.TINT])
+			if l.Type() == nil {
+				n.SetType(nil)
+				return n
+			}
+			if !checkmake(t, "buffer", &l) {
+				n.SetType(nil)
+				return n
+			}
+		} else {
+			l = ir.NewInt(0) // unbuffered channel
+		}
+		nn = ir.NewMakeExpr(n.Pos(), ir.OMAKECHAN, l, nil)
+	}
+
+	if i < len(args) {
+		base.Errorf("too many arguments to make(%v)", t)
+		n.SetType(nil)
+		return n
+	}
+
+	nn.SetType(t)
+	return nn
+}
+
+// tcMakeSliceCopy typechecks an OMAKESLICECOPY node.
+func tcMakeSliceCopy(n *ir.MakeExpr) ir.Node {
+	// Errors here are Fatalf instead of Errorf because only the compiler
+	// can construct an OMAKESLICECOPY node.
+	// Components used in OMAKESCLICECOPY that are supplied by parsed source code
+	// have already been typechecked in OMAKE and OCOPY earlier.
+	t := n.Type()
+
+	if t == nil {
+		base.Fatalf("no type specified for OMAKESLICECOPY")
+	}
+
+	if !t.IsSlice() {
+		base.Fatalf("invalid type %v for OMAKESLICECOPY", n.Type())
+	}
+
+	if n.Len == nil {
+		base.Fatalf("missing len argument for OMAKESLICECOPY")
+	}
+
+	// Note: n.Cap holds the slice being copied from, per the op's layout.
+	if n.Cap == nil {
+		base.Fatalf("missing slice argument to copy for OMAKESLICECOPY")
+	}
+
+	n.Len = Expr(n.Len)
+	n.Cap = Expr(n.Cap)
+
+	n.Len = DefaultLit(n.Len, types.Types[types.TINT])
+
+	if !n.Len.Type().IsInteger() && n.Type().Kind() != types.TIDEAL {
+		base.Errorf("non-integer len argument in OMAKESLICECOPY")
+	}
+
+	// Constant len must fit in int and be non-negative.
+	if ir.IsConst(n.Len, constant.Int) {
+		if ir.ConstOverflow(n.Len.Val(), types.Types[types.TINT]) {
+			base.Fatalf("len for OMAKESLICECOPY too large")
+		}
+		if constant.Sign(n.Len.Val()) < 0 {
+			base.Fatalf("len for OMAKESLICECOPY must be non-negative")
+		}
+	}
+	return n
+}
+
+// tcNew typechecks an ONEW node. The single argument is a type T and
+// the result type is *T.
+func tcNew(n *ir.UnaryExpr) ir.Node {
+	if n.X == nil {
+		// Fatalf because the OCALL above checked for us,
+		// so this must be an internally-generated mistake.
+		base.Fatalf("missing argument to new")
+	}
+	l := n.X
+	l = typecheck(l, ctxType)
+	t := l.Type()
+	if t == nil {
+		n.SetType(nil)
+		return n
+	}
+	n.X = l
+	n.SetType(types.NewPtr(t))
+	return n
+}
+
+// tcPanic typechecks an OPANIC node. The argument is converted to the
+// empty interface type; panic itself produces no value.
+func tcPanic(n *ir.UnaryExpr) ir.Node {
+	n.X = Expr(n.X)
+	n.X = DefaultLit(n.X, types.Types[types.TINTER])
+	if n.X.Type() == nil {
+		n.SetType(nil)
+		return n
+	}
+	return n
+}
+
+// tcPrint typechecks an OPRINT or OPRINTN node, giving each untyped
+// argument a default type.
+func tcPrint(n *ir.CallExpr) ir.Node {
+	typecheckargs(n)
+	ls := n.Args
+	for i1, n1 := range ls {
+		// Special case for print: int constant is int64, not int.
+		if ir.IsConst(n1, constant.Int) {
+			ls[i1] = DefaultLit(ls[i1], types.Types[types.TINT64])
+		} else {
+			ls[i1] = DefaultLit(ls[i1], nil)
+		}
+	}
+	return n
+}
+
+// tcRealImag typechecks an OREAL or OIMAG node.
+// complex64 yields float32, complex128 yields float64, and untyped
+// constants yield an untyped float.
+func tcRealImag(n *ir.UnaryExpr) ir.Node {
+	n.X = Expr(n.X)
+	l := n.X
+	t := l.Type()
+	if t == nil {
+		n.SetType(nil)
+		return n
+	}
+
+	// Determine result type.
+	switch t.Kind() {
+	case types.TIDEAL:
+		n.SetType(types.UntypedFloat)
+	case types.TCOMPLEX64:
+		n.SetType(types.Types[types.TFLOAT32])
+	case types.TCOMPLEX128:
+		n.SetType(types.Types[types.TFLOAT64])
+	default:
+		base.Errorf("invalid argument %L for %v", l, n.Op())
+		n.SetType(nil)
+		return n
+	}
+	return n
+}
+
+// tcRecover typechecks an ORECOVER node. recover takes no arguments
+// and returns a value of the empty interface type.
+func tcRecover(n *ir.CallExpr) ir.Node {
+	if len(n.Args) != 0 {
+		base.Errorf("too many arguments to recover")
+		n.SetType(nil)
+		return n
+	}
+
+	n.SetType(types.Types[types.TINTER])
+	return n
+}
// they're expected to change much more rapidly, so they're omitted
// here. See exportWriter's varExt/funcExt/etc methods for details.
-package gc
+package typecheck
import (
"bufio"
"bytes"
- "cmd/compile/internal/base"
- "cmd/compile/internal/ir"
- "cmd/compile/internal/types"
- "cmd/internal/goobj"
- "cmd/internal/src"
"crypto/md5"
"encoding/binary"
"fmt"
"math/big"
"sort"
"strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/goobj"
+ "cmd/internal/src"
)
// Current indexed export format version. Increase with each format change.
interfaceType
)
-func iexport(out *bufio.Writer) {
+func WriteExports(out *bufio.Writer) {
p := iexporter{
allPkgs: map[*types.Pkg]bool{},
stringIndex: map[string]uint64{},
}
// Don't export predeclared declarations.
- if n.Sym().Pkg == types.BuiltinPkg || n.Sym().Pkg == unsafepkg {
+ if n.Sym().Pkg == types.BuiltinPkg || n.Sym().Pkg == ir.Pkgs.Unsafe {
return
}
switch n.Op() {
case ir.ONAME:
- switch n.Class() {
+ switch n.Class_ {
case ir.PEXTERN:
// Variable.
w.tag('V')
w.funcExt(n)
default:
- base.Fatalf("unexpected class: %v, %v", n, n.Class())
+ base.Fatalf("unexpected class: %v, %v", n, n.Class_)
}
case ir.OLITERAL:
// Constant.
// TODO(mdempsky): Do we still need this typecheck? If so, why?
- n = typecheck(n, ctxExpr).(*ir.Name)
+ n = Expr(n).(*ir.Name)
w.tag('C')
w.pos(n.Pos())
w.value(n.Type(), n.Val())
case ir.OTYPE:
- if IsAlias(n.Sym()) {
+ if types.IsDotAlias(n.Sym()) {
// Alias.
w.tag('A')
w.pos(n.Pos())
w := p.newWriter()
w.setPkg(fnpkg(f), false)
- w.stmtList(ir.AsNodes(f.Func().Inl.Body))
+ w.stmtList(ir.Nodes(f.Func.Inl.Body))
w.finish("inl", p.inlineIndex, f.Sym())
}
func (w *exportWriter) doTyp(t *types.Type) {
if t.Sym() != nil {
- if t.Sym().Pkg == types.BuiltinPkg || t.Sym().Pkg == unsafepkg {
+ if t.Sym().Pkg == types.BuiltinPkg || t.Sym().Pkg == ir.Pkgs.Unsafe {
base.Fatalf("builtin type missing from typIndex: %v", t)
}
func intSize(typ *types.Type) (signed bool, maxBytes uint) {
if typ.IsUntyped() {
- return true, Mpprec / 8
+ return true, ir.ConstPrec / 8
}
switch typ.Kind() {
// multi-precision integer) and then the exponent, except exponent is
// omitted if mantissa is zero.
func (w *exportWriter) mpfloat(v constant.Value, typ *types.Type) {
- f := bigFloatVal(v)
+ f := ir.BigFloat(v)
if f.IsInf() {
base.Fatalf("infinite constant")
}
}
// Inline body.
- if n.Func().Inl != nil {
- w.uint64(1 + uint64(n.Func().Inl.Cost))
- if n.Func().ExportInline() {
+ if n.Func.Inl != nil {
+ w.uint64(1 + uint64(n.Func.Inl.Cost))
+ if n.Func.ExportInline() {
w.p.doInline(n)
}
// Endlineno for inlined function.
- w.pos(n.Func().Endlineno)
+ w.pos(n.Func.Endlineno)
} else {
w.uint64(0)
}
w.int64(i[1])
return
}
- w.symIdx(typesym(t))
- w.symIdx(typesym(t.PtrTo()))
+ w.symIdx(types.TypeSym(t))
+ w.symIdx(types.TypeSym(t.PtrTo()))
}
// Inline bodies.
func (w *exportWriter) stmtList(list ir.Nodes) {
- for _, n := range list.Slice() {
+ for _, n := range list {
w.node(n)
}
w.op(ir.OEND)
// Caution: stmt will emit more than one node for statement nodes n that have a non-empty
// n.Ninit and where n cannot have a natural init section (such as in "if", "for", etc.).
func (w *exportWriter) stmt(n ir.Node) {
- if n.Init().Len() > 0 && !ir.StmtWithInit(n.Op()) {
+ if len(n.Init()) > 0 && !ir.StmtWithInit(n.Op()) {
// can't use stmtList here since we don't want the final OEND
- for _, n := range n.Init().Slice() {
+ for _, n := range n.Init() {
w.stmt(n)
}
}
// (At the moment neither the parser nor the typechecker
// generate OBLOCK nodes except to denote an empty
// function body, although that may change.)
- for _, n := range n.List().Slice() {
+ n := n.(*ir.BlockStmt)
+ for _, n := range n.List {
w.stmt(n)
}
case ir.ODCL:
+ n := n.(*ir.Decl)
w.op(ir.ODCL)
- w.pos(n.Left().Pos())
- w.localName(n.Left().(*ir.Name))
- w.typ(n.Left().Type())
+ w.pos(n.X.Pos())
+ w.localName(n.X.(*ir.Name))
+ w.typ(n.X.Type())
case ir.OAS:
// Don't export "v = <N>" initializing statements, hope they're always
// preceded by the DCL which will be re-parsed and typecheck to reproduce
// the "v = <N>" again.
- if n.Right() != nil {
+ n := n.(*ir.AssignStmt)
+ if n.Y != nil {
w.op(ir.OAS)
w.pos(n.Pos())
- w.expr(n.Left())
- w.expr(n.Right())
+ w.expr(n.X)
+ w.expr(n.Y)
}
case ir.OASOP:
w.op(ir.OASOP)
w.pos(n.Pos())
w.op(n.AsOp)
- w.expr(n.Left())
- if w.bool(!n.Implicit()) {
- w.expr(n.Right())
+ w.expr(n.X)
+ if w.bool(!n.IncDec) {
+ w.expr(n.Y)
}
case ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
+ n := n.(*ir.AssignListStmt)
w.op(ir.OAS2)
w.pos(n.Pos())
- w.exprList(n.List())
- w.exprList(n.Rlist())
+ w.exprList(n.Lhs)
+ w.exprList(n.Rhs)
case ir.ORETURN:
+ n := n.(*ir.ReturnStmt)
w.op(ir.ORETURN)
w.pos(n.Pos())
- w.exprList(n.List())
+ w.exprList(n.Results)
// case ORETJMP:
// unreachable - generated by compiler for trampoline routines
case ir.OGO, ir.ODEFER:
+ n := n.(*ir.GoDeferStmt)
w.op(n.Op())
w.pos(n.Pos())
- w.expr(n.Left())
+ w.expr(n.Call)
case ir.OIF:
+ n := n.(*ir.IfStmt)
w.op(ir.OIF)
w.pos(n.Pos())
w.stmtList(n.Init())
- w.expr(n.Left())
- w.stmtList(n.Body())
- w.stmtList(n.Rlist())
+ w.expr(n.Cond)
+ w.stmtList(n.Body)
+ w.stmtList(n.Else)
case ir.OFOR:
+ n := n.(*ir.ForStmt)
w.op(ir.OFOR)
w.pos(n.Pos())
w.stmtList(n.Init())
- w.exprsOrNil(n.Left(), n.Right())
- w.stmtList(n.Body())
+ w.exprsOrNil(n.Cond, n.Post)
+ w.stmtList(n.Body)
case ir.ORANGE:
+ n := n.(*ir.RangeStmt)
w.op(ir.ORANGE)
w.pos(n.Pos())
- w.stmtList(n.List())
- w.expr(n.Right())
- w.stmtList(n.Body())
+ w.stmtList(n.Vars)
+ w.expr(n.X)
+ w.stmtList(n.Body)
case ir.OSELECT:
+ n := n.(*ir.SelectStmt)
w.op(n.Op())
w.pos(n.Pos())
w.stmtList(n.Init())
w.caseList(n)
case ir.OSWITCH:
+ n := n.(*ir.SwitchStmt)
w.op(n.Op())
w.pos(n.Pos())
w.stmtList(n.Init())
- w.exprsOrNil(n.Left(), nil)
+ w.exprsOrNil(n.Tag, nil)
w.caseList(n)
// case OCASE:
// handled by caseList
case ir.OFALL:
+ n := n.(*ir.BranchStmt)
w.op(ir.OFALL)
w.pos(n.Pos())
return false
}
sw := n.(*ir.SwitchStmt)
- if sw.Left() == nil || sw.Left().Op() != ir.OTYPESW {
+ if sw.Tag == nil || sw.Tag.Op() != ir.OTYPESW {
return false
}
- guard := sw.Left().(*ir.TypeSwitchGuard)
- return guard.Left() != nil
+ guard := sw.Tag.(*ir.TypeSwitchGuard)
+ return guard.Tag != nil
}
func (w *exportWriter) caseList(sw ir.Node) {
var cases []ir.Node
if sw.Op() == ir.OSWITCH {
- cases = sw.(*ir.SwitchStmt).List().Slice()
+ cases = sw.(*ir.SwitchStmt).Cases
} else {
- cases = sw.(*ir.SelectStmt).List().Slice()
+ cases = sw.(*ir.SelectStmt).Cases
}
w.uint64(uint64(len(cases)))
for _, cas := range cases {
cas := cas.(*ir.CaseStmt)
w.pos(cas.Pos())
- w.stmtList(cas.List())
+ w.stmtList(cas.List)
if namedTypeSwitch {
- w.localName(cas.Rlist().First().(*ir.Name))
+ w.localName(cas.Vars[0].(*ir.Name))
}
- w.stmtList(cas.Body())
+ w.stmtList(cas.Body)
}
}
func (w *exportWriter) exprList(list ir.Nodes) {
- for _, n := range list.Slice() {
+ for _, n := range list {
w.expr(n)
}
w.op(ir.OEND)
func simplifyForExport(n ir.Node) ir.Node {
switch n.Op() {
case ir.OPAREN:
- return simplifyForExport(n.Left())
+ n := n.(*ir.ParenExpr)
+ return simplifyForExport(n.X)
case ir.ODEREF:
+ n := n.(*ir.StarExpr)
if n.Implicit() {
- return simplifyForExport(n.Left())
+ return simplifyForExport(n.X)
}
case ir.OADDR:
+ n := n.(*ir.AddrExpr)
if n.Implicit() {
- return simplifyForExport(n.Left())
+ return simplifyForExport(n.X)
}
case ir.ODOT, ir.ODOTPTR:
+ n := n.(*ir.SelectorExpr)
if n.Implicit() {
- return simplifyForExport(n.Left())
+ return simplifyForExport(n.X)
}
}
return n
// expressions
// (somewhat closely following the structure of exprfmt in fmt.go)
case ir.ONIL:
+ n := n.(*ir.NilExpr)
if !n.Type().HasNil() {
base.Fatalf("unexpected type for nil: %v", n.Type())
}
case ir.ONAME:
// Package scope name.
n := n.(*ir.Name)
- if (n.Class() == ir.PEXTERN || n.Class() == ir.PFUNC) && !ir.IsBlank(n) {
+ if (n.Class_ == ir.PEXTERN || n.Class_ == ir.PFUNC) && !ir.IsBlank(n) {
w.op(ir.ONONAME)
w.qualifiedIdent(n)
break
w.typ(n.Type())
case ir.OTYPESW:
+ n := n.(*ir.TypeSwitchGuard)
w.op(ir.OTYPESW)
w.pos(n.Pos())
var s *types.Sym
- if n.Left() != nil {
- if n.Left().Op() != ir.ONONAME {
- base.Fatalf("expected ONONAME, got %v", n.Left())
+ if n.Tag != nil {
+ if n.Tag.Op() != ir.ONONAME {
+ base.Fatalf("expected ONONAME, got %v", n.Tag)
}
- s = n.Left().Sym()
+ s = n.Tag.Sym()
}
w.localIdent(s, 0) // declared pseudo-variable, if any
- w.exprsOrNil(n.Right(), nil)
+ w.exprsOrNil(n.X, nil)
// case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC:
// should have been resolved by typechecking - handled by default case
// should have been resolved by typechecking - handled by default case
case ir.OPTRLIT:
+ n := n.(*ir.AddrExpr)
w.op(ir.OADDR)
w.pos(n.Pos())
- w.expr(n.Left())
+ w.expr(n.X)
case ir.OSTRUCTLIT:
+ n := n.(*ir.CompLitExpr)
w.op(ir.OSTRUCTLIT)
w.pos(n.Pos())
w.typ(n.Type())
- w.fieldList(n.List()) // special handling of field names
+ w.fieldList(n.List) // special handling of field names
case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT:
+ n := n.(*ir.CompLitExpr)
w.op(ir.OCOMPLIT)
w.pos(n.Pos())
w.typ(n.Type())
- w.exprList(n.List())
+ w.exprList(n.List)
case ir.OKEY:
+ n := n.(*ir.KeyExpr)
w.op(ir.OKEY)
w.pos(n.Pos())
- w.exprsOrNil(n.Left(), n.Right())
+ w.exprsOrNil(n.Key, n.Value)
// case OSTRUCTKEY:
// unreachable - handled in case OSTRUCTLIT by elemList
case ir.OCALLPART:
// An OCALLPART is an OXDOT before type checking.
+ n := n.(*ir.CallPartExpr)
w.op(ir.OXDOT)
w.pos(n.Pos())
- w.expr(n.Left())
- w.selector(n.Sym())
+ w.expr(n.X)
+ w.selector(n.Method.Sym)
case ir.OXDOT, ir.ODOT, ir.ODOTPTR, ir.ODOTINTER, ir.ODOTMETH:
+ n := n.(*ir.SelectorExpr)
w.op(ir.OXDOT)
w.pos(n.Pos())
- w.expr(n.Left())
- w.selector(n.Sym())
+ w.expr(n.X)
+ w.selector(n.Sel)
case ir.ODOTTYPE, ir.ODOTTYPE2:
+ n := n.(*ir.TypeAssertExpr)
w.op(ir.ODOTTYPE)
w.pos(n.Pos())
- w.expr(n.Left())
+ w.expr(n.X)
w.typ(n.Type())
case ir.OINDEX, ir.OINDEXMAP:
+ n := n.(*ir.IndexExpr)
w.op(ir.OINDEX)
w.pos(n.Pos())
- w.expr(n.Left())
- w.expr(n.Right())
+ w.expr(n.X)
+ w.expr(n.Index)
case ir.OSLICE, ir.OSLICESTR, ir.OSLICEARR:
+ n := n.(*ir.SliceExpr)
w.op(ir.OSLICE)
w.pos(n.Pos())
- w.expr(n.Left())
+ w.expr(n.X)
low, high, _ := n.SliceBounds()
w.exprsOrNil(low, high)
case ir.OSLICE3, ir.OSLICE3ARR:
+ n := n.(*ir.SliceExpr)
w.op(ir.OSLICE3)
w.pos(n.Pos())
- w.expr(n.Left())
+ w.expr(n.X)
low, high, max := n.SliceBounds()
w.exprsOrNil(low, high)
w.expr(max)
case ir.OCOPY, ir.OCOMPLEX:
// treated like other builtin calls (see e.g., OREAL)
+ n := n.(*ir.BinaryExpr)
w.op(n.Op())
w.pos(n.Pos())
- w.expr(n.Left())
- w.expr(n.Right())
+ w.expr(n.X)
+ w.expr(n.Y)
w.op(ir.OEND)
case ir.OCONV, ir.OCONVIFACE, ir.OCONVNOP, ir.OBYTES2STR, ir.ORUNES2STR, ir.OSTR2BYTES, ir.OSTR2RUNES, ir.ORUNESTR:
+ n := n.(*ir.ConvExpr)
w.op(ir.OCONV)
w.pos(n.Pos())
- w.expr(n.Left())
+ w.expr(n.X)
w.typ(n.Type())
case ir.OREAL, ir.OIMAG, ir.OCAP, ir.OCLOSE, ir.OLEN, ir.ONEW, ir.OPANIC:
+ n := n.(*ir.UnaryExpr)
w.op(n.Op())
w.pos(n.Pos())
- w.expr(n.Left())
+ w.expr(n.X)
w.op(ir.OEND)
case ir.OAPPEND, ir.ODELETE, ir.ORECOVER, ir.OPRINT, ir.OPRINTN:
+ n := n.(*ir.CallExpr)
w.op(n.Op())
w.pos(n.Pos())
- w.exprList(n.List()) // emits terminating OEND
+ w.exprList(n.Args) // emits terminating OEND
// only append() calls may contain '...' arguments
if n.Op() == ir.OAPPEND {
- w.bool(n.IsDDD())
- } else if n.IsDDD() {
+ w.bool(n.IsDDD)
+ } else if n.IsDDD {
base.Fatalf("exporter: unexpected '...' with %v call", n.Op())
}
case ir.OCALL, ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OGETG:
+ n := n.(*ir.CallExpr)
w.op(ir.OCALL)
w.pos(n.Pos())
w.stmtList(n.Init())
- w.expr(n.Left())
- w.exprList(n.List())
- w.bool(n.IsDDD())
+ w.expr(n.X)
+ w.exprList(n.Args)
+ w.bool(n.IsDDD)
case ir.OMAKEMAP, ir.OMAKECHAN, ir.OMAKESLICE:
+ n := n.(*ir.MakeExpr)
w.op(n.Op()) // must keep separate from OMAKE for importer
w.pos(n.Pos())
w.typ(n.Type())
default:
// empty list
w.op(ir.OEND)
- case n.Right() != nil:
- w.expr(n.Left())
- w.expr(n.Right())
+ case n.Cap != nil:
+ w.expr(n.Len)
+ w.expr(n.Cap)
w.op(ir.OEND)
- case n.Left() != nil && (n.Op() == ir.OMAKESLICE || !n.Left().Type().IsUntyped()):
- w.expr(n.Left())
+ case n.Len != nil && (n.Op() == ir.OMAKESLICE || !n.Len.Type().IsUntyped()):
+ w.expr(n.Len)
w.op(ir.OEND)
}
// unary expressions
case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT, ir.ORECV:
+ n := n.(*ir.UnaryExpr)
w.op(n.Op())
w.pos(n.Pos())
- w.expr(n.Left())
+ w.expr(n.X)
case ir.OADDR:
+ n := n.(*ir.AddrExpr)
w.op(n.Op())
w.pos(n.Pos())
- w.expr(n.Left())
+ w.expr(n.X)
case ir.ODEREF:
+ n := n.(*ir.StarExpr)
w.op(n.Op())
w.pos(n.Pos())
- w.expr(n.Left())
+ w.expr(n.X)
case ir.OSEND:
+ n := n.(*ir.SendStmt)
w.op(n.Op())
w.pos(n.Pos())
- w.expr(n.Left())
- w.expr(n.Right())
+ w.expr(n.Chan)
+ w.expr(n.Value)
// binary expressions
case ir.OADD, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OEQ, ir.OGE, ir.OGT, ir.OLE, ir.OLT,
ir.OLSH, ir.OMOD, ir.OMUL, ir.ONE, ir.OOR, ir.ORSH, ir.OSUB, ir.OXOR:
+ n := n.(*ir.BinaryExpr)
w.op(n.Op())
w.pos(n.Pos())
- w.expr(n.Left())
- w.expr(n.Right())
+ w.expr(n.X)
+ w.expr(n.Y)
case ir.OANDAND, ir.OOROR:
+ n := n.(*ir.LogicalExpr)
w.op(n.Op())
w.pos(n.Pos())
- w.expr(n.Left())
- w.expr(n.Right())
+ w.expr(n.X)
+ w.expr(n.Y)
case ir.OADDSTR:
+ n := n.(*ir.AddStringExpr)
w.op(ir.OADDSTR)
w.pos(n.Pos())
- w.exprList(n.List())
+ w.exprList(n.List)
case ir.ODCLCONST:
// if exporting, DCLCONST should just be removed as its usage
}
func (w *exportWriter) fieldList(list ir.Nodes) {
- w.uint64(uint64(list.Len()))
- for _, n := range list.Slice() {
+ w.uint64(uint64(len(list)))
+ for _, n := range list {
n := n.(*ir.StructKeyExpr)
- w.selector(n.Sym())
- w.expr(n.Left())
+ w.selector(n.Field)
+ w.expr(n.Value)
}
}
// PPARAM/PPARAMOUT, because we only want to include vargen in
// non-param names.
var v int32
- if n.Class() == ir.PAUTO || (n.Class() == ir.PAUTOHEAP && n.Name().Stackcopy == nil) {
+ if n.Class_ == ir.PAUTO || (n.Class_ == ir.PAUTOHEAP && n.Name().Stackcopy == nil) {
v = n.Name().Vargen
}
// Indexed package import.
// See iexport.go for the export data format.
-package gc
+package typecheck
import (
- "cmd/compile/internal/base"
- "cmd/compile/internal/ir"
- "cmd/compile/internal/types"
- "cmd/internal/bio"
- "cmd/internal/goobj"
- "cmd/internal/obj"
- "cmd/internal/src"
"encoding/binary"
"fmt"
"go/constant"
"math/big"
"os"
"strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/bio"
+ "cmd/internal/goobj"
+ "cmd/internal/obj"
+ "cmd/internal/src"
)
// An iimporterAndOffset identifies an importer and an offset within
}
var (
- // declImporter maps from imported identifiers to an importer
+ // DeclImporter maps from imported identifiers to an importer
// and offset where that identifier's declaration can be read.
- declImporter = map[*types.Sym]iimporterAndOffset{}
+ DeclImporter = map[*types.Sym]iimporterAndOffset{}
// inlineImporter is like declImporter, but for inline bodies
// for function and method symbols.
return n.(*ir.Name)
}
- r := importReaderFor(id.Sym(), declImporter)
+ r := importReaderFor(id.Sym(), DeclImporter)
if r == nil {
// Can happen if user tries to reference an undeclared name.
return n
return r.doDecl(n.Sym())
}
-func expandInline(fn *ir.Func) {
+func ImportBody(fn *ir.Func) {
if fn.Inl.Body != nil {
return
}
return i
}
-func iimport(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType) {
+func ReadImports(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType) {
ird := &intReader{in, pkg}
version := ird.uint64()
s := pkg.Lookup(p.stringAt(ird.uint64()))
off := ird.uint64()
- if _, ok := declImporter[s]; !ok {
- declImporter[s] = iimporterAndOffset{p, off}
+ if _, ok := DeclImporter[s]; !ok {
+ DeclImporter[s] = iimporterAndOffset{p, off}
}
}
}
// We also need to defer width calculations until
// after the underlying type has been assigned.
- defercheckwidth()
+ types.DeferCheckSize()
underlying := r.typ()
t.SetUnderlying(underlying)
- resumecheckwidth()
+ types.ResumeCheckSize()
if underlying.IsInterface() {
r.typeExt(t)
fn := ir.NewFunc(mpos)
fn.SetType(mtyp)
- m := newFuncNameAt(mpos, methodSym(recv.Type, msym), fn)
+ m := ir.NewFuncNameAt(mpos, ir.MethodSym(recv.Type, msym), fn)
m.SetType(mtyp)
- m.SetClass(ir.PFUNC)
+ m.Class_ = ir.PFUNC
// methodSym already marked m.Sym as a function.
f := types.NewField(mpos, msym, mtyp)
t := types.NewInterface(r.currPkg, append(embeddeds, methods...))
// Ensure we expand the interface in the frontend (#25055).
- checkwidth(t)
+ types.CheckSize(t)
return t
}
}
// Inline body.
if u := r.uint64(); u > 0 {
- n.Func().Inl = &ir.Inline{
+ n.Func.Inl = &ir.Inline{
Cost: int32(u - 1),
}
- n.Func().Endlineno = r.pos()
+ n.Func.Endlineno = r.pos()
}
}
base.Fatalf("%v already has inline body", fn)
}
- funchdr(fn)
+ StartFuncBody(fn)
body := r.stmtList()
- funcbody()
+ FinishFuncBody()
if body == nil {
//
// Make sure empty body is not interpreted as
if base.Flag.E > 0 && base.Flag.LowerM > 2 {
if base.Flag.LowerM > 3 {
- fmt.Printf("inl body for %v %v: %+v\n", fn, fn.Type(), ir.AsNodes(fn.Inl.Body))
+ fmt.Printf("inl body for %v %v: %+v\n", fn, fn.Type(), ir.Nodes(fn.Inl.Body))
} else {
- fmt.Printf("inl body for %v %v: %v\n", fn, fn.Type(), ir.AsNodes(fn.Inl.Body))
+ fmt.Printf("inl body for %v %v: %v\n", fn, fn.Type(), ir.Nodes(fn.Inl.Body))
}
}
}
// but the handling of ODCL calls liststmt, which creates one.
// Inline them into the statement list.
if n.Op() == ir.OBLOCK {
- list = append(list, n.List().Slice()...)
+ n := n.(*ir.BlockStmt)
+ list = append(list, n.List...)
} else {
list = append(list, n)
}
cases := make([]ir.Node, r.uint64())
for i := range cases {
- cas := ir.NodAt(r.pos(), ir.OCASE, nil, nil)
- cas.PtrList().Set(r.stmtList())
+ cas := ir.NewCaseStmt(r.pos(), nil, nil)
+ cas.List.Set(r.stmtList())
if namedTypeSwitch {
// Note: per-case variables will have distinct, dotted
// names after import. That's okay: swt.go only needs
// Sym for diagnostics anyway.
caseVar := ir.NewNameAt(cas.Pos(), r.ident())
- declare(caseVar, dclcontext)
- cas.PtrRlist().Set1(caseVar)
- caseVar.Defn = sw.(*ir.SwitchStmt).Left()
+ Declare(caseVar, DeclContext)
+ cas.Vars = []ir.Node{caseVar}
+ caseVar.Defn = sw.(*ir.SwitchStmt).Tag
}
- cas.PtrBody().Set(r.stmtList())
+ cas.Body.Set(r.stmtList())
cases[i] = cas
}
return cases
func (r *importReader) expr() ir.Node {
n := r.node()
if n != nil && n.Op() == ir.OBLOCK {
+ n := n.(*ir.BlockStmt)
base.Fatalf("unexpected block node: %v", n)
}
return n
pos := r.pos()
typ := r.typ()
- n := npos(pos, nodnil())
+ n := npos(pos, NodNil())
n.SetType(typ)
return n
// TODO(mdempsky): Export position information for OSTRUCTKEY nodes.
savedlineno := base.Pos
base.Pos = r.pos()
- n := ir.NodAt(base.Pos, ir.OCOMPLIT, nil, ir.TypeNode(r.typ()))
- n.PtrList().Set(r.elemList()) // special handling of field names
+ n := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(r.typ()).(ir.Ntype), nil)
+ n.List.Set(r.elemList()) // special handling of field names
base.Pos = savedlineno
return n
// unreachable - mapped to case OCOMPLIT below by exporter
case ir.OCOMPLIT:
- n := ir.NodAt(r.pos(), ir.OCOMPLIT, nil, ir.TypeNode(r.typ()))
- n.PtrList().Set(r.exprList())
+ n := ir.NewCompLitExpr(r.pos(), ir.OCOMPLIT, ir.TypeNode(r.typ()).(ir.Ntype), nil)
+ n.List.Set(r.exprList())
return n
case ir.OKEY:
pos := r.pos()
left, right := r.exprsOrNil()
- return ir.NodAt(pos, ir.OKEY, left, right)
+ return ir.NewKeyExpr(pos, left, right)
// case OSTRUCTKEY:
// unreachable - handled in case OSTRUCTLIT by elemList
case ir.OXDOT:
// see parser.new_dotname
- return npos(r.pos(), nodSym(ir.OXDOT, r.expr(), r.ident()))
+ return ir.NewSelectorExpr(r.pos(), ir.OXDOT, r.expr(), r.ident())
// case ODOTTYPE, ODOTTYPE2:
// unreachable - mapped to case ODOTTYPE below by exporter
case ir.ODOTTYPE:
- n := ir.NodAt(r.pos(), ir.ODOTTYPE, r.expr(), nil)
+ n := ir.NewTypeAssertExpr(r.pos(), r.expr(), nil)
n.SetType(r.typ())
return n
// unreachable - mapped to cases below by exporter
case ir.OINDEX:
- return ir.NodAt(r.pos(), ir.OINDEX, r.expr(), r.expr())
+ return ir.NewIndexExpr(r.pos(), r.expr(), r.expr())
case ir.OSLICE, ir.OSLICE3:
n := ir.NewSliceExpr(r.pos(), op, r.expr())
// unreachable - mapped to OCONV case below by exporter
case ir.OCONV:
- n := ir.NodAt(r.pos(), ir.OCONV, r.expr(), nil)
+ n := ir.NewConvExpr(r.pos(), ir.OCONV, nil, r.expr())
n.SetType(r.typ())
return n
case ir.OCOPY, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCAP, ir.OCLOSE, ir.ODELETE, ir.OLEN, ir.OMAKE, ir.ONEW, ir.OPANIC, ir.ORECOVER, ir.OPRINT, ir.OPRINTN:
n := builtinCall(r.pos(), op)
- n.PtrList().Set(r.exprList())
+ n.Args.Set(r.exprList())
if op == ir.OAPPEND {
- n.SetIsDDD(r.bool())
+ n.IsDDD = r.bool()
}
return n
// unreachable - mapped to OCALL case below by exporter
case ir.OCALL:
- n := ir.NodAt(r.pos(), ir.OCALL, nil, nil)
+ n := ir.NewCallExpr(r.pos(), ir.OCALL, nil, nil)
n.PtrInit().Set(r.stmtList())
- n.SetLeft(r.expr())
- n.PtrList().Set(r.exprList())
- n.SetIsDDD(r.bool())
+ n.X = r.expr()
+ n.Args.Set(r.exprList())
+ n.IsDDD = r.bool()
return n
case ir.OMAKEMAP, ir.OMAKECHAN, ir.OMAKESLICE:
n := builtinCall(r.pos(), ir.OMAKE)
- n.PtrList().Append(ir.TypeNode(r.typ()))
- n.PtrList().Append(r.exprList()...)
+ n.Args.Append(ir.TypeNode(r.typ()))
+ n.Args.Append(r.exprList()...)
return n
// unary expressions
return ir.NewUnaryExpr(r.pos(), op, r.expr())
case ir.OADDR:
- return nodAddrAt(r.pos(), r.expr())
+ return NodAddrAt(r.pos(), r.expr())
case ir.ODEREF:
return ir.NewStarExpr(r.pos(), r.expr())
list := r.exprList()
x := npos(pos, list[0])
for _, y := range list[1:] {
- x = ir.NodAt(pos, ir.OADD, x, y)
+ x = ir.NewBinaryExpr(pos, ir.OADD, x, y)
}
return x
lhs := ir.NewDeclNameAt(pos, ir.ONAME, r.ident())
lhs.SetType(r.typ())
- declare(lhs, ir.PAUTO)
+ Declare(lhs, ir.PAUTO)
var stmts ir.Nodes
- stmts.Append(ir.Nod(ir.ODCL, lhs, nil))
- stmts.Append(ir.Nod(ir.OAS, lhs, nil))
- return npos(pos, liststmt(stmts.Slice()))
+ stmts.Append(ir.NewDecl(base.Pos, ir.ODCL, lhs))
+ stmts.Append(ir.NewAssignStmt(base.Pos, lhs, nil))
+ return ir.NewBlockStmt(pos, stmts)
// case OAS, OASWB:
// unreachable - mapped to OAS case below by exporter
case ir.OAS:
- return ir.NodAt(r.pos(), ir.OAS, r.expr(), r.expr())
+ return ir.NewAssignStmt(r.pos(), r.expr(), r.expr())
case ir.OASOP:
- n := ir.NodAt(r.pos(), ir.OASOP, nil, nil)
- n.SetSubOp(r.op())
- n.SetLeft(r.expr())
+ n := ir.NewAssignOpStmt(r.pos(), ir.OXXX, nil, nil)
+ n.AsOp = r.op()
+ n.X = r.expr()
if !r.bool() {
- n.SetRight(nodintconst(1))
- n.SetImplicit(true)
+ n.Y = ir.NewInt(1)
+ n.IncDec = true
} else {
- n.SetRight(r.expr())
+ n.Y = r.expr()
}
return n
// unreachable - mapped to OAS2 case below by exporter
case ir.OAS2:
- n := ir.NodAt(r.pos(), ir.OAS2, nil, nil)
- n.PtrList().Set(r.exprList())
- n.PtrRlist().Set(r.exprList())
+ n := ir.NewAssignListStmt(r.pos(), ir.OAS2, nil, nil)
+ n.Lhs.Set(r.exprList())
+ n.Rhs.Set(r.exprList())
return n
case ir.ORETURN:
- n := ir.NodAt(r.pos(), ir.ORETURN, nil, nil)
- n.PtrList().Set(r.exprList())
+ n := ir.NewReturnStmt(r.pos(), nil)
+ n.Results.Set(r.exprList())
return n
// case ORETJMP:
return ir.NewGoDeferStmt(r.pos(), op, r.expr())
case ir.OIF:
- n := ir.NodAt(r.pos(), ir.OIF, nil, nil)
+ n := ir.NewIfStmt(r.pos(), nil, nil, nil)
n.PtrInit().Set(r.stmtList())
- n.SetLeft(r.expr())
- n.PtrBody().Set(r.stmtList())
- n.PtrRlist().Set(r.stmtList())
+ n.Cond = r.expr()
+ n.Body.Set(r.stmtList())
+ n.Else.Set(r.stmtList())
return n
case ir.OFOR:
- n := ir.NodAt(r.pos(), ir.OFOR, nil, nil)
+ n := ir.NewForStmt(r.pos(), nil, nil, nil, nil)
n.PtrInit().Set(r.stmtList())
left, right := r.exprsOrNil()
- n.SetLeft(left)
- n.SetRight(right)
- n.PtrBody().Set(r.stmtList())
+ n.Cond = left
+ n.Post = right
+ n.Body.Set(r.stmtList())
return n
case ir.ORANGE:
- n := ir.NodAt(r.pos(), ir.ORANGE, nil, nil)
- n.PtrList().Set(r.stmtList())
- n.SetRight(r.expr())
- n.PtrBody().Set(r.stmtList())
+ n := ir.NewRangeStmt(r.pos(), nil, nil, nil)
+ n.Vars.Set(r.stmtList())
+ n.X = r.expr()
+ n.Body.Set(r.stmtList())
return n
case ir.OSELECT:
- n := ir.NodAt(r.pos(), ir.OSELECT, nil, nil)
+ n := ir.NewSelectStmt(r.pos(), nil)
n.PtrInit().Set(r.stmtList())
r.exprsOrNil() // TODO(rsc): Delete (and fix exporter). These are always nil.
- n.PtrList().Set(r.caseList(n))
+ n.Cases.Set(r.caseList(n))
return n
case ir.OSWITCH:
- n := ir.NodAt(r.pos(), ir.OSWITCH, nil, nil)
+ n := ir.NewSwitchStmt(r.pos(), nil, nil)
n.PtrInit().Set(r.stmtList())
left, _ := r.exprsOrNil()
- n.SetLeft(left)
- n.PtrList().Set(r.caseList(n))
+ n.Tag = left
+ n.Cases.Set(r.caseList(n))
return n
// case OCASE:
// handled by caseList
case ir.OFALL:
- n := ir.NodAt(r.pos(), ir.OFALL, nil, nil)
+ n := ir.NewBranchStmt(r.pos(), ir.OFALL, nil)
return n
// case OEMPTY:
var sym *types.Sym
pos := r.pos()
if label := r.string(); label != "" {
- sym = lookup(label)
+ sym = Lookup(label)
}
return ir.NewBranchStmt(pos, op, sym)
case ir.OLABEL:
- return ir.NewLabelStmt(r.pos(), lookup(r.string()))
+ return ir.NewLabelStmt(r.pos(), Lookup(r.string()))
case ir.OEND:
return nil
list := make([]ir.Node, c)
for i := range list {
s := r.ident()
- list[i] = nodSym(ir.OSTRUCTKEY, r.expr(), s)
+ list[i] = ir.NewStructKeyExpr(base.Pos, s, r.expr())
}
return list
}
// +build darwin dragonfly freebsd linux netbsd openbsd
-package gc
+package typecheck
import (
"os"
// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd
-package gc
+package typecheck
import (
"io"
var b bytes.Buffer
fmt.Fprintln(&b, "// Code generated by mkbuiltin.go. DO NOT EDIT.")
fmt.Fprintln(&b)
- fmt.Fprintln(&b, "package gc")
+ fmt.Fprintln(&b, "package typecheck")
fmt.Fprintln(&b)
fmt.Fprintln(&b, `import (`)
fmt.Fprintln(&b, ` "cmd/compile/internal/ir"`)
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// typecheckrangeExpr typechecks the ranged-over expression and the
+// iteration variables of a range statement n. It determines the
+// key/value types implied by the range target's type, reports errors
+// for invalid targets or too many variables, and assigns types to
+// variables declared by the range clause itself.
+func typecheckrangeExpr(n *ir.RangeStmt) {
+ n.X = Expr(n.X)
+
+ t := n.X.Type()
+ if t == nil {
+ // Range expression failed to typecheck; nothing more to do.
+ return
+ }
+ // delicate little dance. see typecheckas2
+ ls := n.Vars
+ for i1, n1 := range ls {
+ if !ir.DeclaredBy(n1, n) {
+ ls[i1] = AssignExpr(ls[i1])
+ }
+ }
+
+ // Ranging over *[N]T iterates over the pointed-to array.
+ if t.IsPtr() && t.Elem().IsArray() {
+ t = t.Elem()
+ }
+ n.SetType(t)
+
+ // t1 and t2 are the types of the first and second iteration variables.
+ var t1, t2 *types.Type
+ toomany := false
+ switch t.Kind() {
+ default:
+ base.ErrorfAt(n.Pos(), "cannot range over %L", n.X)
+ return
+
+ case types.TARRAY, types.TSLICE:
+ t1 = types.Types[types.TINT]
+ t2 = t.Elem()
+
+ case types.TMAP:
+ t1 = t.Key()
+ t2 = t.Elem()
+
+ case types.TCHAN:
+ if !t.ChanDir().CanRecv() {
+ base.ErrorfAt(n.Pos(), "invalid operation: range %v (receive from send-only type %v)", n.X, n.X.Type())
+ return
+ }
+
+ // Channels yield a single value per iteration.
+ t1 = t.Elem()
+ t2 = nil
+ if len(n.Vars) == 2 {
+ toomany = true
+ }
+
+ case types.TSTRING:
+ t1 = types.Types[types.TINT]
+ t2 = types.RuneType
+ }
+
+ if len(n.Vars) > 2 || toomany {
+ base.ErrorfAt(n.Pos(), "too many variables in range")
+ }
+
+ var v1, v2 ir.Node
+ if len(n.Vars) != 0 {
+ v1 = n.Vars[0]
+ }
+ if len(n.Vars) > 1 {
+ v2 = n.Vars[1]
+ }
+
+ // this is not only an optimization but also a requirement in the spec.
+ // "if the second iteration variable is the blank identifier, the range
+ // clause is equivalent to the same clause with only the first variable
+ // present."
+ if ir.IsBlank(v2) {
+ if v1 != nil {
+ n.Vars = []ir.Node{v1}
+ }
+ v2 = nil
+ }
+
+ if v1 != nil {
+ if ir.DeclaredBy(v1, n) {
+ // Variable declared by the range clause: infer its type.
+ v1.SetType(t1)
+ } else if v1.Type() != nil {
+ if op, why := assignop(t1, v1.Type()); op == ir.OXXX {
+ base.ErrorfAt(n.Pos(), "cannot assign type %v to %L in range%s", t1, v1, why)
+ }
+ }
+ checkassign(n, v1)
+ }
+
+ if v2 != nil {
+ if ir.DeclaredBy(v2, n) {
+ v2.SetType(t2)
+ } else if v2.Type() != nil {
+ if op, why := assignop(t2, v2.Type()); op == ir.OXXX {
+ base.ErrorfAt(n.Pos(), "cannot assign type %v to %L in range%s", t2, v2, why)
+ }
+ }
+ checkassign(n, v2)
+ }
+}
+
+// type check assignment.
+// if this assignment is the definition of a var on the left side,
+// fill in the var's type.
+func tcAssign(n *ir.AssignStmt) {
+ if base.EnableTrace && base.Flag.LowerT {
+ defer tracePrint("typecheckas", n)(nil)
+ }
+
+ // delicate little dance.
+ // the definition of n may refer to this assignment
+ // as its definition, in which case it will call typecheckas.
+ // in that case, do not call typecheck back, or it will cycle.
+ // if the variable has a type (ntype) then typechecking
+ // will not look at defn, so it is okay (and desirable,
+ // so that the conversion below happens).
+ n.X = Resolve(n.X)
+
+ if !ir.DeclaredBy(n.X, n) || n.X.Name().Ntype != nil {
+ n.X = AssignExpr(n.X)
+ }
+
+ // Use ctxMultiOK so we can emit an "N variables but M values" error
+ // to be consistent with typecheckas2 (#26616).
+ n.Y = typecheck(n.Y, ctxExpr|ctxMultiOK)
+ checkassign(n, n.X)
+ if n.Y != nil && n.Y.Type() != nil {
+ if n.Y.Type().IsFuncArgStruct() {
+ base.Errorf("assignment mismatch: 1 variable but %v returns %d values", n.Y.(*ir.CallExpr).X, n.Y.Type().NumFields())
+ // Multi-value RHS isn't actually valid for OAS; nil out
+ // to indicate failed typechecking.
+ n.Y.SetType(nil)
+ } else if n.X.Type() != nil {
+ n.Y = AssignConv(n.Y, n.X.Type(), "assignment")
+ }
+ }
+
+ // Declaration with no explicit type: the variable takes the
+ // default type of the right-hand side.
+ if ir.DeclaredBy(n.X, n) && n.X.Name().Ntype == nil {
+ n.Y = DefaultLit(n.Y, nil)
+ n.X.SetType(n.Y.Type())
+ }
+
+ // second half of dance.
+ // now that right is done, typecheck the left
+ // just to get it over with. see dance above.
+ n.SetTypecheck(1)
+
+ if n.X.Typecheck() == 0 {
+ n.X = AssignExpr(n.X)
+ }
+ if !ir.IsBlank(n.X) {
+ types.CheckSize(n.X.Type()) // ensure width is calculated for backend
+ }
+}
+
+// tcAssignList typechecks a multiple-assignment statement n (OAS2 and
+// its specialized forms). It handles the balanced case (N = N), the
+// multi-value call case "x, y, z = f()" (rewritten to OAS2FUNC), and
+// the comma-ok forms "x, ok = m[k] / <-ch / y.(T)" (rewritten to
+// OAS2MAPR / OAS2RECV / OAS2DOTTYPE), reporting mismatches otherwise.
+func tcAssignList(n *ir.AssignListStmt) {
+ if base.EnableTrace && base.Flag.LowerT {
+ defer tracePrint("typecheckas2", n)(nil)
+ }
+
+ ls := n.Lhs
+ for i1, n1 := range ls {
+ // delicate little dance.
+ n1 = Resolve(n1)
+ ls[i1] = n1
+
+ if !ir.DeclaredBy(n1, n) || n1.Name().Ntype != nil {
+ ls[i1] = AssignExpr(ls[i1])
+ }
+ }
+
+ // cl and cr count left- and right-hand sides.
+ cl := len(n.Lhs)
+ cr := len(n.Rhs)
+ if cl > 1 && cr == 1 {
+ // Single RHS for multiple LHS: allow a multi-value expression.
+ n.Rhs[0] = typecheck(n.Rhs[0], ctxExpr|ctxMultiOK)
+ } else {
+ Exprs(n.Rhs)
+ }
+ checkassignlist(n, n.Lhs)
+
+ var l ir.Node
+ var r ir.Node
+ if cl == cr {
+ // easy
+ ls := n.Lhs
+ rs := n.Rhs
+ for il, nl := range ls {
+ nr := rs[il]
+ if nl.Type() != nil && nr.Type() != nil {
+ rs[il] = AssignConv(nr, nl.Type(), "assignment")
+ }
+ if ir.DeclaredBy(nl, n) && nl.Name().Ntype == nil {
+ rs[il] = DefaultLit(rs[il], nil)
+ nl.SetType(rs[il].Type())
+ }
+ }
+
+ goto out
+ }
+
+ l = n.Lhs[0]
+ r = n.Rhs[0]
+
+ // x,y,z = f()
+ if cr == 1 {
+ if r.Type() == nil {
+ goto out
+ }
+ switch r.Op() {
+ case ir.OCALLMETH, ir.OCALLINTER, ir.OCALLFUNC:
+ if !r.Type().IsFuncArgStruct() {
+ break
+ }
+ // Match each LHS against the corresponding result field.
+ cr = r.Type().NumFields()
+ if cr != cl {
+ goto mismatch
+ }
+ r.(*ir.CallExpr).Use = ir.CallUseList
+ n.SetOp(ir.OAS2FUNC)
+ for i, l := range n.Lhs {
+ f := r.Type().Field(i)
+ if f.Type != nil && l.Type() != nil {
+ checkassignto(f.Type, l)
+ }
+ if ir.DeclaredBy(l, n) && l.Name().Ntype == nil {
+ l.SetType(f.Type)
+ }
+ }
+ goto out
+ }
+ }
+
+ // x, ok = y
+ if cl == 2 && cr == 1 {
+ if r.Type() == nil {
+ goto out
+ }
+ switch r.Op() {
+ case ir.OINDEXMAP, ir.ORECV, ir.ODOTTYPE:
+ switch r.Op() {
+ case ir.OINDEXMAP:
+ n.SetOp(ir.OAS2MAPR)
+ case ir.ORECV:
+ n.SetOp(ir.OAS2RECV)
+ case ir.ODOTTYPE:
+ r := r.(*ir.TypeAssertExpr)
+ n.SetOp(ir.OAS2DOTTYPE)
+ r.SetOp(ir.ODOTTYPE2)
+ }
+ if l.Type() != nil {
+ checkassignto(r.Type(), l)
+ }
+ if ir.DeclaredBy(l, n) {
+ l.SetType(r.Type())
+ }
+ // The second variable receives the bool "ok" result.
+ l := n.Lhs[1]
+ if l.Type() != nil && !l.Type().IsBoolean() {
+ checkassignto(types.Types[types.TBOOL], l)
+ }
+ if ir.DeclaredBy(l, n) && l.Name().Ntype == nil {
+ l.SetType(types.Types[types.TBOOL])
+ }
+ goto out
+ }
+ }
+
+mismatch:
+ switch r.Op() {
+ default:
+ base.Errorf("assignment mismatch: %d variables but %d values", cl, cr)
+ case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
+ r := r.(*ir.CallExpr)
+ base.Errorf("assignment mismatch: %d variables but %v returns %d values", cl, r.X, cr)
+ }
+
+ // second half of dance
+out:
+ n.SetTypecheck(1)
+ ls = n.Lhs
+ for i1, n1 := range ls {
+ if n1.Typecheck() == 0 {
+ ls[i1] = AssignExpr(ls[i1])
+ }
+ }
+}
+
+// tcFor typechecks an OFOR node.
+// It typechecks the init statements, condition, post statement, and body,
+// and reports an error if the condition is present but not boolean.
+func tcFor(n *ir.ForStmt) ir.Node {
+	Stmts(n.Init())
+	decldepth++ // loop body is one declaration level deeper
+	n.Cond = Expr(n.Cond)
+	n.Cond = DefaultLit(n.Cond, nil)
+	if n.Cond != nil {
+		t := n.Cond.Type()
+		if t != nil && !t.IsBoolean() {
+			base.Errorf("non-bool %L used as for condition", n.Cond)
+		}
+	}
+	n.Post = Stmt(n.Post)
+	// OFORUNTIL is a compiler-generated loop form; it additionally
+	// carries a Late statement list that must be typechecked.
+	if n.Op() == ir.OFORUNTIL {
+		Stmts(n.Late)
+	}
+	Stmts(n.Body)
+	decldepth--
+	return n
+}
+
+// tcGoDefer checks that the argument of a go or defer statement is a
+// permitted call form, reporting an error otherwise. The call itself is
+// assumed to have been typechecked already.
+func tcGoDefer(n *ir.GoDeferStmt) {
+	what := "defer"
+	if n.Op() == ir.OGO {
+		what = "go"
+	}
+
+	switch n.Call.Op() {
+	// ok
+	case ir.OCALLINTER,
+		ir.OCALLMETH,
+		ir.OCALLFUNC,
+		ir.OCLOSE,
+		ir.OCOPY,
+		ir.ODELETE,
+		ir.OPANIC,
+		ir.OPRINT,
+		ir.OPRINTN,
+		ir.ORECOVER:
+		return
+
+	// Built-ins that produce a value: go/defer of these would
+	// silently discard the result, so reject them.
+	case ir.OAPPEND,
+		ir.OCAP,
+		ir.OCOMPLEX,
+		ir.OIMAG,
+		ir.OLEN,
+		ir.OMAKE,
+		ir.OMAKESLICE,
+		ir.OMAKECHAN,
+		ir.OMAKEMAP,
+		ir.ONEW,
+		ir.OREAL,
+		ir.OLITERAL: // conversion or unsafe.Alignof, Offsetof, Sizeof
+		if orig := ir.Orig(n.Call); orig.Op() == ir.OCONV {
+			break
+		}
+		base.ErrorfAt(n.Pos(), "%s discards result of %v", what, n.Call)
+		return
+	}
+
+	// type is broken or missing, most likely a method call on a broken type
+	// we will warn about the broken type elsewhere. no need to emit a potentially confusing error
+	if n.Call.Type() == nil || n.Call.Type().Broke() {
+		return
+	}
+
+	if !n.Diag() {
+		// The syntax made sure it was a call, so this must be
+		// a conversion.
+		n.SetDiag(true)
+		base.ErrorfAt(n.Pos(), "%s requires function call, not conversion", what)
+	}
+}
+
+// tcIf typechecks an OIF node: init statements, the condition (which
+// must be boolean if typed), and both branches.
+func tcIf(n *ir.IfStmt) ir.Node {
+	Stmts(n.Init())
+	n.Cond = Expr(n.Cond)
+	n.Cond = DefaultLit(n.Cond, nil)
+	if n.Cond != nil {
+		t := n.Cond.Type()
+		if t != nil && !t.IsBoolean() {
+			base.Errorf("non-bool %L used as if condition", n.Cond)
+		}
+	}
+	Stmts(n.Body)
+	Stmts(n.Else)
+	return n
+}
+
+// tcRange typechecks an ORANGE statement.
+func tcRange(n *ir.RangeStmt) {
+	// Typechecking order is important here:
+	// 0. first typecheck range expression (slice/map/chan),
+	//	it is evaluated only once and so logically it is not part of the loop.
+	// 1. typecheck produced values,
+	//	this part can declare new vars and so it must be typechecked before body,
+	//	because body can contain a closure that captures the vars.
+	// 2. decldepth++ to denote loop body.
+	// 3. typecheck body.
+	// 4. decldepth--.
+	typecheckrangeExpr(n)
+
+	// second half of dance, the first half being typecheckrangeExpr
+	n.SetTypecheck(1)
+	ls := n.Vars
+	for i1, n1 := range ls {
+		if n1.Typecheck() == 0 {
+			ls[i1] = AssignExpr(ls[i1])
+		}
+	}
+
+	decldepth++
+	Stmts(n.Body)
+	decldepth--
+}
+
+// tcReturn typechecks an ORETURN node: its result expressions are
+// checked against the current function's result types. A bare return
+// inside a function with named results is accepted as-is.
+func tcReturn(n *ir.ReturnStmt) ir.Node {
+	typecheckargs(n)
+	if ir.CurFunc == nil {
+		base.Errorf("return outside function")
+		n.SetType(nil)
+		return n
+	}
+
+	// Bare "return" with named results: nothing further to check.
+	if ir.HasNamedResults(ir.CurFunc) && len(n.Results) == 0 {
+		return n
+	}
+	typecheckaste(ir.ORETURN, nil, false, ir.CurFunc.Type().Results(), n.Results, func() string { return "return argument" })
+	return n
+}
+
+// tcSelect typechecks an OSELECT statement, validating each case:
+// it must be a receive, a send, or an assignment whose right-hand side
+// is a receive. Receive forms are normalized to OSELRECV2
+// ("x, _ = <-c") so later phases see a single shape.
+func tcSelect(sel *ir.SelectStmt) {
+	var def ir.Node
+	lno := ir.SetPos(sel)
+	Stmts(sel.Init())
+	for _, ncase := range sel.Cases {
+		ncase := ncase.(*ir.CaseStmt)
+
+		if len(ncase.List) == 0 {
+			// default
+			if def != nil {
+				base.ErrorfAt(ncase.Pos(), "multiple defaults in select (first at %v)", ir.Line(def))
+			} else {
+				def = ncase
+			}
+		} else if len(ncase.List) > 1 {
+			base.ErrorfAt(ncase.Pos(), "select cases cannot be lists")
+		} else {
+			ncase.List[0] = Stmt(ncase.List[0])
+			n := ncase.List[0]
+			ncase.Comm = n
+			ncase.List.Set(nil)
+			// oselrecv2 rewrites a receive case into the canonical
+			// two-result form: dst, _ = <-c.
+			oselrecv2 := func(dst, recv ir.Node, colas bool) {
+				n := ir.NewAssignListStmt(n.Pos(), ir.OSELRECV2, nil, nil)
+				n.Lhs = []ir.Node{dst, ir.BlankNode}
+				n.Rhs = []ir.Node{recv}
+				n.Def = colas
+				n.SetTypecheck(1)
+				ncase.Comm = n
+			}
+			switch n.Op() {
+			default:
+				pos := n.Pos()
+				if n.Op() == ir.ONAME {
+					// We don't have the right position for ONAME nodes (see #15459 and
+					// others). Using ncase.Pos for now as it will provide the correct
+					// line number (assuming the expression follows the "case" keyword
+					// on the same line). This matches the approach before 1.10.
+					pos = ncase.Pos()
+				}
+				base.ErrorfAt(pos, "select case must be receive, send or assign recv")
+
+			case ir.OAS:
+				// convert x = <-c into x, _ = <-c
+				// remove implicit conversions; the eventual assignment
+				// will reintroduce them.
+				n := n.(*ir.AssignStmt)
+				if r := n.Y; r.Op() == ir.OCONVNOP || r.Op() == ir.OCONVIFACE {
+					r := r.(*ir.ConvExpr)
+					if r.Implicit() {
+						n.Y = r.X
+					}
+				}
+				if n.Y.Op() != ir.ORECV {
+					base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side")
+					break
+				}
+				oselrecv2(n.X, n.Y, n.Def)
+
+			case ir.OAS2RECV:
+				n := n.(*ir.AssignListStmt)
+				if n.Rhs[0].Op() != ir.ORECV {
+					base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side")
+					break
+				}
+				n.SetOp(ir.OSELRECV2)
+
+			case ir.ORECV:
+				// convert <-c into _, _ = <-c
+				n := n.(*ir.UnaryExpr)
+				oselrecv2(ir.BlankNode, n, false)
+
+			case ir.OSEND:
+				break
+			}
+		}
+
+		Stmts(ncase.Body)
+	}
+
+	base.Pos = lno
+}
+
+// tcSend typechecks an OSEND node: the channel operand must be a
+// sendable channel and the value is converted to its element type.
+func tcSend(n *ir.SendStmt) ir.Node {
+	n.Chan = Expr(n.Chan)
+	n.Value = Expr(n.Value)
+	n.Chan = DefaultLit(n.Chan, nil)
+	t := n.Chan.Type()
+	if t == nil {
+		// Error already reported elsewhere.
+		return n
+	}
+	if !t.IsChan() {
+		base.Errorf("invalid operation: %v (send to non-chan type %v)", n, t)
+		return n
+	}
+
+	if !t.ChanDir().CanSend() {
+		base.Errorf("invalid operation: %v (send to receive-only type %v)", n, t)
+		return n
+	}
+
+	n.Value = AssignConv(n.Value, t.Elem(), "send")
+	if n.Value.Type() == nil {
+		return n
+	}
+	return n
+}
+
+// tcSwitch typechecks a switch statement, dispatching to the type-switch
+// or expression-switch checker depending on the tag.
+func tcSwitch(n *ir.SwitchStmt) {
+	Stmts(n.Init())
+	if n.Tag != nil && n.Tag.Op() == ir.OTYPESW {
+		tcSwitchType(n)
+	} else {
+		tcSwitchExpr(n)
+	}
+}
+
+// tcSwitchExpr typechecks an expression switch. A missing tag is treated
+// as switching on true (type bool). It reports duplicate defaults,
+// incomparable tag types, and case expressions that do not match the
+// tag type, and checks for duplicate constant cases.
+func tcSwitchExpr(n *ir.SwitchStmt) {
+	t := types.Types[types.TBOOL]
+	if n.Tag != nil {
+		n.Tag = Expr(n.Tag)
+		n.Tag = DefaultLit(n.Tag, nil)
+		t = n.Tag.Type()
+	}
+
+	// nilonly is set when the tag type can only be compared to nil
+	// (map, func, slice); cases must then be nil literals.
+	var nilonly string
+	if t != nil {
+		switch {
+		case t.IsMap():
+			nilonly = "map"
+		case t.Kind() == types.TFUNC:
+			nilonly = "func"
+		case t.IsSlice():
+			nilonly = "slice"
+
+		case !types.IsComparable(t):
+			if t.IsStruct() {
+				base.ErrorfAt(n.Pos(), "cannot switch on %L (struct containing %v cannot be compared)", n.Tag, types.IncomparableField(t).Type)
+			} else {
+				base.ErrorfAt(n.Pos(), "cannot switch on %L", n.Tag)
+			}
+			t = nil
+		}
+	}
+
+	var defCase ir.Node
+	var cs constSet
+	for _, ncase := range n.Cases {
+		ncase := ncase.(*ir.CaseStmt)
+		ls := ncase.List
+		if len(ls) == 0 { // default:
+			if defCase != nil {
+				base.ErrorfAt(ncase.Pos(), "multiple defaults in switch (first at %v)", ir.Line(defCase))
+			} else {
+				defCase = ncase
+			}
+		}
+
+		for i := range ls {
+			ir.SetPos(ncase)
+			ls[i] = Expr(ls[i])
+			ls[i] = DefaultLit(ls[i], t)
+			n1 := ls[i]
+			if t == nil || n1.Type() == nil {
+				continue
+			}
+
+			if nilonly != "" && !ir.IsNil(n1) {
+				base.ErrorfAt(ncase.Pos(), "invalid case %v in switch (can only compare %s %v to nil)", n1, nilonly, n.Tag)
+			} else if t.IsInterface() && !n1.Type().IsInterface() && !types.IsComparable(n1.Type()) {
+				base.ErrorfAt(ncase.Pos(), "invalid case %L in switch (incomparable type)", n1)
+			} else {
+				// Either direction of assignability makes the case valid.
+				op1, _ := assignop(n1.Type(), t)
+				op2, _ := assignop(t, n1.Type())
+				if op1 == ir.OXXX && op2 == ir.OXXX {
+					if n.Tag != nil {
+						base.ErrorfAt(ncase.Pos(), "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Tag, n1.Type(), t)
+					} else {
+						base.ErrorfAt(ncase.Pos(), "invalid case %v in switch (mismatched types %v and bool)", n1, n1.Type())
+					}
+				}
+			}
+
+			// Don't check for duplicate bools. Although the spec allows it,
+			// (1) the compiler hasn't checked it in the past, so compatibility mandates it, and
+			// (2) it would disallow useful things like
+			//       case GOARCH == "arm" && GOARM == "5":
+			//       case GOARCH == "arm":
+			//     which would both evaluate to false for non-ARM compiles.
+			if !n1.Type().IsBoolean() {
+				cs.add(ncase.Pos(), n1, "case", "switch")
+			}
+		}
+
+		Stmts(ncase.Body)
+	}
+}
+
+// tcSwitchType typechecks a type switch. It verifies that the guard
+// expression has interface type, that each case names a type (or nil)
+// at most once, that non-interface case types could actually implement
+// the guarded interface, and assigns each clause variable its per-case
+// static type.
+func tcSwitchType(n *ir.SwitchStmt) {
+	guard := n.Tag.(*ir.TypeSwitchGuard)
+	guard.X = Expr(guard.X)
+	t := guard.X.Type()
+	if t != nil && !t.IsInterface() {
+		base.ErrorfAt(n.Pos(), "cannot type switch on non-interface value %L", guard.X)
+		t = nil
+	}
+
+	// We don't actually declare the type switch's guarded
+	// declaration itself. So if there are no cases, we won't
+	// notice that it went unused.
+	if v := guard.Tag; v != nil && !ir.IsBlank(v) && len(n.Cases) == 0 {
+		base.ErrorfAt(v.Pos(), "%v declared but not used", v.Sym())
+	}
+
+	var defCase, nilCase ir.Node
+	var ts typeSet
+	for _, ncase := range n.Cases {
+		ncase := ncase.(*ir.CaseStmt)
+		ls := ncase.List
+		if len(ls) == 0 { // default:
+			if defCase != nil {
+				base.ErrorfAt(ncase.Pos(), "multiple defaults in switch (first at %v)", ir.Line(defCase))
+			} else {
+				defCase = ncase
+			}
+		}
+
+		for i := range ls {
+			ls[i] = typecheck(ls[i], ctxExpr|ctxType)
+			n1 := ls[i]
+			if t == nil || n1.Type() == nil {
+				continue
+			}
+
+			var missing, have *types.Field
+			var ptr int
+			if ir.IsNil(n1) { // case nil:
+				if nilCase != nil {
+					base.ErrorfAt(ncase.Pos(), "multiple nil cases in type switch (first at %v)", ir.Line(nilCase))
+				} else {
+					nilCase = ncase
+				}
+				continue
+			}
+			if n1.Op() != ir.OTYPE {
+				base.ErrorfAt(ncase.Pos(), "%L is not a type", n1)
+				continue
+			}
+			if !n1.Type().IsInterface() && !implements(n1.Type(), t, &missing, &have, &ptr) && !missing.Broke() {
+				if have != nil && !have.Broke() {
+					base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+
+						" (wrong type for %v method)\n\thave %v%S\n\twant %v%S", guard.X, n1.Type(), missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+				} else if ptr != 0 {
+					base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+
+						" (%v method has pointer receiver)", guard.X, n1.Type(), missing.Sym)
+				} else {
+					base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+
+						" (missing %v method)", guard.X, n1.Type(), missing.Sym)
+				}
+				continue
+			}
+
+			ts.add(ncase.Pos(), n1.Type())
+		}
+
+		if len(ncase.Vars) != 0 {
+			// Assign the clause variable's type.
+			vt := t
+			if len(ls) == 1 {
+				if ls[0].Op() == ir.OTYPE {
+					vt = ls[0].Type()
+				} else if !ir.IsNil(ls[0]) {
+					// Invalid single-type case;
+					// mark variable as broken.
+					vt = nil
+				}
+			}
+
+			nvar := ncase.Vars[0]
+			nvar.SetType(vt)
+			if vt != nil {
+				nvar = AssignExpr(nvar)
+			} else {
+				// Clause variable is broken; prevent typechecking.
+				nvar.SetTypecheck(1)
+				nvar.SetWalkdef(1)
+			}
+			ncase.Vars[0] = nvar
+		}
+
+		Stmts(ncase.Body)
+	}
+}
+
+// typeSet tracks the set of types seen in the cases of a type switch,
+// so duplicate case types can be diagnosed.
+type typeSet struct {
+	m map[string][]typeSetEntry
+}
+
+// typeSetEntry records one case type and where it appeared.
+type typeSetEntry struct {
+	pos src.XPos
+	typ *types.Type
+}
+
+// add records typ at pos, reporting an error if an identical type was
+// already added.
+func (s *typeSet) add(pos src.XPos, typ *types.Type) {
+	if s.m == nil {
+		s.m = make(map[string][]typeSetEntry)
+	}
+
+	// LongString does not uniquely identify types, so we need to
+	// disambiguate collisions with types.Identical.
+	// TODO(mdempsky): Add a method that *is* unique.
+	ls := typ.LongString()
+	prevs := s.m[ls]
+	for _, prev := range prevs {
+		if types.Identical(typ, prev.typ) {
+			base.ErrorfAt(pos, "duplicate case %v in type switch\n\tprevious case at %s", typ, base.FmtPos(prev.pos))
+			return
+		}
+	}
+	s.m[ls] = append(prevs, typeSetEntry{pos, typ})
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// AssignConv converts node n for assignment to type t, using context as
+// the description used in any error message.
+func AssignConv(n ir.Node, t *types.Type, context string) ir.Node {
+	return assignconvfn(n, t, func() string { return context })
+}
+
+// DotImportRefs maps idents introduced by importDot back to the
+// ir.PkgName they were dot-imported through, so uses can be attributed
+// to the originating import.
+var DotImportRefs map[*ir.Ident]*ir.PkgName
+
+// LookupNum looks up the symbol starting with prefix and ending with
+// the decimal n. If prefix is too long, LookupNum panics.
+func LookupNum(prefix string, n int) *types.Sym {
+	var buf [20]byte // plenty long enough for all current users
+	copy(buf[:], prefix)
+	// Append the decimal digits directly after the prefix bytes.
+	b := strconv.AppendInt(buf[:len(prefix)], int64(n), 10)
+	return types.LocalPkg.LookupBytes(b)
+}
+
+// NewFuncParams returns a list of fn args built from the given funarg
+// struct list tl. If mustname is set, anonymous or blank parameters are
+// given invented names so they can be referenced in a trampoline.
+func NewFuncParams(tl *types.Type, mustname bool) []*ir.Field {
+	var args []*ir.Field
+	gen := 0
+	for _, t := range tl.Fields().Slice() {
+		s := t.Sym
+		if mustname && (s == nil || s.Name == "_") {
+			// invent a name so that we can refer to it in the trampoline
+			s = LookupNum(".anon", gen)
+			gen++
+		}
+		a := ir.NewField(base.Pos, s, nil, t.Type)
+		a.Pos = t.Pos
+		a.IsDDD = t.IsDDD()
+		args = append(args, a)
+	}
+
+	return args
+}
+
+// NewName returns a new ONAME Node associated with symbol s,
+// attached to the current function.
+func NewName(s *types.Sym) *ir.Name {
+	n := ir.NewNameAt(base.Pos, s)
+	n.Curfn = ir.CurFunc
+	return n
+}
+
+// NodAddr returns a node representing &n at base.Pos.
+func NodAddr(n ir.Node) *ir.AddrExpr {
+	return NodAddrAt(base.Pos, n)
+}
+
+// NodAddrAt returns a node representing &n at position pos.
+func NodAddrAt(pos src.XPos, n ir.Node) *ir.AddrExpr {
+	return ir.NewAddrExpr(pos, n)
+}
+
+// NodNil returns a new untyped-nil node with type TNIL.
+func NodNil() ir.Node {
+	n := ir.NewNilExpr(base.Pos)
+	n.SetType(types.Types[types.TNIL])
+	return n
+}
+
+// AddImplicitDots rewrites a selector expression n (x.f) to make any
+// elided embedded-field selections explicit: for x.f reached through
+// embedded fields, it inserts the intermediate ODOT nodes that give the
+// shortest unique addressing, modifying the tree in place.
+func AddImplicitDots(n *ir.SelectorExpr) *ir.SelectorExpr {
+	n.X = typecheck(n.X, ctxType|ctxExpr)
+	if n.X.Diag() {
+		n.SetDiag(true)
+	}
+	t := n.X.Type()
+	if t == nil {
+		return n
+	}
+
+	if n.X.Op() == ir.OTYPE {
+		return n
+	}
+
+	s := n.Sel
+	if s == nil {
+		return n
+	}
+
+	switch path, ambig := dotpath(s, t, nil, false); {
+	case path != nil:
+		// rebuild elided dots
+		for c := len(path) - 1; c >= 0; c-- {
+			dot := ir.NewSelectorExpr(base.Pos, ir.ODOT, n.X, path[c].field.Sym)
+			dot.SetImplicit(true)
+			dot.SetType(path[c].field.Type)
+			n.X = dot
+		}
+	case ambig:
+		base.Errorf("ambiguous selector %v", n)
+		n.X = nil
+	}
+
+	return n
+}
+
+// CalcMethods computes t's full method set, including methods promoted
+// from embedded fields, and stores it in t.AllMethods. It is a no-op if
+// the method set was already computed.
+func CalcMethods(t *types.Type) {
+	if t == nil || t.AllMethods().Len() != 0 {
+		return
+	}
+
+	// mark top-level method symbols
+	// so that expand1 doesn't consider them.
+	for _, f := range t.Methods().Slice() {
+		f.Sym.SetUniq(true)
+	}
+
+	// generate all reachable methods
+	slist = slist[:0]
+	expand1(t, true)
+
+	// check each method to be uniquely reachable
+	var ms []*types.Field
+	for i, sl := range slist {
+		slist[i].field = nil
+		sl.field.Sym.SetUniq(false)
+
+		var f *types.Field
+		path, _ := dotpath(sl.field.Sym, t, &f, false)
+		if path == nil {
+			continue
+		}
+
+		// dotpath may have dug out arbitrary fields, we only want methods.
+		if !f.IsMethod() {
+			continue
+		}
+
+		// add it to the base type method list
+		f = f.Copy()
+		f.Embedded = 1 // needs a trampoline
+		for _, d := range path {
+			if d.field.Type.IsPtr() {
+				// Reached through a pointer-typed embedded field.
+				f.Embedded = 2
+				break
+			}
+		}
+		ms = append(ms, f)
+	}
+
+	for _, f := range t.Methods().Slice() {
+		f.Sym.SetUniq(false)
+	}
+
+	ms = append(ms, t.Methods().Slice()...)
+	sort.Sort(types.MethodsByName(ms))
+	t.AllMethods().Set(ms)
+}
+
+// adddot1 returns the number of fields or methods named s at depth d in Type t.
+// If exactly one exists, it will be returned in *save (if save is not nil),
+// and dotlist will contain the path of embedded fields traversed to find it,
+// in reverse order. If none exist, more will indicate whether t contains any
+// embedded fields at depth d, so callers can decide whether to retry at
+// a greater depth.
+func adddot1(s *types.Sym, t *types.Type, d int, save **types.Field, ignorecase bool) (c int, more bool) {
+	// Guard against cycles through recursive embedded types.
+	if t.Recur() {
+		return
+	}
+	t.SetRecur(true)
+	defer t.SetRecur(false)
+
+	var u *types.Type
+	d--
+	if d < 0 {
+		// We've reached our target depth. If t has any fields/methods
+		// named s, then we're done. Otherwise, we still need to check
+		// below for embedded fields.
+		c = lookdot0(s, t, save, ignorecase)
+		if c != 0 {
+			return c, false
+		}
+	}
+
+	u = t
+	if u.IsPtr() {
+		u = u.Elem()
+	}
+	if !u.IsStruct() && !u.IsInterface() {
+		return c, false
+	}
+
+	for _, f := range u.Fields().Slice() {
+		if f.Embedded == 0 || f.Sym == nil {
+			continue
+		}
+		if d < 0 {
+			// Found an embedded field at target depth.
+			return c, true
+		}
+		a, more1 := adddot1(s, f.Type, d, save, ignorecase)
+		if a != 0 && c == 0 {
+			// Record the first path found at this depth.
+			dotlist[d].field = f
+		}
+		c += a
+		if more1 {
+			more = true
+		}
+	}
+
+	return c, more
+}
+
+// dotlist is used by adddot1 to record the path of embedded fields
+// used to access a target field or method.
+// Must be non-nil so that dotpath returns a non-nil slice even if d is zero.
+var dotlist = make([]dlist, 10)
+
+// assignconvfn converts node n for assignment to type t; context is
+// called lazily to produce the description used in error messages.
+func assignconvfn(n ir.Node, t *types.Type, context func() string) ir.Node {
+	if n == nil || n.Type() == nil || n.Type().Broke() {
+		return n
+	}
+
+	if t.Kind() == types.TBLANK && n.Type().Kind() == types.TNIL {
+		base.Errorf("use of untyped nil")
+	}
+
+	n = convlit1(n, t, false, context)
+	if n.Type() == nil {
+		return n
+	}
+	if t.Kind() == types.TBLANK {
+		return n
+	}
+
+	// Convert ideal bool from comparison to plain bool
+	// if the next step is non-bool (like interface{}).
+	if n.Type() == types.UntypedBool && !t.IsBoolean() {
+		if n.Op() == ir.ONAME || n.Op() == ir.OLITERAL {
+			r := ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, n)
+			r.SetType(types.Types[types.TBOOL])
+			r.SetTypecheck(1)
+			r.SetImplicit(true)
+			n = r
+		}
+	}
+
+	if types.Identical(n.Type(), t) {
+		return n
+	}
+
+	op, why := assignop(n.Type(), t)
+	if op == ir.OXXX {
+		base.Errorf("cannot use %L as type %v in %s%s", n, t, context(), why)
+		// Continue with OCONV so downstream code sees a well-formed tree.
+		op = ir.OCONV
+	}
+
+	r := ir.NewConvExpr(base.Pos, op, t, n)
+	r.SetTypecheck(1)
+	r.SetImplicit(true)
+	return r
+}
+
+// assignop reports whether a value of type src is assignment compatible
+// to type dst.
+// If so, return op code to use in conversion.
+// If not, return OXXX. In this case, the string return parameter may
+// hold a reason why. In all other cases, it'll be the empty string.
+func assignop(src, dst *types.Type) (ir.Op, string) {
+	if src == dst {
+		return ir.OCONVNOP, ""
+	}
+	if src == nil || dst == nil || src.Kind() == types.TFORW || dst.Kind() == types.TFORW || src.Underlying() == nil || dst.Underlying() == nil {
+		return ir.OXXX, ""
+	}
+
+	// 1. src type is identical to dst.
+	if types.Identical(src, dst) {
+		return ir.OCONVNOP, ""
+	}
+
+	// 2. src and dst have identical underlying types
+	// and either src or dst is not a named type or
+	// both are empty interface types.
+	// For assignable but different non-empty interface types,
+	// we want to recompute the itab. Recomputing the itab ensures
+	// that itabs are unique (thus an interface with a compile-time
+	// type I has an itab with interface type I).
+	if types.Identical(src.Underlying(), dst.Underlying()) {
+		if src.IsEmptyInterface() {
+			// Conversion between two empty interfaces
+			// requires no code.
+			return ir.OCONVNOP, ""
+		}
+		if (src.Sym() == nil || dst.Sym() == nil) && !src.IsInterface() {
+			// Conversion between two types, at least one unnamed,
+			// needs no conversion. The exception is nonempty interfaces
+			// which need to have their itab updated.
+			return ir.OCONVNOP, ""
+		}
+	}
+
+	// 3. dst is an interface type and src implements dst.
+	if dst.IsInterface() && src.Kind() != types.TNIL {
+		var missing, have *types.Field
+		var ptr int
+		if implements(src, dst, &missing, &have, &ptr) {
+			// Call itabname so that (src, dst)
+			// gets added to itabs early, which allows
+			// us to de-virtualize calls through this
+			// type/interface pair later. See peekitabs in reflect.go
+			if types.IsDirectIface(src) && !dst.IsEmptyInterface() {
+				NeedITab(src, dst)
+			}
+
+			return ir.OCONVIFACE, ""
+		}
+
+		// we'll have complained about this method anyway, suppress spurious messages.
+		if have != nil && have.Sym == missing.Sym && (have.Type.Broke() || missing.Type.Broke()) {
+			return ir.OCONVIFACE, ""
+		}
+
+		// Build a detailed explanation of why src does not implement dst.
+		var why string
+		if isptrto(src, types.TINTER) {
+			why = fmt.Sprintf(":\n\t%v is pointer to interface, not interface", src)
+		} else if have != nil && have.Sym == missing.Sym && have.Nointerface() {
+			why = fmt.Sprintf(":\n\t%v does not implement %v (%v method is marked 'nointerface')", src, dst, missing.Sym)
+		} else if have != nil && have.Sym == missing.Sym {
+			why = fmt.Sprintf(":\n\t%v does not implement %v (wrong type for %v method)\n"+
+				"\t\thave %v%S\n\t\twant %v%S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+		} else if ptr != 0 {
+			why = fmt.Sprintf(":\n\t%v does not implement %v (%v method has pointer receiver)", src, dst, missing.Sym)
+		} else if have != nil {
+			why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)\n"+
+				"\t\thave %v%S\n\t\twant %v%S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+		} else {
+			why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)", src, dst, missing.Sym)
+		}
+
+		return ir.OXXX, why
+	}
+
+	if isptrto(dst, types.TINTER) {
+		why := fmt.Sprintf(":\n\t%v is pointer to interface, not interface", dst)
+		return ir.OXXX, why
+	}
+
+	if src.IsInterface() && dst.Kind() != types.TBLANK {
+		var missing, have *types.Field
+		var ptr int
+		var why string
+		if implements(dst, src, &missing, &have, &ptr) {
+			why = ": need type assertion"
+		}
+		return ir.OXXX, why
+	}
+
+	// 4. src is a bidirectional channel value, dst is a channel type,
+	// src and dst have identical element types, and
+	// either src or dst is not a named type.
+	if src.IsChan() && src.ChanDir() == types.Cboth && dst.IsChan() {
+		if types.Identical(src.Elem(), dst.Elem()) && (src.Sym() == nil || dst.Sym() == nil) {
+			return ir.OCONVNOP, ""
+		}
+	}
+
+	// 5. src is the predeclared identifier nil and dst is a nillable type.
+	if src.Kind() == types.TNIL {
+		switch dst.Kind() {
+		case types.TPTR,
+			types.TFUNC,
+			types.TMAP,
+			types.TCHAN,
+			types.TINTER,
+			types.TSLICE:
+			return ir.OCONVNOP, ""
+		}
+	}
+
+	// 6. rule about untyped constants - already converted by defaultlit.
+
+	// 7. Any typed value can be assigned to the blank identifier.
+	if dst.Kind() == types.TBLANK {
+		return ir.OCONVNOP, ""
+	}
+
+	return ir.OXXX, ""
+}
+
+// convertop reports whether a value of type src can be converted to a
+// value of type dst.
+// If so, return op code to use in conversion (maybe OCONVNOP).
+// If not, return OXXX. In this case, the string return parameter may
+// hold a reason why. In all other cases, it'll be the empty string.
+// srcConstant indicates whether the value of type src is a constant.
+func convertop(srcConstant bool, src, dst *types.Type) (ir.Op, string) {
+	if src == dst {
+		return ir.OCONVNOP, ""
+	}
+	if src == nil || dst == nil {
+		return ir.OXXX, ""
+	}
+
+	// Conversions from regular to go:notinheap are not allowed
+	// (unless it's unsafe.Pointer). These are runtime-specific
+	// rules.
+	// (a) Disallow (*T) to (*U) where T is go:notinheap but U isn't.
+	if src.IsPtr() && dst.IsPtr() && dst.Elem().NotInHeap() && !src.Elem().NotInHeap() {
+		why := fmt.Sprintf(":\n\t%v is incomplete (or unallocatable), but %v is not", dst.Elem(), src.Elem())
+		return ir.OXXX, why
+	}
+	// (b) Disallow string to []T where T is go:notinheap.
+	if src.IsString() && dst.IsSlice() && dst.Elem().NotInHeap() && (dst.Elem().Kind() == types.ByteType.Kind() || dst.Elem().Kind() == types.RuneType.Kind()) {
+		why := fmt.Sprintf(":\n\t%v is incomplete (or unallocatable)", dst.Elem())
+		return ir.OXXX, why
+	}
+
+	// 1. src can be assigned to dst.
+	op, why := assignop(src, dst)
+	if op != ir.OXXX {
+		return op, why
+	}
+
+	// The rules for interfaces are no different in conversions
+	// than assignments. If interfaces are involved, stop now
+	// with the good message from assignop.
+	// Otherwise clear the error.
+	if src.IsInterface() || dst.IsInterface() {
+		return ir.OXXX, why
+	}
+
+	// 2. Ignoring struct tags, src and dst have identical underlying types.
+	if types.IdenticalIgnoreTags(src.Underlying(), dst.Underlying()) {
+		return ir.OCONVNOP, ""
+	}
+
+	// 3. src and dst are unnamed pointer types and, ignoring struct tags,
+	// their base types have identical underlying types.
+	if src.IsPtr() && dst.IsPtr() && src.Sym() == nil && dst.Sym() == nil {
+		if types.IdenticalIgnoreTags(src.Elem().Underlying(), dst.Elem().Underlying()) {
+			return ir.OCONVNOP, ""
+		}
+	}
+
+	// 4. src and dst are both integer or floating point types.
+	if (src.IsInteger() || src.IsFloat()) && (dst.IsInteger() || dst.IsFloat()) {
+		if types.SimType[src.Kind()] == types.SimType[dst.Kind()] {
+			return ir.OCONVNOP, ""
+		}
+		return ir.OCONV, ""
+	}
+
+	// 5. src and dst are both complex types.
+	if src.IsComplex() && dst.IsComplex() {
+		if types.SimType[src.Kind()] == types.SimType[dst.Kind()] {
+			return ir.OCONVNOP, ""
+		}
+		return ir.OCONV, ""
+	}
+
+	// Special case for constant conversions: any numeric
+	// conversion is potentially okay. We'll validate further
+	// within evconst. See #38117.
+	if srcConstant && (src.IsInteger() || src.IsFloat() || src.IsComplex()) && (dst.IsInteger() || dst.IsFloat() || dst.IsComplex()) {
+		return ir.OCONV, ""
+	}
+
+	// 6. src is an integer or has type []byte or []rune
+	// and dst is a string type.
+	if src.IsInteger() && dst.IsString() {
+		return ir.ORUNESTR, ""
+	}
+
+	if src.IsSlice() && dst.IsString() {
+		if src.Elem().Kind() == types.ByteType.Kind() {
+			return ir.OBYTES2STR, ""
+		}
+		if src.Elem().Kind() == types.RuneType.Kind() {
+			return ir.ORUNES2STR, ""
+		}
+	}
+
+	// 7. src is a string and dst is []byte or []rune.
+	// String to slice.
+	if src.IsString() && dst.IsSlice() {
+		if dst.Elem().Kind() == types.ByteType.Kind() {
+			return ir.OSTR2BYTES, ""
+		}
+		if dst.Elem().Kind() == types.RuneType.Kind() {
+			return ir.OSTR2RUNES, ""
+		}
+	}
+
+	// 8. src is a pointer or uintptr and dst is unsafe.Pointer.
+	if (src.IsPtr() || src.IsUintptr()) && dst.IsUnsafePtr() {
+		return ir.OCONVNOP, ""
+	}
+
+	// 9. src is unsafe.Pointer and dst is a pointer or uintptr.
+	if src.IsUnsafePtr() && (dst.IsPtr() || dst.IsUintptr()) {
+		return ir.OCONVNOP, ""
+	}
+
+	// src is map and dst is a pointer to corresponding hmap.
+	// This rule is needed for the implementation detail that
+	// go gc maps are implemented as a pointer to a hmap struct.
+	if src.Kind() == types.TMAP && dst.IsPtr() &&
+		src.MapType().Hmap == dst.Elem() {
+		return ir.OCONVNOP, ""
+	}
+
+	return ir.OXXX, ""
+}
+
+// Code to resolve elided DOTs in embedded types.
+
+// A dlist stores a pointer to a TFIELD Type embedded within
+// a TSTRUCT or TINTER Type.
+type dlist struct {
+	field *types.Field
+}
+
+// dotpath computes the unique shortest explicit selector path to fully qualify
+// a selection expression x.f, where x is of type t and f is the symbol s.
+// If no such path exists, dotpath returns nil.
+// If there are multiple shortest paths to the same depth, ambig is true.
+func dotpath(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) (path []dlist, ambig bool) {
+	// The embedding of types within structs imposes a tree structure onto
+	// types: structs parent the types they embed, and types parent their
+	// fields or methods. Our goal here is to find the shortest path to
+	// a field or method named s in the subtree rooted at t. To accomplish
+	// that, we iteratively perform depth-first searches of increasing depth
+	// until we either find the named field/method or exhaust the tree.
+	for d := 0; ; d++ {
+		if d > len(dotlist) {
+			dotlist = append(dotlist, dlist{})
+		}
+		if c, more := adddot1(s, t, d, save, ignorecase); c == 1 {
+			// Exactly one match; adddot1 filled dotlist[:d] with the path.
+			return dotlist[:d], false
+		} else if c > 1 {
+			return nil, true
+		} else if !more {
+			return nil, false
+		}
+	}
+}
+
+// expand0 appends to slist the not-yet-seen methods reachable directly
+// from t (interface methods or concrete methods of the receiver base
+// type), marking each method symbol Uniq to avoid duplicates.
+func expand0(t *types.Type) {
+	u := t
+	if u.IsPtr() {
+		u = u.Elem()
+	}
+
+	if u.IsInterface() {
+		for _, f := range u.Fields().Slice() {
+			if f.Sym.Uniq() {
+				continue
+			}
+			f.Sym.SetUniq(true)
+			slist = append(slist, symlink{field: f})
+		}
+
+		return
+	}
+
+	u = types.ReceiverBaseType(t)
+	if u != nil {
+		for _, f := range u.Methods().Slice() {
+			if f.Sym.Uniq() {
+				continue
+			}
+			f.Sym.SetUniq(true)
+			slist = append(slist, symlink{field: f})
+		}
+	}
+}
+
+// expand1 recursively collects (via expand0) the methods reachable from
+// t through embedded fields. top is true only for the root call, whose
+// own methods are excluded; cycles are broken with the Recur flag.
+func expand1(t *types.Type, top bool) {
+	if t.Recur() {
+		return
+	}
+	t.SetRecur(true)
+
+	if !top {
+		expand0(t)
+	}
+
+	u := t
+	if u.IsPtr() {
+		u = u.Elem()
+	}
+
+	if u.IsStruct() || u.IsInterface() {
+		for _, f := range u.Fields().Slice() {
+			if f.Embedded == 0 {
+				continue
+			}
+			if f.Sym == nil {
+				continue
+			}
+			expand1(f.Type, false)
+		}
+	}
+
+	t.SetRecur(false)
+}
+
+// ifacelookdot looks up method s in type t (following embedded fields)
+// for interface-satisfaction error reporting. It returns the method
+// found, if any, and whether it was reached through a pointer-typed
+// embedded field.
+func ifacelookdot(s *types.Sym, t *types.Type, ignorecase bool) (m *types.Field, followptr bool) {
+	if t == nil {
+		return nil, false
+	}
+
+	path, ambig := dotpath(s, t, &m, ignorecase)
+	if path == nil {
+		if ambig {
+			base.Errorf("%v.%v is ambiguous", t, s)
+		}
+		return nil, false
+	}
+
+	for _, d := range path {
+		if d.field.Type.IsPtr() {
+			followptr = true
+			break
+		}
+	}
+
+	if !m.IsMethod() {
+		base.Errorf("%v.%v is a field, not a method", t, s)
+		return nil, followptr
+	}
+
+	return m, followptr
+}
+
+// implements reports whether t implements the interface iface. On
+// failure it sets *m to the first missing/mismatched interface method,
+// *samename to a same-named method of t with the wrong type (if any),
+// and *ptr to nonzero when the method exists but has a pointer receiver
+// unreachable from a value of type t. Both method lists are assumed to
+// be sorted by name, allowing a single merged pass.
+func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool {
+	t0 := t
+	if t == nil {
+		return false
+	}
+
+	if t.IsInterface() {
+		i := 0
+		tms := t.Fields().Slice()
+		for _, im := range iface.Fields().Slice() {
+			for i < len(tms) && tms[i].Sym != im.Sym {
+				i++
+			}
+			if i == len(tms) {
+				*m = im
+				*samename = nil
+				*ptr = 0
+				return false
+			}
+			tm := tms[i]
+			if !types.Identical(tm.Type, im.Type) {
+				*m = im
+				*samename = tm
+				*ptr = 0
+				return false
+			}
+		}
+
+		return true
+	}
+
+	t = types.ReceiverBaseType(t)
+	var tms []*types.Field
+	if t != nil {
+		// Ensure the full (promoted) method set is available.
+		CalcMethods(t)
+		tms = t.AllMethods().Slice()
+	}
+	i := 0
+	for _, im := range iface.Fields().Slice() {
+		if im.Broke() {
+			continue
+		}
+		for i < len(tms) && tms[i].Sym != im.Sym {
+			i++
+		}
+		if i == len(tms) {
+			*m = im
+			*samename, _ = ifacelookdot(im.Sym, t, true)
+			*ptr = 0
+			return false
+		}
+		tm := tms[i]
+		if tm.Nointerface() || !types.Identical(tm.Type, im.Type) {
+			*m = im
+			*samename = tm
+			*ptr = 0
+			return false
+		}
+		followptr := tm.Embedded == 2
+
+		// if pointer receiver in method,
+		// the method does not exist for value types.
+		rcvr := tm.Type.Recv().Type
+		if rcvr.IsPtr() && !t0.IsPtr() && !followptr && !types.IsInterfaceMethod(tm.Type) {
+			if false && base.Flag.LowerR != 0 {
+				base.Errorf("interface pointer mismatch")
+			}
+
+			*m = im
+			*samename = nil
+			*ptr = 1
+			return false
+		}
+	}
+
+	return true
+}
+
+// isptrto reports whether t is a pointer to a type of kind et.
+func isptrto(t *types.Type, et types.Kind) bool {
+	if t == nil {
+		return false
+	}
+	if !t.IsPtr() {
+		return false
+	}
+	t = t.Elem()
+	if t == nil {
+		return false
+	}
+	if t.Kind() != et {
+		return false
+	}
+	return true
+}
+
+// lookdot0 returns the number of fields or methods named s associated
+// with Type t. If exactly one exists, it will be returned in *save
+// (if save is not nil). With ignorecase, method names are matched
+// case-insensitively.
+func lookdot0(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) int {
+	u := t
+	if u.IsPtr() {
+		u = u.Elem()
+	}
+
+	c := 0
+	if u.IsStruct() || u.IsInterface() {
+		for _, f := range u.Fields().Slice() {
+			if f.Sym == s || (ignorecase && f.IsMethod() && strings.EqualFold(f.Sym.Name, s.Name)) {
+				if save != nil {
+					*save = f
+				}
+				c++
+			}
+		}
+	}
+
+	u = t
+	if t.Sym() != nil && t.IsPtr() && !t.Elem().IsPtr() {
+		// If t is a defined pointer type, then x.m is shorthand for (*x).m.
+		u = t.Elem()
+	}
+	u = types.ReceiverBaseType(u)
+	if u != nil {
+		for _, f := range u.Methods().Slice() {
+			if f.Embedded == 0 && (f.Sym == s || (ignorecase && strings.EqualFold(f.Sym.Name, s.Name))) {
+				if save != nil {
+					*save = f
+				}
+				c++
+			}
+		}
+	}
+
+	return c
+}
+
+// slist is the scratch list of reachable methods collected by
+// expand0/expand1 and consumed by CalcMethods.
+var slist []symlink
+
+// Code to help generate trampoline functions for methods on embedded
+// types. These are approx the same as the corresponding adddot
+// routines except that they expect to be called with unique tasks and
+// they return the actual methods.
+
+// A symlink records one method discovered during method-set expansion.
+type symlink struct {
+	field *types.Field
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+)
+
+// LookupRuntime returns the Name node for the runtime function or
+// variable with the given name, failing fatally if it is not declared.
+func LookupRuntime(name string) *ir.Name {
+	s := ir.Pkgs.Runtime.Lookup(name)
+	if s == nil || s.Def == nil {
+		base.Fatalf("syslook: can't find runtime.%s", name)
+	}
+	return ir.AsNode(s.Def).(*ir.Name)
+}
+
+// SubstArgTypes substitutes the given list of types for
+// successive occurrences of the "any" placeholder in the
+// type syntax expression n.Type.
+// The result of SubstArgTypes MUST be assigned back to old, e.g.
+//	n.Left = SubstArgTypes(n.Left, t1, t2)
+func SubstArgTypes(old *ir.Name, types_ ...*types.Type) *ir.Name {
+	n := old.CloneName()
+
+	for _, t := range types_ {
+		types.CalcSize(t)
+	}
+	// SubstAny consumes types_ as it substitutes; leftovers mean the
+	// caller supplied more types than there were "any" placeholders.
+	n.SetType(types.SubstAny(n.Type(), &types_))
+	if len(types_) > 0 {
+		base.Fatalf("substArgTypes: too many argument types")
+	}
+	return n
+}
+
+// AutoLabel generates a new Name node for use with
+// an automatically generated label.
+// prefix is a short mnemonic (e.g. ".s" for switch)
+// to help with debugging.
+// It should begin with "." to avoid conflicts with
+// user labels.
+func AutoLabel(prefix string) *types.Sym {
+	if prefix[0] != '.' {
+		base.Fatalf("autolabel prefix must start with '.', have %q", prefix)
+	}
+	fn := ir.CurFunc
+	if ir.CurFunc == nil {
+		base.Fatalf("autolabel outside function")
+	}
+	// Each function carries its own label counter.
+	n := fn.Label
+	fn.Label++
+	return LookupNum(prefix, int(n))
+}
+
+// Lookup returns the symbol with the given name in the local package.
+func Lookup(name string) *types.Sym {
+	return types.LocalPkg.Lookup(name)
+}
+
+// loadsys loads the definitions for the low-level runtime functions,
+// so that the compiler can generate calls to them,
+// but does not make them visible to user code.
+func loadsys() {
+	types.Block = 1
+
+	// Temporarily enable import-mode typechecking while installing
+	// the generated runtime declarations.
+	inimport = true
+	TypecheckAllowed = true
+
+	typs := runtimeTypes()
+	for _, d := range &runtimeDecls {
+		sym := ir.Pkgs.Runtime.Lookup(d.name)
+		typ := typs[d.typ]
+		switch d.tag {
+		case funcTag:
+			importfunc(ir.Pkgs.Runtime, src.NoXPos, sym, typ)
+		case varTag:
+			importvar(ir.Pkgs.Runtime, src.NoXPos, sym, typ)
+		default:
+			base.Fatalf("unhandled declaration tag %v", d.tag)
+		}
+	}
+
+	TypecheckAllowed = false
+	inimport = false
+}
+
+// LookupRuntimeFunc looks up Go function name in package runtime. This function
+// must follow the internal calling convention.
+func LookupRuntimeFunc(name string) *obj.LSym {
+	s := ir.Pkgs.Runtime.Lookup(name)
+	s.SetFunc(true)
+	return s.Linksym()
+}
+
+// LookupRuntimeVar looks up a variable (or assembly function) name in package
+// runtime. If this is a function, it may have a special calling
+// convention.
+func LookupRuntimeVar(name string) *obj.LSym {
+	return ir.Pkgs.Runtime.Lookup(name).Linksym()
+}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+//go:generate go run mkbuiltin.go
+
+package typecheck
+
+import "cmd/compile/internal/ir"
+
+// Target is the package being compiled.
+var Target *ir.Package
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "go/constant"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+)
+
+// tcArrayType typechecks an OTARRAY node.
+func tcArrayType(n *ir.ArrayType) ir.Node {
+ n.Elem = typecheck(n.Elem, ctxType)
+ if n.Elem.Type() == nil {
+ return n
+ }
+ if n.Len == nil { // [...]T
+ if !n.Diag() {
+ n.SetDiag(true)
+ base.Errorf("use of [...] array outside of array literal")
+ }
+ return n
+ }
+ n.Len = indexlit(Expr(n.Len))
+ size := n.Len
+ if ir.ConstType(size) != constant.Int {
+ switch {
+ case size.Type() == nil:
+ // Error already reported elsewhere.
+ case size.Type().IsInteger() && size.Op() != ir.OLITERAL:
+ base.Errorf("non-constant array bound %v", size)
+ default:
+ base.Errorf("invalid array bound %v", size)
+ }
+ return n
+ }
+
+ v := size.Val()
+ if ir.ConstOverflow(v, types.Types[types.TINT]) {
+ base.Errorf("array bound is too large")
+ return n
+ }
+
+ if constant.Sign(v) < 0 {
+ base.Errorf("array bound must be non-negative")
+ return n
+ }
+
+ bound, _ := constant.Int64Val(v)
+ t := types.NewArray(n.Elem.Type(), bound)
+ n.SetOTYPE(t)
+ types.CheckSize(t)
+ return n
+}
+
+// tcChanType typechecks an OTCHAN node.
+func tcChanType(n *ir.ChanType) ir.Node {
+ n.Elem = typecheck(n.Elem, ctxType)
+ l := n.Elem
+ if l.Type() == nil {
+ return n
+ }
+ if l.Type().NotInHeap() {
+ base.Errorf("chan of incomplete (or unallocatable) type not allowed")
+ }
+ n.SetOTYPE(types.NewChan(l.Type(), n.Dir))
+ return n
+}
+
+// tcFuncType typechecks an OTFUNC node.
+func tcFuncType(n *ir.FuncType) ir.Node {
+ n.SetOTYPE(NewFuncType(n.Recv, n.Params, n.Results))
+ return n
+}
+
+// tcInterfaceType typechecks an OTINTER node.
+func tcInterfaceType(n *ir.InterfaceType) ir.Node {
+ n.SetOTYPE(tointerface(n.Methods))
+ return n
+}
+
+// tcMapType typechecks an OTMAP node.
+func tcMapType(n *ir.MapType) ir.Node {
+ n.Key = typecheck(n.Key, ctxType)
+ n.Elem = typecheck(n.Elem, ctxType)
+ l := n.Key
+ r := n.Elem
+ if l.Type() == nil || r.Type() == nil {
+ return n
+ }
+ if l.Type().NotInHeap() {
+ base.Errorf("incomplete (or unallocatable) map key not allowed")
+ }
+ if r.Type().NotInHeap() {
+ base.Errorf("incomplete (or unallocatable) map value not allowed")
+ }
+ n.SetOTYPE(types.NewMap(l.Type(), r.Type()))
+ mapqueue = append(mapqueue, n) // check map keys when all types are settled
+ return n
+}
+
+// tcSliceType typechecks an OTSLICE node.
+func tcSliceType(n *ir.SliceType) ir.Node {
+ n.Elem = typecheck(n.Elem, ctxType)
+ if n.Elem.Type() == nil {
+ return n
+ }
+ t := types.NewSlice(n.Elem.Type())
+ n.SetOTYPE(t)
+ types.CheckSize(t)
+ return n
+}
+
+// tcStructType typechecks an OTSTRUCT node.
+func tcStructType(n *ir.StructType) ir.Node {
+ n.SetOTYPE(NewStructType(n.Fields))
+ return n
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "fmt"
+ "go/constant"
+ "go/token"
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+)
+
+// Function collecting autotmps generated during typechecking,
+// to be included in the package-level init function.
+var InitTodoFunc = ir.NewFunc(base.Pos)
+
+var inimport bool // set during import
+
+var decldepth int32
+
+var TypecheckAllowed bool
+
+var (
+ NeedFuncSym = func(*types.Sym) {}
+ NeedITab = func(t, itype *types.Type) {}
+ NeedRuntimeType = func(*types.Type) {}
+)
+
+func Init() {
+ initUniverse()
+ DeclContext = ir.PEXTERN
+ base.Timer.Start("fe", "loadsys")
+ loadsys()
+}
+
+func Package() {
+ declareUniverse()
+
+ TypecheckAllowed = true
+
+ // Process top-level declarations in phases.
+
+ // Phase 1: const, type, and names and types of funcs.
+ // This will gather all the information about types
+ // and methods but doesn't depend on any of it.
+ //
+ // We also defer type alias declarations until phase 2
+ // to avoid cycles like #18640.
+ // TODO(gri) Remove this again once we have a fix for #25838.
+
+ // Don't use range--typecheck can add closures to Target.Decls.
+ base.Timer.Start("fe", "typecheck", "top1")
+ for i := 0; i < len(Target.Decls); i++ {
+ n := Target.Decls[i]
+ if op := n.Op(); op != ir.ODCL && op != ir.OAS && op != ir.OAS2 && (op != ir.ODCLTYPE || !n.(*ir.Decl).X.Name().Alias()) {
+ Target.Decls[i] = Stmt(n)
+ }
+ }
+
+ // Phase 2: Variable assignments.
+ // To check interface assignments, depends on phase 1.
+
+ // Don't use range--typecheck can add closures to Target.Decls.
+ base.Timer.Start("fe", "typecheck", "top2")
+ for i := 0; i < len(Target.Decls); i++ {
+ n := Target.Decls[i]
+ if op := n.Op(); op == ir.ODCL || op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.(*ir.Decl).X.Name().Alias() {
+ Target.Decls[i] = Stmt(n)
+ }
+ }
+
+ // Phase 3: Type check function bodies.
+ // Don't use range--typecheck can add closures to Target.Decls.
+ base.Timer.Start("fe", "typecheck", "func")
+ var fcount int64
+ for i := 0; i < len(Target.Decls); i++ {
+ n := Target.Decls[i]
+ if n.Op() == ir.ODCLFUNC {
+ FuncBody(n.(*ir.Func))
+ fcount++
+ }
+ }
+
+ // Phase 4: Check external declarations.
+ // TODO(mdempsky): This should be handled when type checking their
+ // corresponding ODCL nodes.
+ base.Timer.Start("fe", "typecheck", "externdcls")
+ for i, n := range Target.Externs {
+ if n.Op() == ir.ONAME {
+ Target.Externs[i] = Expr(Target.Externs[i])
+ }
+ }
+
+ // Phase 5: With all user code type-checked, it's now safe to verify map keys.
+ CheckMapKeys()
+
+ // Phase 6: Decide how to capture closed variables.
+ // This needs to run before escape analysis,
+ // because variables captured by value do not escape.
+ base.Timer.Start("fe", "capturevars")
+ for _, n := range Target.Decls {
+ if n.Op() == ir.ODCLFUNC {
+ n := n.(*ir.Func)
+ if n.OClosure != nil {
+ ir.CurFunc = n
+ CaptureVars(n)
+ }
+ }
+ }
+ CaptureVarsComplete = true
+ ir.CurFunc = nil
+
+ if base.Debug.TypecheckInl != 0 {
+ // Typecheck imported function bodies if Debug.l > 1,
+ // otherwise lazily when used or re-exported.
+ AllImportedBodies()
+ }
+}
+
+func AssignExpr(n ir.Node) ir.Node { return typecheck(n, ctxExpr|ctxAssign) }
+func Expr(n ir.Node) ir.Node { return typecheck(n, ctxExpr) }
+func Stmt(n ir.Node) ir.Node { return typecheck(n, ctxStmt) }
+
+func Exprs(exprs []ir.Node) { typecheckslice(exprs, ctxExpr) }
+func Stmts(stmts []ir.Node) { typecheckslice(stmts, ctxStmt) }
+
+func Call(call *ir.CallExpr) {
+ t := call.X.Type()
+ if t == nil {
+ panic("misuse of Call")
+ }
+ ctx := ctxStmt
+ if t.NumResults() > 0 {
+ ctx = ctxExpr | ctxMultiOK
+ }
+ if typecheck(call, ctx) != call {
+ panic("bad typecheck")
+ }
+}
+
+func Callee(n ir.Node) ir.Node {
+ return typecheck(n, ctxExpr|ctxCallee)
+}
+
+func FuncBody(n *ir.Func) {
+ ir.CurFunc = n
+ decldepth = 1
+ errorsBefore := base.Errors()
+ Stmts(n.Body)
+ CheckReturn(n)
+ if base.Errors() > errorsBefore {
+ n.Body.Set(nil) // type errors; do not compile
+ }
+ // Now that we've checked whether n terminates,
+ // we can eliminate some obviously dead code.
+ deadcode(n)
+}
+
+var importlist []*ir.Func
+
+func AllImportedBodies() {
+ for _, n := range importlist {
+ if n.Inl != nil {
+ ImportedBody(n)
+ }
+ }
+}
+
+var traceIndent []byte
+
+func tracePrint(title string, n ir.Node) func(np *ir.Node) {
+ indent := traceIndent
+
+ // guard against nil
+ var pos, op string
+ var tc uint8
+ if n != nil {
+ pos = base.FmtPos(n.Pos())
+ op = n.Op().String()
+ tc = n.Typecheck()
+ }
+
+ types.SkipSizeForTracing = true
+ defer func() { types.SkipSizeForTracing = false }()
+ fmt.Printf("%s: %s%s %p %s %v tc=%d\n", pos, indent, title, n, op, n, tc)
+ traceIndent = append(traceIndent, ". "...)
+
+ return func(np *ir.Node) {
+ traceIndent = traceIndent[:len(traceIndent)-2]
+
+ // if we have a result, use that
+ if np != nil {
+ n = *np
+ }
+
+ // guard against nil
+ // use outer pos, op so we don't get empty pos/op if n == nil (nicer output)
+ var tc uint8
+ var typ *types.Type
+ if n != nil {
+ pos = base.FmtPos(n.Pos())
+ op = n.Op().String()
+ tc = n.Typecheck()
+ typ = n.Type()
+ }
+
+ types.SkipSizeForTracing = true
+ defer func() { types.SkipSizeForTracing = false }()
+ fmt.Printf("%s: %s=> %p %s %v tc=%d type=%L\n", pos, indent, n, op, n, tc, typ)
+ }
+}
+
+const (
+ ctxStmt = 1 << iota // evaluated at statement level
+ ctxExpr // evaluated in value context
+ ctxType // evaluated in type context
+ ctxCallee // call-only expressions are ok
+ ctxMultiOK // multivalue function returns are ok
+ ctxAssign // assigning to expression
+)
+
+// type checks the whole tree of an expression.
+// calculates expression types.
+// evaluates compile time constants.
+// marks variables that escape the local frame.
+// rewrites n.Op to be more specific in some cases.
+
+var typecheckdefstack []ir.Node
+
+// Resolve ONONAME to definition, if any.
+func Resolve(n ir.Node) (res ir.Node) {
+ if n == nil || n.Op() != ir.ONONAME {
+ return n
+ }
+
+ // only trace if there's work to do
+ if base.EnableTrace && base.Flag.LowerT {
+ defer tracePrint("resolve", n)(&res)
+ }
+
+ if sym := n.Sym(); sym.Pkg != types.LocalPkg {
+ // We might have an ir.Ident from oldname or importDot.
+ if id, ok := n.(*ir.Ident); ok {
+ if pkgName := DotImportRefs[id]; pkgName != nil {
+ pkgName.Used = true
+ }
+ }
+
+ if inimport {
+ base.Fatalf("recursive inimport")
+ }
+ inimport = true
+ n = expandDecl(n)
+ inimport = false
+ return n
+ }
+
+ r := ir.AsNode(n.Sym().Def)
+ if r == nil {
+ return n
+ }
+
+ if r.Op() == ir.OIOTA {
+ if x := getIotaValue(); x >= 0 {
+ return ir.NewInt(x)
+ }
+ return n
+ }
+
+ return r
+}
+
+func typecheckslice(l []ir.Node, top int) {
+ for i := range l {
+ l[i] = typecheck(l[i], top)
+ }
+}
+
+var _typekind = []string{
+ types.TINT: "int",
+ types.TUINT: "uint",
+ types.TINT8: "int8",
+ types.TUINT8: "uint8",
+ types.TINT16: "int16",
+ types.TUINT16: "uint16",
+ types.TINT32: "int32",
+ types.TUINT32: "uint32",
+ types.TINT64: "int64",
+ types.TUINT64: "uint64",
+ types.TUINTPTR: "uintptr",
+ types.TCOMPLEX64: "complex64",
+ types.TCOMPLEX128: "complex128",
+ types.TFLOAT32: "float32",
+ types.TFLOAT64: "float64",
+ types.TBOOL: "bool",
+ types.TSTRING: "string",
+ types.TPTR: "pointer",
+ types.TUNSAFEPTR: "unsafe.Pointer",
+ types.TSTRUCT: "struct",
+ types.TINTER: "interface",
+ types.TCHAN: "chan",
+ types.TMAP: "map",
+ types.TARRAY: "array",
+ types.TSLICE: "slice",
+ types.TFUNC: "func",
+ types.TNIL: "nil",
+ types.TIDEAL: "untyped number",
+}
+
+func typekind(t *types.Type) string {
+ if t.IsUntyped() {
+ return fmt.Sprintf("%v", t)
+ }
+ et := t.Kind()
+ if int(et) < len(_typekind) {
+ s := _typekind[et]
+ if s != "" {
+ return s
+ }
+ }
+ return fmt.Sprintf("etype=%d", et)
+}
+
+func cycleFor(start ir.Node) []ir.Node {
+ // Find the start node in typecheck_tcstack.
+	// We know that it must exist because each time we mark
+	// a node with n.SetTypecheck(2) we push it on the stack,
+	// and each time we mark a node with n.SetTypecheck(1) we
+	// pop it from the stack. We hit a cycle when we encounter
+	// a node marked 2, in which case it must be on the stack.
+ i := len(typecheck_tcstack) - 1
+ for i > 0 && typecheck_tcstack[i] != start {
+ i--
+ }
+
+ // collect all nodes with same Op
+ var cycle []ir.Node
+ for _, n := range typecheck_tcstack[i:] {
+ if n.Op() == start.Op() {
+ cycle = append(cycle, n)
+ }
+ }
+
+ return cycle
+}
+
+func cycleTrace(cycle []ir.Node) string {
+ var s string
+ for i, n := range cycle {
+ s += fmt.Sprintf("\n\t%v: %v uses %v", ir.Line(n), n, cycle[(i+1)%len(cycle)])
+ }
+ return s
+}
+
+var typecheck_tcstack []ir.Node
+
+func Func(fn *ir.Func) {
+ new := Stmt(fn)
+ if new != fn {
+ base.Fatalf("typecheck changed func")
+ }
+}
+
+func typecheckNtype(n ir.Ntype) ir.Ntype {
+ return typecheck(n, ctxType).(ir.Ntype)
+}
+
+// typecheck type checks node n.
+// The result of typecheck MUST be assigned back to n, e.g.
+// n.Left = typecheck(n.Left, top)
+func typecheck(n ir.Node, top int) (res ir.Node) {
+ // cannot type check until all the source has been parsed
+ if !TypecheckAllowed {
+ base.Fatalf("early typecheck")
+ }
+
+ if n == nil {
+ return nil
+ }
+
+ // only trace if there's work to do
+ if base.EnableTrace && base.Flag.LowerT {
+ defer tracePrint("typecheck", n)(&res)
+ }
+
+ lno := ir.SetPos(n)
+
+ // Skip over parens.
+ for n.Op() == ir.OPAREN {
+ n = n.(*ir.ParenExpr).X
+ }
+
+ // Resolve definition of name and value of iota lazily.
+ n = Resolve(n)
+
+ // Skip typecheck if already done.
+ // But re-typecheck ONAME/OTYPE/OLITERAL/OPACK node in case context has changed.
+ if n.Typecheck() == 1 {
+ switch n.Op() {
+ case ir.ONAME, ir.OTYPE, ir.OLITERAL, ir.OPACK:
+ break
+
+ default:
+ base.Pos = lno
+ return n
+ }
+ }
+
+ if n.Typecheck() == 2 {
+		// Typechecking loop. Try printing a meaningful message,
+ // otherwise a stack trace of typechecking.
+ switch n.Op() {
+ // We can already diagnose variables used as types.
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ if top&(ctxExpr|ctxType) == ctxType {
+ base.Errorf("%v is not a type", n)
+ }
+
+ case ir.OTYPE:
+ // Only report a type cycle if we are expecting a type.
+ // Otherwise let other code report an error.
+ if top&ctxType == ctxType {
+ // A cycle containing only alias types is an error
+ // since it would expand indefinitely when aliases
+ // are substituted.
+ cycle := cycleFor(n)
+ for _, n1 := range cycle {
+ if n1.Name() != nil && !n1.Name().Alias() {
+ // Cycle is ok. But if n is an alias type and doesn't
+ // have a type yet, we have a recursive type declaration
+ // with aliases that we can't handle properly yet.
+ // Report an error rather than crashing later.
+ if n.Name() != nil && n.Name().Alias() && n.Type() == nil {
+ base.Pos = n.Pos()
+ base.Fatalf("cannot handle alias type declaration (issue #25838): %v", n)
+ }
+ base.Pos = lno
+ return n
+ }
+ }
+ base.ErrorfAt(n.Pos(), "invalid recursive type alias %v%s", n, cycleTrace(cycle))
+ }
+
+ case ir.OLITERAL:
+ if top&(ctxExpr|ctxType) == ctxType {
+ base.Errorf("%v is not a type", n)
+ break
+ }
+ base.ErrorfAt(n.Pos(), "constant definition loop%s", cycleTrace(cycleFor(n)))
+ }
+
+ if base.Errors() == 0 {
+ var trace string
+ for i := len(typecheck_tcstack) - 1; i >= 0; i-- {
+ x := typecheck_tcstack[i]
+ trace += fmt.Sprintf("\n\t%v %v", ir.Line(x), x)
+ }
+ base.Errorf("typechecking loop involving %v%s", n, trace)
+ }
+
+ base.Pos = lno
+ return n
+ }
+
+ typecheck_tcstack = append(typecheck_tcstack, n)
+
+ n.SetTypecheck(2)
+ n = typecheck1(n, top)
+ n.SetTypecheck(1)
+
+ last := len(typecheck_tcstack) - 1
+ typecheck_tcstack[last] = nil
+ typecheck_tcstack = typecheck_tcstack[:last]
+
+ _, isExpr := n.(ir.Expr)
+ _, isStmt := n.(ir.Stmt)
+ isMulti := false
+ switch n.Op() {
+ case ir.OCALLFUNC, ir.OCALLINTER, ir.OCALLMETH:
+ n := n.(*ir.CallExpr)
+ if t := n.X.Type(); t != nil && t.Kind() == types.TFUNC {
+ nr := t.NumResults()
+ isMulti = nr > 1
+ if nr == 0 {
+ isExpr = false
+ }
+ }
+ case ir.OAPPEND:
+ // Must be used (and not BinaryExpr/UnaryExpr).
+ isStmt = false
+ case ir.OCLOSE, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTN, ir.OVARKILL, ir.OVARLIVE:
+ // Must not be used.
+ isExpr = false
+ isStmt = true
+ case ir.OCOPY, ir.ORECOVER, ir.ORECV:
+ // Can be used or not.
+ isStmt = true
+ }
+
+ t := n.Type()
+ if t != nil && !t.IsFuncArgStruct() && n.Op() != ir.OTYPE {
+ switch t.Kind() {
+ case types.TFUNC, // might have TANY; wait until it's called
+ types.TANY, types.TFORW, types.TIDEAL, types.TNIL, types.TBLANK:
+ break
+
+ default:
+ types.CheckSize(t)
+ }
+ }
+ if t != nil {
+ n = EvalConst(n)
+ t = n.Type()
+ }
+
+ // TODO(rsc): Lots of the complexity here is because typecheck can
+ // see OTYPE, ONAME, and OLITERAL nodes multiple times.
+ // Once we make the IR a proper tree, we should be able to simplify
+ // this code a bit, especially the final case.
+ switch {
+ case top&(ctxStmt|ctxExpr) == ctxExpr && !isExpr && n.Op() != ir.OTYPE && !isMulti:
+ if !n.Diag() {
+ base.Errorf("%v used as value", n)
+ n.SetDiag(true)
+ }
+ if t != nil {
+ n.SetType(nil)
+ }
+
+ case top&ctxType == 0 && n.Op() == ir.OTYPE && t != nil:
+ if !n.Type().Broke() {
+ base.Errorf("type %v is not an expression", n.Type())
+ }
+ n.SetType(nil)
+
+ case top&(ctxStmt|ctxExpr) == ctxStmt && !isStmt && t != nil:
+ if !n.Diag() {
+ base.Errorf("%v evaluated but not used", n)
+ n.SetDiag(true)
+ }
+ n.SetType(nil)
+
+ case top&(ctxType|ctxExpr) == ctxType && n.Op() != ir.OTYPE && n.Op() != ir.ONONAME && (t != nil || n.Op() == ir.ONAME):
+ base.Errorf("%v is not a type", n)
+ if t != nil {
+ n.SetType(nil)
+ }
+
+ }
+
+ base.Pos = lno
+ return n
+}
+
+// indexlit implements typechecking of untyped values as
+// array/slice indexes. It is almost equivalent to defaultlit
+// but also accepts untyped numeric values representable as
+// value of type int (see also checkmake for comparison).
+// The result of indexlit MUST be assigned back to n, e.g.
+// n.Left = indexlit(n.Left)
+func indexlit(n ir.Node) ir.Node {
+ if n != nil && n.Type() != nil && n.Type().Kind() == types.TIDEAL {
+ return DefaultLit(n, types.Types[types.TINT])
+ }
+ return n
+}
+
+// typecheck1 should ONLY be called from typecheck.
+func typecheck1(n ir.Node, top int) ir.Node {
+ switch n.Op() {
+ case ir.OLITERAL, ir.ONAME, ir.ONONAME, ir.OTYPE:
+ if n.Sym() == nil {
+ return n
+ }
+
+ if n.Op() == ir.ONAME {
+ n := n.(*ir.Name)
+ if n.BuiltinOp != 0 && top&ctxCallee == 0 {
+ base.Errorf("use of builtin %v not in function call", n.Sym())
+ n.SetType(nil)
+ return n
+ }
+ }
+
+ typecheckdef(n)
+ if n.Op() == ir.ONONAME {
+ n.SetType(nil)
+ return n
+ }
+ }
+
+ switch n.Op() {
+ default:
+ ir.Dump("typecheck", n)
+ base.Fatalf("typecheck %v", n.Op())
+ panic("unreachable")
+
+ // names
+ case ir.OLITERAL:
+ if n.Type() == nil && n.Val().Kind() == constant.String {
+ base.Fatalf("string literal missing type")
+ }
+ return n
+
+ case ir.ONIL, ir.ONONAME:
+ return n
+
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ if n.Name().Decldepth == 0 {
+ n.Name().Decldepth = decldepth
+ }
+ if n.BuiltinOp != 0 {
+ return n
+ }
+ if top&ctxAssign == 0 {
+ // not a write to the variable
+ if ir.IsBlank(n) {
+ base.Errorf("cannot use _ as value")
+ n.SetType(nil)
+ return n
+ }
+ n.Name().SetUsed(true)
+ }
+ return n
+
+ case ir.ONAMEOFFSET:
+ // type already set
+ return n
+
+ case ir.OPACK:
+ n := n.(*ir.PkgName)
+ base.Errorf("use of package %v without selector", n.Sym())
+ n.SetType(nil)
+ return n
+
+ // types (ODEREF is with exprs)
+ case ir.OTYPE:
+ if n.Type() == nil {
+ return n
+ }
+ return n
+
+ case ir.OTSLICE:
+ n := n.(*ir.SliceType)
+ return tcSliceType(n)
+
+ case ir.OTARRAY:
+ n := n.(*ir.ArrayType)
+ return tcArrayType(n)
+
+ case ir.OTMAP:
+ n := n.(*ir.MapType)
+ return tcMapType(n)
+
+ case ir.OTCHAN:
+ n := n.(*ir.ChanType)
+ return tcChanType(n)
+
+ case ir.OTSTRUCT:
+ n := n.(*ir.StructType)
+ return tcStructType(n)
+
+ case ir.OTINTER:
+ n := n.(*ir.InterfaceType)
+ return tcInterfaceType(n)
+
+ case ir.OTFUNC:
+ n := n.(*ir.FuncType)
+ return tcFuncType(n)
+ // type or expr
+ case ir.ODEREF:
+ n := n.(*ir.StarExpr)
+ return tcStar(n, top)
+ // arithmetic exprs
+ case ir.OASOP,
+ ir.OADD,
+ ir.OAND,
+ ir.OANDAND,
+ ir.OANDNOT,
+ ir.ODIV,
+ ir.OEQ,
+ ir.OGE,
+ ir.OGT,
+ ir.OLE,
+ ir.OLT,
+ ir.OLSH,
+ ir.ORSH,
+ ir.OMOD,
+ ir.OMUL,
+ ir.ONE,
+ ir.OOR,
+ ir.OOROR,
+ ir.OSUB,
+ ir.OXOR:
+ return tcArith(n)
+
+ case ir.OBITNOT, ir.ONEG, ir.ONOT, ir.OPLUS:
+ n := n.(*ir.UnaryExpr)
+ return tcUnaryArith(n)
+
+ // exprs
+ case ir.OADDR:
+ n := n.(*ir.AddrExpr)
+ return tcAddr(n)
+
+ case ir.OCOMPLIT:
+ return tcCompLit(n.(*ir.CompLitExpr))
+
+ case ir.OXDOT, ir.ODOT:
+ n := n.(*ir.SelectorExpr)
+ return tcDot(n, top)
+
+ case ir.ODOTTYPE:
+ n := n.(*ir.TypeAssertExpr)
+ return tcDotType(n)
+
+ case ir.OINDEX:
+ n := n.(*ir.IndexExpr)
+ return tcIndex(n)
+
+ case ir.ORECV:
+ n := n.(*ir.UnaryExpr)
+ return tcRecv(n)
+
+ case ir.OSEND:
+ n := n.(*ir.SendStmt)
+ return tcSend(n)
+
+ case ir.OSLICEHEADER:
+ n := n.(*ir.SliceHeaderExpr)
+ return tcSliceHeader(n)
+
+ case ir.OMAKESLICECOPY:
+ n := n.(*ir.MakeExpr)
+ return tcMakeSliceCopy(n)
+
+ case ir.OSLICE, ir.OSLICE3:
+ n := n.(*ir.SliceExpr)
+ return tcSlice(n)
+
+ // call and call like
+ case ir.OCALL:
+ n := n.(*ir.CallExpr)
+ return tcCall(n, top)
+
+ case ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
+ n := n.(*ir.UnaryExpr)
+ n.SetType(types.Types[types.TUINTPTR])
+ return n
+
+ case ir.OCAP, ir.OLEN:
+ n := n.(*ir.UnaryExpr)
+ return tcLenCap(n)
+
+ case ir.OREAL, ir.OIMAG:
+ n := n.(*ir.UnaryExpr)
+ return tcRealImag(n)
+
+ case ir.OCOMPLEX:
+ n := n.(*ir.BinaryExpr)
+ return tcComplex(n)
+
+ case ir.OCLOSE:
+ n := n.(*ir.UnaryExpr)
+ return tcClose(n)
+
+ case ir.ODELETE:
+ n := n.(*ir.CallExpr)
+ return tcDelete(n)
+
+ case ir.OAPPEND:
+ n := n.(*ir.CallExpr)
+ return tcAppend(n)
+
+ case ir.OCOPY:
+ n := n.(*ir.BinaryExpr)
+ return tcCopy(n)
+
+ case ir.OCONV:
+ n := n.(*ir.ConvExpr)
+ return tcConv(n)
+
+ case ir.OMAKE:
+ n := n.(*ir.CallExpr)
+ return tcMake(n)
+
+ case ir.ONEW:
+ n := n.(*ir.UnaryExpr)
+ return tcNew(n)
+
+ case ir.OPRINT, ir.OPRINTN:
+ n := n.(*ir.CallExpr)
+ return tcPrint(n)
+
+ case ir.OPANIC:
+ n := n.(*ir.UnaryExpr)
+ return tcPanic(n)
+
+ case ir.ORECOVER:
+ n := n.(*ir.CallExpr)
+ return tcRecover(n)
+
+ case ir.OCLOSURE:
+ n := n.(*ir.ClosureExpr)
+ tcClosure(n, top)
+ if n.Type() == nil {
+ return n
+ }
+ return n
+
+ case ir.OITAB:
+ n := n.(*ir.UnaryExpr)
+ return tcITab(n)
+
+ case ir.OIDATA:
+ // Whoever creates the OIDATA node must know a priori the concrete type at that moment,
+ // usually by just having checked the OITAB.
+ n := n.(*ir.UnaryExpr)
+ base.Fatalf("cannot typecheck interface data %v", n)
+ panic("unreachable")
+
+ case ir.OSPTR:
+ n := n.(*ir.UnaryExpr)
+ return tcSPtr(n)
+
+ case ir.OCLOSUREREAD:
+ return n
+
+ case ir.OCFUNC:
+ n := n.(*ir.UnaryExpr)
+ n.X = Expr(n.X)
+ n.SetType(types.Types[types.TUINTPTR])
+ return n
+
+ case ir.OCONVNOP:
+ n := n.(*ir.ConvExpr)
+ n.X = Expr(n.X)
+ return n
+
+ // statements
+ case ir.OAS:
+ n := n.(*ir.AssignStmt)
+ tcAssign(n)
+
+ // Code that creates temps does not bother to set defn, so do it here.
+ if n.X.Op() == ir.ONAME && ir.IsAutoTmp(n.X) {
+ n.X.Name().Defn = n
+ }
+ return n
+
+ case ir.OAS2:
+ tcAssignList(n.(*ir.AssignListStmt))
+ return n
+
+ case ir.OBREAK,
+ ir.OCONTINUE,
+ ir.ODCL,
+ ir.OGOTO,
+ ir.OFALL,
+ ir.OVARKILL,
+ ir.OVARLIVE:
+ return n
+
+ case ir.OBLOCK:
+ n := n.(*ir.BlockStmt)
+ Stmts(n.List)
+ return n
+
+ case ir.OLABEL:
+ decldepth++
+ if n.Sym().IsBlank() {
+ // Empty identifier is valid but useless.
+ // Eliminate now to simplify life later.
+ // See issues 7538, 11589, 11593.
+ n = ir.NewBlockStmt(n.Pos(), nil)
+ }
+ return n
+
+ case ir.ODEFER, ir.OGO:
+ n := n.(*ir.GoDeferStmt)
+ n.Call = typecheck(n.Call, ctxStmt|ctxExpr)
+ if !n.Call.Diag() {
+ tcGoDefer(n)
+ }
+ return n
+
+ case ir.OFOR, ir.OFORUNTIL:
+ n := n.(*ir.ForStmt)
+ return tcFor(n)
+
+ case ir.OIF:
+ n := n.(*ir.IfStmt)
+ return tcIf(n)
+
+ case ir.ORETURN:
+ n := n.(*ir.ReturnStmt)
+ return tcReturn(n)
+
+ case ir.ORETJMP:
+ n := n.(*ir.BranchStmt)
+ return n
+
+ case ir.OSELECT:
+ tcSelect(n.(*ir.SelectStmt))
+ return n
+
+ case ir.OSWITCH:
+ tcSwitch(n.(*ir.SwitchStmt))
+ return n
+
+ case ir.ORANGE:
+ tcRange(n.(*ir.RangeStmt))
+ return n
+
+ case ir.OTYPESW:
+ n := n.(*ir.TypeSwitchGuard)
+ base.Errorf("use of .(type) outside type switch")
+ n.SetType(nil)
+ return n
+
+ case ir.ODCLFUNC:
+ tcFunc(n.(*ir.Func))
+ return n
+
+ case ir.ODCLCONST:
+ n := n.(*ir.Decl)
+ n.X = Expr(n.X)
+ return n
+
+ case ir.ODCLTYPE:
+ n := n.(*ir.Decl)
+ n.X = typecheck(n.X, ctxType)
+ types.CheckSize(n.X.Type())
+ return n
+ }
+
+ // No return n here!
+ // Individual cases can type-assert n, introducing a new one.
+ // Each must execute its own return n.
+}
+
+func typecheckargs(n ir.Node) {
+ var list []ir.Node
+ switch n := n.(type) {
+ default:
+ base.Fatalf("typecheckargs %+v", n.Op())
+ case *ir.CallExpr:
+ list = n.Args
+ if n.IsDDD {
+ Exprs(list)
+ return
+ }
+ case *ir.ReturnStmt:
+ list = n.Results
+ }
+ if len(list) != 1 {
+ Exprs(list)
+ return
+ }
+
+ typecheckslice(list, ctxExpr|ctxMultiOK)
+ t := list[0].Type()
+ if t == nil || !t.IsFuncArgStruct() {
+ return
+ }
+
+ // Rewrite f(g()) into t1, t2, ... = g(); f(t1, t2, ...).
+
+ // Save n as n.Orig for fmt.go.
+ if ir.Orig(n) == n {
+ n.(ir.OrigNode).SetOrig(ir.SepCopy(n))
+ }
+
+ as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
+ as.Rhs.Append(list...)
+
+ // If we're outside of function context, then this call will
+ // be executed during the generated init function. However,
+ // init.go hasn't yet created it. Instead, associate the
+ // temporary variables with initTodo for now, and init.go
+ // will reassociate them later when it's appropriate.
+ static := ir.CurFunc == nil
+ if static {
+ ir.CurFunc = InitTodoFunc
+ }
+ list = nil
+ for _, f := range t.FieldSlice() {
+ t := Temp(f.Type)
+ as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, t))
+ as.Lhs.Append(t)
+ list = append(list, t)
+ }
+ if static {
+ ir.CurFunc = nil
+ }
+
+ switch n := n.(type) {
+ case *ir.CallExpr:
+ n.Args.Set(list)
+ case *ir.ReturnStmt:
+ n.Results.Set(list)
+ }
+
+ n.PtrInit().Append(Stmt(as))
+}
+
+func checksliceindex(l ir.Node, r ir.Node, tp *types.Type) bool {
+ t := r.Type()
+ if t == nil {
+ return false
+ }
+ if !t.IsInteger() {
+ base.Errorf("invalid slice index %v (type %v)", r, t)
+ return false
+ }
+
+ if r.Op() == ir.OLITERAL {
+ x := r.Val()
+ if constant.Sign(x) < 0 {
+ base.Errorf("invalid slice index %v (index must be non-negative)", r)
+ return false
+ } else if tp != nil && tp.NumElem() >= 0 && constant.Compare(x, token.GTR, constant.MakeInt64(tp.NumElem())) {
+ base.Errorf("invalid slice index %v (out of bounds for %d-element array)", r, tp.NumElem())
+ return false
+ } else if ir.IsConst(l, constant.String) && constant.Compare(x, token.GTR, constant.MakeInt64(int64(len(ir.StringVal(l))))) {
+ base.Errorf("invalid slice index %v (out of bounds for %d-byte string)", r, len(ir.StringVal(l)))
+ return false
+ } else if ir.ConstOverflow(x, types.Types[types.TINT]) {
+ base.Errorf("invalid slice index %v (index too large)", r)
+ return false
+ }
+ }
+
+ return true
+}
+
+func checksliceconst(lo ir.Node, hi ir.Node) bool {
+ if lo != nil && hi != nil && lo.Op() == ir.OLITERAL && hi.Op() == ir.OLITERAL && constant.Compare(lo.Val(), token.GTR, hi.Val()) {
+ base.Errorf("invalid slice index: %v > %v", lo, hi)
+ return false
+ }
+
+ return true
+}
+
+// The result of implicitstar MUST be assigned back to n, e.g.
+// n.Left = implicitstar(n.Left)
+func implicitstar(n ir.Node) ir.Node {
+ // insert implicit * if needed for fixed array
+ t := n.Type()
+ if t == nil || !t.IsPtr() {
+ return n
+ }
+ t = t.Elem()
+ if t == nil {
+ return n
+ }
+ if !t.IsArray() {
+ return n
+ }
+ star := ir.NewStarExpr(base.Pos, n)
+ star.SetImplicit(true)
+ return Expr(star)
+}
+
+func needOneArg(n *ir.CallExpr, f string, args ...interface{}) (ir.Node, bool) {
+ if len(n.Args) == 0 {
+ p := fmt.Sprintf(f, args...)
+ base.Errorf("missing argument to %s: %v", p, n)
+ return nil, false
+ }
+
+ if len(n.Args) > 1 {
+ p := fmt.Sprintf(f, args...)
+ base.Errorf("too many arguments to %s: %v", p, n)
+ return n.Args[0], false
+ }
+
+ return n.Args[0], true
+}
+
+func needTwoArgs(n *ir.CallExpr) (ir.Node, ir.Node, bool) {
+ if len(n.Args) != 2 {
+ if len(n.Args) < 2 {
+ base.Errorf("not enough arguments in call to %v", n)
+ } else {
+ base.Errorf("too many arguments in call to %v", n)
+ }
+ return nil, nil, false
+ }
+ return n.Args[0], n.Args[1], true
+}
+
+func lookdot1(errnode ir.Node, s *types.Sym, t *types.Type, fs *types.Fields, dostrcmp int) *types.Field {
+ var r *types.Field
+ for _, f := range fs.Slice() {
+ if dostrcmp != 0 && f.Sym.Name == s.Name {
+ return f
+ }
+ if dostrcmp == 2 && strings.EqualFold(f.Sym.Name, s.Name) {
+ return f
+ }
+ if f.Sym != s {
+ continue
+ }
+ if r != nil {
+ if errnode != nil {
+ base.Errorf("ambiguous selector %v", errnode)
+ } else if t.IsPtr() {
+ base.Errorf("ambiguous selector (%v).%v", t, s)
+ } else {
+ base.Errorf("ambiguous selector %v.%v", t, s)
+ }
+ break
+ }
+
+ r = f
+ }
+
+ return r
+}
+
+// typecheckMethodExpr checks selector expressions (ODOT) where the
+// base expression is a type expression (OTYPE).
+func typecheckMethodExpr(n *ir.SelectorExpr) (res ir.Node) {
+ if base.EnableTrace && base.Flag.LowerT {
+ defer tracePrint("typecheckMethodExpr", n)(&res)
+ }
+
+ t := n.X.Type()
+
+ // Compute the method set for t.
+ var ms *types.Fields
+ if t.IsInterface() {
+ ms = t.Fields()
+ } else {
+ mt := types.ReceiverBaseType(t)
+ if mt == nil {
+ base.Errorf("%v undefined (type %v has no method %v)", n, t, n.Sel)
+ n.SetType(nil)
+ return n
+ }
+ CalcMethods(mt)
+ ms = mt.AllMethods()
+
+ // The method expression T.m requires a wrapper when T
+ // is different from m's declared receiver type. We
+ // normally generate these wrappers while writing out
+ // runtime type descriptors, which is always done for
+ // types declared at package scope. However, we need
+ // to make sure to generate wrappers for anonymous
+ // receiver types too.
+ if mt.Sym() == nil {
+ NeedRuntimeType(t)
+ }
+ }
+
+ s := n.Sel
+ m := lookdot1(n, s, t, ms, 0)
+ if m == nil {
+ if lookdot1(n, s, t, ms, 1) != nil {
+ base.Errorf("%v undefined (cannot refer to unexported method %v)", n, s)
+ } else if _, ambig := dotpath(s, t, nil, false); ambig {
+ base.Errorf("%v undefined (ambiguous selector)", n) // method or field
+ } else {
+ base.Errorf("%v undefined (type %v has no method %v)", n, t, s)
+ }
+ n.SetType(nil)
+ return n
+ }
+
+ if !types.IsMethodApplicable(t, m) {
+ base.Errorf("invalid method expression %v (needs pointer receiver: (*%v).%S)", n, t, s)
+ n.SetType(nil)
+ return n
+ }
+
+ me := ir.NewMethodExpr(n.Pos(), n.X.Type(), m)
+ me.SetType(NewMethodType(m.Type, n.X.Type()))
+ f := NewName(ir.MethodSym(t, m.Sym))
+ f.Class_ = ir.PFUNC
+ f.SetType(me.Type())
+ me.FuncName_ = f
+
+ // Issue 25065. Make sure that we emit the symbol for a local method.
+ if base.Ctxt.Flag_dynlink && !inimport && (t.Sym() == nil || t.Sym().Pkg == types.LocalPkg) {
+ NeedFuncSym(me.FuncName_.Sym())
+ }
+
+ return me
+}
+
+// derefall follows a chain of pointer types down to the first
+// non-pointer type: *T, **T, etc. all yield T. A nil type stays nil.
+func derefall(t *types.Type) *types.Type {
+	for t != nil && t.IsPtr() {
+		t = t.Elem()
+	}
+	return t
+}
+
+// lookdot resolves the selector n (of the form n.X.Sel) against type t,
+// looking for a field or method named n.Sel. On success it fills in
+// n.Offset, n.Type, n.Op, and n.Selection and returns the matching
+// field; on failure it returns nil. dostrcmp is passed through to
+// lookdot1 to control approximate name matching; a value greater than 1
+// means an error is already being diagnosed.
+func lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field {
+	s := n.Sel
+
+	types.CalcSize(t)
+	// f1: direct field (or interface method) lookup on t itself.
+	var f1 *types.Field
+	if t.IsStruct() || t.IsInterface() {
+		f1 = lookdot1(n, s, t, t.Fields(), dostrcmp)
+	}
+
+	// f2: method lookup on t's receiver base type.
+	var f2 *types.Field
+	if n.X.Type() == t || n.X.Type().Sym() == nil {
+		mt := types.ReceiverBaseType(t)
+		if mt != nil {
+			f2 = lookdot1(n, s, mt, mt.Methods(), dostrcmp)
+		}
+	}
+
+	if f1 != nil {
+		if dostrcmp > 1 || f1.Broke() {
+			// Already in the process of diagnosing an error.
+			return f1
+		}
+		if f2 != nil {
+			base.Errorf("%v is both field and method", n.Sel)
+		}
+		if f1.Offset == types.BADWIDTH {
+			base.Fatalf("lookdot badwidth %v %p", f1, f1)
+		}
+		n.Offset = f1.Offset
+		n.SetType(f1.Type)
+		if t.IsInterface() {
+			if n.X.Type().IsPtr() {
+				// Implicitly dereference a pointer to an interface value.
+				star := ir.NewStarExpr(base.Pos, n.X)
+				star.SetImplicit(true)
+				n.X = Expr(star)
+			}
+
+			n.SetOp(ir.ODOTINTER)
+		}
+		n.Selection = f1
+		return f1
+	}
+
+	if f2 != nil {
+		if dostrcmp > 1 {
+			// Already in the process of diagnosing an error.
+			return f2
+		}
+		tt := n.X.Type()
+		types.CalcSize(tt)
+		rcvr := f2.Type.Recv().Type
+		if !types.Identical(rcvr, tt) {
+			// Adapt the receiver expression to the method's receiver
+			// type by inserting implicit & or * operations.
+			if rcvr.IsPtr() && types.Identical(rcvr.Elem(), tt) {
+				checklvalue(n.X, "call pointer method on")
+				addr := NodAddr(n.X)
+				addr.SetImplicit(true)
+				n.X = typecheck(addr, ctxType|ctxExpr)
+			} else if tt.IsPtr() && (!rcvr.IsPtr() || rcvr.IsPtr() && rcvr.Elem().NotInHeap()) && types.Identical(tt.Elem(), rcvr) {
+				star := ir.NewStarExpr(base.Pos, n.X)
+				star.SetImplicit(true)
+				n.X = typecheck(star, ctxType|ctxExpr)
+			} else if tt.IsPtr() && tt.Elem().IsPtr() && types.Identical(derefall(tt), derefall(rcvr)) {
+				base.Errorf("calling method %v with receiver %L requires explicit dereference", n.Sel, n.X)
+				for tt.IsPtr() {
+					// Stop one level early for method with pointer receiver.
+					if rcvr.IsPtr() && !tt.Elem().IsPtr() {
+						break
+					}
+					star := ir.NewStarExpr(base.Pos, n.X)
+					star.SetImplicit(true)
+					n.X = typecheck(star, ctxType|ctxExpr)
+					tt = tt.Elem()
+				}
+			} else {
+				base.Fatalf("method mismatch: %v for %v", rcvr, tt)
+			}
+		}
+
+		// Walk back through the implicit dots/derefs inserted above to
+		// find the original operand.
+		implicit, ll := n.Implicit(), n.X
+		for ll != nil && (ll.Op() == ir.ODOT || ll.Op() == ir.ODOTPTR || ll.Op() == ir.ODEREF) {
+			switch l := ll.(type) {
+			case *ir.SelectorExpr:
+				implicit, ll = l.Implicit(), l.X
+			case *ir.StarExpr:
+				implicit, ll = l.Implicit(), l.X
+			}
+		}
+		if implicit && ll.Type().IsPtr() && ll.Type().Sym() != nil && ll.Type().Sym().Def != nil && ir.AsNode(ll.Type().Sym().Def).Op() == ir.OTYPE {
+			// It is invalid to automatically dereference a named pointer type when selecting a method.
+			// Make n.Left == ll to clarify error message.
+			n.X = ll
+			return nil
+		}
+
+		n.Sel = ir.MethodSym(n.X.Type(), f2.Sym)
+		n.Offset = f2.Offset
+		n.SetType(f2.Type)
+		n.SetOp(ir.ODOTMETH)
+		n.Selection = f2
+
+		return f2
+	}
+
+	return nil
+}
+
+// nokeys reports whether the node list l contains no key:value
+// (OKEY or OSTRUCTKEY) elements.
+func nokeys(l ir.Nodes) bool {
+	for _, n := range l {
+		if n.Op() == ir.OKEY || n.Op() == ir.OSTRUCTKEY {
+			return false
+		}
+	}
+	return true
+}
+
+// hasddd reports whether the parameter struct t ends in a
+// variadic (...) parameter.
+func hasddd(t *types.Type) bool {
+	for _, tl := range t.Fields().Slice() {
+		if tl.IsDDD() {
+			return true
+		}
+	}
+
+	return false
+}
+
+// typecheck assignment: type list = expression list
+//
+// typecheckaste checks that the expressions in nl are assignable to the
+// fields of tstruct (a parameter or result list), applying assignconvfn
+// to each element in place. op names the operation for error messages,
+// call is the called expression (nil outside calls), isddd records a
+// trailing f(x...) argument, and desc lazily supplies context for
+// conversion errors. Arity mismatches are reported via the notenough
+// and toomany labels below.
+func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl ir.Nodes, desc func() string) {
+	var t *types.Type
+	var i int
+
+	lno := base.Pos
+	defer func() { base.Pos = lno }()
+
+	if tstruct.Broke() {
+		return
+	}
+
+	var n ir.Node
+	if len(nl) == 1 {
+		n = nl[0]
+	}
+
+	// Check argument count against the parameter count up front.
+	n1 := tstruct.NumFields()
+	n2 := len(nl)
+	if !hasddd(tstruct) {
+		if n2 > n1 {
+			goto toomany
+		}
+		if n2 < n1 {
+			goto notenough
+		}
+	} else {
+		if !isddd {
+			if n2 < n1-1 {
+				goto notenough
+			}
+		} else {
+			if n2 > n1 {
+				goto toomany
+			}
+			if n2 < n1 {
+				goto notenough
+			}
+		}
+	}
+
+	i = 0
+	for _, tl := range tstruct.Fields().Slice() {
+		t = tl.Type
+		if tl.IsDDD() {
+			if isddd {
+				// f(slice...): the single remaining argument must
+				// convert to the slice parameter type itself.
+				if i >= len(nl) {
+					goto notenough
+				}
+				if len(nl)-i > 1 {
+					goto toomany
+				}
+				n = nl[i]
+				ir.SetPos(n)
+				if n.Type() != nil {
+					nl[i] = assignconvfn(n, t, desc)
+				}
+				return
+			}
+
+			// TODO(mdempsky): Make into ... call with implicit slice.
+			for ; i < len(nl); i++ {
+				n = nl[i]
+				ir.SetPos(n)
+				if n.Type() != nil {
+					nl[i] = assignconvfn(n, t.Elem(), desc)
+				}
+			}
+			return
+		}
+
+		if i >= len(nl) {
+			goto notenough
+		}
+		n = nl[i]
+		ir.SetPos(n)
+		if n.Type() != nil {
+			nl[i] = assignconvfn(n, t, desc)
+		}
+		i++
+	}
+
+	if i < len(nl) {
+		goto toomany
+	}
+	if isddd {
+		if call != nil {
+			base.Errorf("invalid use of ... in call to %v", call)
+		} else {
+			base.Errorf("invalid use of ... in %v", op)
+		}
+	}
+	return
+
+notenough:
+	if n == nil || (!n.Diag() && n.Type() != nil) {
+		details := errorDetails(nl, tstruct, isddd)
+		if call != nil {
+			// call is the expression being called, not the overall call.
+			// Method expressions have the form T.M, and the compiler has
+			// rewritten those to ONAME nodes but left T in Left.
+			if call.Op() == ir.OMETHEXPR {
+				call := call.(*ir.MethodExpr)
+				base.Errorf("not enough arguments in call to method expression %v%s", call, details)
+			} else {
+				base.Errorf("not enough arguments in call to %v%s", call, details)
+			}
+		} else {
+			base.Errorf("not enough arguments to %v%s", op, details)
+		}
+		if n != nil {
+			n.SetDiag(true)
+		}
+	}
+	return
+
+toomany:
+	details := errorDetails(nl, tstruct, isddd)
+	if call != nil {
+		base.Errorf("too many arguments in call to %v%s", call, details)
+	} else {
+		base.Errorf("too many arguments to %v%s", op, details)
+	}
+}
+
+// errorDetails returns a "have ... want ..." suffix for arity error
+// messages, or "" when any involved type is unknown.
+func errorDetails(nl ir.Nodes, tstruct *types.Type, isddd bool) string {
+	// If we don't know any type at a call site, let's suppress any return
+	// message signatures. See Issue https://golang.org/issues/19012.
+	if tstruct == nil {
+		return ""
+	}
+	// If any node has an unknown type, suppress it as well
+	for _, n := range nl {
+		if n.Type() == nil {
+			return ""
+		}
+	}
+	return fmt.Sprintf("\n\thave %s\n\twant %v", fmtSignature(nl, isddd), tstruct)
+}
+
+// sigrepr is a type's representation to the outside world,
+// in string representations of return signatures
+// e.g in error messages about wrong arguments to return.
+func sigrepr(t *types.Type, isddd bool) string {
+	switch t {
+	case types.UntypedString:
+		return "string"
+	case types.UntypedBool:
+		return "bool"
+	}
+
+	if t.Kind() == types.TIDEAL {
+		// "untyped number" is not commonly used
+		// outside of the compiler, so let's use "number".
+		// TODO(mdempsky): Revisit this.
+		return "number"
+	}
+
+	// Turn []T... argument to ...T for clearer error message.
+	if isddd {
+		if !t.IsSlice() {
+			base.Fatalf("bad type for ... argument: %v", t)
+		}
+		return "..." + t.Elem().String()
+	}
+	return t.String()
+}
+
+// sigerr returns the signature of the types at the call or return.
+func fmtSignature(nl ir.Nodes, isddd bool) string {
+	if len(nl) < 1 {
+		return "()"
+	}
+
+	var typeStrings []string
+	for i, n := range nl {
+		isdddArg := isddd && i == len(nl)-1
+		typeStrings = append(typeStrings, sigrepr(n.Type(), isdddArg))
+	}
+
+	return fmt.Sprintf("(%s)", strings.Join(typeStrings, ", "))
+}
+
+// type check composite
+
+// fielddup reports a duplicate-field error if name is already present
+// in hash, and records it otherwise.
+func fielddup(name string, hash map[string]bool) {
+	if hash[name] {
+		base.Errorf("duplicate field name in struct literal: %s", name)
+		return
+	}
+	hash[name] = true
+}
+
+// iscomptype reports whether type t is a composite literal type.
+func iscomptype(t *types.Type) bool {
+	switch t.Kind() {
+	case types.TARRAY, types.TSLICE, types.TSTRUCT, types.TMAP:
+		return true
+	default:
+		return false
+	}
+}
+
+// pushtype adds elided type information for composite literals if
+// appropriate, and returns the resulting expression.
+func pushtype(nn ir.Node, t *types.Type) ir.Node {
+	if nn == nil || nn.Op() != ir.OCOMPLIT {
+		return nn
+	}
+	n := nn.(*ir.CompLitExpr)
+	if n.Ntype != nil {
+		// The literal already has an explicit type.
+		return n
+	}
+
+	switch {
+	case iscomptype(t):
+		// For T, return T{...}.
+		n.Ntype = ir.TypeNode(t)
+
+	case t.IsPtr() && iscomptype(t.Elem()):
+		// For *T, return &T{...}.
+		n.Ntype = ir.TypeNode(t.Elem())
+
+		addr := NodAddrAt(n.Pos(), n)
+		addr.SetImplicit(true)
+		return addr
+	}
+	return n
+}
+
+// typecheckarraylit type-checks a sequence of slice/array literal elements.
+// elemType is the element type, bound is the array length (-1 for a
+// slice), elts are the literal's elements (updated in place), and ctx
+// names the literal kind for error messages. It returns the length
+// implied by the elements, accounting for explicit indices.
+func typecheckarraylit(elemType *types.Type, bound int64, elts []ir.Node, ctx string) int64 {
+	// If there are key/value pairs, create a map to keep seen
+	// keys so we can check for duplicate indices.
+	var indices map[int64]bool
+	for _, elt := range elts {
+		if elt.Op() == ir.OKEY {
+			indices = make(map[int64]bool)
+			break
+		}
+	}
+
+	var key, length int64
+	for i, elt := range elts {
+		ir.SetPos(elt)
+		r := elts[i]
+		var kv *ir.KeyExpr
+		if elt.Op() == ir.OKEY {
+			elt := elt.(*ir.KeyExpr)
+			elt.Key = Expr(elt.Key)
+			key = IndexConst(elt.Key)
+			if key < 0 {
+				if !elt.Key.Diag() {
+					if key == -2 {
+						base.Errorf("index too large")
+					} else {
+						base.Errorf("index must be non-negative integer constant")
+					}
+					elt.Key.SetDiag(true)
+				}
+				key = -(1 << 30) // stay negative for a while
+			}
+			kv = elt
+			r = elt.Value
+		}
+
+		r = pushtype(r, elemType)
+		r = Expr(r)
+		r = AssignConv(r, elemType, ctx)
+		if kv != nil {
+			kv.Value = r
+		} else {
+			elts[i] = r
+		}
+
+		if key >= 0 {
+			if indices != nil {
+				if indices[key] {
+					base.Errorf("duplicate index in %s: %d", ctx, key)
+				} else {
+					indices[key] = true
+				}
+			}
+
+			if bound >= 0 && key >= bound {
+				base.Errorf("array index %d out of bounds [0:%d]", key, bound)
+				bound = -1
+			}
+		}
+
+		key++
+		if key > length {
+			length = key
+		}
+	}
+
+	return length
+}
+
+// visible reports whether sym is exported or locally defined.
+func visible(sym *types.Sym) bool {
+	return sym != nil && (types.IsExported(sym.Name) || sym.Pkg == types.LocalPkg)
+}
+
+// nonexported reports whether sym is an unexported field.
+func nonexported(sym *types.Sym) bool {
+	return sym != nil && !types.IsExported(sym.Name)
+}
+
+// checklvalue reports an error if n is not assignable; verb names the
+// attempted operation for the message.
+func checklvalue(n ir.Node, verb string) {
+	if !ir.IsAssignable(n) {
+		base.Errorf("cannot %s %v", verb, n)
+	}
+}
+
+// checkassign verifies that n may appear on the left-hand side of the
+// assignment statement stmt, marking names as assigned and reporting
+// a specific error when n is not assignable.
+func checkassign(stmt ir.Node, n ir.Node) {
+	// Variables declared in ORANGE are assigned on every iteration.
+	if !ir.DeclaredBy(n, stmt) || stmt.Op() == ir.ORANGE {
+		r := ir.OuterValue(n)
+		if r.Op() == ir.ONAME {
+			r := r.(*ir.Name)
+			r.Name().SetAssigned(true)
+			if r.Name().IsClosureVar() {
+				r.Name().Defn.Name().SetAssigned(true)
+			}
+		}
+	}
+
+	if ir.IsAssignable(n) {
+		return
+	}
+	if n.Op() == ir.OINDEXMAP {
+		// Map index on the LHS is allowed; remember it for walk.
+		n := n.(*ir.IndexExpr)
+		n.Assigned = true
+		return
+	}
+
+	// have already complained about n being invalid
+	if n.Type() == nil {
+		return
+	}
+
+	switch {
+	case n.Op() == ir.ODOT && n.(*ir.SelectorExpr).X.Op() == ir.OINDEXMAP:
+		base.Errorf("cannot assign to struct field %v in map", n)
+	case (n.Op() == ir.OINDEX && n.(*ir.IndexExpr).X.Type().IsString()) || n.Op() == ir.OSLICESTR:
+		base.Errorf("cannot assign to %v (strings are immutable)", n)
+	case n.Op() == ir.OLITERAL && n.Sym() != nil && ir.IsConstNode(n):
+		base.Errorf("cannot assign to %v (declared const)", n)
+	default:
+		base.Errorf("cannot assign to %v", n)
+	}
+	n.SetType(nil)
+}
+
+// checkassignlist applies checkassign to every node in l.
+func checkassignlist(stmt ir.Node, l ir.Nodes) {
+	for _, n := range l {
+		checkassign(stmt, n)
+	}
+}
+
+// checkassignto reports an error if a value of type src cannot be
+// assigned to dst in a multiple assignment.
+func checkassignto(src *types.Type, dst ir.Node) {
+	if op, why := assignop(src, dst.Type()); op == ir.OXXX {
+		base.Errorf("cannot assign %v to %L in multiple assignment%s", src, dst, why)
+		return
+	}
+}
+
+// The result of stringtoruneslit MUST be assigned back to n, e.g.
+// n.Left = stringtoruneslit(n.Left)
+//
+// stringtoruneslit rewrites a []rune("const string") conversion into an
+// equivalent typechecked composite literal of rune values.
+func stringtoruneslit(n *ir.ConvExpr) ir.Node {
+	if n.X.Op() != ir.OLITERAL || n.X.Val().Kind() != constant.String {
+		base.Fatalf("stringtoarraylit %v", n)
+	}
+
+	var l []ir.Node
+	i := 0
+	for _, r := range ir.StringVal(n.X) {
+		l = append(l, ir.NewKeyExpr(base.Pos, ir.NewInt(int64(i)), ir.NewInt(int64(r))))
+		i++
+	}
+
+	nn := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(n.Type()).(ir.Ntype), nil)
+	nn.List.Set(l)
+	return Expr(nn)
+}
+
+// mapqueue holds map types whose key comparability check is deferred
+// until CheckMapKeys runs.
+var mapqueue []*ir.MapType
+
+// CheckMapKeys reports an error for every queued map type whose key
+// type is not comparable, then clears the queue.
+func CheckMapKeys() {
+	for _, n := range mapqueue {
+		k := n.Type().MapType().Key
+		if !k.Broke() && !types.IsComparable(k) {
+			base.ErrorfAt(n.Pos(), "invalid map key type %v", k)
+		}
+	}
+	mapqueue = nil
+}
+
+// typecheckdeftype typechecks the type declaration bound to n,
+// creating the named type, resolving its underlying type from n.Ntype,
+// and marking n fully typechecked.
+func typecheckdeftype(n *ir.Name) {
+	if base.EnableTrace && base.Flag.LowerT {
+		defer tracePrint("typecheckdeftype", n)(nil)
+	}
+
+	t := types.NewNamed(n)
+	t.Vargen = n.Vargen
+	if n.Pragma()&ir.NotInHeap != 0 {
+		t.SetNotInHeap(true)
+	}
+
+	n.SetType(t)
+	n.SetTypecheck(1)
+	n.SetWalkdef(1)
+
+	// Size calculation is deferred while the underlying type is
+	// resolved, since it may refer back to t.
+	types.DeferCheckSize()
+	errorsBefore := base.Errors()
+	n.Ntype = typecheckNtype(n.Ntype)
+	if underlying := n.Ntype.Type(); underlying != nil {
+		t.SetUnderlying(underlying)
+	} else {
+		n.SetDiag(true)
+		n.SetType(nil)
+	}
+	if t.Kind() == types.TFORW && base.Errors() > errorsBefore {
+		// Something went wrong during type-checking,
+		// but it was reported. Silence future errors.
+		t.SetBroke(true)
+	}
+	types.ResumeCheckSize()
+}
+
+// typecheckdef typechecks the declaration of n, resolving constants,
+// variables, and type declarations on first use. Walkdef is used as a
+// state marker (1 = done, 2 = in progress) to detect declaration loops.
+func typecheckdef(n ir.Node) {
+	if base.EnableTrace && base.Flag.LowerT {
+		defer tracePrint("typecheckdef", n)(nil)
+	}
+
+	lno := ir.SetPos(n)
+
+	if n.Op() == ir.ONONAME {
+		if !n.Diag() {
+			n.SetDiag(true)
+
+			// Note: adderrorname looks for this string and
+			// adds context about the outer expression
+			base.ErrorfAt(base.Pos, "undefined: %v", n.Sym())
+		}
+		base.Pos = lno
+		return
+	}
+
+	if n.Walkdef() == 1 {
+		base.Pos = lno
+		return
+	}
+
+	typecheckdefstack = append(typecheckdefstack, n)
+	if n.Walkdef() == 2 {
+		// n is already being typechecked further up the stack:
+		// the declarations form a cycle.
+		base.FlushErrors()
+		fmt.Printf("typecheckdef loop:")
+		for i := len(typecheckdefstack) - 1; i >= 0; i-- {
+			n := typecheckdefstack[i]
+			fmt.Printf(" %v", n.Sym())
+		}
+		fmt.Printf("\n")
+		base.Fatalf("typecheckdef loop")
+	}
+
+	n.SetWalkdef(2)
+
+	if n.Type() != nil || n.Sym() == nil { // builtin or no name
+		goto ret
+	}
+
+	switch n.Op() {
+	default:
+		base.Fatalf("typecheckdef %v", n.Op())
+
+	case ir.OLITERAL:
+		// Constant declaration: resolve the declared type (if any),
+		// evaluate the initializer, and record the constant value.
+		if n.Name().Ntype != nil {
+			n.Name().Ntype = typecheckNtype(n.Name().Ntype)
+			n.SetType(n.Name().Ntype.Type())
+			n.Name().Ntype = nil
+			if n.Type() == nil {
+				n.SetDiag(true)
+				goto ret
+			}
+		}
+
+		e := n.Name().Defn
+		n.Name().Defn = nil
+		if e == nil {
+			ir.Dump("typecheckdef nil defn", n)
+			base.ErrorfAt(n.Pos(), "xxx")
+		}
+
+		e = Expr(e)
+		if e.Type() == nil {
+			goto ret
+		}
+		if !ir.IsConstNode(e) {
+			if !e.Diag() {
+				if e.Op() == ir.ONIL {
+					base.ErrorfAt(n.Pos(), "const initializer cannot be nil")
+				} else {
+					base.ErrorfAt(n.Pos(), "const initializer %v is not a constant", e)
+				}
+				e.SetDiag(true)
+			}
+			goto ret
+		}
+
+		t := n.Type()
+		if t != nil {
+			if !ir.OKForConst[t.Kind()] {
+				base.ErrorfAt(n.Pos(), "invalid constant type %v", t)
+				goto ret
+			}
+
+			if !e.Type().IsUntyped() && !types.Identical(t, e.Type()) {
+				base.ErrorfAt(n.Pos(), "cannot use %L as type %v in const initializer", e, t)
+				goto ret
+			}
+
+			e = convlit(e, t)
+		}
+
+		n.SetType(e.Type())
+		if n.Type() != nil {
+			n.SetVal(e.Val())
+		}
+
+	case ir.ONAME:
+		// Variable declaration: derive the type from the explicit
+		// type expression or from the defining assignment.
+		n := n.(*ir.Name)
+		if n.Name().Ntype != nil {
+			n.Name().Ntype = typecheckNtype(n.Name().Ntype)
+			n.SetType(n.Name().Ntype.Type())
+			if n.Type() == nil {
+				n.SetDiag(true)
+				goto ret
+			}
+		}
+
+		if n.Type() != nil {
+			break
+		}
+		if n.Name().Defn == nil {
+			if n.BuiltinOp != 0 { // like OPRINTN
+				break
+			}
+			if base.Errors() > 0 {
+				// Can have undefined variables in x := foo
+				// that make x have an n.name.Defn == nil.
+				// If there are other errors anyway, don't
+				// bother adding to the noise.
+				break
+			}
+
+			base.Fatalf("var without type, init: %v", n.Sym())
+		}
+
+		if n.Name().Defn.Op() == ir.ONAME {
+			n.Name().Defn = Expr(n.Name().Defn)
+			n.SetType(n.Name().Defn.Type())
+			break
+		}
+
+		n.Name().Defn = Stmt(n.Name().Defn) // fills in n.Type
+
+	case ir.OTYPE:
+		n := n.(*ir.Name)
+		if n.Alias() {
+			// Type alias declaration: Simply use the rhs type - no need
+			// to create a new type.
+			// If we have a syntax error, name.Ntype may be nil.
+			if n.Ntype != nil {
+				n.Ntype = typecheckNtype(n.Ntype)
+				n.SetType(n.Ntype.Type())
+				if n.Type() == nil {
+					n.SetDiag(true)
+					goto ret
+				}
+				// For package-level type aliases, set n.Sym.Def so we can identify
+				// it as a type alias during export. See also #31959.
+				if n.Curfn == nil {
+					n.Sym().Def = n.Ntype
+				}
+			}
+			break
+		}
+
+		// regular type declaration
+		typecheckdeftype(n)
+	}
+
+ret:
+	if n.Op() != ir.OLITERAL && n.Type() != nil && n.Type().IsUntyped() {
+		base.Fatalf("got %v for %v", n.Type(), n)
+	}
+	last := len(typecheckdefstack) - 1
+	if typecheckdefstack[last] != n {
+		base.Fatalf("typecheckdefstack mismatch")
+	}
+	typecheckdefstack[last] = nil
+	typecheckdefstack = typecheckdefstack[:last]
+
+	base.Pos = lno
+	n.SetWalkdef(1)
+}
+
+// checkmake validates one size argument (len or cap, named by arg) of
+// make(t, ...). It reports an error for non-integer, negative, or
+// overflowing constant arguments, converts *np to int via DefaultLit,
+// and reports whether the argument is acceptable.
+func checkmake(t *types.Type, arg string, np *ir.Node) bool {
+	n := *np
+	if !n.Type().IsInteger() && n.Type().Kind() != types.TIDEAL {
+		base.Errorf("non-integer %s argument in make(%v) - %v", arg, t, n.Type())
+		return false
+	}
+
+	// Do range checks for constants before defaultlit
+	// to avoid redundant "constant NNN overflows int" errors.
+	if n.Op() == ir.OLITERAL {
+		v := toint(n.Val())
+		if constant.Sign(v) < 0 {
+			base.Errorf("negative %s argument in make(%v)", arg, t)
+			return false
+		}
+		if ir.ConstOverflow(v, types.Types[types.TINT]) {
+			base.Errorf("%s argument too large in make(%v)", arg, t)
+			return false
+		}
+	}
+
+	// defaultlit is necessary for non-constants too: n might be 1.1<<k.
+	// TODO(gri) The length argument requirements for (array/slice) make
+	// are the same as for index expressions. Factor the code better;
+	// for instance, indexlit might be called here and incorporate some
+	// of the bounds checks done for make.
+	n = DefaultLit(n, types.Types[types.TINT])
+	*np = n
+
+	return true
+}
+
+// markBreak marks control statements containing break statements with SetHasBreak(true).
+func markBreak(fn *ir.Func) {
+	// labels maps a label symbol to the labeled control statement;
+	// implicit is the innermost enclosing breakable statement.
+	var labels map[*types.Sym]ir.Node
+	var implicit ir.Node
+
+	var mark func(ir.Node) error
+	mark = func(n ir.Node) error {
+		switch n.Op() {
+		default:
+			ir.DoChildren(n, mark)
+
+		case ir.OBREAK:
+			n := n.(*ir.BranchStmt)
+			if n.Label == nil {
+				setHasBreak(implicit)
+			} else {
+				setHasBreak(labels[n.Label])
+			}
+
+		case ir.OFOR, ir.OFORUNTIL, ir.OSWITCH, ir.OSELECT, ir.ORANGE:
+			old := implicit
+			implicit = n
+			var sym *types.Sym
+			switch n := n.(type) {
+			case *ir.ForStmt:
+				sym = n.Label
+			case *ir.RangeStmt:
+				sym = n.Label
+			case *ir.SelectStmt:
+				sym = n.Label
+			case *ir.SwitchStmt:
+				sym = n.Label
+			}
+			if sym != nil {
+				if labels == nil {
+					// Map creation delayed until we need it - most functions don't.
+					labels = make(map[*types.Sym]ir.Node)
+				}
+				labels[sym] = n
+			}
+			ir.DoChildren(n, mark)
+			if sym != nil {
+				delete(labels, sym)
+			}
+			implicit = old
+		}
+		return nil
+	}
+
+	mark(fn)
+}
+
+// controlLabel returns the label symbol attached to the breakable
+// control statement n, or fails for any other node kind.
+func controlLabel(n ir.Node) *types.Sym {
+	switch n := n.(type) {
+	default:
+		base.Fatalf("controlLabel %+v", n.Op())
+		return nil
+	case *ir.ForStmt:
+		return n.Label
+	case *ir.RangeStmt:
+		return n.Label
+	case *ir.SelectStmt:
+		return n.Label
+	case *ir.SwitchStmt:
+		return n.Label
+	}
+}
+
+// setHasBreak records on the control statement n that it contains a
+// break targeting it. A nil n (break with no enclosing target in view)
+// is ignored.
+func setHasBreak(n ir.Node) {
+	switch n := n.(type) {
+	default:
+		base.Fatalf("setHasBreak %+v", n.Op())
+	case nil:
+		// ignore
+	case *ir.ForStmt:
+		n.HasBreak = true
+	case *ir.RangeStmt:
+		n.HasBreak = true
+	case *ir.SelectStmt:
+		n.HasBreak = true
+	case *ir.SwitchStmt:
+		n.HasBreak = true
+	}
+}
+
+// isTermNodes reports whether the Nodes list ends with a terminating statement.
+func isTermNodes(l ir.Nodes) bool {
+	s := l
+	c := len(s)
+	if c == 0 {
+		return false
+	}
+	return isTermNode(s[c-1])
+}
+
+// isTermNode reports whether the node n, the last one in a
+// statement list, is a terminating statement.
+func isTermNode(n ir.Node) bool {
+	switch n.Op() {
+	// NOTE: OLABEL is treated as a separate statement,
+	// not a separate prefix, so skipping to the last statement
+	// in the block handles the labeled statement case by
+	// skipping over the label. No case OLABEL here.
+
+	case ir.OBLOCK:
+		n := n.(*ir.BlockStmt)
+		return isTermNodes(n.List)
+
+	case ir.OGOTO, ir.ORETURN, ir.ORETJMP, ir.OPANIC, ir.OFALL:
+		return true
+
+	case ir.OFOR, ir.OFORUNTIL:
+		// A for loop terminates only if it has no condition and no break.
+		n := n.(*ir.ForStmt)
+		if n.Cond != nil {
+			return false
+		}
+		if n.HasBreak {
+			return false
+		}
+		return true
+
+	case ir.OIF:
+		n := n.(*ir.IfStmt)
+		return isTermNodes(n.Body) && isTermNodes(n.Else)
+
+	case ir.OSWITCH:
+		// A switch terminates if it has a default case, every case body
+		// terminates, and nothing breaks out of it.
+		n := n.(*ir.SwitchStmt)
+		if n.HasBreak {
+			return false
+		}
+		def := false
+		for _, cas := range n.Cases {
+			cas := cas.(*ir.CaseStmt)
+			if !isTermNodes(cas.Body) {
+				return false
+			}
+			if len(cas.List) == 0 { // default
+				def = true
+			}
+		}
+		return def
+
+	case ir.OSELECT:
+		n := n.(*ir.SelectStmt)
+		if n.HasBreak {
+			return false
+		}
+		for _, cas := range n.Cases {
+			cas := cas.(*ir.CaseStmt)
+			if !isTermNodes(cas.Body) {
+				return false
+			}
+		}
+		return true
+	}
+
+	return false
+}
+
+// CheckReturn makes sure that fn terminates appropriately.
+func CheckReturn(fn *ir.Func) {
+	if fn.Type().NumResults() != 0 && len(fn.Body) != 0 {
+		markBreak(fn)
+		if !isTermNodes(fn.Body) {
+			base.ErrorfAt(fn.Endlineno, "missing return at end of function")
+		}
+	}
+}
+
+// deadcode removes dead code from fn's body. If after deadcodeslice
+// the body consists only of trivially-dead statements, it is replaced
+// with a single empty block.
+func deadcode(fn *ir.Func) {
+	deadcodeslice(&fn.Body)
+
+	if len(fn.Body) == 0 {
+		return
+	}
+
+	for _, n := range fn.Body {
+		if len(n.Init()) > 0 {
+			return
+		}
+		switch n.Op() {
+		case ir.OIF:
+			n := n.(*ir.IfStmt)
+			if !ir.IsConst(n.Cond, constant.Bool) || len(n.Body) > 0 || len(n.Else) > 0 {
+				return
+			}
+		case ir.OFOR:
+			n := n.(*ir.ForStmt)
+			if !ir.IsConst(n.Cond, constant.Bool) || ir.BoolVal(n.Cond) {
+				return
+			}
+		default:
+			return
+		}
+	}
+
+	fn.Body.Set([]ir.Node{ir.NewBlockStmt(base.Pos, nil)})
+}
+
+// deadcodeslice removes dead statements from *nn in place, folding
+// constant if conditions and recursing into nested statement lists.
+// Statements after a terminating return/panic may be truncated, but
+// never past the last label (it could be a goto target; issue 28616).
+func deadcodeslice(nn *ir.Nodes) {
+	var lastLabel = -1
+	for i, n := range *nn {
+		if n != nil && n.Op() == ir.OLABEL {
+			lastLabel = i
+		}
+	}
+	for i, n := range *nn {
+		// Cut is set to true when all nodes after i'th position
+		// should be removed.
+		// In other words, it marks whole slice "tail" as dead.
+		cut := false
+		if n == nil {
+			continue
+		}
+		if n.Op() == ir.OIF {
+			n := n.(*ir.IfStmt)
+			n.Cond = deadcodeexpr(n.Cond)
+			if ir.IsConst(n.Cond, constant.Bool) {
+				// Drop the branch that can never run.
+				var body ir.Nodes
+				if ir.BoolVal(n.Cond) {
+					n.Else = ir.Nodes{}
+					body = n.Body
+				} else {
+					n.Body = ir.Nodes{}
+					body = n.Else
+				}
+				// If "then" or "else" branch ends with panic or return statement,
+				// it is safe to remove all statements after this node.
+				// isterminating is not used to avoid goto-related complications.
+				// We must be careful not to deadcode-remove labels, as they
+				// might be the target of a goto. See issue 28616.
+				if body := body; len(body) != 0 {
+					switch body[(len(body) - 1)].Op() {
+					case ir.ORETURN, ir.ORETJMP, ir.OPANIC:
+						if i > lastLabel {
+							cut = true
+						}
+					}
+				}
+			}
+		}
+
+		// Recurse into nested statement lists.
+		deadcodeslice(n.PtrInit())
+		switch n.Op() {
+		case ir.OBLOCK:
+			n := n.(*ir.BlockStmt)
+			deadcodeslice(&n.List)
+		case ir.OCASE:
+			n := n.(*ir.CaseStmt)
+			deadcodeslice(&n.Body)
+		case ir.OFOR:
+			n := n.(*ir.ForStmt)
+			deadcodeslice(&n.Body)
+		case ir.OIF:
+			n := n.(*ir.IfStmt)
+			deadcodeslice(&n.Body)
+			deadcodeslice(&n.Else)
+		case ir.ORANGE:
+			n := n.(*ir.RangeStmt)
+			deadcodeslice(&n.Body)
+		case ir.OSELECT:
+			n := n.(*ir.SelectStmt)
+			deadcodeslice(&n.Cases)
+		case ir.OSWITCH:
+			n := n.(*ir.SwitchStmt)
+			deadcodeslice(&n.Cases)
+		}
+
+		if cut {
+			nn.Set((*nn)[:i+1])
+			break
+		}
+	}
+}
+
+// deadcodeexpr simplifies n for use as an if condition.
+func deadcodeexpr(n ir.Node) ir.Node {
+	// Perform dead-code elimination on short-circuited boolean
+	// expressions involving constants with the intent of
+	// producing a constant 'if' condition.
+	switch n.Op() {
+	case ir.OANDAND:
+		n := n.(*ir.LogicalExpr)
+		n.X = deadcodeexpr(n.X)
+		n.Y = deadcodeexpr(n.Y)
+		if ir.IsConst(n.X, constant.Bool) {
+			if ir.BoolVal(n.X) {
+				return n.Y // true && x => x
+			} else {
+				return n.X // false && x => false
+			}
+		}
+	case ir.OOROR:
+		n := n.(*ir.LogicalExpr)
+		n.X = deadcodeexpr(n.X)
+		n.Y = deadcodeexpr(n.Y)
+		if ir.IsConst(n.X, constant.Bool) {
+			if ir.BoolVal(n.X) {
+				return n.X // true || x => true
+			} else {
+				return n.Y // false || x => x
+			}
+		}
+	}
+	return n
+}
+
+// getIotaValue returns the current value for "iota",
+// or -1 if not within a ConstSpec.
+func getIotaValue() int64 {
+	// Prefer the innermost constant declaration being typechecked.
+	if i := len(typecheckdefstack); i > 0 {
+		if x := typecheckdefstack[i-1]; x.Op() == ir.OLITERAL {
+			return x.(*ir.Name).Iota()
+		}
+	}
+
+	if ir.CurFunc != nil && ir.CurFunc.Iota >= 0 {
+		return ir.CurFunc.Iota
+	}
+
+	return -1
+}
+
+// curpkg returns the current package, based on Curfn.
+func curpkg() *types.Pkg {
+	fn := ir.CurFunc
+	if fn == nil {
+		// Initialization expressions for package-scope variables.
+		return types.LocalPkg
+	}
+	return fnpkg(fn.Nname)
+}
+
+// Conv converts node n to type t using the OCONV op and
+// typechecks the result. If n already has type t it is returned
+// unchanged.
+func Conv(n ir.Node, t *types.Type) ir.Node {
+	if types.Identical(n.Type(), t) {
+		return n
+	}
+	n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
+	n.SetType(t)
+	n = Expr(n)
+	return n
+}
+
+// ConvNop converts node n to type t using the OCONVNOP op
+// and typechecks the result with ctxExpr.
+func ConvNop(n ir.Node, t *types.Type) ir.Node {
+	if types.Identical(n.Type(), t) {
+		return n
+	}
+	n = ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, n)
+	n.SetType(t)
+	n = Expr(n)
+	return n
+}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// TODO(gri) This file should probably become part of package types.
-
-package gc
+package typecheck
import (
+ "go/constant"
+
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/src"
- "go/constant"
+)
+
+var (
+ okfor [ir.OEND][]bool
+ iscmp [ir.OEND]bool
+)
+
+var (
+ okforeq [types.NTYPE]bool
+ okforadd [types.NTYPE]bool
+ okforand [types.NTYPE]bool
+ okfornone [types.NTYPE]bool
+ okforbool [types.NTYPE]bool
+ okforcap [types.NTYPE]bool
+ okforlen [types.NTYPE]bool
+ okforarith [types.NTYPE]bool
)
var basicTypes = [...]struct {
// initUniverse initializes the universe block.
func initUniverse() {
- if Widthptr == 0 {
+ if types.PtrSize == 0 {
base.Fatalf("typeinit before betypeinit")
}
- slicePtrOffset = 0
- sliceLenOffset = Rnd(slicePtrOffset+int64(Widthptr), int64(Widthptr))
- sliceCapOffset = Rnd(sliceLenOffset+int64(Widthptr), int64(Widthptr))
- sizeofSlice = Rnd(sliceCapOffset+int64(Widthptr), int64(Widthptr))
+ types.SlicePtrOffset = 0
+ types.SliceLenOffset = types.Rnd(types.SlicePtrOffset+int64(types.PtrSize), int64(types.PtrSize))
+ types.SliceCapOffset = types.Rnd(types.SliceLenOffset+int64(types.PtrSize), int64(types.PtrSize))
+ types.SliceSize = types.Rnd(types.SliceCapOffset+int64(types.PtrSize), int64(types.PtrSize))
// string is same as slice wo the cap
- sizeofString = Rnd(sliceLenOffset+int64(Widthptr), int64(Widthptr))
+ types.StringSize = types.Rnd(types.SliceLenOffset+int64(types.PtrSize), int64(types.PtrSize))
for et := types.Kind(0); et < types.NTYPE; et++ {
- simtype[et] = et
+ types.SimType[et] = et
}
types.Types[types.TANY] = types.New(types.TANY)
n.SetType(t)
sym.Def = n
if kind != types.TANY {
- dowidth(t)
+ types.CalcSize(t)
}
return t
}
for _, s := range &typedefs {
sameas := s.sameas32
- if Widthptr == 8 {
+ if types.PtrSize == 8 {
sameas = s.sameas64
}
- simtype[s.etype] = sameas
+ types.SimType[s.etype] = sameas
types.Types[s.etype] = defBasic(s.etype, types.BuiltinPkg, s.name)
}
types.ErrorType.SetUnderlying(makeErrorInterface())
n.SetType(types.ErrorType)
s.Def = n
- dowidth(types.ErrorType)
+ types.CalcSize(types.ErrorType)
- types.Types[types.TUNSAFEPTR] = defBasic(types.TUNSAFEPTR, unsafepkg, "Pointer")
+ types.Types[types.TUNSAFEPTR] = defBasic(types.TUNSAFEPTR, ir.Pkgs.Unsafe, "Pointer")
// simple aliases
- simtype[types.TMAP] = types.TPTR
- simtype[types.TCHAN] = types.TPTR
- simtype[types.TFUNC] = types.TPTR
- simtype[types.TUNSAFEPTR] = types.TPTR
+ types.SimType[types.TMAP] = types.TPTR
+ types.SimType[types.TCHAN] = types.TPTR
+ types.SimType[types.TFUNC] = types.TPTR
+ types.SimType[types.TUNSAFEPTR] = types.TPTR
for _, s := range &builtinFuncs {
s2 := types.BuiltinPkg.Lookup(s.name)
def := NewName(s2)
- def.SetSubOp(s.op)
+ def.BuiltinOp = s.op
s2.Def = def
}
for _, s := range &unsafeFuncs {
- s2 := unsafepkg.Lookup(s.name)
+ s2 := ir.Pkgs.Unsafe.Lookup(s.name)
def := NewName(s2)
- def.SetSubOp(s.op)
+ def.BuiltinOp = s.op
s2.Def = def
}
s = types.BuiltinPkg.Lookup("false")
s.Def = ir.NewConstAt(src.NoXPos, s, types.UntypedBool, constant.MakeBool(false))
- s = lookup("_")
+ s = Lookup("_")
types.BlankSym = s
s.Block = -100
s.Def = NewName(s)
types.Types[types.TNIL] = types.New(types.TNIL)
s = types.BuiltinPkg.Lookup("nil")
- nnil := nodnil()
+ nnil := NodNil()
nnil.(*ir.NilExpr).SetSym(s)
s.Def = nnil
s.Def = ir.NewIota(base.Pos, s)
for et := types.TINT8; et <= types.TUINT64; et++ {
- isInt[et] = true
+ types.IsInt[et] = true
}
- isInt[types.TINT] = true
- isInt[types.TUINT] = true
- isInt[types.TUINTPTR] = true
+ types.IsInt[types.TINT] = true
+ types.IsInt[types.TUINT] = true
+ types.IsInt[types.TUINTPTR] = true
- isFloat[types.TFLOAT32] = true
- isFloat[types.TFLOAT64] = true
+ types.IsFloat[types.TFLOAT32] = true
+ types.IsFloat[types.TFLOAT64] = true
- isComplex[types.TCOMPLEX64] = true
- isComplex[types.TCOMPLEX128] = true
+ types.IsComplex[types.TCOMPLEX64] = true
+ types.IsComplex[types.TCOMPLEX128] = true
// initialize okfor
for et := types.Kind(0); et < types.NTYPE; et++ {
- if isInt[et] || et == types.TIDEAL {
+ if types.IsInt[et] || et == types.TIDEAL {
okforeq[et] = true
- okforcmp[et] = true
+ types.IsOrdered[et] = true
okforarith[et] = true
okforadd[et] = true
okforand[et] = true
ir.OKForConst[et] = true
- issimple[et] = true
+ types.IsSimple[et] = true
}
- if isFloat[et] {
+ if types.IsFloat[et] {
okforeq[et] = true
- okforcmp[et] = true
+ types.IsOrdered[et] = true
okforadd[et] = true
okforarith[et] = true
ir.OKForConst[et] = true
- issimple[et] = true
+ types.IsSimple[et] = true
}
- if isComplex[et] {
+ if types.IsComplex[et] {
okforeq[et] = true
okforadd[et] = true
okforarith[et] = true
ir.OKForConst[et] = true
- issimple[et] = true
+ types.IsSimple[et] = true
}
}
- issimple[types.TBOOL] = true
+ types.IsSimple[types.TBOOL] = true
okforadd[types.TSTRING] = true
okforeq[types.TARRAY] = true // only if element type is comparable; refined in typecheck
okforeq[types.TSTRUCT] = true // only if all struct fields are comparable; refined in typecheck
- okforcmp[types.TSTRING] = true
+ types.IsOrdered[types.TSTRING] = true
for i := range okfor {
okfor[i] = okfornone[:]
okfor[ir.OANDNOT] = okforand[:]
okfor[ir.ODIV] = okforarith[:]
okfor[ir.OEQ] = okforeq[:]
- okfor[ir.OGE] = okforcmp[:]
- okfor[ir.OGT] = okforcmp[:]
- okfor[ir.OLE] = okforcmp[:]
- okfor[ir.OLT] = okforcmp[:]
+ okfor[ir.OGE] = types.IsOrdered[:]
+ okfor[ir.OGT] = types.IsOrdered[:]
+ okfor[ir.OLE] = types.IsOrdered[:]
+ okfor[ir.OLT] = types.IsOrdered[:]
okfor[ir.OMOD] = okforand[:]
okfor[ir.OMUL] = okforarith[:]
okfor[ir.ONE] = okforeq[:]
sig := types.NewSignature(types.NoPkg, fakeRecvField(), nil, []*types.Field{
types.NewField(src.NoXPos, nil, types.Types[types.TSTRING]),
})
- method := types.NewField(src.NoXPos, lookup("Error"), sig)
+ method := types.NewField(src.NoXPos, Lookup("Error"), sig)
return types.NewInterface(types.NoPkg, []*types.Field{method})
}
-// finishUniverse makes the universe block visible within the current package.
-func finishUniverse() {
+// declareUniverse makes the universe block visible within the current package.
+func declareUniverse() {
// Operationally, this is similar to a dot import of builtinpkg, except
// that we silently skip symbols that are already declared in the
// package block rather than emitting a redeclared symbol error.
if s.Def == nil {
continue
}
- s1 := lookup(s.Name)
+ s1 := Lookup(s.Name)
if s1.Def != nil {
continue
}
s1.Block = s.Block
}
- nodfp = NewName(lookup(".fp"))
- nodfp.SetType(types.Types[types.TINT32])
- nodfp.SetClass(ir.PPARAM)
- nodfp.SetUsed(true)
+ ir.RegFP = NewName(Lookup(".fp"))
+ ir.RegFP.SetType(types.Types[types.TINT32])
+ ir.RegFP.Class_ = ir.PPARAM
+ ir.RegFP.SetUsed(true)
}
--- /dev/null
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import "cmd/compile/internal/base"
+
+// AlgKind describes the kind of algorithms used for comparing and
+// hashing a Type.
+type AlgKind int
+
+//go:generate stringer -type AlgKind -trimprefix A
+
+const (
+	// These values are known by runtime.
+	ANOEQ AlgKind = iota
+	AMEM0
+	AMEM8
+	AMEM16
+	AMEM32
+	AMEM64
+	AMEM128
+	ASTRING
+	AINTER
+	ANILINTER
+	AFLOAT32
+	AFLOAT64
+	ACPLX64
+	ACPLX128
+
+	// Type can be compared/hashed as regular memory.
+	AMEM AlgKind = 100
+
+	// Type needs special comparison/hashing functions.
+	ASPECIAL AlgKind = -1
+)
+
+// AlgType returns the AlgKind used for comparing and hashing Type t.
+// If it returns ANOEQ, it also returns the component type of t that
+// makes it incomparable.
+func AlgType(t *Type) (AlgKind, *Type) {
+	if t.Broke() {
+		// Treat already-broken types as plain-memory comparable so
+		// later checks don't pile on additional errors.
+		return AMEM, nil
+	}
+	if t.Noalg() {
+		return ANOEQ, t
+	}
+
+	switch t.Kind() {
+	case TANY, TFORW:
+		// will be defined later.
+		return ANOEQ, t
+
+	case TINT8, TUINT8, TINT16, TUINT16,
+		TINT32, TUINT32, TINT64, TUINT64,
+		TINT, TUINT, TUINTPTR,
+		TBOOL, TPTR,
+		TCHAN, TUNSAFEPTR:
+		return AMEM, nil
+
+	case TFUNC, TMAP:
+		return ANOEQ, t
+
+	case TFLOAT32:
+		return AFLOAT32, nil
+
+	case TFLOAT64:
+		return AFLOAT64, nil
+
+	case TCOMPLEX64:
+		return ACPLX64, nil
+
+	case TCOMPLEX128:
+		return ACPLX128, nil
+
+	case TSTRING:
+		return ASTRING, nil
+
+	case TINTER:
+		if t.IsEmptyInterface() {
+			return ANILINTER, nil
+		}
+		return AINTER, nil
+
+	case TSLICE:
+		return ANOEQ, t
+
+	case TARRAY:
+		a, bad := AlgType(t.Elem())
+		switch a {
+		case AMEM:
+			return AMEM, nil
+		case ANOEQ:
+			return ANOEQ, bad
+		}
+
+		switch t.NumElem() {
+		case 0:
+			// We checked above that the element type is comparable.
+			return AMEM, nil
+		case 1:
+			// Single-element array is same as its lone element.
+			return a, nil
+		}
+
+		// Multi-element array with a non-memory element algorithm
+		// needs generated comparison/hash functions.
+		return ASPECIAL, nil
+
+	case TSTRUCT:
+		fields := t.FieldSlice()
+
+		// One-field struct is same as that one field alone.
+		if len(fields) == 1 && !fields[0].Sym.IsBlank() {
+			return AlgType(fields[0].Type)
+		}
+
+		ret := AMEM
+		for i, f := range fields {
+			// All fields must be comparable.
+			a, bad := AlgType(f.Type)
+			if a == ANOEQ {
+				return ANOEQ, bad
+			}
+
+			// Blank fields, padded fields, fields with non-memory
+			// equality need special compare.
+			if a != AMEM || f.Sym.IsBlank() || IsPaddedField(t, i) {
+				ret = ASPECIAL
+			}
+		}
+
+		return ret, nil
+	}
+
+	base.Fatalf("algtype1: unexpected type %v", t)
+	return 0, nil
+}
+
+// TypeHasNoAlg reports whether t does not have any associated hash/eq
+// algorithms because t, or some component of t, is marked Noalg.
+func TypeHasNoAlg(t *Type) bool {
+	a, bad := AlgType(t)
+	return a == ANOEQ && bad.Noalg()
+}
+
+// IsComparable reports whether t is a comparable type.
+func IsComparable(t *Type) bool {
+	a, _ := AlgType(t)
+	return a != ANOEQ
+}
+
+// IncomparableField returns an incomparable Field of struct Type t, if any.
+func IncomparableField(t *Type) *Field {
+	for _, f := range t.FieldSlice() {
+		if !IsComparable(f.Type) {
+			return f
+		}
+	}
+	return nil
+}
+
+// IsPaddedField reports whether the i'th field of struct type t is followed
+// by padding.
+// NOTE(review): relies on t.Width and field Offsets having already been
+// computed (i.e. CalcSize has run on t) — confirm at call sites.
+func IsPaddedField(t *Type, i int) bool {
+	if !t.IsStruct() {
+		base.Fatalf("ispaddedfield called non-struct %v", t)
+	}
+	end := t.Width
+	if i+1 < t.NumFields() {
+		end = t.Field(i + 1).Offset
+	}
+	return t.Field(i).End() != end
+}
// Code generated by "stringer -type AlgKind -trimprefix A"; DO NOT EDIT.
-package gc
+package types
import "strconv"
import (
"bytes"
+ "crypto/md5"
+ "encoding/binary"
"fmt"
"go/constant"
"strconv"
return v.String()
}
+
+// TypeHash computes a hash value for type t to use in type switch statements.
+func TypeHash(t *Type) uint32 {
+	// LongString is assumed to uniquely describe t, so hashing it
+	// hashes the type's identity — TODO confirm uniqueness guarantee.
+	p := t.LongString()
+
+	// Using MD5 is overkill, but reduces accidental collisions.
+	h := md5.Sum([]byte(p))
+	return binary.LittleEndian.Uint32(h[:4])
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run mkbuiltin.go
+
+package types
+
+import (
+	"fmt"
+	"internal/goversion"
+	"log"
+	"regexp"
+	"strconv"
+
+	"cmd/compile/internal/base"
+)
+
+// A lang is a language version broken into major and minor numbers.
+type lang struct {
+	major, minor int
+}
+
+// langWant is the desired language version set by the -lang flag.
+// If the -lang flag is not set, this is the zero value, meaning that
+// any language version is supported.
+var langWant lang
+
+// AllowsGoVersion reports whether a particular package
+// is allowed to use Go version major.minor.
+// We assume the imported packages have all been checked,
+// so we only have to check the local package against the -lang flag.
+func AllowsGoVersion(pkg *Pkg, major, minor int) bool {
+	if pkg == nil {
+		// TODO(mdempsky): Set Pkg for local types earlier.
+		pkg = LocalPkg
+	}
+	if pkg != LocalPkg {
+		// Assume imported packages passed type-checking.
+		return true
+	}
+	if langWant.major == 0 && langWant.minor == 0 {
+		// -lang flag not set: all versions allowed.
+		return true
+	}
+	return langWant.major > major || (langWant.major == major && langWant.minor >= minor)
+}
+
+// ParseLangFlag verifies that the -lang flag holds a valid value, and
+// exits if not. It initializes data used by AllowsGoVersion.
+func ParseLangFlag() {
+	if base.Flag.Lang == "" {
+		return
+	}
+
+	var err error
+	langWant, err = parseLang(base.Flag.Lang)
+	if err != nil {
+		log.Fatalf("invalid value %q for -lang: %v", base.Flag.Lang, err)
+	}
+
+	// Reject -lang values newer than the compiler's own version.
+	if def := currentLang(); base.Flag.Lang != def {
+		defVers, err := parseLang(def)
+		if err != nil {
+			log.Fatalf("internal error parsing default lang %q: %v", def, err)
+		}
+		if langWant.major > defVers.major || (langWant.major == defVers.major && langWant.minor > defVers.minor) {
+			log.Fatalf("invalid value %q for -lang: max known version is %q", base.Flag.Lang, def)
+		}
+	}
+}
+
+// parseLang parses a -lang option into a lang.
+func parseLang(s string) (lang, error) {
+	matches := goVersionRE.FindStringSubmatch(s)
+	if matches == nil {
+		return lang{}, fmt.Errorf(`should be something like "go1.12"`)
+	}
+	major, err := strconv.Atoi(matches[1])
+	if err != nil {
+		return lang{}, err
+	}
+	minor, err := strconv.Atoi(matches[2])
+	if err != nil {
+		return lang{}, err
+	}
+	return lang{major: major, minor: minor}, nil
+}
+
+// currentLang returns the current language version.
+func currentLang() string {
+	return fmt.Sprintf("go1.%d", goversion.Version)
+}
+
+// goVersionRE is a regular expression that matches the valid
+// arguments to the -lang flag.
+var goVersionRE = regexp.MustCompile(`^go([1-9][0-9]*)\.(0|[1-9][0-9]*)$`)
f()
pkgMap = saved
}
+
+// IsDotAlias reports whether sym has a definition whose canonical
+// symbol is a different Sym — i.e., sym is an alias for a declaration
+// originally made under another symbol (as created by dot imports).
+func IsDotAlias(sym *Sym) bool {
+	return sym.Def != nil && sym.Def.Sym() != sym
+}
Block = blockgen
}
-func IsDclstackValid() bool {
+func isDclstackValid() bool {
for _, d := range dclstack {
if d.sym == nil {
return false
// function scope.
return &s.Def
}
+
+// CheckDclstack reports a fatal compiler error if the declaration
+// stack is in an invalid state (an unpopped scope marker remains).
+func CheckDclstack() {
+	if !isDclstackValid() {
+		base.Fatalf("mark left on the dclstack")
+	}
+}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package types
import (
"bytes"
- "cmd/compile/internal/base"
- "cmd/compile/internal/types"
"fmt"
"sort"
+
+ "cmd/compile/internal/base"
+ "cmd/internal/src"
)
+var PtrSize int
+
+var RegSize int
+
+// Slices in the runtime are represented by three components:
+//
+// type slice struct {
+// ptr unsafe.Pointer
+// len int
+// cap int
+// }
+//
+// Strings in the runtime are represented by two components:
+//
+// type string struct {
+// ptr unsafe.Pointer
+// len int
+// }
+//
+// These variables are the offsets of fields and sizes of these structs.
+var (
+ SlicePtrOffset int64
+ SliceLenOffset int64
+ SliceCapOffset int64
+
+ SliceSize int64
+ StringSize int64
+)
+
+var SkipSizeForTracing bool
+
+// typePos returns the position associated with t.
+// This is where t was declared or where it appeared as a type expression.
+func typePos(t *Type) src.XPos {
+ if pos := t.Pos(); pos.IsKnown() {
+ return pos
+ }
+ base.Fatalf("bad type: %v", t)
+ panic("unreachable")
+}
+
// MaxWidth is the maximum size of a value on the target architecture.
var MaxWidth int64
-// sizeCalculationDisabled indicates whether it is safe
+// CalcSizeDisabled indicates whether it is safe
// to calculate Types' widths and alignments. See dowidth.
-var sizeCalculationDisabled bool
+var CalcSizeDisabled bool
// machine size and rounding alignment is dictated around
// the size of a pointer, set in betypeinit (see ../amd64/galign.go).
// expandiface computes the method set for interface type t by
// expanding embedded interfaces.
-func expandiface(t *types.Type) {
- seen := make(map[*types.Sym]*types.Field)
- var methods []*types.Field
+func expandiface(t *Type) {
+ seen := make(map[*Sym]*Field)
+ var methods []*Field
- addMethod := func(m *types.Field, explicit bool) {
+ addMethod := func(m *Field, explicit bool) {
switch prev := seen[m.Sym]; {
case prev == nil:
seen[m.Sym] = m
- case langSupported(1, 14, t.Pkg()) && !explicit && types.Identical(m.Type, prev.Type):
+ case AllowsGoVersion(t.Pkg(), 1, 14) && !explicit && Identical(m.Type, prev.Type):
return
default:
base.ErrorfAt(m.Pos, "duplicate method %s", m.Sym.Name)
continue
}
- checkwidth(m.Type)
+ CheckSize(m.Type)
addMethod(m, true)
}
// method set.
for _, t1 := range m.Type.Fields().Slice() {
// Use m.Pos rather than t1.Pos to preserve embedding position.
- f := types.NewField(m.Pos, t1.Sym, t1.Type)
+ f := NewField(m.Pos, t1.Sym, t1.Type)
addMethod(f, false)
}
}
- sort.Sort(methcmp(methods))
+ sort.Sort(MethodsByName(methods))
- if int64(len(methods)) >= MaxWidth/int64(Widthptr) {
+ if int64(len(methods)) >= MaxWidth/int64(PtrSize) {
base.ErrorfAt(typePos(t), "interface too large")
}
for i, m := range methods {
- m.Offset = int64(i) * int64(Widthptr)
+ m.Offset = int64(i) * int64(PtrSize)
}
// Access fields directly to avoid recursively calling dowidth
// within Type.Fields().
- t.Extra.(*types.Interface).Fields.Set(methods)
+ t.Extra.(*Interface).Fields.Set(methods)
}
-func widstruct(errtype *types.Type, t *types.Type, o int64, flag int) int64 {
+func calcStructOffset(errtype *Type, t *Type, o int64, flag int) int64 {
starto := o
maxalign := int32(flag)
if maxalign < 1 {
continue
}
- dowidth(f.Type)
+ CalcSize(f.Type)
if int32(f.Type.Align) > maxalign {
maxalign = int32(f.Type.Align)
}
// NOTE(rsc): This comment may be stale.
// It's possible the ordering has changed and this is
// now the common case. I'm not sure.
- f.Nname.(types.VarObject).RecordFrameOffset(o)
+ f.Nname.(VarObject).RecordFrameOffset(o)
}
w := f.Type.Width
// path points to a slice used for tracking the sequence of types
// visited. Using a pointer to a slice allows the slice capacity to
// grow and limit reallocations.
-func findTypeLoop(t *types.Type, path *[]*types.Type) bool {
+func findTypeLoop(t *Type, path *[]*Type) bool {
// We implement a simple DFS loop-finding algorithm. This
// could be faster, but type cycles are rare.
// Type imported from package, so it can't be part of
// a type loop (otherwise that package should have
// failed to compile).
- if t.Sym().Pkg != types.LocalPkg {
+ if t.Sym().Pkg != LocalPkg {
return false
}
}
*path = append(*path, t)
- if findTypeLoop(t.Obj().(types.TypeObject).TypeDefn(), path) {
+ if findTypeLoop(t.Obj().(TypeObject).TypeDefn(), path) {
return true
}
*path = (*path)[:len(*path)-1]
// Anonymous type. Recurse on contained types.
switch t.Kind() {
- case types.TARRAY:
+ case TARRAY:
if findTypeLoop(t.Elem(), path) {
return true
}
- case types.TSTRUCT:
+ case TSTRUCT:
for _, f := range t.Fields().Slice() {
if findTypeLoop(f.Type, path) {
return true
}
}
- case types.TINTER:
+ case TINTER:
for _, m := range t.Methods().Slice() {
if m.Type.IsInterface() { // embedded interface
if findTypeLoop(m.Type, path) {
return false
}
-func reportTypeLoop(t *types.Type) {
+func reportTypeLoop(t *Type) {
if t.Broke() {
return
}
- var l []*types.Type
+ var l []*Type
if !findTypeLoop(t, &l) {
base.Fatalf("failed to find type loop for: %v", t)
}
base.ErrorfAt(typePos(l[0]), msg.String())
}
-// dowidth calculates and stores the size and alignment for t.
+// CalcSize calculates and stores the size and alignment for t.
// If sizeCalculationDisabled is set, and the size/alignment
// have not already been calculated, it calls Fatal.
// This is used to prevent data races in the back end.
-func dowidth(t *types.Type) {
+func CalcSize(t *Type) {
// Calling dowidth when typecheck tracing enabled is not safe.
// See issue #33658.
- if enableTrace && skipDowidthForTracing {
+ if base.EnableTrace && SkipSizeForTracing {
return
}
- if Widthptr == 0 {
- base.Fatalf("dowidth without betypeinit")
+ if PtrSize == 0 {
+
+ // Assume this is a test.
+ return
}
if t == nil {
return
}
- if sizeCalculationDisabled {
+ if CalcSizeDisabled {
if t.Broke() {
// break infinite recursion from Fatal call below
return
}
// defer checkwidth calls until after we're done
- defercheckwidth()
+ DeferCheckSize()
lno := base.Pos
if pos := t.Pos(); pos.IsKnown() {
et := t.Kind()
switch et {
- case types.TFUNC, types.TCHAN, types.TMAP, types.TSTRING:
+ case TFUNC, TCHAN, TMAP, TSTRING:
break
// simtype == 0 during bootstrap
default:
- if simtype[t.Kind()] != 0 {
- et = simtype[t.Kind()]
+ if SimType[t.Kind()] != 0 {
+ et = SimType[t.Kind()]
}
}
base.Fatalf("dowidth: unknown type: %v", t)
// compiler-specific stuff
- case types.TINT8, types.TUINT8, types.TBOOL:
+ case TINT8, TUINT8, TBOOL:
// bool is int8
w = 1
- case types.TINT16, types.TUINT16:
+ case TINT16, TUINT16:
w = 2
- case types.TINT32, types.TUINT32, types.TFLOAT32:
+ case TINT32, TUINT32, TFLOAT32:
w = 4
- case types.TINT64, types.TUINT64, types.TFLOAT64:
+ case TINT64, TUINT64, TFLOAT64:
w = 8
- t.Align = uint8(Widthreg)
+ t.Align = uint8(RegSize)
- case types.TCOMPLEX64:
+ case TCOMPLEX64:
w = 8
t.Align = 4
- case types.TCOMPLEX128:
+ case TCOMPLEX128:
w = 16
- t.Align = uint8(Widthreg)
+ t.Align = uint8(RegSize)
- case types.TPTR:
- w = int64(Widthptr)
- checkwidth(t.Elem())
+ case TPTR:
+ w = int64(PtrSize)
+ CheckSize(t.Elem())
- case types.TUNSAFEPTR:
- w = int64(Widthptr)
+ case TUNSAFEPTR:
+ w = int64(PtrSize)
- case types.TINTER: // implemented as 2 pointers
- w = 2 * int64(Widthptr)
- t.Align = uint8(Widthptr)
+ case TINTER: // implemented as 2 pointers
+ w = 2 * int64(PtrSize)
+ t.Align = uint8(PtrSize)
expandiface(t)
- case types.TCHAN: // implemented as pointer
- w = int64(Widthptr)
+ case TCHAN: // implemented as pointer
+ w = int64(PtrSize)
- checkwidth(t.Elem())
+ CheckSize(t.Elem())
// make fake type to check later to
// trigger channel argument check.
- t1 := types.NewChanArgs(t)
- checkwidth(t1)
+ t1 := NewChanArgs(t)
+ CheckSize(t1)
- case types.TCHANARGS:
+ case TCHANARGS:
t1 := t.ChanArgs()
- dowidth(t1) // just in case
+ CalcSize(t1) // just in case
if t1.Elem().Width >= 1<<16 {
base.ErrorfAt(typePos(t1), "channel element type too large (>64kB)")
}
w = 1 // anything will do
- case types.TMAP: // implemented as pointer
- w = int64(Widthptr)
- checkwidth(t.Elem())
- checkwidth(t.Key())
+ case TMAP: // implemented as pointer
+ w = int64(PtrSize)
+ CheckSize(t.Elem())
+ CheckSize(t.Key())
- case types.TFORW: // should have been filled in
+ case TFORW: // should have been filled in
reportTypeLoop(t)
w = 1 // anything will do
- case types.TANY:
+ case TANY:
// not a real type; should be replaced before use.
base.Fatalf("dowidth any")
- case types.TSTRING:
- if sizeofString == 0 {
+ case TSTRING:
+ if StringSize == 0 {
base.Fatalf("early dowidth string")
}
- w = sizeofString
- t.Align = uint8(Widthptr)
+ w = StringSize
+ t.Align = uint8(PtrSize)
- case types.TARRAY:
+ case TARRAY:
if t.Elem() == nil {
break
}
- dowidth(t.Elem())
+ CalcSize(t.Elem())
if t.Elem().Width != 0 {
cap := (uint64(MaxWidth) - 1) / uint64(t.Elem().Width)
if uint64(t.NumElem()) > cap {
w = t.NumElem() * t.Elem().Width
t.Align = t.Elem().Align
- case types.TSLICE:
+ case TSLICE:
if t.Elem() == nil {
break
}
- w = sizeofSlice
- checkwidth(t.Elem())
- t.Align = uint8(Widthptr)
+ w = SliceSize
+ CheckSize(t.Elem())
+ t.Align = uint8(PtrSize)
- case types.TSTRUCT:
+ case TSTRUCT:
if t.IsFuncArgStruct() {
base.Fatalf("dowidth fn struct %v", t)
}
- w = widstruct(t, t, 0, 1)
+ w = calcStructOffset(t, t, 0, 1)
// make fake type to check later to
// trigger function argument computation.
- case types.TFUNC:
- t1 := types.NewFuncArgs(t)
- checkwidth(t1)
- w = int64(Widthptr) // width of func type is pointer
+ case TFUNC:
+ t1 := NewFuncArgs(t)
+ CheckSize(t1)
+ w = int64(PtrSize) // width of func type is pointer
// function is 3 cated structures;
// compute their widths as side-effect.
- case types.TFUNCARGS:
+ case TFUNCARGS:
t1 := t.FuncArgs()
- w = widstruct(t1, t1.Recvs(), 0, 0)
- w = widstruct(t1, t1.Params(), w, Widthreg)
- w = widstruct(t1, t1.Results(), w, Widthreg)
- t1.Extra.(*types.Func).Argwid = w
- if w%int64(Widthreg) != 0 {
+ w = calcStructOffset(t1, t1.Recvs(), 0, 0)
+ w = calcStructOffset(t1, t1.Params(), w, RegSize)
+ w = calcStructOffset(t1, t1.Results(), w, RegSize)
+ t1.Extra.(*Func).Argwid = w
+ if w%int64(RegSize) != 0 {
base.Warn("bad type %v %d\n", t1, w)
}
t.Align = 1
}
- if Widthptr == 4 && w != int64(int32(w)) {
+ if PtrSize == 4 && w != int64(int32(w)) {
base.ErrorfAt(typePos(t), "type %v too large", t)
}
base.Pos = lno
- resumecheckwidth()
+ ResumeCheckSize()
}
// CalcStructSize calculates the size of s,
// filling in s.Width and s.Align,
// even if size calculation is otherwise disabled.
-func CalcStructSize(s *types.Type) {
- s.Width = widstruct(s, s, 0, 1) // sets align
+func CalcStructSize(s *Type) {
+ s.Width = calcStructOffset(s, s, 0, 1) // sets align
}
// when a type's width should be known, we call checkwidth
// is needed immediately. checkwidth makes sure the
// size is evaluated eventually.
-var deferredTypeStack []*types.Type
+var deferredTypeStack []*Type
-func checkwidth(t *types.Type) {
+func CheckSize(t *Type) {
if t == nil {
return
}
}
if defercalc == 0 {
- dowidth(t)
+ CalcSize(t)
return
}
}
}
-func defercheckwidth() {
+func DeferCheckSize() {
defercalc++
}
-func resumecheckwidth() {
+func ResumeCheckSize() {
if defercalc == 1 {
for len(deferredTypeStack) > 0 {
t := deferredTypeStack[len(deferredTypeStack)-1]
deferredTypeStack = deferredTypeStack[:len(deferredTypeStack)-1]
t.SetDeferwidth(false)
- dowidth(t)
+ CalcSize(t)
}
}
defercalc--
}
+
+// PtrDataSize returns the length in bytes of the prefix of t
+// containing pointer data. Anything after this offset is scalar data.
+func PtrDataSize(t *Type) int64 {
+	if !t.HasPointers() {
+		return 0
+	}
+
+	switch t.Kind() {
+	case TPTR,
+		TUNSAFEPTR,
+		TFUNC,
+		TCHAN,
+		TMAP:
+		return int64(PtrSize)
+
+	case TSTRING:
+		// struct { byte *str; intgo len; }
+		return int64(PtrSize)
+
+	case TINTER:
+		// struct { Itab *tab; void *data; } or
+		// struct { Type *type; void *data; }
+		// Note: see comment in plive.go:onebitwalktype1.
+		return 2 * int64(PtrSize)
+
+	case TSLICE:
+		// struct { byte *array; uintgo len; uintgo cap; }
+		return int64(PtrSize)
+
+	case TARRAY:
+		// haspointers already eliminated t.NumElem() == 0.
+		return (t.NumElem()-1)*t.Elem().Width + PtrDataSize(t.Elem())
+
+	case TSTRUCT:
+		// Find the last field that has pointers.
+		var lastPtrField *Field
+		for _, t1 := range t.Fields().Slice() {
+			if t1.Type.HasPointers() {
+				lastPtrField = t1
+			}
+		}
+		// lastPtrField must be non-nil here: t.HasPointers() implies
+		// some field has pointers (otherwise this would nil-deref).
+		return lastPtrField.Offset + PtrDataSize(lastPtrField.Type)
+
+	default:
+		base.Fatalf("typeptrdata: unexpected type, %v", t)
+		return 0
+	}
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+// MethodsByName sorts methods by symbol.
+// It implements sort.Interface.
+type MethodsByName []*Field
+
+// Len implements sort.Interface.
+func (x MethodsByName) Len() int { return len(x) }
+
+// Swap implements sort.Interface.
+func (x MethodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+// Less orders methods by their symbols, via Sym.Less.
+func (x MethodsByName) Less(i, j int) bool { return x[i].Sym.Less(x[j].Sym) }
"cmd/internal/obj"
"cmd/internal/src"
"fmt"
+ "sync"
)
// IRNode represents an ir.Node, but without needing to import cmd/compile/internal/ir,
t := New(TPTR)
t.Extra = Ptr{Elem: elem}
- t.Width = int64(Widthptr)
- t.Align = uint8(Widthptr)
+ t.Width = int64(PtrSize)
+ t.Align = uint8(PtrSize)
if NewPtrCacheEnabled {
elem.cache.ptr = t
}
case TSTRUCT:
return &t.Extra.(*Struct).fields
case TINTER:
- Dowidth(t)
+ CalcSize(t)
return &t.Extra.(*Interface).Fields
}
base.Fatalf("Fields: type %v does not have fields", t)
}
return 0
}
- Dowidth(t)
+ CalcSize(t)
return t.Width
}
func (t *Type) Alignment() int64 {
- Dowidth(t)
+ CalcSize(t)
return int64(t.Align)
}
}
return false
}
+
+// Kind predicate tables, indexed by Kind (e.g. IsInt[t.Kind()]).
+var (
+	IsInt [NTYPE]bool
+	IsFloat [NTYPE]bool
+	IsComplex [NTYPE]bool
+	IsSimple [NTYPE]bool
+)
+
+// IsOrdered reports, per Kind, whether values of that kind support
+// the ordering operators < <= > >=.
+var IsOrdered [NTYPE]bool
+
+// IsReflexive reports whether t has a reflexive equality operator.
+// That is, if x==x for all x of type t.
+func IsReflexive(t *Type) bool {
+	switch t.Kind() {
+	case TBOOL,
+		TINT,
+		TUINT,
+		TINT8,
+		TUINT8,
+		TINT16,
+		TUINT16,
+		TINT32,
+		TUINT32,
+		TINT64,
+		TUINT64,
+		TUINTPTR,
+		TPTR,
+		TUNSAFEPTR,
+		TSTRING,
+		TCHAN:
+		return true
+
+	// Floating-point equality is not reflexive (NaN != NaN);
+	// complex types contain floats, and interfaces may hold them.
+	case TFLOAT32,
+		TFLOAT64,
+		TCOMPLEX64,
+		TCOMPLEX128,
+		TINTER:
+		return false
+
+	case TARRAY:
+		return IsReflexive(t.Elem())
+
+	case TSTRUCT:
+		// A struct is reflexive iff every field is.
+		for _, t1 := range t.Fields().Slice() {
+			if !IsReflexive(t1.Type) {
+				return false
+			}
+		}
+		return true
+
+	default:
+		base.Fatalf("bad type for map key: %v", t)
+		return false
+	}
+}
+
+// IsDirectIface reports whether t can be stored directly in an
+// interface word — i.e., whether its representation is a single pointer.
+func IsDirectIface(t *Type) bool {
+	if t.Broke() {
+		return false
+	}
+
+	switch t.Kind() {
+	case TPTR:
+		// Pointers to notinheap types must be stored indirectly. See issue 42076.
+		return !t.Elem().NotInHeap()
+	case TCHAN,
+		TMAP,
+		TFUNC,
+		TUNSAFEPTR:
+		return true
+
+	case TARRAY:
+		// Array of 1 direct iface type can be direct.
+		return t.NumElem() == 1 && IsDirectIface(t.Elem())
+
+	case TSTRUCT:
+		// Struct with 1 field of direct iface type can be direct.
+		return t.NumFields() == 1 && IsDirectIface(t.Field(0).Type)
+	}
+
+	return false
+}
+
+// IsInterfaceMethod reports whether (field) m is
+// an interface method. Such methods have the
+// special receiver type types.FakeRecvType().
+func IsInterfaceMethod(f *Type) bool {
+	return f.Recv().Type == FakeRecvType()
+}
+
+// IsMethodApplicable reports whether method m can be called on a
+// value of type t. This is necessary because we compute a single
+// method set for both T and *T, but some *T methods are not
+// applicable to T receivers.
+func IsMethodApplicable(t *Type, m *Field) bool {
+	// NOTE(review): m.Embedded == 2 appears to mark methods promoted
+	// through an embedded pointer field — confirm against callers.
+	return t.IsPtr() || !m.Type.Recv().Type.IsPtr() || IsInterfaceMethod(m.Type) || m.Embedded == 2
+}
+
+// IsRuntimePkg reports whether p is package runtime.
+func IsRuntimePkg(p *Pkg) bool {
+	// When compiling the runtime itself, the local package is runtime.
+	if base.Flag.CompilingRuntime && p == LocalPkg {
+		return true
+	}
+	return p.Path == "runtime"
+}
+
+// IsReflectPkg reports whether p is package reflect.
+func IsReflectPkg(p *Pkg) bool {
+	if p == LocalPkg {
+		return base.Ctxt.Pkgpath == "reflect"
+	}
+	return p.Path == "reflect"
+}
+
+// ReceiverBaseType returns the underlying type, if any,
+// that owns methods with receiver parameter t.
+// The result is either a named type or an anonymous struct.
+func ReceiverBaseType(t *Type) *Type {
+	if t == nil {
+		return nil
+	}
+
+	// Strip away pointer if it's there.
+	if t.IsPtr() {
+		if t.Sym() != nil {
+			// Named pointer types cannot be receiver base types.
+			return nil
+		}
+		t = t.Elem()
+		if t == nil {
+			return nil
+		}
+	}
+
+	// Must be a named type or anonymous struct.
+	if t.Sym() == nil && !t.IsStruct() {
+		return nil
+	}
+
+	// Check types.
+	if IsSimple[t.Kind()] {
+		return t
+	}
+	switch t.Kind() {
+	case TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRING, TSTRUCT:
+		return t
+	}
+	return nil
+}
+
+// FloatForComplex returns the float type that makes up
+// the components of complex type t.
+func FloatForComplex(t *Type) *Type {
+	switch t.Kind() {
+	case TCOMPLEX64:
+		return Types[TFLOAT32]
+	case TCOMPLEX128:
+		return Types[TFLOAT64]
+	}
+	base.Fatalf("unexpected type: %v", t)
+	return nil
+}
+
+// ComplexForFloat returns the complex type whose components
+// have float type t.
+func ComplexForFloat(t *Type) *Type {
+	switch t.Kind() {
+	case TFLOAT32:
+		return Types[TCOMPLEX64]
+	case TFLOAT64:
+		return Types[TCOMPLEX128]
+	}
+	base.Fatalf("unexpected type: %v", t)
+	return nil
+}
+
+// TypeSym returns the symbol for type t in the type package.
+func TypeSym(t *Type) *Sym {
+	return TypeSymLookup(TypeSymName(t))
+}
+
+// TypeSymLookup looks name up in the fake type package (typepkg below),
+// guarding the lookup with typepkgmu.
+func TypeSymLookup(name string) *Sym {
+	typepkgmu.Lock()
+	s := typepkg.Lookup(name)
+	typepkgmu.Unlock()
+	return s
+}
+
+// TypeSymName returns the symbol name used for type t.
+func TypeSymName(t *Type) string {
+	name := t.ShortString()
+	// Use a separate symbol name for Noalg types for #17752.
+	if TypeHasNoAlg(t) {
+		name = "noalg." + name
+	}
+	return name
+}
+
+// Fake package for runtime type info (headers).
+// Don't access directly; use TypeSymLookup above.
+var (
+	typepkgmu sync.Mutex // protects typepkg lookups
+	typepkg = NewPkg("type", "type")
+)
+
+// SimType maps a Kind to the simplified Kind it is represented as;
+// 0 means no substitution.
+var SimType [NTYPE]Kind
// They are here to break import cycles.
// TODO(gri) eliminate these dependencies.
var (
- Widthptr int
- Dowidth func(*Type)
TypeLinkSym func(*Type) *obj.LSym
)
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "go/constant"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// walkAssign walks an OAS (AssignExpr) or OASOP (AssignOpExpr) node.
+func walkAssign(init *ir.Nodes, n ir.Node) ir.Node {
+	init.Append(n.PtrInit().Take()...)
+
+	var left, right ir.Node
+	switch n.Op() {
+	case ir.OAS:
+		n := n.(*ir.AssignStmt)
+		left, right = n.X, n.Y
+	case ir.OASOP:
+		n := n.(*ir.AssignOpStmt)
+		left, right = n.X, n.Y
+	}
+
+	// Recognize m[k] = append(m[k], ...) so we can reuse
+	// the mapassign call.
+	var mapAppend *ir.CallExpr
+	if left.Op() == ir.OINDEXMAP && right.Op() == ir.OAPPEND {
+		left := left.(*ir.IndexExpr)
+		mapAppend = right.(*ir.CallExpr)
+		if !ir.SameSafeExpr(left, mapAppend.Args[0]) {
+			base.Fatalf("not same expressions: %v != %v", left, mapAppend.Args[0])
+		}
+	}
+
+	// Walk the LHS once and capture it in a safe, re-usable form,
+	// since it may also be substituted into the append argument below.
+	left = walkExpr(left, init)
+	left = safeExpr(left, init)
+	if mapAppend != nil {
+		mapAppend.Args[0] = left
+	}
+
+	if n.Op() == ir.OASOP {
+		// Rewrite x op= y into x = x op y.
+		n = ir.NewAssignStmt(base.Pos, left, typecheck.Expr(ir.NewBinaryExpr(base.Pos, n.(*ir.AssignOpStmt).AsOp, left, right)))
+	} else {
+		n.(*ir.AssignStmt).X = left
+	}
+	as := n.(*ir.AssignStmt)
+
+	if oaslit(as, init) {
+		// Handled entirely as a composite-literal assignment.
+		return ir.NewBlockStmt(as.Pos(), nil)
+	}
+
+	if as.Y == nil {
+		// TODO(austin): Check all "implicit zeroing"
+		return as
+	}
+
+	if !base.Flag.Cfg.Instrumenting && ir.IsZero(as.Y) {
+		return as
+	}
+
+	switch as.Y.Op() {
+	default:
+		as.Y = walkExpr(as.Y, init)
+
+	case ir.ORECV:
+		// x = <-c; as.Left is x, as.Right.Left is c.
+		// order.stmt made sure x is addressable.
+		recv := as.Y.(*ir.UnaryExpr)
+		recv.X = walkExpr(recv.X, init)
+
+		n1 := typecheck.NodAddr(as.X)
+		r := recv.X // the channel
+		return mkcall1(chanfn("chanrecv1", 2, r.Type()), nil, init, r, n1)
+
+	case ir.OAPPEND:
+		// x = append(...)
+		call := as.Y.(*ir.CallExpr)
+		if call.Type().Elem().NotInHeap() {
+			base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", call.Type().Elem())
+		}
+		var r ir.Node
+		switch {
+		case isAppendOfMake(call):
+			// x = append(y, make([]T, y)...)
+			r = extendSlice(call, init)
+		case call.IsDDD:
+			r = appendSlice(call, init) // also works for append(slice, string).
+		default:
+			r = walkAppend(call, init, as)
+		}
+		as.Y = r
+		if r.Op() == ir.OAPPEND {
+			// Left in place for back end.
+			// Do not add a new write barrier.
+			// Set up address of type for back end.
+			r.(*ir.CallExpr).X = reflectdata.TypePtr(r.Type().Elem())
+			return as
+		}
+		// Otherwise, lowered for race detector.
+		// Treat as ordinary assignment.
+	}
+
+	if as.X != nil && as.Y != nil {
+		return convas(as, init)
+	}
+	return as
+}
+
+// walkAssignDotType walks an OAS2DOTTYPE node.
+func walkAssignDotType(n *ir.AssignListStmt, init *ir.Nodes) ir.Node {
+	walkExprListSafe(n.Lhs, init)
+	n.Rhs[0] = walkExpr(n.Rhs[0], init)
+	return n
+}
+
+// walkAssignFunc walks an OAS2FUNC node.
+func walkAssignFunc(init *ir.Nodes, n *ir.AssignListStmt) ir.Node {
+	init.Append(n.PtrInit().Take()...)
+
+	r := n.Rhs[0]
+	walkExprListSafe(n.Lhs, init)
+	r = walkExpr(r, init)
+
+	if ir.IsIntrinsicCall(r.(*ir.CallExpr)) {
+		// Intrinsic calls are left in place for the back end.
+		n.Rhs = []ir.Node{r}
+		return n
+	}
+	init.Append(r)
+
+	// Assign each function result to the corresponding LHS.
+	ll := ascompatet(n.Lhs, r.Type())
+	return ir.NewBlockStmt(src.NoXPos, ll)
+}
+
+// walkAssignList walks an OAS2 node.
+func walkAssignList(init *ir.Nodes, n *ir.AssignListStmt) ir.Node {
+	init.Append(n.PtrInit().Take()...)
+	walkExprListSafe(n.Lhs, init)
+	walkExprListSafe(n.Rhs, init)
+	return ir.NewBlockStmt(src.NoXPos, ascompatee(ir.OAS, n.Lhs, n.Rhs, init))
+}
+
+// walkAssignMapRead walks an OAS2MAPR node.
+func walkAssignMapRead(init *ir.Nodes, n *ir.AssignListStmt) ir.Node {
+	init.Append(n.PtrInit().Take()...)
+
+	r := n.Rhs[0].(*ir.IndexExpr)
+	walkExprListSafe(n.Lhs, init)
+	r.X = walkExpr(r.X, init)
+	r.Index = walkExpr(r.Index, init)
+	t := r.X.Type()
+
+	fast := mapfast(t)
+	var key ir.Node
+	if fast != mapslow {
+		// fast versions take key by value
+		key = r.Index
+	} else {
+		// standard version takes key by reference
+		// order.expr made sure key is addressable.
+		key = typecheck.NodAddr(r.Index)
+	}
+
+	// from:
+	//	a,b = m[i]
+	// to:
+	//	var,b = mapaccess2*(t, m, i)
+	//	a = *var
+	a := n.Lhs[0]
+
+	// Large values use the _fat variant, which is passed the address
+	// of a shared zero value to copy from on a miss.
+	var call *ir.CallExpr
+	if w := t.Elem().Width; w <= zeroValSize {
+		fn := mapfn(mapaccess2[fast], t)
+		call = mkcall1(fn, fn.Type().Results(), init, reflectdata.TypePtr(t), r.X, key)
+	} else {
+		fn := mapfn("mapaccess2_fat", t)
+		z := reflectdata.ZeroAddr(w)
+		call = mkcall1(fn, fn.Type().Results(), init, reflectdata.TypePtr(t), r.X, key, z)
+	}
+
+	// mapaccess2* returns a typed bool, but due to spec changes,
+	// the boolean result of i.(T) is now untyped so we make it the
+	// same type as the variable on the lhs.
+	if ok := n.Lhs[1]; !ir.IsBlank(ok) && ok.Type().IsBoolean() {
+		call.Type().Field(1).Type = ok.Type()
+	}
+	n.Rhs = []ir.Node{call}
+	n.SetOp(ir.OAS2FUNC)
+
+	// don't generate a = *var if a is _
+	if ir.IsBlank(a) {
+		return walkExpr(typecheck.Stmt(n), init)
+	}
+
+	var_ := typecheck.Temp(types.NewPtr(t.Elem()))
+	var_.SetTypecheck(1)
+	var_.MarkNonNil() // mapaccess always returns a non-nil pointer
+
+	n.Lhs[0] = var_
+	init.Append(walkExpr(n, init))
+
+	as := ir.NewAssignStmt(base.Pos, a, ir.NewStarExpr(base.Pos, var_))
+	return walkExpr(typecheck.Stmt(as), init)
+}
+
+// walkAssignRecv walks an OAS2RECV node.
+func walkAssignRecv(init *ir.Nodes, n *ir.AssignListStmt) ir.Node {
+	init.Append(n.PtrInit().Take()...)
+
+	r := n.Rhs[0].(*ir.UnaryExpr) // recv
+	walkExprListSafe(n.Lhs, init)
+	r.X = walkExpr(r.X, init)
+	var n1 ir.Node
+	if ir.IsBlank(n.Lhs[0]) {
+		// Element destination is nil; chanrecv2 discards the value.
+		n1 = typecheck.NodNil()
+	} else {
+		n1 = typecheck.NodAddr(n.Lhs[0])
+	}
+	fn := chanfn("chanrecv2", 2, r.X.Type())
+	ok := n.Lhs[1]
+	call := mkcall1(fn, types.Types[types.TBOOL], init, r.X, n1)
+	return typecheck.Stmt(ir.NewAssignStmt(base.Pos, ok, call))
+}
+
+// walkReturn walks an ORETURN node, rewriting its results into
+// assignments to the function's output parameters.
+func walkReturn(n *ir.ReturnStmt) ir.Node {
+	ir.CurFunc.NumReturns++
+	if len(n.Results) == 0 {
+		// Bare return (or no results): nothing to rewrite.
+		return n
+	}
+	if (ir.HasNamedResults(ir.CurFunc) && len(n.Results) > 1) || paramoutheap(ir.CurFunc) {
+		// assign to the function out parameters,
+		// so that ascompatee can fix up conflicts
+		var rl []ir.Node
+
+		for _, ln := range ir.CurFunc.Dcl {
+			cl := ln.Class_
+			if cl == ir.PAUTO || cl == ir.PAUTOHEAP {
+				// Stop at the first auto; presumably Dcl lists
+				// parameters before locals — TODO confirm ordering.
+				break
+			}
+			if cl == ir.PPARAMOUT {
+				var ln ir.Node = ln
+				if ir.IsParamStackCopy(ln) {
+					// Heap-escaped result: assign through the
+					// heap copy via its Heapaddr pointer.
+					ln = walkExpr(typecheck.Expr(ir.NewStarExpr(base.Pos, ln.Name().Heapaddr)), nil)
+				}
+				rl = append(rl, ln)
+			}
+		}
+
+		if got, want := len(n.Results), len(rl); got != want {
+			// order should have rewritten multi-value function calls
+			// with explicit OAS2FUNC nodes.
+			base.Fatalf("expected %v return arguments, have %v", want, got)
+		}
+
+		// move function calls out, to make ascompatee's job easier.
+		walkExprListSafe(n.Results, n.PtrInit())
+
+		n.Results.Set(ascompatee(n.Op(), rl, n.Results, n.PtrInit()))
+		return n
+	}
+	walkExprList(n.Results, n.PtrInit())
+
+	// For each return parameter (lhs), assign the corresponding result (rhs).
+	lhs := ir.CurFunc.Type().Results()
+	rhs := n.Results
+	res := make([]ir.Node, lhs.NumFields())
+	for i, nl := range lhs.FieldSlice() {
+		nname := ir.AsNode(nl.Nname)
+		if ir.IsParamHeapCopy(nname) {
+			// Assign to the stack copy; the heap copy is kept in
+			// sync elsewhere.
+			nname = nname.Name().Stackcopy
+		}
+		a := ir.NewAssignStmt(base.Pos, nname, rhs[i])
+		res[i] = convas(a, n.PtrInit())
+	}
+	n.Results.Set(res)
+	return n
+}
+
+// fncall reports whether assigning an rvalue of type rt to an lvalue l might involve a function call.
+// Map assignment (OINDEXMAP) always lowers to a runtime call, and a type
+// mismatch implies a conversion that may also require one.
+func fncall(l ir.Node, rt *types.Type) bool {
+	if l.HasCall() || l.Op() == ir.OINDEXMAP {
+		return true
+	}
+	if types.Identical(l.Type(), rt) {
+		return false
+	}
+	// There might be a conversion required, which might involve a runtime call.
+	return true
+}
+
+// ascompatee rewrites the parallel assignment nl = nr (expr-list =
+// expr-list) into an ordered list of single assignments, making both
+// sides safe and letting reorder3 break up aliasing conflicts.
+func ascompatee(op ir.Op, nl, nr []ir.Node, init *ir.Nodes) []ir.Node {
+	// check assign expression list to
+	// an expression list. called in
+	// expr-list = expr-list
+
+	// ensure order of evaluation for function calls
+	for i := range nl {
+		nl[i] = safeExpr(nl[i], init)
+	}
+	for i1 := range nr {
+		nr[i1] = safeExpr(nr[i1], init)
+	}
+
+	var nn []*ir.AssignStmt
+	i := 0
+	for ; i < len(nl); i++ {
+		if i >= len(nr) {
+			break
+		}
+		// Do not generate 'x = x' during return. See issue 4014.
+		if op == ir.ORETURN && ir.SameSafeExpr(nl[i], nr[i]) {
+			continue
+		}
+		nn = append(nn, ascompatee1(nl[i], nr[i], init))
+	}
+
+	// cannot happen: caller checked that lists had same length
+	if i < len(nl) || i < len(nr) {
+		var nln, nrn ir.Nodes
+		nln.Set(nl)
+		nrn.Set(nr)
+		base.Fatalf("error in shape across %+v %v %+v / %d %d [%s]", nln, op, nrn, len(nl), len(nr), ir.FuncName(ir.CurFunc))
+	}
+	return reorder3(nn)
+}
+
+// ascompatee1 builds a single assignment l = r for ascompatee,
+// applying convas except for map assignments.
+func ascompatee1(l ir.Node, r ir.Node, init *ir.Nodes) *ir.AssignStmt {
+	// convas will turn map assigns into function calls,
+	// making it impossible for reorder3 to work.
+	n := ir.NewAssignStmt(base.Pos, l, r)
+
+	if l.Op() == ir.OINDEXMAP {
+		// Leave map assignments untouched here; reorder3 calls
+		// convas on them itself once ordering is settled.
+		return n
+	}
+
+	return convas(n, init)
+}
+
+// ascompatet checks an assignment of a function's result types to
+// an expression list. called in
+//	expr-list = func()
+// It reads each result out of its ABI frame slot (ORESULT) and assigns
+// it to the corresponding lvalue.
+func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node {
+	if len(nl) != nr.NumFields() {
+		base.Fatalf("ascompatet: assignment count mismatch: %d = %d", len(nl), nr.NumFields())
+	}
+
+	// nn holds the direct result reads; mm holds deferred assignments
+	// through temporaries (appended after nn so all results are read
+	// before any call-involving store runs).
+	var nn, mm ir.Nodes
+	for i, l := range nl {
+		if ir.IsBlank(l) {
+			continue
+		}
+		r := nr.Field(i)
+
+		// Any assignment to an lvalue that might cause a function call must be
+		// deferred until all the returned values have been read.
+		if fncall(l, r.Type) {
+			tmp := ir.Node(typecheck.Temp(r.Type))
+			tmp = typecheck.Expr(tmp)
+			a := convas(ir.NewAssignStmt(base.Pos, l, tmp), &mm)
+			mm.Append(a)
+			l = tmp
+		}
+
+		// Build an ORESULT node addressing the i'th result slot in
+		// the outgoing argument frame.
+		res := ir.NewResultExpr(base.Pos, nil, types.BADWIDTH)
+		res.Offset = base.Ctxt.FixedFrameSize() + r.Offset
+		res.SetType(r.Type)
+		res.SetTypecheck(1)
+
+		a := convas(ir.NewAssignStmt(base.Pos, l, res), &nn)
+		updateHasCall(a)
+		if a.HasCall() {
+			ir.Dump("ascompatet ucount", a)
+			base.Fatalf("ascompatet: too many function calls evaluating parameters")
+		}
+
+		nn.Append(a)
+	}
+	return append(nn, mm...)
+}
+
+// reorder3
+// from ascompatee
+//	a,b = c,d
+// simultaneous assignment. there cannot
+// be later use of an earlier lvalue.
+//
+// function calls have been removed.
+func reorder3(all []*ir.AssignStmt) []ir.Node {
+	// If a needed expression may be affected by an
+	// earlier assignment, make an early copy of that
+	// expression and use the copy instead.
+	var early []ir.Node
+
+	var mapinit ir.Nodes
+	for i, n := range all {
+		l := n.X
+
+		// Save subexpressions needed on left side.
+		// Drill through non-dereferences.
+		for {
+			switch ll := l; ll.Op() {
+			case ir.ODOT:
+				ll := ll.(*ir.SelectorExpr)
+				l = ll.X
+				continue
+			case ir.OPAREN:
+				ll := ll.(*ir.ParenExpr)
+				l = ll.X
+				continue
+			case ir.OINDEX:
+				ll := ll.(*ir.IndexExpr)
+				if ll.X.Type().IsArray() {
+					// Array indexing is not a dereference: the
+					// index must be saved, then keep drilling.
+					ll.Index = reorder3save(ll.Index, all, i, &early)
+					l = ll.X
+					continue
+				}
+			}
+			break
+		}
+
+		switch l.Op() {
+		default:
+			base.Fatalf("reorder3 unexpected lvalue %v", l.Op())
+
+		case ir.ONAME:
+			break
+
+		case ir.OINDEX, ir.OINDEXMAP:
+			l := l.(*ir.IndexExpr)
+			l.X = reorder3save(l.X, all, i, &early)
+			l.Index = reorder3save(l.Index, all, i, &early)
+			if l.Op() == ir.OINDEXMAP {
+				// Map stores were skipped by ascompatee1; apply
+				// convas now that their operands are saved.
+				all[i] = convas(all[i], &mapinit)
+			}
+
+		case ir.ODEREF:
+			l := l.(*ir.StarExpr)
+			l.X = reorder3save(l.X, all, i, &early)
+		case ir.ODOTPTR:
+			l := l.(*ir.SelectorExpr)
+			l.X = reorder3save(l.X, all, i, &early)
+		}
+
+		// Save expression on right side.
+		all[i].Y = reorder3save(all[i].Y, all, i, &early)
+	}
+
+	// Emit map-init statements first, then the saved copies, then the
+	// assignments themselves.
+	early = append(mapinit, early...)
+	for _, as := range all {
+		early = append(early, as)
+	}
+	return early
+}
+
+// if the evaluation of *np would be affected by the
+// assignments in all up to but not including the ith assignment,
+// copy into a temporary during *early and
+// replace *np with that temp.
+// The result of reorder3save MUST be assigned back to n, e.g.
+//	n.Left = reorder3save(n.Left, all, i, early)
+func reorder3save(n ir.Node, all []*ir.AssignStmt, i int, early *[]ir.Node) ir.Node {
+	if !aliased(n, all[:i]) {
+		// No earlier assignment can change n; use it directly.
+		return n
+	}
+
+	// q = n; replace uses of n with the snapshot q.
+	q := ir.Node(typecheck.Temp(n.Type()))
+	as := typecheck.Stmt(ir.NewAssignStmt(base.Pos, q, n))
+	*early = append(*early, as)
+	return q
+}
+
+// Is it possible that the computation of r might be
+// affected by assignments in all?
+func aliased(r ir.Node, all []*ir.AssignStmt) bool {
+	if r == nil {
+		return false
+	}
+
+	// Treat all fields of a struct as referring to the whole struct.
+	// We could do better but we would have to keep track of the fields.
+	for r.Op() == ir.ODOT {
+		r = r.(*ir.SelectorExpr).X
+	}
+
+	// Look for obvious aliasing: a variable being assigned
+	// during the all list and appearing in n.
+	// Also record whether there are any writes to addressable
+	// memory (either main memory or variables whose addresses
+	// have been taken).
+	memwrite := false
+	for _, as := range all {
+		// We can ignore assignments to blank.
+		if ir.IsBlank(as.X) {
+			continue
+		}
+
+		lv := ir.OuterValue(as.X)
+		if lv.Op() != ir.ONAME {
+			// Store through a non-name lvalue (index, deref, ...):
+			// a write to arbitrary memory.
+			memwrite = true
+			continue
+		}
+		l := lv.(*ir.Name)
+
+		switch l.Class_ {
+		default:
+			base.Fatalf("unexpected class: %v, %v", l, l.Class_)
+
+		case ir.PAUTOHEAP, ir.PEXTERN:
+			// Heap-allocated or global variables behave like
+			// general memory writes.
+			memwrite = true
+			continue
+
+		case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT:
+			if l.Name().Addrtaken() {
+				memwrite = true
+				continue
+			}
+
+			if refersToName(l, r) {
+				// Direct hit: l appears in r.
+				return true
+			}
+		}
+	}
+
+	// The variables being written do not appear in r.
+	// However, r might refer to computed addresses
+	// that are being written.
+
+	// If no computed addresses are affected by the writes, no aliasing.
+	if !memwrite {
+		return false
+	}
+
+	// If r does not refer to any variables whose addresses have been taken,
+	// then the only possible writes to r would be directly to the variables,
+	// and we checked those above, so no aliasing problems.
+	if !anyAddrTaken(r) {
+		return false
+	}
+
+	// Otherwise, both the writes and r refer to computed memory addresses.
+	// Assume that they might conflict.
+	return true
+}
+
+// anyAddrTaken reports whether the evaluation n,
+// which appears on the left side of an assignment,
+// may refer to variables whose addresses have been taken.
+func anyAddrTaken(n ir.Node) bool {
+	return ir.Any(n, func(n ir.Node) bool {
+		switch n.Op() {
+		case ir.ONAME:
+			// Globals, heap-moved locals, and address-taken names
+			// are all reachable through memory.
+			n := n.(*ir.Name)
+			return n.Class_ == ir.PEXTERN || n.Class_ == ir.PAUTOHEAP || n.Name().Addrtaken()
+
+		case ir.ODOT: // but not ODOTPTR - should have been handled in aliased.
+			base.Fatalf("anyAddrTaken unexpected ODOT")
+
+		// Whitelist of ops that cannot themselves touch memory;
+		// their operands are still visited by ir.Any.
+		case ir.OADD,
+			ir.OAND,
+			ir.OANDAND,
+			ir.OANDNOT,
+			ir.OBITNOT,
+			ir.OCONV,
+			ir.OCONVIFACE,
+			ir.OCONVNOP,
+			ir.ODIV,
+			ir.ODOTTYPE,
+			ir.OLITERAL,
+			ir.OLSH,
+			ir.OMOD,
+			ir.OMUL,
+			ir.ONEG,
+			ir.ONIL,
+			ir.OOR,
+			ir.OOROR,
+			ir.OPAREN,
+			ir.OPLUS,
+			ir.ORSH,
+			ir.OSUB,
+			ir.OXOR:
+			return false
+		}
+		// Be conservative.
+		return true
+	})
+}
+
+// refersToName reports whether r refers to name.
+// The comparison is by node identity, not by spelling.
+func refersToName(name *ir.Name, r ir.Node) bool {
+	return ir.Any(r, func(r ir.Node) bool {
+		return r.Op() == ir.ONAME && r == name
+	})
+}
+
+// refersToCommonName reports whether any name
+// appears in common between l and r.
+// This is called from sinit.go.
+func refersToCommonName(l ir.Node, r ir.Node) bool {
+	if l == nil || r == nil {
+		return false
+	}
+
+	// This could be written elegantly as a Find nested inside a Find:
+	//
+	//	found := ir.Find(l, func(l ir.Node) interface{} {
+	//		if l.Op() == ir.ONAME {
+	//			return ir.Find(r, func(r ir.Node) interface{} {
+	//				if r.Op() == ir.ONAME && l.Name() == r.Name() {
+	//					return r
+	//				}
+	//				return nil
+	//			})
+	//		}
+	//		return nil
+	//	})
+	//	return found != nil
+	//
+	// But that would allocate a new closure for the inner Find
+	// for each name found on the left side.
+	// It may not matter at all, but the below way of writing it
+	// only allocates two closures, not O(|L|) closures.
+
+	// doL walks l; for each ONAME it finds, it records the name in
+	// targetL and runs doR over r looking for the same name. The
+	// package-level sentinel "stop" short-circuits both walks.
+	var doL, doR func(ir.Node) error
+	var targetL *ir.Name
+	doR = func(r ir.Node) error {
+		if r.Op() == ir.ONAME && r.Name() == targetL {
+			return stop
+		}
+		return ir.DoChildren(r, doR)
+	}
+	doL = func(l ir.Node) error {
+		if l.Op() == ir.ONAME {
+			l := l.(*ir.Name)
+			targetL = l.Name()
+			if doR(r) == stop {
+				return stop
+			}
+		}
+		return ir.DoChildren(l, doL)
+	}
+	return doL(l) == stop
+}
+
+// expand append(l1, l2...) to
+//   init {
+//     s := l1
+//     n := len(s) + len(l2)
+//     // Compare as uint so growslice can panic on overflow.
+//     if uint(n) > uint(cap(s)) {
+//       s = growslice(s, n)
+//     }
+//     s = s[:n]
+//     memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
+//   }
+//   s
+//
+// l2 is allowed to be a string.
+func appendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
+	walkAppendArgs(n, init)
+
+	l1 := n.Args[0]
+	l2 := n.Args[1]
+	l2 = cheapExpr(l2, init)
+	n.Args[1] = l2
+
+	var nodes ir.Nodes
+
+	// var s []T
+	s := typecheck.Temp(l1.Type())
+	nodes.Append(ir.NewAssignStmt(base.Pos, s, l1)) // s = l1
+
+	elemtype := s.Type().Elem()
+
+	// n := len(s) + len(l2)
+	nn := typecheck.Temp(types.Types[types.TINT])
+	nodes.Append(ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, s), ir.NewUnaryExpr(base.Pos, ir.OLEN, l2))))
+
+	// if uint(n) > uint(cap(s))
+	nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
+	nuint := typecheck.Conv(nn, types.Types[types.TUINT])
+	scapuint := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OCAP, s), types.Types[types.TUINT])
+	nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OGT, nuint, scapuint)
+
+	// instantiate growslice(typ *type, []any, int) []any
+	fn := typecheck.LookupRuntime("growslice")
+	fn = typecheck.SubstArgTypes(fn, elemtype, elemtype)
+
+	// s = growslice(T, s, n)
+	nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), reflectdata.TypePtr(elemtype), s, nn))}
+	nodes.Append(nif)
+
+	// s = s[:n]
+	nt := ir.NewSliceExpr(base.Pos, ir.OSLICE, s)
+	nt.SetSliceBounds(nil, nn, nil)
+	nt.SetBounded(true)
+	nodes.Append(ir.NewAssignStmt(base.Pos, s, nt))
+
+	// Three copy strategies: typedslicecopy when the element type has
+	// pointers (write barriers), slicecopy when instrumenting (race/msan
+	// hooks live in the runtime), plain memmove otherwise.
+	var ncopy ir.Node
+	if elemtype.HasPointers() {
+		// copy(s[len(l1):], l2)
+		slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, s)
+		slice.SetType(s.Type())
+		slice.SetSliceBounds(ir.NewUnaryExpr(base.Pos, ir.OLEN, l1), nil, nil)
+
+		ir.CurFunc.SetWBPos(n.Pos())
+
+		// instantiate typedslicecopy(typ *type, dstPtr *any, dstLen int, srcPtr *any, srcLen int) int
+		fn := typecheck.LookupRuntime("typedslicecopy")
+		fn = typecheck.SubstArgTypes(fn, l1.Type().Elem(), l2.Type().Elem())
+		ptr1, len1 := backingArrayPtrLen(cheapExpr(slice, &nodes))
+		ptr2, len2 := backingArrayPtrLen(l2)
+		ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, reflectdata.TypePtr(elemtype), ptr1, len1, ptr2, len2)
+	} else if base.Flag.Cfg.Instrumenting && !base.Flag.CompilingRuntime {
+		// rely on runtime to instrument:
+		//  copy(s[len(l1):], l2)
+		// l2 can be a slice or string.
+		slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, s)
+		slice.SetType(s.Type())
+		slice.SetSliceBounds(ir.NewUnaryExpr(base.Pos, ir.OLEN, l1), nil, nil)
+
+		ptr1, len1 := backingArrayPtrLen(cheapExpr(slice, &nodes))
+		ptr2, len2 := backingArrayPtrLen(l2)
+
+		fn := typecheck.LookupRuntime("slicecopy")
+		fn = typecheck.SubstArgTypes(fn, ptr1.Type().Elem(), ptr2.Type().Elem())
+		ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, ptr1, len1, ptr2, len2, ir.NewInt(elemtype.Width))
+	} else {
+		// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
+		ix := ir.NewIndexExpr(base.Pos, s, ir.NewUnaryExpr(base.Pos, ir.OLEN, l1))
+		ix.SetBounded(true)
+		addr := typecheck.NodAddr(ix)
+
+		sptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, l2)
+
+		nwid := cheapExpr(typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, l2), types.Types[types.TUINTPTR]), &nodes)
+		nwid = ir.NewBinaryExpr(base.Pos, ir.OMUL, nwid, ir.NewInt(elemtype.Width))
+
+		// instantiate func memmove(to *any, frm *any, length uintptr)
+		fn := typecheck.LookupRuntime("memmove")
+		fn = typecheck.SubstArgTypes(fn, elemtype, elemtype)
+		ncopy = mkcall1(fn, nil, &nodes, addr, sptr, nwid)
+	}
+	ln := append(nodes, ncopy)
+
+	typecheck.Stmts(ln)
+	walkStmtList(ln)
+	init.Append(ln...)
+	return s
+}
+
+// isAppendOfMake reports whether n is of the form append(x , make([]T, y)...).
+// isAppendOfMake assumes n has already been typechecked.
+func isAppendOfMake(n ir.Node) bool {
+	// The extendSlice optimization is disabled under -N and when
+	// instrumenting.
+	if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
+		return false
+	}
+
+	if n.Typecheck() == 0 {
+		base.Fatalf("missing typecheck: %+v", n)
+	}
+
+	if n.Op() != ir.OAPPEND {
+		return false
+	}
+	call := n.(*ir.CallExpr)
+	// Must be exactly append(x, make(...)...): variadic, two args,
+	// second arg a make-slice.
+	if !call.IsDDD || len(call.Args) != 2 || call.Args[1].Op() != ir.OMAKESLICE {
+		return false
+	}
+
+	mk := call.Args[1].(*ir.MakeExpr)
+	if mk.Cap != nil {
+		return false
+	}
+
+	// y must be either an integer constant or the largest possible positive value
+	// of variable y needs to fit into an uint.
+
+	// typecheck made sure that constant arguments to make are not negative and fit into an int.
+
+	// The care of overflow of the len argument to make will be handled by an explicit check of int(len) < 0 during runtime.
+	y := mk.Len
+	if !ir.IsConst(y, constant.Int) && y.Type().Size() > types.Types[types.TUINT].Size() {
+		return false
+	}
+
+	return true
+}
+
+// extendSlice rewrites append(l1, make([]T, l2)...) to
+//   init {
+//     if l2 >= 0 { // Empty if block here for more meaningful node.SetLikely(true)
+//     } else {
+//       panicmakeslicelen()
+//     }
+//     s := l1
+//     n := len(s) + l2
+//     // Compare n and s as uint so growslice can panic on overflow of len(s) + l2.
+//     // cap is a positive int and n can become negative when len(s) + l2
+//     // overflows int. Interpreting n when negative as uint makes it larger
+//     // than cap(s). growslice will check the int n arg and panic if n is
+//     // negative. This prevents the overflow from being undetected.
+//     if uint(n) > uint(cap(s)) {
+//       s = growslice(T, s, n)
+//     }
+//     s = s[:n]
+//     lptr := &l1[0]
+//     sptr := &s[0]
+//     if lptr == sptr || !T.HasPointers() {
+//       // growslice did not clear the whole underlying array (or did not get called)
+//       hp := &s[len(l1)]
+//       hn := l2 * sizeof(T)
+//       memclr(hp, hn)
+//     }
+//   }
+//   s
+func extendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
+	// isAppendOfMake made sure all possible positive values of l2 fit into an uint.
+	// The case of l2 overflow when converting from e.g. uint to int is handled by an explicit
+	// check of l2 < 0 at runtime which is generated below.
+	l2 := typecheck.Conv(n.Args[1].(*ir.MakeExpr).Len, types.Types[types.TINT])
+	l2 = typecheck.Expr(l2)
+	n.Args[1] = l2 // walkAppendArgs expects l2 in n.List.Second().
+
+	walkAppendArgs(n, init)
+
+	l1 := n.Args[0]
+	l2 = n.Args[1] // re-read l2, as it may have been updated by walkAppendArgs
+
+	var nodes []ir.Node
+
+	// if l2 >= 0 (likely happens), do nothing
+	nifneg := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGE, l2, ir.NewInt(0)), nil, nil)
+	nifneg.Likely = true
+
+	// else panicmakeslicelen()
+	nifneg.Else = []ir.Node{mkcall("panicmakeslicelen", nil, init)}
+	nodes = append(nodes, nifneg)
+
+	// s := l1
+	s := typecheck.Temp(l1.Type())
+	nodes = append(nodes, ir.NewAssignStmt(base.Pos, s, l1))
+
+	elemtype := s.Type().Elem()
+
+	// n := len(s) + l2
+	nn := typecheck.Temp(types.Types[types.TINT])
+	nodes = append(nodes, ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, s), l2)))
+
+	// if uint(n) > uint(cap(s))
+	nuint := typecheck.Conv(nn, types.Types[types.TUINT])
+	capuint := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OCAP, s), types.Types[types.TUINT])
+	nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGT, nuint, capuint), nil, nil)
+
+	// instantiate growslice(typ *type, old []any, newcap int) []any
+	fn := typecheck.LookupRuntime("growslice")
+	fn = typecheck.SubstArgTypes(fn, elemtype, elemtype)
+
+	// s = growslice(T, s, n)
+	nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), reflectdata.TypePtr(elemtype), s, nn))}
+	nodes = append(nodes, nif)
+
+	// s = s[:n]
+	nt := ir.NewSliceExpr(base.Pos, ir.OSLICE, s)
+	nt.SetSliceBounds(nil, nn, nil)
+	nt.SetBounded(true)
+	nodes = append(nodes, ir.NewAssignStmt(base.Pos, s, nt))
+
+	// lptr := &l1[0]
+	l1ptr := typecheck.Temp(l1.Type().Elem().PtrTo())
+	tmp := ir.NewUnaryExpr(base.Pos, ir.OSPTR, l1)
+	nodes = append(nodes, ir.NewAssignStmt(base.Pos, l1ptr, tmp))
+
+	// sptr := &s[0]
+	sptr := typecheck.Temp(elemtype.PtrTo())
+	tmp = ir.NewUnaryExpr(base.Pos, ir.OSPTR, s)
+	nodes = append(nodes, ir.NewAssignStmt(base.Pos, sptr, tmp))
+
+	// hp := &s[len(l1)]
+	ix := ir.NewIndexExpr(base.Pos, s, ir.NewUnaryExpr(base.Pos, ir.OLEN, l1))
+	ix.SetBounded(true)
+	hp := typecheck.ConvNop(typecheck.NodAddr(ix), types.Types[types.TUNSAFEPTR])
+
+	// hn := l2 * sizeof(elem(s))
+	hn := typecheck.Conv(ir.NewBinaryExpr(base.Pos, ir.OMUL, l2, ir.NewInt(elemtype.Width)), types.Types[types.TUINTPTR])
+
+	clrname := "memclrNoHeapPointers"
+	hasPointers := elemtype.HasPointers()
+	if hasPointers {
+		clrname = "memclrHasPointers"
+		ir.CurFunc.SetWBPos(n.Pos())
+	}
+
+	var clr ir.Nodes
+	clrfn := mkcall(clrname, nil, &clr, hp, hn)
+	clr.Append(clrfn)
+
+	if hasPointers {
+		// Pointer elements: only clear when the backing array was
+		// reused (growslice not called), i.e. l1ptr == sptr.
+		// if l1ptr == sptr
+		nifclr := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OEQ, l1ptr, sptr), nil, nil)
+		nifclr.Body = clr
+		nodes = append(nodes, nifclr)
+	} else {
+		// Pointer-free elements: clear unconditionally.
+		nodes = append(nodes, clr...)
+	}
+
+	typecheck.Stmts(nodes)
+	walkStmtList(nodes)
+	init.Append(nodes...)
+	return s
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "fmt"
+ "go/constant"
+ "go/token"
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/escape"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+)
+
+// Rewrite append(src, x, y, z) so that any side effects in
+// x, y, z (including runtime panics) are evaluated in
+// initialization statements before the append.
+// For normal code generation, stop there and leave the
+// rest to cgen_append.
+//
+// For race detector, expand append(src, a [, b]* ) to
+//
+//   init {
+//     s := src
+//     const argc = len(args) - 1
+//     if cap(s) - len(s) < argc {
+//	    s = growslice(s, len(s)+argc)
+//     }
+//     n := len(s)
+//     s = s[:n+argc]
+//     s[n] = a
+//     s[n+1] = b
+//     ...
+//   }
+//   s
+func walkAppend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node {
+	if !ir.SameSafeExpr(dst, n.Args[0]) {
+		n.Args[0] = safeExpr(n.Args[0], init)
+		n.Args[0] = walkExpr(n.Args[0], init)
+	}
+	walkExprListSafe(n.Args[1:], init)
+
+	nsrc := n.Args[0]
+
+	// walkexprlistsafe will leave OINDEX (s[n]) alone if both s
+	// and n are name or literal, but those may index the slice we're
+	// modifying here. Fix explicitly.
+	// Using cheapexpr also makes sure that the evaluation
+	// of all arguments (and especially any panics) happen
+	// before we begin to modify the slice in a visible way.
+	ls := n.Args[1:]
+	for i, n := range ls {
+		n = cheapExpr(n, init)
+		if !types.Identical(n.Type(), nsrc.Type().Elem()) {
+			// Convert appended values to the slice's element type.
+			n = typecheck.AssignConv(n, nsrc.Type().Elem(), "append")
+			n = walkExpr(n, init)
+		}
+		ls[i] = n
+	}
+
+	argc := len(n.Args) - 1
+	if argc < 1 {
+		// append(s) with no values: result is just the source slice.
+		return nsrc
+	}
+
+	// General case, with no function calls left as arguments.
+	// Leave for gen, except that instrumentation requires old form.
+	if !base.Flag.Cfg.Instrumenting || base.Flag.CompilingRuntime {
+		return n
+	}
+
+	var l []ir.Node
+
+	ns := typecheck.Temp(nsrc.Type())
+	l = append(l, ir.NewAssignStmt(base.Pos, ns, nsrc)) // s = src
+
+	na := ir.NewInt(int64(argc))                 // const argc
+	nif := ir.NewIfStmt(base.Pos, nil, nil, nil) // if cap(s) - len(s) < argc
+	nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, ir.NewBinaryExpr(base.Pos, ir.OSUB, ir.NewUnaryExpr(base.Pos, ir.OCAP, ns), ir.NewUnaryExpr(base.Pos, ir.OLEN, ns)), na)
+
+	fn := typecheck.LookupRuntime("growslice") //   growslice(<type>, old []T, mincap int) (ret []T)
+	fn = typecheck.SubstArgTypes(fn, ns.Type().Elem(), ns.Type().Elem())
+
+	nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, ns, mkcall1(fn, ns.Type(), nif.PtrInit(), reflectdata.TypePtr(ns.Type().Elem()), ns,
+		ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, ns), na)))}
+
+	l = append(l, nif)
+
+	nn := typecheck.Temp(types.Types[types.TINT])
+	l = append(l, ir.NewAssignStmt(base.Pos, nn, ir.NewUnaryExpr(base.Pos, ir.OLEN, ns))) // n = len(s)
+
+	slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, ns) // ...s[:n+argc]
+	slice.SetSliceBounds(nil, ir.NewBinaryExpr(base.Pos, ir.OADD, nn, na), nil)
+	slice.SetBounded(true)
+	l = append(l, ir.NewAssignStmt(base.Pos, ns, slice)) // s = s[:n+argc]
+
+	// Store each appended value into its slot, bumping n between stores.
+	ls = n.Args[1:]
+	for i, n := range ls {
+		ix := ir.NewIndexExpr(base.Pos, ns, nn) // s[n] ...
+		ix.SetBounded(true)
+		l = append(l, ir.NewAssignStmt(base.Pos, ix, n)) // s[n] = arg
+		if i+1 < len(ls) {
+			l = append(l, ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, nn, ir.NewInt(1)))) // n = n + 1
+		}
+	}
+
+	typecheck.Stmts(l)
+	walkStmtList(l)
+	init.Append(l...)
+	return ns
+}
+
+// walkClose walks an OCLOSE node, lowering close(ch) to a
+// runtime.closechan call.
+func walkClose(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
+	// cannot use chanfn - closechan takes any, not chan any
+	fn := typecheck.LookupRuntime("closechan")
+	fn = typecheck.SubstArgTypes(fn, n.X.Type())
+	return mkcall1(fn, nil, init, n.X)
+}
+
+// Lower copy(a, b) to a memmove call or a runtime call.
+//
+// init {
+//   n := len(a)
+//   if n > len(b) { n = len(b) }
+//   if a.ptr != b.ptr { memmove(a.ptr, b.ptr, n*sizeof(elem(a))) }
+// }
+// n;
+//
+// Also works if b is a string.
+//
+func walkCopy(n *ir.BinaryExpr, init *ir.Nodes, runtimecall bool) ir.Node {
+	if n.X.Type().Elem().HasPointers() {
+		// Pointer elements need write barriers: use typedslicecopy.
+		ir.CurFunc.SetWBPos(n.Pos())
+		fn := writebarrierfn("typedslicecopy", n.X.Type().Elem(), n.Y.Type().Elem())
+		n.X = cheapExpr(n.X, init)
+		ptrL, lenL := backingArrayPtrLen(n.X)
+		n.Y = cheapExpr(n.Y, init)
+		ptrR, lenR := backingArrayPtrLen(n.Y)
+		return mkcall1(fn, n.Type(), init, reflectdata.TypePtr(n.X.Type().Elem()), ptrL, lenL, ptrR, lenR)
+	}
+
+	if runtimecall {
+		// rely on runtime to instrument:
+		//  copy(n.Left, n.Right)
+		// n.Right can be a slice or string.
+
+		n.X = cheapExpr(n.X, init)
+		ptrL, lenL := backingArrayPtrLen(n.X)
+		n.Y = cheapExpr(n.Y, init)
+		ptrR, lenR := backingArrayPtrLen(n.Y)
+
+		fn := typecheck.LookupRuntime("slicecopy")
+		fn = typecheck.SubstArgTypes(fn, ptrL.Type().Elem(), ptrR.Type().Elem())
+
+		return mkcall1(fn, n.Type(), init, ptrL, lenL, ptrR, lenR, ir.NewInt(n.X.Type().Elem().Width))
+	}
+
+	// Inline expansion: clamp the length to the shorter operand, then
+	// memmove unless source and destination share a base pointer.
+	n.X = walkExpr(n.X, init)
+	n.Y = walkExpr(n.Y, init)
+	nl := typecheck.Temp(n.X.Type())
+	nr := typecheck.Temp(n.Y.Type())
+	var l []ir.Node
+	l = append(l, ir.NewAssignStmt(base.Pos, nl, n.X))
+	l = append(l, ir.NewAssignStmt(base.Pos, nr, n.Y))
+
+	nfrm := ir.NewUnaryExpr(base.Pos, ir.OSPTR, nr)
+	nto := ir.NewUnaryExpr(base.Pos, ir.OSPTR, nl)
+
+	nlen := typecheck.Temp(types.Types[types.TINT])
+
+	// n = len(to)
+	l = append(l, ir.NewAssignStmt(base.Pos, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nl)))
+
+	// if n > len(frm) { n = len(frm) }
+	nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
+
+	nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OGT, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nr))
+	nif.Body.Append(ir.NewAssignStmt(base.Pos, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nr)))
+	l = append(l, nif)
+
+	// if to.ptr != frm.ptr { memmove( ... ) }
+	ne := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.ONE, nto, nfrm), nil, nil)
+	ne.Likely = true
+	l = append(l, ne)
+
+	fn := typecheck.LookupRuntime("memmove")
+	fn = typecheck.SubstArgTypes(fn, nl.Type().Elem(), nl.Type().Elem())
+	nwid := ir.Node(typecheck.Temp(types.Types[types.TUINTPTR]))
+	setwid := ir.NewAssignStmt(base.Pos, nwid, typecheck.Conv(nlen, types.Types[types.TUINTPTR]))
+	ne.Body.Append(setwid)
+	nwid = ir.NewBinaryExpr(base.Pos, ir.OMUL, nwid, ir.NewInt(nl.Type().Elem().Width))
+	call := mkcall1(fn, nil, init, nto, nfrm, nwid)
+	ne.Body.Append(call)
+
+	typecheck.Stmts(l)
+	walkStmtList(l)
+	init.Append(l...)
+	// The expression's value is the number of elements copied.
+	return nlen
+}
+
+// walkDelete walks an ODELETE node, lowering delete(m, k) to the
+// appropriate runtime mapdelete* call for the map's key type.
+func walkDelete(init *ir.Nodes, n *ir.CallExpr) ir.Node {
+	init.Append(n.PtrInit().Take()...)
+	map_ := n.Args[0]
+	key := n.Args[1]
+	map_ = walkExpr(map_, init)
+	key = walkExpr(key, init)
+
+	t := map_.Type()
+	fast := mapfast(t)
+	if fast == mapslow {
+		// The generic mapdelete takes the key by pointer;
+		// order.stmt made sure key is addressable.
+		key = typecheck.NodAddr(key)
+	}
+	return mkcall1(mapfndel(mapdelete[fast], t), nil, init, reflectdata.TypePtr(t), map_, key)
+}
+
+// walkLenCap walks an OLEN or OCAP node.
+func walkLenCap(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
+	if isRuneCount(n) {
+		// Replace len([]rune(string)) with runtime.countrunes(string).
+		return mkcall("countrunes", n.Type(), init, typecheck.Conv(n.X.(*ir.ConvExpr).X, types.Types[types.TSTRING]))
+	}
+
+	n.X = walkExpr(n.X, init)
+
+	// replace len(*[10]int) with 10.
+	// delayed until now to preserve side effects.
+	t := n.X.Type()
+
+	if t.IsPtr() {
+		t = t.Elem()
+	}
+	if t.IsArray() {
+		// Keep any side effects of the operand, then fold the
+		// length/capacity to the array's constant element count.
+		safeExpr(n.X, init)
+		con := typecheck.OrigInt(n, t.NumElem())
+		con.SetTypecheck(1)
+		return con
+	}
+	return n
+}
+
+// walkMakeChan walks an OMAKECHAN node, lowering make(chan T, size)
+// to a runtime makechan or makechan64 call.
+func walkMakeChan(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
+	// When size fits into int, use makechan instead of
+	// makechan64, which is faster and shorter on 32 bit platforms.
+	size := n.Len
+	fnname := "makechan64"
+	argtype := types.Types[types.TINT64]
+
+	// Type checking guarantees that TIDEAL size is positive and fits in an int.
+	// The case of size overflow when converting TUINT or TUINTPTR to TINT
+	// will be handled by the negative range checks in makechan during runtime.
+	if size.Type().IsKind(types.TIDEAL) || size.Type().Size() <= types.Types[types.TUINT].Size() {
+		fnname = "makechan"
+		argtype = types.Types[types.TINT]
+	}
+
+	return mkcall1(chanfn(fnname, 1, n.Type()), n.Type(), init, reflectdata.TypePtr(n.Type()), typecheck.Conv(size, argtype))
+}
+
+// walkMakeMap walks an OMAKEMAP node, lowering make(map[K]V, hint) to
+// runtime calls, with stack allocation of the hmap (and one bucket)
+// when the map does not escape and the hint is small.
+func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
+	t := n.Type()
+	hmapType := reflectdata.MapType(t)
+	hint := n.Len
+
+	// var h *hmap
+	var h ir.Node
+	if n.Esc() == ir.EscNone {
+		// Allocate hmap on stack.
+
+		// var hv hmap
+		hv := typecheck.Temp(hmapType)
+		init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, hv, nil)))
+		// h = &hv
+		h = typecheck.NodAddr(hv)
+
+		// Allocate one bucket pointed to by hmap.buckets on stack if hint
+		// is not larger than BUCKETSIZE. In case hint is larger than
+		// BUCKETSIZE runtime.makemap will allocate the buckets on the heap.
+		// Maximum key and elem size is 128 bytes, larger objects
+		// are stored with an indirection. So max bucket size is 2048+eps.
+		if !ir.IsConst(hint, constant.Int) ||
+			constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(reflectdata.BUCKETSIZE)) {
+
+			// In case hint is larger than BUCKETSIZE runtime.makemap
+			// will allocate the buckets on the heap, see #20184
+			//
+			// if hint <= BUCKETSIZE {
+			//     var bv bmap
+			//     b = &bv
+			//     h.buckets = b
+			// }
+
+			nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(reflectdata.BUCKETSIZE)), nil, nil)
+			nif.Likely = true
+
+			// var bv bmap
+			bv := typecheck.Temp(reflectdata.MapBucketType(t))
+			nif.Body.Append(ir.NewAssignStmt(base.Pos, bv, nil))
+
+			// b = &bv
+			b := typecheck.NodAddr(bv)
+
+			// h.buckets = b
+			bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap
+			na := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, bsym), b)
+			nif.Body.Append(na)
+			appendWalkStmt(init, nif)
+		}
+	}
+
+	if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(reflectdata.BUCKETSIZE)) {
+		// Handling make(map[any]any) and
+		// make(map[any]any, hint) where hint <= BUCKETSIZE
+		// special allows for faster map initialization and
+		// improves binary size by using calls with fewer arguments.
+		// For hint <= BUCKETSIZE overLoadFactor(hint, 0) is false
+		// and no buckets will be allocated by makemap. Therefore,
+		// no buckets need to be allocated in this code path.
+		if n.Esc() == ir.EscNone {
+			// Only need to initialize h.hash0 since
+			// hmap h has been allocated on the stack already.
+			// h.hash0 = fastrand()
+			rand := mkcall("fastrand", types.Types[types.TUINT32], init)
+			hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap
+			appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, hashsym), rand))
+			return typecheck.ConvNop(h, t)
+		}
+		// Call runtime.makehmap to allocate an
+		// hmap on the heap and initialize hmap's hash0 field.
+		fn := typecheck.LookupRuntime("makemap_small")
+		fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem())
+		return mkcall1(fn, n.Type(), init)
+	}
+
+	if n.Esc() != ir.EscNone {
+		// Escaping map: let makemap allocate the hmap itself.
+		h = typecheck.NodNil()
+	}
+	// Map initialization with a variable or large hint is
+	// more complicated. We therefore generate a call to
+	// runtime.makemap to initialize hmap and allocate the
+	// map buckets.
+
+	// When hint fits into int, use makemap instead of
+	// makemap64, which is faster and shorter on 32 bit platforms.
+	fnname := "makemap64"
+	argtype := types.Types[types.TINT64]
+
+	// Type checking guarantees that TIDEAL hint is positive and fits in an int.
+	// See checkmake call in TMAP case of OMAKE case in OpSwitch in typecheck1 function.
+	// The case of hint overflow when converting TUINT or TUINTPTR to TINT
+	// will be handled by the negative range checks in makemap during runtime.
+	if hint.Type().IsKind(types.TIDEAL) || hint.Type().Size() <= types.Types[types.TUINT].Size() {
+		fnname = "makemap"
+		argtype = types.Types[types.TINT]
+	}
+
+	fn := typecheck.LookupRuntime(fnname)
+	fn = typecheck.SubstArgTypes(fn, hmapType, t.Key(), t.Elem())
+	return mkcall1(fn, n.Type(), init, reflectdata.TypePtr(n.Type()), typecheck.Conv(hint, argtype), h)
+}
+
+// walkMakeSlice walks an OMAKESLICE node.
+// It lowers make([]T, len, cap) either onto the stack (for non-escaping
+// slices with constant capacity) or into a call to runtime.makeslice /
+// runtime.makeslice64.
+func walkMakeSlice(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
+ l := n.Len
+ r := n.Cap
+ if r == nil {
+ // make([]T, len) is shorthand for make([]T, len, len).
+ r = safeExpr(l, init)
+ l = r
+ }
+ t := n.Type()
+ if t.Elem().NotInHeap() {
+ base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
+ }
+ if n.Esc() == ir.EscNone {
+ if why := escape.HeapAllocReason(n); why != "" {
+ base.Fatalf("%v has EscNone, but %v", n, why)
+ }
+ // var arr [r]T
+ // n = arr[:l]
+ i := typecheck.IndexConst(r)
+ if i < 0 {
+ base.Fatalf("walkexpr: invalid index %v", r)
+ }
+
+ // cap is constrained to [0,2^31) or [0,2^63) depending on whether
+ // we're in 32-bit or 64-bit systems. So it's safe to do:
+ //
+ // if uint64(len) > cap {
+ // if len < 0 { panicmakeslicelen() }
+ // panicmakeslicecap()
+ // }
+ nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGT, typecheck.Conv(l, types.Types[types.TUINT64]), ir.NewInt(i)), nil, nil)
+ niflen := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLT, l, ir.NewInt(0)), nil, nil)
+ niflen.Body = []ir.Node{mkcall("panicmakeslicelen", nil, init)}
+ nif.Body.Append(niflen, mkcall("panicmakeslicecap", nil, init))
+ init.Append(typecheck.Stmt(nif))
+
+ t = types.NewArray(t.Elem(), i) // [r]T
+ var_ := typecheck.Temp(t)
+ appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, nil)) // zero temp
+ r := ir.NewSliceExpr(base.Pos, ir.OSLICE, var_) // arr[:l]
+ r.SetSliceBounds(nil, l, nil)
+ // The conv is necessary in case n.Type is named.
+ return walkExpr(typecheck.Expr(typecheck.Conv(r, n.Type())), init)
+ }
+
+ // n escapes; set up a call to makeslice.
+ // When len and cap can fit into int, use makeslice instead of
+ // makeslice64, which is faster and shorter on 32 bit platforms.
+
+ len, cap := l, r
+
+ fnname := "makeslice64"
+ argtype := types.Types[types.TINT64]
+
+ // Type checking guarantees that TIDEAL len/cap are positive and fit in an int.
+ // The case of len or cap overflow when converting TUINT or TUINTPTR to TINT
+ // will be handled by the negative range checks in makeslice during runtime.
+ if (len.Type().IsKind(types.TIDEAL) || len.Type().Size() <= types.Types[types.TUINT].Size()) &&
+ (cap.Type().IsKind(types.TIDEAL) || cap.Type().Size() <= types.Types[types.TUINT].Size()) {
+ fnname = "makeslice"
+ argtype = types.Types[types.TINT]
+ }
+
+ // Build the slice header directly: ptr from the runtime call,
+ // len and cap converted to int.
+ m := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil)
+ m.SetType(t)
+
+ fn := typecheck.LookupRuntime(fnname)
+ m.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.TypePtr(t.Elem()), typecheck.Conv(len, argtype), typecheck.Conv(cap, argtype))
+ m.Ptr.MarkNonNil()
+ m.LenCap = []ir.Node{typecheck.Conv(len, types.Types[types.TINT]), typecheck.Conv(cap, types.Types[types.TINT])}
+ return walkExpr(typecheck.Expr(m), init)
+}
+
+// walkMakeSliceCopy walks an OMAKESLICECOPY node.
+// It lowers the fused make+copy pattern either to
+// runtime.mallocgc+runtime.memmove (pointer-free elements with
+// proven-equal lengths) or to a single runtime.makeslicecopy call.
+func walkMakeSliceCopy(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
+ if n.Esc() == ir.EscNone {
+ base.Fatalf("OMAKESLICECOPY with EscNone: %v", n)
+ }
+
+ t := n.Type()
+ if t.Elem().NotInHeap() {
+ base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
+ }
+
+ // For OMAKESLICECOPY, n.Len is the target length and n.Cap holds
+ // the source slice being copied from.
+ length := typecheck.Conv(n.Len, types.Types[types.TINT])
+ copylen := ir.NewUnaryExpr(base.Pos, ir.OLEN, n.Cap)
+ copyptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, n.Cap)
+
+ if !t.Elem().HasPointers() && n.Bounded() {
+ // When len(to)==len(from) and elements have no pointers:
+ // replace make+copy with runtime.mallocgc+runtime.memmove.
+
+ // We do not check for overflow of len(to)*elem.Width here
+ // since len(from) is an existing checked slice capacity
+ // with same elem.Width for the from slice.
+ size := ir.NewBinaryExpr(base.Pos, ir.OMUL, typecheck.Conv(length, types.Types[types.TUINTPTR]), typecheck.Conv(ir.NewInt(t.Elem().Width), types.Types[types.TUINTPTR]))
+
+ // instantiate mallocgc(size uintptr, typ *byte, needszero bool) unsafe.Pointer
+ fn := typecheck.LookupRuntime("mallocgc")
+ sh := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil)
+ sh.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, size, typecheck.NodNil(), ir.NewBool(false))
+ sh.Ptr.MarkNonNil()
+ sh.LenCap = []ir.Node{length, length}
+ sh.SetType(t)
+
+ s := typecheck.Temp(t)
+ r := typecheck.Stmt(ir.NewAssignStmt(base.Pos, s, sh))
+ r = walkExpr(r, init)
+ init.Append(r)
+
+ // instantiate memmove(to *any, frm *any, size uintptr)
+ fn = typecheck.LookupRuntime("memmove")
+ fn = typecheck.SubstArgTypes(fn, t.Elem(), t.Elem())
+ ncopy := mkcall1(fn, nil, init, ir.NewUnaryExpr(base.Pos, ir.OSPTR, s), copyptr, size)
+ init.Append(walkExpr(typecheck.Stmt(ncopy), init))
+
+ return s
+ }
+ // Replace make+copy with runtime.makeslicecopy.
+ // instantiate makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer
+ fn := typecheck.LookupRuntime("makeslicecopy")
+ s := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil)
+ s.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.TypePtr(t.Elem()), length, copylen, typecheck.Conv(copyptr, types.Types[types.TUNSAFEPTR]))
+ s.Ptr.MarkNonNil()
+ s.LenCap = []ir.Node{length, length}
+ s.SetType(t)
+ return walkExpr(typecheck.Expr(s), init)
+}
+
+// walkNew walks an ONEW node.
+// Non-escaping allocations become a zeroed stack temporary whose address
+// is returned; escaping allocations go through callnew (runtime allocation).
+func walkNew(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
+ if n.Type().Elem().NotInHeap() {
+ base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type().Elem())
+ }
+ if n.Esc() == ir.EscNone {
+ // Escape analysis only marks EscNone for objects below the
+ // implicit stack-variable size limit; anything larger is a bug.
+ if n.Type().Elem().Width >= ir.MaxImplicitStackVarSize {
+ base.Fatalf("large ONEW with EscNone: %v", n)
+ }
+ r := typecheck.Temp(n.Type().Elem())
+ init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, nil))) // zero temp
+ return typecheck.Expr(typecheck.NodAddr(r))
+ }
+ return callnew(n.Type().Elem())
+}
+
+// walkPrint walks an OPRINT or OPRINTN node: it lowers the call into a
+// sequence of type-specific runtime print* calls bracketed by
+// printlock/printunlock.
+func walkPrint(nn *ir.CallExpr, init *ir.Nodes) ir.Node {
+ // Hoist all the argument evaluation up before the lock.
+ walkExprListCheap(nn.Args, init)
+
+ // For println, add " " between elements and "\n" at the end.
+ if nn.Op() == ir.OPRINTN {
+ s := nn.Args
+ t := make([]ir.Node, 0, len(s)*2)
+ for i, n := range s {
+ if i != 0 {
+ t = append(t, ir.NewString(" "))
+ }
+ t = append(t, n)
+ }
+ t = append(t, ir.NewString("\n"))
+ nn.Args.Set(t)
+ }
+
+ // Collapse runs of constant strings.
+ s := nn.Args
+ t := make([]ir.Node, 0, len(s))
+ for i := 0; i < len(s); {
+ var strs []string
+ for i < len(s) && ir.IsConst(s[i], constant.String) {
+ strs = append(strs, ir.StringVal(s[i]))
+ i++
+ }
+ if len(strs) > 0 {
+ t = append(t, ir.NewString(strings.Join(strs, "")))
+ }
+ if i < len(s) {
+ t = append(t, s[i])
+ i++
+ }
+ }
+ nn.Args.Set(t)
+
+ // Emit one runtime call per remaining argument, choosing the
+ // print helper from the argument's type, all under printlock.
+ calls := []ir.Node{mkcall("printlock", nil, init)}
+ for i, n := range nn.Args {
+ if n.Op() == ir.OLITERAL {
+ // Give untyped constants a concrete default type so a
+ // printer can be selected.
+ if n.Type() == types.UntypedRune {
+ n = typecheck.DefaultLit(n, types.RuneType)
+ }
+
+ switch n.Val().Kind() {
+ case constant.Int:
+ n = typecheck.DefaultLit(n, types.Types[types.TINT64])
+
+ case constant.Float:
+ n = typecheck.DefaultLit(n, types.Types[types.TFLOAT64])
+ }
+ }
+
+ if n.Op() != ir.OLITERAL && n.Type() != nil && n.Type().Kind() == types.TIDEAL {
+ n = typecheck.DefaultLit(n, types.Types[types.TINT64])
+ }
+ n = typecheck.DefaultLit(n, nil)
+ nn.Args[i] = n
+ if n.Type() == nil || n.Type().Kind() == types.TFORW {
+ continue
+ }
+
+ var on *ir.Name
+ switch n.Type().Kind() {
+ case types.TINTER:
+ if n.Type().IsEmptyInterface() {
+ on = typecheck.LookupRuntime("printeface")
+ } else {
+ on = typecheck.LookupRuntime("printiface")
+ }
+ on = typecheck.SubstArgTypes(on, n.Type()) // any-1
+ case types.TPTR:
+ if n.Type().Elem().NotInHeap() {
+ // Pointers to not-in-heap types cannot be converted
+ // directly to uintptr; go via unsafe.Pointer.
+ on = typecheck.LookupRuntime("printuintptr")
+ n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
+ n.SetType(types.Types[types.TUNSAFEPTR])
+ n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
+ n.SetType(types.Types[types.TUINTPTR])
+ break
+ }
+ fallthrough
+ case types.TCHAN, types.TMAP, types.TFUNC, types.TUNSAFEPTR:
+ on = typecheck.LookupRuntime("printpointer")
+ on = typecheck.SubstArgTypes(on, n.Type()) // any-1
+ case types.TSLICE:
+ on = typecheck.LookupRuntime("printslice")
+ on = typecheck.SubstArgTypes(on, n.Type()) // any-1
+ case types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINTPTR:
+ if types.IsRuntimePkg(n.Type().Sym().Pkg) && n.Type().Sym().Name == "hex" {
+ on = typecheck.LookupRuntime("printhex")
+ } else {
+ on = typecheck.LookupRuntime("printuint")
+ }
+ case types.TINT, types.TINT8, types.TINT16, types.TINT32, types.TINT64:
+ on = typecheck.LookupRuntime("printint")
+ case types.TFLOAT32, types.TFLOAT64:
+ on = typecheck.LookupRuntime("printfloat")
+ case types.TCOMPLEX64, types.TCOMPLEX128:
+ on = typecheck.LookupRuntime("printcomplex")
+ case types.TBOOL:
+ on = typecheck.LookupRuntime("printbool")
+ case types.TSTRING:
+ // Special-case the separator and newline strings inserted
+ // above so they use the dedicated fast helpers.
+ cs := ""
+ if ir.IsConst(n, constant.String) {
+ cs = ir.StringVal(n)
+ }
+ switch cs {
+ case " ":
+ on = typecheck.LookupRuntime("printsp")
+ case "\n":
+ on = typecheck.LookupRuntime("printnl")
+ default:
+ on = typecheck.LookupRuntime("printstring")
+ }
+ default:
+ badtype(ir.OPRINT, n.Type(), nil)
+ continue
+ }
+
+ r := ir.NewCallExpr(base.Pos, ir.OCALL, on, nil)
+ if params := on.Type().Params().FieldSlice(); len(params) > 0 {
+ t := params[0].Type
+ if !types.Identical(t, n.Type()) {
+ n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
+ n.SetType(t)
+ }
+ r.Args.Append(n)
+ }
+ calls = append(calls, r)
+ }
+
+ calls = append(calls, mkcall("printunlock", nil, init))
+
+ typecheck.Stmts(calls)
+ walkExprList(calls, init)
+
+ r := ir.NewBlockStmt(base.Pos, nil)
+ r.List.Set(calls)
+ return walkStmt(typecheck.Stmt(r))
+}
+
+// badtype reports an "illegal types for operand" error for op,
+// including the operand types tl and tr when they are non-nil.
+func badtype(op ir.Op, tl, tr *types.Type) {
+ var s string
+ if tl != nil {
+ s += fmt.Sprintf("\n\t%v", tl)
+ }
+ if tr != nil {
+ s += fmt.Sprintf("\n\t%v", tr)
+ }
+
+ // common mistake: *struct and *interface.
+ if tl != nil && tr != nil && tl.IsPtr() && tr.IsPtr() {
+ if tl.Elem().IsStruct() && tr.Elem().IsInterface() {
+ s += "\n\t(*struct vs *interface)"
+ } else if tl.Elem().IsInterface() && tr.Elem().IsStruct() {
+ s += "\n\t(*interface vs *struct)"
+ }
+ }
+
+ base.Errorf("illegal types for operand: %v%s", op, s)
+}
+
+// callnew returns an ONEWOBJ node that heap-allocates a new, zeroed
+// object of type t; the result is a non-nil *t.
+func callnew(t *types.Type) ir.Node {
+ types.CalcSize(t)
+ n := ir.NewUnaryExpr(base.Pos, ir.ONEWOBJ, reflectdata.TypePtr(t))
+ n.SetType(types.NewPtr(t))
+ n.SetTypecheck(1)
+ n.MarkNonNil()
+ return n
+}
+
+// writebarrierfn looks up the named runtime write-barrier function and
+// substitutes types l and r for its generic argument types.
+func writebarrierfn(name string, l *types.Type, r *types.Type) ir.Node {
+ fn := typecheck.LookupRuntime(name)
+ fn = typecheck.SubstArgTypes(fn, l, r)
+ return fn
+}
+
+// isRuneCount reports whether n is of the form len([]rune(string)).
+// These are optimized into a call to runtime.countrunes.
+// The rewrite is suppressed when optimizations are disabled (-N) or
+// when instrumenting.
+func isRuneCount(n ir.Node) bool {
+ return base.Flag.N == 0 && !base.Flag.Cfg.Instrumenting && n.Op() == ir.OLEN && n.(*ir.UnaryExpr).X.Op() == ir.OSTR2RUNES
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// Closure is called in a separate phase after escape analysis.
+// It transforms closure bodies to properly reference captured variables.
+func Closure(fn *ir.Func) {
+ lno := base.Pos
+ base.Pos = fn.Pos()
+
+ if fn.ClosureCalled() {
+ // If the closure is directly called, we transform it to a plain function call
+ // with variables passed as args. This avoids allocation of a closure object.
+ // Here we do only a part of the transformation. Walk of OCALLFUNC(OCLOSURE)
+ // will complete the transformation later.
+ // For illustration, the following closure:
+ // func(a int) {
+ // println(byval)
+ // byref++
+ // }(42)
+ // becomes:
+ // func(byval int, &byref *int, a int) {
+ // println(byval)
+ // (*&byref)++
+ // }(byval, &byref, 42)
+
+ // f is ONAME of the actual function.
+ f := fn.Nname
+
+ // We are going to insert captured variables before input args.
+ var params []*types.Field
+ var decls []*ir.Name
+ for _, v := range fn.ClosureVars {
+ if !v.Byval() {
+ // If v of type T is captured by reference,
+ // we introduce function param &v *T
+ // and v remains PAUTOHEAP with &v heapaddr
+ // (accesses will implicitly deref &v).
+ addr := typecheck.NewName(typecheck.Lookup("&" + v.Sym().Name))
+ addr.SetType(types.NewPtr(v.Type()))
+ v.Heapaddr = addr
+ v = addr
+ }
+
+ v.Class_ = ir.PPARAM
+ decls = append(decls, v)
+
+ fld := types.NewField(src.NoXPos, v.Sym(), v.Type())
+ fld.Nname = v
+ params = append(params, fld)
+ }
+
+ if len(params) > 0 {
+ // Prepend params and decls.
+ f.Type().Params().SetFields(append(params, f.Type().Params().FieldSlice()...))
+ fn.Dcl = append(decls, fn.Dcl...)
+ }
+
+ types.CalcSize(f.Type())
+ fn.SetType(f.Type()) // update type of ODCLFUNC
+ } else {
+ // The closure is not called, so it is going to stay as closure.
+ var body []ir.Node
+ // Captured-variable fields start after the leading
+ // function-pointer word of the closure struct.
+ offset := int64(types.PtrSize)
+ for _, v := range fn.ClosureVars {
+ // cv refers to the field inside of closure OSTRUCTLIT.
+ typ := v.Type()
+ if !v.Byval() {
+ typ = types.NewPtr(typ)
+ }
+ offset = types.Rnd(offset, int64(typ.Align))
+ cr := ir.NewClosureRead(typ, offset)
+ offset += typ.Width
+
+ if v.Byval() && v.Type().Width <= int64(2*types.PtrSize) {
+ // If it is a small variable captured by value, downgrade it to PAUTO.
+ v.Class_ = ir.PAUTO
+ fn.Dcl = append(fn.Dcl, v)
+ body = append(body, ir.NewAssignStmt(base.Pos, v, cr))
+ } else {
+ // Declare variable holding addresses taken from closure
+ // and initialize in entry prologue.
+ addr := typecheck.NewName(typecheck.Lookup("&" + v.Sym().Name))
+ addr.SetType(types.NewPtr(v.Type()))
+ addr.Class_ = ir.PAUTO
+ addr.SetUsed(true)
+ addr.Curfn = fn
+ fn.Dcl = append(fn.Dcl, addr)
+ v.Heapaddr = addr
+ var src ir.Node = cr
+ if v.Byval() {
+ src = typecheck.NodAddr(cr)
+ }
+ body = append(body, ir.NewAssignStmt(base.Pos, addr, src))
+ }
+ }
+
+ if len(body) > 0 {
+ typecheck.Stmts(body)
+ fn.Enter.Set(body)
+ fn.SetNeedctxt(true)
+ }
+ }
+
+ base.Pos = lno
+}
+
+// walkClosure walks an OCLOSURE node. A closure with no captured
+// variables is reduced to a direct reference to its function; otherwise
+// it is materialized as a composite literal {fn, captured vars...} whose
+// address is converted to the closure's func type.
+func walkClosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node {
+ fn := clo.Func
+
+ // If no closure vars, don't bother wrapping.
+ if ir.IsTrivialClosure(clo) {
+ if base.Debug.Closure > 0 {
+ base.WarnfAt(clo.Pos(), "closure converted to global")
+ }
+ return fn.Nname
+ }
+ ir.ClosureDebugRuntimeCheck(clo)
+
+ typ := typecheck.ClosureType(clo)
+
+ clos := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ).(ir.Ntype), nil)
+ clos.SetEsc(clo.Esc())
+ clos.List.Set(append([]ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, fn.Nname)}, fn.ClosureEnter...))
+
+ addr := typecheck.NodAddr(clos)
+ addr.SetEsc(clo.Esc())
+
+ // Force type conversion from *struct to the func type.
+ cfn := typecheck.ConvNop(addr, clo.Type())
+
+ // non-escaping temp to use, if any.
+ if x := clo.Prealloc; x != nil {
+ if !types.Identical(typ, x.Type()) {
+ panic("closure type does not match order's assigned type")
+ }
+ addr.Alloc = x
+ clo.Prealloc = nil
+ }
+
+ return walkExpr(cfn, init)
+}
+
+// walkCallPart walks an OCALLPART node (a method value x.M).
+func walkCallPart(n *ir.CallPartExpr, init *ir.Nodes) ir.Node {
+ // Create closure in the form of a composite literal.
+ // For x.M with receiver (x) type T, the generated code looks like:
+ //
+ // clos = &struct{F uintptr; R T}{T.M·f, x}
+ //
+ // Like walkclosure above.
+
+ if n.X.Type().IsInterface() {
+ // Trigger panic for method on nil interface now.
+ // Otherwise it happens in the wrapper and is confusing.
+ n.X = cheapExpr(n.X, init)
+ n.X = walkExpr(n.X, nil)
+
+ tab := typecheck.Expr(ir.NewUnaryExpr(base.Pos, ir.OITAB, n.X))
+
+ c := ir.NewUnaryExpr(base.Pos, ir.OCHECKNIL, tab)
+ c.SetTypecheck(1)
+ init.Append(c)
+ }
+
+ typ := typecheck.PartialCallType(n)
+
+ clos := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ).(ir.Ntype), nil)
+ clos.SetEsc(n.Esc())
+ clos.List = []ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, n.Func.Nname), n.X}
+
+ addr := typecheck.NodAddr(clos)
+ addr.SetEsc(n.Esc())
+
+ // Force type conversion from *struct to the func type.
+ cfn := typecheck.ConvNop(addr, n.Type())
+
+ // non-escaping temp to use, if any.
+ if x := n.Prealloc; x != nil {
+ if !types.Identical(typ, x.Type()) {
+ panic("partial call type does not match order's assigned type")
+ }
+ addr.Alloc = x
+ n.Prealloc = nil
+ }
+
+ return walkExpr(cfn, init)
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "encoding/binary"
+ "go/constant"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/sys"
+)
+
+// walkCompare walks a comparison (==, !=, <, ...) node, dispatching to
+// interface/string helpers, rewriting mixed interface/concrete equality,
+// and inlining or calling eq algs for array/struct comparisons.
+//
+// The result of walkCompare MUST be assigned back to n, e.g.
+// n.Left = walkCompare(n.Left, init)
+func walkCompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
+ if n.X.Type().IsInterface() && n.Y.Type().IsInterface() && n.X.Op() != ir.ONIL && n.Y.Op() != ir.ONIL {
+ return walkCompareInterface(n, init)
+ }
+
+ if n.X.Type().IsString() && n.Y.Type().IsString() {
+ return walkCompareString(n, init)
+ }
+
+ n.X = walkExpr(n.X, init)
+ n.Y = walkExpr(n.Y, init)
+
+ // Given mixed interface/concrete comparison,
+ // rewrite into types-equal && data-equal.
+ // This is efficient, avoids allocations, and avoids runtime calls.
+ if n.X.Type().IsInterface() != n.Y.Type().IsInterface() {
+ // Preserve side-effects in case of short-circuiting; see #32187.
+ l := cheapExpr(n.X, init)
+ r := cheapExpr(n.Y, init)
+ // Swap so that l is the interface value and r is the concrete value.
+ if n.Y.Type().IsInterface() {
+ l, r = r, l
+ }
+
+ // Handle both == and !=.
+ eq := n.Op()
+ andor := ir.OOROR
+ if eq == ir.OEQ {
+ andor = ir.OANDAND
+ }
+ // Check for types equal.
+ // For empty interface, this is:
+ // l.tab == type(r)
+ // For non-empty interface, this is:
+ // l.tab != nil && l.tab._type == type(r)
+ var eqtype ir.Node
+ tab := ir.NewUnaryExpr(base.Pos, ir.OITAB, l)
+ rtyp := reflectdata.TypePtr(r.Type())
+ if l.Type().IsEmptyInterface() {
+ tab.SetType(types.NewPtr(types.Types[types.TUINT8]))
+ tab.SetTypecheck(1)
+ eqtype = ir.NewBinaryExpr(base.Pos, eq, tab, rtyp)
+ } else {
+ nonnil := ir.NewBinaryExpr(base.Pos, brcom(eq), typecheck.NodNil(), tab)
+ match := ir.NewBinaryExpr(base.Pos, eq, itabType(tab), rtyp)
+ eqtype = ir.NewLogicalExpr(base.Pos, andor, nonnil, match)
+ }
+ // Check for data equal.
+ eqdata := ir.NewBinaryExpr(base.Pos, eq, ifaceData(n.Pos(), l, r.Type()), r)
+ // Put it all together.
+ expr := ir.NewLogicalExpr(base.Pos, andor, eqtype, eqdata)
+ return finishCompare(n, expr, init)
+ }
+
+ // Must be comparison of array or struct.
+ // Otherwise back end handles it.
+ // While we're here, decide whether to
+ // inline or call an eq alg.
+ t := n.X.Type()
+ var inline bool
+
+ maxcmpsize := int64(4)
+ unalignedLoad := canMergeLoads()
+ if unalignedLoad {
+ // Keep this low enough to generate less code than a function call.
+ maxcmpsize = 2 * int64(ssagen.Arch.LinkArch.RegSize)
+ }
+
+ switch t.Kind() {
+ default:
+ if base.Debug.Libfuzzer != 0 && t.IsInteger() {
+ n.X = cheapExpr(n.X, init)
+ n.Y = cheapExpr(n.Y, init)
+
+ // If exactly one comparison operand is
+ // constant, invoke the constcmp functions
+ // instead, and arrange for the constant
+ // operand to be the first argument.
+ l, r := n.X, n.Y
+ if r.Op() == ir.OLITERAL {
+ l, r = r, l
+ }
+ constcmp := l.Op() == ir.OLITERAL && r.Op() != ir.OLITERAL
+
+ var fn string
+ var paramType *types.Type
+ switch t.Size() {
+ case 1:
+ fn = "libfuzzerTraceCmp1"
+ if constcmp {
+ fn = "libfuzzerTraceConstCmp1"
+ }
+ paramType = types.Types[types.TUINT8]
+ case 2:
+ fn = "libfuzzerTraceCmp2"
+ if constcmp {
+ fn = "libfuzzerTraceConstCmp2"
+ }
+ paramType = types.Types[types.TUINT16]
+ case 4:
+ fn = "libfuzzerTraceCmp4"
+ if constcmp {
+ fn = "libfuzzerTraceConstCmp4"
+ }
+ paramType = types.Types[types.TUINT32]
+ case 8:
+ fn = "libfuzzerTraceCmp8"
+ if constcmp {
+ fn = "libfuzzerTraceConstCmp8"
+ }
+ paramType = types.Types[types.TUINT64]
+ default:
+ base.Fatalf("unexpected integer size %d for %v", t.Size(), t)
+ }
+ init.Append(mkcall(fn, nil, init, tracecmpArg(l, paramType, init), tracecmpArg(r, paramType, init)))
+ }
+ return n
+ case types.TARRAY:
+ // We can compare several elements at once with 2/4/8 byte integer compares
+ inline = t.NumElem() <= 1 || (types.IsSimple[t.Elem().Kind()] && (t.NumElem() <= 4 || t.Elem().Width*t.NumElem() <= maxcmpsize))
+ case types.TSTRUCT:
+ inline = t.NumComponents(types.IgnoreBlankFields) <= 4
+ }
+
+ // Strip no-op conversions so the eq alg / inline comparison sees
+ // the underlying values.
+ cmpl := n.X
+ for cmpl != nil && cmpl.Op() == ir.OCONVNOP {
+ cmpl = cmpl.(*ir.ConvExpr).X
+ }
+ cmpr := n.Y
+ for cmpr != nil && cmpr.Op() == ir.OCONVNOP {
+ cmpr = cmpr.(*ir.ConvExpr).X
+ }
+
+ // Chose not to inline. Call equality function directly.
+ if !inline {
+ // eq algs take pointers; cmpl and cmpr must be addressable
+ if !ir.IsAssignable(cmpl) || !ir.IsAssignable(cmpr) {
+ base.Fatalf("arguments of comparison must be lvalues - %v %v", cmpl, cmpr)
+ }
+
+ fn, needsize := eqFor(t)
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
+ call.Args.Append(typecheck.NodAddr(cmpl))
+ call.Args.Append(typecheck.NodAddr(cmpr))
+ if needsize {
+ call.Args.Append(ir.NewInt(t.Width))
+ }
+ res := ir.Node(call)
+ if n.Op() != ir.OEQ {
+ res = ir.NewUnaryExpr(base.Pos, ir.ONOT, res)
+ }
+ return finishCompare(n, res, init)
+ }
+
+ // inline: build boolean expression comparing element by element
+ andor := ir.OANDAND
+ if n.Op() == ir.ONE {
+ andor = ir.OOROR
+ }
+ var expr ir.Node
+ compare := func(el, er ir.Node) {
+ a := ir.NewBinaryExpr(base.Pos, n.Op(), el, er)
+ if expr == nil {
+ expr = a
+ } else {
+ expr = ir.NewLogicalExpr(base.Pos, andor, expr, a)
+ }
+ }
+ cmpl = safeExpr(cmpl, init)
+ cmpr = safeExpr(cmpr, init)
+ if t.IsStruct() {
+ for _, f := range t.Fields().Slice() {
+ sym := f.Sym
+ if sym.IsBlank() {
+ continue
+ }
+ compare(
+ ir.NewSelectorExpr(base.Pos, ir.OXDOT, cmpl, sym),
+ ir.NewSelectorExpr(base.Pos, ir.OXDOT, cmpr, sym),
+ )
+ }
+ } else {
+ step := int64(1)
+ remains := t.NumElem() * t.Elem().Width
+ combine64bit := unalignedLoad && types.RegSize == 8 && t.Elem().Width <= 4 && t.Elem().IsInteger()
+ combine32bit := unalignedLoad && t.Elem().Width <= 2 && t.Elem().IsInteger()
+ combine16bit := unalignedLoad && t.Elem().Width == 1 && t.Elem().IsInteger()
+ for i := int64(0); remains > 0; {
+ var convType *types.Type
+ switch {
+ case remains >= 8 && combine64bit:
+ convType = types.Types[types.TINT64]
+ step = 8 / t.Elem().Width
+ case remains >= 4 && combine32bit:
+ convType = types.Types[types.TUINT32]
+ step = 4 / t.Elem().Width
+ case remains >= 2 && combine16bit:
+ convType = types.Types[types.TUINT16]
+ step = 2 / t.Elem().Width
+ default:
+ step = 1
+ }
+ if step == 1 {
+ compare(
+ ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(i)),
+ ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(i)),
+ )
+ i++
+ remains -= t.Elem().Width
+ } else {
+ elemType := t.Elem().ToUnsigned()
+ cmplw := ir.Node(ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(i)))
+ cmplw = typecheck.Conv(cmplw, elemType) // convert to unsigned
+ cmplw = typecheck.Conv(cmplw, convType) // widen
+ cmprw := ir.Node(ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(i)))
+ cmprw = typecheck.Conv(cmprw, elemType)
+ cmprw = typecheck.Conv(cmprw, convType)
+ // For code like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ...
+ // ssa will generate a single large load.
+ for offset := int64(1); offset < step; offset++ {
+ lb := ir.Node(ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(i+offset)))
+ lb = typecheck.Conv(lb, elemType)
+ lb = typecheck.Conv(lb, convType)
+ lb = ir.NewBinaryExpr(base.Pos, ir.OLSH, lb, ir.NewInt(8*t.Elem().Width*offset))
+ cmplw = ir.NewBinaryExpr(base.Pos, ir.OOR, cmplw, lb)
+ rb := ir.Node(ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(i+offset)))
+ rb = typecheck.Conv(rb, elemType)
+ rb = typecheck.Conv(rb, convType)
+ rb = ir.NewBinaryExpr(base.Pos, ir.OLSH, rb, ir.NewInt(8*t.Elem().Width*offset))
+ cmprw = ir.NewBinaryExpr(base.Pos, ir.OOR, cmprw, rb)
+ }
+ compare(cmplw, cmprw)
+ i += step
+ remains -= step * t.Elem().Width
+ }
+ }
+ }
+ if expr == nil {
+ expr = ir.NewBool(n.Op() == ir.OEQ)
+ // We still need to use cmpl and cmpr, in case they contain
+ // an expression which might panic. See issue 23837.
+ t := typecheck.Temp(cmpl.Type())
+ a1 := typecheck.Stmt(ir.NewAssignStmt(base.Pos, t, cmpl))
+ a2 := typecheck.Stmt(ir.NewAssignStmt(base.Pos, t, cmpr))
+ init.Append(a1, a2)
+ }
+ return finishCompare(n, expr, init)
+}
+
+// walkCompareInterface walks a comparison between two interface values,
+// combining the itab and data equality tests from reflectdata.EqInterface
+// (negating both for !=).
+func walkCompareInterface(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
+ n.Y = cheapExpr(n.Y, init)
+ n.X = cheapExpr(n.X, init)
+ eqtab, eqdata := reflectdata.EqInterface(n.X, n.Y)
+ var cmp ir.Node
+ if n.Op() == ir.OEQ {
+ cmp = ir.NewLogicalExpr(base.Pos, ir.OANDAND, eqtab, eqdata)
+ } else {
+ eqtab.SetOp(ir.ONE)
+ cmp = ir.NewLogicalExpr(base.Pos, ir.OOROR, eqtab, ir.NewUnaryExpr(base.Pos, ir.ONOT, eqdata))
+ }
+ return finishCompare(n, cmp, init)
+}
+
+// walkCompareString walks a comparison between string operands.
+// ==/!= against a short constant string becomes a length check plus
+// byte-wise (word-wise where loads can be merged) comparisons; other
+// ==/!= use reflectdata.EqString, and ordered comparisons are lowered
+// to runtime.cmpstring.
+func walkCompareString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
+ // Rewrite comparisons to short constant strings as length+byte-wise comparisons.
+ var cs, ncs ir.Node // const string, non-const string
+ switch {
+ case ir.IsConst(n.X, constant.String) && ir.IsConst(n.Y, constant.String):
+ // ignore; will be constant evaluated
+ case ir.IsConst(n.X, constant.String):
+ cs = n.X
+ ncs = n.Y
+ case ir.IsConst(n.Y, constant.String):
+ cs = n.Y
+ ncs = n.X
+ }
+ if cs != nil {
+ cmp := n.Op()
+ // Our comparison below assumes that the non-constant string
+ // is on the left hand side, so rewrite "" cmp x to x cmp "".
+ // See issue 24817.
+ if ir.IsConst(n.X, constant.String) {
+ cmp = brrev(cmp)
+ }
+
+ // maxRewriteLen was chosen empirically.
+ // It is the value that minimizes cmd/go file size
+ // across most architectures.
+ // See the commit description for CL 26758 for details.
+ maxRewriteLen := 6
+ // Some architectures can load unaligned byte sequence as 1 word.
+ // So we can cover longer strings with the same amount of code.
+ canCombineLoads := canMergeLoads()
+ combine64bit := false
+ if canCombineLoads {
+ // Keep this low enough to generate less code than a function call.
+ maxRewriteLen = 2 * ssagen.Arch.LinkArch.RegSize
+ combine64bit = ssagen.Arch.LinkArch.RegSize >= 8
+ }
+
+ var and ir.Op
+ switch cmp {
+ case ir.OEQ:
+ and = ir.OANDAND
+ case ir.ONE:
+ and = ir.OOROR
+ default:
+ // Don't do byte-wise comparisons for <, <=, etc.
+ // They're fairly complicated.
+ // Length-only checks are ok, though.
+ maxRewriteLen = 0
+ }
+ if s := ir.StringVal(cs); len(s) <= maxRewriteLen {
+ if len(s) > 0 {
+ ncs = safeExpr(ncs, init)
+ }
+ // Start with the length comparison; byte comparisons are
+ // and-ed/or-ed onto r below.
+ r := ir.Node(ir.NewBinaryExpr(base.Pos, cmp, ir.NewUnaryExpr(base.Pos, ir.OLEN, ncs), ir.NewInt(int64(len(s)))))
+ remains := len(s)
+ for i := 0; remains > 0; {
+ if remains == 1 || !canCombineLoads {
+ cb := ir.NewInt(int64(s[i]))
+ ncb := ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(int64(i)))
+ r = ir.NewLogicalExpr(base.Pos, and, r, ir.NewBinaryExpr(base.Pos, cmp, ncb, cb))
+ remains--
+ i++
+ continue
+ }
+ var step int
+ var convType *types.Type
+ switch {
+ case remains >= 8 && combine64bit:
+ convType = types.Types[types.TINT64]
+ step = 8
+ case remains >= 4:
+ convType = types.Types[types.TUINT32]
+ step = 4
+ case remains >= 2:
+ convType = types.Types[types.TUINT16]
+ step = 2
+ }
+ ncsubstr := typecheck.Conv(ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(int64(i))), convType)
+ csubstr := int64(s[i])
+ // Calculate large constant from bytes as sequence of shifts and ors.
+ // Like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ...
+ // ssa will combine this into a single large load.
+ for offset := 1; offset < step; offset++ {
+ b := typecheck.Conv(ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(int64(i+offset))), convType)
+ b = ir.NewBinaryExpr(base.Pos, ir.OLSH, b, ir.NewInt(int64(8*offset)))
+ ncsubstr = ir.NewBinaryExpr(base.Pos, ir.OOR, ncsubstr, b)
+ csubstr |= int64(s[i+offset]) << uint8(8*offset)
+ }
+ csubstrPart := ir.NewInt(csubstr)
+ // Compare "step" bytes as once
+ r = ir.NewLogicalExpr(base.Pos, and, r, ir.NewBinaryExpr(base.Pos, cmp, csubstrPart, ncsubstr))
+ remains -= step
+ i += step
+ }
+ return finishCompare(n, r, init)
+ }
+ }
+
+ var r ir.Node
+ if n.Op() == ir.OEQ || n.Op() == ir.ONE {
+ // prepare for rewrite below
+ n.X = cheapExpr(n.X, init)
+ n.Y = cheapExpr(n.Y, init)
+ eqlen, eqmem := reflectdata.EqString(n.X, n.Y)
+ // quick check of len before full compare for == or !=.
+ // memequal then tests equality up to length len.
+ if n.Op() == ir.OEQ {
+ // len(left) == len(right) && memequal(left, right, len)
+ r = ir.NewLogicalExpr(base.Pos, ir.OANDAND, eqlen, eqmem)
+ } else {
+ // len(left) != len(right) || !memequal(left, right, len)
+ eqlen.SetOp(ir.ONE)
+ r = ir.NewLogicalExpr(base.Pos, ir.OOROR, eqlen, ir.NewUnaryExpr(base.Pos, ir.ONOT, eqmem))
+ }
+ } else {
+ // sys_cmpstring(s1, s2) :: 0
+ r = mkcall("cmpstring", types.Types[types.TINT], init, typecheck.Conv(n.X, types.Types[types.TSTRING]), typecheck.Conv(n.Y, types.Types[types.TSTRING]))
+ r = ir.NewBinaryExpr(base.Pos, n.Op(), r, ir.NewInt(0))
+ }
+
+ return finishCompare(n, r, init)
+}
+
+// finishCompare typechecks the rewritten comparison r, converts it to
+// n's result type, and walks it.
+//
+// The result of finishCompare MUST be assigned back to n, e.g.
+// n.Left = finishCompare(n.Left, r, init)
+func finishCompare(n *ir.BinaryExpr, r ir.Node, init *ir.Nodes) ir.Node {
+ r = typecheck.Expr(r)
+ r = typecheck.Conv(r, n.Type())
+ r = walkExpr(r, init)
+ return r
+}
+
+// eqFor returns a node representing the equality function for type t,
+// and reports whether a size argument must be passed when calling it
+// (true for the generic memequal, false for generated .eq functions).
+func eqFor(t *types.Type) (n ir.Node, needsize bool) {
+ // Should only arrive here with large memory or
+ // a struct/array containing a non-memory field/element.
+ // Small memory is handled inline, and single non-memory
+ // is handled by walkcompare.
+ switch a, _ := types.AlgType(t); a {
+ case types.AMEM:
+ n := typecheck.LookupRuntime("memequal")
+ n = typecheck.SubstArgTypes(n, t, t)
+ return n, true
+ case types.ASPECIAL:
+ sym := reflectdata.TypeSymPrefix(".eq", t)
+ n := typecheck.NewName(sym)
+ ir.MarkFunc(n)
+ n.SetType(typecheck.NewFuncType(nil, []*ir.Field{
+ ir.NewField(base.Pos, nil, nil, types.NewPtr(t)),
+ ir.NewField(base.Pos, nil, nil, types.NewPtr(t)),
+ }, []*ir.Field{
+ ir.NewField(base.Pos, nil, nil, types.Types[types.TBOOL]),
+ }))
+ return n, false
+ }
+ base.Fatalf("eqfor %v", t)
+ return nil, false
+}
+
+// brcom returns !(op).
+// For example, brcom(==) is !=.
+// It is fatal for anything other than a comparison operator.
+func brcom(op ir.Op) ir.Op {
+ switch op {
+ case ir.OEQ:
+ return ir.ONE
+ case ir.ONE:
+ return ir.OEQ
+ case ir.OLT:
+ return ir.OGE
+ case ir.OGT:
+ return ir.OLE
+ case ir.OLE:
+ return ir.OGT
+ case ir.OGE:
+ return ir.OLT
+ }
+ base.Fatalf("brcom: no com for %v\n", op)
+ return op
+}
+
+// brrev returns reverse(op): the comparison obtained by swapping the
+// two operands. For example, brrev(<) is >.
+// It is fatal for anything other than a comparison operator.
+func brrev(op ir.Op) ir.Op {
+ switch op {
+ case ir.OEQ:
+ return ir.OEQ
+ case ir.ONE:
+ return ir.ONE
+ case ir.OLT:
+ return ir.OGT
+ case ir.OGT:
+ return ir.OLT
+ case ir.OLE:
+ return ir.OGE
+ case ir.OGE:
+ return ir.OLE
+ }
+ base.Fatalf("brrev: no rev for %v\n", op)
+ return op
+}
+
+// tracecmpArg prepares operand n for a libfuzzer trace-cmp call by
+// converting it to type t, copying negative signed constants through a
+// temporary first.
+func tracecmpArg(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node {
+ // Ugly hack to avoid "constant -1 overflows uintptr" errors, etc.
+ if n.Op() == ir.OLITERAL && n.Type().IsSigned() && ir.Int64Val(n) < 0 {
+ n = copyExpr(n, n.Type(), init)
+ }
+
+ return typecheck.Conv(n, t)
+}
+
+// canMergeLoads reports whether the backend optimization passes for
+// the current architecture can combine adjacent loads into a single
+// larger, possibly unaligned, load. Note that currently the
+// optimizations must be able to handle little endian byte order.
+func canMergeLoads() bool {
+ switch ssagen.Arch.LinkArch.Family {
+ case sys.ARM64, sys.AMD64, sys.I386, sys.S390X:
+ return true
+ case sys.PPC64:
+ // Load combining only supported on ppc64le.
+ return ssagen.Arch.LinkArch.ByteOrder == binary.LittleEndian
+ }
+ // Be conservative on any other architecture.
+ return false
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/staticdata"
+ "cmd/compile/internal/staticinit"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+)
+
+// walkCompLit walks a composite literal node:
+// OARRAYLIT, OSLICELIT, OMAPLIT, OSTRUCTLIT (all CompLitExpr), or OPTRLIT (AddrExpr).
+func walkCompLit(n ir.Node, init *ir.Nodes) ir.Node {
+	if isStaticCompositeLiteral(n) && !ssagen.TypeOK(n.Type()) {
+		n := n.(*ir.CompLitExpr) // not OPTRLIT
+		// n can be directly represented in the read-only data section.
+		// Make direct reference to the static data. See issue 12841.
+		vstat := readonlystaticname(n.Type())
+		fixedlit(inInitFunction, initKindStatic, n, vstat, init)
+		return typecheck.Expr(vstat)
+	}
+	// Otherwise build the value dynamically into a fresh temporary.
+	var_ := typecheck.Temp(n.Type())
+	anylit(n, var_, init)
+	return var_
+}
+
+// initContext is the context in which static data is populated.
+// It is either in an init function or in any other function.
+// Static data populated in an init function will be written either
+// zero times (as a readonly, static data symbol) or
+// one time (during init function execution).
+// Either way, there is no opportunity for races or further modification,
+// so the data can be written to a (possibly readonly) data symbol.
+// Static data populated in any other function needs to be local to
+// that function to allow multiple instances of that function
+// to execute concurrently without clobbering each others' data.
+type initContext uint8
+
+const (
+	inInitFunction initContext = iota
+	inNonInitFunction
+)
+
+// String returns a human-readable name for c, for debugging output.
+func (c initContext) String() string {
+	if c == inInitFunction {
+		return "inInitFunction"
+	}
+	return "inNonInitFunction"
+}
+
+// readonlystaticname returns a name backed by a read-only static data symbol.
+func readonlystaticname(t *types.Type) *ir.Name {
+	n := staticinit.StaticName(t)
+	n.MarkReadonly()
+	// Content-addressable symbols with identical contents may be
+	// deduplicated by the linker.
+	n.Sym().Linksym().Set(obj.AttrContentAddressable, true)
+	return n
+}
+
+// isSimpleName reports whether nn is an ONAME whose storage is neither
+// heap-escaped (PAUTOHEAP) nor package-level (PEXTERN) — i.e. a plain
+// function-local variable.
+func isSimpleName(nn ir.Node) bool {
+	if nn.Op() != ir.ONAME {
+		return false
+	}
+	n := nn.(*ir.Name)
+	return n.Class_ != ir.PAUTOHEAP && n.Class_ != ir.PEXTERN
+}
+
+// litas appends the assignment l = r to init, walking the statement first.
+func litas(l ir.Node, r ir.Node, init *ir.Nodes) {
+	appendWalkStmt(init, ir.NewAssignStmt(base.Pos, l, r))
+}
+
+// initGenType is a bitmap indicating the types of generation that will occur for a static value.
+// Both bits may be set at once for a composite containing a mix of
+// constant and dynamic entries.
+type initGenType uint8
+
+const (
+	initDynamic initGenType = 1 << iota // contains some dynamic values, for which init code will be generated
+	initConst                           // contains some constant values, which may be written into data symbols
+)
+
+// getdyn calculates the initGenType for n.
+// If top is false, getdyn is recursing.
+// The result is a bitmap that may contain both initDynamic and initConst.
+func getdyn(n ir.Node, top bool) initGenType {
+	switch n.Op() {
+	default:
+		if ir.IsConstNode(n) {
+			return initConst
+		}
+		return initDynamic
+
+	case ir.OSLICELIT:
+		n := n.(*ir.CompLitExpr)
+		if !top {
+			// Nested slice literals always need runtime allocation.
+			return initDynamic
+		}
+		if n.Len/4 > int64(len(n.List)) {
+			// <25% of entries have explicit values.
+			// Very rough estimation, it takes 4 bytes of instructions
+			// to initialize 1 byte of result. So don't use a static
+			// initializer if the dynamic initialization code would be
+			// smaller than the static value.
+			// See issue 23780.
+			return initDynamic
+		}
+
+	case ir.OARRAYLIT, ir.OSTRUCTLIT:
+	}
+	lit := n.(*ir.CompLitExpr)
+
+	// Accumulate the classification of each entry.
+	var mode initGenType
+	for _, n1 := range lit.List {
+		switch n1.Op() {
+		case ir.OKEY:
+			n1 = n1.(*ir.KeyExpr).Value
+		case ir.OSTRUCTKEY:
+			n1 = n1.(*ir.StructKeyExpr).Value
+		}
+		mode |= getdyn(n1, false)
+		if mode == initDynamic|initConst {
+			// Both bits set; no further entry can change the result.
+			break
+		}
+	}
+	return mode
+}
+
+// isStaticCompositeLiteral reports whether n is a compile-time constant,
+// i.e. whether it can be laid out entirely in static data.
+// Slice literals are never static: they require a runtime slice header.
+func isStaticCompositeLiteral(n ir.Node) bool {
+	switch n.Op() {
+	case ir.OSLICELIT:
+		return false
+	case ir.OARRAYLIT:
+		n := n.(*ir.CompLitExpr)
+		for _, r := range n.List {
+			if r.Op() == ir.OKEY {
+				r = r.(*ir.KeyExpr).Value
+			}
+			if !isStaticCompositeLiteral(r) {
+				return false
+			}
+		}
+		return true
+	case ir.OSTRUCTLIT:
+		n := n.(*ir.CompLitExpr)
+		for _, r := range n.List {
+			r := r.(*ir.StructKeyExpr)
+			if !isStaticCompositeLiteral(r.Value) {
+				return false
+			}
+		}
+		return true
+	case ir.OLITERAL, ir.ONIL:
+		return true
+	case ir.OCONVIFACE:
+		// See staticassign's OCONVIFACE case for comments.
+		n := n.(*ir.ConvExpr)
+		val := ir.Node(n)
+		// Strip any chain of interface conversions down to the
+		// underlying value.
+		for val.Op() == ir.OCONVIFACE {
+			val = val.(*ir.ConvExpr).X
+		}
+		if val.Type().IsInterface() {
+			return val.Op() == ir.ONIL
+		}
+		if types.IsDirectIface(val.Type()) && val.Op() == ir.ONIL {
+			return true
+		}
+		return isStaticCompositeLiteral(val)
+	}
+	return false
+}
+
+// initKind is a kind of static initialization: static, dynamic, or local.
+// Static initialization represents literals and
+// literal components of composite literals.
+// Dynamic initialization represents non-literals and
+// non-literal components of composite literals.
+// LocalCode initialization represents initialization
+// that occurs purely in generated code local to the function of use.
+// Initialization code is sometimes generated in passes,
+// first static then dynamic.
+type initKind uint8
+
+const (
+	// Values start at 1 so the zero value is never a valid kind.
+	initKindStatic initKind = iota + 1
+	initKindDynamic
+	initKindLocalCode
+)
+
+// fixedlit handles struct, array, and slice literals.
+// It emits initialization of the fixed-layout composite literal n into
+// var_, appending any generated statements to init. ctxt says whether
+// the code runs in an init function, and kind selects which entries are
+// materialized in this pass: constant entries (initKindStatic), non-constant
+// entries (initKindDynamic), or all entries as local code (initKindLocalCode).
+func fixedlit(ctxt initContext, kind initKind, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) {
+	isBlank := var_ == ir.BlankNode
+	// splitnode maps one literal entry to (destination lvalue, value expr).
+	var splitnode func(ir.Node) (a ir.Node, value ir.Node)
+	switch n.Op() {
+	case ir.OARRAYLIT, ir.OSLICELIT:
+		// k tracks the implicit index; OKEY entries reset it.
+		var k int64
+		splitnode = func(r ir.Node) (ir.Node, ir.Node) {
+			if r.Op() == ir.OKEY {
+				kv := r.(*ir.KeyExpr)
+				k = typecheck.IndexConst(kv.Key)
+				if k < 0 {
+					base.Fatalf("fixedlit: invalid index %v", kv.Key)
+				}
+				r = kv.Value
+			}
+			a := ir.NewIndexExpr(base.Pos, var_, ir.NewInt(k))
+			k++
+			if isBlank {
+				return ir.BlankNode, r
+			}
+			return a, r
+		}
+	case ir.OSTRUCTLIT:
+		splitnode = func(rn ir.Node) (ir.Node, ir.Node) {
+			r := rn.(*ir.StructKeyExpr)
+			if r.Field.IsBlank() || isBlank {
+				return ir.BlankNode, r.Value
+			}
+			ir.SetPos(r)
+			return ir.NewSelectorExpr(base.Pos, ir.ODOT, var_, r.Field), r.Value
+		}
+	default:
+		base.Fatalf("fixedlit bad op: %v", n.Op())
+	}
+
+	for _, r := range n.List {
+		a, value := splitnode(r)
+		if a == ir.BlankNode && !staticinit.AnySideEffects(value) {
+			// Discard.
+			continue
+		}
+
+		switch value.Op() {
+		case ir.OSLICELIT:
+			value := value.(*ir.CompLitExpr)
+			if (kind == initKindStatic && ctxt == inNonInitFunction) || (kind == initKindDynamic && ctxt == inInitFunction) {
+				slicelit(ctxt, value, a, init)
+				continue
+			}
+
+		case ir.OARRAYLIT, ir.OSTRUCTLIT:
+			// Recurse into nested fixed-layout literals.
+			value := value.(*ir.CompLitExpr)
+			fixedlit(ctxt, kind, value, a, init)
+			continue
+		}
+
+		// In the static pass, skip non-constants; in the dynamic pass,
+		// skip constants (they were emitted by the static pass).
+		islit := ir.IsConstNode(value)
+		if (kind == initKindStatic && !islit) || (kind == initKindDynamic && islit) {
+			continue
+		}
+
+		// build list of assignments: var[index] = expr
+		ir.SetPos(a)
+		as := ir.NewAssignStmt(base.Pos, a, value)
+		as = typecheck.Stmt(as).(*ir.AssignStmt)
+		switch kind {
+		case initKindStatic:
+			genAsStatic(as)
+		case initKindDynamic, initKindLocalCode:
+			a = orderStmtInPlace(as, map[string][]*ir.Name{})
+			a = walkStmt(a)
+			init.Append(a)
+		default:
+			base.Fatalf("fixedlit: bad kind %d", kind)
+		}
+
+	}
+}
+
+// isSmallSliceLit reports whether n is a slice literal whose backing
+// array is small enough (at most ir.MaxSmallArraySize bytes) that a
+// separate static initializer is not worthwhile.
+func isSmallSliceLit(n *ir.CompLitExpr) bool {
+	if n.Op() != ir.OSLICELIT {
+		return false
+	}
+
+	return n.Type().Elem().Width == 0 || n.Len <= ir.MaxSmallArraySize/n.Type().Elem().Width
+}
+
+// slicelit lowers the slice composite literal n, storing the resulting
+// slice into var_ and appending any generated statements to init.
+// The recipe used is documented step by step in the body.
+func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) {
+	// make an array type corresponding the number of elements we have
+	t := types.NewArray(n.Type().Elem(), n.Len)
+	types.CalcSize(t)
+
+	if ctxt == inNonInitFunction {
+		// put everything into static array
+		vstat := staticinit.StaticName(t)
+
+		fixedlit(ctxt, initKindStatic, n, vstat, init)
+		fixedlit(ctxt, initKindDynamic, n, vstat, init)
+
+		// copy static to slice
+		var_ = typecheck.AssignExpr(var_)
+		name, offset, ok := staticinit.StaticLoc(var_)
+		if !ok || name.Class_ != ir.PEXTERN {
+			base.Fatalf("slicelit: %v", var_)
+		}
+		staticdata.InitSlice(name, offset, vstat, t.NumElem())
+		return
+	}
+
+	// recipe for var = []t{...}
+	// 1. make a static array
+	//	var vstat [...]t
+	// 2. assign (data statements) the constant part
+	//	vstat = constpart{}
+	// 3. make an auto pointer to array and allocate heap to it
+	//	var vauto *[...]t = new([...]t)
+	// 4. copy the static array to the auto array
+	//	*vauto = vstat
+	// 5. for each dynamic part assign to the array
+	//	vauto[i] = dynamic part
+	// 6. assign slice of allocated heap to var
+	//	var = vauto[:]
+	//
+	// an optimization is done if there is no constant part
+	//	3. var vauto *[...]t = new([...]t)
+	//	5. vauto[i] = dynamic part
+	//	6. var = vauto[:]
+
+	// if the literal contains constants,
+	// make static initialized array (1),(2)
+	var vstat ir.Node
+
+	mode := getdyn(n, true)
+	if mode&initConst != 0 && !isSmallSliceLit(n) {
+		if ctxt == inInitFunction {
+			vstat = readonlystaticname(t)
+		} else {
+			vstat = staticinit.StaticName(t)
+		}
+		fixedlit(ctxt, initKindStatic, n, vstat, init)
+	}
+
+	// make new auto *array (3 declare)
+	vauto := typecheck.Temp(types.NewPtr(t))
+
+	// set auto to point at new temp or heap (3 assign)
+	var a ir.Node
+	if x := n.Prealloc; x != nil {
+		// temp allocated during order.go for dddarg
+		if !types.Identical(t, x.Type()) {
+			// NOTE(review): siblings report internal errors via
+			// base.Fatalf; a bare panic here is inconsistent — confirm
+			// whether this was intended.
+			panic("dotdotdot base type does not match order's assigned type")
+		}
+
+		if vstat == nil {
+			a = ir.NewAssignStmt(base.Pos, x, nil)
+			a = typecheck.Stmt(a)
+			init.Append(a) // zero new temp
+		} else {
+			// Declare that we're about to initialize all of x.
+			// (Which happens at the *vauto = vstat below.)
+			init.Append(ir.NewUnaryExpr(base.Pos, ir.OVARDEF, x))
+		}
+
+		a = typecheck.NodAddr(x)
+	} else if n.Esc() == ir.EscNone {
+		a = typecheck.Temp(t)
+		if vstat == nil {
+			a = ir.NewAssignStmt(base.Pos, typecheck.Temp(t), nil)
+			a = typecheck.Stmt(a)
+			init.Append(a) // zero new temp
+			a = a.(*ir.AssignStmt).X
+		} else {
+			init.Append(ir.NewUnaryExpr(base.Pos, ir.OVARDEF, a))
+		}
+
+		a = typecheck.NodAddr(a)
+	} else {
+		a = ir.NewUnaryExpr(base.Pos, ir.ONEW, ir.TypeNode(t))
+	}
+	appendWalkStmt(init, ir.NewAssignStmt(base.Pos, vauto, a))
+
+	if vstat != nil {
+		// copy static to heap (4)
+		a = ir.NewStarExpr(base.Pos, vauto)
+		appendWalkStmt(init, ir.NewAssignStmt(base.Pos, a, vstat))
+	}
+
+	// put dynamics into array (5)
+	var index int64
+	for _, value := range n.List {
+		if value.Op() == ir.OKEY {
+			kv := value.(*ir.KeyExpr)
+			index = typecheck.IndexConst(kv.Key)
+			if index < 0 {
+				base.Fatalf("slicelit: invalid index %v", kv.Key)
+			}
+			value = kv.Value
+		}
+		a := ir.NewIndexExpr(base.Pos, vauto, ir.NewInt(index))
+		a.SetBounded(true)
+		index++
+
+		// TODO need to check bounds?
+
+		switch value.Op() {
+		case ir.OSLICELIT:
+			break
+
+		case ir.OARRAYLIT, ir.OSTRUCTLIT:
+			value := value.(*ir.CompLitExpr)
+			k := initKindDynamic
+			if vstat == nil {
+				// Generate both static and dynamic initializations.
+				// See issue #31987.
+				k = initKindLocalCode
+			}
+			fixedlit(ctxt, k, value, a, init)
+			continue
+		}
+
+		if vstat != nil && ir.IsConstNode(value) { // already set by copy from static value
+			continue
+		}
+
+		// build list of vauto[c] = expr
+		ir.SetPos(value)
+		as := typecheck.Stmt(ir.NewAssignStmt(base.Pos, a, value))
+		as = orderStmtInPlace(as, map[string][]*ir.Name{})
+		as = walkStmt(as)
+		init.Append(as)
+	}
+
+	// make slice out of heap (6)
+	a = ir.NewAssignStmt(base.Pos, var_, ir.NewSliceExpr(base.Pos, ir.OSLICE, vauto))
+
+	a = typecheck.Stmt(a)
+	a = orderStmtInPlace(a, map[string][]*ir.Name{})
+	a = walkStmt(a)
+	init.Append(a)
+}
+
+// maplit lowers the map composite literal n, making the map m and
+// populating it, appending any generated statements to init.
+// Large literals (> 25 entries) are initialized by looping over a pair
+// of static key/value arrays; small ones assign entries directly.
+func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) {
+	// make the map var
+	a := ir.NewCallExpr(base.Pos, ir.OMAKE, nil, nil)
+	a.SetEsc(n.Esc())
+	a.Args = []ir.Node{ir.TypeNode(n.Type()), ir.NewInt(int64(len(n.List)))}
+	litas(m, a, init)
+
+	entries := n.List
+
+	// The order pass already removed any dynamic (runtime-computed) entries.
+	// All remaining entries are static. Double-check that.
+	for _, r := range entries {
+		r := r.(*ir.KeyExpr)
+		if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) {
+			base.Fatalf("maplit: entry is not a literal: %v", r)
+		}
+	}
+
+	if len(entries) > 25 {
+		// For a large number of entries, put them in an array and loop.
+
+		// build types [count]Tindex and [count]Tvalue
+		tk := types.NewArray(n.Type().Key(), int64(len(entries)))
+		te := types.NewArray(n.Type().Elem(), int64(len(entries)))
+
+		// Disable alg generation; these arrays are never compared.
+		tk.SetNoalg(true)
+		te.SetNoalg(true)
+
+		types.CalcSize(tk)
+		types.CalcSize(te)
+
+		// make and initialize static arrays
+		vstatk := readonlystaticname(tk)
+		vstate := readonlystaticname(te)
+
+		datak := ir.NewCompLitExpr(base.Pos, ir.OARRAYLIT, nil, nil)
+		datae := ir.NewCompLitExpr(base.Pos, ir.OARRAYLIT, nil, nil)
+		for _, r := range entries {
+			r := r.(*ir.KeyExpr)
+			datak.List.Append(r.Key)
+			datae.List.Append(r.Value)
+		}
+		fixedlit(inInitFunction, initKindStatic, datak, vstatk, init)
+		fixedlit(inInitFunction, initKindStatic, datae, vstate, init)
+
+		// loop adding structure elements to map
+		// for i = 0; i < len(vstatk); i++ {
+		//	map[vstatk[i]] = vstate[i]
+		// }
+		i := typecheck.Temp(types.Types[types.TINT])
+		rhs := ir.NewIndexExpr(base.Pos, vstate, i)
+		rhs.SetBounded(true)
+
+		kidx := ir.NewIndexExpr(base.Pos, vstatk, i)
+		kidx.SetBounded(true)
+		lhs := ir.NewIndexExpr(base.Pos, m, kidx)
+
+		zero := ir.NewAssignStmt(base.Pos, i, ir.NewInt(0))
+		cond := ir.NewBinaryExpr(base.Pos, ir.OLT, i, ir.NewInt(tk.NumElem()))
+		incr := ir.NewAssignStmt(base.Pos, i, ir.NewBinaryExpr(base.Pos, ir.OADD, i, ir.NewInt(1)))
+		body := ir.NewAssignStmt(base.Pos, lhs, rhs)
+
+		loop := ir.NewForStmt(base.Pos, nil, cond, incr, nil)
+		loop.Body = []ir.Node{body}
+		*loop.PtrInit() = []ir.Node{zero}
+
+		appendWalkStmt(init, loop)
+		return
+	}
+	// For a small number of entries, just add them directly.
+
+	// Build list of var[c] = expr.
+	// Use temporaries so that mapassign1 can have addressable key, elem.
+	// TODO(josharian): avoid map key temporaries for mapfast_* assignments with literal keys.
+	tmpkey := typecheck.Temp(m.Type().Key())
+	tmpelem := typecheck.Temp(m.Type().Elem())
+
+	for _, r := range entries {
+		r := r.(*ir.KeyExpr)
+		index, elem := r.Key, r.Value
+
+		ir.SetPos(index)
+		appendWalkStmt(init, ir.NewAssignStmt(base.Pos, tmpkey, index))
+
+		ir.SetPos(elem)
+		appendWalkStmt(init, ir.NewAssignStmt(base.Pos, tmpelem, elem))
+
+		ir.SetPos(tmpelem)
+		appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewIndexExpr(base.Pos, m, tmpkey), tmpelem))
+	}
+
+	// Release the temporaries once all entries are inserted.
+	appendWalkStmt(init, ir.NewUnaryExpr(base.Pos, ir.OVARKILL, tmpkey))
+	appendWalkStmt(init, ir.NewUnaryExpr(base.Pos, ir.OVARKILL, tmpelem))
+}
+
+// anylit lowers the composite literal (or literal-like) node n,
+// storing its value into var_ and appending generated statements to init.
+// It dispatches on n's op and recurses for pointer and nested literals.
+func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) {
+	t := n.Type()
+	switch n.Op() {
+	default:
+		base.Fatalf("anylit: not lit, op=%v node=%v", n.Op(), n)
+
+	case ir.ONAME:
+		n := n.(*ir.Name)
+		appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, n))
+
+	case ir.OMETHEXPR:
+		n := n.(*ir.MethodExpr)
+		anylit(n.FuncName(), var_, init)
+
+	case ir.OPTRLIT:
+		n := n.(*ir.AddrExpr)
+		if !t.IsPtr() {
+			base.Fatalf("anylit: not ptr")
+		}
+
+		var r ir.Node
+		if n.Alloc != nil {
+			// n.Right is stack temporary used as backing store.
+			appendWalkStmt(init, ir.NewAssignStmt(base.Pos, n.Alloc, nil)) // zero backing store, just in case (#18410)
+			r = typecheck.NodAddr(n.Alloc)
+		} else {
+			r = ir.NewUnaryExpr(base.Pos, ir.ONEW, ir.TypeNode(n.X.Type()))
+			r.SetEsc(n.Esc())
+		}
+		appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, r))
+
+		// Initialize the pointed-to value through *var_.
+		var_ = ir.NewStarExpr(base.Pos, var_)
+		var_ = typecheck.AssignExpr(var_)
+		anylit(n.X, var_, init)
+
+	case ir.OSTRUCTLIT, ir.OARRAYLIT:
+		n := n.(*ir.CompLitExpr)
+		if !t.IsStruct() && !t.IsArray() {
+			base.Fatalf("anylit: not struct/array")
+		}
+
+		if isSimpleName(var_) && len(n.List) > 4 {
+			// lay out static data
+			vstat := readonlystaticname(t)
+
+			ctxt := inInitFunction
+			if n.Op() == ir.OARRAYLIT {
+				ctxt = inNonInitFunction
+			}
+			fixedlit(ctxt, initKindStatic, n, vstat, init)
+
+			// copy static to var
+			appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, vstat))
+
+			// add expressions to automatic
+			fixedlit(inInitFunction, initKindDynamic, n, var_, init)
+			break
+		}
+
+		var components int64
+		if n.Op() == ir.OARRAYLIT {
+			components = t.NumElem()
+		} else {
+			components = int64(t.NumFields())
+		}
+		// initialization of an array or struct with unspecified components (missing fields or arrays)
+		if isSimpleName(var_) || int64(len(n.List)) < components {
+			appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, nil))
+		}
+
+		fixedlit(inInitFunction, initKindLocalCode, n, var_, init)
+
+	case ir.OSLICELIT:
+		n := n.(*ir.CompLitExpr)
+		slicelit(inInitFunction, n, var_, init)
+
+	case ir.OMAPLIT:
+		n := n.(*ir.CompLitExpr)
+		if !t.IsMap() {
+			base.Fatalf("anylit: not map")
+		}
+		maplit(n, var_, init)
+	}
+}
+
+// oaslit handles special composite literal assignments.
+// It returns true if n's effects have been added to init,
+// in which case n should be dropped from the program by the caller.
+// The assignment qualifies only when the destination is a simple local
+// name of identical type that the literal does not reference.
+func oaslit(n *ir.AssignStmt, init *ir.Nodes) bool {
+	if n.X == nil || n.Y == nil {
+		// not a special composite literal assignment
+		return false
+	}
+	if n.X.Type() == nil || n.Y.Type() == nil {
+		// not a special composite literal assignment
+		return false
+	}
+	if !isSimpleName(n.X) {
+		// not a special composite literal assignment
+		return false
+	}
+	if !types.Identical(n.X.Type(), n.Y.Type()) {
+		// not a special composite literal assignment
+		return false
+	}
+
+	switch n.Y.Op() {
+	default:
+		// not a special composite literal assignment
+		return false
+
+	case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT:
+		if refersToCommonName(n.X, n.Y) {
+			// not a special composite literal assignment
+			return false
+		}
+		anylit(n.Y, n.X, init)
+	}
+
+	return true
+}
+
+// genAsStatic emits static data for assignment as.
+// The LHS must resolve to a static (PEXTERN) location or the blank node;
+// the RHS must be a literal, a method expression, or a function name.
+// Anything else is an internal compiler error.
+func genAsStatic(as *ir.AssignStmt) {
+	if as.X.Type() == nil {
+		base.Fatalf("genAsStatic as.Left not typechecked")
+	}
+
+	name, offset, ok := staticinit.StaticLoc(as.X)
+	if !ok || (name.Class_ != ir.PEXTERN && as.X != ir.BlankNode) {
+		base.Fatalf("genAsStatic: lhs %v", as.X)
+	}
+
+	switch r := as.Y; r.Op() {
+	case ir.OLITERAL:
+		staticdata.InitConst(name, offset, r, int(r.Type().Width))
+		return
+	case ir.OMETHEXPR:
+		r := r.(*ir.MethodExpr)
+		staticdata.InitFunc(name, offset, r.FuncName())
+		return
+	case ir.ONAME:
+		r := r.(*ir.Name)
+		if r.Offset_ != 0 {
+			base.Fatalf("genAsStatic %+v", as)
+		}
+		if r.Class_ == ir.PFUNC {
+			staticdata.InitFunc(name, offset, r)
+			return
+		}
+	}
+	base.Fatalf("genAsStatic: rhs %v", as.Y)
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "encoding/binary"
+ "go/constant"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/sys"
+)
+
+// walkConv walks an OCONV or OCONVNOP (but not OCONVIFACE) node.
+// Besides walking the operand, it inserts checkptr instrumentation for
+// unsafe conversions and rewrites conversions that need a runtime call
+// (see rtconvfn) into that call.
+func walkConv(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+	n.X = walkExpr(n.X, init)
+	if n.Op() == ir.OCONVNOP && n.Type() == n.X.Type() {
+		return n.X
+	}
+	if n.Op() == ir.OCONVNOP && ir.ShouldCheckPtr(ir.CurFunc, 1) {
+		if n.Type().IsPtr() && n.X.Type().IsUnsafePtr() { // unsafe.Pointer to *T
+			return walkCheckPtrAlignment(n, init, nil)
+		}
+		if n.Type().IsUnsafePtr() && n.X.Type().IsUintptr() { // uintptr to unsafe.Pointer
+			return walkCheckPtrArithmetic(n, init)
+		}
+	}
+	param, result := rtconvfn(n.X.Type(), n.Type())
+	if param == types.Txxx {
+		return n
+	}
+	// e.g. param=TFLOAT64, result=TINT64 -> runtime "float64toint64".
+	fn := types.BasicTypeNames[param] + "to" + types.BasicTypeNames[result]
+	return typecheck.Conv(mkcall(fn, types.Types[result], init, typecheck.Conv(n.X, types.Types[param])), n.Type())
+}
+
+// walkConvInterface walks an OCONVIFACE node, lowering the conversion of
+// n.X to interface type n.Type(). It applies several allocation-avoiding
+// special cases before falling back to the generic convT2{E,I}/convI2I
+// runtime calls.
+func walkConvInterface(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+	n.X = walkExpr(n.X, init)
+
+	fromType := n.X.Type()
+	toType := n.Type()
+
+	if !fromType.IsInterface() && !ir.IsBlank(ir.CurFunc.Nname) { // skip unnamed functions (func _())
+		reflectdata.MarkTypeUsedInInterface(fromType, ir.CurFunc.LSym)
+	}
+
+	// typeword generates the type word of the interface value.
+	typeword := func() ir.Node {
+		if toType.IsEmptyInterface() {
+			return reflectdata.TypePtr(fromType)
+		}
+		return reflectdata.ITabAddr(fromType, toType)
+	}
+
+	// Optimize convT2E or convT2I as a two-word copy when T is pointer-shaped.
+	if types.IsDirectIface(fromType) {
+		l := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), n.X)
+		l.SetType(toType)
+		l.SetTypecheck(n.Typecheck())
+		return l
+	}
+
+	// Lazily bind the runtime's staticuint64s/zerobase symbols, used by
+	// the small-value special cases below.
+	if ir.Names.Staticuint64s == nil {
+		ir.Names.Staticuint64s = typecheck.NewName(ir.Pkgs.Runtime.Lookup("staticuint64s"))
+		ir.Names.Staticuint64s.Class_ = ir.PEXTERN
+		// The actual type is [256]uint64, but we use [256*8]uint8 so we can address
+		// individual bytes.
+		ir.Names.Staticuint64s.SetType(types.NewArray(types.Types[types.TUINT8], 256*8))
+		ir.Names.Zerobase = typecheck.NewName(ir.Pkgs.Runtime.Lookup("zerobase"))
+		ir.Names.Zerobase.Class_ = ir.PEXTERN
+		ir.Names.Zerobase.SetType(types.Types[types.TUINTPTR])
+	}
+
+	// Optimize convT2{E,I} for many cases in which T is not pointer-shaped,
+	// by using an existing addressable value identical to n.Left
+	// or creating one on the stack.
+	var value ir.Node
+	switch {
+	case fromType.Size() == 0:
+		// n.Left is zero-sized. Use zerobase.
+		cheapExpr(n.X, init) // Evaluate n.Left for side-effects. See issue 19246.
+		value = ir.Names.Zerobase
+	case fromType.IsBoolean() || (fromType.Size() == 1 && fromType.IsInteger()):
+		// n.Left is a bool/byte. Use staticuint64s[n.Left * 8] on little-endian
+		// and staticuint64s[n.Left * 8 + 7] on big-endian.
+		n.X = cheapExpr(n.X, init)
+		// byteindex widens n.Left so that the multiplication doesn't overflow.
+		index := ir.NewBinaryExpr(base.Pos, ir.OLSH, byteindex(n.X), ir.NewInt(3))
+		if ssagen.Arch.LinkArch.ByteOrder == binary.BigEndian {
+			index = ir.NewBinaryExpr(base.Pos, ir.OADD, index, ir.NewInt(7))
+		}
+		xe := ir.NewIndexExpr(base.Pos, ir.Names.Staticuint64s, index)
+		xe.SetBounded(true)
+		value = xe
+	case n.X.Op() == ir.ONAME && n.X.(*ir.Name).Class_ == ir.PEXTERN && n.X.(*ir.Name).Readonly():
+		// n.Left is a readonly global; use it directly.
+		value = n.X
+	case !fromType.IsInterface() && n.Esc() == ir.EscNone && fromType.Width <= 1024:
+		// n.Left does not escape. Use a stack temporary initialized to n.Left.
+		value = typecheck.Temp(fromType)
+		init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, value, n.X)))
+	}
+
+	if value != nil {
+		// Value is identical to n.Left.
+		// Construct the interface directly: {type/itab, &value}.
+		l := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), typecheck.Expr(typecheck.NodAddr(value)))
+		l.SetType(toType)
+		l.SetTypecheck(n.Typecheck())
+		return l
+	}
+
+	// Implement interface to empty interface conversion.
+	// tmp = i.itab
+	// if tmp != nil {
+	//    tmp = tmp.type
+	// }
+	// e = iface{tmp, i.data}
+	if toType.IsEmptyInterface() && fromType.IsInterface() && !fromType.IsEmptyInterface() {
+		// Evaluate the input interface.
+		c := typecheck.Temp(fromType)
+		init.Append(ir.NewAssignStmt(base.Pos, c, n.X))
+
+		// Get the itab out of the interface.
+		tmp := typecheck.Temp(types.NewPtr(types.Types[types.TUINT8]))
+		init.Append(ir.NewAssignStmt(base.Pos, tmp, typecheck.Expr(ir.NewUnaryExpr(base.Pos, ir.OITAB, c))))
+
+		// Get the type out of the itab.
+		nif := ir.NewIfStmt(base.Pos, typecheck.Expr(ir.NewBinaryExpr(base.Pos, ir.ONE, tmp, typecheck.NodNil())), nil, nil)
+		nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, tmp, itabType(tmp))}
+		init.Append(nif)
+
+		// Build the result.
+		e := ir.NewBinaryExpr(base.Pos, ir.OEFACE, tmp, ifaceData(n.Pos(), c, types.NewPtr(types.Types[types.TUINT8])))
+		e.SetType(toType) // assign type manually, typecheck doesn't understand OEFACE.
+		e.SetTypecheck(1)
+		return e
+	}
+
+	fnname, needsaddr := convFuncName(fromType, toType)
+
+	if !needsaddr && !fromType.IsInterface() {
+		// Use a specialized conversion routine that only returns a data pointer.
+		// ptr = convT2X(val)
+		// e = iface{typ/tab, ptr}
+		fn := typecheck.LookupRuntime(fnname)
+		types.CalcSize(fromType)
+		fn = typecheck.SubstArgTypes(fn, fromType)
+		types.CalcSize(fn.Type())
+		call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
+		call.Args = []ir.Node{n.X}
+		e := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), safeExpr(walkExpr(typecheck.Expr(call), init), init))
+		e.SetType(toType)
+		e.SetTypecheck(1)
+		return e
+	}
+
+	var tab ir.Node
+	if fromType.IsInterface() {
+		// convI2I
+		tab = reflectdata.TypePtr(toType)
+	} else {
+		// convT2x
+		tab = typeword()
+	}
+
+	v := n.X
+	if needsaddr {
+		// Types of large or unknown size are passed by reference.
+		// Orderexpr arranged for n.Left to be a temporary for all
+		// the conversions it could see. Comparison of an interface
+		// with a non-interface, especially in a switch on interface value
+		// with non-interface cases, is not visible to order.stmt, so we
+		// have to fall back on allocating a temp here.
+		if !ir.IsAssignable(v) {
+			v = copyExpr(v, v.Type(), init)
+		}
+		v = typecheck.NodAddr(v)
+	}
+
+	types.CalcSize(fromType)
+	fn := typecheck.LookupRuntime(fnname)
+	fn = typecheck.SubstArgTypes(fn, fromType, toType)
+	types.CalcSize(fn.Type())
+	call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
+	call.Args = []ir.Node{tab, v}
+	return walkExpr(typecheck.Expr(call), init)
+}
+
+// walkBytesRunesToString walks an OBYTES2STR or ORUNES2STR node,
+// lowering it to a slicerunetostring/slicebytetostring runtime call.
+// A stack buffer is passed when the result does not escape.
+func walkBytesRunesToString(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+	a := typecheck.NodNil()
+	if n.Esc() == ir.EscNone {
+		// Create temporary buffer for string on stack.
+		t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize)
+		a = typecheck.NodAddr(typecheck.Temp(t))
+	}
+	if n.Op() == ir.ORUNES2STR {
+		// slicerunetostring(*[32]byte, []rune) string
+		return mkcall("slicerunetostring", n.Type(), init, a, n.X)
+	}
+	// slicebytetostring(*[32]byte, ptr *byte, n int) string
+	n.X = cheapExpr(n.X, init)
+	ptr, len := backingArrayPtrLen(n.X)
+	return mkcall("slicebytetostring", n.Type(), init, a, ptr, len)
+}
+
+// walkBytesToStringTemp walks an OBYTES2STRTMP node: a []byte-to-string
+// conversion whose result is known not to outlive the byte slice, so no
+// copy is needed unless instrumentation requires the runtime call.
+func walkBytesToStringTemp(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+	n.X = walkExpr(n.X, init)
+	if !base.Flag.Cfg.Instrumenting {
+		// Let the backend handle OBYTES2STRTMP directly
+		// to avoid a function call to slicebytetostringtmp.
+		return n
+	}
+	// slicebytetostringtmp(ptr *byte, n int) string
+	n.X = cheapExpr(n.X, init)
+	ptr, len := backingArrayPtrLen(n.X)
+	return mkcall("slicebytetostringtmp", n.Type(), init, ptr, len)
+}
+
+// walkRuneToString walks an ORUNESTR node, lowering the single-rune to
+// string conversion to an intstring runtime call, with a 4-byte stack
+// buffer (max UTF-8 length of one rune) when the result does not escape.
+func walkRuneToString(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+	a := typecheck.NodNil()
+	if n.Esc() == ir.EscNone {
+		t := types.NewArray(types.Types[types.TUINT8], 4)
+		a = typecheck.NodAddr(typecheck.Temp(t))
+	}
+	// intstring(*[4]byte, rune)
+	return mkcall("intstring", n.Type(), init, a, typecheck.Conv(n.X, types.Types[types.TINT64]))
+}
+
+// walkStringToBytes walks an OSTR2BYTES node. For a constant string it
+// copies the static data into a fixed-size array and slices that;
+// otherwise it calls the stringtoslicebyte runtime routine.
+func walkStringToBytes(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+	s := n.X
+	if ir.IsConst(s, constant.String) {
+		sc := ir.StringVal(s)
+
+		// Allocate a [n]byte of the right size.
+		t := types.NewArray(types.Types[types.TUINT8], int64(len(sc)))
+		var a ir.Node
+		if n.Esc() == ir.EscNone && len(sc) <= int(ir.MaxImplicitStackVarSize) {
+			a = typecheck.NodAddr(typecheck.Temp(t))
+		} else {
+			a = callnew(t)
+		}
+		p := typecheck.Temp(t.PtrTo()) // *[n]byte
+		init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, p, a)))
+
+		// Copy from the static string data to the [n]byte.
+		if len(sc) > 0 {
+			as := ir.NewAssignStmt(base.Pos, ir.NewStarExpr(base.Pos, p), ir.NewStarExpr(base.Pos, typecheck.ConvNop(ir.NewUnaryExpr(base.Pos, ir.OSPTR, s), t.PtrTo())))
+			appendWalkStmt(init, as)
+		}
+
+		// Slice the [n]byte to a []byte.
+		slice := ir.NewSliceExpr(n.Pos(), ir.OSLICEARR, p)
+		slice.SetType(n.Type())
+		slice.SetTypecheck(1)
+		return walkExpr(slice, init)
+	}
+
+	a := typecheck.NodNil()
+	if n.Esc() == ir.EscNone {
+		// Create temporary buffer for slice on stack.
+		t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize)
+		a = typecheck.NodAddr(typecheck.Temp(t))
+	}
+	// stringtoslicebyte(*32[byte], string) []byte
+	return mkcall("stringtoslicebyte", n.Type(), init, a, typecheck.Conv(s, types.Types[types.TSTRING]))
+}
+
+// walkStringToBytesTemp walks an OSTR2BYTESTMP node.
+func walkStringToBytesTemp(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+	// []byte(string) conversion that creates a slice
+	// referring to the actual string bytes.
+	// This conversion is handled later by the backend and
+	// is only for use by internal compiler optimizations
+	// that know that the slice won't be mutated.
+	// The only such case today is:
+	// for i, c := range []byte(string)
+	n.X = walkExpr(n.X, init)
+	return n
+}
+
+// walkStringToRunes walks an OSTR2RUNES node, lowering the conversion to
+// a stringtoslicerune runtime call, with a stack buffer when the result
+// does not escape.
+func walkStringToRunes(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+	a := typecheck.NodNil()
+	if n.Esc() == ir.EscNone {
+		// Create temporary buffer for slice on stack.
+		t := types.NewArray(types.Types[types.TINT32], tmpstringbufsize)
+		a = typecheck.NodAddr(typecheck.Temp(t))
+	}
+	// stringtoslicerune(*[32]rune, string) []rune
+	return mkcall("stringtoslicerune", n.Type(), init, a, typecheck.Conv(n.X, types.Types[types.TSTRING]))
+}
+
+// convFuncName builds the runtime function name for interface conversion.
+// It also reports whether the function expects the data by address.
+// Not all names are possible. For example, we never generate convE2E or convE2I.
+// As used here, Tie yields 'I' for a nonempty interface, 'E' for the
+// empty interface, and 'T' for a concrete type.
+func convFuncName(from, to *types.Type) (fnname string, needsaddr bool) {
+	tkind := to.Tie()
+	switch from.Tie() {
+	case 'I':
+		if tkind == 'I' {
+			return "convI2I", false
+		}
+	case 'T':
+		// Size/alignment-specialized helpers take the value directly.
+		switch {
+		case from.Size() == 2 && from.Align == 2:
+			return "convT16", false
+		case from.Size() == 4 && from.Align == 4 && !from.HasPointers():
+			return "convT32", false
+		case from.Size() == 8 && from.Align == types.Types[types.TUINT64].Align && !from.HasPointers():
+			return "convT64", false
+		}
+		if sc := from.SoleComponent(); sc != nil {
+			switch {
+			case sc.IsString():
+				return "convTstring", false
+			case sc.IsSlice():
+				return "convTslice", false
+			}
+		}
+
+		// Generic fallbacks take the value by address.
+		switch tkind {
+		case 'E':
+			if !from.HasPointers() {
+				return "convT2Enoptr", true
+			}
+			return "convT2E", true
+		case 'I':
+			if !from.HasPointers() {
+				return "convT2Inoptr", true
+			}
+			return "convT2I", true
+		}
+	}
+	base.Fatalf("unknown conv func %c2%c", from.Tie(), to.Tie())
+	panic("unreachable")
+}
+
+// rtconvfn returns the parameter and result types that will be used by a
+// runtime function to convert from type src to type dst. The runtime function
+// name can be derived from the names of the returned types.
+//
+// If no such function is necessary, it returns (Txxx, Txxx).
+// Only 32-bit ARM, MIPS, and 386 need runtime help for these
+// float<->64-bit-integer conversions; softfloat handles its own.
+func rtconvfn(src, dst *types.Type) (param, result types.Kind) {
+	if ssagen.Arch.SoftFloat {
+		return types.Txxx, types.Txxx
+	}
+
+	switch ssagen.Arch.LinkArch.Family {
+	case sys.ARM, sys.MIPS:
+		if src.IsFloat() {
+			switch dst.Kind() {
+			case types.TINT64, types.TUINT64:
+				return types.TFLOAT64, dst.Kind()
+			}
+		}
+		if dst.IsFloat() {
+			switch src.Kind() {
+			case types.TINT64, types.TUINT64:
+				return src.Kind(), types.TFLOAT64
+			}
+		}
+
+	case sys.I386:
+		if src.IsFloat() {
+			switch dst.Kind() {
+			case types.TINT64, types.TUINT64:
+				return types.TFLOAT64, dst.Kind()
+			case types.TUINT32, types.TUINT, types.TUINTPTR:
+				return types.TFLOAT64, types.TUINT32
+			}
+		}
+		if dst.IsFloat() {
+			switch src.Kind() {
+			case types.TINT64, types.TUINT64:
+				return src.Kind(), types.TFLOAT64
+			case types.TUINT32, types.TUINT, types.TUINTPTR:
+				return types.TUINT32, types.TFLOAT64
+			}
+		}
+	}
+	return types.Txxx, types.Txxx
+}
+
+// byteindex converts n, which is byte-sized, to an int used to index into an array.
+// We cannot use conv, because we allow converting bool to int here,
+// which is forbidden in user code.
+func byteindex(n ir.Node) ir.Node {
+	// Reinterpret the operand as an unsigned byte first: bool cannot be
+	// converted to int directly, and going through int8 would sign-extend
+	// negative values and yield the wrong index.
+	if !types.Identical(n.Type(), types.Types[types.TUINT8]) {
+		byteConv := ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
+		byteConv.SetType(types.Types[types.TUINT8])
+		byteConv.SetTypecheck(1)
+		n = byteConv
+	}
+	intConv := ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
+	intConv.SetType(types.Types[types.TINT])
+	intConv.SetTypecheck(1)
+	return intConv
+}
+
+// walkCheckPtrAlignment instruments an unsafe pointer conversion with a call
+// to runtime.checkptrAlignment. count, if non-nil, is the element count for a
+// slice-of-array conversion; in that case n's element type must be an array.
+func walkCheckPtrAlignment(n *ir.ConvExpr, init *ir.Nodes, count ir.Node) ir.Node {
+	if !n.Type().IsPtr() {
+		base.Fatalf("expected pointer type: %v", n.Type())
+	}
+	elem := n.Type().Elem()
+	if count != nil {
+		if !elem.IsArray() {
+			base.Fatalf("expected array type: %v", elem)
+		}
+		elem = elem.Elem()
+	}
+
+	// No check needed when any pointer value is trivially aligned and
+	// in-bounds for the target type.
+	size := elem.Size()
+	if elem.Alignment() == 1 && (size == 0 || size == 1 && count == nil) {
+		return n
+	}
+
+	if count == nil {
+		count = ir.NewInt(1)
+	}
+
+	n.X = cheapExpr(n.X, init)
+	init.Append(mkcall("checkptrAlignment", nil, init, typecheck.ConvNop(n.X, types.Types[types.TUNSAFEPTR]), reflectdata.TypePtr(elem), typecheck.Conv(count, types.Types[types.TUINTPTR])))
+	return n
+}
+
+// walkCheckPtrArithmetic instruments unsafe pointer arithmetic with a call to
+// runtime.checkptrArithmetic, passing the computed pointer and the original
+// unsafe.Pointer operands it was derived from.
+func walkCheckPtrArithmetic(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+	// Calling cheapexpr(n, init) below leads to a recursive call
+	// to walkexpr, which leads us back here again. Use n.Opt to
+	// prevent infinite loops.
+	if opt := n.Opt(); opt == &walkCheckPtrArithmeticMarker {
+		return n
+	} else if opt != nil {
+		// We use n.Opt() here because today it's not used for OCONVNOP. If that changes,
+		// there's no guarantee that temporarily replacing it is safe, so just hard fail here.
+		base.Fatalf("unexpected Opt: %v", opt)
+	}
+	n.SetOpt(&walkCheckPtrArithmeticMarker)
+	defer n.SetOpt(nil)
+
+	// TODO(mdempsky): Make stricter. We only need to exempt
+	// reflect.Value.Pointer and reflect.Value.UnsafeAddr.
+	switch n.X.Op() {
+	case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
+		return n
+	}
+
+	if n.X.Op() == ir.ODOTPTR && ir.IsReflectHeaderDataField(n.X) {
+		return n
+	}
+
+	// Find original unsafe.Pointer operands involved in this
+	// arithmetic expression.
+	//
+	// "It is valid both to add and to subtract offsets from a
+	// pointer in this way. It is also valid to use &^ to round
+	// pointers, usually for alignment."
+	var originals []ir.Node
+	var walk func(n ir.Node)
+	walk = func(n ir.Node) {
+		switch n.Op() {
+		case ir.OADD:
+			n := n.(*ir.BinaryExpr)
+			walk(n.X)
+			walk(n.Y)
+		case ir.OSUB, ir.OANDNOT:
+			// Only the left operand can carry the original pointer.
+			n := n.(*ir.BinaryExpr)
+			walk(n.X)
+		case ir.OCONVNOP:
+			n := n.(*ir.ConvExpr)
+			if n.X.Type().IsUnsafePtr() {
+				n.X = cheapExpr(n.X, init)
+				originals = append(originals, typecheck.ConvNop(n.X, types.Types[types.TUNSAFEPTR]))
+			}
+		}
+	}
+	walk(n.X)
+
+	cheap := cheapExpr(n, init)
+
+	slice := typecheck.MakeDotArgs(types.NewSlice(types.Types[types.TUNSAFEPTR]), originals)
+	slice.SetEsc(ir.EscNone)
+
+	init.Append(mkcall("checkptrArithmetic", nil, init, typecheck.ConvNop(cheap, types.Types[types.TUNSAFEPTR]), slice))
+	// TODO(khr): Mark backing store of slice as dead. This will allow us to reuse
+	// the backing store for multiple calls to checkptrArithmetic.
+
+	return cheap
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "fmt"
+ "go/constant"
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/staticdata"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+)
+
+// walkExpr walks expression n, lowering it for the back end and appending
+// any side-effect statements to init.
+//
+// The result of walkExpr MUST be assigned back to n, e.g.
+// 	n.Left = walkExpr(n.Left, init)
+func walkExpr(n ir.Node, init *ir.Nodes) ir.Node {
+	if n == nil {
+		return n
+	}
+
+	// Eagerly checkwidth all expressions for the back end.
+	if n.Type() != nil && !n.Type().WidthCalculated() {
+		switch n.Type().Kind() {
+		case types.TBLANK, types.TNIL, types.TIDEAL:
+		default:
+			types.CheckSize(n.Type())
+		}
+	}
+
+	if init == n.PtrInit() {
+		// not okay to use n->ninit when walking n,
+		// because we might replace n with some other node
+		// and would lose the init list.
+		base.Fatalf("walkexpr init == &n->ninit")
+	}
+
+	if len(n.Init()) != 0 {
+		walkStmtList(n.Init())
+		init.Append(n.PtrInit().Take()...)
+	}
+
+	lno := ir.SetPos(n)
+
+	if base.Flag.LowerW > 1 {
+		ir.Dump("before walk expr", n)
+	}
+
+	if n.Typecheck() != 1 {
+		base.Fatalf("missed typecheck: %+v", n)
+	}
+
+	if n.Type().IsUntyped() {
+		base.Fatalf("expression has untyped type: %+v", n)
+	}
+
+	// Heap-moved variables are accessed through their heap address.
+	if n.Op() == ir.ONAME && n.(*ir.Name).Class_ == ir.PAUTOHEAP {
+		n := n.(*ir.Name)
+		nn := ir.NewStarExpr(base.Pos, n.Name().Heapaddr)
+		nn.X.MarkNonNil()
+		return walkExpr(typecheck.Expr(nn), init)
+	}
+
+	n = walkExpr1(n, init)
+
+	// Expressions that are constant at run time but not
+	// considered const by the language spec are not turned into
+	// constants until walk. For example, if n is y%1 == 0, the
+	// walk of y%1 may have replaced it by 0.
+	// Check whether n with its updated args is itself now a constant.
+	t := n.Type()
+	n = typecheck.EvalConst(n)
+	if n.Type() != t {
+		base.Fatalf("evconst changed Type: %v had type %v, now %v", n, t, n.Type())
+	}
+	if n.Op() == ir.OLITERAL {
+		n = typecheck.Expr(n)
+		// Emit string symbol now to avoid emitting
+		// any concurrently during the backend.
+		if v := n.Val(); v.Kind() == constant.String {
+			_ = staticdata.StringSym(n.Pos(), constant.StringVal(v))
+		}
+	}
+
+	updateHasCall(n)
+
+	if base.Flag.LowerW != 0 && n != nil {
+		ir.Dump("after walk expr", n)
+	}
+
+	base.Pos = lno
+	return n
+}
+
+// walkExpr1 dispatches on n.Op() to the per-operation walk helpers.
+// Every case must return (or panic) so that the result type after any
+// rewriting is unambiguous; see the comment at the end of the switch.
+func walkExpr1(n ir.Node, init *ir.Nodes) ir.Node {
+	switch n.Op() {
+	default:
+		ir.Dump("walk", n)
+		base.Fatalf("walkexpr: switch 1 unknown op %+v", n.Op())
+		panic("unreachable")
+
+	case ir.ONONAME, ir.OGETG, ir.ONEWOBJ, ir.OMETHEXPR:
+		return n
+
+	case ir.OTYPE, ir.ONAME, ir.OLITERAL, ir.ONIL, ir.ONAMEOFFSET:
+		// TODO(mdempsky): Just return n; see discussion on CL 38655.
+		// Perhaps refactor to use Node.mayBeShared for these instead.
+		// If these return early, make sure to still call
+		// stringsym for constant strings.
+		return n
+
+	case ir.ONOT, ir.ONEG, ir.OPLUS, ir.OBITNOT, ir.OREAL, ir.OIMAG, ir.OSPTR, ir.OITAB, ir.OIDATA:
+		n := n.(*ir.UnaryExpr)
+		n.X = walkExpr(n.X, init)
+		return n
+
+	case ir.ODOTMETH, ir.ODOTINTER:
+		n := n.(*ir.SelectorExpr)
+		n.X = walkExpr(n.X, init)
+		return n
+
+	case ir.OADDR:
+		n := n.(*ir.AddrExpr)
+		n.X = walkExpr(n.X, init)
+		return n
+
+	case ir.ODEREF:
+		n := n.(*ir.StarExpr)
+		n.X = walkExpr(n.X, init)
+		return n
+
+	case ir.OEFACE, ir.OAND, ir.OANDNOT, ir.OSUB, ir.OMUL, ir.OADD, ir.OOR, ir.OXOR, ir.OLSH, ir.ORSH:
+		n := n.(*ir.BinaryExpr)
+		n.X = walkExpr(n.X, init)
+		n.Y = walkExpr(n.Y, init)
+		return n
+
+	case ir.ODOT, ir.ODOTPTR:
+		n := n.(*ir.SelectorExpr)
+		return walkDot(n, init)
+
+	case ir.ODOTTYPE, ir.ODOTTYPE2:
+		n := n.(*ir.TypeAssertExpr)
+		return walkDotType(n, init)
+
+	case ir.OLEN, ir.OCAP:
+		n := n.(*ir.UnaryExpr)
+		return walkLenCap(n, init)
+
+	case ir.OCOMPLEX:
+		n := n.(*ir.BinaryExpr)
+		n.X = walkExpr(n.X, init)
+		n.Y = walkExpr(n.Y, init)
+		return n
+
+	case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
+		n := n.(*ir.BinaryExpr)
+		return walkCompare(n, init)
+
+	case ir.OANDAND, ir.OOROR:
+		n := n.(*ir.LogicalExpr)
+		return walkLogical(n, init)
+
+	case ir.OPRINT, ir.OPRINTN:
+		return walkPrint(n.(*ir.CallExpr), init)
+
+	case ir.OPANIC:
+		n := n.(*ir.UnaryExpr)
+		return mkcall("gopanic", nil, init, n.X)
+
+	case ir.ORECOVER:
+		n := n.(*ir.CallExpr)
+		return mkcall("gorecover", n.Type(), init, typecheck.NodAddr(ir.RegFP))
+
+	case ir.OCLOSUREREAD, ir.OCFUNC:
+		return n
+
+	case ir.OCALLINTER, ir.OCALLFUNC, ir.OCALLMETH:
+		n := n.(*ir.CallExpr)
+		return walkCall(n, init)
+
+	case ir.OAS, ir.OASOP:
+		return walkAssign(init, n)
+
+	case ir.OAS2:
+		n := n.(*ir.AssignListStmt)
+		return walkAssignList(init, n)
+
+	// a,b,... = fn()
+	case ir.OAS2FUNC:
+		n := n.(*ir.AssignListStmt)
+		return walkAssignFunc(init, n)
+
+	// x, y = <-c
+	// order.stmt made sure x is addressable or blank.
+	case ir.OAS2RECV:
+		n := n.(*ir.AssignListStmt)
+		return walkAssignRecv(init, n)
+
+	// a,b = m[i]
+	case ir.OAS2MAPR:
+		n := n.(*ir.AssignListStmt)
+		return walkAssignMapRead(init, n)
+
+	case ir.ODELETE:
+		n := n.(*ir.CallExpr)
+		return walkDelete(init, n)
+
+	case ir.OAS2DOTTYPE:
+		n := n.(*ir.AssignListStmt)
+		return walkAssignDotType(n, init)
+
+	case ir.OCONVIFACE:
+		n := n.(*ir.ConvExpr)
+		return walkConvInterface(n, init)
+
+	case ir.OCONV, ir.OCONVNOP:
+		n := n.(*ir.ConvExpr)
+		return walkConv(n, init)
+
+	case ir.ODIV, ir.OMOD:
+		n := n.(*ir.BinaryExpr)
+		return walkDivMod(n, init)
+
+	case ir.OINDEX:
+		n := n.(*ir.IndexExpr)
+		return walkIndex(n, init)
+
+	case ir.OINDEXMAP:
+		n := n.(*ir.IndexExpr)
+		return walkIndexMap(n, init)
+
+	case ir.ORECV:
+		base.Fatalf("walkexpr ORECV") // should see inside OAS only
+		panic("unreachable")
+
+	case ir.OSLICEHEADER:
+		n := n.(*ir.SliceHeaderExpr)
+		return walkSliceHeader(n, init)
+
+	case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR:
+		n := n.(*ir.SliceExpr)
+		return walkSlice(n, init)
+
+	case ir.ONEW:
+		n := n.(*ir.UnaryExpr)
+		return walkNew(n, init)
+
+	case ir.OADDSTR:
+		return walkAddString(n.(*ir.AddStringExpr), init)
+
+	case ir.OAPPEND:
+		// order should make sure we only see OAS(node, OAPPEND), which we handle above.
+		base.Fatalf("append outside assignment")
+		panic("unreachable")
+
+	case ir.OCOPY:
+		return walkCopy(n.(*ir.BinaryExpr), init, base.Flag.Cfg.Instrumenting && !base.Flag.CompilingRuntime)
+
+	case ir.OCLOSE:
+		n := n.(*ir.UnaryExpr)
+		return walkClose(n, init)
+
+	case ir.OMAKECHAN:
+		n := n.(*ir.MakeExpr)
+		return walkMakeChan(n, init)
+
+	case ir.OMAKEMAP:
+		n := n.(*ir.MakeExpr)
+		return walkMakeMap(n, init)
+
+	case ir.OMAKESLICE:
+		n := n.(*ir.MakeExpr)
+		return walkMakeSlice(n, init)
+
+	case ir.OMAKESLICECOPY:
+		n := n.(*ir.MakeExpr)
+		return walkMakeSliceCopy(n, init)
+
+	case ir.ORUNESTR:
+		n := n.(*ir.ConvExpr)
+		return walkRuneToString(n, init)
+
+	case ir.OBYTES2STR, ir.ORUNES2STR:
+		n := n.(*ir.ConvExpr)
+		return walkBytesRunesToString(n, init)
+
+	case ir.OBYTES2STRTMP:
+		n := n.(*ir.ConvExpr)
+		return walkBytesToStringTemp(n, init)
+
+	case ir.OSTR2BYTES:
+		n := n.(*ir.ConvExpr)
+		return walkStringToBytes(n, init)
+
+	case ir.OSTR2BYTESTMP:
+		n := n.(*ir.ConvExpr)
+		return walkStringToBytesTemp(n, init)
+
+	case ir.OSTR2RUNES:
+		n := n.(*ir.ConvExpr)
+		return walkStringToRunes(n, init)
+
+	case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT, ir.OPTRLIT:
+		return walkCompLit(n, init)
+
+	case ir.OSEND:
+		n := n.(*ir.SendStmt)
+		return walkSend(n, init)
+
+	case ir.OCLOSURE:
+		return walkClosure(n.(*ir.ClosureExpr), init)
+
+	case ir.OCALLPART:
+		return walkCallPart(n.(*ir.CallPartExpr), init)
+	}
+
+	// No return! Each case must return (or panic),
+	// to avoid confusion about what gets returned
+	// in the presence of type assertions.
+}
+
+// walkExprList walks every expression in s, storing each result back in
+// place. Statement-level side effects are appended to init.
+func walkExprList(s []ir.Node, init *ir.Nodes) {
+	for i := 0; i < len(s); i++ {
+		s[i] = walkExpr(s[i], init)
+	}
+}
+
+// walkExprListCheap first makes each expression in s cheap (hoisting its
+// side effects into init), then walks the cheapened expression in place.
+func walkExprListCheap(s []ir.Node, init *ir.Nodes) {
+	for i := range s {
+		s[i] = walkExpr(cheapExpr(s[i], init), init)
+	}
+}
+
+// walkExprListSafe first makes each expression in s side-effect free
+// (hoisting effects into init), then walks the safe expression in place.
+func walkExprListSafe(s []ir.Node, init *ir.Nodes) {
+	for i := range s {
+		s[i] = walkExpr(safeExpr(s[i], init), init)
+	}
+}
+
+// cheapExpr returns a side-effect free and cheap version of n, appending
+// any needed evaluation to init. The result may not be assignable.
+func cheapExpr(n ir.Node, init *ir.Nodes) ir.Node {
+	// Names and constants are already free to re-evaluate.
+	if op := n.Op(); op == ir.ONAME || op == ir.OLITERAL || op == ir.ONIL {
+		return n
+	}
+	return copyExpr(n, n.Type(), init)
+}
+
+// safeExpr returns a side effect-free version of n, appending side effects
+// to init. Unlike cheapExpr, the result is assignable if n is: for each
+// lvalue shape it recurses into the operands and rebuilds the expression.
+func safeExpr(n ir.Node, init *ir.Nodes) ir.Node {
+	if n == nil {
+		return nil
+	}
+
+	if len(n.Init()) != 0 {
+		walkStmtList(n.Init())
+		init.Append(n.PtrInit().Take()...)
+	}
+
+	switch n.Op() {
+	case ir.ONAME, ir.OLITERAL, ir.ONIL, ir.ONAMEOFFSET:
+		return n
+
+	case ir.OLEN, ir.OCAP:
+		n := n.(*ir.UnaryExpr)
+		l := safeExpr(n.X, init)
+		if l == n.X {
+			return n
+		}
+		a := ir.Copy(n).(*ir.UnaryExpr)
+		a.X = l
+		return walkExpr(typecheck.Expr(a), init)
+
+	case ir.ODOT, ir.ODOTPTR:
+		n := n.(*ir.SelectorExpr)
+		l := safeExpr(n.X, init)
+		if l == n.X {
+			return n
+		}
+		a := ir.Copy(n).(*ir.SelectorExpr)
+		a.X = l
+		return walkExpr(typecheck.Expr(a), init)
+
+	case ir.ODEREF:
+		n := n.(*ir.StarExpr)
+		l := safeExpr(n.X, init)
+		if l == n.X {
+			return n
+		}
+		a := ir.Copy(n).(*ir.StarExpr)
+		a.X = l
+		return walkExpr(typecheck.Expr(a), init)
+
+	case ir.OINDEX, ir.OINDEXMAP:
+		n := n.(*ir.IndexExpr)
+		l := safeExpr(n.X, init)
+		r := safeExpr(n.Index, init)
+		if l == n.X && r == n.Index {
+			return n
+		}
+		a := ir.Copy(n).(*ir.IndexExpr)
+		a.X = l
+		a.Index = r
+		return walkExpr(typecheck.Expr(a), init)
+
+	case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT:
+		n := n.(*ir.CompLitExpr)
+		if isStaticCompositeLiteral(n) {
+			return n
+		}
+	}
+
+	// make a copy; must not be used as an lvalue
+	if ir.IsAssignable(n) {
+		base.Fatalf("missing lvalue case in safeexpr: %v", n)
+	}
+	return cheapExpr(n, init)
+}
+
+// copyExpr evaluates n into a fresh temporary of type t, appending the
+// assignment to init, and returns the temporary.
+func copyExpr(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node {
+	tmp := typecheck.Temp(t)
+	appendWalkStmt(init, ir.NewAssignStmt(base.Pos, tmp, n))
+	return tmp
+}
+
+// walkAddString lowers an OADDSTR string concatenation into a call to the
+// runtime's concatstringN helpers (N <= 5) or concatstrings for more parts,
+// optionally providing a stack buffer when the result does not escape.
+func walkAddString(n *ir.AddStringExpr, init *ir.Nodes) ir.Node {
+	c := len(n.List)
+
+	if c < 2 {
+		base.Fatalf("addstr count %d too small", c)
+	}
+
+	buf := typecheck.NodNil()
+	if n.Esc() == ir.EscNone {
+		// Sum the sizes of the constant operands to decide whether the
+		// result can fit in a stack buffer.
+		sz := int64(0)
+		for _, n1 := range n.List {
+			if n1.Op() == ir.OLITERAL {
+				sz += int64(len(ir.StringVal(n1)))
+			}
+		}
+
+		// Don't allocate the buffer if the result won't fit.
+		if sz < tmpstringbufsize {
+			// Create temporary buffer for result string on stack.
+			t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize)
+			buf = typecheck.NodAddr(typecheck.Temp(t))
+		}
+	}
+
+	// build list of string arguments
+	args := []ir.Node{buf}
+	for _, n2 := range n.List {
+		args = append(args, typecheck.Conv(n2, types.Types[types.TSTRING]))
+	}
+
+	var fn string
+	if c <= 5 {
+		// small numbers of strings use direct runtime helpers.
+		// note: order.expr knows this cutoff too.
+		fn = fmt.Sprintf("concatstring%d", c)
+	} else {
+		// large numbers of strings are passed to the runtime as a slice.
+		fn = "concatstrings"
+
+		t := types.NewSlice(types.Types[types.TSTRING])
+		// args[1:] to skip buf arg
+		slice := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(t), args[1:])
+		slice.Prealloc = n.Prealloc
+		args = []ir.Node{buf, slice}
+		slice.SetEsc(ir.EscNone)
+	}
+
+	cat := typecheck.LookupRuntime(fn)
+	r := ir.NewCallExpr(base.Pos, ir.OCALL, cat, nil)
+	r.Args.Set(args)
+	r1 := typecheck.Expr(r)
+	r1 = walkExpr(r1, init)
+	// Restore the original (possibly named) string type.
+	r1.SetType(n.Type())
+
+	return r1
+}
+
+// walkCall walks an OCALLFUNC, OCALLINTER, or OCALLMETH node.
+// Direct calls of closures are rewritten into calls of the underlying
+// function with the captured variables prepended to the argument list.
+func walkCall(n *ir.CallExpr, init *ir.Nodes) ir.Node {
+	if n.Op() == ir.OCALLINTER {
+		usemethod(n)
+		reflectdata.MarkUsedIfaceMethod(n)
+	}
+
+	if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.OCLOSURE {
+		// Transform direct call of a closure to call of a normal function.
+		// transformclosure already did all preparation work.
+
+		// Prepend captured variables to argument list.
+		clo := n.X.(*ir.ClosureExpr)
+		n.Args.Prepend(clo.Func.ClosureEnter...)
+		clo.Func.ClosureEnter.Set(nil)
+
+		// Replace OCLOSURE with ONAME/PFUNC.
+		n.X = clo.Func.Nname
+
+		// Update type of OCALLFUNC node.
+		// Output arguments had not changed, but their offsets could.
+		if n.X.Type().NumResults() == 1 {
+			n.SetType(n.X.Type().Results().Field(0).Type)
+		} else {
+			n.SetType(n.X.Type().Results())
+		}
+	}
+
+	walkCall1(n, init)
+	return n
+}
+
+// walkCall1 walks the function and arguments of a call, moving arguments
+// whose evaluation might itself call a function into temporaries so that
+// later calls cannot clobber arguments already placed on the stack.
+// Walked arguments end up in n.Rargs; n.Args holds the temp assignments.
+func walkCall1(n *ir.CallExpr, init *ir.Nodes) {
+	if len(n.Rargs) != 0 {
+		return // already walked
+	}
+
+	params := n.X.Type().Params()
+	args := n.Args
+
+	n.X = walkExpr(n.X, init)
+	walkExprList(args, init)
+
+	// If this is a method call, add the receiver at the beginning of the args.
+	if n.Op() == ir.OCALLMETH {
+		withRecv := make([]ir.Node, len(args)+1)
+		dot := n.X.(*ir.SelectorExpr)
+		withRecv[0] = dot.X
+		dot.X = nil
+		copy(withRecv[1:], args)
+		args = withRecv
+	}
+
+	// For any argument whose evaluation might require a function call,
+	// store that argument into a temporary variable,
+	// to prevent that calls from clobbering arguments already on the stack.
+	// When instrumenting, all arguments might require function calls.
+	var tempAssigns []ir.Node
+	for i, arg := range args {
+		updateHasCall(arg)
+		// Determine param type.
+		var t *types.Type
+		if n.Op() == ir.OCALLMETH {
+			if i == 0 {
+				// Slot 0 is the receiver for method calls.
+				t = n.X.Type().Recv().Type
+			} else {
+				t = params.Field(i - 1).Type
+			}
+		} else {
+			t = params.Field(i).Type
+		}
+		if base.Flag.Cfg.Instrumenting || fncall(arg, t) {
+			// make assignment of fncall to tempAt
+			tmp := typecheck.Temp(t)
+			a := convas(ir.NewAssignStmt(base.Pos, tmp, arg), init)
+			tempAssigns = append(tempAssigns, a)
+			// replace arg with temp
+			args[i] = tmp
+		}
+	}
+
+	n.Args.Set(tempAssigns)
+	n.Rargs.Set(args)
+}
+
+// walkDivMod walks an ODIV or OMOD node. Complex division and, on 32-bit
+// targets, 64-bit integer div/mod are rewritten into runtime calls; float
+// divisions and power-of-two/small-constant cases are left for SSA.
+func walkDivMod(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
+	n.X = walkExpr(n.X, init)
+	n.Y = walkExpr(n.Y, init)
+
+	// rewrite complex div into function call.
+	et := n.X.Type().Kind()
+
+	if types.IsComplex[et] && n.Op() == ir.ODIV {
+		t := n.Type()
+		call := mkcall("complex128div", types.Types[types.TCOMPLEX128], init, typecheck.Conv(n.X, types.Types[types.TCOMPLEX128]), typecheck.Conv(n.Y, types.Types[types.TCOMPLEX128]))
+		return typecheck.Conv(call, t)
+	}
+
+	// Nothing to do for float divisions.
+	if types.IsFloat[et] {
+		return n
+	}
+
+	// rewrite 64-bit div and mod on 32-bit architectures.
+	// TODO: Remove this code once we can introduce
+	// runtime calls late in SSA processing.
+	if types.RegSize < 8 && (et == types.TINT64 || et == types.TUINT64) {
+		if n.Y.Op() == ir.OLITERAL {
+			// Leave div/mod by constant powers of 2 or small 16-bit constants.
+			// The SSA backend will handle those.
+			switch et {
+			case types.TINT64:
+				c := ir.Int64Val(n.Y)
+				if c < 0 {
+					c = -c
+				}
+				if c != 0 && c&(c-1) == 0 {
+					return n
+				}
+			case types.TUINT64:
+				c := ir.Uint64Val(n.Y)
+				if c < 1<<16 {
+					return n
+				}
+				if c != 0 && c&(c-1) == 0 {
+					return n
+				}
+			}
+		}
+		// Build the runtime helper name, e.g. "int64div" or "uint64mod".
+		var fn string
+		if et == types.TINT64 {
+			fn = "int64"
+		} else {
+			fn = "uint64"
+		}
+		if n.Op() == ir.ODIV {
+			fn += "div"
+		} else {
+			fn += "mod"
+		}
+		return mkcall(fn, n.Type(), init, typecheck.Conv(n.X, types.Types[et]), typecheck.Conv(n.Y, types.Types[et]))
+	}
+	return n
+}
+
+// walkDot walks an ODOT or ODOTPTR node.
+// usefield records go:"track"-annotated field accesses for field tracking
+// before the receiver expression is walked.
+func walkDot(n *ir.SelectorExpr, init *ir.Nodes) ir.Node {
+	usefield(n)
+	n.X = walkExpr(n.X, init)
+	return n
+}
+
+// walkDotType walks an ODOTTYPE or ODOTTYPE2 node, materializing the type
+// descriptor (and itab, for concrete-to-nonempty-interface assertions)
+// addresses the back end needs.
+func walkDotType(n *ir.TypeAssertExpr, init *ir.Nodes) ir.Node {
+	n.X = walkExpr(n.X, init)
+	// Set up interface type addresses for back end.
+	n.Ntype = reflectdata.TypePtr(n.Type())
+	if n.Op() == ir.ODOTTYPE {
+		n.Ntype.(*ir.AddrExpr).Alloc = reflectdata.TypePtr(n.X.Type())
+	}
+	if !n.Type().IsInterface() && !n.X.Type().IsEmptyInterface() {
+		n.Itab = []ir.Node{reflectdata.ITabAddr(n.Type(), n.X.Type())}
+	}
+	return n
+}
+
+// walkIndex walks an OINDEX node, eliding the bounds check when the index
+// expression provably stays within the static array (or constant string)
+// bound, and reporting constant out-of-range indexes as errors.
+func walkIndex(n *ir.IndexExpr, init *ir.Nodes) ir.Node {
+	n.X = walkExpr(n.X, init)
+
+	// save the original node for bounds checking elision.
+	// If it was a ODIV/OMOD walk might rewrite it.
+	r := n.Index
+
+	n.Index = walkExpr(n.Index, init)
+
+	// if range of type cannot exceed static array bound,
+	// disable bounds check.
+	if n.Bounded() {
+		return n
+	}
+	t := n.X.Type()
+	if t != nil && t.IsPtr() {
+		t = t.Elem()
+	}
+	if t.IsArray() {
+		n.SetBounded(bounded(r, t.NumElem()))
+		if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Index, constant.Int) {
+			base.Warn("index bounds check elided")
+		}
+		if ir.IsSmallIntConst(n.Index) && !n.Bounded() {
+			base.Errorf("index out of bounds")
+		}
+	} else if ir.IsConst(n.X, constant.String) {
+		n.SetBounded(bounded(r, int64(len(ir.StringVal(n.X)))))
+		if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Index, constant.Int) {
+			base.Warn("index bounds check elided")
+		}
+		if ir.IsSmallIntConst(n.Index) && !n.Bounded() {
+			base.Errorf("index out of bounds")
+		}
+	}
+
+	// A constant index must be non-negative and fit in an int.
+	if ir.IsConst(n.Index, constant.Int) {
+		if v := n.Index.Val(); constant.Sign(v) < 0 || ir.ConstOverflow(v, types.Types[types.TINT]) {
+			base.Errorf("index out of bounds")
+		}
+	}
+	return n
+}
+
+// walkIndexMap walks an OINDEXMAP node, rewriting m[k] into a dereference
+// of the pointer returned by the appropriate runtime map access/assign
+// helper (specialized by key type via mapfast).
+func walkIndexMap(n *ir.IndexExpr, init *ir.Nodes) ir.Node {
+	// Replace m[k] with *map{access1,assign}(maptype, m, &k)
+	n.X = walkExpr(n.X, init)
+	n.Index = walkExpr(n.Index, init)
+	map_ := n.X
+	key := n.Index
+	t := map_.Type()
+	var call *ir.CallExpr
+	if n.Assigned {
+		// This m[k] expression is on the left-hand side of an assignment.
+		fast := mapfast(t)
+		if fast == mapslow {
+			// standard version takes key by reference.
+			// order.expr made sure key is addressable.
+			key = typecheck.NodAddr(key)
+		}
+		call = mkcall1(mapfn(mapassign[fast], t), nil, init, reflectdata.TypePtr(t), map_, key)
+	} else {
+		// m[k] is not the target of an assignment.
+		fast := mapfast(t)
+		if fast == mapslow {
+			// standard version takes key by reference.
+			// order.expr made sure key is addressable.
+			key = typecheck.NodAddr(key)
+		}
+
+		// Large values use the _fat variant, which takes a pointer to a
+		// zero value to return for missing keys.
+		if w := t.Elem().Width; w <= zeroValSize {
+			call = mkcall1(mapfn(mapaccess1[fast], t), types.NewPtr(t.Elem()), init, reflectdata.TypePtr(t), map_, key)
+		} else {
+			z := reflectdata.ZeroAddr(w)
+			call = mkcall1(mapfn("mapaccess1_fat", t), types.NewPtr(t.Elem()), init, reflectdata.TypePtr(t), map_, key, z)
+		}
+	}
+	call.SetType(types.NewPtr(t.Elem()))
+	call.MarkNonNil() // mapaccess1* and mapassign always return non-nil pointers.
+	star := ir.NewStarExpr(base.Pos, call)
+	star.SetType(t.Elem())
+	star.SetTypecheck(1)
+	return star
+}
+
+// walkLogical walks an OANDAND or OOROR node. Side effects produced while
+// walking the right operand must not run before the left operand has been
+// evaluated, so they are collected separately and attached as the right
+// operand's init list rather than appended to init.
+func walkLogical(n *ir.LogicalExpr, init *ir.Nodes) ir.Node {
+	n.X = walkExpr(n.X, init)
+
+	var rhsEffects ir.Nodes
+	rhs := walkExpr(n.Y, &rhsEffects)
+	n.Y = ir.InitExpr(rhsEffects, rhs)
+	return n
+}
+
+// walkSend lowers an OSEND node into a call to the runtime's chansend1,
+// which takes the element by address.
+func walkSend(n *ir.SendStmt, init *ir.Nodes) ir.Node {
+	val := typecheck.AssignConv(n.Value, n.Chan.Type().Elem(), "chan send")
+	val = walkExpr(val, init)
+	addr := typecheck.NodAddr(val)
+	return mkcall1(chanfn("chansend1", 2, n.Chan.Type()), nil, init, n.Chan, addr)
+}
+
+// walkSlice walks an OSLICE, OSLICEARR, OSLICESTR, OSLICE3, or OSLICE3ARR
+// node, simplifying trivial bounds (x[0:j] -> x[:j], x[i:j:cap(x)] -> x[i:j])
+// and, under -d=checkptr, instrumenting unsafe.Pointer-to-array slicing.
+func walkSlice(n *ir.SliceExpr, init *ir.Nodes) ir.Node {
+
+	checkSlice := ir.ShouldCheckPtr(ir.CurFunc, 1) && n.Op() == ir.OSLICE3ARR && n.X.Op() == ir.OCONVNOP && n.X.(*ir.ConvExpr).X.Type().IsUnsafePtr()
+	if checkSlice {
+		// Walk only the conversion operand; the conversion itself is
+		// handled by walkCheckPtrAlignment below, after max is known.
+		conv := n.X.(*ir.ConvExpr)
+		conv.X = walkExpr(conv.X, init)
+	} else {
+		n.X = walkExpr(n.X, init)
+	}
+
+	low, high, max := n.SliceBounds()
+	low = walkExpr(low, init)
+	if low != nil && ir.IsZero(low) {
+		// Reduce x[0:j] to x[:j] and x[0:j:k] to x[:j:k].
+		low = nil
+	}
+	high = walkExpr(high, init)
+	max = walkExpr(max, init)
+	n.SetSliceBounds(low, high, max)
+	if checkSlice {
+		n.X = walkCheckPtrAlignment(n.X.(*ir.ConvExpr), init, max)
+	}
+
+	if n.Op().IsSlice3() {
+		if max != nil && max.Op() == ir.OCAP && ir.SameSafeExpr(n.X, max.(*ir.UnaryExpr).X) {
+			// Reduce x[i:j:cap(x)] to x[i:j].
+			if n.Op() == ir.OSLICE3 {
+				n.SetOp(ir.OSLICE)
+			} else {
+				n.SetOp(ir.OSLICEARR)
+			}
+			return reduceSlice(n)
+		}
+		return n
+	}
+	return reduceSlice(n)
+}
+
+// walkSliceHeader walks an OSLICEHEADER node: the pointer operand and the
+// two length/capacity operands are each walked in place.
+func walkSliceHeader(n *ir.SliceHeaderExpr, init *ir.Nodes) ir.Node {
+	n.Ptr = walkExpr(n.Ptr, init)
+	for i := range n.LenCap {
+		n.LenCap[i] = walkExpr(n.LenCap[i], init)
+	}
+	return n
+}
+
+// reduceSlice drops redundant slice bounds (x[i:len(x)] -> x[i:]) and, when
+// both bounds vanish on a plain slice/string, replaces x[:] by x itself.
+//
+// TODO(josharian): combine this with its caller and simplify
+func reduceSlice(n *ir.SliceExpr) ir.Node {
+	low, high, max := n.SliceBounds()
+	if high != nil && high.Op() == ir.OLEN && ir.SameSafeExpr(n.X, high.(*ir.UnaryExpr).X) {
+		// Reduce x[i:len(x)] to x[i:].
+		high = nil
+	}
+	n.SetSliceBounds(low, high, max)
+	if (n.Op() == ir.OSLICE || n.Op() == ir.OSLICESTR) && low == nil && high == nil {
+		// Reduce x[:] to x.
+		if base.Debug.Slice > 0 {
+			base.Warn("slice: omit slice operation")
+		}
+		return n.X
+	}
+	return n
+}
+
+// bounded reports whether the integer expression n is provably in the range
+// [0, max). It recognizes small constants and conservative facts derived
+// from masking (&, &^), unsigned mod, unsigned div, and unsigned right shift.
+// (The old doc said "return 1 ... 0 otherwise"; the function returns bool.)
+func bounded(n ir.Node, max int64) bool {
+	if n.Type() == nil || !n.Type().IsInteger() {
+		return false
+	}
+
+	sign := n.Type().IsSigned()
+	bits := int32(8 * n.Type().Width)
+
+	if ir.IsSmallIntConst(n) {
+		v := ir.Int64Val(n)
+		return 0 <= v && v < max
+	}
+
+	switch n.Op() {
+	case ir.OAND, ir.OANDNOT:
+		// x & c (or x &^ ^c) is bounded by the constant mask.
+		n := n.(*ir.BinaryExpr)
+		v := int64(-1)
+		switch {
+		case ir.IsSmallIntConst(n.X):
+			v = ir.Int64Val(n.X)
+		case ir.IsSmallIntConst(n.Y):
+			v = ir.Int64Val(n.Y)
+			if n.Op() == ir.OANDNOT {
+				v = ^v
+				if !sign {
+					v &= 1<<uint(bits) - 1
+				}
+			}
+		}
+		if 0 <= v && v < max {
+			return true
+		}
+
+	case ir.OMOD:
+		// Unsigned x % c is in [0, c).
+		n := n.(*ir.BinaryExpr)
+		if !sign && ir.IsSmallIntConst(n.Y) {
+			v := ir.Int64Val(n.Y)
+			if 0 <= v && v <= max {
+				return true
+			}
+		}
+
+	case ir.ODIV:
+		// Unsigned x / c shrinks the effective bit width.
+		n := n.(*ir.BinaryExpr)
+		if !sign && ir.IsSmallIntConst(n.Y) {
+			v := ir.Int64Val(n.Y)
+			for bits > 0 && v >= 2 {
+				bits--
+				v >>= 1
+			}
+		}
+
+	case ir.ORSH:
+		// Unsigned x >> c shrinks the effective bit width.
+		n := n.(*ir.BinaryExpr)
+		if !sign && ir.IsSmallIntConst(n.Y) {
+			v := ir.Int64Val(n.Y)
+			if v > int64(bits) {
+				return true
+			}
+			bits -= int32(v)
+		}
+	}
+
+	// Fall back to the (possibly narrowed) bit-width bound.
+	if !sign && bits <= 62 && 1<<uint(bits) <= max {
+		return true
+	}
+
+	return false
+}
+
+// usemethod checks interface method calls for uses of reflect.Type.Method,
+// marking the enclosing function (and its LSym) so the linker keeps the
+// method metadata that reflection may need.
+func usemethod(n *ir.CallExpr) {
+	t := n.X.Type()
+
+	// Looking for either of:
+	//	Method(int) reflect.Method
+	//	MethodByName(string) (reflect.Method, bool)
+	//
+	// TODO(crawshaw): improve precision of match by working out
+	//                 how to check the method name.
+	if n := t.NumParams(); n != 1 {
+		return
+	}
+	if n := t.NumResults(); n != 1 && n != 2 {
+		return
+	}
+	p0 := t.Params().Field(0)
+	res0 := t.Results().Field(0)
+	var res1 *types.Field
+	if t.NumResults() == 2 {
+		res1 = t.Results().Field(1)
+	}
+
+	if res1 == nil {
+		// Method(int) shape.
+		if p0.Type.Kind() != types.TINT {
+			return
+		}
+	} else {
+		// MethodByName(string) (..., bool) shape.
+		if !p0.Type.IsString() {
+			return
+		}
+		if !res1.Type.IsBoolean() {
+			return
+		}
+	}
+
+	// Note: Don't rely on res0.Type.String() since its formatting depends on multiple factors
+	//       (including global variables such as numImports - was issue #19028).
+	// Also need to check for reflect package itself (see Issue #38515).
+	if s := res0.Type.Sym(); s != nil && s.Name == "Method" && types.IsReflectPkg(s.Pkg) {
+		ir.CurFunc.SetReflectMethod(true)
+		// The LSym is initialized at this point. We need to set the attribute on the LSym.
+		ir.CurFunc.LSym.Set(obj.AttrReflectMethod, true)
+	}
+}
+
+// usefield records an access to a go:"track"-annotated struct field in the
+// current function's FieldTrack set, for the linker's field-tracking
+// feature. It is a no-op unless GOEXPERIMENT fieldtrack is enabled.
+func usefield(n *ir.SelectorExpr) {
+	if objabi.Fieldtrack_enabled == 0 {
+		return
+	}
+
+	switch n.Op() {
+	default:
+		base.Fatalf("usefield %v", n.Op())
+
+	case ir.ODOT, ir.ODOTPTR:
+		break
+	}
+	if n.Sel == nil {
+		// No field name.  This DOTPTR was built by the compiler for access
+		// to runtime data structures.  Ignore.
+		return
+	}
+
+	t := n.X.Type()
+	if t.IsPtr() {
+		t = t.Elem()
+	}
+	field := n.Selection
+	if field == nil {
+		base.Fatalf("usefield %v %v without paramfld", n.X.Type(), n.Sel)
+	}
+	// Sanity-check the cached selection against the node.
+	if field.Sym != n.Sel || field.Offset != n.Offset {
+		base.Fatalf("field inconsistency: %v,%v != %v,%v", field.Sym, field.Offset, n.Sel, n.Offset)
+	}
+	if !strings.Contains(field.Note, "go:\"track\"") {
+		return
+	}
+
+	outer := n.X.Type()
+	if outer.IsPtr() {
+		outer = outer.Elem()
+	}
+	if outer.Sym() == nil {
+		base.Errorf("tracked field must be in named struct type")
+	}
+	if !types.IsExported(field.Sym.Name) {
+		base.Errorf("tracked field must be exported (upper case)")
+	}
+
+	sym := reflectdata.TrackSym(outer, field)
+	if ir.CurFunc.FieldTrack == nil {
+		ir.CurFunc.FieldTrack = make(map[*types.Sym]struct{})
+	}
+	ir.CurFunc.FieldTrack[sym] = struct{}{}
+}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package walk
import (
+ "fmt"
+
"cmd/compile/internal/base"
+ "cmd/compile/internal/escape"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/staticinit"
+ "cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/src"
- "fmt"
)
// Rewrite tree to use separate statements to enforce
// it can result in unnecessary zeroing of those variables in the function
// prologue.
-// Order holds state during the ordering process.
-type Order struct {
+// orderState holds state during the ordering process.
+type orderState struct {
out []ir.Node // list of generated statements
temp []*ir.Name // stack of temporary variables
free map[string][]*ir.Name // free list of unused temporaries, by type.LongString().
func order(fn *ir.Func) {
if base.Flag.W > 1 {
s := fmt.Sprintf("\nbefore order %v", fn.Sym())
- ir.DumpList(s, fn.Body())
+ ir.DumpList(s, fn.Body)
}
- orderBlock(fn.PtrBody(), map[string][]*ir.Name{})
+ orderBlock(&fn.Body, map[string][]*ir.Name{})
}
// append typechecks stmt and appends it to out.
-func (o *Order) append(stmt ir.Node) {
- o.out = append(o.out, typecheck(stmt, ctxStmt))
+func (o *orderState) append(stmt ir.Node) {
+ o.out = append(o.out, typecheck.Stmt(stmt))
}
// newTemp allocates a new temporary with the given type,
// pushes it onto the temp stack, and returns it.
// If clear is true, newTemp emits code to zero the temporary.
-func (o *Order) newTemp(t *types.Type, clear bool) *ir.Name {
+func (o *orderState) newTemp(t *types.Type, clear bool) *ir.Name {
var v *ir.Name
// Note: LongString is close to the type equality we want,
// but not exactly. We still need to double-check with types.Identical.
}
}
if v == nil {
- v = temp(t)
+ v = typecheck.Temp(t)
}
if clear {
- o.append(ir.Nod(ir.OAS, v, nil))
+ o.append(ir.NewAssignStmt(base.Pos, v, nil))
}
o.temp = append(o.temp, v)
// copyExpr behaves like newTemp but also emits
// code to initialize the temporary to the value n.
-func (o *Order) copyExpr(n ir.Node) ir.Node {
+func (o *orderState) copyExpr(n ir.Node) ir.Node {
return o.copyExpr1(n, false)
}
// (The other candidate would be map access, but map access
// returns a pointer to the result data instead of taking a pointer
// to be filled in.)
-func (o *Order) copyExprClear(n ir.Node) *ir.Name {
+func (o *orderState) copyExprClear(n ir.Node) *ir.Name {
return o.copyExpr1(n, true)
}
-func (o *Order) copyExpr1(n ir.Node, clear bool) *ir.Name {
+func (o *orderState) copyExpr1(n ir.Node, clear bool) *ir.Name {
t := n.Type()
v := o.newTemp(t, clear)
- o.append(ir.Nod(ir.OAS, v, n))
+ o.append(ir.NewAssignStmt(base.Pos, v, n))
return v
}
// The definition of cheap is that n is a variable or constant.
// If not, cheapExpr allocates a new tmp, emits tmp = n,
// and then returns tmp.
-func (o *Order) cheapExpr(n ir.Node) ir.Node {
+func (o *orderState) cheapExpr(n ir.Node) ir.Node {
if n == nil {
return nil
}
case ir.ONAME, ir.OLITERAL, ir.ONIL:
return n
case ir.OLEN, ir.OCAP:
- l := o.cheapExpr(n.Left())
- if l == n.Left() {
+ n := n.(*ir.UnaryExpr)
+ l := o.cheapExpr(n.X)
+ if l == n.X {
return n
}
a := ir.SepCopy(n).(*ir.UnaryExpr)
- a.SetLeft(l)
- return typecheck(a, ctxExpr)
+ a.X = l
+ return typecheck.Expr(a)
}
return o.copyExpr(n)
// as assigning to the original n.
//
// The intended use is to apply to x when rewriting x += y into x = x + y.
-func (o *Order) safeExpr(n ir.Node) ir.Node {
+func (o *orderState) safeExpr(n ir.Node) ir.Node {
switch n.Op() {
case ir.ONAME, ir.OLITERAL, ir.ONIL:
return n
case ir.OLEN, ir.OCAP:
- l := o.safeExpr(n.Left())
- if l == n.Left() {
+ n := n.(*ir.UnaryExpr)
+ l := o.safeExpr(n.X)
+ if l == n.X {
return n
}
a := ir.SepCopy(n).(*ir.UnaryExpr)
- a.SetLeft(l)
- return typecheck(a, ctxExpr)
+ a.X = l
+ return typecheck.Expr(a)
case ir.ODOT:
- l := o.safeExpr(n.Left())
- if l == n.Left() {
+ n := n.(*ir.SelectorExpr)
+ l := o.safeExpr(n.X)
+ if l == n.X {
return n
}
a := ir.SepCopy(n).(*ir.SelectorExpr)
- a.SetLeft(l)
- return typecheck(a, ctxExpr)
+ a.X = l
+ return typecheck.Expr(a)
case ir.ODOTPTR:
- l := o.cheapExpr(n.Left())
- if l == n.Left() {
+ n := n.(*ir.SelectorExpr)
+ l := o.cheapExpr(n.X)
+ if l == n.X {
return n
}
a := ir.SepCopy(n).(*ir.SelectorExpr)
- a.SetLeft(l)
- return typecheck(a, ctxExpr)
+ a.X = l
+ return typecheck.Expr(a)
case ir.ODEREF:
- l := o.cheapExpr(n.Left())
- if l == n.Left() {
+ n := n.(*ir.StarExpr)
+ l := o.cheapExpr(n.X)
+ if l == n.X {
return n
}
a := ir.SepCopy(n).(*ir.StarExpr)
- a.SetLeft(l)
- return typecheck(a, ctxExpr)
+ a.X = l
+ return typecheck.Expr(a)
case ir.OINDEX, ir.OINDEXMAP:
+ n := n.(*ir.IndexExpr)
var l ir.Node
- if n.Left().Type().IsArray() {
- l = o.safeExpr(n.Left())
+ if n.X.Type().IsArray() {
+ l = o.safeExpr(n.X)
} else {
- l = o.cheapExpr(n.Left())
+ l = o.cheapExpr(n.X)
}
- r := o.cheapExpr(n.Right())
- if l == n.Left() && r == n.Right() {
+ r := o.cheapExpr(n.Index)
+ if l == n.X && r == n.Index {
return n
}
a := ir.SepCopy(n).(*ir.IndexExpr)
- a.SetLeft(l)
- a.SetRight(r)
- return typecheck(a, ctxExpr)
+ a.X = l
+ a.Index = r
+ return typecheck.Expr(a)
default:
base.Fatalf("order.safeExpr %v", n.Op())
// because we emit explicit VARKILL instructions marking the end of those
// temporaries' lifetimes.
func isaddrokay(n ir.Node) bool {
- return islvalue(n) && (n.Op() != ir.ONAME || n.(*ir.Name).Class() == ir.PEXTERN || ir.IsAutoTmp(n))
+ return ir.IsAssignable(n) && (n.Op() != ir.ONAME || n.(*ir.Name).Class_ == ir.PEXTERN || ir.IsAutoTmp(n))
}
// addrTemp ensures that n is okay to pass by address to runtime routines.
// tmp = n, and then returns tmp.
// The result of addrTemp MUST be assigned back to n, e.g.
// n.Left = o.addrTemp(n.Left)
-func (o *Order) addrTemp(n ir.Node) ir.Node {
+func (o *orderState) addrTemp(n ir.Node) ir.Node {
if n.Op() == ir.OLITERAL || n.Op() == ir.ONIL {
// TODO: expand this to all static composite literal nodes?
- n = defaultlit(n, nil)
- dowidth(n.Type())
+ n = typecheck.DefaultLit(n, nil)
+ types.CalcSize(n.Type())
vstat := readonlystaticname(n.Type())
- var s InitSchedule
- s.staticassign(vstat, 0, n, n.Type())
- if s.out != nil {
+ var s staticinit.Schedule
+ s.StaticAssign(vstat, 0, n, n.Type())
+ if s.Out != nil {
base.Fatalf("staticassign of const generated code: %+v", n)
}
- vstat = typecheck(vstat, ctxExpr).(*ir.Name)
+ vstat = typecheck.Expr(vstat).(*ir.Name)
return vstat
}
if isaddrokay(n) {
// mapKeyTemp prepares n to be a key in a map runtime call and returns n.
// It should only be used for map runtime calls which have *_fast* versions.
-func (o *Order) mapKeyTemp(t *types.Type, n ir.Node) ir.Node {
+func (o *orderState) mapKeyTemp(t *types.Type, n ir.Node) ir.Node {
// Most map calls need to take the address of the key.
// Exception: map*_fast* calls. See golang.org/issue/19015.
if mapfast(t) == mapslow {
var replaced bool
switch n.Op() {
case ir.OBYTES2STR:
+ n := n.(*ir.ConvExpr)
n.SetOp(ir.OBYTES2STRTMP)
replaced = true
case ir.OSTRUCTLIT:
- for _, elem := range n.List().Slice() {
+ n := n.(*ir.CompLitExpr)
+ for _, elem := range n.List {
elem := elem.(*ir.StructKeyExpr)
- if mapKeyReplaceStrConv(elem.Left()) {
+ if mapKeyReplaceStrConv(elem.Value) {
replaced = true
}
}
case ir.OARRAYLIT:
- for _, elem := range n.List().Slice() {
+ n := n.(*ir.CompLitExpr)
+ for _, elem := range n.List {
if elem.Op() == ir.OKEY {
- elem = elem.(*ir.KeyExpr).Right()
+ elem = elem.(*ir.KeyExpr).Value
}
if mapKeyReplaceStrConv(elem) {
replaced = true
type ordermarker int
// markTemp returns the top of the temporary variable stack.
-func (o *Order) markTemp() ordermarker {
+func (o *orderState) markTemp() ordermarker {
return ordermarker(len(o.temp))
}
// popTemp pops temporaries off the stack until reaching the mark,
// which must have been returned by markTemp.
-func (o *Order) popTemp(mark ordermarker) {
+func (o *orderState) popTemp(mark ordermarker) {
for _, n := range o.temp[mark:] {
key := n.Type().LongString()
o.free[key] = append(o.free[key], n)
// cleanTempNoPop emits VARKILL instructions to *out
// for each temporary above the mark on the temporary stack.
// It does not pop the temporaries from the stack.
-func (o *Order) cleanTempNoPop(mark ordermarker) []ir.Node {
+func (o *orderState) cleanTempNoPop(mark ordermarker) []ir.Node {
var out []ir.Node
for i := len(o.temp) - 1; i >= int(mark); i-- {
n := o.temp[i]
- out = append(out, typecheck(ir.Nod(ir.OVARKILL, n, nil), ctxStmt))
+ out = append(out, typecheck.Stmt(ir.NewUnaryExpr(base.Pos, ir.OVARKILL, n)))
}
return out
}
// cleanTemp emits VARKILL instructions for each temporary above the
// mark on the temporary stack and removes them from the stack.
-func (o *Order) cleanTemp(top ordermarker) {
+func (o *orderState) cleanTemp(top ordermarker) {
o.out = append(o.out, o.cleanTempNoPop(top)...)
o.popTemp(top)
}
// stmtList orders each of the statements in the list.
-func (o *Order) stmtList(l ir.Nodes) {
- s := l.Slice()
+func (o *orderState) stmtList(l ir.Nodes) {
+ s := l
for i := range s {
orderMakeSliceCopy(s[i:])
o.stmt(s[i])
// and rewrites it to:
// m = OMAKESLICECOPY([]T, x, s); nil
func orderMakeSliceCopy(s []ir.Node) {
- if base.Flag.N != 0 || instrumenting {
+ if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
return
}
if len(s) < 2 || s[0] == nil || s[0].Op() != ir.OAS || s[1] == nil || s[1].Op() != ir.OCOPY {
as := s[0].(*ir.AssignStmt)
cp := s[1].(*ir.BinaryExpr)
- if as.Right() == nil || as.Right().Op() != ir.OMAKESLICE || ir.IsBlank(as.Left()) ||
- as.Left().Op() != ir.ONAME || cp.Left().Op() != ir.ONAME || cp.Right().Op() != ir.ONAME ||
- as.Left().Name() != cp.Left().Name() || cp.Left().Name() == cp.Right().Name() {
+ if as.Y == nil || as.Y.Op() != ir.OMAKESLICE || ir.IsBlank(as.X) ||
+ as.X.Op() != ir.ONAME || cp.X.Op() != ir.ONAME || cp.Y.Op() != ir.ONAME ||
+ as.X.Name() != cp.X.Name() || cp.X.Name() == cp.Y.Name() {
// The line above this one is correct with the differing equality operators:
// we want as.X and cp.X to be the same name,
// but we want the initial data to be coming from a different name.
return
}
- mk := as.Right().(*ir.MakeExpr)
- if mk.Esc() == EscNone || mk.Left() == nil || mk.Right() != nil {
+ mk := as.Y.(*ir.MakeExpr)
+ if mk.Esc() == ir.EscNone || mk.Len == nil || mk.Cap != nil {
return
}
mk.SetOp(ir.OMAKESLICECOPY)
- mk.SetRight(cp.Right())
+ mk.Cap = cp.Y
// Set bounded when m = OMAKESLICE([]T, len(s)); OCOPY(m, s)
- mk.SetBounded(mk.Left().Op() == ir.OLEN && samesafeexpr(mk.Left().(*ir.UnaryExpr).Left(), cp.Right()))
- as.SetRight(typecheck(mk, ctxExpr))
+ mk.SetBounded(mk.Len.Op() == ir.OLEN && ir.SameSafeExpr(mk.Len.(*ir.UnaryExpr).X, cp.Y))
+ as.Y = typecheck.Expr(mk)
s[1] = nil // remove separate copy call
}
// edge inserts coverage instrumentation for libfuzzer.
-func (o *Order) edge() {
+func (o *orderState) edge() {
if base.Debug.Libfuzzer == 0 {
return
}
// Create a new uint8 counter to be allocated in section
// __libfuzzer_extra_counters.
- counter := staticname(types.Types[types.TUINT8])
+ counter := staticinit.StaticName(types.Types[types.TUINT8])
counter.Name().SetLibfuzzerExtraCounter(true)
// counter += 1
- incr := ir.NewAssignOpStmt(base.Pos, ir.OADD, counter, nodintconst(1))
+ incr := ir.NewAssignOpStmt(base.Pos, ir.OADD, counter, ir.NewInt(1))
o.append(incr)
}
// and then replaces the old slice in n with the new slice.
// free is a map that can be used to obtain temporary variables by type.
func orderBlock(n *ir.Nodes, free map[string][]*ir.Name) {
- var order Order
+ var order orderState
order.free = free
mark := order.markTemp()
order.edge()
// leaves them as the init list of the final *np.
// The result of exprInPlace MUST be assigned back to n, e.g.
// n.Left = o.exprInPlace(n.Left)
-func (o *Order) exprInPlace(n ir.Node) ir.Node {
- var order Order
+func (o *orderState) exprInPlace(n ir.Node) ir.Node {
+ var order orderState
order.free = o.free
n = order.expr(n, nil)
- n = initExpr(order.out, n)
+ n = ir.InitExpr(order.out, n)
// insert new temporaries from order
// at head of outer list.
// n.Left = orderStmtInPlace(n.Left)
// free is a map that can be used to obtain temporary variables by type.
func orderStmtInPlace(n ir.Node, free map[string][]*ir.Name) ir.Node {
- var order Order
+ var order orderState
order.free = free
mark := order.markTemp()
order.stmt(n)
order.cleanTemp(mark)
- return liststmt(order.out)
+ return ir.NewBlockStmt(src.NoXPos, order.out)
}
// init moves n's init list to o.out.
-func (o *Order) init(n ir.Node) {
+func (o *orderState) init(n ir.Node) {
if ir.MayBeShared(n) {
// For concurrency safety, don't mutate potentially shared nodes.
// First, ensure that no work is required here.
- if n.Init().Len() > 0 {
+ if len(n.Init()) > 0 {
base.Fatalf("order.init shared node with ninit")
}
return
// call orders the call expression n.
// n.Op is OCALLMETH/OCALLFUNC/OCALLINTER or a builtin like OCOPY.
-func (o *Order) call(nn ir.Node) {
- if nn.Init().Len() > 0 {
+func (o *orderState) call(nn ir.Node) {
+ if len(nn.Init()) > 0 {
// Caller should have already called o.init(nn).
base.Fatalf("%v with unexpected ninit", nn.Op())
}
default:
base.Fatalf("unexpected call: %+v", n)
case *ir.UnaryExpr:
- n.SetLeft(o.expr(n.Left(), nil))
+ n.X = o.expr(n.X, nil)
case *ir.ConvExpr:
- n.SetLeft(o.expr(n.Left(), nil))
+ n.X = o.expr(n.X, nil)
case *ir.BinaryExpr:
- n.SetLeft(o.expr(n.Left(), nil))
- n.SetRight(o.expr(n.Right(), nil))
+ n.X = o.expr(n.X, nil)
+ n.Y = o.expr(n.Y, nil)
case *ir.MakeExpr:
- n.SetLeft(o.expr(n.Left(), nil))
- n.SetRight(o.expr(n.Right(), nil))
+ n.Len = o.expr(n.Len, nil)
+ n.Cap = o.expr(n.Cap, nil)
case *ir.CallExpr:
- o.exprList(n.List())
+ o.exprList(n.Args)
}
return
}
n := nn.(*ir.CallExpr)
- fixVariadicCall(n)
- n.SetLeft(o.expr(n.Left(), nil))
- o.exprList(n.List())
+ typecheck.FixVariadicCall(n)
+ n.X = o.expr(n.X, nil)
+ o.exprList(n.Args)
if n.Op() == ir.OCALLINTER {
return
// by copying it into a temp and marking that temp
// still alive when we pop the temp stack.
if arg.Op() == ir.OCONVNOP {
- if arg.Left().Type().IsUnsafePtr() {
- x := o.copyExpr(arg.Left())
- arg.SetLeft(x)
+ arg := arg.(*ir.ConvExpr)
+ if arg.X.Type().IsUnsafePtr() {
+ x := o.copyExpr(arg.X)
+ arg.X = x
x.Name().SetAddrtaken(true) // ensure SSA keeps the x variable
- n.PtrBody().Append(typecheck(ir.Nod(ir.OVARLIVE, x, nil), ctxStmt))
+ n.Body.Append(typecheck.Stmt(ir.NewUnaryExpr(base.Pos, ir.OVARLIVE, x)))
}
}
}
// Check for "unsafe-uintptr" tag provided by escape analysis.
- for i, param := range n.Left().Type().Params().FieldSlice() {
- if param.Note == unsafeUintptrTag || param.Note == uintptrEscapesTag {
- if arg := n.List().Index(i); arg.Op() == ir.OSLICELIT {
- for _, elt := range arg.List().Slice() {
+ for i, param := range n.X.Type().Params().FieldSlice() {
+ if param.Note == escape.UnsafeUintptrNote || param.Note == escape.UintptrEscapesNote {
+ if arg := n.Args[i]; arg.Op() == ir.OSLICELIT {
+ arg := arg.(*ir.CompLitExpr)
+ for _, elt := range arg.List {
keepAlive(elt)
}
} else {
// cases they are also typically registerizable, so not much harm done.
// And this only applies to the multiple-assignment form.
// We could do a more precise analysis if needed, like in walk.go.
-func (o *Order) mapAssign(n ir.Node) {
+func (o *orderState) mapAssign(n ir.Node) {
switch n.Op() {
default:
base.Fatalf("order.mapAssign %v", n.Op())
case ir.OAS:
- if n.Left().Op() == ir.OINDEXMAP {
- n.SetRight(o.safeMapRHS(n.Right()))
+ n := n.(*ir.AssignStmt)
+ if n.X.Op() == ir.OINDEXMAP {
+ n.Y = o.safeMapRHS(n.Y)
}
o.out = append(o.out, n)
case ir.OASOP:
- if n.Left().Op() == ir.OINDEXMAP {
- n.SetRight(o.safeMapRHS(n.Right()))
+ n := n.(*ir.AssignOpStmt)
+ if n.X.Op() == ir.OINDEXMAP {
+ n.Y = o.safeMapRHS(n.Y)
}
o.out = append(o.out, n)
case ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2MAPR, ir.OAS2FUNC:
+ n := n.(*ir.AssignListStmt)
var post []ir.Node
- for i, m := range n.List().Slice() {
+ for i, m := range n.Lhs {
switch {
case m.Op() == ir.OINDEXMAP:
m := m.(*ir.IndexExpr)
- if !ir.IsAutoTmp(m.Left()) {
- m.SetLeft(o.copyExpr(m.Left()))
+ if !ir.IsAutoTmp(m.X) {
+ m.X = o.copyExpr(m.X)
}
- if !ir.IsAutoTmp(m.Right()) {
- m.SetRight(o.copyExpr(m.Right()))
+ if !ir.IsAutoTmp(m.Index) {
+ m.Index = o.copyExpr(m.Index)
}
fallthrough
- case instrumenting && n.Op() == ir.OAS2FUNC && !ir.IsBlank(m):
+ case base.Flag.Cfg.Instrumenting && n.Op() == ir.OAS2FUNC && !ir.IsBlank(m):
t := o.newTemp(m.Type(), false)
- n.List().SetIndex(i, t)
- a := ir.Nod(ir.OAS, m, t)
- post = append(post, typecheck(a, ctxStmt))
+ n.Lhs[i] = t
+ a := ir.NewAssignStmt(base.Pos, m, t)
+ post = append(post, typecheck.Stmt(a))
}
}
}
}
-func (o *Order) safeMapRHS(r ir.Node) ir.Node {
+func (o *orderState) safeMapRHS(r ir.Node) ir.Node {
// Make sure we evaluate the RHS before starting the map insert.
// We need to make sure the RHS won't panic. See issue 22881.
if r.Op() == ir.OAPPEND {
- s := r.List().Slice()[1:]
+ r := r.(*ir.CallExpr)
+ s := r.Args[1:]
for i, n := range s {
s[i] = o.cheapExpr(n)
}
// stmt orders the statement n, appending to o.out.
// Temporaries created during the statement are cleaned
// up using VARKILL instructions as possible.
-func (o *Order) stmt(n ir.Node) {
+func (o *orderState) stmt(n ir.Node) {
if n == nil {
return
}
- lno := setlineno(n)
+ lno := ir.SetPos(n)
o.init(n)
switch n.Op() {
o.out = append(o.out, n)
case ir.OAS:
+ n := n.(*ir.AssignStmt)
t := o.markTemp()
- n.SetLeft(o.expr(n.Left(), nil))
- n.SetRight(o.expr(n.Right(), n.Left()))
+ n.X = o.expr(n.X, nil)
+ n.Y = o.expr(n.Y, n.X)
o.mapAssign(n)
o.cleanTemp(t)
case ir.OASOP:
+ n := n.(*ir.AssignOpStmt)
t := o.markTemp()
- n.SetLeft(o.expr(n.Left(), nil))
- n.SetRight(o.expr(n.Right(), nil))
+ n.X = o.expr(n.X, nil)
+ n.Y = o.expr(n.Y, nil)
- if instrumenting || n.Left().Op() == ir.OINDEXMAP && (n.SubOp() == ir.ODIV || n.SubOp() == ir.OMOD) {
+ if base.Flag.Cfg.Instrumenting || n.X.Op() == ir.OINDEXMAP && (n.AsOp == ir.ODIV || n.AsOp == ir.OMOD) {
// Rewrite m[k] op= r into m[k] = m[k] op r so
// that we can ensure that if op panics
// because r is zero, the panic happens before
// the map assignment.
// DeepCopy is a big hammer here, but safeExpr
// makes sure there is nothing too deep being copied.
- l1 := o.safeExpr(n.Left())
+ l1 := o.safeExpr(n.X)
l2 := ir.DeepCopy(src.NoXPos, l1)
if l2.Op() == ir.OINDEXMAP {
- l2.SetIndexMapLValue(false)
+ l2 := l2.(*ir.IndexExpr)
+ l2.Assigned = false
}
l2 = o.copyExpr(l2)
- r := o.expr(typecheck(ir.NewBinaryExpr(n.Pos(), n.SubOp(), l2, n.Right()), ctxExpr), nil)
- as := typecheck(ir.NodAt(n.Pos(), ir.OAS, l1, r), ctxStmt)
+ r := o.expr(typecheck.Expr(ir.NewBinaryExpr(n.Pos(), n.AsOp, l2, n.Y)), nil)
+ as := typecheck.Stmt(ir.NewAssignStmt(n.Pos(), l1, r))
o.mapAssign(as)
o.cleanTemp(t)
return
o.cleanTemp(t)
case ir.OAS2:
+ n := n.(*ir.AssignListStmt)
t := o.markTemp()
- o.exprList(n.List())
- o.exprList(n.Rlist())
+ o.exprList(n.Lhs)
+ o.exprList(n.Rhs)
o.mapAssign(n)
o.cleanTemp(t)
case ir.OAS2FUNC:
n := n.(*ir.AssignListStmt)
t := o.markTemp()
- o.exprList(n.List())
- o.init(n.Rlist().First())
- o.call(n.Rlist().First())
+ o.exprList(n.Lhs)
+ o.init(n.Rhs[0])
+ o.call(n.Rhs[0])
o.as2(n)
o.cleanTemp(t)
case ir.OAS2DOTTYPE, ir.OAS2RECV, ir.OAS2MAPR:
n := n.(*ir.AssignListStmt)
t := o.markTemp()
- o.exprList(n.List())
+ o.exprList(n.Lhs)
- switch r := n.Rlist().First(); r.Op() {
+ switch r := n.Rhs[0]; r.Op() {
case ir.ODOTTYPE2:
- r.SetLeft(o.expr(r.Left(), nil))
+ r := r.(*ir.TypeAssertExpr)
+ r.X = o.expr(r.X, nil)
case ir.ORECV:
- r.SetLeft(o.expr(r.Left(), nil))
+ r := r.(*ir.UnaryExpr)
+ r.X = o.expr(r.X, nil)
case ir.OINDEXMAP:
- r.SetLeft(o.expr(r.Left(), nil))
- r.SetRight(o.expr(r.Right(), nil))
+ r := r.(*ir.IndexExpr)
+ r.X = o.expr(r.X, nil)
+ r.Index = o.expr(r.Index, nil)
// See similar conversion for OINDEXMAP below.
- _ = mapKeyReplaceStrConv(r.Right())
- r.SetRight(o.mapKeyTemp(r.Left().Type(), r.Right()))
+ _ = mapKeyReplaceStrConv(r.Index)
+ r.Index = o.mapKeyTemp(r.X.Type(), r.Index)
default:
base.Fatalf("order.stmt: %v", r.Op())
}
// Special: does not save n onto out.
case ir.OBLOCK:
- o.stmtList(n.List())
+ n := n.(*ir.BlockStmt)
+ o.stmtList(n.List)
// Special: n->left is not an expression; save as is.
case ir.OBREAK,
// Special: handle call arguments.
case ir.OCALLFUNC, ir.OCALLINTER, ir.OCALLMETH:
+ n := n.(*ir.CallExpr)
t := o.markTemp()
o.call(n)
o.out = append(o.out, n)
o.cleanTemp(t)
case ir.OCLOSE, ir.ORECV:
+ n := n.(*ir.UnaryExpr)
t := o.markTemp()
- n.SetLeft(o.expr(n.Left(), nil))
+ n.X = o.expr(n.X, nil)
o.out = append(o.out, n)
o.cleanTemp(t)
case ir.OCOPY:
+ n := n.(*ir.BinaryExpr)
t := o.markTemp()
- n.SetLeft(o.expr(n.Left(), nil))
- n.SetRight(o.expr(n.Right(), nil))
+ n.X = o.expr(n.X, nil)
+ n.Y = o.expr(n.Y, nil)
o.out = append(o.out, n)
o.cleanTemp(t)
case ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
+ n := n.(*ir.CallExpr)
t := o.markTemp()
- o.exprList(n.List())
+ o.exprList(n.Args)
o.out = append(o.out, n)
o.cleanTemp(t)
// Special: order arguments to inner call but not call itself.
case ir.ODEFER, ir.OGO:
+ n := n.(*ir.GoDeferStmt)
t := o.markTemp()
- o.init(n.Left())
- o.call(n.Left())
+ o.init(n.Call)
+ o.call(n.Call)
o.out = append(o.out, n)
o.cleanTemp(t)
case ir.ODELETE:
+ n := n.(*ir.CallExpr)
t := o.markTemp()
- n.List().SetFirst(o.expr(n.List().First(), nil))
- n.List().SetSecond(o.expr(n.List().Second(), nil))
- n.List().SetSecond(o.mapKeyTemp(n.List().First().Type(), n.List().Second()))
+ n.Args[0] = o.expr(n.Args[0], nil)
+ n.Args[1] = o.expr(n.Args[1], nil)
+ n.Args[1] = o.mapKeyTemp(n.Args[0].Type(), n.Args[1])
o.out = append(o.out, n)
o.cleanTemp(t)
// Clean temporaries from condition evaluation at
// beginning of loop body and after for statement.
case ir.OFOR:
+ n := n.(*ir.ForStmt)
t := o.markTemp()
- n.SetLeft(o.exprInPlace(n.Left()))
- n.PtrBody().Prepend(o.cleanTempNoPop(t)...)
- orderBlock(n.PtrBody(), o.free)
- n.SetRight(orderStmtInPlace(n.Right(), o.free))
+ n.Cond = o.exprInPlace(n.Cond)
+ n.Body.Prepend(o.cleanTempNoPop(t)...)
+ orderBlock(&n.Body, o.free)
+ n.Post = orderStmtInPlace(n.Post, o.free)
o.out = append(o.out, n)
o.cleanTemp(t)
// Clean temporaries from condition at
// beginning of both branches.
case ir.OIF:
+ n := n.(*ir.IfStmt)
t := o.markTemp()
- n.SetLeft(o.exprInPlace(n.Left()))
- n.PtrBody().Prepend(o.cleanTempNoPop(t)...)
- n.PtrRlist().Prepend(o.cleanTempNoPop(t)...)
+ n.Cond = o.exprInPlace(n.Cond)
+ n.Body.Prepend(o.cleanTempNoPop(t)...)
+ n.Else.Prepend(o.cleanTempNoPop(t)...)
o.popTemp(t)
- orderBlock(n.PtrBody(), o.free)
- orderBlock(n.PtrRlist(), o.free)
+ orderBlock(&n.Body, o.free)
+ orderBlock(&n.Else, o.free)
o.out = append(o.out, n)
// Special: argument will be converted to interface using convT2E
// so make sure it is an addressable temporary.
case ir.OPANIC:
+ n := n.(*ir.UnaryExpr)
t := o.markTemp()
- n.SetLeft(o.expr(n.Left(), nil))
- if !n.Left().Type().IsInterface() {
- n.SetLeft(o.addrTemp(n.Left()))
+ n.X = o.expr(n.X, nil)
+ if !n.X.Type().IsInterface() {
+ n.X = o.addrTemp(n.X)
}
o.out = append(o.out, n)
o.cleanTemp(t)
// Mark []byte(str) range expression to reuse string backing storage.
// It is safe because the storage cannot be mutated.
n := n.(*ir.RangeStmt)
- if n.Right().Op() == ir.OSTR2BYTES {
- n.Right().(*ir.ConvExpr).SetOp(ir.OSTR2BYTESTMP)
+ if n.X.Op() == ir.OSTR2BYTES {
+ n.X.(*ir.ConvExpr).SetOp(ir.OSTR2BYTESTMP)
}
t := o.markTemp()
- n.SetRight(o.expr(n.Right(), nil))
+ n.X = o.expr(n.X, nil)
orderBody := true
switch n.Type().Kind() {
base.Fatalf("order.stmt range %v", n.Type())
case types.TARRAY, types.TSLICE:
- if n.List().Len() < 2 || ir.IsBlank(n.List().Second()) {
+ if len(n.Vars) < 2 || ir.IsBlank(n.Vars[1]) {
// for i := range x will only use x once, to compute len(x).
// No need to copy it.
break
case types.TCHAN, types.TSTRING:
// chan, string, slice, array ranges use value multiple times.
// make copy.
- r := n.Right()
+ r := n.X
if r.Type().IsString() && r.Type() != types.Types[types.TSTRING] {
- r = ir.Nod(ir.OCONV, r, nil)
+ r = ir.NewConvExpr(base.Pos, ir.OCONV, nil, r)
r.SetType(types.Types[types.TSTRING])
- r = typecheck(r, ctxExpr)
+ r = typecheck.Expr(r)
}
- n.SetRight(o.copyExpr(r))
+ n.X = o.copyExpr(r)
case types.TMAP:
if isMapClear(n) {
// copy the map value in case it is a map literal.
// TODO(rsc): Make tmp = literal expressions reuse tmp.
// For maps tmp is just one word so it hardly matters.
- r := n.Right()
- n.SetRight(o.copyExpr(r))
+ r := n.X
+ n.X = o.copyExpr(r)
// n.Prealloc is the temp for the iterator.
// hiter contains pointers and needs to be zeroed.
- n.Prealloc = o.newTemp(hiter(n.Type()), true)
+ n.Prealloc = o.newTemp(reflectdata.MapIterType(n.Type()), true)
}
- o.exprListInPlace(n.List())
+ o.exprListInPlace(n.Vars)
if orderBody {
- orderBlock(n.PtrBody(), o.free)
+ orderBlock(&n.Body, o.free)
}
o.out = append(o.out, n)
o.cleanTemp(t)
case ir.ORETURN:
- o.exprList(n.List())
+ n := n.(*ir.ReturnStmt)
+ o.exprList(n.Results)
o.out = append(o.out, n)
// Special: clean case temporaries in each block entry.
// case (if p were nil, then the timing of the fault would
// give this away).
case ir.OSELECT:
+ n := n.(*ir.SelectStmt)
t := o.markTemp()
- for _, ncas := range n.List().Slice() {
+ for _, ncas := range n.Cases {
ncas := ncas.(*ir.CaseStmt)
- r := ncas.Left()
- setlineno(ncas)
+ r := ncas.Comm
+ ir.SetPos(ncas)
// Append any new body prologue to ninit.
// The next loop will insert ninit into nbody.
- if ncas.Init().Len() != 0 {
+ if len(ncas.Init()) != 0 {
base.Fatalf("order select ninit")
}
if r == nil {
case ir.OSELRECV2:
// case x, ok = <-c
r := r.(*ir.AssignListStmt)
- recv := r.Rlist().First().(*ir.UnaryExpr)
- recv.SetLeft(o.expr(recv.Left(), nil))
- if !ir.IsAutoTmp(recv.Left()) {
- recv.SetLeft(o.copyExpr(recv.Left()))
+ recv := r.Rhs[0].(*ir.UnaryExpr)
+ recv.X = o.expr(recv.X, nil)
+ if !ir.IsAutoTmp(recv.X) {
+ recv.X = o.copyExpr(recv.X)
}
- init := r.PtrInit().Slice()
+ init := *r.PtrInit()
r.PtrInit().Set(nil)
- colas := r.Colas()
+ colas := r.Def
do := func(i int, t *types.Type) {
- n := r.List().Index(i)
+ n := r.Lhs[i]
if ir.IsBlank(n) {
return
}
// declaration (and possible allocation) until inside the case body.
// Delete the ODCL nodes here and recreate them inside the body below.
if colas {
- if len(init) > 0 && init[0].Op() == ir.ODCL && init[0].(*ir.Decl).Left() == n {
+ if len(init) > 0 && init[0].Op() == ir.ODCL && init[0].(*ir.Decl).X == n {
init = init[1:]
}
- dcl := typecheck(ir.Nod(ir.ODCL, n, nil), ctxStmt)
+ dcl := typecheck.Stmt(ir.NewDecl(base.Pos, ir.ODCL, n))
ncas.PtrInit().Append(dcl)
}
tmp := o.newTemp(t, t.HasPointers())
- as := typecheck(ir.Nod(ir.OAS, n, conv(tmp, n.Type())), ctxStmt)
+ as := typecheck.Stmt(ir.NewAssignStmt(base.Pos, n, typecheck.Conv(tmp, n.Type())))
ncas.PtrInit().Append(as)
- r.PtrList().SetIndex(i, tmp)
+ r.Lhs[i] = tmp
}
- do(0, recv.Left().Type().Elem())
+ do(0, recv.X.Type().Elem())
do(1, types.Types[types.TBOOL])
if len(init) != 0 {
ir.DumpList("ninit", r.Init())
orderBlock(ncas.PtrInit(), o.free)
case ir.OSEND:
- if r.Init().Len() != 0 {
+ r := r.(*ir.SendStmt)
+ if len(r.Init()) != 0 {
ir.DumpList("ninit", r.Init())
base.Fatalf("ninit on select send")
}
// case c <- x
// r->left is c, r->right is x, both are always evaluated.
- r.SetLeft(o.expr(r.Left(), nil))
+ r.Chan = o.expr(r.Chan, nil)
- if !ir.IsAutoTmp(r.Left()) {
- r.SetLeft(o.copyExpr(r.Left()))
+ if !ir.IsAutoTmp(r.Chan) {
+ r.Chan = o.copyExpr(r.Chan)
}
- r.SetRight(o.expr(r.Right(), nil))
- if !ir.IsAutoTmp(r.Right()) {
- r.SetRight(o.copyExpr(r.Right()))
+ r.Value = o.expr(r.Value, nil)
+ if !ir.IsAutoTmp(r.Value) {
+ r.Value = o.copyExpr(r.Value)
}
}
}
// Now that we have accumulated all the temporaries, clean them.
// Also insert any ninit queued during the previous loop.
// (The temporary cleaning must follow that ninit work.)
- for _, cas := range n.List().Slice() {
+ for _, cas := range n.Cases {
cas := cas.(*ir.CaseStmt)
- orderBlock(cas.PtrBody(), o.free)
- cas.PtrBody().Prepend(o.cleanTempNoPop(t)...)
+ orderBlock(&cas.Body, o.free)
+ cas.Body.Prepend(o.cleanTempNoPop(t)...)
// TODO(mdempsky): Is this actually necessary?
// walkselect appears to walk Ninit.
- cas.PtrBody().Prepend(cas.Init().Slice()...)
+ cas.Body.Prepend(cas.Init()...)
cas.PtrInit().Set(nil)
}
// Special: value being sent is passed as a pointer; make it addressable.
case ir.OSEND:
+ n := n.(*ir.SendStmt)
t := o.markTemp()
- n.SetLeft(o.expr(n.Left(), nil))
- n.SetRight(o.expr(n.Right(), nil))
- if instrumenting {
+ n.Chan = o.expr(n.Chan, nil)
+ n.Value = o.expr(n.Value, nil)
+ if base.Flag.Cfg.Instrumenting {
// Force copying to the stack so that (chan T)(nil) <- x
// is still instrumented as a read of x.
- n.SetRight(o.copyExpr(n.Right()))
+ n.Value = o.copyExpr(n.Value)
} else {
- n.SetRight(o.addrTemp(n.Right()))
+ n.Value = o.addrTemp(n.Value)
}
o.out = append(o.out, n)
o.cleanTemp(t)
n := n.(*ir.SwitchStmt)
if base.Debug.Libfuzzer != 0 && !hasDefaultCase(n) {
// Add empty "default:" case for instrumentation.
- n.PtrList().Append(ir.Nod(ir.OCASE, nil, nil))
+ n.Cases.Append(ir.NewCaseStmt(base.Pos, nil, nil))
}
t := o.markTemp()
- n.SetLeft(o.expr(n.Left(), nil))
- for _, ncas := range n.List().Slice() {
+ n.Tag = o.expr(n.Tag, nil)
+ for _, ncas := range n.Cases {
ncas := ncas.(*ir.CaseStmt)
- o.exprListInPlace(ncas.List())
- orderBlock(ncas.PtrBody(), o.free)
+ o.exprListInPlace(ncas.List)
+ orderBlock(&ncas.Body, o.free)
}
o.out = append(o.out, n)
}
func hasDefaultCase(n *ir.SwitchStmt) bool {
- for _, ncas := range n.List().Slice() {
+ for _, ncas := range n.Cases {
ncas := ncas.(*ir.CaseStmt)
- if ncas.List().Len() == 0 {
+ if len(ncas.List) == 0 {
return true
}
}
}
// exprList orders the expression list l into o.
-func (o *Order) exprList(l ir.Nodes) {
- s := l.Slice()
+func (o *orderState) exprList(l ir.Nodes) {
+ s := l
for i := range s {
s[i] = o.expr(s[i], nil)
}
// exprListInPlace orders the expression list l but saves
// the side effects on the individual expression ninit lists.
-func (o *Order) exprListInPlace(l ir.Nodes) {
- s := l.Slice()
+func (o *orderState) exprListInPlace(l ir.Nodes) {
+ s := l
for i := range s {
s[i] = o.exprInPlace(s[i])
}
}
-func (o *Order) exprNoLHS(n ir.Node) ir.Node {
+func (o *orderState) exprNoLHS(n ir.Node) ir.Node {
return o.expr(n, nil)
}
// to avoid copying the result of the expression to a temporary.)
// The result of expr MUST be assigned back to n, e.g.
// n.Left = o.expr(n.Left, lhs)
-func (o *Order) expr(n, lhs ir.Node) ir.Node {
+func (o *orderState) expr(n, lhs ir.Node) ir.Node {
if n == nil {
return n
}
- lno := setlineno(n)
+ lno := ir.SetPos(n)
n = o.expr1(n, lhs)
base.Pos = lno
return n
}
-func (o *Order) expr1(n, lhs ir.Node) ir.Node {
+func (o *orderState) expr1(n, lhs ir.Node) ir.Node {
o.init(n)
switch n.Op() {
// Fewer than 5 strings use direct runtime helpers.
case ir.OADDSTR:
n := n.(*ir.AddStringExpr)
- o.exprList(n.List())
+ o.exprList(n.List)
- if n.List().Len() > 5 {
- t := types.NewArray(types.Types[types.TSTRING], int64(n.List().Len()))
+ if len(n.List) > 5 {
+ t := types.NewArray(types.Types[types.TSTRING], int64(len(n.List)))
n.Prealloc = o.newTemp(t, false)
}
hasbyte := false
haslit := false
- for _, n1 := range n.List().Slice() {
+ for _, n1 := range n.List {
hasbyte = hasbyte || n1.Op() == ir.OBYTES2STR
haslit = haslit || n1.Op() == ir.OLITERAL && len(ir.StringVal(n1)) != 0
}
if haslit && hasbyte {
- for _, n2 := range n.List().Slice() {
+ for _, n2 := range n.List {
if n2.Op() == ir.OBYTES2STR {
+ n2 := n2.(*ir.ConvExpr)
n2.SetOp(ir.OBYTES2STRTMP)
}
}
return n
case ir.OINDEXMAP:
- n.SetLeft(o.expr(n.Left(), nil))
- n.SetRight(o.expr(n.Right(), nil))
+ n := n.(*ir.IndexExpr)
+ n.X = o.expr(n.X, nil)
+ n.Index = o.expr(n.Index, nil)
needCopy := false
- if !n.IndexMapLValue() {
+ if !n.Assigned {
// Enforce that any []byte slices we are not copying
// can not be changed before the map index by forcing
// the map index to happen immediately following the
// conversions. See copyExpr a few lines below.
- needCopy = mapKeyReplaceStrConv(n.Right())
+ needCopy = mapKeyReplaceStrConv(n.Index)
- if instrumenting {
+ if base.Flag.Cfg.Instrumenting {
// Race detector needs the copy.
needCopy = true
}
}
// key must be addressable
- n.SetRight(o.mapKeyTemp(n.Left().Type(), n.Right()))
+ n.Index = o.mapKeyTemp(n.X.Type(), n.Index)
if needCopy {
return o.copyExpr(n)
}
// concrete type (not interface) argument might need an addressable
// temporary to pass to the runtime conversion routine.
case ir.OCONVIFACE:
- n.SetLeft(o.expr(n.Left(), nil))
- if n.Left().Type().IsInterface() {
+ n := n.(*ir.ConvExpr)
+ n.X = o.expr(n.X, nil)
+ if n.X.Type().IsInterface() {
return n
}
- if _, needsaddr := convFuncName(n.Left().Type(), n.Type()); needsaddr || isStaticCompositeLiteral(n.Left()) {
+ if _, needsaddr := convFuncName(n.X.Type(), n.Type()); needsaddr || isStaticCompositeLiteral(n.X) {
// Need a temp if we need to pass the address to the conversion function.
// We also process static composite literal node here, making a named static global
// whose address we can put directly in an interface (see OCONVIFACE case in walk).
- n.SetLeft(o.addrTemp(n.Left()))
+ n.X = o.addrTemp(n.X)
}
return n
case ir.OCONVNOP:
- if n.Type().IsKind(types.TUNSAFEPTR) && n.Left().Type().IsKind(types.TUINTPTR) && (n.Left().Op() == ir.OCALLFUNC || n.Left().Op() == ir.OCALLINTER || n.Left().Op() == ir.OCALLMETH) {
- call := n.Left().(*ir.CallExpr)
+ n := n.(*ir.ConvExpr)
+ if n.Type().IsKind(types.TUNSAFEPTR) && n.X.Type().IsKind(types.TUINTPTR) && (n.X.Op() == ir.OCALLFUNC || n.X.Op() == ir.OCALLINTER || n.X.Op() == ir.OCALLMETH) {
+ call := n.X.(*ir.CallExpr)
// When reordering unsafe.Pointer(f()) into a separate
// statement, the conversion and function call must stay
// together. See golang.org/issue/15329.
o.init(call)
o.call(call)
- if lhs == nil || lhs.Op() != ir.ONAME || instrumenting {
+ if lhs == nil || lhs.Op() != ir.ONAME || base.Flag.Cfg.Instrumenting {
return o.copyExpr(n)
}
} else {
- n.SetLeft(o.expr(n.Left(), nil))
+ n.X = o.expr(n.X, nil)
}
return n
// }
// ... = r
+ n := n.(*ir.LogicalExpr)
r := o.newTemp(n.Type(), false)
// Evaluate left-hand side.
- lhs := o.expr(n.Left(), nil)
- o.out = append(o.out, typecheck(ir.Nod(ir.OAS, r, lhs), ctxStmt))
+ lhs := o.expr(n.X, nil)
+ o.out = append(o.out, typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, lhs)))
// Evaluate right-hand side, save generated code.
saveout := o.out
o.out = nil
t := o.markTemp()
o.edge()
- rhs := o.expr(n.Right(), nil)
- o.out = append(o.out, typecheck(ir.Nod(ir.OAS, r, rhs), ctxStmt))
+ rhs := o.expr(n.Y, nil)
+ o.out = append(o.out, typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, rhs)))
o.cleanTemp(t)
gen := o.out
o.out = saveout
// If left-hand side doesn't cause a short-circuit, issue right-hand side.
- nif := ir.Nod(ir.OIF, r, nil)
+ nif := ir.NewIfStmt(base.Pos, r, nil, nil)
if n.Op() == ir.OANDAND {
- nif.PtrBody().Set(gen)
+ nif.Body.Set(gen)
} else {
- nif.PtrRlist().Set(gen)
+ nif.Else.Set(gen)
}
o.out = append(o.out, nif)
return r
if isRuneCount(n) {
// len([]rune(s)) is rewritten to runtime.countrunes(s) later.
- conv := n.(*ir.UnaryExpr).Left().(*ir.ConvExpr)
- conv.SetLeft(o.expr(conv.Left(), nil))
+ conv := n.(*ir.UnaryExpr).X.(*ir.ConvExpr)
+ conv.X = o.expr(conv.X, nil)
} else {
o.call(n)
}
- if lhs == nil || lhs.Op() != ir.ONAME || instrumenting {
+ if lhs == nil || lhs.Op() != ir.ONAME || base.Flag.Cfg.Instrumenting {
return o.copyExpr(n)
}
return n
case ir.OAPPEND:
// Check for append(x, make([]T, y)...) .
+ n := n.(*ir.CallExpr)
if isAppendOfMake(n) {
- n.List().SetFirst(o.expr(n.List().First(), nil)) // order x
- mk := n.List().Second().(*ir.MakeExpr)
- mk.SetLeft(o.expr(mk.Left(), nil)) // order y
+ n.Args[0] = o.expr(n.Args[0], nil) // order x
+ mk := n.Args[1].(*ir.MakeExpr)
+ mk.Len = o.expr(mk.Len, nil) // order y
} else {
- o.exprList(n.List())
+ o.exprList(n.Args)
}
- if lhs == nil || lhs.Op() != ir.ONAME && !samesafeexpr(lhs, n.List().First()) {
+ if lhs == nil || lhs.Op() != ir.ONAME && !ir.SameSafeExpr(lhs, n.Args[0]) {
return o.copyExpr(n)
}
return n
case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR:
- n.SetLeft(o.expr(n.Left(), nil))
+ n := n.(*ir.SliceExpr)
+ n.X = o.expr(n.X, nil)
low, high, max := n.SliceBounds()
low = o.expr(low, nil)
low = o.cheapExpr(low)
max = o.expr(max, nil)
max = o.cheapExpr(max)
n.SetSliceBounds(low, high, max)
- if lhs == nil || lhs.Op() != ir.ONAME && !samesafeexpr(lhs, n.Left()) {
+ if lhs == nil || lhs.Op() != ir.ONAME && !ir.SameSafeExpr(lhs, n.X) {
return o.copyExpr(n)
}
return n
case ir.OCLOSURE:
n := n.(*ir.ClosureExpr)
- if n.Transient() && len(n.Func().ClosureVars) > 0 {
- n.Prealloc = o.newTemp(closureType(n), false)
+ if n.Transient() && len(n.Func.ClosureVars) > 0 {
+ n.Prealloc = o.newTemp(typecheck.ClosureType(n), false)
}
return n
case ir.OCALLPART:
n := n.(*ir.CallPartExpr)
- n.SetLeft(o.expr(n.Left(), nil))
+ n.X = o.expr(n.X, nil)
if n.Transient() {
- t := partialCallType(n)
+ t := typecheck.PartialCallType(n)
n.Prealloc = o.newTemp(t, false)
}
return n
case ir.OSLICELIT:
n := n.(*ir.CompLitExpr)
- o.exprList(n.List())
+ o.exprList(n.List)
if n.Transient() {
t := types.NewArray(n.Type().Elem(), n.Len)
n.Prealloc = o.newTemp(t, false)
return n
case ir.ODOTTYPE, ir.ODOTTYPE2:
- n.SetLeft(o.expr(n.Left(), nil))
- if !isdirectiface(n.Type()) || instrumenting {
+ n := n.(*ir.TypeAssertExpr)
+ n.X = o.expr(n.X, nil)
+ if !types.IsDirectIface(n.Type()) || base.Flag.Cfg.Instrumenting {
return o.copyExprClear(n)
}
return n
case ir.ORECV:
- n.SetLeft(o.expr(n.Left(), nil))
+ n := n.(*ir.UnaryExpr)
+ n.X = o.expr(n.X, nil)
return o.copyExprClear(n)
case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
- n.SetLeft(o.expr(n.Left(), nil))
- n.SetRight(o.expr(n.Right(), nil))
+ n := n.(*ir.BinaryExpr)
+ n.X = o.expr(n.X, nil)
+ n.Y = o.expr(n.Y, nil)
- t := n.Left().Type()
+ t := n.X.Type()
switch {
case t.IsString():
// Mark string(byteSlice) arguments to reuse byteSlice backing
// buffer during conversion. String comparison does not
// memorize the strings for later use, so it is safe.
- if n.Left().Op() == ir.OBYTES2STR {
- n.Left().(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP)
+ if n.X.Op() == ir.OBYTES2STR {
+ n.X.(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP)
}
- if n.Right().Op() == ir.OBYTES2STR {
- n.Right().(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP)
+ if n.Y.Op() == ir.OBYTES2STR {
+ n.Y.(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP)
}
case t.IsStruct() || t.IsArray():
// for complex comparisons, we need both args to be
// addressable so we can pass them to the runtime.
- n.SetLeft(o.addrTemp(n.Left()))
- n.SetRight(o.addrTemp(n.Right()))
+ n.X = o.addrTemp(n.X)
+ n.Y = o.addrTemp(n.Y)
}
return n
// Without this special case, order would otherwise compute all
// the keys and values before storing any of them to the map.
// See issue 26552.
- entries := n.List().Slice()
+ n := n.(*ir.CompLitExpr)
+ entries := n.List
statics := entries[:0]
var dynamics []*ir.KeyExpr
for _, r := range entries {
r := r.(*ir.KeyExpr)
- if !isStaticCompositeLiteral(r.Left()) || !isStaticCompositeLiteral(r.Right()) {
+ if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) {
dynamics = append(dynamics, r)
continue
}
// Recursively ordering some static entries can change them to dynamic;
// e.g., OCONVIFACE nodes. See #31777.
r = o.expr(r, nil).(*ir.KeyExpr)
- if !isStaticCompositeLiteral(r.Left()) || !isStaticCompositeLiteral(r.Right()) {
+ if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) {
dynamics = append(dynamics, r)
continue
}
statics = append(statics, r)
}
- n.PtrList().Set(statics)
+ n.List.Set(statics)
if len(dynamics) == 0 {
return n
// Emit the creation of the map (with all its static entries).
m := o.newTemp(n.Type(), false)
- as := ir.Nod(ir.OAS, m, n)
- typecheck(as, ctxStmt)
+ as := ir.NewAssignStmt(base.Pos, m, n)
+ typecheck.Stmt(as)
o.stmt(as)
// Emit eval+insert of dynamic entries, one at a time.
for _, r := range dynamics {
- as := ir.Nod(ir.OAS, ir.Nod(ir.OINDEX, m, r.Left()), r.Right())
- typecheck(as, ctxStmt) // Note: this converts the OINDEX to an OINDEXMAP
+ as := ir.NewAssignStmt(base.Pos, ir.NewIndexExpr(base.Pos, m, r.Key), r.Value)
+ typecheck.Stmt(as) // Note: this converts the OINDEX to an OINDEXMAP
o.stmt(as)
}
return m
// tmp1, tmp2, tmp3 = ...
// a, b, a = tmp1, tmp2, tmp3
// This is necessary to ensure left to right assignment order.
-func (o *Order) as2(n *ir.AssignListStmt) {
+func (o *orderState) as2(n *ir.AssignListStmt) {
tmplist := []ir.Node{}
left := []ir.Node{}
- for ni, l := range n.List().Slice() {
+ for ni, l := range n.Lhs {
if !ir.IsBlank(l) {
tmp := o.newTemp(l.Type(), l.Type().HasPointers())
- n.List().SetIndex(ni, tmp)
+ n.Lhs[ni] = tmp
tmplist = append(tmplist, tmp)
left = append(left, l)
}
o.out = append(o.out, n)
- as := ir.Nod(ir.OAS2, nil, nil)
- as.PtrList().Set(left)
- as.PtrRlist().Set(tmplist)
- o.stmt(typecheck(as, ctxStmt))
+ as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
+ as.Lhs.Set(left)
+ as.Rhs.Set(tmplist)
+ o.stmt(typecheck.Stmt(as))
}
// okAs2 orders OAS2XXX with ok.
// Just like as2, this also adds temporaries to ensure left-to-right assignment.
-func (o *Order) okAs2(n *ir.AssignListStmt) {
+func (o *orderState) okAs2(n *ir.AssignListStmt) {
var tmp1, tmp2 ir.Node
- if !ir.IsBlank(n.List().First()) {
- typ := n.Rlist().First().Type()
+ if !ir.IsBlank(n.Lhs[0]) {
+ typ := n.Rhs[0].Type()
tmp1 = o.newTemp(typ, typ.HasPointers())
}
- if !ir.IsBlank(n.List().Second()) {
+ if !ir.IsBlank(n.Lhs[1]) {
tmp2 = o.newTemp(types.Types[types.TBOOL], false)
}
o.out = append(o.out, n)
if tmp1 != nil {
- r := ir.Nod(ir.OAS, n.List().First(), tmp1)
- o.mapAssign(typecheck(r, ctxStmt))
- n.List().SetFirst(tmp1)
+ r := ir.NewAssignStmt(base.Pos, n.Lhs[0], tmp1)
+ o.mapAssign(typecheck.Stmt(r))
+ n.Lhs[0] = tmp1
}
if tmp2 != nil {
- r := ir.Nod(ir.OAS, n.List().Second(), conv(tmp2, n.List().Second().Type()))
- o.mapAssign(typecheck(r, ctxStmt))
- n.List().SetSecond(tmp2)
+ r := ir.NewAssignStmt(base.Pos, n.Lhs[1], typecheck.Conv(tmp2, n.Lhs[1].Type()))
+ o.mapAssign(typecheck.Stmt(r))
+ n.Lhs[1] = tmp2
}
}
--- /dev/null
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "cmd/internal/sys"
+)
+
+// instrument marks fn so that its body will be instrumented for the
+// active sanitizer and, under -race, splices the racefuncenter /
+// racefuncexit runtime calls into the function's entry and exit
+// sequences. Functions carrying the Norace pragma and ABI wrapper
+// functions are left untouched.
+func instrument(fn *ir.Func) {
+	if fn.Pragma&ir.Norace != 0 || (fn.Sym().Linksym() != nil && fn.Sym().Linksym().ABIWrapper()) {
+		return
+	}
+
+	// Instrument the body except when compiling one of the packages
+	// that must stay uninstrumented under -race (base.NoRacePkgs).
+	if !base.Flag.Race || !base.Compiling(base.NoRacePkgs) {
+		fn.SetInstrumentBody(true)
+	}
+
+	if base.Flag.Race {
+		// The inserted enter/exit calls carry no source position.
+		lno := base.Pos
+		base.Pos = src.NoXPos
+
+		if ssagen.Arch.LinkArch.Arch.Family != sys.AMD64 {
+			fn.Enter.Prepend(mkcall("racefuncenterfp", nil, nil))
+			fn.Exit.Append(mkcall("racefuncexit", nil, nil))
+		} else {
+
+			// nodpc is the PC of the caller as extracted by
+			// getcallerpc. We use -widthptr(FP) for x86.
+			// This only works for amd64. This will not
+			// work on arm or others that might support
+			// race in the future.
+			nodpc := ir.RegFP.CloneName()
+			nodpc.SetType(types.Types[types.TUINTPTR])
+			nodpc.SetFrameOffset(int64(-types.PtrSize))
+			fn.Dcl = append(fn.Dcl, nodpc)
+			fn.Enter.Prepend(mkcall("racefuncenter", nil, nil, nodpc))
+			fn.Exit.Append(mkcall("racefuncexit", nil, nil))
+		}
+		base.Pos = lno
+	}
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "unicode/utf8"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/sys"
+)
+
+// cheapComputableIndex reports whether, on the target architecture,
+// indexing an element of the given byte width inside a loop is cheap
+// enough to compute per iteration. walkRange uses this to choose
+// between the simple "v2 = ha[hv1]" form and the incremented-pointer
+// (OFORUNTIL) form for slice/array ranges.
+func cheapComputableIndex(width int64) bool {
+	switch ssagen.Arch.LinkArch.Family {
+	// MIPS does not have R+R addressing
+	// Arm64 may lack ability to generate this code in our assembler,
+	// but the architecture supports it.
+	case sys.PPC64, sys.S390X:
+		return width == 1
+	case sys.AMD64, sys.I386, sys.ARM64, sys.ARM:
+		switch width {
+		case 1, 2, 4, 8:
+			return true
+		}
+	}
+	return false
+}
+
+// walkRange transforms various forms of ORANGE into
+// simpler forms. The result must be assigned back to n.
+// Node n may also be modified in place, and may also be
+// the returned node.
+func walkRange(nrange *ir.RangeStmt) ir.Node {
+	// "for k := range m { delete(m, k) }" is lowered to a single
+	// runtime.mapclear call.
+	if isMapClear(nrange) {
+		m := nrange.X
+		lno := ir.SetPos(m)
+		n := mapClear(m)
+		base.Pos = lno
+		return n
+	}
+
+	nfor := ir.NewForStmt(nrange.Pos(), nil, nil, nil, nil)
+	nfor.SetInit(nrange.Init())
+	nfor.Label = nrange.Label
+
+	// variable name conventions:
+	// ohv1, hv1, hv2: hidden (old) val 1, 2
+	// ha, hit: hidden aggregate, iterator
+	// hn, hp: hidden len, pointer
+	// hb: hidden bool
+	// a, v1, v2: not hidden aggregate, val 1, 2
+
+	// The kind of the ranged-over type selects the lowering below.
+	t := nrange.Type()
+
+	a := nrange.X
+	lno := ir.SetPos(a)
+
+	var v1, v2 ir.Node
+	l := len(nrange.Vars)
+	if l > 0 {
+		v1 = nrange.Vars[0]
+	}
+
+	if l > 1 {
+		v2 = nrange.Vars[1]
+	}
+
+	// Normalize blank loop variables to nil.
+	if ir.IsBlank(v2) {
+		v2 = nil
+	}
+
+	if ir.IsBlank(v1) && v2 == nil {
+		v1 = nil
+	}
+
+	if v1 == nil && v2 != nil {
+		base.Fatalf("walkrange: v2 != nil while v1 == nil")
+	}
+
+	// ifGuard, when set, wraps the loop for the OFORUNTIL lowering of
+	// slice/array ranges (see the TARRAY/TSLICE case below).
+	var ifGuard *ir.IfStmt
+
+	var body []ir.Node
+	var init []ir.Node
+	switch t.Kind() {
+	default:
+		base.Fatalf("walkrange")
+
+	case types.TARRAY, types.TSLICE:
+		if nn := arrayClear(nrange, v1, v2, a); nn != nil {
+			base.Pos = lno
+			return nn
+		}
+
+		// order.stmt arranged for a copy of the array/slice variable if needed.
+		ha := a
+
+		hv1 := typecheck.Temp(types.Types[types.TINT])
+		hn := typecheck.Temp(types.Types[types.TINT])
+
+		init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
+		init = append(init, ir.NewAssignStmt(base.Pos, hn, ir.NewUnaryExpr(base.Pos, ir.OLEN, ha)))
+
+		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, hn)
+		nfor.Post = ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, ir.NewInt(1)))
+
+		// for range ha { body }
+		if v1 == nil {
+			break
+		}
+
+		// for v1 := range ha { body }
+		if v2 == nil {
+			body = []ir.Node{ir.NewAssignStmt(base.Pos, v1, hv1)}
+			break
+		}
+
+		// for v1, v2 := range ha { body }
+		if cheapComputableIndex(nrange.Type().Elem().Width) {
+			// v1, v2 = hv1, ha[hv1]
+			tmp := ir.NewIndexExpr(base.Pos, ha, hv1)
+			tmp.SetBounded(true)
+			// Use OAS2 to correctly handle assignments
+			// of the form "v1, a[v1] := range".
+			a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
+			a.Lhs = []ir.Node{v1, v2}
+			a.Rhs = []ir.Node{hv1, tmp}
+			body = []ir.Node{a}
+			break
+		}
+
+		// TODO(austin): OFORUNTIL is a strange beast, but is
+		// necessary for expressing the control flow we need
+		// while also making "break" and "continue" work. It
+		// would be nice to just lower ORANGE during SSA, but
+		// racewalk needs to see many of the operations
+		// involved in ORANGE's implementation. If racewalk
+		// moves into SSA, consider moving ORANGE into SSA and
+		// eliminating OFORUNTIL.
+
+		// TODO(austin): OFORUNTIL inhibits bounds-check
+		// elimination on the index variable (see #20711).
+		// Enhance the prove pass to understand this.
+		ifGuard = ir.NewIfStmt(base.Pos, nil, nil, nil)
+		ifGuard.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, hn)
+		nfor.SetOp(ir.OFORUNTIL)
+
+		hp := typecheck.Temp(types.NewPtr(nrange.Type().Elem()))
+		tmp := ir.NewIndexExpr(base.Pos, ha, ir.NewInt(0))
+		tmp.SetBounded(true)
+		init = append(init, ir.NewAssignStmt(base.Pos, hp, typecheck.NodAddr(tmp)))
+
+		// Use OAS2 to correctly handle assignments
+		// of the form "v1, a[v1] := range".
+		a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
+		a.Lhs = []ir.Node{v1, v2}
+		a.Rhs = []ir.Node{hv1, ir.NewStarExpr(base.Pos, hp)}
+		body = append(body, a)
+
+		// Advance pointer as part of the late increment.
+		//
+		// This runs *after* the condition check, so we know
+		// advancing the pointer is safe and won't go past the
+		// end of the allocation.
+		as := ir.NewAssignStmt(base.Pos, hp, addptr(hp, t.Elem().Width))
+		nfor.Late = []ir.Node{typecheck.Stmt(as)}
+
+	case types.TMAP:
+		// order.stmt allocated the iterator for us.
+		// we only use a once, so no copy needed.
+		ha := a
+
+		hit := nrange.Prealloc
+		th := hit.Type()
+		keysym := th.Field(0).Sym // depends on layout of iterator struct. See reflect.go:hiter
+		elemsym := th.Field(1).Sym // ditto
+
+		fn := typecheck.LookupRuntime("mapiterinit")
+
+		fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), th)
+		init = append(init, mkcall1(fn, nil, nil, reflectdata.TypePtr(t), ha, typecheck.NodAddr(hit)))
+		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym), typecheck.NodNil())
+
+		fn = typecheck.LookupRuntime("mapiternext")
+		fn = typecheck.SubstArgTypes(fn, th)
+		nfor.Post = mkcall1(fn, nil, nil, typecheck.NodAddr(hit))
+
+		key := ir.NewStarExpr(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym))
+		if v1 == nil {
+			body = nil
+		} else if v2 == nil {
+			body = []ir.Node{ir.NewAssignStmt(base.Pos, v1, key)}
+		} else {
+			elem := ir.NewStarExpr(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, elemsym))
+			a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
+			a.Lhs = []ir.Node{v1, v2}
+			a.Rhs = []ir.Node{key, elem}
+			body = []ir.Node{a}
+		}
+
+	case types.TCHAN:
+		// order.stmt arranged for a copy of the channel variable.
+		ha := a
+
+		hv1 := typecheck.Temp(t.Elem())
+		hv1.SetTypecheck(1)
+		if t.Elem().HasPointers() {
+			init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
+		}
+		hb := typecheck.Temp(types.Types[types.TBOOL])
+
+		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, hb, ir.NewBool(false))
+		a := ir.NewAssignListStmt(base.Pos, ir.OAS2RECV, nil, nil)
+		a.SetTypecheck(1)
+		a.Lhs = []ir.Node{hv1, hb}
+		a.Rhs = []ir.Node{ir.NewUnaryExpr(base.Pos, ir.ORECV, ha)}
+		*nfor.Cond.PtrInit() = []ir.Node{a}
+		if v1 == nil {
+			body = nil
+		} else {
+			body = []ir.Node{ir.NewAssignStmt(base.Pos, v1, hv1)}
+		}
+		// Zero hv1. This prevents hv1 from being the sole, inaccessible
+		// reference to an otherwise GC-able value during the next channel receive.
+		// See issue 15281.
+		body = append(body, ir.NewAssignStmt(base.Pos, hv1, nil))
+
+	case types.TSTRING:
+		// Transform string range statements like "for v1, v2 = range a" into
+		//
+		// ha := a
+		// for hv1 := 0; hv1 < len(ha); {
+		// hv1t := hv1
+		// hv2 := rune(ha[hv1])
+		// if hv2 < utf8.RuneSelf {
+		// hv1++
+		// } else {
+		// hv2, hv1 = decoderune(ha, hv1)
+		// }
+		// v1, v2 = hv1t, hv2
+		// // original body
+		// }
+
+		// order.stmt arranged for a copy of the string variable.
+		ha := a
+
+		hv1 := typecheck.Temp(types.Types[types.TINT])
+		hv1t := typecheck.Temp(types.Types[types.TINT])
+		hv2 := typecheck.Temp(types.RuneType)
+
+		// hv1 := 0
+		init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
+
+		// hv1 < len(ha)
+		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, ir.NewUnaryExpr(base.Pos, ir.OLEN, ha))
+
+		if v1 != nil {
+			// hv1t = hv1
+			body = append(body, ir.NewAssignStmt(base.Pos, hv1t, hv1))
+		}
+
+		// hv2 := rune(ha[hv1])
+		nind := ir.NewIndexExpr(base.Pos, ha, hv1)
+		nind.SetBounded(true)
+		body = append(body, ir.NewAssignStmt(base.Pos, hv2, typecheck.Conv(nind, types.RuneType)))
+
+		// if hv2 < utf8.RuneSelf
+		nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
+		nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv2, ir.NewInt(utf8.RuneSelf))
+
+		// hv1++
+		nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, ir.NewInt(1)))}
+
+		// } else {
+		eif := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
+		nif.Else = []ir.Node{eif}
+
+		// hv2, hv1 = decoderune(ha, hv1)
+		eif.Lhs = []ir.Node{hv2, hv1}
+		fn := typecheck.LookupRuntime("decoderune")
+		eif.Rhs = []ir.Node{mkcall1(fn, fn.Type().Results(), nil, ha, hv1)}
+
+		body = append(body, nif)
+
+		if v1 != nil {
+			if v2 != nil {
+				// v1, v2 = hv1t, hv2
+				a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
+				a.Lhs = []ir.Node{v1, v2}
+				a.Rhs = []ir.Node{hv1t, hv2}
+				body = append(body, a)
+			} else {
+				// v1 = hv1t
+				body = append(body, ir.NewAssignStmt(base.Pos, v1, hv1t))
+			}
+		}
+	}
+
+	// Typecheck everything we constructed and assemble the final loop.
+	typecheck.Stmts(init)
+
+	if ifGuard != nil {
+		ifGuard.PtrInit().Append(init...)
+		ifGuard = typecheck.Stmt(ifGuard).(*ir.IfStmt)
+	} else {
+		nfor.PtrInit().Append(init...)
+	}
+
+	typecheck.Stmts(nfor.Cond.Init())
+
+	nfor.Cond = typecheck.Expr(nfor.Cond)
+	nfor.Cond = typecheck.DefaultLit(nfor.Cond, nil)
+	nfor.Post = typecheck.Stmt(nfor.Post)
+	typecheck.Stmts(body)
+	nfor.Body.Append(body...)
+	nfor.Body.Append(nrange.Body...)
+
+	var n ir.Node = nfor
+	if ifGuard != nil {
+		ifGuard.Body = []ir.Node{n}
+		n = ifGuard
+	}
+
+	// Walk the statement we just constructed.
+	n = walkStmt(n)
+
+	base.Pos = lno
+	return n
+}
+
+// isMapClear checks if n is of the form:
+//
+// for k := range m {
+// delete(m, k)
+// }
+//
+// where == for keys of map m is reflexive.
+func isMapClear(n *ir.RangeStmt) bool {
+	// Keep the loop as written when optimizations are off or the body
+	// is being instrumented.
+	if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
+		return false
+	}
+
+	if n.Op() != ir.ORANGE || n.Type().Kind() != types.TMAP || len(n.Vars) != 1 {
+		return false
+	}
+
+	k := n.Vars[0]
+	if k == nil || ir.IsBlank(k) {
+		return false
+	}
+
+	// Require k to be a new variable name.
+	if !ir.DeclaredBy(k, n) {
+		return false
+	}
+
+	if len(n.Body) != 1 {
+		return false
+	}
+
+	stmt := n.Body[0] // only stmt in body
+	if stmt == nil || stmt.Op() != ir.ODELETE {
+		return false
+	}
+
+	// The delete must target exactly the ranged-over map and key.
+	m := n.X
+	if delete := stmt.(*ir.CallExpr); !ir.SameSafeExpr(delete.Args[0], m) || !ir.SameSafeExpr(delete.Args[1], k) {
+		return false
+	}
+
+	// Keys where equality is not reflexive can not be deleted from maps.
+	if !types.IsReflexive(m.Type().Key()) {
+		return false
+	}
+
+	return true
+}
+
+// mapClear constructs a call to runtime.mapclear for the map m.
+// The returned statement has already been typechecked and walked.
+func mapClear(m ir.Node) ir.Node {
+	t := m.Type()
+
+	// instantiate mapclear(typ *type, hmap map[any]any)
+	fn := typecheck.LookupRuntime("mapclear")
+	fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem())
+	n := mkcall1(fn, nil, nil, reflectdata.TypePtr(t), m)
+	return walkStmt(typecheck.Stmt(n))
+}
+
+// Lower n into runtime·memclr if possible, for
+// fast zeroing of slices and arrays (issue 5373).
+// Look for instances of
+//
+// for i := range a {
+// a[i] = zero
+// }
+//
+// in which the evaluation of a is side-effect-free.
+//
+// Parameters are as in walkrange: "for v1, v2 = range a".
+// Returns nil when the pattern does not apply.
+func arrayClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node {
+	if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
+		return nil
+	}
+
+	// Only the single-variable form "for i := range a" is recognized.
+	if v1 == nil || v2 != nil {
+		return nil
+	}
+
+	if len(loop.Body) != 1 || loop.Body[0] == nil {
+		return nil
+	}
+
+	stmt1 := loop.Body[0] // only stmt in body
+	if stmt1.Op() != ir.OAS {
+		return nil
+	}
+	stmt := stmt1.(*ir.AssignStmt)
+	if stmt.X.Op() != ir.OINDEX {
+		return nil
+	}
+	lhs := stmt.X.(*ir.IndexExpr)
+
+	if !ir.SameSafeExpr(lhs.X, a) || !ir.SameSafeExpr(lhs.Index, v1) {
+		return nil
+	}
+
+	// The element size must be positive and the stored value must be
+	// the zero value.
+	elemsize := loop.Type().Elem().Width
+	if elemsize <= 0 || !ir.IsZero(stmt.Y) {
+		return nil
+	}
+
+	// Convert to
+	// if len(a) != 0 {
+	// hp = &a[0]
+	// hn = len(a)*sizeof(elem(a))
+	// memclr{NoHeap,Has}Pointers(hp, hn)
+	// i = len(a) - 1
+	// }
+	n := ir.NewIfStmt(base.Pos, nil, nil, nil)
+	n.Body.Set(nil)
+	n.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(0))
+
+	// hp = &a[0]
+	hp := typecheck.Temp(types.Types[types.TUNSAFEPTR])
+
+	ix := ir.NewIndexExpr(base.Pos, a, ir.NewInt(0))
+	ix.SetBounded(true)
+	addr := typecheck.ConvNop(typecheck.NodAddr(ix), types.Types[types.TUNSAFEPTR])
+	n.Body.Append(ir.NewAssignStmt(base.Pos, hp, addr))
+
+	// hn = len(a) * sizeof(elem(a))
+	hn := typecheck.Temp(types.Types[types.TUINTPTR])
+	mul := typecheck.Conv(ir.NewBinaryExpr(base.Pos, ir.OMUL, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(elemsize)), types.Types[types.TUINTPTR])
+	n.Body.Append(ir.NewAssignStmt(base.Pos, hn, mul))
+
+	var fn ir.Node
+	if a.Type().Elem().HasPointers() {
+		// memclrHasPointers(hp, hn)
+		ir.CurFunc.SetWBPos(stmt.Pos())
+		fn = mkcall("memclrHasPointers", nil, nil, hp, hn)
+	} else {
+		// memclrNoHeapPointers(hp, hn)
+		fn = mkcall("memclrNoHeapPointers", nil, nil, hp, hn)
+	}
+
+	n.Body.Append(fn)
+
+	// i = len(a) - 1
+	v1 = ir.NewAssignStmt(base.Pos, v1, ir.NewBinaryExpr(base.Pos, ir.OSUB, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(1)))
+
+	n.Body.Append(v1)
+
+	n.Cond = typecheck.Expr(n.Cond)
+	n.Cond = typecheck.DefaultLit(n.Cond, nil)
+	typecheck.Stmts(n.Body)
+	return walkStmt(n)
+}
+
+// addptr returns (*T)(uintptr(p) + n).
+func addptr(p ir.Node, n int64) ir.Node {
+	t := p.Type()
+
+	// Convert through uintptr so the addition is plain integer
+	// arithmetic, then convert the result back to p's pointer type.
+	p = ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, p)
+	p.SetType(types.Types[types.TUINTPTR])
+
+	p = ir.NewBinaryExpr(base.Pos, ir.OADD, p, ir.NewInt(n))
+
+	p = ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, p)
+	p.SetType(t)
+
+	return p
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+)
+
+// walkSelect lowers an OSELECT statement in place: the case clauses
+// are compiled by walkSelectCases into the flat sel.Compiled list and
+// the original case list is cleared. A select may be walked only once.
+func walkSelect(sel *ir.SelectStmt) {
+	lno := ir.SetPos(sel)
+	if len(sel.Compiled) != 0 {
+		base.Fatalf("double walkselect")
+	}
+
+	init := sel.Init()
+	sel.PtrInit().Set(nil)
+
+	init = append(init, walkSelectCases(sel.Cases)...)
+	sel.Cases = ir.Nodes{}
+
+	sel.Compiled.Set(init)
+	walkStmtList(sel.Compiled)
+
+	base.Pos = lno
+}
+
+// walkSelectCases lowers the case clauses of a select to a flat list
+// of statements. Small selects get special lowerings: zero cases
+// become a call to runtime.block, a single case becomes the channel
+// operation itself, and two cases where one is default become a single
+// non-blocking operation. The general path fills an array of runtime
+// scase structs and dispatches on the index returned by
+// runtime.selectgo.
+func walkSelectCases(cases ir.Nodes) []ir.Node {
+	ncas := len(cases)
+	sellineno := base.Pos
+
+	// optimization: zero-case select
+	if ncas == 0 {
+		return []ir.Node{mkcall("block", nil, nil)}
+	}
+
+	// optimization: one-case select: single op.
+	if ncas == 1 {
+		cas := cases[0].(*ir.CaseStmt)
+		ir.SetPos(cas)
+		l := cas.Init()
+		if cas.Comm != nil { // not default:
+			n := cas.Comm
+			l = append(l, n.Init()...)
+			n.PtrInit().Set(nil)
+			switch n.Op() {
+			default:
+				base.Fatalf("select %v", n.Op())
+
+			case ir.OSEND:
+				// already ok
+
+			case ir.OSELRECV2:
+				r := n.(*ir.AssignListStmt)
+				if ir.IsBlank(r.Lhs[0]) && ir.IsBlank(r.Lhs[1]) {
+					n = r.Rhs[0]
+					break
+				}
+				r.SetOp(ir.OAS2RECV)
+			}
+
+			l = append(l, n)
+		}
+
+		l = append(l, cas.Body...)
+		l = append(l, ir.NewBranchStmt(base.Pos, ir.OBREAK, nil))
+		return l
+	}
+
+	// convert case value arguments to addresses.
+	// this rewrite is used by both the general code and the next optimization.
+	var dflt *ir.CaseStmt
+	for _, cas := range cases {
+		cas := cas.(*ir.CaseStmt)
+		ir.SetPos(cas)
+		n := cas.Comm
+		if n == nil {
+			dflt = cas
+			continue
+		}
+		switch n.Op() {
+		case ir.OSEND:
+			n := n.(*ir.SendStmt)
+			n.Value = typecheck.NodAddr(n.Value)
+			n.Value = typecheck.Expr(n.Value)
+
+		case ir.OSELRECV2:
+			n := n.(*ir.AssignListStmt)
+			if !ir.IsBlank(n.Lhs[0]) {
+				n.Lhs[0] = typecheck.NodAddr(n.Lhs[0])
+				n.Lhs[0] = typecheck.Expr(n.Lhs[0])
+			}
+		}
+	}
+
+	// optimization: two-case select but one is default: single non-blocking op.
+	if ncas == 2 && dflt != nil {
+		cas := cases[0].(*ir.CaseStmt)
+		if cas == dflt {
+			cas = cases[1].(*ir.CaseStmt)
+		}
+
+		n := cas.Comm
+		ir.SetPos(n)
+		r := ir.NewIfStmt(base.Pos, nil, nil, nil)
+		r.PtrInit().Set(cas.Init())
+		var call ir.Node
+		switch n.Op() {
+		default:
+			base.Fatalf("select %v", n.Op())
+
+		case ir.OSEND:
+			// if selectnbsend(c, v) { body } else { default body }
+			n := n.(*ir.SendStmt)
+			ch := n.Chan
+			call = mkcall1(chanfn("selectnbsend", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), ch, n.Value)
+
+		case ir.OSELRECV2:
+			n := n.(*ir.AssignListStmt)
+			recv := n.Rhs[0].(*ir.UnaryExpr)
+			ch := recv.X
+			elem := n.Lhs[0]
+			if ir.IsBlank(elem) {
+				elem = typecheck.NodNil()
+			}
+			if ir.IsBlank(n.Lhs[1]) {
+				// if selectnbrecv(&v, c) { body } else { default body }
+				call = mkcall1(chanfn("selectnbrecv", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, ch)
+			} else {
+				// TODO(cuonglm): make this use selectnbrecv()
+				// if selectnbrecv2(&v, &received, c) { body } else { default body }
+				receivedp := typecheck.Expr(typecheck.NodAddr(n.Lhs[1]))
+				call = mkcall1(chanfn("selectnbrecv2", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, receivedp, ch)
+			}
+		}
+
+		r.Cond = typecheck.Expr(call)
+		r.Body.Set(cas.Body)
+		r.Else.Set(append(dflt.Init(), dflt.Body...))
+		return []ir.Node{r, ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)}
+	}
+
+	if dflt != nil {
+		ncas--
+	}
+	// casorder maps the scase array index (sends first, receives at
+	// the end) back to the corresponding case clause for dispatch below.
+	casorder := make([]*ir.CaseStmt, ncas)
+	nsends, nrecvs := 0, 0
+
+	var init []ir.Node
+
+	// generate sel-struct
+	base.Pos = sellineno
+	selv := typecheck.Temp(types.NewArray(scasetype(), int64(ncas)))
+	init = append(init, typecheck.Stmt(ir.NewAssignStmt(base.Pos, selv, nil)))
+
+	// No initialization for order; runtime.selectgo is responsible for that.
+	order := typecheck.Temp(types.NewArray(types.Types[types.TUINT16], 2*int64(ncas)))
+
+	var pc0, pcs ir.Node
+	if base.Flag.Race {
+		pcs = typecheck.Temp(types.NewArray(types.Types[types.TUINTPTR], int64(ncas)))
+		pc0 = typecheck.Expr(typecheck.NodAddr(ir.NewIndexExpr(base.Pos, pcs, ir.NewInt(0))))
+	} else {
+		pc0 = typecheck.NodNil()
+	}
+
+	// register cases
+	for _, cas := range cases {
+		cas := cas.(*ir.CaseStmt)
+		ir.SetPos(cas)
+
+		init = append(init, cas.Init()...)
+		cas.PtrInit().Set(nil)
+
+		n := cas.Comm
+		if n == nil { // default:
+			continue
+		}
+
+		var i int
+		var c, elem ir.Node
+		switch n.Op() {
+		default:
+			base.Fatalf("select %v", n.Op())
+		case ir.OSEND:
+			n := n.(*ir.SendStmt)
+			i = nsends
+			nsends++
+			c = n.Chan
+			elem = n.Value
+		case ir.OSELRECV2:
+			n := n.(*ir.AssignListStmt)
+			nrecvs++
+			i = ncas - nrecvs
+			recv := n.Rhs[0].(*ir.UnaryExpr)
+			c = recv.X
+			elem = n.Lhs[0]
+		}
+
+		casorder[i] = cas
+
+		setField := func(f string, val ir.Node) {
+			r := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, ir.NewIndexExpr(base.Pos, selv, ir.NewInt(int64(i))), typecheck.Lookup(f)), val)
+			init = append(init, typecheck.Stmt(r))
+		}
+
+		c = typecheck.ConvNop(c, types.Types[types.TUNSAFEPTR])
+		setField("c", c)
+		if !ir.IsBlank(elem) {
+			elem = typecheck.ConvNop(elem, types.Types[types.TUNSAFEPTR])
+			setField("elem", elem)
+		}
+
+		// TODO(mdempsky): There should be a cleaner way to
+		// handle this.
+		if base.Flag.Race {
+			r := mkcall("selectsetpc", nil, nil, typecheck.NodAddr(ir.NewIndexExpr(base.Pos, pcs, ir.NewInt(int64(i)))))
+			init = append(init, r)
+		}
+	}
+	if nsends+nrecvs != ncas {
+		base.Fatalf("walkselectcases: miscount: %v + %v != %v", nsends, nrecvs, ncas)
+	}
+
+	// run the select
+	base.Pos = sellineno
+	chosen := typecheck.Temp(types.Types[types.TINT])
+	recvOK := typecheck.Temp(types.Types[types.TBOOL])
+	r := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
+	r.Lhs = []ir.Node{chosen, recvOK}
+	fn := typecheck.LookupRuntime("selectgo")
+	r.Rhs = []ir.Node{mkcall1(fn, fn.Type().Results(), nil, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, ir.NewInt(int64(nsends)), ir.NewInt(int64(nrecvs)), ir.NewBool(dflt == nil))}
+	init = append(init, typecheck.Stmt(r))
+
+	// selv and order are no longer alive after selectgo.
+	init = append(init, ir.NewUnaryExpr(base.Pos, ir.OVARKILL, selv))
+	init = append(init, ir.NewUnaryExpr(base.Pos, ir.OVARKILL, order))
+	if base.Flag.Race {
+		init = append(init, ir.NewUnaryExpr(base.Pos, ir.OVARKILL, pcs))
+	}
+
+	// dispatch cases
+	dispatch := func(cond ir.Node, cas *ir.CaseStmt) {
+		cond = typecheck.Expr(cond)
+		cond = typecheck.DefaultLit(cond, nil)
+
+		r := ir.NewIfStmt(base.Pos, cond, nil, nil)
+
+		if n := cas.Comm; n != nil && n.Op() == ir.OSELRECV2 {
+			n := n.(*ir.AssignListStmt)
+			if !ir.IsBlank(n.Lhs[1]) {
+				x := ir.NewAssignStmt(base.Pos, n.Lhs[1], recvOK)
+				r.Body.Append(typecheck.Stmt(x))
+			}
+		}
+
+		r.Body.Append(cas.Body.Take()...)
+		r.Body.Append(ir.NewBranchStmt(base.Pos, ir.OBREAK, nil))
+		init = append(init, r)
+	}
+
+	// chosen < 0 means the default case was selected.
+	if dflt != nil {
+		ir.SetPos(dflt)
+		dispatch(ir.NewBinaryExpr(base.Pos, ir.OLT, chosen, ir.NewInt(0)), dflt)
+	}
+	for i, cas := range casorder {
+		ir.SetPos(cas)
+		dispatch(ir.NewBinaryExpr(base.Pos, ir.OEQ, chosen, ir.NewInt(int64(i))), cas)
+	}
+
+	return init
+}
+
+// bytePtrToIndex returns a Node representing "(*byte)(&n[i])".
+// It is used to pass the selv and order arrays to runtime.selectgo.
+func bytePtrToIndex(n ir.Node, i int64) ir.Node {
+	s := typecheck.NodAddr(ir.NewIndexExpr(base.Pos, n, ir.NewInt(i)))
+	t := types.NewPtr(types.Types[types.TUINT8])
+	return typecheck.ConvNop(s, t)
+}
+
+// scase caches the struct type built by scasetype.
+var scase *types.Type
+
+// scasetype returns the type of the runtime scase struct, building it
+// on first use. Keep in sync with src/runtime/select.go.
+func scasetype() *types.Type {
+	if scase == nil {
+		scase = typecheck.NewStructType([]*ir.Field{
+			ir.NewField(base.Pos, typecheck.Lookup("c"), nil, types.Types[types.TUNSAFEPTR]),
+			ir.NewField(base.Pos, typecheck.Lookup("elem"), nil, types.Types[types.TUNSAFEPTR]),
+		})
+		scase.SetNoalg(true)
+	}
+	return scase
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+)
+
+// walkStmt lowers a single statement node into the form expected by
+// the back end, dispatching on the statement's Op.
+// The result of walkStmt MUST be assigned back to n, e.g.
+// n.Left = walkStmt(n.Left)
+func walkStmt(n ir.Node) ir.Node {
+	if n == nil {
+		return n
+	}
+
+	ir.SetPos(n)
+
+	walkStmtList(n.Init())
+
+	switch n.Op() {
+	default:
+		if n.Op() == ir.ONAME {
+			n := n.(*ir.Name)
+			base.Errorf("%v is not a top level statement", n.Sym())
+		} else {
+			base.Errorf("%v is not a top level statement", n.Op())
+		}
+		ir.Dump("nottop", n)
+		return n
+
+	// Expression statements: walked as expressions, with their init
+	// lists hoisted out and re-attached (or wrapped in an OBLOCK).
+	case ir.OAS,
+		ir.OASOP,
+		ir.OAS2,
+		ir.OAS2DOTTYPE,
+		ir.OAS2RECV,
+		ir.OAS2FUNC,
+		ir.OAS2MAPR,
+		ir.OCLOSE,
+		ir.OCOPY,
+		ir.OCALLMETH,
+		ir.OCALLINTER,
+		ir.OCALL,
+		ir.OCALLFUNC,
+		ir.ODELETE,
+		ir.OSEND,
+		ir.OPRINT,
+		ir.OPRINTN,
+		ir.OPANIC,
+		ir.ORECOVER,
+		ir.OGETG:
+		if n.Typecheck() == 0 {
+			base.Fatalf("missing typecheck: %+v", n)
+		}
+		init := n.Init()
+		n.PtrInit().Set(nil)
+		n = walkExpr(n, &init)
+		if n.Op() == ir.ONAME {
+			// copy rewrote to a statement list and a temp for the length.
+			// Throw away the temp to avoid plain values as statements.
+			n = ir.NewBlockStmt(n.Pos(), init)
+			init.Set(nil)
+		}
+		if len(init) > 0 {
+			switch n.Op() {
+			case ir.OAS, ir.OAS2, ir.OBLOCK:
+				n.PtrInit().Prepend(init...)
+
+			default:
+				init.Append(n)
+				n = ir.NewBlockStmt(n.Pos(), init)
+			}
+		}
+		return n
+
+	// special case for a receive where we throw away
+	// the value received.
+	case ir.ORECV:
+		n := n.(*ir.UnaryExpr)
+		return walkRecv(n)
+
+	// Statements that need no lowering here.
+	case ir.OBREAK,
+		ir.OCONTINUE,
+		ir.OFALL,
+		ir.OGOTO,
+		ir.OLABEL,
+		ir.ODCLCONST,
+		ir.ODCLTYPE,
+		ir.OCHECKNIL,
+		ir.OVARDEF,
+		ir.OVARKILL,
+		ir.OVARLIVE:
+		return n
+
+	case ir.ODCL:
+		n := n.(*ir.Decl)
+		return walkDecl(n)
+
+	case ir.OBLOCK:
+		n := n.(*ir.BlockStmt)
+		walkStmtList(n.List)
+		return n
+
+	case ir.OCASE:
+		// Cases are consumed by walkSwitch/walkSelect; a stray one is a bug.
+		base.Errorf("case statement out of place")
+		panic("unreachable")
+
+	case ir.ODEFER:
+		// Record defer bookkeeping on the current function, then fall
+		// through to the shared OGO/ODEFER lowering.
+		n := n.(*ir.GoDeferStmt)
+		ir.CurFunc.SetHasDefer(true)
+		ir.CurFunc.NumDefers++
+		if ir.CurFunc.NumDefers > maxOpenDefers {
+			// Don't allow open-coded defers if there are more than
+			// 8 defers in the function, since we use a single
+			// byte to record active defers.
+			ir.CurFunc.SetOpenCodedDeferDisallowed(true)
+		}
+		if n.Esc() != ir.EscNever {
+			// If n.Esc is not EscNever, then this defer occurs in a loop,
+			// so open-coded defers cannot be used in this function.
+			ir.CurFunc.SetOpenCodedDeferDisallowed(true)
+		}
+		fallthrough
+	case ir.OGO:
+		n := n.(*ir.GoDeferStmt)
+		return walkGoDefer(n)
+
+	case ir.OFOR, ir.OFORUNTIL:
+		n := n.(*ir.ForStmt)
+		return walkFor(n)
+
+	case ir.OIF:
+		n := n.(*ir.IfStmt)
+		return walkIf(n)
+
+	case ir.ORETURN:
+		n := n.(*ir.ReturnStmt)
+		return walkReturn(n)
+
+	case ir.ORETJMP:
+		n := n.(*ir.BranchStmt)
+		return n
+
+	case ir.OINLMARK:
+		n := n.(*ir.InlineMarkStmt)
+		return n
+
+	case ir.OSELECT:
+		n := n.(*ir.SelectStmt)
+		walkSelect(n)
+		return n
+
+	case ir.OSWITCH:
+		n := n.(*ir.SwitchStmt)
+		walkSwitch(n)
+		return n
+
+	case ir.ORANGE:
+		n := n.(*ir.RangeStmt)
+		return walkRange(n)
+	}
+
+	// No return! Each case must return (or panic),
+	// to avoid confusion about what gets returned
+	// in the presence of type assertions.
+}
+
+// walkStmtList walks each statement in s, replacing each element in
+// place with its walked form.
+func walkStmtList(s []ir.Node) {
+	for i := range s {
+		s[i] = walkStmt(s[i])
+	}
+}
+
+// walkDecl walks an ODCL node. For heap-escaped locals (PAUTOHEAP) it
+// replaces the declaration with an assignment allocating the backing
+// storage; other declarations pass through unchanged.
+func walkDecl(n *ir.Decl) ir.Node {
+	v := n.X.(*ir.Name)
+	if v.Class_ == ir.PAUTOHEAP {
+		if base.Flag.CompilingRuntime {
+			base.Errorf("%v escapes to heap, not allowed in runtime", v)
+		}
+		nn := ir.NewAssignStmt(base.Pos, v.Name().Heapaddr, callnew(v.Type()))
+		nn.Def = true
+		return walkStmt(typecheck.Stmt(nn))
+	}
+	return n
+}
+
+// walkFor walks an OFOR or OFORUNTIL node: the condition (with its
+// init list folded back in), the post statement, the late list (for
+// OFORUNTIL only), and the body.
+func walkFor(n *ir.ForStmt) ir.Node {
+	if n.Cond != nil {
+		walkStmtList(n.Cond.Init())
+		init := n.Cond.Init()
+		n.Cond.PtrInit().Set(nil)
+		n.Cond = walkExpr(n.Cond, &init)
+		n.Cond = ir.InitExpr(init, n.Cond)
+	}
+
+	n.Post = walkStmt(n.Post)
+	if n.Op() == ir.OFORUNTIL {
+		walkStmtList(n.Late)
+	}
+	walkStmtList(n.Body)
+	return n
+}
+
+// walkGoDefer walks an OGO or ODEFER node. Calls that cannot be
+// issued directly (print/println, slow-path delete, calls carrying a
+// body) are wrapped into a generated closure-like function via
+// wrapCall; any init statements produced are emitted in an OBLOCK
+// ahead of the go/defer itself.
+func walkGoDefer(n *ir.GoDeferStmt) ir.Node {
+	var init ir.Nodes
+	switch call := n.Call; call.Op() {
+	case ir.OPRINT, ir.OPRINTN:
+		call := call.(*ir.CallExpr)
+		n.Call = wrapCall(call, &init)
+
+	case ir.ODELETE:
+		call := call.(*ir.CallExpr)
+		// Only the generic (slow) map delete needs wrapping; the
+		// fast-path variants are plain runtime calls.
+		if mapfast(call.Args[0].Type()) == mapslow {
+			n.Call = wrapCall(call, &init)
+		} else {
+			n.Call = walkExpr(call, &init)
+		}
+
+	case ir.OCOPY:
+		call := call.(*ir.BinaryExpr)
+		n.Call = walkCopy(call, &init, true)
+
+	case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
+		call := call.(*ir.CallExpr)
+		if len(call.Body) > 0 {
+			n.Call = wrapCall(call, &init)
+		} else {
+			n.Call = walkExpr(call, &init)
+		}
+
+	default:
+		n.Call = walkExpr(call, &init)
+	}
+	if len(init) > 0 {
+		init.Append(n)
+		return ir.NewBlockStmt(n.Pos(), init)
+	}
+	return n
+}
+
+// walkIf walks an OIF node: the condition (side effects go to the
+// statement's init list), then both branches.
+func walkIf(n *ir.IfStmt) ir.Node {
+	n.Cond = walkExpr(n.Cond, n.PtrInit())
+	walkStmtList(n.Body)
+	walkStmtList(n.Else)
+	return n
+}
+
+// wrapCall rewrites call n into a call to a freshly generated wrapper
+// function that performs the original call; see the package-level
+// comment above wrapCall_prgen for the motivating go/defer rewrites.
+// The result of wrapCall MUST be assigned back to n, e.g.
+// n.Left = wrapCall(n.Left, init)
+func wrapCall(n *ir.CallExpr, init *ir.Nodes) ir.Node {
+	if len(n.Init()) != 0 {
+		walkStmtList(n.Init())
+		init.Append(n.PtrInit().Take()...)
+	}
+
+	isBuiltinCall := n.Op() != ir.OCALLFUNC && n.Op() != ir.OCALLMETH && n.Op() != ir.OCALLINTER
+
+	// Turn f(a, b, []T{c, d, e}...) back into f(a, b, c, d, e).
+	if !isBuiltinCall && n.IsDDD {
+		last := len(n.Args) - 1
+		if va := n.Args[last]; va.Op() == ir.OSLICELIT {
+			va := va.(*ir.CompLitExpr)
+			n.Args.Set(append(n.Args[:last], va.List...))
+			n.IsDDD = false
+		}
+	}
+
+	// origArgs keeps track of what argument is uintptr-unsafe/unsafe-uintptr conversion.
+	origArgs := make([]ir.Node, len(n.Args))
+	var funcArgs []*ir.Field
+	for i, arg := range n.Args {
+		s := typecheck.LookupNum("a", i)
+		if !isBuiltinCall && arg.Op() == ir.OCONVNOP && arg.Type().IsUintptr() && arg.(*ir.ConvExpr).X.Type().IsUnsafePtr() {
+			// Pass the unsafe.Pointer itself and reapply the uintptr
+			// conversion inside the wrapper (keeps the pointer visible
+			// to the GC across the call setup).
+			origArgs[i] = arg
+			arg = arg.(*ir.ConvExpr).X
+			n.Args[i] = arg
+		}
+		funcArgs = append(funcArgs, ir.NewField(base.Pos, s, nil, arg.Type()))
+	}
+	t := ir.NewFuncType(base.Pos, nil, funcArgs, nil)
+
+	wrapCall_prgen++
+	sym := typecheck.LookupNum("wrap·", wrapCall_prgen)
+	fn := typecheck.DeclFunc(sym, t)
+
+	args := ir.ParamNames(t.Type())
+	for i, origArg := range origArgs {
+		if origArg == nil {
+			continue
+		}
+		args[i] = ir.NewConvExpr(base.Pos, origArg.Op(), origArg.Type(), args[i])
+	}
+	call := ir.NewCallExpr(base.Pos, n.Op(), n.X, args)
+	if !isBuiltinCall {
+		call.SetOp(ir.OCALL)
+		call.IsDDD = n.IsDDD
+	}
+	fn.Body = []ir.Node{call}
+
+	typecheck.FinishFuncBody()
+
+	typecheck.Func(fn)
+	typecheck.Stmts(fn.Body)
+	typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
+
+	// Replace the original call with a call to the wrapper.
+	call = ir.NewCallExpr(base.Pos, ir.OCALL, fn.Nname, n.Args)
+	return walkExpr(typecheck.Stmt(call), init)
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "go/constant"
+ "go/token"
+ "sort"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// walkSwitch walks a switch statement, dispatching to the type-switch
+// or expression-switch lowering as appropriate.
+func walkSwitch(sw *ir.SwitchStmt) {
+	// Guard against double walk, see #25776.
+	if len(sw.Cases) == 0 && len(sw.Compiled) > 0 {
+		return // Was fatal, but eliminating every possible source of double-walking is hard
+	}
+
+	if sw.Tag != nil && sw.Tag.Op() == ir.OTYPESW {
+		walkSwitchType(sw)
+	} else {
+		walkSwitchExpr(sw)
+	}
+}
+
+// walkSwitchExpr generates an AST implementing sw. sw is an
+// expression switch. The cases are compiled into a dispatch sequence
+// (built by exprSwitch) of gotos into labeled case bodies, stored in
+// sw.Compiled; sw.Cases is cleared.
+func walkSwitchExpr(sw *ir.SwitchStmt) {
+	lno := ir.SetPos(sw)
+
+	cond := sw.Tag
+	sw.Tag = nil
+
+	// convert switch {...} to switch true {...}
+	if cond == nil {
+		cond = ir.NewBool(true)
+		cond = typecheck.Expr(cond)
+		cond = typecheck.DefaultLit(cond, nil)
+	}
+
+	// Given "switch string(byteslice)",
+	// with all cases being side-effect free,
+	// use a zero-cost alias of the byte slice.
+	// Do this before calling walkexpr on cond,
+	// because walkexpr will lower the string
+	// conversion into a runtime call.
+	// See issue 24937 for more discussion.
+	if cond.Op() == ir.OBYTES2STR && allCaseExprsAreSideEffectFree(sw) {
+		cond := cond.(*ir.ConvExpr)
+		cond.SetOp(ir.OBYTES2STRTMP)
+	}
+
+	cond = walkExpr(cond, sw.PtrInit())
+	if cond.Op() != ir.OLITERAL && cond.Op() != ir.ONIL {
+		// Evaluate the tag once into a temp so each comparison reuses it.
+		cond = copyExpr(cond, cond.Type(), &sw.Compiled)
+	}
+
+	base.Pos = lno
+
+	s := exprSwitch{
+		exprname: cond,
+	}
+
+	var defaultGoto ir.Node
+	var body ir.Nodes
+	for _, ncase := range sw.Cases {
+		ncase := ncase.(*ir.CaseStmt)
+		label := typecheck.AutoLabel(".s")
+		jmp := ir.NewBranchStmt(ncase.Pos(), ir.OGOTO, label)
+
+		// Process case dispatch.
+		if len(ncase.List) == 0 {
+			if defaultGoto != nil {
+				base.Fatalf("duplicate default case not detected during typechecking")
+			}
+			defaultGoto = jmp
+		}
+
+		for _, n1 := range ncase.List {
+			s.Add(ncase.Pos(), n1, jmp)
+		}
+
+		// Process body.
+		body.Append(ir.NewLabelStmt(ncase.Pos(), label))
+		body.Append(ncase.Body...)
+		if fall, pos := endsInFallthrough(ncase.Body); !fall {
+			br := ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)
+			br.SetPos(pos)
+			body.Append(br)
+		}
+	}
+	sw.Cases.Set(nil)
+
+	if defaultGoto == nil {
+		br := ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)
+		br.SetPos(br.Pos().WithNotStmt())
+		defaultGoto = br
+	}
+
+	// Dispatch code first, then the default jump, then the case bodies.
+	s.Emit(&sw.Compiled)
+	sw.Compiled.Append(defaultGoto)
+	sw.Compiled.Append(body.Take()...)
+	walkStmtList(sw.Compiled)
+}
+
+// An exprSwitch walks an expression switch. It accumulates constant
+// clauses and flushes them into binary-search dispatch code.
+type exprSwitch struct {
+	exprname ir.Node // value being switched on
+
+	done    ir.Nodes     // dispatch code generated so far
+	clauses []exprClause // pending constant clauses, not yet flushed
+}
+
+// exprClause is one case value (or merged integer range lo..hi) and
+// the goto to take when it matches.
+type exprClause struct {
+	pos    src.XPos
+	lo, hi ir.Node
+	jmp    ir.Node
+}
+
+// Add records a case clause comparing the switch value against expr,
+// jumping to jmp on match. Ordered constant clauses are batched for
+// later binary search; anything else forces an immediate flush so
+// evaluation order is preserved.
+func (s *exprSwitch) Add(pos src.XPos, expr, jmp ir.Node) {
+	c := exprClause{pos: pos, lo: expr, hi: expr, jmp: jmp}
+	if types.IsOrdered[s.exprname.Type().Kind()] && expr.Op() == ir.OLITERAL {
+		s.clauses = append(s.clauses, c)
+		return
+	}
+
+	s.flush()
+	s.clauses = append(s.clauses, c)
+	s.flush()
+}
+
+// Emit flushes any pending clauses and appends all generated dispatch
+// code to out.
+func (s *exprSwitch) Emit(out *ir.Nodes) {
+	s.flush()
+	out.Append(s.done.Take()...)
+}
+
+// flush compiles the pending clauses into dispatch code appended to
+// s.done. Strings dispatch by length first, then value; integers are
+// sorted and adjacent same-target cases merged into ranges.
+func (s *exprSwitch) flush() {
+	cc := s.clauses
+	s.clauses = nil
+	if len(cc) == 0 {
+		return
+	}
+
+	// Caution: If len(cc) == 1, then cc[0] might not an OLITERAL.
+	// The code below is structured to implicitly handle this case
+	// (e.g., sort.Slice doesn't need to invoke the less function
+	// when there's only a single slice element).
+
+	if s.exprname.Type().IsString() && len(cc) >= 2 {
+		// Sort strings by length and then by value. It is
+		// much cheaper to compare lengths than values, and
+		// all we need here is consistency. We respect this
+		// sorting below.
+		sort.Slice(cc, func(i, j int) bool {
+			si := ir.StringVal(cc[i].lo)
+			sj := ir.StringVal(cc[j].lo)
+			if len(si) != len(sj) {
+				return len(si) < len(sj)
+			}
+			return si < sj
+		})
+
+		// runLen returns the string length associated with a
+		// particular run of exprClauses.
+		runLen := func(run []exprClause) int64 { return int64(len(ir.StringVal(run[0].lo))) }
+
+		// Collapse runs of consecutive strings with the same length.
+		var runs [][]exprClause
+		start := 0
+		for i := 1; i < len(cc); i++ {
+			if runLen(cc[start:]) != runLen(cc[i:]) {
+				runs = append(runs, cc[start:i])
+				start = i
+			}
+		}
+		runs = append(runs, cc[start:])
+
+		// Perform two-level binary search.
+		binarySearch(len(runs), &s.done,
+			func(i int) ir.Node {
+				return ir.NewBinaryExpr(base.Pos, ir.OLE, ir.NewUnaryExpr(base.Pos, ir.OLEN, s.exprname), ir.NewInt(runLen(runs[i-1])))
+			},
+			func(i int, nif *ir.IfStmt) {
+				run := runs[i]
+				nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, ir.NewUnaryExpr(base.Pos, ir.OLEN, s.exprname), ir.NewInt(runLen(run)))
+				s.search(run, &nif.Body)
+			},
+		)
+		return
+	}
+
+	sort.Slice(cc, func(i, j int) bool {
+		return constant.Compare(cc[i].lo.Val(), token.LSS, cc[j].lo.Val())
+	})
+
+	// Merge consecutive integer cases.
+	if s.exprname.Type().IsInteger() {
+		merged := cc[:1]
+		for _, c := range cc[1:] {
+			last := &merged[len(merged)-1]
+			if last.jmp == c.jmp && ir.Int64Val(last.hi)+1 == ir.Int64Val(c.lo) {
+				last.hi = c.lo
+			} else {
+				merged = append(merged, c)
+			}
+		}
+		cc = merged
+	}
+
+	s.search(cc, &s.done)
+}
+
+// search appends to out a binary search over the sorted clauses cc,
+// testing each leaf clause with exprClause.test.
+func (s *exprSwitch) search(cc []exprClause, out *ir.Nodes) {
+	binarySearch(len(cc), out,
+		func(i int) ir.Node {
+			return ir.NewBinaryExpr(base.Pos, ir.OLE, s.exprname, cc[i-1].hi)
+		},
+		func(i int, nif *ir.IfStmt) {
+			c := &cc[i]
+			nif.Cond = c.test(s.exprname)
+			nif.Body = []ir.Node{c.jmp}
+		},
+	)
+}
+
+// test returns the boolean expression that matches exprname against
+// this clause: a range check for merged integer runs, the case value
+// itself (or its negation) for constant-bool switches, and a plain
+// equality comparison otherwise.
+func (c *exprClause) test(exprname ir.Node) ir.Node {
+	// Integer range.
+	if c.hi != c.lo {
+		low := ir.NewBinaryExpr(c.pos, ir.OGE, exprname, c.lo)
+		high := ir.NewBinaryExpr(c.pos, ir.OLE, exprname, c.hi)
+		return ir.NewLogicalExpr(c.pos, ir.OANDAND, low, high)
+	}
+
+	// Optimize "switch true { ...}" and "switch false { ... }".
+	if ir.IsConst(exprname, constant.Bool) && !c.lo.Type().IsInterface() {
+		if ir.BoolVal(exprname) {
+			return c.lo
+		} else {
+			return ir.NewUnaryExpr(c.pos, ir.ONOT, c.lo)
+		}
+	}
+
+	return ir.NewBinaryExpr(c.pos, ir.OEQ, exprname, c.lo)
+}
+
+// allCaseExprsAreSideEffectFree reports whether every case expression
+// in sw is a constant (OLITERAL), and hence free of side effects.
+func allCaseExprsAreSideEffectFree(sw *ir.SwitchStmt) bool {
+	// In theory, we could be more aggressive, allowing any
+	// side-effect-free expressions in cases, but it's a bit
+	// tricky because some of that information is unavailable due
+	// to the introduction of temporaries during order.
+	// Restricting to constants is simple and probably powerful
+	// enough.
+
+	for _, ncase := range sw.Cases {
+		ncase := ncase.(*ir.CaseStmt)
+		for _, v := range ncase.List {
+			if v.Op() != ir.OLITERAL {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// endsInFallthrough reports whether stmts ends with a "fallthrough"
+// statement, and the position of the statement examined.
+func endsInFallthrough(stmts []ir.Node) (bool, src.XPos) {
+	// Search backwards for the index of the fallthrough
+	// statement. Do not assume it'll be in the last
+	// position, since in some cases (e.g. when the statement
+	// list contains autotmp_ variables), one or more OVARKILL
+	// nodes will be at the end of the list.
+
+	i := len(stmts) - 1
+	for i >= 0 && stmts[i].Op() == ir.OVARKILL {
+		i--
+	}
+	if i < 0 {
+		return false, src.NoXPos
+	}
+	return stmts[i].Op() == ir.OFALL, stmts[i].Pos()
+}
+
+// walkSwitchType generates an AST that implements sw, where sw is a
+// type switch. Dispatch is by a nil check on the descriptor word,
+// then a hash comparison (via typeSwitch) followed by a comma-ok type
+// assertion per candidate type. Generated code goes to sw.Compiled;
+// sw.Cases is cleared.
+func walkSwitchType(sw *ir.SwitchStmt) {
+	var s typeSwitch
+	s.facename = sw.Tag.(*ir.TypeSwitchGuard).X
+	sw.Tag = nil
+
+	s.facename = walkExpr(s.facename, sw.PtrInit())
+	s.facename = copyExpr(s.facename, s.facename.Type(), &sw.Compiled)
+	s.okname = typecheck.Temp(types.Types[types.TBOOL])
+
+	// Get interface descriptor word.
+	// For empty interfaces this will be the type.
+	// For non-empty interfaces this will be the itab.
+	itab := ir.NewUnaryExpr(base.Pos, ir.OITAB, s.facename)
+
+	// For empty interfaces, do:
+	//     if e._type == nil {
+	//         do nil case if it exists, otherwise default
+	//     }
+	//     h := e._type.hash
+	// Use a similar strategy for non-empty interfaces.
+	ifNil := ir.NewIfStmt(base.Pos, nil, nil, nil)
+	ifNil.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, itab, typecheck.NodNil())
+	base.Pos = base.Pos.WithNotStmt() // disable statement marks after the first check.
+	ifNil.Cond = typecheck.Expr(ifNil.Cond)
+	ifNil.Cond = typecheck.DefaultLit(ifNil.Cond, nil)
+	// ifNil.Nbody assigned at end.
+	sw.Compiled.Append(ifNil)
+
+	// Load hash from type or itab.
+	dotHash := ir.NewSelectorExpr(base.Pos, ir.ODOTPTR, itab, nil)
+	dotHash.SetType(types.Types[types.TUINT32])
+	dotHash.SetTypecheck(1)
+	if s.facename.Type().IsEmptyInterface() {
+		dotHash.Offset = int64(2 * types.PtrSize) // offset of hash in runtime._type
+	} else {
+		dotHash.Offset = int64(2 * types.PtrSize) // offset of hash in runtime.itab
+	}
+	dotHash.SetBounded(true) // guaranteed not to fault
+	s.hashname = copyExpr(dotHash, dotHash.Type(), &sw.Compiled)
+
+	br := ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)
+	var defaultGoto, nilGoto ir.Node
+	var body ir.Nodes
+	for _, ncase := range sw.Cases {
+		ncase := ncase.(*ir.CaseStmt)
+		var caseVar ir.Node
+		if len(ncase.Vars) != 0 {
+			caseVar = ncase.Vars[0]
+		}
+
+		// For single-type cases with an interface type,
+		// we initialize the case variable as part of the type assertion.
+		// In other cases, we initialize it in the body.
+		var singleType *types.Type
+		if len(ncase.List) == 1 && ncase.List[0].Op() == ir.OTYPE {
+			singleType = ncase.List[0].Type()
+		}
+		caseVarInitialized := false
+
+		label := typecheck.AutoLabel(".s")
+		jmp := ir.NewBranchStmt(ncase.Pos(), ir.OGOTO, label)
+
+		if len(ncase.List) == 0 { // default:
+			if defaultGoto != nil {
+				base.Fatalf("duplicate default case not detected during typechecking")
+			}
+			defaultGoto = jmp
+		}
+
+		for _, n1 := range ncase.List {
+			if ir.IsNil(n1) { // case nil:
+				if nilGoto != nil {
+					base.Fatalf("duplicate nil case not detected during typechecking")
+				}
+				nilGoto = jmp
+				continue
+			}
+
+			if singleType != nil && singleType.IsInterface() {
+				s.Add(ncase.Pos(), n1.Type(), caseVar, jmp)
+				caseVarInitialized = true
+			} else {
+				s.Add(ncase.Pos(), n1.Type(), nil, jmp)
+			}
+		}
+
+		body.Append(ir.NewLabelStmt(ncase.Pos(), label))
+		if caseVar != nil && !caseVarInitialized {
+			val := s.facename
+			if singleType != nil {
+				// We have a single concrete type. Extract the data.
+				if singleType.IsInterface() {
+					base.Fatalf("singleType interface should have been handled in Add")
+				}
+				val = ifaceData(ncase.Pos(), s.facename, singleType)
+			}
+			l := []ir.Node{
+				ir.NewDecl(ncase.Pos(), ir.ODCL, caseVar),
+				ir.NewAssignStmt(ncase.Pos(), caseVar, val),
+			}
+			typecheck.Stmts(l)
+			body.Append(l...)
+		}
+		body.Append(ncase.Body...)
+		body.Append(br)
+	}
+	sw.Cases.Set(nil)
+
+	if defaultGoto == nil {
+		defaultGoto = br
+	}
+	if nilGoto == nil {
+		nilGoto = defaultGoto
+	}
+	ifNil.Body = []ir.Node{nilGoto}
+
+	// Dispatch code first, then the default jump, then the case bodies.
+	s.Emit(&sw.Compiled)
+	sw.Compiled.Append(defaultGoto)
+	sw.Compiled.Append(body.Take()...)
+
+	walkStmtList(sw.Compiled)
+}
+
+// A typeSwitch walks a type switch, accumulating concrete-type
+// clauses keyed by type hash and flushing them into binary-search
+// dispatch code.
+type typeSwitch struct {
+	// Temporary variables (i.e., ONAMEs) used by type switch dispatch logic:
+	facename ir.Node // value being type-switched on
+	hashname ir.Node // type hash of the value being type-switched on
+	okname   ir.Node // boolean used for comma-ok type assertions
+
+	done    ir.Nodes     // dispatch code generated so far
+	clauses []typeClause // pending concrete-type clauses, not yet flushed
+}
+
+// typeClause is the dispatch body for one concrete case type,
+// keyed by that type's hash.
+type typeClause struct {
+	hash uint32
+	body ir.Nodes
+}
+
+// Add records a case testing whether the switched value has type typ,
+// jumping to jmp on success and assigning the asserted value to
+// caseVar (or blank) via a comma-ok assertion. Concrete types are
+// batched by hash for binary search; interface types force a flush
+// and are tested in order.
+func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp ir.Node) {
+	var body ir.Nodes
+	if caseVar != nil {
+		l := []ir.Node{
+			ir.NewDecl(pos, ir.ODCL, caseVar),
+			ir.NewAssignStmt(pos, caseVar, nil),
+		}
+		typecheck.Stmts(l)
+		body.Append(l...)
+	} else {
+		caseVar = ir.BlankNode
+	}
+
+	// cv, ok = iface.(type)
+	as := ir.NewAssignListStmt(pos, ir.OAS2, nil, nil)
+	as.Lhs = []ir.Node{caseVar, s.okname} // cv, ok =
+	dot := ir.NewTypeAssertExpr(pos, s.facename, nil)
+	dot.SetType(typ) // iface.(type)
+	as.Rhs = []ir.Node{dot}
+	appendWalkStmt(&body, as)
+
+	// if ok { goto label }
+	nif := ir.NewIfStmt(pos, nil, nil, nil)
+	nif.Cond = s.okname
+	nif.Body = []ir.Node{jmp}
+	body.Append(nif)
+
+	if !typ.IsInterface() {
+		s.clauses = append(s.clauses, typeClause{
+			hash: types.TypeHash(typ),
+			body: body,
+		})
+		return
+	}
+
+	s.flush()
+	s.done.Append(body.Take()...)
+}
+
+// Emit flushes any pending clauses and appends all generated dispatch
+// code to out.
+func (s *typeSwitch) Emit(out *ir.Nodes) {
+	s.flush()
+	out.Append(s.done.Take()...)
+}
+
+// flush compiles the pending concrete-type clauses into a binary
+// search over type hashes, appended to s.done. Clauses with equal
+// hashes (possible collisions) are merged and tested sequentially.
+func (s *typeSwitch) flush() {
+	cc := s.clauses
+	s.clauses = nil
+	if len(cc) == 0 {
+		return
+	}
+
+	sort.Slice(cc, func(i, j int) bool { return cc[i].hash < cc[j].hash })
+
+	// Combine adjacent cases with the same hash.
+	merged := cc[:1]
+	for _, c := range cc[1:] {
+		last := &merged[len(merged)-1]
+		if last.hash == c.hash {
+			last.body.Append(c.body.Take()...)
+		} else {
+			merged = append(merged, c)
+		}
+	}
+	cc = merged
+
+	binarySearch(len(cc), &s.done,
+		func(i int) ir.Node {
+			return ir.NewBinaryExpr(base.Pos, ir.OLE, s.hashname, ir.NewInt(int64(cc[i-1].hash)))
+		},
+		func(i int, nif *ir.IfStmt) {
+			// TODO(mdempsky): Omit hash equality check if
+			// there's only one type.
+			c := cc[i]
+			nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, s.hashname, ir.NewInt(int64(c.hash)))
+			nif.Body.Append(c.body.Take()...)
+		},
+	)
+}
+
+// binarySearch constructs a binary search tree for handling n cases,
+// and appends it to out. It's used for efficiently implementing
+// switch statements.
+//
+// less(i) should return a boolean expression. If it evaluates true,
+// then cases before i will be tested; otherwise, cases i and later.
+//
+// leaf(i, nif) should setup nif (an OIF node) to test case i. In
+// particular, it should set nif.Left and nif.Nbody.
+func binarySearch(n int, out *ir.Nodes, less func(i int) ir.Node, leaf func(i int, nif *ir.IfStmt)) {
+	const binarySearchMin = 4 // minimum number of cases for binary search
+
+	var do func(lo, hi int, out *ir.Nodes)
+	do = func(lo, hi int, out *ir.Nodes) {
+		n := hi - lo
+		if n < binarySearchMin {
+			// Few cases: emit a linear chain of ifs, each nested in
+			// the previous one's else branch.
+			for i := lo; i < hi; i++ {
+				nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
+				leaf(i, nif)
+				base.Pos = base.Pos.WithNotStmt()
+				nif.Cond = typecheck.Expr(nif.Cond)
+				nif.Cond = typecheck.DefaultLit(nif.Cond, nil)
+				out.Append(nif)
+				out = &nif.Else
+			}
+			return
+		}
+
+		// Split at the midpoint and recurse into both halves.
+		half := lo + n/2
+		nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
+		nif.Cond = less(half)
+		base.Pos = base.Pos.WithNotStmt()
+		nif.Cond = typecheck.Expr(nif.Cond)
+		nif.Cond = typecheck.DefaultLit(nif.Cond, nil)
+		do(lo, half, &nif.Body)
+		do(half, hi, &nif.Else)
+		out.Append(nif)
+	}
+
+	do(0, n, out)
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// The constant is known to runtime.
+const tmpstringbufsize = 32
+
+// zeroValSize is the size of the runtime's shared zero-value buffer.
+const zeroValSize = 1024 // must match value of runtime/map.go:maxZero
+
+// Walk performs the walk phase on fn: ordering, unused-variable
+// diagnostics, statement lowering, result zeroing, heap-escape
+// parameter moves, and (optionally) instrumentation. It bails out
+// early if errors are reported along the way.
+func Walk(fn *ir.Func) {
+	ir.CurFunc = fn
+	errorsBefore := base.Errors()
+	order(fn)
+	if base.Errors() > errorsBefore {
+		return
+	}
+
+	if base.Flag.W != 0 {
+		s := fmt.Sprintf("\nbefore walk %v", ir.CurFunc.Sym())
+		ir.DumpList(s, ir.CurFunc.Body)
+	}
+
+	lno := base.Pos
+
+	// Final typecheck for any unused variables.
+	for i, ln := range fn.Dcl {
+		if ln.Op() == ir.ONAME && (ln.Class_ == ir.PAUTO || ln.Class_ == ir.PAUTOHEAP) {
+			ln = typecheck.AssignExpr(ln).(*ir.Name)
+			fn.Dcl[i] = ln
+		}
+	}
+
+	// Propagate the used flag for typeswitch variables up to the NONAME in its definition.
+	for _, ln := range fn.Dcl {
+		if ln.Op() == ir.ONAME && (ln.Class_ == ir.PAUTO || ln.Class_ == ir.PAUTOHEAP) && ln.Defn != nil && ln.Defn.Op() == ir.OTYPESW && ln.Used() {
+			ln.Defn.(*ir.TypeSwitchGuard).Used = true
+		}
+	}
+
+	// Report declared-but-not-used variables.
+	for _, ln := range fn.Dcl {
+		if ln.Op() != ir.ONAME || (ln.Class_ != ir.PAUTO && ln.Class_ != ir.PAUTOHEAP) || ln.Sym().Name[0] == '&' || ln.Used() {
+			continue
+		}
+		if defn, ok := ln.Defn.(*ir.TypeSwitchGuard); ok {
+			if defn.Used {
+				continue
+			}
+			base.ErrorfAt(defn.Tag.Pos(), "%v declared but not used", ln.Sym())
+			defn.Used = true // suppress repeats
+		} else {
+			base.ErrorfAt(ln.Pos(), "%v declared but not used", ln.Sym())
+		}
+	}
+
+	base.Pos = lno
+	if base.Errors() > errorsBefore {
+		return
+	}
+	walkStmtList(ir.CurFunc.Body)
+	if base.Flag.W != 0 {
+		s := fmt.Sprintf("after walk %v", ir.CurFunc.Sym())
+		ir.DumpList(s, ir.CurFunc.Body)
+	}
+
+	zeroResults()
+	heapmoves()
+	if base.Flag.W != 0 && len(ir.CurFunc.Enter) > 0 {
+		s := fmt.Sprintf("enter %v", ir.CurFunc.Sym())
+		ir.DumpList(s, ir.CurFunc.Enter)
+	}
+
+	if base.Flag.Cfg.Instrumenting {
+		instrument(fn)
+	}
+}
+
+// paramoutheap reports whether any output parameter of fn is a stack
+// copy or has had its address taken (i.e. lives on the heap).
+// It relies on fn.Dcl listing parameters before autos.
+func paramoutheap(fn *ir.Func) bool {
+	for _, ln := range fn.Dcl {
+		switch ln.Class_ {
+		case ir.PPARAMOUT:
+			if ir.IsParamStackCopy(ln) || ln.Addrtaken() {
+				return true
+			}
+
+		case ir.PAUTO:
+			// stop early - parameters are over
+			return false
+		}
+	}
+
+	return false
+}
+
+// walkRecv walks an ORECV node whose value is discarded, lowering it
+// to a chanrecv1 runtime call with a nil destination.
+func walkRecv(n *ir.UnaryExpr) ir.Node {
+	if n.Typecheck() == 0 {
+		base.Fatalf("missing typecheck: %+v", n)
+	}
+	init := n.Init()
+	n.PtrInit().Set(nil)
+
+	n.X = walkExpr(n.X, &init)
+	call := walkExpr(mkcall1(chanfn("chanrecv1", 2, n.X.Type()), nil, &init, n.X, typecheck.NodNil()), &init)
+	return ir.InitExpr(init, call)
+}
+
+// convas inserts an assignment conversion on the RHS of OAS node n
+// when the two sides have different (non-identical) types, walking
+// the converted RHS. It also refreshes n's HasCall flag on return.
+func convas(n *ir.AssignStmt, init *ir.Nodes) *ir.AssignStmt {
+	if n.Op() != ir.OAS {
+		base.Fatalf("convas: not OAS %v", n.Op())
+	}
+	defer updateHasCall(n)
+
+	n.SetTypecheck(1)
+
+	if n.X == nil || n.Y == nil {
+		return n
+	}
+
+	lt := n.X.Type()
+	rt := n.Y.Type()
+	if lt == nil || rt == nil {
+		return n
+	}
+
+	if ir.IsBlank(n.X) {
+		// Assignment to _: no conversion needed, just pick a default type.
+		n.Y = typecheck.DefaultLit(n.Y, nil)
+		return n
+	}
+
+	if !types.Identical(lt, rt) {
+		n.Y = typecheck.AssignConv(n.Y, lt, "assignment")
+		n.Y = walkExpr(n.Y, init)
+	}
+	types.CalcSize(n.Y.Type())
+
+	return n
+}
+
+// stop is a sentinel error used to terminate traversals early.
+var stop = errors.New("stop")
+
+// paramstoheap returns code to allocate memory for heap-escaped parameters
+// and to copy non-result parameters' values from the stack.
+func paramstoheap(params *types.Type) []ir.Node {
+	var nn []ir.Node
+	for _, t := range params.Fields().Slice() {
+		v := ir.AsNode(t.Nname)
+		if v != nil && v.Sym() != nil && strings.HasPrefix(v.Sym().Name, "~r") { // unnamed result
+			v = nil
+		}
+		if v == nil {
+			continue
+		}
+
+		if stackcopy := v.Name().Stackcopy; stackcopy != nil {
+			nn = append(nn, walkStmt(ir.NewDecl(base.Pos, ir.ODCL, v)))
+			if stackcopy.Class_ == ir.PPARAM {
+				// Only input parameters are copied in; results are
+				// copied back out by returnsfromheap.
+				nn = append(nn, walkStmt(typecheck.Stmt(ir.NewAssignStmt(base.Pos, v, stackcopy))))
+			}
+		}
+	}
+
+	return nn
+}
+
+// zeroResults zeros the return values at the start of the function.
+// We need to do this very early in the function. Defer might stop a
+// panic and show the return values as they exist at the time of
+// panic. For precise stacks, the garbage collector assumes results
+// are always live, so we need to zero them before any allocations,
+// even allocations to move params/results to the heap.
+// The generated code is added to Curfn's Enter list.
+func zeroResults() {
+	for _, f := range ir.CurFunc.Type().Results().Fields().Slice() {
+		v := ir.AsNode(f.Nname)
+		if v != nil && v.Name().Heapaddr != nil {
+			// The local which points to the return value is the
+			// thing that needs zeroing. This is already handled
+			// by a Needzero annotation in plive.go:livenessepilogue.
+			continue
+		}
+		if ir.IsParamHeapCopy(v) {
+			// TODO(josharian/khr): Investigate whether we can switch to "continue" here,
+			// and document more in either case.
+			// In the review of CL 114797, Keith wrote (roughly):
+			// I don't think the zeroing below matters.
+			// The stack return value will never be marked as live anywhere in the function.
+			// It is not written to until deferreturn returns.
+			v = v.Name().Stackcopy
+		}
+		// Zero the stack location containing f.
+		ir.CurFunc.Enter.Append(ir.NewAssignStmt(ir.CurFunc.Pos(), v, nil))
+	}
+}
+
+// returnsfromheap returns code to copy values for heap-escaped parameters
+// back to the stack. Only PPARAMOUT stack copies are written back.
+func returnsfromheap(params *types.Type) []ir.Node {
+	var nn []ir.Node
+	for _, t := range params.Fields().Slice() {
+		v := ir.AsNode(t.Nname)
+		if v == nil {
+			continue
+		}
+		if stackcopy := v.Name().Stackcopy; stackcopy != nil && stackcopy.Class_ == ir.PPARAMOUT {
+			nn = append(nn, walkStmt(typecheck.Stmt(ir.NewAssignStmt(base.Pos, stackcopy, v))))
+		}
+	}
+
+	return nn
+}
+
+// heapmoves generates code to handle migrating heap-escaped parameters
+// between the stack and the heap. The generated code is added to Curfn's
+// Enter and Exit lists: copies in (receiver, params, results) on entry,
+// result copies back out on exit.
+func heapmoves() {
+	lno := base.Pos
+	base.Pos = ir.CurFunc.Pos()
+	nn := paramstoheap(ir.CurFunc.Type().Recvs())
+	nn = append(nn, paramstoheap(ir.CurFunc.Type().Params())...)
+	nn = append(nn, paramstoheap(ir.CurFunc.Type().Results())...)
+	ir.CurFunc.Enter.Append(nn...)
+	base.Pos = ir.CurFunc.Endlineno
+	ir.CurFunc.Exit.Append(returnsfromheap(ir.CurFunc.Type().Results())...)
+	base.Pos = lno
+}
+
+// vmkcall builds, typechecks, and walks a call to fn with arguments
+// va, forcing the call's result type to t (which may be nil for
+// void-like runtime helpers). fn must have function type and exactly
+// len(va) parameters.
+func vmkcall(fn ir.Node, t *types.Type, init *ir.Nodes, va []ir.Node) *ir.CallExpr {
+	if fn.Type() == nil || fn.Type().Kind() != types.TFUNC {
+		base.Fatalf("mkcall %v %v", fn, fn.Type())
+	}
+
+	n := fn.Type().NumParams()
+	if n != len(va) {
+		base.Fatalf("vmkcall %v needs %v args got %v", fn, n, len(va))
+	}
+
+	call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, va)
+	typecheck.Call(call)
+	call.SetType(t)
+	return walkExpr(call, init).(*ir.CallExpr)
+}
+
+// mkcall builds a call to the named runtime function; see vmkcall.
+func mkcall(name string, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.CallExpr {
+	return vmkcall(typecheck.LookupRuntime(name), t, init, args)
+}
+
+// mkcall1 builds a call to an already-resolved function node; see vmkcall.
+func mkcall1(fn ir.Node, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.CallExpr {
+	return vmkcall(fn, t, init, args)
+}
+
+// chanfn returns the named runtime channel function specialized to
+// channel type t. n is the number of element-type arguments the
+// runtime function takes (1 or 2).
+func chanfn(name string, n int, t *types.Type) ir.Node {
+	if !t.IsChan() {
+		base.Fatalf("chanfn %v", t)
+	}
+	fn := typecheck.LookupRuntime(name)
+	switch n {
+	default:
+		base.Fatalf("chanfn %d", n)
+	case 1:
+		fn = typecheck.SubstArgTypes(fn, t.Elem())
+	case 2:
+		fn = typecheck.SubstArgTypes(fn, t.Elem(), t.Elem())
+	}
+	return fn
+}
+
+// mapfn returns the named runtime map function specialized to map
+// type t (key, elem, key, elem argument substitution).
+func mapfn(name string, t *types.Type) ir.Node {
+	if !t.IsMap() {
+		base.Fatalf("mapfn %v", t)
+	}
+	fn := typecheck.LookupRuntime(name)
+	fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), t.Key(), t.Elem())
+	return fn
+}
+
+// mapfndel returns the named runtime map-delete function specialized
+// to map type t (key, elem, key argument substitution).
+func mapfndel(name string, t *types.Type) ir.Node {
+	if !t.IsMap() {
+		base.Fatalf("mapfn %v", t)
+	}
+	fn := typecheck.LookupRuntime(name)
+	fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), t.Key())
+	return fn
+}
+
+// Fast-path variants for runtime map operations, indexed by the
+// result of mapfast.
+const (
+	mapslow = iota
+	mapfast32
+	mapfast32ptr
+	mapfast64
+	mapfast64ptr
+	mapfaststr
+	nmapfast
+)
+
+// mapnames holds one runtime function name per fast-path variant.
+type mapnames [nmapfast]string
+
+// mkmapnames derives the fast-path runtime function names from a base
+// name; ptr is the suffix used for the pointer-key variants.
+func mkmapnames(base string, ptr string) mapnames {
+	return mapnames{base, base + "_fast32", base + "_fast32" + ptr, base + "_fast64", base + "_fast64" + ptr, base + "_faststr"}
+}
+
+var mapaccess1 = mkmapnames("mapaccess1", "")
+var mapaccess2 = mkmapnames("mapaccess2", "")
+var mapassign = mkmapnames("mapassign", "ptr")
+var mapdelete = mkmapnames("mapdelete", "")
+
+// mapfast selects the runtime fast-path variant (mapslow, mapfast32,
+// ...) applicable to map type t, based on element size and the key's
+// algorithm type.
+func mapfast(t *types.Type) int {
+	// Check runtime/map.go:maxElemSize before changing.
+	if t.Elem().Width > 128 {
+		return mapslow
+	}
+	switch reflectdata.AlgType(t.Key()) {
+	case types.AMEM32:
+		if !t.Key().HasPointers() {
+			return mapfast32
+		}
+		if types.PtrSize == 4 {
+			return mapfast32ptr
+		}
+		base.Fatalf("small pointer %v", t.Key())
+	case types.AMEM64:
+		if !t.Key().HasPointers() {
+			return mapfast64
+		}
+		if types.PtrSize == 8 {
+			return mapfast64ptr
+		}
+		// Two-word object, at least one of which is a pointer.
+		// Use the slow path.
+	case types.ASTRING:
+		return mapfaststr
+	}
+	return mapslow
+}
+
+// walkAppendArgs walks the arguments of an append call and makes each
+// one cheap (re-evaluable without side effects), since append may
+// modify the slice the later arguments index into.
+func walkAppendArgs(n *ir.CallExpr, init *ir.Nodes) {
+	walkExprListSafe(n.Args, init)
+
+	// walkexprlistsafe will leave OINDEX (s[n]) alone if both s
+	// and n are name or literal, but those may index the slice we're
+	// modifying here. Fix explicitly.
+	ls := n.Args
+	for i1, n1 := range ls {
+		ls[i1] = cheapExpr(n1, init)
+	}
+}
+
+// Rewrite
+//	go builtin(x, y, z)
+// into
+//	go func(a1, a2, a3) {
+//		builtin(a1, a2, a3)
+//	}(x, y, z)
+// for print, println, and delete.
+//
+// Rewrite
+//	go f(x, y, uintptr(unsafe.Pointer(z)))
+// into
+//	go func(a1, a2, a3) {
+//		builtin(a1, a2, uintptr(a3))
+//	}(x, y, unsafe.Pointer(z))
+// for function contains unsafe-uintptr arguments.
+
+// wrapCall_prgen numbers the generated wrapper functions (see wrapCall).
+var wrapCall_prgen int
+
+// walkCheckPtrArithmeticMarker is used only for its address, as a
+// marker value.
+var walkCheckPtrArithmeticMarker byte
+
+// appendWalkStmt typechecks and walks stmt and then appends it to init.
+func appendWalkStmt(init *ir.Nodes, stmt ir.Node) {
+	op := stmt.Op()
+	n := typecheck.Stmt(stmt)
+	if op == ir.OAS || op == ir.OAS2 {
+		// If the assignment has side effects, walkexpr will append them
+		// directly to init for us, while walkstmt will wrap it in an OBLOCK.
+		// We need to append them directly.
+		// TODO(rsc): Clean this up.
+		n = walkExpr(n, init)
+	} else {
+		n = walkStmt(n)
+	}
+	init.Append(n)
+}
+
+// The max number of defers in a function using open-coded defers. We enforce this
+// limit because the deferBits bitmask is currently a single byte (to minimize code size)
+const maxOpenDefers = 8
+
+// backingArrayPtrLen extracts the pointer and length from a slice or string.
+// This constructs two nodes referring to n, so n must be a cheapexpr.
+func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) {
+	var init ir.Nodes
+	c := cheapExpr(n, &init)
+	if c != n || len(init) != 0 {
+		base.Fatalf("backingArrayPtrLen not cheap: %v", n)
+	}
+	ptr = ir.NewUnaryExpr(base.Pos, ir.OSPTR, n)
+	if n.Type().IsString() {
+		ptr.SetType(types.Types[types.TUINT8].PtrTo())
+	} else {
+		ptr.SetType(n.Type().Elem().PtrTo())
+	}
+	length = ir.NewUnaryExpr(base.Pos, ir.OLEN, n)
+	length.SetType(types.Types[types.TINT])
+	return ptr, length
+}
+
+// updateHasCall checks whether expression n contains any function
+// calls and sets the n.HasCall flag if so.
+func updateHasCall(n ir.Node) {
+ if n == nil {
+ return
+ }
+ n.SetHasCall(calcHasCall(n))
+}
+
+func calcHasCall(n ir.Node) bool {
+ if len(n.Init()) != 0 {
+ // TODO(mdempsky): This seems overly conservative.
+ return true
+ }
+
+ switch n.Op() {
+ default:
+ base.Fatalf("calcHasCall %+v", n)
+ panic("unreachable")
+
+ case ir.OLITERAL, ir.ONIL, ir.ONAME, ir.OTYPE, ir.ONAMEOFFSET:
+ if n.HasCall() {
+ base.Fatalf("OLITERAL/ONAME/OTYPE should never have calls: %+v", n)
+ }
+ return false
+ case ir.OCALL, ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
+ return true
+ case ir.OANDAND, ir.OOROR:
+ // hard with instrumented code
+ n := n.(*ir.LogicalExpr)
+ if base.Flag.Cfg.Instrumenting {
+ return true
+ }
+ return n.X.HasCall() || n.Y.HasCall()
+ case ir.OINDEX, ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR,
+ ir.ODEREF, ir.ODOTPTR, ir.ODOTTYPE, ir.ODIV, ir.OMOD:
+ // These ops might panic, make sure they are done
+ // before we start marshaling args for a call. See issue 16760.
+ return true
+
+ // When using soft-float, these ops might be rewritten to function calls
+ // so we ensure they are evaluated first.
+ case ir.OADD, ir.OSUB, ir.OMUL:
+ n := n.(*ir.BinaryExpr)
+ if ssagen.Arch.SoftFloat && (types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) {
+ return true
+ }
+ return n.X.HasCall() || n.Y.HasCall()
+ case ir.ONEG:
+ n := n.(*ir.UnaryExpr)
+ if ssagen.Arch.SoftFloat && (types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) {
+ return true
+ }
+ return n.X.HasCall()
+ case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
+ n := n.(*ir.BinaryExpr)
+ if ssagen.Arch.SoftFloat && (types.IsFloat[n.X.Type().Kind()] || types.IsComplex[n.X.Type().Kind()]) {
+ return true
+ }
+ return n.X.HasCall() || n.Y.HasCall()
+ case ir.OCONV:
+ n := n.(*ir.ConvExpr)
+ if ssagen.Arch.SoftFloat && ((types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) || (types.IsFloat[n.X.Type().Kind()] || types.IsComplex[n.X.Type().Kind()])) {
+ return true
+ }
+ return n.X.HasCall()
+
+ case ir.OAND, ir.OANDNOT, ir.OLSH, ir.OOR, ir.ORSH, ir.OXOR, ir.OCOPY, ir.OCOMPLEX, ir.OEFACE:
+ n := n.(*ir.BinaryExpr)
+ return n.X.HasCall() || n.Y.HasCall()
+
+ case ir.OAS:
+ n := n.(*ir.AssignStmt)
+ return n.X.HasCall() || n.Y != nil && n.Y.HasCall()
+
+ case ir.OADDR:
+ n := n.(*ir.AddrExpr)
+ return n.X.HasCall()
+ case ir.OPAREN:
+ n := n.(*ir.ParenExpr)
+ return n.X.HasCall()
+ case ir.OBITNOT, ir.ONOT, ir.OPLUS, ir.ORECV,
+ ir.OALIGNOF, ir.OCAP, ir.OCLOSE, ir.OIMAG, ir.OLEN, ir.ONEW,
+ ir.OOFFSETOF, ir.OPANIC, ir.OREAL, ir.OSIZEOF,
+ ir.OCHECKNIL, ir.OCFUNC, ir.OIDATA, ir.OITAB, ir.ONEWOBJ, ir.OSPTR, ir.OVARDEF, ir.OVARKILL, ir.OVARLIVE:
+ n := n.(*ir.UnaryExpr)
+ return n.X.HasCall()
+ case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER:
+ n := n.(*ir.SelectorExpr)
+ return n.X.HasCall()
+
+ case ir.OGETG, ir.OCLOSUREREAD, ir.OMETHEXPR:
+ return false
+
+ // TODO(rsc): These look wrong in various ways but are what calcHasCall has always done.
+ case ir.OADDSTR:
+ // TODO(rsc): This used to check left and right, which are not part of OADDSTR.
+ return false
+ case ir.OBLOCK:
+ // TODO(rsc): Surely the block's statements matter.
+ return false
+ case ir.OCONVIFACE, ir.OCONVNOP, ir.OBYTES2STR, ir.OBYTES2STRTMP, ir.ORUNES2STR, ir.OSTR2BYTES, ir.OSTR2BYTESTMP, ir.OSTR2RUNES, ir.ORUNESTR:
+ // TODO(rsc): Some conversions are themselves calls, no?
+ n := n.(*ir.ConvExpr)
+ return n.X.HasCall()
+ case ir.ODOTTYPE2:
+ // TODO(rsc): Shouldn't this be up with ODOTTYPE above?
+ n := n.(*ir.TypeAssertExpr)
+ return n.X.HasCall()
+ case ir.OSLICEHEADER:
+ // TODO(rsc): What about len and cap?
+ n := n.(*ir.SliceHeaderExpr)
+ return n.Ptr.HasCall()
+ case ir.OAS2DOTTYPE, ir.OAS2FUNC:
+ // TODO(rsc): Surely we need to check List and Rlist.
+ return false
+ }
+}
+
+// itabType loads the _type field from a runtime.itab struct.
+func itabType(itab ir.Node) ir.Node {
+ typ := ir.NewSelectorExpr(base.Pos, ir.ODOTPTR, itab, nil)
+ typ.SetType(types.NewPtr(types.Types[types.TUINT8]))
+ typ.SetTypecheck(1)
+ typ.Offset = int64(types.PtrSize) // offset of _type in runtime.itab
+ typ.SetBounded(true) // guaranteed not to fault
+ return typ
+}
+
+// ifaceData loads the data field from an interface.
+// The concrete type must be known to have type t.
+// It follows the pointer if !isdirectiface(t).
+func ifaceData(pos src.XPos, n ir.Node, t *types.Type) ir.Node {
+ if t.IsInterface() {
+ base.Fatalf("ifaceData interface: %v", t)
+ }
+ ptr := ir.NewUnaryExpr(pos, ir.OIDATA, n)
+ if types.IsDirectIface(t) {
+ ptr.SetType(t)
+ ptr.SetTypecheck(1)
+ return ptr
+ }
+ ptr.SetType(types.NewPtr(t))
+ ptr.SetTypecheck(1)
+ ind := ir.NewStarExpr(pos, ptr)
+ ind.SetType(t)
+ ind.SetTypecheck(1)
+ ind.SetBounded(true)
+ return ind
+}
import (
"cmd/compile/internal/base"
- "cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
+ "cmd/compile/internal/objw"
"cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/wasm"
"cmd/internal/objabi"
)
-func Init(arch *gc.Arch) {
+func Init(arch *ssagen.ArchInfo) {
arch.LinkArch = &wasm.Linkwasm
arch.REGSP = wasm.REG_SP
arch.MAXWIDTH = 1 << 50
arch.SSAGenBlock = ssaGenBlock
}
-func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
+func zeroRange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
if cnt == 0 {
return p
}
}
for i := int64(0); i < cnt; i += 8 {
- p = pp.Appendpp(p, wasm.AGet, obj.TYPE_REG, wasm.REG_SP, 0, 0, 0, 0)
- p = pp.Appendpp(p, wasm.AI64Const, obj.TYPE_CONST, 0, 0, 0, 0, 0)
- p = pp.Appendpp(p, wasm.AI64Store, 0, 0, 0, obj.TYPE_CONST, 0, off+i)
+ p = pp.Append(p, wasm.AGet, obj.TYPE_REG, wasm.REG_SP, 0, 0, 0, 0)
+ p = pp.Append(p, wasm.AI64Const, obj.TYPE_CONST, 0, 0, 0, 0, 0)
+ p = pp.Append(p, wasm.AI64Store, 0, 0, 0, obj.TYPE_CONST, 0, off+i)
}
return p
}
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
return pp.Prog(wasm.ANop)
}
-func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
+func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
}
-func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if next != b.Succs[0].Block() {
}
}
-func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
switch v.Op {
case ssa.OpWasmLoweredStaticCall, ssa.OpWasmLoweredClosureCall, ssa.OpWasmLoweredInterCall:
s.PrepareCall(v)
- if call, ok := v.Aux.(*ssa.AuxCall); ok && call.Fn == gc.Deferreturn {
+ if call, ok := v.Aux.(*ssa.AuxCall); ok && call.Fn == ir.Syms.Deferreturn {
// add a resume point before call to deferreturn so it can be called again via jmpdefer
s.Prog(wasm.ARESUMEPOINT)
}
getValue32(s, v.Args[1])
i32Const(s, int32(v.AuxInt))
p := s.Prog(wasm.ACall)
- p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: gc.WasmMove}
+ p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmMove}
case ssa.OpWasmLoweredZero:
getValue32(s, v.Args[0])
i32Const(s, int32(v.AuxInt))
p := s.Prog(wasm.ACall)
- p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: gc.WasmZero}
+ p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmZero}
case ssa.OpWasmLoweredNilCheck:
getValue64(s, v.Args[0])
s.Prog(wasm.AI64Eqz)
s.Prog(wasm.AIf)
p := s.Prog(wasm.ACALLNORESUME)
- p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: gc.SigPanic}
+ p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.SigPanic}
s.Prog(wasm.AEnd)
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
getReg(s, wasm.REG_SP)
getValue64(s, v.Args[0])
p := s.Prog(storeOp(v.Type))
- gc.AddrAuto(&p.To, v)
+ ssagen.AddrAuto(&p.To, v)
default:
if v.Type.IsMemory() {
}
}
-func ssaGenValueOnStack(s *gc.SSAGenState, v *ssa.Value, extend bool) {
+func ssaGenValueOnStack(s *ssagen.State, v *ssa.Value, extend bool) {
switch v.Op {
case ssa.OpWasmLoweredGetClosurePtr:
getReg(s, wasm.REG_CTXT)
p.From.Type = obj.TYPE_ADDR
switch v.Aux.(type) {
case *obj.LSym:
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
case *ir.Name:
p.From.Reg = v.Args[0].Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
default:
panic("wasm: bad LoweredAddr")
}
if v.Type.Size() == 8 {
// Division of int64 needs helper function wasmDiv to handle the MinInt64 / -1 case.
p := s.Prog(wasm.ACall)
- p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: gc.WasmDiv}
+ p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmDiv}
break
}
s.Prog(wasm.AI64DivS)
s.Prog(wasm.AF64PromoteF32)
}
p := s.Prog(wasm.ACall)
- p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: gc.WasmTruncS}
+ p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmTruncS}
}
case ssa.OpWasmI64TruncSatF32U, ssa.OpWasmI64TruncSatF64U:
s.Prog(wasm.AF64PromoteF32)
}
p := s.Prog(wasm.ACall)
- p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: gc.WasmTruncU}
+ p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmTruncU}
}
case ssa.OpWasmF32DemoteF64:
case ssa.OpLoadReg:
p := s.Prog(loadOp(v.Type))
- gc.AddrAuto(&p.From, v.Args[0])
+ ssagen.AddrAuto(&p.From, v.Args[0])
case ssa.OpCopy:
getValue64(s, v.Args[0])
}
}
-func getValue32(s *gc.SSAGenState, v *ssa.Value) {
+func getValue32(s *ssagen.State, v *ssa.Value) {
if v.OnWasmStack {
s.OnWasmStackSkipped--
ssaGenValueOnStack(s, v, false)
}
}
-func getValue64(s *gc.SSAGenState, v *ssa.Value) {
+func getValue64(s *ssagen.State, v *ssa.Value) {
if v.OnWasmStack {
s.OnWasmStackSkipped--
ssaGenValueOnStack(s, v, true)
}
}
-func i32Const(s *gc.SSAGenState, val int32) {
+func i32Const(s *ssagen.State, val int32) {
p := s.Prog(wasm.AI32Const)
p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(val)}
}
-func i64Const(s *gc.SSAGenState, val int64) {
+func i64Const(s *ssagen.State, val int64) {
p := s.Prog(wasm.AI64Const)
p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: val}
}
-func f32Const(s *gc.SSAGenState, val float64) {
+func f32Const(s *ssagen.State, val float64) {
p := s.Prog(wasm.AF32Const)
p.From = obj.Addr{Type: obj.TYPE_FCONST, Val: val}
}
-func f64Const(s *gc.SSAGenState, val float64) {
+func f64Const(s *ssagen.State, val float64) {
p := s.Prog(wasm.AF64Const)
p.From = obj.Addr{Type: obj.TYPE_FCONST, Val: val}
}
-func getReg(s *gc.SSAGenState, reg int16) {
+func getReg(s *ssagen.State, reg int16) {
p := s.Prog(wasm.AGet)
p.From = obj.Addr{Type: obj.TYPE_REG, Reg: reg}
}
-func setReg(s *gc.SSAGenState, reg int16) {
+func setReg(s *ssagen.State, reg int16) {
p := s.Prog(wasm.ASet)
p.To = obj.Addr{Type: obj.TYPE_REG, Reg: reg}
}
import (
"cmd/compile/internal/base"
- "cmd/compile/internal/gc"
+ "cmd/compile/internal/ssagen"
"cmd/internal/obj/x86"
"cmd/internal/objabi"
"fmt"
"os"
)
-func Init(arch *gc.Arch) {
+func Init(arch *ssagen.ArchInfo) {
arch.LinkArch = &x86.Link386
arch.REGSP = x86.REGSP
arch.SSAGenValue = ssaGenValue
package x86
import (
- "cmd/compile/internal/gc"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/x86"
)
-func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, ax *uint32) *obj.Prog {
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, ax *uint32) *obj.Prog {
if cnt == 0 {
return p
}
if *ax == 0 {
- p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
+ p = pp.Append(p, x86.AMOVL, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
*ax = 1
}
- if cnt <= int64(4*gc.Widthreg) {
- for i := int64(0); i < cnt; i += int64(gc.Widthreg) {
- p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off+i)
+ if cnt <= int64(4*types.RegSize) {
+ for i := int64(0); i < cnt; i += int64(types.RegSize) {
+ p = pp.Append(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off+i)
}
- } else if cnt <= int64(128*gc.Widthreg) {
- p = pp.Appendpp(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
- p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, 1*(128-cnt/int64(gc.Widthreg)))
- p.To.Sym = gc.Duffzero
+ } else if cnt <= int64(128*types.RegSize) {
+ p = pp.Append(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
+ p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, 1*(128-cnt/int64(types.RegSize)))
+ p.To.Sym = ir.Syms.Duffzero
} else {
- p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_CONST, 0, cnt/int64(gc.Widthreg), obj.TYPE_REG, x86.REG_CX, 0)
- p = pp.Appendpp(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
- p = pp.Appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
- p = pp.Appendpp(p, x86.ASTOSL, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+ p = pp.Append(p, x86.AMOVL, obj.TYPE_CONST, 0, cnt/int64(types.RegSize), obj.TYPE_REG, x86.REG_CX, 0)
+ p = pp.Append(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
+ p = pp.Append(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+ p = pp.Append(p, x86.ASTOSL, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
}
return p
}
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
// See comment in ../amd64/ggen.go.
p := pp.Prog(x86.AXCHGL)
p.From.Type = obj.TYPE_REG
"math"
"cmd/compile/internal/base"
- "cmd/compile/internal/gc"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/x86"
)
// markMoves marks any MOVXconst ops that need to avoid clobbering flags.
-func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
+func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
flive := b.FlagsLiveAtEnd
for _, c := range b.ControlValues() {
flive = c.Type.IsFlags() || flive
// dest := dest(To) op src(From)
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
-func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog {
+func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
p := s.Prog(op)
p.From.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG
return p
}
-func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
switch v.Op {
case ssa.Op386ADDL:
r := v.Reg()
p.From.Type = obj.TYPE_MEM
p.From.Reg = r
p.From.Index = i
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.Op386LEAL:
p := s.Prog(x86.ALEAL)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.Op386CMPL, ssa.Op386CMPW, ssa.Op386CMPB,
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Args[1].Reg()
case ssa.Op386CMPLconstload, ssa.Op386CMPWconstload, ssa.Op386CMPBconstload:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
- gc.AddAux2(&p.From, v, sc.Off())
+ ssagen.AddAux2(&p.From, v, sc.Off())
p.To.Type = obj.TYPE_CONST
p.To.Offset = sc.Val()
case ssa.Op386MOVLconst:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.Op386MOVBloadidx1, ssa.Op386MOVWloadidx1, ssa.Op386MOVLloadidx1, ssa.Op386MOVSSloadidx1, ssa.Op386MOVSDloadidx1,
}
p.From.Reg = r
p.From.Index = i
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.Op386ADDLloadidx4, ssa.Op386SUBLloadidx4, ssa.Op386MULLloadidx4,
p.From.Reg = v.Args[1].Reg()
p.From.Index = v.Args[2].Reg()
p.From.Scale = 4
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
if v.Reg() != v.Args[0].Reg() {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[1].Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
if v.Reg() != v.Args[0].Reg() {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
case ssa.Op386ADDLconstmodify:
sc := v.AuxValAndOff()
val := sc.Val()
off := sc.Off()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux2(&p.To, v, off)
+ ssagen.AddAux2(&p.To, v, off)
break
}
fallthrough
p.From.Offset = val
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux2(&p.To, v, off)
+ ssagen.AddAux2(&p.To, v, off)
case ssa.Op386MOVBstoreidx1, ssa.Op386MOVWstoreidx1, ssa.Op386MOVLstoreidx1, ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSDstoreidx1,
ssa.Op386MOVSDstoreidx8, ssa.Op386MOVSSstoreidx4, ssa.Op386MOVLstoreidx4, ssa.Op386MOVWstoreidx2,
ssa.Op386ADDLmodifyidx4, ssa.Op386SUBLmodifyidx4, ssa.Op386ANDLmodifyidx4, ssa.Op386ORLmodifyidx4, ssa.Op386XORLmodifyidx4:
}
p.To.Reg = r
p.To.Index = i
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
case ssa.Op386MOVLstoreconst, ssa.Op386MOVWstoreconst, ssa.Op386MOVBstoreconst:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = sc.Val()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux2(&p.To, v, sc.Off())
+ ssagen.AddAux2(&p.To, v, sc.Off())
case ssa.Op386ADDLconstmodifyidx4:
sc := v.AuxValAndOff()
val := sc.Val()
p.To.Reg = v.Args[0].Reg()
p.To.Scale = 4
p.To.Index = v.Args[1].Reg()
- gc.AddAux2(&p.To, v, off)
+ ssagen.AddAux2(&p.To, v, off)
break
}
fallthrough
p.To.Type = obj.TYPE_MEM
p.To.Reg = r
p.To.Index = i
- gc.AddAux2(&p.To, v, sc.Off())
+ ssagen.AddAux2(&p.To, v, sc.Off())
case ssa.Op386MOVWLSX, ssa.Op386MOVBLSX, ssa.Op386MOVWLZX, ssa.Op386MOVBLZX,
ssa.Op386CVTSL2SS, ssa.Op386CVTSL2SD,
ssa.Op386CVTTSS2SL, ssa.Op386CVTTSD2SL,
case ssa.Op386DUFFZERO:
p := s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_ADDR
- p.To.Sym = gc.Duffzero
+ p.To.Sym = ir.Syms.Duffzero
p.To.Offset = v.AuxInt
case ssa.Op386DUFFCOPY:
p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_ADDR
- p.To.Sym = gc.Duffcopy
+ p.To.Sym = ir.Syms.Duffcopy
p.To.Offset = v.AuxInt
case ssa.OpCopy: // TODO: use MOVLreg for reg->reg copies instead of OpCopy?
return
}
p := s.Prog(loadByType(v.Type))
- gc.AddrAuto(&p.From, v.Args[0])
+ ssagen.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
- gc.AddrAuto(&p.To, v)
+ ssagen.AddrAuto(&p.To, v)
case ssa.Op386LoweredGetClosurePtr:
// Closure pointer is DX.
- gc.CheckLoweredGetClosurePtr(v)
+ ssagen.CheckLoweredGetClosurePtr(v)
case ssa.Op386LoweredGetG:
r := v.Reg()
// See the comments in cmd/internal/obj/x86/obj6.go
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
+ p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
s.UseArgs(8) // space used in callee args area by assembly stubs
case ssa.Op386LoweredPanicExtendA, ssa.Op386LoweredPanicExtendB, ssa.Op386LoweredPanicExtendC:
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = gc.ExtendCheckFunc[v.AuxInt]
+ p.To.Sym = ssagen.ExtendCheckFunc[v.AuxInt]
s.UseArgs(12) // space used in callee args area by assembly stubs
case ssa.Op386CALLstatic, ssa.Op386CALLclosure, ssa.Op386CALLinter:
p.From.Reg = x86.REG_AX
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
p.From.Offset = 0xdeaddead
p.To.Type = obj.TYPE_MEM
p.To.Reg = x86.REG_SP
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
default:
v.Fatalf("genValue not implemented: %s", v.LongString())
}
ssa.Block386NAN: {x86.AJPS, x86.AJPC},
}
-var eqfJumps = [2][2]gc.IndexJump{
+var eqfJumps = [2][2]ssagen.IndexJump{
{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPS, Index: 1}}, // next == b.Succs[0]
{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPC, Index: 0}}, // next == b.Succs[1]
}
-var nefJumps = [2][2]gc.IndexJump{
+var nefJumps = [2][2]ssagen.IndexJump{
{{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPC, Index: 1}}, // next == b.Succs[0]
{{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPS, Index: 0}}, // next == b.Succs[1]
}
-func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockDefer:
// defer returns in rax:
p.To.Reg = x86.REG_AX
p = s.Prog(x86.AJNE)
p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
case ssa.BlockRet:
"cmd/compile/internal/ppc64"
"cmd/compile/internal/riscv64"
"cmd/compile/internal/s390x"
+ "cmd/compile/internal/ssagen"
"cmd/compile/internal/wasm"
"cmd/compile/internal/x86"
"cmd/internal/objabi"
"os"
)
-var archInits = map[string]func(*gc.Arch){
+var archInits = map[string]func(*ssagen.ArchInfo){
"386": x86.Init,
"amd64": amd64.Init,
"arm": arm.Init,
s += sixteenSpaces[:16-len(s)]
return s
}
+
+// architecture-independent object file output
+const HeaderSize = 60
+
+func ReadHeader(b *bufio.Reader, name string) int {
+ var buf [HeaderSize]byte
+ if _, err := io.ReadFull(b, buf[:]); err != nil {
+ return -1
+ }
+ aname := strings.Trim(string(buf[0:16]), " ")
+ if !strings.HasPrefix(aname, name) {
+ return -1
+ }
+ asize := strings.Trim(string(buf[48:58]), " ")
+ i, _ := strconv.Atoi(asize)
+ return i
+}
+
+func FormatHeader(arhdr []byte, name string, size int64) {
+ copy(arhdr[:], fmt.Sprintf("%-16s%-12d%-6d%-6d%-8o%-10d`\n", name, 0, 0, 0, 0644, size))
+}